text stringlengths 8 6.05M |
|---|
#coding=utf-8
from rest_framework import serializers
from rest_framework.serializers import ModelSerializer, BaseSerializer, Serializer
from drf_writable_nested import WritableNestedModelSerializer
from TestOnline.models import User
from TestOnline.models import *
class FormatDataTimeField(BaseSerializer):
    """Read-only serializer rendering a datetime as 'YYYY-MM-DD HH:MM'.

    Bug fix: the original called ``instance.now()`` — ``datetime.now`` is
    reachable from an instance, so that returned the *current* time and the
    actual field value was ignored.  Format the instance itself instead.
    """
    def to_representation(self, instance):
        # Format the datetime being serialized, not "now".
        return instance.strftime('%Y-%m-%d %H:%M')
class RegisterSerializer(Serializer):
    """Plain (non-model) serializer validating a username/password payload."""
    username = serializers.CharField()
    password = serializers.CharField()
class UserSerializer(ModelSerializer):
    """User serializer; password is write-only and soft-delete bookkeeping fields are hidden."""
    # Alternative related-field representations, kept for reference:
    # paper_set = serializers.HyperlinkedRelatedField(many=True,read_only=True,view_name='paper-detail')
    # paper_set = serializers.PrimaryKeyRelatedField(many=True,queryset=Paper.objects.filter(deletedAt=None))
    class Meta:
        model = User
        # password may be submitted but is never serialized back out
        extra_kwargs = {'password': {'write_only': True}}
        depth = 0
        exclude = ['createdAt','updatedAt','deletedAt']
class OptionSerializer(ModelSerializer):
    """Full QuestionOption serializer (includes solution data); hides audit fields."""
    class Meta:
        model = QuestionOption
        exclude = ['createdAt','updatedAt','deletedAt']
class OptionNoSolutionSerializer(ModelSerializer):
    """QuestionOption serializer for test takers: only id and content, no answer info."""
    class Meta:
        model = QuestionOption
        fields = ('id','content')
class QuestionNoSolutionSerializer(ModelSerializer):
    """Question serializer for test takers; nested options omit solution data."""
    options = OptionNoSolutionSerializer(many=True)
    class Meta:
        model = Question
        exclude = ['createdAt','updatedAt','deletedAt']
class QuestionSerializer(WritableNestedModelSerializer):
    """Question serializer with writable nested options (drf_writable_nested)."""
    options = OptionSerializer(many=True)
    class Meta:
        model = Question
        # fields = ('options',)
        exclude = ['createdAt','updatedAt','deletedAt']
class QuestionCreateSerializer(ModelSerializer):
    """Flat Question serializer for creation (no nested options)."""
    class Meta:
        model = Question
        exclude = ['createdAt','updatedAt','deletedAt']
class PaperSerializer(ModelSerializer):
    """Paper serializer with fully nested questions (and their options)."""
    # questions = serializers.HyperlinkedRelatedField(many=True,read_only=True,view_name='question-detail')
    questions = QuestionSerializer(many=True)
    class Meta:
        model = Paper
        # depth = 2
        exclude = ['updatedAt','deletedAt']
class PaperCreateSerializer(ModelSerializer):
    """Flat Paper serializer for creation (no nested questions)."""
    class Meta:
        model = Paper
        exclude = ['createdAt','updatedAt','deletedAt']
class CompanySerializer(ModelSerializer):
    """Company account serializer; password is write-only."""
    class Meta:
        model = Company
        exclude = ['createdAt','updatedAt','deletedAt']
        extra_kwargs = {'password': {'write_only': True}}
class AdminSerializer(ModelSerializer):
    """Admin account serializer; password is write-only."""
    class Meta:
        model = Admin
        exclude = ['createdAt','updatedAt','deletedAt']
        extra_kwargs = {'password': {'write_only': True}}
class LoginRecordSerializer(ModelSerializer):
    """LoginRecord serializer; hides audit/soft-delete fields."""
    class Meta:
        model = LoginRecord
        exclude = ['createdAt','updatedAt','deletedAt']
class TypeSerializer(ModelSerializer):
    """Minimal Type serializer: id and type name only."""
    class Meta:
        model = Type
        fields = ('id','type')
class CompanyPermissionSerializer(ModelSerializer):
    """Exposes a company's quota limits (papers, questions, testers)."""
    class Meta:
        model = BusinessPermission
        fields = ('maxPaper','maxQuestion','maxTester')
class SiftQuestionSerializer(Serializer):
    """Query-parameter serializer for filtering/paging the question list.

    -1 / "" are the "no filter" sentinels used by the view layer.
    """
    type = serializers.IntegerField(default=-1,required=False)
    keyword = serializers.CharField(default="",required=False,allow_blank=True)
    difficulty = serializers.IntegerField(default=-1,required=False)
    tag = serializers.IntegerField(default=-1,required=False)
    difficulty_sort = serializers.CharField(default="",required=False)
    page = serializers.IntegerField(default=1,required=False)
class TagSerializer(ModelSerializer):
    """Minimal Tag serializer: id, tag name and owning type."""
    class Meta:
        model = Tag
        fields = ('id','tag','type')
import sys
from itertools import permutations

def enumerate_permutations(n):
    """Return all permutations of 1..n as a list of tuples."""
    return list(permutations(range(1, n + 1)))

if __name__ == "__main__":
    '''
    Given: A positive integer n<=7.
    Print: the total number of permutations of length n, then each
    permutation on its own line (space-separated).
    '''
    n = int(sys.stdin.read().splitlines()[0])
    # Bug fix: the original rebound the name `permutations`, shadowing the
    # itertools import; use a helper with a distinct name instead.
    perms = enumerate_permutations(n)
    print(len(perms))
    for perm in perms:
        print(' '.join(str(x) for x in perm))
# Generated by Django 2.2.3 on 2019-11-01 19:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Order.customer_email optional (blank=True, null=True) in the basket app."""
    dependencies = [
        ('basket', '0003_auto_20191102_0131'),
    ]
    operations = [
        migrations.AlterField(
            model_name='order',
            name='customer_email',
            field=models.EmailField(blank=True, max_length=254, null=True, verbose_name='email'),
        ),
    ]
|
# Generated by Django 3.0.7 on 2020-10-12 10:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make SiteGroup.code, .description and .is_delete nullable in cl_app.

    db_column names are pinned to the legacy (capitalized) database schema.
    """
    dependencies = [
        ('cl_app', '0005_auto_20201012_0655'),
    ]
    operations = [
        migrations.AlterField(
            model_name='sitegroup',
            name='code',
            field=models.CharField(db_column='Code', max_length=20, null=True),
        ),
        migrations.AlterField(
            model_name='sitegroup',
            name='description',
            field=models.CharField(db_column='Description', max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='sitegroup',
            name='is_delete',
            field=models.BooleanField(db_column='Is_Delete', null=True),
        ),
    ]
|
# https://www.hackerrank.com/challenges/2d-array/problem
# Complete the hourglassSum function below.
def hourglassSum(arr):
    """Return the maximum 'hourglass' sum in the 2-D grid *arr*.

    An hourglass is a 3x3 window minus its middle-left and middle-right
    cells.  Every 3x3 anchor position is scanned (the grid is assumed
    square, as in the HackerRank puzzle).
    """
    sums = []
    n = len(arr) - 2
    for top in range(n):
        for left in range(n):
            window = sum(arr[top + dr][left + dc]
                         for dr in range(3) for dc in range(3))
            # Drop the two middle corners to turn the square into an hourglass.
            sums.append(window - arr[top + 1][left] - arr[top + 1][left + 2])
    return max(sums)
|
# subtract two numbers
# author Angelina B
# reads two numbers from stdin, converts them to float, prints the difference
a = float(input("Please enter first number: "))
b = float(input("Please enter second number: "))
print("{} minus {} is {}".format(a, b, a-b))
# Problem
# There are N houses for sale. The i-th house costs Ai dollars to buy. You have a budget of B dollars to spend.
# What is the maximum number of houses you can buy?
# Input
# The first line of the input gives the number of test cases, T. T test cases follow. Each test case begins with a single line containing the two integers N and B. The second line contains N integers. The i-th integer is Ai, the cost of the i-th house.
# Output
# For each test case, output one line containing Case #x: y, where x is the test case number (starting from 1) and y is the maximum number of houses you can buy.
# Limits
# Time limit: 15 seconds per test set.
# Memory limit: 1GB.
# 1 ≤ T ≤ 100.
# 1 ≤ B ≤ 105.
# 1 ≤ Ai ≤ 1000, for all i.
# Test set 1
# 1 ≤ N ≤ 100.
# Test set 2
# 1 ≤ N ≤ 105.
# Sample
# Input
# Output
# 3
# 4 100
# 20 90 40 90
# 4 50
# 30 30 10 10
# 3 300
# 999 999 999
# Case #1: 2
# Case #2: 3
# Case #3: 0
# In Sample Case #1, you have a budget of 100 dollars. You can buy the 1st and 3rd houses for 20 + 40 = 60 dollars.
# In Sample Case #2, you have a budget of 50 dollars. You can buy the 1st, 3rd and 4th houses for 30 + 10 + 10 = 50 dollars.
# In Sample Case #3, you have a budget of 300 dollars. You cannot buy any houses (so the answer is 0).
# Note: Unlike previous editions, in Kick Start 2020, all test sets are visible verdict test sets, meaning you receive instant feedback upon submission.
def max_houses(budget, prices):
    """Return the maximum number of houses affordable within *budget*.

    Greedy proof: buying cheapest-first maximises the count, so sort the
    prices and stop at the first prefix sum that exceeds the budget.
    """
    total = 0
    bought = 0
    for price in sorted(prices):
        total += price
        if total > budget:
            break
        bought += 1
    return bought

if __name__ == "__main__":
    # One answer per test case: first line T, then per case "N B" and N prices.
    # (Original had a redundant `elif totalPrice >= B` plus an unreachable
    # `else: pass`; logic simplified into max_houses above.)
    buyAble = []
    for t in range(int(input())):
        N, B = map(int, input().split())
        housePrices = list(map(int, input().split()))
        buyAble.append(max_houses(B, housePrices))
    for case, answer in enumerate(buyAble, start=1):
        print("Case #{c}: {th}".format(c=case, th=answer))
|
import OpenPNM
class SGL10Test:
    """Smoke test: build a small cubic network and attach SGL10 geometry."""
    def setup_class(self):
        # 3x3x3 cubic pore network; geometry applied to all pores and throats
        self.net = OpenPNM.Network.Cubic(shape=[3, 3, 3])
        self.geo = OpenPNM.Geometry.SGL10(network=self.net,
                                          pores=self.net.Ps,
                                          throats=self.net.Ts)
|
from django.shortcuts import render
from DB_Crawling import route_table
# Create your views here.
def route_table(request):
    """Render the crawled routing table page.

    Bug fixes: this view's name shadows the module-level import of
    ``DB_Crawling.route_table``, so calling ``route_table()`` inside the
    body recursed into the view itself; the context argument was also the
    undefined name ``rout_table`` (NameError) rather than a dict.
    """
    # Re-import under an alias because the view function shadows the name.
    from DB_Crawling import route_table as fetch_route_table
    # NOTE(review): context key assumed to be 'route_table' -- confirm
    # against the cTable/cTable.html template.
    context = {'route_table': fetch_route_table()}
    return render(request, 'cTable/cTable.html', context)
def length_words(string):
    """Group the words of *string* by their length.

    The text is lower-cased and the punctuation characters ,.?! are
    stripped before splitting on whitespace.

    Returns a dict mapping word length -> list of words of that length,
    in order of appearance.

    Bug fix: the original executed ``dic[word_length] = []`` on *every*
    word before the membership test, so each bucket only ever kept its
    most recent word.
    """
    cleaned = string.lower()
    for ch in ",.?!":
        cleaned = cleaned.replace(ch, "")
    dic = {}
    for word in cleaned.split():
        dic.setdefault(len(word), []).append(word)
    return dic

print(length_words("I ate a bowl of cereal out of a dog bowl today."))
|
from werkzeug.security import check_password_hash
class User:
    """Minimal Flask-Login style user backed by a hashed password."""
    def __init__(self, username, email, password):
        # password is expected to be a werkzeug hash, not plaintext
        self.username=username
        self.email=email
        self.password=password
    def is_authenticated(self):
        # every constructed User counts as authenticated
        return True
    def is_active(self):
        return True
    def is_anonymous(self):
        return False
    def get_id(self):
        # username doubles as the session identifier
        return self.username
    def check_password(self, password_input):
        # takes the hash and the input password and return true if they match
        return check_password_hash(self.password, password_input)
#import sys
#input = sys.stdin.readline
def main():
    # Input: "N X" on the first line, then N integers L on the second.
    N, X = map( int, input().split())
    L = list( map( int, input().split()))
    # ans starts at 1 (the state before consuming any element) and grows by
    # one for each prefix of L whose running sum stays within X.
    ans = 1
    now = 0
    for i in range(N):
        now += L[i]
        if now > X:
            break
        ans += 1
    print(ans)

if __name__ == '__main__':
    main()
|
import pytest
import Triangulo
#Exemplo de teste 1(simples sem criar classes)
'''def testa_perimetro():
t = Triangulo.Triangulo(1,1,1)
assert t.perimetro() == 3'''
#Exemplo de teste 3
'''@pytest.mark.parametrize("entrada, esperado", [
((1,1,1),(3)),
(),
(),
()
])'''
'''def testa_perimetro(entrada, esperado):
t = Triangulo.Triangulo(entrada)
assert t.perimetro == esperado'''
class TestaTriangulo:
    # Example test 2 (fixture-based), kept disabled as reference material;
    # the class currently contains no active tests.
    '''@pytest.fixture
    def t(self):
        return Triangulo.Triangulo(1,1,1)
    def testa_perimetro(self, t):
        assert t.perimetro() == 3'''
|
import numpy
import math
import matplotlib.pyplot as pyplot
import pyfits
# Load the FITS cube: axis 0 is the read-out/frame index, axes 1-2 are pixels.
fits = pyfits.open("nr29.fits")
image = fits[0].data
# Frame index used as the time axis for per-pixel linear fits below.
t = numpy.arange(image.shape[0])
print "Loaded data"
def load(image, quadrant):
    # Estimate per-pixel dark current for one 512-column detector quadrant:
    # fit a line through each pixel's time series, median-subtract the
    # slopes, plot histogram/images, and median-filter hot/cold pixels.
    # Returns (raw_slope_image, filtered_slope_image).
    # NOTE(review): there is no branch for quadrant values other than 1-3;
    # image_subset would be unbound (UnboundLocalError) for anything else.
    if quadrant == 1:
        image_subset = image[:, :, 1024:1536]
    if quadrant == 2:
        # quadrant 2 is read out mirrored, hence the reversed column slice
        image_subset = image[:, :, 1535:1023:-1]
    if quadrant == 3:
        image_subset = image[:, :, 1536:2048]
    # Flatten the two spatial axes so polyfit fits all pixels at once.
    image_subset_shape = image_subset.shape
    image_subset = numpy.reshape(image_subset, (image_subset_shape[0], image_subset_shape[1]*image_subset_shape[2]))
    polycoeff = numpy.polyfit(t, image_subset, 1)
    print "Done polyfit"
    # Keep only the slope term; remove the quadrant-wide median level.
    polycoeff = polycoeff[0,:]
    polycoeff = polycoeff - numpy.median(polycoeff)
    image_repo = numpy.reshape(polycoeff, (image_subset_shape[1], image_subset_shape[2]) )
    pyplot.hist(numpy.reshape(image_repo, -1), 512, (-5, 5))
    pyplot.yscale("log")
    pyplot.xlabel("Slope")
    pyplot.savefig("hw4-q%d-hist.png" % quadrant)
    pyplot.clf()
    pyplot.imshow(image_repo[0:512,0:512], vmin=-0.5,vmax=0.5)
    pyplot.savefig("hw4-q%d-darkcurrent.png" % quadrant)
    pyplot.clf()
    # Replace pixels with |slope| > 1 by the median of a 20-sample
    # neighbourhood (in flattened order), skipping the array edges.
    image_repo = numpy.reshape(image_repo, -1)
    image_repo_filtered = numpy.copy(image_repo)
    hotcoldpixels = numpy.append(numpy.where(image_repo > 1.0)[0], numpy.where(image_repo < -1.0)[0])
    for i in hotcoldpixels:
        if i < (len(image_repo) - 10) and i >= 10:
            image_repo_filtered[i] = numpy.median(image_repo[i-10:i+10])
    print "Done Filtering"
    pyplot.imshow(numpy.reshape(image_repo_filtered, (image_subset_shape[1], image_subset_shape[2]))[0:512,0:512], vmin=-0.5, vmax=0.5)
    pyplot.savefig("hw4-q%d-darkcurrent-filtered.png" % quadrant)
    pyplot.clf()
    image_repo = numpy.reshape(image_repo, (image_subset_shape[1], image_subset_shape[2]))
    image_repo_filtered = numpy.reshape(image_repo_filtered, (image_subset_shape[1], image_subset_shape[2]))
    return image_repo,image_repo_filtered
# --- Difference image: quadrant 3 minus mirrored quadrant 2 ----------------
image2 = load(image, 2)[1]
image3 = load(image, 3)[1]
shape = image2.shape
image = image3 - image2
# Remove the per-row average to flatten horizontal banding.
row_average = numpy.sum(image, 1) / shape[1]
image = image - row_average[:, numpy.newaxis]
print "Done subtraction"
pyplot.imshow(image, vmin=-0.2, vmax=1.0)
pyplot.savefig("hw4-image.png")
pyplot.clf()
# Column-wise luminosity profile.
lumi = numpy.sum(image, 0) / shape[0]
pyplot.plot(lumi)
pyplot.xlabel("Column [px]")
pyplot.ylabel("Luminosity [arb]")
pyplot.savefig("hw4-lumi.png")
pyplot.clf()
# Correct the alternating even/odd column jump using the mean absolute
# column-to-column difference (halved, per column pair).
jump_delta = numpy.sum(numpy.abs(numpy.diff(lumi)))/2.0/(lumi.shape[0]-1)
correction = numpy.reshape(numpy.repeat([[jump_delta, -jump_delta]], lumi.shape[0]/2, 0), -1)
print "Done jump correction"
image = image + correction
lumi_jumpcorrected = numpy.sum(image, 0) / shape[0]
pyplot.plot(lumi_jumpcorrected)
pyplot.xlabel("Column [px]")
pyplot.ylabel("Luminosity [arb]")
pyplot.savefig("hw4-lumi-corrected.png")
pyplot.clf()
# --- Estimate image rotation from the FFT phase slope ----------------------
fft_win = numpy.hanning(shape[1])
fft = numpy.fft.rfft(image * fft_win)
fft_freqs = numpy.arange(fft.shape[1])
mag2 = numpy.sum(numpy.abs(fft)**2, 0) #mag^2 of each column
arg = numpy.angle(numpy.sum(fft / numpy.roll(fft, 1, 0), 0)) #divide row i by row i+1 and sum each column
# Weighted linear fit of phase vs. frequency (low frequencies only).
coeff = numpy.polynomial.polynomial.polyfit(fft_freqs[0:130], arg[0:130], 1, w=mag2[0:130])
pyplot.plot(arg)
pyplot.plot(fft_freqs*coeff[1] + coeff[0])
pyplot.xlabel("Row frequency space [px^-1]")
pyplot.ylabel("Arg. [rad]")
pyplot.legend(("Data", "Fit"), 'upper right', fancybox=True)
pyplot.savefig("hw4-rotation-fit.png")
pyplot.clf()
print "Done argument fit for rotation"
print "Rotation factor: %f" % coeff[1]
# Apply the rotation as a per-row phase ramp in frequency space.
fft = numpy.fft.rfft(image)
rotation_factor_row = numpy.arange(fft.shape[0]) - fft.shape[0]/2
rotation_factor_column = numpy.arange(fft.shape[1])
rotation_factor = numpy.exp(-coeff[1]*numpy.outer(rotation_factor_row, rotation_factor_column)*1.0j)
image_rotated = numpy.fft.irfft(fft*rotation_factor)
print "Done image rotation"
pyplot.figure(figsize=(10,5))
pyplot.subplot(121)
pyplot.imshow(image, vmin=-0.2, vmax=1.0, aspect='auto')
pyplot.subplot(122)
pyplot.imshow(image_rotated, vmin=-0.2, vmax=1.0, aspect='auto')
pyplot.savefig("hw4-image-rotation.png")
pyplot.clf()
lumi_rotated = numpy.sum(image_rotated, 0) / shape[0]
pyplot.plot(lumi_jumpcorrected)
pyplot.plot(lumi_rotated)
pyplot.xlabel("Column [px]")
pyplot.ylabel("Luminosity [arb]")
pyplot.legend(("Before rotation", "After rotation"), 'upper right', fancybox=True)
pyplot.savefig("hw4-lumi-rotated.png")
pyplot.clf()
#Chosen galaxy in column 257
# Extract and smooth the spectrum around column 257 with Kaiser windows.
weighting = numpy.kaiser(6,3)
spectrum = numpy.sum(image[:, 254:260] * weighting, 1)[::-1]
weighting = numpy.kaiser(40,8)
weighting = weighting / numpy.sum(weighting)
freq = numpy.arange(spectrum.shape[0])
freq_smoothed = numpy.arange(spectrum.shape[0])[19:-20]
spectrum_smoothed = numpy.convolve(spectrum, weighting, 'valid')
pyplot.plot(freq_smoothed, spectrum_smoothed)
pyplot.xlabel("Frequency for Galaxy in Column 257 [px]")
pyplot.ylabel("Luminosity [arb]")
pyplot.savefig("hw4-spectrum.png")
pyplot.clf()
print "Obtained spectrum from Galaxy col 257"
# --- Fit redshifted galaxy templates to the extracted spectrum -------------
templates = numpy.transpose(numpy.load("galaxy_templates.npz")['gal_spectra'])
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
fitting_funcs = []
params = []
residuals = []
for i in range(1,5):
    template_f = numpy.log(templates[0])[::-1]
    template_flux = templates[i][::-1]
    template_function = interp1d(template_f, template_flux, bounds_error=False, fill_value=0)
    # Model: amplitude a, redshift z, frequency offset c1, flux offset c2.
    # template_function is bound as a default arg to avoid the
    # late-binding-closure pitfall in this loop.
    fittingfunc = lambda v, a, z, c1, c2, template_function=template_function: a*template_function(v/(1.0+z) + c1) + c2
    param, _ = curve_fit(fittingfunc, freq, spectrum, p0=(0.2, 800, 31.64, 0.0))
    fitting_funcs.append(fittingfunc)
    params.append(param)
    residuals.append(numpy.sum((fittingfunc(freq, *param) - spectrum)**2))
print "Done curve fitting"
print "Residuals are:"
print residuals
pyplot.plot(freq_smoothed, spectrum_smoothed)
for i in range(4):
    pyplot.plot(freq, fitting_funcs[i](freq, *params[i]))
pyplot.xlabel("Frequency for Galaxy in Column 257 [px]")
pyplot.ylabel("Luminosity after curve fitting [arb]")
pyplot.legend(("Data", "1068", "m82", "orp220", "ngc6946"), 'upper right', fancybox=True)
pyplot.savefig("hw4-spectrum-fit.png")
pyplot.clf()
# Same plot with the initial-guess parameters, for comparison.
pyplot.plot(freq_smoothed, spectrum_smoothed)
for i in range(4):
    pyplot.plot(freq, fitting_funcs[i](freq, 0.2, 800, 31.64, 0))
pyplot.xlabel("Frequency for Galaxy in Column 257 [px]")
pyplot.ylabel("Luminosity before curve fitting [arb]")
pyplot.legend(("Data", "1068", "m82", "orp220", "ngc6946"), 'upper right', fancybox=True)
pyplot.savefig("hw4-spectrum-unfit.png")
pyplot.clf()
|
from django.apps import AppConfig
class MyappConfig(AppConfig):
    """App config for 'myapp'; wires up signal handlers at startup."""
    name = 'myapp'
    def ready(self):
        # Makes sure all signal handlers are connected
        from myapp import handler # noqa
|
# -*- coding: utf-8 -*-
# @Author : 赵永健
# @Time : 2020/1/8 13:20
# -*- coding: utf-8 -*-
# 本地图片在html中显示
import os
import re
import base64
import time
def findimg(content):
    """Collect the src targets of <img src='...'> tags in *content*.

    :param content: raw HTML text
    :return: list of matched src strings
    """
    # Greedy group, matching the original behaviour: with several quoted
    # segments on one line the match extends to the last quote.
    img_pattern = re.compile("<img src='(.+)'")
    return img_pattern.findall(content)
def findcomment(content):
    """Remove HTML comments (<!-- ... -->) from *content*.

    :param content: page source
    :return: the content with every matched comment deleted
    """
    comment_pattern = re.compile('(<!--.+-->)')
    for matched in comment_pattern.findall(content):
        content = content.replace(matched, '')
    return content
def imgbase64(pic_path):
    """Base64-encode an image file as a data-URI string.

    Supported extensions: jpg/jpeg (tagged "jpg"), png, bmp.  Any other
    extension prints a warning and returns None.

    :param pic_path: path to the image file
    :return: 'data:image/<tag>;base64,<payload>' or None
    """
    extension = os.path.basename(pic_path).split('.')[-1].lower()
    tag_by_ext = {"jpg": "jpg", "jpeg": "jpg", "png": "png", "bmp": "bmp"}
    tag = tag_by_ext.get(extension)
    if tag is None:
        print("Unsupported image format !")
        return None
    with open(pic_path, "rb") as image_file:
        payload = base64.b64encode(image_file.read()).decode('ascii')
    return 'data:image/' + tag + ';base64,' + payload
def readhtml():
    '''
    Scan ./result for html files, base64-encode every local image they
    reference and rewrite each file with the images embedded inline.
    :return: None
    '''
    for file in os.listdir('./result'):
        if file.endswith(".html"):
            # NOTE(review): absolute machine-specific path -- breaks anywhere
            # but the original author's box.
            root = 'D:/code/96UIAutoC/result/'
            with open(root+file,'r+',encoding='utf-8') as html:
                content = html.read() # read the html file content
                pics = findimg(content) # find all local images referenced in it
                for pic in pics:
                    # log_path = os.path.join(os.getcwd(), 'result')
                    # if os.path.exists(pic):
                    # pic[12:] assumes every src starts with a fixed
                    # 12-character prefix -- TODO confirm with real reports.
                    picPath='D:/code/96UIAutoC/save_img/'+pic[12:]
                    # print(picPath)
                    base64code = imgbase64(picPath) # base64-encode the image file
                    content = content.replace(pic,base64code) # replace the src in the html
                content = findcomment(content) # strip html comments
                html.seek(0)
                html.write(content) # overwrite in place
def readhtmly():
    '''
    Same as readhtml but resolves ../result relative to the parent of the
    current working directory instead of a fixed drive path.
    :return: None
    '''
    # log_dir = os.getcwd() + '\log\\'
    for file in os.listdir('../result'):
        if file.endswith(".html"):
            # Strip the last 6 characters of the cwd to get the project root
            # (assumes the script runs from a 6-character subdirectory).
            a = os.getcwd()[:-6]
            root = a+'\\result\\'
            with open(root+file,'r+',encoding='utf-8') as html:
                content = html.read() # read the html file content
                pics = findimg(content) # find all local images referenced in it
                for pic in pics:
                    # log_path = os.path.join(os.getcwd(), 'result')
                    # if os.path.exists(pic):
                    picPath=a+'\save_img\\'+pic[12:]
                    # print(picPath)
                    base64code = imgbase64(picPath) # base64-encode the image file
                    content = content.replace(pic,base64code) # replace the src in the html
                content = findcomment(content) # strip html comments
                html.seek(0)
                html.write(content) # overwrite in place
    print(a)
# 打包生成可执行程序的命令
# pyinstaller --noupx -F -w --icon=mylogo.ico imgbase64.py
# if __name__ == '__main__':
# readhtml()
# readhtmly()
|
import struct
import os
UTF_16 = 'UTF-16-LE'
class DBError(Exception):
    """Raised when the soundtrack database references a missing file/directory."""
    pass
class Field(object):
    """One value in a binary DB struct, with optional py<->db converters.

    dtype is a struct format code; to_py/to_db convert between the raw
    packed value and its Python representation.
    """
    def __init__(self, name, dtype, to_py=None, to_db=None):
        self.name = name
        self.dtype = dtype
        self.to_py = to_py
        self.to_db = to_db
        self.value = None
    def __setattr__(self, key, value):
        # todo: fix hackiness
        # Assigning .value runs the to_py converter transparently.
        if key == 'value' and self.to_py and value is not None:
            value = self.to_py(value)
        self.__dict__[key] = value
    @staticmethod
    def wchar_to_str(b):
        # Decode a fixed-size UTF-16-LE buffer and drop NUL padding.
        s = b.decode(UTF_16)
        return s.rstrip('\0')
    @staticmethod
    def str_to_wchar(s, pad=64):
        # Encode to UTF-16-LE and pad with NULs up to *pad* bytes.
        b = s.encode(UTF_16)
        while len(b) < pad:
            b += '\0'.encode(UTF_16)
        return b # todo: test padding
class DBStruct(object):
    """Base class for fixed-layout records read from the DB file.

    Subclasses declare their layout via _add_field; read() unpacks one
    record at a file offset into the declared fields.
    """
    def __init__(self, db):
        self.f = db.f
        self.fields = []
        self.data = []
    def _add_field(self, name, dtype='I', n=1, **kwargs):
        # Append n identically-typed fields; returns the single field, or
        # the list of n fields when n > 1.
        field = None
        for i in xrange(n):
            field = Field(name, dtype, **kwargs)
            self.fields.append(field)
        if n == 1:
            return field
        else:
            return self.fields[-n:]
    def read(self, offset):
        # Unpack one record at *offset* and load each value into its field.
        self.f.seek(offset)
        self.data = list(
            struct.unpack(self.format, self.f.read(self.size)))
        for field, value in zip(self.fields, self.data):
            field.value = value
        return self.data
    @property
    def format(self):
        # struct format string assembled from the declared field dtypes
        return ' '.join(field.dtype for field in self.fields)
    @property
    def size(self):
        return struct.calcsize(self.format)
class Header(DBStruct):
    """
    int32 magic always 0x01 0x00 0x00 0x00
    int32 numSoundtracks
    int32 nextSoundtrackId
    int32 soundtrackIds[100]
    int32 nextSongId
    char padding[96]
    """
    def __init__(self, db):
        super(Header, self).__init__(db)
        # Field order must match the on-disk layout documented above.
        self.field_magic = self._add_field('magic')
        self.field_count_albums = self._add_field('count_albums')
        self.field_next_album_id = self._add_field('next_album_id')
        self.field_album_ids = self._add_field('album_id', n=100)
        self.field_next_track_id = self._add_field('next_track_id')
        # The header always lives at offset 0.
        self.read(0)
class Album(DBStruct):
    """
    int32 magic always 0x71 0x13 0x02 0x00
    int32 id
    int32 numSongs source gist labeled as "numSongGroups"
    int32 songGroupIds[84]
    int32 totalTimeMilliseconds
    wchar name[64] Unicode string
    char padding[64]
    """
    def __init__(self, db, offset):
        super(Album, self).__init__(db)
        self.field_magic = self._add_field('magic')
        self.field_album_id = self._add_field('album_id')
        self.field_count_tracks = self._add_field('count_tracks')
        self.field_track_group_ids = self._add_field('track_group_id', n=84)
        self.field_album_length_ms = self._add_field('album_length_ms')
        self.field_album_name = self._add_field('name', dtype='64s',
                                                to_py=Field.wchar_to_str,
                                                to_db=Field.str_to_wchar)
        self.read(offset)
        # Albums are stored on disk in a directory named by the 4-hex-digit id.
        self.hex_id = '{:04x}'.format(self.field_album_id.value)
        self.path = os.path.join(db.root, self.hex_id)
        if not os.path.exists(self.path):
            raise DBError('directory "{}" does not exist for album "{}"'
                          .format(self.path, self.field_album_name.value))
        # Filled in later by STDB while walking the track groups/tracks.
        self.track_groups = {}
        self.tracks = {}
    def __repr__(self):
        return '<{}> {}'.format(self.hex_id, self.field_album_name.value)
class TrackGroup(DBStruct):
    """
    int32 magic always 0x73 0x10 0x03 0x00
    int32 soundtrackId
    int32 id
    int32 padding why is this not null?
    int32 songId[6]
    int32 songTimeMilliseconds[6]
    wchar songName[64][6]
    char padding[64] todo: verify
    """
    def __init__(self, db, offset):
        super(TrackGroup, self).__init__(db)
        self.db = db
        self.field_magic = self._add_field('magic')
        self.field_album_id = self._add_field('album_id')
        self.field_track_group_id = self._add_field('track_group_id')
        self.field_padding = self._add_field('padding')
        # Each group holds up to 6 tracks (parallel id/length/name arrays).
        self.field_track_id = self._add_field('track_id', n=6)
        self.field_track_length_ms = self._add_field('track_length_ms', n=6)
        self.field_track_name = self._add_field('track_name', dtype='64s', n=6,
                                                to_py=Field.wchar_to_str,
                                                to_db=Field.str_to_wchar)
        self.read(offset)
        # Unique id: album id + group id, both as 4 hex digits.
        self.uid = '{:04x}{:04x}'.format(self.field_album_id.value,
                                         self.field_track_group_id.value)
        self.tracks = {}
    def __repr__(self):
        return '<{}>'.format(self.uid)
class Track(object):
    """for convenience, not a native struct"""
    def __init__(self, group, index):
        # Borrow the per-slot fields from the owning TrackGroup.
        self.field_track_id = group.field_track_id[index]
        self.field_track_name = group.field_track_name[index]
        self.field_track_length_ms = group.field_track_length_ms[index]
        self.field_track_group_id = group.field_track_group_id
        self.field_album_id = group.field_album_id
        self.fields = (
            self.field_track_id,
            self.field_track_name,
            self.field_track_length_ms,
            self.field_track_group_id)
        # The wma file lives under the album directory (first 4 hex digits
        # of the 8-digit track id).
        self.hex_id = '{:08x}'.format(self.field_track_id.value)
        self.name = '{}.wma'.format(self.hex_id)
        self.path = os.path.join(
            group.db.root, self.hex_id[:4], self.name)
        if not os.path.exists(self.path):
            raise DBError('file "{}" does not exist for track "{}"'
                          .format(self.path, self.field_track_name.value))
    def __repr__(self):
        return '<{}> {}'.format(self.name, self.field_track_name.value)
class STDB:
    """Parsed ST.DB soundtrack database: header, albums, groups, tracks.

    The file is laid out in 512-byte blocks: header, then 100 album slots,
    then track-group records until EOF.
    """
    block_size = 512
    def __init__(self, path):
        self.f = open(path, 'r')
        self.path = path
        self.root = os.path.dirname(path)
        self.header = Header(self)
        # dicts
        self.albums = self._get_albums()
        self.track_groups = self._get_track_groups()
        self.tracks = self._get_tracks()
    def _get_albums(self):
        # One album record per block, starting right after the header.
        albums = {}
        count = self.header.field_count_albums.value
        for offset in xrange(self.block_size,
                             self.block_size * count + self.block_size,
                             self.block_size):
            album = Album(self, offset)
            album_id = album.field_album_id.value
            albums[album_id] = album
        return albums
    def _get_track_groups(self):
        # Track groups occupy every block from slot 101 to EOF.
        self.f.seek(0, 2)
        f_len = self.f.tell()
        group_beg = self.block_size * 101 # header + 100 soundtracks
        group_end = f_len # EOF
        groups = {}
        for i, offset in enumerate(
                xrange(group_beg, group_end, self.block_size)):
            group = TrackGroup(self, offset)
            group_id = group.field_track_group_id.value
            album_id = group.field_album_id.value
            groups[group.uid] = group
            # Cross-link the group into its owning album.
            self.albums[album_id].track_groups[group_id] = group
        return groups
    def _get_tracks(self):
        # Expand each group's 6 slots into Track objects; id 0 means empty.
        tracks = {}
        for group in self.track_groups.itervalues():
            for i, field_id in enumerate(group.field_track_id):
                track_id = field_id.value
                if not track_id:
                    continue # not sure if always consecutive
                track = Track(group, i)
                album_id = track.field_album_id.value
                tracks[track_id] = track
                group.tracks[track_id] = track
                self.albums[album_id].tracks[track_id] = track
        return tracks
def main():
    # Demo: dump every album and its tracks with mm:ss durations.
    db = STDB(r'/Users/greg/Scripts/pystdb/data/fffe0000/music/ST.DB')
    print 'Database: {}'.format(db.path)
    for album in db.albums.itervalues():
        print '\n{}'.format(album)
        for track in album.tracks.itervalues():
            print '{} ({:0.0f}:{:02.0f})'.format(
                track,
                *divmod(track.field_track_length_ms.value / 1000., 60))

if __name__ == '__main__':
    main()
|
# Demonstrate write ('w') vs. append ('a') modes on the same file.
with open('writexample.txt', 'w') as out:
    # 'w' truncates any previous content
    out.write('Line 1\n')
with open('writexample.txt', 'a') as out:
    # 'a' appends after the existing first line
    out.write('Line 2\n')
|
import math
import collections
import aer
import warnings
def get_lprob(s_word, t_word, lprobs):
    """Lexical translation probability t(t_word | s_word).

    Falls back to 0 when the pair never co-occurred in training.
    """
    translations = lprobs[s_word]
    return translations.get(t_word, 0)
def source_dependencies(s_sentence, t_word, t_pos, t_length, lprobs, jump_probs):
    """Per-source-position contribution to generating *t_word* at *t_pos*.

    Each entry is the sentence-normalised jump probability of that source
    position times its lexical probability of producing *t_word*.
    """
    s_length = len(s_sentence)
    # Distinct local name: the original rebound the jump_probs parameter here.
    position_jump_probs = [
        get_jump_prob(s_pos, t_pos, s_length, t_length, jump_probs)
        for s_pos in range(s_length)
    ]
    normaliser = sum(position_jump_probs)
    contributions = []
    for s_pos, s_word in enumerate(s_sentence):
        weight = position_jump_probs[s_pos] / normaliser
        contributions.append(weight * get_lprob(s_word, t_word, lprobs))
    return contributions
# returns the index in the jump_probabilities list
# for given source and target positions and sentence lengths
# s_pos and s_length for source sentence including the special NULL word
# sentence positions start at index 0
# returns the index in the jump_probabilities list
# for given source and target positions and sentence lengths
# s_pos and s_length for source sentence including the special NULL word
# sentence positions start at index 0
def get_jump_prob_index(s_pos, t_pos, s_length, t_length, jump_probs):
    """Map a (source, target) position pair to an index into *jump_probs*.

    The last slot of *jump_probs* is reserved for the NULL word (s_pos 0);
    jumps outside the representable range are clamped to the largest
    negative or positive jump, with a warning.
    """
    if s_pos == 0:
        return len(jump_probs) - 1  # dedicated NULL-word slot
    # Jump = actual source position minus the diagonally-expected position.
    expected = math.floor((t_pos + 1) * (s_length - 1) / t_length)
    jump = int(s_pos - expected)
    max_jump = int((len(jump_probs) - 2) / 2)
    jump_prob_index = jump + max_jump
    if jump_prob_index < 0:
        warnings.warn(
            f'Jump prob index {jump_prob_index} (jump:{jump}) out of range.'
        )
        return 0 #approximate with prob of largest negative jump
    if jump_prob_index >= len(jump_probs) - 1:
        warnings.warn(
            f'Jump prob index {jump_prob_index} (jump:{jump}) out of range.'
        )
        return len(jump_probs) - 2 #approximate with prob of largest positive jump
    return jump_prob_index
# returns jump probability for given source and target positions
# and lengths. Positions start at index 0, source sentence contains
# special NULL word at position 0
# returns jump probability for given source and target positions
# and lengths. Positions start at index 0, source sentence contains
# special NULL word at position 0
def get_jump_prob(s_pos, t_pos, s_length, t_length, jump_probs):
    """Jump probability for aligning target position *t_pos* to source *s_pos*."""
    return jump_probs[get_jump_prob_index(s_pos, t_pos, s_length, t_length, jump_probs)]
def align(lprobs, jump_probs, sentence_pairs):
    """Align a single (source, target) tuple or a list of such pairs."""
    if isinstance(sentence_pairs, tuple):
        return _align_sentence_pair(lprobs, jump_probs, sentence_pairs)
    alignments = []
    for pair in sentence_pairs:
        alignments.append(_align_sentence_pair(lprobs, jump_probs, pair))
    return alignments
def _align_sentence_pair(lprobs, jump_probs, sentence_pair):
    """Best alignment links for one (source, target) sentence pair.

    Returns a set of (source_pos, target_pos) links where target positions
    are 1-based; alignments to the NULL word (source position 0) are
    omitted.  Ties pick the lowest source position, as before.
    """
    s_sentence, t_sentence = sentence_pair
    t_length = len(t_sentence)
    links = set()
    for t_pos, t_word in enumerate(t_sentence):
        scores = source_dependencies(s_sentence, t_word, t_pos, t_length, lprobs, jump_probs)
        best_pos = max(range(len(scores)), key=scores.__getitem__)
        if best_pos > 0:  # leave out NULL alignments (and unseen words)
            links.add((best_pos, t_pos + 1))  # word positions start at 1
    return links
def EM(s_t_pairs, lprobs, jump_probs, max_iterations = 10,
    val_sentence_pairs = None, reference_alignments = None, fn_after_E = None, mname='IBM2'):
    """Run EM training for the IBM-2 alignment model.

    Mutates *lprobs* in place and rebinds *jump_probs* each iteration.
    Returns (lprobs, jump_probs, log_likelihoods, AERs); the final list
    entries reflect one extra evaluation after the last M-step.
    fn_after_E, if given, is called once per iteration with progress stats.
    """
    i = 0
    log_likelihoods = []
    AERs = []
    while i < max_iterations:
        # initialize
        log_likelihood = 0
        AER = 0
        counts_t_given_s = collections.defaultdict(lambda: collections.defaultdict(int))
        total_s = collections.defaultdict(int)
        jump_counts = [0]*len(jump_probs)
        # E-step: calculate expected counts and log likelihood
        for (s_sentence, t_sentence) in s_t_pairs:
            s_length = len(s_sentence)
            t_length = len(t_sentence)
            for t_pos, t_word in enumerate(t_sentence):
                prob_counts = source_dependencies(
                    s_sentence, t_word, t_pos, t_length, lprobs, jump_probs)
                s_total_t = sum(prob_counts)
                log_likelihood += math.log(s_total_t)
                for s_pos, s_word in enumerate(s_sentence):
                    # posterior responsibility of this source position
                    update = prob_counts[s_pos]/s_total_t
                    counts_t_given_s[s_word][t_word] += update
                    total_s[s_word] += update
                    jump_count_index = get_jump_prob_index(s_pos, t_pos, s_length, t_length, jump_probs)
                    jump_counts[jump_count_index] += update
        # store log_likelihood and AER values
        log_likelihoods.append(log_likelihood)
        if val_sentence_pairs and reference_alignments:
            predicted_alignments = align(lprobs, jump_probs, val_sentence_pairs)
            AER = aer.calculate_AER(reference_alignments, predicted_alignments)
            AERs.append(AER)
        # print debug info or store models on disk
        if fn_after_E:
            prev_llhood = None
            prev_AER = None
            if len(log_likelihoods) > 1:
                prev_llhood = log_likelihoods[-2]
            if len(AERs) > 1:
                prev_AER = AERs[-2]
            fn_after_E(i, log_likelihood, AER, prev_llhood, prev_AER,
                lprobs, jump_probs, mname)
        # M-step: re-normalise lexical and jump probabilities from counts
        for s in lprobs.keys():
            for t in lprobs[s].keys():
                lprobs[s][t] = counts_t_given_s[s][t]/total_s[s]
        jump_count_sum = sum(jump_counts)
        jump_probs = [jc/jump_count_sum for jc in jump_counts]
        # update iteration number
        i += 1
    # add AER after final update
    if val_sentence_pairs and reference_alignments:
        predicted_alignments = align(lprobs, jump_probs, val_sentence_pairs)
        AER = aer.calculate_AER(reference_alignments, predicted_alignments)
        AERs.append(AER)
    # add llhood after final update (one extra E-style pass, no counts)
    log_likelihood = 0
    for (s_sentence, t_sentence) in s_t_pairs:
        s_length = len(s_sentence)
        t_length = len(t_sentence)
        for t_pos, t_word in enumerate(t_sentence):
            prob_counts = source_dependencies(
                s_sentence, t_word, t_pos, t_length, lprobs, jump_probs)
            s_total_t = sum(prob_counts)
            log_likelihood += math.log(s_total_t)
    return lprobs, jump_probs, log_likelihoods, AERs
|
from django.core import validators
from django import forms
from .models import Patient
class Register_patient(forms.ModelForm):
    """Patient registration form; every widget gets the Bootstrap form-control class."""
    class Meta:
        model = Patient
        fields = ['patient_name','patient_code','complain','gender','address']
        widgets= {
            'patient_name' : forms.TextInput(attrs={'class':'form-control'}),
            'patient_code' : forms.TextInput(attrs={'class':'form-control'}),
            'complain' : forms.TextInput(attrs={'class':'form-control'}),
            'gender' : forms.TextInput(attrs={'class':'form-control'}),
            'address' : forms.TextInput(attrs={'class':'form-control'}),
        }
#!/usr/bin/env python
import sys, collections, random
from tcc import tcc
# POS tags eligible for single-word rules / multi-word phrase extraction.
tag_whitelist = (
    'RB', 'CD', 'VB', 'VBD', 'VBG', 'VBZ', 'JJ', 'JJR', 'JJS',
    'NN', 'NNS', 'NNP', 'NNPS'
)
tag_whitelist_phrases = ('JJ', 'NNP', 'NNPS', 'VB', 'NN', 'CD')
word_blacklist = (
    'be', 'is', '\'s', '(', ')', 'was', 'n\'t', 'so', 'have',
    'here', '@', '..', 'GMT'
)
IGNORE = '--IGNORE--'
rules = collections.defaultdict(list)
words = []
tags = []
# stdin lines look like "word/TAG"; collect whitelisted words per tag and
# build parallel word/tag streams for the phrase finder.
for line in sys.stdin:
    word, tag = line.strip().rsplit('/', 1)
    if tag in tag_whitelist and word not in word_blacklist:
        rules[tag].append(word)
        if tag in tag_whitelist_phrases:
            words.append(word)
            tags.append(tag)
            continue
    # stop word from being a repeat
    # (a unique random token can never match a later phrase occurrence)
    words.append(str(random.random()))
    tags.append(IGNORE)
# print some single word rules
for tag, tag_words in rules.iteritems():
    for word in tag_words:
        print tag, word
# print some more complicated phrases
for phrase_len in range(2, 20):
    results = tcc(words, tags, phrase_len, 15)
    for count, phrase, variations in results:
        for variation in variations:
            if IGNORE in variation:
                continue
            # label the phrase by its tag pattern: a single repeated tag,
            # anything containing a proper noun, or a generic PHRASE
            uniq = set(variation.split())
            if len(uniq) == 1 and variation != 'NN':
                print uniq.pop(),
            elif 'NNP' in variation:
                print 'NNP',
            else:
                print 'PHRASE',
            print phrase
|
import numpy as np
from scipy.stats import hypergeom
# n - unknown population size
# Desired confidence level
# Capture-recapture population-size estimation (mark m, catch c, tagged t).
# Desired confidence level
p = 95/100
# Marked in first group
m = 24
# Caught in second group
c = 19
# Tagged in second group
t = 3
print("Lincoln-Petersen estimator = ",(m*c/t))
print("Chapman estimator = ",((m+1)*(c+1)/(t+1)-1))
# Scan candidate population sizes n; the CI endpoints are the first n whose
# hypergeometric quantiles round to the observed tag count t.
# NOTE(review): `== None` should be `is None` (PEP 8); also note the loop
# breaks as soon as the upper endpoint is found, relying on lower being
# found first -- confirm before refactoring.
lower=None
upper=None
for n in range(m+c-t,20000):
    n_lower = hypergeom.ppf(1/2-p/2,n,m,c)
    n_upper = hypergeom.ppf(1/2+p/2,n,m,c)
    #print(n_lower,n_upper)
    if lower==None and round(n_lower)==t:
        lower=n
    if upper==None and round(n_upper)==t:
        upper=n
        break
print(p,"-level confidence interval is (",lower,upper,")")
# Repeat with p = 0 (both quantiles at the median) to get the "squeezed"
# point-estimate bounds.
p = 0/100
lower=None
upper=None
for n in range(m+c-t,20000):
    n_lower = hypergeom.ppf(1/2-p/2,n,m,c)
    n_upper = hypergeom.ppf(1/2+p/2,n,m,c)
    #print(n_lower,n_upper)
    if lower==None and round(n_lower)==t:
        lower=n
    if upper==None and round(n_upper)==t:
        upper=n
        break
print("Squeezed estimator bounds = (",lower,upper,")")
|
#! /usr/bin/python
print 'Content-type: text/html'
print ''
for i in range(11):
print i
for i in range (5, 21):
print i
myFoodList = ["pizza", "chicken", "chocolate"]
for food in myFoodList:
print "I like eating " + food + " . "
x = 0
while x <= 10:
print x
x += 1
ages = {"Bob" : 23, "Sam" : 45, "Kate" : 30, "Rob" : 50 }
for age in ages:
print age + " is " + str(ages[age])
|
from flask import Flask, render_template, request, flash, redirect, session, jsonify
from werkzeug.security import generate_password_hash as passgen
from werkzeug.security import check_password_hash as passcheck
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import os
from datetime import datetime
# Firebase Admin bootstrap: the service-account key is read from keys.json.
cred = credentials.Certificate("keys.json")
firebase_admin.initialize_app(cred)
db = firestore.client()
# Top-level Firestore collections used throughout the app.
blogsRef = db.collection(u'blogs')
authorsRef = db.collection(u'authors')
app = Flask(__name__)
# Random secret per process: all sessions are invalidated on every restart.
app.config['SECRET_KEY'] = os.urandom(24)
@app.route('/')
def index():
    """Home page: every blog post, newest first."""
    ordered = blogsRef.order_by(u"postedOn", direction=firestore.Query.DESCENDING)
    blogs = [snapshot.to_dict() for snapshot in ordered.stream()]
    return render_template("index.html", blogs=blogs)
@app.route('/admin/register/', methods=['GET', 'POST'])
def register():
    """Create a Content Writer account.

    POST requires the admin key from the LocalEyesAdminKey environment
    variable; on success the author document is stored under the
    "first.last" identifier with a hashed password and isVerified=False.
    GET (and a rejected admin key) re-renders the registration form.
    """
    if request.method == 'POST':
        details = request.form
        if details['admin_key'] == os.environ['LocalEyesAdminKey']:
            fullname = str(details['first_name']).capitalize() + " " + str(details['last_name']).capitalize()
            # Store only the salted hash, never the raw password.
            password = passgen(details['password'])
            role = "Content Writer"
            # The document id doubles as the login identifier: "first.last".
            identifier = str(details['first_name']).lower() + "." + str(details['last_name']).lower()
            authorsRef.document(identifier).set({
                u"fullname": fullname,
                u"identifier": identifier,
                u"password": password,
                u"role": role,
                u"isVerified": False
            })
            flash(f"ACCOUNT CREATION SUCCESSFULL. Welcome to LocalEyes {fullname}", "success")
            return redirect('/')
        else:
            flash("You are not authorized to create a Writer Account at LocalEyes.", "danger")
    return render_template('register.html')
@app.route('/admin/login/', methods=['GET', 'POST'])
def login():
    """Author sign-in: verify credentials and populate the session."""
    if request.method != 'POST':
        return render_template("login.html")
    details = request.form
    identifier = details['fullname']
    result = authorsRef.document(identifier).get()
    if not result.exists:
        flash("User does not exist", "danger")
        return render_template("login.html")
    author = result.to_dict()
    if not passcheck(author['password'], details['password']):
        flash("Password is incorrect, please try again.", "danger")
        return render_template("login.html")
    # Credentials check out: record the author's identity and privileges.
    session['logged_in'] = True
    session['author'] = author['fullname']
    session['isVerified'] = bool(author['isVerified'])
    session['role'] = author['role']
    flash(f"Login Successful. Welcome {session['author']}", "success")
    print(session['logged_in'], session['author'], session['isVerified'], session['role'])
    return redirect('/')
@app.route('/author/<identifier>')
def author(identifier):
    """JSON view of a single author document, or a plain message if absent."""
    snapshot = authorsRef.document(identifier).get()
    if not snapshot.exists:
        return "No Such Author"
    return jsonify({"result": snapshot.to_dict()})
@app.route('/blog/<id>/')
def blogs(id):
    """Render one blog post plus every other post as further reading."""
    blog = blogsRef.document(id).get()
    ordered = blogsRef.order_by(u'postedOn', direction=firestore.Query.DESCENDING).get()
    # Everything except the post being viewed, newest first.
    all_posts = [snapshot.to_dict() for snapshot in ordered]
    nextBlogs = [post for post in all_posts if post['id'] != id]
    if blog.exists:
        return render_template('blogs.html', blog=blog.to_dict(), nextBlogs=nextBlogs)
    return "Blog Not Found"
@app.route('/write-blog/', methods=['GET', 'POST'])
def write_blog():
    """Blog composer: GET shows the form, POST stores the post in Firestore.

    Only verified authors may publish; unverified (or anonymous) sessions
    are bounced back to the home page with a warning.
    """
    if request.method == 'POST':
        # session.get avoids a KeyError (HTTP 500) when nobody is logged in
        # and 'isVerified' is absent from the session.
        if session.get('isVerified'):
            blogpost = request.form
            # Slugify the title into a document id / URL tagline.
            tagline = blogpost['title'].replace(" ", "-").replace(",", "").replace("!", "").replace(".", "").lower()
            blogData = {
                u'id' : tagline,
                u'title' : blogpost['title'],
                u'body' : blogpost['body'],
                u'category' : blogpost['category'],
                u'author' : session['author'],
                u'postedOn': datetime.now()
            }
            blogsRef.document(tagline).set(blogData)
            flash('Blog Posted Successfully', 'success')
            return redirect('/')
        else:
            flash("You are not verified to write blogs right now", "danger")
            return redirect('/')
    return render_template('write-blog.html')
@app.route('/my-blogs/')
def my_blogs():
    """List the posts written by the currently signed-in author."""
    current_author = session['author']
    posts = blogsRef.where(u"author", u"==", current_author).stream()
    return render_template('my-blogs.html', my_blogs=posts)
@app.route('/categories')
def categories():
    """JSON map of category name -> list of posts.

    With ?q=<name>, only that (upper-cased) category is returned; otherwise
    the three known categories are pre-seeded and all posts are grouped.
    """
    _categories = ['LAUNCH', 'DESIGN', 'FEATURES']
    blogsList = {}
    if request.args:
        category = str(request.args.get('q')).upper()
        blogs = blogsRef.where(u"category", u"==", category).get()
        blogsList[category] = []
    else:
        for _category in _categories:
            blogsList[_category] = []
        blogs = blogsRef.order_by(u"category", direction=firestore.Query.DESCENDING).get()
    for blog in blogs:
        data = blog.to_dict()
        # setdefault guards against posts whose category is not pre-seeded,
        # which previously raised KeyError and returned a 500.
        blogsList.setdefault(data['category'], []).append(data)
    return jsonify(blogsList)
@app.route('/edit-blog/<id>/', methods=['GET', 'POST'])
def edit_blog(id):
    """Edit an existing post.

    Rewritten against Firestore: the previous body used an undefined
    ``mysql`` connection (a leftover from a MySQL-backed version) and always
    raised NameError. Blog documents are keyed by their string tagline, so
    the route converter is ``<id>`` rather than ``<int:id>``.
    """
    docRef = blogsRef.document(id)
    if request.method == 'POST':
        docRef.update({
            u'title': request.form['title'],
            u'body': request.form['body'],
        })
        flash('Blog Updated Successfully', 'success')
        return redirect('/blog/{}'.format(id))
    snapshot = docRef.get()
    if snapshot.exists:
        blog = snapshot.to_dict()
        # Pre-fill the edit form with the current content.
        blog_form = {'title': blog['title'], 'body': blog['body']}
        return render_template('edit-blog.html', blog_form=blog_form)
    return "Blog Not Found"
@app.route('/delete-blog/<id>/')
def delete_blog(id):
    """Delete a post by its tagline id.

    Rewritten against Firestore: the previous body used an undefined
    ``mysql`` connection (leftover from a MySQL backend) and always raised
    NameError; ids are string taglines, so ``<int:id>`` became ``<id>``.
    """
    blogsRef.document(id).delete()
    flash('Blog Deleted Successfully !!', 'success')
    return redirect('/')
@app.route('/logout/')
def logout():
    """Clear the whole session (auth flags included) and go home."""
    session.clear()
    flash("Logged Out Successfully", "warning")
    return redirect('/')
# Development server entry point (debug mode; not for production use).
if __name__ == '__main__':
    app.run(debug=True)
|
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
# Load benchmark datasets: gRPC and RPyC timing runs, localhost vs. remote host.
ds_l = pd.read_csv('grpc_metrics_localhost.csv')
ds_r = pd.read_csv('grpc_metrics_192.168.0.109.csv')
ds_rpyc = pd.read_csv('rpyc_metrics_localhost_2.csv')
ds_rpyc_r = pd.read_csv('rpyc_metrics_192.168.0.109.csv')
def remove_outliers(ds, lower_q, upper_q):
    """Return *ds* with rows outside the (lower_q, upper_q) quantile band dropped.

    Quantiles are computed per column on the incoming frame; a row survives
    only if every column value lies strictly inside its band. Parameters
    were renamed from ``min``/``max``, which shadowed the builtins; all call
    sites pass them positionally, so the rename is backward-compatible.
    """
    lows = ds.quantile([lower_q])
    highs = ds.quantile([upper_q])
    for col in ds.columns:
        ds = ds[ds.loc[:, col] > lows[col].values[0]]
        ds = ds[ds.loc[:, col] < highs[col].values[0]]
    return ds
# Outlier removal: trim each dataset to its 2nd-98th percentile band.
ds_l = remove_outliers(ds_l, .02, .98)
ds_r = remove_outliers(ds_r, .02, .98)
ds_rpyc = remove_outliers(ds_rpyc, .02, .98)
ds_rpyc_r = remove_outliers(ds_rpyc_r, .02, .98)
# Boxplot of per-call times for the remote gRPC run (last column excluded).
ax = ds_r.iloc[:,:-1].plot.box(figsize=(12,6), title="Tempos por função (grpc remoto)")
ax.set_ylabel("Tempo em segundos")
plt.xticks(rotation=45)
ax.set_xlabel("Função")
# Horizontal bar chart of mean call time per function with std-dev bars.
fig, ax = plt.subplots(figsize=(12,6))
error_config = {'ecolor': '0.3'}
ds = ds_rpyc_r
for i, col in enumerate(ds_r.columns):
    if col == 'euclidean_arg': continue
    ax.barh(i, ds[col].mean(), color=cm.Pastel2(i), xerr=ds[col].std(), error_kw=error_config)
    ax.text(ds[col].mean()*.3, i, str(round(ds[col].mean(),8)))
ax.set_yticklabels(ds_r.columns[:-1])
ax.set_yticks(np.arange(len(ds_r.columns)) + .2)
ax.set_title('Tempo médio por função (rpyc remoto)')
ax.set_xlabel("Tempo em segundos")
ax.set_ylabel("Função")
# Scatter of per-iteration latency: RPyC local vs. remote ('int_arg' calls).
fig, ax = plt.subplots(figsize=(12,6))
ax.scatter(x=np.arange(len(ds_rpyc)), y=ds_rpyc['int_arg'], label='local', color='green', marker='o')
ax.scatter(x=np.arange(len(ds_rpyc_r)), y=ds_rpyc_r['int_arg'], label='remoto', color='orange', marker='o')
ax.set_title('RPyC: comparativo de oscilação de latência remoto vs local (int_arg)')
ax.set_ylabel("Tempo em segundos")
ax.set_xlabel("Iteração de teste")
ax.legend()
# gRPC vs RPyC comparison for the 'euclidean_arg' call only (paired bars).
fig, ax = plt.subplots(figsize=(12,6))
error_config = {'ecolor': '0.3'}
bar_w = .35
ds_1 = ds_l
ds_2 = ds_rpyc
for i, col in enumerate(ds_l.columns):
    # The filter keeps only 'euclidean_arg', so i is fixed at that column's
    # position when the bars are drawn.
    if col != 'euclidean_arg': continue
    ax.barh(i, ds_1[col].mean(), bar_w, color=cm.Pastel2(i))
    ax.text(0.00002, i, 'grpc: '+str(round(ds_1[col].mean(),8)))
    ax.barh(i+bar_w, ds_2[col].mean(), bar_w, color=cm.Pastel2(i))
    ax.text(0.00002, i+bar_w, 'rpyc: '+str(round(ds_2[col].mean(),8)))
ax.set_yticklabels([ds_l.columns[8]])
# ax.set_yticks(np.arange(len(ds_l.columns))+.2)
ax.set_title('Comparativo euclidean tempo médio (execução local)')
ax.set_xlabel("Tempo em segundos")
ax.set_ylabel("Função")
import math


def circle_area(radius):
    """Return the area of a circle with the given radius (pi * r**2)."""
    # math.pi == 3.141592653589793, the literal the original script used.
    return math.pi * (radius ** 2)


if __name__ == '__main__':
    # Read an integer radius from stdin and print the area.
    r = int(input())
    print(circle_area(r))
from conf import all as conf
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 24 22:18:50 2017
@author: JATIN
"""
import zipfile
import pandas as pd
import urllib.request, urllib.error, urllib.parse
from bs4 import BeautifulSoup
from datetime import datetime
from dateutil.parser import parse
def sec_dataframe():
    """Scrape the SEC FOIA page and build the module-level DataFrame ``df``.

    ``df`` gets one row per linked .zip archive with columns File_URL,
    Date (parsed out of fixed positions in the file name) and
    Type ('exempt' / 'non-exempt').
    """
    #Set variable for page to be open and url to be concatenated
    url = "https://www.sec.gov"
    page = urllib.request.urlopen('https://www.sec.gov/help/foiadocsinvafoiahtm.html')
    #File extension to be looked for.
    extension = ".zip"
    #Use BeautifulSoup to clean up the page
    soup = BeautifulSoup(page)
    soup.prettify()
    zipfiles=[]
    #Find all the links on the page that end in .zip
    for anchor in soup.findAll('a', href=True):
        links = url + anchor['href']
        if links.endswith(extension):
            zipfiles.append(links)
    # Recover each archive's date from fixed character positions in the file
    # name; the layout differs between exempt/non-exempt and old/new names.
    # NOTE(review): these slice offsets assume the historical SEC naming
    # scheme — verify against the current file names.
    dates=[]
    for line in zipfiles:
        if (line[-10:-4]=='exempt'):
            year='20'+line[-13:-11]
            day=line[-15:-13]
            month=line[-17:-15]
        elif (line[-12:-6]=='exempt'):
            year='20'+line[-15:-13]
            month=line[-19:-17]
            day=line[-17:-15]
        elif (line[-6]=='_'):
            year='20'+line[-8:-6]
            day=line[-10:-8]
            month=line[-12:-10]
        else:
            year='20'+line[-6:-4]
            day=line[-8:-6]
            month=line[-10:-8]
        dates.append(year+'-'+month+'-'+day)
    Type=[]
    for line in zipfiles:
        if (line[-10:-4]=='exempt' or line[-12:-6]=='exempt'):
            Type.append('exempt')
        else:
            Type.append('non-exempt')
    # The frame is published as a module-level global for the helpers below.
    global df
    df=pd.DataFrame(zipfiles)
    df.columns=['File_URL']
    Date = [datetime.strptime(date, '%Y-%m-%d').date() for date in dates]
    df['Date']=Date
    df['Type']=Type
# Build df eagerly at import time (performs a network request).
sec_dataframe()
def get_sec_zip_by_period(period, is_exempt=False, only_most_recent=False):
    """Open SEC FOIA zip archive(s) whose Date falls within *period*.

    period: iterable of datetime.date values to match against df['Date'].
    is_exempt: select 'exempt' archives instead of 'non-exempt'.
    only_most_recent: open only the newest matching archive.

    Fixes over the original:
    - the defaults were the ``bool`` type object (truthy), so the function
      always behaved as non-exempt / most-recent; they are proper booleans now;
    - ``for i in df`` iterated column labels, never rows; rows are selected
      with a boolean mask instead;
    - ``zipfile.Zipfile`` (typo) raised AttributeError.
    """
    type_label = 'exempt' if is_exempt else 'non-exempt'
    # Boolean mask over the module-level df built by sec_dataframe().
    matches = df[df['Date'].isin(period) & (df['Type'] == type_label)]
    if only_most_recent:
        recent_url = matches.sort_values('Date')['File_URL'].iloc[-1]
        return zipfile.ZipFile(recent_url, 'r')
    return zipfile.ZipFile(matches['File_URL'].iloc[0], 'r')
|
# Generated by Django 2.2.3 on 2019-07-04 15:13
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the now-unused Bank model (its FK on Account was removed in 0037)."""

    dependencies = [
        ('budget', '0037_remove_account_bank'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Bank',
        ),
    ]
|
from django.contrib import admin
from .models import Image, WebPage, AsyncResults
@admin.register(WebPage)
class WebPageAdmin(admin.ModelAdmin):
    """Default admin for WebPage; no customisation needed yet."""
    pass
@admin.register(Image)
class ImagePageAdmin(admin.ModelAdmin):
    """Default admin for Image; no customisation needed yet."""
    pass
@admin.register(AsyncResults)
class AsyncResultAdmin(admin.ModelAdmin):
    """Default admin for AsyncResults; no customisation needed yet."""
    pass
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Category, Book, User
# Seed script (Python 2): populate the books database with a demo user,
# two categories and four books.
engine = create_engine('postgresql:///books')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# The user that owns every category and book created below.
user = User(id="112992310265322312899", name="Yuanhao Lu")
session.add(user)
session.commit()
# Books for big data
category1 = Category(name="big data", desc="big data book list", user=user)
session.add(category1)
session.commit()
desc1 = '''
Data is at the center of many challenges in system design today.
Difficult issues need to be figured out, such as scalability,
consistency, reliability, efficiency, and maintainability.
In addition, we have an overwhelming variety of tools, including
relational databases, NoSQL datastores, stream or batch
processors, and message brokers. What are the right choices
for your application? How do you make sense
of all these buzzwords?\n
In this practical and comprehensive guide, author Martin Kleppmann
helps you navigate this diverse landscape by examining the pros
and cons of various technologies for processing and storing data.
Software keeps changing, but the fundamental principles remain
the same. With this book, software engineers and architects will
learn how to apply those ideas in practice, and how to make
full use of data in modern applications.\n
Peer under the hood of the systems you already use, and learn
how to use and operate them more effectively\n
Make informed decisions by identifying the strengths and
weaknesses of different tools\n
Navigate the trade-offs around consistency, scalability,
fault tolerance, and complexity\n
Understand the distributed systems research upon which modern
databases are built\n
Peek behind the scenes of major online services, and learn
from their architectures
'''
book1 = Book(
    name="Designing Data-Intensive Applications",
    author="Martin Kleppmann",
    category=category1,
    desc=desc1,
    user=user
)
session.add(book1)
session.commit()
desc2 = '''
Apache Spark is amazing when everything clicks. But if you
haven't seen the performance improvements you expected, or
still don't feel confident enough to use Spark in production,
this practical book is for you. Authors Holden Karau and
Rachel Warren demonstrate performance optimizations to
help your Spark queries run faster and handle
larger data sizes, while using fewer resources.\n
Ideal for software engineers, data engineers, developers,
and system administrators working with large-scale data
applications, this book describes techniques that can
reduce data infrastructure costs and developer hours.
Not only will you gain a more comprehensive understanding
of Spark, you'll also learn how to make it sing.\n
With this book, you'll explore:\n
How Spark SQL's new interfaces improve performance
over SQL's RDD data structure\n
The choice between data joins in Core Spark and Spark SQL\n
Techniques for getting the most out of standard RDD transformations\n
How to work around performance issues in Spark's key/value pair paradigm\n
Writing high-performance Spark code without Scala or the JVM\n
How to test for functionality and performance when
applying suggested improvements\n
Using Spark MLlib and Spark ML machine learning libraries\n
Spark's Streaming components and external community packages
'''
book2 = Book(
    name="High Performance Spark",
    author="Holden Karau & Rachel Warren",
    category=category1,
    desc=desc2,
    user=user
)
session.add(book2)
session.commit()
# Books for Javascript
# NOTE: category1 is rebound here; the earlier "big data" category has
# already been committed, so reuse of the name is harmless.
category1 = Category(name="JavaScript", desc="JS book list", user=user)
session.add(category1)
session.commit()
desc1 = '''
If you're like most developers, you rely heavily on
JavaScript to build interactive and quick-responding web
applications. The problem is that all of those lines of
JavaScript code can slow down your apps. This book reveals
techniques and strategies to help you eliminate performance
bottlenecks during development. You'll learn how to
improve execution time, downloading, interaction with
the DOM, page life cycle, and more.\n
Yahoo! frontend engineer Nicholas C. Zakas and five
other JavaScript experts-Ross Harmes, Julien Lecomte,
Steven Levithan, Stoyan Stefanov, and Matt Sweeney-demonstrate
optimal ways to load code onto a page, and offer programming
tips to help your JavaScript run as efficiently and quickly
as possible. You'll learn the best practices
to build and deploy your files to a production environment,
and tools that can help you find problems once your site goes live.\n
Identify problem code and use faster alternatives to
accomplish the same task\n
Improve scripts by learning how JavaScript stores and accesses data\n
Implement JavaScript code so that it doesn't slow down
interaction with the DOM\n
Use optimization techniques to improve runtime performance\n
Learn ways to ensure the UI is responsive at all times\n
Achieve faster client-server communication\n
Use a build system to minify files, and HTTP compression to
deliver them to the browser
'''
book1 = Book(
    name="High Performance JavaScript",
    author="Nicholas C. Zakas",
    category=category1,
    desc=desc1,
    user=user
)
session.add(book1)
session.commit()
desc2 = '''
No matter how much experience you have with JavaScript,
odds are you don't fully understand the language. As part of
the "You Don't Know JS" series, this concise yet in-depth
guide focuses on new asynchronous features and performance
techniques-including Promises, generators, and Web Workers-that
let you create sophisticated single-page web applications and
escape callback hell in the process.\n
Like other books in this series, You Don't Know JS: Async & Performance
dives into trickier parts of the language that many JavaScript
programmers simply avoid. Armed with this knowledge, you can
become a true JavaScript master.\n
With this book you will:\n
Explore old and new JavaScript methods for handling
asynchronous programming\n
Understand how callbacks let third parties control your
program's execution\n
Address the "inversion of control" issue with JavaScript Promises\n
Use generators to express async flow in a sequential,
synchronous-looking fashion\n
Tackle program-level performance with Web Workers, SIMD, and asm.js\n
Learn valuable resources and techniques for benchmarking and
tuning your expressions and statements
'''
book2 = Book(
    name="You Don't Know JS: Async & Performance",
    author="Kyle Simpson",
    category=category1,
    desc=desc2,
    user=user
)
session.add(book2)
session.commit()
print "added books!"
|
from translationstring import TranslationStringFactory

# Module-wide translation marker for the 'onegov.gazette' i18n domain.
_ = TranslationStringFactory('onegov.gazette')
|
#!/usr/local/bin/python
from ROOT import *
import subprocess
import os
import sys
XrayInDir = sys.argv[1]
def placeHRFiles():
    """Copy high-rate (hr*) ROOT files from the FPIX test directory into the
    rate-matched output subdirectory.

    Characters 3-4 of the file name encode the rate; rate/5 + 1 picks the
    numbered destination directory. NOTE(review): Python 2 integer division
    is assumed for ``int(hrdig)/5``.
    """
    for f in os.listdir(XrayInDir + '/000_FPIXTest_p17'):
        if 'hr' in f and 'root' in f and 'NoCal' not in f:
            hrdig = list(f)[2] + list(f)[3]
            hrVal = int(hrdig)/5 + 1
            # Glob suffix lets cp match the full '%03d_<name>' directory.
            hrDir = '/%03d'%hrVal + '*'
            subprocess.call('cp '+ XrayInDir + '/000_FPIXTest_p17/' + f + ' ' + topDir + '/' + hrDir, shell = True)
def writeIniFile():
    """Create elComandante.ini from the .tmp template, substituting the
    'insertTestsHere' marker line with the test sequence, then move the
    result into the output configfiles directory."""
    inTmpFile = open('elComandante.ini.tmp')
    outTmpFile = open('elComandante.ini', 'w')
    for line in inTmpFile:
        if 'insertTestsHere' in line:
            line = 'Test = HRData@60MHz/cm2,HRData@130MHz/cm2>{HREfficiency@30MHz/cm2,HREfficiency@60MHz/cm2,HREfficiency@100MHz/cm2,HREfficiency@130MHz/cm2,HREfficiency@170MHz/cm2}' + '\n'
        outTmpFile.write(line)
    inTmpFile.close()
    outTmpFile.close()
    subprocess.call('mv elComandante.ini ' + topDir + '/configfiles', shell = True)
# Parse module name, date, time and sequence number out of the input
# directory name: <module>_<test>_<date>_<time>_<number>[/]
module = XrayInDir.split('_')[0]
date = XrayInDir.split('_')[2]
time = XrayInDir.split('_')[3]
number = XrayInDir.split('_')[4]
if '/' in number:
    number = int(number.replace("/", ""))
else:
    number = int(number)
# Offset the qualification number into the X-ray qualification range.
number += 550
topDir = module + '_XrayQualification_' + date + '_' + time + '_' + str(number)
print 'Creating directory: ' + topDir
print 'tarring directory...'
# One numbered subdirectory per test stage, each seeded with the shared
# parameter files from the FPIX test.
mainDirList = ['HRData_60','HRData_130','HREfficiency_30', 'HREfficiency_60', 'HREfficiency_100', 'HREfficiency_130', 'HREfficiency_170']
configParamFilePath = XrayInDir + '/000_FPIXTest_p17/configParameters.dat'
testParamFilePath = XrayInDir + '/000_FPIXTest_p17/testParameters.dat'
defaultMaskFilePath = XrayInDir + '/000_FPIXTest_p17/defaultMaskFile.dat'
subprocess.call('mkdir ' + topDir, shell = True)
for i in range (0, len(mainDirList)):
    subprocess.call('mkdir ' + topDir + '/%03d'%i + '_' + mainDirList[i], shell = True)
    subprocess.call('cp ' + configParamFilePath + ' ' + topDir + '/%03d'%i + '_' + mainDirList[i], shell = True)
    subprocess.call('cp ' + testParamFilePath + ' ' + topDir + '/%03d'%i + '_' + mainDirList[i], shell = True)
    subprocess.call('cp ' + defaultMaskFilePath + ' ' + topDir + '/%03d'%i + '_' + mainDirList[i], shell = True)
subprocess.call('mkdir ' + topDir + '/configfiles', shell = True)
subprocess.call('mkdir ' + topDir + '/logfiles', shell = True)
# dc10*/dc20* raw data belong to the 60 and 130 MHz/cm2 HRData stages.
subprocess.call('cp ' + XrayInDir + '/000_FPIXTest_p17/dc10*' + ' ' + topDir + '/000_HRData_60/', shell = True)
subprocess.call('cp ' + XrayInDir + '/000_FPIXTest_p17/dc20*' + ' ' + topDir + '/001_HRData_130/', shell = True)
placeHRFiles()
writeIniFile()
subprocess.call('tar -zcvf ' + topDir + '.tar.gz ' + topDir, shell = True)
|
__author__ = 'ryan@barnett.io'

def reverse_string(string):
    """Return *string* reversed."""
    # join over reversed() rather than the [::-1] slice idiom
    return ''.join(reversed(string))
import os
import torch
from models import model_utils
from utils import eval_utils, time_utils
import numpy as np
def get_itervals(args, split):
    """Look up the display interval, save interval and iteration cap for a
    data split; unknown split names fall back to the 'test' settings."""
    if split not in ('train', 'val', 'test'):
        split = 'test'
    opts = vars(args)
    return opts[split + '_disp'], opts[split + '_save'], opts['max_' + split + '_iter']
def test(args, log, split, loader, model, epoch, recorder):
    """Run one evaluation pass over *loader*: record losses, print periodic
    iteration summaries, save image results, and emit an epoch summary."""
    model.eval()
    log.print_write('---- Start %s Epoch %d: %d batches ----' % (split, epoch, len(loader)))
    timer = time_utils.Timer(args.time_sync);
    disp_intv, save_intv, stop_iters = get_itervals(args, split)
    with torch.no_grad():
        for i, sample in enumerate(loader):
            data = model.parse_data(sample)
            pred = model.forward();
            timer.update_time('Forward')
            loss = model.get_loss_terms()
            if loss != None:
                # NOTE(review): 'udpate_iter' is the recorder's actual
                # (misspelled) method name — do not "fix" only this call site.
                recorder.udpate_iter(split, loss.keys(), loss.values())
            records, iter_res = model.prepare_records()
            recorder.udpate_iter(split, records.keys(), records.values())
            iters = i + 1
            if iters % disp_intv == 0:
                opt = {'split':split, 'epoch':epoch, 'iters':iters, 'batch':len(loader),
                        'timer':timer, 'recorder': recorder}
                log.print_iters_summary(opt)
            # Periodically dump visual results (capped at 32 images per row).
            if iters % save_intv == 0:
                visuals = model.prepare_visual()
                nrow = min(data['img'].shape[0], 32)
                log.save_img_results(visuals, split, epoch, iters, nrow=nrow)
                log.plot_curves(recorder, split, epoch=epoch, intv=disp_intv)
                if hasattr(args, 'save_detail') and args.save_detail or (split == 'test'):
                    model.save_visual_detail(log, split, epoch, sample['path'], sample['obj'])
            # Optional early stop after a fixed number of iterations.
            if stop_iters > 0 and iters >= stop_iters: break
    opt = {'split': split, 'epoch': epoch, 'recorder': recorder}
    log.print_epoch_summary(opt)
|
import json
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import expit as sigmoid
from sklearn.utils import shuffle
from datetime import datetime
from scipy.spatial.distance import cosine as cos_dist
from sklearn.metrics.pairwise import pairwise_distances
from glob import glob
import os
import sys
import string
sys.path.append(os.path.abspath('..'))
from brownCorpus import get_sentences_with_word2idx_limit_vocab as get_brown
def remove_punctuation_2(s):
    # Python 2 str.translate: (table=None, deletechars) form.
    return s.translate(None, string.punctuation)
def remove_punctuation_3(s):
    """Strip every ASCII punctuation character from *s* (Python 3 form)."""
    table = str.maketrans('', '', string.punctuation)
    return s.translate(table)
# Pick the punctuation stripper that matches the running interpreter.
if sys.version.startswith('2'):
    remove_punctuation = remove_punctuation_2
else:
    remove_punctuation = remove_punctuation_3
# Get the data i.e sentences and word2idx mapings from brown courpus in nltk
def train_model(savedir):
    """Train skip-gram word2vec with negative sampling on the Brown corpus.

    Saves word2idx.json and weights.npz into *savedir*, plots the per-epoch
    cost, and returns (word2idx, W, V).
    """
    sentences, word2idx = get_brown()
    vocab_size = len(word2idx)
    # Hyperparameter initialization
    window_size = 5
    learning_rate = 0.025
    final_learning_rate = 0.0001
    num_negatives = 5
    epochs = 20
    D = 50
    # Linear learning-rate decay schedule across epochs.
    learning_rate_delta = (learning_rate - final_learning_rate) / epochs
    # W: input (centre-word) embeddings; V: output (context) embeddings.
    W = np.random.randn(vocab_size, D)
    V = np.random.randn(D, vocab_size)
    # Probability distribution for drawing negative samples.
    p_neg = get_negative_sampling_distribution(sentences, vocab_size)
    costs = []
    total_words = sum(len(sentence) for sentence in sentences)
    print("Total number of words in corpus:", total_words)
    # Frequent-word subsampling probabilities (word2vec-style threshold).
    threshold = 1e-5
    p_drop = 1 - np.sqrt(threshold / p_neg)
    #Training the model
    for epoch in range(epochs):
        np.random.shuffle(sentences)
        cost = 0
        counter = 0
        t0 = datetime.now()
        for sentence in sentences:
            # Stochastically drop frequent words before windowing.
            sentence = [w for w in sentence \
                if np.random.random() < (1 - p_drop[w])
            ]
            if len(sentence) < 2:
                continue
            # Visit centre-word positions in random order.
            randomly_ordered_positions = np.random.choice(
                len(sentence),
                size=len(sentence),
                replace=False,
            )
            for pos in randomly_ordered_positions:
                word = sentence[pos]
                context_words = get_context(pos, sentence, window_size)
                neg_word = np.random.choice(vocab_size, p=p_neg)
                targets = np.array(context_words)
                # One positive (label 1) and one negative (label 0) step.
                c = sgd(word, targets, 1, learning_rate, W, V)
                cost += c
                c = sgd(neg_word, targets, 0, learning_rate, W, V)
                cost += c
            counter += 1
            if counter % 100 == 0:
                sys.stdout.write("processed %s / %s\r" % (counter, len(sentences)))
                sys.stdout.flush()
        dt = datetime.now() - t0
        print("epoch complete:", epoch, "cost:", cost, "dt:", dt)
        costs.append(cost)
        learning_rate -= learning_rate_delta # Updating the learning rate
    # Visualization
    plt.plot(costs)
    plt.show()
    # save the model
    if not os.path.exists(savedir):
        os.mkdir(savedir)
    with open('%s/word2idx.json' % savedir, 'w') as f:
        json.dump(word2idx, f)
    np.savez('%s/weights.npz' % savedir, W, V)
    return word2idx, W, V
def get_negative_sampling_distribution(sentences, vocab_size):
    """Return the unigram distribution raised to the 3/4 power, normalised,
    for drawing negative samples.

    sentences: iterable of word-index lists; vocab_size: number of words.
    The unused ``word_count`` accumulator from the original was removed.
    """
    word_freq = np.zeros(vocab_size)
    for sentence in sentences:
        for word in sentence:
            word_freq[word] += 1
    # Smoothening: exponent 0.75 flattens the distribution so rare words
    # are sampled a little more often.
    p_neg = word_freq**0.75
    #Normalization
    p_neg = p_neg / p_neg.sum()
    # assert(np.all(p_neg > 0))
    return p_neg
def get_context(pos, sentence, window_size):
    """Return the word indices around position ``pos``, excluding the centre
    word itself; the window covers [pos - w, pos + w) clipped to the sentence."""
    lo = max(0, pos - window_size)
    hi = min(len(sentence), pos + window_size)
    return [idx for offset, idx in enumerate(sentence[lo:hi], start=lo)
            if offset != pos]
def sgd(input_, targets, label, learning_rate, W, V):
    """One negative-sampling SGD step for (centre word, context targets)
    with the given 0/1 label; updates W and V in place and returns the
    summed binary cross-entropy cost."""
    prob = sigmoid(W[input_].dot(V[:, targets]))
    err = prob - label
    # Gradients w.r.t. the output and input embeddings (both computed
    # before either matrix is touched).
    grad_out = np.outer(W[input_], err)
    grad_in = np.sum(err * V[:, targets], axis=1)
    V[:, targets] -= learning_rate * grad_out
    W[input_] -= learning_rate * grad_in
    cost = label * np.log(prob + 1e-10) + (1 - label) * np.log(1 - prob + 1e-10)
    return cost.sum()
def load_model(savedir):
    """Load word2idx plus the two embedding matrices saved by train_model."""
    with open('%s/word2idx.json' % savedir) as f:
        word2idx = json.load(f)
    arrays = np.load('%s/weights.npz' % savedir)
    return word2idx, arrays['arr_0'], arrays['arr_1']
def analogy(pos1, neg1, pos2, neg2, word2idx, idx2word, W):
    """Print the nearest-neighbour answer to the analogy pos1 - neg1 = ? - neg2.

    The candidate is the embedding closest (cosine distance) to
    p1 - n1 + n2, excluding the three query words themselves; also prints
    the ten nearest words and the distance to the expected answer pos2.
    """
    V, D = W.shape
    print("testing: %s - %s = %s - %s" % (pos1, neg1, pos2, neg2))
    # Bail out early when any probe word is out of vocabulary.
    for w in (pos1, neg1, pos2, neg2):
        if w not in word2idx:
            print("Sorry, %s not in word2idx" % w)
            return
    p1 = W[word2idx[pos1]]
    n1 = W[word2idx[neg1]]
    p2 = W[word2idx[pos2]]
    n2 = W[word2idx[neg2]]
    vec = p1 - n1 + n2
    distances = pairwise_distances(vec.reshape(1, D), W, metric='cosine').reshape(V)
    idx = distances.argsort()[:10]
    best_idx = -1
    # Never answer with one of the query words themselves.
    keep_out = [word2idx[w] for w in (pos1, neg1, neg2)]
    for i in idx:
        if i not in keep_out:
            best_idx = i
            break
    print("got: %s - %s = %s - %s" % (pos1, neg1, idx2word[best_idx], neg2))
    print("closest 10:")
    for i in idx:
        print(idx2word[i], distances[i])
    print("dist to %s:" % pos2, cos_dist(p2, vec))
def test_model(word2idx, W, V):
    """Run a battery of word-analogy probes against the input embeddings W
    and against the averaged embeddings (W + V.T) / 2."""
    idx2word = {i:w for w, i in word2idx.items()}
    probes = [
        ('walk', 'walking', 'swim', 'swimming'),
        ('france', 'paris', 'japan', 'tokyo'),
        ('france', 'paris', 'china', 'beijing'),
        ('february', 'january', 'december', 'november'),
        ('france', 'paris', 'germany', 'berlin'),
        ('week', 'day', 'year', 'month'),
        ('week', 'day', 'hour', 'minute'),
        ('king', 'man', 'queen', 'woman'),
        ('king', 'prince', 'queen', 'princess'),
        ('miami', 'florida', 'dallas', 'texas'),
        ('einstein', 'scientist', 'picasso', 'painter'),
        ('japan', 'sushi', 'germany', 'bratwurst'),
        ('man', 'woman', 'he', 'she'),
        ('man', 'woman', 'uncle', 'aunt'),
        ('man', 'woman', 'brother', 'sister'),
        ('paris', 'france', 'rome', 'italy'),
        ('france', 'french', 'england', 'english'),
        ('japan', 'japanese', 'china', 'chinese'),
        ('china', 'chinese', 'america', 'american'),
        ('man', 'woman', 'husband', 'wife'),
        ('man', 'woman', 'actor', 'actress'),
        ('man', 'woman', 'father', 'mother'),
        ('heir', 'heiress', 'prince', 'princess'),
        ('nephew', 'niece', 'uncle', 'aunt'),
        ('france', 'paris', 'italy', 'rome'),
        ('japan', 'japanese', 'italy', 'italian'),
        ('japan', 'japanese', 'australia', 'australian'),
    ]
    for We in (W, (W + V.T) / 2):
        print("**********")
        for pos1, neg1, pos2, neg2 in probes:
            analogy(pos1, neg1, pos2, neg2, word2idx, idx2word, We)
if __name__ == '__main__':
    # Train (writes into ./w2v_model) then run the analogy test battery.
    word2idx, W, V = train_model('w2v_model')
    test_model(word2idx, W, V)
|
from onegov.user.auth.core import Auth

# Public API of this package: re-export Auth only.
__all__ = ('Auth', )
|
from adapters.dimmable_bulb_adapter import DimmableBulbAdapter

# Maps Immax Zigbee model identifiers to their device adapter class.
immax_adapters = {
    'IM-Z3.0-DIM': DimmableBulbAdapter, # Immax LED E14/230V C35 5W TB 440LM ZIGBEE DIM
}
import lxml.etree as ET
from lxml.builder import E as B
def buildPerson(personid ,firstname, lastname, title, address, street, zipcode, city, country, day, month, year):
    """Assemble one <person id=...> element with name, title, address and
    birthday sub-elements. (The ``address`` argument itself is unused; the
    address block is built from street/zipcode/city/country.)"""
    addr_node = B.address(
        B.street(street),
        B.zip(zipcode),
        B.city(city),
        B.country(country)
    )
    bday_node = B.birthday(
        B.day(day),
        B.month(month),
        B.year(year)
    )
    return B.person(
        B.firstname(firstname),
        B.lastname(lastname),
        B.title(title),
        addr_node,
        bday_node,
        id=personid
    )
def buildPersonList(numberOfPersons):
    """Return a <personlist> containing ``numberOfPersons`` dummy entries.

    Each person's id is its index; the 11 data fields are just "0".."10"."""
    personList = B.personlist()
    for i in xrange(numberOfPersons):
        args = [str(i)] + [str(j) for j in xrange(11)]
        personList.append(buildPerson(*args))
    return personList
if __name__ == '__main__':
    # Demo: build a two-person list and pretty-print the XML.
    root = buildPersonList(2)
    print(ET.tostring(root, pretty_print=True))
from stnu import NamedStnu
from fast_dc import DcTester
def main():
    """Read an STNU from stdin and print whether it is dynamically
    controllable ('dc') or not ('notdc')."""
    network = NamedStnu()
    network.read_from_stdin()
    dc_tester = DcTester(network)
    print 'dc' if dc_tester.is_dynamically_controllable() else 'notdc'
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
# client.py
# Client used to exercise the matching echo server program.
# Import the socket library:
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Establish the connection:
s.connect(('127.0.0.1', 7777))
# Receive the welcome message:
print(s.recv(1024).decode('utf-8'))
for data in [b'shisan', b'luckypot', b'lakiGuo']:
    # Send data:
    s.send(data)
    print(s.recv(1024).decode('utf-8'))
# 'exit' tells the server to close this conversation.
s.send(b'exit')
s.close()
|
import string
import random
from django.db import models
from .enum import SexEnum, NationalityEnum
class School(models.Model):
    """A school with a cap on how many students it can hold."""
    name = models.CharField(max_length=20)
    maximum_student = models.PositiveIntegerField(default=1000)
    def __str__(self):
        return self.name
class StudentQuerySet(models.QuerySet):
    """QuerySet that auto-assigns a random 20-character id on create()."""
    def create(self, **kwargs):
        """Create a Student, generating a random alphanumeric primary key
        when the caller did not supply a non-empty 'id'."""
        # kwargs.get('id') is falsy both when the key is absent and when an
        # empty value was passed -- equivalent to the original two-step check.
        if not kwargs.get('id'):
            # NOTE(review): random is not cryptographically secure; fine only
            # if ids need not be unguessable -- confirm.
            kwargs['id'] = ''.join(random.choice(string.ascii_letters + string.digits) for n in range(20))
        return super(StudentQuerySet, self).create(**kwargs)
class __StudentManager(models.Manager):
    # NOTE: double-underscore name mangling only applies inside class bodies;
    # at module level this is merely a conventionally-private name.
    pass
# Public manager class: base Manager combined with StudentQuerySet's create().
StudentManager = __StudentManager.from_queryset(StudentQuerySet)
class Student(models.Model):
    """A student with a random 20-character alphanumeric primary key.

    The key is generated either by StudentQuerySet.create() or, as a
    fallback, by save() for instances constructed directly.
    """
    id = models.CharField(primary_key=True, max_length=20)
    first_name = models.CharField(max_length=20)
    last_name = models.CharField(max_length=20)
    sex = models.CharField(max_length=1, choices=SexEnum.choices())
    nationality = models.CharField(max_length=20, choices=NationalityEnum.choices(), default=NationalityEnum.THAI)
    school = models.ForeignKey(School, on_delete=models.CASCADE)
    objects = StudentManager()
    @staticmethod
    def _generate_id():
        # Same scheme as StudentQuerySet.create(). NOTE(review): random is
        # not cryptographically secure -- confirm ids need not be unguessable.
        return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20))
    def save(self, *args, **kwargs):
        """Assign a random primary key if missing, then delegate to Model.save.

        Accepts positional arguments too, matching Django's
        save(*args, **kwargs) signature (the original silently dropped them).
        """
        if not self.id:
            self.id = self._generate_id()
        super(Student, self).save(*args, **kwargs)
    def __str__(self):
        return "{} : {} {}".format(self.id, self.first_name, self.last_name)
|
import requests
from allauth.socialaccount import providers
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from .provider import SchedulePicturesOAuth2Provider
class SchedulePicturesOAuth2Adapter(OAuth2Adapter):
    """django-allauth OAuth2 adapter for the SchedulePictures provider."""
    #schedulepictures_url = "http://192.168.1.17:3600"
    schedulepictures_url = "https://www.schedulepictures.com"
    provider_id = SchedulePicturesOAuth2Provider.id
    access_token_url = schedulepictures_url + '/oauth/token/'
    authorize_url = schedulepictures_url + '/oauth/authorize'
    profile_url = schedulepictures_url + '/users/~'
    # Provider does not round-trip the OAuth2 'state' parameter.
    supports_state = False
    def complete_login(self, request, app, token, **kwargs):
        """Fetch the user's profile and convert it into a SocialLogin."""
        extra_data = self.get_user_info(token)
        return self.get_provider().sociallogin_from_response(request, extra_data)
    def get_user_info(self, token):
        """GET the profile endpoint with a bearer token; return parsed JSON."""
        # 'fields' is computed but unused since the field-selection URL below
        # was commented out -- presumably kept for reference.
        fields = providers.registry \
            .by_id(SchedulePicturesOAuth2Provider.id) \
            .get_profile_fields()
        #url = self.profile_url + ':(%s)?format=json' % ','.join(fields)
        url = self.profile_url + '/?format=json'
        headers = {"Authorization": "bearer " + token.token}
        resp = requests.get(url, headers=headers)
        return resp.json()
# Module-level login/callback views wired to the adapter, as allauth expects.
oauth2_login = OAuth2LoginView.adapter_view(SchedulePicturesOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(SchedulePicturesOAuth2Adapter)
|
import math
# Print a star diamond for an odd number of rows, e.g. 5 -> 1,3,5,3,1 stars.
num_rows = int(input("enter the no of rows:"))
if num_rows % 2 == 0:
    print('enter a valid odd number')
else:
    # Top half including the middle row: 1, 3, ..., num_rows stars.
    upper = math.ceil(num_rows / 2)
    width = 1
    for _ in range(upper):
        print("* " * width)
        width += 2
    # Bottom half: num_rows-2, ..., 3, 1 stars. Bug fix: the original looped
    # `upper` times here, so the last iteration printed zero stars and left a
    # stray blank line; upper-1 rows is correct.
    width -= 4
    for _ in range(upper - 1):
        print("* " * width)
        width -= 2
|
class Solution(object):
    def reverseList(self, head):
        """
        Reverse a singly linked list in place.
        :type head: ListNode
        :rtype: ListNode
        """
        # Walk the list once, relinking each node onto the front of the
        # already-reversed portion. Handles empty and single-node lists
        # naturally.
        reversed_head = None
        node = head
        while node is not None:
            following = node.next
            node.next = reversed_head
            reversed_head = node
            node = following
        return reversed_head
from django.forms import ModelForm
import pytest
from ..schema_fields import BooleanField, CharField, ObjectField
from .mock_app.models import RecordShop
@pytest.fixture
def scooby_doo():
    """Plain-dict dog payload with both fields set."""
    return {'name': 'Scooby Doo', 'breed': 'Daschund'}
@pytest.fixture
def snoopy():
    """Second dog payload, for tests needing two distinct dogs."""
    return {'name': 'Snoopy', 'breed': 'Beagle'}
@pytest.fixture
def shaggy(scooby_doo):
    """Person payload embedding a dog payload (nested object data)."""
    return {'name': 'Shaggy', 'favourite_dog': scooby_doo}
@pytest.fixture
def nemo():
    """Fish payload exercising a boolean field."""
    return {'name': 'Nemo', 'salt_water': True}
@pytest.fixture
def dog_field():
    """ObjectField subclass describing a dog (name required, breed optional)."""
    class DogField(ObjectField):
        name = CharField(required=True)
        breed = CharField()
        @property
        def short_name(self):
            # Derived attribute: first three characters of the name.
            return self.name[:3]
    return DogField
@pytest.fixture
def dog_schema():
    """Expected JSON schema generated for DogField."""
    return {
        'type': 'object',
        'title': 'Dog',
        'properties': {
            'name': {
                'title': 'Name',
                'type': 'string',
                'format': 'text',
                'minLength': 1
            },
            'breed': {
                'title': 'Breed',
                'type': 'string',
                'format': 'text'
            }
        },
        'required': ['name']
    }
@pytest.fixture
def typed_dog_schema(dog_schema):
    """dog_schema wrapped in the typed envelope (schemaName + data)."""
    schema_name = 'dog'
    return {
        'type': 'object',
        'title': 'Dog',
        'properties': {
            'schemaName': {
                'title': 'Schema Name',
                'const': schema_name,
                'type': 'string',
                'default': schema_name,
                'template': schema_name
            },
            'data': dog_schema
        },
        "defaultProperties": ["data", "schemaName"],
        "required": ['data', 'schemaName']
    }
@pytest.fixture
def person_field(dog_field):
    """ObjectField with a nested dog object and a tuple-style choices field."""
    class PersonField(ObjectField):
        name = CharField(required=True)
        favourite_dog = dog_field()
        favourite_colour = CharField(
            choices=(
                ('red', 'Red'),
                ('green', 'Green'),
                ('blue', 'Blue')
            )
        )
    return PersonField
@pytest.fixture
def person_schema(dog_schema):
    """Expected schema for PersonField; re-titles the nested dog schema."""
    dog_schema['title'] = 'Favourite Dog'
    return {
        'type': 'object',
        'title': 'Person',
        'properties': {
            'name': {
                'title': 'Name',
                'type': 'string',
                'format': 'text',
                'minLength': 1
            },
            'favourite_colour': {
                'title': 'Favourite Colour',
                'type': 'string',
                'format': 'text',
                'enum': ['red', 'blue', 'green']
            },
            'favourite_dog': dog_schema
        },
        'required': ['name']
    }
@pytest.fixture
def person_editor_schema(person_schema):
    """person_schema in json-editor form: choices become enumSource entries."""
    person_schema['properties']['favourite_colour'] = {
        'title': 'Favourite Colour',
        'type': 'string',
        'format': 'text',
        'enumSource': [
            {
                'source': [
                    {'value': 'red', 'title': 'Red'},
                    {'value': 'green', 'title': 'Green'},
                    {'value': 'blue', 'title': 'Blue'},
                ],
                'title': '{{item.title}}',
                'value': '{{item.value}}'
            }
        ]
    }
    return person_schema
@pytest.fixture
def fish_field():
    """ObjectField with a defaulted boolean and a list-style choices field."""
    class FishField(ObjectField):
        name = CharField(required=True)
        salt_water = BooleanField(default=False)
        ocean = CharField(
            choices=[('pacific', 'Pacific'), ('atlantic', 'Atlantic')]
        )
    return FishField
@pytest.fixture
def fish_schema():
    """Expected JSON schema generated for FishField."""
    return {
        'type': 'object',
        'title': 'Fish',
        'properties': {
            'name': {
                'title': 'Name',
                'type': 'string',
                'format': 'text',
                'minLength': 1
            },
            'salt_water': {
                'title': 'Salt Water',
                'type': 'boolean',
                'format': 'checkbox',
                'default': False
            },
            'ocean': {
                'title': 'Ocean',
                'type': 'string',
                'format': 'text',
                'enum': ['pacific', 'atlantic']
            }
        },
        'required': ['name']
    }
@pytest.fixture
def fish_editor_schema(fish_schema):
    """fish_schema in json-editor form: choices become enumSource entries."""
    fish_schema['properties']['ocean'] = {
        'title': 'Ocean',
        'type': 'string',
        'format': 'text',
        'enumSource': [
            {
                'source': [
                    {'value': 'pacific', 'title': 'Pacific'},
                    {'value': 'atlantic', 'title': 'Atlantic'},
                ],
                'title': '{{item.title}}',
                'value': '{{item.value}}'
            }
        ]
    }
    return fish_schema
@pytest.fixture
def typed_fish_schema(fish_schema):
    """fish_schema wrapped in the typed envelope (schemaName + data)."""
    schema_name = 'fish'
    return {
        'type': 'object',
        'title': 'Fish',
        'properties': {
            'schemaName': {
                'title': 'Schema Name',
                'const': schema_name,
                'type': 'string',
                'default': schema_name,
                'template': schema_name
            },
            'data': fish_schema
        },
        "defaultProperties": ["data", "schemaName"],
        "required": ['data', 'schemaName']
    }
@pytest.fixture
def typed_fish_editor_schema(typed_fish_schema, fish_editor_schema):
    """Typed envelope whose data section uses the editor-form fish schema."""
    typed_fish_schema['properties']['data'] = fish_editor_schema
    return typed_fish_schema
@pytest.fixture
def generic_animal_field():
    """Abstract base ObjectField (Meta.abstract) with a derived property."""
    class GenericAnimalField(ObjectField):
        name = CharField(required=True)
        class Meta:
            abstract = True
        @property
        def loud_name(self):
            # Derived attribute: upper-cased name.
            return self.name.upper()
    return GenericAnimalField
@pytest.fixture
def parrot_field(generic_animal_field):
    """ObjectField subclassing the abstract animal field; overrides the
    schema name via Meta.schema_name."""
    class ParrotField(generic_animal_field):
        talks = BooleanField(required=True)
        class Meta:
            schema_name = "squawker"
    return ParrotField
@pytest.fixture
def parrot_schema():
    """Expected schema for ParrotField; title derives from Meta.schema_name."""
    return {
        'type': 'object',
        'title': 'Squawker',
        'properties': {
            'name': {
                'title': 'Name',
                'type': 'string',
                'format': 'text',
                'minLength': 1
            },
            'talks': {
                'title': 'Talks',
                'type': 'boolean',
                'format': 'checkbox'
            }
        },
        'required': ['name', 'talks']
    }
@pytest.fixture
def typed_parrot_schema(parrot_schema):
    """parrot_schema wrapped in the typed envelope (schemaName + data)."""
    schema_name = 'squawker'
    return {
        'type': 'object',
        'title': 'Squawker',
        'properties': {
            'schemaName': {
                'title': 'Schema Name',
                'const': schema_name,
                'type': 'string',
                'default': schema_name,
                'template': schema_name
            },
            'data': parrot_schema
        },
        "defaultProperties": ["data", "schemaName"],
        "required": ['data', 'schemaName']
    }
@pytest.fixture
def record_shop_form_class():
    """ModelForm over the mock RecordShop model exposing name and catalog."""
    class RecordShopForm(ModelForm):
        class Meta:
            model = RecordShop
            fields = ['name', 'catalog']
    return RecordShopForm
|
'''
Given the root node of a binary search tree (BST) and a value. You need to find the node in the BST that the node's value equals the given value. Return the subtree rooted with that node. If such node doesn't exist, you should return NULL.
For example,
Given the tree:
4
/ \
2 7
/ \
1 3
And the value to search: 2
You should return this subtree:
2
/ \
1 3
In the example above, if we want to search the value 5, since there is no node with value 5, we should return NULL.
'''
class TreeNode:
    """A binary-tree node holding a value and optional child links."""
    def __init__(self, x):
        self.val = x
        self.left = self.right = None
# Reminder that a binary search tree is one where all values in the left subtree are less than the value of the current node,
# and all values in the right subtree are greater.
def searchBST(root,val):
    '''
    root: TreeNode object, root of the binary search tree
    val: integer, the value we are looking for
    output: return the node whose value is val. If no such node exists, return None
    '''
    # The original iterative version performed a full inorder traversal
    # (O(n)), never using the BST ordering. In a BST all values in the left
    # subtree are smaller and all in the right subtree larger, so we can
    # descend directly in O(h), h = tree height -- identical results on a
    # valid BST.
    node = root
    while node is not None:
        if val == node.val:
            return node
        node = node.left if val < node.val else node.right
    # Fell off the tree: the value is not present.
    return None
## Here's a recursive implementation!
def searchBSTrecursive(root,val):
    '''
    root: TreeNode object, root of the binary search tree
    val: integer, the value we are looking for
    output: return the node whose value is val. If no such node exists, return None
    '''
    # Base cases collapse nicely: if we ran off the tree root is None (which
    # is the correct "not found" result), and if this node holds the target
    # we return it directly.
    if root is None or root.val == val:
        return root
    # Otherwise the BST ordering tells us which single subtree could hold val.
    subtree = root.left if val < root.val else root.right
    return searchBSTrecursive(subtree, val)
|
from PIL import Image
import requests
from io import BytesIO
# Some sample token. Instead replace with the token returned by authentication endpoint
JWT_TOKEN = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ0ZXN0IiwiaWFkIjoxLCJhY3AiOm51bGwsInRicCI6bnVsbCwiaWF0IjoxNTg4MTgzMjg5LCJleHAiOjE1ODgxODY4ODl9.PK1gZswB1_dzV13iZ6YCyOES4bCKCh00vvnho8gSLjM'
# Change to the user (not operator) ID who is managing the cart
USER_ID = 4
def make_read_request(what_to_select, table, conditions):
    """Issue a read query against the remote Web API; return the Response.

    Parameters mirror the API's query interface: columns to select, the
    table name, and a raw SQL-like condition string.

    NOTE(review): 'conditions' is a raw string that callers build by
    concatenation -- if any part of it can come from untrusted input this is
    an injection risk; confirm the server sanitises it.
    """
    # Web API host URL (change if needed)
    # BASE_HOST_URL = 'http://127.0.0.1:8000'
    BASE_HOST_URL = 'http://www.appspesa.it/api'
    # No need to change this
    ENDPOINT_ROUTE = '/v1/query/read'
    # No need to change this
    # Authorization header information
    headers = {"Authorization": "Bearer " + JWT_TOKEN}
    # No need to change this
    # Query parameters to pass to request
    params = {
        'what_to_select': what_to_select,
        'which_table': table,
        'conditions_to_satisfy': conditions
    }
    # Make request and return the response
    return requests.get(BASE_HOST_URL + ENDPOINT_ROUTE, headers=headers, params=params)
# Get all cart notes for carts that were confirmed orders for the user
r = make_read_request('cart_note', 'cart', 'User_ID = ' + str(USER_ID) + ' AND Confirmed = 1')
# If the request was successful
if r.status_code == 200:
content = r.json()
# Flatten list of cart note messages
list_of_messages = [item for sublist in content['rows'] for item in sublist]
# Print list of messages for user
print(list_of_messages)
|
import CAL
# The original file carried a large "commented-out" Python 2 menu here as an
# unassigned triple-quoted string literal -- dead code evaluated and
# discarded at import time. Removing it is behavior-preserving.
# Build and run the CAL game board, then finish the game.
myCAL = CAL.CAL()
myCAL.Create_board()
myCAL.Run_board()
myCAL.End_Game()
|
from bert_serving.client import BertClient
import numpy as np
# Build a vocabulary from wordList.txt, persist word<->index mappings, and
# save BERT embeddings for every word via a bert-serving server.
with open('wordList.txt', 'r', encoding='utf-8') as f:
    words = []
    for line in f.readlines():
        line = line.strip('\n')  # strip the trailing newline
        b = line.split(' ')  # split the line into a list on spaces
        def not_empty(s):
            # Keep only non-empty, non-whitespace tokens.
            return s and s.strip()
        c = filter(not_empty, b)
        words.extend(c)
words = set(words)  # set of all distinct words
# NOTE(review): iterating a set gives an arbitrary order, so the indices
# below (and the embedding row order) can differ between runs -- confirm
# this is intended.
word_list = []
count = 0
for item in enumerate(words):
    # print(item)
    count += 1
    word_list.append(item[1])
print(str(count))
word_idx = dict((c, k + 1) for k, c in enumerate(words))  # word -> 1-based index
word_idx_rev = dict((k + 1, c) for k, c in enumerate(words))  # 1-based index -> word
# Save
fileName1 ="./BERT_RES/depression/word_idx.csv"
## write the mapping to disk
with open(fileName1, 'w') as f:
    [f.write('{0},{1}\n'.format(key, value)) for key, value in word_idx.items()]
fileName2 ="./BERT_RES/depression/word_idx_rev.csv"
## write the reverse mapping to disk
with open(fileName2, 'w') as f:
    [f.write('{0},{1}\n'.format(key, value)) for key, value in word_idx_rev.items()]
bc = BertClient()
embedding = bc.encode(word_list)
print(embedding.shape)
np.save('./BERT_RES/depression/word_embedding.npy', embedding)
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import tagged, common
from odoo.tools.misc import formatLang
import time
from odoo import fields
from odoo.addons.account.tests.account_test_no_chart import TestAccountNoChartCommon
from odoo.addons.account_reports.tests.common import TestAccountReportsCommon
from dateutil.relativedelta import relativedelta
@tagged('post_install', '-at_install')
class TestAccountFollowup(TestAccountNoChartCommon):
    """Functional tests for the followup report: multicompany totals,
    e-mail attachments, and followup level/status transitions."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        super().setUpAdditionalAccounts()
        super().setUpAccountJournal()
        # Pin the report date to June 26 of the current year so delay
        # computations are stable within a single test run.
        mock_date = time.strftime('%Y') + '-06-26'
        cls.minimal_options = {
            'date': {
                'date_from': mock_date,
                'date_to': mock_date,
            },
        }
    def test_05_followup_multicompany(self):
        """Each company's followup report must only total its own invoices."""
        date_sale = fields.Date.today()
        # Company 0
        invoice_move = self.env['account.move'].with_context(default_type='out_invoice').create({
            'partner_id': self.partner_customer_usd.id,
            'date': date_sale,
            'journal_id': self.journal_sale.id,
            'invoice_line_ids': [
                (0, 0, {'quantity': 1, 'price_unit': 30}),
            ],
        })
        # Company 1
        company1 = self.env['res.company'].create({'name': 'company1'})
        account_sale1 = self.account_revenue.copy({'company_id': company1.id})
        account_rec1 = self.account_receivable.copy({'company_id': company1.id})
        sale_journal1 = self.journal_sale.copy({
            'company_id': company1.id,
            'default_debit_account_id': account_sale1.id,
            'default_credit_account_id': account_sale1.id,
        })
        self.partner_customer_usd.with_context(force_company=company1.id).property_account_receivable_id = account_rec1
        invoice_move1 = self.env['account.move'].with_context(default_type='out_invoice').create({
            'partner_id': self.partner_customer_usd.id,
            'date': date_sale,
            'journal_id': sale_journal1.id,
            'invoice_line_ids': [
                (0, 0, {'quantity': 1, 'price_unit': 60}),
            ],
        })
        invoice_move.post()
        invoice_move1.post()
        # For company 0
        main_company = self.env.ref('base.main_company')
        currency = main_company.currency_id
        self.assertEqual(self.partner_customer_usd.credit, 30.0)
        options = dict(self.minimal_options)
        options['partner_id'] = self.partner_customer_usd.id
        lines = self.env['account.followup.report']._get_lines(options)
        # Title line + actual business line
        self.assertEqual(len(lines), 2)
        self.assertEqual(lines[1]['class'], 'total')
        self.assertEqual(len(lines[1]['columns']), 7)
        self.assertEqual(lines[1]['columns'][5]['name'], 'Total Due')
        self.assertEqual(lines[1]['columns'][6]['name'], formatLang(self.env, 30.00, currency_obj=currency))
        # For company 1
        currency = company1.currency_id
        self.assertEqual(self.partner_customer_usd.with_context(allowed_company_ids=company1.ids).credit, 60.0)
        lines = self.env['account.followup.report'].with_context(allowed_company_ids=company1.ids)._get_lines(options)
        # Title line + actual business line
        self.assertEqual(len(lines), 2)
        self.assertEqual(lines[1]['class'], 'total')
        self.assertEqual(len(lines[1]['columns']), 7)
        self.assertEqual(lines[1]['columns'][5]['name'], 'Total Due')
        self.assertEqual(lines[1]['columns'][6]['name'], formatLang(self.env, 60.00, currency_obj=currency))
    def test_followup_mail_attachments(self):
        '''Test that join_invoices options is working: sending attachment from multiple invoices'''
        test_followup_level = self.env['account_followup.followup.line'].create({
            'name': 'test_followup_level',
            'delay': 4,
            'description': 'Test Followup Level',
            'send_email': True,
            'print_letter': False,
            'join_invoices': True,
        })
        test_partner = self.env['res.partner'].create({
            'name': 'Pinco Pallino',
            'email': 'test@example.com',
        })
        test_partner.property_account_receivable_id = self.account_receivable
        today = fields.Date.today()
        # generating invoices
        invoices = self.env['account.move'].create([
            {
                'partner_id': test_partner.id,
                'invoice_date': today + relativedelta(days=-10),
                'type': 'out_invoice',
                'invoice_line_ids': [(0, 0, {'quantity': 1, 'price_unit': 40})],
            },
            {
                'partner_id': test_partner.id,
                'invoice_date': today + relativedelta(days=-11),
                'type': 'out_invoice',
                'invoice_line_ids': [(0, 0, {'quantity': 2, 'price_unit': 40})],
            },
        ])
        invoices.post()
        some_attachments = self.env['ir.attachment']
        # creating and linking attachment with invoices
        for inv in invoices:
            att_id = self.env['ir.attachment'].create({
                'name': 'some_attachment.pdf',
                'res_id': inv.id,
                'res_model': 'account.move',
                'datas': 'test',
                'type': 'binary',
            })
            some_attachments += att_id
            inv._message_set_main_attachment_id([(4, att_id.id)])
        # triggering followup report notice
        test_partner._compute_unpaid_invoices()
        options = dict(self.minimal_options)
        options['partner_id'] = test_partner.id
        # sending email with attachments
        self.env['account.followup.report'].send_email(options)
        # retrieving attachments from the last sent mail
        sent_attachments = self.env['mail.message'].search([('partner_ids', '=', test_partner.id)]).attachment_ids
        self.assertEqual(some_attachments, sent_attachments)
    def test_followup_level_and_status(self):
        """Followup level/status must advance with invoice age and with
        executed followup actions, and reset once invoices are paid."""
        self.env['account_followup.followup.line'].search([]).unlink()
        (first_followup_level, second_followup_level) = self.env['account_followup.followup.line'].create([
            {
                'name': 'first_followup_level',
                'delay': 15,
                'description': 'First Followup Level',
                'send_email': False,
                'print_letter': False,
            },
            {
                'name': 'second_followup_level',
                'delay': 25,
                'description': 'Second Followup Level',
                'send_email': False,
                'print_letter': False,
            },
        ])
        test_partner = self.env['res.partner'].create({
            'name': 'Mr Bluesky',
        })
        test_partner.property_account_receivable_id = self.account_receivable
        today = fields.Date.today()
        tomorrow = today + relativedelta(days=1)
        ten_days_ago = today + relativedelta(days=-10)
        forty_days_ago = today + relativedelta(days=-40)
        self.assertNotIn(test_partner.id, test_partner._query_followup_level())
        today_invoice = self.env['account.move'].create({
            'partner_id': test_partner.id,
            'invoice_date': tomorrow,
            'type': 'out_invoice',
            'invoice_line_ids': [(0, 0, {'quantity': 1, 'price_unit': 40})]
        })
        today_invoice.post()
        # only a recent invoice, nothing to do
        self.assertEqual(test_partner._query_followup_level()[test_partner.id]['followup_level'], None)
        self.assertEqual(test_partner._query_followup_level()[test_partner.id]['followup_status'], 'no_action_needed')
        ten_days_ago_invoice = self.env['account.move'].create({
            'partner_id': test_partner.id,
            'invoice_date': ten_days_ago,
            'type': 'out_invoice',
            'invoice_line_ids': [(0, 0, {'quantity': 1, 'price_unit': 30})]
        })
        ten_days_ago_invoice.post()
        # there is an overdue invoice, but it is not taken in the delay
        self.assertEqual(test_partner._query_followup_level()[test_partner.id]['followup_level'], None)
        self.assertEqual(test_partner._query_followup_level()[test_partner.id]['followup_status'], 'with_overdue_invoices')
        forty_days_ago_invoice = self.env['account.move'].create({
            'partner_id': test_partner.id,
            'invoice_date': forty_days_ago,
            'type': 'out_invoice',
            'invoice_line_ids': [(0, 0, {'quantity': 1, 'price_unit': 20})]
        })
        forty_days_ago_invoice.post()
        # the last invoice was due for longer than the delay
        self.assertEqual(test_partner._query_followup_level()[test_partner.id]['followup_level'], first_followup_level.id)
        self.assertEqual(test_partner._query_followup_level()[test_partner.id]['followup_status'], 'in_need_of_action')
        # execute followup needed
        test_partner._execute_followup_partner()
        # no action needed because the date for next followup is in the future
        self.assertEqual(test_partner.payment_next_action_date, today + relativedelta(days=10))
        self.assertEqual(test_partner._query_followup_level()[test_partner.id]['followup_level'], second_followup_level.id)
        self.assertEqual(test_partner._query_followup_level()[test_partner.id]['followup_status'], 'with_overdue_invoices')
        # no action needed because followup of level 1 has already been done for all the lines
        test_partner.payment_next_action_date = today + relativedelta(days=-1)
        self.assertEqual(test_partner._query_followup_level()[test_partner.id]['followup_level'], second_followup_level.id)
        self.assertEqual(test_partner._query_followup_level()[test_partner.id]['followup_status'], 'in_need_of_action')
        # execute followup needed
        test_partner._execute_followup_partner()
        # stay on level 2, but the date should be set to later
        self.assertEqual(test_partner._query_followup_level()[test_partner.id]['followup_level'], second_followup_level.id)
        self.assertEqual(test_partner._query_followup_level()[test_partner.id]['followup_status'], 'with_overdue_invoices')
        # register a payment for the older invoice
        self.env['account.payment.register'].with_context(active_model='account.move', active_ids=forty_days_ago_invoice.ids).create({
            'payment_date': today,
            'journal_id': self.journal_sale.id,
        }).create_payments()
        # nothing more to see as the first invoice was earlier than the delay
        self.assertEqual(test_partner._query_followup_level()[test_partner.id]['followup_level'], None)
        self.assertEqual(test_partner._query_followup_level()[test_partner.id]['followup_status'], 'with_overdue_invoices')
@tagged('post_install', '-at_install')
class TestAccountFollowupReports(TestAccountReportsCommon):
    """Rendering tests for the followup report line values."""
    @classmethod
    def setUpClass(cls):
        super(TestAccountFollowupReports, cls).setUpClass()
        # Start from a clean slate of followup levels.
        cls.env['account_followup.followup.line'].search([]).unlink()
        cls.first_followup_level = cls.env['account_followup.followup.line'].create({
            'name': 'first_followup_level',
            'delay': 10,
            'description': 'First Followup Level',
        })
        cls.second_followup_level = cls.env['account_followup.followup.line'].create({
            'name': 'second_followup_level',
            'delay': 20,
            'description': 'Second Followup Level',
        })
        cls.partner_a.write({
            'email': 'partner_a@mypartners.xyz',
        })
    def test_followup_report_initial_state(self):
        ''' Test folded/unfolded lines. '''
        # Init options.
        report = self.env['account.followup.report']
        options = report._get_options(None)
        options['partner_id'] = self.partner_a.id
        report = report.with_context(report._set_context(options))
        self.assertLinesValues(
            report._get_lines(options),
            # Name Date, Due Date, Doc. Comm. Exp. Date Blocked Total Due
            [ 0, 1, 2, 3, 4, 5, 6, 7],
            [
                ('INV/2017/0001', '01/01/2017', '01/01/2017', '', 'INV/2017/0001', '', '', 115.00),
                ('INV/2016/0001', '12/01/2016', '12/01/2016', '', 'INV/2016/0001', '', '', 780.00),
                ('', '', '', '', '', '', 'Total Due', 895.00),
                ('', '', '', '', '', '', 'Total Overdue', 895.00),
            ],
        )
|
import tensorflow as tf
import util.TensorflowUtils as tu
from util.dataLoader import loadNormalData
from util.Util import Normalize, DummyCM
def train(loss_val, var_list, lr, max_grad):
    """Build an Adam training op with global-norm gradient clipping."""
    clipped_grads, _ = tf.clip_by_global_norm(
        tf.gradients(loss_val, var_list), max_grad)
    adam = tf.train.AdamOptimizer(lr, beta1=0.9)
    return adam.apply_gradients(zip(clipped_grads, var_list))
class FCLayers(object):
    """Stack of fully-connected layers with leaky ReLU between them.

    Variables are created eagerly in __init__ (named fc_w_i / fc_b_i in the
    current variable scope); apply() wires the matmuls.
    """
    def __init__(self, input_dimension, layers, layer_dimension, output_dimension):
        self.wList = []
        self.bList = []
        self.layers = layers
        iDimension = input_dimension
        for i in range(layers):
            oDimension = layer_dimension
            # The final layer maps to the requested output size.
            if (i == layers - 1):
                oDimension = output_dimension
            w = tf.get_variable("fc_w_%d"%i, [iDimension, oDimension], dtype=tf.float32)
            b = tf.get_variable("fc_b_%d"%i, [oDimension], dtype=tf.float32)
            self.wList.append(w)
            self.bList.append(b)
            iDimension = oDimension
    def apply(self, inputs):
        """Run inputs through all layers; leaky ReLU (alpha=0.1) on all but the last."""
        for i in range(self.layers):
            inputs = tf.matmul(inputs, self.wList[i]) + self.bList[i]
            if (i < self.layers - 1):
                inputs = tu.leaky_relu(inputs, 0.1, None)
        return inputs
class SingleLayer(object):
    """One dense layer; set IS_LAST on instances/subclasses to skip the activation."""
    IS_LAST = False
    def __init__(self, x_dimension, y_dimension, name):
        self.w = tf.get_variable("sl_w_%s"%name, [x_dimension, y_dimension], dtype=tf.float32)
        self.b = tf.get_variable("sl_b_%s"%name, [y_dimension], dtype=tf.float32)
    def apply(self, inputs):
        """Compute inputs @ W + b, then leaky ReLU (alpha=0.1) unless IS_LAST."""
        result = tf.matmul(inputs, self.w) + self.b
        if (self.IS_LAST == False):
            result = tu.leaky_relu(result, 0.1, None)
        return result
class RNNConfig(object):
X_DIMENSION = -1
Y_DIMENSION = -1
G_DIMENSION = -1
E_DIMENSION = -1
BALL_DIMENSION = 0
BALL_RADIUS = 13
ROOT_DIMENSION = 5
RNN_SIZE = 512
NUM_OF_LAYERS = 4
TRAIN_STEP_SIZE = 48
TRAIN_BATCH_SIZE = 30
TRAIN_EPOCH_ITER = 4
LAYER_KEEP_PROB = 1
INPUT_KEEP_PROB = 1
STATE_DIMENSION = 512
IS_STATE_MODEL = False
E_LAYERS = 3
MAX_GRAD_NORM = 1
root_weight = 1
pose_weight = 1
pose_joint_weights = None
NO_INPUT = False
POSE_CONTAINED_AS_INPUT = False
USE_RESIDUAL_MODEL = False
additional_joint = -1
additional_weight = 0
foot_weight = 6
foot_slide_weight = 6
ball_weight = 2;
ball_contact_weight = 1
ball_hand_weight = 0.01
ball_gravity_weight = 1
ball_cond_weight = 1
joint_len_weight = 0.01
ball_height_weight = 0.1
ball_height_normal = 1/30.0
ball_velocity_normal = 1/10.0
ball_occasionality_normal = 48.0
foot_contact_use_y = False
ball_contact_use_y = False
use_ball_have = False
use_input_layer = False
activation_weight = -1
activation_index = -1
quality_param_index = -1
forget_bias = 0.8
x_normal = None
y_normal = None
train_as_input = False
use_U_net = False
label = "RNNConfig"
jointPairs = [[ 17, 10 ],[ 10, 2 ],[ 7, 9 ],[ 9, 1 ],[ 18, 12 ],[ 12, 5 ],[ 8, 11 ],[ 11, 4 ]]
jointLengths = [39.9035,39.6689,28.5802,27.092,39.9035,39.6689,28.5802,27.092]
# jointPairs = [[ 17, 10 ],[ 10, 2 ],[ 2, 3 ],[ 7, 9 ],[ 9, 1 ],[ 1, 14 ],[ 18, 12 ],[ 12, 5 ],[ 5, 6 ],[ 8, 11 ],[ 11, 4 ],[ 4, 15 ]]
# jointLengths = [39.9035,39.6689,8.38147,28.5802,27.092,9.5622,39.9035,39.6689,8.38147,28.5802,27.092,9.5622,19.9754,20.7666]
    def __init__(self):
        # All configuration lives in class attributes; instances override as needed.
        pass
    def model(self, batchSize, stepSize, lr=0.0001):
        """Build an RNNModel for this config.

        RNNModel is defined elsewhere in this module -- TODO confirm.
        """
        return RNNModel(self, batchSize, stepSize, lr)
    def rnn_cell(self):
        """Create a TF1 BasicLSTMCell sized/biased per this config."""
        cell = tf.contrib.rnn.BasicLSTMCell(self.RNN_SIZE, forget_bias=self.forget_bias)
        return cell
def scope(self):
if (self.label == None):
return DummyCM()
else:
return tf.variable_scope(self.label)
def scope_name(self, label):
if (self.label == None):
return label
else:
return "%s/%s"%(self.label, label)
    def drop_out(self, input_data, keep_prob):
        """Apply dropout, with a no-op fast path when keep_prob == 1."""
        if (keep_prob == 1):
            return input_data
        else:
            return tf.nn.dropout(input_data, keep_prob)
    def error(self, x, prev_y, y, generated):
        """Assemble the total training loss and a list of its components.

        x: network input; prev_y: previous-frame target; y: target motion;
        generated: network output. Returns (total_loss, [components]).
        foot_loss / joint_len_loss are defined outside this view -- TODO confirm.
        """
        motion_prev = prev_y
        motion_y = y
        motion_g = generated
        loss_root, loss_pose, loss_ball = self.motion_mse_loss(x, motion_y, motion_g)
        loss_root = loss_root*self.root_weight
        loss_pose = loss_pose*self.pose_weight
        zero = tf.constant(0, dtype=tf.float32)
        loss_addi = zero
        # Optional extra penalty on one designated channel of the output.
        if (self.additional_joint >= 0 and self.additional_weight > 0):
            g = motion_g[:,:,self.additional_joint]
            v = motion_y[:,:,self.additional_joint]
            loss_addi = tf.reduce_mean(tf.square(g - v))*self.additional_weight
        # De-normalize when the losses below need values in world units.
        if (self.foot_slide_weight > 0 or self.BALL_DIMENSION > 0):
            motion_prev = self.y_normal.de_normalize(motion_prev)
            motion_g = self.y_normal.de_normalize(motion_g)
            motion_y = self.y_normal.de_normalize(motion_y)
        if (self.foot_slide_weight == 0):
            loss_foot = zero
        else:
            loss_foot = self.foot_loss(motion_g, motion_prev, motion_y)*self.foot_slide_weight
        if (self.joint_len_weight > 0):
            loss_joint = self.joint_len_loss(motion_g) * self.joint_len_weight
        else:
            loss_joint = zero
        # Ball-physics losses only apply when ball channels are present.
        if (self.BALL_DIMENSION > 0):
            l_height, l_cond, l_hand, l_sign = self.ball_loss(motion_g, motion_prev, motion_y)
        else:
            l_height = l_cond = l_hand = l_sign = zero
        loss = loss_root + loss_pose + loss_foot + loss_joint + loss_addi + loss_ball + l_height + l_cond + l_hand + l_sign
        return loss, [loss_root, loss_pose, loss_foot, loss_joint, loss_addi, loss_ball, l_height, l_cond, l_hand, l_sign]
    def estimation_error(self, x, esitmated):
        """MSE losses for the remaining-time and pre-activation estimates.

        (Parameter name 'esitmated' is a historical typo; kept so that
        keyword callers are not broken.)
        """
        # x : pre-acti, acti, action=3, goal pos=2, goal ori=2, normalized goal pos=2, remain time => 12
        # estimated : estimated time, estimated pre-activation
        loss_time = tf.reduce_mean(tf.square(x[:,:,11] - esitmated[:,:,0]))
        loss_pa = tf.reduce_mean(tf.square(x[:,:,0] - esitmated[:,:,1]))
        return loss_time + loss_pa, [loss_time, loss_pa]
    def estimation_time_error(self, x, esitmated):
        """MSE loss for the remaining-time estimate only.

        (Parameter name 'esitmated' is a historical typo; kept for keyword
        compatibility.)
        """
        # x : acti, action=3, goal pos=2, goal ori=2, normalized goal pos=2, remain time => 11
        # estimated : estimated time
        loss_time = tf.reduce_mean(tf.square(x[:,:,10] - esitmated[:,:,0]))
        return loss_time, [loss_time]
    def ball_loss(self, output, prev_motion, y):
        """Physics-inspired losses for the ball channels of the output.

        Returns four scalar losses: (height, conditions, hand-contact,
        bounce). The ball block is assumed to be laid out as
        [x, y, z, ..., left-contact, right-contact] over the first
        BALL_DIMENSION channels -- TODO confirm.
        NOTE(review): self.batchSize / self.stepSize are not set anywhere in
        this view; presumably assigned by the model -- confirm.
        """
        b_start = 0
        b_end = self.BALL_DIMENSION
        radius = self.BALL_RADIUS
        # zero = tf.constant([0]*self.batchSize, dtype=tf.float32)
        loss_list = [[], [], [], []]
        # c_margin = 0.
        min_height = tf.constant([0]*self.batchSize, dtype=tf.float32)
        gv = 980/30.0/30.0 # 980cm/30fps / 30fps
        r_idx = self.BALL_DIMENSION + self.ROOT_DIMENSION
        # hand_indices = [1, 4]
        hand_indices = [14, 15]
        # output_pose = output[:,:,r_idx:]
        output_pose = y[:,:,r_idx:]
        output_c = output
        if (self.ball_contact_use_y):
            output_c = y
        for i in range(self.stepSize-1):
            # b0/b1/b2: ball positions at frames i-1, i, i+1; c0/c1/c2 the
            # corresponding contact indicators (max of left/right hand).
            if (i == 0):
                b0 = prev_motion[:,b_start:(b_start + 3)]
                c0 = tf.maximum(prev_motion[:,b_end-2], prev_motion[:,b_end-1])
            else:
                b0 = output[:,i-1,b_start:(b_start + 3)]
                c0 = tf.maximum(output_c[:,i-1,b_end-2], output_c[:,i-1,b_end-1])
            b1 = output[:,i,b_start:(b_start + 3)]
            b2 = output[:,i+1,b_start:(b_start + 3)]
            c_left = output_c[:,i,b_end-2]
            c_right = output_c[:,i,b_end-1]
            c1 = tf.maximum(c_left, c_right)
            c2 = tf.maximum(output_c[:,i+1,b_end-2], output_c[:,i+1,b_end-1])
            # not_contact = tf.clip_by_value(0.6 - c1, 0, 0.6) * 2
            v0 = b1 - b0
            v1 = b2 - b1
            # Binarize the contact indicators at a 0.5 threshold.
            c0 = tf.sign(tf.maximum(c0 - 0.5, 0))
            c1 = tf.sign(tf.maximum(c1 - 0.5, 0))
            c2 = tf.sign(tf.maximum(c2 - 0.5, 0))
            not_contact = 1 - c1
            b_height = (b1[:,1] - radius)*self.ball_height_normal
            min_height = (1-c1)*tf.minimum(min_height, b_height) + c1*b_height
            loss_height = (1-c1)*c2*tf.square(min_height) * (self.ball_height_weight * self.ball_occasionality_normal)
            # u0/u1: whether the ball is moving upward before/after frame i.
            u0 = tf.sign(tf.maximum(v0[:,1], 0))
            u1 = tf.sign(tf.maximum(v1[:,1], 0))
            not_contact_seq = (1-c0)*(1-c2)
            is_reverse = u0*(1-u1)
            is_bounce = (1-u0)*u1
            is_down = (1-u0)*(1-u1)*not_contact_seq
            is_up = u0*u1*not_contact_seq
            dv_y = v1[:,1] - v0[:,1]
            # is_reverse = tf.sign(tf.maximum(v0[:,1], 0) * tf.maximum(-v1[:,1], 0))
            # is_bounce = tf.sign(tf.maximum(-v0[:,1], 0) * tf.maximum(v1[:,1], 0))
            loss_reverse = tf.square(1 - v1[:,1] * self.ball_velocity_normal) * is_reverse * self.ball_occasionality_normal * self.ball_cond_weight
            loss_bounce = tf.square(b_height) * is_bounce * self.ball_occasionality_normal * self.ball_cond_weight
            # In free flight, the vertical velocity change should match gravity.
            loss_down = tf.square((dv_y + gv) * self.ball_velocity_normal) * is_down
            loss_up = tf.square((dv_y - gv) * self.ball_velocity_normal) * is_up
            # loss_cond = loss_reverse + loss_bounce + (loss_down + loss_up)*self.ball_gravity_weight
            loss_cond = loss_reverse + (loss_down + loss_up)*self.ball_gravity_weight
            loss_cond = loss_cond*not_contact
            loss_bounce = loss_bounce*not_contact
            left_contact = tf.clip_by_value(c_left - 0.5, 0, 0.5)*2
            right_contact = tf.clip_by_value(c_right - 0.5, 0, 0.5)*2
            # left_contact = tf.sign(tf.maximum(output[:,i,b_end-2] - 0.5, 0))
            # right_contact = tf.sign(tf.maximum(output[:,i,b_end-1] - 0.5, 0))
            current_pose = output_pose[:,i,:]
            loss_left = self.ball_contact_loss(current_pose, hand_indices[0], b1) * left_contact
            loss_right = self.ball_contact_loss(current_pose, hand_indices[1], b1) * right_contact
            if (self.use_ball_have):
                # Gate all ball losses by the has-ball indicator channel.
                hb = tf.sign(tf.maximum(y[:,i,self.additional_joint] - 0.5, 0))
                loss_list[0].append(loss_height*hb)
                loss_list[1].append(loss_cond*hb)
                loss_list[2].append((loss_left + loss_right)*hb)
                loss_list[3].append(loss_bounce*hb)
            else:
                loss_list[0].append(loss_height)
                loss_list[1].append(loss_cond)
                loss_list[2].append(loss_left + loss_right)
                loss_list[3].append(loss_bounce)
            # loss_list[3].append(self.sign_loss(c_left) + self.sign_loss(c_right))
            # loss_cond = (loss_reverse + loss_bounce)*(1-c1)
            # loss_cond = (loss_reverse + loss_bounce)*not_contact
            # loss_cond = tf.cond(is_reverse, lambda:((5 - v1[1])*5), lambda:tf.cond(is_bounce, lambda:((b1[1] - r)), lambda:zero))
            # horizontally uniform velocity
            # h_x = (v0[:,0] - v1[:,0])*not_contact
            # h_z = (v0[:,2] - v1[:,2])*not_contact
            # loss_list.append([loss_cond])
            # loss_list.append([h_x, h_z, loss_cond])
        # square everything
        # return (tf.reduce_mean(tf.square(loss_list))*0.01 + tf.reduce_mean(loss_sum))*self.ball_contact_weight
        for i in range(len(loss_list)):
            loss_list[i] = tf.reduce_mean(loss_list[i])*self.ball_contact_weight
        return loss_list[0], loss_list[1], loss_list[2], loss_list[3]
        # return loss_list[0], loss_list[1], loss_list[2]
def ball_contact_loss(self, current_pose, j_idx, ball):
h_idx = 1 + 3*j_idx
dx = current_pose[:,h_idx] - ball[:,0]
dy = current_pose[:,h_idx+1] - ball[:,1]
dz = current_pose[:,h_idx+2] - ball[:,2]
d_len = tf.sqrt(tf.square(dx) + tf.square(dy) + tf.square(dz))
return tf.square((d_len - self.BALL_RADIUS)*self.ball_height_normal)*self.ball_hand_weight
def motion_mse_loss(self, x, y, output):
rootStart = self.BALL_DIMENSION
poseStart = self.ROOT_DIMENSION + self.BALL_DIMENSION
output_root = tf.slice(output, [0, 0, rootStart], [-1, -1, self.ROOT_DIMENSION])
output_pose = tf.slice(output, [0, 0, poseStart], [-1, -1, -1])
y_root = tf.slice(y, [0, 0, rootStart], [-1, -1, self.ROOT_DIMENSION])
y_pose = tf.slice(y, [0, 0, poseStart], [-1, -1, -1])
if (self.activation_weight > 0):
loss_root = tf.square(output_root - y_root)*[self.foot_weight, self.foot_weight, 1, 1, 1]
if (self.pose_joint_weights == None):
loss_pose = tf.square(output_pose - y_pose)
else:
loss_pose = tf.square(output_pose - y_pose)*self.pose_joint_weights
lr_list = []
lp_list = []
for i in range(self.stepSize):
acti = 1 + x[:,i,self.activation_index]*self.activation_weight
lr_list.append(tf.reduce_mean(loss_root[:,i,:])*acti)
lp_list.append(tf.reduce_mean(loss_pose[:,i,:])*acti)
loss_root = tf.reduce_mean(lr_list)
loss_pose = tf.reduce_mean(lp_list)
else:
loss_root = tf.reduce_mean(tf.square(output_root - y_root)*[self.foot_weight, self.foot_weight, 1, 1, 1])
if (self.pose_joint_weights == None):
loss_pose = tf.reduce_mean(tf.square(output_pose - y_pose))
else:
loss_pose = tf.reduce_mean(tf.square(output_pose - y_pose)*self.pose_joint_weights)
if (rootStart > 0):
if (self.use_ball_have):
loss_list = []
for i in range(self.stepSize):
o_i = output[:,i,0:rootStart]
y_i = y[:,i,0:rootStart]
if (self.BALL_DIMENSION == 8):
loss_ball = tf.reduce_mean(tf.square(o_i - y_i)* [1,1,1,0.5,0.5,0.5,self.ball_weight,self.ball_weight], 1)
else:
loss_ball = tf.reduce_mean(tf.square(o_i - y_i)* [1,1,1,self.ball_weight,self.ball_weight], 1)
hb = tf.sign(tf.maximum(y[:,i,self.additional_joint] - 0.5, 0)) + 0.1
loss_list.append(loss_ball*hb)
loss_ball = tf.reduce_mean(loss_list)
else:
output_ball = tf.slice(output, [0, 0, 0], [-1, -1, rootStart])
y_ball = tf.slice(y, [0, 0, 0], [-1, -1, rootStart])
if (self.BALL_DIMENSION == 8):
loss_ball = tf.reduce_mean(tf.square(output_ball - y_ball)* [1,1,1,0.5,0.5,0.5,self.ball_weight,self.ball_weight])
else:
loss_ball = tf.reduce_mean(tf.square(output_ball - y_ball)* [1,1,1,self.ball_weight,self.ball_weight])
else:
loss_ball = tf.constant(0, dtype=tf.float32)
return loss_root, loss_pose, loss_ball
    def foot_loss(self, output, prev_motion, motion_y):
        """Foot-skate penalty: while a foot is flagged as in contact, its
        position must not move between consecutive frames.

        Args:
            output: predicted motion (batch, step, dims).
            prev_motion: last frame of the previous window (batch, dims),
                used as the "previous pose" for step 0.
            motion_y: ground-truth motion; supplies the contact flags when
                foot_contact_use_y is set.
        """
        # prev_motion = self.y_normal.de_normalize(prev_motion)
        # output = self.y_normal.de_normalize(output)
        r_idx = self.BALL_DIMENSION + self.ROOT_DIMENSION
        a_idx = 2
        output_root = output[:,:,self.BALL_DIMENSION:r_idx]
        output_pose = output[:,:,r_idx:]
        # Contact flags come from the prediction by default, optionally from
        # the ground truth instead.
        c_root = output_root
        if (self.foot_contact_use_y):
            c_root = motion_y[:,:,self.BALL_DIMENSION:r_idx]
        # root, root height, Head_End, LeftHand
        # NOTE(review): the list below selects four joints treated as feet
        # (first two gated by contact flag 0, last two by flag 1); the comment
        # above looks stale for these indices -- confirm against the skeleton.
        foot_indices = [2, 3, 5, 6]
        dist_list = []
        for i in range(self.stepSize):
            if (i == 0):
                prev_pose = prev_motion[:,r_idx:]
            else:
                prev_pose = output_pose[:,i-1,:]
            current_root = output_root[:,i,:]
            current_pose = output_pose[:,i,:]
            # 2D rotation (angle at root channel a_idx) plus translation
            # mapping the current local pose into the previous frame's space.
            cos = tf.cos(current_root[:,a_idx])
            sin = tf.sin(current_root[:,a_idx])
            dx_x = cos
            dx_z = -sin
            dy_x = sin
            dy_z = cos
            t_x = current_root[:, a_idx+1]
            t_z = -current_root[:, a_idx+2]
            for j in range(len(foot_indices)):
                idx = 1 + 3*foot_indices[j]
                # First two foot joints share contact channel 0, the other
                # two share channel 1.
                if (j < 2):
                    f_contact = c_root[:,i,0]
                else:
                    f_contact = c_root[:,i,1]
                # Hard 0/1 gate: penalize only when contact prob > 0.5.
                f_contact = tf.sign(tf.maximum(f_contact - 0.5, 0))
                # f_contact = tf.clip_by_value(f_contact - 0.5, 0, 0.5)
                moved_x = dx_x*current_pose[:,idx] + dy_x*current_pose[:,idx+2] + t_x
                moved_y = current_pose[:,idx+1]
                moved_z = dx_z*current_pose[:,idx] + dy_z*current_pose[:,idx+2] + t_z
                diff_x = (prev_pose[:, idx] - moved_x)*f_contact
                diff_y = (prev_pose[:, idx + 1] - moved_y)*f_contact
                diff_z = (prev_pose[:, idx + 2] - moved_z)*f_contact
                dist_list.extend([diff_x, diff_y, diff_z])
        return tf.reduce_mean(tf.square(dist_list))
def joint_len_loss(self, output):
r_idx = self.BALL_DIMENSION + self.ROOT_DIMENSION
dist_list = []
for sIdx in range(self.stepSize):
current_pose = output[:,sIdx,r_idx:]
for pIdx in range(len(self.jointPairs)):
pair = self.jointPairs[pIdx]
lenOrigin = self.jointLengths[pIdx]
jLen = self.joint_len(current_pose, pair[0], pair[1])
dist_list.append(tf.square(jLen - lenOrigin))
return tf.reduce_mean(dist_list)
def joint_len(self, current_pose, j1, j2):
idx1 = 1 + 3*j1
idx2 = 1 + 3*j2
dx = current_pose[:,idx1] - current_pose[:,idx2]
dy = current_pose[:,idx1+1] - current_pose[:,idx2+1]
dz = current_pose[:,idx1+2] - current_pose[:,idx2+2]
d_len = tf.sqrt(tf.square(dx) + tf.square(dy) + tf.square(dz))
return d_len
def load_normal_data(self, folder):
xMean, xStd = loadNormalData("%s/data/xNormal.dat"%(folder))
yMean, yStd = loadNormalData("%s/data/yNormal.dat"%(folder))
self.x_normal = Normalize(xMean, xStd)
self.y_normal = Normalize(yMean, yStd)
class RNNModel(object):
    """Stacked-LSTM motion generator: each step consumes the control input
    plus the previous frame's output and emits the next motion frame."""
    def __init__(self, config, batchSize, stepSize, lr=0.0001):
        """Build placeholders and the generator graph; when stepSize > 1 also
        the loss, and when batchSize > 1 the L2-regularized train op."""
        self.config = config
        self.batchSize = batchSize
        self.stepSize = stepSize
        config.stepSize = stepSize
        config.batchSize = batchSize
        with config.scope():
            self.x = tf.placeholder(tf.float32, [batchSize, stepSize, config.X_DIMENSION], name="x")
            self.prev_y = tf.placeholder(tf.float32, [batchSize, config.Y_DIMENSION], name="prev_y")
            self.y = tf.placeholder(tf.float32, [batchSize, stepSize, config.Y_DIMENSION], name="y")
            with tf.variable_scope("generator"):
                self.generated, self.final_state, self.initial_state, self.final_y = self.generator(self.x, self.prev_y)
            # Inference-only graphs stop here.
            if (stepSize <= 1): return
            self.loss_g, self.loss_detail = config.error(self.x, self.prev_y, self.y, self.generated)
            if (batchSize <= 1): return
            g_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=config.scope_name('generator'))
            regularizer = tf.contrib.layers.l2_regularizer(scale=0.00001)
            self.reg_loss_g = tf.contrib.layers.apply_regularization(regularizer, g_variables)
            self.train_g = train(self.loss_g + self.reg_loss_g, g_variables, lr, self.config.MAX_GRAD_NORM)
            self.train_list = self.train_g
    def generator(self, inputs, prev_motion):
        """Unroll the LSTM stack over stepSize frames.

        Returns (outputs, final_state, initial_state, final_y) where outputs
        has shape (batch, step, Y_DIMENSION)."""
        c = self.config
        cells = []
        for i in range(c.NUM_OF_LAYERS):
            cell = c.rnn_cell()
            # Dropout between layers, but never after the topmost one.
            if ((i < c.NUM_OF_LAYERS - 1) and (c.LAYER_KEEP_PROB < 1)):
                cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=c.LAYER_KEEP_PROB)
            cells.append(cell)
        stacked_lstm = tf.contrib.rnn.MultiRNNCell(cells)
        # stacked_lstm = tf.contrib.rnn.MultiRNNCell([c.rnn_cell() for _ in range(c.NUM_OF_LAYERS)])
        initial_state = stacked_lstm.zero_state(self.batchSize, tf.float32)
        state = initial_state
        hiddens = []
        output_w = tf.get_variable("output_w", [c.RNN_SIZE, c.Y_DIMENSION], dtype=tf.float32)
        output_b = tf.get_variable("output_b", [c.Y_DIMENSION], dtype=tf.float32)
        output = prev_motion
        # Trailing "quality" input channels (if configured) bypass the input
        # embedding layer and are concatenated raw.
        q_size = 0
        if (c.quality_param_index > 0):
            q_size = c.X_DIMENSION - c.quality_param_index
        if (c.use_input_layer):
            i_layer = SingleLayer(c.X_DIMENSION - q_size, 128, "i_layer")
            p_layer = SingleLayer(c.Y_DIMENSION, 128, "p_layer")
        for i in range(self.stepSize):
            if i > 0: tf.get_variable_scope().reuse_variables()
            # cInput = inputs[:,i]
            if (c.use_input_layer):
                p_input = p_layer.apply(output)
                if (c.quality_param_index > 0):
                    i_input = c.drop_out(inputs[:,i,0:c.quality_param_index], c.INPUT_KEEP_PROB)
                    i_input = i_layer.apply(i_input)
                    q_input = inputs[:,i,c.quality_param_index:c.X_DIMENSION]
                    cInput = tf.concat([i_input, q_input, p_input], 1)
                else:
                    i_input = i_layer.apply(c.drop_out(inputs[:,i], c.INPUT_KEEP_PROB))
                    cInput = tf.concat([i_input, p_input], 1)
            else:
                if (c.POSE_CONTAINED_AS_INPUT):
                    cInput = c.drop_out(inputs[:,i], c.INPUT_KEEP_PROB)
                elif (c.NO_INPUT):
                    cInput = c.drop_out(output, c.INPUT_KEEP_PROB)
                else:
                    cInput = tf.concat([c.drop_out(inputs[:,i], c.INPUT_KEEP_PROB), output], 1)
            prev_output = output
            output, state = stacked_lstm(cInput, state)
            output = tf.matmul(output, output_w) + output_b
            # print(output.get_shape()) # (30, 120)
            if (c.USE_RESIDUAL_MODEL):
                # Pose channels are predicted as deltas added onto the
                # previous frame; ball/root channels remain absolute.
                rootStart = c.BALL_DIMENSION
                poseStart = c.ROOT_DIMENSION + c.BALL_DIMENSION
                output_root = output[:,:poseStart]
                output_pose = output[:,poseStart:] + prev_output[:,poseStart:]
                output = tf.concat([output_root, output_pose], 1)
            hiddens.append(output)
            if (c.train_as_input and self.stepSize > 1):
                # Teacher forcing: ground truth becomes the next-step input.
                output = self.y[:, i]
        outputs = tf.transpose(hiddens, perm=[1, 0, 2])
        return outputs, state, initial_state, output
class StateModel(object):
    """Recurrent model with a hand-rolled state vector: each step feeds
    [input, previous output, state] through NUM_OF_LAYERS SingleLayers and
    splits the last layer's output into the next motion frame and the next
    state."""
    def __init__(self, config, batchSize, stepSize, lr=0.0001):
        """Build placeholders and the generator graph; when stepSize > 1 also
        the loss, and when batchSize > 1 the L2-regularized train op."""
        self.config = config
        self.batchSize = batchSize
        self.stepSize = stepSize
        config.stepSize = stepSize
        config.batchSize = batchSize
        with config.scope():
            self.x = tf.placeholder(tf.float32, [batchSize, stepSize, config.X_DIMENSION], name="x")
            self.prev_y = tf.placeholder(tf.float32, [batchSize, config.Y_DIMENSION], name="prev_y")
            self.y = tf.placeholder(tf.float32, [batchSize, stepSize, config.Y_DIMENSION], name="y")
            self.initial_state = tf.placeholder(tf.float32, [batchSize, config.STATE_DIMENSION], name="initial_state")
            with tf.variable_scope("generator"):
                self.generated, self.final_state, self.final_y = self.generator(self.x, self.prev_y)
            if (stepSize <= 1): return
            self.loss_g, self.loss_detail = config.error(self.x, self.prev_y, self.y, self.generated)
            if (batchSize <= 1): return
            g_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=config.scope_name('generator'))
            regularizer = tf.contrib.layers.l2_regularizer(scale=0.00001)
            self.reg_loss_g = tf.contrib.layers.apply_regularization(regularizer, g_variables)
            self.train_g = train(self.loss_g + self.reg_loss_g, g_variables, lr, self.config.MAX_GRAD_NORM)
            self.train_list = self.train_g
    def generator(self, inputs, prev_motion):
        """Unroll the layer stack over stepSize frames.

        Returns (outputs, final_state, final_y) with outputs shaped
        (batch, step, Y_DIMENSION)."""
        c = self.config
        layers = []
        for i in range(c.NUM_OF_LAYERS):
            input_d = c.RNN_SIZE
            output_d = c.RNN_SIZE
            isLast = False
            if (i == 0):
                input_d = c.X_DIMENSION + c.Y_DIMENSION + c.STATE_DIMENSION
            if (i == c.NUM_OF_LAYERS - 1):
                output_d = c.Y_DIMENSION + c.STATE_DIMENSION
            layer = SingleLayer(input_d, output_d, "sm_layer_%d"%i)
            layer.IS_LAST = isLast
            layers.append(layer)
        hiddens = []
        state = self.initial_state
        output = prev_motion
        for i in range(self.stepSize):
            if i > 0: tf.get_variable_scope().reuse_variables()
            cInput = tf.concat([c.drop_out(inputs[:,i], c.INPUT_KEEP_PROB), output, state], 1)
            inter_output = cInput
            # BUG FIX: this inner loop previously reused the timestep index
            # `i`, so after it finished `i` held NUM_OF_LAYERS-1 and the
            # teacher-forcing read below (`self.y[:, i]`) used the wrong
            # timestep. It now has its own index.
            for layer_idx in range(c.NUM_OF_LAYERS):
                inter_output = layers[layer_idx].apply(inter_output)
            # The last layer emits [next output | next state] concatenated.
            output = inter_output[:,:c.Y_DIMENSION]
            state = inter_output[:,c.Y_DIMENSION:]
            hiddens.append(output)
            if (c.train_as_input and self.stepSize > 1):
                # Teacher forcing: ground truth becomes the next-step input.
                output = self.y[:, i]
        outputs = tf.transpose(hiddens, perm=[1, 0, 2])
        return outputs, state, output
class MultiInputModel(object):
    """RNN with two parallel first-stage cells whose outputs are blended by
    an indicator channel of the previous output, then fed through a shared
    stack of NUM_OF_LAYERS-1 cells."""
    def __init__(self, config, batchSize, stepSize, lr=0.0001):
        """Build placeholders and the generator graph; when stepSize > 1 also
        the loss, and when batchSize > 1 the L2-regularized train op."""
        self.config = config
        self.batchSize = batchSize
        self.stepSize = stepSize
        config.stepSize = stepSize
        config.batchSize = batchSize
        with config.scope():
            self.x = tf.placeholder(tf.float32, [batchSize, stepSize, config.X_DIMENSION], name="x")
            self.prev_y = tf.placeholder(tf.float32, [batchSize, config.Y_DIMENSION], name="prev_y")
            self.y = tf.placeholder(tf.float32, [batchSize, stepSize, config.Y_DIMENSION], name="y")
            with tf.variable_scope("generator"):
                self.generated, self.final_state, self.initial_state, self.final_y = self.generator(self.x, self.prev_y)
            if (stepSize <= 1): return
            self.loss_g, self.loss_detail = config.error(self.x, self.prev_y, self.y, self.generated)
            if (batchSize <= 1): return
            g_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=config.scope_name('generator'))
            regularizer = tf.contrib.layers.l2_regularizer(scale=0.00001)
            self.reg_loss_g = tf.contrib.layers.apply_regularization(regularizer, g_variables)
            self.train_g = train(self.loss_g + self.reg_loss_g, g_variables, lr, self.config.MAX_GRAD_NORM)
            self.train_list = self.train_g
    def generator(self, inputs, prev_motion):
        """Unroll the two-branch RNN over stepSize frames.

        Returns (outputs, final_state, initial_state, final_y); the state
        tuples bundle both branch states plus the shared stack's state."""
        c = self.config
        # Two alternative first-stage cells; the indicator channel of the
        # previous output softly selects between them each step.
        s_cell_1 = c.rnn_cell()
        s_cell_2 = c.rnn_cell()
        if (c.LAYER_KEEP_PROB < 1):
            s_cell_1 = tf.contrib.rnn.DropoutWrapper(s_cell_1, output_keep_prob=c.LAYER_KEEP_PROB)
            s_cell_2 = tf.contrib.rnn.DropoutWrapper(s_cell_2, output_keep_prob=c.LAYER_KEEP_PROB)
        cells = []
        for i in range(c.NUM_OF_LAYERS-1):
            cell = c.rnn_cell()
            # No dropout after the topmost shared layer.
            if ((i < c.NUM_OF_LAYERS - 2) and (c.LAYER_KEEP_PROB < 1)):
                cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=c.LAYER_KEEP_PROB)
            cells.append(cell)
        stacked_lstm = tf.contrib.rnn.MultiRNNCell(cells)
        state_1 = s_cell_1.zero_state(self.batchSize, tf.float32)
        state_2 = s_cell_2.zero_state(self.batchSize, tf.float32)
        state = stacked_lstm.zero_state(self.batchSize, tf.float32)
        initial_state = [state_1, state_2]
        initial_state.extend(state)
        i_layer = SingleLayer(c.X_DIMENSION, 128, "i_layer")
        p_layer = SingleLayer(c.Y_DIMENSION, 128, "p_layer")
        hiddens = []
        output_w = tf.get_variable("output_w", [c.RNN_SIZE, c.Y_DIMENSION], dtype=tf.float32)
        output_b = tf.get_variable("output_b", [c.Y_DIMENSION], dtype=tf.float32)
        output = prev_motion
        for i in range(self.stepSize):
            if i > 0: tf.get_variable_scope().reuse_variables()
            # cInput = inputs[:,i]
            i_input = i_layer.apply(c.drop_out(inputs[:,i], c.INPUT_KEEP_PROB))
            p_input = p_layer.apply(output)
            cInput = tf.concat([i_input, p_input], 1)
            # cInput = tf.concat([c.drop_out(inputs[:,i], c.INPUT_KEEP_PROB), output], 1)
            # The last output channel, de-normalized and clipped to [0,1],
            # acts as the soft switch between the two first-stage branches.
            i_index = c.Y_DIMENSION - 1
            indicator = output[:, i_index]
            indicator = c.y_normal.de_normalize_idx(indicator, i_index)
            indicator = tf.clip_by_value(indicator, 0, 1.0)
            with tf.variable_scope("i_layer_1"):
                output_1, state_1 = s_cell_1(cInput, state_1)
            with tf.variable_scope("i_layer_2"):
                output_2, state_2 = s_cell_2(cInput, state_2)
            # Transpose so the per-batch indicator broadcasts over feature
            # channels, then transpose back.
            cInput = indicator*tf.transpose(output_1) + (1-indicator)*tf.transpose(output_2)
            cInput = tf.transpose(cInput)
            output, state = stacked_lstm(cInput, state)
            output = tf.matmul(output, output_w) + output_b
            hiddens.append(output)
            if (c.train_as_input and self.stepSize > 1):
                # Teacher forcing: ground truth becomes the next-step input.
                output = self.y[:, i]
        outputs = tf.transpose(hiddens, perm=[1, 0, 2])
        final_state = [state_1, state_2]
        final_state.extend(state)
        return outputs, tuple(final_state), tuple(initial_state), output
class TimeModel(object):
    """RNN motion generator paired with a fully-connected estimator that
    predicts, from (goal, previous pose), the remaining time and
    pre-activation values.

    Fix: the generator's per-step local variable was named `input`,
    shadowing the builtin; it is renamed `step_input` (local only, no
    interface change)."""
    def __init__(self, config, batchSize, stepSize, lr=0.0001):
        """Build placeholders, the estimator layers and, for training graphs
        (stepSize > 1), the generator/estimator losses and train ops."""
        self.config = config
        self.batchSize = batchSize
        self.stepSize = stepSize
        config.stepSize = stepSize
        config.batchSize = batchSize
        with config.scope():
            # x : acti, action=3, goal pos=2, goal ori=2, normalized goal pos=2, remain time => 11
            # y : foot contact=2, rot, tx, ty, root_height, joint pos=3*13=39 => 45
            self.x = tf.placeholder(tf.float32, [batchSize, stepSize, config.X_DIMENSION], name="x")
            self.prev_y = tf.placeholder(tf.float32, [batchSize, config.Y_DIMENSION], name="prev_y")
            self.y = tf.placeholder(tf.float32, [batchSize, stepSize, config.Y_DIMENSION], name="y")
            # g : goal pos=2, goal ori=2, normalized goal pos=2 => 6
            with tf.variable_scope("estimator"):
                self.e_layer = FCLayers(config.Y_DIMENSION + config.G_DIMENSION, config.E_LAYERS, config.RNN_SIZE, config.E_DIMENSION)
            if (stepSize > 1):
                with tf.variable_scope("generator"):
                    self.generated, self.final_state, self.initial_state, self.final_y, self.estimated = self.generator(self.x, self.prev_y, self.e_layer)
                self.loss_g, self.loss_detail = config.error(self.x, self.prev_y, self.y, self.generated)
                self.loss_e, l_e_detail = config.estimation_time_error(self.x, self.estimated)
                self.loss_detail.extend(l_e_detail)
                g_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=config.scope_name('generator'))
                e_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=config.scope_name('estimator'))
                regularizer = tf.contrib.layers.l2_regularizer(scale=0.00001)
                self.reg_loss_g = tf.contrib.layers.apply_regularization(regularizer, g_variables)
                self.reg_loss_e = tf.contrib.layers.apply_regularization(regularizer, e_variables)
                self.train_g = train(self.loss_g + self.reg_loss_g, g_variables, lr, self.config.MAX_GRAD_NORM)
                self.train_e = train(self.loss_e + self.reg_loss_e, e_variables, lr, self.config.MAX_GRAD_NORM)
                self.train_list = [self.train_g, self.train_e]
    def generator(self, inputs, prev_motion, e_layer):
        """Unroll the LSTM stack over stepSize frames, running the estimator
        each step on (goal, previous output).

        Returns (outputs, final_state, initial_state, final_y, estimations)."""
        stacked_lstm = tf.contrib.rnn.MultiRNNCell([self.config.rnn_cell() for _ in range(self.config.NUM_OF_LAYERS)])
        initial_state = stacked_lstm.zero_state(self.batchSize, tf.float32)
        state = initial_state
        hiddens = []
        eOutputList = []
        if (self.config.use_U_net):
            # U-net style skip: previous output is concatenated before the
            # final projection, so the weight matrix is wider.
            output_w = tf.get_variable("output_w", [self.config.RNN_SIZE + self.config.Y_DIMENSION, self.config.Y_DIMENSION], dtype=tf.float32)
        else:
            output_w = tf.get_variable("output_w", [self.config.RNN_SIZE, self.config.Y_DIMENSION], dtype=tf.float32)
        output_b = tf.get_variable("output_b", [self.config.Y_DIMENSION], dtype=tf.float32)
        output = prev_motion
        g_dim = self.config.G_DIMENSION
        for i in range(self.stepSize):
            if i > 0: tf.get_variable_scope().reuse_variables()
            prev_output = output
            step_input = inputs[:,i]
            # action type=3, goal pos/ori/n-pos=6, root move
            goal = step_input[:,1:(1+g_dim)]
            eInput = tf.concat([goal, prev_output], 1)
            # estimated : estimated time, estimated pre-activation
            eOutput = e_layer.apply(eInput)
            eOutputList.append(eOutput)
            # time, activation
            cInput = tf.stack([step_input[:,(1+g_dim)], step_input[:,0]], 1)
            cInput = tf.concat([cInput, goal, prev_output], 1)
            # cInput : v_time, acti, action=3, goal=6, prev_y=45
            output, state = stacked_lstm(cInput, state)
            if (self.config.use_U_net):
                output = tf.concat([prev_output, output], 1)
            output = tf.matmul(output, output_w) + output_b
            hiddens.append(output)
            if (self.config.train_as_input and self.stepSize > 1):
                # Teacher forcing: ground truth becomes the next-step input.
                output = self.y[:, i]
        outputs = tf.transpose(hiddens, perm=[1, 0, 2])
        eOutputList = tf.transpose(eOutputList, perm=[1, 0, 2])
        return outputs, state, initial_state, output, eOutputList
class RuntimeModel(object):
    """Single-step inference counterpart of TimeModel: one generator step
    plus a standalone goal/time estimator; no losses or train ops."""
    def __init__(self, config, batchSize):
        self.config = config
        self.batchSize = batchSize
        config.batchSize = batchSize
        with config.scope():
            # x : acti, action=3, goal pos=2, goal ori=2, normalized goal pos=2, remain time => 11
            # y : foot contact=2, rot, tx, ty, root_height, joint pos=3*13=39 => 45
            self.x = tf.placeholder(tf.float32, [batchSize, config.X_DIMENSION], name="x")
            self.prev_y = tf.placeholder(tf.float32, [batchSize, config.Y_DIMENSION], name="prev_y")
            self.g = tf.placeholder(tf.float32, [batchSize, config.G_DIMENSION], name="g")
            with tf.variable_scope("estimator"):
                self.e_layer = FCLayers(config.Y_DIMENSION + config.G_DIMENSION, config.E_LAYERS, config.RNN_SIZE, config.E_DIMENSION)
            with tf.variable_scope("generator"):
                self.generated, self.final_state, self.initial_state = self.generator(self.x, self.prev_y)
            self.estimated = self.estimator(self.g, self.prev_y, self.e_layer)
    def generator(self, x, prev_motion):
        """One LSTM step: concat(input, previous motion) -> next frame.
        Returns (output, state, initial_state)."""
        stacked_lstm = tf.contrib.rnn.MultiRNNCell([self.config.rnn_cell() for _ in range(self.config.NUM_OF_LAYERS)])
        initial_state = stacked_lstm.zero_state(self.batchSize, tf.float32)
        if (self.config.use_U_net):
            # U-net style skip widens the final projection input.
            output_w = tf.get_variable("output_w", [self.config.RNN_SIZE + self.config.Y_DIMENSION, self.config.Y_DIMENSION], dtype=tf.float32)
        else:
            output_w = tf.get_variable("output_w", [self.config.RNN_SIZE, self.config.Y_DIMENSION], dtype=tf.float32)
        output_b = tf.get_variable("output_b", [self.config.Y_DIMENSION], dtype=tf.float32)
        cInput = tf.concat([x, prev_motion], 1)
        output, state = stacked_lstm(cInput, initial_state)
        if (self.config.use_U_net):
            output = tf.concat([prev_motion, output], 1)
        output = tf.matmul(output, output_w) + output_b
        return output, state, initial_state
    def estimator(self, goal, prev_motion, e_layer):
        """Run the FC estimator on (goal, previous motion)."""
        eInput = tf.concat([goal, prev_motion], 1)
        # estimated : estimated time, estimated pre-activation
        return e_layer.apply(eInput)
|
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.datasets import SupervisedDataSet
from pybrain.structure import FeedForwardNetwork, LinearLayer, SigmoidLayer,FullConnection
import fitsio
import numpy as np
import time
import pickle
import sys
sys.path.insert(0, '../')
from redshift_utils import nanomaggies2mags, mags2nanomaggies
def brescia_nn(train, test, max_epochs=None, verbose=False):
trainval_ds = SupervisedDataSet(5, 1)
test_ds = SupervisedDataSet(5, 1)
for datum in train:
trainval_ds.addSample(datum[:5], (datum[5],))
for datum in test:
test_ds.addSample(datum[:5], (datum[5],))
train_ds, val_ds = trainval_ds.splitWithProportion(0.75)
if verbose:
print "Train, validation, test:", len(train_ds), len(val_ds), len(test_ds)
ns = {}
min_error = -1
min_h = -1
# use validation to form 4-layer network with two hidden layers,
# with (2n + 1) nodes in the first hidden layer and somewhere from
# 1 to (n - 1) in the second hidden layer
for h2 in range(1, 5):
if verbose:
start = time.time()
print "h2 nodes:", h2
# create the network
if verbose:
print "building network"
n = FeedForwardNetwork()
inLayer = LinearLayer(5)
hiddenLayer1 = SigmoidLayer(11)
hiddenLayer2 = SigmoidLayer(h2)
outLayer = LinearLayer(1)
n.addInputModule(inLayer)
n.addModule(hiddenLayer1)
n.addModule(hiddenLayer2)
n.addOutputModule(outLayer)
in_to_hidden = FullConnection(inLayer, hiddenLayer1)
hidden_to_hidden = FullConnection(hiddenLayer1, hiddenLayer2)
hidden_to_out = FullConnection(hiddenLayer2, outLayer)
n.addConnection(in_to_hidden)
n.addConnection(hidden_to_hidden)
n.addConnection(hidden_to_out)
n.sortModules()
# training
if verbose:
print "beginning training"
trainer = BackpropTrainer(n, train_ds)
trainer.trainUntilConvergence(maxEpochs=max_epochs)
ns[h2] = n
# validation
if verbose:
print "beginning validation"
out = n.activateOnDataset(val_ds)
actual = val_ds['target']
error = np.sqrt(np.sum((out - actual)**2) / len(val_ds))
if verbose:
print "RMSE:", error
if min_error == -1 or error < min_error:
min_error = error
min_h = h2
if verbose:
stop = time.time()
print "Time:", stop - start
# iterate through
if verbose:
print "best number of h2 nodes:", min_h
out_test = ns[min_h].activateOnDataset(test_ds)
return ns[h2], out_test
if __name__ == '__main__':
    # Load the DR7 quasar catalog: five magnitudes plus spectroscopic z.
    data_file = fitsio.FITS('../dr7qso.fit')[1].read()
    data = np.zeros((len(data_file['UMAG']), 6))
    data[:,0] = data_file['UMAG']
    data[:,1] = data_file['GMAG']
    data[:,2] = data_file['RMAG']
    data[:,3] = data_file['IMAG']
    data[:,4] = data_file['ZMAG']
    data[:,5] = data_file['z']
    # make sure there are no zero mags (drop rows with any missing magnitude)
    for i in range(5):
        data = data[data[:,i] != 0]
    # convert to nanomaggies for the sake of example
    data[:,:5] = mags2nanomaggies(data[:,:5])
    # split into training and test (80/20, no shuffling)
    train = data[:int(0.8 * len(data)),:]
    test = data[int(0.8 * len(data)):,:]
    model, preds = brescia_nn(train, test, verbose=True)
    # calculate RMSE on the held-out set
    actual_test = test[:,5]
    rmse = np.sqrt(np.sum((preds - actual_test)**2) / len(test))
    # Persist the selected network for later reuse.
    output = open('brescia_output.pkl', 'wb')
    pickle.dump(model, output)
    output.close()
    print "RMSE:", rmse
|
import numpy as np
from bresenham import bresenham
import scipy.ndimage
from PIL import Image
def mydrawPNG(vector_image, Side = 256):
    """Rasterize an (N, 3) point sequence [x, y, pen_lift] into a Side x Side
    float array: stroke pixels are set to 255 and the image is dilated by one
    pixel.

    Returns (raster_image, stroke_bbox).
    NOTE(review): stroke_bbox is never filled and is always returned empty;
    pixel_length is accumulated but unused -- confirm whether vestigial.
    """
    raster_image = np.zeros((int(Side), int(Side)), dtype=np.float32)
    initX, initY = int(vector_image[0, 0]), int(vector_image[0, 1])
    stroke_bbox = []
    pixel_length = 0
    for i in range(0, len(vector_image)):
        if i > 0:
            # A pen-lift flag on the previous point starts a new stroke.
            if vector_image[i - 1, 2] == 1:
                initX, initY = int(vector_image[i, 0]), int(vector_image[i, 1])
        # Walk the raster line from the previous point to this one.
        cordList = list(bresenham(initX, initY, int(vector_image[i, 0]), int(vector_image[i, 1])))
        pixel_length += len(cordList)
        for cord in cordList:
            # Only paint in-bounds pixels (note: coordinate 0 is excluded).
            if (cord[0] > 0 and cord[1] > 0) and (cord[0] < Side and cord[1] < Side):
                raster_image[cord[1], cord[0]] = 255.0
        initX, initY = int(vector_image[i, 0]), int(vector_image[i, 1])
    # Thicken strokes by one pixel.
    raster_image = scipy.ndimage.binary_dilation(raster_image) * 255.0
    return raster_image, stroke_bbox
def preprocess(sketch_points, side = 256.0):
    """Scale absolute stroke coordinates from an 800x800 canvas into a
    side x side raster grid and round to whole pixels; columns beyond the
    first two (pen state) are left unscaled.

    Fix: `np.float` was deprecated and removed in NumPy 1.24; the builtin
    `float` is the documented equivalent.
    """
    sketch_points = sketch_points.astype(float)
    sketch_points[:, :2] = sketch_points[:, :2] / np.array([800, 800])
    sketch_points[:,:2] = sketch_points[:,:2] * side
    sketch_points = np.round(sketch_points)
    return sketch_points
def rasterize_Sketch(sketch_points):
    """Scale stroke points to raster coordinates, render them, and return
    (raster_image, scaled_points)."""
    scaled_points = preprocess(sketch_points)
    raster_image, _bbox = mydrawPNG(scaled_points)
    return raster_image, scaled_points
def mydrawPNG_from_list(vector_image, Side = 256):
    """Rasterize a list of strokes (each an (N, >=2) array of absolute point
    coordinates) into a Side x Side image, dilate by one pixel, and return
    it as an RGB PIL image.

    Bounds fix: valid raster indices are 0..Side-1, but the original check
    accepted cord == Side (IndexError) and rejected coordinate 0.
    """
    raster_image = np.zeros((int(Side), int(Side)), dtype=np.float32)
    for stroke in vector_image:
        initX, initY = int(stroke[0, 0]), int(stroke[0, 1])
        for i_pos in range(1, len(stroke)):
            cordList = list(bresenham(initX, initY, int(stroke[i_pos, 0]), int(stroke[i_pos, 1])))
            for cord in cordList:
                if (cord[0] >= 0 and cord[1] >= 0) and (cord[0] < Side and cord[1] < Side):
                    raster_image[cord[1], cord[0]] = 255.0
                else:
                    print('error')
            initX, initY = int(stroke[i_pos, 0]), int(stroke[i_pos, 1])
    # Thicken strokes by one pixel.
    raster_image = scipy.ndimage.binary_dilation(raster_image) * 255.0
    return Image.fromarray(raster_image).convert('RGB')
# Read an integer from stdin and print its English name if it is a single
# digit, otherwise an error message.
number = int(input())
numbersToText = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
if 0 <= number < len(numbersToText):
    print(numbersToText[number])
else:
    # Explicit range check instead of catching IndexError: negative input
    # previously indexed from the end (e.g. -1 printed "nine").
    print("number too big")
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "MyProject Management",
"version" : "1.1",
"author" : "OpenERP SA",
"category" : "Generic Modules/Inventory Control",
"depends" : ["base", "jasper_reports", "analytic"],
"description": """
This is the base module for managing products and pricelists in OpenERP.
Products support variants, different pricing methods, suppliers
information, make to stock/order, different unit of measures,
packaging and properties.
Pricelists support:
* Multiple-level of discount (by product, category, quantities)
* Compute price based on different criteria:
* Other pricelist,
* Cost price,
* List price,
* Supplier price, ...
Pricelists preferences by product and/or partners.
Print product labels with barcode.
""",
'data': [
'myproject_view_oldserver.xml',
# 'myproject_view_newserver.xml',
'myproject_view.xml',
'myproject_view_project_main.xml',
'myproject_view_project_budget.xml',
'myproject_view_project_project_price.xml',
],
'css': ['static/src/css/myproject.css'],
# 'test' : ['test/create_old_server_data.yml'],
'demo': ['myproject_demo.xml',],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
try:
    # Under RPython/translation: use the rlib arithmetic and bigint helpers.
    from rpython.rlib.rarithmetic import ovfcheck, LONG_BIT # pylint: disable=W
    from rpython.rlib.rbigint import rbigint, _divrem as divrem # pylint: disable=W
    from rpython.rlib.rbigint import rbigint as BigIntType # pylint: disable=W
    from rpython.rlib.rarithmetic import string_to_int # pylint: disable=unused-import
    from rpython.rlib.rstring import ParseStringOverflowError # pylint: disable=W
    bigint_from_int = rbigint.fromint
    bigint_from_str = rbigint.fromstr
    IntType = int
except ImportError:
    "NOT_RPYTHON"
    # Plain-CPython fallbacks: ints are arbitrary precision here, so the
    # overflow check and bigint wrappers collapse to ordinary int operations.
    def ovfcheck(value):
        # No-op: CPython ints cannot overflow.
        return value
    def bigint_from_int(value):
        return value
    def bigint_from_str(value):
        return int(value)
    def divrem(x, y):
        # Not needed off-RPython so far; fail loudly if ever reached.
        raise Exception("not yet implemented")
    string_to_int = int # pylint: disable=invalid-name
    class ParseStringOverflowError(Exception):
        # Stand-in for rpython.rlib.rstring.ParseStringOverflowError.
        def __init__(self, parser): # pylint: disable=super-init-not-called
            self.parser = parser
    LONG_BIT = 0x8000000000000000
    # NOTE(review): rlib's LONG_BIT is a bit *count* (typically 64); this
    # fallback sets 2**63 instead -- confirm how callers consume it.
    import sys
    if sys.version_info.major <= 2:
        IntType = (int, long) # pylint: disable=undefined-variable
        BigIntType = long # pylint: disable=undefined-variable
    else:
        IntType = int
        BigIntType = int
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
# Simple linear regression of salary on years of experience.
dataset=pd.read_csv('Salary_Data.csv')
X=dataset.iloc[:,:-1].values
y=dataset.iloc[:,1:].values
#splitting the dataset into training and test sets
from sklearn.model_selection import train_test_split
#used model_selection in place of cross_validation since the latter is deprecated
X_train, X_test,y_train,y_test = train_test_split(X,y,test_size=1/3,random_state=0)
#fitting simple linear regression to the training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train,y_train)
#predicting the test set results
y_pred = regressor.predict(X_test)
#visualising the training set results
plt.scatter(X_train,y_train,color='red')
plt.plot(X_train,regressor.predict(X_train),color='blue')
plt.title('Salary Vs Experience (Training set) ')
plt.xlabel('Year of Experience')
plt.ylabel('Salary')
plt.show()
#visualising the test set results
plt.scatter(X_test,y_test,color='red')
# BUG FIX: was plt.plot(X_train, regressor.predict(X_test), ...) -- the two
# arrays have different lengths (2/3 vs 1/3 of the data) and raise a
# ValueError; the fitted line is drawn from the training inputs.
plt.plot(X_train,regressor.predict(X_train),color='blue')
plt.title('Salary Vs Experience (Test set) ')
plt.xlabel('Year of Experience')
plt.ylabel('Salary')
plt.show()
"""create table organizations
Revision ID: 538755761e27
Revises:
Create Date: 2020-10-09 11:30:40.155436
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "538755761e27"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the organizations table with a unique index on email."""
    op.create_table(
        "organizations",
        sa.Column("id", sa.Integer, primary_key=True),
        sa.Column("email", sa.String(), nullable=False),
        sa.Column("name", sa.String(), nullable=False),
        sa.Column("address", sa.String(), nullable=False),
        sa.Column("phone_number", sa.String(), nullable=True),
    )
    # Unique index enforces one organization per email address.
    op.create_index(op.f("idx_organizations_email"), "organizations", ["email"], unique=True)
def downgrade():
    """Drop the organizations table and its email index (reverses upgrade)."""
    op.drop_index("idx_organizations_email", table_name="organizations")
    op.drop_table("organizations")
|
import os
def confirm_move_forward(message):
    """Block until the operator answers 'y' (case-insensitive) at the prompt."""
    answer = 'n'
    while answer.lower() != 'y':
        answer = input('{0} (y)'.format(message))
# Interactive gate: wait for operator confirmation before creating files.
confirm_move_forward('Make bot message files?')
######################################
# MAKE THE BOT MESSAGE AND LOG FILES #
######################################
def make_dir_and_text_files(directory, file_names):
    """Create `directory` and one placeholder <name>.txt file per entry in
    `file_names`; do nothing (after printing a notice) if it already exists.

    Paths are built with os.path.join instead of a hard-coded "/" so the
    script is portable across platforms.
    """
    if os.path.isdir(directory):
        print("Directory already exists!")
        return
    os.makedirs(directory)
    for file_name in file_names:
        with open(os.path.join(directory, '{0}.txt'.format(file_name)), 'w') as f:
            f.write("Put your content here.")
# make bot message files (placeholder .txt files the operator edits later)
path_to_bot_messages = 'secrets/bot_messages'
bot_message_file_names = [
    'disclaimer_prefix',
    'message_for_asking-if_people_want_more',
    'message_when_bot_doesnt_understand',
    'message_when_sending_voters',
    'message_when_someone_cant_mail_their_voters'
]
make_dir_and_text_files(path_to_bot_messages, bot_message_file_names)
confirm_move_forward('Make log files?')
# make log files (empty placeholders the bot appends to at runtime)
path_to_logs = './logs'
log_file_names = ['abnormal_log', 'error_log', 'ignored_log', 'routine_log']
make_dir_and_text_files(path_to_logs, log_file_names)
confirm_move_forward('Make voter data files? Remember to format the voter file properly before proceeding.')
################################################
# READ THE VOTER FILE AND FORMAT APPROPRIATELY #
################################################
import csv
import json
import config
with open('voter_file.csv', 'r') as f:
reader = csv.reader(f)
headers = next(reader)
assert config.VOTER_DATA_FIELDS == [header.lower() for header in headers]
voter_data = [{
config.VOTER_DATA_FIELDS[0]: row[0], # name
config.VOTER_DATA_FIELDS[1]: row[1], # street address
config.VOTER_DATA_FIELDS[2]: row[2], # apt number
config.VOTER_DATA_FIELDS[3]: row[3], # zip
config.VOTER_DATA_FIELDS[4]: row[4], # city
} for row in reader]
if not os.path.isdir('data'):
os.makedirs('data')
with open('data/unused_voters.json', 'w') as f:
f.write(json.dumps(voter_data))
#####################################
# MAKE A BLANK SEEN_EMAIL_DATA FILE #
#####################################
with open('data/seen_email_data.json', 'w') as f:
f.write('{}') |
# Tuple packing and unpacking demo.
# Pack two strings into a tuple...
pair = ("dog", "cat")
# ...then unpack the tuple into two separate names.
key, value = pair
# Show both halves.
print(key)
print(value)
import os
import time

# Directories to archive and the timestamped destination zip.
source = [r'D:\Backup\phone', r'E:\Dropbox\Photos']
target_dir = 'd:/backup/'
target = target_dir + time.strftime('%Y%m%d%H%M%S') + '.zip'
# BUG FIX: the original used ''.join(source), which glued both paths into one
# bogus argument; each source directory must be a separate, quoted argument.
# Also ported Py2 `print` statements to the print() function.
zip_command = "zip -qr '%s' %s" % (target, ' '.join("'%s'" % p for p in source))
print(zip_command)
if os.system(zip_command) == 0:
    print('Successful backup to', target)
else:
    print('Backup failed.')
|
# Generated by Django 2.2.11 on 2020-06-01 19:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: create the WishlistDpModel table."""

    dependencies = [
        ('wishlist', '0010_auto_20200413_1512'),
    ]

    operations = [
        migrations.CreateModel(
            name='WishlistDpModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('token_key', models.CharField(blank=True, default=None, max_length=128, null=True)),
                ('product_name', models.CharField(blank=True, default=None, max_length=128, null=True, unique=True, verbose_name='Продукт')),
                ('size', models.CharField(blank=True, default=None, max_length=128, null=True, verbose_name='Размер')),
                ('slug', models.CharField(blank=True, default=None, max_length=128, null=True, verbose_name='Слаг')),
                ('price', models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='Цена')),
                ('image', models.CharField(blank=True, default=None, max_length=128, null=True, verbose_name='Фото')),
                ('tkan', models.CharField(blank=True, default=None, max_length=128, null=True, verbose_name='Ткань')),
                ('brend', models.CharField(blank=True, default=None, max_length=128, null=True, verbose_name='Бренд')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Создан')),
            ],
            options={
                'verbose_name': 'Товар в жиланиях',
                'verbose_name_plural': 'Товары в жиланиях',
            },
        ),
    ]
|
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
import mysql.connector as sql
import pandas as pd
import sqlite3
#import pyodbc
import sqlalchemy as sa
import pymysql
# SECURITY NOTE(review): database credentials are hard-coded in this URL;
# move them into environment variables / config before shipping.
target = sa.create_engine(f'mysql://sql6424418:kZPRYYclcY@sql6.freemysqlhosting.net/sql6424418')
app = Flask(__name__)
@app.route('/')
def index():
    # Render the upload landing page.
    return render_template('index.html')
@app.route('/upload', methods=['GET', 'POST'])
def upload():
    """Accept an .xlsx or .csv upload and mirror it as a table in the target DB.

    Column names have spaces replaced with underscores; an existing table with
    the same name is replaced.
    """
    if request.method == 'POST':
        file = request.files['inputFile']
        # IMPROVED: match the extension instead of a substring anywhere in the
        # name, and share the common load/clean/write code between formats.
        if file.filename.endswith('.xlsx'):
            table = pd.read_excel(file)
        elif file.filename.endswith('.csv'):
            table = pd.read_csv(file)
        else:
            table = None
        if table is not None:
            table.columns = table.columns.str.replace(' ', '_')
            table.to_sql(name=file.filename, con=target, if_exists='replace')
    # BUG FIX: the original returned None on GET, which Flask turns into a 500.
    return 'File uploaded'
# Run the development server when executed directly (debug=True: dev only).
if __name__ == '__main__':
    app.run(debug=True)
# coding = UTF-8
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import logging
import datetime
import re
import lp2_while
import planet
import bot_constants
import mycalc
# Write INFO-level log records to nma_bot.log.
logging.basicConfig(format='%(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO,
                    filename='nma_bot.log'
                    )
# Proxy settings for the Telegram connection.
PROXY = bot_constants.bot_proxy
def greet_user(bot, update):
    """Handle /start: log the event and echo a fixed acknowledgement."""
    reply = 'Вызван /start'
    print(reply)
    update.message.reply_text(reply)
def talk_to_me(bot, update):
    """Echo the user's text; if it ends with '=', route it to the calculator."""
    user_text = update.message.text
    print(user_text)
    # BUG FIX: guard against empty text, which raised IndexError on [-1].
    if user_text and user_text[-1] == '=':
        print(user_text[-1])
        mycalc.run_calc(user_text)
    else:
        update.message.reply_text(user_text)
def run_planet_constellation(bot, update):
    """Handle /planet <name>: reply with the constellation the planet is in today."""
    planet_from_user = update.message.text
    # ROBUSTNESS FIX: bare '/planet' used to raise IndexError on [1].
    parts = planet_from_user.split('/planet ')
    if len(parts) < 2 or not parts[1].strip():
        update.message.reply_text('please specify a planet name, e.g. /planet Mars')
        return
    planet_name = parts[1]
    print('planet_name: ', planet_name)
    today = datetime.date.today()
    (planet_name_eng, err) = planet.get_eng_planet_name(planet_name)
    print('planet_name_eng: ', planet_name_eng)
    print('err: ', err)
    if not err:
        print('err is null')
        cons_name = planet.get_constellation(planet_name_eng, today)
        print('cons_name: ', cons_name)
        update.message.reply_text(
            'now the planet {planet} in the constellation {cons}'.format(
                planet=planet_name_eng,
                cons=cons_name))
    else:
        # Pass the lookup error message straight back to the user.
        update.message.reply_text(err)
def run_wordcount(bot, update):
    """Handle /wordcount "some text": reply with the number of words."""
    print('start wordcount')
    str_from_user = update.message.text
    # '/wordcount ' is 11 characters; anything longer carries an argument.
    if len(str_from_user) > 11:
        print(str_from_user.split('/wordcount '))
        str_analyse = str_from_user.split('/wordcount ')[1]
    else:
        res = 'you entered an empty string!'
        print(res)
        update.message.reply_text(res)
        return
    str_analyse = str_analyse.strip()
    # ROBUSTNESS FIX: whitespace-only arguments used to raise IndexError below.
    if not str_analyse:
        res = 'you entered an empty string!'
        print(res)
        update.message.reply_text(res)
        return
    first_simbol = str_analyse[0]
    last_simbol = str_analyse[-1]
    print(first_simbol, last_simbol)
    # BUG FIX: the original used `and`, so half-quoted input was accepted;
    # both the opening and the closing quote are required.
    if first_simbol != '"' or last_simbol != '"':
        res = 'please enter quoted strings'
        print(res)
        update.message.reply_text(res)
        return
    str_analyse = str_analyse.strip('"').strip()
    # BUG FIX: the original replaced space runs with commas and then split on
    # ' ', which always produced a count of 1; split() handles whitespace runs.
    word_count = len(str_analyse.split())
    res = 'number of words: ' + str(word_count)
    print(res)
    update.message.reply_text(res)
def main():
    """Wire up the bot: /start, /planet, /wordcount and a plain-text echo."""
    mybot = Updater(bot_constants.bot_key, request_kwargs=PROXY)
    dp = mybot.dispatcher
    dp.add_handler(CommandHandler("start", greet_user))
    # NOTE(review): this text handler is registered before the command
    # handlers below — confirm it does not swallow /planet and /wordcount
    # messages in this python-telegram-bot version.
    dp.add_handler(MessageHandler(Filters.text, talk_to_me))
    #dp.add_handler(CommandHandler("lets_talk", lp2_while.ask_user()))
    dp.add_handler(CommandHandler("planet", run_planet_constellation))
    dp.add_handler(CommandHandler("wordcount", run_wordcount))
    mybot.start_polling()  # start long-polling Telegram for updates
    mybot.idle()
# BUG FIX: the __main__ guard was commented out, so main() ran on import.
if __name__ == '__main__':
    main()
|
#!/usr/bin/python
# Print a sentence centered inside a drawn box.
# Author: Eason
# Ported from Python 2 (print statements / raw_input) to Python 3.

sentence = input("Input your sentences:")

screen_width = 120
text_width = len(sentence)
# BUG FIX: the box actually drawn is '|' + text + '|' wide (text_width + 2),
# but the original computed the margin from text_width + 6, so the box sat
# two columns off-center.
box_width = text_width + 2
left_margin = (screen_width - box_width) // 2
margin = ' ' * left_margin

print()
print(margin + '+' + '-' * text_width + '+')
print(margin + '|' + ' ' * text_width + '|')
print(margin + '|' + sentence + '|')
print(margin + '|' + ' ' * text_width + '|')
print(margin + '+' + '-' * text_width + '+')
print()
|
import requests
import pprint
pp = pprint.PrettyPrinter()
class Token:
    """Mutable holder for OAuth-style token state.

    Tracks the raw token, its expiry, an active flag, and the
    code / access-token / refresh-token values received during the flow.
    """

    def __init__(self):
        self.token = None
        self.expires_in = None
        self.active = False
        self.code = None
        self.access_token = None
        self.refresh_token = None

    def get_token(self):
        return self.token

    def get_expiration(self):
        return self.expires_in

    def set_active(self):
        self.active = True

    def set_disabled(self):
        self.active = False

    def get_state_str(self):
        # Human-readable form of the active flag.
        return "active" if self.active else "inactive"

    def get_state(self):
        return self.active

    def __str__(self):
        fields = (str(self.token), str(self.expires_in), self.get_state_str())
        return "Token: %s, expires in: %s, currently %s" % fields

    def got_code(self, code):
        self.code = code

    def got_access_token(self, access_token):
        self.access_token = access_token

    def set_refresh_token(self, refresh_token):
        self.refresh_token = refresh_token
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import web
import sys
import kenlm
import json
if len(sys.argv) < 3:
    # BUG FIX: the original printed usage (with Py2-only `print >>` syntax)
    # but kept running, then crashed on sys.argv[2]; exit explicitly instead.
    sys.stderr.write("Usage: server.py port lm-path\n")
    sys.exit(1)
lm_path = sys.argv[2]
sys.stderr.write("Loading language model from %s..." % lm_path)
lm = kenlm.LanguageModel(lm_path)
sys.stderr.write("Done.\n")
# Route /score to the `score` handler class below.
urls = ('/score', 'score')
app = web.application(urls, globals())
class score:
    """web.py handler: score '|'-separated queries with the language model."""

    def get_scores(self, queries):
        # One score per query from the module-level kenlm model.
        return [lm.score(q) for q in queries]

    def GET(self):
        i = web.input(_unicode=False)
        queries = [q.strip() for q in i.q.split('|')]
        # BUG FIX: replaced Py2-only `print >>sys.stderr` (a syntax error on
        # Python 3) with an explicit stderr write.
        sys.stderr.write("queries:\n%s\n" % str('\n'.join(queries)))
        return '\n'.join('%0.4f' % s for s in self.get_scores(queries))

    def POST(self):
        # POST behaves exactly like GET.
        return self.GET()
# Start the web.py application when run as a script.
if __name__ == '__main__':
    app.run()
|
# Public package surface: re-export the Groonga client and result wrapper.
from poyonga.client import Groonga
from poyonga.result import GroongaResult
# Package version string.
__version__ = '0.1.4'
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
# Load the model that was previously trained and saved to disk.
model = tf.keras.models.load_model('digits_detect.model')

# Run a prediction for every PNG image in the current directory.
for num in os.listdir():
    # BUG FIX: match the file extension, not a '.png' substring anywhere
    # in the name (e.g. 'x.png.bak' was previously picked up too).
    if num.endswith(".png"):
        # Read one channel and invert so digits are black on white,
        # with a leading batch dimension of 1.
        img = cv2.imread(num)[:, :, 0]
        img = np.invert(np.array([img]))
        # Run the prediction and show the image with the predicted digit.
        prediction = model.predict(img)
        plt.text(0.5, -2, f'Computer says: You probably gave me a {np.argmax(prediction)}', size=12)
        plt.imshow(img[0], cmap=plt.cm.binary)
        plt.show()
import FWCore.ParameterSet.Config as cms
source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_100_1_PiG.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_101_1_LBd.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_102_1_rQ1.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_103_1_5q7.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_104_1_n4n.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_105_1_STU.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_106_1_vdC.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_107_1_rLu.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_108_1_tK4.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_109_1_kE6.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_10_1_Enf.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_110_1_MA8.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_111_1_rCt.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_112_1_GQd.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_113_1_rk6.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_114_1_Kuq.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_115_1_ygW.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_116_1_Kbe.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_117_1_CcD.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_118_1_AKI.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_119_1_Ae2.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_11_1_SUE.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_120_1_lOQ.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_121_1_l6Q.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_122_1_AAV.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_123_1_QFl.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_124_1_pZL.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_125_1_qy6.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_126_1_BZQ.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_127_1_FJZ.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_128_1_HcJ.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_129_1_1ZX.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_12_1_CIn.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_130_1_Hvf.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_131_1_Gqb.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_132_1_25F.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_133_1_Xoy.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_134_1_kl4.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_135_1_5U0.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_136_1_025.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_137_1_1wQ.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_138_1_C3C.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_139_1_6Lu.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_13_1_UZm.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_140_1_Afo.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_141_1_DE4.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_142_1_EdV.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_143_1_lIM.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_144_1_PII.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_145_1_KAk.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_146_1_Vd2.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_147_1_64A.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_148_1_VCt.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_149_1_UuG.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_14_1_cnI.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_150_1_ViJ.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_151_1_A6Y.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_152_1_uz3.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_153_1_PFe.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_154_1_RYz.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_155_1_Pzo.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_156_1_Jry.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_157_1_qYm.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_158_1_Em5.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_159_1_cin.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_15_1_sgp.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_160_1_ch3.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_161_1_cUh.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_162_1_sgI.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_163_1_5L4.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_164_1_ikU.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_165_1_RWM.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_166_1_wpQ.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_167_1_wNn.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_168_1_Lio.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_169_1_h7M.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_16_1_IjB.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_170_1_pfV.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_171_1_ex0.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_172_1_64H.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_173_1_Byl.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_174_1_lF4.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_175_1_3gp.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_176_1_Y1J.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_177_1_Ivt.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_178_1_EcR.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_179_1_Jb0.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_17_1_znS.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_180_1_B0E.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_181_1_eT7.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_182_1_P5I.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_183_1_HU8.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_184_1_h2J.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_185_1_NyL.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_186_1_QyU.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_187_1_pyi.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_188_1_d39.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_189_1_a4a.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_18_1_ziZ.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_190_1_uJv.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_191_1_JTS.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_192_1_pwK.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_194_1_dXq.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_195_1_YQ0.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_196_1_vp0.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_197_1_Led.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_198_1_pAW.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_199_1_LTo.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_19_1_Sqm.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_1_1_B5Q.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_200_1_T8g.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_20_1_PKj.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_21_1_Xqm.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_22_1_hVc.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_23_1_fq5.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_24_1_VIz.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_25_1_ne3.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_26_1_wfs.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_27_1_SIK.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_28_1_ba1.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_29_1_ZcU.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_2_1_Pun.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_30_1_AQC.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_31_1_eTT.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_32_1_dVw.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_33_1_TFR.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_34_1_b4G.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_35_1_d5U.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_36_1_IW1.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_37_1_SbK.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_38_1_udf.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_39_1_HR8.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_3_1_9y2.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_40_1_zCr.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_41_1_oiz.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_42_1_BSr.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_43_1_87r.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_44_1_2Lq.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_45_1_ZkE.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_46_1_Ukq.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_47_1_HTr.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_48_1_cfg.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_49_1_pIj.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_4_1_Uv9.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_50_1_l7L.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_51_1_Xrl.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_52_1_Fw1.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_53_1_cFl.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_54_1_JII.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_55_1_LM7.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_56_1_6gJ.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_57_1_vz5.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_58_1_b17.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_59_1_WDB.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_5_1_58V.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_60_1_VDN.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_61_1_6HE.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_62_1_SbM.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_63_1_XDV.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_64_1_Vsm.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_65_1_VFI.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_66_1_qdw.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_67_1_soe.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_68_1_Qcy.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_69_1_hUr.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_6_1_Llo.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_70_1_o4N.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_71_1_xkT.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_72_1_ukj.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_73_1_h7z.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_74_1_9wv.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_75_1_2re.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_76_1_AxE.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_77_1_l25.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_78_1_08D.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_79_1_x3i.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_7_1_Oq1.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_80_1_j0e.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_81_1_U9Y.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_82_1_Nxa.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_83_1_3G5.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_84_1_egz.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_85_1_fb4.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_86_1_LUl.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_87_1_bIG.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_88_1_cLn.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_89_1_zKD.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_8_1_Loc.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_90_1_ykE.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_91_1_mis.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_92_1_4xw.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_93_1_BQx.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_94_1_ha1.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_95_1_JgD.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_96_1_CKu.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_97_1_sdC.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_98_1_WIJ.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_99_1_MzX.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi225R2_HISTATS/outfile14TeVSKIM_9_1_ufO.root',
)
)
|
# Expected payload schema for user registration (field name -> expected type).
user = {
    'first_name': str,
    'last_name': str,
    'username': str,
    'password': str
}
# Expected payload schema for login.
login_user = {
    'username': str,
    'password': str
}
|
import logging
import os
import subprocess
from src.definitions import INPUT_APK_DIR, DECODED_APK_DIR
logger = logging.getLogger(__name__)
DIR = os.path.dirname(os.path.abspath(__file__))
def disassemble_apk(apk_name):
    """Decode an APK with apktool.

    Args:
        apk_name: base name of the APK (without ".apk") under INPUT_APK_DIR.

    Returns:
        Path of the directory the APK was decoded into.

    Raises:
        subprocess.CalledProcessError: if apktool exits non-zero.
    """
    input_apk = INPUT_APK_DIR + apk_name + ".apk"
    output_path = DECODED_APK_DIR + apk_name
    # SECURITY FIX: pass argv as a list with shell=False so an apk_name
    # containing spaces or shell metacharacters cannot be injected into a
    # shell command line.
    cmd = [DIR + "/apktool", "d", "-f", input_apk, "-o", output_path]
    logger.debug(" ".join(cmd))
    subprocess.check_call(cmd)
    return output_path
|
from django import forms
from doctor import constants
from doctor.models import AvailableTime
class ScheduleAppointmentForm(forms.Form):
    """Form for scheduling an appointment (date, day span, time, duration)."""

    # BUG FIX: forms.DateInput is a *widget*, not a field — the original
    # attribute was silently ignored by Django's form machinery.
    start_date = forms.DateField(label='Start Date',
                                 input_formats=['%d-%m-%Y'],
                                 widget=forms.DateInput(format='%d-%m-%Y'))
    no_of_days = forms.IntegerField(label="Number of Days", initial=constants.MAX_NUMBER_OF_DAYS,
                                    min_value=constants.MIN_NUMBER_OF_DAYS,
                                    max_value=constants.MAX_NUMBER_OF_DAYS)
    start_time = forms.ModelChoiceField(label='Start Time',
                                        queryset=AvailableTime.get_timelist(),
                                        empty_label='Select Time')
    am_pm = forms.ChoiceField(label=False, choices=constants.AM_PM_CHOICE)
    # BUG FIX: 'requied' -> 'required' (the typo raised TypeError at import
    # time, since Field.__init__ rejects unknown keyword arguments).
    duration = forms.IntegerField(label="Duration In hours", required=False,
                                  initial=constants.MIN_DURATION,
                                  min_value=constants.MIN_DURATION,
                                  max_value=constants.MAX_DURATION)
|
# Prompt for the user's name and greet them (Python 2: raw_input).
print("Enter your name")
n = raw_input()  # Greetings
print("Hello " + n)
|
from __future__ import print_function
from twisted.internet.defer import inlineCallbacks
from autobahn import wamp
from autobahn.twisted.wamp import ApplicationSession
class ControllerBackend(ApplicationSession):
    """WAMP session that registers this object's procedures once joined."""

    def __init__(self, config):
        ApplicationSession.__init__(self, config)

    @inlineCallbacks
    def onJoin(self, details):
        # Register the procedures exposed by this session object; `res`
        # holds one registration result per procedure.
        res = yield self.register(self)
        print("ControllerBackend: {} procedures registered!".format(len(res)))
|
from agents.common import PLAYER1, PLAYER2
def test_auto_rematch():
    """End-to-end check of auto_rematch between two random agents."""
    from agents.agent_mlp.mlp_training.auto_rematch import auto_rematch
    from agents.agent_random import generate_move as random_move
    import numpy as np
    n_matches = 100
    boards, moves, a_wins = auto_rematch(random_move, random_move, n_matches=n_matches)
    # two games per match and at least four moves to win
    assert len(moves) == len(boards) >= 4 * 2 * n_matches
    # every single board value has to be -1, 0 or 1
    assert np.all(np.isin(np.array(boards), (-1, 0, 1)))
    # IDIOM FIX: isinstance instead of `type(x) == T` comparisons.
    assert isinstance(boards[0], np.ndarray)
    assert isinstance(boards[0][0], np.int8)
    assert isinstance(moves[0], np.int8)
    assert a_wins[PLAYER1] + a_wins[PLAYER2] == 2 * n_matches
    # test second agent win ratio option
    sa_ratio = 0.3
    boards, moves, a_wins = auto_rematch(random_move, random_move, n_matches=n_matches, sa_ratio=sa_ratio)
    assert a_wins[PLAYER1] + a_wins[PLAYER2] == 2 * n_matches
    # ROBUSTNESS: compare win counts against rounded products so the test
    # cannot fail on float representation of 2 * n_matches * (1 - sa_ratio).
    assert a_wins[PLAYER1] == round(2 * n_matches * (1 - sa_ratio))
    assert a_wins[PLAYER2] == round(2 * n_matches * sa_ratio)
    # further type checks
    assert isinstance(boards, list) and isinstance(moves, list)
    assert boards and moves
    assert len(boards) == len(moves)
    for board in boards:
        for elem in board:
            assert elem in (-1, 1, 0)
|
# Copyright 2016 Husky Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyhusky.frontend.library.linear_regression_receiver import LinearRegressionModelReceiver
from pyhusky.frontend.library.logistic_regression_receiver import LogisticRegressionModelReceiver
from pyhusky.frontend.library.word_receiver import WordReceiver
from pyhusky.frontend.library.graph_receiver import GraphReceiver
def register(receiver_map):
    """Register every frontend library receiver on *receiver_map*."""
    receivers = (
        LinearRegressionModelReceiver,
        LogisticRegressionModelReceiver,
        WordReceiver,
        GraphReceiver,
    )
    for receiver in receivers:
        receiver.register(receiver_map)
|
# protest.py
# by aaron montoya-moraga
# march 2017
# to distribute, on terminal do
# python setup.py sdist
# from distutils.core import setup
# Wildcard import replaced with the two names actually used.
from setuptools import setup, find_packages
from codecs import open
from os import path
# taken from https://tom-christie.github.io/articles/pypi/
here = path.abspath(path.dirname(__file__))
# Long description comes straight from the README shipped with the package.
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='protest',
    version='0.5.11',
    url='https://github.com/montoyamoraga/protestpy',
    author='aaron montoya-moraga',
    description='automatic generation of protesting material',
    long_description=long_description,
    license='MIT',
    # Ship every package except docs/tests helper directories.
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    install_requires=['Pillow', 'videogrep', 'selenium<3.0.0', 'youtube_dl', 'chromedriver'],
    package_data={'protest': ['*.ttf']}
)
|
__all__ = ['UserSerializer', 'EditUserSerializer', 'GroupSerializer']
from django.contrib.auth.models import Group
from extuser.models import ExtUser
from rest_framework import serializers
from helpers import roles
class UserSerializer(serializers.ModelSerializer):
    """Serializer for ExtUser accounts.

    ``create`` routes admins through ``create_superuser`` and everyone
    else through ``create_user``.
    """
    class Meta:
        model = ExtUser
        fields = ('first_name', 'last_name', 'email',
                  'date_of_birth', 'age', 'location',
                  'desired_salary', 'register_date',
                  'last_change', 'role', 'is_active',
                  'other', 'password')
    def create(self, validated_data):
        """Create and return a new user, as superuser when role is admin."""
        ModelClass = self.Meta.model
        get = validated_data.get
        try:
            if get('role') == roles.ROLE_ADMIN:
                # Superusers are created with the minimal required field set.
                instance = ModelClass.objects.create_superuser(
                    email=get('email'),
                    date_of_birth=get('date_of_birth'),
                    location=get('location'),
                    first_name=get('first_name'),
                    password=get('password'))
            else:
                instance = ModelClass.objects.create_user(
                    email=get('email'),
                    date_of_birth=get('date_of_birth'),
                    location=get('location'),
                    first_name=get('first_name'),
                    password=get('password'),
                    last_name=get('last_name'),
                    age=get('age'),
                    desired_salary=get('desired_salary'),
                    other=get('other', ""),
                    role=get('role', roles.ROLE_USER))
        except TypeError as exc:
            raise TypeError(
                'Got a `TypeError` when calling `%s.objects.create_user()`. '
                'This may be because you have a writable field on the '
                'serializer class that is not a valid argument to '
                '`%s.objects.create()`. You may need to make the field '
                'read-only, or override the %s.create() method to handle '
                'this correctly.\nOriginal exception text was: %s.' %
                (
                    ModelClass.__name__,
                    ModelClass.__name__,
                    self.__class__.__name__,
                    exc
                )
            )
        return instance
class GroupSerializer(serializers.ModelSerializer):
    """Serializer for django.contrib.auth Group exposing its url and name.

    NOTE(review): the ``url`` field presumably relies on a hyperlinked
    router view being registered for Group — confirm against the urlconf.
    """
    class Meta:
        model = Group
        fields = ('url', 'name',)
class EditUserSerializer(serializers.Serializer):
    """Partial-update serializer for user profiles; every field is optional."""
    first_name = serializers.CharField(max_length=40, required=False)
    last_name = serializers.CharField(max_length=40, required=False)
    email = serializers.EmailField(max_length=40, required=False)
    date_of_birth = serializers.DateField(required=False)
    age = serializers.IntegerField(min_value=1, required=False)
    desired_salary = serializers.IntegerField(min_value=0, required=False)
    location = serializers.CharField(max_length=40, required=False)
    # NOTE(review): `other` is a TimeField here while UserSerializer.create
    # defaults it to "" — confirm the intended field type.
    other = serializers.TimeField(required=False)
    def update(self, instance, validated_data):
        """Copy any provided fields onto *instance*, save, and return it."""
        editable = ('first_name', 'last_name', 'email', 'date_of_birth',
                    'age', 'desired_salary', 'location', 'other')
        for field in editable:
            setattr(instance, field, validated_data.get(field, getattr(instance, field)))
        instance.save()
        return instance
|
# Compare the lengths of two strings entered by the user.
# input() already returns str, so the redundant str() wrappers were removed.
string1 = input('Please enter a first string: ')
string2 = input('Please enter a second string: ')
#First case, the first string is longer.
if len(string1) > len(string2):
    print('The first string, {0}, is the longest of the two'.format(string1))
#Second case, the second string is longer.
elif len(string2) > len(string1):
    print('The second string, {0}, is the longest of the two'.format(string2))
#Third and last case, the first string is as long as the second one.
else:
    print('The two string are of the same size, first string being {0}, and second string being {1}'.format(string1, string2))
# Exercise 84: register names/weights, then report count, average weight,
# and who is above/below the average.
lista = []
flag = 0
while flag == 0:
    nome = input('Digite o nome:')
    peso = int(input('Digite o peso:'))
    lista.append([nome, peso])
    flag = int(input('Digite 0 para adicionar mais pessoas!'))
total = len(lista)
print(f'Total de pessoas cadastradas: {total}')
soma = sum(pessoa[1] for pessoa in lista)
media = soma / len(lista)
print(f'A média de peso é {media}, logo...')
pesada = [pessoa[0] for pessoa in lista if pessoa[1] > media]
print(f'As pessoas mais pesadas são: {pesada}')
leve = [pessoa[0] for pessoa in lista if pessoa[1] < media]
print(f'As pessoas mais leves são: {leve}')
# Exercise 85: evens go to the front of the list, odds to the back;
# show the list as filled and then sorted.
num = []
for _ in range(7):
    x = int(input('Digite um valor:'))
    if x % 2 == 0:
        num.insert(0, x)
    else:
        # append is equivalent to insert(6, x) here: the list never holds
        # more than six items before this insertion.
        num.append(x)
print(f'Lista preenchida: {num}')
num.sort()
print(f'Lista ordenada: {num}')
# Exercise 86: read a 3x3 matrix row by row, then print it.
matriz = []
for _ in range(3):
    linha = [
        int(input('Digite um valor [0]:')),
        int(input('Digite um valor [1]:')),
        int(input('Digite um valor [2]:')),
    ]
    matriz.append(linha)
for linha in matriz:
    print(linha[0], linha[1], linha[2])
# Exercise 87 (works on the matrix from exercise 86)
# a) sum of the even values
soma = sum(valor for linha in matriz for valor in linha if valor % 2 == 0)
print(f'Soma dos pares: {soma}')
# b) sum of the third column
soma = sum(linha[2] for linha in matriz)
print(f'Soma dos valores da terceira coluna: {soma}')
# c) largest value of the second row (0 if every entry is negative,
# matching the original accumulator start value)
maior = 0
for valor in matriz[1]:
    if valor > maior:
        maior = valor
print(f'O maior valor da segunda linha: {maior}')
# Exercise 88: generate the requested number of lottery tickets,
# each with six numbers between 1 and 60.
import random
x = int(input('Deseja gerar quantos jogos?'))
total = []
for _ in range(x):
    jogo = [random.randint(1, 60) for _ in range(6)]
    total.append(jogo)
print(f'Possibilidades: {total}')
# Exercise 89: grade book — read students with two grades, show the
# averages, then display one student's grades on demand.
turma = []
cont = 0
flag = 0
while flag == 0:
    aluno = [
        input('Insira o nome do aluno:'),
        int(input('Insira a nota AV1:')),
        int(input('Insira a nota AV2:')),
    ]
    turma.append(aluno)
    cont += 1
    flag = int(input('Digite 0 para inserir mais alunos.'))
boletim = [(aluno[1] + aluno[2]) / 2 for aluno in turma]
print(f'Média dos alunos: {boletim}')
n = int(input('Digite o número do aluno para exibir suas notas:'))
print(f'Aluno {turma[n][0]} -> AV1: {turma[n][1]}, AV2: {turma[n][2]}')
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
'''
The prime 41, can be written as the sum of six consecutive primes:
41 = 2 + 3 + 5 + 7 + 11 + 13
This is the longest sum of consecutive primes that adds to a prime below one-hundred.
The longest sum of consecutive primes below one-thousand that adds to a prime,
contains 21 terms, and is equal to 953.
Which prime, below one-million, can be written as the sum of the most consecutive primes?
'''
import math
import timeit
# Seed primes; calc() extends this list with every prime below n // 10.
primes = [2, 3, 5, 7]
def is_prime(n):
    """Trial-division primality test.

    Divides by the known primes first, then by every integer from
    max(primes) up to sqrt(n). Correct as long as ``primes`` contains all
    primes up to max(primes) contiguously.
    """
    if n < 2:
        # Bug fix: the old version accepted 1 as prime.
        return False
    for p in primes:
        if n == p:
            # Bug fix: members of ``primes`` were rejected because
            # n % n == 0 made the loop return False for known primes.
            return True
        if n % p == 0:
            return False
    for i in range(max(primes), int(math.sqrt(n)) + 1):
        if n % i == 0:
            return False
    return True
def calc(n):
    """Return a prime below n that is a sum of many consecutive primes.

    Heuristic (as in the original): summands are primes below n // 10 and
    only runs of at least len(primes) // 10 terms are tried — sufficient
    for the Euler #50 target n = 1,000,000.
    """
    # Start at 11: 2, 3, 5 and 7 are already seeded, and with the fixed
    # is_prime they would otherwise be appended a second time.
    for i in range(11, n // 10):
        if is_prime(i):
            primes.append(i)
    # Integer division (//) keeps this working on both Python 2 and 3.
    for i in range(len(primes) // 10, 1, -1):
        if i % 100 == 0:
            print(i)
        for j in range(len(primes) - i):
            t = sum(primes[j:j + i])
            if t < n and is_prime(t):
                return t
if __name__ == '__main__':
    print(calc(1000000))
    print(timeit.Timer('problem_050.calc()', 'import problem_050').timeit(1))
|
# -*- coding: utf-8 -*-
from http import HTTPStatus as statuses
import flask
from sqlalchemy import desc
from ...result import make_result
from ...utils import api_location
from ...resource import ApiResource
from ...utils import paginated_query
from ....database import schema as db
VERSION = 1
resource = ApiResource(__name__, version=VERSION)
@resource.route('/jobs', methods=['GET'])
def get_jobs():
    """
    Get the paginated list of active jobs, newest first.
    METHOD: GET
    PATH: /api/v1/jobs
    GET params
      * from: for pagination. (integer)
      * to: for pagination. (integer)
    """
    jobs = paginated_query(
        db.Job.query.filter_by(is_active=True).order_by(desc(db.Job.created)),
        flask.request,
    )
    return make_result(
        jobs.all(),
        total_count=jobs.total_count,
        current_count=jobs.current_count,
    ), statuses.OK
@resource.route('/jobs/<string:job_name>', methods=['POST'], schema='job.post.json')
def create_job(job_name):
    """
    Create a job named after the URL segment.
    METHOD: POST
    PATH: /api/v1/jobs/<string:job_name>
    JSON params
      * description: description of job (string, optional, defaults to "")
    Responds 201 with a Location header pointing at the new job.
    """
    json = flask.request.get_json()
    data = {
        'is_active': True,
        'name': job_name,
        'description': json.get('description', ''),
    }
    job = db.Job.create(**data)
    return make_result(
        job,
        location=api_location(
            '/jobs/{}',
            job_name,
            version=VERSION,
        ),
    ), statuses.CREATED
@resource.route('/jobs/<string:job_name>', methods=['GET'])
def get_job_by_name(job_name):
    """
    Get only one job by name.
    METHOD: GET
    PATH: /api/v1/jobs/<string:job_name>
    Responds 404 when no job with that name exists.
    """
    job = db.Job.get_by_name(job_name)
    if job:
        return make_result(job), statuses.OK
    # Bug fix: the view previously fell through returning None, which makes
    # Flask raise a 500; answer unknown names with an explicit 404 instead.
    flask.abort(statuses.NOT_FOUND)
@resource.route('/jobs/<string:job_name>', methods=['DELETE'])
def delete_job_by_name(job_name):
    """
    Delete (deactivate) a job by name.
    METHOD: DELETE
    PATH: /api/v1/jobs/<string:job_name>
    Responds 404 when no job with that name exists.
    """
    job = db.Job.get_by_name(job_name)
    if job is None:
        # Bug fix: previously fell through returning None (a Flask 500).
        flask.abort(statuses.NOT_FOUND)
    # Soft delete: the job row is kept but marked inactive.
    job.update(is_active=False)
    return make_result(job), statuses.OK
|
import json
import requests
import sys
# Interactive lookup of IP/domain information via ip-api.com.
query = input("Enter an IP or Domain name: ")
endpoint = f"http://ip-api.com/json/{query}"
if query == '':  # no value was entered
    print("Value error")
    sys.exit()  # terminates the program immediately
response = requests.get(endpoint)
data = json.loads(response.text)
status = data['status']
if status == 'fail':
    print("Value input non-exist\'s")
    sys.exit()
# NOTE(review): assumes a successful response always carries these keys;
# ip-api may omit fields for some query types — verify before relying on it.
country = data['country']
city = data['city']
timezone = data['timezone']
lat = data['lat']
lon = data['lon']
ip = data['query']
value = -1
while value != 0:
    print( f"What information do you want to know?\n 1.Country and city > \n 2.Timezone > \n 3.Coordinates > \n 4.IP of domanin Name > \n 5.Exit ! ")
    try:
        value = int(input(""))
    except ValueError:
        # Robustness fix: a non-numeric menu choice used to crash with ValueError.
        continue
    if value == 1:
        print(f"The domain is located in {country} / {city}\n")
    if value == 2:
        print(f"The timezone is {timezone}\n")
    if value == 3:
        print(f"The coordinates > Lat: {lat} and Lon: {lon}\n")
    if value == 4:
        print(f"The IP of domain is {ip}\n")
    if value == 5:
        sys.exit()
|
"""
TFeat Implementation
Author: Alex Butenko
"""
import sys
import os
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import cv2
from features.DetectorDescriptorTemplate import DetectorAndDescriptor
import features.feature_utils as utils
dirname = os.path.dirname(__file__)
class tfeat(DetectorAndDescriptor):
    """TFeat descriptor wrapper: resizes patches to 32x32 and runs them
    through a pretrained TNet to produce 128-D descriptors."""
    def __init__(self, pretrained_model='tfeat_misc/tfeat-liberty.params'):
        super(tfeat, self).__init__(
            name='tfeat',
            is_detector=False,
            is_descriptor=True,
            is_both=False,
            patch_input=True,
            can_batch=True)
        # Load pretrained weights relative to this module's directory.
        self.model = TNet()
        weights_path = os.path.join(dirname, pretrained_model)
        self.model.load_state_dict(torch.load(weights_path, map_location='cpu'))
        self.model.eval()
    def extract_descriptors_from_patch_batch(self, batch):
        """Describe a batch of patches; returns a (N, 128) numpy array."""
        resized = [
            cv2.resize(patch, (32, 32), interpolation=cv2.INTER_AREA)
            for patch in batch
        ]
        tensor = torch.tensor(resized).view(len(resized), 1, 32, 32)
        descriptors = self.model(tensor.float())
        return descriptors.detach().numpy()
    def extract_descriptors_from_patch(self, patch):
        """Describe a single patch; returns a (1, 128) numpy array."""
        resized = cv2.resize(patch, (32, 32), interpolation=cv2.INTER_AREA)
        tensor = torch.tensor(resized).view(1, 1, 32, 32)
        descriptor = self.model(tensor.float())
        return descriptor.detach().numpy()
    def extract_descriptor(self, image, feature):
        """Cut a 32x32 patch around every feature and describe them all."""
        gray_image = utils.all_to_gray(image)
        patches = np.array([
            utils.extract_patch(gray_image, f, patch_sz=32) for f in feature
        ])
        return self.extract_descriptors_from_patch_batch(patches)
class TNet(nn.Module):
    """TFeat model definition.

    Input instance-norm, two conv+tanh stages (with a max-pool between
    them), then a fully connected projection to a 128-D descriptor.
    """
    def __init__(self, pretrained_model=None):
        # `pretrained_model` is accepted for interface compatibility but is
        # unused here; weights are loaded externally via load_state_dict.
        super(TNet, self).__init__()
        self.features = nn.Sequential(
            nn.InstanceNorm2d(1, affine=False),
            nn.Conv2d(1, 32, kernel_size=7),
            nn.Tanh(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(32, 64, kernel_size=6),
            nn.Tanh(),
        )
        self.descr = nn.Sequential(
            nn.Linear(64 * 8 * 8, 128),
            nn.Tanh(),
        )
    def forward(self, x):
        """Map a (N, 1, 32, 32) batch of patches to (N, 128) descriptors."""
        feature_maps = self.features(x)
        flattened = feature_maps.view(feature_maps.size(0), -1)
        return self.descr(flattened)
|
"""
Primitive types and type conversions.
"""
import math
from itertools import izip_longest
from .exceptions import ESTypeError, ESSyntaxError
from .literals import NumberLiteralParser
NaN = float('nan')
inf = float('inf')
sign = lambda x: math.copysign(1, x)
MASK16 = (2 ** 16) - 1
MASK32 = (2 ** 32) - 1
# Marker classes used as type tags: get_primitive_type() returns one of
# these classes (or a singleton instance for undefined/null) to classify a
# value, mirroring the ECMAScript type taxonomy (spec section 8).
class Type(object):
    """
    Base type class.
    """
    pass
class PrimitiveType(Type):
    """
    Base for all primitive object types.
    """
    pass
class UndefinedType(PrimitiveType):
    """
    The ``undefined`` type.
    """
    pass
# The single instance of UndefinedType
Undefined = UndefinedType()
class NullType(PrimitiveType):
    """
    The ``null`` type.
    """
    pass
# The single instance of NullType
Null = NullType()
class BooleanType(PrimitiveType):
    """
    The ``Boolean`` primitive type.
    """
    pass
class NumberType(PrimitiveType):
    """
    The ``Number`` primitive type.
    """
    pass
class StringType(PrimitiveType):
    """
    The ``String`` primitive type.
    """
    pass
class ObjectType(Type):
    """
    The base for all non-primitive objects.
    """
    pass
def get_primitive_type(obj):
    """
    Returns the primitive type class of the given object (or the
    Undefined/Null singleton), falling back to ObjectType.
    """
    if obj is Undefined or obj is None:
        return Undefined
    if obj is Null:
        return Null
    # Order matters: bool is a subclass of int, so it must be tested first.
    natives_to_es = (
        ((bool, BooleanType), BooleanType),
        ((float, int, long, NumberType), NumberType),
        ((basestring, StringType), StringType),
    )
    for natives, es_type in natives_to_es:
        if isinstance(obj, natives):
            return es_type
    return ObjectType
def is_primitive(obj):
    """
    Does the given object have a non-ObjectType primitive type class?
    """
    return ObjectType is not get_primitive_type(obj)
def check_object_coercible(obj):
    """
    9.10 CheckObjectCoercible: raise ESTypeError for undefined and null,
    which cannot be converted to objects.
    """
    kind = get_primitive_type(obj)
    if kind is Undefined:
        raise ESTypeError('Cannot convert undefined to object')
    if kind is Null:
        raise ESTypeError('Cannot convert null to object')
class Conversions(object):
    """
    Interpreter mixins for performing type conversions. These rely on having
    access to constructors, so they must have the interpreter available.
    Section numbers in the docstrings refer to the ECMAScript 5 spec.
    9.0
    """
    def to_primitive(self, value, preferred_type=None):
        """
        9.1 ToPrimitive: unwrap wrapper objects to their primitive value,
        or ask the object for its default value with the given hint.
        """
        if is_primitive(value):
            return getattr(value, 'primitive_value', value)
        obj = self.to_object(value)
        return obj.default_value(hint=preferred_type)
    def to_boolean(self, value):
        """
        9.2 ToBoolean. Objects are always truthy; undefined/null falsy;
        numbers follow NaN/zero rules; strings are truthy when non-empty.
        """
        primitive_type = get_primitive_type(value)
        if primitive_type is ObjectType:
            return True
        if primitive_type is Undefined or primitive_type is Null:
            return False
        value = self.to_primitive(value)
        if primitive_type is NumberType:
            if math.isnan(value):
                return False
            return bool(value)
        elif primitive_type is BooleanType:
            return value
        elif primitive_type is StringType:
            return len(value) > 0
    def to_number(self, value):
        """
        9.3 ToNumber. Strings are parsed with NumberLiteralParser; octal
        literals are only allowed outside strict code.
        """
        primitive_type = get_primitive_type(value)
        if primitive_type is Undefined:
            return NaN
        elif primitive_type is Null:
            return +0
        elif primitive_type is ObjectType:
            # Recurse on the object's primitive default value.
            primitive_value = self.to_primitive(value, preferred_type='Number')
            return self.to_number(primitive_value)
        primitive_value = self.to_primitive(value)
        if primitive_type is NumberType:
            return primitive_value
        elif primitive_type is BooleanType:
            return int(primitive_value)
        elif primitive_type is StringType:
            try:
                value = primitive_value.strip()
                if not value:
                    return 0
                # NOTE: local ``sign`` shadows the module-level sign lambda.
                sign = 1
                has_sign = False
                if value[0] == u'-':
                    sign = -1
                    value = value[1:]
                    has_sign = True
                elif value[0] == u'+':
                    value = value[1:]
                    has_sign = True
                if value == u'Infinity':
                    return sign * inf
                if has_sign:
                    # A signed literal can only be decimal (no signed hex/octal).
                    return sign * NumberLiteralParser(value).parse_decimal_literal()
                strict = self.in_strict_code()
                return NumberLiteralParser.parse_string(value, allow_octal=not strict)
            except ESSyntaxError:
                return NaN
    def to_integer(self, value):
        """
        9.4 ToInteger: NaN becomes 0; zero/infinities pass through;
        otherwise truncate toward zero, preserving the sign.
        """
        number = self.to_number(value)
        if math.isnan(number):
            return 0
        if number == 0 or number == +inf or number == -inf:
            return number
        return int(sign(number) * math.floor(abs(number)))
    def to_int32(self, value):
        """
        9.5 ToInt32: wrap into the signed 32-bit range.

        NOTE(review): the sign-bit test runs before masking, so values
        outside the 32-bit range may deviate from the spec — verify.
        """
        value = self.to_integer(value)
        if value == inf or value == -inf or math.isnan(value) or value == 0:
            return 0
        if value & (1 << (32 - 1)):
            # Sign-extend negative 32-bit values.
            value = value | ~MASK32
        else:
            value = value & MASK32
        return value
    def to_uint32(self, value):
        """
        9.6 ToUint32: wrap into the unsigned 32-bit range.
        """
        value = self.to_integer(value)
        if value == inf or value == -inf or math.isnan(value) or value == 0:
            return 0
        return MASK32 & value
    def to_uint16(self, value):
        """
        9.7 ToUint16: wrap into the unsigned 16-bit range.
        """
        value = self.to_integer(value)
        if value == inf or value == -inf or math.isnan(value) or value == 0:
            return 0
        return MASK16 & value
    def to_string(self, value):
        """
        9.8 ToString. Numbers are handled specially (NaN/Infinity/sign);
        objects recurse through their primitive 'String'-hinted value.
        """
        primitive_type = get_primitive_type(value)
        if primitive_type is Undefined:
            return "undefined"
        elif primitive_type is Null:
            return "null"
        elif primitive_type is BooleanType:
            value = self.to_boolean(value)
            return value and "true" or "false"
        elif primitive_type is NumberType:
            value = self.to_primitive(value)
            if math.isnan(value):
                return "NaN"
            elif value == inf:
                return "Infinity"
            elif value < 0:
                return "-" + self.to_string(abs(value))
            # FIXME: unicode(value) does not implement the spec's
            # shortest-representation number-to-string algorithm.
            return unicode(value)
        elif primitive_type is StringType:
            return self.to_primitive(value)
        elif primitive_type is ObjectType:
            primitive_value = self.to_primitive(value, preferred_type='String')
            return self.to_string(primitive_value)
    def to_object(self, value):
        """
        9.9 ToObject: raise for undefined/null, pass objects through, and
        box primitives via the matching wrapper constructor.
        """
        check_object_coercible(value)
        primitive_type = get_primitive_type(value)
        if primitive_type is ObjectType:
            return value
        # We've got a primitive, so make an object
        primitive_value = self.to_primitive(value)
        if primitive_type is BooleanType:
            cons = self.BooleanConstructor.construct
        elif primitive_type is NumberType:
            cons = self.NumberConstructor.construct
        elif primitive_type is StringType:
            cons = self.StringConstructor.construct
        return cons([primitive_value])
    def same_value(self, x, y):
        """
        9.12 SameValue: type-sensitive equality; NaN is equal to NaN and
        objects compare by identity.
        """
        x_type = get_primitive_type(x)
        y_type = get_primitive_type(y)
        if x_type != y_type:
            return False
        if x_type is Undefined:
            return True
        if x_type is Null:
            return True
        if x_type is NumberType:
            x = self.to_primitive(x)
            y = self.to_number(y)
            if math.isnan(x) and math.isnan(y):
                return True
            return x == y
        elif x_type is StringType:
            x = self.to_primitive(x)
            y = self.to_string(y)
            return x == y
        elif x_type is BooleanType:
            x = self.to_primitive(x)
            y = self.to_boolean(y)
            return x == y
        elif x_type is ObjectType and y_type is ObjectType:
            return id(x) == id(y)
        return False
def get_arguments(arguments, count=1):
    """
    Return the argument or ``Undefined``. If ``count`` is > 1, return
    an array of the arguments or ``Undefined`` up to ``count`` elements.
    """
    padded = list(arguments[:count])
    # Pad out with Undefined until the requested arity is reached.
    while len(padded) < count:
        padded.append(Undefined)
    if count == 1:
        return padded[0]
    return padded
|
from django.contrib import admin
from .models import Beach, SelectedBeach
# Expose the Beach and SelectedBeach models in the Django admin site.
admin.site.register(Beach)
admin.site.register(SelectedBeach)
|
"""
תשע"ב מועד א' שאלה 5
"""
import numpy as np
from numpy import random as rn
import scipy.stats as ss
#Black and Scholes
def d1(S0, K, r, sigma, T):
    """Black-Scholes d1 auxiliary term."""
    numerator = np.log(S0/K) + (r + sigma**2 / 2) * T
    return numerator / (sigma * np.sqrt(T))
def d2(S0, K, r, sigma, T):
    """Black-Scholes d2 auxiliary term."""
    numerator = np.log(S0 / K) + (r - sigma**2 / 2) * T
    return numerator / (sigma * np.sqrt(T))
def blsprice(type,S0, K, r, sigma, T):
    """Black-Scholes price of a European option.

    ``type`` "C" prices a call; any other value prices a put.
    """
    if type=="C":
        call = S0 * ss.norm.cdf(d1(S0, K, r, sigma, T)) - K * np.exp(-r * T) * ss.norm.cdf(d2(S0, K, r, sigma, T))
        return call
    put = K * np.exp(-r * T) * ss.norm.cdf(-d2(S0, K, r, sigma, T)) - S0 * ss.norm.cdf(-d1(S0, K, r, sigma, T))
    return put
# Model and simulation parameters.
T = 1
n = 100
h = T / n
a = 0.03
b = 0.09
r = 0.1
rho = 0.1
S0 = 0.9
v0 = 0.04
k = 1
def _simulate_payoffs(M):
    """Simulate M stochastic-volatility paths over n steps.

    Returns the discounted payoffs of the target call option (x) together
    with those of a constant-volatility control-variate option (y). The
    RNG call order matches the original duplicated code exactly.
    """
    dw1 = rn.randn(M, n)
    dw2 = rn.randn(M, n)
    S1 = S0 * np.ones((M, n + 1))
    S2 = S0 * np.ones((M, n + 1))  # control-variate path (constant vol)
    v = v0 * np.ones((M, n + 1))
    for i in range(0, n):
        v[:, i + 1] = v[:, i] + a * (v0 - v[:, i]) * h + b * np.sqrt(h * np.abs(v[:, i])) * (rho * dw1[:, i] + np.sqrt(1 - rho**2) * dw2[:, i])
        S1[:, i + 1] = S1[:, i] * np.exp((r - v[:, i] / 2) * h + np.sqrt(np.abs(v[:, i]) * h) * dw1[:, i])
        S2[:, i + 1] = S2[:, i] * np.exp((r - v0 / 2) * h + np.sqrt(v0 * h) * dw1[:, i])
    x = (S1[:, -1] - k) * (S1[:, -1] > k) * np.exp(-r * T)  # target option payoff
    y = np.exp(-r * T) * (S2[:, -1] > k) * (S2[:, -1] - k)  # control-variate payoff
    return x, y
# Pilot run: estimate the optimal control-variate coefficient C.
M = 1000
x, y = _simulate_payoffs(M)
q = np.cov(x, y)
C = -q[0, 1] / q[1, 1]
# Full simulation with the control variate applied.
M = 10000
V_C = blsprice("C", S0, k, r, np.sqrt(v0), T)  # analytic price of the control option
x, y = _simulate_payoffs(M)
corrected = x + C * (y - V_C)  # control-variate correction
V = [np.mean(corrected), np.std(corrected) / np.sqrt(M)]
print("V=", V)
|
import datetime
import logging
from typing import Any, Dict, List, Tuple
import flask_restless
import gunicorn.app.base
from dbcat import Catalog
from dbcat.catalog import CatColumn
from dbcat.catalog.db import DbScanner
from dbcat.catalog.models import (
CatSchema,
CatSource,
CatTable,
ColumnLineage,
Job,
JobExecution,
JobExecutionStatus,
)
from flask import Flask
from flask_restful import Api, Resource, reqparse
from data_lineage.parser import extract_lineage, parse, visit_dml_query
class Kedro(Resource):
    """REST resource exposing the column lineage graph (columns + tasks)."""
    def __init__(self, catalog: Catalog):
        self._catalog = catalog
        self._parser = reqparse.RequestParser()
        self._parser.add_argument(
            "job_ids", action="append", help="List of job ids for a sub graph"
        )
    def get(self):
        """Return {"nodes": [...], "edges": [...]} for the requested jobs."""
        args = self._parser.parse_args()
        nodes = []
        edges = []
        for lineage in self._catalog.get_column_lineages(args["job_ids"]):
            nodes.extend([
                self._column_info(lineage.source),
                self._column_info(lineage.target),
                self._job_info(lineage.job_execution.job),
            ])
            task_key = "task:{}".format(lineage.job_execution.job_id)
            edges.append(
                {"source": "column:{}".format(lineage.source_id), "target": task_key}
            )
            edges.append(
                {"source": task_key, "target": "column:{}".format(lineage.target_id)}
            )
        return {"nodes": nodes, "edges": edges}
    @staticmethod
    def _column_info(node: CatColumn):
        """Graph node payload for a column."""
        return {
            "id": "column:{}".format(node.id),
            "name": ".".join(node.fqdn),
            "type": "data",
        }
    @staticmethod
    def _job_info(node: Job):
        """Graph node payload for a job/task."""
        return {"id": "task:{}".format(node.id), "name": node.name, "type": "task"}
class Scanner(Resource):
    """REST resource that triggers a catalog scan of one source."""
    def __init__(self, catalog: Catalog):
        self._catalog = catalog
        self._parser = reqparse.RequestParser()
        self._parser.add_argument("id", required=True, help="ID of the resource")
    def post(self):
        """Scan the source identified by the ``id`` argument."""
        request_args = self._parser.parse_args()
        logging.debug("Args for scanning: {}".format(request_args))
        source_id = int(request_args["id"])
        source = self._catalog.get_source_by_id(source_id)
        scanner = DbScanner(self._catalog, source)
        scanner.scan()
        return "Scanned {}".format(source.fqdn), 200
class Parser(Resource):
    """REST resource that parses a SQL query and records its lineage."""
    def __init__(self, catalog: Catalog):
        self._catalog = catalog
        self._parser = reqparse.RequestParser()
        self._parser.add_argument("query", required=True, help="Query to parse")
        self._parser.add_argument("name", help="Name of the ETL job")
    def post(self):
        """Parse the query; on a DML statement, store and return its job execution."""
        request_args = self._parser.parse_args()
        logging.debug("Parse query: {}".format(request_args["query"]))
        parsed = parse(request_args["query"], request_args["name"])
        visitor = visit_dml_query(self._catalog, parsed)
        if visitor is None:
            # Only DML statements produce lineage.
            return {"data": {"error": "Query is not a DML Query"}}, 400
        execution = extract_lineage(self._catalog, visitor, parsed)
        payload = {
            "data": {
                "id": execution.id,
                "type": "job_executions",
                "attributes": {
                    "job_id": execution.job_id,
                    "started_at": execution.started_at.strftime(
                        "%Y-%m-%d %H:%M:%S"
                    ),
                    "ended_at": execution.ended_at.strftime(
                        "%Y-%m-%d %H:%M:%S"
                    ),
                    "status": execution.status.name,
                },
            }
        }
        return payload, 200
class Server(gunicorn.app.base.BaseApplication):
    """Standalone gunicorn application wrapping the Flask app."""
    def __init__(self, app, options=None):
        self.options = options or {}
        self.application = app
        super().__init__()
    def load_config(self):
        """Forward only the options gunicorn actually recognises."""
        for key, value in self.options.items():
            if key in self.cfg.settings and value is not None:
                self.cfg.set(key.lower(), value)
    def load(self):
        return self.application
def job_execution_serializer(instance: JobExecution, only: List[str]):
    """Serialize a JobExecution row into a JSON:API-style dict.

    ``only`` is required by the flask-restless serializer interface but
    is not used here.
    """
    attributes = {
        "job_id": instance.job_id,
        "started_at": instance.started_at.strftime("%Y-%m-%d %H:%M:%S"),
        "ended_at": instance.ended_at.strftime("%Y-%m-%d %H:%M:%S"),
        "status": instance.status.name,
    }
    return {"id": instance.id, "type": "job_executions", "attributes": attributes}
def job_execution_deserializer(data: Dict["str", Any]):
    """Deserialize a JSON:API payload into a JobExecution instance.

    Expects ``data["data"]["attributes"]`` to carry job_id, started_at and
    ended_at (format "%Y-%m-%d %H:%M:%S"), plus a status string.
    """
    attributes = data["data"]["attributes"]
    logging.debug(attributes)
    job_execution = JobExecution()
    job_execution.job_id = int(attributes["job_id"])
    job_execution.started_at = datetime.datetime.strptime(
        attributes["started_at"], "%Y-%m-%d %H:%M:%S"
    )
    job_execution.ended_at = datetime.datetime.strptime(
        attributes["ended_at"], "%Y-%m-%d %H:%M:%S"
    )
    # Bug fix: both branches of the original conditional produced SUCCESS,
    # so failed executions were recorded as successful. Non-"SUCCESS"
    # statuses now map to FAILURE (assumes JobExecutionStatus defines
    # FAILURE — verify against dbcat's enum).
    job_execution.status = (
        JobExecutionStatus.SUCCESS
        if attributes["status"] == "SUCCESS"
        else JobExecutionStatus.FAILURE
    )
    logging.debug(job_execution)
    logging.debug(job_execution.status == JobExecutionStatus.SUCCESS)
    return job_execution
def create_server(
    catalog_options: Dict[str, str], options: Dict[str, str], is_production=True
) -> Tuple[Any, Catalog]:
    """Build the Flask app with CRUD and RPC endpoints.

    Returns (gunicorn Server, catalog) in production mode, otherwise the
    raw Flask app and the catalog.
    """
    logging.debug(catalog_options)
    catalog = Catalog(**catalog_options)
    app = Flask(__name__)
    # Create CRUD APIs
    methods = ["DELETE", "GET", "PATCH", "POST"]
    url_prefix = "/api/v1/catalog"
    api_manager = flask_restless.APIManager(app, catalog.scoped_session)
    # Entities exposing a computed "fqdn" attribute share identical settings.
    for model in (CatSource, CatSchema, CatTable, CatColumn):
        api_manager.create_api(
            model,
            methods=methods,
            url_prefix=url_prefix,
            additional_attributes=["fqdn"],
        )
    api_manager.create_api(Job, methods=methods, url_prefix=url_prefix)
    api_manager.create_api(
        JobExecution,
        methods=methods,
        url_prefix=url_prefix,
        serializer=job_execution_serializer,
        deserializer=job_execution_deserializer,
    )
    api_manager.create_api(
        ColumnLineage,
        methods=methods,
        url_prefix=url_prefix,
        collection_name="column_lineage",
    )
    # RPC-style endpoints.
    restful_manager = Api(app)
    restful_manager.add_resource(
        Kedro, "/api/main", resource_class_kwargs={"catalog": catalog}
    )
    restful_manager.add_resource(
        Scanner,
        "{}/scanner".format(url_prefix),
        resource_class_kwargs={"catalog": catalog},
    )
    restful_manager.add_resource(
        Parser, "/api/v1/parser", resource_class_kwargs={"catalog": catalog}
    )
    for rule in app.url_map.iter_rules():
        logging.debug(
            "{:50s} {:20s} {}".format(rule.endpoint, ",".join(rule.methods), rule)
        )
    if is_production:
        return Server(app=app, options=options), catalog
    return app, catalog
|
import os
import logging
import pandas as pd
from pathlib import Path
from cropcore.model_data_access import (
insert_model_run,
insert_model_product,
insert_model_predictions,
get_sqlalchemy_session,
)
# relative or non-relative imports, depending on where we run from :-/
if os.getcwd() == os.path.dirname(os.path.realpath(__file__)):
from TestScenarioV1_1 import runScenarios, FILEPATH_WEATHER
from ges.config import config
from ges.ges_utils import (
get_ges_model_id,
get_scenarios,
create_measures_dicts,
)
else:
from .TestScenarioV1_1 import runScenarios, FILEPATH_WEATHER
from .ges.config import config
from .ges.ges_utils import (
get_ges_model_id,
get_scenarios,
create_measures_dicts,
)
# Configuration: filesystem locations and calibration settings.
path_conf = config(section="paths")
DATA_DIR = Path(path_conf["data_dir"])
cal_conf = config(section="calibration")
MODEL_GES_NAME = cal_conf["model_name"]
SENSOR_RH_16B2_DATABASE_ID = int(cal_conf["sensor_id"])
# Each MEASURE_* dict describes how one DB measure maps onto the scenario
# results: "measure_database_id" is the measure's DB id, "result_index" the
# column in the results array, "preprocess" the unit conversion applied by
# assemble_values, and "result_key" selects the T_air/RH_air result set.
# NOTE(review): these constants are not referenced elsewhere in this module
# (run_pipeline builds measures via create_measures_dicts) — confirm they
# are still needed before relying on them.
MEASURE_MEAN_TEMPERATURE = {
    "measure_database_id": 1,
    "result_index": 0,
    "preprocess": "to_celcius",
    "result_key": "T_air",
}
MEASURE_SCENARIO_TEMPERATURE = {
    "measure_database_id": 9,
    "result_index": 1,
    "preprocess": "to_celcius",
    "result_key": "T_air",
}
MEASURE_UPPER_TEMPERATURE = {
    "measure_database_id": 2,
    "result_index": 3,
    "preprocess": "to_celcius",
    "result_key": "T_air",
}
MEASURE_LOWER_TEMPERATURE = {
    "measure_database_id": 3,
    "result_index": 2,
    "preprocess": "to_celcius",
    "result_key": "T_air",
}
MEASURE_MEAN_HUMIDITY = {
    "measure_database_id": 10,
    "result_index": 0,
    "preprocess": "to_percent",
    "result_key": "RH_air",
}
MEASURE_SCENARIO_HUMIDITY = {
    "measure_database_id": 11,
    "result_index": 1,
    "preprocess": "to_percent",
    "result_key": "RH_air",
}
MEASURE_LOWER_HUMIDITY = {
    "measure_database_id": 12,
    "result_index": 2,
    "preprocess": "to_percent",
    "result_key": "RH_air",
}
MEASURE_UPPER_HUMIDITY = {
    "measure_database_id": 13,
    "result_index": 3,
    "preprocess": "to_percent",
    "result_key": "RH_air",
}
def get_forecast_date(filepath_weather=None):
    """Return the timestamp of the last row of the weather CSV.

    The file is expected to hold headerless rows of
    (timestamp, temperature, humidity); falls back to FILEPATH_WEATHER
    when no path is given.
    """
    weather_path = filepath_weather or FILEPATH_WEATHER
    df_weather = pd.read_csv(
        weather_path, header=None, names=["Timestamp", "Temperature", "Humidity"]
    )
    forecast_date = pd.to_datetime(df_weather["Timestamp"].iloc[-1])
    logging.info("Forecast Date: {0}".format(forecast_date))
    return forecast_date
def assemble_values(product_id, measure, all_results):
    """
    Turn the output of TestScenarioV1_1.runModel() into DB insert tuples.
    Parameters
    ----------
    product_id: int, index of the (run x measure) product in the DB.
    measure: dict, as produced by ges_utils.create_measures_dicts; its
        "result_key" selects the result set, "result_index" the column,
        and "preprocess" the unit conversion to apply.
    all_results: dict, keyed by T_air, RH_air, values are np.array of
        dimension (num_timepoints, num_scenarios+2) (where the +2
        is because the Business-As-Usual scenario has upper and
        lower bounds as well as the mean).
    Returns
    -------
    prediction_parameters: list of tuples, of length num_timepoints, with
        each tuple containing (product_id, result, pred_index)
        where result is the value of that measure at that time.
    """
    converters = {
        "to_percent": lambda humidity_ratio: humidity_ratio * 100,
        "to_celcius": lambda temp_kelvin: temp_kelvin - 273.15,
    }
    series = all_results[measure["result_key"]]
    raw_values = [row[measure["result_index"]] for row in series]
    convert = converters.get(measure["preprocess"])
    if convert is not None:
        raw_values = [convert(value) for value in raw_values]
    return [
        (product_id, value, prediction_index)
        for prediction_index, value in enumerate(raw_values)
    ]
def run_pipeline(
    scenario_ids=None,
    filepath_ach=None,
    filepath_ias=None,
    filepath_weather=None,
    filepath_forecast=None,
    data_dir=DATA_DIR,
    sensor_id=SENSOR_RH_16B2_DATABASE_ID,
    model_name=MODEL_GES_NAME,
    session=None,
):
    """
    Run all the test scenarios and upload results to DB.

    Parameters
    ----------
    scenario_ids: optional list of scenario ids to run; None lets the
        callees use their default set.
    filepath_ach, filepath_ias, filepath_weather, filepath_forecast:
        optional overrides for the input data files; None falls back to
        the callees' configured defaults.
    data_dir: directory where RH/T result CSV snapshots are written.
    sensor_id: DB id of the sensor this model run is associated with.
    model_name: model name used to look up the model id in the DB.
    session: optional SQLAlchemy session; a new one is created when None.

    Returns
    -------
    int, the number of prediction rows inserted into the DB.
    """
    if not session:
        session = get_sqlalchemy_session()
    logging.basicConfig(level=logging.INFO)
    # Forecast date comes from the last row of the weather file.
    forecast_date = get_forecast_date(filepath_weather)
    model_id = get_ges_model_id(model_name, session=session)
    measures = create_measures_dicts(
        scenario_ids=scenario_ids, model_name=model_name, session=session
    )
    # Run the GES scenarios; result is keyed by "T_air" / "RH_air".
    result = runScenarios(
        scenario_ids=scenario_ids,
        filepath_ach=filepath_ach,
        filepath_ias=filepath_ias,
        filepath_weather=filepath_weather,
        filepath_forecast=filepath_forecast,
        session=session,
    )
    # Snapshot the raw model output to CSV for offline inspection.
    filepath_resultsRH = os.path.join(data_dir, path_conf["filename_resultsrh"])
    filepath_resultsT = os.path.join(data_dir, path_conf["filename_resultst"])
    df_resultsRH = pd.DataFrame(result["RH_air"])
    df_resultsRH.to_csv(filepath_resultsRH, header=False)
    df_resultsT = pd.DataFrame(result["T_air"])
    df_resultsT.to_csv(filepath_resultsT, header=False)
    # Register the run itself, then one "product" per (run x measure).
    run_id = insert_model_run(
        sensor_id=sensor_id,
        model_id=model_id,
        time_forecast=forecast_date,
        session=session,
    )
    num_rows_inserted = 0
    if run_id is not None:
        logging.info("Run inserted, logged as ID: {0}".format(run_id))
        for measure in measures:
            product_id = insert_model_product(
                run_id=run_id,
                measure_id=measure["measure_database_id"],
                session=session,
            )
            # Don't try to add values unless we successfully added a run x measure "product".
            if not product_id:
                continue
            value_parameters = assemble_values(
                product_id=product_id, measure=measure, all_results=result
            )
            logging.info(value_parameters)
            num_rows_inserted += insert_model_predictions(
                value_parameters, session=session
            )
    logging.info("{0} rows inserted".format(num_rows_inserted))
    return num_rows_inserted
def main():
    """Entry point: run the full pipeline with all default parameters."""
    # run with all default parameters
    run_pipeline()
if __name__ == "__main__":
    main()
|
from forest import RandomForest
from kneighbors import KNeighbors
from linear import LinearModel, RidgeModel
from svr import SVRModel
|
import unittest
from lib.workflow.workflow_runner import WorkflowRunner
from lib.exception.file_format_exception import FileFormatException
from mockito import Mock, verify, when, any, inorder
from mock import mock_open, patch, Mock as mock_Mock
class WorkflowRunnerTest(unittest.TestCase):
    """Tests for WorkflowRunner.

    Each test assembles a workflow-file body in ``self.workflow_file_content``,
    runs it through ``run_workflow`` (which patches ``open`` and
    ``time.sleep``), and verifies the calls made on the mocked filesystem
    and job-submission collaborators.
    """

    def setUp(self):
        # Patched in place of time.sleep so the retry back-off does not
        # slow the test run down.
        self.sleep_mocked = mock_Mock()
        self.workflow_file_content = ""
        self.filesystem = Mock()
        self.job_submission = Mock()
        self.workflow_file_path = "/path/to/workflow/file"
        self.workflow = WorkflowRunner(self.filesystem,
                                       self.job_submission)
        # Default: listing any directory succeeds with an empty result.
        when(self.filesystem).list_dir(any()).thenReturn([])

    def test_opens_right_file(self):
        """The runner opens the workflow file it was given, read-only."""
        self.run_workflow()
        self.open_mocked.assert_called_with(self.workflow_file_path, 'r')

    def test_runs_ls_job(self):
        """An 'ls' JOB maps to filesystem.list_dir."""
        self.workflow_file_content = "JOB A ls ssh://localhost/tmp"
        self.run_workflow()
        verify(self.filesystem).list_dir("ssh://localhost/tmp")

    def test_runs_cat_job(self):
        """A 'cat' JOB maps to filesystem.cat with a list of sources."""
        self.workflow_file_content = "JOB B cat ssh://localhost/tmp/file"
        self.run_workflow()
        verify(self.filesystem).cat(["ssh://localhost/tmp/file"])

    def test_handles_two_jobs(self):
        """Blank lines between JOB entries are tolerated; both jobs run."""
        self.workflow_file_content = "JOB A ls ssh://localhost/tmp\n\n"
        self.workflow_file_content += "\nJOB B cat ssh://localhost/tmp/file"
        self.run_workflow()
        verify(self.filesystem).list_dir("ssh://localhost/tmp")
        verify(self.filesystem).cat(["ssh://localhost/tmp/file"])

    def test_parses_jobs_argument(self):
        """'cat ... -o target' maps to cat_to_file(sources, target)."""
        job = "JOB A cat ssh://host/tmp/file1 -o ssh://host/tmp/file2"
        self.workflow_file_content = job
        self.run_workflow()
        verify(self.filesystem).cat_to_file(["ssh://host/tmp/file1"],
                                            "ssh://host/tmp/file2")

    def test_runs_cp_job(self):
        """A 'copy' JOB maps to filesystem.copy(sources, destination)."""
        job = "JOB B copy ssh://host1/src ssh://host2/dst"
        self.workflow_file_content = job
        self.run_workflow()
        verify(self.filesystem).copy(["ssh://host1/src"], "ssh://host2/dst")

    def test_runs_rm_job(self):
        """An 'rm' JOB maps to filesystem.remove."""
        self.workflow_file_content = "JOB A rm ssh://host/file"
        self.run_workflow()
        verify(self.filesystem).remove(["ssh://host/file"])

    def test_runs_exec_job(self):
        """An 'exec -r host cmd arg' JOB is submitted as a remote job."""
        self.workflow_file_content = "JOB a exec -r ssh://host ls /tmp"
        self.run_workflow()
        verify(self.job_submission).submit_job("ls", "/tmp", None,
                                               None, "ssh://host")

    def test_raise_exception_invalid_file_format(self):
        """A malformed JOB line must raise FileFormatException."""
        self.workflow_file_content = "JOB\nPARENT A CHILD A"
        # assertRaises replaces the try/assertTrue(False)/except
        # anti-pattern: it cannot pass by accident and reports a clearer
        # message when the exception is not raised.
        with self.assertRaises(FileFormatException):
            self.run_workflow()

    def test_schedules_job_with_one_child_one_parent(self):
        """PARENT B CHILD A forces B to execute before A."""
        self.workflow_file_content = "JOB A ls ssh://host/dir\n"
        self.workflow_file_content += "JOB B rm ssh://host/file\n"
        self.workflow_file_content += "PARENT B CHILD A"
        self.run_workflow()
        inorder.verify(self.filesystem).remove(["ssh://host/file"])
        inorder.verify(self.filesystem).list_dir("ssh://host/dir")

    def test_schedules_job_with_two_children(self):
        """Multiple PARENT/CHILD lines produce a consistent topological order."""
        self.workflow_file_content = "JOB A ls ssh://host/dir\n"
        self.workflow_file_content += "JOB B rm ssh://host/file\n"
        self.workflow_file_content += "JOB C ls ssh://host/dir2\n"
        self.workflow_file_content += "JOB D cat ssh://host/file2\n"
        self.workflow_file_content += "PARENT B D CHILD A C\n"
        self.workflow_file_content += "PARENT B CHILD D \n"
        self.workflow_file_content += "PARENT A CHILD C\n"
        self.run_workflow()
        inorder.verify(self.filesystem).remove(["ssh://host/file"])
        inorder.verify(self.filesystem).cat(["ssh://host/file2"])
        inorder.verify(self.filesystem).list_dir("ssh://host/dir")
        inorder.verify(self.filesystem).list_dir("ssh://host/dir2")

    def test_retries_run_failed_jobs(self):
        """A job that fails once is retried after a 5-second sleep."""
        when_list = when(self.filesystem).list_dir("ssh://host/dir")
        when_list.thenRaise(Exception).thenReturn("")
        self.workflow_file_content = "JOB A ls ssh://host/dir\n"
        self.workflow_file_content += "JOB B rm ssh://host/file\n"
        self.run_workflow()
        verify(self.filesystem, times=2).list_dir("ssh://host/dir")
        self.sleep_mocked.assert_called_once_with(5)
        verify(self.filesystem).remove(["ssh://host/file"])

    def test_returns_statistics_about_failed_jobs(self):
        """run() returns the ids of jobs that (transitively) failed."""
        when(self.filesystem).remove(["ssh://host/file"]).thenRaise(Exception)
        self.workflow_file_content = "JOB A ls ssh://host/dir\n"
        self.workflow_file_content += "JOB B rm ssh://host/file\n"
        self.workflow_file_content += "JOB C cat ssh://host/file2"
        stats = self.run_workflow()
        self.assertEqual(stats, ['B', 'C'])

    def run_workflow(self):
        """Run the workflow with open() and time.sleep patched out.

        NOTE(review): '__builtin__.open' is the Python 2 location of the
        open builtin; on Python 3 this would be 'builtins.open'.
        """
        self.open_mocked = mock_open(read_data=self.workflow_file_content)
        with patch('__builtin__.open', self.open_mocked):
            with patch('time.sleep', self.sleep_mocked):
                return self.workflow.run(self.workflow_file_path)
|
#-*- coding:utf8 -*-
__author__ = 'meixqhi'
import re
import json
from django.core.urlresolvers import reverse
from djangorestframework.views import ModelView
from djangorestframework.response import ErrorResponse
from djangorestframework import status
from shopback.orders.models import Order,Trade
from shopback.trades.models import MergeTrade
from shopback.items.models import Item
from shopback.users.models import User
from shopback.base.views import ModelView,FileUploadView
from shopapp.memorule.models import (TradeRule,
ProductRuleField,
RuleMemo,
ComposeRule,
ComposeItem)
from common.utils import parse_datetime
from auth import apis
# Matches strings made solely of word characters (letters, digits, '_');
# used below to validate product/sku codes parsed from uploaded CSV rows.
CHAR_NUMBER_REGEX = re.compile('^\w+$')
import logging
# Standard Django request logger.
logger = logging.getLogger('django.request')
def to_memo_string(memo):
    """Render a trade-memo dict as the plain-text backend memo format.

    Parameters
    ----------
    memo: dict with keys "post" (postage note), "addr" (address) and
        "data", a list of product dicts each holding "pid" and a
        "property" mapping of attribute name -> value.

    Returns
    -------
    str: post line, addr line, then one "pid|key:value|..." line per
    product, all joined with CRLF.
    """
    lines = [memo["post"], memo["addr"]]
    for product in memo["data"]:
        parts = [product["pid"]]
        # .items() instead of the Python-2-only .iteritems(): iteration
        # behaviour is identical on Python 2 and keeps this helper portable.
        for key, value in product["property"].items():
            parts.append(key + ":" + value)
        lines.append("|".join(parts))
    return "\r\n".join(lines)
def update_trade_memo(trade_id,trade_memo,user_id):
    """Store a trade memo (serialized as JSON) against the trade id.

    Creates or updates the RuleMemo row keyed by trade_id, marking it as
    not-yet-used. Returns a {"success": bool, ...} status dict; failures
    are reported in the return value rather than raised.
    """
    try:
        rule_memo, created = RuleMemo.objects.get_or_create(pk=trade_id)
        rule_memo.rule_memo = json.dumps(trade_memo)
        rule_memo.is_used = False
        rule_memo.save()
    # Python-2-only except syntax; 'exc' is currently unused.
    except Exception,exc:
        return {"success": False, "message":"write memo to backend failed"}
    # Sync the memo text to the Taobao backend (currently disabled).
    # try:
    #     ms = to_memo_string(trade_memo)
    #     response = apis.taobao_trade_memo_update(tid=trade_id,memo=ms,tb_user_id=user_id)
    #     trade_rep = response['trade_memo_update_response']['trade']
    #     if trade_rep:
    #         MergeTrade.objects.filter(tid=trade_rep['tid']).update(modified=parse_datetime(trade_rep['modified']))
    # except:
    #     pass
    return {"success": True}
class UpdateTradeMemoView(ModelView):
    """API view that stores a trade memo for later processing.

    Expects a ``params`` request argument holding a dict-literal with at
    least ``tid`` (trade id) and ``sid`` (seller/visitor id).
    """
    def get(self, request, *args, **kwargs):
        content = request.REQUEST
        # SECURITY NOTE(review): eval() on request-supplied data executes
        # arbitrary Python — this should be json.loads or ast.literal_eval.
        params = eval(content.get("params"))
        trade_id = params.get('tid')
        user_id = params.get('sid')
        try:
            profile = User.objects.get(visitor_id=user_id)
        except User.DoesNotExist:
            # Unknown seller: report failure in-band instead of raising.
            return {"success":False, "message":"no such seller id: "+user_id}
            #raise ErrorResponse("the seller id is not record!")
        return update_trade_memo(trade_id,params,user_id=profile.visitor_id)
    # POST requests are handled identically to GET.
    post = get
class ProductRuleFieldsView(ModelView):
    """Return, for each requested outer item id, the serialized
    ProductRuleField rows attached to it."""

    def get(self, request, *args, **kwargs):
        """Read the comma-separated ``out_iids`` request parameter and
        answer a list of [outer_id, [rule_field_json, ...]] pairs."""
        requested_ids = request.REQUEST.get('out_iids').split(',')
        result = []
        for outer_id in requested_ids:
            serialized = [
                rule_field.to_json()
                for rule_field in ProductRuleField.objects.filter(outer_id=outer_id)
            ]
            result.append([outer_id, serialized])
        return result

    # POST requests are handled identically to GET.
    post = get
class ComposeRuleByCsvFileView(FileUploadView):
    """Upload view that builds ComposeRule / ComposeItem records from a CSV.

    Each CSV row is either a rule header (serial number present) or an
    item row belonging to the most recently created rule. Expected column
    order: serial no, product code, product name, sku code, sku name,
    product num.
    """
    # Subdirectory / filename pattern used by FileUploadView when saving.
    file_path = 'product'
    filename_save = 'composerule_%s.csv'
    def get(self, request, *args, **kwargs):
        # GET is intentionally a no-op; only file-upload POSTs are handled.
        pass
    # --- column accessors for one parsed CSV row ---
    def getSerialNo(self,row):
        return row[0]
    def getProductCode(self,row):
        return row[1]
    def getProductName(self,row):
        return row[2]
    def getSkuCode(self,row):
        return row[3]
    def getSkuName(self,row):
        return row[4]
    def getProductNum(self,row):
        return row[5]
    def createComposeRule(self,row):
        """Create/update the split-type ComposeRule for a header row.

        Returns the rule, or None when the product code fails validation.
        """
        product_code = self.getProductCode(row)
        # Skip rows whose code contains anything but word characters.
        if not CHAR_NUMBER_REGEX.match(product_code):
            return
        sku_code = self.getSkuCode(row)
        cr,state = ComposeRule.objects.get_or_create(outer_id=product_code,
                                                     outer_sku_id=sku_code)
        cr.type = ComposeRule.RULE_SPLIT_TYPE
        cr.extra_info = self.getProductName(row)+self.getSkuName(row)
        cr.save()
        return cr
    def createComposeItem(self,row,rule=None):
        """Attach an item row to *rule*; no-op without a valid rule/code."""
        product_code = self.getProductCode(row)
        if not (rule and CHAR_NUMBER_REGEX.match(product_code)):
            return
        sku_code = self.getSkuCode(row)
        ci,state = ComposeItem.objects.get_or_create(compose_rule=rule,
                                                     outer_id=product_code,
                                                     outer_sku_id=sku_code)
        ci.num = self.getProductNum(row)
        ci.extra_info = self.getProductName(row)+self.getSkuName(row)
        ci.save()
    def handle_post(self,request,csv_iter):
        """Walk the uploaded CSV: header rows open a new rule, subsequent
        rows become items of that rule."""
        encoding = self.getFileEncoding(request)
        cur_rule = None
        for row in csv_iter:
            row = [r.strip().decode(encoding) for r in row]
            # Python-2 print statement — presumably leftover debug output.
            print row
            # A serial number marks a rule-header row.
            if self.getSerialNo(row):
                cur_rule = self.createComposeRule(row)
                continue
            self.createComposeItem(row,rule=cur_rule)
        return {'success':True,
                'redirect_url':reverse('admin:memorule_composerule_changelist')}
|
from flask import Flask, render_template
# Minimal Flask app that serves a single template at the site root.
app = Flask(__name__)
@app.route("/")
def home():
    """Render the landing page template."""
    return render_template("custom.html")
if __name__ == "__main__":
    # Disable static-file caching so template/asset edits show up
    # immediately during development.
    app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
    # NOTE(review): debug=True is for local development only.
    app.run(debug=True)
|
"""
Convert CSV language file to JS and PY files
"""
import sys
import csv
SEPERATE_FIELD = ';'
CLIENT_LANG = 'lang.js'
SERVER_LANG = 'i18n.py'
FILE = sys.argv[1]
FIRST = True
LANG_DICT = []
with open(FILE, 'rb') as csvfile:
LINES = csv.reader(csvfile, delimiter=';', quotechar='"')
for line in LINES:
if FIRST:
# Create the dictionnary for each lang
for index, lang in enumerate(line):
if index > 0:
code = lang[lang.index('(')+1: lang.index(')')]
lang = {
'name': lang,
'code': code,
'values': []
}
LANG_DICT.append(lang)
FIRST = False
else:
key = line[0]
for index, value in enumerate(line):
if index > 0 and key and value:
lang = LANG_DICT[index-1]
entry = {
'key': key.strip(),
'value': value.strip()
}
lang['values'].append(entry)
# Write client file
CLIENT = open(CLIENT_LANG, 'w')
CLIENT.write("(function()\n{\n")
CLIENT.write(" /* global angular */\n 'use strict'; // jshint ignore:line\n\n")
CLIENT.write(" angular.module(APPLICATION_NAME)\n")
CLIENT.write(" .config(['$translateProvider', function($translateProvider)\n")
CLIENT.write(" {\n")
for lang in LANG_DICT:
CLIENT.write(" $translateProvider.translations('" + lang['code'] + "',\n")
CLIENT.write(" {\n")
for index, entry in enumerate(lang['values']):
CLIENT.write(' "' + entry['key'] + '": "' +
entry['value'].replace('"', '\\"').replace('\n', '\\n" +\n ' + (' ' * len(entry['key'])) + '"') + '"')
if index < len(lang['values'])-1:
CLIENT.write(",\n")
CLIENT.write("\n });\n\n")
CLIENT.write(" $translateProvider.preferredLanguage((navigator.language !== null ? navigator.language : " +
"navigator.browserLanguage).split(\"_\")[0].split(\"-\")[0]);\n")
CLIENT.write(" $translateProvider.fallbackLanguage('en');\n")
CLIENT.write(" }]);\n")
CLIENT.write("})();")
CLIENT.close() |
__all__ = ["create2api","configGenerator"] |
print("hello")
a=input("please enter no;")
print(a) |
from mnist import MNIST
import numpy as np
import os
from PIL import Image
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
# Import the modules
from sklearn.externals import joblib
from sklearn import datasets
from skimage.feature import hog
from sklearn.svm import LinearSVC
import numpy as np
# Read all EMNIST test and train data
# NOTE(review): hard-coded absolute Windows paths make this script
# machine-specific; a command-line argument or config would be portable.
mndata = MNIST('C:/Users/manoj/Documents/py_workspace/ocr/EMNIST/data')
X_train, y_train = mndata.load('C:/Users/manoj/Documents/py_workspace/ocr/EMNIST/data/emnist-letters-train-images-idx3-ubyte',
                'C:/Users/manoj/Documents/py_workspace/ocr/EMNIST/data/emnist-letters-train-labels-idx1-ubyte')
'''
X_train, y_train = mndata.load('C:/Users/manoj/Documents/py_workspace/ocr/EMNIST/data/emnist-letters-test-images-idx3-ubyte',
                'C:/Users/manoj/Documents/py_workspace/ocr/EMNIST/data/emnist-letters-test-labels-idx1-ubyte')
'''
# Read mapping of the labels and convert ASCII values to chars
print("Read mapping of the labels and convert ASCII values to chars")
mapping = []
with open('C:/Users/manoj/Documents/py_workspace/ocr/EMNIST/data/emnist-letters-mapping.txt') as f:
    for line in f:
        # Each mapping line is "<label> <ascii-code>"; keep the character.
        mapping.append(chr(int(line.split()[1])))
print("Convert data to numpy arrays and normalize images to the interval [0, 1]")
# Convert data to numpy arrays and normalize images to the interval [0, 1]
print(len(X_train))
print(len(y_train))
# uncomment below lines to save the image file of emnist
'''
print(X_train[0])
i=0
#im = Image.frombytes('L',(28,28),X_train[0])
for temp in X_train:
    temp=np.reshape(temp,(28,28))
    temp=temp*255
    im = Image.fromarray(temp).convert('L')
    directory= str(y_train[i])
    if not os.path.exists(directory):
        os.makedirs(directory)
    im.save(directory+"/"+str(i)+'.png')
    i=i+1
'''
X_train = np.array(X_train, 'int16') / 255
y_train = np.array(y_train,'int')
'''
X_test = np.array(X_test[:100]) / 255
y_test = np.array(y_test[:100])
'''
print("Creating np array of feature and labels")
features = X_train
labels = y_train
print(set(y_train))
#print(y_train[:10])
# Extract a HOG descriptor for each 28x28 image.
list_hog_fd = []
for feature in features:
    # NOTE(review): 'visualise' is the old skimage parameter spelling;
    # newer releases renamed it to 'visualize' — confirm installed version.
    fd = hog(feature.reshape((28, 28)), orientations=9, pixels_per_cell=(14, 14), cells_per_block=(1, 1), visualise=False)
    list_hog_fd.append(fd)
hog_features = np.array(list_hog_fd, 'float64')
print("Initiating svc classifier")
clf = LinearSVC()
print("Map fea and label ")
clf.fit(hog_features, labels)
print("saving the model to digit pickel")
# NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23;
# the standalone 'joblib' package is the replacement — confirm environment.
joblib.dump(clf, "digits_cls.pkl", compress=3)
#clf = RandomForestClassifier(n_estimators=10, n_jobs=-1)
#cv_scores = cross_val_score(clf, X_train, y_train, cv=10)
#print('Mean accuracy: ', cv_scores.mean())
#print(' Std dev: ', cv_scores.std())
'''
# Import the modules
from sklearn.externals import joblib
from sklearn import datasets
from skimage.feature import hog
from sklearn.svm import LinearSVC
import numpy as np
#download the data set
print("Getting Mnist dataset for manipulation ")
dataset = datasets.fetch_mldata("MNIST Original")
print("Creating np array of feature and labels")
features = np.array(dataset.data, 'int16')
labels = np.array(dataset.target, 'int')
list_hog_fd = []
for feature in features:
    fd = hog(feature.reshape((28, 28)), orientations=9, pixels_per_cell=(14, 14), cells_per_block=(1, 1), visualise=False)
    list_hog_fd.append(fd)
hog_features = np.array(list_hog_fd, 'float64')
print("Initiating svc classifier")
clf = LinearSVC()
print("Map fea and label ")
clf.fit(hog_features, labels)
print("saving the model to digit pickel")
joblib.dump(clf, "digits_cls.pkl", compress=3)
'''
|
def main():
    """Ask the user's age (and, for adults, whether they hold an official
    ID) and report whether a driving-licence request can proceed."""
    edad = int(input("Ingresa tu edad: "))
    # Non-positive ages are rejected outright.
    if edad <= 0:
        print("Respuesta incorrecta")
        return
    # Minors never qualify.
    if edad < 18:
        print("No cumples requisitos")
        return
    identificacion = input("¿Tienes identificación oficial? (s/n): ")
    if identificacion != "s" and identificacion != "n":
        print("Respuesta incorrecta")
    elif identificacion == "n":
        print("No cumples requisitos")
    else:
        # Here edad >= 18 and identificacion == "s" are both guaranteed.
        print("Trámite de licencia concedido")


if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.