text stringlengths 8 6.05M |
|---|
import caldav
from caldav.elements import dav, cdav
url = "https://chris.hyser@oracle.com:ATPA29bY@stbeehiveonline.oracle.com/caldav/Oracle/home/chris.hyser@oracle.com/calendars/MyCalendar"
client = caldav.DAVClient(url)
principal = caldav.Principal(client, url)
calendars = principal.calendars()
if len(calendars) > 0:
calendar = calendars[0]
print "Using calendar", calendar
print "Renaming"
calendar.set_properties([dav.DisplayName("Test calendar"),])
print calendar.get_properties([dav.DisplayName(),])
#event = caldav.Event(client, data = vcal, parent = calendar).save()
#print "Event", event, "created"
print "Looking for events after 2010-05-01"
results = calendar.date_search(datetime(2013, 1, 1))
for event in results:
print "Found", event
|
# Generated by Django 3.0.2 on 2020-03-21 20:50
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop the UserData model and re-declare defaults on three Task fields."""

    dependencies = [
        ('tasks', '0039_auto_20200320_1610'),
    ]

    operations = [
        # Removes the UserData model (and drops its table).
        migrations.DeleteModel(
            name='UserData',
        ),
        # NOTE(review): these defaults are the values makemigrations captured
        # at generation time (2020-03-21 20:50) -- the model apparently used a
        # non-callable default, so every defaulted row gets this fixed moment,
        # not "now".  Left exactly as generated; fix belongs in the model.
        migrations.AlterField(
            model_name='task',
            name='due_date',
            field=models.DateField(default=datetime.date(2020, 3, 21), verbose_name='due date'),
        ),
        migrations.AlterField(
            model_name='task',
            name='due_time',
            field=models.TimeField(default=datetime.time(20, 50, 1, 808826), verbose_name='due time'),
        ),
        migrations.AlterField(
            model_name='task',
            name='time_estimate',
            field=models.DurationField(default=datetime.timedelta(0), verbose_name='time estimate'),
        ),
    ]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
def iris_data_load():
    """Load the iris dataset as a DataFrame with a trailing "label" column."""
    bunch = datasets.load_iris()
    frame = pd.DataFrame(data=bunch.data, columns=bunch.feature_names)
    frame["label"] = bunch.target
    return frame
def kmeans(k, X, max_iter=300):
    """Cluster the rows of X into k groups with Lloyd's algorithm.

    Args:
        k: number of clusters.
        X: (n_samples, n_features) array of observations.
        max_iter: upper bound on assignment/update sweeps.

    Returns:
        1-D float array of length n_samples holding each row's cluster index.
    """
    X_size, n_features = X.shape
    # Random rows of X serve as the initial centroids (with replacement,
    # matching the original behaviour).
    centroids = X[np.random.choice(X_size, k)]
    cluster = np.zeros(X_size)
    for _ in range(max_iter):
        # Assignment step: nearest centroid by squared distance (the square
        # root is monotone, so it can be skipped).
        for i in range(X_size):
            distances = np.sum((centroids - X[i]) ** 2, axis=1)
            cluster[i] = np.argmin(distances)
        # Update step: recompute each centroid from its members.
        # BUG FIX: the original reused one array and then did
        # `centroids = new_centroids`, aliasing both names to the same
        # buffer, and its convergence test `np.sum(new == old) == k`
        # compared against k instead of k * n_features -- together the
        # early exit never fired reliably.  A fresh array per sweep plus
        # np.array_equal fixes both.
        new_centroids = np.zeros((k, n_features))
        for j in range(k):
            members = X[cluster == j]
            # Keep the previous centroid when a cluster becomes empty
            # (the original produced NaNs from an empty mean).
            new_centroids[j] = members.mean(axis=0) if len(members) else centroids[j]
        # Stop once the centroids no longer move.
        if np.array_equal(new_centroids, centroids):
            print("break")
            break
        centroids = new_centroids
    return cluster
if __name__ == '__main__':
    df = iris_data_load()
    # All feature columns (everything except the trailing "label").
    input_data = df.iloc[:, :-1].values
    cluster = kmeans(3, input_data)
    df["cluster"] = cluster
    # "cmap" controls the scatter-plot colouring (translated comment).
    df.plot(kind="scatter", x=0, y=1, c="label", cmap="winter")
    plt.title("true label")
    plt.show()
    df.plot(kind="scatter", x=0, y=1, c="cluster", cmap="winter")
    # BUG FIX: title typo "clustering relust" -> "clustering result".
    plt.title("clustering result")
    plt.show()
import uuid
from django.test import TestCase
from model_mommy import mommy
from core.models import get_file_path
class GetFilePathTestCase(TestCase):
    """get_file_path() should emit a uuid4-based name keeping the extension."""

    def setUp(self):
        # Reference filename with the same shape the helper should produce.
        self.filename = f'{uuid.uuid4()}.png'

    def test_file_path(self):
        arquivo = get_file_path(None, 'teste.png')
        # BUG FIX: assertTrue(a, b) treats b as the failure *message* and
        # always passes for truthy a; an equality check was intended.
        self.assertEqual(len(arquivo), len(self.filename))
class ServiceTestCase(TestCase):
    """str(Service) must equal its `service` field."""

    def setUp(self):
        self.service = mommy.make('Service')

    def test_str(self):
        # assertEquals is a deprecated alias; assertEqual is the current API.
        self.assertEqual(str(self.service), self.service.service)
class PositionTestCase(TestCase):
    """str(Position) must equal its `position` field."""

    def setUp(self):
        self.position = mommy.make('Position')

    def test_str(self):
        # assertEquals is a deprecated alias; assertEqual is the current API.
        self.assertEqual(str(self.position), self.position.position)
class EmployeeTestCase(TestCase):
    """str(Employee) must equal its `name` field."""

    def setUp(self):
        self.employee = mommy.make('Employee')

    def test_str(self):
        # assertEquals is a deprecated alias; assertEqual is the current API.
        self.assertEqual(str(self.employee), self.employee.name)
class FeatureTestCase(TestCase):
    """str(Feature) must equal its `title` field."""

    def setUp(self):
        self.feature = mommy.make('Feature')

    def test_str(self):
        # assertEquals is a deprecated alias; assertEqual is the current API.
        self.assertEqual(str(self.feature), self.feature.title)
class PlanTestCase(TestCase):
    """str(Plan) must equal its `name` field."""

    def setUp(self):
        self.plan = mommy.make('Plan')

    def test_str(self):
        # assertEquals is a deprecated alias; assertEqual is the current API.
        self.assertEqual(str(self.plan), self.plan.name)
class ClientTestCase(TestCase):
    """str(Client) must equal its `name` field."""

    def setUp(self):
        # NOTE(review): self.client shadows django.test.TestCase's built-in
        # HTTP test client attribute; harmless here, but renaming the fixture
        # would avoid surprises if these tests ever make requests.
        self.client = mommy.make('Client')

    def test_str(self):
        # assertEquals is a deprecated alias; assertEqual is the current API.
        self.assertEqual(str(self.client), self.client.name)
|
import re
from math import floor
from discord import Embed, Color
from discord.ext.commands.errors import CommandInvokeError
from tinydb import Query
from tinydb.operations import set
from time import time
from Utilities.Database import commissionsTable
from Utilities.ConfigurationsHelper import get_configuration
def update_commission(guild_id, user_id, name, property, value):
    """Set `property` to `value` on the matching commission record.

    Matches on guild + user + commission name (literal, case-insensitive).
    Returns the number of records updated.
    """
    query = Query()
    return len(
        commissionsTable.update(
            # `set` here is tinydb.operations.set (it shadows the builtin).
            set(property, value),
            (query.guildId == guild_id)
            & (query.userId == user_id)
            # BUG FIX: the original called re.re.escape, which raises
            # AttributeError; the stdlib function is re.escape (as the
            # sibling add_commission already uses).
            & (query.commission.matches(re.escape(name), flags=re.IGNORECASE)),
        )
    )
def add_commission(guild_id, user_id, name, days):
    """Assign a commission to a user unless one with the same name exists.

    Returns a {"success": bool, "message": str} result dict.
    """
    query = Query()
    existing = commissionsTable.search(
        (query.guildId == guild_id)
        & (query.userId == user_id)
        & (query.commission.matches(re.escape(name), flags=re.IGNORECASE))
    )
    if existing:
        return {"success": False, "message": "This commission is already assigned"}

    # Fresh record: no notices have been sent yet.
    record = {
        "guildId": guild_id,
        "userId": user_id,
        "timestamp": time(),
        "commission": name,
        "days": days,
        "finalNoticeReceived": False,
        "intervalNoticeReceived": None,
    }
    commissionsTable.insert(record)
    return {"success": True, "message": "Commission successfully assigned"}
def remove_commission(guild_id, user_id, name=None):
    """Remove commissions for a user; with `name`, only the matching one.

    Returns {"success": True, "payload": <number of records removed>}.
    """
    query = Query()
    base = (query.guildId == guild_id) & (query.userId == user_id)
    # Idiom fix: compare to None with `is`, not `==`.
    if name is None:
        removed = commissionsTable.remove(base)
    else:
        removed = commissionsTable.remove(
            # Consistency fix: escape the name like the sibling helpers do,
            # so regex metacharacters in a commission name match literally.
            base
            & (query.commission.matches(re.escape(name), flags=re.IGNORECASE))
        )
    return {"success": True, "payload": len(removed)}
async def update_all_commissions(bot):
    """Sweep every stored commission and post expiry / warning notices.

    NOTE(review): each bare `return` below exits the whole function, not just
    one loop iteration, so at most one commission triggers handling per call;
    `continue` looks intended -- confirm with the author before relying on a
    full sweep.  Code left unchanged.
    """
    for commission in commissionsTable.all():
        ## GET GENERAL VALUES ##
        guild_id = commission.get("guildId")
        channel_id = get_configuration(guild_id, "BOT_CHANNEL")
        admin_role_id = get_configuration(guild_id, "ADMIN_ROLE")
        user_id = commission.get("userId")
        guild = bot.get_guild(guild_id)
        channel = guild.get_channel(channel_id)
        admin_role = guild.get_role(admin_role_id)
        user = guild.get_member(user_id)
        name = commission.get("commission")
        timestamp = commission.get("timestamp")
        days = commission.get("days")
        current_time = time()
        final_notice_days_before = get_configuration(
            guild_id, "FINAL_NOTICE_DAYS_BEFORE"
        )
        notice_days_interval = get_configuration(guild_id, "NOTICE_DAYS_INTERVAL")
        # Deadline in epoch seconds: start time plus the allotted days.
        expire_time = timestamp + (days * 86400)
        days_left = round((expire_time - current_time) / 86400)
        final_warning_time = expire_time - (final_notice_days_before * 86400)
        # Interval index counting down toward expiry.  NOTE(review): the
        # lambda's `time` parameter shadows the imported time() function
        # inside its body.
        get_interval = (
            lambda time: floor((expire_time - time) / (notice_days_interval * 86400))
            + 1
        )
        current_interval = get_interval(current_time)
        maximum_interval = get_interval(timestamp)
        ## CHECK IF COMMISSION HAS EXPIRED ##
        expired_notice = get_configuration(guild_id, "EXPIRED_NOTICE")
        expired_notice = expired_notice.format(
            admin_role_mention=admin_role.mention,
            user_mention=user.mention,
            commission_name=name,
        )
        if current_time > expire_time:
            embed = Embed(
                title="Cancellation Notice",
                colour=Color.red(),
            )
            embed.set_author(name=user.display_name, icon_url=user.avatar_url)
            await channel.send(expired_notice)
            await channel.send(embed=embed)
            # Expired commissions are deleted from the table.
            remove_commission(guild_id, user_id, name)
            return
        ## CHECK IF FINAL WARNING NEEDS TO BE ISSUES ##
        final_notice_received = commission.get("finalNoticeReceived")
        days_remaining_forfeit_warning = get_configuration(
            guild_id, "DAYS_REMAINING_FORFEIT_WARNING"
        )
        days_remaining_forfeit_warning = days_remaining_forfeit_warning.format(
            user_mention=user.mention,
            remaining_days=days_left,
            commission_name=name,
        )
        if (current_time > final_warning_time) and not (final_notice_received):
            embed = Embed(
                title="Final Notice",
                colour=Color.orange(),
            )
            embed.set_author(name=user.display_name, icon_url=user.avatar_url)
            await channel.send(days_remaining_forfeit_warning)
            await channel.send(embed=embed)
            # Persist the flag so the final notice is only sent once.
            update_commission(
                guild_id,
                user_id,
                name,
                "finalNoticeReceived",
                True,
            )
            final_notice_received = True
        # After the final notice, no further interval notices are issued.
        if final_notice_received:
            return
        ## CHECK IF INTERVAL WARNING NEEDS TO BE ISSUED ##
        interval_notice_received = commission.get("intervalNoticeReceived")
        days_remaining_warning = get_configuration(guild_id, "DAYS_REMAINING_WARNING")
        days_remaining_warning = days_remaining_warning.format(
            user_mention=user.mention,
            remaining_days=days_left,
            commission_name=name,
        )
        # First sweep for this commission: seed the stored interval index.
        if interval_notice_received == None:
            update_commission(
                guild_id, user_id, name, "intervalNoticeReceived", maximum_interval
            )
            interval_notice_received = maximum_interval
        # A smaller current interval means an interval boundary was crossed.
        if interval_notice_received > current_interval:
            embed = Embed(
                title="Regular Notice",
                colour=Color.green(),
            )
            embed.set_author(name=user.display_name, icon_url=user.avatar_url)
            await channel.send(days_remaining_warning)
            await channel.send(embed=embed)
            update_commission(
                guild_id,
                user_id,
                name,
                "intervalNoticeReceived",
                current_interval,
            )
            return
|
from random import shuffle

# Build the word list, randomise its order in place, and display it.
x = 'Tener El Azul Bandera Volar Alto'.split()
shuffle(x)
print(x)
# -*- coding: utf-8 -*-
# @Author: Fallen
# @Date: 2020-04-03 19:09:03
# @Last Modified by: Fallen
# @Last Modified time: 2020-04-03 21:43:08
'''
小易喜欢的单词具有以下特性:
1.单词每个字母都是大写字母
2.单词没有连续相等的字母
例如:
小易不喜欢"ABBA",因为这里有两个连续的'B'
小易喜欢"A","ABA"和"ABCBA"这些单词
给你一个单词,你要回答小易是否会喜欢这个单词。
'''
def func():
    """Read a word and report whether Xiao Yi likes it.

    A liked word is entirely uppercase and has no two equal adjacent
    letters (docstring translated from the original Chinese comments).
    """
    word = input("请输入一个单词:")
    # Hoisted out of the loop: word.isupper() does not depend on the index,
    # so the original re-tested it every iteration for no reason.
    # (Edge case: for an empty word isupper() is False, so this now prints
    # the "not uppercase" message instead of silently liking "".)
    if not word.isupper():
        print("小明不喜欢。没大写~")
        return
    # Compare each adjacent pair of characters.
    for left, right in zip(word, word[1:]):
        if left == right:
            print("小明不喜欢。叠词~")
            return
    print("小明喜欢。")
def main():
    # Single entry point: run the interactive word check once.
    func()

if __name__ == "__main__":
    main()
|
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, ForeignKey, Integer, String, Boolean, Text, DateTime
from sqlalchemy.orm import relationship
import datetime
Base = declarative_base()
# The user model is kept basic.
# Maybe add a relation to Blogpost?
class User(Base):
    """An account holder; referenced by Blogpost.author_id."""

    __tablename__ = "users"

    id = Column(Integer, primary_key=True)
    name = Column(String(80), unique=True, nullable=False)
    email = Column(String(80), unique=True, nullable=False)
    # NOTE(review): stored as a plain String -- ensure callers store a hash,
    # never the raw password.
    password = Column(String(80))
    # True for administrator accounts.
    admin = Column(Boolean)
    # Purpose unclear from this file -- TODO confirm or remove.
    test = Column(String(50))
# The blogpost model is simple, but contains ForeignKey for relations to users
# Should also contain a relation to comment feature...
class Blogpost(Base):
    """A blog post authored by a User, with category and comment relations."""

    __tablename__ = "blogpost"

    id = Column(Integer, primary_key=True)
    name = Column(String(120), nullable=False)
    content = Column(Text, nullable=False)
    author_id = Column(Integer, ForeignKey('users.id'), nullable=False)
    # BUG FIX: the original passed datetime.datetime.utcnow() -- evaluated
    # once at import time, stamping every row with the module-load time.
    # Passing the callable makes SQLAlchemy evaluate it per INSERT.
    created_at = Column(DateTime, default=datetime.datetime.utcnow)
    # NOTE(review): this default only fires on INSERT; a true "modified"
    # timestamp also needs onupdate=datetime.datetime.utcnow.
    modified_at = Column(DateTime, default=datetime.datetime.utcnow)
    category = relationship('Category', backref='blogpost')
    comment = relationship('Comment', backref='blogpost')
class Category(Base):
    """A category label attached to a blog post."""

    __tablename__ = "category"

    id = Column(Integer, primary_key=True)
    name = Column(String(50), nullable=False)
    # NOTE(review): one category row per post (many-to-one); a many-to-many
    # association table may be the eventual intent.
    blogpost_id = Column(Integer, ForeignKey('blogpost.id'))
# just temporary... need to make this spam safe.
class Comment(Base):
    """A visitor comment on a blog post (original note: not yet spam-safe)."""

    __tablename__ = "comments"

    id = Column(Integer, primary_key=True)
    email = Column(String(50), nullable=False)
    content = Column(String(200), nullable=False)
    blogpost_id = Column(Integer, ForeignKey('blogpost.id'))
    # BUG FIX: pass the callable, not its result, so each row gets its own
    # insert-time timestamp (the original froze the import-time value).
    created_at = Column(DateTime, default=datetime.datetime.utcnow)
# table for handling blacklisted tokens.
class BlacklistToken(Base):
    """A revoked auth token; presence in this table invalidates the token."""

    __tablename__ = "blacklist_tokens"

    id = Column(Integer, primary_key=True)
    token = Column(String(500), unique=True, nullable=False)
    # BUG FIX: default=datetime.datetime.now() was evaluated once at import
    # time; passing the callable stamps each row when it is inserted.
    blacklisted_on = Column(DateTime, default=datetime.datetime.now)
|
import csv
import urllib2
import logging
from models.callout import CallOut
DOWNLOAD_URL_2011 = "http://www.dublinked.ie/datastore/server/FileServerWeb/FileChecker?metadataUUID=8032b927305d45558a3903020e740f63&filename=DCC_FireBrigadeAmbulanceIncidents2011.csv"
def get_file(url):
    """Open `url` and return the file-like HTTP response."""
    response = urllib2.urlopen(url)
    return response
def get_data():
    """Download the 2011 callout CSV and store each row in the datastore."""
    callout_file = csv.reader(get_file(DOWNLOAD_URL_2011))
    # Skip the header row.
    next(callout_file)
    # enumerate replaces the hand-rolled counter; start=2 keeps the logged
    # numbers identical to the original (which logged counter+1 per row).
    for i, row in enumerate(callout_file, 2):
        # create an instance of CallOut from the csv row
        # and save it to the datastore
        CallOut.from_csv(row).put()
        logging.info(i)
|
# Generated by Django 2.1.3 on 2018-11-06 10:51
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the Staff model to Employee (renames the table and model state)."""

    dependencies = [
        ('core', '0021_auto_20181106_2349'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Staff',
            new_name='Employee',
        ),
    ]
|
# coding=utf-8
# tester, given the config with model path
import tensorflow as tf
import numpy as np
class Tester():
    """Run a trained model over evaluation batches (TF1 session API).

    Wraps a model whose get_feed_dict() builds the feed dict and whose
    `yp` tensor holds the prediction output.
    """
    def __init__(self,model,config,sess=None):
        # `sess` is accepted but unused here; sessions are passed per step.
        self.config = config
        self.model = model
        self.yp = self.model.yp # the output of the model # [N,M,JX]

    def step(self,sess,batch):
        """Forward one batch; return predictions trimmed to the true batch size."""
        # give one batch of Dataset, use model to get the result,
        assert isinstance(sess,tf.Session)
        batchIdxs,batch_data = batch
        feed_dict = self.model.get_feed_dict(batch_data,is_train=False)
        yp, = sess.run([self.yp],feed_dict=feed_dict)
        # clip the output
        # yp should be [N,4]
        # The last batch may be padded; keep only the real examples.
        yp = yp[:batch_data.num_examples]
        return yp

    def trim(self,input_s,num):
        """Trim each ndarray in input_s to its first `num` rows; non-arrays become -1."""
        return [ one[:num] if type(one) == type(np.array(0)) else -1 for one in input_s ]

    # get all the value needed for visualization
    def step_vis(self,sess,batch):
        """Like step(), but also fetch attention / mask / intermediate tensors.

        NOTE(review): the fetch list and the return tuple must stay in the
        same order -- treat the long sess.run line as position-sensitive.
        """
        # give one batch of Dataset, use model to get the result,
        assert isinstance(sess,tf.Session)
        batchIdxs,batch_data = batch
        feed_dict = self.model.get_feed_dict(batch_data,is_train=False)
        yp,C,C_win,att_logits,q_att_logits,at_mask,ad_mask,when_mask,where_mask,pts_mask,pis_mask,q_mask,hat_len,had_len,hwhen_len,hwhere_len,hpts_len,hpis_len,JXP,warp_h,h,at,ad,when,where,pts,pis,q = sess.run([self.yp,self.model.C,self.model.C_win,self.model.att_logits,self.model.q_att_logits,self.model.at_mask,self.model.ad_mask,self.model.when_mask,self.model.where_mask,self.model.pts_mask,self.model.pis_mask,self.model.q_mask,self.model.hat_len,self.model.had_len,self.model.hwhen_len,self.model.hwhere_len,self.model.hpts_len,self.model.hpis_len,self.model.JXP,self.model.warp_h,self.model.hall,self.model.at,self.model.ad,self.model.when,self.model.where,self.model.pts,self.model.pis,self.model.q],feed_dict=feed_dict)
        # clip the output
        # yp should be [N,4]
        yp = yp[:batch_data.num_examples] # this is needed for the last batch
        # beware some will became -1 after trim
        C,C_win,att_logits,q_att_logits,at_mask,pts_mask,pis_mask,q_mask = self.trim([C,C_win,att_logits,q_att_logits,at_mask,pts_mask,pis_mask,q_mask],batch_data.num_examples)
        return yp,C,C_win,att_logits,q_att_logits,at_mask,ad_mask,when_mask,where_mask,pts_mask,pis_mask,q_mask,hat_len,had_len,hwhen_len,hwhere_len,hpts_len,hpis_len,JXP,warp_h,h,at,ad,when,where,pts,pis,q
#!/usr/bin/env /proj/sot/ska/bin/python
#############################################################################################################
# #
# exclude_srouces.py: remove the area around the main source and all point sources from data #
# probably this is a good one to use evt2 files as it takes too much time #
# run on evt1 file. The results save in Reg_files can be used to removed #
# sources from evt 1 files. #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# Last Update: Apr 07, 2016 #
# #
#############################################################################################################
import sys
import os
import string
import re
import copy
import math
import Cookie
import unittest
import time
import random
#
#--- from ska
#
from Ska.Shell import getenv, bash
# Shell environment needed to run ASCDS tools (sourced via a tcsh sub-shell).
ascdsenv = getenv('source /home/ascds/.ascrc -r release; source /home/mta/bin/reset_param ', shell='tcsh')
#ascdsenv['MTA_REPORT_DIR'] = '/data/mta/Script/ACIS/SIB/Correct_excess/Lev1/Reportdir/'
#
#--- reading directory list
#
path = '/data/mta/Script/Python_script2.7/dir_list_py'
f    = open(path, 'r')
data = [line.strip() for line in f.readlines()]
f.close()

# Each line of dir_list_py is "<path> : <var_name>"; bind each name to its
# path at module scope.  NOTE(review): exec of config-file contents (Python 2
# exec-statement syntax) -- only safe while dir_list_py stays trusted.
for ent in data:
    atemp = re.split(':', ent)
    var   = atemp[1].strip()
    line  = atemp[0].strip()
    exec "%s = %s" %(var, line)
#
#--- directory path
#
s_dir = '/data/mta/Script/ACIS/SIB/Correct_excess/'
b_dir = s_dir + 'Sib_corr/'
#
#--- append path to a private folders
#
sys.path.append(b_dir)
sys.path.append(mta_dir)

import mta_common_functions as mcf
import convertTimeFormat as tcnv
import sib_corr_functions as scf
#
#--- temp writing file name (unique-ish suffix from the current epoch second)
#
rtail  = int(time.time())
zspace = '/tmp/zspace' + str(rtail)
#-----------------------------------------------------------------------------------------
#-- exclude_sources: remove the area around the main source and all point sources from data
#-----------------------------------------------------------------------------------------
def exclude_sources(fits):
"""
remove the area around the main source and all point sources from data
input: fits --- input fits file name
output: out_name --- source removed fits file (<header>_ccd<ccd>_cleaned.fits)
"""
#
#--- read which ccds are used and several other info from fits header
#
cmd = ' dmlist ' + fits + ' opt=head > ' + zspace
scf.run_ascds(cmd)
data = scf.read_file(zspace, remove=1)
ccd_list = []
for ent in data:
mc = re.search('bias file used', ent)
if mc is not None:
atemp = re.split('CCD', ent)
val = atemp[1].strip()
ccd_list.append(val)
continue
for name in ['SIM_X', 'SIM_Y', 'SIM_Z', 'RA_NOM', 'DEC_NOM', 'ROLL_NOM', 'RA_TARG', 'DEC_TARG']:
mc = re.search(name, ent)
if mc is not None:
lname = name.lower()
atemp = re.split('\s+', ent)
val = atemp[2].strip()
exec "%s = %s" % (lname, val)
break
#
#--- sort ccd list
#
ccd_list.sort()
#
#--- guess a source center position on the sky coordinates from the information extracted from the header
#
cmd = ' dmcoords none none opt=cel '
cmd = cmd + ' ra=' + str(ra_targ) + ' dec=' + str(dec_targ )
cmd = cmd + ' sim="' + str(sim_x) + ' ' + str(sim_y) + ' ' + str(sim_z) + '" '
cmd = cmd + ' detector=acis celfmt=deg '
cmd = cmd + ' ra_nom=' + str(ra_nom) + ' dec_nom=' + str(dec_nom) + ' roll_nom=' + str(roll_nom) + ' '
cmd = cmd + ' ra_asp=")ra_nom" dec_asp=")dec_nom" verbose=1 >' + zspace
scf.run_ascds(cmd)
data = scf.read_file(zspace, remove=1)
for ent in data:
mc = re.search('SKY', ent)
if mc is not None:
atemp = re.split('\s+', ent)
skyx = atemp[1]
skyy = atemp[2]
break
#
#-- keep the record of the source position for the later use (e.g. used for evt1 processing);
#
o_fits = fits.replace('.gz', '')
coord_file = o_fits.replace('.fits', '_source_coord')
ofile = './Reg_files/' + coord_file
line = str(skyx) + ':' + str(skyy) + '\n'
fo = open(ofile, 'w')
fo.write(line)
fo.close()
#
#-- remove the 200 pix radius area around the source
#
cmd = ' dmcopy "' + fits + '[exclude sky=circle(' + skyx + ',' + skyy + ',200)]" '
cmd = cmd + ' outfile=source_removed.fits clobber="yes"'
scf.run_ascds(cmd)
#
#--- get a file size: will be used to measure the size of removed area later.
#--- assumption here is the x-ray hit ccd evenly, but of course it is not,
#--- but this is the best guess we canget
#
size = {}
for ccd in ccd_list:
cmd = ' dmcopy "' + fits + '[ccd_id=' + str(ccd) + ']" outfile=test.fits clobber=yes'
scf.run_ascds(cmd)
cmd = 'ls -l test.fits > ' + zspace
os.system(cmd)
data = scf.read_file(zspace, remove=1)
for line in data:
atemp = re.split('\s+', line)
if mcf.chkNumeric(atemp[4]):
size[ccd] = int(float(atemp[4]))
else:
size[ccd] = int(float(atemp[3]))
mcf.rm_file('test.fits')
#
#--- now separate observations to indivisual ccds
#
file_list = []
for ccd in ccd_list:
tail = '_ccd' + str(ccd) + '.fits'
out = o_fits.replace('.fits', tail)
file_list.append(out)
cmd = ' dmcopy "source_removed.fits[ccd_id=' + ccd + ']" outfile= ' + out + ' clobber=yes'
scf.run_ascds(cmd)
mcf.rm_file('source_removed.fits')
#
#--- process each ccd
#
for pfits in file_list:
reg_file = pfits.replace('.fits', '_block_src.reg')
#
#--- find point sources
#
cmd = ' celldetect infile=' + pfits
cmd = cmd + ' outfile=acisi_block_src.fits regfile=acisi_block_src.reg clobber=yes'
scf.run_ascds(cmd)
data = scf.read_file('acisi_block_src.reg')
exclude = []
for ent in data:
atemp = re.split('\,', ent)
#
#--- increase the area covered around the sources 3time to make sure leaks from a bright source is minimized
#
val2 = float(atemp[2]) * 3
val3 = float(atemp[3]) * 3
line = atemp[0] + ',' + atemp[1] + ',' + str(val2) + ',' + str(val3) +',' + atemp[4]
exclude.append(line)
out_name = pfits.replace('.gz','')
out_name = out_name.replace('.fits', '_cleaned.fits')
#
#--- if we actually found point sources, remove them from the ccds
#
e_cnt = len(exclude)
if e_cnt > 0:
cnt = 0
chk = 0
round = 0
line = ''
while cnt < e_cnt:
#
#--- remove 6 sources at a time so that it won't tax memory too much
#
for i in range(cnt, cnt + 6):
if i >= e_cnt:
chk += 1
break
if line == '':
line = exclude[i]
else:
line = line + '+' + exclude[i]
cnt += 6
if round == 0:
cmd = ' dmcopy "' + pfits + '[exclude sky=' + line +']" outfile=out.fits clobber="yes"'
scf.run_ascds(cmd)
round += 1
else:
cmd = 'mv out.fits temp.fits'
os.system(cmd)
cmd = ' dmcopy "temp.fits[exclude sky=' + line +']" outfile=out.fits clobber="yes"'
scf.run_ascds(cmd)
round += 1
if chk > 0:
break
else:
line = ''
mcf.rm_file('temp.fits')
cmd = 'mv out.fits ' + out_name
os.system(cmd)
else:
cmd = 'cp ' + pfits + ' ' + out_name
os.system(cmd)
#
#--- find the size of cleaned up file size
#
cmd = 'ls -l ' + out_name + '>' + zspace
os.system(cmd)
data = scf.read_file(zspace, remove=1)
for line in data:
atemp = re.split('\s+', line)
if mcf.chkNumeric(atemp[4]):
asize = float(atemp[4])
else:
asize = float(atempp[3])
for pccd in range(0, 10):
check = 'ccd' + str(pccd)
mc = re.search(check, out_name)
if mc is not None:
break
#
#--- compute the ratio of the cleaned to the original file; 1 - ratio is the potion that we removed
#--- from the original data
#
ratio = asize / float(size[str(pccd)])
#
#--- record the ratio for later use
#
fo = open('./Reg_files/ratio_table', 'a')
line = reg_file + ': ' + str(ratio) + '\n'
fo.write(line)
fo.close()
cmd = 'mv acisi_block_src.reg ./Reg_files/' + reg_file
os.system(cmd)
mcf.rm_file('acisi_block_src.fits')
#-----------------------------------------------------------------------------------------
if __name__ == '__main__':

    if len(sys.argv) > 1:
        fits = sys.argv[1]
        # BUG FIX: str.strip() returns a new string; the original discarded
        # the result, leaving any surrounding whitespace in place.
        fits = fits.strip()
        exclude_sources(fits)
|
"""Endpoints Class."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
from .ftds2svpns import FTDS2SVPNs
from fmcapi.api_objects.object_services.fqdns import FQDNS
from fmcapi.api_objects.object_services.hosts import Hosts
from fmcapi.api_objects.object_services.networks import Networks
from fmcapi.api_objects.object_services.networkgroups import NetworkGroups
from fmcapi.api_objects.device_ha_pair_services.ftddevicehapairs import FTDDeviceHAPairs
from fmcapi.api_objects.device_services.devicerecords import DeviceRecords
from fmcapi.api_objects.device_services.etherchannelinterfaces import (
EtherchannelInterfaces,
)
from fmcapi.api_objects.device_services.physicalinterfaces import PhysicalInterfaces
from fmcapi.api_objects.device_services.redundantinterfaces import RedundantInterfaces
from fmcapi.api_objects.device_services.subinterfaces import SubInterfaces
import logging
class Endpoints(APIClassTemplate):
    """The Endpoints Object in the FMC."""

    VALID_JSON_DATA = [
        "id",
        "name",
        "type",
        "device",
        "interface",
        "nattedInterfaceAddress",
        "protectedNetworks",
        "ipv6InterfaceAddress",
        "connectionType",
        "peerType",
        "extranet",
        "extranetInfo",
        "description",
        "version",
    ]
    VALID_FOR_KWARGS = VALID_JSON_DATA + []
    FIRST_SUPPORTED_FMC_VERSION = "6.3"
    VALID_FOR_POINT_TO_POINT = ["PEER"]
    VALID_FOR_HUB_AND_SPOKE = ["HUB", "SPOKE"]
    PREFIX_URL = "/policy/ftds2svpns"
    REQUIRED_FOR_POST = ["vpn_id"]

    def __init__(self, fmc, **kwargs):
        """
        Initialize Endpoints object.

        Set self.type to "EndPoint" and parse the kwargs.

        :param fmc (object): FMC object
        :param **kwargs: Any other values passed during instantiation.
        :return: None
        """
        super().__init__(fmc, **kwargs)
        logging.debug("In __init__() for Endpoints class.")
        self.parse_kwargs(**kwargs)
        self.type = "EndPoint"

    def vpn_policy(self, pol_name):
        """
        Associate a VPN Policy.

        :param pol_name: (str) Name of VPN Policy.
        :return: None
        """
        logging.debug("In vpn_policy() for Endpoints class.")
        ftd_s2s = FTDS2SVPNs(fmc=self.fmc)
        ftd_s2s.get(name=pol_name)
        if "id" in ftd_s2s.__dict__:
            self.vpn_id = ftd_s2s.id
            self.URL = (
                f"{self.fmc.configuration_url}{self.PREFIX_URL}/{self.vpn_id}/endpoints"
            )
            self.vpn_added_to_url = True
            self.topology_type = ftd_s2s.topologyType
        else:
            logging.warning(
                f'FTD S2S VPN Policy "{pol_name}" not found. '
                f"Cannot set up Endpoints for FTDS2SVPNs Policy."
            )

    def endpoint(self, action, device_name):
        """
        Associate an endpoint (a device or HA pair) by name.

        :param action: (str) 'add', 'remove', or 'clear'
        :param device_name: (str) Name of device.
        """
        logging.debug("In endpoint() for Endpoints class.")
        # Search both standalone devices and HA pairs.
        device_json = DeviceRecords(fmc=self.fmc).get()
        device_ha_json = FTDDeviceHAPairs(fmc=self.fmc).get()
        items = device_json.get("items", []) + device_ha_json.get("items", [])
        new_device = None
        if action == "add":
            for item in items:
                if item["name"] == device_name:
                    new_device = {
                        "name": item["name"],
                        "id": item["id"],
                        "type": item["type"],
                    }
                    break
            if new_device is None:
                logging.warning(
                    f'Device/DeviceHA "{device_name}" is not found in FMC. Cannot add to Endpoints.'
                )
            else:
                # NOTE(review): the first add stores a dict, but later adds
                # append to it as if it were a list, so a second "add" would
                # raise.  Preserved as-is because the expected FMC payload
                # shape ("device" object vs list) is not visible from this
                # file -- confirm before changing.
                if "device" in self.__dict__:
                    self.device.append(new_device)
                    logging.info(f'Adding "{device_name}" to Endpoints.')
                else:
                    self.device = new_device
        elif action == "remove":
            if "device" in self.__dict__:
                self.device = list(
                    filter(lambda i: i["name"] != device_name, self.device)
                )
            else:
                logging.warning("Endpoints has no members. Cannot remove device.")
        elif action == "clear":
            if "device" in self.__dict__:
                del self.device

    def vpn_interface(self, device_name, ifname):
        """
        Associate an interface on the named device, searching all interface kinds.

        :param device_name: (str) Name of device.
        :param ifname: (str) Name of interface.
        """
        logging.debug("In vpn_interface() for Endpoints class.")
        ether_json = EtherchannelInterfaces(fmc=self.fmc, device_name=device_name).get()
        phys_json = PhysicalInterfaces(fmc=self.fmc, device_name=device_name).get()
        redund_json = RedundantInterfaces(fmc=self.fmc, device_name=device_name).get()
        subintf_json = SubInterfaces(fmc=self.fmc, device_name=device_name).get()
        items = (
            ether_json.get("items", [])
            + phys_json.get("items", [])
            + redund_json.get("items", [])
            + subintf_json.get("items", [])
        )
        new_intf = None
        for item in items:
            if item["ifname"] == ifname:
                new_intf = {"id": item["id"], "type": item["type"]}
                break
        if new_intf is None:
            logging.warning(
                f'Interface "{ifname}" is not found in FMC. Cannot add to interface.'
            )
        else:
            self.interface = new_intf
            logging.info(f'Interface "{ifname}" added.')

    def encryption_domain(self, action, names=None):
        """
        Associate Encryption (protected networks) by object name.

        :param action: (str) 'add', 'remove', or 'clear'.
        :param names: (list) List of FQDN/Host/Network/NetworkGroup names.
        """
        # BUG FIX: log message previously said "In endpoint()".
        logging.debug("In encryption_domain() for Endpoints class.")
        # BUG FIX: mutable default argument names=[] replaced by None sentinel.
        if names is None:
            names = []
        fqdns_json = FQDNS(fmc=self.fmc).get()
        host_json = Hosts(fmc=self.fmc).get()
        net_json = Networks(fmc=self.fmc).get()
        netg_json = NetworkGroups(fmc=self.fmc).get()
        items = (
            fqdns_json.get("items", [])
            + host_json.get("items", [])
            + net_json.get("items", [])
            + netg_json.get("items", [])
        )
        if action == "add":
            for name in names:
                # BUG FIX: reset per name -- the original set this once before
                # the loop, so an unknown name silently re-added the previous
                # match instead of logging a warning.
                new_network = None
                for item in items:
                    if item["name"] == name:
                        # "name" is kept so remove-by-name below can work.
                        new_network = {
                            "name": item["name"],
                            "id": item["id"],
                            "type": item["type"],
                        }
                        break
                if new_network is None:
                    logging.warning(
                        f'FQDNS/Host/Network/Network Group "{name}" is not found in FMC.'
                        f" Cannot add to protectedNetworks."
                    )
                else:
                    if "protectedNetworks" in self.__dict__:
                        self.protectedNetworks["networks"].append(new_network)
                        logging.info(f'Appending "{name}" to protectedNetworks.')
                    else:
                        self.protectedNetworks = {"networks": [new_network]}
                        logging.info(f'Adding "{name}" to protectedNetworks.')
        elif action == "remove":
            if "protectedNetworks" in self.__dict__:
                # BUG FIX: filter the inner "networks" list; the original
                # filtered the protectedNetworks dict itself (iterating a dict
                # yields keys, so i["name"] raised TypeError).
                for name in names:
                    self.protectedNetworks["networks"] = [
                        net
                        for net in self.protectedNetworks["networks"]
                        if net.get("name") != name
                    ]
            else:
                logging.warning(
                    "protectedNetworks has no members. Cannot remove network."
                )
        elif action == "clear":
            if "protectedNetworks" in self.__dict__:
                del self.protectedNetworks
|
#!/usr/bin/env python
__author__ = "Master Computer Vision. Team 02"
__license__ = "M6 Video Analysis"
# Import libraries
import os
import math
import cv2
import numpy as np
from evaluate import *
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support as score
# Path to save images and videos
images_path = "./std-mean-images/"
video_path = "./background-subtraction-videos/"
# Define groundtruth labels namely
STATIC = 0
HARD_SHADOW = 50
OUTSIDE_REGION = 85
UNKNOW_MOTION = 170
MOTION = 255
def get_accumulator(path_test):
    """
    Description: get accumulator structure data
                 Shape depends on the dataset the test path points at
                 Data are coded into 32 bits of floats
    Input: path test
    Output: accumulator (float32 zeros; empty (0, 0) for unknown paths)
    """
    # Known dataset paths mapped to (rows, cols, frames).
    shapes = {
        "./highway/input/": (240, 320, 150),
        "./fall/input/": (480, 720, 50),
        "./traffic/input/": (240, 320, 50),
    }
    shape = shapes.get(path_test, (0, 0))
    return np.zeros(shape, np.float32)
def gaussian(path_test, path_gt, first_frame, last_frame, mu_matrix, sigma_matrix, alpha):
    """
    Description: per-pixel gaussian background-model evaluation over a frame range
    Input: path_test, path_gt, first_frame, last_frame, mu_matrix, sigma_matrix, alpha
    Output: AccFP, AccFN, AccTP, AccTN
            (NOTE(review): the original header also promised AccP, AccR, AccF1,
            but only the four raw counters are returned.)
    """
    # Initialize metrics accumulators
    AccFP = 0
    AccFN = 0
    AccTP = 0
    AccTN = 0
    # Initialize index to accumulate images
    index = 0
    # Define the codec and create VideoWriter object
    # NOTE(review): the writer is never released; consider out.release() at the end.
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(video_path+"gaussian_"+str(alpha)+str(path_test.split("/")[1])+".avi", fourcc, 60, (get_accumulator(path_test).shape[1], get_accumulator(path_test).shape[0]))
    # Read sequence of images sorted
    for filename in sorted(os.listdir(path_test)):
        # Check that frame is into range (frame number encoded in chars 2:8)
        frame_num = int(filename[2:8])
        if frame_num >= first_frame and frame_num <= last_frame:
            # Read image from groundtruth in grayscale
            frame = cv2.imread(path_test+filename, 0)
            # Compute pixels that belongs to background
            # NOTE(review): despite the name, this flags pixels *far* from the
            # mean (|frame - mu| >= threshold), i.e. foreground candidates;
            # also the usual formulation is alpha*sigma + const -- confirm the
            # "+2" inside the parentheses is intentional.
            background = abs(frame - mu_matrix) >= alpha*(sigma_matrix+2)
            # Convert bool to int values
            background = background.astype(int)
            # Replace 1 by 255
            background[background == 1] = 255
            # Scales, calculates absolute values, and converts the result to 8-bit
            background = cv2.convertScaleAbs(background)
            # Read groundtruth image
            gt = cv2.imread(path_gt+"gt"+filename[2:8]+".png",0)
            # Remember that we will use values as background 0 and 50, foreground 255, and unknow (not evaluated) 85 and 170
            # Replace values acording previous assumption
            background = background.flatten()
            gt = gt.flatten()
            # NOTE(review): the comprehension targets `index` and `gt` shadow
            # the outer names of the same spelling; it works, but is fragile.
            index2remove = [index for index, gt in enumerate(gt)
                            if gt == UNKNOW_MOTION or gt == OUTSIDE_REGION]
            gt = np.delete(gt, index2remove)
            # Hard shadows count as background for scoring purposes.
            gt[gt == HARD_SHADOW] = 0
            background = np.delete(background, index2remove)
            # Evaluate results
            TP, FP, TN, FN = evaluate_sample(background, gt)
            # Accumulate metrics
            AccTP = AccTP + TP
            AccTN = AccTN + TN
            AccFP = AccFP + FP
            AccFN = AccFN + FN
            # Write frame into video
            video_frame = cv2.cvtColor(background, cv2.COLOR_GRAY2RGB)
            out.write(video_frame)
    return AccFP, AccFN, AccTP, AccTN
|
from urllib.request import urlopen
from bs4 import BeautifulSoup

# Fetch the "currently running" movie listing page once.
html = urlopen('https://movie.naver.com/movie/running/current.nhn')
soup = BeautifulSoup(html, 'lxml')

movie_content = soup.find_all('div', {'id': 'content'})
movie_li = movie_content[0].find_all('li')

title_list = []
score_list = []
movie_ranking = dict()

for data in movie_li:
    # Every anchor text under the title <dt class="tit"> elements.
    for title in data.find_all('dt', {'class': 'tit'}):
        for anchor in title('a'):
            title_list.append(anchor.get_text())
    # The numeric rating under the star-info <dl class="info_star"> elements.
    for info in data.find_all("dl", {"class": "info_star"}):
        for num in info.find_all("span", {'class': 'num'}):
            score_list.append(num.get_text())

# Pair titles with scores positionally and key by 1-based rank.
for i in range(len(title_list)):
    movie_ranking[str(i + 1) + "위"] = title_list[i] + ":" + score_list[i]

for rank, info in movie_ranking.items():
    print(rank, "-", info)
|
from django.contrib.auth.models import User
from .models import Profile
from rest_framework import serializers
class AnalyzerSerializer(serializers.ModelSerializer):
    """Serializer for Django's built-in ``User`` model.

    Exposes ``username``/``email`` plus a write-only ``password`` that is
    rendered as a password input in the browsable API.
    """
    class Meta:
        model = User
        fields = ['username', 'email', 'password']
        extra_kwargs = {
            'password': {
                # Never echo the password back in responses.
                'write_only': True,
                'style': {'input_type': 'password'}
            }
        }
    def create(self, validated_data):
        """Create and return a new user."""
        # create_user() hashes the password; a plain create() would store
        # it in clear text.
        user = User.objects.create_user(
            username=validated_data['username'],
            email=validated_data['email'],
            password=validated_data['password']
        )
        return user
class ProfileSerializer(serializers.ModelSerializer):
    """Serializer for ``Profile`` with its related ``User`` nested inline."""
    # Nested user payload; handled by the custom create() below because
    # DRF does not support writable nested serializers automatically.
    user = AnalyzerSerializer(required=True)
    class Meta:
        model = Profile
        fields = '__all__'
    def create(self, validated_data):
        """Create the User first, then the Profile that references it."""
        user_data = validated_data.pop('user')
        # NOTE(review): duplicates AnalyzerSerializer.create(); consider
        # delegating to it so user-creation logic lives in one place.
        user = User.objects.create_user(
            username=user_data['username'],
            email=user_data['email'],
            password=user_data['password']
        )
        user_profile = Profile.objects.create(user=user, **validated_data)
        return user_profile
|
#!/usr/bin/python26
import json
import logging
import MySQLdb
import sys
import threepio
from webob import Request
CONFIG_PATH = '/scripts'
sys.path.append(CONFIG_PATH)
from db_queries import (OBJECT_QUERY_UUID_LOOKUP, SERVICE_ID_FROM_KEY_QUERY)
from configs import (PROV_DB_HOST, PROV_DB_USERNAME, PROV_DB_PASSWORD,
PROV_DB_NAME, PROV_DB_PORT, OBJECT_LOOKUP_LOGFILE)
threepio.initialize(log_filename=OBJECT_LOOKUP_LOGFILE,
logger_name="Object-Lookup",
app_logging_level=logging.DEBUG,
dep_logging_level=logging.WARN)
from threepio import logger as c3po
# Names of the query-string parameters required by this endpoint;
# requests missing either one receive a 400 response.
SRV_KEY = 'service_key'
OBJ_ID = 'object_id'
REQUIRED_ARGS = [SRV_KEY, OBJ_ID]
def application(environ, start_response):
    """
    WSGI entry point for the 'object_lookup' API endpoint.

    Performs an ``Object`` lookup given a ``service_key`` &
    ``object_id`` passed via the query string.

    The ``service_key`` is used to query for the ``service_id``, a
    foreign key into the ``Service`` table. This provides "scoping" or
    namespacing of the service correlation identifier (that's a fancy
    way to say 'the object identifier that makes sense within the
    _domain_, or `scope`, of the service committing provenance').

    Expected query-string parameters:
    ``service_key`` - a short, alpha-numeric "key" that identifies
    a calling service.
    ``object_id`` - an identifier for an object that exists within the
    domain of the service.

    Responds with a JSON body; missing parameters yield a 400.
    """
    req = Request(environ)
    srv_key = req.params.get('service_key')
    obj_id = req.params.get('object_id')
    if srv_key is not None and obj_id is not None:
        data_string, webstatus = _handle_get(srv_key, obj_id)
    else:
        data_string, webstatus = _handle_bad_request(req)
    # FIX: PEP 3333 requires header values to be strings, not ints.
    response_headers = [('Content-Type', 'application/json'),
                        ('Content-Length', str(len(data_string)))]
    start_response(webstatus, response_headers)
    # FIX: return a one-element iterable; returning the bare string made
    # the server iterate the body one character at a time.
    return [data_string]
def _handle_get(srv_key, obj_id):
    """
    Handle the object lookup for the request.

    Resolves ``srv_key`` to a ``service_id``, then looks up the object's
    UUID scoped to that service.

    Returns a response string and HTTP status code as a tuple in the
    form: ``(data_string, webstatus)``.
    """
    conn = None
    try:
        conn = MySQLdb.connect(host=PROV_DB_HOST, user=PROV_DB_USERNAME,
                               passwd=PROV_DB_PASSWORD, db=PROV_DB_NAME,
                               port=PROV_DB_PORT)
        cursor = conn.cursor()
        # SECURITY(review): queries are built via raw string interpolation,
        # which is SQL-injectable unless inputs are sanitised upstream;
        # prefer cursor.execute(query, params).
        cursor.execute(SERVICE_ID_FROM_KEY_QUERY % (srv_key))
        key_to_id = cursor.fetchone()
        if key_to_id is None:
            # Unknown service key -- report it instead of crashing on the
            # tuple unpack below (which surfaced as a 500).
            c3po.warn("No service found for key: " + srv_key)
            data_string = json.dumps({'Status': 'Failed',
                                      'Details': 'Unknown service_key'},
                                     indent=4)
            cursor.close()
            return (data_string, '404 Not Found')
        srv_id, = key_to_id
        # BUG FIX: concatenating a str with the result *tuple* raised
        # TypeError on every successful lookup; stringify it first.
        c3po.info('result from `service-id` query' + str(key_to_id))
        cursor.execute(OBJECT_QUERY_UUID_LOOKUP % (obj_id, srv_id))
        results = cursor.fetchall()
        if len(results) == 1:
            uid = str(results[0][0])
            info_msg = "Lookup Object Exists:" + " " + uid
            c3po.info(info_msg)
            data_string = json.dumps({'UUID': uid}, indent=4)
            webstatus = '200 OK'
        elif len(results) > 1:
            errmsg = ("More than one object was found: " + str(results))
            c3po.warn(errmsg)
            data_string = json.dumps({'Status': 'Exception',
                                      'Details': 'Multiple objects found ' +
                                      'with the same `object_id` for the same ' +
                                      ' `service_id`. Incident has been reported'},
                                     indent=4)
            webstatus = '404 Not Found'
        else:
            err_msg = "Object UUID is null: " + obj_id
            # Consistency: log through the module logger like every other
            # branch (was the bare ``logging`` root logger).
            c3po.error(err_msg)
            data_string = json.dumps({'Status': 'Failed',
                                      'Details': 'Object does not exist'},
                                     indent=4)
            webstatus = '404 Not Found'
        cursor.close()
    except Exception as exc:
        err_msg = "MySQL DB Exception: " + " " + str(exc) + " " + obj_id
        c3po.warn(err_msg)
        c3po.exception(exc)
        data_string = json.dumps({'Status': 'Failed',
                                  'Details': 'MySQL Exception. Incident' +
                                  ' has been reported'}, indent=4)
        webstatus = '500 Internal Server Error'
    finally:
        # FIX: always release the connection (it was previously leaked).
        if conn is not None:
            conn.close()
    return (data_string, webstatus)
def _handle_bad_request(request):
    """
    Build the JSON body and status for a request that is missing one or
    both required query-string arguments.
    """
    missing = []
    if request.params.get(SRV_KEY) is None:
        missing.append('Expect ``service_key`` as a query string argument ' +
                       '- and it is missing. ')
    if request.params.get(OBJ_ID) is None:
        missing.append('Expect ``object_id`` as a query string argument ' +
                       '- and it is missing. ')
    details = ''.join(missing)
    data_string = json.dumps({'Status': 'Failed',
                              'Details': 'Request missing required arguments. ' +
                              details }, indent=4)
    return (data_string, '400 Bad Request')
from .save_class import save_all, load_all |
class Solution:
    def carPooling(self, trips: list, capacity: int) -> bool:
        """Return True if all trips can be served without ever exceeding
        ``capacity`` passengers at once.

        Bucket approach: each ``trips[i] = [num_passengers, start, end]``
        with locations in [0, 1000]; record the passenger delta at each
        location, then prefix-sum along the route.

        Time O(n + 1001), space O(1001).
        """
        # FIX: the original annotation used ``List[List[int]]`` without
        # importing ``typing.List`` -- NameError at class-definition time.
        timestamp = [0] * 1001
        for num, start, end in trips:
            timestamp[start] += num   # passengers board at pickup
            timestamp[end] -= num     # and leave at drop-off
        used_capacity = 0
        for passenger_change in timestamp:
            used_capacity += passenger_change
            if used_capacity > capacity:
                return False
        return True
class Solution:
    def carPooling(self, trips: list, capacity: int) -> bool:
        """Return True if every trip can be served without the passenger
        count ever exceeding ``capacity``.

        Sweep-line approach: each trip becomes a (+passengers) event at
        pickup and a (-passengers) event at drop-off; sort the events by
        location and scan.  At equal locations drop-offs sort first
        because their delta is negative.

        Time O(n log n), space O(n); works for unbounded location values,
        unlike the fixed-size bucket variant.
        """
        # FIX: the original annotation used ``List[List[int]]`` without
        # importing ``typing.List`` -- NameError at class-definition time.
        steps = []
        for num, start, end in trips:
            steps.append([start, num])
            steps.append([end, -num])
        steps.sort()
        for _, change in steps:
            capacity -= change
            if capacity < 0:
                return False
        return True
from django.contrib.auth.models import User
from rest_framework import status as s
from .models import Profile
from rest_framework import generics
from . import serializers
from . import permissions
def send_email(email):
    """Placeholder mailer: currently just echoes the address to stdout."""
    print("%s" % (email,))
class Analyzer(generics.ListCreateAPIView):
    """List all profiles (GET) and register a new profile + user (POST)."""
    queryset = Profile.objects.all()
    serializer_class = serializers.ProfileSerializer
    # Intentionally open: registration must work for anonymous users.
    permission_classes = []
    def create(self, request, *args, **kwargs):
        """Create the profile, then notify the new user's email address."""
        res = super().create(request, *args, **kwargs)
        # NOTE(review): 'user.email' assumes a flat form-encoded payload;
        # a nested JSON body would need request.data['user']['email'] and
        # this line would raise KeyError -- confirm against the client.
        send_email(request.data['user.email'])
        # ListCreateAPIView already returns 201 on success; this is a
        # redundant safeguard.
        res.status_code = s.HTTP_201_CREATED
        return res
|
# Plugin configuration defaults (RSBIDE / RSL macro tooling).
config = {
    # Console messages
    "DEBUG": False,
    "LOG": False,
    # Linter
    "LINT": True,
    "MAXLENGTH": 160,
    "SHOW_SAVE": True,
    "SCOP_ERROR": "invalid.mac",
    "MAX_EMPTY_LINE": 2,
    "SHOW_CLASS_IN_STATUS": False,
    "MAX_DEPTH_LOOP": 5,
    "MAX_COUNT_MACRO_PARAM": 5,
    "LINT_ON_SAVE": True,
    # Hungarian-style name prefixes recognised by the linter.
    "PREFIX_VARIABLE_GLOBAL": r"([msg]_)|(the)",
    "PREFIX_VARIABLE_VISUAL": r"(grid|grd)|(tree)|(fld)|(frm)|(dlg)|(btn)|(chk)|(radio|rd)|(edit|edt)|(list|lst)|(cmb)|(lbl|label)|(tab)|(cmd)|(control|ctrl)|(cl)",
    "PREFIX_VARIABLE_TYPE": r"(ref)|(ev)|(arr|tarr)|(o|obj)|(key)|(ax)|(dict)|(ds)|(i)|(s|str)|(is|b)|(f|lf)|(n|d|m)|(dt)|(t)|(v)|(pIn)|(oOut)",
    # Settings
    "RSB_SETTINGS_FILE": "RSBIDE.sublime-settings",
    # Cache: folders excluded from indexing.
    "EXCLUDE_FOLDERS": [
        "DBFiles", "DstLbr", "Export",
        "Help", "Html", "Import",
        "Lbr", "LoadDocum", "Log",
        "PKG", "Report", "RSM",
        "Script", "TaskLog", "Template",
        "Upgrader", "Web"],
    "ALWAYS_IMPORT": ["CommonVariables", "CommonDefines", "CommonClasses", "CommonFunctions", "CommonCallReference"],
    "BASE_DIRECTORY": False,
    "PROJECT_DIRECTORY": "",
    "TRIGGER": [{
        "scope": "\\.mac\\s",
        "auto": True,
        "relative": True,
        "base_directory": False,
        "extensions": ["mac", "xml"],
    }],
}
|
'''
Created on Feb 22, 2016
@author: Andrei Padnevici
@note: This is an example of object oriented program in python
'''
from tkinter import Pack
class PartyAnimal:
    """Counts how many times this animal has partied."""
    x = 0  # class-level default; shadowed by the instance attribute

    def __init__(self, x=-1):
        # A default of -1 makes the first party() call report 0.
        self.x = x

    def party(self):
        """Increment the party counter and report the running total."""
        self.x = self.x + 1
        print("So far", self.x)
class ChildParty(PartyAnimal):
    """PartyAnimal that also carries a second counter ``y``."""
    y = 0  # class-level default, mirrors PartyAnimal.x

    def __init__(self, x=0, y=0):
        # Let the parent initialise x, then record y locally.
        super(ChildParty, self).__init__(x)
        self.y = y

    def sum(self):
        """Print the combined value of both counters."""
        total = self.x + self.y
        print(total)
# Demo: a default-constructed animal starts at -1, so parties print 0, 1, 2.
p = PartyAnimal()
p.party()
p.party()
p.party()
# Rebind p; counting now starts from 5 (prints 6, 7, 8).
p = PartyAnimal(5)
p.party()
p.party()
p.party()
# Child instance: one party (prints 10), then sum prints x + y = 19.
c = ChildParty(9, 9)
c.party()
c.sum()
|
# KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
import logging
from pylons import request, response, session, tmpl_context as c
from pycloud.pycloud.pylons.lib.base import BaseController
from pycloud.manager.lib.pages import HomePage
from pycloud.pycloud.cloudlet import Cloudlet
from pycloud.pycloud.pylons.lib.util import asjson
log = logging.getLogger(__name__)
################################################################################################################
# Controller for the main page.
################################################################################################################
################################################################################################################
# Controller for the main page.
################################################################################################################
class HomeController(BaseController):
    """Controller for the manager's main (home) page."""

    ############################################################################################################
    # Shows the main page.
    ############################################################################################################
    def GET_index(self):
        # Highlight the Home tab in the navigation bar.
        c.home_active = 'active'
        # Build the page model and attach the machine status snapshot.
        page = HomePage()
        page.machine_status = Cloudlet.system_information()
        # Render the page with the grid.
        return page.render()

    ############################################################################################################
    # Returns the cloudlet state.
    ############################################################################################################
    @asjson
    def GET_state(self):
        # Serialised to JSON by the @asjson decorator.
        return Cloudlet.system_information()
|
import sys
import numpy as np
import itertools
import numpy as np
import subprocess as sub
import matplotlib.pylab as pl
# take as input file with angle definitions, number of angles in first line, .gro for indexes of atom, xtc with trajectory
def mkNdx(ndx,fgro):
    """Read one dihedral definition from ``ndx`` (a name line followed by
    a line of four atom names) and resolve those names to atom indexes
    in the .gro file ``fgro``.

    Returns ``[name, [i1,i2,i3,i4], ...]`` -- one four-index row per
    matching molecule (solvent excluded).
    """
    #needs ndx-like file with name od dihedral and atom names
    #in whole gro finds indexes of such atoms
    # Skip the two .gro header lines and drop solvent (SOL) atoms.
    gro = [i for i in open(fgro,'r').readlines()[2:] if 'SOL' not in i]
    # Keep (atom name, atom index) from the fixed-width .gro columns.
    gro = [[i[9:15].strip(),i[15:22].strip()] for i in gro]
    # thisNdx = [dihedral name, [four atom names]]
    thisNdx = [ndx.readline(),ndx.readline().split()]
    print gro
    ndxs = []
    for i in thisNdx[1]:
        thisAtm = []
        for j in gro:
            if str(i) == str(j[0]):
                thisAtm.append(j[1])
        ndxs.append(thisAtm)
    readyNdx = [thisNdx[0]]
    # NOTE(review): len(thisAtm) is the match count of the *last* atom
    # name only; this assumes every atom name matched the same number of
    # molecules -- confirm for mixed systems.
    for i in range(len(thisAtm)):
        readyNdx.append([ndxs[0][i],ndxs[1][i],ndxs[2][i],ndxs[3][i]])
    return readyNdx
def runGangle(fxtc,sndx,output):
    """Run GROMACS ``g_angle`` once per lipid for dihedral ``sndx``.

    Recreates the ``output`` directory, then for each of the (global)
    ``lipNo`` lipids pipes the index-group number to g_angle, writing
    ``output/output_<i>.xvg``.  Frames 10000-20000 ps are analysed.
    """
    dirmk = 'mkdir '+output
    # Wipe any previous results for this dihedral.
    sub.call('rm -r '+output, shell=True)
    sub.call(dirmk.split())
    #runs subprocess of g_angle for certain
    # ``lipNo`` is a module-level global set in __main__.
    for i in range(lipNo):
        print fxtc,sndx,output,output,i
        runCmd = 'g_angle -f %s -b 10000 -e 20000 -n %s.ndx -type dihedral -ov %s/%s_%s' %(fxtc,sndx,output,output,str(i))
        runner = sub.Popen(runCmd.split(), stdin=sub.PIPE)
        # g_angle asks interactively which index group to use; answer i.
        stdin_dat = runner.communicate(str(i))
    # Remove GROMACS backup files (#...#).
    sub.call('rm \#*', shell=True)
    return 1
def mergeFiles(sndx):
    """Merge the per-lipid ``.xvg`` outputs of g_angle into one table.

    Writes ``<sndx>_merged.xvg`` (time column followed by one angle
    column per lipid) and returns all angle values as a flat float list.

    Python 2 only: relies on ``zip`` returning a list.
    """
    f1 = open('%s/%s_0.xvg' %(sndx,sndx),'r').readlines()
    # Drop xmgrace header/comment lines (starting with @ or #).
    f1 = [i.split() for i in f1 if i[0] not in '@#']
    f1 = zip(*f1)  # transpose: column 0 = time, column 1 = angles
    for i in range(lipNo)[1:]:
        fcont = open('%s/%s_%i.xvg' %(sndx,sndx,i),'r').readlines()
        fcont = [i.split() for i in fcont if i[0] not in '@#']
        fcont = zip(*fcont)
        f1.append(fcont[1])  # keep only the angle column
    valOnly = f1[1:]  # every column except time
    valOnly = list(itertools.chain(*valOnly))
    f1 = zip(*f1) #here
    dump = open('%s_merged.xvg' %(sndx),'w')
    for i in f1:
        dump.write('\t'.join([str(k) for k in i])+'\n')
    dump.close()
    return [float(i) for i in valOnly]
def smoothData(data,window):
    """Smooth ``data`` with a centred moving average of width ``window``.

    Infinite entries are replaced by the maximum finite value; entries
    near the start of the list use a truncated window.  Returns a new
    list of the same length.
    """
    data = list(data)
    # FIX: use floor division so the slice indices stay integers -- the
    # original ``window/2`` produces floats under Python 3 and breaks
    # slicing.  Identical arithmetic under Python 2 for int windows.
    half = window // 2
    smooth = []
    for i in range(len(data)):
        if np.isinf(data[i]) == True:
            # Replace inf with the largest finite sample (renamed the
            # comprehension variable, which shadowed the loop index).
            smooth.append(np.max([v for v in data if np.isinf(v) != True]))
            continue
        if i < half:
            # Left edge: window truncated at the start of the list.
            smooth.append(np.mean([k for k in data[:i+half] if np.isinf(k) != True]))
            continue
        else:
            smooth.append(np.mean([k for k in data[i-half:i+half] if np.isinf(k) != True]))
            continue
    return smooth
def mkHistogram(data,sndx):
    """Plot a 360-bin normalised histogram of the angle values, save it
    as ``<sndx>_hist.pdf`` and return matplotlib's histogram tuple
    (counts, bin edges, patches).
    """
    print type(data[2])
    # NOTE(review): ``normed`` was removed in matplotlib >= 3.1; newer
    # versions require ``density=1`` instead.
    histDat = pl.hist(data,360,normed=1,color='black',histtype='step',label='Angle %s' %(sndx))
    pl.legend()
    pl.xlim(-180,180)
    pl.xlabel('Angle [deg.]',fontsize=16)
    pl.ylabel('Probability density',fontsize=16)
    pl.xticks(fontsize=12)
    pl.yticks(fontsize=12)
    pl.savefig('%s_hist.pdf' %(sndx))
    pl.cla()  # clear axes so the next dihedral starts fresh
    return histDat # sp.histogram(data,360)
def mkProfile(hvals,sndx):
    """Boltzmann-invert the histogram into a free-energy profile.

    ``hvals`` is the (counts, bin_edges, ...) tuple from mkHistogram.
    Saves the raw and smoothed curves to ``<sndx>_prof.pdf``.
    """
    xval = hvals[1][:-1]  # left bin edges
    yval = hvals[0]       # normalised probabilities
    # E = -RT ln(p); 0.0019 kcal/(mol*K) is the gas constant, T = 300 K.
    yval = [-0.0019*float(300)*np.log(i) for i in yval]
    # Shift the curve so the global minimum sits at zero.
    minVal = float(min(yval))
    yval = [i-minVal for i in yval]
    yvalS = smoothData(yval,10)
    pl.plot(xval,yval,'.',color='black',label='Calculated %s' %(sndx))
    pl.plot(xval,yvalS,'-',color='black',label='Smooth %s' %(sndx))
    pl.legend()
    pl.xlim(-180,180)
    pl.ylim(0,6)
    pl.xlabel('Angle [deg.]',fontsize=16)
    pl.ylabel('Energy [kcal/mol/deg2',fontsize=16)
    pl.xticks(fontsize=12)
    pl.yticks(fontsize=12)
    pl.savefig('%s_prof.pdf' %(sndx))
    pl.cla()
if __name__ == '__main__':
    # Usage: python script.py <dihedral file> <structure.gro> <traj.xtc>
    # The dihedral file starts with the number of dihedrals, followed by
    # one (name line, atom-names line) pair per dihedral.
    fndx = sys.argv[1]
    fgro = sys.argv[2]
    fxtc = sys.argv[3]
    ndx = open(fndx,'r')
    dihNo = int(ndx.readline())
    dihNames = []
    for i in range(dihNo):
        data = mkNdx(ndx,fgro)
        # Global: number of molecules, read by runGangle/mergeFiles.
        lipNo = len(data[1:])
        dihNames.append(data[0])
        print data
        # Write a GROMACS .ndx file with one group per molecule.
        dump = open('%s.ndx' %(data[0][:-1]),'w')
        for k in data[1:]:
            dump.write('[ '+data[0][:-1]+' ]\n')
            dump.write(' '+' '.join([str(v) for v in k])+'\n')
            dump.flush()
        print lipNo
    print dihNames
    #now You got ndx files for each of dih (stored in dihNames)
    for sndx in dihNames:
        sndx = sndx[:-1]  # strip the trailing newline from the name
        runGangle(fxtc,sndx,sndx)
        vals = mergeFiles(sndx)
        hvals = mkHistogram(vals,sndx)
        mkProfile(hvals,sndx)
    # plot()
|
# Designate the Domains
domain1 = "Language"
domain2 = "Memory"
domain3 = "Visuo-spatial"
domain4 = "Motor"
domain5 = "Attention"
domain6 = "Executive Function"
domain7 = "IQ/Academic"
# Prep work for adding the tests
# Create initial menu; entry 0 is the "add a new test" action, entries
# 1..5 are predefined language tests.
language_menu = {
    0 : "Add A New Test",
    1 : "FAS",
    2 : "CFL",
    3 : "Semantic Fluency",
    4 : "Animals",
    5 : "Boston Naming"
}
max_item = len(language_menu)-1
# Display existing menu for the user
print(language_menu)
# Prompt user to select an existing test (1..end) or add a new one (0)
lang_selection = int(input("Enter Selection: "))
print(lang_selection)
language_administered = []
if lang_selection != 0:
    # BUG FIX: was ``langage_administered[numtestsl] = ...`` -- a typo'd
    # name (NameError) and an index assignment into an empty list
    # (IndexError); append instead.
    language_administered.append(language_menu[lang_selection])
    print(lang_selection, ":", language_menu[lang_selection])
else:
    new_testl = input("Enter Test: ")
    max_item += 1
    language_menu[max_item] = new_testl
    language_administered.append(new_testl)
    print(language_menu)
# BUG FIX: was ``numtestsl =+ 1`` (assigns +1); track the count from the
# administered list itself.
numtestsl = len(language_administered)
# Create menu of score types
# Which score type a user initially selects will determine the calculations that will ultimately be run
scoretype_menu = {
    1 : "Standard Score",
    2 : "Scaled Score",
    3 : "T-Score",
    4 : "z-score"
}
print(scoretype_menu)
score_selection = int(input("Enter Selection: "))
print(score_selection, ":", scoretype_menu[score_selection])
def define_langs(langs, score_types, lang_sctype_dict=None):
    '''Interactively assign a score type to each language test.

    Shows the module-level ``language_menu``, lets the user pick tests,
    add new ones, or quit, and returns a dict mapping each chosen test
    name to its score-type string, e.g.::

        {"FAS": "T-Score",
         "CFL": "Standard Score",
         ...}

    ``langs``/``score_types`` are menu dicts; passing a previously saved
    ``lang_sctype_dict`` lets the user edit it in place.
    '''
    max_item = len(langs)
    # If lang_sctype_dict was not given, start a new
    if lang_sctype_dict == None:
        lang_sctype_dict = {}
    while True: # loop forever, use return to jump out of loop for the next step
        # Display existing menu for the user
        print("\nDefine Language Menu")
        print("Currently defined languages:", lang_sctype_dict)
        print(language_menu)
        # Prompt for a test number; 0 ends the session.
        lang_selection_str = input("Enter number for the language you want to assign a score type to (0 when you're finished): ")
        lang_selection = int(lang_selection_str)
        #print(lang_selection)
        if lang_selection == 0: # done
            return lang_sctype_dict
        elif lang_selection != 1: # existing menu entry selected
            # NOTE(review): selection 1 falls through to the add-new
            # branch below, but menu entry 1 is "FAS" and entry 0 is
            # "Add A New Test" -- picking FAS therefore *adds* a test;
            # confirm the intended mapping.
            lang = language_menu[lang_selection]
            sctype = define_score_type(lang, score_types)
            lang_sctype_dict[lang] = sctype
        else: # add new lang
            new_test = input("Enter Name of New Test: ")
            max_item += 1
            # NOTE(review): mutates the module-level ``language_menu``
            # rather than the ``langs`` parameter.
            language_menu[max_item] = new_test
            #print(language_menu)
            lang = new_test
            sctype = define_score_type(lang, score_types)
            lang_sctype_dict[lang] = sctype
def define_score_type(lang, score_types):
    '''Prompt the user to pick a score type for ``lang`` from the
    ``score_types`` menu and return the chosen type (string).
    '''
    print(score_types)
    choice = int(input("Enter score type for " + lang + ": "))
    return score_types[choice]
#
# MAIN
#
# test define_score_type
#print(define_score_type("FAS", scoretype_menu))
# Build the test -> score-type mapping interactively.
d = define_langs(language_menu, scoretype_menu)
print(d)
# save dict in pickle format as test.p
import pickle
pickle.dump(d, open( "test.p", "wb" ))
# read it back in and re-run the editor so the user can update choices
d_from_file = pickle.load(open( "test.p", "rb" ))
d = define_langs(language_menu, scoretype_menu, d_from_file)
|
import asyncio
from aioconsole import ainput
from bouquet_design.consumer import Consumer
from bouquet_design.creator import BouquetCreator
from bouquet_design.models import Designs
async def run_consumer():
    """Forever read lines from stdin and feed them to the Consumer,
    which parses designs/flowers (or a path to a sample file).
    """
    # '/usr/src/bloomon/sample.txt'
    consumer = Consumer()
    while True:
        # await consumer.handle(input())
        # aioconsole.ainput keeps the event loop responsive while waiting.
        await consumer.handle(await ainput())
async def run_factory():
    """Forever turn queued designs into bouquets, printing each bouquet
    once it is complete.
    """
    bouquets = set()
    while True:
        creator = BouquetCreator()
        # Take the oldest queued design, if any.  NOTE(review): assumes
        # Designs() is a shared registry whose pop(0) returns None/falsy
        # when empty -- confirm.
        design = Designs().pop(0)
        if design:
            bouquets.add(await creator.apply_design(design))
        if len(bouquets):
            bouquet = bouquets.pop()
            await creator.create_bouquet(bouquet)
            if bouquet.is_ready:
                print(bouquet)
            else:
                # Not enough flowers yet; requeue it for a later pass.
                bouquets.add(bouquet)
        # Yield so the consumer task gets a chance to run.
        await asyncio.sleep(0.01)
if __name__ == '__main__':
    # NOTE(review): get_event_loop()/run_forever is the legacy pattern;
    # asyncio.run() with gather() is the modern equivalent.
    ioloop = asyncio.get_event_loop()
    ioloop.create_task(run_consumer())
    ioloop.create_task(run_factory())
    try:
        print("Service started. Enter path to file or designs and flowers to console")
        ioloop.run_forever()
    except KeyboardInterrupt:
        # Ctrl-C exits cleanly.
        pass
|
# -*- coding: utf-8 -*-
"""
J4HR models.
"""
import string
import random
import datetime
from ldap import MOD_REPLACE
from .app import db, ldaptools
class Corporation(db.Model):
    """An EVE corporation that characters can apply to join."""
    __tablename__ = 'corporations'
    id = db.Column(db.Integer, primary_key=True) # corporationID
    name = db.Column(db.String) # corporationName
    ticker = db.Column(db.String) # ticker
    members = db.Column(db.Integer) # memberCount
    # Inactive corporations are kept for history but no longer recruit.
    active = db.Column(db.Boolean, default=True)
    def __init__(self, id=None, name=None, ticker=None, members=None):
        self.id = id
        self.name = name
        self.ticker = ticker
        self.members = members
    def __repr__(self):
        return '<Corporation "{name}">'.format(name=self.name)
class Application(db.Model):
    """A membership application for a corporation.

    Holds the applicant's character, contact details, API credentials
    and the moderation status (1 = pending, 2 = accepted, otherwise
    rejected; see ``status_text``).
    """
    __tablename__ = 'applications'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String) # character name
    character_id = db.Column(db.Integer)
    corporation_id = db.Column(db.Integer, db.ForeignKey('corporations.id'))
    corporation = db.relationship(
        'Corporation', backref=db.backref('applications', lazy='dynamic'))
    email = db.Column(db.String)
    motivation = db.Column(db.Text)
    reddit_id = db.Column(
        db.Integer, db.ForeignKey('reddit.id'), nullable=True)
    reddit = db.relationship(
        'Reddit', backref=db.backref('reddit', lazy='dynamic'))
    key_id = db.Column(db.Integer)
    vcode = db.Column(db.String)
    access_key = db.Column(db.String)
    access_code = db.Column(db.Integer)
    status = db.Column(db.Integer, default=1)
    reject_message = db.Column(db.Text, nullable=True)
    created_at = db.Column(db.DateTime)
    updated_at = db.Column(db.DateTime)
    def __init__(self, session=None):
        # Populate the application from the signup wizard's session data.
        if session:
            self.name = session['character_name']
            self.character_id = session['character_id']
            self.corporation_id = session['corporation_id']
            self.email = session['email']
            self.motivation = session['motivation']
            self.key_id = session['key_id']
            self.vcode = session['vcode']
            # Random status-check tokens for the applicant.
            # NOTE(review): ``random`` is not cryptographically secure;
            # consider the ``secrets`` module for these.
            self.access_key = ''.join(random.choice(string.ascii_uppercase + string.digits)
                                      for x in range(24))
            self.access_code = ''.join(random.choice(string.digits)
                                       for x in range(8))
            self.created_at = datetime.datetime.utcnow()
            self.updated_at = datetime.datetime.utcnow()
            if 'reddit' in session:
                # Reddit() commits itself, so reddit.id is the DB-assigned
                # key afterwards.
                reddit = Reddit(session)
                self.reddit_id = reddit.id
    def status_text(self):
        """Human-readable form of the numeric ``status`` column."""
        if self.status == 1:
            return 'pending'
        elif self.status == 2:
            return 'accepted'
        else:
            return 'rejected'
class Reddit(db.Model):
    """OAuth credentials for an applicant's linked reddit account.

    Instantiating with a session containing a ``'reddit'`` entry
    persists the row immediately (add + commit happen in __init__).
    """
    __tablename__ = 'reddit'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String)
    scope = db.Column(db.String)           # comma-separated OAuth scopes
    access_token = db.Column(db.String)
    refresh_token = db.Column(db.String)
    refreshed_at = db.Column(db.DateTime)  # last access-token refresh
    def __init__(self, session=None):
        if session and session['reddit']:
            reddit = session['reddit']
            self.username = reddit['username']
            self.scope = ','.join(reddit['scope'])
            self.access_token = reddit['access_token']
            self.refresh_token = reddit['refresh_token']
            # BUG FIX: was assigned to a local variable ``refreshed_at``,
            # which left the column NULL on every insert.
            self.refreshed_at = reddit['refreshed_at']
            db.session.add(self)
            db.session.commit()
class Purge(db.Model):
    """Audit record of a purged LDAP account."""
    __tablename__ = 'purge'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String)
    reason = db.Column(db.Text)
    purged_by = db.Column(db.String)
    purged_at = db.Column(db.DateTime)
    def __init__(self, username=None, reason=None):
        if username:
            self.username = username
        if reason:
            self.reason = reason
    def do_purge(self):
        """Mark this record's LDAP account as purged.

        Raises if the user is not found or the LDAP update fails.
        """
        user = ldaptools.getuser(self.username)
        if user is None:
            raise Exception('User not found in LDAP directory')
        # FIX: removed the no-op ``except Exception, e: raise e`` wrapper,
        # which only destroyed the original traceback under Python 2.
        ldaptools.modattr(
            user.get_id(), MOD_REPLACE, 'accountStatus', 'purged')
# Create any tables that do not exist yet (no-op for existing ones).
db.create_all()
|
import math
import torch
import torch.distributed as dist
class RASampler(torch.utils.data.Sampler):
    """Distributed sampler with repeated augmentation.

    Every sample index is repeated ``repetitions`` times and the copies
    are dealt round-robin across processes, so each GPU sees a
    differently augmented version of the same image within an epoch.
    Modelled on ``torch.utils.data.DistributedSampler``; borrowed from
    the DeiT repository:
    https://github.com/facebookresearch/deit/blob/main/samplers.py
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True, seed=0, repetitions=3):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available!")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available!")
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Per-replica share of the repeated dataset, rounded up.
        self.num_samples = int(math.ceil(len(self.dataset) * float(repetitions) / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        # Only the first ~len(dataset)/num_replicas entries (rounded down
        # to a multiple of 256) are actually yielded each epoch.
        self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))
        self.shuffle = shuffle
        self.seed = seed
        self.repetitions = repetitions

    def __iter__(self):
        if self.shuffle:
            # Deterministic shuffle keyed on (seed, epoch) so every
            # replica agrees on the ordering.
            gen = torch.Generator()
            gen.manual_seed(self.seed + self.epoch)
            order = torch.randperm(len(self.dataset), generator=gen).tolist()
        else:
            order = list(range(len(self.dataset)))
        # Repeat each index `repetitions` times, then pad so the list
        # splits evenly across replicas.
        repeated = [idx for idx in order for _ in range(self.repetitions)]
        repeated += repeated[: (self.total_size - len(repeated))]
        assert len(repeated) == self.total_size
        # Round-robin subsample: this replica takes every
        # ``num_replicas``-th entry starting at its rank.
        mine = repeated[self.rank : self.total_size : self.num_replicas]
        assert len(mine) == self.num_samples
        return iter(mine[: self.num_selected_samples])

    def __len__(self):
        return self.num_selected_samples

    def set_epoch(self, epoch):
        """Set the epoch number used to reseed the shuffle."""
        self.epoch = epoch
|
# Demo of enumerate() and len() on a short string.
# FIX: the variable was named ``str``, shadowing the builtin type.
word = "cold"
# enumerate() pairs each index with its character
list_enumerate = list(enumerate(word))
print("list(enumerate(word)) = ", list_enumerate)
# character count
print("len(word) = ", len(word))
#coding:utf-8
# Basic image operations (OpenCV tutorial notes)
import numpy as np
import cv2 as cv
# Accessing and modifying pixel values
# FIX: use a raw string so the Windows backslashes can never be taken as
# escape sequences.
img = cv.imread(r'D:\python_file\Opencv3_study_file\images\!face.png')
'''
>>> px = img[100,100]
>>> print( px )
[157 166 200]
# access only the blue channel
>>> blue = img[100,100,0]
>>> print( blue )
157
Pixel values can be modified the same way:
>>> img[100,100] = [255,255,255]
>>> print( img[100,100] )
[255 255 255]
157
'''
# Accessing image properties
'''
shape: dimensions
>>> print( img.shape )
(342, 548, 3)
size: total number of pixels
>>> print( img.size )
562248
dtype: element type
>>> print( img.dtype )
uint8
'''
# Image region of interest (ROI)
'''
>>> ball = img[280:340, 330:390]
>>> img[273:333, 100:160] = ball
'''
# Splitting and merging image channels
'''
#split and merge
>>> b,g,r = cv.split(img)
>>> img = cv.merge((b,g,r))
'''
# Making borders (padding) for images
''':arg
cv.BORDER_CONSTANT - adds a constant-coloured border; the colour is given as the next argument.
cv.BORDER_REFLECT - the border mirrors the edge elements, like this: fedcba_abcdefgh\hgfedcb
cv.BORDER_REFLECT_101 or cv.BORDER_DEFAULT - same as above with a slight change, like this: gfedcb_abcdefgh\gfedcba
cv.BORDER_REPLICATE - the last element is replicated throughout, like this: a _ abcdefgh _ h
cv.BORDER_WRAP - hard to describe; it looks like this: cdefgh _ abcdefg
'''
''':arg
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
BLUE = [255,0,0]
img1 = cv.imread('opencv-logo.png')
replicate = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REPLICATE)
reflect = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REFLECT)
reflect101 = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REFLECT_101)
wrap = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_WRAP)
constant= cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_CONSTANT,value=BLUE)
plt.subplot(231),plt.imshow(img1,'gray'),plt.title('ORIGINAL')
plt.subplot(232),plt.imshow(replicate,'gray'),plt.title('REPLICATE')
plt.subplot(233),plt.imshow(reflect,'gray'),plt.title('REFLECT')
plt.subplot(234),plt.imshow(reflect101,'gray'),plt.title('REFLECT_101')
plt.subplot(235),plt.imshow(wrap,'gray'),plt.title('WRAP')
plt.subplot(236),plt.imshow(constant,'gray'),plt.title('CONSTANT')
plt.show()
'''
|
from random import randint
import random
class Edge:
    """Directed connection to a destination vertex."""

    def __init__(self, destination):
        self.destination = destination
class Vertex:
    """Graph node with a value, a draw colour, a position and edges."""

    def __init__(self, value, **pos): #TODO: test default arguments
        self.value = value
        self.color = 'white'  # default until BFS paints the component
        self.pos = pos        # e.g. {'x': 40, 'y': 40}
        self.edges = []       # outgoing Edge objects
class Graph:
    """Collection of vertexes supporting random generation, BFS colouring
    and connected-component labelling."""

    def __init__(self):
        self.vertexes = []

    def create_test_data(self):
        """Populate the graph with a fixed 12-vertex fixture (three
        components: t1-t7, t8-t10, t11-t12)."""
        vertex_1 = Vertex('t1', x=40, y=40)
        vertex_2 = Vertex('t2', x=140, y=140)
        vertex_3 = Vertex('t3', x=220, y=250)
        vertex_4 = Vertex('t4', x=150, y=300)
        vertex_5 = Vertex('t5', x=250, y=350)
        vertex_6 = Vertex('t6', x=350, y=275)
        vertex_7 = Vertex('t7', x=350, y=400)
        vertex_8 = Vertex('t8', x=400, y=425)
        vertex_9 = Vertex('t9', x=425, y=375)
        vertex_10 = Vertex('t10', x=450, y=325)
        vertex_11 = Vertex('t11', x=300, y=100)
        vertex_12 = Vertex('t12', x=350, y=150)
        vertex_1.edges.append(Edge(vertex_2))
        vertex_2.edges.append(Edge(vertex_3))
        vertex_2.edges.append(Edge(vertex_4))
        vertex_3.edges.append(Edge(vertex_4))
        vertex_4.edges.append(Edge(vertex_5))
        vertex_5.edges.append(Edge(vertex_6))
        vertex_5.edges.append(Edge(vertex_7))
        vertex_8.edges.append(Edge(vertex_9))
        vertex_9.edges.append(Edge(vertex_10))
        vertex_11.edges.append(Edge(vertex_12))
        self.vertexes.extend([vertex_1, vertex_2, vertex_3, vertex_4, vertex_5,
            vertex_6, vertex_7, vertex_8, vertex_9, vertex_10, vertex_11, vertex_12])

    def randomize(self, width, height, pxBox, probability=0.6):
        """Build a width x height grid of vertexes, connecting each pair
        of horizontal/vertical neighbours with the given probability and
        jittering positions within their ``pxBox``-pixel cells.
        """
        def connectVerts(v0, v1):
            # Undirected link: add an edge in both directions.
            v0.edges.append(Edge(v1))
            v1.edges.append(Edge(v0))
        count = 0
        grid = []
        for y in range(height):
            row = []
            for x in range(width):
                value = 't' + str(count)
                count += 1
                v = Vertex(value)
                row.append(v)
            grid.append(row)
        for y in range(height):
            for x in range(width):
                #connect down
                if y < height -1:
                    # BUG FIX: ``randint(0, 1) < probability`` ignored the
                    # requested probability (an int coin flip can never
                    # honour 0.6); compare a uniform [0, 1) draw instead.
                    if random.random() < probability:
                        connectVerts(grid[y][x], grid[y+1][x])
                #connect right
                if x < width -1:
                    if random.random() < probability:
                        connectVerts(grid[y][x], grid[y][x+1])
        boxBuffer = 0.5
        boxInner = pxBox * boxBuffer
        boxInnerOffset = (pxBox - boxInner) / 2
        for y in range(height):
            for x in range(width):
                # NOTE(review): randint(0, 1) snaps each vertex to one of
                # two offsets inside its cell; random.random() would give
                # a uniform jitter -- confirm which was intended.
                grid[y][x].pos = {
                    'x': (x * pxBox + boxInnerOffset + randint(0, 1) * boxInner),
                    'y': (y * pxBox + boxInnerOffset + randint(0, 1) * boxInner)
                }
        for y in range(height):
            for x in range(width):
                self.vertexes.append(grid[y][x])

    def bfs(self, start):
        """Breadth-first search from ``start``; paints every reached
        vertex with one random colour and returns the visited list."""
        queue = [start]
        found = [start]
        start.color = "#"+''.join([random.choice('0123456789ABCDEF') for j in range(6)])
        while len(queue) > 0:
            v = queue[0]
            for edge in v.edges:
                if edge.destination not in found:
                    found.append(edge.destination)
                    queue.append(edge.destination)
                    edge.destination.color = start.color
            queue.pop(0) # TODO: Look at collections.dequeue
        return found

    def get_connected_components(self):
        """Colour each connected component by running BFS from every
        not-yet-visited vertex."""
        searched = []
        for vertex in self.vertexes:
            if vertex not in searched:
                searched = searched + [vertex] + self.bfs(vertex)
|
'''
We create the fibonacci sequence below.
As a refresher, the fibonacci sequence is a recursive sequence in which the last/most-recent term is a sum of the previous two terms
'''
# FIX: the explanatory sentences in this file were bare prose between the
# code (a SyntaxError); they are now comments.

# Here, we define the fibonacci sequence in a manner such that there is
# poor handling of the recursion and thus the program slows down to
# basically a halt for large n (exponential time).
def fibonacci(n):
    """Naive recursive Fibonacci: F(1) = F(2) = 1."""
    if n == 1:
        return 1
    elif n == 2:
        return 1
    elif n > 2:
        return fibonacci(n-1) + fibonacci(n-2)

# Here is how to better write the fibonacci sequence so that running it
# 100 times does not redo the same work again and again.  We achieve this
# through MEMOIZATION: storing the values of recent function calls so
# future calls do not repeat the work.
from functools import lru_cache

@lru_cache(maxsize=None)
def fibonacci_memo(n):
    """Memoized Fibonacci; fast even for n = 100."""
    if n <= 2:
        return 1
    return fibonacci_memo(n-1) + fibonacci_memo(n-2)

# FIX: the original loop called the naive version up to n = 100 (while
# its comment claimed it stopped at 10), which would effectively never
# finish; use the memoized version.
for n in range(1, 101):  # start at 1 and stop at 100
    print(n, ":", fibonacci_memo(n))
#!bin/python
from functools import reduce
# program assumes valid input
class Vertex:
    """Bipartite-graph vertex: a name, a saturation flag and neighbours."""

    def __init__(self, name, saturated):
        self.name = name
        self.saturated = saturated  # True if covered by the matching
        self.neighbours = []

    def addNeighbour(self, vertex):
        """Record an adjacent vertex."""
        self.neighbours.append(vertex)

    def __eq__(self, other):
        # Vertices are identified purely by name.
        return self.name == other.name

    def __copy__(self):
        # Shallow copy: the neighbour list is duplicated, its entries
        # are shared.
        duplicate = Vertex(self.name, self.saturated)
        duplicate.neighbours = self.neighbours.copy()
        return duplicate
def setMinus(set1, set2):
    """Return the elements of ``set1`` that do not appear in ``set2``."""
    return [element for element in set1 if element not in set2]
def inMatching(matching, v1, v2):
    """Return True if the undirected edge (v1, v2) equals the matched pair
    `matching` (a 2-tuple of vertex names), in either orientation."""
    first, second = matching[0], matching[1]
    forward = first == v1.name and second == v2.name
    backward = second == v1.name and first == v2.name
    return forward or backward
# Input file names: one vertex name per line, one edge per line, and an
# (initially empty) matching file.
VERTICE_FILE_NAME = "vertice.txt"
EDGES_FILE_NAME = "edge.txt"
MATCHING_FILE_NAME = "empty.txt"
# init list
vertices = [line.rstrip('\n') for line in open(VERTICE_FILE_NAME)]
edges = [line.rstrip('\n') for line in open(EDGES_FILE_NAME)]
matches = [line.rstrip('\n') for line in open(MATCHING_FILE_NAME)]
# convert to tuples
# NOTE(review): tuple(x) on a string yields a tuple of its characters, so
# this assumes each edge/matching line is exactly two single-character
# vertex names, e.g. "ab" -> ('a', 'b') — confirm against the input files.
edges = list(map((lambda x: tuple(x)), edges))
matches = list(map((lambda x: tuple(x)), matches))
# create bipartition: seed side A with the first vertex, then assign each
# edge's endpoints to opposite sides as their partners become known.
A = [vertices[0]]
B = []
for e in edges:
    if e[0] in A and e[1] not in B:
        B.append(e[1])
    elif e[0] in B and e[1] not in A:
        A.append(e[1])
    elif e[1] in A and e[0] not in B:
        B.append(e[0])
    elif e[1] in B and e[0] not in A:
        A.append(e[0])
    elif e[0] not in A and e[0] not in B and e[1] not in A and e[1] not in B:
        # BUG FIX: the old condition `e[0] not in A and B` tested the
        # truthiness of B (always False while B is empty) instead of
        # membership, so undecidable edges were never deferred.
        # cant determine so move to the back — appending while iterating is
        # intentional here: Python's list iterator will reach the new tail.
        edges.append(e)
# init vertices: a vertex is "saturated" iff its name appears in any edge
# of the current matching (the reduce folds that membership test).
verticesA = dict((
    v, Vertex(
        v, reduce((lambda y, z: y or z[0] == v or z[1] == v), [False] + matches))) for v in A)
verticesB = dict((
    v, Vertex(
        v, reduce((lambda y, z: y or z[0] == v or z[1] == v), [False] + matches))) for v in B)
# set every vertices neighbours (adjacency is recorded on both endpoints)
for e in edges:
    if e[0] in verticesA:
        verticesA[e[0]].addNeighbour(verticesB[e[1]])
        verticesB[e[1]].addNeighbour(verticesA[e[0]])
    if e[0] in verticesB:
        verticesB[e[0]].addNeighbour(verticesA[e[1]])
        verticesA[e[1]].addNeighbour(verticesB[e[0]])
# X: the unsaturated A-side vertices (roots of the alternating forest);
# Y: B-side vertices discovered so far; pr: the growing alternating paths.
X = list(filter((lambda x: not x.saturated), verticesA.values()))
Y = []
orig = X.copy()  # NOTE(review): appears unused after this point — confirm
pr = [[x] for x in X]
while True:
    # Create a level in our alternating level graph: extend every partial
    # path by one unmatched edge into a not-yet-seen B-side vertex.
    l = []
    for strand in pr:
        v = strand[len(strand) - 1]
        for u in v.neighbours:
            if u not in Y:
                s = strand.copy()
                s.append(u)
                l.append(s)
                Y.append(u)
    # No path could be extended -> the matching is maximum (König).
    found = len(l) == 0
    pr = l
    # We have found the maximum matching and minimum cover so we print it
    if found:
        print("matching is:")
        for m in matches:
            print(m[0] + " " + m[1])
        print("Cover is:")
        # Minimum vertex cover: discovered B-side vertices plus the
        # A-side vertices not reachable from an unsaturated root.
        for c in Y + setMinus(verticesA.values(), X):
            print(c.name)
        break
    # we have an augmenting path if some new endpoint is unsaturated
    vertex = Vertex("", False)
    cont = False
    s = []
    for strand in pr:
        v = strand[len(strand) - 1]
        if not v.saturated:
            cont = True
            s = strand
    if cont:
        # Augment: drop the matched edges along the path...
        # NOTE(review): matches is mutated while being iterated — relies on
        # list-iterator semantics; confirm no matched edge is skipped.
        for m in matches:
            for j in range(len(s) - 1):
                if inMatching(m, s[j], s[j + 1]):
                    matches.remove(m)
                    m = ""
        s[0].saturated = True
        s[len(s) - 1].saturated = True
        # ...and add the alternating (even, odd) pairs as the new matching.
        for n in range(len(s)):
            if n % 2 == 0:
                m = s[n].name
            else:
                matches.append((m, s[n].name))
        # Restart the search from the remaining unsaturated roots.
        X = list(filter((lambda x: not x.saturated), verticesA.values()))
        Y = []
        pr = [[x] for x in X]
        continue
    # Create the matching layer of our alternating level graph: follow the
    # matched edge back from each frontier vertex.
    for strand in pr:
        v = strand[len(strand) - 1]
        for u in v.neighbours:
            for m in matches:
                if inMatching(m, u, v):
                    strand.append(u)
                    X.append(u)
|
import re
from dataclasses import dataclass
@dataclass
class Passport:
    """One passport record (Advent of Code 2020 day 4 style fields)."""
    byr: int  # birth year
    iyr: int  # issue year
    eyr: int  # expiration year
    hgt: str  # height with unit suffix, e.g. "170cm" or "60in"
    hcl: str  # hair colour, "#rrggbb"
    ecl: str  # eye colour code
    pid: str  # 9-digit passport id

    def is_valid(self) -> bool:
        """Return True iff every field satisfies its validity rule.

        Unlike the original, a malformed height (e.g. "abc" or a bare
        number) returns False instead of raising ValueError from int().
        """
        if not (1920 <= self.byr <= 2002):
            return False
        if not (2010 <= self.iyr <= 2020):
            return False
        if not (2020 <= self.eyr <= 2030):
            return False
        # Height must be digits followed by a cm/in unit.
        hgt_match = re.fullmatch(r'(\d+)(cm|in)', self.hgt)
        if not hgt_match:
            return False
        value = int(hgt_match.group(1))
        units = hgt_match.group(2)
        if units == "cm" and not (150 <= value <= 193):
            return False
        if units == "in" and not (59 <= value <= 76):
            return False
        if not re.fullmatch(r'#[a-f0-9]{6}', self.hcl):
            return False
        if not re.fullmatch(r'amb|blu|brn|gry|grn|hzl|oth', self.ecl):
            return False
        if not re.fullmatch(r'[0-9]{9}', self.pid):
            return False
        return True
def parse_input():
    """Read input.txt (blank-line separated records of "key:value" pairs)
    and return Passport objects for every record that carries all seven
    required fields."""
    with open("input.txt") as f:
        raw_records = f.read().split("\n\n")
    passports = []
    for record in raw_records:
        # Pairs are separated by spaces and/or newlines; str.split()
        # handles both, so no explicit newline replacement is needed.
        fields = dict(pair.split(":") for pair in record.split())
        if all_fields_present(fields):
            passports.append(Passport(byr=int(fields['byr']),
                                      iyr=int(fields['iyr']),
                                      eyr=int(fields['eyr']),
                                      hgt=fields['hgt'],
                                      hcl=fields['hcl'],
                                      ecl=fields['ecl'],
                                      pid=fields['pid']))
    return passports
def all_fields_present(passport):
    """Return True iff `passport` (a mapping) carries every required key
    (the optional `cid` field is deliberately not required)."""
    required = ('byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid')
    return all(key in passport for key in required)
def problem_1():
    """Part 1: count passports that have all required fields present."""
    return len(parse_input())
def problem_2():
    """Part 2: count passports whose field values all pass validation."""
    return sum(1 for passport in parse_input() if passport.is_valid())
def main():
    """Print the answers to both puzzle parts, one per line."""
    print(problem_1())
    print(problem_2())


if __name__ == "__main__":
    main()
|
from math import factorial as fact

MOD = 1000000007
# For each test case read "n m" and print C(m+n, n) modulo 1e9+7.
for _ in range(int(input())):
    n, m = map(int, input().split())
    # (m+n)! / (m! * n!) is an exact integer, so floor division is exact;
    # reduce modulo MOD only after the division.
    print((fact(m + n) // (fact(m) * fact(n))) % MOD)
|
import math  # NOTE(review): unused in this section; kept because later code may rely on it

sales = float(input("Enter monthly sales: "))
# while sales > 0:
# Commission tiers. Open-ended `< upper` bounds close the gaps the old
# `>= lower and <= upper` integer checks left between tiers: a float such
# as 14999.5 previously matched no tier and fell through to the 0.18 default.
if sales < 10000:
    rate = 0.10
elif sales < 15000:
    rate = 0.12
elif sales < 18000:
    rate = 0.14
elif sales < 22000:
    rate = 0.16
else:
    rate = 0.18
def main():
    """Compute the month's pay: sales * commission rate minus the advance
    already paid; a negative result means the salesperson owes money."""
    global sales
    advance = get_advanced_pay()
    commission = get_commission_rate()
    pay = (sales * commission) - advance
    if pay < 0:
        print("You must reimburse the company with an amount of $", abs(pay))
    else:
        print("Your monthly pay is $", pay)
def get_advanced_pay():
    """Prompt until the user enters an advance in (0, 2000]; return it.

    Previously a zero or negative entry silently re-prompted with no
    explanation; now each rejection prints why.
    """
    while True:
        advance = float(input("Enter advanced pay: "))
        if advance > 0 and advance <= 2000:
            break
        elif advance > 2000:
            print("Advance pay cannot be greater than $2000")
            continue
        else:
            # Tell the user why the loop is re-prompting.
            print("Advance pay must be greater than $0")
    return advance
def get_commission_rate():
    """Return the commission rate chosen at module level from `sales`."""
    return rate
main()
|
import os
import shutil
# open the file, collect the wanted filenames, close the file
with open('/foo/list.txt') as names_file:
    # .strip() removes trailing whitespace and line breaks; a set gives
    # O(1) membership tests in the loop below (the old list was O(n)).
    names = {line.strip() for line in names_file}
dir_src = '/foo/src'
dir_dst = '/foo/target/'
# Copy every file from the source dir whose name appears in the list.
for file in os.listdir(dir_src):
    if file in names:
        src_file = os.path.join(dir_src, file)
        dst_file = os.path.join(dir_dst, file)
        shutil.copy(src_file, dst_file)
|
from glorirep import factions
# Print the names of factions whose reputation flag is False.
# NOTE(review): `== False` also matches 0 (unlike `is False`) — preserved.
# Print is written in the parenthesized form, valid in Python 2 and 3.
for faction_name, completed in factions.items():
    if completed == False:
        print(faction_name)
|
import matplotlib.pyplot as plt
import sympy as sp
#plot the data
from matplotlib.ticker import MultipleLocator
"""when y’s value equals 1 color is “red” ,else “green”, and set the x axis as ‘x_1’ ,
set the y axis as ‘x_2’ , because on the plot the x-axis is the value of parameter ‘x1 ’,
the y-axis is the value of the second parameter ‘x1’ , and the parameter of ‘str’ is
the title of the plot"""
def scattersth(x1, x2, y, str):
    """Scatter-plot the points (x1[i], x2[i]): red where y[i] == 1 and
    green otherwise. Axes are labelled 'x1'/'x2' and `str` is the title."""
    pos_a, pos_b = [], []
    neg_a, neg_b = [], []
    # Partition the coordinates by label.
    for a, b, label in zip(x1, x2, y):
        if label == 1:
            pos_a.append(a)
            pos_b.append(b)
        else:
            neg_a.append(a)
            neg_b.append(b)
    plt.scatter(pos_a, pos_b, label='y=1', color='red', s=5)
    plt.scatter(neg_a, neg_b, label='y=-1', color='green', s=5)
    plt.xlabel('x1')
    plt.ylabel('x2')
    plt.title(str)
|
#!/usr/bin/env python3
import sys, glob, os
import json
# Usage guard: expect exactly one glob pattern argument.
if (len(sys.argv) != 2):
    print('show_result.py "result_dir_pattern"')
    exit(0)  # NOTE(review): a usage error arguably warrants a non-zero status
pattern = sys.argv[1]
directories = glob.glob(pattern)
buffer = []
print('MODEL,TESTSET,ACC,TER,N:HYP:REF:MAX,DATE')
for result_dir in directories:  # renamed from `dir`, which shadowed the builtin
    try:
        # Directory name encodes: date__service__test_set__max_utts
        date, service, test_set, max_utts = result_dir.split('__')
        with open(os.path.join(result_dir, 'RESULTS.txt'), 'r', encoding='utf8') as f:
            for line in f:
                if 'token_error_rate' in line:
                    r = json.loads(line)
                    TER = r['token_error_rate']
                    R = r['num_ref_utts']
                    H = r['num_hyp_utts']
                    n = r['num_eval_utts']
                    buffer.append(F"{service},{test_set},{100-TER:.2f},{TER:.2f},{n}:{H}:{R}:{max_utts},{date}")
    except (ValueError, OSError, KeyError):
        # Narrowed from a bare `except:`: skip directories that don't match
        # the naming scheme, lack RESULTS.txt, or carry incomplete records,
        # without also swallowing KeyboardInterrupt/SystemExit.
        continue
buffer.sort()
for line in buffer:
    print(line)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'test.py'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
import sys,re,json,zipfile,random,string
from PyQt5.QtWidgets import *
from PyQt5 import QtCore
from PyQt5 import QtGui
import signin
import dkdrive_2
import request,time
import webbrowser,OC_api,os
# NOTE(review): hard-coded admin credentials and server URL — these should
# come from configuration or a secrets store, not source code.
login_url = 'http://10.20.30.25'
user_name = 'admin'
user_pass = 'admin'
# Module-level OC client used by the login dialog.
user_oc = OC_api.rewrite_oc(login_url, user_name, user_pass)
#
# Local configuration: ./conf/config is a JSON file; `data_dir` is the
# default local folder offered in the save/upload dialogs.
cwd = os.getcwd()
conf_dir = os.path.join(cwd , 'conf')
conf_file = os.path.join(cwd,'conf/config')
with open(conf_file,mode='r',encoding='utf-8') as f:
    oc_config = json.load(f)
data_dir = oc_config['data_dir']
#
class Mylabel(QLabel):
    """QLabel that emits a `clicked` signal when the left mouse button
    is released over it (QLabel has no click signal of its own)."""
    clicked = QtCore.pyqtSignal()

    def mouseReleaseEvent(self, QMouseEvent):
        left_released = QMouseEvent.button() == QtCore.Qt.LeftButton
        if left_released:
            self.clicked.emit()
#
class sign_in_page(QDialog, signin.Ui_Dialog):
    """Login dialog: checks the entered credentials against the OC backend
    and opens the main drive window on success."""

    def __init__(self, parent=None):
        super(sign_in_page, self).__init__(parent)
        self.setupUi(self)
        # Mask the password input.
        self.user_passwd.setEchoMode(QLineEdit.Password)

    def sign_in(self):
        """Attempt to log in with the entered name/password."""
        user_name = self.user_name.text()
        user_passwd = self.user_passwd.text()
        try:
            # BUG FIX: the module-level client is `user_oc`; the old code
            # called the undefined name `oc`, so every attempt raised
            # NameError, which the broad except reported as a failed login.
            user_oc.login(user_name.strip(), user_passwd.strip())
        except Exception as e:
            print(e)
            self.user_name.setText("")
            self.user_passwd.setText("")
            self.check_ret.setText("wrong")
        else:
            # NOTE(review): `dkd` is only created in a commented-out line of
            # the __main__ block — confirm which window should open here.
            dkd.show()
            myWin.close()

    def sign_up(self):
        # Registration is not implemented yet.
        pass
#
class general_dialog(QWidget):
    """Small fixed-size popup that shows a share link (plus verification
    code) and offers a copy-to-clipboard button."""
    def __init__(self,sharelink_info):
        super(general_dialog,self).__init__()
        self.setObjectName("general_dialog")
        # Fixed 629x104 window.
        self.resize(629, 104)
        self.setMinimumSize(QtCore.QSize(629, 104))
        self.setMaximumSize(QtCore.QSize(629, 104))
        self.copy_btn = QPushButton(self)
        self.copy_btn.setGeometry(QtCore.QRect(550, 40, 41, 23))
        self.copy_btn.setObjectName("copy_btn")
        self.copy_btn.setText('复制')
        self.label_context = QLabel(self)
        self.label_context.setGeometry(QtCore.QRect(50, 40, 491, 21))
        self.label_context.setObjectName("label_context")
        self.label_context.setText(sharelink_info)
        # Copy the displayed link text when the button is pressed.
        self.copy_btn.clicked.connect(lambda :self.copy_text(sharelink_info))

    def copy_text(self,text):
        """Put `text` on the system clipboard, confirm, and close."""
        clipboard = QApplication.clipboard()
        clipboard.setText(text)
        ret = QMessageBox.information(self,'','复制成功')
        self.close()
#
class dkdrive_page2(QMainWindow, dkdrive_2.Ui_MainWindow):
    """Main drive window: remote file table with search, navigation,
    upload/download threads, and a share-link context menu.

    The per-row population logic (icon+name widget, size, modify time) was
    previously copy-pasted in four places; it now lives in _populate_row /
    _fill_table, and size formatting in _format_size.
    """

    def __init__(self, parent=None):
        super(dkdrive_page2, self).__init__(parent)
        self.setupUi(self)
        # Left navigation list: frameless, no scrollbars.
        self.left_widget.setFrameShape(QListWidget.NoFrame)
        self.left_widget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.left_widget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        # Transfer tables are read-only and headerless.
        for table in [self.finish_table, self.download_table, self.upload_table]:
            table.setEditTriggers(QAbstractItemView.NoEditTriggers)
            table.horizontalHeader().setVisible(False)
            table.verticalHeader().setVisible(False)
        # Main file table: [icon, name, modified, size].
        self.table_test = QTableWidget(self.my_drive)
        self.table_test.setGeometry(QtCore.QRect(-1, 50, 1111, 701))
        self.table_test.setObjectName("table_test")
        self.table_test.setColumnCount(4)
        self.table_test.setHorizontalHeaderLabels(['', '文件名', '修改时间', '大小'])
        self.table_test.setStyleSheet('font: 12pt "微软雅黑";')
        self.search_btn.clicked.connect(self.func_search_btn)
        self.search_edit.returnPressed.connect(self.func_search_btn)
        self.return_btn.clicked.connect(self.func_return_btn)
        self.current_dir_edit.returnPressed.connect(self.func_return_btn)
        self.upload_dir.clicked.connect(self.func_upload_dir_btn)
        self.upload_file.clicked.connect(self.func_upload_file_btn)
        self.table_test.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.table_test.resizeColumnsToContents()
        self.table_test.setColumnWidth(0, 20)
        self.table_test.setColumnWidth(1, 500)
        self.table_test.setColumnWidth(2, 250)
        self.table_test.setColumnWidth(3, 100)
        for col in range(4):
            self.table_test.horizontalHeader().setSectionResizeMode(col, QHeaderView.Fixed)
        self.table_test.horizontalHeader().setHighlightSections(False)
        self.table_test.verticalHeader().setVisible(False)
        self.table_test.setVerticalHeaderLabels(['a', 'b'])
        self.table_test.setShowGrid(False)
        self.table_test.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.table_test.customContextMenuRequested.connect(self.generateMenu)
        # Each window keeps its own OC client.
        self.user_oc = OC_api.rewrite_oc(login_url, user_name, user_pass)
        self.files_refresh()
        self.my_count, self.dir_list = self.user_oc._file_list()
        self.whole_files = self.user_oc._whole_file()
        self._fill_table(self.dir_list, count=self.my_count)

    # ---- shared table helpers -------------------------------------------

    @staticmethod
    def _format_size(size):
        """Render a byte count as 'N B' / 'X.XX KB|MB|GB|TB'; '-' for None."""
        if str(size) == 'None':
            return '-'
        if size < 1024:
            return str(size) + ' B'
        for threshold, unit in ((1099511627776, 'TB'), (1073741824, 'GB'),
                                (1048576, 'MB'), (1024, 'KB')):
            if size >= threshold:
                return format(float(size) / threshold, '.2f') + ' ' + unit

    def _populate_row(self, row, path):
        """Fill one table row for `path`: name widget, size and mtime."""
        ft = 'dir' if self.user_oc.is_dir(path) else 'file'
        self.table_test.setCellWidget(row, 1, self.show_files(my_file_path=path, file_type=ft))
        info = self.user_oc.file_info(path)
        size_item = QTableWidgetItem(self._format_size(info['file_size']))
        self.table_test.setItem(row, 3, size_item)
        date_item = QTableWidgetItem(str(info['file_modify']))
        date_item.setTextAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
        self.table_test.setItem(row, 2, date_item)

    def _fill_table(self, dir_list, count=None):
        """Replace the table contents with `dir_list`, one entry per row.

        Records the list and row count so index-based handlers (context
        menu, clicks) stay in sync; enumerate() also fixes the old
        `.index(d)` lookup, which collided on duplicate paths.
        """
        if count is None:
            count = len(dir_list)
        self.table_test.setRowCount(count)
        self.dir_list = dir_list
        self.my_count = count  # keep the context-menu row gate current
        for row, path in enumerate(dir_list):
            self._populate_row(row, path)

    # ---- listing cache ---------------------------------------------------

    def files_refresh(self):
        """Cache the remote listing in ./f_lst (JSON); return the listing."""
        listing = [list(row) for row in self.user_oc._whole_rets()]
        if os.path.exists('f_lst'):
            with open('f_lst', mode='r', encoding='utf-8') as f:
                self.gg = json.load(f)
            if self.gg != listing:
                with open('f_lst', mode='w', encoding='utf-8') as f:
                    json.dump(listing, f)
        else:
            with open('f_lst', mode='w', encoding='utf-8') as f:
                json.dump(listing, f)
        return listing

    # ---- context menu ----------------------------------------------------

    def generateMenu(self, pos):
        """Right-click menu on a file-name cell: download / delete / share /
        rename / move (only download and share are implemented)."""
        cell = self.table_test.selectionModel().selection().indexes()
        if len(cell) == 0:
            return
        self.row_index = cell[0].row()
        column_index = cell[0].column()
        if not (self.row_index < self.my_count and column_index == 1):
            return
        menu = QMenu()
        item_download = menu.addAction(u'下载')
        item_delete = menu.addAction(u'删除')
        item_share = menu.addAction(u'分享')
        item_rename = menu.addAction(u'重命名')
        item_mv = menu.addAction(u'移动')
        action = menu.exec_(self.table_test.mapToGlobal(pos))
        if action == item_download:
            choice = self.dir_list[self.row_index]
            if self.user_oc.is_dir(choice):
                ft = 'dir'
                target = self.select_local_datapath(ft)
            else:
                ft = 'file'
                info = self.user_oc.file_info(choice)
                default_name = os.path.basename(info['file_name'])
                target = self.select_local_datapath(ft, file_localte=default_name)
            self.downloadThread = download_Thread()
            self.downloadThread.setfun_attr(choice, ft, target)
            self.downloadThread.start()
            self.downloadThread.trigger.connect(self.link_donothing)
        elif action == item_delete:
            print(2)  # TODO: delete not implemented
        elif action == item_share:
            choice = self.dir_list[self.row_index]
            # 5-character alphanumeric verification code for the link.
            check_code = ''.join(random.sample(string.ascii_letters + string.digits, 5))
            info = self.user_oc._share_file_link(choice, password=check_code)
            value = info.get_link() + ' 验证码:' + check_code
            self.sharelink = general_dialog(value)
            self.sharelink.show()
        elif action == item_rename:
            print(4)  # TODO: rename not implemented
        elif action == item_mv:
            print(5)  # TODO: move not implemented

    # ---- navigation ------------------------------------------------------

    def label_clicked(self, parent_dir=None, new_item_reload=None):
        """Open the clicked entry (or `parent_dir` when supplied)."""
        if parent_dir is None:
            cell = self.table_test.selectionModel().selection().indexes()
            row_index = cell[0].row()
            column_index = cell[0].column()
        else:
            column_index = 1
        if column_index != 1:
            return
        select_file = self.dir_list[row_index] if parent_dir is None else parent_dir
        self.current_dir_edit.setText(str(self.user_oc.file_info(select_file)['file_path']))
        self.next_my_count, self.next_dir_list = self.user_oc._file_list(select_file)
        if self.next_my_count == 0 and self.user_oc.is_dir(select_file):
            # Empty directory: just clear the table.
            self.table_test.setRowCount(0)
        elif self.next_my_count == 0 and not self.user_oc.is_dir(select_file):
            # A plain file was clicked: nothing to open.
            pass
        else:
            self._fill_table(self.next_dir_list, count=self.next_my_count)

    def link_donothing(self):
        """No-op slot for signals that need a connection but no action."""
        pass

    def func_search_btn(self):
        """Show every remote path containing the search text."""
        needle = self.search_edit.text().strip()
        matches = [path for path in self.whole_files if needle in path]
        self._fill_table(matches)

    def func_return_btn(self):
        """Navigate to the parent of the directory shown in the path box."""
        current_dir = self.current_dir_edit.text().strip()
        while current_dir.endswith('/'):
            current_dir = current_dir[:-1]
        if current_dir + '/' not in self.whole_files:
            parent_dir = '/'
        else:
            parent_dir = os.path.dirname(current_dir)
        self.label_clicked(parent_dir=str(parent_dir))

    # ---- upload ----------------------------------------------------------

    def _normalized_current_dir(self):
        """Trim the typed path character-by-character until it names a known
        remote directory; write it back to the path box and return it with
        a trailing '/' (except for the root)."""
        current_dir = self.current_dir_edit.text().strip()
        self.whole_files.append('/')  # make the root always resolvable
        while current_dir + '/' not in self.whole_files:
            if current_dir == '/':
                break
            current_dir = current_dir[:-1]
        self.current_dir_edit.setText(current_dir)
        if current_dir != '/':
            current_dir += '/'
        return current_dir

    def func_upload_file_btn(self):
        """Pick local files and upload each to the current remote dir."""
        current_dir = self._normalized_current_dir()
        fname, _ = QFileDialog.getOpenFileNames()
        for file in fname:
            self.uploadThread = upload_Thread()
            self.uploadThread.setfun_attr(current_dir, 'file', file)
            self.uploadThread.start()
            self.uploadThread.trigger.connect(lambda: self.refresh(current_dir))
        self.message('上传完成')

    def func_upload_dir_btn(self):
        """Pick a local directory and upload it to the current remote dir."""
        current_dir = self._normalized_current_dir()
        local_file = oc_config['data_dir']
        fname = QFileDialog.getExistingDirectory(self, '目录', local_file)
        self.uploadThread = upload_Thread()
        self.uploadThread.setfun_attr(current_dir, 'dir', fname)
        self.uploadThread.start()
        self.uploadThread.trigger.connect(lambda: self.refresh(current_dir))

    # ---- widgets ---------------------------------------------------------

    def show_files(self, my_file_path=None, file_type='dir'):
        """Build the icon + clickable-name widget shown in column 1."""
        my_widget = QWidget()
        icon_label = Mylabel()
        icon_label.setText('')
        if file_type == 'file':
            icon_label.setStyleSheet('image: url(:/my_pic/file_icon.png);')
        else:
            icon_label.setStyleSheet('image: url(:/my_pic/dir_icon.png);')
        icon_label.clicked.connect(self.link_donothing)
        name_label = Mylabel()
        name_label.setText(str(my_file_path))
        name_label.setStyleSheet('font: 12pt "微软雅黑";')
        name_label.clicked.connect(self.label_clicked)
        # NOTE(review): the layout is constructed with the window as parent
        # and then re-assigned to my_widget — preserved as-is; verify intent.
        my_hlayout = QHBoxLayout(self, spacing=10)
        my_hlayout.addWidget(icon_label)
        my_hlayout.addWidget(name_label)
        my_hlayout.setStretch(0, 1)
        my_hlayout.setStretch(1, 25)
        my_hlayout.setContentsMargins(0, 0, 0, 0)
        my_widget.setLayout(my_hlayout)
        return my_widget

    def refresh(self, current_dir):
        """Reload `current_dir` into the table after an upload finishes."""
        print(current_dir)
        num, dir_list = self.user_oc._file_list(file_path=current_dir)
        print(num, dir_list)
        self.num = num
        self._fill_table(dir_list, count=num)
        self.message('上传完成')

    def select_local_datapath(self, ft, file_localte='example'):
        """Ask where to save a download: a directory for `ft == 'dir'`, a
        file path (pre-filled with `file_localte`) for `ft == 'file'`."""
        if ft == 'dir':
            local_file = oc_config['data_dir']
            fname = QFileDialog.getExistingDirectory(self, '目录', local_file)
            return fname
        elif ft == 'file':
            local_file = oc_config['data_dir'] + '\\' + file_localte
            fname = QFileDialog.getSaveFileName(self, '存储地址', local_file, "All Files (*);;Text Files (*.txt)")
            if fname[0]:
                return fname[0]
            return None

    def message(self, text):
        """Show a modal information box with `text`."""
        QMessageBox.information(self, '提示', text, QMessageBox.Yes)
def trige_con():
    """Debug slot: announce that a write has finished."""
    print('finish_write')
class download_Thread(QtCore.QThread):
    """Worker thread that downloads either a single file or a whole
    directory (fetched as a zip, then unpacked locally)."""
    trigger = QtCore.pyqtSignal()  # emitted when the download completes

    def __init__(self):
        super(download_Thread, self).__init__()
        self.user_oc = OC_api.rewrite_oc(login_url, user_name, user_pass)

    def setfun_attr(self, choice, ft, local_path):
        """Record what to download (`choice`), its type ('dir'/'file'),
        and the local destination path."""
        self.choice = choice
        self.ft = ft
        self.data_locate = local_path

    def data_download(self):
        if self.ft == 'dir':
            # Random 20-digit name for the temporary zip archive
            # (the old loop's `i += 1` was dead code and is gone).
            num = ''.join(str(random.randint(0, 9)) for _ in range(20))
            local_filename = self.data_locate + '\\' + num + '.zip'
            self.user_oc.get_dir_zip(self.choice, local_filename)
            # Unpack next to the archive; the context manager guarantees the
            # zip handle is closed even if extraction raises.
            with zipfile.ZipFile(local_filename) as zip_file:
                zip_file.extractall(self.data_locate)
            os.remove(local_filename)
        elif self.ft == 'file':
            self.user_oc.get_file(self.choice, local_file=self.data_locate)

    def run(self):
        self.data_download()
        self.trigger.emit()
class upload_Thread(QtCore.QThread):
    """Worker thread that uploads a local file or directory."""
    trigger = QtCore.pyqtSignal()  # emitted when the upload completes

    def __init__(self):
        super(upload_Thread, self).__init__()
        # BUG FIX: data_upload() uses self.user_oc, but __init__ never set
        # it, so every upload died with AttributeError. Mirror the client
        # setup download_Thread already performs.
        self.user_oc = OC_api.rewrite_oc(login_url, user_name, user_pass)

    def setfun_attr(self, user_choice, file_type, data_locate):
        """Record the remote target dir, the type ('dir'/'file'), and the
        local source path."""
        self.choice = user_choice
        self.ft = file_type
        self.data_locate = data_locate

    def data_upload(self):
        if self.ft == 'dir':
            self.user_oc._put_directory(self.choice, self.data_locate)
        elif self.ft == 'file':
            self.user_oc._put_file(self.choice, self.data_locate)

    def run(self):
        self.data_upload()
        self.trigger.emit()
if __name__ == '__main__':
    app = QApplication(sys.argv)
    # Splash screen displayed while the main window initialises.
    splash = QSplashScreen(QtGui.QPixmap(":/my_pic/install.jpg"))
    #splash.show()
    splash.showMessage(u'程序正在加载。。。', QtCore.Qt.AlignCenter, QtCore.Qt.red)
    #time.sleep(3)
    app.processEvents()
    # NOTE(review): sign_in_page.sign_in refers to a `dkd` window that is
    # only created by the commented-out line below — confirm the intended
    # startup flow (login dialog vs. direct drive window).
    #dkd = dkdrive_page2()
    myWin = dkdrive_page2()
    myWin.show()
    splash.finish(myWin)
    sys.exit(app.exec_())
# -*- coding: utf-8 -*-
# /usr/bin/python3
'''
By kyubyong park. kbpark.linguist@gmail.com.
https://www.github.com/kyubyong/g2p
'''
from __future__ import print_function
import tensorflow as tf
import g2p_th
from g2p_th.train import *
from nltk import pos_tag
from nltk.corpus import cmudict
import nltk
from pythainlp.tokenize import word_tokenize
#from train import Graph, hp, load_vocab
import numpy as np
import codecs
import re
import os
import dill
import unicodedata
from g2p_th.expand import normalize_numbers
# Load the bundled 5k-entry pronunciation dictionary (dill-pickled dict).
with open(os.path.join(os.path.dirname(g2p_th.__file__),'dict5k.data'), 'rb') as in_strm:
    cmu = dill.load(in_strm)
from builtins import str as unicode
dirname = os.path.dirname(__file__)
# Load vocab (grapheme/phoneme <-> index maps)
g2idx, idx2g, p2idx, idx2p = load_vocab()
# Load Graph: build the inference graph once, pinned to CPU.
g = tf.Graph()
with g.as_default():
    with tf.device('/cpu:0'):
        graph = Graph(); print("Graph loaded for g2p")
        saver = tf.train.Saver()
# CPU-only session config with a negligible GPU memory reservation.
config = tf.ConfigProto(
    device_count={'GPU' : 0},
    gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.0001)
)
g_sess = None # global session, managed by the Session context manager below
class Session: # make/remove global session
    """Context manager owning the module-wide TF session (`g_sess`).

    While the context is active, g2p() reuses the shared session instead
    of opening a fresh one per call. Nesting is rejected.
    """

    def __enter__(self):
        global g_sess
        if g_sess is not None:  # idiomatic identity check (was `!= None`)
            raise Exception('Session already exist in g2p')
        g_sess = tf.Session(graph=g, config=config)
        saver.restore(g_sess, tf.train.latest_checkpoint(os.path.join(dirname, hp.logdir)))

    def __exit__(self, exc_type, exc_val, exc_tb):
        global g_sess
        g_sess.close()
        g_sess = None
def predict(words, sess):
    '''
    Returns predicted pronunciation of `words` which do NOT exist in the dictionary.
    :param words: A list of words.
    :return: pron: A list of phonemes
    '''
    # Process at most hp.batch_size words per pass; recurse on the overflow
    # and append its results after this batch's.
    if len(words) > hp.batch_size:
        after = predict(words[hp.batch_size:], sess)
        words = words[:hp.batch_size]
    else:
        after = []
    # Encode each word (with an "E" end marker) as grapheme indices.
    x = np.zeros((len(words), hp.maxlen), np.int32) # 0: <PAD>
    for i, w in enumerate(words):
        for j, g in enumerate((w + "E")[:hp.maxlen]):
            x[i][j] = g2idx.get(g, 2) # 2:<UNK>
    ## Autoregressive inference: predict one output position per step,
    ## feeding the growing prediction back in as `graph.y`.
    preds = np.zeros((len(x), hp.maxlen), np.int32)
    for j in range(hp.maxlen):
        _preds = sess.run(graph.preds, {graph.x: x, graph.y: preds})
        preds[:, j] = _preds[:, j]
    # convert to string, truncating each sequence at its <EOS> marker
    pron = []
    for i in range(len(preds)):
        p = [u"%s" % unicode(idx2p[idx]) for idx in preds[i]] # Make p into unicode.
        if "<EOS>" in p:
            eos = p.index("<EOS>")
            p = p[:eos]
        pron.append(p)
    return pron + after
# Construct homograph dictionary
def token2pron(token):
    '''
    Returns pronunciation of word based on its pos.
    :param token: A tuple of (word, pos)
    :return: A list of phonemes. If word is not in the dictionary, [] is returned.
    '''
    word = token
    # Raw string avoids the invalid-escape SyntaxWarning "\w" triggers on
    # modern Python; the matched pattern is unchanged.
    if re.search(r"\w", word) is None:
        # Pure punctuation/whitespace: pass the token through unchanged.
        pron = [word]
    elif word in cmu: # CMU dict
        pron = cmu[word][0]
    else:
        # Out-of-vocabulary: signal the caller to run the neural model.
        return []
    return pron
def tokenize(text):
    '''
    Splits text into `tokens` using pythainlp's Thai-aware word tokenizer.
    :param text: A string.
    :return: A list of tokens (string).
    '''
    #text = re.sub('([.,?!]( |$))', r' \1', text)
    return word_tokenize(text)
def g2p(text):
    '''
    Returns the pronunciation of text.
    :param text: A string. A sequence of words.
    :return: A list of phonemes.
    '''
    # normalization: strip combining marks (accents) and lowercase
    text = unicode(text)
    #text = normalize_numbers(text)
    text = ''.join(char for char in unicodedata.normalize('NFD', text)
                   if unicodedata.category(char) != 'Mn')  # Strip accents
    text = text.lower()
    #text = re.sub("[^ a-z'.,?!\-]", "", text)
    #text = text.replace("i.e.", "that is")
    #text = text.replace("e.g.", "for example")
    # tokenization
    words = tokenize(text)
    tokens = words
    #tokens = pos_tag(words) # tuples of (word, tag)
    # g2p: dictionary lookup first; collect OOV tokens and where their
    # pronunciations must be spliced back in.
    oovs, u_loc = [], []
    ret = []
    for token in tokens:
        pron = token2pron(token)  # list of phonemes
        if pron == []:  # oov
            # BUG FIX: with pos_tag disabled each token is a plain string,
            # so append the token itself — token[0] was only its first
            # character (a leftover from the (word, tag) tuple days).
            oovs.append(token)
            u_loc.append(len(ret))
        ret.extend(pron)
        ret.extend([" "])
    if len(oovs) > 0:
        global g_sess
        if g_sess is not None:  # reuse the shared session when one exists
            prons = predict(oovs, g_sess)
            # Splice back-to-front so earlier insertion points stay valid.
            for i in range(len(oovs) - 1, -1, -1):
                ret = ret[:u_loc[i]] + prons[i] + ret[u_loc[i]:]
        else:  # If global session is not defined, make new one as local.
            with tf.Session(graph=g, config=config) as sess:
                saver.restore(sess, tf.train.latest_checkpoint(os.path.join(dirname, hp.logdir)))
                prons = predict(oovs, sess)
                for i in range(len(oovs) - 1, -1, -1):
                    ret = ret[:u_loc[i]] + prons[i] + ret[u_loc[i]:]
    # Drop the trailing separator space.
    return ret[:-1]
if __name__ == '__main__':
    # Smoke test: convert a Thai word to its phoneme sequence.
    texts = ["ต้นตาล"] # newly coined word
    for text in texts:
        out = g2p(text)
        print(out)
|
import cx_Oracle
import os, sys, json
import conf
# Connection parameters come from a JSON blob stored in conf.logdate.
j1 = json.loads(conf.logdate)
login = j1["login"]
passwords = j1["passwords"]
ip = j1["ip"]
date_name = j1["date_name"]
try:
    # DSN string: user/password@host/service
    c_con = login + "/" + passwords + "@" + ip + "/" + date_name
    #con = cx_Oracle.connect('login/passwords@ip/date_name')
    con = cx_Oracle.connect(c_con)
    print("Connection successful")
except Exception as e:
    # Narrowed from a bare `except:` and now reports the cause instead of
    # silently hiding it.
    print("Connection not successful")
    print(e)
|
import os
import re
import codecs
import pandas as pd
from ..util import defines
from ..util import file_handling as fh
from ..preprocessing import labels
from ..preprocessing import data_splitting as ds
import html
import common
from codes import code_names
def output_responses(dataset):
    """Write one HTML page per response in `dataset`, visualising unigram
    model coefficients (red/blue) and LSTM element probabilities (green),
    and return (train_words, test_words): counts of "focal" words whose
    RNN probability reached 0.5, split by train/test membership.
    (Python 2 module: print statements are kept as-is.)
    """
    print dataset
    output_dir = fh.makedirs(defines.web_dir, 'DRLD')
    rnn_dir = fh.makedirs(defines.exp_dir, 'rnn', 'bayes_opt_rnn_LSTM_reuse_mod_34_rerun', 'fold0', 'responses')
    blm_dir = fh.makedirs(defines.exp_dir, 'Democrat-Dislikes_Democrat-Likes_Republican-Dislikes_Republican-Likes', 'test_fold_0', 'L1LR_all_groups_a0', 'models')
    predictions_dir = fh.makedirs(defines.exp_dir, 'Democrat-Dislikes_Democrat-Likes_Republican-Dislikes_Republican-Likes', 'test_fold_0', 'L1LR_all_groups_a0', 'predictions')
    train_pred = pd.read_csv(fh.make_filename(predictions_dir, dataset + '_train', 'csv'), header=0, index_col=0)
    test_pred = pd.read_csv(fh.make_filename(predictions_dir, dataset + '_test', 'csv'), header=0, index_col=0)
    text_file_dir = fh.makedirs(defines.data_dir, 'rnn')
    text = fh.read_json(fh.make_filename(text_file_dir, 'ngrams_n1_m1_rnn', 'json'))
    true = labels.get_labels([dataset])
    all_items = ds.get_all_documents(dataset)
    word_list = common.get_word_list(true.columns, blm_dir)
    train_words = {}
    test_words = {}
    for i in all_items:
        true_i = true.loc[i]
        rnn_file = fh.make_filename(rnn_dir, i, 'csv')
        rnn_vals = pd.read_csv(rnn_file, header=-1)
        rnn_vals.columns = true.columns
        # Which prediction table does this item belong to?
        if i in train_pred.index:
            pred_i = train_pred.loc[i]
            train_item = True
        else:
            pred_i = test_pred.loc[i]
            train_item = False
        # NOTE(review): the per-item filename is overridden with a fixed
        # path, so each iteration overwrites the same file — confirm.
        #output_filename = fh.make_filename(output_dir, i, 'html')
        output_filename = '/Users/dcard/Desktop.temp.html'
        with codecs.open(output_filename, 'w') as output_file:
            output_file.write(html.make_header(i))
            output_file.write(html.make_body_start())
            output_file.write(common.make_masthead(-1))
            output_file.write(html.make_heading('Response: ' + i, align='center'))
            output_file.write(html.make_paragraph('The table below shows coeficients for the unigram model (red-blue)',
                                                  align="center"))
            output_file.write(html.make_paragraph('and sequence element probabilities for the LSTM (white-green).',
                                                  align="center"))
            # Link each word that has its own word-type page.
            links = [html.make_link('wordtype_' + w + '.html', w) if w in word_list else w for w in text[i]]
            table_header = ['Label'] + links + ['True', 'Pred.']
            output_file.write(html.make_table_start(style='t1'))
            output_file.write(html.make_table_header(table_header))
            for code_index, code in enumerate(true.columns):
                # load coefficients from unigram model
                words = text[i]
                model_filename = fh.make_filename(blm_dir, re.sub(' ', '_', code), 'json')
                model = fh.read_json(model_filename)
                intercept = float(model.get('intercept', 1.0))
                if 'coefs' in model:
                    coefs = dict(model['coefs'])
                    colours = [str((0, 0, 0))]
                    # Blue for positive coefficients, red for negative;
                    # intensity scales with |coef| / |intercept|, capped at 1.
                    for word in words:
                        coef = coefs.get('_n1_' + word, 0.0)/abs(intercept)
                        val = int(255 - (min(1, abs(coef))*255))
                        if coef > 0:
                            colours += [(val, val, 255)]
                        else:
                            colours += [(255, val, val)]
                else:
                    colours = [str((0, 0, 0))]
                    colours += [(255, 255, 255) for w in words]
                colours += [str((0, 0, 0))]*2
                code_name = code_names[code_index]
                link = html.make_link('label_' + html.replace_chars(code_name) + '.html', code_name)
                row = [link] + words + [str(true_i[code]), str(int(pred_i[code])) + ' (LR)']
                output_file.write(html.make_table_row(row, colours=colours))
                # Tally "focal" words: sequence elements the RNN scored >= 0.5.
                for i_v, v in enumerate(rnn_vals[code].values):
                    if v >= 0.5:
                        if train_item:
                            focal_word = text[i][i_v]
                            if focal_word in train_words:
                                train_words[focal_word] += 1
                            else:
                                train_words[focal_word] = 1
                        else:
                            focal_word = text[i][i_v]
                            if focal_word in test_words:
                                test_words[focal_word] += 1
                            else:
                                test_words[focal_word] = 1
                # Green intensity scales with the RNN probability.
                colours = [str((0, 0, 0))]
                vals = [int(235 - (v*235)) for v in rnn_vals[code]]
                colours += [(v, 235, v) for v in vals]
                colours += [str((0, 0, 0))]*2
                row = [' '] + text[i] + [' ', str(int(rnn_vals[code].max() >= 0.5)) + ' (RNN)']
                output_file.write(html.make_table_row(row, colours=colours))
            output_file.write(html.make_table_end())
            output_file.write(html.make_heading('LSTM Gates', align='center'))
            output_file.write(html.make_paragraph('The plot below shows LSTM gate values at each sequence element.',
                                                  align="center"))
            output_file.write(html.make_paragraph('Each grey line is one dimension; the colored line shows the mean.',
                                                  align="center"))
            output_file.write(html.make_image(os.path.join('gate_plots', i + '_gates.png')))
            output_file.write(html.make_heading('LSTM vectors', align='center'))
            output_file.write(html.make_paragraph('The plot below shows the LSTM hidden and memory nodes for each '
                                                  'sequence element.', align="Center"))
            output_file.write(html.make_paragraph('Vectors have been projected to a common space.',
                                                  align="center"))
            output_file.write(html.make_image(os.path.join('vector_plots', i + '_vectors.png')))
            output_file.write(html.make_body_end())
            output_file.write(html.make_footer())
    return train_words, test_words
def main():
    """Collect focal-word counts from all four survey datasets and dump the
    train/test vocabularies to JSON files.

    NOTE(review): dict.update() overwrites counts for words seen in more than
    one dataset — presumably only set membership matters here; confirm.
    """
    train_words = {}
    test_words = {}
    tr, te = output_responses(dataset='Democrat-Likes')
    train_words.update(tr)
    test_words.update(te)
    tr, te = output_responses(dataset='Democrat-Dislikes')
    train_words.update(tr)
    test_words.update(te)
    tr, te = output_responses(dataset='Republican-Dislikes')
    train_words.update(tr)
    test_words.update(te)
    tr, te = output_responses(dataset='Republican-Likes')
    train_words.update(tr)
    test_words.update(te)
    # Python 2 print statements: report vocabulary differences between splits.
    print len(set(train_words).difference(set(test_words)))
    print set(test_words).difference(set(train_words))
    # NOTE(review): hard-coded user-specific output paths — confirm intended.
    fh.write_to_json(train_words, '/Users/dcard/Desktop/train_words.json')
    fh.write_to_json(test_words, '/Users/dcard/Desktop/test_words.json')


if __name__ == '__main__':
    main()
|
import os
import itertools
import lxml.etree as ET
import re
from os.path import join
import glob
# Walk the XML test dataset and, for every source document, derive the TextGrid
# output files (aggregation/edition/work + .meta variants) by applying the
# XSLT stylesheet whose base name matches each output suffix.
wdir = ""
#inpath = os.path.join("home", "christof", "repos", "dh-trier", "Distant-Reading", "Testdataset", "XML", "")
inpath = join(wdir, "..", "..", "Testdataset", "XML", "")
outpath = os.path.join("", "Testoutput", "")
xsltpath = os.path.join("", "xslt_files", "")
for dirpath, dirnames, filenames in os.walk(inpath):
    for filename in filenames:
        basename = filename.split(".")[0]  # Removes [.xml] filename extension
        print(basename)
        prefix = basename.split("_")[0]  # Removes [_author name] in the filename, e.g. DEU001_Willkommen ---> DEU001
        lang = "".join(itertools.takewhile(str.isalpha, prefix))  # Takes only characters in identifiers, e.g. DEU001 ---> DEU
        langdir = os.path.join(outpath, lang)  # Creates aggregation folders
        filedir = os.path.join(langdir, prefix)
        # Target path for each output flavor of this source document.
        aggr_file = os.path.join(outpath, "-" + lang + ".aggregation")
        aggr_meta_file = os.path.join(outpath, "-" + lang + ".aggregation.meta")
        edit_file = os.path.join(langdir, prefix + ".edition")
        edit_meta_file = os.path.join(langdir, prefix + ".edition.meta")
        work_file = os.path.join(filedir, prefix + ".work")
        work_meta_file = os.path.join(filedir, prefix + ".work.meta")
        xml_file = os.path.join(filedir, "-" + prefix + ".xml")
        xml_meta_file = os.path.join(filedir, "-" + prefix + ".xml.meta")
        for newdir in [langdir, filedir]:
            try:
                os.makedirs(newdir)
            except:
                # Only swallow the error when the directory already exists.
                if not os.path.isdir(newdir):
                    raise
        for outfile in [aggr_file, aggr_meta_file, edit_file, edit_meta_file, work_file, work_meta_file, xml_file, xml_meta_file]:
            print(outfile)
            # e.g. "aggregation.meta" — used to pick the matching stylesheet.
            suffix = outfile.split(".")[1:3]
            suffix = ".".join(suffix)
            # NOTE(review): this inner walk reuses dirpath/dirnames from the
            # outer os.walk, shadowing them — harmless here but confusing.
            for dirpath, dirnames, xslt_filenames in os.walk(xsltpath):
                for xslt_filename in xslt_filenames:
                    xslt_basename = re.sub(r'(.xsl)',"", xslt_filename)
                    if suffix == xslt_basename:
                        print("Here begins the transformation")
                        print("Transforming...")
                        print("taret= ", suffix)
                        print("using xslt= ", xslt_basename)
                        dom = ET.parse(inpath + filename)
                        xslt = ET.parse(xsltpath + xslt_filename)
                        transform = ET.XSLT(xslt)
                        newdom = transform(dom)
                        # NOTE(review): rebinding `outfile` to the file handle
                        # and never closing it leaks descriptors — confirm.
                        outfile = open(outfile, 'w', encoding="utf-8")
                        outfile.write('<?xml version="1.0" encoding="UTF-8"?>\n')  # writes 'xml declaration' in output
                        if suffix != "work":  # 'work' files only contain 'xml declaration'
                            outfile.write(ET.tostring(newdom, pretty_print=True, encoding="unicode"))
                        print("Here ends the Transformation and next...")
                        break
|
# Implement factorial with recursion: n! is the product of all integers 1..n.
def factorial(n):
    """Return n! computed recursively (n <= 1 returns 1 by the base case).

    Fix: the original file contained a stray module-level call
    `factorial(n-1)` which raised NameError (`n` undefined) on import.
    """
    if n <= 1:
        return 1
    return n * factorial(n - 1)


factorial(5)
import random
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from mainapp.models import *
from mainapp.classes import BreadCrumb
# Create your views here.
def get_common_context():
    """Build the context dict shared by every view: navigation data,
    breadcrumbs, the organization record, one news item and a random photo.

    NOTE(review): assumes at least one Organization and one PhotoAlbum with
    photos exist — `[0]` and random.choice raise on empty querysets; confirm
    fixtures guarantee this.
    """
    categories = Category.objects.all()
    menu_list = TopMenu.objects.all()
    breadcrumbs = [
        BreadCrumb('Главная', '/')  # root breadcrumb ("Home")
    ]
    organization = Organization.objects.all()[0]
    news = News.objects.all().order_by('date')
    if news:
        # Keep a single item. NOTE(review): order_by('date') is ascending,
        # so this picks the OLDEST news item — confirm latest was intended.
        news = news[:1][0]
    album = random.choice(PhotoAlbum.objects.all())
    photo = random.choice(PhotoImage.objects.filter(album=album))
    return {'organization': organization, 'menu_list': menu_list, 'categories': categories, 'news': news,
            'random_photo': photo, 'breadcrumbs': breadcrumbs}
def index(request):
    """Render the landing page with up to 10 categories and the main album."""
    context = get_common_context()
    context['show_categories'] = Category.objects.all()[:10]
    main_albums = PhotoAlbum.objects.filter(main_page=True)
    if main_albums:
        # Only expose an album when one is flagged for the main page.
        context['album'] = main_albums[0]
    return render(request, 'index.html', context)
def news(request):
    """Render the news list, paginated 10 items per page."""
    context = get_common_context()
    context['page'] = 'news'
    context['breadcrumbs'].append(BreadCrumb('Новости', 'news'))
    paginator = Paginator(News.objects.all(), 10)
    requested = request.GET.get('page')
    try:
        page_objects = paginator.page(requested)
    except PageNotAnInteger:
        # Missing/garbage page parameter: fall back to the first page.
        page_objects = paginator.page(1)
    except EmptyPage:
        # Page number past the end: clamp to the last page.
        page_objects = paginator.page(paginator.num_pages)
    context['objects'] = page_objects
    return render(request, 'news.html', context)
def news_detail(request, _id):
    """Render a single news item, or 404 when the id is unknown."""
    context = get_common_context()
    item = get_object_or_404(News, id=_id)
    crumbs = context['breadcrumbs']
    crumbs.append(BreadCrumb('Новости', 'news'))
    crumbs.append(BreadCrumb(item.name, item.id))
    context['obj'] = item
    return render(request, 'news_detail.html', context)
def catalogue(request):
    """Render the product catalogue page."""
    ctx = get_common_context()
    ctx['breadcrumbs'].append(BreadCrumb('Каталог', 'catalogue'))
    ctx['page'] = 'catalogue'
    return render(request, 'catalogue.html', ctx)
def price(request):
    """Render the price-list page."""
    ctx = get_common_context()
    # In-place extend is equivalent to append for a single breadcrumb.
    ctx['breadcrumbs'] += [BreadCrumb('Прайс товаров', 'price')]
    ctx['page'] = 'price'
    return render(request, 'price.html', ctx)
def gallery(request):
    """Render the photo gallery page."""
    context = get_common_context()
    context['page'] = 'gallery'
    context['breadcrumbs'].append(BreadCrumb('Фотогалерея', 'gallery'))
    return render(request, 'gallery.html', context)
def about(request):
    """Render the "about the company" page."""
    ctx = get_common_context()
    ctx['breadcrumbs'].append(BreadCrumb('О компании', 'about'))
    ctx['page'] = 'about'
    return render(request, 'about.html', ctx)
def contact(request):
    """Render contacts: entries with a map become places; entries with a
    position become people. One contact may appear in both lists."""
    context = get_common_context()
    context['breadcrumbs'].append(BreadCrumb('Контакты', 'contact'))
    all_contacts = Contact.objects.all()
    # The queryset caches after its first evaluation, so two passes over it
    # still issue a single database query.
    places = [c for c in all_contacts if c.map]
    peoples = [c for c in all_contacts if c.position]
    context['page'] = 'contact'
    context['places'] = places
    context['peoples'] = peoples
    return render(request, 'contact.html', context)
def default(request, default):
    """Render a generic flat page selected by its TopMenu slug (404 if absent)."""
    context = get_common_context()
    menu_entry = get_object_or_404(TopMenu, slug=default)
    context['page'] = default
    context['breadcrumbs'].append(BreadCrumb(menu_entry.name, menu_entry.slug))
    return render(request, 'default.html', context)
def feedback(request):
    # NOTE(review): this view builds a context on POST but never returns an
    # HttpResponse, so Django will raise for every request — presumably
    # unfinished; confirm intended behavior before shipping.
    if request.method == 'POST':
        context = get_common_context()
|
# Read two integers and print their weighted mean with weights 4 and 6;
# the %d format truncates the float result to an integer.
first = int(input())
second = int(input())
weighted = (first * 4 + second * 6) / 10
print("%d" % weighted)
|
import re
import numpy as np
import matplotlib.pyplot as plt
import gc
import MySQLdb
# Python 2 script: correlate event timestamps from getdata.txt with rows of
# the `iw`, `queue` and `Beacon` tables in the `recordworking` MySQL database
# (within +/-10 or +/-100 time units), dumping matches to three text files.
con = MySQLdb.Connection(host="localhost", user="root",
                         passwd="lin", port=3306)
cur = con.cursor()
con.select_db('recordworking')
try:
    fil = open("getdata.txt", "r")
except:
    print "file open failed"
    exit()
try:
    iwget = open("iwget.txt", "w")
except:
    print "file open failed"
    exit()
try:
    queueget = open("queueget.txt", "w")
except:
    print "file open failed"
    exit()
try:
    beaconget = open("beaconget.txt", "w")
except:
    print "file open failed"
    exit()
lines = fil.readlines()
time_list = []
time_record = {}
time_draw = []
dup = []
for line in lines:
    # Each input line looks like "[key,seq,ack,time,xid,retrans,dupack,xid]".
    line = line.replace('[', '')
    line = line.replace(']', '')
    line = re.split(',', line)
    # print line
    try:
        # NOTE(review): `xid` is bound twice (the second wins) and `time`
        # shadows the field name — presumably only `time` is used; confirm.
        (key, sequence, ack_sequence, time, xid, retrans, dupack, xid) = line
    except:
        print line[0]
        print "line to set failed"
        exit()
    time = np.uint64(time)
    # print time
    time_list.append(time)
time_list.sort()
for i in time_list:
    # Window of +/-10 time units around each event for iw and queue rows.
    sql = "select * from iw where time < (%lu + 10) and time > (%lu - 10);" % (
        i, i)
    cur.execute(sql)
    results = cur.fetchall()
    for r in results:
        iwget.write(str(r) + '\n')
    sql1 = "select * from queue where time < (%lu + 10) and \
time > (%lu - 10);" % (i, i)
    cur.execute(sql1)
    results1 = cur.fetchall()
    for k in results1:
        queueget.write(str(k) + '\n')
    # Beacons use a wider +/-100 window.
    sql2 = "select * from Beacon where time < (%lu + 100) and \
time > (%lu - 100);" % (i, i)
    cur.execute(sql2)
    results2 = cur.fetchall()
    for m in results2:
        beaconget.write(str(m) + '\n')
    # tmp = str(i) + '\n'
    # timewrite.write(tmp)
# Disabled histogram/plot of dupack counts over time:
# for i in range(1, len(time_list)):
#     tmp = time_list[i] - time_list[0]
#     tmp = tmp - (tmp % 1000)
#     try:
#         time_record[tmp] += 1
#     except:
#         time_record[tmp] = 1
# time_record1 = sorted(time_record.iteritems(), key=lambda d: d[0])
# for i in time_record1:
#     time_draw.append(i[0])
#     dup.append(i[1])
# plt.figure(1)
# plt.xlabel('Time')
# plt.ylabel('Dupacks')
# plt.plot(time_draw, dup, 'ro', label='dup')
# plt.legend(loc='upper left')
# plt.show()
del dup, time_draw, time_list, time_record
# Close everything that was successfully opened.
if fil:
    fil.close()
if iwget:
    iwget.close()
if cur:
    cur.close()
if con:
    con.close()
if queueget:
    queueget.close()
if beaconget:
    beaconget.close()
gc.collect()
|
import secrets
import tempfile
import textwrap
import time
from pathlib import Path
import pytest
from ai.backend.client.exceptions import BackendAPIError
from ai.backend.client.session import Session
# module-level marker
pytestmark = pytest.mark.integration
def aggregate_console(c):
    """Collapse a list of (stream, payload) console records into one dict.

    stdout/stderr/html payloads are concatenated; media payloads are listed.
    """
    stdout_parts, stderr_parts, html_parts, media_items = [], [], [], []
    for stream, payload in c:
        if stream == 'stdout':
            stdout_parts.append(payload)
        elif stream == 'stderr':
            stderr_parts.append(payload)
        elif stream == 'html':
            html_parts.append(payload)
        elif stream == 'media':
            media_items.append(payload)
    return {
        'stdout': ''.join(stdout_parts),
        'stderr': ''.join(stderr_parts),
        'html': ''.join(html_parts),
        'media': media_items,
    }
def exec_loop(kernel, mode, code, opts=None, user_inputs=None):
    """Drive kernel.execute() until the run finishes, feeding scripted inputs.

    The server may return continuation if kernel preparation
    takes a little longer time (a few seconds).

    Returns a tuple (aggregated console dict, number of execute round-trips).
    """
    console = []
    num_queries = 0
    run_id = secrets.token_hex(8)
    if user_inputs is None:
        user_inputs = []
    while True:
        result = kernel.execute(
            run_id,
            # Only the first request (or an input continuation) carries code.
            code=code if num_queries == 0 or mode == 'input' else '',
            mode=mode,
            opts=opts)
        num_queries += 1
        console.extend(result['console'])
        if result['status'] == 'finished':
            break
        elif result['status'] == 'waiting-input':
            # Kernel is blocked on input(); send the next scripted answer.
            mode = 'input'
            code = user_inputs.pop(0)
            opts = None
        else:
            # Any other status means "not done yet": poll again with no code.
            mode = 'continue'
            code = ''
            opts = None
    return aggregate_console(console), num_queries
# pytest.yield_fixture is a long-deprecated alias removed in pytest 6.2;
# plain pytest.fixture supports yield-style teardown since pytest 3.0.
@pytest.fixture
def py3_kernel():
    """Yield a fresh Python 3.6 kernel session; destroy it on teardown."""
    with Session() as sess:
        kernel = sess.Kernel.get_or_create('python:3.6-ubuntu18.04')
        yield kernel
        kernel.destroy()
def test_hello():
    """The manager's hello endpoint must report its API version."""
    with Session() as sess:
        result = sess.Kernel.hello()
        assert 'version' in result
def test_kernel_lifecycles():
    """Create a kernel, query it twice, destroy it, and verify it is gone."""
    with Session() as sess:
        kernel = sess.Kernel.get_or_create('python:3.6-ubuntu18.04')
        kernel_id = kernel.kernel_id
        info = kernel.get_info()
        # the tag may be different depending on alias/metadata config.
        lang = info['lang']
        assert lang.startswith('index.docker.io/lablup/python:')
        # Each get_info() round-trip itself counts as an executed query.
        assert info['numQueriesExecuted'] == 1
        info = kernel.get_info()
        assert info['numQueriesExecuted'] == 2
        kernel.destroy()
        # kernel destruction is no longer synchronous!
        time.sleep(2.0)
        # Accessing the destroyed kernel must now yield a 404.
        with pytest.raises(BackendAPIError) as e:
            info = sess.Kernel(kernel_id).get_info()
        assert e.value.status == 404
def test_kernel_execution_query_mode(py3_kernel):
    """Query mode captures stdout output and the traceback on stderr."""
    code = 'print("hello world"); x = 1 / 0'
    console, n = exec_loop(py3_kernel, 'query', code, None)
    assert 'hello world' in console['stdout']
    assert 'ZeroDivisionError' in console['stderr']
    assert len(console['media']) == 0
    info = py3_kernel.get_info()
    # +1 accounts for the creation query made by the py3_kernel fixture.
    assert info['numQueriesExecuted'] == n + 1
def test_kernel_execution_query_mode_user_input(py3_kernel):
    """input() requests are satisfied via the waiting-input protocol."""
    name = secrets.token_hex(8)
    code = 'name = input("your name? "); print(f"hello, {name}!")'
    console, n = exec_loop(py3_kernel, 'query', code, None, user_inputs=[name])
    assert 'your name?' in console['stdout']
    assert 'hello, {}!'.format(name) in console['stdout']
def test_kernel_get_or_create_reuse():
    """Kernels requested with the same client token and image are reused."""
    with Session() as sess:
        kernel1 = None
        try:
            # Sessions with same token and same language must be reused.
            t = secrets.token_hex(6)
            kernel1 = sess.Kernel.get_or_create('python:3.6-ubuntu18.04',
                                                client_token=t)
            kernel2 = sess.Kernel.get_or_create('python:3.6-ubuntu18.04',
                                                client_token=t)
            assert kernel1.kernel_id == kernel2.kernel_id
        finally:
            # Fix: if the first get_or_create raised, `kernel1` was unbound
            # and the original finally clause itself raised NameError,
            # masking the real failure.
            if kernel1 is not None:
                kernel1.destroy()
def test_kernel_execution_batch_mode(py3_kernel):
    """Batch mode uploads a script and runs it via the exec option."""
    with tempfile.NamedTemporaryFile('w', suffix='.py', dir=Path.cwd()) as f:
        f.write('print("hello world")\nx = 1 / 0\n')
        f.flush()
        f.seek(0)
        py3_kernel.upload([f.name])
        console, _ = exec_loop(py3_kernel, 'batch', '', {
            'build': '',
            'exec': 'python {}'.format(Path(f.name).name),
        })
        assert 'hello world' in console['stdout']
        assert 'ZeroDivisionError' in console['stderr']
        assert len(console['media']) == 0
def test_kernel_execution_batch_mode_user_input(py3_kernel):
    """Batch-mode scripts can also consume scripted user input."""
    name = secrets.token_hex(8)
    with tempfile.NamedTemporaryFile('w', suffix='.py', dir=Path.cwd()) as f:
        f.write('name = input("your name? "); print(f"hello, {name}!")')
        f.flush()
        f.seek(0)
        py3_kernel.upload([f.name])
        console, _ = exec_loop(py3_kernel, 'batch', '', {
            'build': '',
            'exec': 'python {}'.format(Path(f.name).name),
        }, user_inputs=[name])
        assert 'your name?' in console['stdout']
        assert 'hello, {}!'.format(name) in console['stdout']
def test_kernel_execution_with_vfolder_mounts():
    """A mounted virtual folder is visible to the kernel under its name."""
    with Session() as sess:
        vfname = 'vftest-' + secrets.token_hex(4)
        sess.VFolder.create(vfname)
        vfolder = sess.VFolder(vfname)
        try:
            with tempfile.NamedTemporaryFile('w', suffix='.py',
                                             dir=Path.cwd()) as f:
                f.write('print("hello world")\nx = 1 / 0\n')
                f.flush()
                f.seek(0)
                vfolder.upload([f.name])
                kernel = sess.Kernel.get_or_create('python:3.6-ubuntu18.04',
                                                   mounts=[vfname])
                try:
                    # Run the uploaded script from inside the mounted folder.
                    console, n = exec_loop(kernel, 'batch', '', {
                        'build': '',
                        'exec': 'python {}/{}'.format(vfname, Path(f.name).name),
                    })
                    assert 'hello world' in console['stdout']
                    assert 'ZeroDivisionError' in console['stderr']
                    assert len(console['media']) == 0
                finally:
                    kernel.destroy()
        finally:
            vfolder.delete()
def test_kernel_restart(py3_kernel):
    """A restart clears interpreter state but keeps the kernel filesystem."""
    num_queries = 1  # first query is done by py3_kernel fixture (creation)
    first_code = textwrap.dedent('''
    a = "first"
    with open("test.txt", "w") as f:
        f.write("helloo?")
    print(a)
    ''').strip()
    second_code_name_error = textwrap.dedent('''
    print(a)
    ''').strip()
    second_code_file_check = textwrap.dedent('''
    with open("test.txt", "r") as f:
        print(f.read())
    ''').strip()
    console, n = exec_loop(py3_kernel, 'query', first_code)
    num_queries += n
    assert 'first' in console['stdout']
    assert console['stderr'] == ''
    assert len(console['media']) == 0
    py3_kernel.restart()
    num_queries += 1
    # After restart, interpreter variables are gone...
    console, n = exec_loop(py3_kernel, 'query', second_code_name_error)
    num_queries += n
    assert 'NameError' in console['stderr']
    assert len(console['media']) == 0
    # ...but files written before the restart must survive.
    console, n = exec_loop(py3_kernel, 'query', second_code_file_check)
    num_queries += n
    assert 'helloo?' in console['stdout']
    assert console['stderr'] == ''
    assert len(console['media']) == 0
    info = py3_kernel.get_info()
    # FIXME: this varies between 2~4
    assert info['numQueriesExecuted'] == num_queries
def test_admin_api(py3_kernel):
    """The admin GraphQL endpoint lists the running compute session."""
    sess = py3_kernel.session
    q = '''
    query($ak: String!) {
        compute_sessions(access_key: $ak, status: "RUNNING") {
            lang
        }
    }'''
    resp = sess.Admin.query(q, {
        'ak': sess.config.access_key,
    })
    assert 'compute_sessions' in resp
    assert len(resp['compute_sessions']) >= 1
    lang = resp['compute_sessions'][0]['lang']
    assert lang.startswith('index.docker.io/lablup/python:')
|
#importation de modules
import requests
#import pandas as pd
import bs4
from bs4 import BeautifulSoup
import requests
#application
#crawling through all the pages associate to the chosen technologies
def get_pages(token, key_words, nb_pages):
    """Build the list of search-result page URLs for every keyword.

    Args:
        token: base search URL ending with the keyword query parameter.
        key_words: list of keyword strings to search for.
        nb_pages: number of result pages to generate per keyword.

    Returns:
        list of URLs, one per (keyword, page index) pair.

    Raises:
        TypeError: if key_words is not a list or nb_pages is not an int.
            (TypeError subclasses Exception, so callers catching the
            original generic Exception still work.)
    """
    # isinstance() is the idiomatic type check (accepts subclasses too).
    if not isinstance(key_words, list) or not isinstance(nb_pages, int):
        raise TypeError("please provide the right type of arguments")
    return [
        token + word + "&page=" + str(index)
        for word in key_words
        for index in range(1, nb_pages + 1)
    ]
def even_indexed_items(liste_items):
    """Return the items of *liste_items* at even indexes (0, 2, 4, ...).

    Raises:
        TypeError: if the argument is not a list. (TypeError subclasses
            Exception, so callers catching the original generic Exception
            are unaffected.)
    """
    if not isinstance(liste_items, list):
        raise TypeError("parameter is not a list")
    # Slicing with step 2 replaces the original index-filtering loop.
    return liste_items[::2]
def opportunities_extractor(URL):
    """Scrape one freelance-info.fr search-results page.

    input:  URL --- full search-results page URL
    output: list of [job, location, date, duration, TJM, link] rows,
            truncated to the shortest extracted column.
    Raises Exception when the HTTP response is not 200.
    """
    page = requests.get(URL)
    page  # no-op expression statement (notebook leftover)
    #check if the extractor connected to the site
    if str(page) != "<Response [200]>":
        raise Exception("page not reachable")
    else:
        #parsing of the web page
        soup = BeautifulSoup(page.text, "html.parser")
        #jobs extraction
        jobs = []
        links = []
        for div in soup.find_all(name="div", attrs={"id":"titre-mission"}) :
            for a in div.find_all(name="a", attrs={"class":"rtitre"} , href = True):
                links.append("https://www.freelance-info.fr"+a["href"])
                job = a.get_text().split()
                jobs.append(' '.join(job))
        # Every item appears twice in the markup; keep every other one.
        jobs = even_indexed_items(jobs)
        links = even_indexed_items(links)
        #locations extraction
        locations = []
        for span in soup.find_all(name="span", attrs={"class":"textvert9"}) :
            location = span.get_text()
            locations.append(location)
        locations = even_indexed_items(locations)
        #dates extraction
        dates = []
        for span in soup.find_all(name="span", attrs={"class":"textgrisfonce9"}) :
            date = span.get_text()
            dates.append(date)
        dates = even_indexed_items(dates)
        #duration and daily rates extraction
        durations = []
        for span in soup.find_all(name="span") :
            duration = span.get_text()
            durations.append(duration)
        # NOTE(review): the position-based slicing below depends on the exact
        # page markup and is fragile — verify against the live site.
        durations = even_indexed_items(durations)
        durations = even_indexed_items(durations[5:])
        duration = list(map(lambda item : item.split("|")[1] , even_indexed_items(durations)))
        TJM = list(map(lambda item : item.split("|")[2] , even_indexed_items(durations)))
        outputs = [jobs , locations , dates , duration , TJM , links]
        # The shortest column bounds the number of complete rows.
        length_list_extracted = min([length_list for length_list in list(map(lambda x : len(x) , outputs)) ])
        #print(length_list_extracted)
        align_extraction = []
        for k in range(length_list_extracted):
            align_extraction.append([jobs[k] , locations[k] , dates[k] , duration[k] , TJM[k] , links[k]])
        results = {}
        #results = [jobs , location , dates , duration , TJM , links]
        results = {"jobs" : jobs , "location" : locations , "dates" : dates , "duration" : duration , "TJM" : TJM}
        return(align_extraction )
# execution de l'api
# if __name__ == '__main__':
# token = "https://www.freelance-info.fr/missions?keywords="
# # #invokation of all the pages
# key_words = ["python" , ""]
# URLs = get_pages(token , key_words = , nb_pages = 1)
# # URLs
# opportunities = []
# for url in URLs:
# opportunities.append(opportunities_extractor(url))
# opportunities = sum(opportunities , [])
# # #opportunities = pd.concat(opportunities)
# print(opportunities)
|
import math
from time import time
import random
def fastpower(a, b):
    """Return a**b for integer b >= 1 via exponentiation by squaring.

    Runs in O(log b) multiplications.

    Fix: the original computed `c = a*a` but never used it and recursed on
    the unchanged base, so it returned wrong results for any exponent whose
    binary expansion has more than the top bit set (e.g. fastpower(2, 4)
    returned 2 instead of 16). The half-exponent result must be squared.
    """
    if b == 1:
        return a
    # Square the half-exponent result; multiply in one extra `a` when b is odd.
    half = fastpower(a, b // 2)
    result = half * half
    if b % 2 != 0:
        result *= a
    return result
def analyze():
    """Benchmark fastpower against math.pow over 10000 random (base, power)
    pairs and print win/loss counts plus timing-difference statistics.

    NOTE(review): single-shot time() deltas are dominated by timer
    resolution/noise; timeit would give meaningful comparisons.
    """
    wins = 0
    loses = 0
    total_diffs = []
    for _ in range(10000):
        base,power = random.randint(1,10000),random.randint(1,60)
        start = time()
        math.pow(base,power)
        builtin_pow = time() - start
        start = time()
        fastpower(base,power)
        my_pow = time()-start
        total_diffs.append(abs(my_pow - builtin_pow))
        if builtin_pow > my_pow:
            wins+=1
        else:
            loses+=1
    print("total wins",wins)
    print("total loses",loses)
    print("Average difference:",sum(total_diffs)/float(len(total_diffs)))
    print("largest difference:",max(total_diffs))
    print("min difference:", min(total_diffs))


# Run the benchmark when the module is executed/imported.
analyze()
|
# This program is to demonstrate how to create a class, an object and how to call them.
class exampleClass:
    """Demo class: a handful of class attributes plus one method."""

    # Class attributes shared by all instances.
    name = "Loisa"
    age = 22
    eyes = "black"
    height = 5.6

    def thisMethod(self):
        """Print a greeting and return the product 20 * 30 (= 600)."""
        width, depth = 20, 30
        print ("Hello there! How are you doing?")
        print ("\n")
        return width * depth
# Instantiate the class and demonstrate attribute and method access.
Lui = exampleClass()
print (Lui.name)
print (Lui.age)
print (Lui.eyes)
print (Lui.height)
print ("\n")
print (Lui.thisMethod())  # prints the greeting, then the returned product
#!/usr/bin/env /data/mta/Script/Python3.6/envs/ska3/bin/python
#################################################################################################
# #
# update_grating_obs_list.py: update grating observations lists #
# which generate grat index.html page #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Sep 05, 2019 #
# #
#################################################################################################
import os
import sys
import re
import random
import numpy
import time
#
#--- reading directory list
#
path = '/data/mta/Script/Grating/Grating_HAK/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    # Each dir_list entry is "<value> : <name>"; exec binds <name> = <value>
    # at module scope (expected to define bin_dir, mta_dir, data_dir,
    # house_keeping, web_dir, ...).
    exec("%s = %s" %(var, line))
#
#--- append pathes to private folders to a python directory
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
import mta_common_functions as mcf
#
#--- temp writing file name
#
# Pseudo-unique scratch file path under /tmp for shell command output.
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#
#--- set a few global variables
#
month_list = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
data_location = '/data/mta_www/mta_grat/Grating_Data/'
sot_directory = '/data/mta4/obs_ss/sot_ocat.out'
#----------------------------------------------------------------------------------------------
#-- update_grating_obs_list: update grating observations lists --
#----------------------------------------------------------------------------------------------
def update_grating_obs_list():
    """
    update grating observations lists
    Input:  none, but read from <data_location> and <sot_directory>
    Output: <web_dir>/Grating_Data/grating_list_year<year>html
            <web_dir>/Grating_Data/grating_list_past_six_month.html
    """
    obs_dict = get_obsdate()
    tyear = int(time.strftime('%Y', time.gmtime()))
    tmon = int(time.strftime('%m', time.gmtime()))
    lyear = tyear + 1
    # (pyear, pmon): cutoff month marking "six months ago".
    pyear = tyear
    pmon = tmon - 6
    if pmon < 1:
        pmon += 12
        pyear -= 1
    #
    #--- start checking from this year to backward
    #
    # *_r lists accumulate the "recent" (last six months) entries.
    acis_hetg_r = []
    acis_letg_r = []
    hrc_letg_r = []
    for year in range(tyear, 1998, -1):
        ytemp = str(year)
        pyr = ytemp[2] + ytemp[3]  # two-digit year, e.g. '19'
        acis_hetg = []
        acis_letg = []
        hrc_letg = []
        #
        #--- if this is this year, start from this month to backward
        #
        if year == tyear:
            smon = tmon
        else:
            smon = 12
        for k in range(smon-1, -1, -1):
            if (year == tyear) and (k >tmon):
                continue
            # Mission data starts in July 1999.
            if (year == 1999) and (k < 7):
                break
            # Month directory, e.g. <data_dir>/Jan19.
            tdir = data_dir + month_list[k] + pyr
            if os.path.isdir(tdir) and len(os.listdir(tdir)) > 0:
                cmd = 'ls ' + tdir + '/*/*_Sky_summary.html>' + zspace
                os.system(cmd)
                data = mcf.read_data_file(zspace, remove=1)
                if len(data) == 0:
                    continue
                #
                #--- check whether to keep the data in recent list
                #
                achk = 0
                if year > pyear:
                    achk = 1
                elif year == pyear:
                    if k >= pmon:
                        achk = 1
                #
                #--- classify which link goes to which category
                #
                for ent in data:
                    catg = check_category(ent)
                    if catg == False:
                        continue
                    if catg == 'acis_hetg':
                        acis_hetg.append(ent)
                        if achk == 1:
                            acis_hetg_r.append(ent)
                    elif catg == 'acis_letg':
                        acis_letg.append(ent)
                        if achk == 1:
                            acis_letg_r.append(ent)
                    else:
                        hrc_letg.append(ent)
                        if achk == 1:
                            hrc_letg_r.append(ent)
        #
        #--- write a html page for each year
        #
        write_html_page(year, acis_hetg, acis_letg, hrc_letg, obs_dict)
    #
    #--- write a html page for the last 6 month (year == 0 selects that page)
    #
    write_html_page(0, acis_hetg_r, acis_letg_r, hrc_letg_r, obs_dict)
#------------------------------------------------------------------------------------
#-- check_category: check which category this observation --
#------------------------------------------------------------------------------------
def check_category(ifile):
    """
    Classify a *_Sky_summary.html file by instrument/grating combination.
    input:  ifile --- file name with data path (partial)
    output: 'acis_hetg', 'acis_letg', 'hrc_letg', or False when neither
            ACIS-S nor HRC is mentioned in the file
    """
    with open(ifile, 'r') as f:
        content = f.read()
    # Plain substring tests are equivalent to the original literal
    # re.search() calls.
    has_acis = 'ACIS-S' in content
    has_hrc = 'HRC' in content
    has_hetg = 'HETG' in content
    if has_acis:
        return 'acis_hetg' if has_hetg else 'acis_letg'
    if has_hrc:
        return 'hrc_letg'
    return False
#------------------------------------------------------------------------------------
#-- write_html_page: wirte a html page for year grating observations --
#------------------------------------------------------------------------------------
def write_html_page(year, acis_hetg, acis_letg, hrc_letg, obs_dict):
    """
    write a html page for one year's grating observations
    input:  year      --- year (0 selects the "past six months" page)
            acis_hetg --- a list of acis_hetg links
            acis_letg --- a list of acis_letg links
            hrc_letg  --- a list of hrc_letg links
            obs_dict  --- dictionary of obsid <--> observation date
    output: <data_dir>/grating_list_year<year>.html (or the six-month page)
    """
    #
    #--- acis hetg data list tends to be long; devide into two lists
    #
    [acis_hetg1, acis_hetg2] = devide_list_to_two(acis_hetg)
    #
    #--- match the length of lists; d_list = [acis_letg, acis_hetg1, acis_hetg2, hrc_letg]
    #
    [d_list, tmax] = match_lentgh([acis_letg, acis_hetg1, acis_hetg2, hrc_letg])
    #
    #--- write table lines (one row per index, one column per category list)
    #
    if tmax > 0:
        line = ''
        for k in range(0, tmax):
            line = line + '<tr>\n'
            for m in range(0, len(d_list)):
                line = line + '\t<th>' + get_info(d_list[m][k], obs_dict) + '</th>\n'
            line = line + '</tr>\n'
    #
    #--- if there is no data, say so
    #
    else:
        line = '<tr><th colspan=3>No Grating Observations </th></tr>\n'
    lyear = int(time.strftime('%Y', time.gmtime())) + 1
    m = 0  # column counter for the year-link table (10 per row)
    #
    #--- if it is not a main page, adding link to the last six month page
    #
    if year == 0:
        line2 = '<tr>\n'
    else:
        line2 = '<tr>\n<th colspan=10><a href="'
        line2 = line2 + 'grating_list_past_six_month.html'
        line2 = line2 + '">Last Six Months</a></th>\n</tr>\n'
        line2 = line2 + '<tr>\n'
    #
    #--- add all other year links, except the year of the page (highlighted)
    #
    for k in range(1999, lyear):
        if k == year:
            line2 = line2 + '<th style="color:green;"><b>' + str(k) + '</b></th>\n'
        else:
            line2 = line2 + '<th><a href="grating_list_year' + str(k) + '.html">'
            line2 = line2 + str(k) + '</a></th>\n'
        m += 1
        if m >= 10:
            line2 = line2 + '</tr>\n<tr>\n'
            m = 0
    # Pad the last partial row with empty cells.
    if (m > 0) and (m < 10):
        for k in range(m, 10):
            line2 = line2 + '<td> </td>'
        line2 = line2 + '</tr>\n'
    #
    #--- read the template for the html page
    #
    t_file = house_keeping + 'grating_obs_template'
    with open(t_file, 'r') as f:
        text = f.read()
    #
    #--- replace the year and the table entry placeholders
    #
    if year == 0:
        yline = "the Last Six Months"
        outfile = data_dir + '/grating_list_past_six_month.html'
    else:
        yline = "Year " + str(year)
        outfile = data_dir + '/grating_list_year' + str(year) + '.html'
    text = text.replace('#YEAR#', yline)
    text = text.replace('#TABLE#', line)
    text = text.replace('#TABLE2#', line2)
    #
    #--- write out the finished page
    #
    with open(outfile, 'w') as fo:
        fo.write(text)
#------------------------------------------------------------------------------------
#-- devide_list_to_two: devide a long list into two lists --
#------------------------------------------------------------------------------------
def devide_list_to_two(alist):
    """
    devide a long list into two lists of about the same length
    input:  alist --- a list
    output: [alist1, alist2] --- the two halves; lists of length <= 3 are
            returned whole with an empty second half
    """
    n = len(alist)
    if n <= 3:
        # Short lists are not worth splitting.
        return [alist, []]
    mid = n // 2  # first half gets the smaller share for odd lengths
    return [alist[:mid], alist[mid:]]
#------------------------------------------------------------------------------------
#-- match_lentgh: add empty value to match the length of lists --
#------------------------------------------------------------------------------------
def match_lentgh(list_d, add=' '):
    """
    pad every sub-list (in place) with *add* up to the longest length
    input:  list_d --- a list of lists
            add    --- the value/string used to fill the empty spots
    output: [save, tmax] --- the padded lists and their common length
    """
    # Find the maximum length; indexing list_d[0] first preserves the
    # original behavior (IndexError) for an empty outer list.
    tmax = len(list_d[0])
    for sub in list_d[1:]:
        if len(sub) > tmax:
            tmax = len(sub)
    save = []
    for sub in list_d:
        # extend() mutates the caller's lists, as the original did.
        sub.extend([add] * (tmax - len(sub)))
        save.append(sub)
    return [save, tmax]
#------------------------------------------------------------------------------------
#-- get_info: create table cell display info ---
#------------------------------------------------------------------------------------
def get_info(line, obs_dict):
    """
    create table cell display info
    input:  line     --- path to the observation html page, or ' ' filler
            obs_dict --- a dictionary of obsid (int) <---> observation date
    output: oline    --- '<a ...>Date: ...<br />Obsid: ...</a>' cell, or
                         the ' ' filler unchanged

    Fix: the two bare `except:` clauses (which also swallow SystemExit /
    KeyboardInterrupt) are narrowed to the specific expected failures.
    """
    if line == ' ':
        return line
    parts = line.split('/')
    # obsid is the second-to-last path component; fall back to 'na' when
    # it is missing or not numeric.
    try:
        obsid = int(parts[-2])
    except (ValueError, IndexError):
        obsid = 'na'
    #
    #--- find observation time; if it cannot be found, use the short
    #--- directory name (e.g. 'Jan19') instead
    #
    try:
        stime = obs_dict[obsid]
    except KeyError:
        stime = parts[-3]
    #
    #--- adjust the link path to the public web location
    #
    ltemp = line.split('Grating_Data')
    line = 'https://cxc.cfa.harvard.edu/mta_days/mta_grat/Grating_Data/' + ltemp[1]
    oline = '<a href="'+ line + '" target="_parent">'
    oline = oline + 'Date: ' + stime + '<br />' + 'Obsid: ' + str(obsid) + '</a>'
    return oline
#------------------------------------------------------------------------------------
#-- get_obsdate: read sot database and make a list of obsids and observation dates --
#------------------------------------------------------------------------------------
def get_obsdate():
    """
    read sot database and make a list of obsids and its observation dates
    Input:  none, but read data from <sot_directory>
    Output: obs_dict --- a dictionary of obsid (int) <--> starting date
            in mm/dd/yy form
    """
    #
    #--- read sot data (caret-separated records)
    #
    data = mcf.read_data_file(sot_directory)
    obsid_list = []
    start_date = []
    index_date = []
    obs_dict = {}
    for ent in data:
        temp = re.split('\^', ent)
        obsid = temp[1]
        #
        #--- check the data are valid; field 13 holds "Mon DD YYYY ..."
        #
        try:
            atemp = re.split('\s+', temp[13])
            mon = mcf.change_month_format(atemp[0])
            date = atemp[1]
            year = atemp[2][2] + atemp[2][3]  # two-digit year
        except:
            continue
        #
        #--- starting date in form of 05/23/14
        #
        lmon = mcf.add_leading_zero(mon)
        ldate = mcf.add_leading_zero(date)
        dline = lmon + '/' + ldate + '/' + year
        # Skip records whose obsid field is not numeric.
        try:
            obs_dict[int(obsid)] = dline
        except:
            pass
    return obs_dict
#------------------------------------------------------------------------------------
# Entry point: regenerate every grating observation list page.
if __name__ == '__main__':
    update_grating_obs_list()
|
# Read an integer and report whether it is even ("par") or odd ("ímpar").
numero = int(input('Digite um número: '))
resto = numero % 2
if resto == 0:
    print('O número {} é par'.format(numero))
else:
    print('O número {} é ímpar'.format(numero))
#!/usr/bin/env python3
import csv
import pdb
import requests
from bs4 import BeautifulSoup
def read_in_journal_data():
    """Read yearly journal volumes from ./yearly_volumes.csv.

    Expects a header row followed by rows of (title, year, url).

    Returns:
        list of dicts with keys 'title', 'year', 'url'.

    Fixes: the explicit close() inside the `with` block was redundant (the
    context manager already closes the file), and `next(reader)` raised
    StopIteration on an empty file — the None default makes that a no-op.
    """
    volumes = []
    with open('yearly_volumes.csv', 'r') as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        next(reader, None)  # skip the header row, tolerate an empty file
        for row in reader:
            volumes.append({
                'title': row[0],
                'year': row[1],
                'url': row[2],
            })
    return volumes
def parse_issue_page(soup, volume):
    """Extract one issue record per <dl> block on an issue-listing page."""
    issues = []
    for section in soup.find_all('dl'):
        # e.g. "Heft 3" -> issue number "3"
        number = section.find('h2').get_text().replace('Heft ', '')
        link = full_url(section.find('a')['href'])
        issues.append(issue_data(volume, link, issue=number))
    return issues
def parse_calendar_page(soup, volume):
    """Extract one issue record per day link on a calendar-style page."""
    issues = []
    for anchor in soup.find_all('a'):
        # The anchor title carries the date; drop the dot separators.
        day = anchor['title'].replace('.', '')
        issues.append(issue_data(volume, full_url(anchor['href']), date=day))
    return issues
def full_url(url):
    """Turn a site-relative href (leading character stripped) into an
    absolute anno.onb.ac.at URL, inserting the cgi-content prefix when the
    href does not already contain it."""
    base = 'http://anno.onb.ac.at/'
    if 'cgi-content' not in url:
        base += 'cgi-content'
    return base + url[1:]
def issue_data(volume, url, issue=None, date=None):
    """Assemble one issue record from its volume metadata and page URL."""
    record = {
        'title': volume['title'],
        'year': volume['year'],
        'issue': issue,
        'date': date,
        'url': url,
    }
    return record
def parse_html(volume):
    """Fetch a volume page and dispatch to the right parser by page layout.

    Returns a flat list of issue dicts (empty when the layout is unknown).
    """
    page = requests.get(volume['url'])
    soup = BeautifulSoup(page.content, 'html.parser')
    issues = []
    # Two known layouts: a "prevws" issue listing or a "view-month" calendar.
    issue_page = soup.find(class_='prevws')
    calendar_page = soup.find(class_='view-month')
    if issue_page:
        issues.append(parse_issue_page(issue_page, volume))
    elif calendar_page:
        issues.append(parse_calendar_page(calendar_page, volume))
    else:
        print('Neither issue nor calendar page!')
    # Flatten the list-of-lists built above.
    return [i for sublist in issues for i in sublist]
def write_out_data(journal_issues):
    """Write issue dicts to 'journal_issues.csv'.

    The column order is taken from the keys of the first record.
    """
    fields = list(journal_issues[0].keys())
    # newline='' is required by the csv module; without it each row is
    # followed by a blank line on Windows.
    with open('journal_issues.csv', 'w', newline='') as outfile:
        writer = csv.DictWriter(outfile, fieldnames=fields)
        writer.writeheader()
        writer.writerows(journal_issues)
def retrieve_issue_data():
    """Scrape issue metadata for every volume listed in yearly_volumes.csv
    and write the flattened result to journal_issues.csv."""
    journal_yearly_volumes = read_in_journal_data()
    issue_data = []
    for j in journal_yearly_volumes:
        issues = parse_html(j)
        issue_data.append(issues)
    all_issues = [i for sublist in issue_data for i in sublist]
    write_out_data(all_issues)


# Run the scrape whenever this module is executed/imported.
retrieve_issue_data()
|
from django.conf.urls import patterns, include, url
from django.views.static import *
import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

# Legacy Django URLconf: patterns() and dotted-string view references were
# removed in Django 1.10, so this file requires an old Django version.
urlpatterns = patterns('',
    (r'^points/', include ('points.urls')),
    # Examples:
    # url(r'^$', 'concursoBandas.views.home', name='home'),
    # url(r'^concursoBandas/', include('concursoBandas.foo.urls')),

    # Admin
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', include(admin.site.urls)),

    # Auth
    url(r'^accounts/login/$', 'concursoBandas.views.login'),
    url(r'^accounts/auth/$', 'concursoBandas.views.auth_view'),
    url(r'^accounts/logout/$', 'concursoBandas.views.logout'),

    # Required to make static serving work (development only)
    (r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
    #url(r'^getgit/$', 'concursoBandas.views.logout'),

    # custom
    url(r'^login/$', 'concursoBandas.views.login'),
    url(r'^$', 'concursoBandas.views.login'),
)

# NOTE(review): staticfiles URLs are normally added when DEBUG is True;
# adding them only when DEBUG is False looks inverted -- confirm intent.
if not settings.DEBUG:
    from django.contrib.staticfiles.urls import staticfiles_urlpatterns
    urlpatterns += staticfiles_urlpatterns()
import unittest
from katas.beta.compare_section_numbers import compare
class CompareSectionNumbersTestCase(unittest.TestCase):
    """Tests for compare(): dot-separated section numbers compare field by
    field, missing fields count as zero (so '3' == '3.0.0') and leading
    zeros are ignored ('1.02.003' == '1.2.3')."""

    def test_equal_1(self):
        self.assertEqual(compare('1', '2'), -1)

    def test_equal_2(self):
        self.assertEqual(compare('1.1', '1.2'), -1)

    def test_equal_3(self):
        self.assertEqual(compare('1.1', '1'), 1)

    def test_equal_4(self):
        self.assertEqual(compare('1.2.3.4', '1.2.3.4'), 0)

    def test_equal_5(self):
        self.assertEqual(compare('3', '3.0'), 0)

    def test_equal_6(self):
        self.assertEqual(compare('3', '3.0.0.0'), 0)

    def test_equal_7(self):
        self.assertEqual(compare('1.2.1', '1.2.0'), 1)

    def test_equal_8(self):
        self.assertEqual(compare('3.0.0', '3.1.1'), -1)

    def test_equal_9(self):
        self.assertEqual(compare('3.0.1', '3.1'), -1)

    def test_equal_10(self):
        self.assertEqual(compare('1.2.3', '1.02.003'), 0)

    def test_equal_11(self):
        self.assertEqual(compare('22.87.95.49.24.8.91.84.80',
                                 '22.87.95.49.24.8.91.84.34.23'), 1)

    def test_equal_12(self):
        self.assertEqual(compare('0.0.0', '1.0.0'), -1)
|
from flask import Blueprint, render_template, redirect, url_for, session, flash
from datetime import date
from flask.globals import request
from .__init__ import db
# Blueprint grouping the inventory views.
inventory = Blueprint('inventory', __name__)


@inventory.route('/inventory')
def inv():
    """Inventory overview: lists items and pending orders.

    Requires a logged-in session and one of the roles Administrativo,
    Médico or Enfermero.
    """
    if not session:
        return redirect(url_for('auth.login'))
    cur = db.connection.cursor()
    cur.execute('call getRole(%s)', [session['id']])
    role = cur.fetchone()[0]
    if not role in ["Administrativo", 'Médico', 'Enfermero']:
        flash('No se tienen los permisos suficientes para acceder a esta página', 'alert')
        return redirect(url_for('main.index'))
    cur.execute('call getOrders()')
    ordenes = cur.fetchall()
    cur.execute('select * from item')
    items = cur.fetchall()
    cur.close()
    return render_template('inventario.html', items=items, ordenes=ordenes, role=role)
@inventory.route('/inventory', methods=['POST'])
def invPost():
    """Create a restock order (pedido) for the posted item and quantity.

    NOTE(review): unlike inv(), this handler reads session['id'] without
    first checking that a session exists -- confirm access control.
    """
    idItem = request.form['item']
    cantidad = request.form['cantidad']
    cur = db.connection.cursor()
    cur.execute('call getAdminProfile(%s)',[session['id']])
    # column 7 of the admin profile row is the administrative id -- TODO confirm schema
    idAdministrativo = cur.fetchone()[7];
    cur.execute('insert into pedidos (idItem,idAdministrativo,cantidad,fecha) values (%s,%s,%s,%s)', [idItem, idAdministrativo,cantidad,date.today()])
    db.connection.commit()
    flash('orden creada correctamente', 'ok')
    return redirect(url_for('inventory.inv'))
@inventory.route('/inventory/recieved')
def recieved():
    """Complete an order: delete the pedido and add its quantity to stock.

    Only users whose profile role is 'Administrativo' may do this. The
    route keeps its original spelling so existing links continue to work.
    NOTE(review): this is a state-changing action driven by GET query
    parameters -- consider POST.
    """
    if not session:
        return redirect(url_for('auth.login'))
    cur = db.connection.cursor()
    cur.execute('call getProfile(%s)', [session['id']])
    profileInfo = cur.fetchone()
    if profileInfo[5] != 'Administrativo':
        flash('No se tienen los permisos suficientes para acceder a esta página', 'alert')
        return redirect(url_for('main.index'))
    orden = request.args.get('orden')
    item = request.args.get('item')
    cantidad = request.args.get('cantidad')
    cur.execute('delete from pedidos where idPedido = %s', [orden])
    db.connection.commit()
    cur.execute('update item set cantidad = cantidad + %s where idItem = %s', [cantidad,item])
    db.connection.commit()
    cur.close()
    flash('Orden completada', 'ok')
    return redirect(url_for('inventory.inv'))
@inventory.route('/inventory/remove', methods=['POST'])
def invRem():
    """Withdraw a quantity of an item from stock, rejecting withdrawals
    larger than the current stock."""
    idItem = request.form['item']
    cantidadARetirar = request.form['cantidad']
    cur = db.connection.cursor()
    cur.execute('select * from item where idItem = %s', [idItem])
    # column 3 of the item row is the stock quantity -- TODO confirm schema
    cantidadInventario = cur.fetchone()[3]
    if int(cantidadInventario) < int(cantidadARetirar):
        flash('La cantidad solicitada es mayor que la cantidad en stock','alert')
    else:
        cur.execute('update item set cantidad = cantidad - %s where idItem = %s', [cantidadARetirar,idItem])
        db.connection.commit()
        flash('Cantidad retirada de inventario satisfactoriamente','ok')
    cur.close()
    return redirect(url_for('inventory.inv'))
# Generated by Django 3.1.5 on 2021-02-28 21:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the hackathon app: Team,
    Submission, Grader, Grade and Competitor."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Team',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='Submission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file', models.FileField(upload_to='')),
                ('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hackathon.team')),
            ],
        ),
        migrations.CreateModel(
            name='Grader',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
                ('django_user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Grade',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('function', models.PositiveSmallIntegerField()),
                ('readability', models.PositiveSmallIntegerField()),
                ('implementation', models.PositiveSmallIntegerField()),
                ('creativity', models.PositiveSmallIntegerField()),
                ('educational', models.PositiveSmallIntegerField()),
                ('comments', models.TextField(blank=True)),
                ('grader', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hackathon.grader')),
                ('submission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hackathon.submission')),
            ],
        ),
        migrations.CreateModel(
            name='Competitor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
                ('django_user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('team', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='hackathon.team')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
from aiogram import Bot, Dispatcher, types
from bot.states.SQLAlchemyStorage import SQLAlchemyStorage
from config import BOT_TOKEN
# Aiogram wiring: bot defaults to HTML parse mode; dispatcher keeps its
# FSM state in the SQLAlchemy-backed storage.
bot = Bot(token=BOT_TOKEN, parse_mode=types.ParseMode.HTML)
storage = SQLAlchemyStorage()
dp = Dispatcher(bot, storage=storage)
|
import PIL
import tensorflow as tf
import pathlib
# Count the flower photos, then open the first rose image.
data_dir = pathlib.Path("../data/flower_photos")
image_count = len(list(data_dir.glob('*/*.jpg')))
print(image_count)
roses = list(data_dir.glob('roses/*'))
# NOTE(review): the opened image object is discarded; presumably this was
# run in a notebook where the last expression is displayed.
PIL.Image.open(str(roses[0]))
|
from typing import Optional
import pygame as pg
from abc import ABC, abstractmethod, ABCMeta
class ScreenType(ABCMeta):
    """Metaclass of Screen; a value typed ScreenType is therefore a Screen
    *class*, which is what Screen.draw may return."""
    pass
class Screen(ABC, metaclass=ScreenType):
    """Abstract base for drawable screens."""

    @abstractmethod
    def draw(self, screen: pg.Surface, clock: pg.time.Clock) -> Optional[ScreenType]:
        """Render one frame onto ``screen``; return a Screen subclass
        (presumably the next screen to show -- confirm with callers) or None."""
        pass
|
# coding: utf-8
# Standard Python libraries
from pathlib import Path
import tarfile
from .. import load_run_directory
def reset_orphans(run_directory, orphan_directory=None):
    """
    Resets calculations that were moved to an orphan directory back to a
    run directory and removes any bid files that they contain. Can be useful
    if connection is lost to a remote database or a runner was accidentally
    started with the wrong database.

    Parameters
    ----------
    run_directory : str
        The directory to move the orphaned calculations to.
    orphan_directory : str, optional
        The orphan directory containing archived calculation folders. The
        default value assumes that the orphan directory is a directory named
        "orphan" that is in the same parent directory as run_directory, i.e.
        is at "../orphan" relative to run_directory.
    """
    # Check for run_directory first by name then by path
    try:
        run_directory = load_run_directory(run_directory)
    except Exception:
        # Not a known run-directory name: treat it as a filesystem path.
        run_directory = Path(run_directory).resolve()
        if not run_directory.is_dir():
            raise ValueError('run_directory not found/set')

    # Set default orphan directory
    if orphan_directory is None:
        orphan_directory = Path(run_directory.parent, 'orphan')

    # Loop over tar.gz files
    for archive in Path(orphan_directory).glob('*.tar.gz'):

        # Extract calc to run_directory; the context manager guarantees the
        # archive is closed even if extraction fails.
        with tarfile.open(archive) as tar:
            tar.extractall(run_directory)
        archive.unlink()

        # Remove any bids in the calc. Strip the full '.tar.gz' suffix so
        # calculation names containing '.' survive (split('.')[0] truncated
        # them in the original).
        calc_dir = Path(run_directory, archive.name[:-len('.tar.gz')])
        for bidfile in calc_dir.glob('*.bid'):
            bidfile.unlink()
from flask import Flask, render_template, request, redirect, url_for, jsonify
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Restaurant, MenuItem
app = Flask(__name__)

# Bind the declarative models to the SQLite file and open one module-level
# session shared by every request handler.
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
@app.route('/')
@app.route('/index/')
def overview():
    """Landing page; queries all restaurants for the template."""
    restaurants = session.query(Restaurant).all()
    # Fixed: the query result was fetched but never handed to the template;
    # extra context variables are ignored by Jinja, so this is safe.
    return render_template('overview.html', restaurants=restaurants)
@app.route('/council/', methods=['GET','POST'])
def council():
    """Render the council page."""
    return render_template('council.html')

@app.route('/goals/', methods=['GET','POST'])
def goals():
    """Render the goals page."""
    return render_template('goals.html')

@app.route('/history/', methods=['GET','POST'])
def history():
    """Render the history page."""
    return render_template('history.html')

@app.route('/settings/')
def settings():
    """Render the settings page."""
    return render_template('settings.html')
if __name__ == '__main__':
    # Development server only: debug mode on, reachable from any interface.
    app.debug = True
    app.run(host='0.0.0.0', port=5000)
import matplotlib.pyplot as plt
import torch
from torchvision.utils import draw_bounding_boxes, draw_segmentation_masks
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F
def plot(imgs, row_title=None, **imshow_kwargs):
    """Display a 2-D grid of images with optional boxes/masks via matplotlib.

    ``imgs`` is a list (or list of lists, one per row) whose entries are
    images or (image, target) tuples; a target may be a dict with optional
    'boxes'/'masks' entries or a tv_tensors.BoundingBoxes instance.
    """
    if not isinstance(imgs[0], list):
        # Make a 2d grid even if there's just 1 row
        imgs = [imgs]

    num_rows = len(imgs)
    num_cols = len(imgs[0])
    _, axs = plt.subplots(nrows=num_rows, ncols=num_cols, squeeze=False)
    for row_idx, row in enumerate(imgs):
        for col_idx, img in enumerate(row):
            boxes = None
            masks = None
            if isinstance(img, tuple):
                img, target = img
                if isinstance(target, dict):
                    boxes = target.get("boxes")
                    masks = target.get("masks")
                elif isinstance(target, tv_tensors.BoundingBoxes):
                    boxes = target
                else:
                    raise ValueError(f"Unexpected target type: {type(target)}")
            img = F.to_image(img)
            if img.dtype.is_floating_point and img.min() < 0:
                # Poor man's re-normalization for the colors to be OK-ish. This
                # is useful for images coming out of Normalize()
                img -= img.min()
                img /= img.max()

            # draw_* utilities require uint8 images
            img = F.to_dtype(img, torch.uint8, scale=True)
            if boxes is not None:
                img = draw_bounding_boxes(img, boxes, colors="yellow", width=3)
            if masks is not None:
                img = draw_segmentation_masks(img, masks.to(torch.bool), colors=["green"] * masks.shape[0], alpha=.65)

            ax = axs[row_idx, col_idx]
            # CHW tensor -> HWC array for imshow
            ax.imshow(img.permute(1, 2, 0).numpy(), **imshow_kwargs)
            ax.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])

    if row_title is not None:
        for row_idx in range(num_rows):
            axs[row_idx, 0].set(ylabel=row_title[row_idx])

    plt.tight_layout()
|
from pyspark import SparkContext, SparkConf
import sys
import time
import multiprocessing
start_time = time.time()  # wall-clock reference for the duration printed at the end
# Get Data Grouped by user_id or business_id Based on Case Number
def get_grouped_data(ungrouped_data):
    """Group a (key, value) pair RDD into baskets.

    Returns an RDD of value lists, one per distinct key; persisted because
    both SON passes iterate it.
    """
    data_p = ungrouped_data.map(lambda s: (s[0], s[1]))
    data_grouped = data_p\
        .map(lambda s: (s[0], [s[1]]))\
        .reduceByKey(lambda x, y: x + y)\
        .values()\
        .persist()
    return data_grouped
# Generate Doubleton and Higher Order Pair Permutations
def generate_permutations(iterable, num_set):
    """Build size-``num_set`` candidate itemsets from sorted frequent itemsets.

    For num_set == 2 every pair of singletons is concatenated; for larger
    sizes two itemsets merge only when they share the same
    (num_set - 2)-item prefix — the a-priori candidate-generation rule.
    Returns an empty list for num_set < 2.
    """
    MIN_SET = 2
    candidates = []
    if num_set < MIN_SET:
        return candidates
    total = len(iterable)
    for idx in range(total):
        current = iterable[idx]
        current_members = set(current)
        for later_idx in range(idx + 1, total):
            later = iterable[later_idx]
            if num_set == MIN_SET:
                # doubletons: plain tuple concatenation of two singletons
                candidates.append(current + later)
            elif current[0: num_set - MIN_SET] == later[0: num_set - MIN_SET]:
                candidates.append(tuple(sorted(current_members.union(later))))
            else:
                # input is sorted, so no later itemset shares this prefix
                break
    return candidates
def get_frequent_candidates(baskets, threshold, permutation_list=None, num_set=None):
    """Count item(set) occurrences over ``baskets`` and return the sorted
    keys whose count reaches ``threshold``.

    With num_set None or 1, singletons are counted (as 1-tuples); with
    num_set > 1 each candidate in permutation_list is counted in every
    basket containing it. A non-positive num_set yields an empty list.
    """
    counts = {}
    for basket in baskets:
        if num_set is None or num_set == 1:
            # singleton pass: every item becomes a 1-tuple key
            for item in basket:
                key = (item,)
                counts[key] = counts.get(key, 0) + 1
        elif num_set > 1:
            members = set(basket)
            # count each candidate itemset contained in this basket
            for candidate in permutation_list:
                if members.issuperset(candidate):
                    counts[candidate] = counts.get(candidate, 0) + 1
        else:
            return []
    return sorted(k for k, v in counts.items() if v >= threshold)
# A-Priori Algorithm
def apriori(iterable, count, threshold):
    """Local A-Priori over one partition's baskets (SON pass 1).

    The global support ``threshold`` is scaled by this partition's share
    of the total ``count`` of baskets. Returns every locally frequent
    candidate itemset of every size.
    """
    baskets = list(iterable)
    support_th = threshold * (len(baskets) / count)
    candidate_list = list()

    # Pass 1: frequent singletons
    frequent_candidates = get_frequent_candidates(baskets, support_th)

    # Pass n: grow itemsets one element at a time until none survive
    n = 2
    while len(frequent_candidates) > 0:
        candidate_list.extend(frequent_candidates)
        permutation_list = generate_permutations(frequent_candidates, n)
        frequent_candidates = get_frequent_candidates(baskets, support_th, permutation_list, n)
        n += 1
    return candidate_list
def count_freq(iterable, candidates):
    """Return (candidate, count) pairs giving each candidate itemset's
    support within this partition's baskets (SON pass 2)."""
    baskets = list(iterable)
    tallies = []
    for candidate in candidates:
        members = set(candidate)
        occurrences = sum(1 for basket in baskets if members.issubset(basket))
        tallies.append((candidate, occurrences))
    return tallies
def record_data(dataset, output_file):
    """Write itemsets to ``output_file``: comma-separated within each
    same-size group, with a blank line between size groups."""
    previous_size = 0
    for itemset in dataset:
        size = len(tuple(itemset))
        if size > previous_size:
            # start a new size group (no separator before the first one)
            output_file.write("" if previous_size == 0 else "\n\n")
            previous_size = size
        else:
            output_file.write(",")
        # render e.g. ('a',) as ('a') to match the expected report format
        output_file.write(str(itemset).replace(",)", ")").strip("[]"))
# Main Execution

# Run Configurations
filter_th = int(sys.argv[1])
support = int(sys.argv[2])
input_path = sys.argv[3]
output_path = sys.argv[4]

# Level of Parallelism - Recommended by Spark
# http://spark.apache.org/docs/latest/tuning.html#level-of-parallelism
cpu_num = multiprocessing.cpu_count()
task_per_cpu = cpu_num * 3

# Spark Configurations
conf = SparkConf().setAppName('HW2 - Task 2').setMaster('local[*]')
sc = SparkContext(conf=conf)

# Data Input
distFile = sc.textFile(input_path, minPartitions=2).coalesce(task_per_cpu)
rdd = distFile.map(lambda s: s.split(","))

# SON Algorithm
# Divide into market basket model, dropping the CSV header row and any
# basket not larger than the qualification threshold.
headers = rdd.first()
data = rdd.filter(lambda s: s != headers)
grouped_data = get_grouped_data(data).filter(lambda s: len(s) > filter_th)
num_count = grouped_data.count()
num_part = grouped_data.getNumPartitions()  # NOTE(review): unused below

# Pass 1: locally frequent candidates per partition, de-duplicated globally
candidates = grouped_data\
    .mapPartitions(lambda basket: apriori(basket, num_count, support))\
    .map(lambda s: (s, 1))\
    .reduceByKey(lambda x, y: 1)\
    .keys()\
    .collect()
candidates_sorted = sorted(candidates, key=lambda k: (len(k), k))

# Pass 2: true global counts of the candidates, filtered by support
freq_itemsets = grouped_data\
    .mapPartitions(lambda basket: count_freq(basket, candidates))\
    .reduceByKey(lambda x, y: (x + y))\
    .filter(lambda s: s[1] >= support)\
    .map(lambda s: s[0])\
    .collect()
freq_itemsets_sorted = sorted(freq_itemsets, key=lambda k: (len(k), k))

# Data Output
with open(output_path, 'w') as op:
    # Add Candidates
    op.write("Candidates:" + "\n")
    record_data(candidates_sorted, op)
    op.write("\n\n")
    # Add Frequent Itemsets
    op.write("Frequent Itemsets:" + "\n")
    record_data(freq_itemsets_sorted, op)

duration = time.time() - start_time
print("Duration: ", duration)
|
from decimal import Decimal, DecimalException
from django.forms import ValidationError
from ....fields import DecimalField
from .widgets import BRDecimalInput
class BRDecimalField(DecimalField):
    """DecimalField accepting numbers in Brazilian format (e.g. '1.234,56')."""
    widget = BRDecimalInput

    def to_python(self, value):
        """Normalise '1.234,56' to Decimal('1234.56').

        The decimal comma becomes a dot and all thousands separators are
        dropped. Raises ValidationError for unparseable input.
        """
        if value in (None, ''):
            # pass empty values through unchanged instead of crashing on
            # .replace() (the original raised AttributeError for None)
            return value
        value = value.replace(',', '.')
        # remove every '.' except the last one (the decimal separator)
        value = value.replace('.', '', value.count('.') - 1)
        try:
            value = Decimal(value)
        except DecimalException:
            raise ValidationError(self.error_messages['invalid'])
        # Fixed: the original fell off the end here, so to_python always
        # returned None regardless of the input.
        return value
|
from typing import final
import numpy as np
import cv2
import pandas as pd
import os
import argparse
import glob
import random
import math
import matplotlib.pyplot as plt
from hdrtool import hdr # test purpose
# Output defaults and the per-channel response curves filled by getHDR().
output_path = ''
output_file = 'output.jpg'
response_curves = []
# the weight function for single pixel
def weightFunction(pixel):
    """Triangle ("hat") weight for a pixel value in [0, 255].

    Values near the middle of the range carry the largest weight; the
    extremes get weight 1 (never 0, so every sample still contributes).
    """
    low, high = 0, 255
    mid = (low + high) // 2  # == 127
    if pixel <= mid:
        return pixel - low + 1
    return high - pixel + 1
def expandGrid(array, n):
    """One-hot encode integer ``array`` along a new trailing axis of size ``n``."""
    onehot = np.zeros(array.shape + (n, ))
    index_grids = np.meshgrid(*(range(size) for size in array.shape), indexing='ij')
    flat_indices = [grid.ravel() for grid in index_grids]
    onehot[tuple(flat_indices + [array.ravel()])] = 1
    # sanity: exactly one hot entry per cell, recoverable via argmax
    assert((onehot.sum(-1) == 1).all())
    assert(np.allclose(np.argmax(onehot, -1), array))
    return onehot
def computeResponseCurve(color, exposures, smoothing):
    """Solve for one channel's camera response curve (Debevec-Malik style).

    Parameters
    ----------
    color : (num_pixels, num_images) array of sampled pixel values (0-255).
    exposures : log exposure time per image.
    smoothing : weight of the second-derivative smoothness constraint.

    Returns the 256-entry log-exposure response curve g.
    """
    color_range = 256
    num_of_pixel = color.shape[0]
    num_of_image = color.shape[1]
    A = np.zeros((num_of_image * num_of_pixel + color_range + 1, color_range + num_of_pixel))
    B = np.zeros((A.shape[0], 1))

    # Data-fitting equations: w(z)*g(z) - w(z)*lnE_i = w(z)*lnDt_j
    k = 0
    for i in range(num_of_pixel):
        for j in range(num_of_image):
            c_ij = color[i, j]
            w_ij = weightFunction(c_ij)
            A[k, c_ij] = w_ij
            A[k, (color_range) + i] = -w_ij
            B[k] = w_ij * exposures[j]
            k += 1

    # Anchor the curve by fixing its middle value
    A[k, math.floor(color_range / 2.)] = 1
    k += 1

    # Smoothness constraint on the curve's second derivative
    for i in range(1, color_range - 2):
        w_i = weightFunction(i + 1)
        A[k, i] = w_i * smoothing
        A[k, i + 1] = -2 * w_i * smoothing
        A[k, i + 2] = w_i * smoothing
        k += 1

    # Least-squares solve (the original comment claimed SVD, but lstsq is
    # used; the dead assignment `le = x[color_range, :]` was removed).
    x = np.linalg.lstsq(A, B, rcond=-1)[0]
    g = x[0: color_range]
    return g[:, 0]
def computeRadianceMap(images, exposure, response_curve):
    """Fuse one channel's exposures into a radiance map.

    Bug fixes relative to the original:
    * the weight loop read the zero-initialised array instead of the image
      (`w[i,j,k] = weightFunction(w[i,j,k])`), so every weight collapsed
      to weightFunction(0) == 1; the hat weights now come from the actual
      pixel values,
    * the vectorised weight array previously clobbered the image-width
      variable ``W`` and was then discarded,
    * the one-hot tensor from expandGrid is replaced by direct indexing
      (`response_curve[z]` equals onehot(z) . response_curve) to avoid the
      huge intermediate allocation.
    """
    imgarr = np.asarray(images)  # (num_images, H, W), integer pixel values
    zmid = (255 - 0) / 2
    # Hat weights identical to weightFunction: z+1 up to the midpoint,
    # 255-z+1 above it.
    w = np.where(imgarr <= 127, imgarr + 1.0, 255.0 - imgarr + 1.0)
    G = response_curve[imgarr]
    lndT = exposure[:, np.newaxis, np.newaxis] * np.ones(imgarr.shape)
    # weighted average of (g(z) - ln dt) over the exposures
    img_rad_map = np.sum(w * (G - lndT), axis=0) / (np.sum(w, axis=0) + 1e-8)
    return np.exp(img_rad_map)
# implement debevec algorithm for getting hdr image
def getHDR(images, exposure_times, l):
    """Assemble an HDR image channel by channel (Debevec algorithm).

    Parameters: ``images`` are same-size 3-channel arrays, ``exposure_times``
    are log exposure times, ``l`` is the smoothing weight. Appends each
    channel's response curve to the module-level ``response_curves`` list.
    Pixel sampling uses np.random.choice without a seed, so results vary
    between runs.
    """
    rgb_channels = images[0].shape[2]
    hdr_image = np.zeros(images[0].shape, dtype=np.float64)
    num_of_images = len(images)

    # number of random pixel positions sampled to fit the response curve
    num_of_samples = math.ceil(255 * 2 / (num_of_images - 1)) * 2
    random_indexes = np.random.choice(images[0].shape[0] * images[0].shape[1], (num_of_samples,), replace=False)
    rgb = ['r', 'g', 'b']
    for i in range(rgb_channels):
        print("Color Channel: " + rgb[i])
        # the i-th channel of every image, each of shape (height, width)
        single_color = []
        for image in images:
            single_color.append(image[:, :, i])

        # sampled values: one row per sample, one column per image
        this_color = np.zeros((num_of_samples, num_of_images), dtype=np.uint8)
        for j, image in enumerate(images):
            flat_image = image[:, :, i].flatten()
            this_color[:, j] = flat_image[random_indexes]

        # compute response curve
        print(" Compute Reponse Cuve ... ")
        response_curve = computeResponseCurve(this_color, exposure_times, l)
        response_curves.append(response_curve)

        # compute radiance map
        print(" Compute Radiance Map ... ")
        img_rad_map = computeRadianceMap(single_color, exposure_times, response_curve)
        hdr_image[..., i] = img_rad_map
    return hdr_image
def tone_mapping(image, max_white, alpha):
    """Reinhard-style global tone mapping, then min-max rescale to [0, 255]."""
    eps = 1e-9
    pixel_count = image.shape[0] * image.shape[1] * 3
    # log-average ("key") luminance of the radiance map
    log_avg = np.exp((1 / pixel_count) * np.sum(np.log(eps + image)))
    scaled = (alpha / log_avg) * image
    display = scaled * (1 + (scaled / (max_white ** 2))) / (1 + scaled)
    lo = display.min()
    hi = display.max()
    return (display - lo) * (255 / (hi - lo))
# load the images and exposure times, build the HDR image, tone-map and save
if __name__ == '__main__':
    # Settings
    parser = argparse.ArgumentParser()
    parser.add_argument('--image-path', default='./image/test1/', type=str,
                        help='directory of the folder containing images')
    parser.add_argument('--exposure-file', default='./image/test1.txt', type=str,
                        help='file containing the exposure time for each image')
    parser.add_argument('--output-path', default='', type=str,
                        help='output path of jpg file')
    parser.add_argument('--lwhite', type=float, default=0.8,
                        help='the number for constraint the highest value in hdr image(default: 0.8)')
    parser.add_argument('--alpha', type=float, default=0.5,
                        help='The number for correction. Higher value for brighter result; lower for darker(default: 0.5)')
    args = parser.parse_args()
    image_path = args.image_path
    exposure_file = args.exposure_file
    output_path = args.output_path
    # NOTE(review): lwhite/alpha are parsed but tone_mapping below is
    # called with hard-coded values instead -- confirm intent.
    lwhite = args.lwhite
    alpha = args.alpha

    # load the images, downscaled to 960x540
    image_files = glob.glob(image_path + "*")
    images = []
    for file in image_files:
        image = cv2.imread(file)
        image = cv2.resize(image, (960, 540))
        images.append(image)
    image_height = images[0].shape[0]
    image_width = images[0].shape[1]
    print("Pixel Number: " + str(image_height * image_width))

    # load exposure times, one float per line
    exposure_times = []
    file = open(exposure_file, 'r')
    for x in file:
        exposure_times.append(float(x))
    exposure_times = np.asarray(exposure_times)

    # compute hdr image (log exposure times, smoothing weight 100)
    print("Calculating HDR image ... ")
    hdr_image = getHDR(images, np.log(exposure_times).astype(np.float32), 100)
    print("Got HDR Image ... ")
    cv2.imwrite(output_path + "radiance_map.jpg", hdr_image)

    # tone mapping
    print(" Tone Mapping ... ")
    final_image = tone_mapping(hdr_image, max_white=np.exp(hdr_image.max())*0.7, alpha=0.5)

    # save the image
    cv2.imwrite(output_path + output_file, final_image.astype(np.uint8))

    # plot the per-channel response curves
    print(" Saving Response Curve ")
    print(" Plotting Response Curve ")
    px = list(range(0, 256))
    plt.figure(constrained_layout=False, figsize=(5, 5))
    plt.title("Response curves for BGR", fontsize=20)
    plt.plot(px, np.exp(response_curves[0]), 'r')
    plt.plot(px, np.exp(response_curves[1]), 'b')
    plt.plot(px, np.exp(response_curves[2]), 'g')
    plt.ylabel("log Exposure X", fontsize=20)
    plt.xlabel("Pixel value Z", fontsize=20)
    plt.savefig(output_path+"rc.png")
from django.urls import path
from . import views
app_name='login'

# Routes: index (login form), auth (credential check), logout.
urlpatterns=[
    path('',views.index,name='index'),
    path('auth',views.auth,name='auth'),
    path('logout',views.logout,name='logout')
]
|
from tkinter import *
import glob, time
class veld:
    """Draws a Monopoly-like board (edge squares, corner images and street
    colour bars) on a 700x700 Tk canvas.

    Fixes relative to the original: the chance/community-chest branch
    indexed ``straat[type]`` (the builtin ``type``) instead of
    ``straat['type']``, and the final create_image call carried an extra
    ']' that made the whole module a SyntaxError.
    """

    def __init__(self, tk):
        self.tk = tk
        self.canvas = Canvas(self.tk, width=700, height=700)
        self.canvas.pack()
        self.tk.update()
        self.images = self.load_img()
        self.vakjes()
        self.straten()

    def load_img(self):
        """Load every *.gif in the working directory into PhotoImages,
        keyed by the filename minus its first character and '.gif' suffix."""
        a = {}
        for file in glob.glob('*.gif'):
            a[file[1:-4]] = PhotoImage(file=file)
        return a

    def vakjes(self):
        """Draw the board border and the empty squares along all four edges."""
        self.canvas.create_rectangle(10, 10, 670, 670)
        for i in range(11):
            for j in range(11):
                if 0 < i < 10 and 0 < j < 10:
                    # interior of the board holds no squares
                    continue
                elif i % 5 == 0 and j % 5 == 0:
                    # corners and mid-edge squares (incl. chest/chance/tax)
                    self.special(i, j)
                elif i == 0:
                    # top edge
                    self.canvas.create_rectangle(37+j*55+(j//5), 10,
                                                 92+j*55+(j//5), 92)
                elif i == 10:
                    # bottom edge
                    self.canvas.create_rectangle(37+j*55+(j//5), 588,
                                                 92+j*55+(j//5), 670)
                elif j == 0:
                    # left edge
                    self.canvas.create_rectangle(10, 37+i*55+(i//5),
                                                 92, 92+i*55+(i//5))
                else:
                    # right edge (j == 10)
                    self.canvas.create_rectangle(588, 37+i*55+(i//5),
                                                 670, 92+i*55+(i//5))
        self.tk.update()

    def special(self, i, j):
        """Draw the image squares (corners and mid-edge stations) plus the
        rectangles around them."""
        # draw the images
        if (i, j) == (0, 0):
            self.canvas.create_image(10, 10, image=self.images['pot'],
                                     anchor=NW)
        elif (i, j) == (0, 5):
            self.canvas.create_image(92+4*55, 10, image=self.images['station3'],
                                     anchor=NW)
        elif (i, j) == (0, 10):
            self.canvas.create_image(148+8*55, 10, image=self.images['gotojail'],
                                     anchor=NW)
        elif (i, j) == (5, 0):
            self.canvas.create_image(10, 92+4*55, image=self.images['station2'],
                                     anchor=NW)
        elif (i, j) == (5, 10):
            self.canvas.create_image(148+8*55, 92+4*55, image=self.images['station4'],
                                     anchor=NW)
        elif (i, j) == (10, 0):
            self.canvas.create_image(10, 148+8*55, image=self.images['jail'],
                                     anchor=NW)
        elif (i, j) == (10, 5):
            self.canvas.create_image(92+4*55, 148+8*55, image=self.images['station1'],
                                     anchor=NW)
        elif (i, j) == (10, 10):
            self.canvas.create_image(148+8*55, 148+8*55, image=self.images['start'],
                                     anchor=NW)
        # draw the rectangles around the images
        if i % 10 == 0 and j % 10 == 0:
            self.canvas.create_rectangle(10+(j//10)*(138+8*55), 10+(i//10)*(138+8*55),
                                         92+(j//10)*(138+8*55), 92+(i//10)*(138+8*55))
        elif i == 5:
            self.canvas.create_rectangle(10+(j//10)*(138+8*55), 10+(82+4*55),
                                         92+(j//10)*(138+8*55), 66+(82+4*55))
        elif j == 5:
            self.canvas.create_rectangle(10+(82+4*55), 10+(i//10)*(138+8*55),
                                         66+(82+4*55), 92+(i//10)*(138+8*55))

    def straten(self):
        """Draw the coloured street bars and the chance/community-chest
        images, based on records read from straten.txt (one dict literal
        per line)."""
        # SECURITY NOTE: eval() on file contents executes arbitrary code;
        # ast.literal_eval would be safer if the file may be untrusted.
        with open('straten.txt', 'r') as infile:
            straatinfo = [eval(line) for line in [i.strip() for i in infile.readlines()]]
        for straat in straatinfo:
            (i, j) = straat['loc']
            if straat['type'] == 'straat':
                # TODO: the four branches could be reduced to two
                if i == 0:
                    self.canvas.create_rectangle(37+j*55+(j//5), 72,
                                                 92+j*55+(j//5), 92,
                                                 fill=straat['kleur'])
                elif i == 10:
                    self.canvas.create_rectangle(37+j*55+(j//5), 588,
                                                 92+j*55+(j//5), 608,
                                                 fill=straat['kleur'])
                elif j == 0:
                    self.canvas.create_rectangle(72, 37+i*55+(i//5),
                                                 92, 92+i*55+(i//5),
                                                 fill=straat['kleur'])
                else:
                    # j == 10
                    self.canvas.create_rectangle(588, 37+i*55+(i//5),
                                                 608, 92+i*55+(i//5),
                                                 fill=straat['kleur'])
            elif straat['type'] == 'kans' or straat['type'] == 'alg':
                # chance / community-chest images, one per edge
                if i == 0:
                    self.canvas.create_image(37+j*55+(j//5), 10, anchor=NW,
                                             image=self.images[straat['type']+'3'])
                elif i == 10:
                    self.canvas.create_image(37+j*55+(j//5), 588, anchor=NW,
                                             image=self.images[straat['type']+'1'])
                elif j == 0:
                    self.canvas.create_image(10, 37+i*55+(i//5), anchor=NW,
                                             image=self.images[straat['type']+'2'])
                else:
                    # j == 10
                    self.canvas.create_image(588, 37+i*55+(i//5), anchor=NW,
                                             image=self.images[straat['type']+'4'])
        self.tk.update()
if __name__ == '__main__':
    tk = Tk()
    tk.wm_attributes('-topmost', 1)  # keep the window above others
    tk.update()
    tk.geometry('700x700+0+0')
    veld(tk)
    # show the board for five seconds, then close
    time.sleep(5)
    tk.destroy()
|
"""
Django settings for EwhaEverytimeEverywhere project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY', '')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['django', 'localhost', 'localhost:8000', '127.0.0.1', '127.0.0.1:8000']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'bootstrap_modal_forms',
# 'sslserver',
'widget_tweaks',
'rest_framework',
'django_celery_beat',
'django_celery_results',
'ckeditor',
'ckeditor_uploader',
'django_filters',
'accounts',
'board',
'manager',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'EwhaEverytimeEverywhere.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'EwhaEverytimeEverywhere', 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'EwhaEverytimeEverywhere.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    # 'default': {
    #     'ENGINE': 'django.db.backends.sqlite3',
    #     'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    # }
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        # connection credentials come from environment variables; empty default
        'NAME': os.environ.get('DB_NAME', ''),
        'USER': os.environ.get('DB_USERNAME', ''),
        'PASSWORD': os.environ.get('DB_PASSWORD', ''),
        'HOST': os.environ.get('DB_HOST', ''),
        'PORT': os.environ.get('DB_PORT', ''),
        'OPTIONS': {
            # force InnoDB and utf8 on every new connection
            'init_command':
                'SET default_storage_engine=INNODB,'
                'character_set_connection=utf8,'
                'collation_connection=utf8_unicode_ci'
        }
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'ko-kr'  # default language for the system (can differ per user)
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# STATICFILES_DIRS = [
#     os.path.join(BASE_DIR, 'static'),  # os.path.join concatenates path segments
# ]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")

from django.urls import reverse_lazy
# LOGIN_URL = reverse_lazy('login')
LOGIN_REDIRECT_URL = reverse_lazy('profile')  # where to land after login
# LOGOUT_REDIRECT_URL = reverse_lazy('login')

AUTH_USER_MODEL = 'accounts.User'  # project-specific custom user model

# EMAIL
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'  # for local development checks
# For real deployment: switch BACKEND to SMTP and also configure
# HOST, HOST_USER, HOST_PASSWORD, USE_TLS / USE_SSL.
EMAIL_HOST = 'smtp.gmail.com'
# server that hosts the mail
EMAIL_PORT = '587'
# port used to talk to gmail
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER', '')
# sender e-mail address
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', '')
# password of the sender address
EMAIL_USE_TLS = True
# TLS transport security
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
# address used for automatic site-related mail, 'webmaster@localhost'
# validity (in days) of password-reset tokens
PASSWORD_RESET_TIMEOUT_DAYS = 1  # NOTE(review): deprecated since Django 3.1 (PASSWORD_RESET_TIMEOUT) -- confirm target version

# DRF settings
REST_FRAMEWORK = {
    # C:\Anaconda3\lib\site-packages\rest_framework\pagination.py:200: UnorderedObjectListWarning: Pagination may yield inconsistent results with an unordered object_list: <class '
    # manager.models.certPage'> QuerySet.
    # paginator = self.django_paginator_class(queryset, page_size)
    # 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    # 'PAGE_SIZE': 10,
    # only the JWT issue/verify format is used; the server itself does not run sessions on JWT
    # 'DEFAULT_AUTHENTICATION_CLASSES': (
    #     'rest_framework_simplejwt.authentication.JWTAuthentication',
    # )
}

# JWT token lifetimes
from datetime import timedelta
SIMPLE_JWT = {
    'ACCESS_TOKEN_LIFETIME': timedelta(minutes=10),
    'REFRESH_TOKEN_LIFETIME': timedelta(minutes=11),
    'ROTATE_REFRESH_TOKENS': True,
}

# # Session-cookie security => cookie is only sent under an HTTPS connection
SESSION_COOKIE_SECURE = False  # NOTE(review): enable in production
SESSION_COOKIE_AGE = 600  # seconds: 10 minutes
SESSION_SAVE_EVERY_REQUEST = True  # refresh server/client session data on every request
# CSRF-cookie security
CSRF_COOKIE_SECURE = False  # NOTE(review): enable in production
CSRF_COOKIE_AGE = 600  # seconds, 10 minutes
# force-redirect http to https
SECURE_SSL_REDIRECT = False
# SECURE_TLS_REDIRECT = True

# Celery settings
CELERY_ALWAYS_EAGER = True  # run tasks synchronously in-process (no worker needed)
CELERY_BROKER_URL = 'redis://127.0.0.1:6379/0'
CELERY_RESULT_BACKEND = 'redis://127.0.0.1:6379/0'
# CELERY_RESULT_BACKEND = 'django-db'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'Asia/Seoul'
# CELERY_IMPORTS = [
#     'accounts.tasks',
# ]
# SCHEDULE_MINUTE = 60
# SCHEDULE_HOUR = 60 * SCHEDULE_MINUTE
# SCHEDULE_DAY = 24 * SCHEDULE_HOUR
# SCHEDULE_WEEK = 7 * SCHEDULE_DAY
# SCHEDULE_MONTH = 30 * SCHEDULE_DAY
# CELERY_BEAT_SCHEDULE = {
#     'ga_collect': {
#         'task': 'app.tasks.ga_collect',
#         'schedule': 5 * SCHEDULE_MINUTE,
#         # 'schedule': 2.0,
#         # 'args': (4, 4)
#     }
# }

# CKEditor file uploads
CKEDITOR_UPLOAD_PATH = 'uploads/'
CKEDITOR_IMAGE_BACKEND = "pillow"
|
# NOTE: Python 2 script (urllib2, reload(sys)/setdefaultencoding).
import urllib2
from bs4 import BeautifulSoup
import re
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from xlrd import open_workbook
from xlwt import easyxf
from xlutils.copy import copy
import os

# Scrape the "replacement" player names of the home team from every match
# page listed in the hrefs file and write one match per spreadsheet row.
excel_address = r'/Users/zhoufengting/Desktop/replacingg_player_away_2013_2014.xls'
work_book = open_workbook(excel_address)
wb = copy(work_book)  # xlrd workbooks are read-only; copy into a writable xlwt workbook
sheet = wb.get_sheet(0)
row = 0  # next sheet row to fill (was misspelled "raw")
txt_path = r"/Users/zhoufengting/Desktop/href_2014_2015.txt"
# fix: the URL-list file was opened but never closed; use a context manager
with open(txt_path) as fp:
    for url in fp:
        response = urllib2.urlopen(url).read()
        soup = BeautifulSoup(response, "html.parser")
        # spans marked "replacement" inside the home-team player list
        names_replacing = soup.find_all("ul", class_="players home-team")[0].find_all("span", class_="replacement")
        for col in range(0, len(names_replacing)):
            sheet.write(row, col, names_replacing[col].text)
        row = row + 1
# url = "http://www.eurosport.com/football/ligue-1/2013-2014/montpellier-hsc-paris-saint-germain_mtc616155/live.shtml"
# xlwt cannot overwrite in place: remove the original, then save under the same name
os.remove(excel_address)
wb.save(excel_address)
from gym_gomoku.envs.util import make_random_policy as policy
|
from flask import Blueprint, current_app, request, jsonify
from .model import Batidas_Ponto
from .serializer import PontoSchema
bp_ponto = Blueprint('Batidas_Ponto', __name__)

@bp_ponto.route('/cadastrar_ponto', methods=['POST'])
def cadastrar():
    """Create a time-clock entry (batida de ponto) from the POSTed JSON body.

    Expects keys 'usuario_id' and 'tipo_batida'; a missing key raises
    KeyError (HTTP 500) -- NOTE(review): consider returning 400 instead.
    Returns the persisted record serialized by PontoSchema.
    """
    dados = request.get_json(force=True)  # force=True: parse even without a JSON content type
    usuario_id = dados['usuario_id']
    tipo_batida = dados['tipo_batida']
    ponto = Batidas_Ponto(usuario_id=usuario_id, tipo_batida=tipo_batida)
    current_app.db.session.add(ponto)
    current_app.db.session.commit()
    return PontoSchema().jsonify(ponto)
@bp_ponto.route('/mostrar_ponto', methods=['GET'])
def mostrar():
    """Return every stored time-clock entry as a JSON list."""
    todas_as_batidas = Batidas_Ponto.query.all()
    return PontoSchema(many=True).jsonify(todas_as_batidas), 200
|
"""
find_element_by_id
find_element_by_id
find_element_by_xpath
find_element_by_link_text
find_element_by_partial_link_text
find_element_by_tag_name
find_element_by_class_name
find_element_by_css_selector
_________________________
from selenium.webdriver.common.by import By
example: button = browser.find_element(By.ID, "submit_button")
By.ID – поиск по уникальному атрибуту id элемента;
By.CSS_SELECTOR – поиск элементов с помощью правил на основе CSS;
By.XPATH – поиск элементов с помощью языка запросов XPath;
By.NAME – поиск по атрибуту name элемента;
By.TAG_NAME – поиск по названию тега;
By.CLASS_NAME – поиск по атрибуту class элемента;
By.LINK_TEXT – поиск ссылки с указанным текстом. Текст ссылки должен быть точным совпадением;
By.PARTIAL_LINK_TEXT – поиск ссылки по частичному совпадению текста.
"""
import math
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
try:
    # raw string: "\Д" and "\c" would otherwise be (invalid) escape sequences
    browser = webdriver.Chrome(r"f:\Дима\chromedriver.exe")
    browser.get("http://suninjuly.github.io/find_link_text")

    # follow the link whose text contains "224592"
    link = browser.find_element(By.PARTIAL_LINK_TEXT, "224592")
    link.click()

    # fill the registration form; the find_element_by_* helpers are removed
    # in Selenium 4, so use the find_element(By..., ...) API consistently
    browser.find_element(By.NAME, "first_name").send_keys("Ivan")
    browser.find_element(By.NAME, "last_name").send_keys("Petrov")
    browser.find_element(By.CLASS_NAME, "city").send_keys("Smolensk")
    browser.find_element(By.ID, "country").send_keys("Russia")
    browser.find_element(By.CSS_SELECTOR, "button[type=submit]").click()

    # the page reports the result via a JS alert; print its trailing token
    alert = browser.switch_to.alert
    text = alert.text
    print(text[-17:])  # fix: removed stray debugging call help(text)
    alert.accept()
    print("success!")
finally:
    # keep the window open briefly for inspection, then always quit the driver
    time.sleep(5)
    browser.quit()
|
import cv2
import numpy as np

# Create a black 512x512 BGR canvas.
img = np.zeros((512,512,3),np.uint8)
# Draw a 5-pixel-thick diagonal blue line; the 8-bit BGR values can be
# changed freely.
cv2.line(img,(0,0),(511,511),(255,0,0),5)  # line
cv2.rectangle(img,(384,0),(510,511),(0,255,0),3)  # rectangle
cv2.circle(img,(447,63),63,(0,0,255),-1)  # circle (thickness -1 = filled)
cv2.ellipse(img,(256,256),(100,50),0,0,180,255,-1)  # half ellipse, filled
# polygon
pts=np.array([[10,5],[20,30],[70,20],[50,10]],np.int32)
pts=pts.reshape((-1,1,2))  # polylines expects shape (points, 1, xy)
cv2.polylines(img,[pts],False,(0,255,255))
# draw the white text 'Sekiller' ("Shapes")
font=cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'Sekiller',(10,500),font,4,(255,255,255),2,cv2.LINE_AA)
# show the image and wait for any key press before closing the window
cv2.imshow('Geometrik Sekiller',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
# Read five integers and report the largest and smallest values together
# with every (0-based) position at which each occurs.
num = list()
for i in range(0, 5):
    num.append(int(input(f'Digite o {i+1}º número: ')))
    if i == 0:
        # the first value initializes both extremes
        menor = maior = num[i]
        posmenor = posmaior = str(i)
    else:
        if num[i] < menor:
            menor = num[i]
            posmenor = str(i)  # new minimum: restart the position list
        elif (num[i] == menor):
            posmenor += f'... {i}'  # tie with current minimum: append position
        if num[i] > maior:
            maior = num[i]
            posmaior = str(i)  # new maximum: restart the position list
        elif(num[i] == maior):
            posmaior += f'... {i}'  # tie with current maximum: append position
print(f'Você digitou os valores {num}')
print(f'{maior} é o maior número e está na(s) posição(ões) {posmaior}.')
print(f'{menor} é o menor número e está na(s) posição(ões) {posmenor}.')
from src.cdp.Habilidades import Resistencia
from src.util.FabricaNaves import FabricaNave
class FabricaNaveJogador(FabricaNave):
    """Concrete factory for the player's ship."""

    def __init__(self, nome, figura_nave, figura_explosao, som):
        # fix: super(FabricaNaveJogador).__init__(...) built an *unbound*
        # super object, so FabricaNave.__init__ was never executed; the
        # zero-argument form binds self correctly.
        super().__init__(nome, figura_nave, figura_explosao, som)
        self.tempoMissel = 0  # missile cooldown timer
        self.municao = self.cria_municao()

    # """---------------ACOES----------------------"""
    # @abc.override
    def move(self):
        # shift vertically by the ship's y velocity and refresh the hitbox
        self.posicao["y"] += self.velocidade["y"]
        self.cria_area()

    """
    # @abc.override
    def atira(self):
        if self.cria_tiro(self.posicao) != "ERRO":
            self.cria_tiro(self.posicao)
            self.municao[-1].atira()
            self.buzina()
    """

    # """--------------ATRIBUTO------------------"""
    # @abc.override
    @staticmethod
    def cria_velocidade():
        # base speed of the player's ship -- units presumably pixels per tick; confirm
        return {"x": 2, "y": 2}

    # @abc.override
    @staticmethod
    def cria_resistencia():
        return Resistencia.Resistencia(10, 2)
|
#scipy.signal.istft example
#https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.istft.html
#
import numpy as np  # added by author
from scipy import signal
import matplotlib.pyplot as plt

# Generate a test signal sampled at 1024 Hz: a 2 Vrms cosine whose frequency
# steps up over time, corrupted by 0.0001 V**2/Hz of white noise.
fs = 1024
N = 10*fs
nperseg = 64 #2048 #1024 #128 #256 #512
amp = 2 * np.sqrt(2)  # 2 Vrms expressed as a peak amplitude
noise_power = 0.0001 * fs / 2
time = np.arange(N) / float(fs)
#carrier = amp * np.sin(2*np.pi*50*time)
#t = np.linspace(0, 100, 1000, endpoint=False)
# piecewise carrier: 5 Hz for t<1 s, 10 Hz for t<2 s, 20 Hz for t<4 s,
# 50 Hz for t<6 s, and 100 Hz afterwards
carrier=[]
for t1 in time:
    if t1<1:
        sig1 = amp*np.cos(2 * np.pi * 5 * t1) #+np.cos(2 * np.pi * 8 * t) + np.cos(2 * np.pi * 15 * t) #*np.exp(-0.1*t) *5
        print("5",t1)
    elif t1<2:
        sig1 = amp*np.cos(2 * np.pi * 10 * t1)
        print("10",t1)
    elif t1<4:
        sig1 = amp*np.cos(2 * np.pi * 20 * t1)
        print("20",t1)
    elif t1<6:
        sig1 = amp*np.cos(2 * np.pi * 50 * t1)
        print("30",t1)  # NOTE(review): debug label says "30" but this branch is 50 Hz
    else:
        sig1 = amp*np.cos(2 * np.pi * 100 * t1)
        print(t1)
    carrier.append(sig1)
noise = np.random.normal(scale=np.sqrt(noise_power),
                         size=time.shape)
x = carrier + noise  # list + ndarray: ndarray.__radd__ broadcasts elementwise
plt.plot(time,x)
plt.show()

# Compute and plot the STFT's magnitude.
f, t, Zxx = signal.stft(x, fs=fs, nperseg=nperseg)
plt.figure()
plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
plt.ylim([f[1], f[-1]])
plt.title('STFT Magnitude')
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.yscale('log')
plt.show()

# Zero the components that are 10% or less of the carrier magnitude, then
# convert back to a time series via inverse STFT.
Zxx = np.where(np.abs(Zxx) >= amp/10, Zxx, 0)
_, xrec = signal.istft(Zxx, fs)

# Compare the cleaned signal with the original and true carrier signals.
plt.figure()
plt.plot(time, x, time, xrec, time, carrier)
plt.xlim([5, 6])
plt.xlabel('Time [sec]')
plt.ylabel('Signal')
plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
plt.show()

# Note that the cleaned signal does not start as abruptly as the original,
# since some of the coefficients of the transient were also removed.
plt.figure()
plt.plot(time, x, time, xrec, time, carrier)
plt.xlim([0, 1])
plt.xlabel('Time [sec]')
plt.ylabel('Signal')
plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
plt.show()
#!/usr/bin/python
import sys
from collections import defaultdict as dfdict
import csv
import logging
import copy
"""Optional feature flag. Set to 0 if you don't want to analyze the optional feature."""
optional_feature_flag = 0
if optional_feature_flag:
from numpy import average as ave
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(message)s', level=logging.INFO)
def get_batch_graph(infile):
    """
    Build the user network graph from the input file e.g. batch_payment.csv.

    Each valid record adds an undirected edge between users id1 and id2;
    records whose ids are not integers are skipped.

    Input: batch payment file path
    Output: dict mapping user id (str) -> set of adjacent user ids (str)
    """
    csv.field_size_limit(sys.maxsize)  # some records carry very long text fields
    batch_graph = dfdict(set)  # adjacency sets, auto-created per user
    # fix: 'rU' mode was removed in Python 3.11; universal newlines are the default
    with open(infile, 'r') as fin:
        batchfile = csv.DictReader(fin, skipinitialspace=True)
        for line in batchfile:
            # fix: narrowed the bare except to the errors int() / missing
            # columns can actually raise, so real bugs are not swallowed
            try:
                _ = int(line['id1'])  # line['id1'] is user1
                _ = int(line['id2'])  # line['id2'] is user2
            except (ValueError, TypeError, KeyError):
                continue
            # add the transaction to the network graph (undirected edge)
            batch_graph[line['id1']].add(line['id2'])
            batch_graph[line['id2']].add(line['id1'])
    return (batch_graph)
def check_direct_link(graph, node1, node2):
    """Report whether the two nodes are joined by an edge (direct link).

    Returns 'trusted' when the graph holds an edge node1--node2,
    'unverified' otherwise.
    """
    return 'trusted' if node1 in graph[node2] else 'unverified'
def check_degree2_link(graph, node1, node2):
    """Report whether the two nodes share at least one common adjacent node.

    Returns 'trusted' when an intermediary exists (i.e. a path of length 2
    joins node1 and node2), 'unverified' otherwise.
    """
    common_neighbours = graph[node1] & graph[node2]
    if common_neighbours:
        return 'trusted'
    return 'unverified'
def shortest_bfs_path(graph, node1, node2, degree):
    """Breadth First Search for a path of length <= degree between node1 and node2.

    Returns 'trusted' if such a path exists, 'unverified' otherwise.
    """
    from collections import deque  # local import keeps the module deps unchanged
    start_node = node1
    end_node = node2
    # If node2 has fewer adjacent nodes, start the search from it
    if len(graph[node1]) > len(graph[node2]):
        start_node = node2
        end_node = node1
    # fix: deque gives O(1) dequeues; list.pop(0) was O(n) per dequeue
    queue = deque([(start_node, [start_node])])
    while queue:
        (vertex, path) = queue.popleft()
        for nextnode in graph[vertex] - set(path):
            if nextnode == end_node:
                return ('trusted')
            elif len(path) < degree:  # paths already at length `degree` go no further
                queue.append((nextnode, path + [nextnode]))
    return ('unverified')
def feature1_verification(graph, infile, outfile):
    """ Function to check the feature1 for each line of the input file. It writes the result ('trusted' or 'unverified')
    in the corresponding line of the output file.
    Inputs: network graph i.e. output of get_batch_graph function, stream_payment file, output file.

    NOTE(review): the 'wb'/'rU' open modes and writing str to a binary-mode
    file only work on Python 2 -- confirm before running under Python 3.
    """
    f1_graph = copy.deepcopy(graph) # graph to be used and updated during feature1 analysis
    with open(outfile,'wb') as fout:
        with open(infile,'rU') as fin:
            file_to_process = csv.DictReader(fin,skipinitialspace=True)
            csv.field_size_limit(sys.maxsize)
            for line in file_to_process:
                # check for the input irregularity
                try:
                    _ = int(line['id1'])
                    _ = int(line['id2'])
                except:  # NOTE(review): bare except also hides unrelated errors
                    continue
                if line['id1'] == line['id2']: # user1 and user2 are the same person
                    fout.write('trusted' +'\n')
                else:
                    # 'trusted' only when a direct edge already exists
                    fout.write(check_direct_link(f1_graph, line['id1'], line['id2']) +'\n')
                # update the network with the just-processed transaction
                f1_graph[line['id1']].add(line['id2'])
                f1_graph[line['id2']].add(line['id1'])
    return
def feature2_verification(graph, infile, outfile):
    """ Function to check the feature2 for each line of the input file. It writes the result ('trusted' or 'unverified')
    in the corresponding line of the output file.
    Inputs: network graph i.e. output of get_batch_graph function, stream_payment file, output file.

    NOTE(review): the 'wb'/'rU' open modes and writing str to a binary-mode
    file only work on Python 2 -- confirm before running under Python 3.
    """
    f2_graph = copy.deepcopy(graph) # graph to be used and updated during feature2 analysis
    with open(outfile,'wb') as fout:
        with open(infile,'rU') as fin:
            file_to_process = csv.DictReader(fin,skipinitialspace=True)
            csv.field_size_limit(sys.maxsize)
            for line in file_to_process:
                # check for the input irregularity
                try:
                    _ = int(line['id1'])
                    _ = int(line['id2'])
                except:  # NOTE(review): bare except also hides unrelated errors
                    continue
                if line['id1'] == line['id2']: # user1 and user2 are the same person
                    fout.write('trusted' +'\n')
                elif check_direct_link(f2_graph, line['id1'], line['id2']) =='trusted':
                    fout.write('trusted' +'\n')
                    continue  # edge already exists, so no graph update is needed
                else:
                    # 'trusted' when the two users share a common neighbour
                    fout.write(check_degree2_link(f2_graph, line['id1'], line['id2']) +'\n')
                # update the network
                f2_graph[line['id1']].add(line['id2'])
                f2_graph[line['id2']].add(line['id1'])
    return
def feature3_verification(graph, infile, outfile):
    """ Function to check the feature3 for each line of the input file. It writes the result ('trusted' or 'unverified')
    in the corresponding line of the output file.
    Inputs: network graph i.e. output of get_batch_graph function, stream_payment file, output file.

    A transaction is 'trusted' when the users are identical, directly linked,
    share a neighbour, or are joined by a path of length <= 4.
    NOTE(review): the 'wb'/'rU' open modes and writing str to a binary-mode
    file only work on Python 2 -- confirm before running under Python 3.
    """
    f3_graph = copy.deepcopy(graph) # graph to be used and updated during feature3 analysis
    with open(outfile,'wb') as fout:
        with open(infile,'rU') as fin:
            file_to_process = csv.DictReader(fin,skipinitialspace=True)
            csv.field_size_limit(sys.maxsize)
            for line in file_to_process:
                # check for the input irregularity
                try:
                    _ = int(line['id1'])
                    _ = int(line['id2'])
                except:  # NOTE(review): bare except also hides unrelated errors
                    continue
                if line['id1'] == line['id2']: # user1 and user2 are the same person
                    fout.write('trusted' +'\n')
                elif check_direct_link(f3_graph, line['id1'], line['id2'])=='trusted':
                    fout.write('trusted' +'\n')
                    continue  # edge already exists, so no graph update is needed
                elif check_degree2_link(f3_graph, line['id1'], line['id2'])=='trusted':
                    fout.write('trusted' +'\n')
                    # update the network
                    f3_graph[line['id1']].add(line['id2'])
                    f3_graph[line['id2']].add(line['id1'])
                    continue
                else:
                    # fall back to a BFS limited to paths of length 4
                    fout.write(shortest_bfs_path(f3_graph, line['id1'], line['id2'], 4) +'\n')
                    # update the network
                    f3_graph[line['id1']].add(line['id2'])
                    f3_graph[line['id2']].add(line['id1'])
    return
def optional_feature1_verification(max_amount, infile, outfile):
    """
    Function to check if the amount of each transaction is less than or equal to the maximum amount allowed to transfer.
    Writes 'unverified' in the corresponding line of the output file if the amount is larger than the maximum allowed
    and 'trusted' otherwise.

    NOTE(review): 'wb'/'rU' open modes with str writes are Python-2 only.
    """
    with open(outfile,'wb') as fout:
        with open(infile,'rU') as fin:
            file_to_process = csv.DictReader(fin,skipinitialspace=True)
            csv.field_size_limit(sys.maxsize)
            for line in file_to_process:
                try:
                    if float(line['amount']) > max_amount:
                        fout.write('unverified'+'\n')
                    else:
                        fout.write('trusted'+'\n')
                except:  # malformed amount: no output line for this record
                    continue
    return
def optional_feature2_verification(batch_infile, stream_infile, outfile):
    """
    Function to check if the amount of each transaction is less than or equal to twice the average amount payed
    by user1 so far.
    Writes 'unverified' in the corresponding line of the output file if the amount is larger and 'trusted' otherwise.

    NOTE(review): uses `ave` (numpy.average), which is imported only when
    optional_feature_flag is set; 'wb'/'rU' modes are Python-2 only.
    """
    csv.field_size_limit(sys.maxsize)
    batch_dict = dfdict(list) # amounts of the past transactions of each paying user
    with open(batch_infile,'rU') as fin:
        batchfile = csv.DictReader(fin,skipinitialspace=True)
        for line in batchfile:
            try: # check for the input irregularity
                _ = int(line['id1']) # line['id1'] is user1
                amnt = float(line['amount'])
                # add the transaction to the user dictionary
                batch_dict[line['id1']].append(amnt)
            except:  # NOTE(review): bare except also hides unrelated errors
                continue
    # dictionary to contain the number and average amount of transactions for each user
    average_dict = dict()
    for user in batch_dict:
        # create entry {user:[average amount, number of transactions]}
        average_dict[user] = [ave(batch_dict[user]),len(batch_dict[user])]
    with open(outfile,'wb') as fout:
        with open(stream_infile,'rU') as fin:
            file_to_process = csv.DictReader(fin,skipinitialspace=True)
            csv.field_size_limit(sys.maxsize)
            for line in file_to_process:
                try:
                    # check for the input irregularity
                    _ = int(line['id1']) # line['id1'] is user1
                    amnt = float(line['amount'])
                    if amnt > 2*average_dict[line['id1']][0]:
                        fout.write('unverified'+'\n')
                    else:
                        fout.write('trusted'+'\n')
                    # fold this amount into the user's running average
                    count = average_dict[line['id1']][1]
                    ave_old = average_dict[line['id1']][0]
                    new_count = count+1
                    new_ave = (ave_old*count + amnt)/float(new_count)
                    average_dict[line['id1']] = [new_ave, new_count]
                except:  # also skips users absent from average_dict (KeyError)
                    continue
    return
def main(inputs):
    """Run all verification features over the stream file.

    inputs: argv-style list -- [script, batch_file, stream_file, out1, out2, out3].
    """
    batch_infile = inputs[1]
    stream_infile = inputs[2]
    outfile1 = inputs[3]
    outfile2 = inputs[4]
    outfile3 = inputs[5]
    logger.info("Constructing the Network of users form the batch_payment file ... ")
    batch_graph = get_batch_graph(batch_infile)
    logger.info("completed")
    # Use feature1 to verify transactions (direct link)
    logger.info("Using feature1 for stream file transaction verification ... ")
    feature1_verification(batch_graph, stream_infile, outfile1)
    logger.info("completed")
    # Use feature2 to verify transactions (friend of friend)
    logger.info("Using feature2 for stream file transaction verification ... ")
    feature2_verification(batch_graph, stream_infile, outfile2)
    logger.info("completed")
    # Use feature3 to verify transactions (path of length at most 4)
    logger.info("Using feature3 for stream file transaction verification ...")
    feature3_verification(batch_graph, stream_infile, outfile3)
    logger.info("completed")
    if optional_feature_flag:
        """ Optional feature1: define a limit for the amount of each transaction.
        Transactions with amounts not larger than the limit are 'trusted'. The rest of transactions are 'unverified'.
        """
        opt_outfile1 = outfile3.replace(outfile3.split('/')[-1],'output_optional1.txt') # save the output in the same directory as outfile3
        max_amount = 25 # Maximum amount that can be transferred between users in a single transaction.
        logger.info("Using optional feature1 for stream file transaction verification ...")
        optional_feature1_verification(max_amount, stream_infile, opt_outfile1)
        logger.info("completed")
        """ Optional feature2: find the average amount payed by user1 (the payer) so far. If the transaction of
        user1 to be verified has an amount larger than twice the average mark the transaction as 'unverified'.
        """
        opt_outfile2 = outfile3.replace(outfile3.split('/')[-1],'output_optional2.txt') # save the output in the same directory as outfile3
        logger.info("Using optional feature2 for stream file transaction verification ...")
        optional_feature2_verification(batch_infile, stream_infile, opt_outfile2)
        logger.info("completed")
    return

if __name__ == '__main__':
    main(sys.argv)
|
# program to find the greatest of two numbers
fno=int(input("Enter the first number"))
sno=int(input("Enter the second number"))
if fno>sno:
    # fix: typo "greater that" -> "greater than"
    print("First number is greater than second number")
elif fno<sno:
    print("Second number is greater than the first number")
else:
    print("Both the numbers are equal")
from typing import List
from sklearn.feature_extraction.text import TfidfVectorizer as SklearnTfidfVectorizer
from kts_linguistics.string_transforms.abstract_transform import AbstractTransform
from kts_linguistics.string_transforms.transform_pipeline import TransformPipeline
from kts_linguistics.misc import SparseMatrix2D
class TfidfTransform(AbstractTransform):
    """TF-IDF vectorization step for a transform pipeline."""

    def __init__(self, tfidf: SklearnTfidfVectorizer = None, **tfidf_params):
        # use the caller-provided vectorizer, or build a fresh one from kwargs
        self.tfidf = tfidf if tfidf is not None else SklearnTfidfVectorizer(**tfidf_params)

    def fit(self, groups: List[List[str]], pipeline: TransformPipeline):
        """Fit the vectorizer on every sentence, after the pipeline steps preceding this one."""
        flattened = [sentence for group in groups for sentence in group]
        prepared = [pipeline.custom_transform(sentence, apply_before_transform=self)
                    for sentence in flattened]
        self.tfidf.fit(prepared)

    def transform(self, s: str) -> SparseMatrix2D:
        """Vectorize a single string into a 1-row sparse TF-IDF matrix."""
        return self.tfidf.transform([s])
|
#!/usr/bin/python
# By: Cowart, Dominique A 9.17.16
# Goal: open a text file and change words "free" to "proprietary"
import sys

# define the paths to the input and output files
in_file = "manifesto"
out_file = "manifesto_out"

# fix: the Python-2-only "print line2" statement breaks on Python 3;
# print(line2) behaves identically on both. Context managers close the
# files even when an error interrupts the loop.
with open(in_file, "r") as fh, open(out_file, "w") as fh2:
    # read each line, drop the newline, replace, and re-append the newline
    for line in fh:
        line = line.strip('\n')
        line2 = line.replace("free", "proprietary") + "\n"
        print(line2)
        fh2.write(line2)
|
"""
Odd Even Linked List
Given a singly linked list, group all odd nodes together followed by the even nodes. Please note here we are talking
about the node number and not the value in the nodes.
You should try to do it in place. The program should run in O(1) space complexity and O(nodes) time complexity.
Example 1:
Input: 1->2->3->4->5->NULL
Output: 1->3->5->2->4->NULL
Example 2:
Input: 2->1->3->5->6->4->7->NULL
Output: 2->3->6->7->1->5->4->NULL
Note:
(1) The relative order inside both the even and odd groups should remain as it was in the input.
(2) The first node is considered odd, the second node even and so on ...
"""
from solutions.list_node import ListNode
class Solution:
    """My own approach. More variables than necessary, as it looks.

    Walks the list once, always unlinking the node after `actual`, which
    splits the nodes into the odd- and even-indexed chains in place; `odd`
    tracks the tail of the odd chain so the even chain can be appended last.
    """
    def oddEvenList(self, head: ListNode) -> ListNode:
        if head is None:
            return None
        actual = head        # node whose successor is being re-linked
        even = head.next     # head of the even-indexed chain (2nd node)
        is_even = False      # parity of `actual` (False -> odd position)
        odd = head           # tail of the odd-indexed chain built so far
        while actual.next:
            node = actual.next
            actual.next = actual.next.next  # skip over the immediate successor
            actual = node
            if is_even:
                odd = odd.next  # the odd tail advances every second step
            is_even = not is_even
        odd.next = even  # append the even chain after the odd chain
        return head
class SolutionV2:
    """Adapted from the discussion.

    Builds the odd- and even-indexed chains behind two dummy heads while
    hopping two nodes at a time, then concatenates them.
    """
    def oddEvenList(self, head: ListNode) -> ListNode:
        odd_head = odd = ListNode(-1)    # dummy head of the odd chain
        even_head = even = ListNode(-1)  # dummy head of the even chain
        while head:
            odd.next = head
            even.next = head.next
            odd = odd.next
            even = even.next
            # advance two positions; stop once the even chain ran out
            head = head.next.next if even else None
        odd.next = even_head.next  # odd chain followed by even chain
        return odd_head.next
class SolutionV3:
    """Adapted from the LeetCode solution page. The solution was shown in Java.

    Keeps explicit odd/even tail pointers and re-links both chains in one
    pass, then attaches the even chain after the odd one.
    """
    def oddEvenList(self, head: ListNode) -> ListNode:
        if not head:
            return None
        odd = head            # tail of the odd-indexed chain
        even = head.next      # tail of the even-indexed chain
        even_head = even      # remembered so it can be appended at the end
        while even and even.next:
            odd.next = even.next    # next odd node sits right after `even`
            odd = odd.next
            even.next = odd.next    # next even node sits right after `odd`
            even = even.next
        odd.next = even_head  # concatenate: odds first, then evens
        return head
if __name__ == '__main__':
    # smoke tests: print the rewired list next to the expected ordering
    # (relies on ListNode's repr for visual comparison)
    obj = SolutionV3()
    example = None
    expected = None
    print('result: ', obj.oddEvenList(example), '\nexpected:', expected, end='\n\n')
    example = ListNode(1)
    expected = ListNode(1)
    print('result: ', obj.oddEvenList(example), '\nexpected:', expected, end='\n\n')
    example = ListNode(1, ListNode(2))
    expected = ListNode(1, ListNode(2))
    print('result: ', obj.oddEvenList(example), '\nexpected:', expected, end='\n\n')
    example = ListNode(1, ListNode(2, ListNode(3)))
    expected = ListNode(1, ListNode(3, ListNode(2)))
    print('result: ', obj.oddEvenList(example), '\nexpected:', expected, end='\n\n')
    example = ListNode(1, ListNode(2, ListNode(3, ListNode(4))))
    expected = ListNode(1, ListNode(3, ListNode(2, ListNode(4))))
    print('result: ', obj.oddEvenList(example), '\nexpected:', expected, end='\n\n')
    example = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
    expected = ListNode(1, ListNode(3, ListNode(5, ListNode(2, ListNode(4)))))
    print('result: ', obj.oddEvenList(example), '\nexpected:', expected, end='\n\n')
    example = ListNode(2, ListNode(1, ListNode(3, ListNode(5, ListNode(6, ListNode(4))))))
    expected = ListNode(2, ListNode(3, ListNode(6, ListNode(1, ListNode(5, ListNode(4))))))
    print('result: ', obj.oddEvenList(example), '\nexpected:', expected, end='\n\n')
    example = ListNode(2, ListNode(1, ListNode(3, ListNode(5, ListNode(6, ListNode(4, ListNode(7)))))))
    expected = ListNode(2, ListNode(3, ListNode(6, ListNode(7, ListNode(1, ListNode(5, ListNode(4)))))))
    print('result: ', obj.oddEvenList(example), '\nexpected:', expected, end='\n\n')
    # last line of code
|
#coding: utf-8
import speech_recognition as sr
from gtts import gTTS
from sys import stdin
from os import system
class SpeakWithTheRobot:
    """
    A class created to enable the user to have a conversation with the "robot" utilizing the system proposed.
    ...
    Methods
    -------
    listen()
        The software listens to a human and transcribes what he/she said into text.
    speak(utterance)
        The software utters an utterance through the Google Text to Speech package using a sound file.
    speaking_to_the_robot(lsa, naive, db)
        It enables an user to have a conversation with the "robot" using the system proposed. It is also permitted to
        check the result of a couple of testing phrases.
    """
    def __init__(self):
        self.slow = False          # gTTS: speak at normal speed
        self.device_id = 0         # microphone device index
        self.lang = 'pt-pt'        # European Portuguese voice for TTS
        self.chunk_size = 2048     # audio chunk size (frames) for the microphone
        self.r = sr.Recognizer()
        self.sample_rate = 48000   # microphone sample rate in Hz
    def listen(self):
        """
        The software listens to a human and transcribes what he/she said into text.

        :return: a text that represents the audio uttered by the human, or
            (implicitly) None when recognition fails or the service errors out.
        :rtype: str
        """
        with sr.Microphone(device_index=self.device_id, sample_rate=self.sample_rate, chunk_size=self.chunk_size) as source:
            self.r.adjust_for_ambient_noise(source)  # calibrate against background noise first
            print("Say Something")
            audio = self.r.listen(source)
        try:
            text = self.r.recognize_google(audio, language="pt-PT")
            print("you said: " + text)
            return text
        except sr.UnknownValueError:
            print("Google Speech Recognition could not understand audio")
        except sr.RequestError as e:
            print("Could not request results from Google Speech Recognition service;{0}".format(e))
    def speak(self, utterance):
        """
        The software utters an utterance through the Google Text to Speech package using a sound file.

        :param utterance: text to synthesize and play
        :type utterance: str
        """
        tts = gTTS(text=utterance, lang=self.lang, slow=self.slow)
        tts.save("soundfile.mp3")  # overwrites the previous utterance's file
        # NOTE(review): playing via os.system relies on the OS file
        # association for .mp3 (Windows); confirm behaviour on other platforms
        system("soundfile.mp3")
    def speaking_to_the_robot(self, lsa, naive, db):
        """
        It enables an user to have a conversation with the "robot" using the system proposed. It is also permitted to
        check the result of a couple of testing phrases.

        :param lsa: a Latent Semantic Analysis object
        :type lsa: LSA
        :param naive: a Naive Bayes classifier
        :type naive: NaiveBayesClassifier
        :param db: an object that represents the database and it is connected to the Database file
        :type db: Database
        """
        while True:
            print("Press a character")
            c = stdin.read(2)  # one command character plus the trailing newline
            if c[0] == 's':
                # speak: listen to the user, classify the utterance, utter the reply
                self.speak(db.get_robot_utterance(naive.predict_new_robot_id(
                    lsa.process_new_human_utterance(self.listen(), db.human_utterances))))
            elif c[0] == 't':
                # test: print the robot's reply for a fixed set of Portuguese phrases
                print(db.get_robot_utterance(naive.predict_new_robot_id(
                    lsa.process_new_human_utterance("Bom dia", db.human_utterances))))
                print(db.get_robot_utterance(naive.predict_new_robot_id(
                    lsa.process_new_human_utterance("Como está tudo a andar?", db.human_utterances))))
                print(db.get_robot_utterance(naive.predict_new_robot_id(
                    lsa.process_new_human_utterance("Comigo está tudo fantástico.", db.human_utterances))))
                print(db.get_robot_utterance(naive.predict_new_robot_id(
                    lsa.process_new_human_utterance("Gosto muito de vaca", db.human_utterances))))
                print(db.get_robot_utterance(naive.predict_new_robot_id(
                    lsa.process_new_human_utterance("Gosto de estar com a minha filha.", db.human_utterances))))
                print(db.get_robot_utterance(naive.predict_new_robot_id(
                    lsa.process_new_human_utterance("Uma das minhas coisas preferidas é passear em parques.",
                                                    db.human_utterances))))
            elif c[0] == 'q':
                break
|
#!/usr/bin/python3
# Coefficients of a*z^2 + b*z + c = 0, each complex number represented as a
# (real, imag) tuple.
a=(3,4)
b=(5,4)
c=(6,4)
def produit_c(z1, z2):
    """Multiply two complex numbers represented as (real, imag) tuples."""
    partie_reelle = z1[0]*z2[0] - z1[1]*z2[1]
    partie_imaginaire = z1[0]*z2[1] + z1[1]*z2[0]
    return (partie_reelle, partie_imaginaire)
def produit_r(r, z): # produit entre un réel et un complexe
    """Multiply real r by complex z = (real, imag).

    Fix: the imaginary component previously used z[0] instead of z[1],
    returning (r*re, r*re) rather than (r*re, r*im).
    """
    return (z[0]*r, z[1]*r)
def somme(z1, z2):
    """Component-wise sum of two complex numbers given as (real, imag) tuples."""
    return tuple(u + v for u, v in zip(z1, z2))
delta=somme(produit_c(b,b), produit_r(-4, produit_c(a, c)))
if (delta == (0,0)):
print("l'équation admet ubne racine double -({}i{})/(2({}+i{}))".format(b[0], b[1], a[0], a[1]))
else:
|
import pandas as pd
import torch
from torchvision import transforms
from tqdm import tqdm
from nn import BengaliNet
from utils.preprocess import preprocess
def test(model, test_images, transform, batch_size=192):
    """Test the model by predicting classes of unseen images.

    Args:
        model = [nn.Module] model to test with dataset of unseen images
        test_images = [ndarray] unseen images of which classes will be predicted
        transform = [Compose] normalization transform applied to each image
        batch_size = [int] number of images in a mini-batch

    Returns [list]:
        Class predictions as three consecutive integers for each test image in
        a flattened list with sub-problem order consonant diacritic,
        grapheme root, and vowel diacritic.
    """
    all_preds = []
    with torch.no_grad():
        for start in tqdm(range(0, len(test_images), batch_size)):
            # normalize the current slice of images and stack into one batch
            images = test_images[start:start + batch_size]
            inputs = torch.stack([transform(img) for img in images])

            # forward pass: one output head per sub-problem
            outputs = model(inputs)

            # reorder the heads to consonant diacritic, grapheme root,
            # vowel diacritic; take the argmax class per head and
            # interleave the three predictions per image
            ordered = [outputs[i].argmax(dim=-1) for i in (2, 0, 1)]
            all_preds += torch.stack(ordered, dim=1).flatten().tolist()
    return all_preds
if __name__ == '__main__':
    # preprocess test images
    test_files = ['kaggle/input/bengaliai-cv19/test_image_data_0.parquet',
                  'kaggle/input/bengaliai-cv19/test_image_data_1.parquet',
                  'kaggle/input/bengaliai-cv19/test_image_data_2.parquet',
                  'kaggle/input/bengaliai-cv19/test_image_data_3.parquet']
    # presumably (width=236, height=137) source size, 128 target -- confirm
    # against utils.preprocess.preprocess's signature
    preprocessed_test_images = preprocess(test_files, 236, 137, 128)
    # load model from file, on GPU when available
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = BengaliNet(device)
    model.load_state_dict(torch.load('model.pt', map_location=device))
    model.eval()  # inference mode: freezes dropout/batch-norm behaviour
    # initialize normalizing transformation; the mean/std constants were
    # presumably computed on the training set -- verify
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.071374745,), std=(0.20761949,))
    ])
    # determine predictions of model on test images
    predictions = test(model, preprocessed_test_images, transform)
    # save predictions to the Kaggle submission CSV file
    submission_df = pd.read_csv(
        'kaggle/input/bengaliai-cv19/sample_submission.csv'
    )
    submission_df.target = predictions
    submission_df.to_csv('submission.csv', index=False)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import logging
import os
import sys
from functools import partial
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import embedding_ops
from functools import partial
from evaluate import exact_match_score, f1_score
from data_batcher import get_batch_generator
from pretty_print import print_example
from modules import RNNEncoder, SimpleSoftmaxLayer, BasicAttn, masked_softmax
from QAModel import QAModel
logging.basicConfig(level=logging.INFO)
class DynamicAttention(QAModel):
    """QAModel variant whose attention layer is DynamicAttention_Attn
    (a coattention-style layer with sentinel vectors) instead of the
    baseline attention."""

    def __init__(self, FLAGS, id2word, word2id, emb_matrix):
        super(DynamicAttention, self).__init__(FLAGS, id2word, word2id, emb_matrix)

    def add_placeholders(self):
        super(DynamicAttention, self).add_placeholders()

    def build_graph(self):
        """Build the prediction graph: attention -> RNN encode -> projection
        -> separate start/end softmax layers over the context."""
        attn_layer = DynamicAttention_Attn(self.keep_prob, self.FLAGS)
        output = attn_layer.build_graph(self.qn_embs, self.qn_mask, self.context_embs, self.context_mask) # attn_output is shape (batch_size, context_len, hidden_size*2)
        encoder = RNNEncoder(self.FLAGS.embedding_size*2, self.keep_prob)
        context_hiddens = encoder.build_graph(output, self.context_mask) # (batch_size, context_len, embedding_size*4)
        # Project the encoded representation down to hidden_size before the
        # span-prediction output layers.
        blended_reps_final = tf.contrib.layers.fully_connected(context_hiddens, num_outputs=self.FLAGS.hidden_size) # blended_reps_final is shape (batch_size, context_len, hidden_size)
        # Two independent softmax layers: distributions over the answer-span
        # start and end positions in the context.
        with vs.variable_scope("StartDist"):
            softmax_layer_start = SimpleSoftmaxLayer()
            self.logits_start, self.probdist_start = softmax_layer_start.build_graph(blended_reps_final, self.context_mask)
        with vs.variable_scope("EndDist"):
            softmax_layer_end = SimpleSoftmaxLayer()
            self.logits_end, self.probdist_end = softmax_layer_end.build_graph(blended_reps_final, self.context_mask)
class DynamicAttention_Attn(BasicAttn):
    """Coattention-style layer: computes context-to-question and
    question-to-context attention from a shared affinity matrix, with a
    learned sentinel vector appended to both keys and values."""

    def __init__(self, keep_prob, flag):
        self.keep_prob = keep_prob
        self.FLAGS = flag
        # Learned sentinel vectors, broadcast to each batch element so the
        # softmax can attend to "nothing".
        self.keys_sentinel = tf.Variable(tf.random_normal([1, 1, self.FLAGS.embedding_size]), dtype=tf.float32)
        self.values_sentinel = tf.Variable(tf.random_normal([1, 1, self.FLAGS.embedding_size]), dtype=tf.float32)

    def build_graph(self, values, values_mask, keys, keys_mask):
        """Return attention output of shape (batch_size, context_len, 2*embedding_size).

        values/values_mask: question embeddings and mask;
        keys/keys_mask: context embeddings and mask (see caller).
        """
        # Mask entry for the sentinel position (always attendable).
        sentinel_padding = tf.constant(1, shape=[1, 1])
        batch_size = self.FLAGS.batch_size
        with vs.variable_scope("Attention"):
            # Calculate attention distribution
            dense_layer = partial(tf.layers.dense, activation = tf.nn.tanh, kernel_regularizer = tf.contrib.layers.l1_regularizer(0.001))
            projected_values_t = dense_layer(values, self.FLAGS.embedding_size)
            values_t = tf.concat([projected_values_t, tf.broadcast_to(self.values_sentinel, [batch_size, 1, self.FLAGS.embedding_size])], 1) # (batch_size, value_vec_size, num_values)
            #augmented context vectors.
            keys_t = tf.concat([keys, tf.broadcast_to(self.keys_sentinel, [batch_size, 1, self.FLAGS.embedding_size])], 1)
            # Shared affinity matrix between every key and every value.
            affinity_scores = tf.matmul(keys_t, tf.transpose(values_t, perm=[0, 2, 1])) # shape (batch_size, num_keys, num_values)
            values_mask_1 = tf.expand_dims(tf.concat([values_mask, tf.broadcast_to(sentinel_padding, [batch_size, 1])],1), 1) #shape (batch_size, 1, num_values).
            _, C2Q_softmax = masked_softmax(affinity_scores, values_mask_1, 2) # shape (batch_size, num_keys, num_values). take softmax over values
            attn_output_1 = tf.matmul(C2Q_softmax, values_t) # shape (batch_size, num_keys, value_vec_size)
            keys_mask_1 = tf.expand_dims(tf.concat( [keys_mask, tf.broadcast_to(sentinel_padding, [batch_size, 1]) ],1), 2) #shape (batch_size, num_keys, 1)
            _, Q2C_softmax = masked_softmax(affinity_scores, keys_mask_1, 1)
            Q2C_output = tf.matmul(tf.transpose(Q2C_softmax, perm=[0,2,1]),keys_t)
            # Second-level attention (question-to-context summaries re-attended).
            attn_output_2 = tf.matmul(C2Q_softmax, Q2C_output)
            key_hidden = tf.concat([attn_output_2, attn_output_1], 2)
            # Drop the sentinel row so the output aligns with context_len.
            key_hidden = key_hidden[:, :self.FLAGS.context_len, :]
            # Apply dropout
            output = tf.nn.dropout(key_hidden, self.keep_prob)
            return output
|
def format_date(day, month, year):
    """Return the date as '<day> <Polish month name> <year>', or None if invalid.

    Validation matches the original table: February is always capped at 28
    days (leap years are not considered).  Improvements over the original
    if/elif chain: month < 1 no longer raises KeyError, and day < 1 is now
    rejected instead of silently formatted.
    """
    days_in_month = {
        1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30,
        7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31,
    }
    month_names = {
        1: 'Styczeń', 2: 'Luty', 3: 'Marzec', 4: 'Kwiecień',
        5: 'Maj', 6: 'Czerwiec', 7: 'Lipiec', 8: 'Sierpień',
        9: 'Wrzesień', 10: 'Październik', 11: 'Listopad',
        12: 'Grudzień'
    }
    # Reject invalid months and out-of-range days.
    if month not in days_in_month:
        return None
    if not 1 <= day <= days_in_month[month]:
        return None
    return f'{day} {month_names[month]} {year}'


print(format_date(28, 12, 2021))
|
# == vs is
# `==` tests value equality; `is` tests identity (same object in memory).
print(True == 1) # True: bool is a subclass of int, so True == 1
print('1' == 1) # False: a str never equals an int
print([] == []) # True: two empty lists have equal contents
# `is` compares object identity, not value.
# (Comparing `is` against a literal raises SyntaxWarning on modern Python.)
print(True is 1) # False: True and 1 are different objects
print("1" is 1) # False
print([] is []) # False: each [] literal creates a new list object
print(True is True) # True: True is a singleton
# Realtime example
print("Realtime Examples")
list1 = [1, 2, 3]
list2 = [1, 2, 3]
print(list1 == list2) # True: equal contents
print(list1 is list2) # False: two distinct objects
list3 = list1
print(list1 == list3) # True
print(list1 is list3) # True: list3 is an alias for the same object
list4 = list1[:]
print(list1 == list4) # True
print(list1 is list4) # False: slicing makes a shallow copy
|
import torch
import torch.nn as nn
class Encoder(nn.Module):
    """Fully-connected encoder mapping an input vector to a latent vector.

    Note: despite the attribute name ``cnn_encoder`` (kept for state-dict
    compatibility), the network is a plain Linear/ReLU stack, not a CNN.
    """

    def __init__(self, input_size, latent_size):
        """
        Parameters
        ----------
        input_size : int
            Input dimension for the data.
        latent_size : int
            Latent space dimension.
        """
        super(Encoder, self).__init__()
        self.input_size = input_size
        self.latent_size = latent_size
        # Build the hidden stack 500 -> 250 -> 128, each followed by ReLU.
        layers = []
        prev_width = input_size
        for width in (500, 250, 128):
            layers.append(nn.Linear(prev_width, width))
            layers.append(nn.ReLU())
            prev_width = width
        self.cnn_encoder = nn.Sequential(*layers)
        # Final projection into the latent space.
        self.fc = nn.Linear(prev_width, self.latent_size)

    def forward(self, input):
        """
        Parameters
        ----------
        input : float tensor, shape (batch_size, input_size)

        Returns
        -------
        A float tensor with shape (batch_size, latent_size).
        """
        hidden = self.cnn_encoder(input)
        return self.fc(hidden)
|
from scanner import openFile, readNextWord, getSymbolsToIDs
import sys
# Token-type codes returned by the scanner (index 0 of each word pair).
SEMICOLON=1
DERIVES=2
ALSODERIVES=3
EPSILON=4
SYMBOL=5
EOF=6
# Human-readable names for the codes above, used in error messages.
symbolNames = ["NONE/ERROR", "SEMICOLON", "DERIVES", "ALSODERIVES", "EPSILON", "SYMBOL", "EOF"]
# One-token pushback slot shared by the recursive-descent routines.
symbolPending = None
# Of form: {NonTerm1 : [NT1Prod1, NT1Prod2, ...]; NonTerm2 : [...]; ...}
productionsIR = {}
# For debugging only
testGrammerSymbols = [SYMBOL, DERIVES, SYMBOL, SYMBOL, SYMBOL, ALSODERIVES, SYMBOL, SYMBOL, ALSODERIVES, SYMBOL, SEMICOLON, SYMBOL, DERIVES, SYMBOL, SYMBOL, ALSODERIVES, SYMBOL, SYMBOL, SYMBOL, SEMICOLON, EOF]
i = 0
"""
Word pairs of the form: [grammaticalSymbol, lexeme]
"""
def getNextWord():
    """Return the next [tokenID, lexeme] word pair, honouring one-token pushback.

    Parser routines push a token back by storing it in the module-global
    ``symbolPending``; that token is returned (and cleared) before the
    scanner is consulted again.

    Improvements: removed the unreachable ``return 1`` after the return
    statements, and the dead debug block (the commented test-driver code).
    """
    global symbolPending
    # Serve a pushed-back token first, if any.
    if symbolPending:
        retSym = symbolPending
        symbolPending = None
        return retSym
    return readNextWord()
def SymbolList(curWord, listSoFar):
    """Parse SL -> SYMBOL SL | E, accumulating lexemes into listSoFar.

    SL->SYMBOL SL
      | E
    Not what would be produced w/ the algorithm, but very obviously
    equivalent.

    Returns [True, listSoFar] on success; terminates the process with a
    parse error otherwise.
    """
    global symbolPending
    global productionsIR
    if curWord[0] == SYMBOL:
        listSoFar.append(curWord[1])
        return SymbolList(getNextWord(), listSoFar)
    #print "Sym list: " + symbolNames[curWord[0]]
    # Empty case, has to be the start of another list or ;
    if curWord[0] == SEMICOLON or curWord[0] == ALSODERIVES:
        # Push the delimiter back for the caller to consume.
        symbolPending = curWord
        return [True, listSoFar]
    # If it's not part of the RHS, or demarcating the end of an rhs, it's an error
    sys.stderr.write("Parsing Error: expected SYMBOL or SEMICOLON or ALSODERIVES in right hand side of production, found: " + symbolNames[curWord[0]])
    exit()
def RightHandSide(curWord):
    """Parse RHS -> SL | EPSILON.

    Returns [True, symbols] where symbols is the list of RHS lexemes, or
    [True, [EPSILON]] for an epsilon production.  The trailing ``return
    False`` is a defensive fallback: SymbolList never returns a falsy flag
    (it exits on error).
    """
    global symbolPending
    #print "RHS, cur: " + symbolNames[curWord[0]]
    # Need to return whether it's a valid RHS, and if so what the full RHS is
    if curWord[0] == EPSILON:
        return [True, [EPSILON]]
    thisList = []
    # Builds the list of symbols in the right hand side, sets a flag if it was valid
    # Terminates otherwise
    sl = SymbolList(curWord, thisList)
    if sl[0]:
        return sl
    return False
def ProductionSetPrime(curWord, nonTerm):
    """Parse PS' -> ALSODERIVES RHS PS' | E for non-terminal *nonTerm*.

    Each additional right-hand side is appended to productionsIR[nonTerm].
    Returns True on success; terminates the process on a malformed set.
    """
    global symbolPending
    if curWord[0] == ALSODERIVES:
        rhs = RightHandSide(getNextWord())
        if rhs[0]:
            # Add this production to IR
            productionsIR[nonTerm].append(rhs[1])
            return ProductionSetPrime(getNextWord(), nonTerm)
        else:
            return False
    # Empty case, if no other things being derived, should be a SEMICOLON
    if curWord[0] == SEMICOLON:
        # Push the terminator back for the caller.
        symbolPending = curWord
        return True
    # Otherwise, we're neither at the end of a prod set or properly forming one, error
    else:
        sys.stderr.write("Incorrect terminating symbol for production set: " + symbolNames[curWord[0]])
        exit()
def ProductionSet(curWord):
    """Parse PS -> SYMBOL DERIVES RHS PS'.

    Registers the left-hand-side non-terminal in productionsIR and appends
    each parsed right-hand side.  Returns True/False for valid/invalid
    start; terminates the process on a malformed production.
    """
    global symbolPending
    if curWord[0] == SYMBOL:
        # Make entry for this non terminal in IR
        curNonTerm = curWord[1]
        productionsIR[curNonTerm] = []
        thisNewWord = getNextWord()
        if thisNewWord[0] == DERIVES:
            # Expect RHS to return a tuple of [T/F, [symb1, symb2, symb3]]
            rhs = RightHandSide(getNextWord())
            if rhs[0]:
                # Add this rhs as a production of curNonTerm, have PS' do the same
                productionsIR[curNonTerm].append(rhs[1])
                return ProductionSetPrime(getNextWord(), curNonTerm)
        else:
            sys.stderr.write("Parsing Error: Producing symbol must be followed by ':' (produces)\n")
            sys.stderr.write("Instead found symbol: " + symbolNames[thisNewWord[0]])
            exit()
    # Not a SYMBOL: not the start of a production set.
    return False
def ProductionListPrime(curWord):
    """Parse PL' -> PS SEMICOLON PL' | E.

    Recurses over the remaining production sets; the empty case requires
    the final token to be EOF.  Terminates the process on error.
    """
    global symbolPending
    if(ProductionSet(curWord)):
        if getNextWord()[0] != SEMICOLON:
            sys.stderr.write("Parsing Error: Production sets must end with ';'\n")
            exit()
        return ProductionListPrime(getNextWord())
    # Empty case, last item of full list is EOF
    if curWord[0] == EOF:
        return True
    else:
        sys.stderr.write("Parsing Error: Production list must end with EOF\n")
        exit()
def ProductionList(curWord):
    """Parse PL -> PS SEMICOLON PL'.

    Returns True when the whole production list parses; terminates the
    process with a parse error otherwise.
    """
    global symbolPending
    if not ProductionSet(curWord):
        return False
    if getNextWord()[0] != SEMICOLON:
        sys.stderr.write("Parsing Error: Expected ';' after production list\n")
        exit()
    return ProductionListPrime(getNextWord())
def Grammar():
    """Top-level rule: a grammar is a production list.  Returns True if valid."""
    global symbolPending
    return ProductionList(getNextWord())
def parseFile(fileName):
    """Parse the grammar file *fileName* and return the productions IR.

    The IR maps each non-terminal name to a list of its right-hand sides
    (see the module-level productionsIR).  Terminates the process if the
    grammar is invalid.
    """
    global productionsIR
    openFile(fileName)
    # If it's a valid grammar, return IR
    if Grammar():
        return productionsIR
    else:
        sys.stderr.write("\n\nError! Parser found invalid grammar\n\n")
        exit()
#openFile("mbnf.ll1")
#print Grammar()
#print getSymbolsToIDs().keys()
#print "hello wrorld"
#print ProductionList(getNextWord()) |
# The design of virtual assistant
import random
import os
import datetime, calendar
import wikipedia
import speech_recognition as sr
from gtts import gTTS
from va_utils import recordAudio, vaSpeechResponse, vaWakeUpCall, getInfoFromWikipedia
from playsound import playsound
# Last transcribed utterance; None until something is recognised.
speech_text = None
# Main assistant loop: listen, transcribe, look up Wikipedia, speak the answer.
# Runs forever; recordAudio presumably blocks until speech is captured -- TODO confirm.
while True:
    speech_text = recordAudio()
    #print(speech_text)
    #wake_call = vaWakeUpCall(speech_text)
    #if wake_call is True:
    #    print("I am awake")
    if speech_text is not None:
        wiki_info = getInfoFromWikipedia(speech_text)
        #test_text = "This is spaaarta!"
        vaSpeechResponse(wiki_info)
# File: EPL_Transceiver.py
# Celine Liu <tzuchung1030@gmail.com>
# Date: 2011/02/22
# version: dynamic payload length
import signal
import time
from threading import Thread
#from msvcrt import getch
from EPL_Transceiver_Param import *
# 0 = sender (Tx) mode, 1 = dumper/receiver (Rx) mode.
Tx_Rx = 0
# Set True by the SIGINT handler to stop the send/receive loops.
KeyEvent_Stop = False
# Configured payload length (bytes) per pipe 0-5; default 0x20 = 32 bytes.
data_length = [0x20,0x20,0x20,0x20,0x20,0x20]
# Whether a user-supplied payload is active, plus its hex string and length.
usr_defined_pload = False
usr_defined_ploads = ""
usr_defined_ploads_len = 0
def KeyEvent_Handler(signal, frame):
    """SIGINT (Ctrl+C) handler: flag the Tx/Rx loops to stop.

    NOTE(review): the parameter named ``signal`` shadows the imported
    ``signal`` module inside this function body.
    """
    global KeyEvent_Stop
    KeyEvent_Stop = True
    #print "Enter command line mode ... "

# Install the handler so Ctrl+C drops back to command-line mode.
signal.signal(signal.SIGINT, KeyEvent_Handler)
class EPL_Transceiver:
    """Command-level driver for an EPL RF transceiver reached over a
    serial/command connection (Python 2 code).

    Every ``set_*`` method builds an ASCII command from the opcodes in
    EPL_Transceiver_Param, sends it via ``self.connection`` and returns the
    ACK/NAK result (0 = ACK, 1 = NAK, -1 = unrecognised reply).
    """
    def __init__ (self, conn):
        # conn: object providing sendCmd/recvCmd/sendData (see usages below).
        self.connection = conn
        #self.custom_pload = ""
        #self.custom_plen = 0
        #self.keyevent_detection = key_event_detect()
        #self.keyevent_detection.start()
    def string2int(self, indata):
        # Parse a hex string such as "1F" into an int.
        indata = "0x" + indata
        return int(indata, 16)
    def get_ret_val(self):
        # Read the 3-byte ACK/NAK status of the previous command.
        ret_val = self.connection.recvCmd(3)
        if ret_val == "ACK":
            return 0
        elif ret_val == "NAK":
            return 1
        else:
            return -1
        # NOTE(review): unreachable -- every branch above returns first.
        print "ERROR GET_RET_VAL!!"
    def enter_sender_mode(self):
        # Switch the device (and module state) to transmit mode.
        global Tx_Rx
        Tx_Rx = 0
        cmd = OP_transceiver + EPL_SENDER_MODE
        self.connection.sendCmd(cmd)
    def enter_dumper_mode(self):
        # Switch the device (and module state) to receive/dump mode.
        global Tx_Rx
        Tx_Rx = 1
        cmd = OP_transceiver + EPL_DUMPER_MODE
        self.connection.sendCmd(cmd)
    def show_config(self):
        # Ask the device to print its current configuration.
        cmd = OP_transceiver + EPL_SHOW_CONFIG
        self.connection.sendCmd(cmd)
    def set_output_power(self, indata):
        # indata: 2-char hex power level.
        cmd = OP_transceiver + EPL_OUTPUT_POWER + indata
        #print "output power=" + cmd
        self.connection.sendCmd(cmd)
        return self.get_ret_val()
    def set_channel(self, indata):
        # indata: 2-char hex RF channel number.
        cmd = OP_transceiver + EPL_CHANNEL + indata
        self.connection.sendCmd(cmd)
        return self.get_ret_val()
    def set_datarate(self, indata):
        cmd = OP_transceiver + EPL_DATARATE + indata
        #print "datarate=" + cmd
        self.connection.sendCmd(cmd)
        return self.get_ret_val()
    def set_addr_width(self, indata):
        cmd = OP_transceiver + EPL_ADDR_WIDTH + indata
        #print "address width=" + cmd
        self.connection.sendCmd(cmd)
        return self.get_ret_val()
    def set_autoack(self, pipe_num, indata):
        # Map pipe number 0-5 to its auto-acknowledge opcode.
        OP_AUTOACK = {
            0: EPL_AUTOACK_P0,
            1: EPL_AUTOACK_P1,
            2: EPL_AUTOACK_P2,
            3: EPL_AUTOACK_P3,
            4: EPL_AUTOACK_P4,
            5: EPL_AUTOACK_P5
        }[pipe_num]
        cmd = OP_transceiver + OP_AUTOACK + indata
        #print "autoack=" + cmd
        self.connection.sendCmd(cmd)
        return self.get_ret_val()
    # 20110221 celine #
    def set_dynamic_payload(self, pipe_num, indata):
        # Enable/disable dynamic payload length on the given pipe.
        cmd = OP_transceiver + EPL_DYNAMIC_PD + str(pipe_num).zfill(2) + indata
        self.connection.sendCmd(cmd)
        return self.get_ret_val()
    def set_data_length(self, pipe_num, indata):
        # Set the fixed payload length (hex string) for the given pipe and
        # remember it in the module-level data_length table.
        OP_DATA_LENGTH = {
            0: EPL_DATA_LENGTH_P0,
            1: EPL_DATA_LENGTH_P1,
            2: EPL_DATA_LENGTH_P2,
            3: EPL_DATA_LENGTH_P3,
            4: EPL_DATA_LENGTH_P4,
            5: EPL_DATA_LENGTH_P5
        }[pipe_num]
        data_length[pipe_num] = int(("0x"+indata), 16)
        cmd = OP_transceiver + OP_DATA_LENGTH + indata
        #print "data length=" + cmd
        self.connection.sendCmd(cmd)
        return self.get_ret_val()
    def set_crc_mode(self, indata):
        cmd = OP_transceiver + EPL_CRC_MODE + indata
        #print "CRC mode=" + cmd
        self.connection.sendCmd(cmd)
        return self.get_ret_val()
    def set_dest_addr(self, pipe_num, indata):
        # indata: list of hex byte strings forming the address; joined below.
        indata = ''.join(indata)
        OP_DEST_ADDR = {
            0: EPL_RX_ADDR_P0,
            1: EPL_RX_ADDR_P1,
            2: EPL_RX_ADDR_P2,
            3: EPL_RX_ADDR_P3,
            4: EPL_RX_ADDR_P4,
            5: EPL_RX_ADDR_P5
        }[pipe_num]
        cmd = OP_transceiver + OP_DEST_ADDR + str(indata).zfill(2)
        self.connection.sendCmd(cmd)
        return self.get_ret_val()
    def set_usr_pload(self, usr_definedp, indata):
        # Select between a user-supplied payload (hex string) and the
        # auto-generated one.
        global usr_defined_ploads
        usr_defined_ploads = indata
        if usr_definedp == True:
            cmd = OP_transceiver + EPL_USER_PLOAD + USRS_PLOAD + indata
        else:
            cmd = OP_transceiver + EPL_USER_PLOAD + AUTO_PLOAD
        #print "usr pload=" + cmd
        self.connection.sendCmd(cmd)
        return self.get_ret_val()
    def run_sender(self):
        """Transmit packets in a loop (one every 0.5s) until Ctrl+C,
        printing a hex dump of each payload sent."""
        global KeyEvent_Stop
        global data_length
        global usr_defined_pload
        global usr_defined_ploads
        global usr_defined_ploads_len
        pkt_count = 1
        KeyEvent_Stop = False
        # send Cmd to LU1: reset the device's packet counter.
        cmd = OP_transceiver + EPL_NEW_COUNTER
        self.connection.sendCmd(cmd)
        ret_val = self.connection.recvCmd(3)
        print "\r\nRF sending procedure begin ..."
        print "Press Ctrl + c to Stop.\r\n"
        while KeyEvent_Stop == False:
            # send Cmd to LU1
            if usr_defined_pload == True:
                cmd = OP_transceiver + EPL_RUN_SENDER + USRS_PLOAD
            else:
                cmd = OP_transceiver + EPL_RUN_SENDER + AUTO_PLOAD
            self.connection.sendCmd(cmd)
            # get return msg from LU1 (3-byte status + retransmission count)
            ret_val = self.connection.recvCmd(4)
            # print msg on console
            print "Transmit " + str(pkt_count) + "'s packet."
            print "Auto Retransmission Count: %d" % ord(ret_val[3])
            if usr_defined_pload == True:
                # Hex-dump the user payload, 16 bytes per row.
                if usr_defined_ploads_len < 16:
                    print "0x0000: ",
                    for i in range (usr_defined_ploads_len):
                        print "%02X " % self.string2int(usr_defined_ploads[2*(i+1):2*(i+2)]),
                else:
                    print "0x0000: ",
                    for i in range (16):
                        print "%02X " % self.string2int(usr_defined_ploads[2*(i+1):2*(i+2)]),
                    print "\r\n0x0010: ",
                    for i in range (usr_defined_ploads_len-16):
                        print "%02X " % self.string2int(usr_defined_ploads[(2*(i+1)+32):(2*(i+2)+32)]),
                print "\r\nEnd Packet.\r\n"
            else:
                # Auto payload: first byte is the packet counter, the rest
                # are the fixed pattern i+9.
                if data_length[0] < 16:
                    print "0x0000: ",
                    for i in range (data_length[0]):
                        if i == 0:
                            print "%02X " % (pkt_count%256),
                        else:
                            print "%02X " % (i+9),
                else:
                    print "0x0000: ",
                    for i in range (16):
                        if i == 0:
                            print "%02X " % (pkt_count%256),
                        else:
                            print "%02X " % (i+9),
                    print "\r\n0x0010: ",
                    for i in range (data_length[0]-16):
                        print "%02X " % (i+9+16),
                print "\r\nEnd Packet.\r\n"
            if ret_val[0:3] != "ACK":
                print "ERROR: Connection Failed"
                break
            pkt_count += 1
            try:
                time.sleep(0.5)
            except:
                pass
        print "Stop RF sending process . "
        print "Enter command line mode ... "
        KeyEvent_Stop = False
    def run_dumper(self):
        """Receive packets in a loop until Ctrl+C, hex-dumping each one.

        Each received record is 34 bytes: 32 payload bytes, then the pipe
        number and the actual payload length.
        """
        global KeyEvent_Stop
        #global data_length
        pkt_count = 1
        cmd = OP_transceiver + EPL_RUN_DUMPER + "00"
        self.connection.sendCmd(cmd)
        print "RF receving procedure begin ..."
        print "Press Ctrl + c to Stop.\r\n"
        while True:
            # Press Ctrl + C will set KeyEvent_Stop to be true in KeyEvent_Handler.
            if KeyEvent_Stop == True:
                cmd = OP_transceiver + EPL_EXIT_DUMPER
                self.connection.sendCmd(cmd)
                KeyEvent_Stop = False
                break
            try:
                ret_val = self.connection.recvCmd(34)
                pipe_num = ord(ret_val[32])
                data_length = ord(ret_val[33])
                print "The %d\'st Packet from PIPE %d: (%d Bytes)" % (pkt_count, pipe_num, data_length)
                c = 0
                # Hex-dump the payload, 16 bytes per row.
                if data_length > 16:
                    print "0x0000: ",
                    while c < 16:
                        print "%02X " % ord(ret_val[c]),
                        c = c + 1
                    print ""
                    c = 0
                    print "0x0010: ",
                    while c < (data_length -16):
                        print "%02X " % ord(ret_val[c+16]),
                        c = c + 1
                    print ""
                else:
                    print "0x0000: ",
                    while c < data_length:
                        print "%02X " % ord(ret_val[c]),
                        c = c + 1
                    print ""
                print "End Packet!"
                print ""
                pkt_count += 1
            except:
                # NOTE(review): silently swallows all errors (including
                # recv timeouts) so the loop can keep polling.
                pass
        print "Stop RF receiving process . "
        print "Enter command line mode ... "
        KeyEvent_Stop = False
    def InitLU1RF(self):
        # Apply a default RF configuration and start in receive mode.
        self.set_output_power("03")
        self.set_channel("64")
        self.set_datarate("01")
        self.set_addr_width("05")
        self.set_autoack(0,ON)
        self.set_dest_addr(0,["65","65","65","65","65"])
        self.set_data_length(0,"20")
        self.set_dynamic_payload(0,OFF)
        self.set_crc_mode("03")
        self.enter_dumper_mode()
    def sendData(self, data):
        # Send one 32-byte user payload as raw bytes.
        # NOTE(review): ``binascii`` and ``data_flush``/``DATA_TX`` are not
        # defined in this file -- presumably provided by the star import
        # from EPL_Transceiver_Param; verify.
        self.data_flush(DATA_TX)
        self.enter_sender_mode()
        cmd = binascii.unhexlify(OP_transceiver + EPL_USER_PLOAD + USRS_PLOAD) + chr(32) + data
        self.connection.sendData(cmd)
        self.get_ret_val()
        cmd = binascii.unhexlify(OP_transceiver + EPL_RUN_SENDER + USRS_PLOAD)
        self.connection.sendData(cmd)
        ret_val = self.get_ret_val()
    def readData(self, Length):
        # Read Length+2 bytes (payload + pipe/length trailer) and return
        # only the first 32 payload bytes.
        data = self.connection.recvCmd(Length+2)
        return data[:32]
# Disabled alternative to the SIGINT handler: a thread polling msvcrt.getch
# for the Escape key (Windows-only); retained by the author as reference.
'''
class key_event_detect(Thread):
    def __init__(self):
        Thread.__init__(self)
    def run(self):
        global KeyEvent_Stop
        print "Keyboard Event Interrupt"
        while True:
            z = getch()
            # escape key to exit
            if ord(z) == 27:
                print "Escape Event Interrupt"
                KeyEvent_Stop = True
                break
            print z
            #pass
'''
from django.db import models
from datetime import datetime
# Create your models here.
# how would my model look for this particular task where I don't have to use a
# form
class Country(models.Model):
    """A country a film can be produced in or available in."""
    name= models.CharField(max_length=50)
    def __str__(self):
        return self.name
class Category(models.Model):
    """A genre/category a film can belong to."""
    name= models.CharField(max_length=50)
    def __str__(self):
        return self.name
class Director(models.Model):
    """A film director."""
    first_name = models.CharField(max_length=50)
    last_name= models.CharField(max_length=50)
    def __str__(self):
        # NOTE(review): shows only the first name; consider including
        # last_name for disambiguation.
        return self.first_name
class Film(models.Model):
    """A film with its origin country, availability, categories and directors."""
    title= models.CharField(max_length=50)
    release_date = models.DateField(default=datetime.now)
    # Country the film was produced in (nullable; deleting it deletes the film).
    created_in_country = models.ForeignKey(Country, null= True, on_delete= models.CASCADE, related_name= '%(class)s_country_creator')
    # NOTE(review): 'aviaible' is a typo for 'available', but related_name is a
    # public reverse-query name; renaming it changes the API and needs a
    # migration, so it is left as-is here.
    available_in_countries = models.ManyToManyField(Country, related_name= '%(class)s_aviaible_country')
    category = models.ManyToManyField(Category)
    director = models.ManyToManyField(Director)
    def __str__(self):
        return self.title
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 12 20:08:03 2020
@author: hui94
"""
import tensorflow as tf
import numpy as np
import idx2numpy
import cv2
from PIL import Image
from tensorflow import keras
from tensorflow.python.ops import resources
from tensorflow.contrib.tensor_forest.python import tensor_forest
import time
# Paths to the raw MNIST idx files (train + test images/labels).
data_file = 'C:/Users/hui94/Desktop/AI project/digit recognition/train-images.idx3-ubyte'
label_file = 'C:/Users/hui94/Desktop/AI project/digit recognition/train-labels.idx1-ubyte'
test_data_file = 'C:/Users/hui94/Desktop/AI project/digit recognition/t10k-images.idx3-ubyte'
test_label_file = 'C:/Users/hui94/Desktop/AI project/digit recognition/t10k-labels.idx1-ubyte'
data_arr = idx2numpy.convert_from_file(data_file)
label_arr = idx2numpy.convert_from_file(label_file)
test_arr = idx2numpy.convert_from_file(test_data_file)
test_label_arr = idx2numpy.convert_from_file(test_label_file)
# for i in range(5):
#     cv2.imshow('Image',test_arr[i])
#     cv2.waitKey(0)
#     cv2.destroyAllWindows()
data_len, n, m = data_arr.shape
mean_ = data_arr.mean()
range_ = np.amax(data_arr) - np.amin(data_arr)
#Normalized Data
normal_arr = (data_arr - mean_)/range_
# NOTE(review): the data is scaled twice (mean/range, then /255) -- verify
# whether the second division is intentional.
normal_arr = normal_arr / 255
#Reshape Data: flatten each n x m image into a single feature vector.
reshape_arr = np.zeros((data_len, n*m))
for i in range(data_len):
    reshape_arr[i] = np.reshape(normal_arr[i],(1,n*m))
normal_arr = reshape_arr
#Normalized Test Data (using the training set's mean and range)
norm_test_arr = (test_arr - mean_)/range_
norm_test_arr = norm_test_arr / 255
#Reshape Test Data
test_len, n, m = norm_test_arr.shape
reshape_arr = np.zeros((test_len, n*m))
for i in range(test_len):
    reshape_arr[i] = np.reshape(norm_test_arr[i],(1,n*m))
norm_test_arr = reshape_arr
X_train = normal_arr
y_train = label_arr
X_test = norm_test_arr
y_test = test_label_arr
# Parameters
num_steps = 100 # Total steps to train
num_classes = 10
num_features = n*m
num_trees = 100
max_nodes = 10000
tf.reset_default_graph()
# Input and Target placeholders
X = tf.placeholder(tf.float32, shape=[None, num_features])
Y = tf.placeholder(tf.int64, shape=[None])
# Random Forest Parameters
hparams = tensor_forest.ForestHParams(num_classes=num_classes, num_features=num_features, num_trees=num_trees, max_nodes=max_nodes).fill()
# Build the Random Forest
forest_graph = tensor_forest.RandomForestGraphs(hparams)
# Get training graph and loss
train_op = forest_graph.training_graph(X, Y)
loss_op = forest_graph.training_loss(X, Y)
# Measure the accuracy
infer_op, _, _ = forest_graph.inference_graph(X)
correct_prediction = tf.equal(tf.argmax(infer_op, 1), tf.cast(Y, tf.int64))
accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Initialize the variables (i.e. assign their default value) and forest resources
init_vars = tf.group(tf.global_variables_initializer(), resources.initialize_resources(resources.shared_resources()))
# Start TensorFlow session
sess = tf.Session()
# Run the initializer
sess.run(init_vars)
# Training
time0 = time.time()
for i in range(1, num_steps + 1):
    _, l = sess.run([train_op, loss_op], feed_dict={X: X_train, Y: y_train})
    if i % 1 == 0 or i == 1:
        acc = sess.run(accuracy_op, feed_dict={X: X_train, Y: y_train})
        print('Step %i, Loss: %f, Acc: %f' % (i, l, acc))
time1 = time.time()
train_time = time1-time0
# NOTE(review): the label says 'Test Time' but this measures training time.
print('Test Time:', train_time)
# Test Model
print("Test Accuracy:", sess.run(accuracy_op, feed_dict={X: X_test, Y: y_test}))
#%%
# Demo: draw 20 random test digits onto a canvas with the model's
# prediction and the true label overlaid; press any key to advance.
path = 'C:/Users/hui94/Desktop/AI project/digit recognition/'
blank_arr = np.zeros((150,250))
for i in range(20):
    idx = np.random.randint(0,10000-1)
    blank_arr[18:18+n,100:100+m] = test_arr[idx]
    im = Image.fromarray(blank_arr)
    im = im.convert("L")
    im.save(path + 'temp.jpeg')
    image = cv2.imread(path + 'temp.jpeg')
    X_temp = [X_test[idx]]
    #print('Model Prediction:', sess.run(tf.argmax(infer_op, 1), feed_dict={X: X_temp}), 'Actual:', [y_test[idx]])
    predict_val = str(sess.run(tf.argmax(infer_op, 1), feed_dict={X: X_temp})[0])
    actual_val = str(y_test[idx])
    x,y,w,h = 0,50,100,75
    cv2.putText(image, 'Model Prediction: ' + predict_val, (x + int(w/10),y + int(h/2)), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,255,0), 2)
    cv2.putText(image, 'Actual Number: ' + actual_val, (x + int(w/10),y + int(h/2)+50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0), 2)
    cv2.imshow('Image',image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
|
import numpy as np
import cv2
import os
import random
import pandas as pd
class ImageDataLoader():
    """Iterates over (image, density-map) blobs for crowd-counting training.

    Each blob is a dict with keys 'data' (image, shape (1, H, W, 1)),
    'den' (density map, same layout) and 'fname'.  Fixes applied: integer
    dimensions for cv2.resize (`/` produced floats under Python 3),
    `id_list` materialised as a list so `random.shuffle` works, and
    the removed `DataFrame.as_matrix()` replaced by `.values`.
    """

    def __init__(self, data_path, gt_path, shuffle=False, gt_downsample=False, pre_load=False):
        """
        data_path: directory with image files.
        gt_path: directory with matching '<name>.csv' density maps.
        shuffle: reshuffle order on every iteration.
        gt_downsample: shrink density maps to 1/4 size.
        pre_load: load everything into memory up front (faster for small data).
        """
        self.data_path = data_path
        self.gt_path = gt_path
        self.gt_downsample = gt_downsample
        self.pre_load = pre_load
        self.data_files = [filename for filename in os.listdir(data_path)
                           if os.path.isfile(os.path.join(data_path, filename))]
        self.data_files.sort()
        self.shuffle = shuffle
        if shuffle:
            random.seed(2468)
        self.num_samples = len(self.data_files)
        self.blob_list = {}
        # A list (not a range) so random.shuffle can permute it in place.
        self.id_list = list(range(0, self.num_samples))
        if self.pre_load:
            print('Preloading data...')
            for id, fname in enumerate(self.data_files):
                self.blob_list[id] = self._read_blob(fname)
                if (id + 1) % 100 == 0:
                    print('loaded [{}/{}]'.format(id + 1, self.num_samples))
            print('complete loading')

    def _read_blob(self, fname):
        """Load one image + density map, resized to dimensions divisible by 4."""
        img = cv2.imread(os.path.join(self.data_path, fname), 0)
        img = img.astype(np.float32)
        ht = img.shape[0]
        wd = img.shape[1]
        # Round dimensions down to a multiple of 4 (network stride).
        # '//' keeps these ints -- cv2.resize rejects float sizes.
        ht_1 = (ht // 4) * 4
        wd_1 = (wd // 4) * 4
        img = cv2.resize(img, (wd_1, ht_1))
        img = img.reshape((1, img.shape[0], img.shape[1], 1))
        # .values replaces DataFrame.as_matrix(), removed in pandas >= 1.0.
        den = pd.read_csv(os.path.join(self.gt_path, os.path.splitext(fname)[0] + '.csv'),
                          sep=',', header=None).values
        den = den.astype(np.float32)
        if self.gt_downsample:
            wd_1 = wd_1 // 4
            ht_1 = ht_1 // 4
        den = cv2.resize(den, (wd_1, ht_1))
        # Rescale so the density map still sums to the original count.
        den = den * ((wd * ht) / (wd_1 * ht_1))
        den = den.reshape((1, den.shape[0], den.shape[1], 1))
        return {'data': img, 'den': den, 'fname': fname}

    def __iter__(self):
        if self.shuffle:
            if self.pre_load:
                random.shuffle(self.id_list)
            else:
                random.shuffle(self.data_files)
        for id in self.id_list:
            if self.pre_load:
                blob = self.blob_list[id]
                blob['id'] = id
            else:
                blob = self._read_blob(self.data_files[id])
            yield blob

    def get_num_samples(self):
        return self.num_samples
def MAPE(pred, test):
    """Mean absolute percentage error (in percent) between two series.

    Entries where the true value is zero are skipped in the numerator, but
    the mean is still taken over all len(test) points -- identical to the
    original's fixed /7 behaviour for 7-element inputs, generalised to any
    length.  Inputs are flattened, so column vectors work too.
    Fixes: no longer shadows the builtin ``sum``; length is no longer
    hard-coded to 7.
    """
    pred = np.asarray(pred, dtype=float).ravel()
    test = np.asarray(test, dtype=float).ravel()
    total = 0.0
    for p, t in zip(pred, test):
        if t != 0:
            total += abs((t - p) / t)
    return (total / len(test)) * 100
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, Lasso, Ridge
# Daily COVID case counts per country (ECDC spreadsheet).
Data = pd.read_excel("COVID-19-10-06.xlsx")
Country_List = Data['countriesAndTerritories'].unique()
Output = pd.DataFrame(columns = Country_List)
#for country in Country_List:
# Currently fits a single country; the loop above is a planned extension.
df = Data.loc[Data['countriesAndTerritories'] == 'Aruba']
data = df['cases'].values
data = data[::-1] #first => data[0], last => data[len(data)-1]
date = np.arange(1, len(data) + 1) #first => date[0], last => date[len(data)-1]
X, Y = date, data
X = X.reshape(-1, 1)
Y = Y.reshape(-1, 1)
# Train on the last `period_length` days, holding out the final week.
period_length = 100
X_train = X[len(X) - period_length:len(X) - 7]
Y_train = Y[len(Y) - period_length:len(Y) - 7]
X_test = X[len(X) - 7:]
Y_test = Y[len(Y) - 7:]
# Hand-built polynomial features up to degree 10 for train and test.
X_train2, X_train3, X_train4, X_train5, X_train6, X_train7, X_train8, X_train9, X_train10 = np.power(X_train, 2), np.power(X_train, 3), np.power(X_train, 4), np.power(X_train, 5), np.power(X_train, 6), np.power(X_train, 7), np.power(X_train, 8), np.power(X_train, 9), np.power(X_train, 10)
X_test2, X_test3, X_test4, X_test5, X_test6, X_test7, X_test8, X_test9, X_test10 = np.power(X_test, 2), np.power(X_test, 3), np.power(X_test, 4), np.power(X_test, 5), np.power(X_test, 6), np.power(X_test, 7), np.power(X_test, 8), np.power(X_test, 9), np.power(X_test, 10)
X_train_set = np.column_stack((X_train, X_train2, X_train3, X_train4, X_train5, X_train6, X_train7, X_train8, X_train9, X_train10))
X_test_set = np.column_stack((X_test, X_test2, X_test3, X_test4, X_test5, X_test6, X_test7, X_test8, X_test9, X_test10))
# Linear
linearModel = LinearRegression()
linearModel.fit(X_train_set, Y_train)
#Lasso
LassoModel = Lasso()
LassoModel.fit(X_train_set, Y_train)
#Ridge
RidgeModel = Ridge()
RidgeModel.fit(X_train_set, Y_train)
pred = linearModel.predict(X_train_set)
# Round predictions down to ints and clamp negatives to zero, in place.
for x in np.nditer(pred, op_flags=['readwrite']):
    x[...] = int(x)
    if x < 0:
        x[...] = 0
print(Y_test)
print(pred)
# NOTE(review): this prints the held-out week (Y_test) but scores the model on
# its own training targets (pred vs Y_train), and MAPE only inspects the
# first 7 points -- confirm whether X_test_set/Y_test were intended here.
print('mape = ', MAPE(pred, Y_train))
plt.scatter(X_train, Y_train, color='red')
#plt.scatter(X_test,Y_test, color='blue')
plt.scatter(X_train,pred, color='green')
plt.title('Cases Vs Time(Linear)', fontsize=14)
plt.xlabel('Time', fontsize=14)
plt.ylabel('Cases', fontsize=14)
plt.grid(True)
plt.show()
#encoding:utf-8
#!/usr/bin/python
import MySQLdb
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def mysql(f):
    """Decorator: run *f* with an open MySQL cursor passed as ``cur``.

    The wrapped function is called as ``f(*args, cur=cursor)``.  The
    transaction is committed only when *f* returns normally; the cursor
    and connection are always closed (the original leaked both when *f*
    raised, and a ``finally`` guarantees cleanup).

    NOTE(review): credentials are hard-coded here; consider moving them
    to configuration.
    """
    def _deco(*args):
        conn = MySQLdb.connect(host = 'localhost', user = 'lfs', passwd = 'lfs653', db = 'todoDB', port = 3306, charset = "utf8")
        try:
            cur = conn.cursor()
            try:
                ret = f(*args, cur=cur)
            finally:
                cur.close()
            conn.commit()
        finally:
            conn.close()
        return ret
    return _deco
@mysql
def scan(table, cur = None):
    """Return all rows of *table* as a tuple of row tuples.

    WARNING: *table* is interpolated directly into the SQL string (table
    names cannot be bound as parameters) -- call only with trusted,
    program-controlled identifiers.
    """
    # count = cur.execute('select * from %s', table)
    count = cur.execute('select * from %s' % table)
    info = cur.fetchmany(count)
    return info
@mysql
def update(table, newValue, condition, cur = None):
    """Run ``UPDATE <table> SET <newValue> WHERE <condition>``.

    WARNING: all three arguments are interpolated directly into the SQL
    string -- only pass trusted, program-controlled values.
    """
    cur.execute('update %s set %s where %s' % (table, newValue, condition))
    return
@mysql
def insert(table, itemValue, cur = None):
    """Insert one row into *table* and return the table's full contents.

    WARNING: arguments are interpolated directly into the SQL string --
    only pass trusted, program-controlled values.
    """
    cur.execute('insert into %s value(%s)' % (table, itemValue))
    count = cur.execute('select * from %s' % table)
    info = cur.fetchmany(count)
    return info
@mysql
def delete(table, condition, cur = None):
    """Delete rows from *table* matching *condition*.

    WARNING: arguments are interpolated directly into the SQL string --
    only pass trusted, program-controlled values.
    """
    cur.execute('delete from %s where %s' % (table, condition))
    return
@mysql
def insertField(table, newField, cur = None):
    """Add column *newField* (name + type spec) to *table* via ALTER TABLE.

    WARNING: arguments are interpolated directly into the SQL string --
    only pass trusted, program-controlled identifiers.
    """
    cur.execute('alter table %s add %s' % (table, newField))
    return
@mysql
def deleteField(table, oldField, cur = None):
    """Drop column *oldField* from *table* via ALTER TABLE.

    WARNING: arguments are interpolated directly into the SQL string --
    only pass trusted, program-controlled identifiers.
    """
    cur.execute('alter table %s drop %s' % (table, oldField))
    return
@mysql
def rename(oldName, newName, cur = None):
    """Rename table *oldName* to *newName*.

    NOTE(review): shadows the name of MySQL's ``RENAME TABLE`` statement;
    the bare ``rename ... to ...`` syntax here may be MySQL-version
    specific -- verify.  Identifiers are interpolated directly into the
    SQL string; only pass trusted values.
    """
    cur.execute('rename %s to %s' % (oldName, newName))
    return
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.