text stringlengths 8 6.05M |
|---|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.codegen.thrift.apache.python import additional_fields
from pants.backend.codegen.thrift.apache.python.rules import (
ApacheThriftPythonDependenciesInferenceFieldSet,
GeneratePythonFromThriftRequest,
InferApacheThriftPythonDependencies,
)
from pants.backend.codegen.thrift.apache.python.rules import rules as apache_thrift_python_rules
from pants.backend.codegen.thrift.apache.rules import rules as apache_thrift_rules
from pants.backend.codegen.thrift.rules import rules as thrift_rules
from pants.backend.codegen.thrift.target_types import (
ThriftSourceField,
ThriftSourcesGeneratorTarget,
)
from pants.backend.codegen.utils import (
AmbiguousPythonCodegenRuntimeLibrary,
MissingPythonCodegenRuntimeLibrary,
)
from pants.backend.python.dependency_inference import module_mapper
from pants.backend.python.target_types import PythonRequirementTarget
from pants.build_graph.address import Address
from pants.core.util_rules import source_files, stripped_source_files
from pants.engine.internals import graph
from pants.engine.rules import QueryRule
from pants.engine.target import (
GeneratedSources,
HydratedSources,
HydrateSourcesRequest,
InferredDependencies,
)
from pants.source import source_root
from pants.testutil.rule_runner import RuleRunner, engine_error
from pants.testutil.skip_utils import requires_thrift
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Rule runner wired with every rule needed to generate Python from Thrift."""
    all_rules = [
        *thrift_rules(),
        *apache_thrift_rules(),
        *apache_thrift_python_rules(),
        *source_files.rules(),
        *source_root.rules(),
        *graph.rules(),
        *stripped_source_files.rules(),
        *module_mapper.rules(),
        *additional_fields.rules(),
        QueryRule(HydratedSources, [HydrateSourcesRequest]),
        QueryRule(GeneratedSources, [GeneratePythonFromThriftRequest]),
    ]
    return RuleRunner(
        rules=all_rules,
        target_types=[ThriftSourcesGeneratorTarget, PythonRequirementTarget],
    )
def assert_files_generated(
    rule_runner: RuleRunner,
    address: Address,
    *,
    expected_files: list[str],
    source_roots: list[str],
    extra_args: list[str] | None = None,
) -> None:
    """Generate Python from the Thrift target at `address` and compare the file set."""
    # Runtime-dependency inference is disabled so no `thrift` requirement target
    # needs to exist in these tests.
    options = [
        f"--source-root-patterns={source_roots!r}",
        "--no-python-thrift-infer-runtime-dependency",
    ]
    if extra_args:
        options.extend(extra_args)
    rule_runner.set_options(options, env_inherit={"PATH", "PYENV_ROOT", "HOME"})
    target = rule_runner.get_target(address)
    hydrated = rule_runner.request(
        HydratedSources, [HydrateSourcesRequest(target[ThriftSourceField])]
    )
    generated = rule_runner.request(
        GeneratedSources,
        [GeneratePythonFromThriftRequest(hydrated.snapshot, target)],
    )
    assert set(generated.snapshot.files) == set(expected_files)
@requires_thrift
def test_generates_python(rule_runner: RuleRunner) -> None:
    """End-to-end generation across three targets and two source roots.

    Covers: file naming with and without a `namespace py` declaration (without
    one, Thrift drops all parent directories and we only restore the source
    root); thrift-to-thrift includes with transitive dependencies (only the
    requested target is generated); and preservation of multiple source roots
    in the final output.
    """
    rule_runner.write_files(
        {
            "src/thrift/dir1/f.thrift": "",
            "src/thrift/dir1/BUILD": "thrift_sources()",
            "src/thrift/dir2/f.thrift": dedent(
                """\
                include "dir1/f.thrift"
                namespace py custom_namespace.module
                """
            ),
            "src/thrift/dir2/BUILD": "thrift_sources(dependencies=['src/thrift/dir1'])",
            # A target under a second source root.
            "tests/thrift/test_thrifts/f.thrift": 'include "dir2/f.thrift"',
            "tests/thrift/test_thrifts/BUILD": "thrift_sources(dependencies=['src/thrift/dir2'])",
        }
    )
    expectations = [
        (
            Address("src/thrift/dir1", relative_file_path="f.thrift"),
            [
                "src/thrift/__init__.py",
                "src/thrift/f/__init__.py",
                "src/thrift/f/constants.py",
                "src/thrift/f/ttypes.py",
            ],
        ),
        (
            Address("src/thrift/dir2", relative_file_path="f.thrift"),
            [
                "src/thrift/__init__.py",
                "src/thrift/custom_namespace/__init__.py",
                "src/thrift/custom_namespace/module/__init__.py",
                "src/thrift/custom_namespace/module/constants.py",
                "src/thrift/custom_namespace/module/ttypes.py",
            ],
        ),
        (
            Address("tests/thrift/test_thrifts", relative_file_path="f.thrift"),
            [
                "tests/thrift/__init__.py",
                "tests/thrift/f/__init__.py",
                "tests/thrift/f/constants.py",
                "tests/thrift/f/ttypes.py",
            ],
        ),
    ]
    for address, expected in expectations:
        assert_files_generated(
            rule_runner,
            address,
            source_roots=["/src/thrift", "/tests/thrift"],
            expected_files=expected,
        )
@requires_thrift
def test_top_level_source_root(rule_runner: RuleRunner) -> None:
    """Generation still works when the source root is the repository root itself."""
    rule_runner.write_files(
        {
            "codegen/dir/f.thrift": "",
            "codegen/dir/f2.thrift": "namespace py custom_namespace.module",
            "codegen/dir/BUILD": "thrift_sources()",
        }
    )
    cases = {
        # No namespace: Thrift drops parent dirs, only the source root remains.
        "f.thrift": [
            "__init__.py",
            "f/__init__.py",
            "f/constants.py",
            "f/ttypes.py",
        ],
        # Explicit `namespace py` controls the generated package path.
        "f2.thrift": [
            "__init__.py",
            "custom_namespace/__init__.py",
            "custom_namespace/module/__init__.py",
            "custom_namespace/module/constants.py",
            "custom_namespace/module/ttypes.py",
        ],
    }
    for relative_path, expected in cases.items():
        assert_files_generated(
            rule_runner,
            Address("codegen/dir", relative_file_path=relative_path),
            source_roots=["/"],
            expected_files=expected,
        )
def test_find_thrift_python_requirement(rule_runner: RuleRunner) -> None:
    """Inference should find exactly one `thrift` runtime requirement per resolve.

    Walks through three cases in order: no candidate (error), exactly one
    (inferred), and more than one in the same resolve (ambiguity error).
    A candidate living in a *different* resolve must not count as ambiguous.
    """
    rule_runner.write_files({"codegen/dir/f.thrift": "", "codegen/dir/BUILD": "thrift_sources()"})
    # Two resolves so we can show a requirement in another resolve is ignored.
    rule_runner.set_options(
        ["--python-resolves={'python-default': '', 'another': ''}", "--python-enable-resolves"]
    )
    thrift_tgt = rule_runner.get_target(Address("codegen/dir", relative_file_path="f.thrift"))
    request = InferApacheThriftPythonDependencies(
        ApacheThriftPythonDependenciesInferenceFieldSet.create(thrift_tgt)
    )
    # Start with no relevant requirements.
    with engine_error(MissingPythonCodegenRuntimeLibrary):
        rule_runner.request(InferredDependencies, [request])
    # If exactly one, match it.
    rule_runner.write_files({"reqs1/BUILD": "python_requirement(requirements=['thrift'])"})
    assert rule_runner.request(InferredDependencies, [request]) == InferredDependencies(
        [Address("reqs1")]
    )
    # Multiple is fine if from other resolve.
    rule_runner.write_files(
        {"another_resolve/BUILD": "python_requirement(requirements=['thrift'], resolve='another')"}
    )
    assert rule_runner.request(InferredDependencies, [request]) == InferredDependencies(
        [Address("reqs1")]
    )
    # If multiple from the same resolve, error.
    rule_runner.write_files({"reqs2/BUILD": "python_requirement(requirements=['thrift'])"})
    with engine_error(
        AmbiguousPythonCodegenRuntimeLibrary, contains="['reqs1:reqs1', 'reqs2:reqs2']"
    ):
        rule_runner.request(InferredDependencies, [request])
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
# Search-result listing URL on ipeen.com.tw (Taipei, sorted by rating).
link = "http://www.ipeen.com.tw/search/taipei/000/1-0-0-0/?baragain=1&so=sat"
# Site root, used to turn relative detail-page hrefs into absolute URLs.
NextPage = "http://www.ipeen.com.tw"
count = 1     # running row counter, used only for progress printing
alldata = []  # one [shop, price, score, category, district, address] row per shop
tmplink = []  # detail-page URL for each row in `alldata`
def SplitStr(InputStr):
    """Extract the district from a Taiwanese address string.

    Skips everything up to and including the city marker ("市" or "縣"),
    then collects characters up to and including the district marker "區".
    Returns "" when no city marker is found.
    """
    district = ""
    past_city = False  # becomes True once 市/縣 has been seen
    for ch in InputStr:
        if past_city:
            district += ch
        if ch == "縣" or ch == "市":
            past_city = True
        if ch == "區":
            # "區" itself was already appended above; stop here.
            break
    return district
def toIntger(inputNum):
    """Join an iterable of characters into one string; return it as an int
    when it is purely digits, otherwise return the joined string as-is."""
    joined = "".join(inputNum)
    return int(joined) if joined.isdigit() else joined
#================================================================
# Main scrape: fetch the requested number of listing pages, collect one row
# per shop, then visit each shop's detail page for its click count, and
# finally export everything to an Excel file.
# NOTE(review): network I/O with site-specific CSS selectors — assumes the
# ipeen.com.tw markup at the time of writing; verify selectors still match.
for x in range(1,int(input("請輸入要讀取得頁數 : "))+1):
    res = requests.get(link+"&p="+str(x))  # fetch listing page x
    soup = BeautifulSoup(res.text,"html.parser")  # parse the page HTML
    clean = soup.select(".serItem")  # one element per shop in the listing
    for item in clean:
        shop = item.select('.a37.ga_tracking')[0].text.strip()
        tmp = filter(str.isdigit, item.select('.costEmpty')[0].text.strip().split()[1])  # keep only digit characters
        price = toIntger(tmp)  # average price as int (or "" if no digits)
        tmp = filter(str.isdigit, item.select('.score')[0].text.strip())  # keep only digit characters
        score = toIntger(tmp)  # review count as int (or "" if no digits)
        category = item.select('.cate')[0].text.strip().split("/")[0].split("\xa0")[0]
        address = item.select('.basic')[0].text.strip().split(":")[2].split("\t")[0].strip("\n")
        fulladdress = address
        address = SplitStr(address)  # reduce full address to the district name
        # Only keep shops that have both a price and a review count.
        if price != 0 and score!=0:
            print('========[',count,']========')
            alldata.append([shop,price,score,category,address,fulladdress])
            tmplink.append(NextPage+item.find('a')['href'])  # remember detail-page URL
            count += 1
print('========[第一次結束]========')
count=1
for x in range(len(tmplink)):  # second pass: one request per shop detail page
    print('========[',count,']========')
    soup = BeautifulSoup(requests.get(tmplink[x]).text,"html.parser")
    for item in soup.select(".scalar"):
        tmp = filter(str.isdigit,item.select('em')[1].text.strip())
        tmp = int(toIntger(tmp))
        alldata[x].append(tmp)  # append the click count as column 6
        break;  # only the first ".scalar" element is needed
    count+=1
# Split the collected rows into columns for the DataFrame.
shop = [x[0] for x in alldata]
click = [x[6] for x in alldata]
score = [x[2] for x in alldata]
price = [x[1] for x in alldata]
address = [x[4] for x in alldata]
fulladdress=[x[5] for x in alldata]
category = [x[3] for x in alldata]
select = {'店名':shop,'區域名稱':address,'類型':category,'平均消費':price,'評論人數':score,'點擊次數':click,'地址':fulladdress}
cost_and_click=pd.DataFrame(select)
# Write the result sheet to an Excel workbook.
writer = pd.ExcelWriter('dataFood.xlsx')
cost_and_click.to_excel(writer,'愛評網')
writer.save()
|
"""``pytest`` fixtures."""
import pytest
from tinyflow import __license__
from tinyflow import _testing
@pytest.fixture(scope='module')
def wordcount_input():
    """Lines of the package license text, used as wordcount input."""
    lines = __license__.splitlines()
    return lines
@pytest.fixture(scope='module')
def wordcount_top5():
    """Expected five most frequent words in the license text, with counts."""
    expected = {'the': 13, 'of': 12, 'or': 11, 'and': 8, 'in': 6}
    return expected
@pytest.fixture(scope='module')
def add2():
    """Provide a named two-argument adder equivalent to ``lambda a, b: a + b``.

    A module-level function is used because lambdas cannot be pickled.
    """
    return _testing.add2
@pytest.fixture(scope='module')
def add4():
    """Like the ``add2`` fixture, but the adder takes four arguments."""
    return _testing.add4
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-05-31 23:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add `header_image_path` and `text` fields to the blog `Post` model."""

    dependencies = [
        ('blog', '0001_initial'),
    ]
    operations = [
        # Header image for a post, chosen from files under blog/img;
        # existing rows default to the stock banner.
        migrations.AddField(
            model_name='post',
            name='header_image_path',
            field=models.FilePathField(default='zilla-bg.png', path='blog/img'),
        ),
        # Post body, with a placeholder default so existing rows stay valid.
        migrations.AddField(
            model_name='post',
            name='text',
            field=models.TextField(default='Blog post goes here :)'),
        ),
    ]
|
import pytest
import allure
@allure.title("Запрос всех доступных ресурсов")
@pytest.mark.xfail(reason="Как пример падающего теста", strict=True)
def test_get_all_resources(session, base_url):
    """API root must answer 200 and list exactly seven resources.

    Deliberately marked strict-xfail as a demo: an unexpected pass would fail
    the suite.
    """
    response = session.get(url=f'{base_url}')
    assert response.status_code == 200, f"Неверный код ответа, получен {response.status_code}"
    assert len(response.json()) == 7, "Количество ресурсов != 7"
@allure.title("Запрос существующего в базе фильма")
@pytest.mark.parametrize("number", [1, 6])
def test_get_real_film(session, base_url, number):
    """An existing film must return 200 and a self-referencing `url` field."""
    film_url = f"{base_url}/films/{number}"
    response = session.get(url=film_url)
    assert response.status_code == 200, f"Неверный код ответа, получен {response.status_code}"
    assert response.json()["url"] == f"{film_url}/", "Неверный url"
@allure.title("Запрос персонажа Luke Skywalker")
@pytest.mark.skip(reason="Как пример пропуска теста")
def test_get_people(session, base_url):
    """Character #1 must be Luke Skywalker (skipped as a demo of pytest.mark.skip)."""
    response = session.get(url=f"{base_url}/people/1")
    assert response.status_code == 200, f"Неверный код ответа, получен {response.status_code}"
    name = response.json()["name"]
    assert name == "Luke Skywalker", f'Неверный персонаж, получен {name}'
@allure.title("Запрос несуществующего в базе фильма")
@pytest.mark.parametrize("number", [-1, 0, 7])
def test_get_no_real_film(session, base_url, number):
    """Out-of-range film ids (below 1 or above the last film) must yield 404."""
    response = session.get(url=f"{base_url}/films/{number}")
    assert response.status_code == 404, f"Неверный код ответа, получен {response.status_code}"
|
from __future__ import print_function
import shutil
import os
import glob
import cv2
import numpy as np
#path = '../data/img/training/'
#output_path = '../data/img/training/'
# Binarize and resize every grayscale mask PNG from `path` into `output_path`,
# renaming "name_NN.png" to "name-N.png" (the two-digit suffix becomes an int).
path = '../data/mask/95_masks_ori/'
output_path = '../data/mask/95_masks/'
for img_name in glob.glob(path + '/*.png'):
    # NOTE(review): '/'-based splitting assumes POSIX paths — os.path.basename
    # would be portable.
    pure_name = img_name.split('/')[-1]
    pure_name = pure_name.split('.')[0]
    print(pure_name)
    # Assumes filenames end with a separator plus a two-digit index, e.g. "x_07".
    number = int(pure_name[-2:])
    pure_name = pure_name[:-3]
    pure_name = pure_name + '-{}'.format(number)
    im_gray = cv2.imread(img_name, cv2.IMREAD_GRAYSCALE)
    # Hard threshold: pixels brighter than 100 become 255, everything else 0.
    indices = np.where(im_gray > 100)
    im_gray[:,:] = 0
    im_gray[indices] = 255
    # NOTE(review): resizing *after* thresholding reintroduces gray values via
    # interpolation — confirm this is intended for the downstream consumer.
    im_gray = cv2.resize(im_gray, (512, 512))
    cv2.imwrite('{}/{}.png'.format(output_path, pure_name), im_gray)
|
from mysql.connector.errors import Error
from flask import Blueprint, flash, g
from flask_restful import Api, Resource, reqparse, fields, marshal_with
from homework.db import get_db
# 下面为department的api的实现
# Request-body parser for the Department endpoints below: all three fields
# are required strings and produce a 400 with the given help text if missing.
parser_departmentItem = reqparse.RequestParser()
parser_departmentItem.add_argument('departName', required=True,
                                   type=str, help="departName not provide.")
parser_departmentItem.add_argument('departOffice', required=True,
                                   type=str, help="departOffice not provide.")
parser_departmentItem.add_argument('dormitoryNo', required=True,
                                   type=str, help="dormitoryNo not provide.")
class departmentItem(Resource):
    """CRUD endpoints for a single Department row, keyed by `departNo`.

    All queries are parameterized: `departNo` and the body fields come from the
    HTTP request, so interpolating them into the SQL string with `%` (as the
    previous version did) was an SQL-injection hole.
    """

    def checkIfExist(self, departNo):
        """Return True iff a Department row with this `departNo` exists."""
        cur = get_db().cur
        cur.execute("SELECT * FROM Department WHERE departNo=%s", (departNo,))
        return len(cur.fetchall()) >= 1

    def get(self, departNo):
        """Fetch one department; errCode -1 when the row does not exist."""
        cur = get_db().cur
        cur.execute(
            "SELECT departNo,departName,departOffice,departNum,dormitoryNo FROM Department WHERE departNo=%s",
            (departNo,))
        items = cur.fetchone()
        if not items:
            return {'errCode': -1, 'status': '请求条目不存在'}
        return {'errCode': 0, 'status': 'OK', 'data': {'departNo': items[0], 'departName': items[1], 'departOffice': items[2], 'departNum': items[3], 'dormitoryNo': items[4]}}

    def put(self, departNo):
        """Update name/office/dormitory of an existing department."""
        db = get_db()
        cur = get_db().cur
        args = parser_departmentItem.parse_args()
        if not self.checkIfExist(departNo):
            return {'errCode': -1, 'status': '操作的系不存在'}
        try:
            cur.execute(
                "UPDATE Department SET departName=%s,departOffice=%s,dormitoryNo=%s WHERE departNo=%s;",
                (args['departName'], args['departOffice'], args['dormitoryNo'], departNo))
            db.commit()
        except Error as e:
            # Surface the database error message to the client.
            return {'errCode': -1, 'status': str(e)}
        return {'errCode': 0, 'status': 'OK'}, 200

    def delete(self, departNo):
        """Delete a department; errCode -1 when the row does not exist."""
        if not self.checkIfExist(departNo):
            return {'errCode': -1, 'status': '操作的系不存在'}
        db = get_db()
        cur = get_db().cur
        try:
            cur.execute("DELETE FROM Department WHERE departNo=%s;", (departNo,))
            db.commit()
        except Error as e:
            return {'errCode': -1, 'status': str(e)}
        return {'errCode': 0, 'status': 'OK'}, 200
# The collection endpoints require the same three body fields.
parser_department = parser_departmentItem.copy()


class department(Resource):
    """Collection endpoints: list all departments, create a new one."""

    def get(self):
        """Return every Department row."""
        cur = get_db().cur
        cur.execute(
            "SELECT departNo,departName,departOffice,departNum,dormitoryNo FROM Department;")
        res = {'errCode': 0, 'status': 'OK', 'data': [
            {'departNo': item[0], 'departName': item[1], 'departOffice': item[2], 'departNum': item[3], 'dormitoryNo': item[4]} for item in cur.fetchall()]}
        return res

    def post(self):
        """Insert a new department from the parsed request body.

        The INSERT is parameterized to avoid SQL injection through the
        user-supplied fields (previously they were `%`-interpolated).
        """
        args = parser_department.parse_args()
        db = get_db()
        cur = get_db().cur
        try:
            cur.execute(
                "INSERT INTO Department(departName,departOffice,dormitoryNo) VALUES(%s, %s, %s);",
                (args['departName'], args['departOffice'], args['dormitoryNo']))
            db.commit()
        except Error as e:
            return {'errCode': -1, 'status': str(e)}
        return {'errCode': 0, 'status': 'OK'}, 200
|
#!/usr/bin/env python3.6
# -*- coding: iso-8859-15 -*-
import pygame
from pygame.locals import *
from OpenGL.GL import *
#from OpenGL.GLUT import *
from OpenGL.GLU import *
import numpy as np
# RGB color constants as floats in [0, 1], the range expected by glColor3f.
BLACK = (0.0, 0.0, 0.0)
WHITE = (1.0, 1.0, 1.0)
MAJOR_BLUE = (0.290198, 0.627456, 0.729418)
MINOR_BLUE = (0.078432, 0.203923, 0.207845)
MAJOR_GRAY = (0.388238, 0.388238, 0.388238)
MINOR_GRAY = (0.12157, 0.12157, 0.12157)
RED = (1.0, 0.0, 0.0)
LIGH_RED = (1.0, 0.454906, 0.39216)
LIGH_GREEN = (0.549024, 0.737261, 0.470592)
ORANGE = (0.843144, 0.572554, 0.305885)
def scale(sx=1.0, sy=1.0, sz=1.0):
    """Return the 3x3 scaling matrix diag(sx, sy, sz)."""
    return np.diag([sx, sy, sz])
def rotate(tx=0.0, ty=0.0, tz=0.0):
    """Return the rotation matrix Rx(tx) @ Ry(ty) @ Rz(tz) (angles in radians)."""
    cx, sx = np.cos(tx), np.sin(tx)
    cy, sy = np.cos(ty), np.sin(ty)
    cz, sz = np.cos(tz), np.sin(tz)
    rot_x = np.array([[1.0, 0.0, 0.0],
                      [0.0, cx, -sx],
                      [0.0, sx, cx]])
    rot_y = np.array([[cy, 0.0, sy],
                      [0.0, 1.0, 0.0],
                      [-sy, 0.0, cy]])
    rot_z = np.array([[cz, -sz, 0.0],
                      [sz, cz, 0.0],
                      [0.0, 0.0, 1.0]])
    # Composition order matters: x first, then y, then z.
    return rot_x @ rot_y @ rot_z
class Grid:
    """Axis-aligned 3D grid of line segments, drawable inside glBegin/glEnd.

    Each axis spec is a (lo, hi, step) triple. Grid lines run parallel to one
    axis and are anchored at every lattice point of the other two axes; each
    line is stored as two consecutive rows (its endpoints) in a float array.
    """

    def __init__(self, xs, ys, zs,
                 linecolor=WHITE, pointcolor=RED,
                 linewidth=1):
        # Lines parallel to x span the (y, z) lattice, and so on cyclically.
        self.xpoints = self._segments(0, xs, ys, zs)
        self.ypoints = self._segments(1, ys, xs, zs)
        self.zpoints = self._segments(2, zs, xs, ys)
        self.linecolor = linecolor
        self.pointcolor = pointcolor
        self.linewidth = linewidth

    @staticmethod
    def _segments(axis, along, other_a, other_b):
        """Build the (2*K, 3) endpoint array for lines parallel to `axis`.

        `along` is that axis's (lo, hi, step); `other_a`/`other_b` are the
        remaining axis specs in ascending axis order.
        """
        lattice = np.mgrid[other_a[0]:other_a[1] + other_a[2]:other_a[2],
                           other_b[0]:other_b[1] + other_b[2]:other_b[2]].reshape(2, -1).T
        other_axes = [i for i in range(3) if i != axis]
        rows = []
        for a, b in lattice:
            for end in (along[0], along[1]):
                point = np.zeros(3)
                point[axis] = end
                point[other_axes[0]] = a
                point[other_axes[1]] = b
                rows.append(point)
        return np.array(rows).reshape(-1, 3)

    def draw(self):
        """Emit all endpoints; the caller must wrap this in glBegin(GL_LINES)/glEnd()."""
        glColor3f(*self.linecolor)
        for endpoints in (self.xpoints, self.ypoints, self.zpoints):
            for vertex in endpoints:
                glVertex3fv(vertex)

    def transform(self, T):
        """Apply the 3x3 matrix `T` to every stored point, in place."""
        for axis_points in (self.xpoints, self.ypoints, self.zpoints):
            for i, point in enumerate(axis_points):
                axis_points[i] = np.dot(T, point)
def main():
    """Open a pygame/OpenGL window and animate a grid under a random linear map.

    Controls: left/right arrows rotate the view, up/down translate it,
    mouse wheel zooms, 'r' resets the grid with a gentler perturbation,
    'q' or closing the window quits.
    """
    pygame.init()
    width, height = 1000, 1000
    display = (width, height)
    pygame.display.set_mode(display, DOUBLEBUF | OPENGL)
    # NOTE(review): gluPerspective requires zNear > 0; passing 0 is invalid —
    # confirm this renders as intended on the target driver.
    gluPerspective(45, (width/height), 0, 150.0)
    glTranslatef(0, 0, -50)
    main_grid_major = Grid((-10, 10, 4), (-10, 10, 4), (-10, 10, 4),
                           linecolor=MAJOR_BLUE)
    # NOTE(review): T1 is computed but never used.
    T1 = rotate(0.01, 0.001, -0.01)
    # Identity plus a small random perturbation: the grid drifts every frame.
    T2 = np.random.uniform(-1, 1, (3,3)) * 1E-1
    T = T2 + np.identity(3)
    run = True
    while run:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_q:
                    run = False
                if event.key == pygame.K_LEFT:
                    #glTranslatef(-1, 0, 0)
                    glRotatef(1, 0, 1, 0)
                if event.key == pygame.K_RIGHT:
                    #glTranslatef(1, 0, 0)
                    glRotatef(1, 0, -1, 0)
                if event.key == pygame.K_UP:
                    glTranslatef(0, 1, 0)
                if event.key == pygame.K_DOWN:
                    glTranslatef(0, -1, 0)
                if event.key == pygame.K_r:
                    # Reset: fresh grid and a much smaller perturbation (1E-3).
                    main_grid_major = Grid((-10, 10, 4), (-10, 10, 4), (-10, 10, 4),
                                           linecolor=MAJOR_BLUE)
                    T1 = rotate(0.01, 0.001, -0.01)
                    T2 = np.random.uniform(-1, 1, (3,3)) * 1E-3
                    T = T2 + np.identity(3)
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Buttons 4/5 are the scroll wheel: zoom in/out.
                if event.button == 4:
                    glTranslatef(0, 0, 1.0)
                if event.button == 5:
                    glTranslatef(0, 0, -1.0)
        # Do stuff
        main_grid_major.transform(T)
        # Draw stuff
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glBegin(GL_LINES)
        main_grid_major.draw()
        glEnd()
        pygame.display.flip()
        pygame.time.wait(10)
    print('Exiting')
    pygame.quit()
if __name__ == '__main__':
    main()
    print('Goodbye!')
|
from __future__ import unicode_literals, print_function
from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.generic import GenericRelation
from django.conf import settings
from cjklib.characterlookup import CharacterLookup
from hitcount.models import HitCount
from .managers import NovelManager
# Upload directory for cover images; re-exported so the model fields below
# don't reach into settings directly.
IMAGE_UPLOAD_DIR = settings.IMAGE_UPLOAD_DIR
def get_cat_code(s):
    """Return a one-letter category code for a (usually Chinese) name.

    Chinese names are coded by the first letter of the first character's
    Pinyin reading; anything else is coded by its uppercased first character.
    """
    first_char = unicode(s)[0]
    lookup = CharacterLookup("C")
    readings = lookup.getReadingForCharacter(first_char, "Pinyin")
    if not readings:
        # Not a Chinese character: fall back to the character itself.
        return first_char.upper()
    # Several readings may exist and picking the right one is hard, so take
    # the first and let users correct the code manually when it is wrong.
    return readings[0][0].upper()
class Novel(models.Model):
    """A novel series; default ordering puts the most recently updated first."""
    name = models.CharField(max_length=512)
    # One-letter index code derived from `name` (see get_cat_code); filled
    # automatically on first save.
    cat_code = models.CharField(max_length=5)
    description = models.TextField()
    author = models.CharField(max_length=50)
    publisher = models.CharField(max_length=50)
    image = models.ImageField(upload_to=IMAGE_UPLOAD_DIR, blank=True)
    # Aggregate rating: total points awarded and number of votes.
    rating_points = models.IntegerField(default=0)
    rating_count = models.IntegerField(default=0)
    updated_date = models.DateTimeField(auto_now=True)
    objects = NovelManager()
    class Meta:
        get_latest_by = "updated_date"
        ordering = ["-updated_date", "-pk"]
    def __unicode__(self):
        return self.name
    def save(self, *args, **kwargs):
        # Derive the category code once, the first time the novel is saved.
        if not self.cat_code:
            self.cat_code = get_cat_code(self.name)
        super(Novel, self).save(*args, **kwargs)
class Volume(models.Model):
    """One volume of a Novel; ordered explicitly with respect to its novel."""
    name = models.CharField(max_length=512)
    volume = None  # (placeholder removed)
    novel = models.ForeignKey(Novel)
    description = models.TextField(blank=True)
    image = models.ImageField(upload_to=IMAGE_UPLOAD_DIR, blank=True)
    # Aggregate rating: total points awarded and number of votes.
    rating_points = models.IntegerField(default=0)
    rating_count = models.IntegerField(default=0)
    class Meta:
        order_with_respect_to = "novel"
    def __unicode__(self):
        return self.name
class Chapter(models.Model):
    """A chapter inside a Volume.

    The chapter body lives in a separate ChapterContent row (one-to-one) so
    the potentially large text is only loaded on demand; the `content`
    property hides that indirection and defers writes until save().
    """
    name = models.CharField(max_length=512)
    volume = models.ForeignKey(Volume)
    rating_points = models.IntegerField(default=0)
    rating_count = models.IntegerField(default=0)
    updated_date = models.DateTimeField(auto_now_add=True)
    posted_by = models.ForeignKey(User)
    hitcount_object = GenericRelation(HitCount, object_id_field="object_pk")
    def get_content(self):
        """Return the chapter text, or "" when no content row exists yet."""
        try:
            return self.content_record.content
        except ChapterContent.DoesNotExist:
            return ""
    def set_content(self, content):
        """Stage new chapter text; it is persisted on the next save()."""
        content_record = None
        try:
            content_record = self.content_record
        except ChapterContent.DoesNotExist:
            pass
        if not content_record:
            content_record = ChapterContent(chapter=self)
            self.content_record = content_record
        content_record.content = content
        # Mark dirty so save() knows to persist the content row.
        self._content_dirty = True
    content = property(get_content, set_content)
    @property
    def hitcount_object_safe(self):
        """HitCount for this chapter, created on first access and cached."""
        if not hasattr(self, "_hitcount_object_safe"):
            self._hitcount_object_safe = HitCount.objects.get_for_object(self)
        return self._hitcount_object_safe
    def get_hit_count(self):
        return self.hitcount_object_safe.hits
    def set_hit_count(self, hits):
        self.hitcount_object_safe.hits = hits
        self.hitcount_object_safe.save()
    hit_count = property(get_hit_count, set_hit_count)
    class Meta:
        order_with_respect_to = "volume"
    def __unicode__(self):
        return self.name
    def save(self, *args, **kwargs):
        super(Chapter, self).save(*args, **kwargs)
        if self._content_dirty:
            # Ensure content_record.chapter_id is set
            self.content_record.chapter = self
            self.content_record.save()
            self._content_dirty = False
        # Touch the parent novel so it bubbles up in "recently updated" order.
        self.volume.novel.save()
        # Ensure HitCount object is created. Fix #22
        self.get_hit_count()
    def __init__(self, *args, **kwargs):
        super(Chapter, self).__init__(*args, **kwargs)
        # Tracks whether staged content needs persisting; see set_content().
        self._content_dirty = False
class ChapterContent(models.Model):
    """Large chapter body, split from Chapter so listings don't load the text."""
    chapter = models.OneToOneField(Chapter, related_name="content_record")
    content = models.TextField()
|
from test.tts.mytts import gTTS
def test():
    """Smoke-test gTTS: synthesize a Chinese sentence and save it as an MP3."""
    tts = gTTS("罗大姐说,她弟弟在买奔驰之前,就跟她提起过一个女朋友,按弟弟的描述,那就是一个典型的白富美,但弟弟从来没带对方来见过面",lang='zh')
    # NOTE(review): Windows-style output path with a drive letter — adjust per
    # platform before running elsewhere.
    tts.save("E://temp/tts/gtts.mp3")
    # tts.save("/home/recsys/hzwangjian1/data/test_gtts91.mp3")
#!/usr/bin/env python
def laceStrings(s1,s2):
    """Interleave s1 and s2 one character at a time.

    Characters are taken alternately from s1 and s2; once the shorter string
    runs out, the remainder of the longer one is appended unchanged.
    """
    pieces = []
    longest = max(len(s1), len(s2))
    for i in range(longest):
        # Slicing (rather than indexing) yields '' past the end of a string.
        pieces.append(s1[i:i+1])
        pieces.append(s2[i:i+1])
    return ''.join(pieces)
# Demo calls. Parenthesized so the script runs under both Python 2 and
# Python 3: with a single argument, `print(x)` behaves identically in both
# (the original `print x` statements were Python-2-only syntax).
print(laceStrings('',''))
print(laceStrings('12','ab'))
print(laceStrings('1','ab'))
print(laceStrings('12','a'))
print(laceStrings('','a'))
print(laceStrings('1',''))
|
#!/usr/bin/env python3
#
# Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# Updates the list of Observatory source files.
import os
import sys
from datetime import date
def getDir(rootdir, target):
    """Recursively list files under `rootdir` as '/'-joined paths.

    `target` is accepted for call-site compatibility but is unused. Sorting
    `subdirs` in place makes os.walk — and therefore the returned order —
    deterministic across platforms.
    """
    found = []
    for root, subdirs, files in os.walk(rootdir):
        subdirs.sort()
        files.sort()
        found.extend(root + '/' + name for name in files)
    return found
# License/warning header written verbatim at the top of the generated .gni file.
HEADER = """# Copyright (c) 2017, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# DO NOT EDIT. This file is generated by update_sources.py in this directory.
# This file contains all dart, css, and html sources for Observatory.
"""
def main():
    """Regenerate observatory_sources.gni from the files under lib/ and web/."""
    with open('observatory_sources.gni', 'w') as target:
        target.write(HEADER)
        target.write('observatory_sources = [\n')
        sources = []
        for rootdir in ['lib', 'web']:
            sources.extend(getDir(rootdir, target))
        sources.sort()
        # Every file except README.md becomes a quoted, comma-terminated entry.
        for source in sources:
            if not source.endswith('README.md'):
                target.write('  "' + source + '",\n')
        target.write(']\n')
if __name__ == "__main__":
    main()
|
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VP9 codec definitions.
This is an instance of a codec definition.
It tells the generic codec the following:
- Name of codec = directory of codec database
- File extension
- Options table
"""
import encoder
import file_codec
class Vp9Codec(file_codec.FileCodec):
  """VP9 definition for the generic file-based codec framework.

  Supplies the codec name (which is also its database directory), the .webm
  container extension, the tunable option set, and the vpxenc/vpxdec command
  lines used to encode and decode.
  """
  def __init__(self, name='vp9'):
    super(Vp9Codec, self).__init__(name)
    self.extension = 'webm'
    self.option_set = encoder.OptionSet(
      encoder.IntegerOption('cpu-used', 0, 16),
      # The "best" option gives encodes that are too slow to be useful.
      encoder.ChoiceOption(['good', 'rt']).Mandatory(),
      encoder.IntegerOption('passes', 1, 2),
    )
  def StartEncoder(self, context):
    """Default starting point: single pass, 'good' quality, cpu-used=5."""
    return encoder.Encoder(context, encoder.OptionValueSet(self.option_set,
        '--passes=1 --good --noise-sensitivity=0 --cpu-used=5'))
  def EncodeCommandLine(self, parameters, bitrate, videofile, encodedfile):
    """Build the vpxenc command line for the given bitrate and files."""
    commandline = (encoder.Tool('vpxenc') + ' ' + parameters.ToString()
                   + ' --target-bitrate=' + str(bitrate)
                   + ' --fps=' + str(videofile.framerate) + '/1'
                   + ' -w ' + str(videofile.width)
                   + ' -h ' + str(videofile.height)
                   + ' ' + videofile.filename
                   + ' --codec=vp9 '
                   + ' -o ' + encodedfile)
    return commandline
  def DecodeCommandLine(self, videofile, encodedfile, yuvfile):
    """Build the vpxdec command line decoding to raw I420 YUV."""
    commandline = '%s %s --i420 -o %s' % (encoder.Tool("vpxdec"),
                                          encodedfile, yuvfile)
    return commandline
  def ResultData(self, encodedfile):
    """Extra per-encode results: per-frame info from the Matroska container."""
    more_results = {}
    more_results['frame'] = file_codec.MatroskaFrameInfo(encodedfile)
    return more_results
|
# Create (or truncate) the stage-01 artifact file and record a marker line.
with open("artifacts01.txt","w+") as f:
    f.write("text in stage01.py")
""" Transform AWS Transcribe json files to docx, csv, sqlite and vtt. """
from docx import Document
from docx.shared import Cm, Mm, Inches, RGBColor
from docx.enum.text import WD_ALIGN_PARAGRAPH
import json, datetime
import matplotlib.pyplot as plt
import statistics
from pathlib import Path
from time import perf_counter
import pandas
import sqlite3
import webvtt
import logging
def convert_time_stamp(timestamp: str) -> str:
    """Convert a seconds string (e.g. "61.27") to H:MM:SS text.

    Fractional seconds are dropped; the formatting comes from timedelta's str().
    """
    total = datetime.timedelta(seconds=float(timestamp))
    whole_seconds = total - datetime.timedelta(microseconds=total.microseconds)
    return str(whole_seconds)
def load_json_as_dict(filepath: str) -> dict:
    """Load an AWS Transcribe result file and sanity-check its structure.

    Raises AssertionError when the file is missing, lacks the expected
    top-level keys ("jobName", "results", "status"), or the job is not
    in the COMPLETED state.
    """
    logging.info("Loading json")
    json_filepath = Path(filepath)
    assert json_filepath.is_file(), "JSON file does not exist"
    # Use a context manager so the file handle is always closed
    # (previously the handle from a bare open() was leaked).
    with open(json_filepath.absolute(), "r", encoding="utf-8") as json_file:
        data = json.load(json_file)
    assert "jobName" in data
    assert "results" in data
    assert "status" in data
    assert data["status"] == "COMPLETED", "JSON file not shown as completed."
    logging.debug("json checks passed")  # typo "psased" fixed
    return data
def calculate_confidence_statistics(data: dict) -> dict:
    """Tally word-level confidence scores from a Transcribe result.

    Returns a dict with per-word timestamps and integer-percent accuracies,
    bucket counters ("9.8" means confidence >= 0.98, then one bucket per
    first decimal digit), and the total item count. Punctuation items count
    toward the total but carry no confidence.
    """
    logging.info("Gathering confidence statistics")
    items = data["results"]["items"]
    stats = {"timestamps": [], "accuracy": []}
    for bucket in ("9.8", "9", "8", "7", "6", "5", "4", "3", "2", "1", "0"):
        stats[bucket] = 0
    stats["total"] = len(items)
    for item in items:
        if item["type"] != "pronunciation":
            continue
        stats["timestamps"].append(float(item["start_time"]))
        confidence = float(item["alternatives"][0]["confidence"])
        stats["accuracy"].append(int(confidence * 100))
        if confidence >= 0.98:
            stats["9.8"] += 1
        else:
            # e.g. 0.73 -> bucket "7"
            stats[str(int(confidence * 10))] += 1
    return stats
def make_graph_png(stats: dict, directory: str) -> str:
    """Plot per-word confidence over time and save it as <directory>/chart.png.

    Returns the path of the written PNG as a string. Uses the module-level
    pyplot state, so this is not safe to call concurrently.
    """
    logging.info("Making graph")
    # Confidence of each word as scatter graph
    plt.scatter(stats["timestamps"], stats["accuracy"])
    # Mean average as line across graph
    plt.plot(
        [stats["timestamps"][0], stats["timestamps"][-1]],
        [statistics.mean(stats["accuracy"]), statistics.mean(stats["accuracy"])],
        "r",
    )
    # Formatting
    plt.xlabel("Time (seconds)")
    plt.ylabel("Accuracy (percent)")
    plt.yticks(range(0, 101, 10))
    plt.title("Accuracy during transcript")
    plt.legend(["Accuracy average (mean)", "Individual words"], loc="lower center")
    # Target filename, including directory for explicit path
    filename = Path(directory) / Path("chart.png")
    plt.savefig(str(filename))
    logging.info("Graph saved to %s", filename)
    # Clear the figure so later plots don't draw on top of this one.
    plt.clf()
    return str(filename)
def decode_transcript_to_dataframe(data: dict):
    """Decode the transcript into a pandas dataframe.

    Produces columns start_time, end_time, speaker, comment — one row per
    speaker segment (speaker_labels), per channel turn (channel_labels),
    or a single row when neither labelling is present.
    """
    logging.info("Decoding transcript")
    decoded_data = {"start_time": [], "end_time": [], "speaker": [], "comment": []}
    # If speaker identification
    if "speaker_labels" in data["results"].keys():
        logging.debug("Transcipt has speaker_labels")
        # A segment is a blob of pronounciation and punctuation by an individual speaker
        for segment in data["results"]["speaker_labels"]["segments"]:
            # If there is content in the segment, add a row, write the time and speaker
            if len(segment["items"]) > 0:
                decoded_data["start_time"].append(
                    convert_time_stamp(segment["start_time"])
                )
                decoded_data["end_time"].append(convert_time_stamp(segment["end_time"]))
                decoded_data["speaker"].append(segment["speaker_label"])
                decoded_data["comment"].append("")
                # For each word in the segment...
                for word in segment["items"]:
                    # Get the word with the highest confidence
                    # NOTE(review): this filter is loop-invariant and could be
                    # hoisted out of both loops (currently O(n^2) in items)
                    pronunciations = list(
                        filter(
                            lambda x: x["type"] == "pronunciation",
                            data["results"]["items"],
                        )
                    )
                    # Match the segment word back to the full results item by
                    # its start/end timestamps
                    word_result = list(
                        filter(
                            lambda x: x["start_time"] == word["start_time"]
                            and x["end_time"] == word["end_time"],
                            pronunciations,
                        )
                    )
                    result = sorted(
                        word_result[-1]["alternatives"], key=lambda x: x["confidence"]
                    )[-1]
                    # Write the word
                    decoded_data["comment"][-1] += " " + result["content"]
                    # If the next item is punctuation, write it
                    try:
                        word_result_index = data["results"]["items"].index(
                            word_result[0]
                        )
                        next_item = data["results"]["items"][word_result_index + 1]
                        if next_item["type"] == "punctuation":
                            decoded_data["comment"][-1] += next_item["alternatives"][0][
                                "content"
                            ]
                    except IndexError:
                        pass
    # If channel identification
    elif "channel_labels" in data["results"].keys():
        logging.debug("Transcipt has channel_labels")
        # For each word in the results
        for word in data["results"]["items"]:
            # Punctuation items do not include a start_time
            if "start_time" not in word.keys():
                continue
            # Identify the channel
            channel = list(
                filter(
                    lambda x: word in x["items"],
                    data["results"]["channel_labels"]["channels"],
                )
            )[0]["channel_label"]
            # If still on the same channel, add the current word to the line
            if (
                channel in decoded_data["speaker"]
                and decoded_data["speaker"][-1] == channel
            ):
                current_word = sorted(
                    word["alternatives"], key=lambda x: x["confidence"]
                )[-1]
                decoded_data["comment"][-1] += " " + current_word["content"]
            # Else start a new line
            else:
                decoded_data["start_time"].append(
                    convert_time_stamp(word["start_time"])
                )
                decoded_data["end_time"].append(convert_time_stamp(word["end_time"]))
                decoded_data["speaker"].append(channel)
                current_word = sorted(
                    word["alternatives"], key=lambda x: x["confidence"]
                )[-1]
                decoded_data["comment"].append(current_word["content"])
            # If the next item is punctuation, write it
            try:
                word_result_index = data["results"]["items"].index(word)
                next_item = data["results"]["items"][word_result_index + 1]
                if next_item["type"] == "punctuation":
                    decoded_data["comment"][-1] += next_item["alternatives"][0][
                        "content"
                    ]
            except IndexError:
                pass
    # Neither speaker nor channel identification
    else:
        logging.debug("No speaker_labels or channel_labels")
        # NOTE(review): start_time/end_time are assigned scalar strings here
        # (not appended); pandas broadcasts them across the single row built
        # below — confirm this is intentional
        decoded_data["start_time"] = convert_time_stamp(
            list(
                filter(lambda x: x["type"] == "pronunciation", data["results"]["items"])
            )[0]["start_time"]
        )
        decoded_data["end_time"] = convert_time_stamp(
            list(
                filter(lambda x: x["type"] == "pronunciation", data["results"]["items"])
            )[-1]["end_time"]
        )
        decoded_data["speaker"].append("")
        decoded_data["comment"].append("")
        # Add words
        for word in data["results"]["items"]:
            # Get the word with the highest confidence
            result = sorted(word["alternatives"], key=lambda x: x["confidence"])[-1]
            # Write the word
            decoded_data["comment"][-1] += " " + result["content"]
            # If the next item is punctuation, write it
            try:
                word_result_index = data["results"]["items"].index(word)
                next_item = data["results"]["items"][word_result_index + 1]
                if next_item["type"] == "punctuation":
                    decoded_data["comment"][-1] += next_item["alternatives"][0][
                        "content"
                    ]
            except IndexError:
                pass
    # Produce pandas dataframe
    dataframe = pandas.DataFrame(
        decoded_data, columns=["start_time", "end_time", "speaker", "comment"]
    )
    # Clean leading whitespace
    dataframe["comment"] = dataframe["comment"].str.lstrip()
    return dataframe
def write_docx(data, filename, **kwargs):
    """Write a transcript from the .json transcription file.

    Produces an A4 Word document containing a confidence summary table, a
    confidence-over-time graph, and the transcript itself — grouped by
    speaker segment, by channel, or as one block when neither labelling is
    present. Words below the confidence threshold are rendered in grey.

    Improvements vs. the original: the eleven copy-pasted confidence-table
    row blocks are replaced by a single data-driven loop, and the
    loop-invariant pronunciation filter is hoisted out of the per-word loop
    (it was recomputed for every word, O(n^2) in the number of items).
    """
    logging.info("Writing docx")
    output_filename = Path(filename)
    # Initiate Document
    document = Document()
    # A4 Size
    document.sections[0].page_width = Mm(210)
    document.sections[0].page_height = Mm(297)
    # Font
    font = document.styles["Normal"].font
    font.name = "Calibri"
    # Document title and intro
    title = f"Transcription of {data['jobName']}"
    document.add_heading(title, level=1)
    # Set thresholds for formatting later
    threshold_for_grey = 0.98
    # Intro
    document.add_paragraph(
        "Transcription using AWS Transcribe automatic speech recognition and"
        " the 'tscribe' python package."
    )
    document.add_paragraph(
        datetime.datetime.now().strftime("Document produced on %A %d %B %Y at %X.")
    )
    document.add_paragraph()  # Spacing
    document.add_paragraph(
        f"Grey text has less than {int(threshold_for_grey * 100)}% confidence."
    )
    # Get stats
    stats = calculate_confidence_statistics(data)
    # Display confidence count table: one row per confidence band
    table = document.add_table(rows=1, cols=3)
    table.style = document.styles["Light List Accent 1"]
    table.alignment = WD_ALIGN_PARAGRAPH.CENTER
    hdr_cells = table.rows[0].cells
    hdr_cells[0].text = "Confidence"
    hdr_cells[1].text = "Count"
    hdr_cells[2].text = "Percentage"
    # (display label, stats-dict key) per band, highest confidence first
    confidence_bands = [
        ("98% - 100%", "9.8"),
        ("90% - 97%", "9"),
        ("80% - 89%", "8"),
        ("70% - 79%", "7"),
        ("60% - 69%", "6"),
        ("50% - 59%", "5"),
        ("40% - 49%", "4"),
        ("30% - 39%", "3"),
        ("20% - 29%", "2"),
        ("10% - 19%", "1"),
        ("0% - 9%", "0"),
    ]
    for band_label, stats_key in confidence_bands:
        row_cells = table.add_row().cells
        row_cells[0].text = band_label
        row_cells[1].text = str(stats[stats_key])
        row_cells[2].text = str(round(stats[stats_key] / stats["total"] * 100, 2)) + "%"
    # Add paragraph for spacing
    document.add_paragraph()
    # Confidence-over-time graph, centred
    graph = make_graph_png(stats, str(output_filename.parent))
    document.add_picture(graph, width=Cm(14.64))
    document.paragraphs[-1].alignment = WD_ALIGN_PARAGRAPH.CENTER
    document.add_page_break()
    # Process and display transcript by speaker segments
    table = document.add_table(rows=1, cols=3)
    table.style = document.styles["Light List Accent 1"]
    hdr_cells = table.rows[0].cells
    hdr_cells[0].text = "Time"
    hdr_cells[1].text = "Speaker"
    hdr_cells[2].text = "Content"
    # If speaker identification
    if "speaker_labels" in data["results"].keys():
        logging.debug("Transcript has speaker_labels")
        # Hoisted: this list does not change per word, so build it once
        pronunciations = list(
            filter(
                lambda x: x["type"] == "pronunciation",
                data["results"]["items"],
            )
        )
        # A segment is a blob of pronounciation and punctuation by an individual speaker
        for segment in data["results"]["speaker_labels"]["segments"]:
            # If there is content in the segment, add a row, write the time and speaker
            if len(segment["items"]) > 0:
                row_cells = table.add_row().cells
                row_cells[0].text = convert_time_stamp(segment["start_time"])
                row_cells[1].text = str(segment["speaker_label"])
                # For each word in the segment...
                for word in segment["items"]:
                    # Match the segment word to the full result item by timestamps
                    word_result = list(
                        filter(
                            lambda x: x["start_time"] == word["start_time"]
                            and x["end_time"] == word["end_time"],
                            pronunciations,
                        )
                    )
                    # Get the word with the highest confidence
                    result = sorted(
                        word_result[-1]["alternatives"], key=lambda x: x["confidence"]
                    )[-1]
                    # Write the word; grey it out when below the threshold
                    run = row_cells[2].paragraphs[0].add_run(" " + result["content"])
                    if float(result["confidence"]) < threshold_for_grey:
                        font = run.font
                        font.color.rgb = RGBColor(204, 204, 204)
                    # If the next item is punctuation, write it
                    try:
                        word_result_index = data["results"]["items"].index(
                            word_result[0]
                        )
                        next_item = data["results"]["items"][word_result_index + 1]
                        if next_item["type"] == "punctuation":
                            run = (
                                row_cells[2]
                                .paragraphs[0]
                                .add_run(next_item["alternatives"][0]["content"])
                            )
                    except IndexError:
                        pass
    # If channel identification
    elif "channel_labels" in data["results"].keys():
        logging.debug("Transcript has channel_labels")
        for word in data["results"]["items"]:
            # Punctuation items do not include a start_time
            if "start_time" not in word.keys():
                continue
            # Identify the channel
            channel = list(
                filter(
                    lambda x: word in x["items"],
                    data["results"]["channel_labels"]["channels"],
                )
            )[0]["channel_label"]
            # If still on the same channel, add the current word to the line
            if table.cell(-1, 1).text == channel:
                current_word = sorted(
                    word["alternatives"], key=lambda x: x["confidence"]
                )[-1]
                run = (
                    table.cell(-1, 2)
                    .paragraphs[0]
                    .add_run(" " + current_word["content"])
                )
                if float(current_word["confidence"]) < threshold_for_grey:
                    font = run.font
                    font.color.rgb = RGBColor(204, 204, 204)
            # Else start a new line
            else:
                current_word = sorted(
                    word["alternatives"], key=lambda x: x["confidence"]
                )[-1]
                row_cells = table.add_row().cells
                row_cells[0].text = convert_time_stamp(word["start_time"])
                row_cells[1].text = channel
                run = row_cells[2].paragraphs[0].add_run(" " + current_word["content"])
                if float(current_word["confidence"]) < threshold_for_grey:
                    font = run.font
                    font.color.rgb = RGBColor(204, 204, 204)
            # If the next item is punctuation, write it
            try:
                word_result_index = data["results"]["items"].index(word)
                next_item = data["results"]["items"][word_result_index + 1]
                if next_item["type"] == "punctuation":
                    run = (
                        row_cells[2]
                        .paragraphs[0]
                        .add_run(next_item["alternatives"][0]["content"])
                    )
            except IndexError:
                pass
    # Else no speaker identification
    else:
        logging.debug("No speaker_labels or channel_labels")
        # Start the first row
        row_cells = table.add_row().cells
        # Add words
        for word in data["results"]["items"]:
            # Get the word with the highest confidence
            result = sorted(word["alternatives"], key=lambda x: x["confidence"])[-1]
            # Write the word; grey it out when below the threshold
            run = row_cells[2].paragraphs[0].add_run(" " + result["content"])
            if float(result["confidence"]) < threshold_for_grey:
                font = run.font
                font.color.rgb = RGBColor(204, 204, 204)
            # If the next item is punctuation, write it
            try:
                word_result_index = data["results"]["items"].index(word)
                next_item = data["results"]["items"][word_result_index + 1]
                if next_item["type"] == "punctuation":
                    run = (
                        row_cells[2]
                        .paragraphs[0]
                        .add_run(next_item["alternatives"][0]["content"])
                    )
            except IndexError:
                pass
    # Formatting transcript table widths
    widths = (Inches(0.6), Inches(1), Inches(4.5))
    for row in table.rows:
        for idx, width in enumerate(widths):
            row.cells[idx].width = width
    # Save
    document.save(filename)
    logging.info("Docx saved to %s", filename)
def write_vtt(dataframe, filename):
    """Output to VTT format.

    Bug fix: for comments longer than 80 characters, the original dropped
    whatever text remained after the last full 80-character line — the tail
    of every long caption was silently lost. The remainder is now appended
    as the final line.
    """
    logging.info("Writing VTT")
    # Initialize vtt
    vtt = webvtt.WebVTT()
    # Iterate through dataframe
    for _, row in dataframe.iterrows():
        # If the segment has 80 or less characters
        if len(row["comment"]) <= 80:
            caption = webvtt.Caption(
                start=row["start_time"] + ".000",
                end=row["end_time"] + ".000",
                text=row["comment"],
            )
        # If the segment has more than 80 characters, use lines
        else:
            lines = []
            text = row["comment"]
            # Break at the last space before the 80-character limit
            while len(text) > 80:
                text = text.lstrip()
                last_space = text[:80].rindex(" ")
                lines.append(text[:last_space])
                text = text[last_space:]
            # Bug fix: keep the final (<= 80 char) remainder as the last line
            remainder = text.lstrip()
            if remainder:
                lines.append(remainder)
            caption = webvtt.Caption(
                row["start_time"] + ".000", row["end_time"] + ".000", lines
            )
        if row["speaker"]:
            caption.identifier = row["speaker"]
        vtt.captions.append(caption)
    vtt.save(filename)
    logging.info("VTT saved to %s", filename)
def write(transcript_filepath, **kwargs):
    """Main function, write transcript file from json.

    kwargs:
        format: one of 'docx' (default), 'csv', 'sqlite', 'vtt'
        save_as: explicit output path (defaults to the input path with the
            format's suffix)
        tmp_dir: deprecated — raises
    """
    # Performance timer start
    start = perf_counter()
    logging.info("=" * 32)
    logging.debug("Started at %s", start)
    logging.info("Source file: %s", transcript_filepath)
    logging.debug("kwargs = %s", str(kwargs))
    # Load json file as dict
    data = load_json_as_dict(transcript_filepath)
    # Decode transcript
    dataframe = decode_transcript_to_dataframe(data)
    # Output
    output_format = kwargs.get("format", "docx")
    # Deprecated tmp_dir by improving save_as
    if kwargs.get("tmp_dir"):
        logging.warning("tmp_dir in kwargs")
        raise Exception("tmp_dir has been deprecated, use save_as instead")
    # Default suffix per supported output format
    format_suffixes = {"docx": ".docx", "csv": ".csv", "sqlite": ".db", "vtt": ".vtt"}
    if output_format not in format_suffixes:
        raise Exception("Output format should be 'docx', 'csv', 'sqlite' or 'vtt'")
    output_filepath = kwargs.get(
        "save_as",
        Path(transcript_filepath).with_suffix(format_suffixes[output_format]),
    )
    # Dispatch to the chosen writer
    if output_format == "docx":
        write_docx(data, output_filepath)
    elif output_format == "csv":
        dataframe.to_csv(output_filepath)
    elif output_format == "sqlite":
        conn = sqlite3.connect(str(output_filepath))
        dataframe.to_sql("transcript", conn)
        conn.close()
    else:
        write_vtt(dataframe, output_filepath)
    # Performance timer finish
    finish = perf_counter()
    logging.debug("Finished at %s", finish)
    duration = round(finish - start, 2)
    print(f"{output_filepath} written in {duration} seconds.")
    logging.info("%s written in %s seconds.", output_filepath, duration)
|
#!/usr/bin/python
"""
A simple script that:
1 - Connects to the ICOM-M802 via serial port on COM9 (Windows) or ttyUSB4 (Linux). Adjust the COM/TTY ports to match your system setup. Comment out lines 14/15 depending on if you are Linux/Windows based. This is the call that will turn on the ICOM-M802 head-unit if it is off.
2 - "$PICOA,90,00,REMOTE,ON*58" - turns on REMOTE mode
3 - "$CCFSI,123720,123720,m,0*01" - changes channel (to 12,372.0 kHz)
4 - "ser.close()" closed the serial connection. This will turn off the ICOM head-unit again at that point.
NOTE: If you manually turn on your radio and set it to DSC watch-mode then turn it off. Then when the below script is run it will turn on in watch mode. If you skip (comment-out) the middle three steps then it will turn-off the radio while still in DSC watch mode. This is a good method for turning the radio on in DSC-watch mode periodically to listen for DSC calls or position reports.
If all cruisers run the same script that turns the radio on DSC watch at particular times throughout the day then you could keep an almost-continuous watch with very low power. If the clocks across all the boats were well-synced then you could have the radio turn on for scan just 2-3 minutes every hour. This would reduce watch-time to 60-minutes or so per day and consume only 2 or 3 amp-hours.
If you run the middle 3 lines then the radio will be bumped out of DSC watch-mode.
----------------
Some info/resources for more information:
http://www.catb.org/gpsd/NMEA.txt
http://mvvikingstar.blogspot.com.au/2012/10/connecting-and-debugging-your-icom-m802.html
The following page provides evidence that you can control DSC communication via the NMEA interface:
http://continuouswave.com/whaler/reference/DSC_Datagrams.html
The following pages provide info on proprietary NEAM sentences:
http://fort21.ru/download/NMEAdescription.pdf
https://www8.garmin.com/support/pdf/NMEA_0183.pdf
http://www.icomuk.co.uk/files/icom/PDF/productManual/MXP-5000_MXD-5000_Installation_0.pdf
"""
import serial
import time

# Open the serial link to the ICOM-M802; opening the port powers on the
# head-unit. Use the COM9 line on Windows or the ttyUSB4 line on Linux —
# adjust the port name to match your system.
#ser = serial.Serial(port='\\.\COM9', baudrate=4800, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout=10)
ser = serial.Serial(port='/dev/ttyUSB4', baudrate=4800, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout=10)
print("Connected to ICOM-M802")
# Bug fix: pySerial on Python 3 requires bytes, not str — the original str
# writes raise TypeError. The NMEA sentences are therefore byte literals.
# Turn on REMOTE mode
ser.write(b'$PICOA,90,00,REMOTE,ON*58\r\n')
r = ser.readline()
print(r)
time.sleep(4)
# Change channel to 12,372.0 kHz
ser.write(b'$CCFSI,123720,123720,m,0*01\r\n')
r = ser.readline()
print(r)
time.sleep(4)
# Turn REMOTE mode back off
ser.write(b'$PICOA,90,08,REMOTE,OFF*1E\r\n')
r = ser.readline()
print(r)
time.sleep(4)
# Closing the connection turns the head-unit off again
print("Closing connection to ICOM-M802")
ser.close()
|
import datetime
import streamlit as st
#from playsound import playsound
def alarm(alarmH, alarmM, ap):
    """Busy-wait until alarmH:alarmM ('am'/'pm' selected by *ap*), then play song.mp3 via Streamlit.

    Bug fixes vs. the original:
    - the 'pm' conversion added 12 to the *minutes* (alarmM) instead of the hour;
    - the minute check compared the function object `alarm` to the current
      minute, so the condition could never be true.
    """
    if ap == 'pm':
        # Convert 12-hour to 24-hour clock
        # NOTE(review): 12pm becomes hour 24, as in the original conversion —
        # confirm desired handling of 12am/12pm
        alarmH = alarmH + 12
    while True:
        now = datetime.datetime.now()
        if alarmH == now.hour and alarmM == now.minute:
            st.write("Time to wake up")
            audio_file = open("song.mp3", "rb")
            st.audio(audio_file, format='audio/mp3')
            break
|
# Code to perform bit reversal
def bitreversal(N, lo, hi):
    """Flip the bits of N at positions lo..hi (inclusive), counted from the
    most-significant bit of N's binary representation.

    Prints the binary form of N and the resulting decimal value, and
    returns that value (the original returned None).

    Bug fix: the final print used Python 2 statement syntax, which is a
    SyntaxError under Python 3.
    """
    binary_num = bin(N)
    print("Binary of ", N, "is equal to = ", binary_num)
    binary_rep = binary_num[2:]
    # Split into the untouched prefix, the slice to flip, and the suffix
    str1 = binary_rep[0:lo]
    str2 = binary_rep[lo:hi + 1]
    str3 = binary_rep[hi + 1:]
    # Flip every bit in the selected slice ('0' <-> '1')
    str2_new = str2.translate(str.maketrans('01', '10'))
    new_binary = str1 + str2_new + str3
    output_decimal = int(new_binary, base=2)
    print("The output decimal number is : ", output_decimal)
    return output_decimal


bitreversal(150, 2, 4)
from .crosslingual_vectors import Crosslingual
from torchtext import data
from .NERDataset import NERDataset
from torchtext.datasets import SequenceTaggingDataset
import logging
import numpy as np
import torch
import math
# Root directory (relative) where all corpus files live
DATA_RELATIVE_PATH = 'data'
logger = logging.getLogger("data")
# predefine a label_set: PER - 1, LOC - 2, ORG - 3, MISC - 4, O - 5
# Collapses IOB prefixes (B-/I-) down to the bare entity type.
labels_map = {
    'B-ORG': 'ORG',
    'O': 'O',
    'B-MISC': 'MISC',
    'B-PER': 'PER',
    'I-PER': 'PER',
    'B-LOC': 'LOC',
    'I-ORG': 'ORG',
    'I-MISC': 'MISC',
    'I-LOC': 'LOC'}
# Casing-category -> index used by the casing feature (see getCasing)
caseLookup = {
    'numeric': 0,
    'allLower': 1,
    'allUpper': 2,
    'initialUpper': 3,
    'other': 4,
    'mainly_numeric': 5,
    'contains_digit': 6}
# '<lang>.<split>' -> corpus file path; plus the cross-lingual embedding
# file and the CADEC corpus
mapping_files = {
    'en.train': DATA_RELATIVE_PATH + '/conll2003/eng.train.txt',
    'en.testa': DATA_RELATIVE_PATH + '/conll2003/eng.testa.txt',
    'en.testb': DATA_RELATIVE_PATH + '/conll2003/eng.testb.txt',
    'de.train': DATA_RELATIVE_PATH + '/conll2003/deu.train.txt',
    'de.testa': DATA_RELATIVE_PATH + '/conll2003/deu.testa.txt',
    'de.testb': DATA_RELATIVE_PATH + '/conll2003/deu.testb.txt',
    'es.train': DATA_RELATIVE_PATH + '/conll2002/esp.train.txt',
    'es.testa': DATA_RELATIVE_PATH + '/conll2002/esp.testa.txt',
    'es.testb': DATA_RELATIVE_PATH + '/conll2002/esp.testb.txt',
    'nl.train': DATA_RELATIVE_PATH + '/conll2002/ned.train.txt',
    'nl.testa': DATA_RELATIVE_PATH + '/conll2002/ned.testa.txt',
    'nl.testb': DATA_RELATIVE_PATH + '/conll2002/ned.testb.txt',
    'fifty_nine.cca.normalized': DATA_RELATIVE_PATH + '/fifty_nine.cca.normalized',
    'cadec': DATA_RELATIVE_PATH + '/cadec/cadec.conll'}
class Conll_dataset():
    """Wrapper around the CoNLL 2002/2003 and CADEC NER corpora (torchtext).

    Builds word / character / casing / label fields and their vocabularies,
    and maintains a labeled/unlabeled split of the training set for
    active-learning style workflows (see label / pseudo_label / reset).
    """

    def __init__(self, opt, train=True, tag_type='ner'):
        # opt: option namespace; opt.train / opt.test name the corpus to load
        self.opt = opt
        opt.lang = opt.train if train else opt.test
        if(opt.lang.lower() == 'cadec'):
            inputs_word, inputs_char, inputs_case, labels = self.cadec(
                opt, tag_type=tag_type)
        else:
            inputs_word, inputs_char, inputs_case, labels = self.conll(
                opt, tag_type=tag_type)
        # Sanity check: example ids must be unique within each split
        self.check_ids(self.train)
        self.check_ids(self.val)
        self.check_ids(self.test)
        # Build vocab
        inputs_char.build_vocab(
            self.train.inputs_char,
            self.val.inputs_char,
            self.test.inputs_char,
            max_size=opt.maxcharvocab)
        inputs_case.build_vocab(
            self.train.inputs_case,
            self.val.inputs_case,
            self.test.inputs_case)
        inputs_word.build_vocab(self.train.inputs_word, self.val.inputs_word, self.test.inputs_word, max_size=opt.maxvocab,
                                # vectors ="fasttext.en.300d")
                                vectors=[Crosslingual(mapping_files['fifty_nine.cca.normalized'])] if opt.pre_embs else None)
        labels.build_vocab(self.train.labels)
        self.vocabs = inputs_word.vocab, inputs_char.vocab, inputs_case.vocab, labels.vocab
        self.count_new, self.train_unlabeled = 0, []
        self.gpu = opt.gpu
        self.labeled = opt.labeled
        # Keep for reseting
        self.keep_duplicates()
        if(opt.labeled != -1):
            # Create unlabeled dataset
            ratio = (opt.labeled * 1.) / len(self.train.examples)
            if ratio != 0:
                self.train, self.train_unlabeled = self.train.split(ratio)
            else:
                self.train_unlabeled = data.Dataset(
                    examples=self.train.examples, fields=self.fields)
                self.train.examples = []
        if(opt.budget is None):
            opt.budget = len(self.train_unlabeled)
        logger.info('Train size: %d' % (len(self.train)))
        logger.info('Validation size: %d' % (len(self.val)))
        logger.info('Test size: %d' % (len(self.test)))
        logger.info('Unlabeled size: %d' % (len(self.train_unlabeled)))
        logger.info('Input word vocab size:%d' % (len(inputs_word.vocab)))
        logger.info('Input char vocab size:%d' % (len(inputs_char.vocab)))
        logger.info('Input case vocab size:%d' % (len(inputs_case.vocab)))
        logger.info('Tagset size: %d' % (len(labels.vocab)))
        logger.info('Tag set:[{}]'.format(','.join(labels.vocab.itos)))
        logger.info('----------------------------')

    def conll(self, opt, tag_type='ner'):
        """
        conll2003: Conll 2003 (Parser only. You must place the files)
        Extract Conll2003 dataset using torchtext. Applies GloVe 6B.200d and Char N-gram
        pretrained vectors. Also sets up per word character Field
        tag_type: Type of tag to pick as task [pos, chunk, ner]
        """
        logger.info(
            '---------- CONLL 2003 %s lang = %s ---------' %
            (tag_type, opt.lang))
        train_file = mapping_files['.'.join([opt.lang, 'train'])]
        dev_file = mapping_files['.'.join([opt.lang, 'testa'])]
        test_file = mapping_files['.'.join([opt.lang, 'testb'])]
        # English files are utf8; the other CoNLL corpora ship as latin-1
        encoding = 'utf8' if opt.lang == "en" else 'latin-1'
        # Setup fields with batch dimension first
        inputs_word = data.Field(
            batch_first=True,
            fix_length=opt.maxlen,
            lower=opt.lower,
            preprocessing=data.Pipeline(
                lambda w: '0' if opt.convert_digits and w.isdigit() else w))
        inputs_char_nesting = data.Field(
            tokenize=list, batch_first=True, fix_length=opt.maxlen)
        inputs_char = data.NestedField(inputs_char_nesting)
        inputs_case = data.Field(
            batch_first=True,
            fix_length=opt.maxlen,
            preprocessing=data.Pipeline(
                lambda w: self.getCasing(w)))
        labels = data.Field(batch_first=True, unk_token=None, fix_length=opt.maxlen,  # pad_token=None,
                            preprocessing=data.Pipeline(lambda w: labels_map[w]))
        # NOTE(review): `id` shadows the builtin of the same name
        id = data.Field(batch_first=True, use_vocab=False)
        # Column layout differs per corpus: en has pos/chunk/ner columns,
        # de carries an extra column before its tags, nl has pos/ner, and
        # the remaining corpora (es) carry only ner
        if(opt.lang == "en"):
            self.fields = (
                [(('inputs_word', 'inputs_char', 'inputs_case'),
                  (inputs_word, inputs_char, inputs_case))]
                + [('labels', labels) if label == tag_type else (None, None)
                   for label in ['pos', 'chunk', 'ner']]
                + [('id', id)])
        elif(opt.lang == "de"):
            self.fields = (
                [(('inputs_word', 'inputs_char', 'inputs_case'),
                  (inputs_word, inputs_char, inputs_case))]
                + [('idk', None)]
                + [('labels', labels) if label == tag_type else (None, None)
                   for label in ['pos', 'chunk', 'ner']]
                + [('id', id)])
        elif(opt.lang == "nl"):
            self.fields = (
                [(('inputs_word', 'inputs_char', 'inputs_case'),
                  (inputs_word, inputs_char, inputs_case))]
                + [('labels', labels) if label == tag_type else (None, None)
                   for label in ['pos', 'ner']]
                + [('id', id)])
        else:
            self.fields = (
                [(('inputs_word', 'inputs_char', 'inputs_case'),
                  (inputs_word, inputs_char, inputs_case))]
                + [('labels', labels) if label == tag_type else (None, None)
                   for label in ['ner']]
                + [('id', id)])
        # Load the data
        self.train, self.val, self.test = NERDataset.splits(
            path='.',
            train=train_file,
            validation=dev_file,
            test=test_file,
            separator=' ',
            encoding=encoding,
            fields=tuple(self.fields))
        return inputs_word, inputs_char, inputs_case, labels

    def cadec(self, opt, tag_type='ner'):
        """
        cadec: CADEC (Parser only. You must place the files)
        Extract CADEC dataset using torchtext.
        """
        logger.info('---------- CADEC = %s ---------' % (tag_type))
        train_file = mapping_files[opt.lang]
        # Setup fields with batch dimension first
        inputs_word = data.Field(
            batch_first=True,
            fix_length=opt.maxlen,
            lower=opt.lower,
            preprocessing=data.Pipeline(
                lambda w: '0' if opt.convert_digits and w.isdigit() else w))
        inputs_char_nesting = data.Field(
            tokenize=list, batch_first=True, fix_length=opt.maxlen)
        inputs_char = data.NestedField(inputs_char_nesting)
        inputs_case = data.Field(
            batch_first=True,
            fix_length=opt.maxlen,
            preprocessing=data.Pipeline(
                lambda w: self.getCasing(w)))
        labels = data.Field(
            batch_first=True,
            unk_token=None,
            fix_length=opt.maxlen)  # pad_token=None,
        # preprocessing=data.Pipeline(lambda w: labels_map[w]))
        id = data.Field(batch_first=True, use_vocab=False)
        self.fields = (
            [(('inputs_word', 'inputs_char', 'inputs_case'),
              (inputs_word, inputs_char, inputs_case))]
            + [('labels', labels) if label == tag_type else (None, None)
               for label in ['ner']]
            + [('id', id)])
        # Load the data
        datafile = NERDataset.splits(
            path='.',
            train=train_file,
            separator='\t',
            encoding='utf-8',
            fields=tuple(self.fields))[0]
        # Fixed-size 5610/1000/1000 train/val/test split
        self.train, self.val, self.test = datafile.split(
            split_ratio=[5610, 1000, 1000])
        return inputs_word, inputs_char, inputs_case, labels

    def check_ids(self, examples):  # no duplicate ids!
        """Assert every example id in the given split is unique."""
        a = [i.id[0] for i in examples]
        assert len(a) == len(set(a))

    def keep_duplicates(self):
        """Snapshot the current train/val/test splits so reset() can restore them."""
        self.temp_train = data.Dataset(
            examples=self.train.examples,
            fields=self.fields)
        self.temp_val = data.Dataset(
            examples=self.val.examples,
            fields=self.fields)
        self.temp_test = data.Dataset(
            examples=self.test.examples,
            fields=self.fields)

    def reset(self):
        """Restore the splits saved by keep_duplicates and rebuild the labeled/unlabeled pools."""
        self.train = self.temp_train
        self.val = self.temp_val
        self.test = self.temp_test
        self.keep_duplicates()
        self.count_new = 0
        if(self.labeled != -1):
            # Create unlabeled dataset
            ratio = self.labeled / len(self.train)
            if ratio != 0:
                self.train, self.train_unlabeled = self.train.split(ratio)
            else:
                self.train_unlabeled = data.Dataset(
                    examples=self.train.examples, fields=self.fields)
                self.train.examples = []

    def batch_iter(self, batch_size):
        """Return (train, val, test, unlabeled) BucketIterators; the unlabeled
        iterator uses batches n_ubatches times larger."""
        if(self.opt.adaptive_batch_size):
            batch_size = int(math.ceil(len(self.train) /
                                       self.opt.adaptive_batch_size))
        # Get iterators
        unlabeled_iter, _, _ = data.BucketIterator.splits(
            (self.train_unlabeled, self.val, self.test), batch_size=batch_size*self.opt.n_ubatches, shuffle=True,
            sort_key=lambda x: data.interleave_keys(len(x.inputs_word), len(x.inputs_char)),
            device=torch.device("cuda:" + str(self.gpu) if self.gpu != -1 else "cpu"))
        train_iter, val_iter, test_iter = data.BucketIterator.splits(
            (self.train, self.val, self.test), batch_size=batch_size, shuffle=True,
            sort_key=lambda x: data.interleave_keys(len(x.inputs_word), len(x.inputs_char)),
            device=torch.device("cuda:" + str(self.gpu) if self.gpu != -1 else "cpu"))
        train_iter.repeat = False
        return train_iter, val_iter, test_iter, unlabeled_iter

    def label(self, example):
        """Move *example* from the unlabeled pool into the labeled training set."""
        self.train.examples.append(example)
        # NOTE(review): removes from a list while iterating it; safe here
        # only because ids are unique so at most one element is removed
        for i in self.train_unlabeled.examples:
            if(i.id == example.id):
                assert i.inputs_word == example.inputs_word
                assert i.labels == example.labels
                self.train_unlabeled.examples.remove(i)
        #self.train_unlabeled.examples = [i for i in self.train_unlabeled.examples if i.id!=example.id]
        # self.train_unlabeled.examples.remove(example)
        self.count_new += 1

    def pseudo_label(self, example, model):
        """Move *example* into the training set with a model-predicted label
        instead of its gold label."""
        # Add example with new label
        temp_example = None
        # NOTE(review): removes from a list while iterating; at most one id matches
        for i in self.train_unlabeled.examples:
            if(i.id == example.id):
                temp_example = i
                self.train_unlabeled.examples.remove(i)
        assert temp_example.inputs_word == example.inputs_word
        assert temp_example.labels == example.labels
        prediction = self.get_prediction(example, self.fields, model)
        temp_example.labels = prediction
        self.train.examples.append(temp_example)
        self.count_new += 1

    def sample_unlabeled(self, k_num):
        """Return a Dataset of k_num examples sampled at random from the unlabeled pool."""
        # TODO: remove sampling for unlabeled: cluster?
        # Random sample k points from D_pool
        unlabeled_pool = self.train_unlabeled
        indices = np.arange(len(unlabeled_pool.examples))
        np.random.shuffle(indices)
        sampled_examples = [example for count, example in enumerate(
            unlabeled_pool.examples) if count in indices[:k_num]]
        unlabeled_entries = data.Dataset(sampled_examples, self.fields)
        return unlabeled_entries

    def sample_validation(self, k_num):
        """Shrink the validation split in place to k_num randomly-sampled examples."""
        validation_pool = self.val
        indices = np.arange(len(validation_pool.examples))
        np.random.shuffle(indices)
        sampled_examples = [example for count, example in enumerate(
            validation_pool.examples) if count in indices[:k_num]]
        self.val.examples = sampled_examples
        logger.info('Sampled Validation size: %d' % (len(self.val)))
        # np.random.shuffle(dataset.val.examples)
        #dataset.val.examples = dataset.val.examples[:opt.labeled]
        #logger.info('Sampled Validation size: %d' % (len(dataset.val)))

    # https://github.com/mxhofer/Named-Entity-Recognition-BidirectionalLSTM-CNN-CoNLL/
    # define casing s.t. NN can use case information to learn patterns
    def getCasing(self, word):
        """Map *word* to a casing-category index (see caseLookup)."""
        casing = 'other'
        numDigits = 0
        for char in word:
            if char.isdigit():
                numDigits += 1
        digitFraction = numDigits / float(len(word))
        if word.isdigit():  # Is a digit
            casing = 'numeric'
        elif digitFraction > 0.5:
            casing = 'mainly_numeric'
        elif word.islower():  # All lower case
            casing = 'allLower'
        elif word.isupper():  # All upper case
            casing = 'allUpper'
        elif word[0].isupper():  # is a title, initial char upper, then all lower
            casing = 'initialUpper'
        elif numDigits > 0:
            casing = 'contains_digit'
        return caseLookup[casing]

    def get_prediction(self, example, fields, model):  # Predict label
        """Run *model* on a single example and return its predicted tag
        sequence, truncated to the length of the gold label sequence."""
        pseudo_example = data.Dataset(examples=[example], fields=fields)
        pseudo_example = data.BucketIterator(
            dataset=pseudo_example,
            batch_size=1,
            repeat=False,
            shuffle=False,
            device=torch.device(
                "cuda:" + str(self.gpu) if self.gpu != -1 else "cpu"))
        assert len(pseudo_example) == 1
        # Torch models take the batch directly; other models expose iter_to_xy
        if isinstance(model, torch.nn.Module):
            pad = model.wordrepr.tag_vocab.stoi['<pad>']
            pseudo_example = list(pseudo_example)[0]
            _, _, prediction = model(pseudo_example)
            y = list(
                filter(
                    lambda x: x != pad,
                    pseudo_example.labels.data.tolist()[0]))
        else:
            x, y = model.iter_to_xy(pseudo_example)
            _, _, prediction = model(x, y)
            y = list(filter(lambda x: x != '<pad>', y[0]))
        assert len(prediction) == 1
        # NOTE(review): 'lib' is not imported in this module's visible
        # imports — confirm it is available at runtime
        prediction = lib.utils.indices2words(
            prediction, model.wordrepr.tag_vocab)
        #y = lib.utils.indices2words([y], model.wordrepr.tag_vocab)
        #tokens = lib.utils.indices2words([pseudo_example.inputs_word.data.tolist()[0]], model.wordrepr.word_vocab)
        prediction = prediction[0]
        # shrink prediction to same len as labels
        prediction = prediction[:len(y)]
        return prediction
|
from pyspark.sql import *
from pyspark.sql.types import *
import os
import shutil
import subprocess
# Module-level local SparkSession shared by every helper below.
# NOTE(review): "spark.some.config.option" looks like placeholder config
# copied from the Spark docs — confirm it is intentional.
spark = SparkSession.builder \
    .master("local") \
    .appName("Data Integration") \
    .config("spark.some.config.option", "some-value") \
    .getOrCreate()
def get_or_create_dataframe(schema, path=None, format="parquet"):
    """Load a dataframe from *path* if it exists, else create an empty one
    with *schema*.

    Bug fixes: the *format* argument is now honoured (the original ignored
    it and hard-coded "parquet"), and the default path=None no longer
    crashes os.path.exists.
    """
    if path is not None and os.path.exists(path):
        df = spark.read.format(format).load(path, schema=schema)
    else:
        df = spark.createDataFrame([], schema)
    return df
def append_new_row(df_name, schema, row):
    """Append *row* to the dataframe persisted at *df_name* and return the union."""
    existing = get_or_create_dataframe(schema, df_name)
    addition = spark.createDataFrame(row)
    return existing.union(addition)
def save(df, df_name):
    """Overwrite the dataset at *df_name* with *df* via a temp directory.

    Writes to ``tmp_<df_name>`` first and only then swaps it into place, so
    a failed write never clobbers the existing data.

    Fix: the original shelled out to ``rm -r`` / ``mv`` via subprocess;
    ``shutil`` (already imported at module top, previously unused) does the
    same local filesystem operations portably and without spawning processes.
    """
    tmp_df_name = "tmp_" + df_name
    df.write.save(tmp_df_name, format="parquet")
    if os.path.exists(df_name):
        shutil.rmtree(df_name)  # replaces: subprocess.run(["rm", "-r", ...])
    if os.path.exists(tmp_df_name):
        shutil.move(tmp_df_name, df_name)  # replaces: subprocess.run(["mv", ...])
# Relation name -> Spark schema + parquet path for each extracted relation.
rel2schema = {
    "kill": {
        "schema": StructType([
            StructField("killer", StringType(), True),
            StructField("victim", StringType(), True)]),
        "df_path": "kill.parquet"
    },
    "work_for": {
        "schema": StructType([
            StructField("person", StringType(), True),
            StructField("organization", StringType(), True)]),
        "df_path": "work_for.parquet"
    },
    "live_in": {
        "schema": StructType([
            StructField("person", StringType(), True),
            StructField("location", StringType(), True)]),
        "df_path": "live_in.parquet"
    },
    "located_in": {
        # NOTE(review): both columns are named "location" — a duplicate
        # column name in one StructType. Presumably the second should be
        # distinct (e.g. the containing location); confirm before changing
        # the on-disk schema.
        "schema": StructType([
            StructField("location", StringType(), True),
            StructField("location", StringType(), True)]),
        "df_path": "located_in.parquet"
    },
    "orgbased_in": {
        "schema": StructType([
            StructField("organization", StringType(), True),
            StructField("location", StringType(), True)]),
        "df_path": "orgbased_in.parquet"
    }
}
def integrate(triples):
    """Persist every (relation, arg1, arg2) triple into its relation dataset."""
    for triple_list in triples:
        for relation, arg1, arg2 in triple_list:
            config = rel2schema[relation.lower()]
            updated = append_new_row(config["df_path"], config["schema"],
                                     [(arg1, arg2)])
            save(updated, config["df_path"])
|
import torch.nn as nn
from .utils import repeat_module, LayerNorm, SublayerConnection
class Encoder(nn.Module):
    """Stack of N identical encoder layers followed by a final LayerNorm."""
    def __init__(self, layer, N):
        super().__init__()
        self.layers = repeat_module(layer, N)
        self.norm = LayerNorm(layer.model_dim)
    def forward(self, x, mask):
        """Run *x* through every layer in order, then normalize the result."""
        for encoder_layer in self.layers:
            x = encoder_layer(x, mask)
        return self.norm(x)
class EncoderLayer(nn.Module):
    """One encoder layer: self-attention sublayer + feed-forward sublayer,
    each wrapped in a residual SublayerConnection."""
    def __init__(self, model_dim, self_attn, fc_net, dropout):
        super().__init__()
        self.self_attn = self_attn
        self.fc_net = fc_net
        self.sublayers = repeat_module(SublayerConnection(model_dim, dropout), 2)
        self.model_dim = model_dim
    def forward(self, x, mask):
        """Apply masked self-attention, then the feed-forward network."""
        def attend(inp):
            # Query, key and value are all the layer input (self-attention).
            return self.self_attn(inp, inp, inp, mask)
        attended = self.sublayers[0](x, attend)
        return self.sublayers[1](attended, self.fc_net)
from models.users import UserModel
from flask_restful import reqparse,Resource
class UserRegister(Resource):
    """Registration endpoint: creates a user unless the username is taken."""
    # Request payload schema — both fields are mandatory strings.
    parser = reqparse.RequestParser()
    parser.add_argument('username', type=str, required=True,
                        help='This field is required')
    parser.add_argument('password', type=str, required=True,
                        help='This field is required')
    def post(self):
        payload = UserRegister.parser.parse_args()
        existing = UserModel.find_by_username(payload["username"])
        if existing:
            return {"message":"user already exist","user_id":existing.id}
        UserModel(**payload).save_to_db()
        # Re-query so the database-assigned id is available for the response.
        created = UserModel.find_by_username(payload["username"])
        return {"message":"user registered successfully","user_id":created.id}
import json
from flask import Blueprint, render_template, request, redirect, url_for
from src.models.bsb.orders.order import BSBOrder
from src.models.bsb.orders.utils import handleRequestForm
__author__ = 'nabee1'
# Flask blueprint for the BSB-orders views below; presumably mounted by the
# application under an orders URL prefix — confirm at the registration site.
bsborder_blueprint = Blueprint('bsborders', __name__)
@bsborder_blueprint.route('/')
def index():
    """Render the BSB order search page with the selectable regions."""
    region_choices = BSBOrder.generate_regions_list()
    return render_template('BSBOrders/bsborder_index.jinja2',
                           regions=region_choices)
@bsborder_blueprint.route('/bsborders_query_results', methods = ['POST', 'GET'])
def bsborders_query():
    """Run an order search from the submitted form and render the results.

    Bug fix: the route accepts GET but the original body only handled POST,
    so a GET request fell off the end and returned None (HTTP 500 in Flask).
    GET now redirects back to the search form.
    """
    if request.method != 'POST':
        return redirect(url_for('bsborders.index'))
    # How each form field is transformed into a Mongo query fragment.
    transform_field_config = {
        "orderDetailID": ["CommaString"],
        "playerID": ["CommaString"],
        "payment_Date_Start": ["DateString"],
        "payment_Date_End": ["DateString"],
        "region": ["DropdownString"],
        "playerName": ["CommaString"],
        "userName": ["CommaString"]
    }
    mongo_query = handleRequestForm(request.form, transform_field_config)
    bsborders = BSBOrder.find_by_multiple_filters(mongo_query)
    print("# of search results:", len(bsborders))
    return render_template('BSBOrders/bsborders.jinja2', bsborders=bsborders, query=mongo_query)
|
from mezzanine.conf import register_setting
from django.utils.translation import ugettext_lazy as _  # aliased '_' for translations

# Register custom settings so their values are editable in the Django admin
# (http://mezzanine.jupo.org/docs/configuration.html#registering-settings)
# and readable in views via ``from mezzanine.conf import settings``.
# Template visibility is granted separately at the bottom of this module.

# (name, label, description, default) for every editable setting. All are
# registered identically, so register_setting() is driven from this table.
# Descriptions are preserved byte-for-byte from the original registrations
# (including the double space in the Tumblr entry).
_EDITABLE_SETTINGS = (
    ("SOCIAL_LINK_FACEBOOK", "Facebook link",
     "If present a Facebook icon linking here will be in the header.",
     "https://facebook.com/mezzatheme"),
    ("SOCIAL_LINK_FLICKR", "Flickr link",
     "If present a Flickr icon linking here will be in the header.", ""),
    ("SOCIAL_LINK_GPLUS", "Google plus link",
     "If present a Google-plus icon linking here will be in the header", ""),
    ("SOCIAL_LINK_TWITTER", "Twitter link",
     "If present a Twitter icon linking here will be in the header.",
     "https://twitter.com/MEZZaTHEME"),
    ("SOCIAL_LINK_DELICIOUS", "Delicious link",
     "If present a delicious icon linking here will be in the header", ""),
    ("SOCIAL_LINK_TUMBLR", "Tumblr link",
     "If present a tumblr icon linking here will be in the  header", ""),
    ("SOCIAL_LINK_GPG_KEY", "Public key for gpg",
     "Link to gpg public key on keyserver header.", ""),
    ("SOCIAL_LINK_UPWORK_PROFILE", "Upwork profile",
     "Link to upwork profile in header", ""),
    ("SOCIAL_LINK_EMAIL", "Email address",
     "Email address for contact", "me@somewhere.com"),
    ("GMAP_LOC", "Google map location",
     "Centre address for google maps. ", "London, UK"),
    ("GMAP_APIKEY", "Google maps API key",
     "Google maps API Key", " "),
    ("GMAP_ZOOM", "Google map zoom level",
     "Google maps zoom level", "4"),
    ("GMAP_DISABLE_UI", "User control of map disabled",
     "Can user zoom, pan etc disabled?", False),
    ("GMAP_ICON_SIZE", "Size of marker (px)",
     "The size of icon on the map in pixels", 16),
    ("PORTFOLIO_ITEMS_PER_PAGE", "Portfolio items per page",
     "The number of portfolio items per page (restart after change)", 6),
)

for _name, _label, _description, _default in _EDITABLE_SETTINGS:
    register_setting(
        name=_name,
        label=_(_label),
        description=_(_description),
        editable=True,
        default=_default,
    )

# TEMPLATE_ACCESSIBLE_SETTINGS is one of the existing settings specifying all
# setting names available within templates; append ours so templates can use
# them too.
register_setting(
    name="TEMPLATE_ACCESSIBLE_SETTINGS",
    append=True,  # extend the existing template-accessible settings
    default=("SOCIAL_LINK_FACEBOOK",
             "SOCIAL_LINK_TWITTER",
             "SOCIAL_LINK_FLICKR",
             "SOCIAL_LINK_GPLUS",
             "SOCIAL_LINK_TUMBLR",
             "SOCIAL_LINK_DELICIOUS",
             "SOCIAL_LINK_UPWORK_PROFILE",
             "SOCIAL_LINK_GPG_KEY",
             "SOCIAL_LINK_EMAIL",
             "GMAP_LOC",
             "GMAP_ZOOM",
             "GMAP_APIKEY",
             "GMAP_DISABLE_UI",
             "GMAP_ICON_SIZE",
             "PORTFOLIO_ITEMS_PER_PAGE",
             ),
)
|
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from sqlalchemy import create_engine
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship, backref
Base = declarative_base()  # shared declarative base for all ORM models below
# Domain (value object) definition
class User(Base):
    """ORM mapping for the `users` table in the `scoach` database."""
    __tablename__ = 'users' # table name
    # Column definitions (mapped fields).
    # NOTE(review): counts, kcal and date parts are all stored as String(30) —
    # confirm whether numeric/date column types were intended.
    index = Column(Integer, autoincrement=True, primary_key=True) # table sequence (surrogate key)
    user_id = Column(String(30)) # device id
    exe_name = Column(String(30)) # detected exercise type
    exe_count = Column(String(30)) # exercise repetition count
    exe_kcal = Column(String(30)) # calories burned by the exercise
    exe_year = Column(String(30)) # year the exercise was done
    exe_month = Column(String(30)) # month the exercise was done
    exe_day = Column(String(30)) # day the exercise was done
    # Constructor
    def __init__(self, user_id, exe_name, exe_count, exe_kcal, exe_year, exe_month, exe_day):
        self.user_id = user_id
        self.exe_name = exe_name
        self.exe_count = exe_count
        self.exe_kcal = exe_kcal
        self.exe_year = exe_year
        self.exe_month = exe_month
        self.exe_day = exe_day
    # Debug-friendly representation listing every mapped field of the row.
    def __repr__(self):
        return "<User('%s','%s','%s','%s','%s','%s','%s')>" % (self.user_id, self.exe_name, self.exe_count, self.exe_kcal, self.exe_year, self.exe_month, self.exe_day)
# NOTE(review): DB credentials are hard-coded in the URL — move to config.
engine = create_engine("mysql://root:sean@127.0.0.1/scoach", encoding='utf8', echo=True) # DB connection info
Base.metadata.create_all(engine) # create all tables mapped on Base's metadata
|
from pycocotools.coco import COCO
import numpy as np
import skimage.io as io # pip3 install scikit-image
import matplotlib.pyplot as plt
import pylab
import os
def parameters(data_dir='../bipolar_data', data_type='robot_bipolar'):
    """Build the COCO-style configuration dict for the bipolar dataset.

    Generalized: the dataset directory and type, previously hard-coded
    locals, are now parameters whose defaults are the old values, so
    existing ``parameters()`` callers are unchanged.

    Args:
        data_dir: root directory of the dataset.
        data_type: dataset split/type name used in the annotation filename.

    Returns:
        dict with keys 'dataDir', 'dataType', 'annFile', 'plot_size'.
    """
    param = {}
    #pylab.rcParams['figure.figsize'] = (8.0, 10.0)
    param['dataDir'] = data_dir
    param['dataType'] = data_type
    # Standard COCO annotation-file layout.
    param['annFile'] = '{}/annotations/instances_{}.json'.format(data_dir, data_type)
    # Figure size (inches) for pylab/matplotlib plots.
    param['plot_size'] = (8.0, 10.0)
    return param
|
from modules.facility import facility
import pandas as pd  # FIX: `pd` was used below but never imported (NameError at runtime)

# One-shot simulation of the DETROITMI facility. Material flows through the
# stages RMI -> CFR -> PFI -> PFO -> PIS -> PCK, each exposing generator-based
# unload/load steps. NOTE(review): stage acronyms are not defined here —
# confirm their meanings in modules.facility.
detroit = facility('DETROITMI')
rmi = detroit.rmi
cfr = detroit.cfr
pfi = detroit.pfi
pfo = detroit.pfo
pis = detroit.pis
pck = detroit.pck
time = 0
# Seed the raw-material inventory with two coloring-agent transfers.
transfer = pd.DataFrame(
    {
        'jb_color':['Coloring Agent1', 'Coloring Agent18'],
        'amount':[45000, 250000]
    }
)
rmi.load_drums(transfer, time=time)
# Finite supplies of flavors and package types, consumed one per load.
# NOTE(review): next() on an exhausted generator raises StopIteration if a
# run needs more than listed — confirm this is the intended stop condition.
flavors = ['F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12', 'F13', 'F14', 'F15']
flavor = (flavor for flavor in flavors)
package_types = ['Box', 'Box', 'Box', 'Bag', 'Bag']
package_type = (pack for pack in package_types)
# Nested while/else chains: each `else: break` fires when an inner stage runs
# dry (the while condition goes false), breaking the enclosing loop too.
while len(pis.empty_drums)!=0:
    pck_in = next(pis.unload_drums())
    pck_in['package_type'] = next(package_type)
    pck_time = pck.load_machines(**pck_in)
    print("Packing Time: {0}".format(pck_time))
    time += pck_time
    print("Global Time: {0}".format(time))
    while len(pfo.avail_mach)!=0:
        pis_cap = min([x.capacity for x in pis.empty_drums])
        pfo_out = next(pfo.unload_machines(pis_cap))
        pis.load_drums(pfo_out, time=time)
        while len(pfi.empty_drums)!=0:
            pfo_in = next(pfi.unload_drums())
            pfo_in['jb_flavor'] = next(flavor)
            pfo_time = pfo.load_machines(**pfo_in)
            print("PFO Time: {0}".format(pfo_time))
            time += pfo_time
            print("Global Time: {0}".format(time))
            while len(cfr.avail_mach)!=0:
                pfi_cap = min([x.capacity for x in pfi.empty_drums])
                cfr_out = next(cfr.unload_machines(pfi_cap))
                pfi.load_drums(cfr_out, time=time)
                while len(rmi.empty_drums)!=0:
                    cfr_in = next(rmi.unload_drums())
                    cfr_time = cfr.load_machines(**cfr_in)
                    print("Classifier Time: {0}".format(cfr_time))
                    time += cfr_time
                    print("Global Time: {0}".format(time))
                else:
                    break
            else:
                break
        else:
            break
    else:
        break
|
# Generated by Django 2.0.3 on 2018-03-14 06:30
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration creating the "ConatacForm" consultation-request
    # model. NOTE(review): the name looks like a typo of "ContactForm", but it
    # is frozen in migration history — renaming requires a follow-up
    # migration, not an edit here.
    dependencies = [
        ('orders', '0005_auto_20180314_0610'),
    ]
    operations = [
        migrations.CreateModel(
            name='ConatacForm',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='имя')),
                ('tel', models.IntegerField(verbose_name='тел')),
                ('date', models.DateTimeField(auto_now_add=True, verbose_name='дата')),
            ],
            options={
                'verbose_name': 'Заявка на консультацию',
                'verbose_name_plural': 'Заявки на консультацию',
                'ordering': ['date'],
            },
        ),
    ]
|
from django.db import models
# Create your models here.
class Product(models.Model):
    """A product/course entry shown in the course list."""
    # Product image; falls back to a bundled placeholder asset.
    img = models.ImageField(upload_to='img/course_list', default='assets/images/product_03.jpg')
    price = models.DecimalField(max_digits=10, decimal_places=2)  # price, 2 decimal places
    name = models.CharField(max_length=100, default="")
    desc = models.TextField(max_length=1000, blank=True, null=True)  # optional long description
    url = models.CharField(max_length=200)  # link target — presumably the product page; confirm
|
# Box-ID matcher (Advent-of-Code day-2 style): for each pair of lines that
# differ in exactly one position, print the pair and their shared characters.
# Fixes over the original flat script: the input file is now closed (context
# manager), the builtin name `str` is no longer shadowed, the logic runs
# under a __main__ guard, and the file path is a parameter for testability.
def find_common_letters(filepath='input.txt'):
    """Return the common-character strings for every pair of lines that
    differ in exactly one position; each matching pair is also printed.

    Lines of unequal length are skipped (they cannot differ in exactly one
    aligned position).
    """
    with open(filepath, 'r') as f:
        box_ids = [line.rstrip('\n') for line in f]
    matches = []
    for i, first in enumerate(box_ids):
        for second in box_ids[i + 1:]:
            if len(first) != len(second):
                continue
            diffs = sum(1 for a, b in zip(first, second) if a != b)
            if diffs == 1:
                print(first, second)
                common = ''.join(a for a, b in zip(first, second) if a == b)
                print(common)
                matches.append(common)
    return matches


if __name__ == '__main__':
    find_common_letters()
|
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponse
from .models import *
from django.core.paginator import Paginator
from django.db.models import Q
from django.core.exceptions import ValidationError
class BlogObjectsMixin:
    """List-view mixin: title/body search plus optional pagination.

    Bug fix: the original mutated the class-level ``context = {}`` dict on
    every request — that dict is shared between requests AND between every
    class using this mixin, so stale keys leaked across views. Per-request
    data now goes into a local copy.
    """
    model = None       # model class whose objects are listed
    url = None         # template path to render
    paginator = False  # enable pagination when True
    context = {}       # kept for interface compatibility; no longer mutated

    def get(self, request):
        """Render the object list, filtered by ?searcher= and paged by ?page=."""
        search = request.GET.get('searcher', '')
        if search:
            objects = self.model.objects.filter(Q(title__icontains=search) | Q(body__icontains=search))
        else:
            objects = self.model.objects.all()
        context = dict(self.context)  # local copy: never mutate the class attribute
        if self.paginator:
            pagin = Paginator(objects, 6)
            page = request.GET.get('page', '1')
            if int(page) not in pagin.page_range:
                page = 1  # clamp out-of-range pages back to the first page
            objects = pagin.get_page(page)
            context['pagin'] = pagin
        context[self.model.__name__.lower()] = objects
        return render(request, self.url, context=context)
class BlogObjectMixin:
    """Detail-view mixin: fetch one object by slug, with optional pagination
    of its related posts.

    Bug fix: like BlogObjectsMixin, the original stored per-request data in
    the class-level ``context`` dict, which is shared across requests and
    mixin users; a local dict is used instead.
    """
    model = None
    url = None
    paginator = False
    context = {}  # kept for interface compatibility; no longer mutated

    def get(self, request, slug):
        """Render the detail page for the object matching *slug*."""
        obj = get_object_or_404(self.model, slug__iexact=slug)
        context = dict(self.context)
        if self.paginator:
            objects = obj.posts.all()
            pagin = Paginator(objects, 2)
            page = request.GET.get('page', '1')
            if int(page) not in pagin.page_range:
                page = 1
            objects = pagin.get_page(page)
            # Matches the original behavior: pagination replaces any prior
            # context entries rather than merging with them.
            context = {
                'pagin': pagin,
                'objects': objects,
            }
        context.update({
            self.model.__name__.lower(): obj,
            'admin_option': obj,
        })
        return render(request, self.url, context=context)
class CreateObjectMixin:
    """Create-view mixin: GET renders an empty form, POST validates and saves.

    Fix: removed the debug ``print(two)`` — ``save_m2m()`` returns None, so
    the print carried no information.
    """
    form_model = None     # ModelForm class used for creation
    url_main = None       # template for the form page
    url_for_redir = None  # url name to redirect to after a successful save

    def get(self, request):
        """Render an unbound creation form."""
        form = self.form_model()
        return render(request, self.url_main, {'form': form})

    def post(self, request):
        """Validate the submitted form; save and redirect on success."""
        bound_form = self.form_model(request.POST)
        if bound_form.is_valid():
            new = bound_form.save(commit=False)
            # NOTE(review): field is spelled 'autor' — matches the model as
            # used here; confirm before renaming anywhere.
            new.autor = request.user
            new.save()
            # Persist many-to-many data deferred by commit=False.
            bound_form.save_m2m()
            return redirect(self.url_for_redir, new.slug)
        return render(request, self.url_main, {'form': bound_form})
class EditObjMixin:
    """Edit-view mixin: GET renders a bound form, POST validates and saves.

    Fix: removed the debug ``print(obj.id)``.
    """
    form_model = None     # ModelForm class (its Meta.model is the edited model)
    url_main = None       # template for the edit page
    url_for_redir = None  # url name to redirect to after saving

    def get(self, request, slug):
        """Render the form pre-filled with the object matching *slug*."""
        obj = get_object_or_404(self.form_model.Meta.model, slug__iexact=slug)
        form = self.form_model(instance=obj)
        return render(request, self.url_main, {'form': form, 'obj': obj})

    def post(self, request, slug):
        """Validate the edit; save and redirect on success."""
        obj = get_object_or_404(self.form_model.Meta.model, slug__iexact=slug)
        # NOTE(review): storing obj_id on the form *class* is shared state —
        # presumably consumed by the form's own validation; confirm before
        # refactoring it away.
        self.form_model.obj_id = obj.id
        bound_form = self.form_model(request.POST, instance=obj)
        if bound_form.is_valid():
            new_obj = bound_form.save()
            return redirect(self.url_for_redir, new_obj.slug)
        return render(request, self.url_main, {'form': bound_form})
class DelObjectMixin:
    """Confirm-and-delete view mixin for slug-addressed objects."""
    model = None     # model class whose instance is deleted
    url_main = None  # confirmation-page template

    def get(self, request, slug):
        """Render the delete-confirmation page."""
        target = get_object_or_404(self.model, slug__iexact=slug)
        return render(request, self.url_main, {'obj': target})

    def post(self, request, slug):
        """Delete the object, then redirect to the matching list page."""
        target = get_object_or_404(self.model, slug__iexact=slug)
        target.delete()
        redirect_name = ('blog_posts_url' if self.model.__name__ == 'Posts'
                         else 'blog_tags_url')
        return redirect(redirect_name)
# Word bank — presumably for a word-guessing game; confirm the consumer.
list_of_words = [
    "python",
    "adventure",
    "words",
    "banana",
    "measure",
    "cooing",
    "milk",
    "wheel",
    "illegal",
    "wretched",
    "spy",
    "letter",
    "curl",
    "haunt",
    "trip",
    "own",
    "bleach",
    "flimsy",
    "useful",
    "unlock",
    "sedate",
    "double",
    "weigh",
    "drown",
    "follow",
    "cheap",
    "suspect",
    "helpful",
    "orange",
    "minute",
    "perpetual",
    "placid",
    "fine",
    "wave",
    "plot",
    "deadpan",
    "snails",
    "jumpy",
    "jar"
]
|
# Write a Python program to convert a list of characters into a string
def charToString(character):
    """Join a list of characters into one string; print and return it.

    Bug fix: the original joined with ' ' (space), producing "a s d r g f"
    instead of the concatenated string the task asks for. Also returns the
    result so it is usable programmatically (previously returned None).
    """
    result = ''.join(character)
    print(result)
    return result

character = ['a','s','d','r','g','f']
charToString(character)
"""Написать свою реализацию функции filter."""
from typing import Union, Callable
test_list = []
test_tuple = ()
|
from django.db import models
# Create your models here.
class Blog(models.Model):
    """A blog entry with title, optional author, body and publish date."""
    title = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')
    author = models.TextField(null=True)
    body = models.TextField()

    def summary(self):
        """Return the body, truncated to 100 characters with an ellipsis."""
        if len(self.body) <= 100:
            return self.body
        return self.body[:100] + '...'

    def is_authorized(self):
        """Return the author, or "Anonymous" for empty/'-1' author values."""
        anonymous = self.author == '' or self.author == '-1'
        return "Anonymous" if anonymous else self.author
"""
This type stub file was generated by pyright.
"""
import sys
PY2 = sys.version_info[0] == 2
if PY2:
def iteritems(d):
...
def itervalues(d):
...
xrange = xrange
string_types = (unicode, bytes)
def to_str(x, charset=..., errors=...):
...
else:
def iteritems(d):
...
def itervalues(d):
...
xrange = range
string_types = (str, )
def to_str(x, charset=..., errors=...):
...
|
from CallBackOperator import CallBackOperator
from SignalGenerationPackage.Sinus.SinusSignalController import SinusSignalController
from SignalGenerationPackage.UserSignal.UserSignalController import UserSignalController
from SignalGenerationPackage.DynamicPointsDensitySignal.DynamicPointsDensitySignalController import DynamicPointsDensitySignalController
from SignalGenerationPackage.EdgeSignal.EdgeSignalController import EdgeSignalController
from SignalGenerationPackage.ExperimentSchedule.ExperimentScheduleController import ExperimentScheduleController
class SignalTypeOperator(CallBackOperator):
    """Connects the signal-type combo box to the matching signal controller."""

    # Combo-box text -> controller class to instantiate. Resolves the old
    # in-code TODO ("убрать ветвление, вставить словарь"): the if/elif chain
    # is replaced by this dispatch table.
    _SIGNAL_CONTROLLERS = {
        'sin': SinusSignalController,
        'user signal': UserSignalController,
        'dynamic points density': DynamicPointsDensitySignalController,
        'edge signal': EdgeSignalController,
        'experiment schedule': ExperimentScheduleController,
    }

    def __init__(self, window, model=None, value_range=None):
        super().__init__(window, model, value_range)

    def ConnectCallBack(self):
        """Subscribe to combo-box selection changes."""
        self.window.SignalTypecomboBox.currentIndexChanged.connect(self.StartSignalGeneration)

    def StartSignalGeneration(self):
        """Instantiate the controller matching the currently selected type."""
        signal_text = self.window.SignalTypecomboBox.currentText()
        controller_cls = self._SIGNAL_CONTROLLERS.get(signal_text)
        if controller_cls is not None:
            self.SignalController = controller_cls()
        # Unknown text keeps the previous controller — same as the old
        # if/elif chain, which simply fell through without assigning.

    # overridden — signal type has no slider/line-edit state, so these are no-ops.
    def value_changed(self, val):
        pass

    # overridden
    def init_line_edit(self):
        pass

    # overridden
    def init_slider(self):
        pass
from .responses import bucket_response, key_response

# Regex patterns for the S3 endpoints this module intercepts.
# Fix: raw strings — the originals contained "\-"/"\." escape sequences that
# are invalid string escapes (DeprecationWarning/SyntaxWarning on modern
# Python). The matched patterns are byte-identical.
url_bases = [
    r"https?://(?P<bucket_name>[a-zA-Z0-9\-_.]*)\.?s3.amazonaws.com"
]

url_paths = {
    # Bucket-level operations (list/create/delete the bucket itself).
    r'{0}/$': bucket_response,
    # Key-level operations within a bucket.
    r'{0}/(?P<key_name>[a-zA-Z0-9\-_.]+)': key_response,
}
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Generate Blink C++ bindings (.h and .cpp files) for use by Dart:HTML.
If run itself, caches Jinja templates (and creates dummy file for build,
since cache filenames are unpredictable and opaque).
This module is *not* concurrency-safe without care: bytecode caching creates
a race condition on cache *write* (crashes if one process tries to read a
partially-written cache). However, if you pre-cache the templates (by running
the module itself), then you can parallelize compiling individual files, since
cache *reading* is safe.
Input: An object of class IdlDefinitions, containing an IDL interface X
Output: DartX.h and DartX.cpp
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import os
import pickle
import re
import sys
# Path handling for libraries and templates
# Paths have to be normalized because Jinja uses the exact template path to
# determine the hash used in the cache filename, and we need a pre-caching step
# to be concurrency-safe. Use absolute path because __file__ is absolute if
# module is imported, and relative if executed directly.
# If paths differ between pre-caching and individual file compilation, the cache
# is regenerated, which causes a race condition and breaks concurrent build,
# since some compile processes will try to read the partially written cache.
# Absolute directory + filename of this module (see normalization note above).
module_path, module_filename = os.path.split(os.path.realpath(__file__))
# Five directory levels up from this module — presumably chromium's
# third_party root; confirm against the checkout layout.
third_party_dir = os.path.normpath(
    os.path.join(module_path, os.pardir, os.pardir, os.pardir, os.pardir,
                 os.pardir))
templates_dir = os.path.normpath(os.path.join(module_path, 'templates'))
# Make sure extension is .py, not .pyc or .pyo, so doesn't depend on caching
module_pyname = os.path.splitext(module_filename)[0] + '.py'
# jinja2 is in chromium's third_party directory.
# Insert at 1 so at front to override system libraries, and
# after path[0] == invoking script dir
sys.path.insert(1, third_party_dir)
# Add the base compiler scripts to the path here as in compiler.py
dart_script_path = os.path.dirname(os.path.abspath(__file__))
script_path = os.path.join(
    os.path.dirname(os.path.dirname(dart_script_path)), 'scripts')
sys.path.extend([script_path])
import jinja2
import idl_types
from idl_types import IdlType
from utilities import write_pickle_file
from v8_globals import includes
from dart_utilities import DartUtilities
# TODO(jacobr): remove this hacked together list.
# Interfaces for which no Dart native resolver entry is generated.
# Fix: 'TypeConversions' appeared twice in the literal; frozenset deduplicates
# anyway, so membership is unchanged — the duplicate was just noise.
INTERFACES_WITHOUT_RESOLVERS = frozenset([
    'TypeConversions', 'GCObservation', 'InternalProfilers',
    'InternalRuntimeFlags', 'InternalSettings', 'InternalSettingsGenerated',
    'Internals', 'LayerRect', 'LayerRectList', 'MallocStatistics',
])
class CodeGeneratorDart(object):
    """Generates Dart binding header/cpp text from Blink IDL definitions."""

    def __init__(self, interfaces_info, cache_dir):
        """Set up the Jinja environment and the global IdlType registries."""
        interfaces_info = interfaces_info or {}
        self.interfaces_info = interfaces_info
        self.jinja_env = initialize_jinja_env(cache_dir)
        # Set global type info
        idl_types.set_ancestors(
            dict((interface_name, interface_info['ancestors'])
                 for interface_name, interface_info in interfaces_info.items()
                 if interface_info['ancestors']))
        IdlType.set_callback_interfaces(
            set(interface_name
                for interface_name, interface_info in interfaces_info.items()
                if interface_info['is_callback_interface']))
        IdlType.set_implemented_as_interfaces(
            dict((interface_name, interface_info['implemented_as'])
                 for interface_name, interface_info in interfaces_info.items()
                 if interface_info['implemented_as']))
        IdlType.set_garbage_collected_types(
            set(interface_name
                for interface_name, interface_info in interfaces_info.items()
                if 'GarbageCollected' in
                interface_info['inherited_extended_attributes']))

    def generate_code(self, definitions, interface_name, idl_pickle_filename,
                      only_if_changed):
        """Returns .h/.cpp code as (header_text, cpp_text).

        Also updates the per-interface globals pickle used later by
        generate_globals().
        """
        try:
            interface = definitions.interfaces[interface_name]
        except KeyError:
            raise Exception('%s not in IDL definitions' % interface_name)
        # Store other interfaces for introspection.
        # NOTE(review): `interfaces` is not defined in this module's visible
        # scope — presumably a module-level registry defined elsewhere; confirm.
        interfaces.update(definitions.interfaces)
        # Set local type info
        IdlType.set_callback_functions(definitions.callback_functions.keys())
        IdlType.set_enums((enum.name, enum.values)
                          for enum in definitions.enumerations.values())
        # Select appropriate Jinja template and contents function
        if interface.is_callback:
            header_template_filename = 'callback_interface_h.template'
            cpp_template_filename = 'callback_interface_cpp.template'
            generate_contents = dart_callback_interface.generate_callback_interface
        else:
            header_template_filename = 'interface_h.template'
            cpp_template_filename = 'interface_cpp.template'
            generate_contents = dart_interface.generate_interface
        header_template = self.jinja_env.get_template(header_template_filename)
        cpp_template = self.jinja_env.get_template(cpp_template_filename)
        # Generate contents (input parameters for Jinja)
        template_contents = generate_contents(interface)
        template_contents['code_generator'] = module_pyname
        # Add includes for interface itself and any dependencies
        interface_info = self.interfaces_info[interface_name]
        template_contents['header_includes'].add(interface_info['include_path'])
        template_contents['header_includes'] = sorted(
            template_contents['header_includes'])
        includes.update(interface_info.get('dependencies_include_paths', []))
        # Remove includes that are not needed for Dart and trigger fatal
        # compile warnings if included. These IDL files need to be
        # imported by Dart to generate the list of events but the
        # associated header files do not contain any code used by Dart.
        includes.discard('core/dom/GlobalEventHandlers.h')
        includes.discard('core/frame/DOMWindowEventHandlers.h')
        template_contents['cpp_includes'] = sorted(includes)
        idl_world = {'interface': None, 'callback': None}
        # Load the pickle file for this IDL.
        # Fix: pickle data is binary — open in 'rb' (text mode breaks on
        # Python 3 and on newline translation).
        if os.path.isfile(idl_pickle_filename):
            with open(idl_pickle_filename, 'rb') as idl_pickle_file:
                idl_global_data = pickle.load(idl_pickle_file)
            idl_world['interface'] = idl_global_data['interface']
            idl_world['callback'] = idl_global_data['callback']
        if 'interface_name' in template_contents:
            interface_global = {
                'name':
                template_contents['interface_name'],
                'parent_interface':
                template_contents['parent_interface'],
                'is_active_dom_object':
                template_contents['is_active_dom_object'],
                'is_event_target':
                template_contents['is_event_target'],
                'has_resolver':
                template_contents['interface_name'] not in
                INTERFACES_WITHOUT_RESOLVERS,
                'is_node':
                template_contents['is_node'],
                'conditional_string':
                template_contents['conditional_string'],
            }
            idl_world['interface'] = interface_global
        else:
            callback_global = {'name': template_contents['cpp_class']}
            idl_world['callback'] = callback_global
        write_pickle_file(idl_pickle_filename, idl_world, only_if_changed)
        # Render Jinja templates
        header_text = header_template.render(template_contents)
        cpp_text = cpp_template.render(template_contents)
        return header_text, cpp_text

    # Generates global file for all interfaces.
    def generate_globals(self, output_directory):
        """Render the global .h/.cpp from every per-IDL globals pickle."""
        header_template_filename = 'global_h.template'
        cpp_template_filename = 'global_cpp.template'
        # Delete the global pickle file we'll rebuild from each pickle generated
        # for each IDL file '(%s_globals.pickle) % interface_name'.
        global_pickle_filename = os.path.join(output_directory, 'global.pickle')
        if os.path.isfile(global_pickle_filename):
            os.remove(global_pickle_filename)
        # List of all interfaces and callbacks for global code generation.
        world = {'interfaces': [], 'callbacks': []}
        # Load all pickled data for each interface.
        listing = os.listdir(output_directory)
        for filename in listing:
            if filename.endswith('_globals.pickle'):
                idl_filename = os.path.join(output_directory, filename)
                with open(idl_filename, 'rb') as idl_pickle_file:
                    idl_world = pickle.load(idl_pickle_file)
                if 'interface' in idl_world:
                    # FIXME: Why are some of these None?
                    if idl_world['interface']:
                        world['interfaces'].append(idl_world['interface'])
                # Fix: the key written by generate_code() is 'callback'
                # (singular); the original tested 'callbacks', which never
                # matched, so callbacks were silently dropped from the
                # global output.
                if 'callback' in idl_world:
                    # FIXME: Why are some of these None?
                    if idl_world['callback']:
                        world['callbacks'].append(idl_world['callback'])
        world['interfaces'] = sorted(world['interfaces'],
                                     key=lambda x: x['name'])
        world['callbacks'] = sorted(world['callbacks'], key=lambda x: x['name'])
        template_contents = world
        template_contents['code_generator'] = module_pyname
        header_template = self.jinja_env.get_template(header_template_filename)
        header_text = header_template.render(template_contents)
        cpp_template = self.jinja_env.get_template(cpp_template_filename)
        cpp_text = cpp_template.render(template_contents)
        return header_text, cpp_text
def initialize_jinja_env(cache_dir):
    """Create the Jinja2 environment used by the Dart binding generator."""
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(templates_dir),
        # Bytecode cache is not concurrency-safe unless pre-cached:
        # if pre-cached this is read-only, but writing creates a race condition.
        bytecode_cache=jinja2.FileSystemBytecodeCache(cache_dir),
        keep_trailing_newline=True,  # newline-terminate generated files
        lstrip_blocks=True,  # so can indent control flow tags
        trim_blocks=True)
    # Custom filters available inside the templates.
    custom_filters = {
        'blink_capitalize': DartUtilities.capitalize,
        'conditional': conditional_if_endif,
        'runtime_enabled': runtime_enabled_if,
    }
    env.filters.update(custom_filters)
    return env
# [Conditional]
def conditional_if_endif(code, conditional_string):
    """Jinja filter: wrap *code* in #if/#endif when a condition is given."""
    if not conditional_string:
        return code
    guard_open = '#if %s\n' % conditional_string
    guard_close = '#endif // %s\n' % conditional_string
    return guard_open + code + guard_close
# [RuntimeEnabled]
def runtime_enabled_if(code, runtime_enabled_function_name):
if not runtime_enabled_function_name:
return code
# Indent if statement to level of original code
indent = re.match(' *', code).group(0)
return ('%sif (%s())\n' % (indent, runtime_enabled_function_name) +
' %s' % code)
################################################################################
def main(argv):
    """Pre-cache all Jinja templates and touch a dummy file for the build.

    argv: [script, OUTPUT_DIR, DUMMY_FILENAME].
    """
    if len(argv) < 3:
        print('Usage: %s OUTPUT_DIR DUMMY_FILENAME' % argv[0])
        return 1
    cache_dir, dummy_filename = argv[1], argv[2]
    # Cache templates
    jinja_env = initialize_jinja_env(cache_dir)
    for template_filename in os.listdir(templates_dir):
        # Skip .svn, directories, etc.
        if template_filename.endswith(('.cpp', '.h', '.template')):
            jinja_env.get_template(template_filename)
    # Create a dummy file as output for the build system,
    # since filenames of individual cache files are unpredictable and opaque
    # (they are hashes of the template path, which varies based on environment)
    with open(dummy_filename, 'w'):
        pass  # |open| creates or touches the file


if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
#-*—coding:utf8-*-
import numpy as np
import gc
import re
import csv
import codecs
from decimal import *
import os
try:
fil_winsize = codecs.open("list.txt", "r", 'utf_8_sig')
# fil6 = codecs.open("channel_ssid_time.csv", "w", 'utf_8_sig')
winsize = csv.reader(fil_winsize)
# write_ssid = csv.writer(fil6)
except Exception:
print "winsize_filelist open failed"
exit()
ratio = 1000
for i in winsize:
i = i[0]
i = i + '/split/'
res = os.listdir(i)
print res
flist = []
for j in res:
if j.find('_winsize') > 0:
j = i + j
flist.append(j)
for k in flist:
print k
wfile = 'new/' + k.replace('/', '_')
rfile = wfile
wfile = wfile.replace('winsize', 'ratio')
print wfile, rfile
# continue
try:
f_tmp = open(k, 'rb')
r_tmp = open(rfile, 'rb')
results = f_tmp.readlines()
wfhandle = open(wfile, 'wb')
write_record = csv.writer(wfhandle)
except Exception:
print f_tmp, wfile, 'open falied'
if f_tmp:
f_tmp.close()
re_results = r_tmp.readlines()
begin_time = re_results[0]
begin_time = int(begin_time)
end_time = re_results[len(re_results) - 2]
end_time = int(end_time)
duration = int(end_time / ratio) - int(begin_time / ratio)
# print begin_time, end_time, duration
# duration = int(duration)
wintimes = [0.0 for a in range(duration + 1000)]
for item in results:
try:
(mac_addr, eth_src, eth_dst, ip_src,
ip_dst, srcport, dstport, sequence, ack_sequence,
windowsize, cal_windowsize, timex,
datalength, flags, kind, length, wscale) = re.split(",", item)
except Exception:
# print item, "sss"
break
try:
timex = int(timex)
except Exception:
continue
timex = timex - begin_time
timex = timex / ratio
try:
wintimes[timex] += 1.0
except Exception:
continue
del results
gc.collect()
res_dic = {}
for i in re_results:
i = int(i)
i = i - begin_time
i = i / ratio
try:
res_dic[i] += 1.0
except Exception:
res_dic[i] = 1.0
# print res_dic
for i in range(0, duration):
tmp = 0
try:
tmp = res_dic[i]
# print tmp, wintimes[i]
wintimes[i] = round(tmp / wintimes[i], 4)
except Exception:
wintimes[i] = 0.0
# print wintimes
tmp1 = int(begin_time / ratio)
for i in range(0, duration + 1):
tmp = (tmp1 + i)
write_record.writerow([tmp, wintimes[i]])
if wfhandle:
wfhandle.close()
# exit()
del winsize
if fil_winsize:
fil_winsize.close()
gc.collect()
|
from django.db import models
from django.utils import timezone
class Msg(models.Model):
    """A simple titled message with an author name and a timestamp."""
    # Author display name.
    name = models.CharField(max_length=200)
    # Short headline; also used as the string representation.
    title = models.CharField(max_length=200)
    # Message body.
    text = models.TextField()
    # Creation timestamp; timezone.now is passed as a callable so it is
    # evaluated at save time, not at class-definition time.
    date = models.DateTimeField(
        default=timezone.now)
    def __str__(self):
        return self.title
|
import binascii
import unittest
from encoding.base58_check import Base58CheckAddress
from encoding.byte_conversion import to_n_bits
from encoding.cashaddr import AddressType, Cashaddr
class CashaddrTest(unittest.TestCase):
    """Checks for the Cashaddr (Bitcoin Cash address) encoding helpers."""
    def test_polymod(self):
        """
        Polymod should return 0
        """
        # For a valid cashaddr, the BCH checksum of (lowered prefix bits +
        # separator zero + payload) must be exactly 0.
        cashaddr = "bitcoincash:qqjsprfudecxwurfswv0sjvvt8lhxf6zqvapsewce9"
        addr = Cashaddr()
        payload = addr.lower_prefix_bits() + [0] + addr.reverse_map(cashaddr.split(":")[1])
        self.assertEqual(0, addr.poly_mod(payload))
    def test_encoding(self):
        """Convert a base58check P2SH address to its cashaddr form."""
        btc = Base58CheckAddress()
        bch = Cashaddr()
        ripemd = btc.decode_base58("31nwvkZwyPdgzjBJZXfDmSWsC4ZLKpYyUw")
        print("RIPEMD IS {}".format(binascii.hexlify(ripemd)))
        # Strip the leading version byte and the trailing 4-byte checksum
        # to keep only the raw hash160 payload.
        bch.hash = ripemd[1:-4]
        self.assertEqual("bitcoincash:pqq3728yw0y47sqn6l2na30mcw6zm78dzq5ucqzc37", bch.address_string(AddressType.P2SH))
    def test_byte_split(self):
        """Regroup 8-bit bytes into 5-bit groups and round-trip back."""
        byte_arr = bytes([255, 255])
        five_bits = to_n_bits(byte_arr)
        self.assertSequenceEqual(five_bits, bytes([31, 31, 31, 16]))
        # The reverse conversion yields a trailing padding byte; drop it.
        self.assertSequenceEqual(byte_arr, to_n_bits(five_bits, 5, 8)[:-1])
|
from datetime import datetime
import json
from pathlib import Path
import sys
import click
import humanize
from tabulate import tabulate
from tqdm import tqdm
from ai.backend.cli.interaction import ask_yn
from ai.backend.client.config import DEFAULT_CHUNK_SIZE, APIConfig
from ai.backend.client.session import Session
from ..compat import asyncio_run
from ..session import AsyncSession
from .main import main
from .pretty import print_done, print_error, print_fail, print_info, print_wait, print_warn
from .params import ByteSizeParamType, ByteSizeParamCheckType, CommaSeparatedKVListParamType
# Click command group: every `vfolder …` subcommand below attaches to this.
@main.group()
def vfolder():
    """Set of vfolder operations"""
@vfolder.command()
def list_hosts():
    '''List the hosts of virtual folders that is accessible to the current user.'''
    with Session() as sess:
        try:
            hosts = sess.VFolder.list_hosts()
            default_host = hosts['default']
            usable = ', '.join(hosts['allowed'])
            print(f"Default vfolder host: {default_host}")
            print(f"Usable hosts: {usable}")
        except Exception as exc:
            print_error(exc)
            sys.exit(1)
@vfolder.command()
def list_allowed_types():
    '''List allowed vfolder types.'''
    with Session() as sess:
        try:
            allowed = sess.VFolder.list_allowed_types()
            print(allowed)
        except Exception as exc:
            print_error(exc)
            sys.exit(1)
@vfolder.command()
@click.argument('name', type=str)
@click.argument('host', type=str, default=None)
@click.option('-g', '--group', metavar='GROUP', type=str, default=None,
              help='Group ID or NAME. Specify this option if you want to create a group folder.')
@click.option('--unmanaged', 'host_path', type=bool, is_flag=True,
              help='Treats HOST as a mount point of unmanaged virtual folder. '
                   'This option can only be used by Admin or Superadmin.')
@click.option('-m', '--usage-mode', metavar='USAGE_MODE', type=str, default='general',
              help='Purpose of the folder. Normal folders are usually set to "general". '
                   'Available options: "general", "data" (provides data to users), '
                   'and "model" (provides pre-trained models).')
@click.option('-p', '--permission', metavar='PERMISSION', type=str, default='rw',
              help='Folder\'s innate permission. '
                   'Group folders can be shared as read-only by setting this option to "ro".'
                   'Invited folders override this setting by its own invitation permission.')
@click.option('-q', '--quota', metavar='QUOTA', type=ByteSizeParamCheckType(), default='0',
              help='Quota of the virtual folder. '
                   '(Use \'m\' for megabytes, \'g\' for gigabytes, and etc.) '
                   'Default is maximum amount possible.')
@click.option('--cloneable', '--allow-clone', type=bool, is_flag=True,
              help='Allows the virtual folder to be cloned by users.')
def create(name, host, group, host_path, usage_mode, permission, quota, cloneable):
    '''Create a new virtual folder.
    \b
    NAME: Name of a virtual folder.
    HOST: Name of a virtual folder host in which the virtual folder will be created.
    '''
    with Session() as session:
        try:
            # The two original call sites differed only in how HOST is
            # passed (unmanaged mount path vs. vfolder host name);
            # deduplicated into one call with shared keyword arguments.
            common = dict(
                group=group,
                usage_mode=usage_mode,
                permission=permission,
                quota=quota,
                cloneable=cloneable,
            )
            if host_path:
                result = session.VFolder.create(
                    name=name, unmanaged_path=host, **common)
            else:
                result = session.VFolder.create(
                    name=name, host=host, **common)
            print('Virtual folder "{0}" is created.'.format(result['name']))
        except Exception as e:
            print_error(e)
            sys.exit(1)
@vfolder.command()
@click.argument('name', type=str)
def delete(name):
    '''Delete the given virtual folder. This operation is irreversible!
    NAME: Name of a virtual folder.
    '''
    with Session() as sess:
        try:
            sess.VFolder(name).delete()
            print_done('Deleted.')
        except Exception as exc:
            print_error(exc)
            sys.exit(1)
@vfolder.command()
@click.argument('old_name', type=str)
@click.argument('new_name', type=str)
def rename(old_name, new_name):
    '''Rename the given virtual folder. This operation is irreversible!
    You cannot change the vfolders that are shared by other users,
    and the new name must be unique among all your accessible vfolders
    including the shared ones.
    OLD_NAME: The current name of a virtual folder.
    NEW_NAME: The new name of a virtual folder.
    '''
    with Session() as sess:
        try:
            sess.VFolder(old_name).rename(new_name)
            print_done('Renamed.')
        except Exception as exc:
            print_error(exc)
            sys.exit(1)
@vfolder.command()
@click.argument('name', type=str)
def info(name):
    '''Show the information of the given virtual folder.
    NAME: Name of a virtual folder.
    '''
    with Session() as session:
        try:
            result = session.VFolder(name).info()
            print('Virtual folder "{0}" (ID: {1})'
                  .format(result['name'], result['id']))
            print('- Owner:', result['is_owner'])
            # BUGFIX: '- Permission:' was printed twice; show it once.
            print('- Permission:', result['permission'])
            print('- Number of files: {0}'.format(result['numFiles']))
            print('- Ownership Type: {0}'.format(result['type']))
            print('- Usage Mode: {0}'.format(result.get('usage_mode', '')))
            print('- Group ID: {0}'.format(result['group']))
            print('- User ID: {0}'.format(result['user']))
            print('- Clone Allowed: {0}'.format(result['cloneable']))
        except Exception as e:
            print_error(e)
            sys.exit(1)
@vfolder.command(context_settings={'show_default': True})  # bug: pallets/click#1565 (fixed in 8.0)
@click.argument('name', type=str)
@click.argument('filenames', type=Path, nargs=-1)
@click.option('-b', '--base-dir', type=Path, default=None,
              help='Set the parent directory from where the file is uploaded. '
                   '[default: current working directry]')
@click.option('--chunk-size', type=ByteSizeParamType(),
              default=humanize.naturalsize(DEFAULT_CHUNK_SIZE, binary=True, gnu=True),
              help='Transfer the file with the given chunk size with binary suffixes (e.g., "16m"). '
                   'Set this between 8 to 64 megabytes for high-speed disks (e.g., SSD RAID) '
                   'and networks (e.g., 40 GbE) for the maximum throughput.')
@click.option('--override-storage-proxy',
              type=CommaSeparatedKVListParamType(), default=None,
              help='Overrides storage proxy address. '
                   'The value must shape like "X1=Y1,X2=Y2...". '
                   'Each Yn address must at least include the IP address '
                   'or the hostname and may include the protocol part and the port number to replace.')
def upload(name, filenames, base_dir, chunk_size, override_storage_proxy):
    '''
    TUS Upload a file to the virtual folder from the current working directory.
    The files with the same names will be overwritten.
    \b
    NAME: Name of a virtual folder.
    FILENAMES: Paths of the files to be uploaded.
    '''
    with Session() as session:
        try:
            session.VFolder(name).upload(
                filenames,
                basedir=base_dir,
                chunk_size=chunk_size,
                show_progress=True,
                # Fall back to the configured storage-proxy address map when
                # no override is given on the command line.
                address_map=override_storage_proxy or APIConfig.DEFAULTS['storage_proxy_address_map'],
            )
            print_done('Done.')
        except Exception as e:
            print_error(e)
            sys.exit(1)
@vfolder.command(context_settings={'show_default': True})  # bug: pallets/click#1565 (fixed in 8.0)
@click.argument('name', type=str)
@click.argument('filenames', type=Path, nargs=-1)
@click.option('-b', '--base-dir', type=Path, default=None,
              help='Set the parent directory from where the file is uploaded. '
                   '[default: current working directry]')
@click.option('--chunk-size', type=ByteSizeParamType(),
              default=humanize.naturalsize(DEFAULT_CHUNK_SIZE, binary=True, gnu=True),
              help='Transfer the file with the given chunk size with binary suffixes (e.g., "16m"). '
                   'Set this between 8 to 64 megabytes for high-speed disks (e.g., SSD RAID) '
                   'and networks (e.g., 40 GbE) for the maximum throughput.')
@click.option('--override-storage-proxy',
              type=CommaSeparatedKVListParamType(), default=None,
              help='Overrides storage proxy address. '
                   'The value must shape like "X1=Y1,X2=Y2...". '
                   'Each Yn address must at least include the IP address '
                   'or the hostname and may include the protocol part and the port number to replace.')
def download(name, filenames, base_dir, chunk_size, override_storage_proxy):
    '''
    Download a file from the virtual folder to the current working directory.
    The files with the same names will be overwritten.
    \b
    NAME: Name of a virtual folder.
    FILENAMES: Paths of the files to be downloaded inside a vfolder.
    '''
    with Session() as session:
        try:
            session.VFolder(name).download(
                filenames,
                basedir=base_dir,
                chunk_size=chunk_size,
                show_progress=True,
                # Fall back to the configured storage-proxy address map when
                # no override is given on the command line.
                address_map=override_storage_proxy or APIConfig.DEFAULTS['storage_proxy_address_map'],
            )
            print_done('Done.')
        except Exception as e:
            print_error(e)
            sys.exit(1)
@vfolder.command()
@click.argument('name', type=str)
@click.argument('filename', type=Path)
def request_download(name, filename):
    '''
    Request JWT-formatted download token for later use.
    \b
    NAME: Name of a virtual folder.
    FILENAME: Path of the file to be downloaded.
    '''
    with Session() as sess:
        try:
            raw = sess.VFolder(name).request_download(filename)
            token = json.loads(raw)["token"]
            print_done(f'Download token: {token}')
        except Exception as exc:
            print_error(exc)
            sys.exit(1)
@vfolder.command()
@click.argument('filenames', nargs=-1)
def cp(filenames):
    '''An scp-like shortcut for download/upload commands.
    FILENAMES: Paths of the files to operate on. The last one is the target while all
               others are the sources. Either source paths or the target path should
               be prefixed with "<vfolder-name>:" like when using the Linux scp
               command to indicate if it is a remote path.
    '''
    # Placeholder: intentionally unimplemented.
    raise NotImplementedError
@vfolder.command()
@click.argument('name', type=str)
@click.argument('path', type=str)
def mkdir(name, path):
    '''Create an empty directory in the virtual folder.
    \b
    NAME: Name of a virtual folder.
    PATH: The name or path of directory. Parent directories are created automatically
          if they do not exist.
    '''
    with Session() as sess:
        try:
            sess.VFolder(name).mkdir(path)
            print_done('Done.')
        except Exception as exc:
            print_error(exc)
            sys.exit(1)
@vfolder.command()
@click.argument('name', type=str)
@click.argument('target_path', type=str)
@click.argument('new_name', type=str)
def rename_file(name, target_path, new_name):
    '''
    Rename a file or a directory in a virtual folder.
    \b
    NAME: Name of a virtual folder.
    TARGET_PATH: The target path inside a virtual folder (file or directory).
    NEW_NAME: New name of the target (should not contain slash).
    '''
    with Session() as sess:
        try:
            sess.VFolder(name).rename_file(target_path, new_name)
            print_done('Renamed.')
        except Exception as exc:
            print_error(exc)
            sys.exit(1)
@vfolder.command()
@click.argument('name', type=str)
@click.argument('src', type=str)
@click.argument('dst', type=str)
def mv(name, src, dst):
    '''
    Move a file or a directory within a virtual folder.
    If the destination is a file and already exists, it will be overwritten.
    If the destination is a directory, the source file or directory
    is moved inside it.
    \b
    NAME: Name of a virtual folder.
    SRC: The relative path of the source file or directory inside a virtual folder
    DST: The relative path of the destination file or directory inside a virtual folder.
    '''
    with Session() as sess:
        try:
            sess.VFolder(name).move_file(src, dst)
            print_done('Moved.')
        except Exception as exc:
            print_error(exc)
            sys.exit(1)
@vfolder.command(aliases=['delete-file'])
@click.argument('name', type=str)
@click.argument('filenames', nargs=-1)
@click.option('-r', '--recursive', is_flag=True,
              help='Enable recursive deletion of directories.')
def rm(name, filenames, recursive):
    '''
    Delete files in a virtual folder.
    If one of the given paths is a directory and the recursive option is enabled,
    all its content and the directory itself are recursively deleted.
    This operation is irreversible!
    \b
    NAME: Name of a virtual folder.
    FILENAMES: Paths of the files to delete.
    '''
    with Session() as sess:
        try:
            # Ask for confirmation before the irreversible deletion.
            if not ask_yn():
                print_info('Cancelled')
                sys.exit(1)
            sess.VFolder(name).delete_files(filenames, recursive=recursive)
            print_done('Done.')
        except Exception as exc:
            print_error(exc)
            sys.exit(1)
@vfolder.command()
@click.argument('name', type=str)
@click.argument('path', metavar='PATH', nargs=1, default='.')
def ls(name, path):
    """
    List files in a path of a virtual folder.
    \b
    NAME: Name of a virtual folder.
    PATH: Path inside vfolder.
    """
    with Session() as session:
        try:
            print_wait('Retrieving list of files in "{}"...'.format(path))
            result = session.VFolder(name).list_files(path)
            if 'error_msg' in result and result['error_msg']:
                print_fail(result['error_msg'])
                return
            files = json.loads(result['files'])
            # One row per file: name, size, formatted mtime, mode.
            table = []
            headers = ['file name', 'size', 'modified', 'mode']
            for file in files:
                mdt = datetime.fromtimestamp(file['mtime'])
                mtime = mdt.strftime('%b %d %Y %H:%M:%S')
                row = [file['filename'], file['size'], mtime, file['mode']]
                table.append(row)
            # BUGFIX: message was misspelled 'Retrived.'.
            print_done('Retrieved.')
            print(tabulate(table, headers=headers))
        except Exception as e:
            print_error(e)
            # Exit non-zero on failure, consistent with the sibling commands.
            sys.exit(1)
@vfolder.command()
@click.argument('name', type=str)
@click.argument('emails', type=str, nargs=-1, required=True)
@click.option('-p', '--perm', metavar='PERMISSION', type=str, default='rw',
              help='Permission to give. "ro" (read-only) / "rw" (read-write) / "wd" (write-delete).')
def invite(name, emails, perm):
    """Invite other users to access a user-type virtual folder.
    \b
    NAME: Name of a virtual folder.
    EMAILS: Emails to invite.
    """
    with Session() as session:
        try:
            # BUGFIX: was `assert`, which is stripped under `python -O`.
            # The raise is caught below and reported via print_error,
            # preserving the original error path.
            if perm not in ('rw', 'ro', 'wd'):
                raise ValueError('Invalid permission: {}'.format(perm))
            result = session.VFolder(name).invite(perm, emails)
            invited_ids = result.get('invited_ids', [])
            if len(invited_ids) > 0:
                print('Invitation sent to:')
                for invitee in invited_ids:
                    print('\t- ' + invitee)
            else:
                print('No users found. Invitation was not sent.')
        except Exception as e:
            print_error(e)
            sys.exit(1)
@vfolder.command()
def invitations():
    """List and manage received invitations.
    """
    with Session() as session:
        try:
            result = session.VFolder.invitations()
            invitations = result.get('invitations', [])
            if len(invitations) < 1:
                print('No invitations.')
                return
            print('List of invitations (inviter, vfolder id, permission):')
            for cnt, inv in enumerate(invitations):
                # Expand the short permission codes for display.
                if inv['perm'] == 'rw':
                    perm = 'read-write'
                elif inv['perm'] == 'ro':
                    perm = 'read-only'
                else:
                    perm = inv['perm']
                print('[{}] {}, {}, {}'.format(cnt + 1, inv['inviter'],
                                               inv['vfolder_id'], perm))
            # Interactive selection: 1-based on screen, 0-based internally.
            selection = input('Choose invitation number to manage: ')
            if selection.isdigit():
                selection = int(selection) - 1
            else:
                return
            if 0 <= selection < len(invitations):
                # Loop until the user gives a recognized action.
                while True:
                    action = input('Choose action. (a)ccept, (r)eject, (c)ancel: ')
                    if action.lower() == 'a':
                        session.VFolder.accept_invitation(invitations[selection]['id'])
                        msg = (
                            'You can now access vfolder {} ({})'.format(
                                invitations[selection]['vfolder_name'],
                                invitations[selection]['id'],
                            )
                        )
                        print(msg)
                        break
                    elif action.lower() == 'r':
                        session.VFolder.delete_invitation(invitations[selection]['id'])
                        msg = (
                            'vfolder invitation rejected: {} ({})'.format(
                                invitations[selection]['vfolder_name'],
                                invitations[selection]['id'],
                            )
                        )
                        print(msg)
                        break
                    elif action.lower() == 'c':
                        break
        except Exception as e:
            print_error(e)
            sys.exit(1)
@vfolder.command()
@click.argument('name', type=str)
@click.argument('emails', type=str, nargs=-1, required=True)
@click.option('-p', '--perm', metavar='PERMISSION', type=str, default='rw',
              help='Permission to give. "ro" (read-only) / "rw" (read-write) / "wd" (write-delete).')
def share(name, emails, perm):
    """Share a group folder to users with overriding permission.
    \b
    NAME: Name of a (group-type) virtual folder.
    EMAILS: Emails to share.
    """
    with Session() as session:
        try:
            # BUGFIX: was `assert`, which is stripped under `python -O`.
            # The raise is caught below and reported via print_error,
            # preserving the original error path.
            if perm not in ('rw', 'ro', 'wd'):
                raise ValueError('Invalid permission: {}'.format(perm))
            result = session.VFolder(name).share(perm, emails)
            shared_emails = result.get('shared_emails', [])
            if len(shared_emails) > 0:
                print('Shared with {} permission to:'.format(perm))
                for _email in shared_emails:
                    print('\t- ' + _email)
            else:
                print('No users found. Folder is not shared.')
        except Exception as e:
            print_error(e)
            sys.exit(1)
@vfolder.command()
@click.argument('name', type=str)
@click.argument('emails', type=str, nargs=-1, required=True)
def unshare(name, emails):
    """Unshare a group folder from users.
    \b
    NAME: Name of a (group-type) virtual folder.
    EMAILS: Emails to unshare.
    """
    with Session() as sess:
        try:
            result = sess.VFolder(name).unshare(emails)
            removed = result.get('unshared_emails', [])
            if removed:
                print('Unshared from:')
                for addr in removed:
                    print('\t- ' + addr)
            else:
                print('No users found. Folder is not unshared.')
        except Exception as exc:
            print_error(exc)
            sys.exit(1)
@vfolder.command()
@click.argument('name', type=str)
def leave(name):
    '''Leave the shared virtual folder.
    NAME: Name of a virtual folder
    '''
    with Session() as sess:
        try:
            vfolder_info = sess.VFolder(name).info()
            # Group folders and owned folders cannot be left.
            if vfolder_info['type'] == 'group':
                print('You cannot leave a group virtual folder.')
                return
            if vfolder_info['is_owner']:
                print('You cannot leave a virtual folder you own. Consider using delete instead.')
                return
            sess.VFolder(name).leave()
            print('Left the shared virtual folder "{}".'.format(name))
        except Exception as exc:
            print_error(exc)
            sys.exit(1)
@vfolder.command()
@click.argument('name', type=str)
@click.argument('target_name', type=str)
@click.argument('target_host', type=str)
@click.option('-m', '--usage-mode', metavar='USAGE_MODE', type=str, default='general',
              help='Purpose of the cloned virtual folder. '
                   'Default value is \'general\'.')
@click.option('-p', '--permission', metavar='PERMISSION', type=str, default='rw',
              help='Cloned virtual folder\'s permission. '
                   'Default value is \'rw\'.')
def clone(name, target_name, target_host, usage_mode, permission):
    """Clone a virtual folder.
    \b
    NAME: Name of the virtual folder to clone from.
    TARGET_NAME: Name of the virtual folder to clone to.
    TARGET_HOST: Name of a virtual folder host to which the virtual folder will be cloned.
    """
    with Session() as session:
        try:
            vfolder_info = session.VFolder(name).info()
            # The source folder must have been created/updated as cloneable.
            if not vfolder_info['cloneable']:
                print("Clone is not allowed for this virtual folder. "
                      "Please update the 'cloneable' option.")
                return
            result = session.VFolder(name).clone(
                target_name,
                target_host=target_host,
                usage_mode=usage_mode,
                permission=permission,
            )
            # The server may run the clone as a background task; if so it
            # returns an id we can subscribe to for progress events.
            bgtask_id = result.get('bgtask_id')
        except Exception as e:
            print_error(e)
            sys.exit(1)

    async def clone_vfolder_tracker(bgtask_id):
        # Follow the background task's server-sent events until it finishes,
        # then print the final status message.
        print_wait(
            "Cloning the vfolder... "
            "(This may take a while depending on its size and number of files!)",
        )
        async with AsyncSession() as session:
            try:
                bgtask = session.BackgroundTask(bgtask_id)
                completion_msg_func = lambda: print_done("Cloning the vfolder is complete.")
                async with bgtask.listen_events() as response:
                    # TODO: get the unit of progress from response
                    with tqdm(unit='bytes', disable=True) as pbar:
                        async for ev in response:
                            data = json.loads(ev.data)
                            if ev.event == 'bgtask_updated':
                                pbar.total = data['total_progress']
                                pbar.write(data['message'])
                                pbar.update(data['current_progress'] - pbar.n)
                            elif ev.event == 'bgtask_failed':
                                error_msg = data['message']
                                completion_msg_func = \
                                    lambda: print_fail(
                                        f"Error during the operation: {error_msg}",
                                    )
                            elif ev.event == 'bgtask_cancelled':
                                completion_msg_func = \
                                    lambda: print_warn(
                                        "The operation has been cancelled in the middle. "
                                        "(This may be due to server shutdown.)",
                                    )
            finally:
                # Always report the final outcome, even on early exit.
                completion_msg_func()

    if bgtask_id is None:
        # Clone already finished synchronously on the server side.
        print_done("Cloning the vfolder is complete.")
    else:
        asyncio_run(clone_vfolder_tracker(bgtask_id))
@vfolder.command()
@click.argument('name', type=str)
@click.option('-p', '--permission', type=str, metavar='PERMISSION',
              help="Folder's innate permission.")
@click.option('--set-cloneable', type=bool, metavar='BOOLEXPR',
              help="A boolean-interpretable string whether a virtual folder can be cloned. "
                   "If not set, the cloneable property is not changed.")
def update_options(name, permission, set_cloneable):
    """Update an existing virtual folder.
    \b
    NAME: Name of the virtual folder to update.
    """
    with Session() as sess:
        try:
            vfolder_info = sess.VFolder(name).info()
            # Only the owner may change folder options.
            if not vfolder_info['is_owner']:
                print("You cannot update virtual folder that you do not own.")
                return
            sess.VFolder(name).update_options(
                name,
                permission=permission,
                cloneable=set_cloneable,
            )
            print_done("Updated.")
        except Exception as exc:
            print_error(exc)
            sys.exit(1)
|
import pytest
class Test_a():
    """Demonstrates pytest's per-method and per-class hook ordering."""
    # Runs before every test method.
    def setup(self):
        print("--setup---")
    # Runs once before any test in the class.
    def setup_class(self):
        print("--1setup_class--")
    # Runs after every test method.
    def teardown(self):
        print("---teardown---")
    # Runs once after all tests in the class.
    def teardown_class(self):
        print("--1teardown_class--")
    def test_001(self):
        assert True
    # Intentionally failing example.
    def test_002(self):
        assert False
    # NOTE(review): name lacks the `test_` prefix, so pytest never collects
    # it — confirm whether this is a deliberately disabled test.
    def ttest_003(self):
        assert True
class Test_b():
    """Second test class with no-op fixtures; both tests pass."""
    # Per-test hooks intentionally do nothing.
    def setup(self):
        pass
    def teardown(self):
        pass
    def test_004(self):
        assert True
    def test_005(self):
        assert True
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 29 17:56:44 2019
@author: KelvinOX25
"""
import time
import numpy as np
import matplotlib.pyplot as plt
from qcodes.instrument_drivers.tektronix.AWG3252_Isrc import AWG3252_Isrc
from qcodes.instrument_drivers.HP.HP34401 import HP34401
from qcodes.instrument.base import Instrument
# Close any instruments left over from a previous run so the VISA
# addresses can be re-opened; errors from an empty registry are ignored.
try:
    Instrument.close_all()
except KeyError:
    pass
except NameError:
    pass
# Current source (AWG behind a 1 GOhm bias resistor) and voltmeter.
Isrc = AWG3252_Isrc('gen', 'TCPIP0::192.168.13.32::inst0::INSTR', R_bias = 1e9)
Vmeter = HP34401('meter', 'GPIB0::8::INSTR')
Vmeter.init('fast 6')
# Sweep the current from 0 to 0.4 nA in 101 steps, recording one voltage
# reading per setpoint.
I_setpt = np.linspace(0, 4E-10,101)
V_rdg = []
for i in I_setpt:
    Isrc.I.set(i)
    time.sleep(0.050)  # let the source settle before reading
    V_rdg.append(Vmeter.v.get())
# Plot the raw I-V points.
fig, ax = plt.subplots()
ax.plot(I_setpt, V_rdg, '.')
#slopeN, interceptN, r_valueN, p_valueN, std_errN = stats.linregress()
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
import os
import pytest
import uvloop
from kafka.client import KafkaClient as PyKafkaClient
from kafka.cluster import ClusterMetadata
# StoreRecord import
from tonga.models.store.store_record import StoreRecord
from tonga.models.store.store_record_handler import StoreRecordHandler
# PersistencyType import
from tonga.models.structs.persistency_type import PersistencyType
# Tonga Kafka client
from tonga.services.coordinator.client.kafka_client import KafkaClient
# Serializer
from tonga.services.serializer.avro import AvroSerializer
from tonga.stores.global_store import GlobalStore
# Local & global store import
from tonga.stores.local_store import LocalStore
# KafkaStoreManager import
from tonga.stores.manager.kafka_store_manager import KafkaStoreManager
# Persistency import
from tonga.stores.persistency.memory import MemoryPersistency
from tonga.stores.persistency.rocksdb import RocksDBPersistency
from tonga.stores.persistency.shelve import ShelvePersistency
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Single shared uvloop event loop for all async fixtures/tests below.
t_loop = uvloop.new_event_loop()
# Create persistency test
test_memory_persistency = MemoryPersistency()
# NOTE(review): `__getattribute__('_set_initialize').__call__()` just calls
# the protected `_set_initialize()` — presumably to mark the persistency as
# initialized without a store manager; confirm against the Persistency API.
test_memory_persistency.__getattribute__('_set_initialize').__call__()
test_shelve_persistency = ShelvePersistency(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                                         'local_store.db'))
test_shelve_persistency.__getattribute__('_set_initialize').__call__()
test_rocksdb_persistency = RocksDBPersistency(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                                           'local_store'))
test_rocksdb_persistency.__getattribute__('_set_initialize').__call__()
# Local Store & global store with memory persistency test
test_local_store_memory_persistency = LocalStore(db_type=PersistencyType.MEMORY, loop=t_loop)
test_global_store_memory_persistency = GlobalStore(db_type=PersistencyType.MEMORY)
# Avro Serializer test
test_serializer = AvroSerializer(BASE_DIR + '/misc/schemas')
test_serializer_local_store_memory_persistency = LocalStore(db_type=PersistencyType.MEMORY, loop=t_loop)
test_serializer_global_store_memory_persistency = GlobalStore(db_type=PersistencyType.MEMORY)
# StoreBuilder test
store_builder_serializer = AvroSerializer(BASE_DIR + '/misc/schemas')
# tonga_kafka_client = KafkaClient(client_id='waiter', cur_instance=0, nb_replica=1, bootstrap_servers='localhost:9092')
# test_store_manager = KafkaStoreManager(client=tonga_kafka_client, topic_store='test-store',
#                                        persistency_type=PersistencyType.MEMORY, serializer=store_builder_serializer,
#                                        loop=t_loop, rebuild=True)
# The Kafka-backed store manager is disabled (see commented block above),
# so handlers below receive None.
test_store_manager = None
store_record_handler = StoreRecordHandler(test_store_manager)
store_builder_serializer.register_event_handler_store_record(StoreRecord, store_record_handler)
@pytest.fixture()
def event_loop():
    """Yield the shared module-level uvloop event loop for async tests.

    Uses ``pytest.fixture``: ``pytest.yield_fixture`` is deprecated and was
    removed in pytest 7; plain fixtures have supported ``yield`` for years.
    """
    loop = t_loop
    yield loop
@pytest.fixture
def get_local_memory_store_connection():
    # Shared LocalStore with in-memory persistency (module-level singleton).
    return test_local_store_memory_persistency
@pytest.fixture
def get_global_memory_store_connection():
    # Shared GlobalStore with in-memory persistency.
    return test_global_store_memory_persistency
@pytest.fixture
def get_avro_serializer():
    # Avro serializer configured with the schemas under misc/schemas.
    return test_serializer
@pytest.fixture
def get_avro_serializer_store():
    # (local store, global store) pair for serializer round-trip tests.
    return test_serializer_local_store_memory_persistency, test_serializer_global_store_memory_persistency
@pytest.fixture
def get_assignor_kafka_client():
    # NOTE(review): `assignor_py_kafka_client` is not defined anywhere in
    # this module — using this fixture raises NameError. Confirm whether the
    # commented-out Kafka client setup above was meant to define it.
    return assignor_py_kafka_client
@pytest.fixture
def get_assignor_cluster_metadata():
    # NOTE(review): `assignor_cluster_metadata` is likewise undefined here.
    return assignor_cluster_metadata
@pytest.fixture
def get_store_manager():
    # Currently None: the KafkaStoreManager construction above is commented out.
    return test_store_manager
|
# GDB startup snippet: load the libstdc++ pretty-printers from a local
# GCC checkout so STL containers display nicely in the debugger.
import sys
# NOTE(review): machine-specific path — adjust per host.
sys.path.insert(0, '/home/jesperes/dev/libstdc++-v3/python')
import libstdcxx.v6.printers
# None => register the printers globally (for every objfile).
libstdcxx.v6.printers.register_libstdcxx_printers(None)
|
# Demo: binary floating point vs. exact decimal and rational arithmetic.
print(0.1 + 0.2) #0.30000000000000004 -> binary floats carry rounding error
print(0.1 + 0.2 == 0.3) #False
import decimal
a = decimal.Decimal("0.1")
b = decimal.Decimal("0.2")
print(a) #0.1
print(b) #0.2
print(a + b) #0.3 -> exact decimal arithmetic gives 0.3
# Fraction (rational number) class
import fractions
a = fractions.Fraction(3, 10) #numerator, denominator
b = fractions.Fraction(-2, 20)
print(a) #3/10
print(b) #-1/10 (automatically reduced)
print(a + b) #1/5
print(a + 1) #13/10
print(a - 2) #-17/10
"""
some utilities to work with xarray objects
"""
import numpy as np
import xarray as xr
def strip_coords(X, coords=None, inplace=False, as_str=True):
    """
    strip blanks from string coordinates

    Parameters
    ----------
    X : DataArray or Dataset
    coords : iterable (Default None)
        Iterable of coordinates to alter.
        If `None`, apply to all (string) coordinates
    inplace : bool (Default False):
        if True, do inplace modification
    as_str : bool, default=True
        if `True`, apply .astype(str) to output.
        This helps if input has b'foo' types (which are annoying to work with)
    """
    out = X if inplace else X.copy()
    if coords is None:
        coords = out.coords.keys()
    for coord_name in coords:
        # Only byte-string ('S') coordinates are touched.
        if out[coord_name].dtype.kind != 'S':
            continue
        stripped = np.char.strip(out[coord_name])
        if as_str:
            stripped = stripped.astype(str)
        out[coord_name] = stripped
    if not inplace:
        return out
def where(self, condition, *args, **kwargs):
    """
    perform inplace where

    Parameters
    ----------
    self : dataset or datarray
        must have `where`` method
    condition: mask or function
        condition to apply
    *args, **kwargs: arguments to self.where

    Returns
    -------
    output : self.where(condition, *args, **kwargs)

    Usage
    -----
    self.pipe(where, lambda x: x > 0.0)
    """
    if not hasattr(self, 'where'):
        raise AttributeError('self must have `where` method')
    # A callable condition is evaluated against self first.
    cond = condition(self) if callable(condition) else condition
    return self.where(cond, *args, **kwargs)
def average(x, w=None,
            dim=None, axis=None,
            var=False, unbiased=True, std=False,
            name=None,
            mask_null=True):
    """
    (weighted) average of DataArray

    Parameters
    ----------
    x : xarray.DataArray
        array to average over
    w : xarray.DataArray, optional
        array of weights
    dim : str or list of strings, optional
        dimensions to average over. See `xarray.DataArray.sum`
    axis : int or list of ints, optional
        axis to average over. See `xarray.DataArray.sum`
    var : bool, default=False
        If `True`, calculate weighted variance as well
    std : bool, default=False
        If `True`, return standard deviation, i.e., `sqrt(var)`
    unbiased : bool, default=True
        If `True`, return unbiased variance
    name : str, optional
        if supplied, name of output average. Variance is named 'name_var' or 'name_std'
    mask_null : bool, default=True
        if `True`, mask values where x and w are all null across `dim` or `axis`.
        This prevents zero results from nan sums.

    Returns
    -------
    average : xarray.DataArray
        averaged data
    err : xarray.DataArray, optional
        weighted variance if `var==True` or standard deviation if `std==True`.
    """
    # Idiom fix: isinstance instead of `type(...) is ...` so DataArray
    # subclasses are accepted too.
    assert isinstance(x, xr.DataArray)
    if w is None:
        w = xr.ones_like(x)
    assert isinstance(w, xr.DataArray)
    # only consider weights with finite x
    # note that this will reshape w to same shape as x as well
    w = w.where(np.isfinite(x))
    # normalize w so the weights sum to 1 over the reduction dims
    w = w / w.sum(dim=dim, axis=axis)
    # output names
    if name:
        var_name = name + ('_std' if std else '_var')
    else:
        var_name = None
    # weighted mean
    m1 = (w * x).sum(dim=dim, axis=axis)
    if mask_null:
        # mask slices where x or w is entirely null, which would otherwise
        # produce a spurious 0 from the nan-sum
        msk = (~x.isnull().all(dim=dim, axis=axis)) & (~w.isnull().all(dim=dim, axis=axis))
        m1 = m1.where(msk)
    # weighted variance / std
    if var or std:
        m2 = (w * (x - m1)**2).sum(dim=dim, axis=axis)
        if unbiased:
            # reliability-weights bias correction V1^2 / (V1^2 - V2);
            # V1 == 1 because w is already normalized above.
            w1 = 1.0
            w2 = (w * w).sum(dim=dim, axis=axis)
            m2 *= w1 * w1 / (w1 * w1 - w2)
        if std:
            m2 = np.sqrt(m2)
        if mask_null:
            m2 = m2.where(msk)
        return m1.rename(name), m2.rename(var_name)
    else:
        return m1.rename(name)
|
"""
需求:小猫爱吃鱼,小猫爱喝水
"""
class Cat:
    """A cat that can eat and drink (console demo)."""

    def eat(self):
        # Report that the cat eats fish.
        print("小猫吃鱼")

    def drink(self):
        # Report that the cat drinks water.
        print("小猫喝水")
# Create an object
tom = Cat()
# Attributes can be attached dynamically with plain assignment (.attr = value)
tom.name = 'tom'
tom.eat()
tom.drink()
# print(tom)
# print("%x" % id(tom)) # %x formats the id as hexadecimal
print('-'*30)
# Create another, independent object (it has its own attribute namespace)
lazy_cat = Cat()
lazy_cat.age = 12
lazy_cat.eat()
lazy_cat.drink()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Unit test module.
Unit Tests in this module will often compare size and offset between the
libclang version and the ctypeslib-processed python version the types.
Because the objective of this framework is not to verify if libclang or the
python bindings work, there will be no testing of specific results of libclang.
E.g., ig libclang says a long is 4 bytes, we trust libclang.
"""
__author__ = "Loic Jaquemet"
__copyright__ = "Copyright (C) 2013 Loic Jaquemet"
__email__ = "loic.jaquemet+python@gmail.com"
__license__ = "GPL"
__maintainer__ = "Loic Jaquemet"
__status__ = "Production"
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
def alltests():
    """Discover and return every test case found under the ``test/`` directory."""
    return unittest.TestLoader().discover('test/')
# Script entry point: run the tests in this module with minimal output.
if __name__ == '__main__':
    unittest.main(verbosity=0)
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 23 03:29:24 2019
@author: Parth Bhandari
"""
import cv2
import numpy as np
# NOTE(review): sklearn.externals.joblib was deprecated and removed from
# scikit-learn; modern code uses `import joblib` -- confirm the pinned version.
from sklearn.externals import joblib
from keras.preprocessing import image
# Class index -> character: 1-26 lowercase, 27-36 digits, 37-62 uppercase.
dic = {1 : 'a', 2 : 'b', 3 : 'c', 4 : 'd',
       5 : 'e', 6 : 'f', 7 : 'g', 8 : 'h',
       9 : 'i', 10 : 'j', 11 : 'k', 12 : 'l',
       13 : 'm', 14 : 'n', 15 : 'o', 16 : 'p',
       17 : 'q', 18 : 'r', 19 : 's', 20 : 't',
       21 : 'u', 22 : 'v', 23 : 'w', 24 : 'x',
       25 : 'y', 26 : 'z', 27 : '0', 28 : '1',
       29 : '2', 30 : '3', 31 : '4', 32 : '5',
       33 : '6', 34 : '7', 35 : '8', 36 : '9',
       37 : 'A', 38 : 'B', 39 : 'C', 40 : 'D',
       41 : 'E', 42 : 'F', 43 : 'G', 44 : 'H',
       45 : 'I', 46 : 'J', 47 : 'K', 48 : 'L',
       49 : 'M', 50 : 'N', 51 : 'O', 52 : 'P',
       53 : 'Q', 54 : 'R', 55 : 'S', 56 : 'T',
       57 : 'U', 58 : 'V', 59 : 'W', 60 : 'X',
       61 : 'Y', 62 : 'Z'}
# Load the pre-trained character classifier.
joblib_file = "minor2.pkl"
joblib_model = joblib.load(joblib_file)
# NOTE(review): cv2.imread returns BGR, but COLOR_RGB2GRAY is used below;
# for grayscale conversion of an imread result COLOR_BGR2GRAY is the usual
# choice -- confirm intent.
Image=cv2.imread("abc.jpg")
G_Image=cv2.cvtColor(Image,cv2.COLOR_RGB2GRAY)
#Otsu Thresholding
# NOTE(review): a (1,1) Gaussian kernel is effectively a no-op blur.
blur = cv2.GaussianBlur(G_Image,(1,1),0)
ret,th = cv2.threshold(blur,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# NOTE(review): the 3-value return is the OpenCV 3.x API; OpenCV 4 returns
# only (contours, hierarchy) -- confirm the installed version.
image1,contours,hierarchy = cv2.findContours(th,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
counter=0
dim = (64, 64)
for contour in contours:
    [x, y, w, h] = cv2.boundingRect(contour)
    # skip boxes that are too wide, too tall, or too short to be a character
    if w/h > 2 or h>25 or h<5:
        continue
    try:
        # crop with a small margin around the box and resize to the model input
        resized = cv2.resize(Image[y-8:y + h+8, x-4:x + w+4], dim, interpolation=cv2.INTER_AREA)
        z = image.img_to_array(resized)
        z = np.expand_dims(z, axis=0)
        classes = joblib_model.predict_classes(z)
        if classes[0]==0:
            # class 0: retry the crop with cubic interpolation before predicting again
            resized = cv2.resize(Image[y - 8:y + h + 8, x - 4:x + w + 4], dim, interpolation=cv2.INTER_CUBIC)
            z = image.img_to_array(resized)
            z = np.expand_dims(z, axis=0)
            classes = joblib_model.predict_classes(z)
            print(dic.get(classes[0] + 1))
            cv2.imwrite("save/" + str(counter) + '.jpg', resized)
        else:
            print(dic.get(classes[0]+1))
            cv2.imwrite("save/"+str(counter)+'.jpg', resized)
        if classes[0]>0:
            # draw the recognized character just below the bounding box
            cv2.putText(Image, dic.get(classes[0]+1), (x-2, y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, 255, thickness=2)
        counter+=1
    except Exception as e:
        # crops near the image border can fail; log and continue
        print(str(e))
cv2.imshow('image',Image)
k = cv2.waitKey(0)
if k == 27:
    cv2.destroyAllWindows()
import re
from collections import OrderedDict
from functools import partial
from typing import Any, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from torch import Tensor
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
# Public API: the DenseNet class, its pretrained-weight enums, and the builders.
__all__ = [
    "DenseNet",
    "DenseNet121_Weights",
    "DenseNet161_Weights",
    "DenseNet169_Weights",
    "DenseNet201_Weights",
    "densenet121",
    "densenet161",
    "densenet169",
    "densenet201",
]
class _DenseLayer(nn.Module):
    """One DenseNet layer: BN-ReLU-1x1 bottleneck conv, then BN-ReLU-3x3 conv.

    Consumes the concatenation of all previous feature maps and produces
    ``growth_rate`` new channels.  With ``memory_efficient=True`` the
    bottleneck is recomputed during backward via checkpointing, trading
    compute for memory.
    """
    def __init__(
        self, num_input_features: int, growth_rate: int, bn_size: int, drop_rate: float, memory_efficient: bool = False
    ) -> None:
        super().__init__()
        self.norm1 = nn.BatchNorm2d(num_input_features)
        self.relu1 = nn.ReLU(inplace=True)
        # 1x1 bottleneck: compress concatenated inputs to bn_size * growth_rate channels
        self.conv1 = nn.Conv2d(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)
        self.norm2 = nn.BatchNorm2d(bn_size * growth_rate)
        self.relu2 = nn.ReLU(inplace=True)
        # 3x3 conv producing this layer's growth_rate output channels
        self.conv2 = nn.Conv2d(bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)
        self.drop_rate = float(drop_rate)
        self.memory_efficient = memory_efficient
    def bn_function(self, inputs: List[Tensor]) -> Tensor:
        """Concatenate prior feature maps and apply the 1x1 bottleneck."""
        concated_features = torch.cat(inputs, 1)
        bottleneck_output = self.conv1(self.relu1(self.norm1(concated_features)))  # noqa: T484
        return bottleneck_output
    # todo: rewrite when torchscript supports any
    def any_requires_grad(self, input: List[Tensor]) -> bool:
        """Return True if any input tensor requires grad (checkpointing is pointless otherwise)."""
        for tensor in input:
            if tensor.requires_grad:
                return True
        return False
    @torch.jit.unused  # noqa: T484
    def call_checkpoint_bottleneck(self, input: List[Tensor]) -> Tensor:
        """Run the bottleneck under torch.utils.checkpoint so activations are recomputed in backward."""
        def closure(*inputs):
            return self.bn_function(inputs)
        return cp.checkpoint(closure, *input)
    @torch.jit._overload_method  # noqa: F811
    def forward(self, input: List[Tensor]) -> Tensor:  # noqa: F811
        pass
    @torch.jit._overload_method  # noqa: F811
    def forward(self, input: Tensor) -> Tensor:  # noqa: F811
        pass
    # torchscript does not yet support *args, so we overload method
    # allowing it to take either a List[Tensor] or single Tensor
    def forward(self, input: Tensor) -> Tensor:  # noqa: F811
        if isinstance(input, Tensor):
            prev_features = [input]
        else:
            prev_features = input
        if self.memory_efficient and self.any_requires_grad(prev_features):
            if torch.jit.is_scripting():
                # checkpointing relies on Python closures, unavailable under scripting
                raise Exception("Memory Efficient not supported in JIT")
            bottleneck_output = self.call_checkpoint_bottleneck(prev_features)
        else:
            bottleneck_output = self.bn_function(prev_features)
        new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
        if self.drop_rate > 0:
            new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
        return new_features
class _DenseBlock(nn.ModuleDict):
    """A stack of densely connected layers; each layer sees all earlier outputs."""

    _version = 2

    def __init__(
        self,
        num_layers: int,
        num_input_features: int,
        bn_size: int,
        growth_rate: int,
        drop_rate: float,
        memory_efficient: bool = False,
    ) -> None:
        super().__init__()
        for layer_idx in range(num_layers):
            # layer i receives the initial features plus i * growth_rate channels
            self.add_module(
                "denselayer%d" % (layer_idx + 1),
                _DenseLayer(
                    num_input_features + layer_idx * growth_rate,
                    growth_rate=growth_rate,
                    bn_size=bn_size,
                    drop_rate=drop_rate,
                    memory_efficient=memory_efficient,
                ),
            )

    def forward(self, init_features: Tensor) -> Tensor:
        """Run every layer on the growing feature list and concatenate the results."""
        collected = [init_features]
        for layer in self.values():
            collected.append(layer(collected))
        return torch.cat(collected, 1)
class _Transition(nn.Sequential):
def __init__(self, num_input_features: int, num_output_features: int) -> None:
super().__init__()
self.norm = nn.BatchNorm2d(num_input_features)
self.relu = nn.ReLU(inplace=True)
self.conv = nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)
self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
class DenseNet(nn.Module):
    r"""Densenet-BC model class, based on
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.
    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
          (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
          but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_.
    """
    def __init__(
        self,
        growth_rate: int = 32,
        block_config: Tuple[int, int, int, int] = (6, 12, 24, 16),
        num_init_features: int = 64,
        bn_size: int = 4,
        drop_rate: float = 0,
        num_classes: int = 1000,
        memory_efficient: bool = False,
    ) -> None:
        super().__init__()
        _log_api_usage_once(self)
        # First convolution: 7x7 stride-2 conv + 3x3 stride-2 max-pool (4x downsample)
        self.features = nn.Sequential(
            OrderedDict(
                [
                    ("conv0", nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
                    ("norm0", nn.BatchNorm2d(num_init_features)),
                    ("relu0", nn.ReLU(inplace=True)),
                    ("pool0", nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
                ]
            )
        )
        # Each denseblock, tracking the running channel count
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(
                num_layers=num_layers,
                num_input_features=num_features,
                bn_size=bn_size,
                growth_rate=growth_rate,
                drop_rate=drop_rate,
                memory_efficient=memory_efficient,
            )
            self.features.add_module("denseblock%d" % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            # a transition (which halves the channels and spatial size) follows
            # every dense block except the last
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
                self.features.add_module("transition%d" % (i + 1), trans)
                num_features = num_features // 2
        # Final batch norm
        self.features.add_module("norm5", nn.BatchNorm2d(num_features))
        # Linear layer (classifier head)
        self.classifier = nn.Linear(num_features, num_classes)
        # Official init from torch repo: He init for convs, unit/zero affine for
        # batch norms, zero bias for the classifier.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
    def forward(self, x: Tensor) -> Tensor:
        """Backbone features -> ReLU -> global average pool -> linear classifier logits."""
        features = self.features(x)
        out = F.relu(features, inplace=True)
        out = F.adaptive_avg_pool2d(out, (1, 1))
        out = torch.flatten(out, 1)
        out = self.classifier(out)
        return out
def _load_state_dict(model: nn.Module, weights: WeightsEnum, progress: bool) -> None:
    """Load pretrained weights into ``model``, renaming legacy checkpoint keys.

    Old checkpoints used dots inside _DenseLayer module names ('norm.1',
    'conv.2', ...), which are no longer legal; rewrite those keys to the
    modern 'norm1'/'conv2' form before loading.
    """
    legacy_key = re.compile(
        r"^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$"
    )
    state_dict = weights.get_state_dict(progress=progress, check_hash=True)
    for old_key in list(state_dict.keys()):
        match = legacy_key.match(old_key)
        if match is None:
            continue
        # drop the dot between e.g. 'norm' and '1', keeping the parameter suffix
        state_dict[match.group(1) + match.group(2)] = state_dict.pop(old_key)
    model.load_state_dict(state_dict)
def _densenet(
    growth_rate: int,
    block_config: Tuple[int, int, int, int],
    num_init_features: int,
    weights: Optional[WeightsEnum],
    progress: bool,
    **kwargs: Any,
) -> DenseNet:
    """Construct a DenseNet and, when ``weights`` is given, load the checkpoint."""
    if weights is None:
        return DenseNet(growth_rate, block_config, num_init_features, **kwargs)
    # Pretrained checkpoints fix the classifier size; override any caller value.
    _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
    model = DenseNet(growth_rate, block_config, num_init_features, **kwargs)
    _load_state_dict(model=model, weights=weights, progress=progress)
    return model
# Metadata shared by every DenseNet pretrained-weight entry below.
_COMMON_META = {
    "min_size": (29, 29),
    "categories": _IMAGENET_CATEGORIES,
    "recipe": "https://github.com/pytorch/vision/pull/116",
    "_docs": """These weights are ported from LuaTorch.""",
}
class DenseNet121_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for DenseNet-121 (ported from LuaTorch)."""
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/densenet121-a639ec97.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 7978856,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 74.434,
                    "acc@5": 91.972,
                }
            },
            "_ops": 2.834,
            "_file_size": 30.845,
        },
    )
    DEFAULT = IMAGENET1K_V1
class DenseNet161_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for DenseNet-161 (ported from LuaTorch)."""
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/densenet161-8d451a50.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 28681000,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 77.138,
                    "acc@5": 93.560,
                }
            },
            "_ops": 7.728,
            "_file_size": 110.369,
        },
    )
    DEFAULT = IMAGENET1K_V1
class DenseNet169_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for DenseNet-169 (ported from LuaTorch)."""
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/densenet169-b2777c0a.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 14149480,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 75.600,
                    "acc@5": 92.806,
                }
            },
            "_ops": 3.36,
            "_file_size": 54.708,
        },
    )
    DEFAULT = IMAGENET1K_V1
class DenseNet201_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for DenseNet-201 (ported from LuaTorch)."""
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/densenet201-c1103571.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 20013928,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 76.896,
                    "acc@5": 93.370,
                }
            },
            "_ops": 4.291,
            "_file_size": 77.373,
        },
    )
    DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", DenseNet121_Weights.IMAGENET1K_V1))
def densenet121(*, weights: Optional[DenseNet121_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
    r"""Densenet-121 model from
    `Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.
    Args:
        weights (:class:`~torchvision.models.DenseNet121_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.DenseNet121_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.densenet.DenseNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.DenseNet121_Weights
        :members:
    """
    verified_weights = DenseNet121_Weights.verify(weights)
    # DenseNet-121 configuration: growth rate 32, blocks (6, 12, 24, 16), 64 stem channels.
    return _densenet(32, (6, 12, 24, 16), 64, verified_weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", DenseNet161_Weights.IMAGENET1K_V1))
def densenet161(*, weights: Optional[DenseNet161_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
    r"""Densenet-161 model from
    `Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.
    Args:
        weights (:class:`~torchvision.models.DenseNet161_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.DenseNet161_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.densenet.DenseNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.DenseNet161_Weights
        :members:
    """
    verified_weights = DenseNet161_Weights.verify(weights)
    # DenseNet-161 configuration: growth rate 48, blocks (6, 12, 36, 24), 96 stem channels.
    return _densenet(48, (6, 12, 36, 24), 96, verified_weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", DenseNet169_Weights.IMAGENET1K_V1))
def densenet169(*, weights: Optional[DenseNet169_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
    r"""Densenet-169 model from
    `Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.
    Args:
        weights (:class:`~torchvision.models.DenseNet169_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.DenseNet169_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.densenet.DenseNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.DenseNet169_Weights
        :members:
    """
    verified_weights = DenseNet169_Weights.verify(weights)
    # DenseNet-169 configuration: growth rate 32, blocks (6, 12, 32, 32), 64 stem channels.
    return _densenet(32, (6, 12, 32, 32), 64, verified_weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", DenseNet201_Weights.IMAGENET1K_V1))
def densenet201(*, weights: Optional[DenseNet201_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
    r"""Densenet-201 model from
    `Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.
    Args:
        weights (:class:`~torchvision.models.DenseNet201_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.DenseNet201_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.densenet.DenseNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.DenseNet201_Weights
        :members:
    """
    verified_weights = DenseNet201_Weights.verify(weights)
    # DenseNet-201 configuration: growth rate 32, blocks (6, 12, 48, 32), 64 stem channels.
    return _densenet(32, (6, 12, 48, 32), 64, verified_weights, progress, **kwargs)
|
# Generated by Django 3.1.2 on 2020-11-11 18:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop ``Product.comment`` and add an integer ``sort`` category field."""

    dependencies = [
        ('shopping', '0004_product_comment'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='product',
            name='comment',
        ),
        migrations.AddField(
            model_name='product',
            name='sort',
            # NOTE(review): the default is the *string* '0' on an IntegerField,
            # and 0/'0' is not among the declared choices -- likely intended as
            # an int; confirm before editing an already-applied migration.
            field=models.IntegerField(choices=[(1, 'مذهبی'), (2, 'رمان'), (3, 'درسی')], default='0',
                                      verbose_name='دسته بندی'),
        ),
    ]
|
import sys, os
sys.path.append(os.pardir)
from dataset.mnist import load_mnist
from DeepConvNetwork import DeepConvNet
from common.trainer import Trainer
# Load MNIST as (channels, H, W) images rather than flattened vectors.
(a_train, b_train), (a_test, b_test) = load_mnist(flatten=False)
network = DeepConvNet()
# Train for 2 epochs with Adam; only 1000 samples per epoch are used when
# evaluating accuracy, to keep evaluation fast.
trainer = Trainer(network, a_train, b_train, a_test, b_test,
                  epochs=2, mini_batch_size=100,
                  optimizer='Adam', optimizer_param={'lr':0.001},
                  evaluate_sample_num_per_epoch=1000)
trainer.train()
# Save parameters
network.save_params("deep_convnet_params.pkl")
print("Saved Network Parameters!")
|
# List, dict, and set comprehensions
# old list ---> new list
# 1. List comprehensions
# Form: [expression for var in old_list] or [expression for var in old_list if condition]
# Filter out names whose length is <= 3
names = ['xiaomei', 'xiax', 'bob']
result = [name for name in names if len(name) > 3] # first `name` is the stored expression; the second is the loop variable drawn from names
print(result)
# Capitalize the first letter of the kept names
names = ['xiaomei', 'xiax', 'bob']
result = [name.capitalize() for name in names if len(name) > 3] # first `name` is the stored expression; the second is the loop variable drawn from names
print(result)
# Numbers in 1-100 divisible by both 3 and 5 (the original comment said
# "divisible by 3", but the condition also requires % 5 == 0)
num = [num2 for num2 in range(1, 101) if num2 % 3 == 0 and num2 % 5 == 0]
print(num)
# Tuples pairing evens with odds, built with nested loops
def func():
    newlist = []
    for i in range(1, 10):
        for j in range(1, 10):
            if i % 2 == 0:
                if j % 2 != 0:
                    newlist.append((i, j))
    return newlist
X = func()
print(X)
# The same idea as a comprehension (note: ranges differ from func above)
newlist1 = [(x, y) for x in range(5) if x % 2 == 0 for y in range(10) if y % 2 != 0]
print(newlist1)
# Exercise: collect the last element of each tuple in list1
list1 = [(1, 2, 3), (4, 5, 6), (7, 8, 9), (1, 3, 5)]
newlist2 = [i[-1] for i in list1]
print(newlist2)
dict1 = {'name': 'tom', 'salary': 3000}
dict2 = {'name': 'ptom', 'salary': 4000}
dict3 = {'name': 'itom', 'salary': 5000}
dict4 = {'name': 'utom', 'salary': 6000}
list2 = [dict1, dict2, dict3, dict4]
# if salary >= 5000 add 200; otherwise add 500
newlist3 = [i['salary'] + 200 if i['salary'] >= 5000 else i['salary'] + 500 for i in list2]
print(newlist3)
# Set comprehension {}: like a list comprehension, but duplicates are removed
# NOTE(review): `list` shadows the builtin of the same name from here on
list = [1, 2, 4, 5, 6, 6, 7, 9]
set1 = {x-1 for x in list if x>1}
print(set1) # duplicates are removed from the output
# Dict comprehension
dict1 = {'name': 'xiaohong', 'sex': 'woman', 'age': 12, 'height': 12}
print(dict1.items()) # prints every key:value pair
newlist1 = {value: key for key, value in dict1.items()}
print(newlist1) # duplicate values collapse: {'xiaohong': 'name', 'woman': 'sex', 12: 'height'}
|
from .driverchrome import DriverChrome
from .driverfirefox import DriverFirefox
from .driverIE import DriverIE
from .driver import IDriver
from .driverFactory import DriverFactory |
from decimal import Decimal
def convert_to_frames(cut_list, frame_rate):
    """Convert second-based cut boundaries into integer frame indices.

    The very first cut is snapped to frame 0 when it starts within
    START_THRESHOLD seconds of the beginning (pyannote-audio often reports
    a small spurious start offset for the first segment).
    """
    START_THRESHOLD = 0.5
    frames = []
    for index, cut in enumerate(cut_list):
        end_frame = int(frame_rate * Decimal(cut['end']))
        snap_first_cut = (
            index == 0
            and cut['start'] < START_THRESHOLD
            and cut['end'] > START_THRESHOLD
        )
        if snap_first_cut:
            frames.append({'start': 0, 'end': end_frame})
        else:
            frames.append({'start': int(frame_rate * Decimal(cut['start'])), 'end': end_frame})
    return frames
def cleanup_cuts(cut_list):
    """Merge overlapping cuts into a minimal list of disjoint intervals.

    Start/end events are swept in time order while tracking the nesting depth
    of currently-open cuts; a merged interval is emitted each time the depth
    returns to zero.  Assumes every cut has start <= end.
    """
    events = []
    for cut in cut_list:
        events.append({'start': cut['start']})
        events.append({'end': cut['end']})
    # stable sort keeps start/end ordering deterministic for equal timestamps
    events.sort(key=lambda ev: ev.get('start') if 'start' in ev else ev.get('end'))
    open_starts = []
    merged = []
    for ev in events:
        if 'start' in ev:
            open_starts.append(ev)
            continue
        last_open = open_starts.pop()
        if not open_starts:
            # depth back to zero: last_open is the earliest start of this group
            merged.append({'start': last_open['start'], 'end': ev['end']})
    return merged
|
from django.shortcuts import render
# Create your views here.
from rest_framework import viewsets, mixins
from rest_framework.permissions import IsAuthenticated
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.authentication import SessionAuthentication
from user_operation.serializers import UserFavSerializer, AddressSerializer
from .serializers import UserFavSerializer, UserFavDetailSerializer, LeavingMessageSerializer
from .models import UserFav, UserMessages, UserAddress
from utils.permissions import IsOwnerOrReadOnly
class UserFavViewset(viewsets.GenericViewSet, mixins.CreateModelMixin, mixins.RetrieveModelMixin, mixins.ListModelMixin,
                     mixins.DestroyModelMixin):
    '''
    User favorites API.
    List:
        Return user favorite list
    Retrieve:
        Return whether an item is favorite one (looked up by goods_id)
    Create:
        Add into favorite list
    Destroy:
        Remove an item from the favorite list
    '''
    permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
    # JWT for API clients plus session auth for the browsable API
    authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
    # favorites are addressed by the related goods id instead of their pk
    lookup_field = 'goods_id'
    def get_queryset(self):
        # users can only ever see their own favorites
        return UserFav.objects.filter(user=self.request.user)
    def get_serializer_class(self):
        # detail serializer when listing; plain serializer otherwise
        # NOTE(review): the "create" branch returns the same class as the
        # fallback, so it is redundant
        if self.action == "list":
            return UserFavDetailSerializer
        elif self.action == "create":
            return UserFavSerializer
        return UserFavSerializer
class LeavingMessageViewset(mixins.ListModelMixin, mixins.DestroyModelMixin, mixins.CreateModelMixin,
                            viewsets.GenericViewSet):
    """
    User messages API.
    List:
        Return user messages
    Create:
        Add messages
    Delete:
        Delete messages
    """
    permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
    # JWT for API clients plus session auth for the browsable API
    authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
    serializer_class = LeavingMessageSerializer
    def get_queryset(self):
        # users can only ever see their own messages
        return UserMessages.objects.filter(user=self.request.user)
class AddressViewset(viewsets.ModelViewSet):
    """
    Shipping Address Management (full CRUD via ModelViewSet):
    List:
        return shipping addresses
    Create:
        Add shipping addresses
    Update:
        Update shipping addresses
    Delete:
        Remove shipping addresses
    """
    permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
    # JWT for API clients plus session auth for the browsable API
    authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
    serializer_class = AddressSerializer
    def get_queryset(self):
        # users can only ever see their own addresses
        return UserAddress.objects.filter(user=self.request.user)
|
from requests import *
from json import *
from re import *
url="media/videos"
# substrings to look for in comments (currently an unused placeholder)
array_world_to_find=[
""
]
# video watch-page URLs to scan for popular comments
array_url=[
"""List of your videos"""
]
# only report comments with at least this many likes
min_like=100
s=Session()
# parallel arrays indexed per commenter: name, like count, appearance count,
# and all of their comments joined with '|'
array_username=[]
array_like=[]
array_appear=[]
array_comment=[]
for full_url in array_url:
    # warm-up request (pbj=1 asks for the polymer JSON shell and sets cookies)
    obj=s.get(full_url+"&pbj=1",headers={
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0",
    "X-YouTube-Client-Name":"1",
    },
    cookies=s.cookies.get_dict())
    obj=s.get(full_url,headers={
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0",
    "X-YouTube-Client-Name":"1",
    },
    cookies=s.cookies.get_dict())
    # scrape the CSRF token and first comment-page continuation out of the HTML
    XSRF_TOKEN=findall("XSRF_TOKEN\":\"([a-zA-Z0-9\-\_:\,;=\%]+)",obj.text)[0]
    continuation=findall("continuation\":\"([a-zA-Z0-9\-\,\._:;=\%]+)",obj.text)[0]
    print("[+] xsrf token : "+XSRF_TOKEN)
    print("[+] continuation :"+continuation)
    # fetch the first page of comments from the (legacy, internal) ajax endpoint
    comments=s.post("https://www.youtube.com/comment_service_ajax?action_get_comments=1&pbj=1&ctoken="+continuation+"&continuation="+continuation,headers={
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0",
    "X-YouTube-Client-Name":"1",
    },
    cookies=s.cookies.get_dict(),data={"session_token":XSRF_TOKEN})
    js=loads(comments.text)
    XSRF_TOKEN=js["xsrf_token"]
    next_url="https://www.youtube.com/comment_service_ajax?action_get_comments=1&pbj=1&ctoken="+js["response"]["continuationContents"]["itemSectionContinuation"]["continuations"][0]["nextContinuationData"]["continuation"]+"&continuation="+js["response"]["continuationContents"]["itemSectionContinuation"]["continuations"][0]["nextContinuationData"]["continuation"]
    # first page: report popular comments and accumulate per-commenter stats
    for item in js["response"]["continuationContents"]["itemSectionContinuation"]["contents"]:
        if item["commentThreadRenderer"]["comment"]["commentRenderer"]["likeCount"] > min_like:
            print("[-] user-name :"+item["commentThreadRenderer"]["comment"]["commentRenderer"]["authorText"]["simpleText"]+"like count : "+str(item["commentThreadRenderer"]["comment"]["commentRenderer"]["likeCount"])+" ======> "+item["commentThreadRenderer"]["comment"]["commentRenderer"]["contentText"]["runs"][0]["text"])
        if item["commentThreadRenderer"]["comment"]["commentRenderer"]["authorText"]["simpleText"] in array_username:
            array_appear[array_username.index(item["commentThreadRenderer"]["comment"]["commentRenderer"]["authorText"]["simpleText"])] = array_appear[array_username.index(item["commentThreadRenderer"]["comment"]["commentRenderer"]["authorText"]["simpleText"])] + 1
            array_comment[array_username.index(item["commentThreadRenderer"]["comment"]["commentRenderer"]["authorText"]["simpleText"])]=array_comment[array_username.index(item["commentThreadRenderer"]["comment"]["commentRenderer"]["authorText"]["simpleText"])]+"|"+item["commentThreadRenderer"]["comment"]["commentRenderer"]["contentText"]["runs"][0]["text"]
        else:
            array_appear.append(1)
            array_username.append(item["commentThreadRenderer"]["comment"]["commentRenderer"]["authorText"]["simpleText"])
            array_like.append(str(item["commentThreadRenderer"]["comment"]["commentRenderer"]["likeCount"]))
            array_comment.append(item["commentThreadRenderer"]["comment"]["commentRenderer"]["contentText"]["runs"][0]["text"])
    # follow continuation pages until the response no longer contains one
    while next_url != None:
        comments=s.post(
            next_url,
            headers={
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0",
            },
            cookies=s.cookies.get_dict(), data={"session_token": XSRF_TOKEN})
        js = loads(comments.text)
        js["xsrf_token"]
        try:
            continuation = js["endpoint"]["urlEndpoint"]["url"].split("?")[1].split("&")[1].split("=")[1]
            next_url = "https://www.youtube.com/comment_service_ajax?action_get_comments=1&pbj=1&ctoken=" + js["response"]["continuationContents"]["itemSectionContinuation"]["continuations"][0]["nextContinuationData"]["continuation"] + "&continuation=" + js["response"]["continuationContents"]["itemSectionContinuation"]["continuations"][0]["nextContinuationData"]["continuation"]
            for item in js["response"]["continuationContents"]["itemSectionContinuation"]["contents"]:
                if item["commentThreadRenderer"]["comment"]["commentRenderer"]["likeCount"] > min_like:
                    print("[-] user-name :" + item["commentThreadRenderer"]["comment"]["commentRenderer"]["authorText"]["simpleText"]+"like count : "+str(item["commentThreadRenderer"]["comment"]["commentRenderer"]["likeCount"])+" ======> "+item["commentThreadRenderer"]["comment"]["commentRenderer"]["contentText"]["runs"][0]["text"])
                if item["commentThreadRenderer"]["comment"]["commentRenderer"]["authorText"][
                    "simpleText"] in array_username:
                    array_appear[array_username.index(
                        item["commentThreadRenderer"]["comment"]["commentRenderer"]["authorText"]["simpleText"])] = array_appear[array_username.index(item["commentThreadRenderer"]["comment"]["commentRenderer"]["authorText"]["simpleText"])] + 1
                    array_comment[array_username.index(
                        item["commentThreadRenderer"]["comment"]["commentRenderer"]["authorText"]["simpleText"])] = \
                        array_comment[array_username.index(
                            item["commentThreadRenderer"]["comment"]["commentRenderer"]["authorText"][
                                "simpleText"])] + "|" + \
                        item["commentThreadRenderer"]["comment"]["commentRenderer"]["contentText"]["runs"][0]["text"]
                else:
                    array_appear.append(1)
                    array_username.append(
                        item["commentThreadRenderer"]["comment"]["commentRenderer"]["authorText"]["simpleText"])
                    array_like.append(str(item["commentThreadRenderer"]["comment"]["commentRenderer"]["likeCount"]))
                    array_comment.append(
                        item["commentThreadRenderer"]["comment"]["commentRenderer"]["contentText"]["runs"][0][
                            "text"])
        except KeyError:
            # no further continuation in the response: we reached the last page
            next_url=None
# Report commenters who appeared more than once, with each of their comments
# (the '|' separator is the one used when accumulating above).
for i,j,k in zip(array_username,array_appear,array_comment):
    if j > 1:
        print("["+i+"] ")
        print("------------")
        for o in k.split("|"):
            print("[*] "+o)
|
# dict2 -- iterating over a dictionary
dict1 = {"name": "xianqian", "age": 25,"sex": "girl"}
print(type(dict1))
for i in dict1:
    print(i, dict1[i])
dict1[None] = "laotian" # None is a valid key
dict1[None] = "laowen" # keys are unique: assigning to None again overwrites the earlier value
dict1["grade"] = None # values may be None
dict1["class"] = None # values may repeat across keys
print(dict1)
|
class BinarySearchTree:
    """Binary search tree storing each distinct key once with an occurrence
    count in ``value``.

    Empty leaves are represented by ``Node(None)`` sentinels rather than
    ``None`` children, so traversal code checks ``node.key`` instead of the
    node itself.
    """

    def __init__(self, array):
        self.root_node = Node(None)
        self.counter = 0  # scratch index used by the in-order traversal
        for key in array:
            self.insert(self.root_node, key)

    def insert(self, current_node, key):
        """Recursive insertion with in-place count updating
        Params: current_node - node function is applied to
                key - to be inserted
        (Node, string) -> ()
        """
        if not current_node.key:
            # sentinel leaf reached: claim it and grow two new sentinels
            current_node.key = key
            current_node.child_left = Node(None)
            current_node.child_right = Node(None)
        else:
            if key < current_node.key:
                self.insert(current_node.child_left, key)
            elif key == current_node.key:
                # duplicate key: bump its occurrence count
                current_node.value += 1
            else:
                self.insert(current_node.child_right, key)

    def binary_search(self, node, key):
        """Recursively return the occurrence count stored under ``key``.

        Params: node - current node function applied to
                key - key being searched for
        (Node, string) -> int or None

        Returns None when the key is absent.
        """
        # Bug fixes vs. the original:
        #  * the recursive calls discarded their results, so any key found
        #    below the root reported None;
        #  * reaching a Node(None) sentinel compared key < None and raised
        #    TypeError for absent keys -- now it returns None instead.
        if node.key is None:
            return None
        if key == node.key:
            return node.value
        if key < node.key:
            return self.binary_search(node.child_left, key)
        return self.binary_search(node.child_right, key)

    def __iter__(self):
        """In-order iteration"""
        for index in range(len(self)):
            yield self[index]

    def __getitem__(self, index):
        """Returns item at index with in-order traversal
        int -> Node
        """
        self.counter = 0
        return self.traverse_to(self.root_node, index+1)

    def traverse_to(self, node, index):
        """In-order iteration to index
        params: node - curent Node
                index - in-order node to be returned
                counter - to track current index pos
        (Node, int) -> Node
        """
        # NOTE(review): `condition` is computed once before the recursion and
        # not refreshed as self.counter advances, so deep traversals may visit
        # more nodes than necessary -- TODO confirm intended behavior.
        condition = self.counter < index
        if condition:
            if node.child_left.key or node.child_right.key:
                pos1 = None
                pos2 = None
                if node.child_left.key and condition:
                    pos1 = self.traverse_to(node.child_left, index)
                self.counter += 1
                if self.counter == index:
                    return node
                if node.child_right.key and condition:
                    pos2 = self.traverse_to(node.child_right, index)
                return pos1 or pos2
            else:
                self.counter += 1
                if self.counter == index:
                    return node

    def __len__(self):
        """Returns length of self
        () -> int
        """
        return self.length(self.root_node)

    def length(self, node):
        """Returns length of tree
        Params: node - current nodes
        Node -> int
        """
        counter = 1
        if node.child_left.key:
            counter += self.length(node.child_left)
        if node.child_right.key:
            counter += self.length(node.child_right)
        return counter

    def __str__(self):
        nodes = ""
        for index in range(len(self)):
            node = self[index]
            nodes += "({}, {}) ".format(node.key, node.value)
        return nodes


class Node:
    """Tree node: a key, its occurrence count, and two children.

    ``Node(None)`` acts as the empty-leaf sentinel.
    """

    def __init__(self, key=None):
        self.key = key
        self.value = 1
        self.child_left = None
        self.child_right = None
|
#To use % in string formatting
# NOTE: Python 2 script (raw_input and print statements); not valid Python 3.
a=raw_input('What is your name? ')  # prompt for the user's name
b=raw_input('What is your favorite sport? ')  # prompt for a favorite sport
print "Sooooo your name is %s, and you really enjoy playing %s..."%(a, b)
print ""
print "I AM A GENIUS!"
|
from datetime import datetime
import unittest
from zoomus import components, util
import responses
def suite():
    """Define all the tests of the module."""
    all_tests = unittest.TestSuite()
    all_tests.addTest(unittest.makeSuite(AddPanelistsV2TestCase))
    return all_tests
class AddPanelistsV2TestCase(unittest.TestCase):
    """Tests for WebinarComponentV2.add_panelists."""

    def setUp(self):
        # Component under test, pointed at a fake base URI so no real network
        # traffic happens outside the mocked responses.
        self.component = components.webinar.WebinarComponentV2(
            base_uri="http://foo.com",
            config={
                "api_key": "KEY",
                "api_secret": "SECRET",
                "version": util.API_VERSION_2,
            },
        )

    @responses.activate
    def test_can_add_panelists(self):
        """The panelist payload is serialized into the POST body."""
        responses.add(
            responses.POST,
            "http://foo.com/webinars/ID/panelists",
        )
        response = self.component.add_panelists(
            id="ID", panelists=[{"name": "Mary", "email": "test@test.com"}]
        )
        self.assertEqual(
            response.request.body,
            '{"id": "ID", "panelists": [{"name": "Mary", "email": "test@test.com"}]}',
        )

    def test_requires_id(self):
        """Calling without an id raises a ValueError."""
        # assertRaisesRegexp was a deprecated alias removed in Python 3.12;
        # assertRaisesRegex is available since 3.2, so this stays compatible.
        with self.assertRaisesRegex(ValueError, "'id' must be set"):
            self.component.add_panelists()
if __name__ == "__main__":
    # Run this module's tests directly from the command line.
    unittest.main()
|
from time import sleep
from nameko.events import EventDispatcher, event_handler
from nameko.rpc import rpc
class ServiceA:
    """ Event dispatching service. """
    name = "service_a"
    # Nameko event-dispatcher dependency, injected per worker.
    dispatch = EventDispatcher()
    @rpc
    def dispatching_method(self, payload):
        # Broadcast the payload to "event_type" listeners, then reply
        # to the RPC caller with the same payload.
        self.dispatch("event_type", payload)
        return {"result": payload}
class ServiceB:
    """ Event listening service. """
    name = "service_b"
    @event_handler("service_a", "event_type")
    def handle_event(self, payload):
        # Simulate a slow handler: log, wait a second, acknowledge.
        print(f"working... {payload}")
        sleep(1)
        print("service b received:", payload)
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from GradientHelpers import abs_sobel_thresh, mag_thresh, dir_threshold
# Read in an image
image = mpimg.imread('../images/signs_vehicles_xygrad.png')
# Choose a Sobel kernel size
ksize = 3 # Choose a larger odd number to smooth gradient measurements
# Apply each of the thresholding functions
gradx = abs_sobel_thresh(image, orient='x', thresh_min=30, thresh_max=100)
grady = abs_sobel_thresh(image, orient='y', thresh_min=30, thresh_max=100)
mag_binary = mag_thresh(image, sobel_kernel=ksize, mag_thresh=(30, 100))
dir_binary = dir_threshold(image, sobel_kernel=ksize, thresh=(0.7, 1.3))
# Keep pixels passing both directional thresholds, or both the
# magnitude and direction thresholds.
# NOTE(review): zeros_like(image) copies the image's shape/dtype while
# the masks are presumably 2-D binaries — confirm the shapes agree.
combined = np.zeros_like(image)
combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1
plt.imshow(combined)
plt.savefig("../images/combined_thresh.jpg")
'''
@Description: 二分查找算法
@Date: 2019-07-29 16:07:55
@Author: Wong Symbol
@LastEditors: Wong Symbol
@LastEditTime: 2020-06-13 16:39:09
'''
# -*- coding:utf-8 -*-
#
'''
二分查找算法:
基于有序数据集合的查找算法
底层必须依赖数据结构
对于较小规模的数据查找,推荐使用直接遍历的方式
比较适合处理静态数据(无频繁的数据插入、删除操作)
易错点:
1. 最外层 while 的循环退出条件;同时注意和各排序算法的临界条件的异同(如快速排序)
2. mid取值:low和high很大时有可能溢出(Python中是不会存在溢出情况的)
'''
'''
总结:
初始化的 high 的赋值是 len(arr)-1,而不是 len(arr);
前者相当于两端都是闭区间 [left, right];后者相当于左闭右开的区间 [left, right)
'''
# while 是 小于等于 的情况:
def BinarySearch(arr, value):
    """Classic binary search over a sorted list.

    Both ends of the window are inclusive ([low, high]), so the loop
    runs while low <= high.  Returns the index of *value*, or -1 when
    it is absent.
    """
    low, high = 0, len(arr) - 1
    while low <= high:
        # Overflow-safe midpoint form (moot in Python, kept for style).
        mid = low + (high - low) // 2
        current = arr[mid]
        if current == value:
            return mid
        if current > value:
            high = mid - 1
        else:
            low = mid + 1
    return -1
# while 是 小于 的情况:
def BinarySearch(arr, value):
    """Binary-search variant whose loop stops at low == high.

    Because the loop condition lacks "<=", the final candidate position
    (low == high) is never examined inside the loop, so it is checked
    explicitly before returning.  Returns the index of *value*, or -1.

    Fix: the original indexed ``arr[low]`` unconditionally in the final
    check, raising IndexError on an empty list.
    """
    low = 0
    high = len(arr) - 1  # closed interval [low, high]
    while low < high:
        mid = low + (high - low) // 2
        if arr[mid] == value:
            return mid
        elif arr[mid] > value:
            high = mid - 1
        elif arr[mid] < value:
            low = mid + 1
        else:
            # Unreachable for totally ordered values such as ints.
            print('Something Error...')
            break
    # Patch for the missed low == high case; also guards the empty list.
    if low < len(arr) and arr[low] == value:
        return low
    return -1
def BinarySearch(arr, value):
    """Half-open ([low, high)) binary search.

    Returns the index of *value* when found; otherwise returns the
    position where it could be inserted to keep *arr* sorted.
    """
    low, high = 0, len(arr)
    while low < high:
        mid = (low + high) // 2
        current = arr[mid]
        if current == value:
            return mid
        if current > value:
            high = mid
        else:
            low = mid + 1
    return low
'''
总结:
对于 high = len(arr),则 while 必须是 low < high,不能是 low <= high
对于 high = len(arr)-1,则 while 可以是 low < high, 也可以是 low <= high
'''
if __name__ == '__main__':
    # Demo: 9 is absent, so the last-defined (half-open) BinarySearch
    # prints the insertion position, 6.
    arr = [1,3,4,5,7,8]
    print(BinarySearch(arr, 9))
from rest_framework import serializers, viewsets
from .models import Event
class EventSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes Event model instances for the REST API."""
    class Meta:
        model = Event
        # Explicit whitelist of fields exposed by the API.
        fields = [
            'id',
            'title',
            'description',
            'created',
            'modified',
            'active',
        ]
class EventViewSet(viewsets.ModelViewSet):
    """CRUD endpoints over all Event objects."""

    queryset = Event.objects.all()
    serializer_class = EventSerializer
    # Every HTTP verb DRF understands is allowed on this resource.
    http_method_names = [
        'get', 'post', 'put', 'patch', 'delete',
        'head', 'options', 'trace',
    ]
|
# Python Coroutines and Tasks.
# Coroutines declared with async/await syntax are the preferred way of writing asyncio applications.
#
# To actually run a coroutine, asyncio provides three main mechanisms:
#
# > The asyncio.run() function to run the top-level entry point “main()” function.
# > Awaiting on a coroutine.
# > The asyncio.create_task() function to run coroutines concurrently as asyncio Tasks.
# Awaitables.
# We say that an object is an awaitable object if it can be used in an await expression.
# Many asyncio APIs are designed to accept awaitables.
#
# There are three main types of awaitable objects: coroutines, Tasks, and Futures.
#
# Coroutines:
# Python coroutines are awaitables and therefore can be awaited from other coroutines.
#
# Tasks:
# Tasks are used to schedule coroutines concurrently.
# When a coroutine is wrapped into a Task with functions like asyncio.create_task() the coroutine is automatically scheduled to run soon:
#
# Futures:
# A Future is a special low-level awaitable object that represents an eventual result of an asynchronous operation.
# When a Future object is awaited it means that the coroutine will wait until the Future is resolved in some other place.
# Future objects in asyncio are needed to allow callback-based code to be used with async/await.
# Normally there is no need to create Future objects at the application level code.
# Future objects, sometimes exposed by libraries and some asyncio APIs, can be awaited:
#
# FUTURES EXAMPLE:
#
async def main():
    # NOTE(review): illustrative snippet only — asyncio is not imported
    # here and both awaited callables are placeholders, so this file
    # will not run as-is.
    await function_that_returns_a_future_object()
    # this is also valid:
    await asyncio.gather(
        function_that_returns_a_future_object(),
        some_python_coroutine()
    )
|
"""
Evaluation Script of Auto Encoder Model (ae.py)
"""
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
from model import AutoEncoder, CAE
from load_data import ImbalancedCIFAR10
# Use the first GPU when available, otherwise fall back to CPU.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
# Equal per-class ratios: effectively the plain CIFAR-10 eval split.
train_imbalance_class_ratio = np.array([1.] * 10)
train_imbalanced_dataset = ImbalancedCIFAR10(train_imbalance_class_ratio, train=False)
train_imbalanced_loader = DataLoader(train_imbalanced_dataset, batch_size=64, shuffle=False, num_workers=4)
# Load Model
# net = AutoEncoder()
net = CAE()
net.load_state_dict(torch.load('model_weights/auto_encoder'))
net = net.to(device)
def imshow(img):
    """Render a CHW image tensor with matplotlib (converted to HWC)."""
    #img = img / 2 + 0.5
    pixels = img.numpy()
    plt.imshow(pixels.transpose(1, 2, 0))
    plt.show()
# Test Model
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
dataiter = iter(train_imbalanced_loader)
# Fix: iterator.next() is Python 2 only (and absent on DataLoader
# iterators); use the builtin next() instead.
images, labels = next(dataiter)
# Show one input batch, run it through the autoencoder, show the output.
imshow(torchvision.utils.make_grid(images))
outputs = net(images.to(device))
imshow(torchvision.utils.make_grid(outputs.cpu().data))
|
from django.apps import AppConfig as BaseAppConfig
from django.utils.translation import ugettext_lazy as _
class AppConfig(BaseAppConfig):
    # Dotted module path and unique app label for pinax.badges.
    name = "pinax.badges"
    label = "pinax_badges"
    # Human-readable, translatable name (shown e.g. in Django admin).
    verbose_name = _("Pinax Badges")
|
import os
from dotenv import load_dotenv
# Pull variables from a local .env file into os.environ.
load_dotenv()
# External service credentials (Telegram bot token, HGBRASIL key).
TELEGRAM_TOKEN = os.getenv("TELEGRAM_TOKEN")
HGBRASIL = os.getenv("HGBRASIL")
# Database connection settings.
HOST = os.getenv("HOST")
DATABASE = os.getenv("DATABASE")
USER = os.getenv("USER")
PASSWORD = os.getenv("PASSWORD")
|
activity_pattern = r'^activity/$'
|
import os
def handle(form):
    """Render a DPjudge page for the CGI *form*.

    SystemExit is treated as normal end of page generation; any other
    exception is reported to the browser as an HTML notice with the
    traceback hidden inside an HTML comment.
    NOTE: Python 2 code (print statements, os.sys.exc_traceback).
    """
    import DPjudge
    try: DPjudge.Page(form)
    except SystemExit: pass
    except:
        import traceback
        print """
<H3>DPjudge Error</H3><p class=bodycopy>
Please <a href=mailto:%s>e-mail the judgekeeper</a>
and report how you got this error. Thank you.
<!--
""" % DPjudge.host.judgekeeper
        traceback.print_tb(os.sys.exc_traceback, None, os.sys.stdout)
        traceback.print_exc(None, os.sys.stdout)
        print '-->'
# ------------------------------------------------------------------
# Entry function for installations using Apache's mod_python package
# ------------------------------------------------------------------
def handler(req):
    """mod_python entry point: adapt the Apache *req* to the CGI-style
    handle() above and emit the page.

    NOTE: Python 2 code; redirects os.sys.stdout to the request object
    so the print statements in handle() reach the client.
    """
    from mod_python import apache, util
    import urllib
    # Work relative to the script's own directory.
    os.chdir(os.path.dirname(req.filename))
    os.sys.stdout, req.content_type = req, 'text/html'
    req.send_http_header()
    # Repack mod_python's FieldStorage into a plain dict for handle().
    os.environ, form, mod = req.subprocess_env, {}, util.FieldStorage(req)
    for key in mod.keys(): form[key] = mod[key]
    os.environ['REMOTE_ADDR'] = req.connection.remote_ip
    handle(form)
    return apache.OK
try:
    # Plain-CGI entry point: only runs when invoked through a gateway
    # (GATEWAY_INTERFACE set); mod_python installs use handler() above.
    if not os.environ['GATEWAY_INTERFACE']: raise
    import cgi
    form = cgi.FieldStorage()
    form.get = form.getvalue
    print 'Content-type: text/html\n'
    handle(form)
except: pass
|
# Read the 3x3 board: three lines of space-separated cell marks
# ('1' = first player, '2' = second player, anything else = empty).
first_row = input().split(' ')
second_row = input().split(' ')
third_row = input().split(' ')
board = (first_row, second_row, third_row)

# Candidate winning lines, probed in the same order as the original
# if/elif chain: column 0, main diagonal, columns 1-2, anti-diagonal,
# then the three rows.  (Fix: the original duplicated the same
# winner-announcement block eight times.)
lines = (
    (board[0][0], board[1][0], board[2][0]),  # column 0
    (board[0][0], board[1][1], board[2][2]),  # main diagonal
    (board[0][1], board[1][1], board[2][1]),  # column 1
    (board[0][2], board[1][2], board[2][2]),  # column 2
    (board[0][2], board[1][1], board[2][0]),  # anti-diagonal
    (board[0][0], board[0][1], board[0][2]),  # top row
    (board[1][0], board[1][1], board[1][2]),  # middle row
    (board[2][0], board[2][1], board[2][2]),  # bottom row
)

for a, b, c in lines:
    if a == b == c:
        # First completed line wins; a completed line of non-player
        # marks reports a draw, exactly as the original did.
        if a == '1':
            print("First player won")
        elif a == '2':
            print("Second player won")
        else:
            print('Draw!')
        break
else:
    # No line completed at all.
    print('Draw!')
#!/usr/bin/env python
# Aggregate test runner: importing the TestCase classes below makes
# them visible at module level so unittest.main() can collect them.
import unittest
from testphonenumber import PhoneNumberTest
from testphonenumberutil import PhoneNumberUtilTest
from testasyoutype import AsYouTypeFormatterTest
from testexamplenumbers import ExampleNumbersTest
from testphonenumbermatcher import PhoneNumberMatchTest, PhoneNumberMatcherTest
if __name__ == '__main__':
    # Discover and run every imported TestCase.
    unittest.main()
|
#From Jupyter notebook
#C1_Titanic T5.txt
#1
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import warnings
# Silence all warnings to keep the notebook output clean.
warnings.filterwarnings('ignore')
# Load the Titanic training set (local path from the original notebook).
f=open("E:/Tinky/大学课件及作业/6 自学课/6-3.Kaggle竞赛/C1_泰坦尼克号生还预测/泰坦尼克号数据/train.csv")
data=pd.read_csv(f)
#2 Data visualisation: overview plots of the training set.
fig=plt.figure(figsize=(18,6))
alpha=alpha_scatterplot=0.2
alpha_bar_chart=0.55
ax1=plt.subplot2grid((2,3),(0,0))
data.Survived.value_counts().plot(kind='bar',alpha=alpha_bar_chart)
ax1.set_xlim(-1,2)
plt.title("Distribution of Survival,(1=Survived)")
# Scatter plot of age against survival.
plt.subplot2grid((2,3),(0,1))
plt.scatter(data.Survived,data.Age,alpha=alpha_scatterplot)
plt.ylabel('Age')
plt.grid(b=True,which='major',axis='y')
plt.title("Survival by Age,(1=Survived)")
# Histogram of passenger-class counts.
ax3=plt.subplot2grid((2,3),(0,2))
data.Pclass.value_counts().plot(kind="barh",alpha=alpha_bar_chart)
ax3.set_ylim(-1,len(data.Pclass.value_counts()))
plt.title("Class Distribution")
# Kernel-density estimate of age within each class.
plt.subplot2grid((2,3),(1,0),colspan=2)# spans two columns (rowspan=x spans rows)
data.Age[data.Pclass==1].plot(kind='kde')
data.Age[data.Pclass==2].plot(kind='kde')
data.Age[data.Pclass==3].plot(kind='kde')
plt.xlabel('Age')
plt.title('Age Distribution within classer')
plt.legend(('1st Class','2nd Class','3rd Class'),loc='best')
# Bar chart of passengers per boarding location (Embarked).
ax5=plt.subplot2grid((2,3),(1,2))
data.Embarked.value_counts().plot(kind='bar',alpha=alpha_bar_chart)
ax5.set_xlim(-1,len(data.Embarked.value_counts()))
plt.title("Passengers per boarding location")
plt.show()
#3 Survival outcome: horizontal bar chart of survived vs. died counts.
plt.figure(figsize=(6,4))
ax=plt.subplot()
data.Survived.value_counts().plot(kind='barh',color='blue',alpha=0.65)
ax.set_ylim(-1,len(data.Survived.value_counts()))
plt.title("Survival Breakdown (1=Survived,0=Died)")
plt.show()
#4 Survival vs. gender.
fig2=plt.figure(figsize=(18,6))
data_male=data.Survived[data.Sex=='male'].value_counts().sort_index()# sort_index: order the counts by their index (0, 1)
data_female=data.Survived[data.Sex=='female'].value_counts().sort_index()
ax1=fig2.add_subplot(121)# 1 row x 2 cols, first slot; add_subplot takes the same layout args as subplot
data_male.plot(kind='barh',label='Male',alpha=0.55)
data_female.plot(kind='barh',color='#FA2379',label='Female',alpha=0.55)
plt.title("Who Survived? With respect to Gender, (raw value counts)")
plt.legend(loc='best')
ax1.set_ylim(-1,2)
# Same chart, but as proportions within each gender.
ax2=fig2.add_subplot(122)
(data_male/float(data_male.sum())).plot(kind='barh',label='Male',alpha=0.55)
(data_female/float(data_female.sum())).plot(kind='barh',color='#FA2379',label='Female',alpha=0.55)
plt.title("Who Survived proportionally? with respect to Gender")
plt.legend(loc='best')
ax2.set_ylim(-1,2)
plt.show()
#5 Survival broken down by gender and passenger class.
fig3=plt.figure(figsize=(18,12))
a=0.65
w=0.35# bar width
index = np.arange(2)
#A Raw survived/died counts.
ax1=fig3.add_subplot(341)
data.Survived.value_counts().plot(width=w,kind='bar',color='blue',alpha=a)
ax1.set_xlim(-1,len(data.Survived.value_counts()))
plt.title("Step.1")
#B Does gender matter?
ax2=fig3.add_subplot(345)
#data.Survived[data.Sex=='male'].value_counts().plot(width=w,kind='bar',label='Male')# replaced by the two plt.bar calls below for nicer output
plt.bar(index,data.Survived[data.Sex=='male'].value_counts() , w, color='blue', label='Male')
plt.xticks(index + w, ('Died', 'Survived'))
#data.Survived[data.Sex=='female'].value_counts().plot(width=w,kind='bar',color='#FA2379',label='Female')
plt.bar(index+w,data.Survived[data.Sex=='female'].value_counts() , w,color='#FA2379',label='Female')
plt.xticks(index + w,('Died', 'Survived'))
ax2.set_xlim(-1,2)
plt.title("Step.2 \nWho survived?with respect to Gender.")
plt.legend(loc='best')
ax3=fig3.add_subplot(346)
(data.Survived[data.Sex=='male'].value_counts()/float(data.Sex[data.Sex=='male'].size)).plot(width=w,kind='bar',label='Male')
(data.Survived[data.Sex=='female'].value_counts()/float(data.Sex[data.Sex=='female'].size)).plot(width=w,kind='bar',color='#FA2379',label='Female')
ax3.set_xlim(-1,2)
plt.title("Who survived proportionally?")
plt.legend(loc='best')
#C Does social class matter?
#female high class
ax4=fig3.add_subplot(349)
female_highclass=data.Survived[data.Sex=='female'][data.Pclass!=3].value_counts()
female_highclass.plot(kind='bar',label='female,highclass',color='#FA2479',alpha=a)
ax4.set_xticklabels(['Survived','Died'],rotation=0)
ax4.set_xlim(-1,len(female_highclass))
plt.title("Who Survived? with respect to Gender and Class")
plt.legend(loc='best')
#female low class
ax5=fig3.add_subplot(3,4,10,sharey=ax4)# sharey/sharex: share the axis scale with ax4
female_lowclass=data.Survived[data.Sex=='female'][data.Pclass==3].value_counts()
female_lowclass.plot(kind='bar',label='female,lowclass',color='pink',alpha=a)
ax5.set_xticklabels(['Died','Survived'],rotation=0)
ax5.set_xlim(-1,len(female_lowclass))
plt.legend(loc='best')
#male low class
ax6=fig3.add_subplot(3,4,11,sharey=ax4)
male_lowclass=data.Survived[data.Sex=='male'][data.Pclass==3].value_counts()
male_lowclass.plot(kind='bar',label='male,lowclass',color='lightblue',alpha=a)
ax6.set_xticklabels(['Died','Survived'],rotation=0)
ax6.set_xlim(-1,len(male_lowclass))
plt.legend(loc='best')
#male high class
ax7=fig3.add_subplot(3,4,12,sharey=ax4)
male_highclass=data.Survived[data.Sex=='male'][data.Pclass!=3].value_counts()
male_highclass.plot(kind='bar',label='male,highclass',color='steelblue',alpha=a)
ax7.set_xticklabels(['Died','Survived'],rotation=0)
ax7.set_xlim(-1,len(male_highclass))
plt.legend(loc='best')
plt.show()
#6 Do siblings/spouses aboard (SibSp) matter?
g = data.groupby(['SibSp','Survived'])
df = pd.DataFrame(g.count()['PassengerId'])
print(df)
data.Cabin.value_counts()# relationship with Cabin
#7
from sklearn.ensemble import RandomForestRegressor
#拟合缺失的年龄数据,此处用 RandomForestClassifier
def Fix_the_missing_ages(df):
    """Fill missing Age values by regressing age on the other numeric
    features with a RandomForestRegressor.

    Params:  df - DataFrame with Age/Fare/Parch/SibSp/Pclass columns.
    Returns: (df with Age filled in place, the fitted regressor)
    """
    # Numeric features fed to the random-forest regressor.
    age_df = df[['Age','Fare', 'Parch', 'SibSp', 'Pclass']]
    # Split rows into known-age (training) and unknown-age (to predict).
    # Fix: DataFrame.as_matrix() was removed in pandas 1.0; .values is
    # the long-supported equivalent.
    known_age = age_df[age_df.Age.notnull()].values
    unknown_age = age_df[age_df.Age.isnull()].values
    y = known_age[:, 0]   # target: the known ages
    X = known_age[:, 1:]  # feature columns
    # Fit the random forest on rows whose age is known.
    RFR_ = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)
    RFR_.fit(X, y)
    # Predict the missing ages and write them back into df.
    predictedAges = RFR_.predict(unknown_age[:, 1::])
    df.loc[ (df.Age.isnull()), 'Age' ] = predictedAges
    return df, RFR_
def Setting_Cabin_types(df):
    """Collapse the Cabin column to a "Yes"/"No" presence flag, in
    place, and return the same DataFrame."""
    has_cabin = df.Cabin.notnull()
    df.loc[has_cabin, 'Cabin'] = "Yes"
    df.loc[~has_cabin, 'Cabin'] = "No"
    return df
data, RFR_ = Fix_the_missing_ages(data)
data = Setting_Cabin_types(data)# collapse Cabin to Yes/No by presence
#8
# Logistic regression needs numeric inputs, so one-hot encode the
# categorical features first.  E.g. Cabin in ['yes','no'] becomes two
# columns 'Cabin_yes'/'Cabin_no': yes -> (1, 0), no -> (0, 1).
dummies_Cabin = pd.get_dummies(data['Cabin'], prefix= 'Cabin')# get_dummies(column, prefix=name prefix for the new columns)
dummies_Embarked = pd.get_dummies(data['Embarked'], prefix= 'Embarked')
dummies_Sex = pd.get_dummies(data['Sex'], prefix= 'Sex')
dummies_Pclass = pd.get_dummies(data['Pclass'], prefix= 'Pclass')
data = pd.concat([data,dummies_Cabin, dummies_Embarked, dummies_Sex, dummies_Pclass], axis=1)
data.drop(['Pclass', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1, inplace=True)# drop the original categorical columns
#9
# Scale the wide-ranging features (Fare, Age) to zero mean / unit
# variance so they do not dominate the regression.
import sklearn.preprocessing as preprocessing
scaler=preprocessing.StandardScaler()
# NOTE(review): modern scikit-learn expects 2-D input here
# (data[['Age']]); fitting on a Series raises — confirm sklearn version.
Age_scalefixed=scaler.fit(data['Age'])
data['Age_scaled']=scaler.fit_transform(data['Age'],Age_scalefixed)
Fare_scalefixed=scaler.fit(data['Fare'])
data['Fare_scaled']=scaler.fit_transform(data['Fare'],Fare_scalefixed)
#10
from sklearn import linear_model
# Modelling: pull the feature columns into a LogisticRegression-ready
# numpy array; the filter() regex keeps Survived plus the engineered
# features (the one-hot columns match the X_.* patterns).
train_df=data.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
# NOTE(review): as_matrix() was removed in pandas 1.0; .values is the
# replacement (applies to every as_matrix call below).
train_np=train_df.as_matrix()
y=train_np[:,0]# Survived outcome
X=train_np[:,1:]# feature values
# Fit the classifier.
clf=linear_model.LogisticRegression(C=1.0,penalty='l1',tol=1e-6)
clf.fit(X,y)
#11
# Apply the identical transformations to the test set.
f0=open("E:/Tinky/大学课件及作业/6 自学课/6-3.Kaggle竞赛/C1_泰坦尼克号生还预测/泰坦尼克号数据/test.csv")
data1=pd.read_csv(f0)
data1.loc[ (data1.Fare.isnull()), 'Fare' ] = 0
tmp_df = data1[['Age','Fare', 'Parch', 'SibSp', 'Pclass']]
null_age = tmp_df[data1.Age.isnull()].as_matrix()
X = null_age[:, 1:]
predictedAges = RFR_.predict(X)
data1.loc[ (data1.Age.isnull()), 'Age' ] = predictedAges
data1 = Setting_Cabin_types(data1)
dummies_Cabin = pd.get_dummies(data1['Cabin'], prefix= 'Cabin')
dummies_Embarked = pd.get_dummies(data1['Embarked'], prefix= 'Embarked')
dummies_Sex = pd.get_dummies(data1['Sex'], prefix= 'Sex')
dummies_Pclass = pd.get_dummies(data1['Pclass'], prefix= 'Pclass')
data1 = pd.concat([data1, dummies_Cabin, dummies_Embarked, dummies_Sex, dummies_Pclass], axis=1)
data1.drop(['Pclass', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1, inplace=True)
data1['Age_scaled'] = scaler.fit_transform(data1['Age'], Age_scalefixed)
data1['Fare_scaled'] = scaler.fit_transform(data1['Fare'], Fare_scalefixed)
#12
# Predict on the test set and export the submission file.
test = data1.filter(regex='Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
predictions=clf.predict(test)
result=pd.DataFrame({'PassengerId':data1['PassengerId'].as_matrix(),'Survived':predictions.astype(np.int32)})
result.to_csv("E:/Tinky/大学课件及作业/6 自学课/6-3.Kaggle竞赛/C1_泰坦尼克号生还预测/泰坦尼克号数据/test_X0.csv",index=False)
# Baseline model: Kaggle accuracy 0.76555.
#13
# Optimisation attempts based on published approaches.
# Association analysis: model coefficients per feature (the sign
# indicates the direction of correlation).
pd.DataFrame({"columns":list(train_df.columns)[1:], "coef":list(clf.coef_.T)})
#14 Cross-validation.
from sklearn import cross_validation
clf=linear_model.LogisticRegression(C=1.0,penalty='l1',tol=1e-6)
all_data=data.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
X=all_data.as_matrix()[:,1:]
y=all_data.as_matrix()[:,0]# column 0 is Survived, the rest are features
p=cross_validation.cross_val_score(clf,X,y,cv=5)
print(p)
'''
PS:cross_validation被废弃后,可以改为:
#from sklearn.model_selection import train_test_split
#x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.25,random_state=33)
'''
#15 Inspect misclassified rows for manual error analysis.
# Split the data 7:3 into train / cross-validation parts.
split_train,split_cv=cross_validation.train_test_split(data,test_size=0.3,random_state=0)
train_df = split_train.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
# Fit on the training split.
clf=linear_model.LogisticRegression(C=1.0,penalty='l1',tol=1e-6)
clf.fit(train_df.as_matrix()[:,1:],train_df.as_matrix()[:,0])
# Predict on the held-out split.
cv_df=split_cv.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
predictions = clf.predict(cv_df.as_matrix()[:,1:])
f1=open("E:/Tinky/大学课件及作业/6 自学课/6-3.Kaggle竞赛/C1_泰坦尼克号生还预测/泰坦尼克号数据/train.csv")
origin_data_train=pd.read_csv(f1)
# Original rows whose prediction disagrees with the true label.
bad_cases=origin_data_train.loc[origin_data_train['PassengerId'].isin(split_cv[predictions!=cv_df.as_matrix()[:,0]]['PassengerId'].values)]
print(bad_cases)
# With "train_df" and "cv_df" in hand, the former trains the model and
# the latter scores it, so model selection can be iterated.
#16 Check for over/under-fitting:
# plot training vs. CV scores with sklearn's learning_curve + matplotlib.
from sklearn.learning_curve import learning_curve
def Learning_curve_drawing(estimator, title, X, y, ylim=None, cv=None, n_jobs=1,
                        train_sizes=np.linspace(0.05, 1.0, 30), verbose=0, plot=True):
    """Plot a learning curve and return a (midpoint, diff) summary.

    Params:
        estimator : the classifier to evaluate
        X : feature matrix (numpy array)
        y : target vector
        cv : number of cross-validation folds (one fold is the CV set,
             the remaining n-1 are used for training)
        n_jobs : number of parallel jobs
    Returns:
        midpoint : midpoint between the final train/CV score bands
        diff : gap between the final train/CV score bands
    """
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes, verbose=verbose)
    train_scores_mean = np.mean(train_scores, axis=1)# per-size mean score
    train_scores_std = np.std(train_scores, axis=1)# per-size standard deviation
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    if plot:
        plt.figure()
        plt.title(title)
        if ylim is not None:
            plt.ylim(*ylim)
        plt.xlabel("Size of Sample")
        plt.ylabel("Score")
        plt.gca().invert_yaxis()# flip the y axis of the current Axes
        plt.grid()# show the grid
        plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std,
                        alpha=0.1, color="b")# shade the +/- one-std band
        plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std,
                        alpha=0.1, color="r")
        plt.plot(train_sizes, train_scores_mean, 'o-', color="b", label="Training Score")
        plt.plot(train_sizes, test_scores_mean, 'o-', color="r", label="Score on CV")
        plt.legend(loc="best")
        plt.draw()# draw() redraws interactively when the data changes
        plt.gca().invert_yaxis()
    midpoint = ((train_scores_mean[-1] + train_scores_std[-1]) + (test_scores_mean[-1] - test_scores_std[-1])) / 2
    diff = (train_scores_mean[-1] + train_scores_std[-1]) - (test_scores_mean[-1] - test_scores_std[-1])
    return midpoint, diff
Learning_curve_drawing(clf, "Learning Curcve", X, y)
#17 Model ensembling (different models vote on the final prediction).
# Bagging: train the same algorithm on random subsets of the training
# data to obtain distinct models (scikit-learn's BaggingRegressor).
from sklearn.ensemble import BaggingRegressor
train_df = data.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass.*')
train_np = train_df.as_matrix()
y = train_np[:, 0]# Survived outcome
X = train_np[:, 1:]# feature values
# Fit a bagging ensemble wrapped around the logistic model.
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
bagging_clf = BaggingRegressor(clf, n_estimators=20, max_samples=0.8, max_features=1.0, bootstrap=True, bootstrap_features=False, n_jobs=-1)
bagging_clf.fit(X, y)
test = data1.filter(regex='Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass.*')
predictions = bagging_clf.predict(test)
result = pd.DataFrame({'PassengerId':data1['PassengerId'].as_matrix(), 'Survived':predictions.astype(np.int32)})
result.to_csv("E:/Tinky/大学课件及作业/6 自学课/6-3.Kaggle竞赛/C1_泰坦尼克号生还预测/泰坦尼克号数据/test_Xf.csv", index=False)
# This ensemble scored 0.77511.
#!/usr/bin/python
# Simple sequential TCP port scanner.
# NOTE: Python 2 code (raw_input and print statements).
import socket, subprocess,sys
from datetime import datetime
# Clear the terminal before printing the banner.
subprocess.call('clear',shell=True)
rmip = raw_input("\t Enter the remote host IP to scan:")
r1 = int(raw_input("\t Enter the start port number\t"))
r2 = int (raw_input("\t Enter the last port number\t"))
print "*"*40
print "\n Mohit's Scanner is working on ",rmip
print "*"*40
t1= datetime.now()
try:
    # NOTE(review): range(r1, r2) excludes the "last" port r2 itself.
    for port in range(r1,r2):
        sock= socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        # One-second timeout for every socket created from here on.
        socket.setdefaulttimeout(1)
        # connect_ex returns 0 when the connection succeeded (port open).
        result = sock.connect_ex((rmip,port))
        if result==0:
            print "Port Open:-->\t", port
        # print desc[port]
        sock.close()
except KeyboardInterrupt:
    print "You stop this "
    sys.exit()
except socket.gaierror:
    print "Hostname could not be resolved"
    sys.exit()
except socket.error:
    print "could not connect to server"
    sys.exit()
t2= datetime.now()
# Report the total elapsed scan time.
total =t2-t1
print "scanning complete in " , total
|
../../subrepos/colin-nolan/key_value_string_parser.py/key_value_string_parser.py |
from simulator.core.pq import PriorityQueue
from simulator.schedulers.scheduler import Scheduler
class SRTF(Scheduler):
    """
    Shortest Remaining Time First (SRTF) scheduler.

    Behaves like Shortest Job First (SJF), but pre-emptive: a newly
    arrived job with a shorter burst time can interrupt the active one.
    """

    def __init__(self):
        super(SRTF, self).__init__()
        # Ready queue, ordered by remaining burst time.
        self.q = PriorityQueue()

    def perform_schedule(self):
        """
        We perform scheduling in either of two scenarios:
        1. A task completes its execution.
        2. A shorter task has arrived in the ready queue.
        """
        # TODO: Implement here your code.

    def enqueue_new_jobs(self):
        """
        (OVERRIDE) - Scheduler.enqueue_new_jobs
        Moves every job arriving at the current tick into the priority
        queue (keyed by burst time) instead of the base class's queue.
        """
        while self.ordered and self.ordered[0].arrive_time == self.current_time:
            job = self.ordered.popleft()
            self.q.add(job, priority=job.burst_time)

    def timer_interrupt(self):
        """
        (OVERRIDE) - Scheduler.timer_interrupt
        Also fires when the shortest ready job would pre-empt the
        currently active one.
        """
        base_interrupt = super(SRTF, self).timer_interrupt()
        if self.q and self.active:
            preempt = self.q.peek().burst_time < self.active.burst_time
        else:
            preempt = False
        return base_interrupt or preempt
|
# -*- coding: utf-8 -*-
# -------------
import sublime
from RSBIDE.common.async import run_after_loading
# from RSBIDE.common.notice import *
# True when running on Sublime Text build > 3000 (ST3 or newer).
ST3 = int(sublime.version()) > 3000
if ST3:
    # Python 3 has no basestring builtin; recreate it for shared code.
    basestring = (str, bytes)
# if the helper panel is displayed, this is true
# ! (TODO): use an event instead
b_helper_panel_on = False
# Cached handle of the last created output panel (None until created).
output_view = None
# prints the text to the "helper panel" (Actually the console)
def print_to_panel(view, text, b_overwrite=True, bLog=False, bDoc=False, showline=0, region_mark=None):
    """Show *text* in one of the RSBIDE output panels.

    Params:
        view        - view whose window hosts the panel
        text        - text appended to the panel
        b_overwrite - recreate the panel instead of appending to it
        bLog / bDoc - select the Log / Documentation panel; otherwise
                      the Declaration panel is used
        showline    - when > 0, scroll so this line is centred
        region_mark - (row, col) position of a word to highlight
    """
    global b_helper_panel_on, output_view
    b_helper_panel_on = True
    # Panel selection priority: Log, then Documentation, then Declaration.
    name_panel = ''
    if bLog:
        name_panel = 'RSBIDE:Log'
    elif bDoc:
        name_panel = 'RSBIDE:Documentation'
    else:
        name_panel = 'RSBIDE:Declaration'
    # Either rebuild the panel from scratch or reuse the cached one.
    if b_overwrite or not output_view:
        kill_panel(name_panel)
        panel = view.window().create_output_panel(name_panel, False)
        output_view = panel
    else:
        panel = output_view
    panel.set_read_only(False)
    panel.run_command('append', {'characters': text})
    if not b_overwrite:
        # Keep the end of the appended text visible.
        panel.show(panel.size())
    # Syntax highlighting: Log/Doc panels keep the default for now.
    if bLog:
        # panel.set_syntax_file("Packages/UnrealScriptIDE/Log.tmLanguage")
        pass
    elif bDoc:
        # panel.set_syntax_file('INI')
        pass
    else:
        panel.set_syntax_file(view.settings().get('syntax'))
    def show_at_center():
        # Deferred until the panel has loaded; 'region' is assigned
        # below, before run_after_loading schedules this closure.
        panel.show_at_center(region)
    if showline > 0:
        position = panel.text_point(showline, 0)
        region = panel.line(position)
        run_after_loading(panel, show_at_center)
    # Mark either the requested word or the centred line with a dot.
    if region_mark:
        rm = panel.word(panel.text_point(*region_mark))
        panel.add_regions('rsbide_declare', [rm], 'string', 'dot', sublime.DRAW_NO_FILL)
    elif showline > 0:
        panel.add_regions('rsbide_declare', [region], 'string', 'dot', sublime.DRAW_NO_FILL)
    panel.set_read_only(True)
    view.window().run_command("show_panel", {"panel": "output.%s" % name_panel})
def get_panel(view, text, name_panel='Rsb_parse_panel', syntax='Packages/RSBIDE/HighlightSyntax/R-Style.sublime-syntax'):
    """Create (recreating any existing one) an output panel filled with
    *text*, highlighted with *syntax*, and return it read-only."""
    kill_panel(name_panel)
    fresh = view.window().create_output_panel(name_panel, True)
    fresh.set_read_only(False)
    fresh.run_command('append', {'characters': text})
    fresh.set_syntax_file(syntax)
    fresh.set_read_only(True)
    return fresh
def kill_panel(name_panel='Rsb_parse_panel'):
    """Destroy the named output panel on the active window, if any."""
    window = sublime.active_window()
    window.destroy_output_panel(name_panel)
|
#!/usr/bin/env python3
"""Print a few /proc/sys/vm memory-management tunables."""


def _read_vm_setting(name):
    """Return the single-line value of /proc/sys/vm/<name> without its
    trailing newline."""
    # Fix: the original used readlines()[0][:-1], which leaks the file
    # handle and chops the last character even when there is no newline;
    # a context manager plus rstrip('\n') avoids both.
    with open('/proc/sys/vm/' + name) as file:
        return file.readline().rstrip('\n')


swappiness = _read_vm_setting('swappiness')
min_free_kbytes = _read_vm_setting('min_free_kbytes')
admin_reserve_kbytes = _read_vm_setting('admin_reserve_kbytes')

print('/proc/sys/vm/*')
print('swappiness {}'.format(swappiness.rjust(9, ' ')))
print('min_free_kbytes {}'.format(min_free_kbytes.rjust(9, ' ')))
print('admin_reserve_kbytes {}'.format(admin_reserve_kbytes.rjust(9, ' ')))
|
# Pool of real-world Firefox User-Agent strings (various OSes and
# versions) — presumably used to randomise HTTP request headers;
# confirm at the call sites.
user_agents = ['Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0',
               'Mozilla/5.0 (Windows NT 10.0; rv:60.0) Gecko/20100101 Firefox/60.0.2',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:84.0) Gecko/20100101 Firefox/84.0',
               'Mozilla/5.0 (Windows NT 10.0; WOW64; Rv:50.0) Gecko/20100101 Firefox/50.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:28.0) Gecko/20100101 Firefox/28.0',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0',
               'Mozilla/5.0 (X11; Linux armv7l; rv:78.0) Gecko/20100101 Firefox/78.0',
               'Mozilla/5.0 (Windows NT 10.0; rv:68.0) Gecko/20100101 Firefox/68.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:86.0) Gecko/20100101 Firefox/86.0',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:85.0) Gecko/20100101 Firefox/85.0',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:84.0) Gecko/20100101 Firefox/84.0',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:83.0) Gecko/20100101 Firefox/83.0',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:82.0) Gecko/20100101 Firefox/82.0',
               'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1',
               'Mozilla/5.0 (X11; Linux x86_64; rv:84.0) Gecko/20100101 Firefox/84.0',
               'Mozilla/5.0 (Windows NT 10.0; rv:78.0) Gecko/20100101 Firefox/78.0',
               'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:84.0) Gecko/20100101 Firefox/84.0',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:56.0) Gecko/20100101 Firefox/56.0',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:84.0) Gecko/20100101 Firefox/84.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:79.0) Gecko/20100101 Firefox/79.0',
               'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0',
               'Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0',
               'Mozilla/5.0 (Windows NT 10.0; rv:59.0) Gecko/20100101 Firefox/59.0.2',
               'Mozilla/5.0 (X11; Linux x86_64; rv:82.0) Gecko/20100101 Firefox/82.0',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:83.0) Gecko/20100101 Firefox/83.0',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:84.0) Gecko/20100101 Firefox/84.0',
               'Mozilla/5.0 (X11; Linux x86_64; rv:85.0) Gecko/20100101 Firefox/85.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:83.0) Gecko/20100101 Firefox/83.0',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:82.0) Gecko/20100101 Firefox/82.0',
               'Mozilla/5.0 (X11; U; Linux i686; ru; rv:1.9.0.5) Gecko/2008121622 Ubuntu/8.10 (intrepid) Firefox/3.0.5',
               'Mozilla/5.0 (X11; Linux x86_64; rv:86.0) Gecko/20100101 Firefox/86.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:70.0) Gecko/20100101 Firefox/70.0',
               'Mozilla/5.0 (Windows NT 5.1; rv:52.0) Gecko/20100101 Firefox/52.0',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:81.0) Gecko/20100101 Firefox/81.0,gzip(gfe)',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0',
               'Mozilla/5.0 (Android 9; Mobile; rv:78.0) Gecko/20100101 Firefox/78.0',
               'Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:85.0) Gecko/20100101 Firefox/85.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:84.0) Gecko/20100101 Firefox/84.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:76.0) Gecko/20100101 Firefox/76.0',
               'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.16; rv:83.0) Gecko/20100101 Firefox/83.0',
               'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:50.0) Gecko/20100101 Firefox/50.0',
               'Mozilla/5.0 (Wayland; Linux x86_64; rv:58.0) Gecko/20100101 Firefox/58.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:71.0) Gecko/20100101 Firefox/71.0',
               'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:60.0) Gecko/20100101 Firefox/73.0',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:81.0) Gecko/20100101 Firefox/81.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:80.0) Gecko/20100101 Firefox/80.0']
#!/usr/bin/python
from xlrd import open_workbook
import constants
import sys
# Open the Excel workbook that holds the ML training data.
# Modernized: print() calls and range() instead of Python-2-only
# print statements and xrange, so the script runs on Python 2 and 3.
print('opening Machine Learning Training Workbook...(this can take a while)')
try:
    book = open_workbook(constants.training_workbook_name)
except Exception:
    # Narrowed from a bare `except:` so Ctrl-C / SystemExit still propagate.
    print('unable to find or open training workbook...')
    print('program is exiting...')
    # Exit non-zero so shells/callers can detect the failure
    # (the original exited 0, signalling success on failure).
    sys.exit(1)
print('extracting classified training and test data from Machine Learning Workbook...')
# Extract training and test data from the Machine Learning Summary spreadsheet.
sheet = book.sheet_by_name(constants.training_sheet_name)
# Header row (row 0) supplies the column names.
keys = [sheet.cell(0, col_index).value for col_index in range(sheet.ncols)]
# Build one dict per data row, keyed by the header names.
dict_list = []
for row_index in range(1, sheet.nrows):
    d = {keys[col_index]: sheet.cell(row_index, col_index).value
         for col_index in range(sheet.ncols)}
    dict_list.append(d)
|
"""
author songjie
"""
import json
from flask import Response
class Reply(object):
    """Builds standardized JSON responses of the form {"result", "code", "msg"}.

    NOTE(review): all state lives on the class itself (``Reply._result`` etc.),
    so concurrent requests share it; confirm the app runs single-threaded or
    refactor to per-instance state.
    """
    # Class-level response state shared by the helpers below.
    _result = None   # payload returned under "result"
    _code = None     # numeric status code (0 = success, 1 = error by convention)
    _msg = None      # human-readable message, set on errors
    _data_type = 1   # 1: serialize objects via __dict__; otherwise try ORM columns

    def __init__(self, **kwargs):
        # Instances carry no state of their own; everything lives on the class.
        pass

    @property
    def result(self):
        return Reply._result

    @result.setter
    def result(self, value):
        Reply._result = value

    @property
    def code(self):
        return Reply._code

    @code.setter
    def code(self, value):
        Reply._code = value

    @property
    def msg(self):
        return Reply._msg

    @msg.setter
    def msg(self, value):
        Reply._msg = value

    @property
    def data_type(self):
        return Reply._data_type

    @data_type.setter
    def data_type(self, value):
        Reply._data_type = value

    @classmethod
    def json(cls):
        """Serialize the current class state into a Flask JSON Response.

        :return: flask.Response with a JSON body and UTF-8 charset.
        """
        data = {
            "result": cls._result,
            "code": cls._code,
            "msg": cls._msg
        }
        data = json.dumps(data, default=cls.object_to_dict)
        return Response(data, mimetype="application/json;charset=utf-8")

    @classmethod
    def object_to_dict(cls, value):
        """json.dumps fallback: convert a non-serializable object to a dict.

        With ``_data_type == 1`` the plain ``__dict__`` is used; otherwise
        objects exposing ``__table__.columns`` (SQLAlchemy-style models)
        are converted column by column.
        """
        data = {}
        if Reply._data_type == 1:
            return value.__dict__
        try:
            for column in value.__table__.columns:
                data[column.name] = getattr(value, column.name)
        except AttributeError:
            # Not an ORM model: fall back to the instance dict.
            # (Narrowed from a bare `except`, which also hid real bugs.)
            data = value.__dict__
        return data

    @classmethod
    def success(cls, result="", code=0, data_type=1):
        """Return a success Response; a falsy ``result`` keeps the stored one.

        :param data_type: serialization mode, see ``object_to_dict``.
        :param code: status code to report (default 0 = success).
        :param result: payload; falls back to the previously stored result.
        :return: flask.Response
        """
        cls._data_type = data_type
        if not result:
            result = cls._result
        cls._code = code
        cls._result = result
        cls._msg = ""
        return cls.json()

    @classmethod
    def error(cls, msg="", code=1, data_type=1):
        """Return an error Response with an empty result payload.

        :param data_type: serialization mode, see ``object_to_dict``.
        :param code: status code to report (default 1 = error).
        :param msg: human-readable error message.
        :return: flask.Response
        """
        cls._data_type = data_type
        cls._code = code
        cls._msg = msg
        cls._result = ""
        return cls.json()
|
import os
from itertools import chain
from django.conf import settings
from datetime import datetime
from operator import attrgetter
from urllib.parse import urlparse, urlunparse
from django.shortcuts import render, redirect, resolve_url, get_object_or_404
from django.http import HttpResponseRedirect, QueryDict, JsonResponse
from django.template import Context, RequestContext
from django.db.models import Q, Count
from django.contrib.auth.mixins import UserPassesTestMixin
from django.views.generic.base import TemplateView
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView, View
from django.views.generic.edit import FormView
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.utils import timezone
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.urls import reverse_lazy, reverse
from django.utils.decorators import method_decorator
from django.utils.http import is_safe_url, urlsafe_base64_decode
from django.utils.translation import gettext_lazy as _
from django.db.models.functions import Greatest
from django.contrib.postgres.search import SearchVector, SearchQuery, SearchRank, TrigramSimilarity
from .forms import SubmitRequestForm, SearchForm
from .models import Section, HelpCenter, UsersRequest
class TitleContextMixin:
    """Mixin that injects ``title`` (and optional ``extra_context``) into the
    template context.  Consumers must define a ``title`` attribute.
    """
    extra_context = None

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['title'] = self.title
        extras = self.extra_context
        if extras is not None:
            context.update(extras)
        return context
class HelpHomePageView(ListView):
    """Help-centre landing page: lists every Section."""
    model = Section
    template_name = 'helpcenter/home.html'
    context_object_name = 'help_center'

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        # Flag read by templates to highlight the help-centre navigation.
        context['helpcenter'] = True
        return context
class ArticleDetailView(DetailView):
    """Display a single help article, looked up by its ``help_hex`` URL kwarg."""
    # model = HelpCenter  (object is resolved via get_object instead)
    template_name = 'helpcenter/articles_detail.html'
    context_object_name = 'articles'

    def get_object(self):
        return get_object_or_404(HelpCenter, help_hex=self.kwargs['help_hex'])

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(**kwargs)
        # Sibling articles from the same section (e.g. for a sidebar).
        self.article_section = HelpCenter.objects.filter(section=self.object.section)
        context['article_section'] = self.article_section
        context['helpcenter'] = True
        return context
class SectionListView(ListView):
    """List every article belonging to one Section, matched by slug."""
    template_name = 'helpcenter/section.html'
    context_object_name = 'sections'

    def get_queryset(self):
        self.section = get_object_or_404(Section, slug=self.kwargs['slug'])
        return HelpCenter.objects.filter(section=self.section)

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(**kwargs)
        # Same queryset as get_queryset, exposed under a second context name.
        self.article_section = HelpCenter.objects.filter(section=self.section)
        context['section'] = self.section
        context['article_section'] = self.article_section
        context['helpcenter'] = True
        return context
def search_helpcenter(request):
    """Fuzzy-search help articles by title or section name.

    Uses Postgres trigram similarity; matches with similarity > 0.1 are
    returned, best match first.
    """
    form = SearchForm()
    query = None
    results = []
    if 'query' in request.GET:
        form = SearchForm(request.GET)
        if form.is_valid():
            query = form.cleaned_data['query']
            similarity = Greatest(
                TrigramSimilarity('title', query),
                TrigramSimilarity('section__name', query),
            )
            results = (HelpCenter.objects
                       .annotate(similarity=similarity)
                       .filter(similarity__gt=0.1)
                       .order_by('-similarity'))
    return render(request, 'helpsearch/search.html', {
        'form': form,
        'query': query,
        'results': results,
        'helpcenter': True,
        'title': 'Search results',
    })
@method_decorator(login_required, name='dispatch')
class SubmitRequestView(TitleContextMixin, CreateView):
    """Display the "submit a request" form and save submissions for the
    logged-in user.
    """
    model = UsersRequest
    form_class = SubmitRequestForm
    template_name = "helpcenter/submit_request.html"
    success_url = reverse_lazy('submit_request')
    title = _('Submit a request')

    def form_valid(self, form):
        # Attach the logged-in user before persisting the request.
        self.submit_request = form.save(commit=False)
        self.submit_request.user = self.request.user
        self.submit_request.save()
        # Fixed the user-facing message (was: "...request is sent us and
        # we' notify you soon...!" -- truncated "we'll" and bad grammar).
        messages.success(self.request, "Great!! Your request has been sent to us and we'll notify you soon!")
        return super().form_valid(form)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['helpcenter'] = True
        return context
class AboutPageView(TemplateView):
    """Render the static "about" page."""
    template_name = "about.html"

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['helpcenter'] = True
        return ctx
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 06/02/2018 9:32 PM
# @Author : Lee
# @File : index_max_heap.py
# @Software: PyCharm
import random
class IndexMaxHeap(object):
    """Index max-heap keyed by external 0-based indices.

    Internal layout (all 1-based; slot 0 of each list is a sentinel):
      data[i]    -- item stored under internal index i (= external index + 1)
      index[pos] -- internal index of the element at heap position pos
      reverse[i] -- heap position of internal index i, or -1 if absent
    Invariant: index[x] == y  <=>  reverse[y] == x.
    """

    def __init__(self, capacity):
        # Sentinel at slot 0 keeps parent/child arithmetic 1-based (k//2, 2k).
        self.data = [-1]
        self.index = [-1]
        self.reverse = [-1] * (capacity + 1)
        self.capacity = capacity
        self.count = 0

    def is_empty(self):
        """True when the heap holds no elements."""
        return self.count == 0

    def size(self):
        """Number of elements currently in the heap."""
        return self.count

    def insert(self, i, item):
        """Insert ``item`` under external index ``i`` (0-based).

        NOTE(review): data/index grow by append, so inserting after an
        extract desynchronizes them from ``count``; confirm all inserts
        happen before any extraction.

        :param i: external index, 0 <= i < capacity
        :param item: value to store
        """
        assert self.count + 1 <= self.capacity
        assert i >= 0 and i + 1 <= self.capacity
        i += 1  # switch to the internal 1-based index
        self.data.append(item)
        self.count += 1
        self.index.append(i)
        self.reverse[i] = self.count
        self._shift_up(self.count)

    def _swap_index(self, i, j):
        """Swap heap positions i and j, keeping reverse[] consistent."""
        self.index[i], self.index[j] = self.index[j], self.index[i]
        self.reverse[self.index[i]] = i
        self.reverse[self.index[j]] = j

    def extract_max_index(self):
        """Remove the maximum element and return its external index."""
        assert self.count > 0
        result = self.index[1] - 1
        self._swap_index(1, self.count)
        self.reverse[self.index[self.count]] = -1  # mark as removed
        self.count -= 1
        self._shift_down(1)
        return result

    def extract_max(self):
        """Remove the maximum element and return its value."""
        assert self.count > 0
        result = self.data[self.index[1]]
        self._swap_index(1, self.count)
        self.reverse[self.index[self.count]] = -1  # mark as removed
        self.count -= 1
        self._shift_down(1)
        return result

    def get_max_index(self):
        """External index of the maximum element (heap unchanged)."""
        assert self.count > 0
        return self.index[1] - 1

    def get_max(self):
        """Value of the maximum element (heap unchanged)."""
        assert self.count > 0
        return self.data[self.index[1]]

    def _contain(self, i):
        """True if external index ``i`` currently holds an element."""
        assert i >= 0 and i + 1 <= self.capacity
        # Bug fix: reverse[] is keyed by the internal (1-based) index, so
        # external index i maps to reverse[i + 1].  The original checked
        # reverse[i], which wrongly reported external index 0 as absent
        # and made change(0, ...) fail its assertion.
        return self.reverse[i + 1] != -1

    def change(self, i, item):
        """Replace the item at external index ``i`` and restore heap order.

        :param i: external index, must currently hold an element
        :param item: the new value
        """
        assert self._contain(i)
        i += 1  # internal index
        self.data[i] = item
        # O(log n): jump straight to the element's heap position via
        # reverse[] instead of scanning index[] linearly.
        self._shift_up(self.reverse[i])
        self._shift_down(self.reverse[i])

    def _shift_up(self, k):
        """Bubble the element at heap position k up to its proper place."""
        while (1 < k <= self.count) and self.data[self.index[k]] > self.data[self.index[k // 2]]:
            self._swap_index(k, k // 2)
            k //= 2

    def _shift_down(self, k):
        """Sink the element at heap position k down to its proper place."""
        while 2 * k <= self.count:
            j = 2 * k
            # Pick the larger child (use the right child only if it exists
            # and is larger than the left one).
            if j + 1 <= self.count and self.data[self.index[j]] < self.data[self.index[j + 1]]:
                j += 1
            if self.data[self.index[k]] < self.data[self.index[j]]:
                self._swap_index(k, j)
                k = j
            else:
                break
if __name__ == '__main__':
    # Smoke test.  Expected state after the ten inserts (from the original
    # author's notes):
    #   index:   [-1, 10, 9, 6, 7, 8, 2, 5, 1, 4, 3]
    #   data:    [-1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19]
    #   reverse: [-1, 8, 6, 10, 9, 7, 3, 4, 5, 2, 1]
    # Verify with the invariant: index[x] == y  <=>  reverse[y] == x.
    # The index heap orders positions so that data forms a max-heap.
    heap = IndexMaxHeap(10)
    for key in range(10):
        heap.insert(key, 2 * key + 1)
    print(heap.index)
    print(heap.data)
    print(heap.reverse)
|
from flask import Blueprint, render_template, abort, session, request, jsonify, url_for, redirect
from jinja2 import TemplateNotFound
import requests
import pprint
import simplejson as json
from collections import OrderedDict
from datetime import datetime, date, timedelta
import application.codechefAPI as helper
from flaskConfiguration import monDB
import random
statistics_page = Blueprint('statistics_page', __name__, template_folder='templates')
@statistics_page.before_request
def tokenExpireCheck():
    """Before every request: refresh the OAuth token when it has expired.

    Any failure (missing/corrupt session data, failed refresh) redirects
    to logout so the user re-authenticates.
    """
    try:
        if session['expires_in'] <= datetime.now():
            status = helper.refreshAccessToken()
            if status is not True:
                abort(redirect(url_for('authenticate.logout')))
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit).  Typically a KeyError when the
        # session has no 'expires_in' entry.
        abort(redirect(url_for('authenticate.logout')))
@statistics_page.route('/stats/tags/<friend_username>')
def tags(friend_username):
    """Render a chart of the tags of problems solved by ``friend_username``.

    Pulls solved-problem stats from the CodeChef API, maps each problem to
    tags via the local Mongo ``tags`` collection (known to be incomplete, so
    counts are approximate) and passes the top 20 tags to the template.
    """
    fullname = friend_username  # fallback if the API lookup fails below
    headers = {'Authorization': 'Bearer ' + session['access_token']}
    # gets the user problem statistics
    userProfileResponse = requests.get("https://api.codechef.com/users/{0}?fields=problemStats".format(friend_username), headers=headers)
    userProfileResponse = json.loads(userProfileResponse.text)
    print (userProfileResponse)
    problemStats = {}
    # NOTE(review): 9001 appears to be the API's "success" data code -- confirm.
    if(userProfileResponse['status'] == 'OK' and userProfileResponse['result']['data']['code'] == 9001):
        problemStats = userProfileResponse['result']['data']['content']['problemStats']['solved']
        fullname = userProfileResponse['result']['data']['content']['fullname']
    tags = {}
    # iterate over all the solved problems and aggregate tags
    for contestCode, solvedProblems in problemStats.items():
        for problemCode in solvedProblems:
            # fetch the tags for given problem -- incomplete db, may not be accurate results
            res = monDB.tags.find_one({'contestCode': contestCode, 'problemCode': problemCode})
            if(res != None):
                for tag in res['tags']:
                    # skip the tag that merely repeats the contest code
                    if str(tag) == contestCode.lower():
                        continue
                    if tag not in tags:
                        tags[tag] = 0
                    tags[tag] = tags[tag] + 1
    print (tags)
    # sort ascending by count; reversed below so the chart is descending
    tags = OrderedDict(sorted(tags.items(), key=lambda x: x[1]))
    orderedTags = []
    # prepare data pie chart: one {name, y} point per tag
    for key, val in tags.items():
        orderedTags.append({'name': str(key), 'y':val})
    orderedTags = orderedTags[::-1]
    # shows only top 20 tags data
    orderedTags = orderedTags[:min(20, len(orderedTags))]
    orderedTags = json.dumps(orderedTags)
    # for footer of tags page
    randomList = getFiveRandomFriends()
    try:
        return render_template('tags.html', tags=orderedTags, username=friend_username, fullname=fullname, randomList = randomList)
    except TemplateNotFound:
        abort(404)
def getFiveRandomFriends():
    """Pick up to five random friends from the ``friends`` collection.

    Returns a dict mapping friend_username -> friend_fullname.
    """
    friendsList = {}
    for doc in monDB.friends.find():
        friendsList[doc['friend_username']] = doc['friend_fullname']
    # Shuffle all usernames, then keep the first (at most) five.
    keys = list(friendsList.keys())
    random.shuffle(keys)
    return {key: friendsList[key] for key in keys[:5]}
@statistics_page.route('/stats/problems/<friend_username>')
def tagProblems(friend_username):
    """List the solved problems of ``friend_username`` that carry the tag
    given in the ``tag`` query parameter.
    """
    tag = request.args.get('tag')
    headers = {'Authorization': 'Bearer ' + session['access_token']}
    # initialize the empty problem list
    tagProblemsUser = []
    # api request for user profile
    userProfileResponse = requests.get("https://api.codechef.com/users/{0}?fields=problemStats".format(friend_username), headers=headers)
    userProfileResponse = json.loads(userProfileResponse.text)
    # NOTE(review): 9001 appears to be the API's "success" data code -- confirm.
    if(userProfileResponse['status'] == 'OK' and userProfileResponse['result']['data']['code'] == 9001):
        problemStats = userProfileResponse['result']['data']['content']['problemStats']['solved']
    else:
        problemStats = {}
    # keep only the problems whose tag list contains the requested tag
    for contestCode, solvedProblems in problemStats.items():
        for problemCode in solvedProblems:
            problemTags = monDB.tags.find_one({'contestCode': contestCode, 'problemCode': problemCode})
            if(problemTags != None and tag in problemTags['tags']):
                tagProblemsUser.append({'problemCode': problemCode, 'contestCode': contestCode})
    try:
        userFriends = findFriends()
        return render_template('tag_problems_user.html',userFriends = userFriends, tagProblemsUser=tagProblemsUser, friend_username=friend_username)
    except TemplateNotFound:
        abort(404)
def findFriends():
    """Return the session user's friends as plain dicts for templating."""
    cursor = monDB.friends.find({
        'username': session['username']
    })
    return [
        {
            'friend_username': doc['friend_username'],
            'friend_fullname': doc['friend_fullname'].title(),
            'timestamp': doc['timestamp'],
            'friend_id': str(doc['_id']),
        }
        for doc in cursor
    ]
from django import forms
from .models import Profile,Photo,Comments
from django.forms import ModelForm,Textarea,IntegerField
class NewPhotoForm(forms.ModelForm):
    """Photo upload form; user, photos and likes are set by the view."""
    class Meta:
        model = Photo
        exclude = ['user','photos','likes']
class NewProfileForm(forms.ModelForm):
    """Profile edit form; user and photos are set by the view."""
    class Meta:
        model = Profile
        exclude = ['user','photos']
class CommentForm(forms.ModelForm):
    """Comment form; author, photo and user are set by the view."""
    class Meta:
        model = Comments
        exclude = ['posted_by', 'commented_photo','user']
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-04-08 22:00:09
# @Author : Fallen (xdd043@qq.com)
# @Link : https://github.com/fallencrasher/python-learning
# @Version : $Id$
#Closure applications:
#1. Preserve the state captured at the time the closure is returned
#2.
def func(a, b):
    """Return a closure that prints the sum of a, b and a captured constant 10."""
    c = 10

    def inner_func():
        # a, b and c are captured from the enclosing scope.
        total = a + b + c
        print("加和为:", total)

    return inner_func
# Call func to obtain closures with different captured state.
x1 = func(6,9) # i.e. x1 = inner_func with a=6, b=9 captured
x2 = func(2,8) # likewise, x2 = inner_func with a=2, b=8
# Invoke the returned inner functions.
x1()
x2()
# Notes (translated from the original Chinese commentary):
# Passing different arguments for a and b yields distinct closures (x1, x2)
# that print different sums -- that is the point of the closure here.
# In Python, functions are first-class objects, so each returned closure is
# just another variable, like a = 1 or b = 'str', kept alive in memory with
# its captured values.  Further closures (x3, x4, x5, ...) made the same way
# are all independent of one another.
# The key difference from an ordinary function call: a normal call's frame
# is discarded once the call returns, whereas a returned closure (together
# with the variables it captured) stays alive for as long as something
# references it.
|
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import sys
sys.path.insert(1, 'Trabalho 3/modules/')
import models
#Funções do Trabalho 2
def plot_confusion_matrix(y_true, y_pred, title, cmap=plt.cm.Reds):
    """Plot a labelled confusion matrix for the given predictions.

    Args:
        y_true: ground-truth labels (integer-castable).
        y_pred: predicted labels.
        title: plot title.
        cmap: matplotlib colormap for the cells.
    Returns:
        The matplotlib Axes holding the rendered matrix.
    """
    cm = confusion_matrix(y_true, y_pred)  # compute the confusion matrix
    classes = [int(i) for i in np.unique(y_true)]  # class labels for the ticks
    fig, ax = plt.subplots(figsize=(8, 5))
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           xticklabels=classes,
           yticklabels=classes,
           title=title,
           ylabel='Verdadeiros',
           xlabel='Preditos')
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Pick the cell-text colour by intensity so counts stay readable.
    thresh = cm.max() / 2.0
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], 'd'), ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax
def plot_boundaries(X, y, clf, title, cmap=plt.cm.YlOrRd):
    """Plot the 2-D decision regions of ``clf`` together with the data.

    Assumes X has exactly two feature columns -- TODO confirm at call sites.
    """
    markers = ('o', 'x')
    colors = ('firebrick', 'black')
    # Bounding box with a 1-unit margin around the data.
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02),
                         np.arange(y_min, y_max, 0.02))
    # Classify every grid point to shade the decision regions.
    Z = clf.predict(np.array([xx.ravel(), yy.ravel()]).T)
    Z = np.array(Z).reshape(xx.shape)
    plt.figure(figsize=(8, 5))
    plt.title(label=title)
    plt.contourf(xx, yy, Z, alpha=0.3, cmap=cmap)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    # Overlay the samples, one marker/colour per class.
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0],
                    y=X[y == cl, 1],
                    c=colors[idx],
                    marker=markers[idx],
                    label='Class ' + str(int(cl)),
                    edgecolor='black')
    plt.legend()
    plt.show()
def accuracy_score(y_real, y_pred):
    """Fraction of predictions that match the ground-truth labels."""
    hits = np.count_nonzero(y_pred == y_real)
    return hits / y_real.shape[0]
# Função extra do Trabalho 2
def plot_loss_path(loss, title=None):
    """Plot the training loss per epoch.

    Args:
        loss: sequence of loss values, one per epoch.
        title: optional plot title.
    """
    plt.figure(figsize=(10, 5))
    plt.rcParams.update({'font.size': 14})
    # NOTE(review): '-k' sets black but the explicit color= kwarg then
    # takes precedence, so the line renders firebrick -- confirm intent.
    plt.plot(range(1, len(loss)+1), loss, '-k', color='firebrick')
    plt.xlabel('Épocas', fontsize=14)
    plt.ylabel('Loss', fontsize=14)
    if title is not None:
        plt.title(title, fontsize=14)
    plt.show()
#Novas funções
def plot_data(X, y, marker='o', title=False):
    """Scatter-plot 2-D samples coloured by class label.

    Args:
        X: array whose first two columns are plotted.
        y: class labels, one per sample.
        marker: matplotlib marker used for every class.
        title: optional plot title (falsy disables it).
    """
    classes = np.array([int(i) for i in np.unique(y)])
    # One colour per class, drawn from the Set1 palette.
    colors = plt.cm.Set1(np.linspace(0, 0.9, classes.shape[0]))
    fig = plt.figure(figsize=(8, 6), )
    plt.rcParams.update({'font.size': 14})
    for i, class_ in enumerate(classes):
        plt.scatter(X[y==class_, 0], X[y==class_, 1], s=100, marker=marker, color=colors[i])
    if title:
        plt.title(label=title)
    plt.show()
# 3ª questão: K-fold
def k_fold(X, y, k, method, seed=42):
    """Rough k-fold evaluation: mean accuracy of ``method`` over k subsets.

    Note: each subset is itself split 70/30 train/test (rather than
    training on the remaining folds), matching the original assignment.
    Shuffling is seeded, so results are reproducible.
    """
    indices = list(range(len(X)))
    random.Random(seed).shuffle(indices)
    fold_size = round(len(X) / k)
    folds = [indices[start:start + fold_size]
             for start in range(0, len(indices), fold_size)]
    scores = []
    for fold_no in range(k):
        X_fold = X[folds[fold_no]]
        y_fold = y[folds[fold_no]]
        X_train, X_test, y_train, y_test = train_test_split(
            X_fold, y_fold, test_size=0.3, random_state=seed)
        method.fit(X_train, y_train)
        scores.append(accuracy_score(y_test, method.predict(X_test)))
    return np.mean(scores)
# Para análise do melhor alpha para a rede MLP
def grid_search_mlp(X_train, X_test, y_train, y_test, units, epochs):
    """Grid-search the MLP regularization strength over logspace(-2, 0).

    Fits one model per candidate alpha, keeps the alpha with the lowest
    validation error, refits a final model with it, plots its loss
    history, and returns the best alpha.
    """
    grid_search = np.logspace(-2, 0, 11)  # candidate alphas
    val_list = []
    for i in range(grid_search.shape[0]):
        alpha = grid_search[i]
        model = models.MLPClassifier(hidden_unit=units, epochs=epochs, alpha=alpha)
        model.fit(X_train, y_train)
        # NOTE(review): np.argmax without axis= reduces over the flattened
        # array and yields a single scalar; if predict() returns per-class
        # scores per sample this likely needs axis=1 -- confirm against
        # models.MLPClassifier.
        y_pred = np.argmax(model.predict(X_test))
        wrong_index_val = y_test != y_pred  # misclassification mask
        val_list.append(np.mean(wrong_index_val))
    best_alpha = grid_search[np.argmin(val_list)]
    print("[MLP] Melhor modelo encontrado: alpha={}".format(best_alpha))
    final_model = models.MLPClassifier(hidden_unit=units, epochs=epochs, alpha=best_alpha)
    final_model.fit(X_train, y_train)
    plot_loss_path(final_model.loss_history(), 'Função de loss ao longo das iterações')
    return best_alpha
# Author: Koorosh Gobal
# Python code for 3.3
# -----------------------------------
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from scipy.integrate import odeint
# -----------------------------------
# Oscillator / discretization parameters for the harmonic-balance solve:
# x'' + x + epsilon*(2*mu*x' + alpha*x**3 + 2*k*x*cos(omega*t)) = sin(2*t)
epsilon = 1.0
mu = 1.0
alpha = 1.0
k = 1.0
omega = 2.0
N = 99  # number of samples on the periodic time grid
T = 2*2*np.pi  # time span: two base periods
t = np.linspace(0, T, N+1)
t = t[0:-1]  # drop the duplicated endpoint (the grid is periodic)
Omega = np.fft.fftfreq(N, T/(2*np.pi*N))  # angular frequency of each FFT bin
x0 = np.zeros(N)  # initial guess for the optimizer
# Harmonic Balance method
def residual(x):
    """Scalar residual of the ODE for a candidate periodic signal x on grid t.

    Derivatives are computed spectrally: multiply the FFT by 1j*Omega and
    -Omega**2, then inverse-transform.  Returns the summed squared
    magnitude of the equation residual.
    """
    X = np.fft.fft(x)
    dx = np.fft.ifft(np.multiply(1j * Omega, X))
    ddx = np.fft.ifft(np.multiply(-Omega**2, X))
    Residual = ddx + x + epsilon * (2 * mu * dx + alpha * x**3
               + 2 * k * x * np.cos(omega * t)) - np.sin(2 * t)
    Residual = np.sum(np.abs((Residual**2)))
    return Residual
# Minimize the residual to obtain the harmonic-balance solution.
res = minimize(residual, x0, method='SLSQP')
xSol = res.x
# Numerical solution
def RHS(X, t=0.0):
    """First-order form of the oscillator for odeint: X = [x, x']."""
    x1, x2 = X
    x1dot = x2
    x2dot = -x1 - epsilon * (2 * mu * x2 + alpha * x1**3
            + 2 * k * x1 * np.cos(omega * t)) + np.sin(2 * t)
    return [x1dot, x2dot]
# Compare the two solutions graphically.
ta = np.linspace(0.0, T, N)
sol = odeint(RHS, [0, 0], ta)
plt.figure()
plt.plot(t, res.x, 'k',
         ta, sol[:, 0], 'r--')
plt.legend(['Harmonic Balance', 'Time integration'], loc='best')
plt.xlabel('Time')
plt.ylabel('Displacement')
plt.show()
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 2 17:45:52 2020
@author: Mitchell
"""
import requests as rq
import datetime
import json
from datetime import timedelta, date
import xlsxwriter
import time
# Default date range / worksheet cursor (overwritten below).
start_date = datetime.date.today()
end_date = datetime.date.today()
row = 0
col = 0
# Countries commented out provide no data and are excluded from the
# request.  Codes follow ISO 3166-1 alpha-3.
country_dictionary = {
    "Albania":"ALB",
    "Andorra":"AND",
    "Austria":"AUT",
    "Belarus":"BLR",
    "Belgium":"BEL",
    "Bosnia and Herzegovina":"BIH",
    "Bulgaria":"BGR",
    "Croatia":"HRV",
    # "Vatican city":"VAT",
    "United Kingdom":"GBR",
    "Ukraine":"UKR",
    "Switzerland":"CHE",
    "Sweden":"SWE",
    "Spain":"ESP",
    "Slovenia":"SVN",
    "Slovakia":"SVK",
    "Serbia":"SRB",
    # "San Marino":"RSM",
    "Russia":"RUS",
    "Romania":"ROU",
    "Portugal":"PRT",
    "Poland":"POL",
    "Norway":"NOR",
    "Netherlands":"NLD",
    # "Montenegro":"MNE",
    # "Monaco":"MCO",
    "Moldova":"MDA",
    # "Malta":"MLT",
    "Luxembourg":"LUX",
    "Lithuania":"LTU",
    # "Liechtenstein":"LIE",
    "Latvia":"LVA",
    # "Kosovo":"UNK",
    "Italy":"ITA",
    "Ireland":"IRL",
    "Iceland":"ISL",
    "Hungary":"HUN",
    "Greece":"GRC",
    "Germany":"DEU",
    "France":"FRA",
    "Finland":"FIN",
    "Estonia":"EST",
    "Denmark":"DNK",
    "Czechia":"CZE"
}
def daterange(start_date, end_date):
    """Yield each date from start_date up to, but excluding, end_date."""
    total_days = (end_date - start_date).days
    for offset in range(total_days):
        yield start_date + timedelta(days=offset)
# Date window to fetch (TODO: make configurable from the frontend).
start_date = date(2020, 1, 21)
end_date = date(2020, 12, 6)
'''
#controle data
response_country = rq.get("https://covidtrackerapi.bsg.ox.ac.uk/api/v2/stringency/actions/NLD/2021-1-12")
json_str = json.dumps(response_country.json())
resp = json.loads(json_str)
print(resp)
'''
# Fetch per-country daily data from the Oxford COVID tracker API and
# write one worksheet per country into an xlsx workbook.
workbook = xlsxwriter.Workbook('data_countries_all_with_relatives.xlsx')
fnames = ['Date', 'New Cases', 'Change of Cases', 'Cumulative cases','New Deaths', 'Change of Deaths', 'Cumulative deaths','Stringency','C1' ,'C2' ,'C3' ,'C4' ,'C5' ,'C6' ,'C7' ,'C8' ,'E1','E2' ,'E3' ,'E4' ,'H1' ,'H2' ,'H3','H4' ,'H5' ,'H6', 'H7']
for key in country_dictionary:
    # one sheet per country, named by its ISO code
    worksheet = workbook.add_worksheet(country_dictionary[key])
    # header row
    for col_no, item in enumerate(fnames):
        worksheet.write(0, col_no, item)
    row_num = 1
    # Running totals used to derive daily deltas from cumulative counts.
    prev_cases = 0
    prev_deaths = 0
    change_cases = 0
    change_deaths = 0
    last_day_cases= 0
    last_day_deaths =0
    # one API call per country per day
    for single_date in daterange(start_date, end_date):
        col_num=0
        r = ""
        try:
            r = rq.get("https://covidtrackerapi.bsg.ox.ac.uk/api/v2/stringency/actions/"+ country_dictionary[key] +"/" + single_date.strftime("%Y-%m-%d"), timeout=5)
        except rq.exceptions.ConnectionError as e:
            # network hiccup: skip this day and carry on
            continue
        response_country = r
        json_str = json.dumps(response_country.json())
        resp = json.loads(json_str)
        try:
            # Daily figures = today's cumulative minus yesterday's.
            daily_cases = int(resp["stringencyData"]["confirmed"])-prev_cases
            daily_deaths = int(resp["stringencyData"]["deaths"])-prev_deaths
            my_data = [str(resp["stringencyData"]["date_value"]),
                       (daily_cases),
                       (daily_cases-last_day_cases),
                       int(resp["stringencyData"]["confirmed"]),
                       (daily_deaths),
                       (daily_deaths-last_day_deaths),
                       int(resp["stringencyData"]["deaths"]),
                       resp['stringencyData']['stringency']]
            last_day_cases = daily_cases
            last_day_deaths = daily_deaths
            prev_cases = int(resp["stringencyData"]["confirmed"])
            prev_deaths = int(resp["stringencyData"]["deaths"])
            # Append the flagged-status of every policy action (C1..H7).
            for x in range(len(resp["policyActions"])):
                my_data += [str(resp["policyActions"][x]["flagged"])]
            for data in my_data:
                worksheet.write(row_num, col_num, data)
                col_num +=1
            row_num +=1
        # NOTE(review): bare except silently drops any day whose payload is
        # missing/odd; consider narrowing to (KeyError, ValueError, TypeError).
        except:
            print(key + " rip" + single_date.strftime("%Y-%m-%d"))
        # stay polite to the API
        time.sleep(0.5)
workbook.close()
"""
prob not needed if there is a list with all countries
response_total = rq.get("https://covidtrackerapi.bsg.ox.ac.uk/api/v2/stringency/date-range/2020-06-02/2020-06-03")
"""
"""
###general information about the json data retrieved from the get requests
{
policyActions: {
0...n: { //Numerical key
policy_type_code: String, //Policy type 2 or 3 digit code - letter/number - or NONE if no data available
policy_type_display: String, //String describing policy value,
policyvalue: Integer, //Represents policy status
is_general: Boolean, //If this is a general policy,
flagged: Boolean, //Replaces isgneral from 28 April 2020,
policy_value_display_field: String, //Describes the level of stringency of the policy or type of policy
notes: String, //Notes entered by contributors
}
},
stringencyData: {
date_value: String, //YYYY-MM-DD date of record
country_code: String, //ALPHA-3 country code
confirmed: Integer, //Recorded confirmed cases,
deaths: Integer, //Recorded deaths,
stringency_actual: Integer, //Calculated stringency
stringency: Integer, //Display stringency - Will be actual value if available. For previous 7 days will take last available value. Otherwise null.
}
}
""" |
# coding=utf-8
# Copyright 2018 The Dopamine Authors.
# Modifications copyright 2019 Unity Technologies.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Obstacle Tower-specific utilities including Atari-specific network architectures.
This includes a class implementing minimal preprocessing, which
is in charge of:
. Converting observations to greyscale.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from obstacle_tower_env import ObstacleTowerEnv
import gym
from gym.spaces.box import Box
import numpy as np
import tensorflow as tf
import gin.tf
import cv2
slim = tf.contrib.slim  # TF1 slim API shorthand used by the networks below.
NATURE_DQN_OBSERVATION_SHAPE = (84, 84)  # Size of downscaled Atari 2600 frame.
NATURE_DQN_DTYPE = tf.uint8  # DType of Atari 2600 observations.
NATURE_DQN_STACK_SIZE = 4  # Number of frames in the state stack.
@gin.configurable
def create_otc_environment(environment_path=None):
    """Wraps an Obstacle Tower Gym environment with some basic preprocessing.

    Args:
        environment_path: path to the Obstacle Tower executable; required
            (asserted non-None), typically supplied via gin config.

    Returns:
        An Obstacle Tower environment with some standard preprocessing.
    """
    assert environment_path is not None
    # worker_id=0, retro=False: full-resolution colour observations.
    env = ObstacleTowerEnv(environment_path, 0, retro=False)
    env = OTCPreprocessing(env)
    return env
def nature_dqn_network(num_actions, network_type, state):
    """The convolutional network used to compute the agent's Q-values.

    Three conv layers (32@8x8/4, 64@4x4/2, 64@3x3/1) followed by a
    512-unit dense layer and a linear Q-value head.

    Args:
        num_actions: int, number of actions.
        network_type: namedtuple, collection of expected values to return.
        state: `tf.Tensor`, contains the agent's current state.

    Returns:
        net: _network_type object containing the tensors output by the network.
    """
    net = tf.cast(state, tf.float32)
    net = tf.div(net, 255.)  # scale uint8 pixels to [0, 1]
    net = slim.conv2d(net, 32, [8, 8], stride=4)
    net = slim.conv2d(net, 64, [4, 4], stride=2)
    net = slim.conv2d(net, 64, [3, 3], stride=1)
    net = slim.flatten(net)
    net = slim.fully_connected(net, 512)
    # Linear output head: one Q-value per action.
    q_values = slim.fully_connected(net, num_actions, activation_fn=None)
    return network_type(q_values)
def rainbow_network(num_actions, num_atoms, support, network_type, state):
    """The convolutional network used to compute agent's Q-value distributions.

    Same conv trunk as the DQN network, but the head emits a categorical
    distribution over ``num_atoms`` value buckets for every action;
    Q-values are the expectation of that distribution over ``support``.

    Args:
        num_actions: int, number of actions.
        num_atoms: int, the number of buckets of the value function distribution.
        support: tf.linspace, the support of the Q-value distribution.
        network_type: namedtuple, collection of expected values to return.
        state: `tf.Tensor`, contains the agent's current state.

    Returns:
        net: _network_type object containing the tensors output by the network.
    """
    weights_initializer = slim.variance_scaling_initializer(
        factor=1.0 / np.sqrt(3.0), mode='FAN_IN', uniform=True)
    net = tf.cast(state, tf.float32)
    net = tf.div(net, 255.)  # scale uint8 pixels to [0, 1]
    net = slim.conv2d(
        net, 32, [8, 8], stride=4, weights_initializer=weights_initializer)
    net = slim.conv2d(
        net, 64, [4, 4], stride=2, weights_initializer=weights_initializer)
    net = slim.conv2d(
        net, 64, [3, 3], stride=1, weights_initializer=weights_initializer)
    net = slim.flatten(net)
    net = slim.fully_connected(
        net, 512, weights_initializer=weights_initializer)
    net = slim.fully_connected(
        net,
        num_actions * num_atoms,
        activation_fn=None,
        weights_initializer=weights_initializer)
    # One softmax distribution over atoms per action.
    logits = tf.reshape(net, [-1, num_actions, num_atoms])
    probabilities = tf.contrib.layers.softmax(logits)
    # Expected value under the distribution = the per-action Q-value.
    q_values = tf.reduce_sum(support * probabilities, axis=2)
    return network_type(q_values, logits, probabilities)
def implicit_quantile_network(num_actions, quantile_embedding_dim,
                              network_type, state, num_quantiles):
    """The Implicit Quantile ConvNet.

    Embeds randomly sampled quantile fractions with a cosine basis,
    merges them with the conv-trunk state features via an elementwise
    (Hadamard) product, and outputs one value per action per quantile.

    Args:
        num_actions: int, number of actions.
        quantile_embedding_dim: int, embedding dimension for the quantile input.
        network_type: namedtuple, collection of expected values to return.
        state: `tf.Tensor`, contains the agent's current state.
        num_quantiles: int, number of quantile inputs.

    Returns:
        net: _network_type object containing the tensors output by the network.
    """
    weights_initializer = slim.variance_scaling_initializer(
        factor=1.0 / np.sqrt(3.0), mode='FAN_IN', uniform=True)
    state_net = tf.cast(state, tf.float32)
    state_net = tf.div(state_net, 255.)  # scale uint8 pixels to [0, 1]
    state_net = slim.conv2d(
        state_net, 32, [8, 8], stride=4,
        weights_initializer=weights_initializer)
    state_net = slim.conv2d(
        state_net, 64, [4, 4], stride=2,
        weights_initializer=weights_initializer)
    state_net = slim.conv2d(
        state_net, 64, [3, 3], stride=1,
        weights_initializer=weights_initializer)
    state_net = slim.flatten(state_net)
    state_net_size = state_net.get_shape().as_list()[-1]
    # Repeat the state features once per sampled quantile.
    state_net_tiled = tf.tile(state_net, [num_quantiles, 1])
    batch_size = state_net.get_shape().as_list()[0]
    quantiles_shape = [num_quantiles * batch_size, 1]
    # Quantile fractions tau ~ Uniform(0, 1).
    quantiles = tf.random_uniform(
        quantiles_shape, minval=0, maxval=1, dtype=tf.float32)
    quantile_net = tf.tile(quantiles, [1, quantile_embedding_dim])
    pi = tf.constant(math.pi)
    # Cosine embedding of tau: cos(pi * i * tau), i = 1..embedding_dim.
    quantile_net = tf.cast(tf.range(
        1, quantile_embedding_dim + 1, 1), tf.float32) * pi * quantile_net
    quantile_net = tf.cos(quantile_net)
    # Project the embedding to the state-feature size so the two can be merged.
    quantile_net = slim.fully_connected(
        quantile_net,
        state_net_size,
        weights_initializer=weights_initializer)
    # Hadamard product.
    net = tf.multiply(state_net_tiled, quantile_net)
    net = slim.fully_connected(
        net, 512, weights_initializer=weights_initializer)
    quantile_values = slim.fully_connected(
        net,
        num_actions,
        activation_fn=None,
        weights_initializer=weights_initializer)
    return network_type(quantile_values=quantile_values, quantiles=quantiles)
@gin.configurable
class OTCPreprocessing(object):
    """A class implementing image preprocessing for OTC agents.

    Specifically, this converts observations to greyscale. It doesn't
    do anything else to the environment, but it does apply a shaped
    reward (see `reward_compute`) on top of the raw environment reward.
    """

    def __init__(self, environment):
        """Constructor for an Obstacle Tower preprocessor.

        Args:
            environment: Gym environment whose observations are preprocessed.
        """
        self.environment = environment
        self.game_over = False
        self.lives = 0  # Will need to be set by reset().
        # Reward-shaping state, reset at the start of every episode.
        self.stage_reward = 0.0
        self.previous_stage_time_remaining = 3000
        self.previous_reward = 0
        self.previous_keys = 0
        self.previous_time_remaining = 3000
        self.tableAction = self.createActionTable()

    def createActionTable(self):
        """Builds the flat-index -> multi-discrete action lookup table.

        Returns:
            list of 18 actions of the form [movement, rotation, jump, 0]
            (3 * 3 * 2 combinations; the 4th component is always 0).
        """
        tableAction = []
        for a in range(0, 3):
            for b in range(0, 3):
                for c in range(0, 2):
                    tableAction.append([a, b, c, 0])
        return tableAction

    @staticmethod
    def _to_grayscale(observation):
        """Converts an observation to an 84x84 greyscale frame.

        RGB frames are scaled to [0, 255] and converted to grey first;
        already-2D frames are only resized.
        NOTE(review): the original source lost its indentation, so whether
        the resize applied to already-greyscale frames is ambiguous;
        resizing unconditionally is assumed here — confirm.
        """
        if len(observation.shape) > 2:
            observation = cv2.cvtColor(
                cv2.convertScaleAbs(observation, alpha=(255.0 / 1.0)),
                cv2.COLOR_RGB2GRAY)
        observation = cv2.resize(observation, (84, 84))
        return observation

    @property
    def observation_space(self):
        return self.environment.observation_space

    @property
    def action_space(self):
        return self.environment.action_space

    @property
    def reward_range(self):
        return self.environment.reward_range

    @property
    def metadata(self):
        return self.environment.metadata

    def reset(self):
        """Resets the environment. Converts the observation to greyscale,
        if it is not.

        Returns:
            observation: numpy array, the initial observation emitted by the
            environment.
        """
        observation = self.environment.reset()
        # The raw observation is a tuple; the first element is the frame.
        observation = observation[0]
        # Reset all reward-shaping state for the new episode.
        self.stage_reward = 0.0
        self.previous_stage_time_remaining = 3000
        self.previous_reward = 0
        self.previous_keys = 0
        self.previous_time_remaining = 3000
        return self._to_grayscale(observation)

    def render(self, mode):
        """Renders the current screen, before preprocessing.

        This calls the Gym API's render() method.

        Args:
            mode: Mode argument for the environment's render() method.
                Valid values (str) are:
                'rgb_array': returns the raw ALE image.
                'human': renders to display via the Gym renderer.

        Returns:
            if mode='rgb_array': numpy array, the most recent screen.
            if mode='human': bool, whether the rendering was successful.
        """
        return self.environment.render(mode)

    def step(self, action):
        """Applies the given action in the environment. Converts the
        observation to greyscale, if it is not.

        Args:
            action: int, flat index into the action table.

        Returns:
            observation: numpy array, the observation following the action.
            reward: float, the shaped cumulative stage reward.
            is_terminal: bool, whether the environment reached a terminal state.
            info: Gym API's info data structure.
        """
        # NOTE(review): the index is shifted by one, so action 0 selects the
        # *last* table entry via negative indexing — confirm this is intended.
        observation, reward, game_over, info = self.environment.step(
            np.array(self.tableAction[int(action) - 1]))
        observation, keys, time_remaining = observation
        # Bug fix: the updated stage timestamp returned by reward_compute was
        # previously assigned to a discarded local, so
        # self.previous_stage_time_remaining never advanced across stages.
        self.stage_reward, self.previous_stage_time_remaining = self.reward_compute(
            done=game_over,
            reward_total=self.stage_reward,
            keys=keys,
            previous_keys=self.previous_keys,
            reward=reward,
            previous_reward=self.previous_reward,
            time_remaining=time_remaining,
            previous_time_remaining=self.previous_time_remaining,
            previous_stage_time_remaining=self.previous_stage_time_remaining)
        self.previous_reward = reward
        self.previous_keys = keys
        self.previous_time_remaining = time_remaining
        self.game_over = game_over
        return self._to_grayscale(observation), self.stage_reward, game_over, info

    def reward_compute(
            self,
            done,
            reward_total,
            keys,
            previous_keys,
            reward,
            previous_reward,
            time_remaining,
            previous_time_remaining,
            previous_stage_time_remaining):
        """Reward-shaping formula for Obstacle Tower.

        `reward` is the environment's stage-completion count, `keys` the
        number of keys held and `time_remaining` the stage countdown.
        Shaping (translated from the original comments): completing a stage
        is worth 100x the stage delta minus the time spent on the stage;
        a key is worth 5; a time orb only 0.5, because leftover time is
        settled at stage end and would otherwise double-count; a small -0.1
        per-step penalty applies otherwise; dying early costs extra.

        Returns:
            (reward_total, previous_stage_time_remaining) — the updated
            cumulative shaped reward and the stage-start timestamp.
        """
        if reward < 0.2:
            reward = 0  # suppress small noise in the stage counter
        if (reward - previous_reward) > 0.8:
            # Stage completed: big bonus minus the time spent on the stage.
            print("Pass ", reward, " Stage!")
            reward_total += (reward - previous_reward) * 100 - \
                (previous_stage_time_remaining - time_remaining)
            # Carry the remaining time into the next stage's bookkeeping.
            previous_time_remaining = time_remaining
            previous_stage_time_remaining = time_remaining
            # A key/orb may be picked up on the same step, so the checks
            # below are allowed to stack with the stage bonus.
        if previous_keys > keys:
            # NOTE(review): this comparison looks inverted — collecting a key
            # should *increase* `keys`; confirm against the env's key counter.
            print("Get Key")
            reward_total += 5
        if previous_time_remaining < time_remaining and previous_time_remaining != 0:
            print("Get time power up")
            reward_total += 0.5
        else:
            reward_total -= 0.1  # small per-step penalty
        if done and previous_time_remaining > 100:
            # Dying with lots of time left is penalised harder.
            print("Agent died")
            reward_total -= (10 + time_remaining / 100)
        return reward_total, previous_stage_time_remaining
|
#coding:utf-8
from django.shortcuts import render_to_response, get_object_or_404
from activity.dao import activityDao
from django.template.context import RequestContext
from collection.dao import collectionDao, select_collection_byReq,\
update_rightTime_byReq, update_wrongTime_byReq
from django.http.response import HttpResponse
import json
from subject.models import Collection, Exercise
from django.views.decorators.csrf import csrf_exempt
from django.utils import simplejson
from exercise.dao import get_tips_byId
def into_collection(req):
    """Render the wrong-question collection page, logging the visit.

    Redirects to the login page when no ``userid`` cookie is present.
    """
    if not req.COOKIES.has_key('userid'):
        return render_to_response('login.html', RequestContext(req))
    activity_dao = activityDao({"userid": req.COOKIES['userid']})
    activity_dao.add_a_activity(('进入错题集').decode('utf-8'))
    return render_to_response('collection.html', RequestContext(req))
def get_collection(req):
    """Return a page of the user's wrong-question collection as JSON.

    ``p`` is the requested page number; ``p == 0`` marks the initial load,
    which is served page 1 plus the total entry count under ``numT``.
    Returns an empty JSON object when the user is not logged in.
    NOTE(review): nesting reconstructed from de-indented source — confirm
    whether the total count is meant to be sent only on the first request.
    """
    if not req.COOKIES.has_key('userid'):
        return HttpResponse(json.dumps({}), content_type="application/json")
    page = int(req.GET.get('p'))
    current = page
    payload = {}
    dao = collectionDao({'userid': req.COOKIES['userid']})
    if page == 0:
        current = 1
        payload['numT'] = dao.select_Ccollection_byUs()
    payload['col'] = dao.select_collection_byUs(current)
    return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_exempt
def delete_collection(req, p1):
    """Delete collection entry ``p1``.

    Only entries answered correctly at least once may be deleted;
    otherwise a JSON tip message is returned.
    """
    if not (select_collection_byReq({'id': p1}).righttime > 0):
        return HttpResponse(json.dumps({'tips': '唯有正确次数>0才能删除'}),
                            content_type="application/json")
    get_object_or_404(Collection, id=p1).delete()
    return HttpResponse()
def into_a_collection(req):
    """Render the single-question review page, or login when not signed in."""
    template = 'a_collection.html' if req.COOKIES.has_key('userid') else 'login.html'
    return render_to_response(template, RequestContext(req))
# Fetch a single wrong-question entry.
def get_a_collection(req, param):
    """Return the (param-1)-th collection entry of the logged-in user as JSON.

    Returns an empty JSON object when the user is not logged in.
    """
    if not req.COOKIES.has_key('userid'):
        return HttpResponse(json.dumps({}), content_type="application/json")
    dao = collectionDao({'userid': req.COOKIES['userid']})
    entry = dao.select_a_collection_byUs(int(param) - 1)
    return HttpResponse(json.dumps(entry), content_type="application/json")
'''
验证错题答案:1.获取登录信息
2.获取json
3.判断答案:根据题目id、answer get——》存在:根据collection.id增加正确次数,返回下一错题详情
不存在:根据collection.id增加错误次数,返回tips
'''
@csrf_exempt
def check_answer(req):
    """Check a submitted answer for a wrong-question entry.

    Expects a POST with a JSON body ``{'title': {...}, 'id': ..., 'num': ...}``.
    A correct answer bumps the entry's right-count and returns the next
    entry; a wrong answer bumps the wrong-count and returns tips plus the
    updated wrong-count. Any other access returns an error tip.
    """
    if req.method != 'POST' or not req.COOKIES.has_key('userid'):
        return HttpResponse(json.dumps({'tips': '访问错误,请重新登录'}),
                            content_type="application/json")
    payload = simplejson.loads(req.body)
    title = payload['title']
    entry_id = payload['id']  # renamed from `id`, which shadowed the builtin
    dao = collectionDao({'userid': req.COOKIES['userid']})
    if Exercise.objects.filter(id=title['id'], answer=title['answer']):
        update_rightTime_byReq({'id': entry_id})
        next_entry = dao.select_a_collection_byUs(payload['num'] - 1)
        return HttpResponse(json.dumps(next_entry), content_type="application/json")
    update_wrongTime_byReq({'id': entry_id})
    return HttpResponse(
        json.dumps({'tips': get_tips_byId(title['id']),
                    'wrongTime': select_collection_byReq({'id': entry_id}).wrongtime}),
        content_type="application/json")
|
def bubblesort(list1):
    """Sort ``list1`` in ascending order, in place, using bubble sort.

    Args:
        list1: list of mutually comparable items; it is mutated in place.

    Returns:
        The same list object, now sorted ascending.
    """
    for unsorted_end in range(len(list1) - 1, 0, -1):
        swapped = False
        for j in range(unsorted_end):
            if list1[j] > list1[j + 1]:
                # Tuple swap — no temp variable needed.
                list1[j], list1[j + 1] = list1[j + 1], list1[j]
                swapped = True
        if not swapped:
            # Early exit: a pass with no swaps means the list is sorted.
            break
    return list1
# Demo: sort a sample list in place and print the result.
myL = [2,4,10,64,52,14,18,25]
print (bubblesort(myL))
|
# Learn Python The Hard Way
# http://learnpythonthehardway.org/book/
# iTerm for terminal
# iPython
# Atom as IDE (integrated development environment) / Text Editor
# GitHub, keep remote copy of your git repository
# NOTE: Python 2 syntax throughout (print statements, integer division).
# Exercise 1
print "Begin Exercise 1" + "\n"
print "Hello World!"
print "Hello Again"
print "I like typing this."
print "This is fun."
print 'Yay! Printing.'
print "I'd much rather you 'not'."
print 'I "said" do not touch this.' + "\n"
# Hello World!
# Hello Again
# I like typing this.
# This is fun.
# Yay! Printing.
# I'd much rather you 'not'.
# I "said" do not touch this.
# Exercise 3
print "Begin Exercise 3"+"\n"
print "I will now count my chickens:"
print "Hens", 25 + 30 / 6
print "Roosters", 100 - 25 * 3 % 4
print 25 * 3 % 4
print 25 * 3
print 75 % 4
print "Now I will count the eggs:"
print 3 + 2 + 1 - 5 + 4 % 2 - 1 / 4 + 6
print "Is it true that 3 + 2 < 5 - 7?"
print 3 + 2 < 5 - 7
print "What is 3 + 2?", 3 + 2
print "What is 5 - 7?", 5 - 7
print "Oh, that's why it's False."
print "How about some more."
print "Is it greater?", 5 > -2
print "Is it greater or equal?", 5 >= -2
print "Is it less or equal?", 5 <= -2, "\n"
# I will now count my chickens:
# Hens 30
# Roosters 97
# 3
# 75
# 3
# Now I will count the eggs:
# 7
# Is it true that 3 + 2 < 5 - 7?
# False
# What is 3 + 2? 5
# What is 5 - 7? -2
# Oh, that's why it's False.
# How about some more.
# Is it greater? True
# Is it greater or equal? True
# Is it less or equal? False
# Exercise 4
print "Begin Exercise 4" + "\n"
cars = 100
space_in_a_car = 4.0
drivers = 30
passengers = 90
cars_not_driven = cars - drivers
cars_driven = drivers
carpool_capacity = cars_driven * space_in_a_car
average_passengers_per_car = passengers / cars_driven
print "There are", cars, "cars available."
print "There are only", drivers, "drivers available."
print "There will be", cars_not_driven, "empty cars today."
print "We can transport", carpool_capacity, "people today."
print "We have", passengers, "to carpool today."
print "We need to put about", average_passengers_per_car, "in each car."
# There are 100 cars available.
# There are only 30 drivers available.
# There will be 70 empty cars today.
# We can transport 120.0 people today.
# We have 90 to carpool today.
# We need to put about 3 in each car.
print "Hey %s there." % "you" + "\n"
# Hey you there.
# What do you mean by "read the file backward"?
# Very simple. Imagine you have a file with 16 lines of code in it. Start at line 16,
# and compare it to my file at line 16. Then do it again for 15,
# and so on until you've read the whole file backward.
# Exercise 5
print "Begin Exercise 5" + "\n"
my_name = 'Zed A. Shaw'
my_age = 35 # not a lie
my_height = 74 # inches
my_weight = 180 # lbs
my_eyes = 'Blue'
my_teeth = 'White'
my_hair = 'Brown'
print "Let's talk about %s." % my_name
print "He's %d inches tall." % my_height
print "He's %d pounds heavy." % my_weight
print "Actually that's not too heavy."
print "He's got %s eyes and %s hair." % (my_eyes, my_hair)
print "His teeth are usually %s depending on the coffee." % my_teeth
print "%s is really fat. He weighs %d pounds." % (my_name, my_weight)
# Let's talk about Zed A. Shaw.
# He's 74 inches tall.
# He's 180 pounds heavy.
# Actually that's not too heavy.
# He's got Blue eyes and Brown hair.
# His teeth are usually White depending on the coffee.
# Zed A. Shaw is really fat. He weighs 180 pounds.
# If I add 35, 74, and 180 I get 289.
# Format specifiers: %s for string, %d for decimal, %r for debugging
# What are formatters?
# They tell Python to take the variable on the right and put it in to replace the %s with its value.
# I don't get it, what is a "formatter"? Huh? The problem with teaching you programming is that
# to understand many of my descriptions you need to know how to do programming already.
# The way I solve this is I make you do something, and then I explain it later.
# When you run into these kinds of questions, write them down and see if I explain it later.
# this line is tricky, try to get it exactly right
print "If I add %d, %d, and %d I get %d." % (
    my_age, my_height, my_weight, my_age + my_height + my_weight) + "\n"
# Begin Exercise 6
x = "There are %d types of people." % 10
binary = "binary"
do_not = "don't"
y = "Those who know %s and those who %s." % (binary, do_not)
print x
print y
# There are 10 types of people.
# Those who know binary and those who don't.
print "I said: %r." % x
# I said: 'There are 10 types of people.'.
# Notice the stylistic choice of using single quotes and then the double quotes
# for a string with a string noted below
print "I also said: '%s'." % y
# I also said: 'Those who know binary and those who don't.'.
hilarious = False
joke_evaluation = "Isn't that joke so funny?! %r"
print joke_evaluation % hilarious
# Isn't that joke so funny?! False
w = "This is the left side of..."
e = "a string with a right side."
# This is the left side of...a string with a right side.
print w + e + "\n"
# What is the difference between %r and %s?
# Use the %r for debugging, since it displays the "raw" data of the variable,
# but the others are used for displaying to users.
# What's the point of %s and %d when you can just use %r?
# The %r is best for debugging, and the other formats are for actually displaying variables to users.
# Begin Exercise 7
print "Begin Exercise 7" "\n"
# print a string
print "Mary had a little lamb."
# print a string with a format specifier for string 'snow'
print "Its fleece was white as %s." % 'snow'
# print a string with a format specifier for string 'black'
print "What about the %s sheep?" % 'black'
# Mistake: I forgot to have black in quotes, threw an error
print "And everywhere that Mary went."
print "." * 10 # what'd that do?
print "F$#@" * 4, "it"
# Mistake: Without the comma it returns the whole line as a string, need comma not () to isolate
# Create a string with format specifiers for multiple string and decimal values as well as a variable
selling = "What if we sold"
print "%s the %s sheep for $%d and the %s sheep for $%d?" % (selling, 'black', 10, 'white', 5)
# Mary had a little lamb.
# Its fleece was white as snow.
# What about the black sheep?
# ..........
# F$#@F$#@F$#@F$#@ it
# What if we sold the black sheep for $10 and the white sheep for $5?
paradise = "Paradise"
end1 = "C"
end2 = "h"
end3 = "e"
end4 = "e"
end5 = "s"
end6 = "e"
end7 = "B"
end8 = "u"
end9 = "r"
end10 = "g"
end11 = "e"
end12 = "r"
# watch that comma at the end. try removing it to see what happens
print end1 + end2 + end3 + end4 + end5 + end6,
print end7 + end8 + end9 + end10 + end11 + end12
print "in %s" % paradise +"\n"
# Without the comma after end6 a new line is automatically created,
# The comma acts as a space between both print lines when it returns values
# It's bad form to go over 80 characters per line (ie- this line to the right ->)
# Exercise 8
print "Exercise 8" "\n"
formatter = "%r %r %r %r"
print formatter % (1,2,3,4)
print formatter % ("one", "two", "three", "four")
print formatter % (True, False, False, True)
print formatter % (formatter, formatter, formatter, formatter)
print formatter % (
    "I had this thing.",
    "That you could type up right.",
    "But it didn't sing.",
    "So I said goodnight.") +"\n"
# Mistake: I forgot the commas for the last 4 sentences
# Mistake: The last 4 sentences >80 characters so I had to put on new lines
# Note: Used double quotes in lieu of single quotes due to conjunction: didn't
# Note: Only string values need quotes
# 1 2 3 4
# 'one' 'two' 'three' 'four'
# True False False True
# '%r %r %r %r' '%r %r %r %r' '%r %r %r %r' '%r %r %r %r'
# 'I had this thing.' 'That you could type up right.'
# "But it didn't sing." 'So I said goodnight.'
# Exercise 9
print "Exercies 9" +"\n"
days = "Mon Tue Wed Thu Fri Sat Sun"
months = "Jan\nFeb\nMar\nApr\nMay\nJun\nJul\nAug"
# Note: "\n" adds a new line
print "Here are the days:", days
print "Here are the months:", months
print "Test"
print """There's something going on here.
With the three double-quotes.
We'll be able to type as much as we like.
Even 4 lines if we want, or 5, or 6."""
print "Test"
print """
There's something going on here.
With the three double-quotes.
We'll be able to type as much as we like.
Even 4 lines if we want, or 5, or 6.
"""
#Below code will only print the top line (ie - limiation of "" single line)
print "There's something going on here.",
"With the three double-quotes.",
"We'll be able to type as much as we like.",
"Even 4 lines if we want, or 5, or 6."
# Triple quotes on their own lines at beginning and end will add new lines
# Note: You can use ' or " or """ to wrap around strings
# They can use ' or " quotation marks (eg 'foo' "bar").
# The main limitation with these is that they don't wrap across multiple lines.
# That's what multiline-strings are for: These are strings surrounded by
# triple single or double quotes (''' or """) and are terminated only when
# a matching unescaped terminator is found. They can go on for as many
# lines as needed, and include all intervening whitespace.
# Here are the days: Mon Tue Wed Thu Fri Sat Sun
# Here are the months: Jan
# Feb
# Mar
# Apr
# May
# Jun
# Jul
# Aug
#
# There's something going on here.
# With the three double-quotes.
# We'll be able to type as much as we like.
# Even 4 lines if we want, or 5, or 6.
# Exercise 10
print "Exercise 10" +"\n"
# Sometimes you need to escape the ' or " within a string (use \)
print "I am 6'2\" tall." # escape double-quote inside string
print 'I am 6\'2" tall.' # escape single-quote inside string
# Tab a line in
tabby_cat = "\tI'm tabbed in."
# Add a new line mid string
persian_cat = "I'm split\non a line."
# Add a single backslash within a string 2 methods
backslash_cat = "I'm \\ a \\ cat."
backslash_cat2 = "I'm \ a \ cat."
# Make a formatted list 2 methods
fat_cat = """
I'll do a list:
\t* Cat food
\t* Fishies
\t* Catnip\n\t* Grass
"""
fat_cat2 = """
I'll do a list:
\t* Cat food
\t* Fishies
\t* Catnip
\t* Grass
"""
# Note: New line automatically created in output when formatted such here
print tabby_cat
print persian_cat
print backslash_cat
print backslash_cat2
print fat_cat
print fat_cat2
# Escape sequence list reference (notice the '\' preface)
# http://learnpythonthehardway.org/book/ex10.html
# \\ Backslash (\)
# \' Single-quote (')
# \" Double-quote (")
# \a ASCII bell (BEL)
# \b ASCII backspace (BS)
# \f ASCII formfeed (FF)
# \n ASCII linefeed (LF)
# \N{name} Character named name in the Unicode database (Unicode only)
# \r Carriage Return (CR)
# \t Horizontal Tab (TAB)
# \uxxxx Character with 16-bit hex value xxxx (Unicode only)
# \Uxxxxxxxx Character with 32-bit hex value xxxxxxxx (Unicode only)
# \v ASCII vertical tab (VT)
# \ooo Character with octal value ooo
# \xhh Character with hex value hh
print 'A "smart" man named %s with an IQ of %d.' % ("Steven", 140)
# NOTE(review): the next line uses '/' instead of the '\' escape, so the
# slashes print literally — presumably a deliberate "wrong way" example.
print 'A /"smart/" man named %r with an IQ of %r.' % ("Steven", 140)
# Sometimes you need to escape the ' or " within a string (use \)
print "I am 6'2\" %s with long %s." % ("tall", "arms")
print "I am 6'2\" %r with long %r." % ("tall", "arms")
print 'I am %d\'%d" tall with %d" long %s.' % (6,2,36, "arms")
print 'I am %r\'%r" tall with %d" long %s.' % (6,2,36,"arms")
print "I am 6'2\""
# print "I am 6'2""
# The above throws an error without the escape sequence
print "\n" "End Of Exercise Block 1-10"+ "\n"
|
# Read the user's name from stdin and print a greeting.
print("Please enter your name!")
user_name = input()
print("Hello,", user_name)
|
from option import gather_options, print_options
from network import Resnet, get_scheduler, init_net
from dataload import loadData
from Util import save_networks, load_networks, evaluate
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import torchvision
if __name__ == '__main__':
    opt = gather_options()
    print_options(opt)
    # Fall back to CPU when no GPU ids were requested.
    device = torch.device('cuda:{}'.format(opt.gpu_ids[0])) if opt.gpu_ids else torch.device('cpu')
    trainloader, testloader = loadData(opt)
    dataset_size = len(trainloader)
    print('#training images = %d' % dataset_size)
    net = Resnet(opt.input_nc, num_classes=opt.num_classes, norm=opt.norm, nl=opt.nl)
    # Bug fix: honour the configured GPU ids instead of hard-coding [0],
    # which ignored multi-GPU settings and broke CPU-only runs.
    net = init_net(net, init_type='normal', gpu_ids=opt.gpu_ids)
    if opt.continue_train:
        load_networks(opt, net)
    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = torch.optim.SGD(net.parameters(), lr=opt.lr, momentum=0.9)
    scheduler = get_scheduler(optimizer, opt)
    global_step = 0  # renamed from `iter`, which shadowed the builtin
    running_loss = 0.0
    correct = 0.0
    total = 0
    graph_logged = False
    writer = SummaryWriter()
    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
        for i, data in enumerate(trainloader):
            global_step += 1
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Running statistics for the periodic train-set report.
            running_loss += loss.item()
            total += labels.size(0)
            _, predict = torch.max(outputs.data, 1)
            correct += (predict == labels).sum().item()
            if global_step % opt.print_freq == 0:
                writer.add_scalar('Loss_crossEntropy/train', float(running_loss / opt.print_freq), global_step)
                # trainset accuracy over the last reporting window
                accuracy = correct * 100.0 / total
                writer.add_scalar('Accuracy/train', accuracy, global_step)
                print("iteration: %d, loss: %.4f, accuracy on %d train images: %.3f %%"
                      % (global_step, running_loss / opt.print_freq, total, accuracy))
                if not graph_logged:
                    # Log the model graph once; re-adding it every reporting
                    # interval only bloats the event file with duplicates.
                    writer.add_graph(net, inputs)
                    graph_logged = True
                running_loss = 0.0
                correct = 0
                total = 0
            if global_step % opt.save_latest_freq == 0:
                save_networks(opt, net, 'latest')
                print('saving the latest model (epoch %d, iter %d)' % (epoch, global_step))
        # testset accuracy, evaluated once per epoch
        test_accuracy = evaluate(net, testloader, device)
        print("Accuracy on testset of epoch %d (iter: %d )is %.3f %%" % (epoch, global_step, test_accuracy))
        writer.add_scalar('Accuracy/test', test_accuracy, global_step)
        if epoch % opt.save_epoch_freq == 0:
            save_networks(opt, net, epoch)
        scheduler.step()
        lr = optimizer.param_groups[0]['lr']
        print('learning rate = %.7f' % lr)
    writer.close()
|
from encoding.base58_check import Base58CheckAddress
"""
You don't wanna know.
"""
class Ptr(Base58CheckAddress):
    """Base58Check-encoded address whose version byte is 117 (0x75)."""

    VERSION_BYTE = b'\x75'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.