| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2-616) | content_id (string, len 40) | detected_licenses (list, len 0-69) | license_type (2 classes) | repo_name (string, len 5-118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, len 2-10.3M) | authors (list, len 1) | author_id (string, len 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
72a4cb0ca3550ac8f90f2fb20f53c61176885330
|
0ef2e598854a39e19e523074c17c0ca72cbeccbc
|
/object_movement.py
|
b2b5020f4ebeb8c00ff7194da2e35bb4f687d704
|
[] |
no_license
|
BryanBattershill/Hackathon2018
|
d2d0956aa15b4f6cbe3f935ce9c131141c1c46fc
|
d0b56f479dc651e6bf63e4f7b360ef7bb26fb4ac
|
refs/heads/master
| 2021-04-28T19:34:17.919981
| 2018-09-18T22:10:16
| 2018-09-18T22:10:16
| 121,900,211
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,217
|
py
|
# USAGE
# python object_movement.py --video object_tracking_example.mp4
# python object_movement.py
# import the necessary packages
from collections import deque
import cv2
import numpy as np
import argparse
import imutils
import math
import time
import random
import sys
class EnemyClass:
    speed = 0
    radius = 0
    trajectory = []  # [0,1]
    positionE = []   # [0,1]
    corners = 0      # [0,1,2,3]
    xrange = 0
    yrange = 0
    # booleans for spawn location
    destroyed = False
    outOfBounds = False
    absorbed = False

    # constructor
    def __init__(self, speed, radius):
        self.speed = speed
        self.radius = radius
        self.xrange = 600
        self.yrange = 450
        positionE = [0, 0]
        trajectory = [0, 0]
        # 0 is right side, 1 is top side, 2 is left side, 3 is bottom side
        randomSide = random.randint(0, 3)
        # angle of movement based on position relative to center of side
        angleOfTrajectory = 0
        # x.range = 600 --> screen x range
        # y.range = 400 --> screen y range
        # randomly spawning objects on sides of screen
        # corner = [0,1,2,3]
        # 0 = top right corner, 1 = top left corner, 2 = bottom left corner, 3 = bottom right corner
        # COORDINATES START AT TOP LEFT CORNER
        # right side
        if randomSide == 0:
            self.positionE = [self.xrange, random.randint(0, self.yrange)]
            if self.positionE[1] >= self.yrange / 2:
                corner = 3  # bottom right corner
                angleOfTrajectory = random.uniform(4 * math.pi / 6, 5 * math.pi / 6)
                self.trajectory = [math.cos(angleOfTrajectory) * self.speed, -math.sin(angleOfTrajectory) * self.speed]
            else:
                corner = 0  # top right corner
                angleOfTrajectory = random.uniform(7 * math.pi / 6, 4 * math.pi / 3)
                self.trajectory = [math.cos(angleOfTrajectory) * self.speed, -math.sin(angleOfTrajectory) * self.speed]
        # top side
        elif randomSide == 1:
            self.positionE = [random.randint(0, self.xrange), 0]
            if self.positionE[0] >= self.xrange / 2:
                corner = 0  # top right corner
                angleOfTrajectory = random.uniform(7 * math.pi / 6, 4 * math.pi / 3)
                self.trajectory = [math.cos(angleOfTrajectory) * self.speed, -math.sin(angleOfTrajectory) * self.speed]
            else:
                corner = 1  # top left corner
                angleOfTrajectory = random.uniform(10 * math.pi / 6, 11 * math.pi / 6)
                self.trajectory = [math.cos(angleOfTrajectory) * self.speed, -math.sin(angleOfTrajectory) * self.speed]
        # left side
        elif randomSide == 2:
            self.positionE = [0, random.randint(0, self.yrange)]
            if self.positionE[1] <= self.yrange / 2:
                corner = 1  # top left corner
                angleOfTrajectory = random.uniform(10 * math.pi / 6, 11 * math.pi / 6)
                self.trajectory = [math.cos(angleOfTrajectory) * self.speed, -math.sin(angleOfTrajectory) * self.speed]
            else:
                corner = 2  # bottom left corner
                angleOfTrajectory = random.uniform(math.pi / 6, math.pi / 3)
                self.trajectory = [math.cos(angleOfTrajectory) * self.speed, -math.sin(angleOfTrajectory) * self.speed]
        # bottom side
        elif randomSide == 3:
            self.positionE = [random.randint(0, self.xrange), self.yrange]
            if self.positionE[0] <= self.xrange / 2:
                corner = 2  # bottom left corner
                angleOfTrajectory = random.uniform(math.pi / 6, math.pi / 3)
                self.trajectory = [math.cos(angleOfTrajectory) * self.speed, -math.sin(angleOfTrajectory) * self.speed]
            else:
                corner = 3  # bottom right corner
                angleOfTrajectory = random.uniform(4 * math.pi / 6, 5 * math.pi / 6)
                self.trajectory = [-math.cos(angleOfTrajectory) * self.speed, math.sin(angleOfTrajectory) * self.speed]

    # run as update
    def stateDetect(self):
        # if destroyed == true, increment score; if outOfBounds == true then increase speed;
        # if absorbed == true, increase the radius of the boss
        return [self.destroyed, self.outOfBounds, self.absorbed]  # [0, 1, 2]

    def move(self):
        self.positionE[0] = self.positionE[0] + self.trajectory[0]
        self.positionE[1] = self.positionE[1] + self.trajectory[1]
        pass

    # run as update, after stateDetect
    def collision(self, playerPosition, boss, tempFrame):
        try:
            if (((self.positionE[0] - playerPosition[0]) ** 2 + (self.positionE[1] - playerPosition[1]) ** 2) ** (1 / 2)) <= 60:  # player radius is 30
                self.destroyed = True
            if self.positionE[0] < 0 or self.positionE[0] > self.xrange or self.positionE[1] < 0 or self.positionE[1] > self.yrange:
                self.outOfBounds = True
            if ((self.positionE[0] - boss.position[0]) ** 2 + (self.positionE[1] - boss.position[1]) ** 2) ** (1 / 2) <= boss.radius:
                self.absorbed = True
        except:
            pass

class PlayerClass:
    pBuffer = 0
    lives = 3
    pColor = (29, 120, 6)

    def damage(self):
        if self.invuln() == False:
            self.lives = self.lives - 1
            self.dead()
            self.pBuffer = time.time()
            self.pColor = (255, 0, 0)

    def invuln(self):
        return self.pBuffer != 0

    def dead(self):
        if self.lives == 0:
            sys.exit()

    def updateBuffer(self):
        if (time.time() - self.pBuffer) > 2:
            self.pBuffer = 0
            self.pColor = (29, 120, 6)

class BossClass:
    speed = 2
    Bdirection = 0
    radius = 50
    position = [200, 200]

    def incSize(self):
        self.radius += 3

    def incSpeed(self):
        self.speed += 0.5

    def move(self, pos, PRadius, playerRef):
        deltx = pos[0] - self.position[0]
        delty = self.position[1] - pos[1]
        if deltx != 0:
            Bdirection = math.atan(delty / deltx)
            if deltx > 0:
                self.position[0] += self.speed * math.cos(Bdirection)
                self.position[1] += self.speed * -math.sin(Bdirection)
            else:
                self.position[0] += self.speed * -math.cos(Bdirection)
                self.position[1] += self.speed * math.sin(Bdirection)
        if math.sqrt(deltx ** 2 + delty ** 2) <= self.radius + PRadius:
            playerRef.damage()
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
                help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=32,
                help="max buffer size")
args = vars(ap.parse_args())
# define the lower and upper boundaries of the "green"
# ball in the HSV color space
greenLower = (20, 80, 0)
greenUpper = (70, 255, 255)
# initialize the list of tracked points, the frame counter,
# and the coordinate deltas
pts = deque(maxlen=args["buffer"])
counter = 0
(dX, dY) = (0, 0)
damageBuffer = 0
direction = ""
score = 0
timer = time.time()
boss = BossClass()
player = PlayerClass()
allEnemies = []
enemySpawnTime = time.time()
# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    camera = cv2.VideoCapture(0)
# otherwise, grab a reference to the video file
else:
    camera = cv2.VideoCapture(args["video"])
# keep looping
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()
    frame = cv2.flip(frame, 1)
    if time.time() - enemySpawnTime > 1:
        allEnemies.append(EnemyClass(2, 30))
        enemySpawnTime = time.time()

    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if args.get("video") and not grabbed:
        break

    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=600)
    # blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # construct a mask for the color "green", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None

    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            # cv2.circle(frame, (int(x), int(y)), int(radius),
            #            (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)

    pts.appendleft(center)

    for i in range(len(allEnemies)):
        cv2.circle(frame, (round(allEnemies[i].positionE[0] + 15), round(allEnemies[i].positionE[1])), 25, (0, 255, 255), -1)
        allEnemies[i].move()
        if allEnemies[i].stateDetect()[0] == True:
            allEnemies.pop(i)
            score += 1
            break
        elif allEnemies[i].stateDetect()[1] == True:
            allEnemies.pop(i)
            boss.incSpeed()
            break
        elif allEnemies[i].stateDetect()[2] == True:
            allEnemies.pop(i)
            boss.incSize()
            break
        allEnemies[i].collision(center, boss, frame)

    try:
        boss.move([center[0], center[1]], 30, player)
        if player.invuln():
            player.updateBuffer()
    except:
        pass

    cv2.circle(frame, (round(boss.position[0]), round(boss.position[1])), boss.radius, (0, 255, 255), -1)

    # loop over the set of tracked points
    for i in np.arange(1, len(pts)):
        # if either of the tracked points are None, ignore
        # them
        if pts[i - 1] is None or pts[i] is None:
            continue

        # check to see if enough points have been accumulated in
        # the buffer
        try:
            if counter >= 10 and i == 1 and pts[-10] is not None:
                # compute the difference between the x and y
                # coordinates and re-initialize the direction
                # text variables
                dX = pts[-10][0] - pts[i][0]
                dY = pts[-10][1] - pts[i][1]
                (dirX, dirY) = ("", "")

                ## # ensure there is significant movement in the
                ## # x-direction
                ## if np.abs(dX) > 20:
                ##     dirX = "East" if np.sign(dX) == 1 else "West"
                ##
                ## # ensure there is significant movement in the
                ## # y-direction
                ## if np.abs(dY) > 20:
                ##     dirY = "North" if np.sign(dY) == 1 else "South"
                ##
                ## # handle when both directions are non-empty
                ## if dirX != "" and dirY != "":
                ##     direction = "{}-{}".format(dirY, dirX)
                ##
                ## # otherwise, only one direction is non-empty
                ## else:
                ##     direction = dirX if dirX != "" else dirY
        except:
            pass

        # otherwise, compute the thickness of the line and
        # draw the connecting lines
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)

    try:
        if center == None:
            pass
        elif center[0] > 10:
            cv2.circle(frame, (center[0], center[1]), 30, player.pColor, -1)
    except:
        pass

    # show the movement deltas and the direction of movement on
    # the frame
    ## cv2.putText(frame, direction, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
    ##             0.65, (0, 0, 255), 3)
    if not (center == None):
        cv2.putText(frame, "Score: {}, Time: {}, Lives: {}".format(score, round(time.time() - timer, 2), player.lives),
                    (0, 10), cv2.FONT_HERSHEY_SIMPLEX,
                    0.35, (0, 0, 255), 1)

    # show the frame to our screen and increment the frame counter
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    counter += 1

    # if the 'q' key is pressed, stop the loop
    if key == ord("q") or player.lives == 0:
        break
# cleanup the camera and close any open windows
camera.release()
#cv2.ims
cv2.destroyAllWindows()
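
# Editor's note (summary of the script above, not part of the original source):
# a webcam color-tracking game. The largest green blob in the HSV mask is the
# player; enemies spawn on the screen edges every second, the boss circle
# chases the tracked point, and the run ends on 'q' or when player.lives is 0.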
|
[
"noreply@github.com"
] |
BryanBattershill.noreply@github.com
|
c35f0eea37740102e29dbec251418a94b8a24eef
|
e815fd9fbc703ce43d94fba6e53c86b898e32977
|
/llia/synths/comb/comb_data.py
|
d970ca8784031b0980adfd2f3759750a82743041
|
[] |
no_license
|
kaos/Llia
|
0cdedc13d84ce86034287fba00ec0b169fbd87b1
|
b2aaa163c4ada3b446a7c3701e3f05c0f8406d6e
|
refs/heads/master
| 2020-12-28T19:23:48.480608
| 2017-03-27T05:51:14
| 2017-03-27T05:51:14
| 64,940,263
| 0
| 0
| null | 2016-08-04T14:13:13
| 2016-08-04T14:13:13
| null |
UTF-8
|
Python
| false
| false
| 1,347
|
py
|
# llia.synths.Comb.Comb_data

from __future__ import print_function
from llia.program import Program
from llia.bank import ProgramBank
from llia.performance_edit import performance

prototype = {
    "delayScale" : 0.01,
    "delay" : 0.50,
    "phase" : -1,
    "wet" : 1.0}

class Comb(Program):

    def __init__(self, name):
        super(Comb, self).__init__(name, Comb, prototype)
        self.performance = performance()

program_bank = ProgramBank(Comb("Init"))
program_bank.enable_undo = False

def comb(slot, name,
         delayScale = 0.01,   # 0.001|0.010|0.100
         delay = 0.50,        # 0.0 .. 1.0
         phase = -1,          # -1 .. +1
         wet = 1.0):          # 0.0 .. 2.0
    def fval(x):
        return round(float(x), 4)
    p = Comb(name)
    p["delayScale"] = fval(delayScale)
    p["delay"] = fval(delay)
    p["phase"] = int(phase)
    p["wet"] = fval(wet)
    program_bank[slot] = p
    return p

comb(0, "Bypass", delayScale=0.001, delay=0.5, phase=-1, wet=0.0)

slot = 1
for p in (-1, 1):
    if p == -1:
        sign = "-"
    else:
        sign = "+"
    for ds in (0.001, 0.01, 0.1):
        for d in (0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0):
            delay = ds*d
            name = (sign+"%s ms") % delay
            comb(slot, name, ds, d, p, 1.0)
            slot = slot + 1
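
# Example (editor's sketch; slot 61 is assumed free, since the loop above
# fills slots 1-60 with 2 signs x 3 scales x 10 delays):
#   comb(61, "+0.25 ms", delayScale=0.001, delay=0.25, phase=1, wet=1.0)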
|
[
"plewto@gmail.com"
] |
plewto@gmail.com
|
fa55fb23e3d6bae9cb457a19aa839c87b4bcf142
|
3bbdcdfa6ee6631bea52dd651914cb898062b870
|
/numpy_pandas/__init__.py
|
132426c8ed006e4d81e02c92ae4a1325b00aa968
|
[] |
no_license
|
xiaoyeren/python_high_performance
|
55ea5ee9f628e1c1155a6946274c862bda57ae2c
|
496a5e55e7f40033c80e9ee3b9190c184d4701d9
|
refs/heads/master
| 2020-05-24T15:20:00.576786
| 2019-05-13T06:01:35
| 2019-05-13T06:01:35
| 187,329,222
| 1
| 0
| null | 2019-05-18T07:43:11
| 2019-05-18T07:43:11
| null |
UTF-8
|
Python
| false
| false
| 70
|
py
|
# -*- coding: utf-8 -*-
# Use numpy and pandas to perform fast array operations
|
[
"luozhukun@163.com"
] |
luozhukun@163.com
|
f45f0f19ecdbd76d15d862f09d15896aed3c69c4
|
e8feb94c80b706cac8187aea3abdab5554d0585e
|
/portscan.py
|
29d00f1e8dd50a2928ff6da405406a08ae09c777
|
[] |
no_license
|
fabiocabrini/python
|
63ee4d36261cdf309668d96f3d77a7f5456131a9
|
23f7ad379ece83841ad8c9270e629837c8055159
|
refs/heads/master
| 2023-03-31T22:27:35.404628
| 2023-03-24T10:39:57
| 2023-03-24T10:39:57
| 243,404,103
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
import sys
import socket

def main():
    args = sys.argv
    if len(args) < 2:
        print("[!] Missing arguments for the program! Exiting...")
        sys.exit(1)
    ip = args[1]
    portas = args[2] if len(args) >= 3 else "1:65536"
    portas = (x for x in range(int(portas.split(":")[0]), int(portas.split(":")[1]) + 1))
    scan(ip, portas)

def banner(sckt, ip, porta):
    try:
        sckt.settimeout(1)
        sckt.connect((ip, porta))
        banner = sckt.recv(1024).decode().strip()
        assert banner
        return banner
    except:
        return "Unknown"

def child(ip, port):
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        s.settimeout(0.3)
        if s.connect_ex((ip, port)) == 0:
            print("{}/tcp open".format(port), end="|")
            print(banner(s, ip, port))
    except:
        pass

def scan(ip, portas):
    for c in portas:
        child(ip, c)

if __name__ == '__main__':
    main()
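
# Example invocation (editor's illustration; host and port range are hypothetical):
#   python portscan.py 192.168.0.10 20:80
# connects to each TCP port in the range and prints "<port>/tcp open|<banner>"
# for every open port it finds.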
|
[
"noreply@github.com"
] |
fabiocabrini.noreply@github.com
|
881992550625165c17da1fcee6b64e92bb30f8da
|
88b1efabf9ba2532f37dc9eef5a8f60a63bb9181
|
/libs/networks/build_whole_network.py
|
fcfb02de3f726a2b12d0ef897d7b377776ef7fa0
|
[
"Apache-2.0"
] |
permissive
|
arasharchor/DCL_RetinaNet_Tensorflow
|
e674b2b3a975020e7442584b47116456f8490c5b
|
1d14c9800c3eb1975e8832978f7a263783d171ec
|
refs/heads/main
| 2023-03-30T13:16:24.151129
| 2021-04-13T08:01:20
| 2021-04-13T08:01:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,753
|
py
|
# -*-coding: utf-8 -*-

from __future__ import absolute_import, division, print_function

import os
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np

from libs.networks import resnet, resnet_gluoncv, mobilenet_v2, xception
from libs.box_utils import anchor_utils, generate_anchors, generate_rotate_anchors
from libs.configs import cfgs
from libs.losses import losses
from libs.box_utils import show_box_in_tensor
from libs.detection_oprations.proposal_opr_ import postprocess_detctions
from libs.detection_oprations.anchor_target_layer_without_boxweight import anchor_target_layer

class DetectionNetwork(object):

    def __init__(self, base_network_name, is_training):
        self.base_network_name = base_network_name
        self.is_training = is_training
        if cfgs.METHOD == 'H':
            self.num_anchors_per_location = len(cfgs.ANCHOR_SCALES) * len(cfgs.ANCHOR_RATIOS)
        else:
            self.num_anchors_per_location = len(cfgs.ANCHOR_SCALES) * len(cfgs.ANCHOR_RATIOS) * len(cfgs.ANCHOR_ANGLES)
        self.method = cfgs.METHOD

    def build_base_network(self, input_img_batch):
        if self.base_network_name.startswith('resnet_v1'):
            return resnet.resnet_base(input_img_batch, scope_name=self.base_network_name, is_training=self.is_training)
        elif self.base_network_name in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
            return resnet_gluoncv.resnet_base(input_img_batch, scope_name=self.base_network_name,
                                              is_training=self.is_training)
        elif self.base_network_name.startswith('MobilenetV2'):
            return mobilenet_v2.mobilenetv2_base(input_img_batch, is_training=self.is_training)
        elif self.base_network_name.startswith('xception'):
            return xception.xception_base(input_img_batch, is_training=self.is_training)
        else:
            raise ValueError('Sry, we only support resnet, mobilenet_v2 and xception')

    def rpn_cls_net(self, inputs, scope_list, reuse_flag, level):
        rpn_conv2d_3x3 = inputs
        for i in range(4):
            rpn_conv2d_3x3 = slim.conv2d(inputs=rpn_conv2d_3x3,
                                         num_outputs=256,
                                         kernel_size=[3, 3],
                                         stride=1,
                                         activation_fn=tf.nn.relu,
                                         weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,
                                         biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER,
                                         scope='{}_{}'.format(scope_list[0], i),
                                         reuse=reuse_flag)
        rpn_box_scores = slim.conv2d(rpn_conv2d_3x3,
                                     num_outputs=cfgs.CLASS_NUM * self.num_anchors_per_location,
                                     kernel_size=[3, 3],
                                     stride=1,
                                     weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,
                                     biases_initializer=cfgs.FINAL_CONV_BIAS_INITIALIZER,
                                     scope=scope_list[2],
                                     activation_fn=None,
                                     reuse=reuse_flag)
        rpn_box_scores = tf.reshape(rpn_box_scores, [-1, cfgs.CLASS_NUM],
                                    name='rpn_{}_classification_reshape'.format(level))
        rpn_box_probs = tf.sigmoid(rpn_box_scores, name='rpn_{}_classification_sigmoid'.format(level))
        return rpn_box_scores, rpn_box_probs

    def rpn_reg_net(self, inputs, scope_list, reuse_flag, level):
        rpn_delta_boxes = inputs
        for i in range(4):
            rpn_delta_boxes = slim.conv2d(inputs=rpn_delta_boxes,
                                          num_outputs=256,
                                          kernel_size=[3, 3],
                                          weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,
                                          biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER,
                                          stride=1,
                                          activation_fn=tf.nn.relu,
                                          scope='{}_{}'.format(scope_list[1], i),
                                          reuse=reuse_flag)
        rpn_delta_boxes = slim.conv2d(rpn_delta_boxes,
                                      num_outputs=5 * self.num_anchors_per_location,
                                      kernel_size=[3, 3],
                                      stride=1,
                                      weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,
                                      biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER,
                                      scope=scope_list[3],
                                      activation_fn=None,
                                      reuse=reuse_flag)
        rpn_delta_boxes = tf.reshape(rpn_delta_boxes, [-1, 5],
                                     name='rpn_{}_regression_reshape'.format(level))
        return rpn_delta_boxes

    def rpn_net(self, feature_pyramid):
        rpn_delta_boxes_list = []
        rpn_scores_list = []
        rpn_probs_list = []
        with tf.variable_scope('rpn_net'):
            with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(cfgs.WEIGHT_DECAY)):
                for level in cfgs.LEVEL:
                    if cfgs.SHARE_NET:
                        reuse_flag = None if level == 'P3' else True
                        scope_list = ['conv2d_3x3_cls', 'conv2d_3x3_reg', 'rpn_classification', 'rpn_regression']
                    else:
                        reuse_flag = None
                        scope_list = ['conv2d_3x3_cls_' + level, 'conv2d_3x3_reg_' + level,
                                      'rpn_classification_' + level, 'rpn_regression_' + level]
                    rpn_box_scores, rpn_box_probs = self.rpn_cls_net(feature_pyramid[level], scope_list, reuse_flag, level)
                    rpn_delta_boxes = self.rpn_reg_net(feature_pyramid[level], scope_list, reuse_flag, level)
                    rpn_scores_list.append(rpn_box_scores)
                    rpn_probs_list.append(rpn_box_probs)
                    rpn_delta_boxes_list.append(rpn_delta_boxes)

            rpn_all_delta_boxes = tf.concat(rpn_delta_boxes_list, axis=0)
            rpn_all_boxes_scores = tf.concat(rpn_scores_list, axis=0)
            rpn_all_boxes_probs = tf.concat(rpn_probs_list, axis=0)
        return rpn_all_delta_boxes, rpn_all_boxes_scores, rpn_all_boxes_probs

    def make_anchors(self, feature_pyramid):
        with tf.variable_scope('make_anchors'):
            anchor_list = []
            level_list = cfgs.LEVEL
            with tf.name_scope('make_anchors_all_level'):
                for level, base_anchor_size, stride in zip(level_list, cfgs.BASE_ANCHOR_SIZE_LIST, cfgs.ANCHOR_STRIDE):
                    '''
                    (level, base_anchor_size) tuple:
                    (P3, 32), (P4, 64), (P5, 128), (P6, 256), (P7, 512)
                    '''
                    featuremap_height, featuremap_width = tf.shape(feature_pyramid[level])[1], \
                                                          tf.shape(feature_pyramid[level])[2]
                    featuremap_height = tf.cast(featuremap_height, tf.float32)
                    featuremap_width = tf.cast(featuremap_width, tf.float32)

                    # tmp_anchors = anchor_utils.make_anchors(base_anchor_size=base_anchor_size,
                    #                                         anchor_scales=cfgs.ANCHOR_SCALES,
                    #                                         anchor_ratios=cfgs.ANCHOR_RATIOS,
                    #                                         featuremap_height=featuremap_height,
                    #                                         featuremap_width=featuremap_width,
                    #                                         stride=stride,
                    #                                         name='make_anchors_{}'.format(level))
                    if self.method == 'H':
                        tmp_anchors = tf.py_func(generate_anchors.generate_anchors_pre,
                                                 inp=[featuremap_height, featuremap_width, stride,
                                                      np.array(cfgs.ANCHOR_SCALES) * stride, cfgs.ANCHOR_RATIOS, 4.0],
                                                 Tout=[tf.float32])
                        tmp_anchors = tf.reshape(tmp_anchors, [-1, 4])
                    else:
                        tmp_anchors = generate_rotate_anchors.make_anchors(base_anchor_size=base_anchor_size,
                                                                           anchor_scales=cfgs.ANCHOR_SCALES,
                                                                           anchor_ratios=cfgs.ANCHOR_RATIOS,
                                                                           anchor_angles=cfgs.ANCHOR_ANGLES,
                                                                           featuremap_height=featuremap_height,
                                                                           featuremap_width=featuremap_width,
                                                                           stride=stride)
                        tmp_anchors = tf.reshape(tmp_anchors, [-1, 5])
                    anchor_list.append(tmp_anchors)

                all_level_anchors = tf.concat(anchor_list, axis=0)
            return all_level_anchors

    def add_anchor_img_smry(self, img, anchors, labels, method):
        positive_anchor_indices = tf.reshape(tf.where(tf.greater_equal(labels, 1)), [-1])
        # negative_anchor_indices = tf.reshape(tf.where(tf.equal(labels, 0)), [-1])
        positive_anchor = tf.gather(anchors, positive_anchor_indices)
        # negative_anchor = tf.gather(anchors, negative_anchor_indices)
        pos_in_img = show_box_in_tensor.only_draw_boxes(img_batch=img,
                                                        boxes=positive_anchor,
                                                        method=method)
        # neg_in_img = show_box_in_tensor.only_draw_boxes(img_batch=img,
        #                                                 boxes=negative_anchor)
        tf.summary.image('positive_anchor', pos_in_img)
        # tf.summary.image('negative_anchors', neg_in_img)

    def build_whole_detection_network(self, input_img_batch, gtboxes_batch_h, gtboxes_batch_r, gpu_id=0):
        if self.is_training:
            gtboxes_batch_h = tf.reshape(gtboxes_batch_h, [-1, 5])
            gtboxes_batch_h = tf.cast(gtboxes_batch_h, tf.float32)
            gtboxes_batch_r = tf.reshape(gtboxes_batch_r, [-1, 6])
            gtboxes_batch_r = tf.cast(gtboxes_batch_r, tf.float32)

        # 1. build base network
        feature_pyramid = self.build_base_network(input_img_batch)

        # 2. build rpn
        rpn_box_pred, rpn_cls_score, rpn_cls_prob = self.rpn_net(feature_pyramid)

        # 3. generate_anchors
        anchors = self.make_anchors(feature_pyramid)

        # 4. postprocess rpn proposals. such as: decode, clip, filter
        if not self.is_training:
            with tf.variable_scope('postprocess_detctions'):
                boxes, scores, category = postprocess_detctions(rpn_bbox_pred=rpn_box_pred,
                                                                rpn_cls_prob=rpn_cls_prob,
                                                                anchors=anchors,
                                                                is_training=self.is_training)
                return boxes, scores, category

        # 5. build loss
        else:
            with tf.variable_scope('build_loss'):
                labels, target_delta, anchor_states, target_boxes = tf.py_func(func=anchor_target_layer,
                                                                               inp=[gtboxes_batch_h, gtboxes_batch_r,
                                                                                    anchors, gpu_id],
                                                                               Tout=[tf.float32, tf.float32, tf.float32,
                                                                                     tf.float32])
                if self.method == 'H':
                    self.add_anchor_img_smry(input_img_batch, anchors, anchor_states, 0)
                else:
                    self.add_anchor_img_smry(input_img_batch, anchors, anchor_states, 1)

                cls_loss = losses.focal_loss(labels, rpn_cls_score, anchor_states)
                if cfgs.REG_LOSS_MODE == 0:
                    reg_loss = losses.iou_smooth_l1_loss(target_delta, rpn_box_pred, anchor_states, target_boxes,
                                                         anchors)
                elif cfgs.REG_LOSS_MODE == 1:
                    reg_loss = losses.smooth_l1_loss_atan(target_delta, rpn_box_pred, anchor_states)
                elif cfgs.REG_LOSS_MODE == 2:
                    reg_loss = losses.iou_smooth_l1_loss_(target_delta, rpn_box_pred, anchor_states, target_boxes,
                                                          anchors, alpha=cfgs.ALPHA, beta=cfgs.BETA)
                elif cfgs.REG_LOSS_MODE == 3:
                    reg_loss = losses.iou_smooth_l1_loss_1(rpn_box_pred, anchor_states, target_boxes,
                                                           anchors, alpha=cfgs.ALPHA, beta=cfgs.BETA)
                else:
                    reg_loss = losses.smooth_l1_loss(target_delta, rpn_box_pred, anchor_states)

                losses_dict = {'cls_loss': cls_loss * cfgs.CLS_WEIGHT,
                               'reg_loss': reg_loss * cfgs.REG_WEIGHT}

            with tf.variable_scope('postprocess_detctions'):
                boxes, scores, category = postprocess_detctions(rpn_bbox_pred=rpn_box_pred,
                                                                rpn_cls_prob=rpn_cls_prob,
                                                                anchors=anchors,
                                                                is_training=self.is_training,
                                                                gpu_id=gpu_id)
                boxes = tf.stop_gradient(boxes)
                scores = tf.stop_gradient(scores)
                category = tf.stop_gradient(category)

            return boxes, scores, category, losses_dict

    def get_restorer(self):
        checkpoint_path = tf.train.latest_checkpoint(os.path.join(cfgs.TRAINED_CKPT, cfgs.VERSION))
        if checkpoint_path != None:
            if cfgs.RESTORE_FROM_RPN:
                print('___restore from rpn___')
                model_variables = slim.get_model_variables()
                restore_variables = [var for var in model_variables if not var.name.startswith('FastRCNN_Head')] + \
                                    [slim.get_or_create_global_step()]
                for var in restore_variables:
                    print(var.name)
                restorer = tf.train.Saver(restore_variables)
            else:
                restorer = tf.train.Saver()
            print("model restore from :", checkpoint_path)
        else:
            checkpoint_path = cfgs.PRETRAINED_CKPT
            print("model restore from pretrained mode, path is :", checkpoint_path)

            model_variables = slim.get_model_variables()
            # for var in model_variables:
            #     print(var.name)
            # print(20*"__++__++__")

            def name_in_ckpt_rpn(var):
                return var.op.name

            def name_in_ckpt_fastrcnn_head(var):
                '''
                Fast-RCNN/resnet_v1_50/block4 --> resnet_v1_50/block4
                Fast-RCNN/MobilenetV2/** --> MobilenetV2/**
                :param var:
                :return:
                '''
                return '/'.join(var.op.name.split('/')[1:])

            nameInCkpt_Var_dict = {}
            for var in model_variables:
                if var.name.startswith('Fast-RCNN/' + self.base_network_name):  # +'/block4'
                    var_name_in_ckpt = name_in_ckpt_fastrcnn_head(var)
                    nameInCkpt_Var_dict[var_name_in_ckpt] = var
                else:
                    if var.name.startswith(self.base_network_name):
                        var_name_in_ckpt = name_in_ckpt_rpn(var)
                        nameInCkpt_Var_dict[var_name_in_ckpt] = var
                    else:
                        continue
            restore_variables = nameInCkpt_Var_dict
            for key, item in restore_variables.items():
                print("var_in_graph: ", item.name)
                print("var_in_ckpt: ", key)
                print(20 * "___")
            restorer = tf.train.Saver(restore_variables)
            print(20 * "****")
            print("restore from pretrained_weighs in IMAGE_NET")
        return restorer, checkpoint_path

    def get_gradients(self, optimizer, loss):
        '''
        :param optimizer:
        :param loss:
        :return:

        return vars and grads that not be fixed
        '''
        # if cfgs.FIXED_BLOCKS > 0:
        #     trainable_vars = tf.trainable_variables()
        #     # trained_vars = slim.get_trainable_variables()
        #     start_names = [cfgs.NET_NAME + '/block%d' % i for i in range(1, cfgs.FIXED_BLOCKS + 1)] + \
        #                   [cfgs.NET_NAME + '/conv1']
        #     start_names = tuple(start_names)
        #     trained_var_list = []
        #     for var in trainable_vars:
        #         if not var.name.startswith(start_names):
        #             trained_var_list.append(var)
        #     # slim.learning.train()
        #     grads = optimizer.compute_gradients(loss, var_list=trained_var_list)
        #     return grads
        # else:
        #     return optimizer.compute_gradients(loss)
        return optimizer.compute_gradients(loss)

    def enlarge_gradients_for_bias(self, gradients):
        final_gradients = []
        with tf.variable_scope("Gradient_Mult") as scope:
            for grad, var in gradients:
                scale = 1.0
                if cfgs.MUTILPY_BIAS_GRADIENT and './biases' in var.name:
                    scale = scale * cfgs.MUTILPY_BIAS_GRADIENT
                if not np.allclose(scale, 1.0):
                    grad = tf.multiply(grad, scale)
                final_gradients.append((grad, var))
        return final_gradients
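
# Typical wiring (editor's sketch, not from this repo; assumes an image tensor
# `img_batch` and inference mode, where the two gt-box arguments go unused):
#   net = DetectionNetwork(base_network_name='resnet50_v1d', is_training=False)
#   boxes, scores, category = net.build_whole_detection_network(img_batch, None, None)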
|
[
"yangxue0827@126.com"
] |
yangxue0827@126.com
|
a5bf349f10d05cb162f75a7bfbede3c10a24f87b
|
c2321b327c0793b823c842d41a873b89ddf95b02
|
/MQMO_Workbook/danorand.py
|
3bb64b27d87280f8297f19595ccb2346a01307f9
|
[] |
no_license
|
Danoishere/mqmo-workbook-python
|
4f84afcbca28b3b73e76f0c9f6495e5ec8b66240
|
be8bb886dcc1f949b84152fbd2ba7f1f5202b86c
|
refs/heads/master
| 2020-06-02T14:31:56.509312
| 2019-06-14T08:05:18
| 2019-06-14T08:05:18
| 191,188,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,057
|
py
|
import pyximport
pyximport.install()

import simpyx as sp
import danosim as ds
import numpy as np

def sorted_poisson_arrival(lambd, t_end):
    event_times = np.random.uniform(low=0, high=t_end, size=np.int(t_end*lambd))
    event_times = np.sort(event_times)
    return event_times

def possibility_for_num_of_events_in_time(event_times, how_many_events, t_end, t_observed):
    # Number of time-intervals to check for
    # 200'000 * 0.5h
    num_observed_intervals = np.int(t_end*(1/t_observed))
    # Histogram with bins for each half hour
    # 1st 0.5h -> 3 cars, 2nd 0.5h -> 1 car, etc.
    hist, bins = np.histogram(event_times, bins=num_observed_intervals)
    # Number of histogram bins with the same count as our observed number of events
    num_events_in_observed = np.sum(hist == how_many_events)
    return num_events_in_observed/num_observed_intervals

def exp_rnd_lambd(lambd, size=None):
    return np.random.exponential(scale=1/lambd, size=size)

def exp_rnd_avg(avg, size=None):
    return np.random.exponential(scale=avg, size=size)
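
# Example (editor's sketch): with lambda = 4 events/h over t_end = 200000 h,
# each 0.5 h window has mean 2 events, so the estimate should approach the
# Poisson pmf value 2**3 * exp(-2) / 3! ~= 0.180:
#   times = sorted_poisson_arrival(4, 200000)
#   p3 = possibility_for_num_of_events_in_time(times, 3, 200000, 0.5)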
|
[
"dano.roost@gmail.com"
] |
dano.roost@gmail.com
|
4510678e66dacc8d9a42810e6f5b901215a93fef
|
5f64fb1c9db645919029d03fcfb222732c16ae4c
|
/analysis/__init__.py
|
ccc6064de1896a78f9119daec7d5ccaff0620a34
|
[] |
no_license
|
devYaoYH/cogPsychLab_toolkit
|
95b3c5a99288ef20d807ea2200d5c3128a2d0374
|
b926126bf67a30bc4ec192ae335ef650f2f2abe0
|
refs/heads/master
| 2020-07-29T19:08:58.667628
| 2019-12-18T18:50:13
| 2019-12-18T18:50:13
| 209,926,254
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 89
|
py
|
from .normalize_rt_raw import parse_rt_path
from .plot_rt_pathlengths import plot_rt_path
|
[
"yaoyiheng@gmail.com"
] |
yaoyiheng@gmail.com
|
2ae541b40b7cd84b167c0fe96d85e532d439192a
|
3481bb48f454d0dd269fa67e9f8e845c4d2cf2fc
|
/2018/day5.py
|
102d982c2e041e0701528f522c18d32e1279a348
|
[] |
no_license
|
mattr555/advent-of-code
|
0d84c6c77d0a0467791379de540a5f92a09ae2b4
|
bd2253b4eb560323d1ff7b1b55d1854009b24cde
|
refs/heads/master
| 2021-12-27T09:30:21.644035
| 2021-12-18T21:00:05
| 2021-12-18T21:00:05
| 164,048,265
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
with open("day5.txt") as f:
data = f.read()
# data = 'dabAcCaCBAcCcaDA'
def react(data):
newdata = ''
while True:
i = 0
while i < len(data):
if i < len(data) - 1 and data[i].lower() == data[i+1].lower() and data[i] != data[i+1]:
i += 2
else:
newdata += data[i]
i += 1
if len(newdata) == len(data):
break
else:
data = newdata
newdata = ''
return newdata
# print(data)
reacted = react(data)
print(len(reacted))
best = len(reacted)
for i in range(26):
char_to_remove = chr(ord('a') + i)
this_try = reacted.replace(char_to_remove, '').replace(char_to_remove.upper(), '')
best = min(best, len(react(this_try)))
print(char_to_remove)
print(best)
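
# Editor's note: react() collapses adjacent same-letter, opposite-case pairs
# until nothing changes; on the commented sample 'dabAcCaCBAcCcaDA' it leaves
# 'dabCBAcaDA' (length 10, part 1). Part 2 strips each unit type a/A .. z/Z
# first and keeps the shortest fully-reacted length.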
|
[
"mattramina@gmail.com"
] |
mattramina@gmail.com
|
60777dfa8618834846b5accb91c04b9f6e4e44a1
|
48faa86184e733fa4cb1d05f8d4b3b20aeb9f327
|
/you_get/__init__.py
|
5da7138ff2db95bcb8278be8d7f2541aafa25152
|
[
"MIT"
] |
permissive
|
wgnms/webdecode
|
5a72bcb130f3e0d109bb4dc8c0375c766e7c6088
|
b4052e1a5e8e53fd75838e2e5d2bc47da6ad3b14
|
refs/heads/main
| 2023-07-06T01:33:44.769245
| 2021-08-17T02:24:48
| 2021-08-17T02:24:48
| 426,558,044
| 1
| 0
|
MIT
| 2021-11-10T09:20:31
| 2021-11-10T09:20:30
| null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
#!/usr/bin/env python
# This file is Python 2 compliant.

import sys

if sys.version_info[0] == 3:
    #from .extractor import Extractor, VideoExtractor
    #from .util import log
    from .__main__ import *
    #from .common import *
    #from .version import *
    #from .cli_wrapper import *
    #from .extractor import *
else:
    # Don't import anything.
    pass
|
[
"yangxuan@ev-image.com"
] |
yangxuan@ev-image.com
|
8899c83e97081de7941ab5e8f6f97b043735e116
|
f66a28aa2f466d3c5cf1b04eac2c8225c4034984
|
/util/ImageHelper.py
|
0e44f26fc6611d125b7179fb1abb924df3cf0ea7
|
[
"MIT"
] |
permissive
|
abbottli/asciipy
|
7816821ab9a2964649cff47916452c281067c136
|
40fe58d64d0618041b71691b55a08888f76e4b54
|
refs/heads/master
| 2023-05-04T13:17:52.335134
| 2021-05-14T19:17:47
| 2021-05-14T19:17:47
| 367,268,065
| 1
| 0
|
MIT
| 2021-05-15T02:05:16
| 2021-05-14T06:18:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,358
|
py
|
import os
from PIL import Image, ImageOps
from util.ImageType import ImageType
RESOURCE_FOLDER = 'resources'
BLACK = 255
WHITE = 0
DEBUG = False
def resource_folder(file):
    return os.path.join(RESOURCE_FOLDER, file)

def convert_image(image, convert_type=ImageType.DITHER):
    if ImageType.DITHER == convert_type:
        return dither(image)
    elif ImageType.BLACK_WHITE == convert_type:
        return black_white(image)
    elif ImageType.HALFTONE == convert_type:
        return halftone(image)
    elif ImageType.GRAY == convert_type:
        return gray(image)
    elif ImageType.SILHOUETTE == convert_type:
        return silhouette(image)

def gray(image):
    image = image.convert('L')
    if DEBUG:
        image.save(resource_folder('gray.png'))
    return image

def black_white(image, thresh=128):
    image = image.convert('L').point(lambda x: BLACK if x > thresh else WHITE, mode='1')
    if DEBUG:
        image.save(resource_folder('bw.png'))
    return image

def silhouette(image):
    image = image.convert('L').point(lambda x: BLACK if x == BLACK else WHITE, mode='1')
    if DEBUG:
        image.save(resource_folder('silhouette.png'))
    return image

def dither(image):
    image = image.convert('1')
    if DEBUG:
        image.save(resource_folder('dither.png'))
    return image

def halftone(image):
    image = image.convert('L')
    width, height = image.size
    pixels = image.load()
    for x in range(0, width, 2):
        for y in range(0, height, 2):
            here, right, down, diag = (x, y), (x + 1, y), (x, y + 1), (x + 1, y + 1)
            if x + 1 >= width:
                right = (0, 0)
                diag = (0, 0)
            if y + 1 >= height:
                down = (0, 0)
                diag = (0, 0)
            saturation = (pixels[here] + pixels[right] + pixels[down] + pixels[diag]) / 4
            if saturation > 223:  # all white
                pixels[here] = 255
                pixels[right] = 255
                pixels[down] = 255
                pixels[diag] = 255
            elif saturation > 159:
                pixels[here] = 255
                pixels[right] = 255
                pixels[down] = 0
                pixels[diag] = 255
            elif saturation > 95:
                pixels[here] = 255
                pixels[right] = 0
                pixels[down] = 0
                pixels[diag] = 255
            elif saturation > 23:
                pixels[here] = 0
                pixels[right] = 0
                pixels[down] = 0
                pixels[diag] = 255
            else:  # all black
                pixels[here] = 0
                pixels[right] = 0
                pixels[down] = 0
                pixels[diag] = 0
    if DEBUG:
        image.save(resource_folder('halftone.png'))
    return image

def resize_image(image, max_width, max_height, scale_type=Image.BICUBIC):
    if image.width > max_width or image.height > max_height:
        # resize image to console window bounds
        scale = min(max_width / image.width, max_height / image.height)
        scaled = tuple([int(x * scale) for x in image.size])
        resized = image.resize(scaled, scale_type)
        if DEBUG:
            resized.save(resource_folder('resized.png'))
        return resized
    return image

def invert(image):
    return ImageOps.invert(image)
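
# Example (editor's illustration; 'input.png' is a hypothetical file):
#   img = Image.open('input.png')
#   img = resize_image(img, 120, 60)
#   art = convert_image(img, ImageType.HALFTONE)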
|
[
"abbott.li49@gmail.com"
] |
abbott.li49@gmail.com
|
c99a79681efd4708275a91f12d409ed732157dc1
|
737a8273a04f1ff545971061f9e82c94794d5fd1
|
/venv_py36/Scripts/pyi-bindepend-script.py
|
5a06e65c01dcc45c62967f443e35e7139bfd63fc
|
[
"MIT"
] |
permissive
|
PeterMoresco/RefriCalcSoft
|
eb316574ec3826c5a42ad6b3d74c1121a397dac4
|
1ed728ef1937fdda248cee19d97b3d13bd98af03
|
refs/heads/master
| 2022-04-13T11:11:01.425094
| 2020-04-13T17:11:00
| 2020-04-13T17:11:00
| 255,360,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
#!C:\Users\pedro.camargo\Documents\Programas\RefriCalc\venv_py36\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'PyInstaller==3.4','console_scripts','pyi-bindepend'
__requires__ = 'PyInstaller==3.4'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('PyInstaller==3.4', 'console_scripts', 'pyi-bindepend')()
    )
|
[
"pedro.moresco93@gmail.com"
] |
pedro.moresco93@gmail.com
|
2be70c160418e48ef9adbfa51bc4a11912cc9ff9
|
048a3362f0990cd6a53f9a47002b910445e76f65
|
/Python/Modules/Utilities.py
|
2ef9db6c045079306f42a29c47ab8bf0484f1d83
|
[] |
no_license
|
alonyan/PythonFlowCytometryStartup
|
b26fa3cc3fa7c806fefee8edc522635d61510723
|
7631f64b9fd0572d1b5340aa84df65868b769f6f
|
refs/heads/master
| 2020-03-24T02:07:00.454866
| 2018-07-26T17:18:43
| 2018-07-26T17:18:43
| 142,363,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,098
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 21 15:46:30 2016
@author: Alonyan
"""
import numpy as np
import scipy.io as sio
def num2str(num, precision):
    return "%0.*f" % (precision, num)

def colorcode(datax, datay):
    from scipy import interpolate
    import numpy as np
    H, xedges, yedges = np.histogram2d(datax, datay, bins=30)
    xedges = (xedges[:-1] + xedges[1:]) / 2
    yedges = (yedges[:-1] + yedges[1:]) / 2
    f = interpolate.RectBivariateSpline(xedges, yedges, H)
    z = np.array([])
    for i in datax.index:
        z = np.append(z, f(datax[i], datay[i]))
    #z = (z - min(z)) / (max(z) - min(z))
    z[z < 0] = 0
    idx = z.argsort()
    return z, idx

class kmeans:
    def __init__(self, X, K):
        # Initialize to K random centers
        oldmu = X.sample(K).values  # np.random.sample(X, K)
        mu = X.sample(K).values     # np.random.sample(X, K)
        while not _has_converged(mu, oldmu):
            oldmu = mu
            # Assign all points in X to clusters
            clusters = _cluster_points(X, mu)
            # Reevaluate centers
            mu = _reevaluate_centers(oldmu, clusters)
        self.mu = mu
        self.clusters = clusters
        #return(mu, clusters)

def _cluster_points(X, mu):
    clusters = {}
    for x in X:
        bestmukey = min([(i[0], np.linalg.norm(x - mu[i[0]])) \
                         for i in enumerate(mu)], key=lambda t: t[1])[0]
        try:
            clusters[bestmukey].append(x)
        except KeyError:
            clusters[bestmukey] = [x]
    return clusters

def _reevaluate_centers(mu, clusters):
    newmu = []
    keys = sorted(clusters.keys())
    for k in keys:
        newmu.append(np.mean(clusters[k], axis=0))
    return newmu

def _has_converged(mu, oldmu):
    return (set(mu) == set(oldmu))

def makeTicks():
    a = np.outer(np.arange(1, 10), 10**np.arange(1, 2)).T.reshape((1, -1)).squeeze()
    ticks = np.append(-a[::-1], 0)
    ticks = np.append(-100, ticks)
    a = np.outer(np.arange(1, 10), 10**np.arange(1, 6)).T.reshape((1, -1)).squeeze()
    ticks = np.append(ticks, a[:])
    emptvec = ['', '', '', '', '', '', '', '']
    ticklabels = ['-0.1'] + emptvec + [''] + ['0'] + emptvec + [''] + ['0.1'] + emptvec + ['1'] + emptvec + ['10'] + emptvec + ['100'] + emptvec
    return ticks, ticklabels

# Utils for opening MAT files
def print_mat_nested(d, indent=0, nkeys=0):
    """Pretty print nested structures from .mat files
    Inspired by: `StackOverflow <http://stackoverflow.com/questions/3229419/pretty-printing-nested-dictionaries-in-python>`_
    """
    # Subset dictionary to limit keys to print. Only works on first level
    if nkeys > 0:
        d = {k: d[k] for k in d.keys()[:nkeys]}  # Dictionary comprehension: limit to first nkeys keys.
    if isinstance(d, dict):
        for key, value in d.iteritems():  # iteritems loops through key, value pairs
            print '\t' * indent + 'Key: ' + str(key)
            print_mat_nested(value, indent + 1)
    if isinstance(d, np.ndarray) and d.dtype.names is not None:  # Note: and short-circuits by default
        for n in d.dtype.names:  # This means it's a struct, it's bit of a kludge test.
            print '\t' * indent + 'Field: ' + str(n)
            print_mat_nested(d[n], indent + 1)

def loadmat(filename):
    '''
    this function should be called instead of direct spio.loadmat
    as it cures the problem of not properly recovering python dictionaries
    from mat files. It calls the function check keys to cure all entries
    which are still mat-objects

    from: `StackOverflow <http://stackoverflow.com/questions/7008608/scipy-io-loadmat-nested-structures-i-e-dictionaries>`_
    '''
    data = sio.loadmat(filename, struct_as_record=False, squeeze_me=True)
    return _check_keys(data)

def _check_keys(dict):
    '''
    checks if entries in dictionary are mat-objects. If yes
    todict is called to change them to nested dictionaries
    '''
    dict1 = {}
    for key in dict:
        if isinstance(dict[key], np.ndarray):
            i = 1
            for inst in dict[key]:
                if isinstance(inst, sio.matlab.mio5_params.mat_struct):
                    dict1[key + '_' + str(i)] = _todict(inst)
                    i += 1
        elif isinstance(dict[key], sio.matlab.mio5_params.mat_struct):
            dict1[key] = _todict(dict[key])
    return dict1

def _todict(matobj):
    '''
    A recursive function which constructs from matobjects nested dictionaries
    '''
    dict = {}
    for strg in matobj._fieldnames:
        elem = matobj.__dict__[strg]
        if isinstance(elem, sio.matlab.mio5_params.mat_struct):
            dict[strg] = _todict(elem)
        elif isinstance(elem, np.ndarray):
            dict[strg] = _tolist(elem)
        else:
            dict[strg] = elem
    return dict

def _tolist(ndarray):
    '''
    A recursive function which constructs lists from cellarrays
    (which are loaded as numpy ndarrays), recursing into the elements
    if they contain matobjects.
    '''
    elem_list = []
    for sub_elem in ndarray:
        if isinstance(sub_elem, sio.matlab.mio5_params.mat_struct):
            elem_list.append(_todict(sub_elem))
        elif isinstance(sub_elem, np.ndarray):
            elem_list.append(_tolist(sub_elem))
        else:
            elem_list.append(sub_elem)
    return elem_list

#
#def _todict(matobj):
#    '''
#    A recursive function which constructs from matobjects nested dictionaries
#    '''
#    dict = {}
#    for strg in matobj._fieldnames:
#        elem = matobj.__dict__[strg]
#        if isinstance(elem, np.ndarray):
#            i = 1
#            for el in elem:
#                if isinstance(el, sio.matlab.mio5_params.mat_struct):
#                    dict[strg + '_' + str(i)] = _todict(el)
#                    i += 1
#                else:
#                    dict[strg] = elem
#        elif isinstance(elem, sio.matlab.mio5_params.mat_struct):
#            dict[strg] = _todict(elem)
#        else:
#            dict[strg] = elem
#    return dict
#
#
|
[
"Alonyan@Alons-MacBook-Pro.local"
] |
Alonyan@Alons-MacBook-Pro.local
|
daf15159963c2c223164a14990ac1ad980c26f04
|
66f371cbb748c2e55bb26e35cdf23c7eddcf0e51
|
/py3hardway/ex17.py
|
3dd73772fd15f1543debdfc0ceeb17e82dd89e8b
|
[] |
no_license
|
MichealGarcia/code
|
8bc807e432df0110db7c51620d6e358f52eddc4a
|
0f5003d1d9de32b0939d3c3f765e632b79985904
|
refs/heads/main
| 2023-09-02T19:56:53.541069
| 2021-11-15T17:41:57
| 2021-11-15T17:41:57
| 407,957,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 522
|
py
|
from sys import argv
from os.path import exists
script, from_file, to_file = argv
print(f"Copying from {from_file} to {to_file}")
# We could do these two on one line, how?
in_file = open(from_file); indata = in_file.read()
print(f"The input file is {len(indata)} bytes long")
print(f"Does the output file exist? {exists(to_file)}")
print("Ready, hit RETURN to continue, CTRL-C to abort.")
input()
out_file = open(to_file, 'w'); out_file.write(indata)
print("Alright, all done.")
out_file.close()
in_file.close()
|
[
"garciamicheal0@outlook.com"
] |
garciamicheal0@outlook.com
|
840a49c0c67c499e3b30d896b164e0e70d6d9ca5
|
20a79a6bf4d5f077d5bc5d2d989d0bbedea92cc4
|
/venv/bin/python-config
|
f35d9f24086192b469f0392fa178284f61868af6
|
[] |
no_license
|
denaverma007/Greendeck
|
d4f5dc662ebbe1d93696ac0d906c75d11d5f86b9
|
97afae0783ac35d5c1e18316b8d49ba33b430a3e
|
refs/heads/master
| 2022-12-08T17:09:00.759085
| 2020-02-07T16:26:16
| 2020-02-07T16:26:16
| 238,938,321
| 0
| 0
| null | 2022-12-08T03:34:17
| 2020-02-07T14:07:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,357
|
#!/home/yj/PycharmProjects/greendeck/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']

if sys.version_info >= (3, 2):
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')

def exit_with_usage(code=1):
    sys.stderr.write("Usage: {0} [{1}]\n".format(
        sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
    sys.exit(code)

try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()

if not opts:
    exit_with_usage()

pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var

opt_flags = [flag for (flag, val) in opts]

if '--help' in opt_flags:
    exit_with_usage(code=0)

for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
|
[
"jainyash031@gmail.com"
] |
jainyash031@gmail.com
|
|
e0b4e0d09721d9b9da92a58b86f47afebac9cff4
|
330ba6024e2728ec2476ce2d9f8e6be20c60ed67
|
/resources/code/class11/scratch.py
|
05297afa2551f11d03483182dc63479edf6c9ce6
|
[] |
no_license
|
foureyes/csci-ua.0479-spring2021-001
|
ec008e797b569476cce7b062f2469d6dea56d670
|
f7ee0f69c2e9150134f41f39676807e4f776f96e
|
refs/heads/main
| 2023-06-27T15:44:38.192216
| 2021-08-01T17:35:55
| 2021-08-01T17:35:55
| 330,051,703
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,265
|
py
|
"""
from matplotlib import pyplot as plt
plt.figure(1)
plt.subplot(211)
plt.plot([1,0. 2,0. 3,0. 4],0. [4,0. 7,0. 8,0. 9])
#plt.figure(2)
plt.subplot(212)
plt.hist([1,0. 3,0. 7,0. 4,0. 2])
plt.show()
"""
"""
import matplotlib.pyplot as plt
scores = [0.82, 0.88, 0.96, 0.90, 0.87, 0.88, 0.81, 0.90, 0.90, 0.74, 0.82, 0.73, 0.92, 0.76, 0.87, 0.90, 0.90, 0.74, 0.72, 0.88, 0.62, 0.70, 0.82, 0.82, 0.90, 0.74, 0.65, 0.86, 0.89, 0.97, 0.46, 0.89, 0.82, 0.87, 0.90, 0.96, 0.92, 0.60, 0.91, 0.26, 0.92, 0.91, 0.60, 0.94, 0.87, 0.91, 0.92, 0.98, 0.96, 0.96, 0.96, 0.81, 0.67, 0.81, 0.91, 0.94, 0.94, 0.96, 0.95, 0.90]
bins = [0.6, 0.7, 0.73, 0.77, 0.8, 0.83, 0.87, 0.9, 0.93, 1]
plt.hist(scores, bins)
plt.xticks(bins, ['F', 'D', 'C-', 'C', 'C+', 'B-', 'B', 'B+', 'A-', 'A'])
plt.show()
"""
"""
class Fraction:
def __init__(self, num, den):
self.num = num
self.den = den
def __str__(self):
return "{}/{}".format(self.num, self.den)
@staticmethod
def gcf(a, b):
if a > b:
b, a = a, b
gcf = 1
for factor in range(1, a + 1):
if a % factor == 0 and b % factor == 0:
gcf = factor
return gcf
def reduce(self):
factor = Fraction.gcf(self.num, self.den)
new_num = self.num // factor
new_den = self.den // factor
return Fraction(new_num, new_den)
def add(self, other):
new_num = (other.den * self.num) + (other.num * self.den)
new_den = other.den * self.den
result = Fraction(new_num, new_den)
return result.reduce()
def __add__(self, other):
return self.add(other)
def __gt__(self, other):
self_num = self.num * other.den
other_num = other.num * self.den
return self_num > other_num
def __lt__(self, other):
self_num = self.num * other.den
other_num = other.num * self.den
return self_num < other_num
def __eq__(self, other):
return self.num == other.num and self.den == other.den
def __repr__(self):
return self.__str__()
def silly(fn):
def new_fn(*args):
print('start')
result = fn(*args)
print('end')
return result
return new_fn
@silly
def foo(a, b):
return a + b
print(foo(2, 3))
a = Fraction(1, 2)
print(a)
print(Fraction.gcf(4, 8))
print(Fraction.gcf(9, 6))
print(Fraction.gcf(17, 5))
b = Fraction(2, 8)
print(a.add(b))
print(a + b)
a.foozy = 'barry'
print(a.foozy)
b.num = 6
print(b)
#print(b.foozy)
print(type(b))
print(a > b)
print(b > a)
c = Fraction(1,3)
print('c < a', c < a)
print('c < b', c < b)
print('c == c', c == c)
print('c != c', c != c)
fractions = [a, b, c]
fractions.sort()
print(fractions)
print([range(4), range(2)])
"""
"""
class Rectangle:
def __init__(self, t, x, y, w, h, color):
self.t = t
self.x = x
self.y = y
self.w = w
self.h = h
self.color = color
def area(self):
return self.w * self.h
def __lt__(self, other):
return self.area() < other.area
def __str__(self):
return "{} x {} at ({}, {})".format(self.w, self.h, self.x, self.y)
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.w == other.w and self.h == other.h
def render(self):
self.t.up()
self.t.color = self.color
self.t.begin_fill()
self.t.goto(self.x, self.y)
self.t.down()
self.t.goto(self.x + self.w, self.y)
self.t.goto(self.x + self.w, self.y - self.h)
self.t.goto(self.x, self.y - self.h)
self.t.goto(self.x, self.y)
self.t.end_fill()
import turtle
t = turtle.Turtle()
t.hideturtle()
wn = turtle.Screen()
wn.tracer(0)
r1 = Rectangle(t, 50, 50, 100, 50, 'black')
r2 = Rectangle(t, -50, -50, 100, 100, 'red')
def draw():
t.clear()
r1.x += 1
r1.render()
r2.render()
wn.ontimer(draw, 20)
wn.ontimer(draw, 200)
wn.update()
wn.mainloop()
"""
"""
class Sprite:
def __init__(self, t, x, y):
self.t = t
self.x = x
self.y = y
def move_right(self, delta):
self.x += delta
def render(self):
self.t.up()
self.t.goto(self.x, self.y)
self.t.down()
class Circle(Sprite):
def __init__(self, t, x, y, r):
super().__init__(t, x, y)
self.r = r
def render(self):
super().render()
self.t.circle(self.r)
class Rectangle(Sprite):
def __init__(self, t, x, y, w, h, color):
super().__init__(t, x, y)
self.w = w
self.h = h
self.color = color
def area(self):
return self.w * self.h
def __lt__(self, other):
return self.area() < other.area
def __str__(self):
return "{} x {} at ({}, {})".format(self.w, self.h, self.x, self.y)
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.w == other.w and self.h == other.h
def render(self):
super().render()
self.t.color = self.color
self.t.begin_fill()
self.t.goto(self.x + self.w, self.y)
self.t.goto(self.x + self.w, self.y - self.h)
self.t.goto(self.x, self.y - self.h)
self.t.goto(self.x, self.y)
self.t.end_fill()
import turtle
t = turtle.Turtle()
t.hideturtle()
wn = turtle.Screen()
wn.tracer(0)
r1 = Rectangle(t, 50, 50, 100, 50, 'black')
r2 = Rectangle(t, -50, -50, 100, 100, 'red')
c = Circle(t, 0, 100, 100)
def draw():
t.clear()
r1.move_right(5)
c.move_right(3)
r1.render()
r2.render()
c.render()
wn.update()
wn.ontimer(draw, 20)
wn.ontimer(draw, 200)
wn.mainloop()
"""
#from matplotlib import pyplot as plt
"""
plt.plot([1, 2, 3, 4, 5], [2, 4, 1, 1, 5])
plt.xlim(0, 10)
plt.ylim(0, 10)
"""
#plt.bar(['lemons', 'apples', 'oranges', 'limes'], [5, 12, 2, 3])
#plt.hist([1,0. 3,0. 7,0. 4,0. 2])
#plt.show()
import matplotlib.pyplot as plt
data = {'apples': 10, 'oranges': 15, 'lemons': 5, 'limes': 20}
names = list(range(1, len(data.keys()) + 1))
values = list(data.values())
plt.bar(names, values)
plt.xticks(names, list(data.keys()))
#plt.xticklabels(list(data.keys()))
plt.show()
|
[
"jversoza@cs.nyu.edu"
] |
jversoza@cs.nyu.edu
|
ff9985458b628b7515d40abce1908071b3909f62
|
f9ff85c981942d15c65d37de107e0c5fa5e6a2ba
|
/pychron/experiment/utilities/identifier.py
|
93e069990688ddb7ca6694d6320df4bfe0882396
|
[
"Apache-2.0"
] |
permissive
|
kenlchen/pychron
|
0c729f1b1973b9883734007b7a318fe21669e6c1
|
ffd988e27ae09fb3e8a8790d87ff611557911d07
|
refs/heads/master
| 2021-01-24T21:53:42.293554
| 2016-04-04T07:18:39
| 2016-04-04T07:18:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,439
|
py
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
import os
import yaml
# ============= local library imports ==========================
from pychron.file_defaults import IDENTIFIERS_DEFAULT
from pychron.pychron_constants import LINE_STR, ALPHAS
from pychron.paths import paths
ANALYSIS_MAPPING = dict() # ba: 'Blank Air'
NON_EXTRACTABLE = dict() # ba: 'Blank Air'
ANALYSIS_MAPPING_INTS = dict() # blank_air: 0
SPECIAL_MAPPING = dict() # blank_air: ba
SPECIAL_NAMES = ['Special Labnumber', LINE_STR] # 'Blank Air'
SPECIAL_KEYS = [] # ba
# AGE_TESTABLE = []
p = os.path.join(paths.hidden_dir, 'identifiers.yaml')
if os.path.isfile(p):
with open(p, 'r') as rfile:
yd = yaml.load(rfile)
else:
yd = yaml.load(IDENTIFIERS_DEFAULT)
for i, idn_d in enumerate(yd):
key = idn_d['shortname']
value = idn_d['name']
ANALYSIS_MAPPING[key] = value
underscore_name = value.lower().replace(' ', '_')
ANALYSIS_MAPPING_INTS[underscore_name] = i
if not idn_d['extractable']:
NON_EXTRACTABLE[key] = value
# if idn_d['ageable']:
# AGE_TESTABLE.append(value.lower())
if idn_d['special']:
SPECIAL_MAPPING[underscore_name] = key
SPECIAL_NAMES.append(value)
SPECIAL_KEYS.append(key)
# ANALYSIS_MAPPING = dict(ba='Blank Air', bc='Blank Cocktail', bu='Blank Unknown',
# bg='Background', u='Unknown', c='Cocktail', a='Air',
# pa='Pause', ic='Detector IC')
#
# ANALYSIS_MAPPING_INTS = dict(unknown=0, background=1,
# air=2, cocktail=3,
# blank_air=4,
# blank_cocktail=5,
# blank_unknown=6,
# detector_ic=7)
#
#
# # "labnumbers" where extract group is disabled
# NON_EXTRACTABLE = dict(ba='Blank Air', bc='Blank Cocktail', bu='Blank Unknown',
# bg='Background', c='Cocktail', a='Air', ic='Detector IC', be='Blank ExtractionLine')
#
# AGE_TESTABLE = ('unknown','cocktail')
# SPECIAL_NAMES = ['Special Labnumber', LINE_STR, 'Air', 'Cocktail', 'Blank Unknown',
# 'Blank Air', 'Blank Cocktail', 'Background', 'Pause', 'Degas', 'Detector IC']
#
# SPECIAL_MAPPING = dict(background='bg',
# blank_air='ba',
# blank_cocktail='bc',
# blank_unknown='bu',
# pause='pa',
# degas='dg',
# detector_ic='ic',
# air='a',
# cocktail='c',
# unknown='u')
#
# p = os.path.join(paths.setup_dir, 'identifiers.yaml')
# differed = []
# if os.path.isfile(p):
# with open(p, 'r') as rfile:
# yd = yaml.load(rfile)
# for i, (k, v) in enumerate(yd.items()):
# ANALYSIS_MAPPING[k] = v
#
# #if : assume '01:Value' where 01 is used for preserving order
# if ':' in v:
# a, v = v.split(':')
# c = int(a)
# differed.append((c, v))
# ANALYSIS_MAPPING_INTS[v.lower()] = 7 + c
# else:
# SPECIAL_NAMES.append(v)
# ANALYSIS_MAPPING_INTS[v.lower()] = 7 + i
# SPECIAL_MAPPING[v.lower()] = k
#
# if differed:
# ds = sorted(differed, key=lambda x: x[0])
# SPECIAL_NAMES.extend([di[1] for di in ds])
#
# SPECIAL_KEYS = map(str.lower, SPECIAL_MAPPING.values())
def convert_identifier_to_int(ln):
m = {'ba': 1, 'bc': 2, 'bu': 3, 'bg': 4, 'u': 5, 'c': 6, 'ic': 7}
try:
return int(ln)
except ValueError:
return m[ln]
def convert_special_name(name, output='shortname'):
"""
input name output shortname
name='Background'
returns:
if output=='shortname'
return 'bg'
else
return 4 #identifier
"""
if isinstance(name, str):
name = name.lower()
name = name.replace(' ', '_')
if name in SPECIAL_MAPPING:
sn = SPECIAL_MAPPING[name]
if output == 'labnumber':
sn = convert_identifier(sn)
return sn
else:
return name
def convert_identifier(identifier):
"""
old:
identifier=='bg, a, ...'
return 1
identifier== bu-FD-J, 51234, 13212-01
return bu-FD-J, 51234, 13212
"""
if '-' in identifier:
ln = identifier.split('-')[0]
try:
ln = int(ln)
identifier = str(ln)
except ValueError:
return identifier
# identifier=identifier.split('-')[0]
# if identifier in ANALYSIS_MAPPING:
# sname = ANALYSIS_MAPPING[identifier]
# identifier = next((k for k, v in SPECIAL_IDS.iteritems() if v == sname), identifier)
return identifier
def get_analysis_type(idn):
"""
idn: str like 'a-...' or '43513'
"""
idn = idn.lower()
for atype, tag in SPECIAL_MAPPING.iteritems():
if idn.startswith(tag):
return atype
else:
return 'unknown'
# if idn.startswith('bg'):
# return 'background'
# elif idn.startswith('ba'):
# return 'blank_air'
# elif idn.startswith('bc'):
# return 'blank_cocktail'
# elif idn.startswith('b'):
# return 'blank_unknown'
# elif idn.startswith('a'):
# return 'air'
# elif idn.startswith('c'):
# return 'cocktail'
# elif idn.startswith('dg'):
# return 'degas'
# elif idn.startswith('pa'):
# return 'pause'
# else:
# return 'unknown'
def make_runid(ln, a, s=''):
_as = make_aliquot_step(a, s)
return '{}-{}'.format(ln, _as)
def strip_runid(r):
l, x = r.split('-')
a = ''
for i, xi in enumerate(x):
a += xi
try:
int(a)
except ValueError:
a = x[:i]
s = x[i:]
break
else:
s = ''
return l, int(a), s
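# Illustrative round trip (identifier, aliquot and step are hypothetical):
#   make_runid('12345', 1, 'A') -> '12345-01A'
#   strip_runid('12345-01A')    -> ('12345', 1, 'A')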
def make_step(s):
if isinstance(s, (float, int, long)):
s = ALPHAS[int(s)]
return s or ''
def make_aliquot_step(a, s):
if not isinstance(a, str):
a = '{:02d}'.format(int(a))
s = make_step(s)
return '{}{}'.format(a, s)
def make_identifier(ln, ed, ms):
try:
_ = int(ln)
return ln
except ValueError:
return make_special_identifier(ln, ed, ms)
def make_standard_identifier(ln, modifier, ms, aliquot=None):
"""
ln: str or int
a: int
modifier: str or int. if int zero pad
ms: int or str
"""
if isinstance(ms, int):
ms = '{:02d}'.format(ms)
try:
modifier = '{:02d}'.format(modifier)
except ValueError:
pass
d = '{}-{}-{}'.format(ln, modifier, ms)
if aliquot:
d = '{}-{:02d}'.format(d, aliquot)
return d
def make_special_identifier(ln, ed, ms, aliquot=None):
"""
ln: str or int
a: int aliquot
ms: int mass spectrometer id
ed: int extract device id
"""
if isinstance(ed, int):
ed = '{:02d}'.format(ed)
if isinstance(ms, int):
ms = '{:02d}'.format(ms)
d = '{}-{}-{}'.format(ln, ed, ms)
if aliquot:
if not isinstance(aliquot, str):
aliquot = '{:02d}'.format(aliquot)
d = '{}-{}'.format(d, aliquot)
return d
def make_rid(ln, a, step=''):
"""
if ln can be converted to integer return runid
else return ln-a
"""
try:
_ = int(ln)
return make_runid(ln, a, step)
except ValueError:
if not isinstance(a, str):
a = '{:02d}'.format(a)
return '{}-{}'.format(ln, a)
def is_special(ln):
special = False
if '-' in ln:
special = ln.split('-')[0] in ANALYSIS_MAPPING
return special
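# e.g. is_special('bg-01-01') is True when 'bg' is a registered shortname,
# whereas is_special('12345') is False because it contains no '-'.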
# return make_special_identifier(ln, ed, ms, aliquot=a)
# ===============================================================================
# deprecated
# ===============================================================================
# SPECIAL_IDS = {1: 'Blank Air', 2: 'Blank Cocktail', 3: 'Blank Unknown',
# 4: 'Background', 5: 'Air', 6: 'Cocktail'
# }
# # @deprecated
# def convert_labnumber(ln):
# """
# ln is a str but only special labnumbers cannot be converted to int
# convert number to name
#
# """
# try:
# ln = int(ln)
#
# if ln in SPECIAL_IDS:
# ln = SPECIAL_IDS[ln]
# except ValueError:
# pass
#
# return ln
#
#
# # @deprecated
# def convert_shortname(ln):
# """
# convert number to shortname (a for air, bg for background...)
# """
# name = convert_labnumber(ln)
# if name is not None:
# ln = next((k for k, v in ANALYSIS_MAPPING.iteritems()
# if v == name), ln)
# return ln
def convert_extract_device(name):
"""
change Fusions UV to FusionsUV, etc
"""
n = ''
if name:
n = name.replace(' ', '')
return n
def pretty_extract_device(ident):
"""
change fusions_uv to Fusions UV, etc
"""
n = ''
if ident:
args = ident.split('_')
        if args[-1] in ('uv', 'co2'):
n = ' '.join(map(str.capitalize, args[:-1]))
n = '{} {}'.format(n, args[-1].upper())
else:
n = ' '.join(map(str.capitalize, args))
#n=ident.replace(' ', '_')
return n
# ============= EOF =============================================
|
[
"jirhiker@gmail.com"
] |
jirhiker@gmail.com
|
24f1dcc1d1aa4ed13620403653e66f45ffdc5e4a
|
11fb66c21b1afe7ea96f5a1816662b225f2dc79c
|
/nextgen/bcbio/variation/genotype.py
|
22a9442f3a128efdeec2d7c608c21989707d43cd
|
[] |
no_license
|
raonyguimaraes/bcbb
|
064f1c7d573b82478e6f787a2bd3932443c25864
|
c7485f8dbc63a93e73f0b0bb43630110ee7381df
|
refs/heads/master
| 2021-01-18T06:18:10.282876
| 2011-06-21T19:31:03
| 2011-06-21T19:31:03
| 1,986,676
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,578
|
py
|
"""Provide SNP, indel calling and variation analysis using GATK genotyping tools.
Genotyping:
http://www.broadinstitute.org/gsa/wiki/index.php/Unified_genotyper
http://www.broadinstitute.org/gsa/wiki/index.php/Local_realignment_around_indels
http://www.broadinstitute.org/gsa/wiki/index.php/IndelGenotyper
http://www.broadinstitute.org/gsa/wiki/index.php/VariantFiltrationWalker
Variant Evaluation:
http://www.broadinstitute.org/gsa/wiki/index.php/VariantEval
"""
import os
import itertools
from bcbio import broad
# ## SNP Genotyping
def gatk_genotyper(align_bam, ref_file, config, dbsnp=None):
"""Perform genotyping and filtration on a sorted aligned BAM file.
"""
picard = broad.runner_from_config(config)
picard.run_fn("picard_index_ref", ref_file)
picard.run_fn("picard_index", align_bam)
snp_file = _unified_genotyper(picard, align_bam, ref_file, dbsnp)
filter_snp = _variant_filtration(picard, snp_file, ref_file)
return filter_snp
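# Minimal usage sketch (paths are hypothetical; config must be a bcbio
# configuration accepted by broad.runner_from_config):
#   filtered_vcf = gatk_genotyper("sample-sort.bam", "hg19.fa", config,
#                                 dbsnp="dbsnp_132.vcf")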
def _unified_genotyper(picard, align_bam, ref_file, dbsnp=None):
"""Perform SNP genotyping on the given alignment file.
"""
out_file = "%s-snp.vcf" % os.path.splitext(align_bam)[0]
params = ["-T", "UnifiedGenotyper",
"-I", align_bam,
"-R", ref_file,
"-o", out_file,
"-A", "DepthOfCoverage",
"-A", "AlleleBalance",
"-A", "HomopolymerRun",
"-A", "QualByDepth",
"--genotype_likelihoods_model", "SNP",
"-baq", "CALCULATE_AS_NECESSARY",
"--standard_min_confidence_threshold_for_calling", "10.0",
"--standard_min_confidence_threshold_for_emitting", "10.0",
#"--trigger_min_confidence_threshold_for_calling", "10.0",
#"--trigger_min_confidence_threshold_for_emitting", "10.0",
"--downsample_to_coverage", 10000,
"--min_base_quality_score", 20,
"-l", "INFO",
]
if dbsnp:
params += ["-B:dbsnp,VCF", dbsnp]
if not (os.path.exists(out_file) and os.path.getsize(out_file) > 0):
picard.run_gatk(params)
return out_file
def _variant_filtration(picard, snp_file, ref_file):
"""Filter out problematic SNP calls.
Recommended Broad hard filtering for deep coverage exomes:
QUAL < 30.0 || AB > 0.75 && DP > 40 || QD < 5.0 || HRun > 5 || SB > -0.10
"""
out_file = "%s-filter%s" % os.path.splitext(snp_file)
params = ["-T", "VariantFiltration",
"-R", ref_file,
"-o", out_file,
"-B:variant,VCF", snp_file,
"--filterName", "QUALFilter",
"--filterExpression", "QUAL <= 50.0",
"--filterName", "QDFilter",
"--filterExpression", "QD < 5.0",
"--filterName", "ABFilter",
"--filterExpression", "AB > 0.75 && DP > 40",
"--filterName", "HRunFilter",
"--filterExpression", "HRun > 3.0",
"--filterName", "SBFilter",
"--filterExpression", "SB > -0.10",
"-l", "INFO",
]
if not (os.path.exists(out_file) and os.path.getsize(out_file) > 0):
picard.run_gatk(params)
return out_file
# ## Variant evaluation
def gatk_evaluate_variants(vcf_file, ref_file, config, dbsnp=None, intervals=None):
"""Evaluate variants, return SNP counts and Transition/Transversion ratios.
"""
runner = broad.runner_from_config(config)
eval_file = variant_eval(vcf_file, ref_file, dbsnp, intervals, runner)
stats = _extract_eval_stats(eval_file)
return _format_stats(stats['called'])
def _format_stats(stats):
"""Convert statistics into high level summary of major variables.
"""
total = sum(itertools.chain.from_iterable(s.itervalues() for s in stats.itervalues()))
if total > 0:
dbsnp = sum(stats['known'].itervalues()) / float(total) * 100.0
else:
dbsnp = -1.0
tv_dbsnp = stats['known']['tv']
ti_dbsnp = stats['known']['ti']
tv_novel = stats['novel']['tv']
ti_novel = stats['novel']['ti']
if tv_novel > 0 and tv_dbsnp > 0:
titv_all = float(ti_novel + ti_dbsnp) / float(tv_novel + tv_dbsnp)
titv_dbsnp = float(ti_dbsnp) / float(tv_dbsnp)
titv_novel = float(ti_novel) / float(tv_novel)
else:
titv_all, titv_dbsnp, titv_novel = (-1.0, -1.0, -1.0)
return dict(total=total, dbsnp_pct = dbsnp, titv_all=titv_all,
titv_dbsnp=titv_dbsnp, titv_novel=titv_novel)
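# Worked example (hypothetical counts): with known ti=80, tv=40 and novel
# ti=20, tv=10, total = 150, dbsnp_pct = 120 / 150 * 100 = 80.0 and
# titv_all = (20 + 80) / (10 + 40) = 2.0.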
def _extract_eval_stats(eval_file):
"""Parse statistics of interest from GATK output file.
"""
stats = dict()
for snp_type in ['called', 'filtered']:
stats[snp_type] = dict()
for dbsnp_type in ['known', 'novel']:
stats[snp_type][dbsnp_type] = dict(ti=0, tv=0)
for line in _eval_analysis_type(eval_file, "Ti/Tv Variant Evaluator"):
if line[:2] == ['eval', 'dbsnp']:
snp_type = line[3]
dbsnp_type = line[4]
try:
cur = stats[snp_type][dbsnp_type]
except KeyError:
cur = None
if cur:
stats[snp_type][dbsnp_type]["ti"] = int(line[5])
stats[snp_type][dbsnp_type]["tv"] = int(line[6])
return stats
def _eval_analysis_type(in_file, analysis_name):
"""Retrieve data lines associated with a particular analysis.
"""
with open(in_file) as in_handle:
# read until we reach the analysis
for line in in_handle:
if (line.startswith("Analysis Name:") and
line.find(analysis_name) > 0):
break
# read off header lines
for _ in range(4):
in_handle.next()
# read the table until a blank line
for line in in_handle:
if not line.strip():
break
parts = line.rstrip("\n\r").split()
yield parts
def variant_eval(vcf_in, ref_file, dbsnp, target_intervals, picard):
"""Evaluate variants in comparison with dbSNP reference.
"""
out_file = "%s.eval" % os.path.splitext(vcf_in)[0]
params = ["-T", "VariantEval",
"-R", ref_file,
"-B:eval,VCF", vcf_in,
"-B:dbsnp,VCF", dbsnp,
"-o", out_file,
"-l", "INFO"
]
if target_intervals:
params.extend(["-L", target_intervals])
if not (os.path.exists(out_file) and os.path.getsize(out_file) > 0):
picard.run_gatk(params)
return out_file
|
[
"chapmanb@50mail.com"
] |
chapmanb@50mail.com
|
8c360a0f72d3907199f648b5f07ac60f7c71ae74
|
118c35587c050c2157d6043d7e8e9111f9d3a65f
|
/code/code_first_run_words/space_to_grid.py
|
e413ee2d478b0db505dd91bb9187ef991be8a7a7
|
[] |
no_license
|
LydiaMennes/smrThesis
|
ef049b074a8bafeaa75a59c9866b995b97b5b18f
|
cb7d7a0efb94511017bdb0e8a451f03e47e9dae8
|
refs/heads/master
| 2021-05-28T02:58:02.084185
| 2014-11-12T17:12:27
| 2014-11-12T17:12:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,021
|
py
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import pylab
import math
import datetime
from thesis_utilities import *
import sys
import copy
import random
import gc
figure_size = 8
update_neighborhood = 300
grid_param = []
class TypeKeeper:
def __init__(self, indexes):
self.indexes = indexes
def get_color(self, x):
return self.indexes[x]
class GridPoint:
stepsize = 0.6 # before: 0.4
def __init__(self, x, y, grid):
self.pos = np.array([float(x), float(y)])
self.assignments = []
self.lonely_points = []
self.steps = {}
self.providers = []
self.prev_providers = []
self.grid = grid
def reset(self):
self.assignments=[]
self.lonely_points=[]
self.steps = {}
self.prev_providers = self.providers
self.providers = []
# assignment 0 = numpy array with position, 1 = index of point
def add_assignment(self, assignment):
self.assignments.append(assignment)
def add_provider(self, prov):
self.providers.append(prov)
def get_prev_providers(self):
return self.prev_providers
def add_lonely_gridpoint(self, x, y):
# implement processing of grid points
g_pos = np.array([float(x), float(y)])
dist = np.sqrt(np.inner(g_pos-self.pos, g_pos-self.pos))
self.lonely_points.append([dist, g_pos])
def get_movement(self, i):
if len(self.assignments) < 2:
return np.array([0.0,0.0])
elif self.steps == {}:
self.calc_assignments()
if not i in self.steps.keys():
return np.array([0.0,0.0])
return self.steps[i]
else:
if not i in self.steps.keys():
return np.array([0.0,0.0])
return self.steps[i]
## p = point, p0 = first point of line, p1 = other point of line
# def dist_to_line(p, p0, p1):
# gamma_q = np.sum(np.multiply(p1-p0, p-p0)) / np.sum(np.power(p1-p0,2))
# return np.sqrt(np.sum(p-p0- gamma_q*np.power(p1-p0,2)))
def get_step(self, gp, p, d):
# Next part makes direction slightly more random
# q = np.array([p[0]+(random.random()-0.5)/3, p[1]+(random.random()-0.5)/3])
# d1 = np.sqrt(np.dot(q-gp, q-gp))
alpha = math.asin( abs(gp[0]-p[0]) / d )
step = np.array([math.sin(alpha), math.cos(alpha)]) * self.stepsize
if gp[0] < p[0]:
step[0] *= -1.0
if gp[1] < p[1]:
step[1]*=-1.0
return step
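    # e.g. with gp=(0, 0), p=(3, 4) and d=5: alpha=asin(3/5), so the raw step
    # is (0.6, 0.8)*stepsize; both components are then negated, so the step
    # points from p toward gp.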
def calc_assignments(self):
self.lonely_points.sort(key=lambda x: x[0])
# make point move with smallest distance to 'movement line' from grid point to lonely gridpoint
for i in range(len(self.assignments)-1):
if i < len(self.lonely_points):
min_dist_lgp = float("inf")
min_dist_orig = float("inf")
min_ind = -1
p = self.lonely_points[i]
for index in range(len(self.assignments)):
[pos, ind, orig_pos] = self.assignments[index]
if not ind in self.steps.keys():
dist_orig = np.sqrt(np.dot(orig_pos-pos, orig_pos-pos))
if dist_orig < min_dist_orig:
min_dist_lgp = np.sqrt(np.dot(p[1]-pos, p[1]-pos))
min_dist_orig = dist_orig
min_ind = index
if dist_orig == min_dist_orig:
dist_lgp = np.sqrt(np.dot(p[1]-pos, p[1]-pos))
if dist_lgp < min_dist_lgp:
min_dist_lgp=dist_lgp
min_ind = index
# to_pos = p[1]
# self.grid[int(round(to_pos[0]))][int(round(to_pos[1]))].add_provider(self.pos)
# if len(self.grid[int(round(to_pos[0]))][int(round(to_pos[1]))].assignments) != 0:
# print("wrong provider added")
# print("provider added")
self.steps[self.assignments[min_ind][1] ] = self.get_step(p[1], self.assignments[min_ind][0], min_dist_lgp)
def print_memory():
# w = WMI('.')
# result = w.query("SELECT WorkingSet FROM Win32_PerfRawData_PerfProc_Process WHERE IDProcess=%d" % os.getpid())
# result2 = int(result[0]['WorkingSet'])
# print type(result2)
# print "memory:\n", result2
# return result2
# h = hpy()
# print h.heap()
# return None
print("")
def restart(data_folder, log_memory, last_iter_nr, last_fig_nr, grid_enlarge=0):
blob_colors = {}
colors = get_colors()
f = open(data_folder+r"\color_file.txt")
for line in f:
line = line.replace("\n", "")
line = line.split(";")
color = line[1]
color = color.replace("]","")
color = color.replace("[","")
color = color.split(", ")
color = [float(color[0]), float(color[1]), float(color[2])]
blob_colors[int(line[0])] = color
f.close()
blob_colors = TypeKeeper(blob_colors)
data = space_from_file(data_folder+r"\intermediate_grids\data_"+str(last_fig_nr)+"_it"+str(last_iter_nr)+".txt")
print("data shape",data.shape[0])
nr_items = data.shape[0]
grid_size = int(math.ceil(math.sqrt(nr_items)))+grid_enlarge
grid = []
for i in range(grid_size):
grid.append([])
for j in range(grid_size):
grid[i].append(GridPoint(i,j, grid))
orig_data = space_from_file(data_folder + r"\intermediate_grids\data_orig.txt")
iter_nr, assignment = iterate(data, orig_data, grid, last_fig_nr+1, nr_items, grid_size, data_folder+r"\intermediate_grids", log_memory, last_iter_nr+1, blob_colors)
f = open(data_folder+r"\init_grid_assignment.txt", "w")
for elem in assignment:
f.write(str(elem[0]) +";"+ str(elem[1]) +";"+ str(elem[2]) + "\n")
f.close()
def iterate(data, orig_data, grid, fig_nr, nr_items, grid_size, result_path, log_memory, iternr, blob_nr_keeper=None):
#iteratively move to grid points
assigned = set()
assignment = []
sufficient_gradient =True
neighborhood_size = 10
first = True
print("start conversion to grid", datetime.datetime.now())
neighborhood_size_changed = True
#find intial lonely gridpoints
lonely_points = []
assigned = set()
assignment = []
for i in range(nr_items):
nearest = [min(max(int(round(data[i,0])),0),grid_size-1), min(max(int(round(data[i,1])),0),grid_size-1)]
grid[nearest[0]][nearest[1]].add_assignment( (data[i,:], i, orig_data[i,:]) )
assigned.add((nearest[0], nearest[1]))
assignment.append((nearest[0], nearest[1], i))
for i in range(grid_size):
for j in range(grid_size):
if len(grid[i][j].assignments) == 0:
lonely_points.append(grid[i][j])
print("\n\nNr lonely points at start:", len(lonely_points), "with grid size", grid_size, "and", nr_items, "elems")
while len(assigned)<nr_items:
iternr+=1
if not first:
assigned = set()
assignment = []
# Assign each data point to the nearest grid point
for i in range(nr_items):
nearest = [min(max(int(round(data[i,0])),0),grid_size-1), min(max(int(round(data[i,1])),0),grid_size-1)]
grid[nearest[0]][nearest[1]].add_assignment( (data[i,:], i, orig_data[i,:]) )
assigned.add((nearest[0], nearest[1]))
assignment.append((nearest[0], nearest[1], i))
if first:
first = False
if len(assigned)<nr_items:
            # REPLACE THIS WITH A LOOP THROUGH THE LONELY POINTS
if neighborhood_size_changed:
print("check neighborhood")
for lpi in reversed(range(len(lonely_points))) :
lonely_p = lonely_points[lpi]
if len(lonely_p.assignments)==0:
i = int(lonely_p.pos[0])
j = int(lonely_p.pos[1])
if sufficient_gradient:
no_providers = True
if not neighborhood_size_changed:
                            # LOOP THROUGH THE PREVIOUS PROVIDER POINTS
prev_providers = lonely_p.get_prev_providers()
no_providers = len(prev_providers)==0
nr_additions = 0
checked = set()
for [px,py] in prev_providers:
from_pi, to_pi = max(0, px-2), min(grid_size, px+2)
from_pj, to_pj = max(0, py-2), min(grid_size, py+2)
for ii in range(from_pi,to_pi):
for jj in range(from_pj,to_pj):
if (ii,jj) not in checked and len(grid[ii][jj].assignments) > 1:
grid[ii][jj].add_lonely_gridpoint(i,j)
lonely_p.add_provider([ii,jj])
nr_additions +=1
checked.add((ii,jj))
else:
from_i, to_i = max(0, i-neighborhood_size), min(grid_size, i+neighborhood_size+1)
from_j, to_j = max(0, j-neighborhood_size), min(grid_size, j+neighborhood_size+1)
for ii in range(from_i,to_i):
for jj in range(from_j,to_j):
if len(grid[ii][jj].assignments) > 1:
grid[ii][jj].add_lonely_gridpoint(i,j)
lonely_p.add_provider([ii,jj])
else:
for elem in assigned:
if len(grid[elem[0]][elem[1]].assignments) > 0:
grid[elem[0]][elem[1]].add_lonely_gridpoint(i,j)
else:
del lonely_points[lpi]
nr_movements = 0
for i in range(nr_items):
nearest = [min(max(int(round(data[i,0])),0),grid_size-1), min(max(int(round(data[i,1])),0),grid_size-1)]
m = grid[nearest[0]][nearest[1]].get_movement(i)
if m[0] != 0 or m[1] != 0:
nr_movements+=1
data[i,:] = np.add(data[i,:] , m)
for i in range(grid_size):
for j in range(grid_size):
grid[i][j].reset()
neighborhood_size_changed = False
if iternr%5 == 0:
sufficient_gradient = True
sufficient_gradient = nr_movements > 50 or len(assigned)+nr_movements==nr_items
if not sufficient_gradient and iternr%20!=0:
print("insuf grad")
print("i:",iternr,"ass",len(assigned), "mo:", nr_movements, "nr lonely points:", len(lonely_points))
if nr_movements < update_neighborhood and len(assigned)+nr_movements!=nr_items:
neighborhood_size_changed = True
neighborhood_size += 5
print("neigh size upgraded", neighborhood_size)
gc.collect()
if iternr%10 == 0:
neighborhood_size_changed = True
if iternr%20 == 0 or len(assigned) == nr_items:
print("\n\n")
if blob_nr_keeper!=None:
used_marker = "o"
if nr_items > 1000:
used_marker = "."
print( "im" + str(fig_nr))
space_to_file(data, result_path + r"\data_"+str(fig_nr)+"_it"+str(iternr)+".txt")
image_name = result_path + r"\intermediate_grid_formed_"+str(fig_nr)+".pdf"
fig = plt.figure(figsize=(figure_size, figure_size))
for i in range(nr_items):
prop_plot=plt.scatter( data[i,0], data[i,1], c=blob_nr_keeper.get_color(i), marker=used_marker)
if nr_items > 1000:
prop_plot.set_edgecolor("none")
# for i in range(grid_size):
# for j in range(grid_size):
# if len(grid[i][j].assignments) == 0:
# prop_plot = plt.scatter(j, grid_size-1-i, c = "k", marker = used_marker)
# if nr_items > 1000:
# prop_plot.set_edgecolor("none")
plt.axis([-1, grid_size, -1, grid_size])
plt.title("Result at iteration " + str(iternr))
fig.savefig(image_name, bbox_inches='tight')
fig.savefig(result_path + r"\intermediate_grid_"+four_digit_string(fig_nr)+".png")
plt.close()
fig_nr+=1
print( "iter", iternr, "nr assigned", len(assigned), "from", nr_items, "mo:", nr_movements, "at", datetime.datetime.now())
if log_memory:
print("Memory log not available")
# all_objects = muppy.get_objects()
# sum1 = summary.summarize(all_objects)
# summary.print_(sum1, limit=15, sort='size')
# print("printed at", datetime.datetime.now())
return iternr, assignment
def space_to_grid_iterative(data, result_path, log_memory, with_figures=True, blob_nr_keeper = None, grid_enlarge = 0, scale = True):
nr_items = data.shape[0]
print("nr items:", nr_items)
grid_size = int(np.ceil(np.sqrt(nr_items)))+grid_enlarge
space_to_file(data, result_path + r"\data_orig.txt")
orig_data = np.copy(data)
# Prepare grid
grid = []
for i in range(grid_size):
grid.append([])
for j in range(grid_size):
grid[i].append(GridPoint(i,j, grid))
# Rescale and move data
if scale:
print("scale data")
move_scale = np.array([0.9 , 0.9])
if data.min(axis=0)[0] < 0:
move_scale[0] = 1.1
if data.min(axis=0)[1] < 0:
move_scale[1] = 1.1
data = data - (data.min(axis=0) * move_scale)
scaling = (float(grid_size)-1)/ (data.max(axis=0) * 1.2 )
data = np.multiply(data, np.tile(scaling, (nr_items, 1) ) )
colors = get_colors()
# Show initial data
if with_figures:
x = list(data[:,0])
xi = np.tile(np.arange(grid_size), (grid_size, 1))
y = list(data[:,1])
yi = np.tile( np.array([np.arange(grid_size)]).T, (1,grid_size))
image_name = result_path + r"\space_to_grid_init_plot.pdf"
fig = plt.figure()
plt.plot(np.ndarray.flatten(xi), np.ndarray.flatten(yi), 'b.')
plt.scatter( x, y, c=colors)
fig.savefig(image_name, bbox_inches='tight')
plt.close()
image_name = result_path + r"\space_to_grid_init_plot2.pdf"
fig = plt.figure()
plt.scatter( x, y, c=colors)
fig.savefig(image_name, bbox_inches='tight')
plt.close()
fig_nr = 1
iternr = 0
iternr, assignment = iterate(data, orig_data, grid, fig_nr, nr_items, grid_size, result_path, log_memory, iternr, blob_nr_keeper)
print("needed ", iternr, "iterations for", len(assignment), "points")
print("\n=============\nDONE\n=============\n")
# plt.plot(np.ndarray.flatten(xi), np.ndarray.flatten(yi), 'b.')
# plt.scatter( x, y, c=colors)
# plt.show()
if with_figures:
for i in range(nr_items):
data[assignment[i][2],:] = np.array([assignment[i][0], assignment[i][1]])
x = list(data[:,0])
y = list(data[:,1])
image_name = result_path + r"\grid_result_plot.pdf"
fig = plt.figure(figsize=(figure_size, figure_size))
plt.scatter( x, y, c=colors)
plt.title("Result of forming a grid from a space")
plt.axis([-1, grid_size+1, -1, grid_size+1])
# fig.savefig(image_name, bbox_inches='tight')
fig.savefig(image_name)
plt.close()
# return result
return assignment, grid_size
def get_minst_data(file):
f = open(file, 'r')
data = []
labels = []
for line in f:
line = line.replace("\n", "")
instance = line.split(" ")
data.append([float(instance[0]), float(instance[1])])
labels.append(float(instance[2]))
return data,labels
if __name__ == "__main__":
# random_data = (np.random.random((2500, 2)) * 6) -3
# random_data[0:100,:] = (np.random.random((500, 2)) * 3) + 0.5
# random_data[500:2500,:] = (np.random.random((2000, 2)) * 6) -3
# ass, grid_size = space_to_grid_iterative(random_data)
# file = r"K:\Lydia\code\tsne_python\minst_data_reduced.txt"
# (data, labels) = get_minst_data(file)
# data = np.array(data)
# plt.scatter(data[:,0], data[:,1], c=labels)
# plt.show()
# print("shape:", data.shape)
# (assignment, grid_size) = space_to_grid_iterative(data )
# index = 0
# x=[]
# y=[]
# l=[]
# for elem in assignment:
# x.append(elem[0])
# y.append(elem[1])
# l.append(labels[elem[2]])
# plt.scatter(x, y, c=l);
# plt.show()
update_neighborhood = 500
data_case = "\cutoff_10_nolog"
restart(r"D:\Users\Lydia\results puzzle" + data_case, False, 2160, 108)
|
[
"lyltje@gmail.com"
] |
lyltje@gmail.com
|
4dd4f469bb006de018703e400da050fb576366f7
|
dabe0cafc1533e83b7748d0c851e9b0c5d11aedf
|
/lurkServer/svr.py
|
d15654f35981ed0d9b355b4d7d811a8708a91e2c
|
[] |
no_license
|
Jesse-McDonald/LURK-Server
|
d805e17a863ba61ffd4a10e831fa835a1a9af973
|
26addc1e76ffe6543d6e6b4e2d71dcc2bfa10a1c
|
refs/heads/master
| 2021-07-08T15:02:48.707964
| 2017-10-06T04:56:51
| 2017-10-06T04:56:51
| 105,967,743
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 87
|
py
|
from subprocess import call
import sys
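# Keep the LURK server alive: relaunch the LurkServer binary whenever it
# exits, forwarding the first command-line argument through unchanged.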
while True:
call(["./LurkServer", sys.argv[1]])
|
[
"noreply@github.com"
] |
Jesse-McDonald.noreply@github.com
|
5596f3080418c4b15b4e17a3ab5dcca54c88f88b
|
54fc684e36f5326de8a1ac1d5c36467b61f84ec7
|
/flask-aws/bin/easy_install
|
d9fb9298420a69f7626b6363347206ebf5b3c9a8
|
[] |
no_license
|
cgiglio/scraper
|
b3b4ee57fef787db94bc2f001e2fcf14724062b3
|
67fcf3bc13a02af1bba43451cdb6d9d646e9e391
|
refs/heads/master
| 2021-01-10T13:28:59.672939
| 2016-03-30T03:37:57
| 2016-03-30T03:37:57
| 55,030,019
| 0
| 1
| null | 2020-07-25T22:42:42
| 2016-03-30T03:37:13
|
Python
|
UTF-8
|
Python
| false
| false
| 377
|
#!/Users/cgiglio/Desktop/flask-aws-tutorial-master/flask-aws/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'distribute==0.6.24','console_scripts','easy_install'
__requires__ = 'distribute==0.6.24'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('distribute==0.6.24', 'console_scripts', 'easy_install')()
)
|
[
"chris.giglio1@gmail.com"
] |
chris.giglio1@gmail.com
|
|
1fe15d6c65cd4c39ca0921ffd6169470c7071826
|
d8626ce85cb10f6259afe8567b2eb227d897008b
|
/exemplo2.py
|
a0aeb0977c088e685ef474bcba4f74962ac499ae
|
[] |
no_license
|
pd4ni3l/Deteccao
|
c40ece447c4d0509a86ab0464a651732248abb87
|
4aa3159f76fd8359ea853f257e6aac83811adfa2
|
refs/heads/master
| 2020-06-08T04:17:29.584171
| 2019-06-21T20:26:39
| 2019-06-21T20:26:39
| 193,156,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
import cv2
classificadorFace = cv2.CascadeClassifier('cascades/haarcascade_frontalface_default.xml')
classificadorOlhos = cv2.CascadeClassifier('cascades/haarcascade_eye.xml')
imagem = cv2.imread('pessoas/faceolho.jpg')
imagemCinza = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)
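# Hedged completion sketch (not part of the original script): run the face
# cascade on the grayscale image, draw the detections and show the result.
deteccoesFace = classificadorFace.detectMultiScale(imagemCinza, scaleFactor=1.3, minNeighbors=5)
for (x, y, l, a) in deteccoesFace:
    cv2.rectangle(imagem, (x, y), (x + l, y + a), (0, 0, 255), 2)
cv2.imshow('Deteccao', imagem)
cv2.waitKey(0)
cv2.destroyAllWindows()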
|
[
"pd.rg@outlook.com"
] |
pd.rg@outlook.com
|
7f88a65ad57645d2e64297907867372f22c9743f
|
7ec3f117b80675b46e69adbd47622ce56e3d4879
|
/001.py
|
aa32a344b3f5687faf15376d68ca9a27d0ec7615
|
[
"MIT"
] |
permissive
|
phibra/Project_Euler
|
feacd9f56da80edc67c5fe84df7ef1ccf195cba9
|
0de6d26825323453b583d8e1c99e3b4360e17e58
|
refs/heads/master
| 2021-05-05T23:58:07.903422
| 2018-08-17T23:32:30
| 2018-08-17T23:32:30
| 116,863,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
# Project Euler 001: sum of all the multiples of 3 or 5 below 1000
total = 0
for n in range(1, 1000):
    if (n % 3 == 0) or (n % 5 == 0):
        total += n
print(total)
|
[
"noreply@github.com"
] |
phibra.noreply@github.com
|
0a55919f28900b1616e8bd049fd104b042b7297f
|
00a4b05f985d88329057585e7fd5e986e2a85d56
|
/functions_frontend.py
|
c4d1a93fc6d316ec87128b9d5d9eeb99f8cf4858
|
[] |
no_license
|
jakobludewig/image_keyword_generator
|
7c8809912ed90f6e32bf66e79baaf982a51ff44a
|
23a0c502ff48b93b217b421bce3d4de91480df69
|
refs/heads/main
| 2023-07-06T05:47:39.243370
| 2021-07-25T15:58:09
| 2021-07-25T15:58:09
| 389,136,357
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,810
|
py
|
import pandas as pd
import ast
from typing import List
import matplotlib.pyplot as plt
from PIL.Image import Image as PILImage
from PIL import ImageOps, Image
def read_imagenet_labels() -> pd.DataFrame:
""" Read in imagenet labels, taken from https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a#file-imagenet1000_clsidx_to_labels-txt
Returns:
pd.DataFrame: DataFrame containing the labels assigned to each image file
"""
with open("imagenet1000_labels.txt", "r") as f:
s = f.read()
imagenet_labels = ast.literal_eval(s)
imagenet_labels = pd.DataFrame(
{"label": imagenet_labels.keys(), "label_text": imagenet_labels.values()}
)
return imagenet_labels
def query_image_labels(database: pd.DataFrame, filenames: List[str]) -> List[dict]:
"""Read in the tags associated with a given list of image files from the keyword database
Args:
database (pd.DataFrame): The keyword database generated by the build_keyword_database CLI program
filenames (List[str]): List of image filenames for which to look up the keywords
Returns:
List[dict]: List of dictionaries which contain the image filename and the assigned keywords
"""
images_dict = [
{
"filename": f,
"labels": database[database.filename.isin([f])][["label_text", "prob"]]
.set_index("label_text")
.to_dict()["prob"],
}
for f in filenames
]
return images_dict
def transform_image(image: PILImage, target_width: int = 400) -> PILImage:
""" Transforms a given PILImage to make it suitable for plotting.
Args:
image (PILImage): PILImage to transform
target_width (int, optional): The target width of the transformed image, will preserve aspect ratio. Defaults to 400.
Returns:
PILImage: Transformed PILImage
"""
# filename attribute gets lost during transformations so we have to re-assign it afterwards
filename = image.filename
    scaling_factor = target_width / image.size[0]
    image = image.resize(
        size=(
            round(image.size[0] * scaling_factor),
            round(image.size[1] * scaling_factor),
        )
    )
image = ImageOps.exif_transpose(image)
image.filename = filename
return image
def load_images_and_labels(database: pd.DataFrame, filenames: List[str]) -> List[dict]:
""" Loads the specified images and the associated labels from the keyword database into a list of dictionaries
Args:
database (pd.DataFrame): The keyword database generated by the build_keyword_database CLI program
filenames (List[str]): List of image filenames to load
Returns:
List[dict]: List of dictionaries containing the images and their associated labels from the keyword database
"""
images_dict = query_image_labels(database, filenames)
images_dict = [
{**i, "image": transform_image(Image.open(i["filename"]))} for i in images_dict
]
return images_dict
def query_top_images_for_label(
database: pd.DataFrame, label: str, n: int = 5
) -> List[str]:
""" Query the keyword database for the top n images for a given label.
Args:
database (pd.DataFrame): The keyword database generated by the build_keyword_database CLI program
label (str): The label for which to query the database
n (int, optional): The number of images to return for the given label. Defaults to 5.
Returns:
List[str]: List containing the filenames of the top n images for the specified label
"""
top_images_label = (
database[database["label_text"] == label]
.sort_values("prob", ascending=False)
.head(n)
)
return top_images_label.filename.tolist()
def load_top_n_images_for_label(
database: pd.DataFrame, label: str, n: int = 5
) -> List[dict]:
""" Loads top n images and labels for a specified label
Args:
database (pd.DataFrame): The keyword database generated by the build_keyword_database CLI program
label (str): The label for which to query the database
n (int, optional): The number of images to return for the given label. Defaults to 5.
Returns:
List[dict]: List of dictionaries containing the images and their associated labels from the keyword database
"""
return load_images_and_labels(
database, query_top_images_for_label(database, label, n),
)
def plot_image_with_labels(
image: PILImage, labels: dict, full_path: bool = True
) -> None:
""" Plot an image alongside its associated labels
Args:
image (PILImage): The image to plot
labels (dict): Dictionary containing the labels with their predicted probability
full_path (bool): Flag indicating whether the full path or just the filename should be displayed. Defaults to True
"""
fig = plt.figure(figsize=(16, 12))
ax1 = fig.add_subplot(1, 6, (1, 4))
ax1.set_axis_off()
ax1.imshow(image)
ax2 = fig.add_subplot(1, 6, (5, 6))
if full_path:
labels_text = "File: " + image.filename + "\n"
else:
labels_text = "File: " + "[...]/" + image.filename.split("/")[-1] + "\n"
labels_text = labels_text + "\n".join(
[k + ": " + str(round(100 * v, 1)) + " %" for k, v in labels.items()]
)
ax2.set_axis_off()
ax2.text(0, 0.5, labels_text, fontsize=16)
def plot_all_images_in_dict(images_dict: dict, full_path: bool = True) -> None:
""" Plot all images and their associated labels in the specified dictionary
Args:
images_dict (dict): Dictionary containing the images and their associated labels
full_path (bool): Flag indicating whether the full path or just the filename should be displayed. Defaults to True
"""
for img in images_dict:
plot_image_with_labels(img["image"], img["labels"], full_path)
def plot_top_n_images_for_label(database: pd.DataFrame, label: str, n: int = 5) -> None:
""" Plots top n images and labels for a specified label
Args:
database (pd.DataFrame): The keyword database generated by the build_keyword_database CLI program
label (str): The label for which to query the database
n (int, optional): The number of images to return for the given label. Defaults to 5.
"""
plot_all_images_in_dict(
load_images_and_labels(
database, query_top_images_for_label(database, label, n),
)
)
def plot_all_images_by_filename(database: pd.DataFrame, filenames: List[str]) -> None:
"""[summary]
Args:
database (pd.DataFrame): [description]
filenames (List[str]): [description]
"""
plot_all_images_in_dict(load_images_and_labels(database, filenames))
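# Minimal usage sketch (the `database` DataFrame is assumed to come from the
# build_keyword_database CLI program; the label value is hypothetical):
#   plot_top_n_images_for_label(database, "tabby, tabby cat", n=3)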
|
[
"jakobludewig@gmx.net"
] |
jakobludewig@gmx.net
|
e4125f181e2989b349e722ba298c236ba8e32e14
|
9ebc9bba7577c958cc83bf52573303404ea3c7f1
|
/mycasa_scripts_active/scripts_ts09_phangs_r21/myim02a_regrid.py
|
3b7da75c7b6afa593266e5253efdae6c940837c4
|
[] |
no_license
|
toshikisaito1005/mycasa_scripts
|
3c3d8942d492ea5b5d28bfe7348764caea857549
|
6367ce6c28e0fe6f98e3adae9823843ba7742da1
|
refs/heads/master
| 2021-08-10T23:02:38.690492
| 2020-10-01T20:10:00
| 2020-10-01T20:10:00
| 225,368,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,425
|
py
|
import os
import glob
import numpy as np
import scripts_phangs_r21 as r21
dir_data = "/Users/saito/data/myproj_active/proj_ts09_phangs_r21/data/"
galnames = ["ngc0628","ngc4321","ngc3627"]
image_lengths = [280.,230.,280.] # arcsec
direction_ras = ["24.174deg","185.729deg","170.063deg"]
direction_decs = ["15.783deg","15.8223deg","12.9914deg"]
chanss = ["14~36","","25~74"]
#####################
### Main Procedure
#####################
os.system("rm -rf " + dir_data.replace("r21/data/","r21/data_ready"))
os.system("mkdir " + dir_data.replace("r21/data/","r21/data_ready"))
for i in range(len(galnames)):
imagenames = glob.glob(dir_data + galnames[i] + "*co*.image*")
imagenames.sort()
r21.gridtemplate(imagenames[0],
image_lengths[i],
direction_ras[i],
direction_decs[i])
for j in range(len(imagenames)):
output_tmp = imagenames[j].replace("r21/data","r21/data_ready")
output = output_tmp.replace(".image",".regrid").replace("_pbcor","")
os.system("rm -rf "+output)
imregrid(imagename=imagenames[j],
template="template.image",
output=output,
axes=[0,1])
outfile = output.replace(".regrid",".image")
os.system("rm -rf "+outfile)
immath(imagename = output,
expr = "IM0",
chans = chanss[i],
outfile = outfile)
os.system("rm -rf " + output)
# pbmasking
imagename = outfile
pbmask = outfile.replace(".image",".pbmask")
os.system("rm -rf " + pbmask + "_tmp")
immath(imagename = imagename,
expr = "iif( IM0 >=-100000000., 1.0, 0.0)",
outfile = pbmask + "_tmp")
#
imsmooth(imagename = pbmask + "_tmp",
major = "55.0arcsec",
minor = "55.0arcsec",
pa = "0deg",
outfile = pbmask + "_tmp2")
os.system("rm -rf " + pbmask + "_tmp")
#
os.system("rm -rf " + pbmask)
maxval = imstat(pbmask + "_tmp2")["max"][0]
immath(imagename = pbmask + "_tmp2",
expr = "iif( IM0 >=" + str(maxval*0.6) + ", 1.0, 0.0)",
outfile = pbmask)
os.system("rm -rf " + pbmask + "_tmp2")
os.system("rm -rf template.*")
os.system("rm -rf *.last")
#os.system("rm -rf " + dir_data)
|
[
"toshikisaito1005@gmail.com"
] |
toshikisaito1005@gmail.com
|
6fee09609ee5fbcdc6b8a46948f4d158b0456f9f
|
efba6a09d46b95400716db5359c228e7fc844ac8
|
/Llamo/while1.py
|
84677a4e64ae4ad282471a4bed2685ed178c4acd
|
[] |
no_license
|
llamo-unprg28/t07_Llamo.Catter
|
4384f4bf201d9f33e1b0ee8b1e4b2026aec69003
|
a7c760c27e7b2e97c12eb08423475308875b1fb8
|
refs/heads/master
| 2020-11-23T23:21:58.067804
| 2019-12-15T18:48:15
| 2019-12-15T18:48:15
| 227,862,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 306
|
py
|
# ONLY PEOPLE OLDER THAN 18 AND YOUNGER THAN 60 MUST VOTE IN THESE ELECTIONS
# DISPLAY "YOU HAVE TO VOTE" ON SCREEN
EDAD = 0
EDAD_INVALIDA = (EDAD < 18 or EDAD > 60)
while (EDAD_INVALIDA):
    EDAD = int(input("enter age: "))
    EDAD_INVALIDA = (EDAD < 18 or EDAD > 60)
# end_while
print("you have to vote")
|
[
"jllamo@unprg.edu.pe"
] |
jllamo@unprg.edu.pe
|
c32f46eac1c711c70e73dc5bff0f43060bae6713
|
e19b1f6318a951501b31a23d2cea4bcbe9aaa872
|
/lists.py
|
c37baf1f44eb07e83987b4e3337e6043148301b1
|
[] |
no_license
|
usmanwardag/Python-Tutorial
|
737a1e6f553e4f66d1b18eaf19c4213dc029db08
|
b4d941046c19e2e83d0d680f8a465fe19bdf090c
|
refs/heads/master
| 2021-01-10T02:04:13.322451
| 2016-02-06T16:16:15
| 2016-02-06T16:16:15
| 49,075,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,394
|
py
|
import sys
def listConcepts():
'''Illustrates basic list concepts'''
a = ['Usman','Mahmood','Khan']
b = a #Does not create a copy
c = a[:] #Creates a copy
a.append('-')
print b #Changes with 'a'
print c #Does not change with 'a'
if 'Khan' in a:
#do something
print 'Yay'
print '--------------------------------------------------'
'''
Implements various list methods such as
append, insert and extend.
'''
def listMethods():
a = ['Usman','Mahmood','Khan']
b = [1,2,3]
a.append('-')
print a
a.insert(0,'Mr.')
print a
a.extend(b) #Adds elements of b towards the end
print a
a.remove('-') #Finds the element and removes it
print a
    a.pop(0) #Removes the element at the given index no.
print a
#Note that these methods do not return lists
print '--------------------------------------------------'
'''
Implements list comprehension, which is
a faster method than simple looping.
'''
def listComprehension():
nums = [1, 2, 3, 4]
vals = [10,11,12,13]
print [n*n for n in nums]
print [n*n for n in nums if n>=3]
print [n*j for n in nums for j in vals]
print '--------------------------------------------------'
'''
Main function to run test modules.
Call any one of the functions listed above
in main() to test it.
'''
def main():
listConcepts()
#listMethods()
#listComprehension()
if __name__ == '__main__':
main()
|
[
"usmanwardag@gmail.com"
] |
usmanwardag@gmail.com
|
0ef785d7f477ae2850e5fbeb4b06a99bccf45cfc
|
dece3eb22be792aeac65ea12a1f183dd73498add
|
/Tkinter/5.py
|
b5138b09936d918a4817a1c13949e5e3b7c8c784
|
[] |
no_license
|
santanu5670/Python
|
352515ad82f94157e7f74467c5e7dedd6c9069c8
|
48c2779ccf934588f2bfce7cd322088acec04080
|
refs/heads/master
| 2023-06-24T09:22:49.925654
| 2021-07-22T15:17:34
| 2021-07-22T15:17:34
| 387,683,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
# JPEG/JPG images are not supported natively by tkinter, so first install Pillow
# command: pip install pillow
# then use the following code to display a JPEG/JPG image
from tkinter import *
from PIL import Image,ImageTk
root=Tk()
root.geometry("1200x600")
image=Image.open("python_image.jpeg")
photo=ImageTk.PhotoImage(image)
a=Label(image=photo)
a.pack()
b=Label(text="This is a Image of Python")
b.pack()
root.mainloop()
|
[
"santanu2539@gmail.com"
] |
santanu2539@gmail.com
|
b086c183edb2473fc1947cf98bd7f319be257ba0
|
2b4a809b6a78c920e4d2a27bae4eb2cf5c0233e7
|
/lesson-08/roll_dice_v5.0.py
|
b61e2c14bddd0211e9dc0a67a89d2785cda368c8
|
[
"MIT"
] |
permissive
|
hemiaoio/learn-python
|
c748f906412657aa185348de4d3aa16bbf014928
|
4b3885ed7177db4e6e03da80dd9ed69719c8d866
|
refs/heads/master
| 2018-12-20T11:17:53.934263
| 2018-09-23T01:15:17
| 2018-09-23T01:15:17
| 117,075,237
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,142
|
py
|
"""
功能:模拟掷骰子
版本:3.0
2.0新增功能:模拟连个骰子
3.0新增功能:可视化抛掷两个骰子的结果
4.0新增功能:使用直方图统计结果
5.0新增功能:使用科学计算库简化程序
"""
import random
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# Fix Chinese (CJK) character rendering in matplotlib
plt.rcParams["font.sans-serif"] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
def main():
roll1_arr = np.random.randint(1, 7, size=1000)
roll2_arr = np.random.randint(1, 7, size=1000)
roll3_arr = np.random.randint(1, 7, size=1000)
    result_list = roll1_arr + roll2_arr + roll3_arr
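    # bin edges 3..19 cover every possible sum of three dice (3..18)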
hist, bins = np.histogram(result_list, bins=range(3, 6*3+2))
print(hist)
print(bins)
    # normed=1 displays the Y axis as a ratio (1 as the denominator)
    # edgecolor: bar edge color
    # linewidth: bar edge width
    plt.hist(result_list, bins=bins, normed=1,
             edgecolor='#FFFFFF', linewidth=1)
    # Chart title and axis labels
    plt.title('Dice roll statistics')
    plt.xlabel('Points')
    plt.ylabel('Frequency')
plt.show()
if __name__ == '__main__':
main()
|
[
"hemiao@woyouqiu.com"
] |
hemiao@woyouqiu.com
|
78fdb205cc61f39b5896322fa4b0424416df751d
|
199f3f91ec71f7af34d379d285c1535c26dab976
|
/config/urls.py
|
6aeac6c1562b5b653b356439082310747eefc3ff
|
[] |
no_license
|
umarhussain88/Akhbar
|
0a78d3303346d06ea020a82f259b83b5bb3a202b
|
0dba82cc4503b2192757c6702ee713c240bd65bc
|
refs/heads/master
| 2023-03-04T09:34:15.615795
| 2021-02-21T22:55:45
| 2021-02-21T22:55:45
| 341,019,660
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 999
|
py
|
"""config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.views.generic import TemplateView
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('accounts.urls')),
path('accounts/', include('django.contrib.auth.urls')),
path('articles/', include('articles.urls')),
path('', include('pages.urls')),
]
|
[
"umar.hussain.da@outlook.com"
] |
umar.hussain.da@outlook.com
|
7aa10abc887ca1e890a309e7633a9c236026ef95
|
826b5fd4c31612a16bd60e5fc77188f8536e7f29
|
/Leap_UI_IOS/page/bases/base_login.py
|
707bcd868f9498a0c0fb3bb6e94e4f4fcc004d8b
|
[] |
no_license
|
liminghui774373994/Leap_UI_IOS
|
bb9e33305d810c2e1faeb83ea8b9a3acce4bb583
|
660dd1573df8e417c20339c238a4d09765271eaa
|
refs/heads/master
| 2023-07-11T16:19:41.136610
| 2021-08-09T12:13:48
| 2021-08-09T12:13:48
| 394,159,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,209
|
py
|
# -*- encoding=utf8 -*-
__author__ = "liminghui-2021-05-08"
from common.settings import *
class Login:
def __init__(self):
        # Element definitions
self.moudle = 'login'
self.path = Common()
self.parents_center = '家长中心'
self.my_button = "我的"
self.password_to_login = "密码登录"
self.input_phone = "TextField"
self.input_pass = "SecureTextField"
self.login_icon_unselected = "login icon unselected"
self.login_button = "登录"
self.check_input = '定位账号输入框.png'
self.clearbutton = '清除按钮.png'
self.leap = '励步.png'
self.photo = '头像.png'
self.phone = '19600002021'
self.passw = 'a1234567'
def get_mytab(self):
my_button = self.my_button
return my_button
def get_password_to_login(self):
password_to_login = self.password_to_login
return password_to_login
def get_clear_phone(self):
clearphone_path = self.path.get_path(self.moudle, self.check_input)
return clearphone_path
def get_clear_button(self):
clear_button_path = self.path.get_path(self.moudle,self.clearbutton)
return clear_button_path
def get_input_phone(self):
input_pass = self.input_phone
return input_pass
def get_phone(self):
phone = self.phone
return phone
def get_input_pass(self):
input_pass = self.input_pass
return input_pass
def get_password(self):
password = self.passw
return password
def get_close_board(self):
leap_path = self.path.get_path(self.moudle, self.leap)
return leap_path
def get_privacy_agreement(self):
privacy_agreement = self.login_icon_unselected
return privacy_agreement
def get_login_button(self):
login_button = self.login_button
return login_button
def get_student(self):
photo_path = self.path.get_path(self.moudle, self.photo)
return photo_path
def get_parents_center(self):
parents_center = self.parents_center
return parents_center
|
[
"774373994@qq.com"
] |
774373994@qq.com
|
358672c997418ff955972fdc53fedb2ef810efad
|
ec7194c4f037875c860d1b2f7578b29b71211e55
|
/code/ML/text-clfn/predict-correlations.py
|
b97ede6b81e80258e2f0ef66d6a8ce0da8d96ffd
|
[] |
no_license
|
perdaug/iow
|
d8576d2f5350daca6026474c1b32a75dac01cb40
|
92de481379ded9d5d7902728940a6c91b96b2ae5
|
refs/heads/master
| 2021-03-30T18:13:08.227885
| 2018-09-10T06:12:04
| 2018-09-10T06:12:04
| 63,000,233
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,604
|
py
|
"""
VERSION
- Python 3
FUNCTION
- Classifying the images based on textual features.
"""
import os
from sklearn.datasets import load_files
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn import metrics
import numpy as np
from optparse import OptionParser
import pickle as pkl
op = OptionParser()
op.add_option('--source', action='store', type=str,
help='The source of the corpora.')
op.add_option('--lexicon', action='store', type=str,
help='The source of the lexicon.')
op.add_option('--claim', action='store', type=str,
help='The choice of lexicon.')
op.add_option('--experiment', action='store', type=str,
help='The choice of an experiment.')
(opts, args) = op.parse_args()
PATH_HOME = os.path.expanduser('~') + '/Projects/iow'
PATH_LEXICON = PATH_HOME + '/data/FE/textual/' + opts.lexicon \
+ '/features-separate_' + opts.claim + '/'
PATH_SOURCE = PATH_HOME + '/data/FE/textual/' + opts.source
PATH_OUT = PATH_HOME + '/data/ML/text-clfn/' + opts.claim + '/' \
+ opts.source + '/' + opts.lexicon + '/'
PATH_EXPERIMENT = '{}/data/settings/dirs_{}-{}.txt'.format(
PATH_HOME, opts.claim, opts.experiment)
if not os.path.exists(PATH_OUT):
os.makedirs(PATH_OUT)
# ___________________________________________________________________________
def main():
vectoriser_count = CountVectorizer(stop_words='english')
transformer_tfidf = TfidfTransformer()
'''
Initialising the lexicon.
'''
file_text = open(PATH_EXPERIMENT, 'r')
categories = file_text.read().split('\n')
lexicon_sk = load_files(PATH_LEXICON, categories=categories)
# print(lexicon_sk.data[0])
lexicon_vectorised = vectoriser_count.fit_transform(lexicon_sk.data)
X_lexicon = transformer_tfidf.fit_transform(lexicon_vectorised)
y_lexicon = lexicon_sk.target
# print(len(y_lexicon))
# return
# print(lexicon_sk.target_names)
# print(lexicon_sk.target)
# names_target = np.array(lexicon_sk.target_names)[lexicon_sk.target]
names_target = np.array(lexicon_sk.target_names)
print(lexicon_sk.target_names)
# print(names_target )
pkl.dump(names_target, open(PATH_OUT + 'target.pkl', 'wb'))
# return
'''
Initialising the corpora.
'''
corpora_sk = load_files(PATH_SOURCE, categories=categories)
corpora_vectorised = vectoriser_count.transform(corpora_sk.data)
X_corpora = transformer_tfidf.transform(corpora_vectorised)
y_true_corpus = corpora_sk.target
'''
Running the parameter tuning
'''
from sklearn.naive_bayes import MultinomialNB
from sklearn import svm
from sklearn import linear_model
# from sklearn.grid_search import GridSearchCV
# from sklearn.model_selection import StratifiedShuffleSplit
# cv = StratifiedShuffleSplit(n_splits=5, test_size=0.3)
# range_C = np.logspace(-6, 6, 3)
# tuned_parameters = [{'C': range_C}]
# clf = GridSearchCV(linear_model.LogisticRegression(), tuned_parameters, cv=2, scoring='precision_macro')
# for (idxs_train, idxs_test) in cv.split(X_corpora, y_true_corpus):
# X_train = X_corpora[idxs_train]
# X_test = X_corpora[idxs_test]
# y_train = y_true_corpus[idxs_train]
# y_test = y_true_corpus[idxs_test]
# clf.fit(X_train, y_train.ravel())
# y_pred = clf.predict(X_test)
# print(clf.best_params_)
# report_clf = metrics.classification_report(y_test, y_pred)
# print(report_clf)
# return
'''
Running the classification
'''
# clfs = [MultinomialNB(), svm.SVC(decision_function_shape='ovo'),
# linear_model.LogisticRegression(C=1e5)]
# names_clf = ['nb', 'svm', 'log-reg']
clfs = [MultinomialNB(), linear_model.LogisticRegression(C=1)]
names_clf = ['nb', 'log-reg']
for clf, name_clf in zip(clfs, names_clf):
print('Running model: %s' % name_clf)
clf.fit(X_lexicon, y_lexicon)
y_pred_corpus = clf.predict(X_corpora)
report_clf = metrics.classification_report(y_true_corpus,
y_pred_corpus)
matrix_conf = metrics.confusion_matrix(y_true_corpus, y_pred_corpus)
path_model = PATH_OUT + name_clf + '/'
if not os.path.exists(path_model):
os.makedirs(path_model)
pkl.dump(matrix_conf, open(path_model + 'matrix-conf.pkl', 'wb'))
print(report_clf)
print(matrix_conf)
print(dir(clf))
if __name__ == '__main__':
main()
|
[
"arijus.pleska@inria.fr"
] |
arijus.pleska@inria.fr
|
8c8ea78c0a1faf00c55e03395b8ea646bcb1b392
|
c6d6f62b89e063759f69e179bf7983a13c8180b5
|
/src/customers/management/commands/add_kml_folder_user.py
|
38eaa74a1e0aef611fa637a29a50a106c51c6b62
|
[] |
no_license
|
gtsarik/GSI
|
170bba1b21b98a44f0fc0f7b9e25bc4b3ae31e9f
|
e625a189ebb79ebb772f752fbe6230a3e9f68acb
|
refs/heads/master
| 2021-01-21T17:06:34.026805
| 2018-01-25T13:13:59
| 2018-01-25T13:13:59
| 47,333,208
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
# -*- coding: utf-8 -*-
import os
import shutil
from django.core.management.base import BaseCommand
from django.conf import settings
from gsi.settings import KML_PATH, KML_DIRECTORY
from customers.models import CustomerPolygons
class Command(BaseCommand):
def handle(self, *args, **options):
for cp in CustomerPolygons.objects.all():
old_path_kml = cp.kml_path
path_kml_user = os.path.join(KML_PATH, cp.user.username, cp.kml_name)
new_path_dir_kml_user = os.path.join(KML_PATH, cp.user.username)
dir_kml_user = os.path.join(KML_DIRECTORY, cp.user.username, cp.kml_name)
if not os.path.exists(new_path_dir_kml_user):
os.makedirs(new_path_dir_kml_user)
try:
if not os.path.exists(old_path_kml):
old_path_kml = os.path.join(KML_PATH, cp.kml_name)
shutil.move(old_path_kml, new_path_dir_kml_user)
except Exception, e:
pass
cp.kml_path = path_kml_user
kml_http = cp.kml_url.split('media')[0]
cp.kml_url = os.path.join(kml_http, dir_kml_user)
cp.save()
print '******** DONE ********'
|
[
"artgrem@gmail.com"
] |
artgrem@gmail.com
|
1ca8a96e0b71e9a58189e2862eea29a54f47696b
|
4e39710ae6f45efa126c7b13405a3828569328d6
|
/androidtv/adb_manager.py
|
7a653ea0c0113b612ae3efb0c5f229b48ce88608
|
[
"MIT"
] |
permissive
|
achupryn/python-androidtv
|
8fadca02e7ea1c6626ee1380668fd54ac32ca409
|
f07cef9d2a23ecc74159f2aff214f48d3ae6134e
|
refs/heads/master
| 2020-08-14T02:07:36.472217
| 2019-10-14T16:19:14
| 2019-10-14T16:19:14
| 215,078,002
| 0
| 0
| null | 2019-10-14T15:23:43
| 2019-10-14T15:23:42
| null |
UTF-8
|
Python
| false
| false
| 10,457
|
py
|
"""Classes to manage ADB connections.
* :py:class:`ADBPython` utilizes a Python implementation of the ADB protocol.
* :py:class:`ADBServer` utilizes an ADB server to communicate with the device.
"""
import logging
from socket import error as socket_error
import sys
import threading
from adb_shell.adb_device import AdbDevice
from adb_shell.auth.sign_pythonrsa import PythonRSASigner
from adb_messenger.client import Client
from .constants import DEFAULT_AUTH_TIMEOUT_S
_LOGGER = logging.getLogger(__name__)
#: Use a timeout for the ADB threading lock if it is supported
LOCK_KWARGS = {'timeout': 3} if sys.version_info[0] > 2 and sys.version_info[1] > 1 else {}
if sys.version_info[0] == 2: # pragma: no cover
FileNotFoundError = IOError # pylint: disable=redefined-builtin
class ADBPython(object):
"""A manager for ADB connections that uses a Python implementation of the ADB protocol.
Parameters
----------
host : str
The address of the device in the format ``<ip address>:<host>``
adbkey : str
The path to the ``adbkey`` file for ADB authentication
"""
def __init__(self, host, adbkey=''):
self.host = host
self.adbkey = adbkey
self._adb = AdbDevice(serial=self.host, default_timeout_s=9.)
# keep track of whether the ADB connection is intact
self._available = False
# use a lock to make sure that ADB commands don't overlap
self._adb_lock = threading.Lock()
@property
def available(self):
"""Check whether the ADB connection is intact.
Returns
-------
bool
Whether or not the ADB connection is intact
"""
return self._adb.available
def close(self):
"""Close the ADB socket connection.
"""
self._adb.close()
def connect(self, always_log_errors=True, auth_timeout_s=DEFAULT_AUTH_TIMEOUT_S):
"""Connect to an Android TV / Fire TV device.
Parameters
----------
always_log_errors : bool
If True, errors will always be logged; otherwise, errors will only be logged on the first failed reconnect attempt
auth_timeout_s : float
Authentication timeout (in seconds)
Returns
-------
bool
Whether or not the connection was successfully established and the device is available
"""
self._adb_lock.acquire(**LOCK_KWARGS) # pylint: disable=unexpected-keyword-arg
# Make sure that we release the lock
try:
# Catch errors
try:
if self.adbkey:
# private key
with open(self.adbkey) as f:
priv = f.read()
# public key
try:
with open(self.adbkey + '.pub') as f:
pub = f.read()
except FileNotFoundError:
pub = ''
signer = PythonRSASigner(pub, priv)
# Connect to the device
self._adb.connect(rsa_keys=[signer], auth_timeout_s=auth_timeout_s)
else:
self._adb.connect(auth_timeout_s=auth_timeout_s)
# ADB connection successfully established
self._available = True
_LOGGER.debug("ADB connection to %s successfully established", self.host)
except socket_error as serr:
if self._available or always_log_errors:
if serr.strerror is None:
serr.strerror = "Timed out trying to connect to ADB device."
_LOGGER.warning("Couldn't connect to host %s, error: %s", self.host, serr.strerror)
# ADB connection attempt failed
self._adb.close()
self._available = False
finally:
return self._available
finally:
self._adb_lock.release()
def shell(self, cmd):
"""Send an ADB command using the Python ADB implementation.
Parameters
----------
cmd : str
The ADB command to be sent
Returns
-------
str, None
The response from the device, if there is a response
"""
if not self.available:
_LOGGER.debug("ADB command not sent to %s because python-adb connection is not established: %s", self.host, cmd)
return None
if self._adb_lock.acquire(**LOCK_KWARGS): # pylint: disable=unexpected-keyword-arg
_LOGGER.debug("Sending command to %s via python-adb: %s", self.host, cmd)
try:
return self._adb.shell(cmd)
finally:
self._adb_lock.release()
else:
_LOGGER.debug("ADB command not sent to %s because python-adb lock not acquired: %s", self.host, cmd)
return None
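# Illustrative ADBPython usage (host and key path are hypothetical):
#   adb = ADBPython('192.168.0.10:5555', adbkey='/home/user/.android/adbkey')
#   if adb.connect():
#       output = adb.shell('getprop ro.product.model')
#   adb.close()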
class ADBServer(object):
"""A manager for ADB connections that uses an ADB server.
Parameters
----------
host : str
The address of the device in the format ``<ip address>:<host>``
adbkey : str
The path to the ``adbkey`` file for ADB authentication
adb_server_ip : str
The IP address of the ADB server
adb_server_port : int
The port for the ADB server
"""
def __init__(self, host, adb_server_ip='', adb_server_port=5037):
self.host = host
self.adb_server_ip = adb_server_ip
self.adb_server_port = adb_server_port
self._adb_client = None
self._adb_device = None
# keep track of whether the ADB connection is intact
self._available = False
# use a lock to make sure that ADB commands don't overlap
self._adb_lock = threading.Lock()
@property
def available(self):
"""Check whether the ADB connection is intact.
Returns
-------
bool
Whether or not the ADB connection is intact
"""
if not self._adb_client:
return False
try:
# make sure the server is available
adb_devices = self._adb_client.devices()
# make sure the device is available
try:
# case 1: the device is currently available
if any([self.host in dev.get_serial_no() for dev in adb_devices]):
if not self._available:
self._available = True
return True
# case 2: the device is not currently available
if self._available:
_LOGGER.error('ADB server is not connected to the device.')
self._available = False
return False
except RuntimeError:
if self._available:
_LOGGER.error('ADB device is unavailable; encountered an error when searching for device.')
self._available = False
return False
except RuntimeError:
if self._available:
_LOGGER.error('ADB server is unavailable.')
self._available = False
return False
def close(self):
"""Close the ADB server socket connection.
Currently, this doesn't do anything.
"""
def connect(self, always_log_errors=True):
"""Connect to an Android TV / Fire TV device.
Parameters
----------
always_log_errors : bool
If True, errors will always be logged; otherwise, errors will only be logged on the first failed reconnect attempt
Returns
-------
bool
Whether or not the connection was successfully established and the device is available
"""
self._adb_lock.acquire(**LOCK_KWARGS) # pylint: disable=unexpected-keyword-arg
# Make sure that we release the lock
try:
try:
self._adb_client = Client(host=self.adb_server_ip, port=self.adb_server_port)
self._adb_device = self._adb_client.device(self.host)
# ADB connection successfully established
if self._adb_device:
_LOGGER.debug("ADB connection to %s via ADB server %s:%s successfully established", self.host, self.adb_server_ip, self.adb_server_port)
self._available = True
# ADB connection attempt failed (without an exception)
else:
if self._available or always_log_errors:
_LOGGER.warning("Couldn't connect to host %s via ADB server %s:%s", self.host, self.adb_server_ip, self.adb_server_port)
self._available = False
except Exception as exc: # noqa pylint: disable=broad-except
if self._available or always_log_errors:
_LOGGER.warning("Couldn't connect to host %s via ADB server %s:%s, error: %s", self.host, self.adb_server_ip, self.adb_server_port, exc)
# ADB connection attempt failed
self._available = False
finally:
return self._available
finally:
self._adb_lock.release()
def shell(self, cmd):
"""Send an ADB command using an ADB server.
Parameters
----------
cmd : str
The ADB command to be sent
Returns
-------
str, None
The response from the device, if there is a response
"""
if not self._available:
_LOGGER.debug("ADB command not sent to %s via ADB server %s:%s because pure-python-adb connection is not established: %s", self.host, self.adb_server_ip, self.adb_server_port, cmd)
return None
if self._adb_lock.acquire(**LOCK_KWARGS): # pylint: disable=unexpected-keyword-arg
_LOGGER.debug("Sending command to %s via ADB server %s:%s: %s", self.host, self.adb_server_ip, self.adb_server_port, cmd)
try:
return self._adb_device.shell(cmd)
finally:
self._adb_lock.release()
else:
_LOGGER.debug("ADB command not sent to %s via ADB server %s:%s because pure-python-adb lock not acquired: %s", self.host, self.adb_server_ip, self.adb_server_port, cmd)
return None
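# Usage sketch (hypothetical host and server address):
#
#     server = ADBServer('192.168.0.222:5555', adb_server_ip='127.0.0.1')
#     if server.connect():
#         print(server.shell('echo ok'))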
|
[
"noreply@github.com"
] |
achupryn.noreply@github.com
|
b26faaee57005647e91ee5eed1db8e45e42168e5
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/appflow_read_1/flow_list.py
|
699f8baee02a7393c0403dd0ed480108f71902a9
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,831
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import execute_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appflow/describe-flow.html
if __name__ == '__main__':
"""
create-flow : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appflow/create-flow.html
delete-flow : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appflow/delete-flow.html
list-flows : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appflow/list-flows.html
start-flow : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appflow/start-flow.html
stop-flow : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appflow/stop-flow.html
update-flow : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appflow/update-flow.html
"""
parameter_display_string = """
# flow-name : The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens (-) only.
"""
add_option_dict = {}
#######################################################################
# setting option use
# ex: add_option_dict["setting_matching_parameter"] = "--owners"
# ex: add_option_dict["setting_key"] = "owner_id"
#######################################################################
# single parameter
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
#######################################################################
# parameter display string
add_option_dict["parameter_display_string"] = parameter_display_string
execute_one_parameter("appflow", "describe-flow", "flow-name", add_option_dict)
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
5a196eef9655b0d0716fc8ab981d696c18e4abc4
|
3f11036a0a637125a6e7773b822b8aa8a78790eb
|
/PrNdOwN/colors.py
|
232c0c0d4dfe4dc031b869d76697f9731e4fd6f2
|
[
"MIT"
] |
permissive
|
asdlei99/PrNdOwN
|
08a4d20b67e140cc1464305b9955f5a7be4fc4eb
|
5f524e5f4c0bf73b8657bcd6017e4465a12e5965
|
refs/heads/master
| 2023-08-28T12:32:17.419494
| 2021-10-18T14:30:40
| 2021-10-18T14:30:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,273
|
py
|
#!/usr/bin/python3
# Created By ybenel
from random import randint
list = ["\033[1;33m","\033[1;34m","\033[1;30m","\033[1;36m","\033[1;31m","\033[35m","\033[95m","\033[96m","\033[39m","\033[38;5;82m","\033[38;5;198m","\033[38;5;208m","\033[38;5;167m","\033[38;5;91m","\033[38;5;210m","\033[38;5;165m","\033[38;5;49m","\033[38;5;160m","\033[38;5;51m","\033[38;5;13m","\033[38;5;162m","\033[38;5;203m","\033[38;5;113m","\033[38;5;14m"]
class get_colors():
def randomize():
return list[randint(0,23)]
def randomize1():
return list[randint(0,23)]
def randomize2():
return list[randint(0,23)]
def randomize3():
return list[randint(0,23)]
def yellow():
return list[0]
def cyan():
return list[3]
def red():
return list[4]
def white():
return list[8]
def green():
return list[9]
def magento():
return list[6]
def sharp_green():
return list[22]
def bright_megento():
return list[14]
def pink():
return list[10]
def sharp_megento():
return list[12]
def orange():
return list[11]
def sharp_orange():
return list[21]
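# Usage sketch (not part of the original module); "\033[0m" is the standard
# ANSI reset sequence, assumed here to restore the terminal's default colour.
if __name__ == '__main__':
    RESET = "\033[0m"
    print(get_colors.red() + "error" + RESET)
    print(get_colors.randomize() + "some random colour" + RESET)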
|
[
"r2dr0dn@pm.me"
] |
r2dr0dn@pm.me
|
a128bd48e8decf7d7b6a257ef954180c2cc1bd69
|
83e58395b163c17dd807eb1a6fe7fc89a572ae6a
|
/knn_sepal_class.py
|
c27fdc0d6a17afee60d23524234ca59ce4ef036c
|
[] |
no_license
|
tsbawa61/Basic-Python
|
3de77bc6c440cc80f287c998398769bc9fdd4950
|
db525798bf769bdb9aa9bd26ab7c8572d5800590
|
refs/heads/master
| 2020-06-12T14:21:33.520112
| 2019-07-09T08:02:51
| 2019-07-09T08:02:51
| 194,327,896
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 903
|
py
|
import pandas as pd
# loading training data
df = pd.read_csv(r'f:\PythonProgs\iris.csv')
df.head()
# define column names
names = ['sepal length', 'sepal width', 'petal length', 'petal width', 'species']
# loading libraries
import numpy as np
from sklearn.model_selection import train_test_split
# create design matrix X and target vector y
X=df.iloc[:,0:-1].values
y=df.iloc[:,-1].values
# split into train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# loading library
from sklearn.neighbors import KNeighborsClassifier
# instantiate learning model (k = 3)
knn = KNeighborsClassifier(n_neighbors=3)
# fitting the model
knn.fit(X_train, y_train)
# predict the response
pred = knn.predict(X_test)
# evaluate accuracy
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test, pred))
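# Sketch (not in the original script): compare a few values of k; reuses the
# train/test split from above. Defined only; call compare_k() to run it.
def compare_k(values=(1, 3, 5, 7, 9)):
    for k in values:
        model = KNeighborsClassifier(n_neighbors=k)
        model.fit(X_train, y_train)
        print(k, accuracy_score(y_test, model.predict(X_test)))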
|
[
"noreply@github.com"
] |
tsbawa61.noreply@github.com
|
b7e85d1268bde6c041074923388775ae00941141
|
b4e3f94e238419477e8e7a5cd19da4fed54a804c
|
/django_rest/settings.py
|
284253522ea464dd1217b5c3a70dabe1e4262ca4
|
[] |
no_license
|
mvillafuertem/django-rest
|
529058b9b59c911db16295597996864ad7636ffb
|
7a7b7372bf93464e17ca67839e1df7e2e246e4a3
|
refs/heads/master
| 2020-03-27T02:05:09.545088
| 2018-08-22T21:25:27
| 2018-08-22T21:30:45
| 145,765,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,163
|
py
|
"""
Django settings for django_rest project.
Generated by 'django-admin startproject' using Django 2.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'caknv#57*#jz8+8rbbty3-q)ey2ac^teh*)tr5e@7bb&@gsq#s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['0.0.0.0']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'users.apps.UsersConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_rest.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_rest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"mvillafuerte@fintonic.com"
] |
mvillafuerte@fintonic.com
|
3666c1888a87781170d032e3bf6e304c40a08f4d
|
21dc9ae50715f126a28d7670dfceeedbec60d1f3
|
/knowlarity.py
|
505ca794c4b4eca16abc9a299782d3d4a5723458
|
[] |
no_license
|
bhishan/knowlaritysignupscript
|
be39da4d3857b63503be6361c7540020f4546fdc
|
e503736c8ddba181bd2969648234989184d4ca83
|
refs/heads/master
| 2016-08-11T21:14:20.424562
| 2015-12-01T08:39:04
| 2015-12-01T08:39:04
| 47,176,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
browser = webdriver.Firefox()
browser.get("http://knowlarity.com")
sign_up_submit = browser.find_elements_by_xpath("//*[contains(text(),'Try Free-for-Life')]")
sign_up_submit[4].click()
name = browser.find_element_by_name("contact-name")
name.send_keys("Bhishan Bhandari")
email = browser.find_element_by_name("contact-email")
email.send_keys("bbhishan@gmail.com")
phone = browser.find_element_by_name("contact-phone")
phone.send_keys("9849060230")
phone.send_keys(Keys.RETURN)
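# Hardening sketch (not in the original script): the clicks above assume the
# form is already rendered; an explicit wait is more robust. Selector names
# are the same ones used above.
#
#     from selenium.webdriver.common.by import By
#     from selenium.webdriver.support.ui import WebDriverWait
#     from selenium.webdriver.support import expected_conditions as EC
#     name = WebDriverWait(browser, 10).until(
#         EC.presence_of_element_located((By.NAME, "contact-name")))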
|
[
"bhishan_0306@deerwalk.edu.np"
] |
bhishan_0306@deerwalk.edu.np
|
ceaf8b6be3564067ae5486c614255e73564c1f27
|
f54b6e5a4b3282ef24bb2c7a7687a97b3d663c06
|
/get_tweets/allt.py
|
40bf15421a8edd0c1291459ec0cc9255031d7d87
|
[] |
no_license
|
DristiAI/twitterSNA
|
5a56065fa3721a54465c95373a6e1dd57bc1f8dd
|
e5ea345ab9e8507ebfb96d8d5ced5b399feb06ca
|
refs/heads/master
| 2020-03-18T19:19:12.110668
| 2018-05-28T10:56:07
| 2018-05-28T10:56:07
| 135,147,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
import tweepy
import csv
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_TOKEN = ''
ACCESS_SECRET = ''
def get_all_tweets(username,count):
auth=tweepy.OAuthHandler(CONSUMER_KEY,CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN,ACCESS_SECRET)
api=tweepy.API(auth)
    tweets=[]
    new_tweets=api.user_timeline(screen_name=username,count=200)
    tweets.extend(new_tweets)
    if not tweets:  # user has no tweets; avoid IndexError below
        return []
    leastrecent=tweets[-1].id -1
while(len(new_tweets)>0):
new_tweets=api.user_timeline(screen_name=username,count=200,max_id=leastrecent)
tweets.extend(new_tweets)
leastrecent=tweets[-1].id-1
totaltweets=len(tweets)
if totaltweets>count:
break
return [[tweet.id_str, tweet.created_at, tweet.text.encode("utf-8"),tweet.retweet_count] for tweet in tweets]
def convert_totxt(tweets):
    # the with-blocks close the files automatically; no explicit close() needed
    with open('newtweet.txt','w') as f:
        f.write('tweets\n')
        for i in tweets:
            f.write(str(i[2],'utf-8'))
            f.write('\n')
    with open('newtweetslabels.txt','w') as f:
        f.write('retweets\n')
        for i in tweets:
            f.write(str(i[3]))
            f.write('\n')
if __name__ =='__main__':
t=get_all_tweets("realDonaldTrump",1500)
convert_totxt(t)
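# Alternative sketch (assuming tweepy's Cursor API): Cursor does the max_id
# bookkeeping that get_all_tweets() implements by hand above:
#
#     for tweet in tweepy.Cursor(api.user_timeline,
#                                screen_name=username, count=200).items(1500):
#         process(tweet)  # process() is a hypothetical callback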
|
[
"aidris@localhost.localdomain"
] |
aidris@localhost.localdomain
|
01e32ca4177f55e36ea16c44fe160745cb49c5d6
|
8e291f094a1072a5ec470bdf1498d646afb315a3
|
/PlotNeuralNet/pyexamples/test_simple.py
|
e3155e2edd86e5957d65998677248e8cb8eb922f
|
[] |
no_license
|
qihangwang/PlotNeuralNet
|
9712426608df3bb0239ddf3ec015fdeb2904002a
|
34bf5df88bbba78017ebe8f9957445e81cb54a86
|
refs/heads/master
| 2022-01-23T09:17:46.055332
| 2019-06-21T14:30:32
| 2019-06-21T14:30:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,237
|
py
|
import sys
sys.path.append('../')  # add the custom library's directory to the path
from pycore.tikzeng import *  # import the custom library
# define your arch
arch = [
    # add the header
    to_head( '..' ),
    to_cor(),
    to_begin(),
    # add convolution layer conv1
    to_Conv("conv1", 512, 64, offset="(0,0,0)", to="(0,0,0)", height=64, depth=64, width=2, caption="conv1"),
    # add pooling layer pool1 on the east side of conv1
    to_Pool("pool1", offset="(0,0,0)", to="(conv1-east)", caption="pool1"),
    # add convolution layer conv2 on the east side of pool1
    to_Conv("conv2", 128, 64, offset="(1,0,0)", to="(pool1-east)", height=32, depth=32, width=2, caption="conv2"),
    # draw the connection arrow from pool1 to conv2
    to_connection( "pool1", "conv2"),
    # add pool2 on the east side of conv2
    to_Pool("pool2", offset="(0,0,0)", to="(conv2-east)", height=28, depth=28, width=1, caption="pool2"),
    # add a softmax layer east of pool1, offset by 3 units
    to_SoftMax("soft1", 10 ,"(3,0,0)", "(pool1-east)", caption="softmax"),
    # draw the connection arrow from pool2 to soft1
    to_connection("pool2", "soft1"),
    # end
    to_end()
]
def main():
namefile = str(sys.argv[0]).split('.')[0]
to_generate(arch, namefile + '.tex' )
if __name__ == '__main__':
main()
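# Usage note (assumed workflow): running this script writes test_simple.tex
# next to it; PlotNeuralNet diagrams are then compiled with pdflatex (the
# repo ships a tikzmake.sh helper for this) to produce the figure PDF.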
|
[
"1695735420@qq.com"
] |
1695735420@qq.com
|
567bdd3484ea87d2fef778157ecdc6551466457f
|
ad1c7e72779a34ea3d99cd392b3875acf84fc15f
|
/gae/main.py
|
6cebce846d34ee0d5c0f8cb92bdb750424478b37
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
nelsondaza/sndlatr
|
68871ca8e0398238eff840b1156118d726facac8
|
eeaa9f7d4afb88ff813f0acf6ea47b24b78d2d65
|
refs/heads/master
| 2021-01-16T19:19:49.649219
| 2016-06-29T12:21:20
| 2016-06-29T12:21:20
| 62,527,053
| 0
| 0
|
Apache-2.0
| 2020-09-12T03:22:37
| 2016-07-04T02:53:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
import os
import json
import webapp2
from webapp2 import Route
import oauth2client.appengine
from sndlatr import api, auth
def get_client_id():
path = os.path.join(os.path.dirname(__file__), 'client_secrets.json')
with open(path) as fd:
data = json.load(fd)
return data.values()[0]['client_id']
config = {'idtoken_audience': get_client_id()}
app = webapp2.WSGIApplication(
[Route('/api/init', api.InitializeHanlder),
Route('/api/schedule/<id>', api.ScheduleSendHandler),
Route('/api/schedule', api.ScheduleSendHandler),
Route('/api/snippet/<id>', api.SnippetHandler),
Route('/api/snippet', api.SnippetHandler),
Route('/api/remind/<id>', api.ScheduleRemindHandler),
Route('/api/remind', api.ScheduleRemindHandler),
Route('/api/remind/<id>/check_reply', api.ScheduleCheckReplyHandler),
('/api/tasks/enqueue_scheduled', api.QueueJobHandler),
('/api/tasks/send', api.SendHandler),
('/api/tasks/remind', api.RemindHandler),
('/api/tasks/check_reply', api.CheckReplyHandler),
# ('/api/signout', LogoutHandler),
], debug=False, config=config)
|
[
"thembrown@gmail.com"
] |
thembrown@gmail.com
|
3a97bc46d4e8a7af8ac36a679c16db88cac1dd88
|
cdafb14fe1f4e334960c91b2f53bc7e48b40a93c
|
/calculator/cal02.py
|
e0ae21931d7c72cbeaa669bc3388da8106a68ae2
|
[] |
no_license
|
alireza-E/alireza-
|
b15801d8c194fd82990f602d2759d2b67f1d5ec6
|
b577ff8a6d81672ce9126e1bdd4ee603458f3207
|
refs/heads/master
| 2022-12-24T05:41:12.098687
| 2020-10-04T13:40:30
| 2020-10-04T13:40:30
| 301,139,516
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
from term4.calculator.cal01 import cal
calcute = cal(10, 20)
print(calcute.mul())
print(calcute.div())
print(calcute.plus())
print(calcute.neg())
|
[
"Rominash@gmail.com"
] |
Rominash@gmail.com
|
15096b8ea2606442330ff23af507e35d1d7adc1f
|
dc9642c9b73b8f29f7f322f48ce81973a52d7f08
|
/Simple Chatty Bot/task/bot/bot.py
|
92e022e539261379217cc31f1bd5699dd58a0f7e
|
[] |
no_license
|
merlin2181/Chatty-Bot
|
4f61be462f57ff8ba006c9c4c76c249ba6f1a839
|
63fa28c787034fed29a981cae5c7426228d6a042
|
refs/heads/master
| 2022-11-10T17:00:01.940505
| 2020-06-28T05:29:32
| 2020-06-28T05:29:32
| 275,513,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,415
|
py
|
def greet(bot_name, birth_year):
print('Hello! My name is ' + bot_name + '.')
print('I was created in ' + birth_year + '.')
def remind_name():
print('Please, remind me your name.')
name = input()
print('What a great name you have, ' + name + '!')
def guess_age():
print('Let me guess your age.')
print('Enter remainders of dividing your age by 3, 5 and 7.')
rem3 = int(input())
rem5 = int(input())
rem7 = int(input())
age = (rem3 * 70 + rem5 * 21 + rem7 * 15) % 105
print("Your age is " + str(age) + "; that's a good time to start programming!")
def count():
print('Now I will prove to you that I can count to any number you want.')
num = int(input())
curr = 0
while curr <= num:
print(curr, '!')
curr = curr + 1
def test():
print("Let's test your programming knowledge.")
# write your code here
print("""When does the body of a function end?
1. It ends after the keyword 'def'
2. It ends when the function is called
3. It ends with the lack of indentation
4. It ends with 'return' statement""")
ans = input()
while ans != '3':
print("Please, try again.")
ans = input()
print('Completed, have a nice day!')
def end():
print('Congratulations, have a nice day!')
greet('Aid', '2020') # change it as you need
remind_name()
guess_age()
count()
# ...
test()
end()
|
[
"merlin2181@gmail.com"
] |
merlin2181@gmail.com
|
cef3363d820485112bac11a7906824633122136f
|
36130e4eb6c343a66906ef6930d30a0ba23ebf71
|
/cgmaptools
|
2e4ad8ac281b92b92795bbc21579cb150f64550a
|
[] |
no_license
|
guoweilong/cgmaptools
|
d05b32694d0d0b98173a00058766a52933227fea
|
9e6617ddd9029f66d63bbf41695534271d460d91
|
refs/heads/master
| 2022-08-21T22:30:29.166941
| 2022-07-25T03:55:57
| 2022-07-25T03:55:57
| 79,823,059
| 59
| 29
| null | 2019-06-19T13:58:09
| 2017-01-23T16:29:17
|
C
|
UTF-8
|
Python
| false
| false
| 12,433
|
#!/usr/bin/env python
"""
cgmaptools
Copyright (C) Weilong Guo & Ping Zhu
Contact: Weilong Guo <guoweilong@126.com>
Ping Zhu <pingzhu.work@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import sys, os
#os.system(sys.argv[0])
#print sys.argv[0]
#print sys.argv[1]
#print sys.argv[-1]
DIR=os.path.dirname(sys.argv[0])
#print DIR
import subprocess
#subprocess.call(["ls", "-l", "/etc/resolv.conf"])
argv_len = len(sys.argv)
def PrintVersion() :
print("Version: 0.1.2")
print("Updated on: Dec. 14th, 2018")
#
if (argv_len) == 1 or sys.argv[1] in ["-h", "-H", "--help"]:
print("Program : cgmaptools (Tools for analysis in CGmap/ATCGmap format)")
PrintVersion()
print("Usage: cgmaptools <command> [options]")
print("Commands:")
print(" -- File manipulation")
print(" convert + data format conversion tools")
print(" fetch + fetch a region by random accessing")
print(" refill refill the missing columns")
print(" intersect intersect two files")
print(" merge2 + merge two files into one")
print(" mergelist + merge a list of files")
print(" sort sort lines by chromosome and position")
print(" split + split file by chromosomes")
print(" select + select lines by region/site")
print(" -- SNV analysis")
print(" snv snv analysis")
print(" -- Methylation analysis")
print(" dms differentially methylated site analysis")
print(" dmr differentially methylated region analysis")
print(" asm allele-specific methylation analysis")
print(" mbed average methylation level in regions")
print(" mbin * single sample, mC levels in bins")
print(" mmbin multiple samples, mC levels in bins")
print(" mfg methlation levels across fragmented region")
print(" mstat * methyaltion statistic")
print(" mtr methylation level to each region")
print(" -- Coverage analysis")
print(" oac +* overall coverage (for ATCGmap)")
print(" mec +* methylation effective coverage (for CGmap)")
print(" -- Graph related functions")
print(" lollipop * show local mC levels as lollipop bars")
print(" heatmap * global mC distribution for multiple samples")
print(" fragreg * show mC profile across fragmented regions")
print(" tanghulu * show local mapped reads in Tanghulu shape")
print(" -- Other Utils")
print(" findCCGG get MspI cutting sites for RRBS")
print(" bed2fragreg get fragmented region based on region")
print("Note: ")
print(" Commands support figures generation are marked with \"*\" ")
print(" Commands contain sub-commands are marked with \"+\" ")
print("Authors:")
print(" GUO, Weilong; guoweilong@126.com; http://guoweilong.github.io")
print(" ZHU, Ping; pingzhu.work@gmail.com; http://perry-zhu.github.io")
print("")
else :
code1 = sys.argv[1]
# -- File manipulation
if code1 == "convert" :
if (argv_len) == 2 or sys.argv[2] in ["-h", "-H", "--help"]:
print("Usage: cgmaptools convert <command> [options]")
PrintVersion()
print("Commands:")
print(" bam2cgmap BAM => CGmap & ATCGmap")
print(" atcgmap2atcgbz ATCGmap => ATCGbz")
print(" atcgbz2atcgmap ATCGbz => ATCGmap")
print(" atcgmap2cgmap ATCGmap => CGmap")
print(" cgmap2cgbz CGamp => CGbz")
print(" cgbz2cgmap CGbz => CGmap")
print(" cgmap2wig CGmap => WIG")
print(" bismark2cgmap Bismark => CGmap")
else :
code2 = sys.argv[2]
if code2 == "bam2cgmap" :
subprocess.call([DIR + "/bin/CGmapFromBAM"]+ sys.argv[3:])
elif code2 == "cgmap2wig" :
subprocess.call([DIR + "/bin/CGmapToWig"]+ sys.argv[3:])
elif code2 == "atcgbz2atcgmap" :
subprocess.call([DIR + "/bin/ATCGbzToATCGmap"]+ sys.argv[3:])
elif code2 == "atcgmap2atcgbz" :
subprocess.call([DIR + "/bin/ATCGmapToATCGbz"]+ sys.argv[3:])
elif code2 == "cgbz2cgmap" :
subprocess.call([DIR + "/bin/CGbzToCGmap"]+ sys.argv[3:])
elif code2 == "cgmap2cgbz" :
subprocess.call([DIR + "/bin/CGmapToCGbz"]+ sys.argv[3:])
elif code2 == "atcgmap2cgmap" :
subprocess.call([DIR + "/bin/ATCGmapToCGmapWig"]+ sys.argv[3:])
elif code2 == "bismark2cgmap" :
subprocess.call([DIR + "/bin/BismarkToCGmap"]+ sys.argv[3:])
else :
print("Wrong parameter. Enter \"cgmaptools convert -h\" for more information.")
#
#
elif code1 == "fetch" :
if (argv_len) == 2 or sys.argv[2] in ["-h", "-H", "--help"]:
print("Usage: cgmaptools fetch <command> [options]")
PrintVersion()
print("Commands:")
print(" atcgbz fetch lines from ATCGbz")
print(" cgbz fetch lines from CGbz")
else :
code2 = sys.argv[2]
if code2 == "atcgbz" :
subprocess.call([DIR + "/bin/ATCGbzFetchRegion"]+ sys.argv[3:])
elif code2 == "cgbz" :
subprocess.call([DIR + "/bin/CGbzFetchRegion"]+ sys.argv[3:])
else :
print("Wrong parameter. Enter \"cgmaptools fetch -h\" for more information.")
#
#
elif code1 == "refill" :
subprocess.call([DIR + "/bin/CGmapFillContext"]+ sys.argv[2:])
elif code1 == "intersect" :
subprocess.call([DIR + "/bin/CGmapIntersect"]+ sys.argv[2:])
elif code1 == "merge2" :
if (argv_len) == 2 or sys.argv[2] in ["-h", "-H", "--help"]:
print("Usage: cgmaptools merge2 <command> [options]")
PrintVersion()
print("Commands:")
print(" atcgmap merge two ATCGmap files into one")
print(" cgmap merge two CGmap files into one")
else :
code2 = sys.argv[2]
if code2 == "atcgmap" :
subprocess.call([DIR + "/bin/ATCGmapMerge"]+ sys.argv[3:])
elif code2 == "cgmap" :
subprocess.call([DIR + "/bin/CGmapMerge"]+ sys.argv[3:])
else :
print("Wrong parameter. Enter \"cgmaptools merge2 -h\" for more information.")
#
#
elif code1 == "mergelist" :
if (argv_len) == 2 or sys.argv[2] in ["-h", "-H", "--help"]:
print("Usage: cgmaptools mergelist <command> [options]")
PrintVersion()
print("Commands:")
print(" tomatrix mC levels matrix from multiple files")
print(" tosingle merge list of input files into one")
else :
code2 = sys.argv[2]
if code2 == "tomatrix" :
subprocess.call([DIR + "/bin/CGmapFillIndex"]+ sys.argv[3:])
elif code2 == "tosingle" :
subprocess.call([DIR + "/bin/MergeListOfCGmap"]+ sys.argv[3:])
else :
print("Wrong parameter. Enter \"cgmaptools mergelist -h\" for more information.")
#
#
elif code1 == "sort" :
subprocess.call([DIR + "/bin/Sort_chr_pos"]+ sys.argv[2:])
elif code1 == "split" :
subprocess.call([DIR + "/bin/CGmapSplitByChr"]+ sys.argv[2:])
elif code1 == "select" :
if (argv_len) == 2 or sys.argv[2] in ["-h", "-H", "--help"]:
print("Usage: cgmaptools select <command> [options]")
PrintVersion()
print("Commands:")
print(" region select or exclude liens by region lists")
print(" site select or exclude lines by site list")
else :
code2 = sys.argv[2]
if code2 == "region" :
subprocess.call([DIR + "/bin/CGmapSelectByRegion"]+ sys.argv[3:])
elif code2 == "site" :
subprocess.call([DIR + "/bin/CGmapSelectBySite"]+ sys.argv[3:])
else :
print("Wrong parameter. Enter \"cgmaptools select -h\" for more information.")
#
#
# -- SNV analysis
elif code1 == "snv" :
subprocess.call([DIR + "/bin/SNVFromATCGmap"]+ sys.argv[2:])
# -- Methylation analysis
elif code1 == "dms" :
subprocess.call([DIR + "/bin/CGmapInterDiffSite"]+ sys.argv[2:])
elif code1 == "dmr" :
subprocess.call([DIR + "/bin/CGmapInterDiffReg"]+ sys.argv[2:])
elif code1 == "asm" :
subprocess.call([DIR + "/bin/ASM"]+ sys.argv[2:])
elif code1 == "mbed" :
subprocess.call([DIR + "/bin/CGmapMethInBed"]+ sys.argv[2:])
elif code1 == "mbin" :
subprocess.call([DIR + "/bin/CGmapMethInBins"]+ sys.argv[2:])
elif code1 == "mmbin" :
subprocess.call([DIR + "/bin/CGmapsMethInBins"]+ sys.argv[2:])
elif code1 == "mfg" :
subprocess.call([DIR + "/bin/CGmapMethInFragReg"]+ sys.argv[2:])
elif code1 == "mstat" :
subprocess.call([DIR + "/bin/CGmapStatMeth"]+ sys.argv[2:])
elif code1 == "mtr" :
subprocess.call([DIR + "/bin/CGmapToRegion"]+ sys.argv[2:])
# -- Coverage analysis
elif code1 == "oac" :
if (argv_len) == 2 or sys.argv[2] in ["-h", "-H", "--help"]:
print("Usage: cgmaptools oac <command> [options]")
PrintVersion()
print("Commands:")
print(" bin * overall coverage in bins")
print(" stat * overall coverage statistics globally")
else :
code2 = sys.argv[2]
if code2 == "bin" :
subprocess.call([DIR + "/bin/ATCGmapCovInBins"]+ sys.argv[3:])
elif code2 == "stat" :
subprocess.call([DIR + "/bin/ATCGmapStatCov"]+ sys.argv[3:])
else :
print("Wrong parameter. Enter \"cgmaptools oac -h\" for more information.")
#
#
elif code1 == "mec" :
if (argv_len) == 2 or sys.argv[2] in ["-h", "-H", "--help"]:
print("Usage: cgmaptools mec <command> [options]")
PrintVersion()
print("Commands:")
print(" bin * methylation effective coverage in bins")
print(" stat * methylation effective coverage statistics globally")
else :
code2 = sys.argv[2]
if code2 == "bin" :
subprocess.call([DIR + "/bin/CGmapCovInBins"]+ sys.argv[3:])
elif code2 == "stat" :
subprocess.call([DIR + "/bin/CGmapStatCov"]+ sys.argv[3:])
else :
print("Wrong parameter. Enter \"cgmaptools mec -h\" for more information.")
#
#
# -- Graph related functions
elif code1 == "lollipop" :
subprocess.call([DIR + "/bin/mCLollipop"]+ sys.argv[2:])
elif code1 == "heatmap" :
subprocess.call([DIR + "/bin/mCBinHeatmap"]+ sys.argv[2:])
elif code1 == "fragreg" :
subprocess.call([DIR + "/bin/mCFragRegView"]+ sys.argv[2:])
elif code1 == "tanghulu" :
subprocess.call([DIR + "/bin/mCTanghulu"]+ sys.argv[2:])
# -- other utilities
elif code1 == "findCCGG" :
subprocess.call([DIR + "/bin/FindCCGG"]+ sys.argv[2:])
elif code1 == "combinestrands" :
subprocess.call([DIR + "/bin/CGmapCombineStrands"]+ sys.argv[2:])
elif code1 == "bed2fragreg" :
subprocess.call([DIR + "/bin/FragRegFromBED"]+ sys.argv[2:])
else :
print("Wrong parameter. Enter \"cgmaptools -h\" for more information.")
#
#
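# Invocation sketch: each (sub-)command dispatches to a binary under ./bin,
# e.g. (options depend on the individual tools):
#     cgmaptools convert bam2cgmap [options]   -> bin/CGmapFromBAM
#     cgmaptools mec stat [options]            -> bin/CGmapStatCov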
|
[
"791116341@qq.com"
] |
791116341@qq.com
|
|
823a712c59c662ec4f9992f7f89e7f4223da9a66
|
75080b5a153c7493a04a05b5713aad629799a389
|
/optimizor.py
|
0cbf7ae6fa4a3539df75252665a894ac6bce95cc
|
[] |
no_license
|
chenjiaxiang/statlearn
|
e26a2bbd478d8ff39cd9f6f2a6efe761e70c45fe
|
7c262e0f267d4226067e1428a383deab58ffcfaf
|
refs/heads/master
| 2021-07-02T09:04:47.113672
| 2017-09-23T12:56:59
| 2017-09-23T12:56:59
| 103,604,320
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
import numpy as np
import scipy
from scipy.optimize import minimize
def func(x,sign=1.0):
return sign*(10-x[0]**2-x[1]**2)
def func_deriv(x,sign=1.0):
deriv_1 = sign * (-2*x[0])
deriv_2 = sign * (-2*x[1])
return np.array([deriv_1,deriv_2])
cons = ({'type':'eq',
'fun':lambda x:np.array([x[0]+x[1]]),
'jac':lambda x:np.array([1.0,1.0])},
{'type':'ineq',
'fun':lambda x:np.array([-(x[0]**2)+x[1]]),
'jac':lambda x:np.array([-2.0*x[0],1.0])})
res = minimize(func,[-1.0,1.0],args=(1.0,),jac=func_deriv,constraints=cons,method='SLSQP',options={'disp':True})
print(res.x)
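# The problem above in standard form (derivation, not in the original file):
# minimize 10 - x0**2 - x1**2 subject to x0 + x1 = 0 and x1 - x0**2 >= 0.
# Substituting x1 = -x0 into the inequality gives -x0**2 - x0 >= 0, i.e.
# -1 <= x0 <= 0; the objective 10 - 2*x0**2 is smallest at x0 = -1, so
# res.x should come out near [-1, 1] with objective value 8.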
|
[
"noreply@github.com"
] |
chenjiaxiang.noreply@github.com
|
a4a793650c2b248fc1011d9998c9c2335c39e36a
|
f03e695b5fd6d8e4f38a54bd38751120d9b6c6d0
|
/datatype.py
|
e9ae26b8bf2d9b9fa1073ea5cf43bd82a10eb619
|
[] |
no_license
|
littlegogo/doccrawler
|
b118f04bf9f81763bb01a0fd40d4db92b4ca7a8b
|
e27f121b35fa392e067b931476addf48c96c1c62
|
refs/heads/master
| 2022-04-19T18:33:37.130635
| 2020-04-15T13:16:25
| 2020-04-15T13:16:25
| 255,919,881
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,973
|
py
|
class DataType:
    """
    Describes the member information of a class or struct type.
    """
    def __init__(self, name, desc, varinfo, funinfo, typedefs, enums):
        """
        Build the description of a data type.
        :param name: name of the data type (class name or struct name)
        :param desc: description of the data type (class or struct)
        :param varinfo: member-variable info; a list whose elements are, in order,
                        the lists describing public, protected and private members
        :param funinfo: member-function info; a list whose elements are, in order,
                        the lists describing public, protected and private methods
        :param typedefs: list of typedef type aliases
        :param enums: list of enums defined inside the type
        """
        self.name = name
        self.desc = desc
        self.public_var_list = varinfo[0]
        self.protected_var_list = varinfo[1]
        self.private_var_list = varinfo[2]
        self.public_fun_list = funinfo[0]
        self.protected_fun_list = funinfo[1]
        self.private_fun_list = funinfo[2]
        self.typedef_list = typedefs
        self.enum_list = enums
    def __str__(self):
        """
        Print the information of the data type this object represents.
        :return: an empty string (the fields are printed as a side effect)
        """
        print('Type name:', self.name)
        print('Type description:', self.desc)
        print('Public attributes:')
        print('    ', self.public_var_list)
        print('Protected attributes:')
        print('    ', self.protected_var_list)
        print('Private attributes:')
        print('    ', self.private_var_list)
        print('public methods:')
        print('    ', self.public_fun_list)
        print('protected methods:')
        print('    ', self.protected_fun_list)
        print('private methods:')
        print('    ', self.private_fun_list)
        print('Type aliases (typedef):')
        print('    ', self.typedef_list)
        print('Enum definitions:')
        print('    ', self.enum_list)
        return ''
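# Usage sketch (hypothetical values, not part of the original module):
if __name__ == '__main__':
    info = DataType(
        name='Point', desc='a 2-D point',
        varinfo=[['x', 'y'], [], []],    # public / protected / private variables
        funinfo=[['norm()'], [], []],    # public / protected / private methods
        typedefs=['typedef double coord_t;'],
        enums=[])
    print(info)  # __str__ prints the fields and returns ''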
|
[
"1605436974@qq.com"
] |
1605436974@qq.com
|
f4ef95888f019546f1c9effa5878a6f1438da8c3
|
4937e6e53384ad779cf9d73a0db7f2875e8afef2
|
/Network Service/Processing Logic/queue_operation/python_queue.py
|
895f61b2c941cb56e0c6557c065bc03406c73fa5
|
[] |
no_license
|
panxie0906/prepare615
|
7b64547ed724d71ca0bc179e1effbfa5ffe85160
|
9d1a7283fee9b42d6144281d8e723576002b0a3c
|
refs/heads/master
| 2020-12-02T07:47:04.167303
| 2017-08-31T14:20:32
| 2017-08-31T14:20:32
| 96,726,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 653
|
py
|
#-*- coding:utf-8 -*-
import queue
'''
The queue module provides three constructors:
    queue.Queue(maxsize)          FIFO queue (first in, first out)
    queue.LifoQueue(maxsize)      LIFO queue (last in, first out)
    queue.PriorityQueue(maxsize)  priority queue: lowest priority value first
'''
def queue_op():
    # exceeding maxsize gives surprising behaviour: put() blocks by default
    # maxsize < 1 means the queue is unbounded
    # a ring/circular queue has to be implemented yourself
    myqueue = queue.Queue(3)
    myqueue.put(10)
    myqueue.put('hhhhhh')
    myqueue.put(3)
    while not myqueue.empty():
        print(myqueue.get())
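# Sketch (not in the original file): the other two queue types from the
# docstring above. PriorityQueue returns the smallest item first.
def other_queues_demo():
    lifo = queue.LifoQueue()
    for i in (1, 2, 3):
        lifo.put(i)
    print([lifo.get() for _ in range(3)])  # [3, 2, 1]
    pq = queue.PriorityQueue()
    for item in ((2, 'b'), (1, 'a'), (3, 'c')):
        pq.put(item)
    print([pq.get() for _ in range(3)])    # [(1, 'a'), (2, 'b'), (3, 'c')]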
if __name__ == '__main__':
queue_op()
|
[
"406551408@qq.com"
] |
406551408@qq.com
|
1121f6d1df4336faf95572947631d438cb815965
|
8921e3e95c354ffb58989692a2c460b9133da09f
|
/blog/migrations/0001_initial.py
|
71a438646c87d7ef85369d72bbd60fcd2d5bf97e
|
[] |
no_license
|
zhangliutian/simpleblog
|
f8fbf0be1174b60cdcc63291c94dbb7b695abead
|
c1ecb05edff4fde15dd636f8dbac51c6d881f3f9
|
refs/heads/master
| 2021-09-02T19:24:58.989845
| 2018-01-03T19:28:55
| 2018-01-03T19:28:55
| 115,597,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,046
|
py
|
# Generated by Django 2.0 on 2017-12-28 07:10
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=datetime.datetime(2017, 12, 28, 7, 10, 23, 923975, tzinfo=utc))),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"521348888@qq.com"
] |
521348888@qq.com
|
8fdf88c808741fc5ec9b1b3e6f8a3e2a28698f9a
|
a25acab883494fa90cccc7255cac67251b40a21d
|
/models/base/backbone/pvt.py
|
0f83ce20a3df9c84f6eaba65ae4b21c0f1b8dda5
|
[] |
no_license
|
csliuchang/PupaDetector
|
cd8d85ca0cdb236dae28b82cdac144e17ce8f76f
|
b88dfdfd3f52e1df7cd44b5e1d7086acbe1ec046
|
refs/heads/master
| 2023-08-12T13:05:19.796420
| 2021-09-17T08:54:28
| 2021-09-17T08:54:28
| 397,140,426
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,713
|
py
|
import torch
import torch.nn as nn
import math
from functools import partial
from models.builder import BACKBONES
import warnings
import collections.abc
from itertools import repeat
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_2tuple = _ntuple(2)
class PyramidVisionTransformerImpr(nn.Module):
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dims=[64, 128, 256, 512],
num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0.,
attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1]):
super().__init__()
self.num_classes = num_classes
self.depths = depths
# patch_embed
self.patch_embed1 = OverlapPatchEmbed(img_size=img_size, patch_size=7, stride=4, in_chans=in_chans,
embed_dim=embed_dims[0])
self.patch_embed2 = OverlapPatchEmbed(img_size=img_size // 4, patch_size=3, stride=2, in_chans=embed_dims[0],
embed_dim=embed_dims[1])
self.patch_embed3 = OverlapPatchEmbed(img_size=img_size // 8, patch_size=3, stride=2, in_chans=embed_dims[1],
embed_dim=embed_dims[2])
self.patch_embed4 = OverlapPatchEmbed(img_size=img_size // 16, patch_size=3, stride=2, in_chans=embed_dims[2],
embed_dim=embed_dims[3])
# transformer encoder
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
cur = 0
self.block1 = nn.ModuleList([Block(
dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[0])
for i in range(depths[0])])
self.norm1 = norm_layer(embed_dims[0])
cur += depths[0]
self.block2 = nn.ModuleList([Block(
dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[1])
for i in range(depths[1])])
self.norm2 = norm_layer(embed_dims[1])
cur += depths[1]
self.block3 = nn.ModuleList([Block(
dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[2])
for i in range(depths[2])])
self.norm3 = norm_layer(embed_dims[2])
cur += depths[2]
self.block4 = nn.ModuleList([Block(
dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[3])
for i in range(depths[3])])
self.norm4 = norm_layer(embed_dims[3])
# classification head
# self.head = nn.Linear(embed_dims[3], num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = 1
#load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger)
def reset_drop_path(self, drop_path_rate):
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
cur = 0
for i in range(self.depths[0]):
self.block1[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[0]
for i in range(self.depths[1]):
self.block2[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[1]
for i in range(self.depths[2]):
self.block3[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[2]
for i in range(self.depths[3]):
self.block4[i].drop_path.drop_prob = dpr[cur + i]
def freeze_patch_emb(self):
self.patch_embed1.requires_grad = False
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed1', 'pos_embed2', 'pos_embed3', 'pos_embed4', 'cls_token'} # has pos_embed may be better
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
B = x.shape[0]
outs = []
# stage 1
x, H, W = self.patch_embed1(x)
for i, blk in enumerate(self.block1):
x = blk(x, H, W)
x = self.norm1(x)
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x)
# stage 2
x, H, W = self.patch_embed2(x)
for i, blk in enumerate(self.block2):
x = blk(x, H, W)
x = self.norm2(x)
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x)
# stage 3
x, H, W = self.patch_embed3(x)
for i, blk in enumerate(self.block3):
x = blk(x, H, W)
x = self.norm3(x)
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x)
# stage 4
x, H, W = self.patch_embed4(x)
for i, blk in enumerate(self.block4):
x = blk(x, H, W)
x = self.norm4(x)
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x)
return outs
# return x.mean(dim=1)
def forward(self, x):
x = self.forward_features(x)
# x = self.head(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
x = x + self.drop_path(self.attn(self.norm1(x), H, W))
x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.sr_ratio = sr_ratio
if sr_ratio > 1:
self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
self.norm = nn.LayerNorm(dim)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
B, N, C = x.shape
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
if self.sr_ratio > 1:
x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
x_ = self.norm(x_)
kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
else:
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class OverlapPatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=7, stride=4, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
self.num_patches = self.H * self.W
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride,
padding=(patch_size[0] // 2, patch_size[1] // 2))
self.norm = nn.LayerNorm(embed_dim)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
x = self.proj(x)
_, _, H, W = x.shape
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
return x, H, W
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.dwconv = DWConv(hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
x = self.fc1(x)
x = self.dwconv(x, H, W)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
def drop_path(x, drop_prob: float = 0., training: bool = False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
class DWConv(nn.Module):
def __init__(self, dim=768):
super(DWConv, self).__init__()
self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
def forward(self, x, H, W):
B, N, C = x.shape
x = x.transpose(1, 2).view(B, C, H, W)
x = self.dwconv(x)
x = x.flatten(2).transpose(1, 2)
return x
@BACKBONES.register_module()
class pvt_v2_b2(PyramidVisionTransformerImpr):
def __init__(self, **kwargs):
super(pvt_v2_b2, self).__init__(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1],
drop_rate=0.0, drop_path_rate=0.1)
@BACKBONES.register_module()
class pvt_v2_b0(PyramidVisionTransformerImpr):
def __init__(self, **kwargs):
super(pvt_v2_b0, self).__init__(
patch_size=4, embed_dims=[32, 64, 160, 256], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
drop_rate=0.0, drop_path_rate=0.1)
if __name__ == "__main__":
a = torch.randn(1, 3, 512, 512)
|
[
"598306303@qq.com"
] |
598306303@qq.com
|
50142bec6afbefb2d5673718f52fc2336d4a3425
|
e99b36eab69d98f0ed2ca0e1d2ec705778e8a2fd
|
/api.py
|
796a6745f9b1b2412e390c79655ba7c8e6c744bc
|
[] |
no_license
|
RV05/thermal-Image-processing
|
de863c2c88b704e2bfe1426f655ca149bc134c70
|
5f97ab1a5a088cbf7e0992756b98f8c309cdbee3
|
refs/heads/master
| 2022-11-17T19:18:01.696743
| 2020-07-25T14:09:11
| 2020-07-25T14:09:11
| 282,456,508
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
import pyrebase
# Firebase credentials were stripped from the public repo; fill in your own
# project's values from the Firebase console before running.
config = {
    "apiKey": "",
    "authDomain": "",
    "databaseURL": "",
    "projectId": "",
    "storageBucket": "",
    "messagingSenderId": "",
    "appId": "",
    "measurementId": ""
}
firebase=pyrebase.initialize_app(config)
storage = firebase.storage()
path_on_cloud="files/demo1.xlsx"
path_local = "demo1.xlsx"
storage.child(path_on_cloud).put(path_local)
|
[
"noreply@github.com"
] |
RV05.noreply@github.com
|
ad5591a734bd2d1525760f15420f88ba941c85bd
|
d8d79878598e75d7a21c6cdd244b3702732756ab
|
/lesson13/top_k.py
|
2ed8a6c720641e908e7b6e277d3d6f816708b806
|
[] |
no_license
|
MyFate-0517/AdminTempalte
|
dbebb0613b9182549f788a2dfb8dec1c8fcbbc71
|
f7a8c44e39698ff82a01afc8a744cbb4d92e71ad
|
refs/heads/master
| 2020-12-07T10:59:25.413726
| 2020-01-09T03:17:25
| 2020-01-09T03:17:25
| 232,708,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,068
|
py
|
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# returns the accuracy of the prediction results
def accuracy(output, target, topk=(1,)):
"""
:param output: [10,6]
:param target: [10]
:param topk: top_k acc
:return:
"""
maxk = max(topk)
batch_size = target.shape[0]
idx = tf.math.top_k(output, maxk).indices
idx = tf.transpose(idx, [1, 0])
target = tf.broadcast_to(target, idx.shape)
correct = tf.equal(idx, target)
result = []
for k in topk:
val_cor = tf.cast(tf.reshape(correct[:k], [-1]), dtype=tf.float32)
res = tf.reduce_sum(val_cor)
acc = float(res * (100.0 / batch_size))
result.append(acc)
return result
output = tf.random.normal([10, 6])
output = tf.math.softmax(output, axis=1)
target = tf.random.uniform([10], maxval=6, dtype=tf.int32)
print('prob:', output.numpy())
pred = tf.argmax(output, axis=1)
print('pred:', pred.numpy())
print('label:', target.numpy())
acc = accuracy(output, target, topk=(1, 2, 3, 4, 5, 6))
print('top_1-6_acc:', acc)
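# Worked example of the top-k idea: if the true label is 2 and the classes
# ranked by predicted probability are [5, 2, 0, 4, 1, 3], the top-1 guess is
# wrong, but the label sits inside the top 2, so the sample counts towards
# top-2 accuracy and every larger k.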
|
[
"68612123@qq.com"
] |
68612123@qq.com
|
c0343df09abcbc85ef321b86520d2a3a3a598787
|
c00dcd05a6cba10dc94ddb4b92c32efc4da17cb8
|
/mystie/mystie/urls.py
|
eed1de4ded21fb246009835c0c7adcb0bd6e1b1d
|
[] |
no_license
|
mickberber/djangoProto
|
3434331fd8db0267872f50efa916beecfee1058f
|
f4f3105e8121651ae541efb194933483997925cd
|
refs/heads/master
| 2021-01-01T05:18:18.039901
| 2016-05-11T01:43:36
| 2016-05-11T01:43:36
| 58,497,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
"""mystie URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^polls/', include('polls.urls')),
url(r'^admin/', admin.site.urls),
]
|
[
"mickberber@icloud.com"
] |
mickberber@icloud.com
|
842500ee3b4b3366bab6f1f5bc1c4c75d8750ab8
|
0f427e0edf7837f8213592580dca7edbc99997f3
|
/main.py
|
3ac2df6f7cc966b094ab1f7e9e675d03bb681209
|
[] |
no_license
|
davidmace/qa-research
|
a07b343ad7cf45b0569b7e0fea9da5a854adfafe
|
0a282d00a4f0c2889ff1d2eec54e72c579777ea5
|
refs/heads/master
| 2020-06-12T15:35:58.709583
| 2016-12-07T04:19:54
| 2016-12-07T04:19:54
| 75,798,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,152
|
py
|
# Notes:
# Lose 2.4% of questions because the entids are not in freebase-ents.txt
from pyspark import SparkContext, SparkConf
import operator
import re, string
from nltk import ngrams
from collections import defaultdict
import operator as op
import math
import numpy as np
import sys
from googleapiclient import discovery
import httplib2
from oauth2client.client import GoogleCredentials
import cPickle
import os
from nltk.stem.porter import *
########################################################################
### List and Dict Helpers
########################################################################
# +, {'a':1,'b':2}, {'a':1,'c':3} -> {'a':2,'b':2,'c':3}
def outer_join_dicts(f,d1,d2) :
d3 = {}
for k in set.union(set(d1.keys()),set(d2.keys())) :
d3[k] = f( d1[k] if k in d1 else 0 , d2[k] if k in d2 else 0 )
return d3
# x^2, {'a':2} -> {'a':4}
def map_values(f,d) :
return dict(map(lambda (k,v): (k, f(v)), d.iteritems()))
# [('a',1),('a',2),('b',1),('b',1)] -> {'a':[1,2],'b':[1,1]}
def group_pairs_by_first(pairs) :
d = defaultdict(list)
for pair in pairs :
d[pair[0]].append(pair[1])
return d
# [[[1,2],[3,4]],[[5,6]]] -> [[1,2],[3,4],[5,6]]
def flatten_one_layer(l) :
return [item for sublist in l for item in sublist]
# {'a':1,'b':2,'c':2} -> {'a':0.2,'b':0.4,'c':0.4}
def normalize_dict(d) :
sm = reduce(op.add,d.values())
d = map_values(lambda x: x/sm, d)
return d
# {'dog':0.2,'cat':0.1} + {'dog':0.3,'giraffe':0.1} -> 0.2*0.3 = 0.06 (only 'dog' is shared)
def dict_dotproduct(bag1,bag2) :
score = 0.0
for key in set.intersection(set(bag1.keys()),set(bag2.keys())) :
score += bag1[key]*bag2[key]
return score
########################################################################
### String Helpers
########################################################################
# 'I  go  there' -> 'I go there'
def remove_double_spaces(s) :
return ' '.join(filter(lambda x: x!='', s.split(' ')))
# 'I do have a dog' + 'do' + 'did' -> 'I did have a dog'
def replace_complete_word(s,word,replacement) :
return (' '+s+' ').replace(' '+word+' ',' '+replacement+' ')[1:-1]
########################################################################
### Vector Helpers
########################################################################
# ['dog','cat'],{'dog':2,'cat':4},6 -> [0,0,1,0,1,0]
def sparse_vector_to_dense_vector(l,key2id,length) :
vec = np.zeros(length)
for key in l :
if key not in key2id :
continue
vec[key2id[key]] = 1
return vec
# {'dog':0.2,'cat':0.3},{'dog':2,'cat':4},6 -> [0,0,0.2,0,0.3,0]
def bag_to_dense_vector(bag,key2id,length) :
vec = np.zeros(length)
for key in bag :
if key not in key2id :
continue
vec[key2id[key]] = bag[key]
return vec
def write_vector_as_csv(filename,vector) :
with open(filename,'w') as f :
for pt in vector :
f.write(str(pt)+'\n')
def write_2d_matrix_as_csv(filename, matrix) :
with open(filename,'w') as f :
for row in matrix :
s = ''
for val in row :
s += str(val)+','
f.write(s[:-1]+'\n')
# {'a':0.2,'b':0.3}, 0.25 -> {'b':0.3}
def threshold_bag(d,thresh) :
d2 = {}
for key in d :
if d[key] > thresh :
d2[key] = d[key]
return d2
# {'a':1,'b':2} + ['a','c'] -> {'a':2,'b':2,'c':1}
def add_list_to_count_dict(d,l) :
for x in l :
if x not in d :
d[x] = 0
d[x] += 1
# ['a','b','c'] + 1 -> {'a':1,'b':2,'c':3}
def list_to_id_map(l,start_id) :
d = {}
i = start_id
for x in l :
d[x] = i
i += 1
return d
########################################################################
### Load Word Resources (ie. word frequencies and filter words)
########################################################################
# global_word_log_cts: {'the':24.12,'cat':2.33,...}
# filter_words: set('the','a',...)
def load_word_resources() :
with open('global_word_cts.txt','r') as f :
lines = f.read().split('\n')
global_word_log_cts = defaultdict(float)
for line in lines :
parts = line.split()
global_word_log_cts[parts[0]] = math.log(int(parts[1]))
with open('filter-words-100.txt','r') as f :
filter_words = set(f.read().split('\n'))
stemmer = PorterStemmer()
return (global_word_log_cts, filter_words, stemmer)
########################################################################
### Store entity id to name mapping
########################################################################
# 'David freebase-entity <http://rdf.freebase.com/ns/m/067sbmt> .' -> ('/067sbmt','david')
def make_uid_name_pair(line) :
name = line[:line.find('\t')]
# get rid of (...) in line because not part of entity name
if '(' in name :
name = name[:name.find('(')]
name = re.sub('[^a-zA-Z0-9\' \-]+', '', name).lower().strip()
uid = line[line.rfind('/'):line.rfind('>')]
# if uid was parsed incorrectly then throw it out
if ' ' in uid or len(uid)>10 :
uid = ''
name = ''
return (uid,name)
# Write id->name mappings from raw freebase ents file
def process_entity_file(sc) :
uid_name_pairs = sc.textFile("freebase-ents.txt").map(make_uid_name_pair) # [('1':'a'),('2','b'),...]
unique_uid_name_pairs = uid_name_pairs.reduceByKey(lambda a,b: a) # get rid of duplicate ids
unique_uid_name_pairs.coalesce(1).saveAsSequenceFile("entid2name") # condense to single file
###########################################################################
### Make list of all entity ids that we need to exact match
###########################################################################
# line: www.freebase.com/m/03_7vl www.freebase.com/people/person/profession www.freebase.com/m/09jwl
# returns ['/067sbmt','/027vb3h',...]
def get_all_ids(line) :
parts = line.split('\t')
uid1 = parts[0]
uid1 = uid1[uid1.rfind('/'):]
l = []
l.append(uid1)
# can be multiple objects in relationship description so get all of them
for i in range(2,len(parts)) :
uid2 = parts[i]
uid2 = uid2[uid2.rfind('/'):]
l.append(uid2)
return l
# Get list of entity ids that appear in the rulebase
def process_entid_list(sc) :
ent_ids = sc.textFile("freebase-rules.txt").map(get_all_ids).flatMap(lambda x: x) # ['/067sbmt','/027vb3h',...]
ent_ids.distinct().coalesce(1).saveAsTextFile("present-entids2") # get distinct ids and condense to single file
###########################################################################
### Make reduced ent_id to name map that only has entities present in the ruleset
###########################################################################
# make mapping of entid->name but only for entities in ruleset
def process_entname_list(sc) :
present_entids = sc.textFile('present-entids/part-00000') # ['/067sbmt','/027vb3h',...]
entid2name = sc.sequenceFile('entid2name/part-00000') # {'/067sbmt':'david','/027vb3h':'john',...]
present_id_map = present_entids.map(lambda x: (x,1)) # convert from list to pairs so can join
entid2name.join(present_id_map).coalesce(1).saveAsTextFile("entid2name-important") # write id2name map
# load entid->name mapping
#( {'/0a2':'david','/g5h':'steven',...}, {'david':['/0a2'],'steven':['/g5h'],...}, set('david','steven',...) )
def load_ent2name() :
with open('entid2name-important/part-00000','r') as f :
lines = f.read().split('\n')
# (u'/012fh', (u'afrikaner', 1)) -> ('/012fh','afrikaner')
pair_list = [(line[3:line.find('\',')],remove_double_spaces(line[line.find(' (u')+4:-6])) for line in lines]
entid2name_important = dict( pair_list )
entname2id_important = defaultdict(list) # can be multiple ids per name
# make reversed map
for id in entid2name_important :
entname2id_important[ entid2name_important[id] ].append(id)
entname_set = set( [tuple(s.split()) for s in entid2name_important.values()] )
return (entid2name_important, entname2id_important, entname_set)
###########################################################################
### Load rules into memory
###########################################################################
# line: www.freebase.com/m/03_7vl www.freebase.com/people/person/profession www.freebase.com/m/09jwl
# return (/03_7vl,/people/person/profession)
def process_rule_line(line) :
parts = line.split('\t')
uid1 = parts[0]
uid1 = uid1[uid1.rfind('/'):]
reltype = parts[1]
reltype = reltype.replace('www.freebase.com','')
return (uid1,reltype)
# Extract all distinct (uid1,relationship_type) pairs and write to a single file
def process_rules(sc) :
sc.textFile("freebase-rules.txt").map(process_rule_line).distinct().coalesce(1).saveAsTextFile("rules")
# rules: {'/a2df':['profession,born_here'],...}
def load_rules() :
rules = defaultdict(list)
with open('rules/part-00000','r') as f :
lines = f.read().split('\n')
for line in lines :
# line: "('/a2df','profession')"
parts = line.split(',')
id = parts[0][3:-1]
rel = parts[1][3:-2]
rules[id].append(rel)
return rules
###########################################################################
### Quickly find possible misspellings by the method from http://emnlp2014.org/papers/pdf/EMNLP2014171.pdf
### 1. make a distinct letter -> prime number mapping
### 2. multiply primes for letters in word
### 3. find all entities with scores that are off by one or two prime factors (off by one or two letters)
### 4. run edit distance on this vastly reduced set of candidates to find if the incorrect letters are properly positioned
###########################################################################
# entname_set: ['star wars','star trek']
# returns ( {'a':2,'b':3,...}, {'star wars':1.232424e46,...}, [2,3,0.5,0.33,1.5,...] )
def make_mispelling_resources(entname_set) :
# map letters to prime numbers
primes_letters = [2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101]
primes_numbers = [103,109,113,127,131,137,139,149,151,157]
primes_all = primes_letters + primes_numbers + [163,167,173]
primes_map = {' ':163,'-':167,'\'':173}
for i in range(26) :
primes_map[chr(ord('a')+i)] = primes_letters[i]
for i in range(10) :
primes_map[chr(ord('0')+i)] = primes_numbers[i]
# list of factors that entity letter score can be off by for one or two errors
possible_spelling_ratios = set( flatten_one_layer([[1.0*x*y,1.0*x/y,1.0*y/x,1.0/x/y] for x in primes_all for y in primes_all])
+ flatten_one_layer([[1.0*x,1.0/x] for x in primes_all]) )
# map of spelling score to entity
ent_spell_scores = {}
for ent in entname_set :
num_list = [primes_map[c] for c in ' '.join(ent)]
if len(num_list)==0 or len(num_list)>40 :
continue
ent_spell_scores[float(reduce(op.mul,num_list))] = ent
return (primes_map, ent_spell_scores, possible_spelling_ratios)
# source: http://stackoverflow.com/questions/2460177/edit-distance-in-python
def edit_distance(s1, s2):
if len(s1) > len(s2):
s1, s2 = s2, s1
distances = range(len(s1) + 1)
for i2, c2 in enumerate(s2):
distances_ = [i2+1]
for i1, c1 in enumerate(s1):
if c1 == c2:
distances_.append(distances[i1])
else:
distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))
distances = distances_
return distances[-1]
# return list of entities off by 1 or 2 letters from ent
def find_mispellings(ent, primes_map, ent_spell_scores, possible_spelling_ratios) :
# check each of ~1000 values that this spelling can be off by
# add to possibilities any value that is present in ent_spell_scores so corresponds to a known entity
find_val = reduce(op.mul,[primes_map[c] for c in ' '.join(ent)])
possibilities = []
for ratio in possible_spelling_ratios :
if find_val*ratio in ent_spell_scores :
possibilities.append(ent_spell_scores[long(find_val*ratio)])
# use expensive edit distance method on reduced list to account for letter order
found_ents = []
for poss in possibilities :
if edit_distance(' '.join(poss),' '.join(ent))<=2 :
found_ents.append((poss,ent))
return found_ents
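# A small worked illustration of the prime-product trick above (hypothetical
# values, using the letter->prime map built in make_mispelling_resources):
# 'cat' -> 5 * 2 * 71 = 710, while the typo 'cap' -> 5 * 2 * 53 = 530.
# 710 / 530 = 71/53 is one of the precomputed one-error ratios, so the typo
# is recovered by a dict lookup instead of running edit distance against
# every known entity.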
###########################################################################
### Dependency Parse
###########################################################################
# the dog runs
# pos: {'the':'det','dog':'nn','runs':'vb'}
# deps: [[nsubj,dog,1,runs,2],[det,dog,1,the,0]]
# root: runs
class Parse:
def __init__(self,pos,deps,root) :
self.pos = pos
self.deps = deps
self.root = root
def __str__(self) :
s='['
s+=self.pos.__str__()+',\n'
s+=self.deps.__str__()+',\n'
s+=self.root.__str__()+'\n'
return s
def __repr__(self) :
return self.__str__()
# ie. [nsubj,dog,1,runs,2]
class Dep:
def __init__(self,rel,w1,w1id,w2,w2id) :
self.rel = rel
self.w1 = w1
self.w1id = w1id
self.w2 = w2
self.w2id = w2id
def __str__(self) :
return '[%s,%s,%i,%s,%i]' % (self.rel, self.w1, self.w1id, self.w2, self.w2id)
def __repr__(self) :
return self.__str__()
# Returns the encoding type that matches Python's native strings (source: stack overflow)
def get_native_encoding_type():
if sys.maxunicode == 65535:
return 'UTF16'
else:
return 'UTF32'
# Formats the response from the Google NLP API (see parse_text) into a Parse object
def format_parse(extracted_info) :
tokens = extracted_info.get('tokens', [])
# extract word ids and part of speech tags
words = {}
pos = {}
for i in range(len(tokens)) :
token = tokens[i]
word = token['text']['content']
words[i] = word
tag = token['partOfSpeech']['tag']
pos[word] = tag.lower()
# extract dependencies
deps = []
for i in range(len(tokens)) :
token = tokens[i]
dep = token['dependencyEdge']
other_word = words[ dep['headTokenIndex'] ]
other_word_index = dep['headTokenIndex']
deps.append( Dep(dep['label'].lower(), words[i], i, other_word, other_word_index) )
# find root
for i in range(len(deps)) :
if deps[i].w1id==deps[i].w2id :
root = deps[i].w1
return Parse(pos, deps, root)
# Call the Google Natural Language syntax API; raises HTTPError on connection problems.
def parse_text(text):
credentials = GoogleCredentials.get_application_default()
scoped_credentials = credentials.create_scoped(['https://www.googleapis.com/auth/cloud-platform'])
http = httplib2.Http()
scoped_credentials.authorize(http)
service = discovery.build(
'language', 'v1beta1', http=http)
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'features': {
'extract_syntax': True,
#'extract_entities': True,
},
'encodingType': get_native_encoding_type(),
}
request = service.documents().annotateText(body=body)
extracted_info = request.execute()
parse_info = format_parse(extracted_info)
parse_string = cPickle.dumps(parse_info).replace('\n','\t')
return parse_string
###########################################################################
### Training Flow Helpers
###########################################################################
# extract information from dataset line
def process_dataset_line(line,entid2name) :
id1 = line[0].replace('www.freebase.com/m','')
rel_type = line[1].replace('www.freebase.com','')
id2 = line[2].replace('www.freebase.com/m','')
# preprocess input text
text = filter(lambda c: c in [' ','-','\''] or (c>='a' and c<='z') or (c>='0' and c<='9'), line[3].lower())
text = re.sub(r"'(?=[a-z])", r" '", text)
if id1 not in entid2name :
ent1 = None
else :
ent1 = entid2name[id1]
return (text,id1,ent1,rel_type)
# generate list of all possible entities
def generate_grams(text, filter_words) :
words = text.split()
# get rid of unigrams that are really common
unigrams = filter(lambda tup: tup[0] not in filter_words, list(ngrams(words,1)))
grams_list = unigrams
for i in range(2,len(words)+1) :
grams_list += list(ngrams(words,i))
grams = set(grams_list)
return grams
# make a list of possible misspelled entities from the text input
def generate_mispelled_ents(grams, exact_match_ents, global_word_log_cts, primes_map, ent_spell_scores, possible_spelling_ratios) :
mispelled_ents = []
# check all ngrams that we have not already exact matched
for ent in grams-set([x[0] for x in exact_match_ents]) :
# throw out candidate if very short length or only has very common words
if len(' '.join(ent))<=4 or all([global_word_log_cts[w]>12 for w in ent]) :
continue
poss_ents = find_mispellings(ent, primes_map, ent_spell_scores, possible_spelling_ratios)
mispelled_ents += poss_ents
return mispelled_ents
# get the log probability of the phrase appearing randomly
def get_ent_score(ent_words, global_word_log_cts) :
return reduce(op.add,[math.log(360000000)-global_word_log_cts[w] for w in ent_words])
# ['a','b','c','d'] -> {'a':0.25,'b':0.25,'c':0.25,'d':0.25}
def uniform_normalized_bag(l) :
return dict( zip(l,[1.0/len(l) for x in l]) )
# get tf dotproduct score for all bags relative to the word weights
def get_rel_scores(word_weights, relationship_bags) :
scores = []
for rel in relationship_bags :
scores.append( ( rel, dict_dotproduct(word_weights,relationship_bags[rel]) ) )
return dict(scores)
# get all possible rules for entity
def get_present_rels(ent_words, entname2id, rules) :
ids = entname2id[' '.join(ent_words)] # get ids for entity name
rules_list = []
for id in ids :
rules_list += rules[id]
return set(rules_list)
|
[
"dmace@caltech.edu"
] |
dmace@caltech.edu
|
3d59cf2ba21b419c661f3c613e8d078d6d06fa2f
|
ac699d1e433b95187975a718eef7c85d059ca70e
|
/bin/timeoutlib.py
|
de717c7a5e6bfee4a6e8c413a522edafef0c7aea
|
[] |
no_license
|
marcelouva/pewa
|
1d253bfea61becaba14f1d7921e04ba5fcb5cc11
|
eea8f530f43738717867d82d8436e117d08fe01e
|
refs/heads/master
| 2021-01-19T10:36:36.631698
| 2017-04-13T01:28:43
| 2017-04-13T01:28:43
| 75,091,335
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,397
|
py
|
from functools import wraps
import errno
import os,ap1
import signal
from datetime import datetime, date, time, timedelta
class TimeoutError(Exception):
pass
def timeout(error_message=os.strerror(errno.ETIME)):
seconds=int(ap1.read_config_case("../config.ini","options")['timeout'])
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError(error_message)
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wraps(func)(wrapper)
return decorator
@timeout()
def call_with_timeout(funcion, parametro1, parametro2,parametro3,parametro4,parametro5,filename):
t=datetime.now()
try:
funcion(parametro1,parametro2,parametro3,parametro4,parametro5,filename)
return [datetime.now()-t,False]
except Exception as inst:
        print(type(inst))  # the exception instance
        print(inst.args)   # arguments stored in .args
        print(inst)
return [datetime.now()-t,True]
@timeout()
def call_with_timeout_permament(funcion, parametro1):
#t=datetime.now()
try:
funcion(parametro1)
return [0,False]
except:
return [0,True]
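# A minimal usage sketch (hypothetical; assumes a POSIX system where
# signal.SIGALRM exists and config.ini sets a timeout of a few seconds):
#
# @timeout()
# def slow():
#     import time
#     time.sleep(60)        # exceeds the configured limit
#
# slow()                    # raises TimeoutError from the SIGALRM handler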
|
[
"uva@dc.exa.unrc.edu.ar"
] |
uva@dc.exa.unrc.edu.ar
|
902093c9e3bc1c6b3fbe804605244b8c8cfaee54
|
d8508a5471ab2db65a96419bef412f9e123ce988
|
/week_01/lab_01/chop_011.py
|
0c5e1b29196529ed18d78e76032f0ece14a62d6a
|
[] |
no_license
|
Dunnel45/ca117
|
75bc8f7518c16fe21d7033f54fe4462e982e6c7d
|
11d522e58e364b14cb67f718cef554bef0691810
|
refs/heads/master
| 2020-11-27T20:34:06.845509
| 2019-12-29T12:15:58
| 2019-12-29T12:15:58
| 229,592,165
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
#!/usr/bin/env python
import sys
def main():
for lines in sys.stdin:
words = lines.strip()
chop = words[1:-1]
if chop:
print(chop)
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
Dunnel45.noreply@github.com
|
89519fbd675ddebfbfb5b2e93b4c7e10f44b81fc
|
71fbfb197e2756160680de9f4c303f20b63afa26
|
/dpkt socket学习过程/learning dpkt/dpkt_out.py
|
f4c9753ff6b2b06b224b05079ac9bb9fad73e7bc
|
[] |
no_license
|
RabitW/PacketCaptureOnPy
|
b1839bfa7ac12187056383443e1cbc4587283803
|
207bcd6038497bf36c55ba4089931b8ba6cb897b
|
refs/heads/master
| 2021-01-21T11:34:02.281168
| 2016-10-22T04:47:36
| 2016-10-22T04:47:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,457
|
py
|
# -*- coding:utf-8 -*-
#!/usr/bin/env python
"""
TESTED !
[Print Packets Example]
Use DPKT to read in a pcap file and print out the contents of the packets
This example is focused on the fields in the Ethernet Frame and IP packet
URL:http://dpkt.readthedocs.io/en/latest/_modules/examples/print_packets.html#test
"""
import dpkt
import pcap
import datetime
import socket
AF_INET = socket.AF_INET
def inet_ntop(address_family, packed_ip):
if address_family != AF_INET:
raise socket.error, (97, 'Address family not supported by protocol')
lIP = []
for ch in packed_ip:
lIP.append(str(ord(ch)))
    strIP = '.'.join(lIP)  # avoid the unimported string module
    return strIP
def inet_pton(address_family, ip_string):
if address_family != AF_INET:
raise socket.error, (97, 'Address family not supported by protocol')
lIP = ip_string.split('.')
strHexIP = ""
for i in lIP:
if i == '':
continue
strHex = "%x" % int(i)
strHex = strHex.zfill(2)
strHexIP += "\\x"+strHex
return strHexIP
def mac_addr(address):
"""Convert a MAC address to a readable/printable string
Args:
address (str): a MAC address in hex form (e.g. '\x01\x02\x03\x04\x05\x06')
Returns:
str: Printable/readable MAC address
"""
return ':'.join('%02x' % ord(b) for b in address)
def inet_to_str(inet):
"""Convert inet object to a string
Args:
inet (inet struct): inet network address
Returns:
str: Printable/readable IP address
"""
    # First try ipv4 and then fall back to ipv6
    try:
        return socket.inet_ntoa(inet)
    except socket.error:
        return socket.inet_ntop(socket.AF_INET6, inet)
def print_packets(pcap):
"""Print out information about each packet in a pcap
Args:
pcap: dpkt pcap reader object (dpkt.pcap.Reader)
"""
# For each packet in the pcap process the contents
for timestamp, buf in pcap:
# Print out the timestamp in UTC
print 'Timestamp: ', str(datetime.datetime.utcfromtimestamp(timestamp))
# Unpack the Ethernet frame (mac src/dst, ethertype)
eth = dpkt.ethernet.Ethernet(buf)
print 'Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type
# Make sure the Ethernet data contains an IP packet
if not isinstance(eth.data, dpkt.ip.IP):
print 'Non IP Packet type not supported %s\n' % eth.data.__class__.__name__
continue
# Now unpack the data within the Ethernet frame (the IP packet)
# Pulling out src, dst, length, fragment info, TTL, and Protocol
ip = eth.data
# Pull out fragment information (flags and offset all packed into off field, so use bitmasks)
do_not_fragment = bool(ip.off & dpkt.ip.IP_DF)
more_fragments = bool(ip.off & dpkt.ip.IP_MF)
fragment_offset = ip.off & dpkt.ip.IP_OFFMASK
# Print out the info
print 'IP: %s -> %s (len=%d ttl=%d DF=%d MF=%d offset=%d)\n' % \
(inet_to_str(ip.src), inet_to_str(ip.dst), ip.len, ip.ttl, do_not_fragment, more_fragments, fragment_offset)
def test():
"""Open up a test pcap file and print out the packets"""
# with open('data/http.pcap', 'rb') as f:
# pcap = dpkt.pcap.Reader(f)
    pc = pcap.pcap(r"D:\pcap.pcap")  # raw string so the backslash is literal
print_packets(pc)
if __name__ == '__main__':
test()
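# A worked example of the fragment bitmask logic above (hypothetical value):
# for off = 0x2064, dpkt.ip.IP_DF (0x4000) is clear, dpkt.ip.IP_MF (0x2000)
# is set, and off & dpkt.ip.IP_OFFMASK (0x1fff) == 0x64, i.e. a fragment
# offset of 100 eight-byte units.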
|
[
"uulink@outlook.com"
] |
uulink@outlook.com
|
5c1ac19434bd1f7eca275e5ddf72cd8965c8e62e
|
f4ea1f3236f9980e82cc52bb1f9beaa87d17fd85
|
/workflow/NumberNode.py
|
1008f3d206e57dbb07e38f00e8ebb5dc18ad7827
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
YOCKOW/PythonGitHubActionsWorkflowRepresentation
|
44eb1b332eb2e36f38bed0ba3f7b966f835f7394
|
1e8cb9fd4ade42b01f0c521f659c16b12d7957de
|
refs/heads/master
| 2020-09-07T20:11:38.132302
| 2019-11-22T06:00:16
| 2019-11-22T06:00:16
| 220,901,242
| 0
| 0
|
MIT
| 2019-11-22T06:00:17
| 2019-11-11T04:31:27
|
Python
|
UTF-8
|
Python
| false
| false
| 444
|
py
|
from .string import Lines
from .Node import Node
from .util import yaml_from_number
from numbers import Real, Integral
class NumberNode(Node):
def __init__(self, info: Real):
assert isinstance(info, Real)
self.__number = info
def yaml(self) -> Lines:
return yaml_from_number(self.__number)
class IntegerNode(NumberNode):
def __init__(self, info: Integral):
assert isinstance(info, Integral)
super().__init__(info)
|
[
"YOCKOW@users.noreply.github.com"
] |
YOCKOW@users.noreply.github.com
|
7fed78fc07f768255aefaa2a257bc0634ba38294
|
9afd82a16b97600ccdec764945cea31b77248e69
|
/Teaching/entry.py
|
13990c02b02a182c4fc7ab309272fa97bfd4f347
|
[] |
no_license
|
sinameshkini/python_samples
|
c722e0acb448b50d29fea6d005c483e373b77b81
|
b4ba993df327e47d97fd16e634ea44f749ed13ac
|
refs/heads/master
| 2020-05-27T21:52:35.104764
| 2019-05-27T03:44:16
| 2019-05-27T03:44:16
| 188,800,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 515
|
py
|
from tkinter import *
root = Tk()
root.geometry("300x200")
root.title('Code Core')
root.configure(background='light gray')
def change_txt():
    l1.config(text=e1.get())
l1=Label(root, text = " Hello World!",fg = "light green",bg = "darkgreen",font = "tahoma 16")
l1.grid(row=0,column=0)
#l1.place(x=10,y=10)
Label(root,text="Enter your name: ").grid(row=1,column=0)
e1=Entry(root)
e1.grid(row=1,column=1)
button = Button(root,text='Change text',command=change_txt)
button.grid(row=2,column=0)
root.mainloop()
|
[
"sinameshkini7@gmail.com"
] |
sinameshkini7@gmail.com
|
427ba04826a00292ca7fafef75d3d013173842a5
|
38ec5642460306d48d5f973dce3e10c80f3f9650
|
/social_login/models.py
|
2e9dfbe34e0867954c2b258949d64e2a6fe13917
|
[
"BSD-4-Clause"
] |
permissive
|
lomatus/django-social-login
|
113be279b120f56a3547275bebad0429f26258c8
|
c84f8d8ea82cb7cb58ede1230c69ae5f19de1f77
|
refs/heads/master
| 2020-12-03T10:29:51.946410
| 2014-03-27T05:05:22
| 2014-03-27T05:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
# -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from .app_settings import SOCIAL_LOGIN_UID_LENGTH
from .manager import SocialUserManager
def _abstract_siteuser():
custom_siteuser = getattr(settings, 'SOCIAL_LOGIN_ABSTRACT_SITEUSER', None)
if not custom_siteuser:
from .abstract_models import AbstractBaseSiteUser
return AbstractBaseSiteUser
_app, _model = custom_siteuser.split('.')
_module = __import__('%s.models' % _app, fromlist=[_model])
_model = getattr(_module, _model)
if not _model._meta.abstract:
raise AttributeError("%s must be abstract model" % custom_siteuser)
return _model
class SiteUser(_abstract_siteuser()):
def __unicode__(self):
return '<SiteUser %d>' % self.id
class SocialUser(models.Model):
user = models.OneToOneField(SiteUser, related_name='social_user')
site_uid = models.CharField(max_length=SOCIAL_LOGIN_UID_LENGTH)
site_id = models.SmallIntegerField()
objects = SocialUserManager()
class Meta:
unique_together = (('site_uid', 'site_id'),)
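# A sketch of how the dynamic base class above is meant to be overridden
# (hypothetical app and model names). In settings.py:
#
#   SOCIAL_LOGIN_ABSTRACT_SITEUSER = 'myapp.MyAbstractSiteUser'
#
# where myapp/models.py defines an abstract model:
#
#   class MyAbstractSiteUser(models.Model):
#       nickname = models.CharField(max_length=64)
#
#       class Meta:
#           abstract = True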
|
[
"yueyoum@gmail.com"
] |
yueyoum@gmail.com
|
3f06a5b160b479d9a68ec56bf4db9b3841dcbb8f
|
8f3ac7f795f4c57c895514331743c86721147a9e
|
/demo2/booktest2/urls.py
|
25668b20b92879c7af1a95c76c7ff4974f2753d6
|
[] |
no_license
|
shuimao/wuchunfeng
|
e1341f06451101d870bd460038db7be6c4e593e1
|
7efed1fd27df0b22c87f76017123fd3081029788
|
refs/heads/master
| 2022-11-24T11:46:31.573281
| 2019-07-19T02:28:36
| 2019-07-19T02:28:36
| 194,625,310
| 0
| 0
| null | 2022-11-22T04:07:44
| 2019-07-01T07:50:56
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 632
|
py
|
from django.conf.urls import url
from . import views
app_name = 'booktest2'
urlpatterns = [
url(r'^index/$', views.index, name='index'),
url(r'^list/$', views.list, name='list'),
url(r'^detail/(\d+)/$', views.detail, name='detail'),
url(r'^roledel/(\d+)/$', views.roledel, name='roledel'),
url(r'^roleadd/(\d+)/$', views.roleadd, name='roleadd'),
url(r'^login/$', views.login, name='login'),
url(r'^logout/$', views.logout, name='logout'),
url(r'^regist/$', views.regist, name='regist'),
url(r'^verity/$', views.verity, name='verity'),
url(r'^active/(\d+)/$', views.active, name='active')
]
|
[
"411468873@qq.com"
] |
411468873@qq.com
|
7f637b614c55bd7c37e6d684ad5808118acfe36c
|
5be53821af680c32d9d8b097e6f38cabad0068f7
|
/Week02/242.有效的字母异位词.py
|
7c2a4a279e51e1bd52248c818bf1255ae122945d
|
[] |
no_license
|
DonnyChen/algorithm010
|
bb1f91e3d23d7fed862d3bf7097a64a5deef4675
|
f065fdad276318db179bc60d58438890b8e1fbe0
|
refs/heads/master
| 2022-11-12T08:51:36.620166
| 2020-06-28T15:58:48
| 2020-06-28T15:58:48
| 271,441,519
| 0
| 0
| null | 2020-06-11T03:21:37
| 2020-06-11T03:21:37
| null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
#
# @lc app=leetcode.cn id=242 lang=python3
#
# [242] Valid Anagram
#
# @lc code=start
class Solution:
    # 1. Sorting
# def isAnagram(self, s: str, t: str) -> bool:
# return sorted(s) == sorted(t)
    # 2. Hash map (letter counts)
def isAnagram(self, s: str, t: str) -> bool:
dict26 = [0]*26
if len(t) != len(s):
return False
for i in range(len(t)):
dict26[ord(s[i]) - ord('a')] += 1
dict26[ord(t[i]) - ord('a')] -= 1
for j in range(26):
if dict26[j] != 0:
return False
return True
# @lc code=end
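# A third approach, not in the original file: collections.Counter compares
# letter counts directly and also works for characters outside 'a'-'z'.
#
# from collections import Counter
# def isAnagram(s: str, t: str) -> bool:
#     return Counter(s) == Counter(t)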
|
[
"“donny_df_mse@outlook.com"
] |
“donny_df_mse@outlook.com
|
80687ef66fb00b8c3322399150fd54d32cdc51bb
|
f5b54e3d1d00f6d58b5eb8a3ceaf0cee9735c53f
|
/Veacon/vehicle/migrations/0001_initial.py
|
613bc19f5e839235e031e46da10a9ec8a483ae36
|
[] |
no_license
|
feerposser/veacon_sys
|
c5ba824aadb806dea1746ee708ad98df550721fa
|
2f85e4999c1c43fa968ed1ea599c6a23b4f7e395
|
refs/heads/master
| 2022-04-02T15:14:01.509692
| 2020-02-16T22:31:09
| 2020-02-16T22:31:09
| 234,101,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 932
|
py
|
# Generated by Django 2.2.7 on 2020-01-08 19:03
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('user_veacon', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='VehicleModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('plaque', models.CharField(max_length=10)),
('color', models.CharField(max_length=20)),
('model', models.CharField(max_length=50)),
('brand', models.CharField(max_length=50)),
('users', models.ManyToManyField(to='user_veacon.UserVeaconModel')),
],
options={
'verbose_name': 'Veículo',
'verbose_name_plural': 'Veículos',
},
),
]
|
[
"fernando.posser@hotmail.com"
] |
fernando.posser@hotmail.com
|
2d7fabb3c3d73402cce2a0930012fd2cbb7212de
|
1498148e5d0af365cd7fd16197174174a7fa9800
|
/leetcode/t000557_3.py
|
b6eeb948f9726f5896a9f6da569093d8e6d99862
|
[] |
no_license
|
feiyanshiren/myAcm
|
59a2b80fe7e02787defcb152eee3eae26135322a
|
00c7082d5143ddf87aeeafbdb6ce29da46dc8a12
|
refs/heads/master
| 2023-09-01T12:12:19.866447
| 2023-09-01T09:09:56
| 2023-09-01T09:09:56
| 148,560,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
class Solution:
    def reverseWords(self, s: str) -> str:
        # reversing the whole string and then the word order reverses each
        # word in place while keeping the original word order
        return " ".join(s[::-1].split(" ")[::-1])
s = Solution()
print(s.reverseWords("Let's take LeetCode contest"))
import time
t1 = time.time()
for i in range(100000):
s.reverseWords("Let's take LeetCode contest")
print(time.time() - t1)
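# An equivalent formulation, added as a sketch (not in the original file):
# reverse each word in place instead of reversing the string twice.
print(" ".join(w[::-1] for w in "Let's take LeetCode contest".split(" ")))
# -> s'teL ekat edoCteeL tsetnoc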
|
[
"feiyanshiren@163.com"
] |
feiyanshiren@163.com
|
da6a0298a940ce8965d0f00bd8a5ef8e830004b5
|
e819eff29a5002a20adbf68ecc5e0295dfdea5bb
|
/ch5/readline2.py
|
016e0a9c14f907d5ebaad69bab28acc4eee1c1d0
|
[] |
no_license
|
Scott-S-Lin/Python_Programming_ChineseBook
|
91f25e01ca123e32d121468055a5749045557351
|
06ad28da15065d49790eefa6d6cd92702bfa55e8
|
refs/heads/master
| 2020-04-17T21:26:41.523865
| 2019-01-22T07:41:24
| 2019-01-22T07:41:24
| 166,949,930
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
#function: using readline() to read data
file_in = open("employee1.txt",'r')
while True:
linedata = file_in.readline()
if not linedata:
break
print(linedata, end='')
file_in.close()
file_in = open("employee1.txt",'r')
print("\n using readlines()\n")
for linedata in file_in.readlines():
print(linedata, end='')
file_in.close()
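# A more idiomatic variant of the loops above, added as a sketch: iterate the
# file object directly and let a context manager close it.
with open("employee1.txt", 'r') as f:
    for linedata in f:
        print(linedata, end='')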
|
[
"wenyuan.s.lin@gmail.com"
] |
wenyuan.s.lin@gmail.com
|
18758a9998673d576be354f2729c612fca8dca2a
|
9c3708c7cfc0a16fcb09e459dd66a53c67d25e6f
|
/djClassSchedule/urls.py
|
7c44195767f9bf086e5438deee6d1943e7eb1ba2
|
[] |
no_license
|
EckerdCollege/djClassSchedule
|
a1f668338a56c16d449cb2880f4a8f3cf11153f4
|
37d55d5369e067e5ac884fc0e4a0d9a075ae2e2b
|
refs/heads/master
| 2021-01-19T21:32:46.558498
| 2017-06-09T15:45:19
| 2017-06-09T15:45:19
| 88,663,985
| 0
| 2
| null | 2017-06-09T15:45:20
| 2017-04-18T19:43:55
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 832
|
py
|
"""djClassSchedule URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^', include('apps.classSchedule.urls')),
url(r'^admin/', admin.site.urls),
]
|
[
"ChristopherDavenport@outlook.com"
] |
ChristopherDavenport@outlook.com
|
d7619a72a5b64244d23fd88c90aa7bc8d693aea5
|
7604051222cd0779f8f5ea5c40477665862c9c25
|
/first_task/script.py
|
3a510b4181a2584f793eca16e8cecd98ac04f764
|
[] |
no_license
|
Khitretsov/examples
|
9cb0d8ffa2b160cc595f68365485066704b11261
|
651f1daeb712b6c753dbb8809587187d6717426a
|
refs/heads/master
| 2023-01-05T21:04:14.675535
| 2020-11-04T22:42:36
| 2020-11-04T22:42:36
| 310,134,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
obj = {
'a': 1,
'b': 3,
'c': {
'd': 2,
'a': 3,
'f': {
'f': 2,
'd':3
},
'v': 4
}
}
def flatten(d):
answer = {}
def search_path(dd, letter=''):
        keys = dd.keys()
for key in keys:
field = key if letter == '' else letter + '.' + key
            try:
                hash(dd[key])  # hashable values are leaves
                answer[field] = dd[key]
            except TypeError:  # unhashable (e.g. a nested dict) -> recurse
                search_path(dd[key], field)
search_path(d)
print(answer)
return answer
flatten(obj)
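# Expected output for obj above (CPython 3.7+ preserves insertion order):
# {'a': 1, 'b': 3, 'c.d': 2, 'c.a': 3, 'c.f.f': 2, 'c.f.d': 3, 'c.v': 4}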
|
[
"NekitHitr@yandex.ru"
] |
NekitHitr@yandex.ru
|
ef66d5d1c123eba265ed18adb9743adf1dbfd930
|
2aa3a50982ab45136787f55a0dfc4d4be86831c4
|
/rbtree.py
|
7370c2b5f1532117c07ce765fd3630ee70ec5911
|
[] |
no_license
|
Miautawn/Red-Black-Tree
|
6a5026eaa568ce6c1899531be072d3e447944d88
|
a6edca687a87d59736c444214e323fe576261131
|
refs/heads/main
| 2023-04-25T22:43:41.605664
| 2021-06-06T08:35:10
| 2021-06-06T08:35:10
| 374,307,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,025
|
py
|
# RED-BLACK Tree, Martynas Jašinskas, VU ISI, 1st year
from os import system
class Node():
def __init__(self, key):
self.colour = "RED"
self.key = key
self.left = None
self.right = None
self.parent = None
class RBTree():
def __init__(self):
self.NILL = Node("NILL")
self.NILL.colour = "BLACK"
self.root = self.NILL
def __transmutate(self, deletable_node, changeable_node):
if deletable_node.parent == None:
self.root = changeable_node
elif deletable_node == deletable_node.parent.left:
deletable_node.parent.left = changeable_node
else:
deletable_node.parent.right = changeable_node
changeable_node.parent = deletable_node.parent
def delete(self, item):
"""
        Deletes an element from the tree
"""
x = self.root
z = self.NILL
#find the lowest possible node with this number
while x != self.NILL:
if x.key == item:
z = x
if x.key <= item:
x = x.right
else:
x = x.left
#if there is no such node
if z == self.NILL:
print("Tokio elemento nėra")
return
y = z
y_orginal = y.colour
#Simple BST delete
##################
        # These cases apply when the node has only one child
        # or no children at all
if(z.right == self.NILL):
x = z.left
self.__transmutate(z, z.left)
elif(z.left == self.NILL):
x = z.right
self.__transmutate(z, z.right)
        # If it has both children
else:
            # find the in-order successor to take its place
y = self.get_minimum(z.right)
y_orginal = y.colour
x = y.right
if y.parent == z:
x.parent = y
else:
self.__transmutate(y, y.right)
y.right = z.right
y.right.parent = y
self.__transmutate(z, y)
y.left = z.left
y.left.parent = y
y.colour = z.colour
        # If the removed node was black, restore the red-black properties
if y_orginal == "BLACK":
self.fix_delete(x)
def fix_delete(self, x):
while x != self.root and x.colour == "BLACK":
if x == x.parent.left:
sibling = x.parent.right
#Case 1 - Sibling is red
if(sibling.colour == "RED"):
sibling.colour = "BLACK"
x.parent.colour = "RED"
self.left_rotate(x.parent)
sibling = x.parent.right
#Case 2 - both of siblings children are black
if(sibling.left.colour == "BLACK" and sibling.right.colour == "BLACK"):
sibling.colour = "RED"
x = x.parent
#Case 3 - left sibling child is red, right is black
else:
if(sibling.right.colour == "BLACK"):
sibling.left.colour = "BLACK"
sibling.colour = "RED"
self.right_rotate(sibling)
sibling = x.parent.right
#Case 4 - left sibling child is black, right is red
sibling.colour = x.parent.colour
sibling.right.colour = "BLACK"
x.parent.colour = "BLACK"
self.left_rotate(x.parent)
x = self.root
else:
sibling = x.parent.left
#Case 1 - Sibling is red (inverted)
if(sibling.colour == "RED"):
sibling.colour = "BLACK"
x.parent.colour = "RED"
                    self.right_rotate(x.parent)
sibling = x.parent.left
#Case 2 - both of siblings children are black
if(sibling.left.colour == "BLACK" and sibling.right.colour == "BLACK"):
sibling.colour = "RED"
x = x.parent
else:
#Case 3 - left sibling child is red, right is black (inverted)
if(sibling.left.colour == "BLACK"):
sibling.right.colour = "BLACK"
sibling.colour = "RED"
self.left_rotate(sibling)
sibling = x.parent.left
#Case 4 - left sibling child is black, right is red (inverted)
sibling.colour = x.parent.colour
x.parent.colour = "BLACK"
sibling.left.colour = "BLACK"
self.right_rotate(x.parent)
x = self.root
x.colour = "BLACK"
def insert(self, item):
"""
        Inserts an element into the tree
"""
new_node = Node(item)
new_node.left, new_node.right = self.NILL, self.NILL
#get the parent of the to be inserted node
y = None
x = self.root
while x != self.NILL:
y = x
if(item > x.key):
x = x.right
else:
x = x.left
new_node.parent = y
    # if the parent is non-existent, i.e. new_node is the root
if y == None:
self.root = new_node
self.root.colour = "BLACK"
return
if(item > y.key):
y.right = new_node
else:
y.left = new_node
#if the inserted node is in the second level
if new_node.parent.parent == None:
return
#if nothing else, let's do the fixing
self.fix_insert(new_node)
def fix_insert(self, new_node):
while new_node.parent.colour == "RED":
#if the parent is the left child of GrandParent
uncle = None
if(new_node.parent == new_node.parent.parent.left):
uncle = new_node.parent.parent.right
else:
uncle = new_node.parent.parent.left
            # Case 1 - parent is RED and uncle is RED
if uncle.colour == "RED":
new_node.parent.colour = "BLACK"
uncle.colour = "BLACK"
new_node.parent.parent.colour = "RED"
new_node = new_node.parent.parent
else:
if(new_node.parent == new_node.parent.parent.left):
                    # Case 2 - parent RED, uncle BLACK, and x is a right child
if new_node == new_node.parent.right:
new_node = new_node.parent
self.left_rotate(new_node)
                    # Case 3 - parent RED, uncle BLACK, and x is a left child
new_node.parent.colour = "BLACK"
new_node.parent.parent.colour = "RED"
self.right_rotate(new_node.parent.parent)
else:
                    # Case 2 - parent RED, uncle BLACK, and x is a left child (mirrored)
if new_node == new_node.parent.left:
new_node = new_node.parent
self.right_rotate(new_node)
                    # Case 3 - parent RED, uncle BLACK, and x is a right child (mirrored)
new_node.parent.colour = "BLACK"
new_node.parent.parent.colour = "RED"
self.left_rotate(new_node.parent.parent)
#if after changes the pointing node is a root - exit
if new_node == self.root:
break
self.root.colour = "BLACK"
def get_minimum(self, node):
"""
        Returns the minimum element of the given subtree
"""
while node.left != self.NILL:
node = node.left
return node
def print_tree(self):
"""
        Prints the tree level by level
"""
def print_level(root, level):
if root == None:
return
elif level == 1:
if root != self.NILL:
print('(', root.key,root.colour,')', end=" ")
else:
print('(', root.key, ')', end=" ")
elif level > 1:
print_level(root.left, level - 1)
print_level(root.right, level - 1)
h = self.get_height(self.root)
for i in range(1, h+1):
print_level(self.root, i)
print("\n")
def get_height(self, node):
"""
        Returns the height of the tree
"""
if(node == self.NILL):
return 0
else:
return max(self.get_height(node.left), self.get_height(node.right)) + 1
def find(self, value):
"""
        Finds the requested element in the tree and reports it
"""
x = self.root
y = self.NILL
level = 0
while x != self.NILL:
level += 1
if x.key == value:
y = x
break
if(value >= x.key):
x = x.right
else:
x = x.left
if(y == self.NILL):
print("Tokio elemento nėra!")
else:
print("Rastas toks elementas: {}, {} lygyje".format(y.key, level))
def left_rotate(self, x):
"""
        Performs a left rotation
"""
y = x.right
x.right = y.left
if y.left != self.NILL:
y.left.parent = x
y.parent = x.parent
if(x.parent == None):
self.root = y
elif(x == x.parent.left):
x.parent.left = y
else:
x.parent.right = y
y.left = x
x.parent = y
def right_rotate(self, x):
"""
        Performs a right rotation
"""
y = x.left
x.left = y.right
if y.right != self.NILL:
y.right.parent = x
y.parent = x.parent
if(x.parent == None):
self.root = y
elif(x == x.parent.right):
x.parent.right = y
else:
x.parent.left = y
y.right = x
x.parent = y
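# A non-interactive usage sketch of the RBTree class above (these are the
# calls the menu loop below performs under the hood):
#
# t = RBTree()
# for k in [10, 20, 30, 15]:
#     t.insert(k)
# t.print_tree()   # prints each level's keys with their RED/BLACK colour
# t.find(15)       # "Found element: 15, at level 3"
# t.delete(20)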
tree = RBTree()
while True:
print("Įvesti - 1, Ištrinti - 2, Spausdinti - 3, Surasti - 4")
selection = int(input("Jūsų pasirinkimas: "))
if(selection == 1):
number = int(input("Kokį skaičių įvesti: "))
tree.insert(number)
_ = system('clear')
elif(selection == 2):
number = int(input("Kokį elementą ištrinti: "))
tree.delete(number)
_ = system('clear')
elif(selection == 3):
_ = system('clear')
print("***********************")
tree.print_tree()
print("***********************")
else:
_ = system('clear')
number = int(input("Kokios reikšmės ieškote: "))
tree.find(number)
|
[
"miautawn@gmail.com"
] |
miautawn@gmail.com
|
0283fb33f229cbc5a9db922f72ecb19545e2d7c7
|
58fd749471c9de26a7ad9a357385aeb9606f8d97
|
/plug_and_play/master/forms.py
|
ef09dc679d8805eda66898594fbbea4030865c1c
|
[] |
no_license
|
kSinghParth/Plug-and-Play
|
2810336a26f0a6847678352d0e37ec55ba2b9db4
|
13f1e9afb9d194968282ddc9e8a30202e23b3b77
|
refs/heads/master
| 2022-06-09T08:06:27.028198
| 2022-05-27T00:22:53
| 2022-05-27T00:22:53
| 177,397,385
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
from django import forms
class UploadJobForm(forms.Form):
file = forms.FileField()
process = forms.FileField()
aggregate = forms.FileField()
|
[
"parthsingh287@gmail.com"
] |
parthsingh287@gmail.com
|
c8aa9a7652ebbd1dbeb6a42dbd17fe1780634663
|
91e2408eeaecda5e3bc6f08099024a2f7dba7929
|
/util/latent_space.py
|
be8996455ad1d51ed3082cf3c9f31ab34a8d34e8
|
[] |
no_license
|
ArashVahabpour/SOG
|
90041e16dcd7da9b2c61be0a277a7846c5d046c1
|
3e8abdaae72941b6316e8849026b11846b77db94
|
refs/heads/master
| 2023-04-22T16:24:12.830567
| 2021-05-17T05:53:11
| 2021-05-17T05:53:11
| 236,617,903
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,684
|
py
|
import numpy as np
from scipy import stats, linalg
from math import ceil, log2
import torch
import torchvision
import cv2 as cv
import os
# Converts the tensor of a batch of images to a grid visualisation
def make_grid(image_tensor): # TODO add normalization option
batch_size = image_tensor.shape[0]
grid_width = 2 ** ceil(log2(batch_size) // 2)
# print('WARNING only>>>>tanh'); image_tensor = image_tensor/2+.5 # todo
img = torchvision.utils.make_grid(image_tensor, nrow=grid_width, padding=2,
normalize=False, range=None, scale_each=False,
pad_value=0)
img = (img.cpu().numpy().transpose(1, 2, 0) * 255).astype(np.uint8) # changing float mode to uint8
return img
def generate_full_grid(sog_model, opt): # TODO add this to training
grid_width = opt.grid_width
cdf_begin = 0.01
cdf_end = 1 - cdf_begin
z1 = np.linspace(cdf_begin, cdf_end, grid_width)
z1 = stats.norm.ppf(z1)
z1 = torch.tensor(z1, device=opt.device, dtype=torch.float32)
# x_test[i1, i2, ..., ik, :] = x1[i], x1[j], ..., x1[k]
z_test = torch.cat([xv.unsqueeze(-1) for xv in torch.meshgrid([z1] * opt.n_latent)], dim=-1)
z_test = z_test.reshape(-1, opt.n_latent)
y_pred = torch.cat([sog_model.decode(z_test[i:i + 1], requires_grad=False).reshape(-1, opt.nc, opt.img_size, opt.img_size).cpu()
for i in range(len(z_test))]) # obtain grid's test results with a batch size of 1
nrow = grid_width ** ceil(opt.n_latent // 2)
img = torchvision.utils.make_grid(y_pred, nrow=nrow, padding=2, normalize=False, pad_value=0)
img = np.ndarray.astype(img[0].numpy() * 255, np.uint8)
return img
class RandomMotion:
def __init__(self, dim, tau=10, v_mag=0.02):
self.dim = dim
self.tau = tau
self.v_mag = v_mag
self.loc = np.random.rand(dim)
self._renew_v()
def _renew_v(self):
rand_dir = np.random.randn(self.dim)
self.v = rand_dir / linalg.norm(rand_dir) * self.v_mag
def _update_loc(self):
self.loc += self.v
bounds = np.clip(self.loc, 0, 1)
bounce_mask = bounds != self.loc
self.loc[bounce_mask] = 2 * bounds[bounce_mask] - self.loc[bounce_mask]
self.v[bounce_mask] *= -1
def tick(self):
if np.random.rand() < 1. / self.tau:
self._renew_v()
self._update_loc()
def generate_seq(self, count=1000):
locs = []
for _ in range(count):
locs.append(self.loc.copy())
self.tick()
return np.vstack(locs)
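# A worked example of the bounce logic in RandomMotion above (hypothetical
# numbers): if a step moves a coordinate to 1.02, np.clip yields the bound
# 1.0, the reflection 2 * 1.0 - 1.02 = 0.98 puts it back inside [0, 1], and
# the matching velocity component flips sign.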
def generate_video(sog_model, opt, web_dir):
seq = RandomMotion(dim=opt.n_latent, tau=100, v_mag=.02).generate_seq(10000)
seq = stats.norm.ppf(seq) # map to normal distribution
seq = np.clip(seq, -3, 3) # avoid off-3-sigma values
z_test = torch.tensor(seq, device=opt.device, dtype=torch.float32)
y_pred = sog_model.decode(z_test, requires_grad=False).reshape(-1, opt.nc, opt.img_size, opt.img_size).cpu().numpy()
y_pred = y_pred.transpose(0, 2, 3, 1) # channels last format for opencv
if opt.nc == 1:
y_pred = y_pred.repeat(3, axis=3) # fake RGB channels
y_pred = (y_pred * 255).astype(np.uint8)
video_dir = os.path.join(web_dir, 'morph.avi')
fourcc = cv.VideoWriter_fourcc(*'DIVX')
video_writer = cv.VideoWriter(video_dir, fourcc, 30., (opt.img_size, opt.img_size))
for frame in y_pred:
video_writer.write(frame)
video_writer.release()
# TODO refactor web_dir as results_dir
# TODO organize all this as a class in a reasonable way, ask Yipeng about it
|
[
"vahaabpour@gmail.com"
] |
vahaabpour@gmail.com
|
7bae208221ada732f93c04006eea3676ed23eef1
|
639a7b975ebfba7cb7d522dbfa99c57188c3e388
|
/setup.py
|
963e4d5ae1361b2b7764a7f337e1e670cf170071
|
[
"MIT"
] |
permissive
|
Suresh-Singamsetty/oneNeuron_Pypi
|
a6998ea7dbf567e15083afa1ebf400edabd70106
|
4d0634dc5bf550def5f42621a82bf8706513085c
|
refs/heads/main
| 2023-09-06T04:47:09.267756
| 2021-10-02T23:12:56
| 2021-10-02T23:12:56
| 412,920,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 965
|
py
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
PROJECT_NAME = "oneNeuron_Pypi"
USER_NAME = "Suresh-Singamsetty"
setuptools.setup(
name=f"{PROJECT_NAME}-{USER_NAME}",
version="0.0.1",
author="USER_NAME",
author_email="author@example.com",
description="its an implementation of perceptron",
long_description=long_description,
long_description_content_type="text/markdown",
url=f"https://github.com/{USER_NAME}/{PROJECT_NAME}",
project_urls={
"Bug Tracker": f"https://github.com/{USER_NAME}/{PROJECT_NAME}/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
python_requires=">=3.7",
install_requires=[
"numpy",
"tqdm"
]
)
|
[
"sureshsmart96@gmail.com"
] |
sureshsmart96@gmail.com
|
80da79e2bdd7822bb7d3551aa39ae8f34bd61f93
|
ea9c8cf17b1a82d070e5ee96aa40c3fee4938a96
|
/test2.py
|
deec61f7fbaa02f973853de85c8ad2c11ff539e5
|
[] |
no_license
|
z00m1k/Python
|
c311cbb1f74a1203497e32adf84b8fe9c2685b92
|
5ead0f5a759f9ec42fad6b8ce161e6ab2e49b316
|
refs/heads/master
| 2020-09-30T18:01:11.262437
| 2019-12-11T10:53:12
| 2019-12-11T10:53:12
| 227,343,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,280
|
py
|
# Given an array of size N, find two adjacent elements whose sum is
# maximal and print those elements in order of increasing index.
import random
from colorama import init, Fore, Back
init()
print(Fore.BLACK)  # black text
print(Back.GREEN)  # green background
spam = int(input("Enter the array size N: "))
array = [random.randint(-500, 500) for i in range(spam)]  # fill the array with random numbers
print(Back.YELLOW)
def search(array):  # find the adjacent pair with the maximal sum
    best_i = 0
    best_sum = array[0] + array[1]  # seed with the first pair so all-negative arrays work
    for i in range(1, spam - 1):
        summa = array[i] + array[i + 1]  # sum of adjacent elements
        if summa > best_sum:  # compare against the best sum so far
            best_sum = summa
            best_i = i
    print(Back.CYAN)
    print("Maximal sum of adjacent elements: " + str(best_sum))
    print("The pair, in index order: " + str(array[best_i]) + ", " + str(array[best_i + 1]))
    print("Array N: " + str(array) + "\n")

search(array)  # run the search
|
[
"noreply@github.com"
] |
z00m1k.noreply@github.com
|
2a4d717f63b2aadc4aa1f79e5ce3f4b0573d5839
|
d0aa87b4d5ec9eac9646cba850deb4883ba72625
|
/app/recipes/views.py
|
4f030938fa561806af0e25d5c9f6b828bf4c2cc3
|
[
"MIT"
] |
permissive
|
alisoliman/recipe-app-api
|
c33c8ca73866d44a7e07acd13506618edf2bbd4d
|
5a8269d4e25c11cd57a9d25d8fb69955192fc2a6
|
refs/heads/master
| 2020-07-27T16:19:13.943206
| 2019-10-21T21:23:43
| 2019-10-21T21:23:43
| 209,155,072
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,558
|
py
|
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework import viewsets, mixins, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from core.models import Tag, Ingredient, Recipe
from recipes.serializers import IngredientSerializer, \
TagSerializer, RecipeSerializer, \
RecipeDetailSerializer, RecipeImageSerializer
class BaseRecipeAttrViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin):
"""Base View Set for user owned recipe attributes"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_queryset(self):
"""Return objects for the current authenticated user only"""
assigned_only = bool(self.request.query_params.get('assigned_only'))
queryset = self.queryset
if assigned_only:
queryset = queryset.filter(recipe__isnull=False)
return queryset.filter(user=self.request.user).order_by('-name')
def perform_create(self, serializer):
"""Create a new object"""
serializer.save(user=self.request.user)
class TagViewSet(BaseRecipeAttrViewSet):
"""Manage Tags in the database"""
queryset = Tag.objects.all()
serializer_class = TagSerializer
class IngredientViewSet(BaseRecipeAttrViewSet):
"""Manage Ingredients in the database"""
queryset = Ingredient.objects.all()
serializer_class = IngredientSerializer
class RecipeViewSet(viewsets.ModelViewSet):
"""Manage Recipes in the Database"""
serializer_class = RecipeSerializer
queryset = Recipe.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def _params_to_ints(self, qs):
"""Convert a list of string ids to a list of integers"""
return [int(str_id) for str_id in qs.split(',')]
def get_queryset(self):
"""Limit objects to authenticated user"""
tags = self.request.query_params.get('tags')
ingredients = self.request.query_params.get('ingredients')
queryset = self.queryset
if tags:
tag_ids = self._params_to_ints(tags)
queryset = queryset.filter(tags__id__in=tag_ids)
if ingredients:
ingredients_ids = self._params_to_ints(ingredients)
queryset = queryset.filter(ingredients__id__in=ingredients_ids)
return queryset.filter(user=self.request.user)
def get_serializer_class(self):
""":return appropriate serializer class"""
if self.action == 'retrieve':
return RecipeDetailSerializer
elif self.action == 'upload_image':
return RecipeImageSerializer
return self.serializer_class
def perform_create(self, serializer):
"""Create a new recipe"""
serializer.save(user=self.request.user)
@action(methods=['POST'], detail=True, url_path='upload-image')
def upload_image(self, request, pk=None):
"""Upload an image to a recipe"""
recipe = self.get_object()
serializer = self.get_serializer(
recipe,
data=request.data
)
if serializer.is_valid():
serializer.save()
return Response(
serializer.data,
status=status.HTTP_200_OK
)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
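    # Example query strings the filters above respond to (URL prefixes are
    # hypothetical; they depend on how the router is mounted in urls.py):
    #
    #   GET /api/recipe/recipes/?tags=1,2       -> recipes tagged 1 or 2
    #   GET /api/recipe/recipes/?ingredients=3  -> recipes using ingredient 3
    #   GET /api/recipe/tags/?assigned_only=1   -> only tags attached to a recipe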
|
[
"ali.soliman95@gmail.com"
] |
ali.soliman95@gmail.com
|
37e641caa37f4e7f18e93aaf0e5e626765ec31c3
|
1260ce7869ce32d6b434afbf273273b7b1ebea2d
|
/euclidean_equivariant_networks/experiments/nbody/nbody_run.py
|
4a138819ba680fd99e4be0f945f93bf14e4c68ef
|
[] |
no_license
|
savvy379/Lorentz-Equivariant-GNN
|
b3b30e964cfa9af39adcb4e8b73bc78b4f8b7b5e
|
3d1c74081bdd43387a7c530bce73580db379d22d
|
refs/heads/master
| 2023-08-01T06:43:13.229014
| 2021-09-22T18:35:15
| 2021-09-22T18:35:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,826
|
py
|
from euclidean_equivariant_networks.utils.utils_profiling import * # load before other local modules
import argparse
import os
import sys
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import dgl
import numpy as np
import torch
import wandb
import time
import datetime
from torch import optim
import torch.nn as nn
from torch.utils.data import DataLoader
from euclidean_equivariant_networks.experiments.nbody.nbody_dataloader import RIDataset
from euclidean_equivariant_networks.utils import utils_logging
from euclidean_equivariant_networks.experiments.nbody import nbody_models as models
from euclidean_equivariant_networks.equivariant_attention.from_se3cnn.SO3 import rot
from euclidean_equivariant_networks.experiments.nbody.nbody_flags import get_flags
def to_np(x):
return x.cpu().detach().numpy()
def get_acc(pred, x_T, v_T, y=None, verbose=True):
acc_dict = {}
pred = to_np(pred)
x_T = to_np(x_T)
v_T = to_np(v_T)
assert len(pred) == len(x_T)
if verbose:
y = np.asarray(y.cpu())
_sq = (pred - y) ** 2
acc_dict['mse'] = np.mean(_sq)
_sq = (pred[:, 0, :] - x_T) ** 2
acc_dict['pos_mse'] = np.mean(_sq)
_sq = (pred[:, 1, :] - v_T) ** 2
acc_dict['vel_mse'] = np.mean(_sq)
return acc_dict
def train_epoch(epoch, model, loss_fnc, dataloader, optimizer, scheduler, FLAGS):
model.train()
loss_epoch = 0
num_iters = len(dataloader)
#wandb.log({"lr": optimizer.param_groups[0]['lr']}, commit=False)
for i, (g, y1, y2) in enumerate(dataloader):
g = g.to(FLAGS.device)
x_T = y1.to(FLAGS.device).view(-1, 3)
v_T = y2.to(FLAGS.device).view(-1, 3)
y = torch.stack([x_T, v_T], dim=1)
optimizer.zero_grad()
# run model forward and compute loss
pred = model(g)
loss = loss_fnc(pred, y)
loss_epoch += to_np(loss)
if torch.isnan(loss):
import pdb
pdb.set_trace()
# backprop
loss.backward()
optimizer.step()
# print to console
if i % FLAGS.print_interval == 0:
print(
f"[{epoch}|{i}] loss: {loss:.5f}")
# log to wandb
if i % FLAGS.log_interval == 0:
# 'commit' is only set to True here, meaning that this is where
# wandb counts the steps
wandb.log({"Train Batch Loss": to_np(loss)}, commit=True)
# exit early if only do profiling
if FLAGS.profile and i == 10:
sys.exit()
        scheduler.step(epoch + i / num_iters)
# log train accuracy for entire epoch to wandb
loss_epoch /= len(dataloader)
wandb.log({"Train Epoch Loss": loss_epoch}, commit=False)
def test_epoch(epoch, model, loss_fnc, dataloader, FLAGS, dT):
model.eval()
keys = ['pos_mse', 'vel_mse']
acc_epoch = {k: 0.0 for k in keys}
acc_epoch_blc = {k: 0.0 for k in keys} # for constant baseline
acc_epoch_bll = {k: 0.0 for k in keys} # for linear baseline
loss_epoch = 0.0
for i, (g, y1, y2) in enumerate(dataloader):
g = g.to(FLAGS.device)
x_T = y1.view(-1, 3)
v_T = y2.view(-1, 3)
y = torch.stack([x_T, v_T], dim=1).to(FLAGS.device)
# run model forward and compute loss
pred = model(g).detach()
loss_epoch += to_np(loss_fnc(pred, y)/len(dataloader))
acc = get_acc(pred, x_T, v_T, y=y)
for k in keys:
acc_epoch[k] += acc[k]/len(dataloader)
# eval constant baseline
bl_pred = torch.zeros_like(pred)
acc = get_acc(bl_pred, x_T, v_T, verbose=False)
for k in keys:
acc_epoch_blc[k] += acc[k]/len(dataloader)
# eval linear baseline
# Apply linear update to locations.
bl_pred[:, 0, :] = dT * g.ndata['v'][:, 0, :]
acc = get_acc(bl_pred, x_T, v_T, verbose=False)
for k in keys:
acc_epoch_bll[k] += acc[k] / len(dataloader)
print(f"...[{epoch}|test] loss: {loss_epoch:.5f}")
wandb.log({"Test loss": loss_epoch}, commit=False)
for k in keys:
wandb.log({"Test " + k: acc_epoch[k]}, commit=False)
wandb.log({'Const. BL pos_mse': acc_epoch_blc['pos_mse']}, commit=False)
wandb.log({'Linear BL pos_mse': acc_epoch_bll['pos_mse']}, commit=False)
wandb.log({'Linear BL vel_mse': acc_epoch_bll['vel_mse']}, commit=False)
class RandomRotation(object):
def __init__(self):
pass
def __call__(self, x):
M = np.random.randn(3, 3)
Q, __ = np.linalg.qr(M)
return x @ Q
def collate(samples):
graphs, y1, y2 = map(list, zip(*samples))
batched_graph = dgl.batch(graphs)
return batched_graph, torch.stack(y1), torch.stack(y2)
def main(FLAGS, UNPARSED_ARGV):
# Prepare data
train_dataset = RIDataset(FLAGS, split='train')
train_loader = DataLoader(train_dataset,
batch_size=FLAGS.batch_size,
shuffle=True,
collate_fn=collate,
num_workers=FLAGS.num_workers,
drop_last=True)
test_dataset = RIDataset(FLAGS, split='test')
# drop_last is only here so that we can count accuracy correctly;
test_loader = DataLoader(test_dataset,
batch_size=FLAGS.batch_size,
shuffle=False,
collate_fn=collate,
num_workers=FLAGS.num_workers,
drop_last=True)
# time steps
assert train_dataset.data['delta_T'] == test_dataset.data['delta_T']
assert train_dataset.data['sample_freq'] == test_dataset.data['sample_freq']
print(f'deltaT: {train_dataset.data["delta_T"]}, '
f'freq: {train_dataset.data["sample_freq"]}, '
f'FLAGS.ri_delta_t: {FLAGS.ri_delta_t}')
dT = train_dataset.data['delta_T'] * train_dataset.data[
'sample_freq'] * FLAGS.ri_delta_t
FLAGS.train_size = len(train_dataset)
FLAGS.test_size = len(test_dataset)
assert len(test_dataset) < len(train_dataset)
model = models.__dict__.get(FLAGS.model)(FLAGS.num_layers, FLAGS.num_channels, num_degrees=FLAGS.num_degrees,
div=FLAGS.div, n_heads=FLAGS.head, si_m=FLAGS.simid, si_e=FLAGS.siend,
x_ij=FLAGS.xij)
#utils_logging.write_info_file(model, FLAGS=FLAGS, UNPARSED_ARGV=UNPARSED_ARGV, wandb_log_dir=wandb.run.dir)
if FLAGS.restore is not None:
model.load_state_dict(torch.load(FLAGS.restore))
model.to(FLAGS.device)
# Optimizer settings
optimizer = optim.Adam(model.parameters(), lr=FLAGS.lr)
scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, FLAGS.num_epochs, eta_min=1e-4)
criterion = nn.MSELoss()
criterion = criterion.to(FLAGS.device)
task_loss = criterion
# Save path
save_path = os.path.join(FLAGS.save_dir, FLAGS.name + '.pt')
# Run training
print('Begin training')
for epoch in range(FLAGS.num_epochs):
torch.save(model.state_dict(), save_path)
print(f"Saved: {save_path}")
train_epoch(epoch, model, task_loss, train_loader, optimizer, scheduler, FLAGS)
test_epoch(epoch, model, task_loss, test_loader, FLAGS, dT)
if __name__ == '__main__':
FLAGS, UNPARSED_ARGV = get_flags()
os.makedirs(FLAGS.save_dir, exist_ok=True)
# Log all args to wandb
#wandb.init(project='equivariant-attention', name=FLAGS.name, config=FLAGS)
#wandb.save('*.txt')
# Where the magic is
try:
main(FLAGS, UNPARSED_ARGV)
except Exception:
import pdb, traceback
traceback.print_exc()
pdb.post_mortem()
|
[
"jtwong71@gmail.com"
] |
jtwong71@gmail.com
|
440db375b41af7127785a5a26f60663d0f298e07
|
8a9b13128e8358afc69364be5ad08c537e278da0
|
/backend/pyserver/pyauth/getUsers.py
|
083693f43f1390eaef6951211ebb4ee519d0fe13
|
[] |
no_license
|
jai2033shankar/smartPi-app
|
e97b9322e1a506a8f977c46f0d7b6d0d411e3c7e
|
9f2f4095dd4b396a4927591af023ef3082480c0a
|
refs/heads/master
| 2022-06-02T02:20:53.744877
| 2020-05-01T19:36:46
| 2020-05-01T19:36:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
import sqlite3
if __name__ == '__main__':
conn = sqlite3.connect('pyauth/pyauthentication.db')
c = conn.cursor()
c.execute("SELECT * FROM User;")
rows = c.fetchall()
for row in rows:
print(row)
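    # commit() is a no-op here since only a SELECT was executed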
conn.commit()
conn.close()
|
[
"stefano.milani96@gmail.com"
] |
stefano.milani96@gmail.com
|
e83d86c7576db3f7eee3604f9de83c1697463f4f
|
7da3c9a7a4f6470b077d03f660341522c8906f5d
|
/2/3Lista.py
|
c6f5d5b4297e6cc29f6049c9696cf84c574354d3
|
[] |
no_license
|
KFranciszek/Python
|
4cda7060936ea9644052a75dfd8aa63eacc17b0a
|
19caa6b4c80745fda2bca6b68ee58c260437b40b
|
refs/heads/master
| 2021-01-20T03:36:51.416134
| 2017-02-17T12:05:35
| 2017-02-17T12:05:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
#[] is an empty list
lista1 = ['Aurelia', 'Amelia' ,'Monika','Asia']
print(lista1)
# inserting into the list with lista.append()
lista1.append('Arielka')
print(lista1)
lista1[-1] = 'Rafal'
print(lista1)
#removing from the list: lista.pop(), lista.pop(position)
# lista.remove(sth)
lista1.pop(0)
print(lista1)
lista1.remove('Asia')
lista2 = ['Aurelia', 'Amelia' ,'Monika','Asia']
raw_input()
|
[
"e.kaczmarek01@gmail.com"
] |
e.kaczmarek01@gmail.com
|
15b977a009a4628376a2913a58323c68ac6574dd
|
172befeabd113a71c6a7adc80b9ec005a29d12d3
|
/UDP_Server.py
|
e0120d83aa639b8999e0adf09b2eb88401d34c35
|
[] |
no_license
|
RyanSandford/Reliable-Data-Transfer-Protocol
|
87efd088cdbeede151abfaca87a0cdbc019cb174
|
c5e2d08fc9e46998fc7bca24488a36af01094056
|
refs/heads/master
| 2022-12-16T05:36:24.261485
| 2020-09-07T16:25:24
| 2020-09-07T16:25:24
| 256,092,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,359
|
py
|
"""
Ryan Sandford, November 10, 2019
This Program implements the server side of the reliable data transfer 3.0
protocol described in section 3.4.1 of Computer Networking: A Top-Down Approach
"""
import binascii
import socket
import struct
import sys
import hashlib
import random
import time
#The servers address and port number
UDP_IP = "127.0.0.1"
UDP_PORT = 5000
unpacker = struct.Struct('I I 8s 32s')
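# Packet layout 'I I 8s 32s': header int (1 for ACKs), sequence number, 8-byte payload, 32-byte hex MD5 checksum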
sendingPort = 6000 # a port to send acks to
#takes in a 3-tuple and returns the checksum for the tuple
def mk_chksum(values):
UDP_Data = struct.Struct('I I 8s')
packed_data = UDP_Data.pack(*values)
return bytes(hashlib.md5(packed_data).hexdigest(), encoding="UTF-8")
#takes in a 4-tuple whose fourth value is the checksum of the first
# 3 entries and returns a pseudo UDP Packet
def mk_packet(values_with_chksum):
UDP_Packet_Data = struct.Struct('I I 8s 32s')
return UDP_Packet_Data.pack(*values_with_chksum)
#Sends a pseudo UDP Packet to the target port, prints a message to the console
def send_pkt(UDP_Packet, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
sock.sendto(UDP_Packet, (UDP_IP, port))
print("Sent packet: ", unpacker.unpack(UDP_Packet))
#Checks if a given pseudo UDP Packet is corrupt by calculating the check sum of the first 3 entries
#and comparing it to the checksum provided in the fourth entry of the tuple
def notcorrupt(UDP_Packet):
chksum = mk_chksum((UDP_Packet[0], UDP_Packet[1], UDP_Packet[2]))
if UDP_Packet[3] == chksum:
print('CheckSums Match, Packet OK')
return True
else:
print('Checksums Do Not Match, Packet Corrupt')
return False
#Switch the expected sequence number from one state to another
def switch_seq(expected_seq):
    if expected_seq == 0:
return 1
else:
return 0
#Given a pseudo UDP Packet and a sequence number, this function returns true if the given packet
# has the given sequence number and false otherwise, prints a message to the console with the results
def has_seq(UDP_Packet,num):
if UDP_Packet[1] == num:
print('Packet has correct sequence number: seq =', num)
return True
else:
print('Packet has incorrect sequence number: seq =', switch_seq(num))
return False
#Simply implements the extract data function referred to in rdt 3.0
def extract(UDP_Packet):
    return UDP_Packet[2]
#Implements the deliver data function in rdt 3.0,
#printing the received data as a string
def deliver(data):
    string = data.decode("utf-8")
    print("Received data:", string + ", successfully delivered upwards")
#Network Delay, 1/3 chance of delaying an ack
def Network_Delay():
if True and random.choice([0,1,0]) == 1:
time.sleep(.01)
print("Packet Delayed ")
else:
print("Packet Sent ")
#Network Loss, 2/5 chance of losing an ack
def Network_Loss():
if True and random.choice([0,1,0,1,0]) == 1:
print("Packet Lost ")
return(1)
else:
return(0)
#Packet corrupter, 2/5 chance of corrupting an ack
def Packet_Checksum_Corrupter(packetdata):
if True and random.choice([0,1,0,1,0]) == 1:
return(b'Corrupt!')
else:
return(packetdata)
#Create the socket and listen
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.bind((UDP_IP, UDP_PORT))
expected_seq = 0 #starting sequence number
ack_msg = b'ACK__ACK' #Standard Ack message that the server will send with each ack
#Listen for client requests
while True:
#Receive Data
data, addr = sock.recvfrom(1024) # buffer size is 1024 bytes
UDP_Packet = unpacker.unpack(data)
print("received from:", addr)
print("received message:", UDP_Packet)
#Check that the received packet is uncorrupted and has the correct sequence number
if notcorrupt(UDP_Packet) and has_seq(UDP_Packet, expected_seq):
#receive and deliver data upwards
data = extract(UDP_Packet)
deliver(data)
#when network loss occurs the ack does not get sent
#otherwise send the correct ack
if not Network_Loss():
chksum = mk_chksum((1, expected_seq, ack_msg))
#2/5 chance of an incorrect checksum being sent
packet = mk_packet((1, expected_seq, ack_msg, Packet_Checksum_Corrupter(chksum)))
#1/3 chance the ack is sent late
Network_Delay()
send_pkt(packet, sendingPort)
expected_seq = switch_seq(expected_seq) #switch states
#packet is corrupt or has wrong seq, send ack with previous states sequence number
else:
if not Network_Loss():
chksum = mk_chksum((1, switch_seq(expected_seq), ack_msg))
# 2/5 chance of an incorrect checksum being sent
packet = mk_packet((1, switch_seq(expected_seq), ack_msg, Packet_Checksum_Corrupter(chksum)))
# 1/3 chance the ack is sent late
Network_Delay()
send_pkt(packet, sendingPort)
print("\n")
#Note Please make sure server is running before client sends requests;
#otherwise you will be met with an infinite timeout loop
|
[
"noreply@github.com"
] |
RyanSandford.noreply@github.com
|
c4d5e4a9d08432307712aa7554bd1f7c8294b134
|
55b4fe0a6616b30c128b51a9918605050ce49f6d
|
/long_migrate_reverb
|
2fda0ad0a588dd11dce3bebc40fc085247a8f0ca
|
[] |
no_license
|
samhaug/ScS_reverb_setup
|
783a4fb7c942a598f18dc6c9e3544aa5e2bbcafe
|
05e96b9f871d25a1e7b5e9284083167993f56cec
|
refs/heads/master
| 2021-01-12T03:35:45.657459
| 2017-06-24T17:24:07
| 2017-06-24T17:24:07
| 78,234,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,952
|
#!/home/samhaug/anaconda2/bin/python
'''
==============================================================================
File Name : migrate_reverb.py
Purpose : Perform a migration to detect reflection coefficients of mid mantle
discontinuities. Must have access to a lookup table, waveform glossary,
data stripped of zeroth-order discontinuities.
See eq (14) of 'A Study of mid-mantle layering beneath the Western Pacific'
1989, Revenaugh & Jordan.
This is similar to migrate_reverb, but should be more efficient and
easier to execute.
Creation Date : 14-03-2017
Last Modified : Tue 14 Mar 2017 11:54:11 AM EDT
Created By : Samuel M. Haugland
==============================================================================
'''
import numpy as np
import obspy
import seispy
import h5py
from matplotlib import pyplot as plt
from scipy.signal import correlate
from scipy.signal import tukey
def main():
wvlt_glossary = h5py.File('/home/samhaug/work1/ScS_reverb_sims/wave_glossary/prem_568_FJ_20160130.h5','r')
lkup = h5py.File('/home/samhaug/work1/ScS_reverb_sims/lookup_tables/NA_prem_568_20160130.h5','r')
st = obspy.read('/home/samhaug/work1/ScS_reverb_sims/mineos/prem_568_FJ/st_T.pk')
st.integrate().detrend().integrate().detrend()
st.interpolate(1)
st.filter('bandpass',freqmax=1/15.,freqmin=1/75.,zerophase=True)
st = seispy.data.align_on_phase(st,phase=['ScSScS'],a_min=False)
#st.differentiate()
st.normalize()
for idx,tr in enumerate(st):
st[idx] = seispy.data.phase_window(tr,phase=['ScSScS'],window=(-400,2400))
idx=3
ones = np.ones(len(st[idx].data))
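    # Inverted Tukey tapers notch out fixed sample windows, presumably muting the zeroth-order phases mentioned in the header.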
ones[387:425] = 1+(-1*tukey(425-387,0.3))
ones[632:669] = 1+(-1*tukey(669-632,0.3))
ones[1299:1343] = 1+(-1*tukey(1343-1299,0.3))
ones[1561:1600] = 1+(-1*tukey(1600-1561,0.3))
ones[2221:2278] = 1+(-1*tukey(2278-2221,0.3))
ones[2466:2524] = 1+(-1*tukey(2524-2466,0.3))
#plt.plot(st[idx].data)
#plt.plot(ones)
#plt.show()
#st[idx].data *= ones
#depth = np.arange(10,2800,2)
#depth = np.arange(900,1000,10)
depth = np.array([670])
stat = st[idx].stats.station
corr_dict,wave_e,wvlt_len = correlate_sig(st[idx],wvlt_glossary)
R_list = []
for h in depth:
h_R = 0
for keys in corr_dict:
ScS2 = lkup[stat+'/ScS2'][:]
lkup_t = lkup[stat+'/'+keys][:]
shift = int(wvlt_len/2.)-58
h_R += find_R(corr_dict[keys],h,lkup_t,ScS2,shift=shift,data=st[idx].data)/wave_e[keys]
R_list.append(h_R)
plt.plot(np.array(R_list),depth,lw=2)
plt.ylim(depth.max(),depth.min())
plt.axhline(220,color='k')
plt.axhline(400,color='k')
plt.axhline(670,color='k')
plt.xlim(-10,10)
plt.grid()
plt.show()
def correlate_sig(tr,wvlt_glos):
corr_dict = {}
wave_e = {}
for keys in wvlt_glos:
wvlt = wvlt_glos[keys]
corr_sig = correlate(tr.data,wvlt,mode='same')
wave_e[keys] = np.dot(wvlt,wvlt)
corr_dict[keys] = corr_sig
return corr_dict,wave_e,len(wvlt)
def find_R(corr_sig,h,lkup,ScS2,**kwargs):
shift = kwargs.get('shift',0)
data = kwargs.get('data',np.zeros(5))
t = lkup[np.argmin(np.abs(lkup[:,0]-h)),1]
ScS2_time = ScS2[np.argmin(np.abs(lkup[:,0]-h)),1]
plot_corr(t,corr_sig,data,ScS2_time,shift)
try:
r = corr_sig[int(t-ScS2_time+400+shift)]
return r
except IndexError:
return 0
def plot_corr(t,corr_sig,data,ScS2_time,shift):
fig,ax = plt.subplots(figsize=(25,6))
ax.plot(corr_sig,lw=2)
ax.plot(data,alpha=0.5,color='k')
ax.axvline(t-ScS2_time+400+shift)
plt.tight_layout()
plt.show()
def denominator(wvlt_glos):
energy = 0
for keys in wvlt_glos:
energy += np.dot(wvlt_glos[keys][...],wvlt_glos[keys][...])
return energy
main()
|
[
"samhaug@umich.edu"
] |
samhaug@umich.edu
|
|
a7c4eea0ecde8bbaacf7d180023f3c10d8fa1093
|
9facfc32e57d6ada78f985d733fe743776eace6c
|
/py1.py
|
f4e6b3aaa385ca91f7c9de3bc90ea4afff415a7b
|
[] |
no_license
|
hadiuzzaman83/Python_Basic
|
2241737a644e945f0ed26a4d01ed6e0e5d75896b
|
6b7b649f7c16bfdf0c939a0b1d56cea0b4d8938a
|
refs/heads/main
| 2023-05-25T08:19:27.452142
| 2021-06-05T17:17:42
| 2021-06-05T17:17:42
| 370,715,429
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
name="Sajib"
age=23
print("My name is "+ name)
print(name + " lives in Dhaka")
print("I am ",age,"years old")
|
[
"noreply@github.com"
] |
hadiuzzaman83.noreply@github.com
|
f93504628b64047d8caf1a1e51b1e51ec623fdb6
|
4c7edea64557713f578589bcb37194aa11298037
|
/blog/admin.py
|
e75b53032d8869e7a52bc553a2da987d57401507
|
[] |
no_license
|
andmichalski/simple_blog
|
f9eaed52bdf5d6d0def7fd6d06d64a5a1e3b5574
|
70a4bce58fb37a26001c7f7408b960627011418f
|
refs/heads/master
| 2020-03-17T09:58:31.561788
| 2018-05-15T09:50:44
| 2018-05-15T09:50:44
| 133,495,385
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Post, Comment
# Register your models here.
admin.site.register(Post)
admin.site.register(Comment)
|
[
"amich@PGI.LOCAL"
] |
amich@PGI.LOCAL
|
f75a8d1e069137fda3e6800c5dad62c04b1af9f1
|
9abe6ee7a6f2abe977ff56ef7a1146cede60d130
|
/tools/viewer/watcher.py
|
d8f990759a0b959c46147bb4ece25654ac041c6b
|
[] |
no_license
|
nitk-pm/Procon
|
cd806eb6256c7da5ee3d2e6e206b1ca0f8f14df2
|
aca6c9a783b81349df5d04b289debc75ae1f0315
|
refs/heads/develop
| 2022-04-05T15:30:52.868403
| 2020-01-15T09:20:10
| 2020-01-15T09:20:10
| 41,779,698
| 0
| 3
| null | 2020-01-15T09:20:12
| 2015-09-02T04:24:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,610
|
py
|
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
import subprocess
class Handler(FileSystemEventHandler):
callback_list = []
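    # NOTE: callback_list is a class attribute, so it is shared by every Handler instance.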
def handler(self, func, *args):
return func(*args)
def run_command(self, event):
if event.is_directory:
return
if event.src_path.rsplit('.', 1)[-1] != 'png':
return
data = subprocess.run(
['zbarimg', event.src_path],
stdout=subprocess.PIPE
)
for callback_list in self.callback_list:
self.handler(
callback_list,
event.src_path,
self.shaping_data(data.stdout.decode('utf-8'))
)
def on_created(self, event):
self.run_command(event)
def on_moved(self, event):
self.run_command(event)
def on_modified(self, event):
self.run_command(event)
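    # Reassemble multi-QR payloads: each decoded fragment starts with its declared count; the one
    # whose count does not match its shape count goes last, and counts are summed into 'total:shapes'.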
def shaping_data(self, data):
data_list = [d.replace('QR-Code:', '') for d in data.splitlines()]
head = []
tail = None
total = 0
for d in data_list:
num_str, shapes = d.split(':', 1)
num = int(num_str)
shapes_num = len(shapes.split(':'))
total += num
if num != shapes_num:
tail = shapes
else:
head.append(shapes)
head.append(tail)
return '{}:{}'.format(total, ':'.join(head))
class Watcher(object):
def __init__(self, callback=None):
self.observer = Observer()
self.handler = Handler()
self.watch = None
if callback is not None:
self.register_callback(callback)
def register_callback(self, func):
self.handler.callback_list.append(func)
def start(self, path):
if self.watch is None:
self.watch = self.observer.schedule(
self.handler,
path,
recursive=True
)
self.observer.start()
def stop(self):
if self.watch is not None:
self.observer.stop()
self.observer.join()
self.watch = None
class Test(object):
def callback(self, data):
print(data)
if __name__ == '__main__':
test = Test()
watcher = Watcher()
watcher.register_callback(test.callback)
watcher.start('./')
try:
import time
while True:
time.sleep(1)
except:
watcher.stop()
|
[
"st16423@kagawa.kosen-ac.jp"
] |
st16423@kagawa.kosen-ac.jp
|
c01ec44f472ccf75f5c30d40679c61081519651b
|
d8ab8493671362787a6adc7de6994dff733db086
|
/venv/bin/wheel
|
934f24afaf55548ebae12bc0c72e08b230381114
|
[] |
no_license
|
sean7218/NFL-ELO-Backend
|
da5846864b3dfe7ddc6a063f83c3bec138641ab5
|
d5db1e16553df3211b51dd19d285ef905a325399
|
refs/heads/master
| 2021-05-16T05:01:26.105453
| 2017-11-03T01:52:17
| 2017-11-03T01:52:17
| 106,234,956
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
#!/Users/mpb15sz/apps/nfl-elo/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"sean7218@gmail.com"
] |
sean7218@gmail.com
|
|
8987b5c868a3bdb413b4653d0418dcb4533fe615
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/tests/unit/utils/test_proxy.py
|
1c9a34ede319499d1b3092e79566219d1481ff35
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,403
|
py
|
"""
Unit tests for salt.utils.proxy
:codeauthor: :email:`Gareth J. Greenaway <gareth@saltstack.com>`
"""
import salt.utils.proxy
from tests.support.mock import patch
from tests.support.unit import TestCase
class ProxyUtilsTestCase(TestCase):
def test_is_proxytype_true(self):
opts = {
"proxy": {
"proxytype": "esxi",
"host": "esxi.domain.com",
"username": "username",
"passwords": ["password1"],
}
}
with patch("salt.utils.platform.is_proxy", return_value=True, autospec=True):
ret = salt.utils.proxy.is_proxytype(opts, "esxi")
self.assertTrue(ret)
def test_is_proxytype_false(self):
opts = {
"proxy": {
"proxytype": "esxi",
"host": "esxi.domain.com",
"username": "username",
"passwords": ["password1"],
}
}
with patch("salt.utils.platform.is_proxy", return_value=True, autospec=True):
ret = salt.utils.proxy.is_proxytype(opts, "docker")
self.assertFalse(ret)
def test_is_proxytype_not_proxy(self):
opts = {}
with patch("salt.utils.platform.is_proxy", return_value=False, autospec=True):
ret = salt.utils.proxy.is_proxytype(opts, "docker")
self.assertFalse(ret)
|
[
"dan.woz@gmail.com"
] |
dan.woz@gmail.com
|
069d3d012ad1d98276a8559e2436bc06aed458f7
|
0e7243d4d77e6c36ee9905bbbd4de92c322f49b7
|
/app/views.py
|
3892f0cf2a805986ae234ed6673732f21c533ed1
|
[] |
no_license
|
meru86/django-book-search
|
c64ba1cdee504db17cff3b924681cb4369dac16a
|
d909db122b4527f2e760545a8205c25eb6343edc
|
refs/heads/main
| 2023-03-31T20:35:43.069501
| 2021-03-31T04:12:12
| 2021-03-31T04:12:12
| 352,857,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,919
|
py
|
from django.shortcuts import render, redirect # added redirect
from django.views.generic import View
from .forms import SearchForm
import json
import requests
from django.http.response import HttpResponse
SEARCH_URL = 'https://app.rakuten.co.jp/services/api/BooksBook/Search/20170404?format=json&applicationId=1004224127963110171'
# Copy the URL from Rakuten (https://webservice.rakuten.co.jp/api/booksbooksearch/) and paste it here
# Append 'format=json' to the end of the URL so the response comes back as JSON
# Parameters added after '&' can be used to filter the returned items
# The application ID is given after the '&'
def get_api_data(params):
api = requests.get(SEARCH_URL, params=params).text
result = json.loads(api)
items = result['Items']
return items
class CallbackView(View):
def get(self, request, *args, **kwargs):
return HttpResponse('OK')
class IndexView(View):
def get(self, request, *args, **kwargs):
form = SearchForm(request.POST or None)
        return render(request, 'app/index.html', { # pass the data to the chosen template
'form': form,
})
def post(self, request, *args, **kwargs):
form = SearchForm(request.POST or None)
if form.is_valid():
keyword = form.cleaned_data['title']
params = {
'title' : keyword,
'hits' : 28,
}
items = get_api_data(params)
book_data = []
for i in items:
item = i['Item']
title = item['title']
image = item['largeImageUrl']
isbn = item['isbn']
query = {
'title' : title,
'image' : image,
'isbn' : isbn,
}
book_data.append(query)
            return render(request, 'app/book.html', { # pass book_data and keyword to the chosen template
'book_data': book_data,
'keyword': keyword,
})
        return render(request, 'app/index.html', { # pass the data to the chosen template
'form': form,
})
class DetailView(View):
def get(self, request, *args, **kwargs):
        isbn = self.kwargs['isbn'] # pull the isbn out of the URL kwargs
params = {
'isbn': isbn
}
        items = get_api_data(params) # passing the isbn to get_api_data fetches that specific book's data
items = items[0]
        item = items['Item'] # get the item data
        # the names of the available fields are listed in the API manual; refer to it
title = item['title']
image = item['largeImageUrl']
author = item['author']
itemPrice = item['itemPrice']
salesDate = item['salesDate']
publisherName = item['publisherName']
size = item['size']
isbn = item['isbn']
itemCaption = item['itemCaption']
itemUrl = item['itemUrl']
reviewAverage = item['reviewAverage']
reviewCount = item['reviewCount']
        # store the fetched data in book_data as a dictionary
book_data = {
'title': title,
'image': image,
'author': author,
'itemPrice': itemPrice,
'salesDate': salesDate,
'publisherName': publisherName,
'size': size,
'isbn': isbn,
'itemCaption': itemCaption,
'itemUrl': itemUrl,
'reviewAverage': reviewAverage,
'reviewCount': reviewCount,
'average': float(reviewAverage) * 20
}
return render(request, 'app/detail.html' , {
'book_data': book_data
})
|
[
"haru@kawanoharuyanoMacBook-Air.local"
] |
haru@kawanoharuyanoMacBook-Air.local
|
570bdc2ff7f7f54f3c4cca452f7c71c566753459
|
2009e5cdc3851290aefb751feaa93a6f5336f563
|
/the-data-science-process/AllTogether.py
|
d5b558fcb8fbad82698f0f5f264ff6499266c1e2
|
[] |
no_license
|
huli/dsnd
|
50c4b1af7d13b80845831cc1e7ecc03ae959a79c
|
466e16c3dca5f17dd5fce256238a1fa3b41b88f3
|
refs/heads/master
| 2020-04-07T18:18:47.014802
| 2019-03-03T11:42:19
| 2019-03-03T11:42:19
| 158,605,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,431
|
py
|
import pandas as pd
import numpy as np
from collections import defaultdict
import AllTogetherSolns as s
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_squared_error
import matplotlib.pyplot as plt
## Putting It All Together
#Helper functions
def clean_fit_linear_mod(df, response_col, test_size=.3, rand_state=42):
'''
INPUT:
df - a dataframe holding all the variables of interest
response_col - a string holding the name of the column
test_size - a float between [0,1] about what proportion of data should be in the test dataset
rand_state - an int that is provided as the random state for splitting the data into training and test
OUTPUT:
X - cleaned X matrix (dummy and mean imputation)
y - cleaned response (just dropped na)
test_score - float - r2 score on the test data
    train_score - float - r2 score on the train data
lm_model - model object from sklearn
X_train, X_test, y_train, y_test - output from sklearn train test split used for optimal model
This function cleans the data and provides the necessary output for the rest of this notebook.
'''
#Dropping where the salary has missing values
df = df.dropna(subset=['Salary'], axis=0)
#Drop columns with all NaN values
df = df.dropna(how='all', axis=1)
#Pull a list of the column names of the categorical variables
cat_df = df.select_dtypes(include=['object'])
cat_cols = cat_df.columns
#dummy all the cat_cols
for col in cat_cols:
df = pd.concat([df.drop(col, axis=1), pd.get_dummies(df[col], prefix=col, prefix_sep='_', drop_first=True, dummy_na=True)], axis=1)
# Mean function
fill_mean = lambda col: col.fillna(col.mean())
# Fill the mean
df = df.apply(fill_mean, axis=0)
#Split into explanatory and response variables
X = df.drop(response_col, axis=1)
y = df[response_col]
#Split into train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=rand_state)
lm_model = LinearRegression(normalize=True) # Instantiate
lm_model.fit(X_train, y_train) #Fit
#Predict using your model
y_test_preds = lm_model.predict(X_test)
y_train_preds = lm_model.predict(X_train)
#Score using your model
test_score = r2_score(y_test, y_test_preds)
train_score = r2_score(y_train, y_train_preds)
return X, y, test_score, train_score, lm_model, X_train, X_test, y_train, y_test
def find_optimal_lm_mod(X, y, cutoffs, test_size = .30, random_state=42, plot=True):
'''
INPUT
X - pandas dataframe, X matrix
y - pandas dataframe, response variable
cutoffs - list of ints, cutoff for number of non-zero values in dummy categorical vars
test_size - float between 0 and 1, default 0.3, determines the proportion of data as test data
random_state - int, default 42, controls random state for train_test_split
    plot - boolean, default True, True to plot result
OUTPUT
r2_scores_test - list of floats of r2 scores on the test data
r2_scores_train - list of floats of r2 scores on the train data
lm_model - model object from sklearn
X_train, X_test, y_train, y_test - output from sklearn train test split used for optimal model
'''
r2_scores_test, r2_scores_train, num_feats, results = [], [], [], dict()
for cutoff in cutoffs:
#reduce X matrix
reduce_X = X.iloc[:, np.where((X.sum() > cutoff) == True)[0]]
num_feats.append(reduce_X.shape[1])
#split the data into train and test
X_train, X_test, y_train, y_test = train_test_split(reduce_X, y, test_size = test_size, random_state=random_state)
#fit the model and obtain pred response
lm_model = LinearRegression(normalize=True)
lm_model.fit(X_train, y_train)
y_test_preds = lm_model.predict(X_test)
y_train_preds = lm_model.predict(X_train)
# append the r2 value from the test set
test_score = r2_score(y_test, y_test_preds)
r2_scores_test.append(test_score)
train_score = r2_score(y_train, y_train_preds)
r2_scores_train.append(train_score)
print('Validating with %s features (Train/Test): %.4f, %.4f (R2-score)' % (reduce_X.shape[1], train_score, test_score))
results[str(cutoff)] = r2_score(y_test, y_test_preds)
if plot:
plt.plot(num_feats, r2_scores_test, label="Test", alpha=.5)
plt.plot(num_feats, r2_scores_train, label="Train", alpha=.5)
plt.ylim((-1,1))
plt.xlabel('Number of Features')
plt.ylabel('Rsquared')
plt.title('Rsquared by Number of Features')
plt.legend(loc=1)
plt.show()
best_cutoff = max(results, key=results.get)
#reduce X matrix
reduce_X = X.iloc[:, np.where((X.sum() > int(best_cutoff)) == True)[0]]
num_feats.append(reduce_X.shape[1])
#split the data into train and test
X_train, X_test, y_train, y_test = train_test_split(reduce_X, y, test_size = test_size, random_state=random_state)
#fit the model
lm_model = LinearRegression(normalize=True)
lm_model.fit(X_train, y_train)
return r2_scores_test, r2_scores_train, lm_model, X_train, X_test, y_train, y_test
#Question 1
def q1_piat_answer():
'''
Prints the correct order of the letters in the format portion of the string
'''
print("This one is tricky - here is the order of the letters for the solution we had in mind:\n c, g, c, d, c, e, f, b, a, h")
#Question 2
def q2_piat_check(q2_piat):
'''
INPUT
q2_piat - a dictionary
Prints statement related to the correctness of q2_piat
'''
if q2_piat == s.q2_piat:
print("Nice job! That looks right! These two techniques are really common in Machine Learning algorithms to combat overfitting. Though the first technique could be useful, it is not likely to help us right away with our current model. These additional features would likely continue to worsen the nature of overfitting we are seeing here.")
elif q2_piat['add interactions, quadratics, cubics, and other higher order terms'] != s.q2_piat['add interactions, quadratics, cubics, and other higher order terms']:
print("In this case, it is not likely that having more complex features will help us. The model is already forming too complex of a relationship to generalize to new data.")
elif q2_piat['fit the model many times with different rows, then average the responses'] != s.q2_piat['fit the model many times with different rows, then average the responses']:
print("Fitting the model on different rows and ctually a common technique for combatting overfitting. It relates to an idea known as bootstrapping.")
elif q2_piat['subset the features used for fitting the model each time'] != s.q2_piat['subset the features used for fitting the model each time']:
print("Subsetting the features is actually a common way to combat overfitting. This type of feature reduction is done in stochastic gradient methods related to gradient boosting and random forest methods.")
elif q2_piat['this model is hopeless, we should start over'] != s.q2_piat['this model is hopeless, we should start over']:
print("Don't give up hope! We are just getting started!")
#Question 4
def q4_piat_check(q4_piat):
'''
INPUT
q4_piat - a dictionary
Prints statement related to the correctness of q4_piat
'''
if q4_piat == s.q4_piat:
print("Nice job! That looks right! We can see that the model we should impement was the 6th model using 1088 features. It is the model that has the best test rsquared value.")
elif q4_piat['The optimal number of features based on the results is'] != s.q4_piat['The optimal number of features based on the results is']:
print("Oops! That isn't right for the optimal number of features. You can get this as the number of columns in either the training or testing datasets. Note, this is different than the inputs, as they are checking the threshold for the number of missing values in a column, not a threshold for the number of features.")
elif q4_piat['The model we should implement in practice has a train rsquared of'] != s.q4_piat['The model we should implement in practice has a train rsquared of'] or q4_piat['The model we should implement in practice has a test rsquared of'] != s.q4_piat['The model we should implement in practice has a test rsquared of']:
print("The rsquared values don't look right. The optimal model should be the model that performed the best on the test data. The rsquared values should be the rsquared for the training and test sets of data using the same, best model based on the test data.")
elif q4_piat['If we were to allow the number of features to continue to increase'] != s.q4_piat['If we were to allow the number of features to continue to increase']:
print("If you were to allow the number of features to increase, you likely would see the same trend you can see in the visual. That is the test data will continue to provide worse and worse rsquared values, while the training data would go towards 1.")
#Question 5
def q5_piat_check(q5_piat):
'''
INPUT
q5_piat - a dictionary
Prints statement related to the correctness of q5_piat
'''
if q5_piat == s.q5_piat:
print("Nice job! That looks right! The country and years of experience both seem to have a significant impact on the salary of individuals.")
else:
print("Oops! It appears that country and years of experience are indicators of salary values. However, gender columns did not appear in the top 20 features. Additionally, the years of programming didn't follow an always increasing order. Therefore, it wasn't necessarily the case that longer you have programmed leads to higher salary based on the data.")
if __name__ == '__main__':
df = pd.read_csv('../stackoverflow/survey_results_public.csv')
schema = pd.read_csv('../stackoverflow/survey_results_schema.csv')
num_rows = df.shape[0]
num_cols = df.shape[1]
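    # check_rows_cols, mean_amt and possible_vals are not defined in this file; presumably they live in the solutions module imported above as s.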
check_rows_cols(num_rows, num_cols)
df_all = mean_amt(df, 'CousinEducation', 'Salary', possible_vals)
# To get a simple answer to our questions - see these two tables.
df_all.sort_values('mean_col', ascending=False)
|
[
"christoph.hilty@garaio.com"
] |
christoph.hilty@garaio.com
|
aa25a83f11b72a1e811b73960ddfc04ac5868160
|
a14548b6d2f9f7c7ada05987108f4b9c250d2f5e
|
/mjuzik/mjuzik/genres/admin.py
|
de61401aa0d0a2f1dbbc42c2139783ac964b6b1d
|
[] |
no_license
|
DylanGuedes/mjuzik
|
c935d73c899c5a81ed51e31a6501f98f7612bf79
|
f0734143a75ac1f2ace9506fc84804de42deb6a8
|
refs/heads/master
| 2021-01-09T20:13:03.381099
| 2016-07-07T14:25:31
| 2016-07-07T14:25:31
| 60,437,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
from django.contrib import admin
from mjuzik.recommendations.models import Recommendation
class RecommendationAdmin(admin.ModelAdmin):
pass
admin.site.register(Recommendation, RecommendationAdmin)
|
[
"djmgguedes@gmail.com"
] |
djmgguedes@gmail.com
|
77199abd59ed03cca2c93f1fe1e8a4bbdc5b13f2
|
fe38698a14e50253cccf69231a6319e399af1b5e
|
/week1/check_brackets_in_code/run_test.py
|
de45539adb77b0877fc30a6c158bcde15dbdccd9
|
[] |
no_license
|
Light0617/data_structure_coding
|
59d2eeeba9c01a9c24b2ace606b439c473ce22de
|
a3d4a2775c6af47bf03763cdd8a973ad74efe8ea
|
refs/heads/master
| 2020-06-15T11:34:19.124511
| 2019-07-06T08:43:00
| 2019-07-06T08:43:00
| 195,288,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
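# Shell loop: zero-pad each test number, run check_brackets.py on tests/$i, and diff the output against the expected tests/$i.a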
for i in {1..54}
do
if [ "$i" -lt 10 ]; then
i='0'$i
fi
echo "Welcome $i times"
python check_brackets.py < tests/$i > out
diff -w out tests/$i.a
done
|
[
"light11470617@gmail.com"
] |
light11470617@gmail.com
|
f2e6aecedabf15bedeef900e7009d3ced8914615
|
ebd8f1d72b5871f1537e357825872530d2c4d0ca
|
/Example.py
|
023b3df843fa985df7ddb0c03639c237853a167f
|
[] |
no_license
|
jcvdev/Sim6502
|
e642bb6b8a849bc728cdfdf2d6122fb1172d56ab
|
7eee1b127e3db7dbd6a492331501b585a8c4414b
|
refs/heads/master
| 2023-06-07T19:56:22.060923
| 2023-05-27T04:49:26
| 2023-05-27T05:09:59
| 465,558,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,165
|
py
|
import CPU.MMU as MMU
import CPU.Registers as Registers
import CPU.AddressDispatcher as AddressDispatcher
import CPU.Writeback as Writeback
import CPU.ExecutionUnit as ExecutionUnit
import CPU.Dispatch as Dispatch
import CPU.InstructionDecoder as Decoder
import Devices.GenericRAM as GenericRAM
import Devices.GenericROM as GenericROM
if __name__ == "__main__":
RAM = GenericRAM.GenericRAM(0x0000, 0x8000)
ROM = GenericROM.GenericROM(0xc000, 0x4000)
BRK = SimulatorBRK.SimulatorBRK()
ROM.loadFromFile("firmware.bin")
mmu = MMU.MMU()
mmu.addDevice(BRK) # Must be added first to handle 0xfffe address before any other device
mmu.addDevice(RAM)
mmu.addDevice(ROM)
registers = Registers.RegisterBank()
addrDispatch = AddressDispatcher.AddressDispatcher(mmu, registers)
execDispatch = ExecutionUnit.ExecutionDispatcher(mmu, registers)
writebackDispatch = Writeback.Dispatcher(mmu, registers)
decoder = Decoder.Decoder()
cpu = Dispatch.Dispatcher(decoder, addrDispatch, execDispatch, writebackDispatch, mmu, registers)
cpu.reset()
while True:
cpu.dispatch(flagTrace=True, throttleSecs=0.1)
|
[
"devlink+github@gmail.com"
] |
devlink+github@gmail.com
|
731c0a69dd82a9a4ce9c3ed2a0170d6d06ec5b48
|
339901caa0cbb3bd2762ad83bb9f847c01b0df39
|
/rice_3k_mPing_scripts/Assembly_and_classification_of_mPing_sequences/Get_List_Fastq.py
|
e8fa1034ad4f0a5579ecc1ac896d46fc4f380ab4
|
[] |
no_license
|
stajichlab/Dynamic_rice_publications
|
e592e83a4842eff7354e06e5368e6f7590b472ee
|
93ac8732d64b7ab4831a0b0b9b1593efc5814805
|
refs/heads/master
| 2020-03-24T05:10:56.815367
| 2020-02-11T07:26:17
| 2020-02-11T07:26:17
| 142,477,743
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,998
|
py
|
#!/opt/Python/2.7.3/bin/python
import sys
from collections import defaultdict
import numpy as np
import re
import os
import argparse
import glob
from Bio import SeqIO
sys.path.append('/rhome/cjinfeng/BigData/software/ProgramPython/lib')
from utility import gff_parser, createdir
def usage():
test="name"
message='''
python Get_List_Fastq.py --list reads.list --fastq reads.fq
Given a list of read names and a fastq file, write the fastq records whose names appear in the list into a new fastq named after the list prefix.
'''
print message
def runjob(script, lines):
    cmd = 'perl /rhome/cjinfeng/BigData/software/bin/qsub-slurm.pl --maxjob 60 --lines %s --interval 120 --task 1 --mem 15G --time 100:00:00 --convert no %s' %(lines, script)
#print cmd
os.system(cmd)
#repeat Chr1:38668855..38668857 Left_supporting_reads ERR068809.5052702,ERR068809.7020963,ERR068809.10628656
def locus_reads_list(infile):
data = defaultdict(lambda : list())
with open (infile, 'r') as filehd:
for line in filehd:
line = line.rstrip()
if len(line) > 2:
unit = re.split(r'\t',line)
unit[1] = re.sub(r'\.\.', r'_', unit[1])
unit[1] = re.sub(r':', r'_', unit[1])
locus = '%s_%s' %(unit[0], unit[1])
print line, locus
if len(unit) < 4:
continue
reads = re.split(r',', unit[3])
for read in reads:
data[locus].append(re.split(r':', read)[0])
prefix = re.sub(r'.list', r'', infile)
for locus in data.keys():
ofile = open('%s.%s.list' %(prefix, locus), 'w')
print >> ofile, '\n'.join(list(set(data[locus])))
ofile.close()
def get_fastq_seq_by_list(fastqfile, id_list, prefix):
ofile = open('%s.fq' %(prefix), 'w')
for record in SeqIO.parse(fastqfile, "fastq"):
#print 'id:', record.id
#print 'seq:', record.seq
unit = re.split(r':', str(record.id))
record.id = unit[0]
if id_list.has_key(re.sub(r'read1.', r'', unit[0])) or id_list.has_key(re.sub(r'read2.', r'', unit[0])):
SeqIO.write(record, ofile, 'fastq')
ofile.close()
def read_list(infile):
data = defaultdict(lambda : str())
with open (infile, 'r') as filehd:
for line in filehd:
line = line.rstrip()
if len(line) > 2:
unit = re.split(r'\t',line)
data[unit[0]] = 1
return data
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--list')
parser.add_argument('-f', '--fastq')
parser.add_argument('-v', dest='verbose', action='store_true')
args = parser.parse_args()
try:
len(args.list) > 0
except:
usage()
sys.exit(2)
id_list = read_list(args.list)
get_fastq_seq_by_list(args.fastq, id_list, re.sub(r'.list', r'', args.list))
if __name__ == '__main__':
main()
|
[
"jinfeng7chen@gmail.com"
] |
jinfeng7chen@gmail.com
|
0e3633f1452ca7bf144981a26b3e2ec8af8f32ce
|
0262964c4fba7b7664e2702bbb9c19cf3b19c607
|
/dashboard/views.py
|
f147118b55dbbae9e6c8632afa44c43c7011185a
|
[
"MIT"
] |
permissive
|
OmkarPathak/Garbage-Level-Monitoring-System
|
13892ddd6c72c8e372659e29af65f626c94185a5
|
302294a22739f88ed6af5f44ac6e7fe3da61267d
|
refs/heads/master
| 2021-01-22T02:57:42.619983
| 2018-10-05T03:14:11
| 2018-10-05T03:14:11
| 102,256,002
| 6
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,732
|
py
|
from django.contrib.auth import authenticate, login, logout
from django.shortcuts import render, redirect
from .forms import UserLoginForm
from django.contrib.auth.decorators import login_required
from api.models import Readings, Dustbins
# Create your views here.
# for logging in a user
def login_view(request):
form = UserLoginForm(request.POST or None)
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(username=username, password=password)
login(request, user)
# print(request.user.is_authenticated())
return redirect('/dashboard')
if request.user.is_authenticated():
return redirect('/dashboard')
return render(request, 'login_form.html', {'form': form})
@login_required(login_url='/login/')
def dashboard_view(request):
# latest_record = Readings.objects.order_by('-recorded_on')
latest_record = Readings.objects.raw('SELECT * FROM api_readings WHERE (`recorded_on`) IN (SELECT MAX(`recorded_on`) FROM api_readings GROUP BY `dustbin_id`) ORDER BY dustbin_id ASC, recorded_on DESC')
filled = []
empty = []
for rows in latest_record:
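        # NOTE: int(rows) assumes the Readings model converts to its fill level; rows.level (as used in details_view) may be what was intended.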
filled.append(int(rows))
empty.append(100 - int(rows))
return render(request, 'dashboard/index.html', {'latest_record': latest_record , 'filled': filled, 'empty': empty })
@login_required(login_url='/login/')
def details_view(request, dustbin_id):
location = Dustbins.objects.get(id=dustbin_id)
# print(get_record.dustbin_id)
get_record = Readings.objects.filter(dustbin_id=dustbin_id).order_by('-recorded_on')[0]
level = get_record.level
dustbin_id = get_record.dustbin_id
empty = 100 - int(level)
recorded_on = get_record.recorded_on
location = location.location_name
context = {}
context['level'] = level
context['dustbin_id'] = dustbin_id
context['empty'] = empty
context['recorded_on'] = recorded_on
context['location'] = location
return render(request, 'dashboard/details.html', context)
# for registering a user
# def register_view(request):
# form = UserRegisterForm(request.POST or None)
# if form.is_valid():
# user = form.save(commit=False)
# username = form.cleaned_data.get('username')
# password = form.cleaned_data.get('password')
# user.set_password(password)
# user.save()
# # this is required step before login
# user = authenticate(username=username, password=password)
# login(request, user)
# return render(request, 'register_form.html', {'form': form})
def logout_view(request):
logout(request)
return redirect('/login/')
|
[
"omkarpathak27@gmail.com"
] |
omkarpathak27@gmail.com
|
308235a801eb4ad22f412b3d518b7cf66cb12145
|
cd62043772fe4ea91d6d5fd53698c1945206db99
|
/tests/test_examples/check_examples_2.py
|
0d453313d4ad62d13da775159cc78ffb52818ea0
|
[
"MIT"
] |
permissive
|
akapkotel/arcade
|
0808a75640d93aa24c884cc56b200f78c3487d5e
|
6e43ec53e7bfa3dee1aa574404794e3695aad381
|
refs/heads/development
| 2022-06-01T01:01:09.906590
| 2022-05-18T19:10:33
| 2022-05-18T19:10:33
| 246,819,723
| 0
| 0
|
NOASSERTION
| 2020-08-30T11:40:42
| 2020-03-12T11:42:46
| null |
UTF-8
|
Python
| false
| false
| 1,144
|
py
|
import re
import os
from pathlib import Path
def get_references_in_index():
txt = Path('../../doc/examples/index.rst').read_text()
references_in_index = re.findall(":ref:`(.*)`", txt)
return references_in_index
def get_references_in_rsts():
mypath = Path("../../doc/examples/")
    # Gather the reference targets defined in each example .rst file
python_example_filename_list = []
references = []
filenames = os.listdir(mypath)
for filename in filenames:
if filename.endswith(".rst") and filename != "index.rst":
python_example_filename_list.append(filename)
txt = Path(mypath / filename).read_text()
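            # '.. _label:' lines define the reStructuredText reference targets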
reference = re.findall("\.\. _(.*):", txt)
references.extend(reference)
return references
def main():
references_in_index = get_references_in_index()
files_to_reference = get_references_in_rsts()
for reference in files_to_reference:
        if reference not in references_in_index:
            print(f"index.rst is missing a mention of '{reference}'")
print("Done with checking to make sure references in doc/examples/*.rst are in doc/examples/index.rst")
main()
|
[
"paul@cravenfamily.com"
] |
paul@cravenfamily.com
|
e21c5e8c9538a19c1caf978135ce398754df0c7f
|
bd70f8d2dc1f7eee0974b3e280fb61773e6700db
|
/scrape/utils.py
|
8f3002684d767508adca661e0b9b0b979e942550
|
[] |
no_license
|
MullerAC/ln-calendar-scraper
|
1c62eb14946b4d48b790072e8a4b416a70f12ba8
|
26736633c9758e32d0f760a9c47de91b606f72d7
|
refs/heads/main
| 2023-04-21T11:59:17.044472
| 2021-05-07T21:13:19
| 2021-05-07T21:13:19
| 359,983,628
| 1
| 0
| null | 2021-05-07T21:13:20
| 2021-04-21T00:09:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
"""
TODO
add methods useful for scraping and posting data:
- run all scrapers and compile into pandas dataframe
- add MAL links from archive
- check for broken links (any) or missing store links
- print out pandas dataframe into reddit markup
- pull existing table from Reddit wiki and run a compare
- post to staging wiki
"""
def get_format(format_input):
physical_words = ['hardcover', 'hc', 'paperback', 'physical', 'tpb']
digital_words = ['digital', 'ebook', 'epub', 'mobi', 'pdf']
audio_words = ['audio', 'audiobook']
physical = any(word.casefold() in format_input.casefold() for word in physical_words)
digital = any(word.casefold() in format_input.casefold() for word in digital_words)
audio = any(word.casefold() in format_input.casefold() for word in audio_words)
    format_output = []
    if physical:
        format_output.append('Physical')  # append whole words; '+=' would splice single characters into the list
    if digital:
        format_output.append('Digital')
    if audio:
        format_output.append('Audio')
    if not format_output:
        print('Could not find format for: ', format_input)
        return 'Other'
    else:
        return ' & '.join(format_output)
|
[
"andrew.muller@utexas.edu"
] |
andrew.muller@utexas.edu
|
da36a5a68d39e2e71b2ce3fcab2337b3b03dc780
|
12c0f3ab05b45cc8a184708f1cc1142921b627fe
|
/gmail_feed_atom.py
|
833f5d07bb2d640e5f8fc1e8c338587680b05edc
|
[] |
no_license
|
qingzh/learnPy
|
b245c0b51fadbae7c9276af98f2ccca7644a0ded
|
b1ae7472f33153febf701ce6a6d12e57a8d4a7b1
|
refs/heads/master
| 2016-09-15T14:34:34.012966
| 2016-03-12T04:59:13
| 2016-03-12T04:59:13
| 31,577,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,759
|
py
|
# -*- coding: utf-8 -*-
'''
https://g33k.wordpress.com/category/google/
Swaroop posted a nifty Perl script to check GMail. The script basically parses an Atom feed of the latest 20 mails provided by Google. Since a Python hacker like Swaroop is dabbling in Perl, I thought it was my duty as a Python evangelist (or is it Pythangelist?) to show the people that the same thing can be achieved using Python with equal ease :) The main code is around 50% of the total code. A large portion of the code is used for the pretty printing. Here it is —
'''
# check-gmail.py -- A command line util to check GMail -*- Python -*-
# ======================================================================
# Copyright (C) 2006 Baishampayan Ghose <b.ghose@ubuntu.com>
# Time-stamp: Mon Jul 31, 2006 20:45+0530
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
# ======================================================================
'''
sample output —
ghoseb@trinka:~$ python check-gmail.py
Enter username for New mail feed at mail.google.com: foo.bar
Enter password for foo.bar in New mail feed at mail.google.com:
Gmail - Inbox for foo.bar@gmail.com
You have 20 new mails
+------------------------------------------------------------------------------------+
| Sl.| Subject | Author |
+------------------------------------------------------------------------------------+
| 0 | Strip Whitespace Middleware[...] | Will McCutchen ([...]|
| 1 | [FOSS Nepal] list of free alternatives to windows[...] | Manish Regmi (r[...] |
| 2 | json serialization[...] | Gábor Farkas (g[...] |
| 3 | editable=False and "Could not find Formfield or[...] | Corey (coordt@e[...] |
| 4 | IronPython 1.0 release candidate[...] | Jeremy Dunck (j[...] |
| 5 | django server tree organization[...] | Kenneth[...] |
| 6 | Project when using multiple sites[...] | Jay Parlar (par[...] |
| 7 | [FOSS Nepal] Neprog (nepali version pogrammer for[...] | ujwal (ujwal2@g[...] |
| 8 | Bug#379789: wrong keymap on Intel MacBook Pro[...] | Frans Pop (elen[...] |
| 9 | debconf is Level 1?[...] | Clytie Siddall ([...]|
| 10 | Weird slowdown with dev server behind nat[...] | Akatemik (tpiev[...] |
| 11 | Database API question: I am not able to return a[...] | DavidA (david.a[...] |
| 12 | Bug#379120: lspci present on i386, verify on[...] | Eddy Petrişor ([...] |
| 13 | New levels of D-I[...] | Eddy Petrişor ([...] |
| 14 | Installed Apps in settings.py[...] | limodou (limodo[...] |
| 15 | where u at man ... where can i call you ??????[...] | Sanjeev[...] |
| 16 | unable to runser ?[...] | Geert[...] |
| 17 | Bug#380585: debian 3.1 install FD[...] | as_hojoe (as_ho[...] |
| 18 | Re: Translated packages descriptions progress[...] | Michael Bramer ([...]|
| 19 | Loading an url takes 60 sec.[...] | and_ltsk (andre[...] |
+------------------------------------------------------------------------------------+
ghoseb@trinka:~$
'''
import urllib # For BasicHTTPAuthentication
import feedparser # For parsing the feed
from textwrap import wrap # For pretty printing assistance
_URL = "https://mail.google.com/gmail/feed/atom"
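# Python 2 script: FancyURLopener below prompts interactively for the username and password, as in the sample session above.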
def auth():
'''The method to do HTTPBasicAuthentication'''
opener = urllib.FancyURLopener()
f = opener.open(_URL)
feed = f.read()
return feed
def fill(text, width):
'''A custom method to assist in pretty printing'''
if len(text) < width:
return text + ' ' * (width - len(text))
else:
return text
def readmail(feed):
'''Parse the Atom feed and print a summary'''
atom = feedparser.parse(feed)
print ""
print atom.feed.title
print "You have %s new mails" % len(atom.entries)
# Mostly pretty printing magic
print "+" + ("-" * 84) + "+"
print "| Sl.|" + " Subject" + ' ' * 48 + "|" + " Author" + ' ' * 15 + "|"
print "+" + ("-" * 84) + "+"
for i in xrange(len(atom.entries)):
print "| %s| %s| %s|" % (
fill(str(i), 3),
fill(wrap(atom.entries[i].title, 50)[0] + "[...]", 55),
fill(wrap(atom.entries[i].author, 15)[0] + "[...]", 21))
print "+" + ("-" * 84) + "+"
if __name__ == "__main__":
f = auth() # Do auth and then get the feed
readmail(f) # Let the feed be chewed by feedparser
|
[
"sjtuzhq@gmail.com"
] |
sjtuzhq@gmail.com
|
3f505c9990eeb9b824d8b9cf8806f8a254ce2398
|
1bfd6a615b6aa1aa069be2a797fdd35bb428fb2d
|
/build/mission/cmake/mission-genmsg-context.py
|
c34633ee64530c77c3fd1f95fc7b5bda288d2d91
|
[] |
no_license
|
gogochiou/eudemo
|
961a8ee4b6f9d7c2b6d4e9793ec0488e04b18f9f
|
fe730b3c2b618ee3a1c4f257c4da816db961d599
|
refs/heads/main
| 2023-04-18T18:18:09.881614
| 2021-05-07T13:57:03
| 2021-05-07T13:57:03
| 365,246,447
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 624
|
py
|
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/gogochiou/eudemo_ws/src/mission/msg/maintomission.msg"
services_str = "/home/gogochiou/eudemo_ws/src/mission/srv/mission_camera.srv"
pkg_name = "mission"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "mission;/home/gogochiou/eudemo_ws/src/mission/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
[
"aaaa8552369@gmail.com"
] |
aaaa8552369@gmail.com
|
17162a8e159958d4d544278acdbb7043ebb8d3d3
|
87438a1eec8d7a40dd2b91515bd93e4581889523
|
/test/test.py
|
166d2029fe1ef5d295b4020e045474af64f0c5b9
|
[] |
no_license
|
MobileCloudNetworking/imsaas
|
285ceb19eca59347e4930ccc4d59358982b897ea
|
30c1e7bd512a8478297413f53ca8141fc820734a
|
refs/heads/master
| 2021-01-10T16:26:27.065835
| 2016-01-28T12:16:34
| 2016-01-28T12:16:34
| 46,068,227
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
__author__ = 'gca'
import os
from so.util import util
def main():
print os.getcwd()
stream = open("../../data/ims.yaml",'r')
t = util.stack_parser(stream)
print t
if __name__ == "__main__":
main()
|
[
"giuseppe.a.carella@tu-berlin.de"
] |
giuseppe.a.carella@tu-berlin.de
|
f20f877381e716ec21757f7eed902dce9fb7cf8d
|
aa8fa3c1df75ba2d94cc82dfdd12507ac8954b13
|
/config.py
|
7d63ffdd8be467ea678664712f3c918941cddd51
|
[
"MIT"
] |
permissive
|
manuCR/pygram
|
13d85621fc0a47aeeaad1d6220fbc442abc1953a
|
dbbdc9e195af527c661525c655fc4ceaba829ac2
|
refs/heads/master
| 2020-06-15T06:00:56.237274
| 2014-12-12T18:20:33
| 2014-12-12T18:20:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 115
|
py
|
config = {
'DEBUG' : True,
'TESTING' : True,
'SECRET_KEY' : 'dkaghyt4se-5b5f-45j3-95rb-32342343252'
}
|
[
"pygram.team@gmail.com"
] |
pygram.team@gmail.com
|
e0174d86d625525d4bf7f881f1049b40227eafbf
|
71e2c980ffc30659f4c45e229860245df61105ad
|
/generate_inputs.py
|
318588f016e5c328e9a2e3f8b189e55c95c74741
|
[] |
no_license
|
Squalexy/AED-AVL-tree
|
5f5ffbefe87d7e7ff8f9bc5c48a5148d957bc29a
|
be92b1cac35fcef37b166338d823fe41baa998ed
|
refs/heads/master
| 2023-03-20T19:20:13.477427
| 2021-03-17T23:35:43
| 2021-03-17T23:35:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,888
|
py
|
import random
doencas = ["doenca1", "doenca2", "doenca3", "doenca4", "doenca5", "doenca6", "doenca7", "doenca8", "doenca9",
"doenca10"]
utentes = []
datas = []
for i in range(20):
utentes.append(random.randint(0, 1000))
for i in range(30):
    dia = str(random.randint(1, 31)).zfill(2)  # days 1-31; starting at 0 would generate invalid dates
    mes = str(random.randint(1, 12)).zfill(2)  # months 1-12
ano = str(random.randint(2020, 2030))
datas.append(dia + mes + ano)
def generate_10ins_90con(num):
ficheiro = "input_10ins_90con"
with open(ficheiro + str(num) + ".in", 'w') as escrever:
for k in range(num):
doenca_random = random.randint(0, 9)
utente_random = random.randint(0, 19)
data_random = random.randint(0, 29)
escrever.write(
"ACRESCENTA" + " " + str(utentes[utente_random]) + " " + doencas[doenca_random] + " " + datas[
data_random] + "\n")
for j in range(9):
utente_random = random.randint(0, 19)
escrever.write("CONSULTA" + " " + str(utentes[utente_random]) + "\n")
def generate_90ins_10con(num):
ficheiro = "input_90ins_10cons"
with open(ficheiro + str(num) + ".in", 'w') as escrever:
for k in range(num):
utente_random = random.randint(0, 19)
escrever.write("CONSULTA" + " " + str(utentes[utente_random]) + "\n")
for j in range(9):
doenca_random = random.randint(0, 9)
utente_random = random.randint(0, 19)
data_random = random.randint(0, 29)
escrever.write(
"ACRESCENTA" + " " + str(utentes[utente_random]) + " " + doencas[doenca_random] + " " + datas[
data_random] + "\n")
n = 1
for i in range(15):
generate_10ins_90con(n)
n += 1
n = 1
for i in range(15):
generate_90ins_10con(n)
n += 1
|
[
"alexx.da95@gmail.com"
] |
alexx.da95@gmail.com
|
e56e94d9b5b6c4173877e6c231db7969e49b4b4e
|
5c0cc5228b5fb63092e18c459d186c396275ff02
|
/Example/if.py
|
adb7b4b17085eba8a3c15f7b708b2168b1ddba6d
|
[] |
no_license
|
BAEKJungHo/python-basic
|
994b99feb25fb132e47281650f5b458d9ff17725
|
2fe0fd469a36bf6f6be917b9915e4cb9146264aa
|
refs/heads/main
| 2023-02-03T15:16:57.447601
| 2020-12-22T12:56:54
| 2020-12-22T12:56:54
| 319,327,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,648
|
py
|
# Python control flow
# IF practice
# Python raises an error if you do not indent.
import sys
import io
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding= 'utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding= 'utf-8')
print(type(True))
print(type(False)) # 0, "", [], (), {}
if True:
print("TRUE")
else :
print("FALSE")
# Relational operators
# >, >=, <, <=, ==, !=
x = 15
y = 10
# == is true when both sides are equal.
print(x == y)
# != is true when the two sides differ.
print(x != y)
# > is true when the left side is greater.
print(x > y)
# >= is true when the left side is greater than or equal.
print(x >= y)
# < is true when the right side is greater.
print(x < y)
# <= is true when the right side is greater than or equal.
print(x <= y)
# How truthiness is decided
# truthy : "values", [values], (values), {values}, 1
# falsy : "", [], (), {}, 0, None
city = ""
if city:
print("You are in:", city)
else:
    # output
print("Please enter your city")
city = "Seoul"
if city:
print("You are in:", city)
else:
    # output
print("Please enter your city")
# Logical operators (important)
# and, or, not
# Reference: https://www.tutorialspoint.com/python/python_basic_operators.htm
a = 75
b = 40
c = 10
print('and : ', a > b and b > c) # a > b > c
print('or : ', a > b or b > c)
print('not : ', not a > b)
print('not : ', not b > c)
print(not True)
print(not False)
# Precedence of arithmetic, relational and logical operators
# applied in the order arithmetic > relational > logical
print('e1 : ', 3 + 12 > 7 + 3)
print('e2 : ', 5 + 10 * 3 > 7 + 3 * 20)
print('e3 : ', 5 + 10 > 3 and 7 + 3 == 10)
print('e4 : ', 5 + 10 > 0 and not 7 + 3 == 10)
score1 = 90
score2 = 'A'
# runs only when all of the conditions are true.
if score1 >= 90 and score2 == 'A':
print("Pass.")
else:
print("Fail.")
# Example
id1 = "vip"
id2 = "admin"
grade = 'platinum'
if id1 == "vip" or id2 == "admin":
    print("Administrator verified")
    if id2 == "admin" and grade == "platinum":
        print("Top-level administrator")
# Multi-way conditional (if / elif / else)
num = 90
if num >= 90:
print('Grade : A')
elif num >= 80:
print('Grade : B')
elif num >= 70:
print('Grade : C')
else:
    print('Fail')
# Nested conditionals
grade = 'A'
total = 95
if grade == 'A':
    if total >= 90:
        print("Scholarship 100%")
    elif total >= 80:
        print("Scholarship 80%")
    else:
        print("Scholarship 70%")
else:
    print("Scholarship 50%")
# in, not in
q = [10, 20, 30]
w = {70, 80, 90, 90}
e = {"name": 'Lee', "city": "Seoul", "grade": "A"}
r = (10, 12, 14)
print(15 in q)
print(90 in w)
print(12 not in r)
print("name" in e) # key 검색
print("seoul" in e.values()) # value 검색
|
[
"designjava@naver.com"
] |
designjava@naver.com
|
4ba9caf8edf5c826547328f0d0e509db56fc1fe4
|
b22a0cdf85a9ece7ee0d182a629a7dc5330f33eb
|
/backend/user/urls.py
|
003638c30f5e3f4d1e4ca3f90154464b480de425
|
[] |
no_license
|
codedbychavez/LazyChat
|
09a663f0f9a98eb70965cf474c9391931de1c0b6
|
db591448596f5cf2182b2c042ab3dca9d6299a00
|
refs/heads/main
| 2023-08-23T01:05:04.073357
| 2021-10-20T10:00:32
| 2021-10-20T10:00:32
| 408,805,747
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
from django.urls import path
from .views import *
urlpatterns = [
# Project paths
path('create', create.as_view()),
path('get_user', getUser.as_view()),
]
|
[
"74829200+softiosolutions@users.noreply.github.com"
] |
74829200+softiosolutions@users.noreply.github.com
|
0ec375bb5cac8621e4eb214cf9b0de24427000c9
|
9c37ad937822e964cc78164bfa6b6f8a39048230
|
/backend/src/handler/auth_user.py
|
d2f906dc07e921858e29b7d734f6a3ae95c5d646
|
[] |
no_license
|
ntchung195/S.E.P
|
b1ce5f25d678c60f602ff0516158b5456933935e
|
66e46090bb5195ba43994e5d04740e3d950863bd
|
refs/heads/dev
| 2023-07-27T01:02:15.240566
| 2020-07-29T10:45:29
| 2020-07-29T10:45:29
| 254,452,183
| 0
| 0
| null | 2023-07-06T21:59:54
| 2020-04-09T18:45:57
|
Dart
|
UTF-8
|
Python
| false
| false
| 991
|
py
|
import pyaudio
import wave
# import cv2
import os
import pickle
import time
from scipy.io.wavfile import read
# from IPython.display import Audio, display, clear_output
# from main_functions import *
from src.util.voice import *
from src.service.sql import get_user_id
from src.service.config_api import DetectResult
import src.const as const
def voice_recognite(user_name,user_id,logging,tag = 'recognize'):
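    # Paths follow the convention <USER_DIR>/<user_name>/<user_id>.gmm (model) and <user_id>_<tag>.wav (sample).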
user_directory = const.USER_DIR +'/' + user_name
logging.info(" User directory is : {}".format(user_directory))
register_gmm = user_directory + '/{0}.gmm'.format(user_id)
regconize_wav = user_directory + '/{0}_{1}.wav'.format(user_id,tag)
res,score = verify_model(register_gmm,regconize_wav,logging)
if not res:
return DetectResult(code=const.CODE_FAIL,score_auth = 1 + score,data = res, message="cannot recognize user, recognize again!")
return DetectResult(code=const.CODE_DONE,score_auth = 1 + score,data = res, message="recognize success")
|
[
"hung123hung456@gmail.com"
] |
hung123hung456@gmail.com
|
b1feaf9d0a91804367d71f9c9b15e3093b91e126
|
cbbdce3ff0d1f3d2f715eca06dcee5e1255587cf
|
/main/src/apps/launchpad/migrations/0006_auto_20160929_2211.py
|
4c9d7966e4f1090dc02302713a575228038f0b92
|
[] |
no_license
|
treylitefm/hermes
|
68e6c7590ffb8539fa9e54ea53d95ee3dca6ad42
|
e5963ab1c45b3e96861b34c605d098ce42a820fb
|
refs/heads/master
| 2021-06-08T02:37:42.622020
| 2016-11-15T07:06:02
| 2016-11-15T07:06:02
| 69,292,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-29 22:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('launchpad', '0005_auto_20160928_0324'),
]
operations = [
migrations.RemoveField(
model_name='testrun',
name='page',
),
migrations.RemoveField(
model_name='page',
name='ping_health',
),
migrations.DeleteModel(
name='TestRun',
),
]
|
[
"griffin.omar@gmail.com"
] |
griffin.omar@gmail.com
|