blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5fc12459c06d983683095037677ec9e3ae40582b
|
dfb701b247c8dddf479b90ea6ac1808820341e86
|
/finalProject/inverse.py
|
d43b617fcf46dd5f01056bce7313fc8011d293f2
|
[] |
no_license
|
rafid1970/Machine-Learning-and-Data-Mining
|
19aba66549bed4c9002c86cebe9262639d089f94
|
c9f4df3a5a0ea6285a08661fac9be5c225581e83
|
refs/heads/master
| 2020-04-15T01:40:07.452565
| 2017-06-14T03:43:29
| 2017-06-14T03:43:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
import pandas as pd

# Load the Kaggle-style submission, invert every binary is_duplicate
# prediction (1 -> 0, anything else -> 1), drop the id column and write
# the flipped predictions to a new CSV.
predictions = pd.read_csv('submission.csv')
predictions['is_duplicate'] = predictions['is_duplicate'].map(
    lambda value: 0 if value == 1 else 1)
del predictions['test_id']
predictions.to_csv('flippedValues.csv', sep=',')
|
[
"brandonlee503@gmail.com"
] |
brandonlee503@gmail.com
|
f8a5447cc2e15a5395f1dd03fe2b11ce54523af7
|
13156575ec13b43b48bc6531a1ccd5018a5dac13
|
/features/steps/openbrowser.py
|
7964d449a9247e17bdd0c6c01c1c0a1e9d8bf779
|
[] |
no_license
|
VladimirovaAnastasia/Behave
|
aaa3ad9ed32407cda421668d0d292b75332cb818
|
53348ea9cec36ec1f966dbefcefd91254a603a9a
|
refs/heads/master
| 2020-08-13T02:11:26.165791
| 2019-10-13T20:41:11
| 2019-10-13T20:41:11
| 214,888,301
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 962
|
py
|
from behave import *


@given('I open seleniumframework website')
def step_impl(context):
    """Load the seleniumframework.com home page in the shared browser."""
    context.browser.get("http://www.seleniumframework.com")


@then('I print the title')
def step_impl(context):
    """Verify the page title mentions Selenium."""
    page_title = context.browser.title
    assert "Selenium" in page_title


@then("I print current url")
def step_impl(context):
    """Echo the browser's current URL to stdout."""
    print(context.browser.current_url)


@step("I click ABOUT link")
def step_impl(context):
    """Follow the ABOUT link, then verify the navigation succeeded."""
    context.browser.find_element_by_link_text("ABOUT").click()
    assert "ABOUT" in context.browser.title


@then("I click back on the browser")
def step_impl(context):
    """Go back in history; we should land on the Selenium home page again."""
    context.browser.back()
    assert "Selenium" in context.browser.title


@step("I click forward on the browser")
def step_impl(context):
    """Go forward in history; we should return to the ABOUT page."""
    context.browser.forward()
    assert "ABOUT" in context.browser.title


@step("I click refresh on the browser")
def step_impl(context):
    """Reload the current page."""
    context.browser.refresh()


@step("print the name")
def step_impl(context):
    """Echo the browser engine name to stdout."""
    print(context.browser.name)
|
[
"vladimirova.a-a@yandex.ru"
] |
vladimirova.a-a@yandex.ru
|
cfadc6c0fad4440415bad446d43be5235ed031a9
|
cf5f9fc378cdbbcd7e552b04d9db2db8efcb8f8b
|
/Basic/1_Datatype/Example/datatype.py
|
e6a6294a98321c7879c640741d605121142fa2f0
|
[
"MIT"
] |
permissive
|
s1ko/LearnPython
|
572bc0556ff3afc3f8be84b13c4cdfb0ca111159
|
ea3b01fc93f541c8d136f866a8fd448dbed72ef2
|
refs/heads/master
| 2020-03-31T22:50:43.971204
| 2018-10-07T22:41:19
| 2018-10-07T22:41:19
| 152,633,317
| 1
| 0
|
MIT
| 2018-10-11T17:56:05
| 2018-10-11T17:56:04
| null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
# Date: 05/15/2018
# Author: Mohamed Sheikh
# Description: Datatypes
#
# Demonstrates Python's basic built-in datatypes with example values.

# strings. Any value that is inside quotes
secret_message = 'This is a secret message'
name = 'Sami'
str_age = '32'
str_age = '87'  # rebinds str_age; the '32' above is never observable
str_isAlive = 'False'

# integers. Any whole number
age = 15
speed = 98

# floats. Any decimal number
pi = 3.1415
phi = 1.61803

# booleans. Any value that is either True or False
isAlive = True
isActive = False

# BUG FIX: the original file ended with a bare `continue` statement, which is
# a SyntaxError outside a loop and made this module impossible to import.
# It has been removed.
|
[
"noreply@github.com"
] |
s1ko.noreply@github.com
|
557a5ed8ae632f3de0ab468bd5e52697ab174391
|
4fe713d02c2007b3ff1a8cc2d54167c950d73b67
|
/bookmark/models.py
|
649017234c07890df24ce6428198f63b1ed33a6a
|
[] |
no_license
|
bvldog/testDjango
|
2c86eef381061306f9034f001a4338e8e61fd8db
|
e2c77c126b33f993ca7a6c101dfe1a5b9456fd55
|
refs/heads/master
| 2020-05-27T08:05:06.397422
| 2019-05-25T09:19:18
| 2019-05-25T09:19:18
| 188,539,986
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
from django.db import models
from django.urls import reverse


class Bookmark(models.Model):
    """A bookmarked web site: a display name plus its URL."""

    site_name = models.CharField(max_length=100)
    url = models.URLField('Site URL')

    def __str__(self):
        # Human-readable label combining the Korean captions with the values.
        return f"이름 : {self.site_name}, 주소 : {self.url}"

    def get_absolute_url(self):
        """Canonical detail-page URL for this bookmark."""
        return reverse('detail', args=[str(self.id)])
|
[
"bvldog@gmail.com"
] |
bvldog@gmail.com
|
2c2a553fb25893672df0de7091194bccfa9869b9
|
a9a0ea40a2592f148101cbf26cdc1929c64c92fe
|
/src/joins/admin.py
|
d029fc050e9dd6000d1636e552aa90913c0b2955
|
[] |
no_license
|
TrellixVulnTeam/sharewithcode_TZY5
|
b0177882e7c31f1b163329d7e922cecd8abc964c
|
e489fd42d6c47a87e715e5f5ac36966469f62bd8
|
refs/heads/master
| 2023-03-19T12:07:18.188238
| 2017-06-01T17:23:23
| 2017-06-01T17:23:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
from django.contrib import admin
# Register your models here.
from .models import Join


class JoinAdmin(admin.ModelAdmin):
    """Admin configuration for Join: show key columns in the change list."""

    list_display = ['email', 'friend', 'timestamp', 'updated']

    # BUG FIX: the original declared `class meta:` (lowercase), which Django
    # never looks at -- only the capitalized `Meta` inner class is recognized.
    # NOTE(review): ModelAdmin does not actually require an inner Meta/model;
    # the model is bound via admin.site.register() below. Kept for clarity.
    class Meta:
        model = Join


admin.site.register(Join, JoinAdmin)
# admin.site.register(JoinFriends)
|
[
"ajeetsingh1646@gmail.com"
] |
ajeetsingh1646@gmail.com
|
537b95a9ce2b4a0227b3dcee632a38c8bef0796a
|
1d00caa19baa262b9611e6b0f4f2fab4f2ca882e
|
/app/services/FaceRecService.py
|
068e1be6ec462f69378bdf56fa1e85a7dca4b06e
|
[] |
no_license
|
Free2y/Freezy-AI-DL
|
b808293fc9304ff085ee3cc82b83bc0b5f62fe46
|
da848eeb5d0f9a5ee08300fcb65f843234295e25
|
refs/heads/master
| 2023-02-04T21:59:17.119823
| 2020-12-30T01:06:38
| 2020-12-30T01:06:38
| 325,421,344
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,466
|
py
|
# -*- coding: utf-8 -*-
# 视频眨眼检测
# 导入工具包
from collections import OrderedDict
from keras.models import load_model
from scipy.spatial import distance as dist
import numpy as np
import cv2
import os
import paddlehub as hub
# 设置判断参数
from models.src.anti_spoof_predict import AntiSpoofPredict
# 关键点排序
from models.src.generate_patches import CropImage
from models.src.utility import parse_model_name
# 68-point facial landmark index ranges: maps a face region name to its
# [start, end) slice of the landmark array returned by the landmark model.
FACIAL_LANDMARKS_68_IDXS = OrderedDict([
    ("mouth", (48, 68)),
    ("left_eyebrow", (17, 22)),
    ("right_eyebrow", (22, 27)),
    ("left_eye", (36, 42)),
    ("right_eye", (42, 48)),
    ("nose", (27, 36)),
    ("jaw", (0, 17))
])
EYE_AR_THRESH = 0.3  # below this eye-aspect-ratio the eye is judged closed (blink)
BLINK_THRESH = 1  # number of blinks required to pass the liveness check
EYE_AR_CONSEC_FRAMES = 3  # consecutive closed-eye frames counted as one blink
SCALE_WIDTH = 320  # NOTE(review): not referenced anywhere in this file -- possibly dead
mask_detector = hub.Module(name="pyramidbox_lite_mobile_mask")  # PaddleHub face detector
face_landmark = hub.Module(name="face_landmark_localization")  # PaddleHub 68-point landmark model
# Detection-model weights consumed by the anti-spoofing predictor below.
caffemodel = "../../models/detection_model/Widerface-RetinaFace.caffemodel"
deploy = "../../models/detection_model/deploy.prototxt"
as_model = AntiSpoofPredict(0,caffemodel,deploy)
model = load_model("../../models/fas.h5")  # Keras face-anti-spoofing classifier
def eye_aspect_ratio(eye):
    """Return the eye aspect ratio (EAR) for six eye landmark points.

    EAR = (|p1-p5| + |p2-p4|) / (2 * |p0-p3|): the two vertical eyelid
    distances averaged against the horizontal eye width.  Small values
    indicate a (nearly) closed eye.
    """
    vertical_a = dist.euclidean(eye[1], eye[5])
    vertical_b = dist.euclidean(eye[2], eye[4])
    horizontal = dist.euclidean(eye[0], eye[3])
    return (vertical_a + vertical_b) / (2.0 * horizontal)
def as_check(image, model_dir='../../models/anti_spoof_models'):
    """Run every anti-spoofing model found in ``model_dir`` on ``image``.

    Returns a tuple ``(label, value, image_bbox)``: ``label`` is the argmax
    class over the summed 3-way predictions, ``value`` is that class's summed
    score halved, and ``image_bbox`` is the face box from the predictor.
    """
    image_cropper = CropImage()
    image_bbox = as_model.get_bbox(image)
    prediction = np.zeros((1, 3))
    # sum the prediction from single model's result
    for model_name in os.listdir(model_dir):
        # The file name encodes input size / model type / crop scale.
        h_input, w_input, model_type, scale = parse_model_name(model_name)
        param = {
            "org_img": image,
            "bbox": image_bbox,
            "scale": scale,
            "out_w": w_input,
            "out_h": h_input,
            "crop": True,
        }
        if scale is None:
            # Presumably models without an encoded scale expect the full
            # frame rather than a crop -- TODO confirm against CropImage.
            param["crop"] = False
        img = image_cropper.crop(**param)
        prediction += as_model.predict(img, os.path.join(model_dir, model_name))
    # draw result of prediction
    label = np.argmax(prediction)
    value = prediction[0][label]/2
    return (label,value,image_bbox)
def fas_check(X):
    """Score a face crop with the anti-spoofing CNN.

    Resizes the crop to the network's 224x224 input, scales pixels into
    [-1, 1], and returns the model's raw prediction for the single image.
    """
    normalized = (cv2.resize(X, (224, 224)) - 127.5) / 127.5
    return model.predict(np.array([normalized]))[0]
def check_video_uri(video_uri,userinfo = ''):
    """Count eye blinks on a live video feed as a liveness check.

    Runs face detection, anti-spoofing and eye-aspect-ratio blink counting
    frame by frame; returns the total blink count (early as soon as it
    exceeds BLINK_THRESH, otherwise when the stream ends).
    """
    # read the video
    TOTAL_BLINK = 0
    COUNTER = 0
    (lStart, lEnd) = FACIAL_LANDMARKS_68_IDXS["left_eye"]
    (rStart, rEnd) = FACIAL_LANDMARKS_68_IDXS["right_eye"]
    print("[INFO] starting video stream thread...")
    # Hunan live-stream sample: rtmp://58.200.131.2:1935/livetv/hunantv
    # print(video_uri)
    # NOTE(review): this opens camera index 0, not video_uri; video_uri is
    # only used as the imshow window title -- confirm the URI was meant here.
    vs = cv2.VideoCapture(0)
    rate = vs.get(cv2.CAP_PROP_FPS)
    print(rate)
    # iterate over every frame
    flag = False
    while True:
        # preprocessing: grab a frame and scale it down for detection speed
        frame = vs.read()[1]
        if frame is None:
            break
        (h, w) = frame.shape[:2]
        if h > w :
            width = 300
        else:
            width = 600
        r = width / float(w)
        dim = (width, int(h * r))
        frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
        # detect faces; only a single-face frame is processed further
        rects = mask_detector.face_detection([frame])
        if len(rects[0]['data']) != 1:
            COUNTER = 0
        else:
            # loop over each detected face
            for data in rects[0]['data']:
                # Expand the detection box to a square crop around the face.
                h = data['bottom'] - data['top']
                w = data['right'] - data['left']
                _r = int(max(w,h)*0.6)
                cx,cy = (data['left']+data['right'])//2, (data['top']+data['bottom'])//2
                x1 = cx - _r
                y1 = cy - _r
                x1 = int(max(x1,0))
                y1 = int(max(y1,0))
                x2 = cx + _r
                y2 = cy + _r
                h,w,c =frame.shape
                x2 = int(min(x2 ,w-2))
                y2 = int(min(y2, h-2))
                _frame = frame[y1:y2 , x1:x2]
                # Anti-spoofing score; > 0.95 is treated as a real face.
                value = fas_check(_frame)
                if value > 0.95:
                    result_text = "RealFace Score: {:.2f}".format(float(value))
                    color = (255, 0, 0)
                else:
                    result_text = "FakeFace Score: {:.2f}".format(float(value))
                    color = (0, 0, 255)
                cv2.rectangle(
                    frame,
                    (x1,y1) ,(x2,y2),
                    color, 2)
                cv2.putText(
                    frame,
                    result_text,
                    (x1,y1-5),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5*frame.shape[0]/512, color)
                cv2.rectangle(frame, (x1,y1) ,(x2,y2) , (0,255,0) ,2)
                # NOTE(review): fas_check is run a second time on the same
                # crop here -- the `value` above could be reused.
                if fas_check(_frame) > 0.95:
                    shape = face_landmark.keypoint_detection([frame])
                    if len(shape) == 0:
                        continue
                    landmark = shape[0]['data'][0]
                    # print(landmark)
                    # compute the EAR value for each eye
                    leftEye = landmark[lStart:lEnd]
                    rightEye = landmark[rStart:rEnd]
                    leftEAR = eye_aspect_ratio(leftEye)
                    rightEAR = eye_aspect_ratio(rightEye)
                    # average the two eyes
                    ear = (leftEAR + rightEAR) / 2.0
                    # print(ear)
                    # check whether the EAR falls below the blink threshold
                    # for i in landmark:
                    #     cv2.circle(frame, (int(i[0]),int(i[1])), 1, (0, 0, 255), 2)
                    if ear < EYE_AR_THRESH:
                        if flag:
                            COUNTER += 1
                        # if the eye stayed closed for several consecutive
                        # frames, count one blink
                        if COUNTER > EYE_AR_CONSEC_FRAMES:
                            flag = False
                            TOTAL_BLINK += 1
                            # reset
                            COUNTER = 0
                            if TOTAL_BLINK > BLINK_THRESH:
                                vs.release()
                                return TOTAL_BLINK
                    else:
                        flag = True
                        COUNTER = 0
        # cv2.putText(frame, "Blinks: {}".format(TOTAL_BLINK), (10, 30),
        #             cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        # cv2.putText(frame, "EAR: {:.2f}".format(ear), (150, 30),
        #             cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.imshow(video_uri,frame)
        cv2.waitKey(int(rate))
    vs.release()
    return TOTAL_BLINK
# def check_imgfiles(imgfiles, userinfo = ''):
# # 读取视频
# TOTAL_BLINK = 0
# COUNTER = 0
# (lStart, lEnd) = FACIAL_LANDMARKS_68_IDXS["left_eye"]
# (rStart, rEnd) = FACIAL_LANDMARKS_68_IDXS["right_eye"]
# print("[INFO] starting load image frames...")
# flag = False
# # 遍历每一帧
# for file in imgfiles:
# file_bytes = file.read()
# frame = cv2.imdecode(np.asarray(bytearray(file_bytes), dtype='uint8'), cv2.IMREAD_COLOR)
# (h, w) = frame.shape[:2]
# if h > w :
# width = 300
# else:
# width = 600
# r = width / float(w)
# dim = (width, int(h * r))
# frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# # 检测人脸
# rects = mask_detector.face_detection(images=[frame])
# if len(rects[0]['data']) != 1:
# COUNTER = 0
# else:
# # 遍历每一个检测到的人脸
# for data in rects[0]['data']:
# h = (data['bottom'] - data['top'])
# w = (data['right'] - data['left'])
# _r = int(max(w,h)*0.6)
# cx,cy = (data['left']+data['right'])//2, (data['top']+data['bottom'])//2
#
# x1 = cx - _r
# y1 = cy - _r
#
# x1 = int(max(x1,0))
# y1 = int(max(y1,0))
#
# x2 = cx + _r
# y2 = cy + _r
#
# h,w,c =frame.shape
# x2 = int(min(x2 ,w-2))
# y2 = int(min(y2, h-2))
#
# _frame = frame[y1:y2 , x1:x2]
#
# # print(self.fas_check(_frame))
# if fas_check(_frame) > 0.95:
# # 获取坐标
# shape = face_landmark.keypoint_detection([frame])
# landmark = shape[0]['data'][0]
# # print(landmark)
# # 分别计算ear值
# leftEye = landmark[lStart:lEnd]
# rightEye = landmark[rStart:rEnd]
# leftEAR = eye_aspect_ratio(leftEye)
# rightEAR = eye_aspect_ratio(rightEye)
# # 算一个平均的
# ear = (leftEAR + rightEAR) / 2.0
# # print(ear)
# # 检查是否满足阈值
# if ear < EYE_AR_THRESH:
# if flag:
# COUNTER += 1
# # 如果连续几帧都是闭眼的,总数算一次
# if COUNTER > EYE_AR_CONSEC_FRAMES:
# flag = False
# TOTAL_BLINK += 1
# # 重置
# COUNTER = 0
# if TOTAL_BLINK > BLINK_THRESH:
# return TOTAL_BLINK
# else:
# flag = True
# COUNTER = 0
#
# return TOTAL_BLINK
def LivenessCheck(img_files, video_uri, userinfo):
    """Decide liveness from either uploaded frames or a video URI.

    Returns 'yes' when the blink count exceeds BLINK_THRESH, else 'no'.
    The image-file path is currently disabled (its handler is commented
    out), so a non-empty img_files list yields 0 blinks.
    """
    # BUG FIX: `result` was only assigned on the video branch; with a
    # non-empty img_files list the final comparison raised
    # UnboundLocalError.  Default to 0 blinks instead.
    result = 0
    if len(img_files) > 0:
        print(len(img_files))
        # result = check_imgfiles(img_files, userinfo)
    elif video_uri is not None:
        result = check_video_uri(video_uri, userinfo)
    else:
        return 'no'
    return 'yes' if result > BLINK_THRESH else 'no'
# Manual smoke test: no image frames, so this exercises the video branch
# (which currently opens the local camera) with 'test' as the URI.
if __name__ == '__main__':
    LivenessCheck([],'test','')
|
[
"hm"
] |
hm
|
f4a3bc79827d32eee2f03334b95db36697fa1b8a
|
0ae71755e3093703015348096900c9fb34f45d42
|
/proxy2.py
|
ba1c48703e1ebe001312dfa669185ad30fbe00bf
|
[] |
no_license
|
eggertj12/http_proxy
|
86806e695aeca51486112c35149a8c55336f36fa
|
b5f8834d1fe44e5a3a6093269f9975993f8669a6
|
refs/heads/master
| 2021-03-27T15:11:32.667742
| 2014-11-09T23:27:21
| 2014-11-09T23:27:21
| 25,377,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,083
|
py
|
# ----------------------------------------------------------------------------------------------------
# http_proxy.py
#
# Simple http proxy server
#
# Uses some helper classes
# Message for representing the messages (request or response) and operations on those
# SocketReader is a wrapper for sockets which provides buffered reading from socket of lines or bytes
# Exceptions contains two custom exceptions we use
# Cache is a static class for wrapping cache operations
# HttpHelper is also a static class containing some operations for handling the communication
#
#
# It handles parallel connections using threading, handling each persistent connection in a separate thread
# Pipelining is supported, although testing with Opera has been not really successful where the browser
# closes the socket resulting in a broken pipe error.
# Logging is done using the thread safe logging module and cache uses locking to protect file operations
# Caching is persisted between sessions by storing all metadata on filesystem
#
#
# ----------------------------------------------------------------------------------------------------
from socket import *
import socket
import sys
import select
import threading
import datetime
import logging
import urllib
import email.utils as eut
import os
# Import our classes
from SocketReader import SocketReader
from HttpHelper import HttpHelper
from Cache import Cache
from Message import Message
from Exceptions import *
#-----------------------------------------------------------------------------------------------------------
# Logging
# Write the request / response line to given log file
def log(request, response, addr):
    """Write one 'client request : response' line via the logging module.

    addr is the client (ip, port) pair.  Ensures the request has a 'host'
    header entry (empty string) so later formatting never fails.
    """
    request.headers.setdefault('host', '')
    entry = ': %s:%s %s %s%s%s : %s %s' % (
        addr[0], addr[1], request.verb, request.scheme,
        request.hostname, request.path, response.status, response.text)
    logging.warning(entry)
#-----------------------------------------------------------------------------------------------------------
# Connection related
# Setup a connection to the upstream server
def connect_to_server(message):
    """Open a TCP connection to message.hostname:message.port.

    Returns the connected socket, or None when name resolution /
    connection setup raises socket.gaierror.
    """
    try:
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn.connect((message.hostname, int(message.port)))
    # Get this gaierror if it is impossible to open a connection
    except socket.gaierror, e:
        return None
    return conn
# Handle sending the message (request or response) and accompanying data if available
# Will load from cache if this is a cached response
def forward_message(message, reading, writing, cache_file=None):
    """Send a request/response (plus any body) to the `writing` socket.

    Cached responses are replayed from disk; otherwise the serialized
    message is sent and any body is streamed from `reading` to `writing`,
    optionally tee'd into `cache_file`.
    """
    # Let the world know who we are.
    message.add_via('RatherPoorProxy')
    if message.cache_file is not None:
        # This response lives in the cache -- replay it from disk.
        Cache.send_cached_file(message, writing)
        return
    # Write the message head to the target socket, then relay the body.
    writing.sendall(message.to_string())
    body_kind = message.has_content()
    if body_kind == 'content-length':
        HttpHelper.read_content_length(reading, writing, int(message.headers['content-length']), cache_file)
    elif body_kind == 'chunked':
        HttpHelper.read_chunked(reading, writing, cache_file)
#-----------------------------------------------------------------------------------------------------------
# The main handler loop
# Handle one persistent connection
def connection_handler(client_socket, addr):
    """Serve one persistent client connection until it closes.

    Parses (possibly pipelined) requests from client_socket, forwards them
    to the origin server -- or answers from cache / with a synthesized
    error response -- and relays responses back in request order.
    ``addr`` is the client (ip, port) pair used for logging.
    """
    client_reader = SocketReader(client_socket)
    persistent = True
    # Keep requests and possibly out of order responses (f.ex. cached) in a dictionary
    req_id = resp_id = 0
    request_queue = {}
    response_queue = {}
    server_reader = None
    # Loop as long as the client wants and also until all queued requests have been answered
    while persistent or req_id > resp_id:
        try:
            # First check if we have a ready response to send to client
            if (resp_id in response_queue):
                req = request_queue[resp_id]
                resp = response_queue.pop(resp_id)
                forward_message(resp, server_reader, client_reader)
                log(req, resp, addr)
                resp_id = resp_id + 1
                # If no more queued messages just force close connection
                if req_id == resp_id:
                    persistent = False
                continue
            # Find out which sockets to try and listen to
            socket_list = []
            if persistent:
                # Client has indicated it wants to keep connection open
                socket_list.append(client_socket)
                if server_reader != None:
                    socket_list.append(server_reader.get_socket())
            elif req_id > resp_id:
                # Still have responses pending, open a connection to the server
                req = request_queue[resp_id]
                server_socket = connect_to_server(req)
                if server_socket == None:
                    # TODO: handle not opened connection (Should hardly happen here)
                    print "Could not open connection to server"
                    break
                server_reader = SocketReader(server_socket, req.hostname)
                socket_list.append(server_reader.get_socket())
            # select blocks on list of sockets until reading / writing is available
            # or until timeout happens, set timeout of 30 seconds for dropped connections
            readList, writeList, errorList = select.select(socket_list, [], socket_list, SocketReader.TIMEOUT)
            if errorList:
                print "Socket error"
                break
            if len(readList) == 0:
                print "Socket timeout"
                break
            # Client is ready to send data
            if client_reader != None and client_reader.get_socket() in readList:
                req = Message()
                try:
                    req.parse_request(client_reader)
                except SocketClosedException:
                    # Client has closed socket from it's end
                    # print "Client closed connection"
                    persistent = False
                    continue
                # Store request to have it available when it's response arrives
                request_queue[req_id] = req
                # req.print_message(True)
                # Only a small subset of requests are supported
                if not req.verb in ('GET', 'POST', 'HEAD'):
                    # Create a response and store in queue until this request will be answered
                    resp = HttpHelper.create_response('405', 'Method Not Allowed')
                    resp.headers['connection'] = 'close'
                    response_queue[req_id] = resp
                    req_id = req_id + 1
                    continue
                # Cache hit: synthesize a 200 now; body is replayed from disk later.
                cache_file = Cache.is_in_cache(req.hostname, req.path)
                if cache_file != None:
                    resp = HttpHelper.create_response('200', 'OK')
                    resp.cache_file = cache_file
                    response_queue[req_id] = resp
                    req_id = req_id + 1
                    continue
                if server_reader == None:
                    server_socket = connect_to_server(req)
                    if server_socket == None:
                        # Respond if the requested server can not be connected to
                        resp = HttpHelper.create_response('502', 'Bad gateway')
                        resp.headers['connection'] = 'close'
                        response_queue[req_id] = resp
                        req_id = req_id + 1
                        continue
                    server_reader = SocketReader(server_socket, req.hostname)
                # Might have to connect to a different server.
                elif server_reader.hostname != req.hostname:
                    server_socket = connect_to_server(req)
                    if server_socket == None:
                        resp = HttpHelper.create_response('502', 'Bad gateway: ' + req.hostname)
                        resp.headers['connection'] = 'close'
                        response_queue[req_id] = resp
                        req_id = req_id + 1
                        continue
                    server_reader = SocketReader(server_socket, req.hostname)
                # Finally ready to send the request
                forward_message(req, client_reader, server_reader)
                req_id = req_id + 1
            # Server is ready to send data
            elif server_reader != None and server_reader.get_socket() in readList:
                resp = Message()
                try:
                    resp.parse_response(server_reader)
                except SocketClosedException:
                    # Server has closed socket from it's end
                    # print "Server closed connection"
                    server_reader = None
                    continue
                # resp.print_message(True)
                resp.hostname = req.hostname
                response_queue[resp_id] = resp
                cache_file = None
                # Cache the response body alongside forwarding when allowed.
                if req.is_cacheable() and resp.is_cacheable():
                    if 'content-type' in resp.headers:
                        ct = resp.headers['content-type']
                    else:
                        ct = ''
                    cache_file = Cache.filename(req.hostname, req.path, resp.cache_expiry_date(), ct)
                    Cache.cache_headers(resp, cache_file)
                forward_message(resp, server_reader, client_reader, cache_file)
                log(req, resp, addr)
                resp_id = resp_id + 1
                if not resp.is_persistent():
                    # Server wants to close connection. Clean up
                    server_reader.close()
                    server_socket = None
                # Determine if we shall loop
                persistent = req.is_persistent()
        except TimeoutException:
            # print "connection timed out. Closing"
            persistent = False
        except SocketClosedException, e:
            # print 'Client closed socket'
            persistent = False
        except socket.error, e:
            # TODO: handle this more fine grained (or better yet analyse reasons)
            persistent = False
            break
    # End of while loop, cleanup
    if server_reader != None:
        server_reader.close()
        server_reader = None
    client_reader.close()
    client_reader = None
    request_queue = None
    response_queue = None
#-----------------------------------------------------------------------------------------------------------
# Program start
#-----------------------------------------------------------------------------------------------------------
#Send in two variables, portnr and log.txt
#Send in two variables, portnr and log.txt
if (len(sys.argv) != 3 and len(sys.argv) != 4):
    print 'Need two arguments, port number and file for logging'
    sys.exit(1)
port = int(sys.argv[1])
threaded = True
# Any third argument switches the proxy into single-threaded mode.
if len(sys.argv) == 4:
    print "Starting in unthreaded mode"
    threaded = False
# Set up logger configuration
logging.basicConfig(filename=sys.argv[2], format='%(asctime)s %(message)s', datefmt='%Y-%m-%dT%H:%M:%S+0000')
# Set up a listening socket for accepting connection
listenSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listenSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listenSocket.bind(('', port))
listenSocket.listen(5)
# Then it's easy peasy from here on, just sit back and wait
while True:
    incoming_socket, addr = listenSocket.accept()
    if threaded:
        # dispatch to thread, set it as deamon as not to keep process alive
        thr = threading.Thread(target=connection_handler, args=(incoming_socket, addr))
        thr.daemon = True
        thr.start()
    else:
        connection_handler(incoming_socket, addr)
# clean up afterwards
# NOTE(review): unreachable -- the accept loop above never terminates
# normally, so these two lines only document intent.
listenSocket.shutdown(2)
listenSocket.close()
|
[
"eggert@hugsaser.is"
] |
eggert@hugsaser.is
|
14da75d78e7b49e02eedc62d6071ce110809fa0c
|
cf5e767faabe7307a8e081b35fd9600d003f8665
|
/src/config/urls.py
|
e923d44714df37efef2d8c4f37df88bf212e29b5
|
[
"BSD-3-Clause"
] |
permissive
|
FuShengRuoXian/django-spms
|
bfd0ec8cc7ef957c8e6e308e1e9bf4259cf7c551
|
95ac193891f93da07c3a26feeaf846e6030f3466
|
refs/heads/master
| 2023-05-23T20:09:17.964349
| 2020-03-12T09:58:27
| 2020-03-12T09:58:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,029
|
py
|
# coding=utf-8
"""tutorial URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from rest_framework import routers
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib import admin
from django.http import HttpResponse
from rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token, verify_jwt_token
from django.conf import settings
from django.views.static import serve
MEDIA_ROOT = getattr(settings, 'MEDIA_ROOT')
SITE_NAME = getattr(settings, 'SITE_NAME')
from rest_framework.documentation import include_docs_urls
from rest_framework_swagger.views import get_swagger_view
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from django.conf.urls import include, url # For django versions before 2.0
# drf-yasg OpenAPI schema view: backs the /swagger and /redoc routes below.
yasg_schema_view = get_schema_view(
    openapi.Info(
        # title='%s Snippets-API DOCS' % settings.SITE_NAME,
        title='Snippets-API DOCS',
        default_version='v1',
        description="Test description",
        terms_of_service="https://www.google.com/policies/terms/",
        contact=openapi.Contact(email="contact@snippets.local"),
        license=openapi.License(name="BSD License"),
    ),
    validators=['flex', 'ssv'],
    public=True,  # schema is generated without requiring authentication
    permission_classes=(permissions.AllowAny,),
)
# Legacy django-rest-swagger view (only used by the commented-out docs1 route).
schema_view = get_swagger_view(title='%s API DOCS' % SITE_NAME)
def health_view(request):
    """Liveness-probe endpoint: always responds 200 with an empty body."""
    return HttpResponse('')
urlpatterns = [
    url(r'^$', lambda x: HttpResponseRedirect('/index/')),
    url(r'^index/$', lambda x: HttpResponseRedirect(reverse('quickstart:index')), name='site_index'),
    # url(r'^index/$', lambda x: HttpResponseRedirect('/health/'), name='site_index'),
    url(r'^cas/', include('mama_cas.urls')),
    url(r'^api/jwt/api-token-auth/', obtain_jwt_token),
    url(r'^api/jwt/api-token-refresh/', refresh_jwt_token),
    url(r'^api/jwt/api-token-verify/', verify_jwt_token),
    url(r'^api/auth/', include('rest_framework.urls', namespace='rest_framework')),  # DRF session auth (browsable API login)
    url(r'^api/', include(('appxs.api.urls', 'api'), namespace='api')),  # DRF API entry point
    url(r'^accounts/', include(('appxs.account.urls', 'account'), namespace='account')),
    url(r'^admin/', admin.site.urls),
    url(r'^asset/(?P<path>.*)$', serve, {'document_root': MEDIA_ROOT}),  # serve uploaded media files
    url(r'^commonx/', include('appxs.commonx.urls', namespace='commonx')),
    url(r'^health/', health_view),  # used to monitor whether this app is alive
    url(r'^quickstart/', include('appxs.quickstart.urls', namespace='quickstart')),
]
# if settings.DEBUG:
# NOTE(review): `if True` unconditionally exposes the API docs, overriding
# the DEBUG-only guard preserved in the comment above -- confirm intended.
if True:
    urlpatterns += [
        url(r'^api-docs/', include_docs_urls(title='%s API DOCS' % SITE_NAME)),
        url(r'^swagger(?P<format>\.json|\.yaml)$', yasg_schema_view.without_ui(cache_timeout=0), name='schema-json'),
        url(r'^swagger/$', yasg_schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
        url(r'^redoc/$', yasg_schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
        # url(r'^docs1/', schema_view, name='docs1'),
        # url(r'^docs2/', include('rest_framework_docs.urls')),
    ]
if settings.DEBUG:
    # Debug toolbar / uwsgi admin routes are prepended only in DEBUG mode.
    import debug_toolbar
    urlpatterns = [
        url(r'^__debug__/', include(debug_toolbar.urls)),
        url(r'^admin/uwsgi/', include('django_uwsgi.urls')),
    ] + urlpatterns
|
[
"spunkmars@gmail.com"
] |
spunkmars@gmail.com
|
3e772f98aeb80b6ec68f02ac944700a678a03a9e
|
7a72ef64482e5f54d226493b39235960d9e129b9
|
/salmon/triplets/samplers/utils.py
|
f20f0a78e1580e2f07212dd152dbdafb9f1552e5
|
[
"BSD-3-Clause"
] |
permissive
|
stsievert/salmon
|
63061bd61b99416c6740273e0587d6bd57367b8a
|
b850b170c523e7a412d3768b06838f1ce9dc27b0
|
refs/heads/master
| 2023-06-22T07:34:35.671912
| 2023-06-16T16:12:39
| 2023-06-16T16:12:39
| 221,306,375
| 8
| 2
|
BSD-3-Clause
| 2023-06-16T19:59:17
| 2019-11-12T20:32:16
|
Python
|
UTF-8
|
Python
| false
| false
| 324
|
py
|
import logging
import pickle
from typing import Any, Dict, List, Tuple
from pydantic import BaseModel
from salmon.utils import get_logger
Query = Tuple[int, Tuple[int, int]] # head, (choice 1, choice 2)
logger = get_logger(__name__)
class Answer(BaseModel):
    """A response to one triplet query.

    All fields are integer item indices; presumably ``winner`` is whichever
    of ``left``/``right`` was chosen relative to ``head`` -- confirm against
    the sampler that consumes this model.
    """

    # Index of the head (reference) object in the triplet.
    head: int
    # Index of the left comparison object.
    left: int
    # Index of the right comparison object.
    right: int
    # Index of the selected comparison object.
    winner: int
|
[
"noreply@github.com"
] |
stsievert.noreply@github.com
|
0288e235ef59eef85294d55ef4934cc66e58e2dd
|
09e5cfe06e437989a2ccf2aeecb9c73eb998a36c
|
/modules/cctbx_project/cctbx/multipolar.py
|
6deeacb4df764f36dd136cdf508e9154915b5e57
|
[
"BSD-3-Clause",
"BSD-3-Clause-LBNL"
] |
permissive
|
jorgediazjr/dials-dev20191018
|
b81b19653624cee39207b7cefb8dfcb2e99b79eb
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
refs/heads/master
| 2020-08-21T02:48:54.719532
| 2020-01-25T01:41:37
| 2020-01-25T01:41:37
| 216,089,955
| 0
| 1
|
BSD-3-Clause
| 2020-01-25T01:41:39
| 2019-10-18T19:03:17
|
Python
|
UTF-8
|
Python
| false
| false
| 233
|
py
|
from __future__ import absolute_import, division, print_function
from cctbx.array_family import flex # for tuple mappings
import boost.python
# Load the compiled C++ extension module and re-export all of its public
# names at this module's top level.
ext = boost.python.import_ext("cctbx_multipolar_ext")
from cctbx_multipolar_ext import *
|
[
"jorge7soccer@gmail.com"
] |
jorge7soccer@gmail.com
|
98d7b30f1b9130e8536534801804e18aecd0d74c
|
8d919cec13aac294a17640da244970e1501a7f72
|
/pos/admin.py
|
6d2d4acf76e6b0acfdc47a5f53dc21bfefc941ea
|
[] |
no_license
|
rumahaah/posq
|
c1ba74141d49f30f57acc926495f3c689f4ebb63
|
ef7f6284a1a6cd3ba83ad766cffb2aada2a11abd
|
refs/heads/main
| 2023-03-28T02:31:29.446502
| 2021-03-25T04:17:49
| 2021-03-25T04:17:49
| 349,239,888
| 0
| 0
| null | 2021-03-18T23:11:56
| 2021-03-18T23:00:09
| null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
from django.contrib import admin
from .models import Product, Order, Cash, OrderItem, Sequence

# Register each POS model with the default admin site (no custom ModelAdmin).
for pos_model in (Product, Order, Cash, OrderItem, Sequence):
    admin.site.register(pos_model)
|
[
"rumahaah@outlook.com"
] |
rumahaah@outlook.com
|
85cc6d7941875d0aa571c21c739706dce53b04e0
|
e2a545f54dbcbd26406e84ce1b3a2881e473ff0f
|
/Ejemplo11.py
|
b67dd9ed48e11eae18ac534627c4274e833f6644
|
[] |
no_license
|
danieljobvaladezelguera/CYPDANIELJVE
|
736bbec0700d36722de24ccb2272281cfbfa54a4
|
ce32a71f6f559749a9e5eb9d95a6cc7099de213f
|
refs/heads/master
| 2021-06-28T21:38:52.079378
| 2021-01-25T22:04:35
| 2021-01-25T22:04:35
| 207,672,145
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
# Read four integers from the user and echo them back in reverse order.
A = int(input("Dame un numero"))
B = int(input("Dame un numero"))
C = int(input("Dame un numero"))
D = int(input("Dame un numero"))
# BUG FIX: the original used a plain string, so the braces were printed
# literally ("{ D } { C } ...") instead of the entered values; an f-string
# interpolates them as intended.
print(f"Los numeros dados son { D } { C } { B } y { A } ")
|
[
"valadezlender@gmail.com"
] |
valadezlender@gmail.com
|
00fdc906660d104b8b62761e93694744748b46a5
|
32631291e7fa5e4abb0d99a84dc7f153502bcdae
|
/关键词替换模板生成网页/python关键词替换模板生成网页.py
|
8ab8d73a02c34b3e9ac942c9c83f252793cad9f3
|
[] |
no_license
|
FGFW/FCNNIC
|
ba5b7c4e8a7fa35b4658169b3e5cb14e78b4fa1e
|
24c7abd11a4390f289ae0ebf0b7194542365f0c0
|
refs/heads/master
| 2021-01-10T17:14:28.040943
| 2017-03-17T18:50:07
| 2017-03-17T18:50:07
| 44,691,126
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 843
|
py
|
"""
python关键词替换模板生成网页.py
http://www.bathome.net/thread-37777-1-1.html
依山居 1:07 2015/11/24
→_→强烈鄙视做垃圾站的
"""
import random
import os
with open("host.txt",encoding="utf-8") as f:
host=[r.rstrip() for r in f.readlines()]
with open("key.txt",encoding="utf-8") as f:
key=[r.rstrip() for r in f.readlines()]
with open("moban.txt",encoding="utf-8") as f:
moban=f.read()
for h in host:
rkey=random.sample(key,4)
dirs="./"+h+"/"+"www."+h+"/"
html=moban
#一行代码长到天涯海角~
html=html.replace("{关键词1}",rkey[0]).replace("{关键词2}",rkey[1]).replace("{关键词3}",rkey[2]).replace("{关键词4}",rkey[3])
if not os.path.exists(dirs):
os.makedirs(dirs)
with open(dirs+"index.htm","w+",encoding="utf-8") as f:
f.write(html)
|
[
"yishanju@gmail.com"
] |
yishanju@gmail.com
|
f42e8a9828e631d190c3141533622da976aa2b2a
|
4119a196ef64f6be0ca05a22a3e06ace95df8835
|
/TrainingCode/Comparison of left and right side/fc.py
|
442b15a3d4432c21c7ca38e8d1eb86de7f61ad08
|
[] |
no_license
|
uhomelee/DeepInverseKinematicsSolver
|
7358a2cf5a6e3222aab3066fedef56c35c68b7ad
|
bbf79d8f75883a53d4c0d9cff53a8ca9ccff4cd2
|
refs/heads/master
| 2020-03-23T05:19:39.066233
| 2019-06-16T09:23:04
| 2019-06-16T09:23:04
| 141,137,272
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,344
|
py
|
# Fully-connected regression network (TensorFlow 1.x graph mode).
# Maps a 15-D input vector to 6 rotation values; trains on the 5_10f dataset
# and checkpoints the best model by summed per-epoch sampled MSE.
import numpy as np
import tensorflow as tf
#import matplotlib.pyplot as plt

# Training data: inputs (N x 15) and targets (N x 6) as plain text matrices.
X_train = np.loadtxt('./train_x_5_10f.txt')
y_train = np.loadtxt('./train_y_5_10f.txt')
#X_test = np.loadtxt('test_x.txt')
#y_test = np.loadtxt('test_y.txt')

n_dim = 15  # input feature dimension
"""
n_neurons_1 = 256
n_neurons_2 = 512
n_neurons_3 = 512
n_neurons_4 = 1024
n_neurons_5 = 1024
n_neurons_6 = 1024
n_rot=6
"""
# Current (smaller) three-hidden-layer architecture.
n_neurons_1 = 128
n_neurons_2 = 128
n_neurons_3 = 256
n_rot = 6  # output dimension (number of rotation values)

# Make Session
net = tf.Session()

# Placeholders for a minibatch of inputs and targets.
X = tf.placeholder(dtype=tf.float32, shape=[None, n_dim])
Y = tf.placeholder(dtype=tf.float32, shape=[None, n_rot])

# Initializers
sigma = 1
weight_initializer = tf.variance_scaling_initializer(mode="fan_avg", distribution="uniform", scale=sigma)
bias_initializer = tf.zeros_initializer()

# NOTE(review): locals() is (ab)used as a dict for dynamically named layers;
# only the commented-out deep variant below actually relied on this.
W_hiddens=locals()
bias_hiddens=locals()
hiddens=locals()

# Layer 1: Variables for hidden weights and biases
W_hidden_1 = tf.Variable(weight_initializer([n_dim, n_neurons_1]))
bias_hidden_1 = tf.Variable(bias_initializer([n_neurons_1]))
hidden_1 = tf.nn.relu(tf.add(tf.matmul(X, W_hidden_1), bias_hidden_1))
# Layer 2: Variables for hidden weights and biases
W_hidden_2 = tf.Variable(weight_initializer([n_neurons_1, n_neurons_2]))
bias_hidden_2 = tf.Variable(bias_initializer([n_neurons_2]))
hidden_2 = tf.nn.relu(tf.add(tf.matmul(hidden_1, W_hidden_2), bias_hidden_2))
# Layer 3: Variables for hidden weights and biases
W_hidden_3 = tf.Variable(weight_initializer([n_neurons_2, n_neurons_3]))
bias_hidden_3 = tf.Variable(bias_initializer([n_neurons_3]))
hidden_3 = tf.nn.relu(tf.add(tf.matmul(hidden_2, W_hidden_3), bias_hidden_3))
"""
## Layer 4: Variables for hidden weights and biases
W_hidden_4 = tf.Variable(weight_initializer([n_neurons_3, n_neurons_4]))
bias_hidden_4 = tf.Variable(bias_initializer([n_neurons_4]))
hidden_4 = tf.nn.relu(tf.add(tf.matmul(hidden_3, W_hidden_4), bias_hidden_4))
## Layer 5: Variables for hidden we
# ights and biases
W_hidden_5 = tf.Variable(weight_initializer([n_neurons_4, n_neurons_5]))
bias_hidden_5 = tf.Variable(bias_initializer([n_neurons_5]))
hidden_5 = tf.nn.relu(tf.add(tf.matmul(hidden_4, W_hidden_5), bias_hidden_5))
W_hidden_6 = tf.Variable(weight_initializer([n_neurons_5, n_neurons_6]))
bias_hidden_6 = tf.Variable(bias_initializer([n_neurons_6]))
hiddens['hidden_6']=tf.nn.relu(tf.add(tf.matmul(hidden_5, W_hidden_6), bias_hidden_6))
for i in range(7,16):
    #print(i)
    W_hiddens['W_hidden_%s'%i]=tf.Variable(weight_initializer([1024, 1024]))
    bias_hiddens['bias_hidden_%s'%i]=tf.Variable(bias_initializer([1024]))
    hiddens['hidden_%s'%i]=tf.nn.relu(tf.add(tf.matmul(hiddens['hidden_%s'%(i-1)], W_hiddens['W_hidden_%s'%i]), bias_hiddens['bias_hidden_%s'%i]))
    #print(hiddens['hidden_%s'%i].shape)
# Output layer: Variables for output weights and biases
"""
# Linear output layer: raw (unactivated) rotation predictions.
W_out = tf.Variable(weight_initializer([256, n_rot]))
bias_out = tf.Variable(bias_initializer([n_rot]))
#out = tf.add(tf.matmul(hiddens['hidden_15'], W_out), bias_out)
out = tf.add(tf.matmul(hidden_3, W_out), bias_out)

# Mean-squared-error loss, Adam with default hyper-parameters.
mse = tf.reduce_mean(tf.squared_difference(out, Y))
opt = tf.train.AdamOptimizer().minimize(mse)
#global_step=tf.Variable(0,trainable=False)
#start_learning_rate=0.001
#learning_rate=tf.train.exponential_decay(start_learning_rate,global_step,decay_steps=850000,decay_rate=0.96,staircase=True)
#optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
#optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
#gvs = optimizer.compute_gradients(mse)
#capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]
#train_op = optimizer.apply_gradients(capped_gvs,global_step=global_step)
#train_op = optimizer.apply_gradients(capped_gvs)
# Compute the gradients for a list of variables.
# Run initializer
net.run(tf.global_variables_initializer())

# Number of epochs and batch size
epochs = 10000
batch_size = 128
saver = tf.train.Saver(max_to_keep=None)
min_mse=100000  # best (lowest) summed per-epoch MSE seen so far

for e in range(epochs):
    print("------" + str(e) + ":-------")
    # Shuffle training data
    shuffle_indices = np.random.permutation(np.arange(len(y_train)))
    X_train = X_train[shuffle_indices]
    y_train = y_train[shuffle_indices]
    # Accumulates the MSE sampled on every 5th minibatch below.
    total_mse = 0
    # Minibatch training
    for i in range(0, len(y_train) // batch_size):
        start = i * batch_size
        batch_x = X_train[start:start + batch_size]
        batch_y = y_train[start:start + batch_size]
        # Run optimizer with batch
        net.run(opt, feed_dict={X: batch_x, Y: batch_y})
        # Show progress
        if np.mod(i, 5) == 0:
            mse_final = net.run(mse, feed_dict={X: batch_x, Y: batch_y})
            print(mse_final)
            total_mse += mse_final
    #total_mse+=1
    # Checkpoint whenever this epoch improves the best summed MSE; the
    # epoch index and loss are appended to epoch.txt for bookkeeping.
    if total_mse < min_mse:
        with open("epoch.txt", 'a') as fopen:
            path_temp = "./model/model_3layer/model_final"
            save_path = saver.save(net, path_temp)
            min_mse = total_mse
            string = str(e) + "|" + str(total_mse) + '\n'
            fopen.write(string)
    #if e % 500 == 0:
    #model_path = "./model/model/model_" + str(e / 500 + 1)
    #save_path = saver.save(net, model_path)
    # Append the per-epoch loss for offline plotting.
    with open("loss.txt", 'a') as f:
        f.write(str(e) + " " + str(total_mse) + '\n')

# Print final MSE after Training
#mse_final = net.run(mse, feed_dict={X: X_test, Y: y_test})
#print(mse_final)
|
[
"1057980213@qq.com"
] |
1057980213@qq.com
|
0fe37e6ee18079c74d7bf6b0291667ce4c3633db
|
fc56f5f55653f44b23c66c18ac277bf02a4272c9
|
/producer.py
|
4e45b98772b7b992d97d4e25108464bb1f0cd19c
|
[] |
no_license
|
gogeterobert/Tema1ASC
|
e036166f285ed5cbc97bfe29fc610180d9a8d051
|
3c44f7f545221ff3e0da6a8a7f7d079bd1e5ef56
|
refs/heads/main
| 2023-04-13T19:00:00.457533
| 2021-04-04T11:41:28
| 2021-04-04T11:41:28
| 354,524,754
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,437
|
py
|
"""
This module represents the Producer.
Computer Systems Architecture Course
Assignment 1
March 2021
"""
from threading import Thread
import time
class Producer(Thread):
    """A producer thread that keeps publishing its products to the marketplace."""

    def __init__(self, products, marketplace, republish_wait_time, **kwargs):
        """
        Constructor.

        :param products: list of (product, quantity, production_time) tuples
                         that this producer will produce
        :param marketplace: reference to the shared Marketplace
        :param republish_wait_time: seconds to wait after finishing one
                                    product entry before moving on
        :param kwargs: other arguments intended for Thread's __init__()
        """
        Thread.__init__(self)
        self.products = products
        self.marketplace = marketplace
        self.republish_wait_time = republish_wait_time
        self.kwargs = kwargs
        # Register with the marketplace once, up front, to get our id.
        self._id = self.marketplace.register_producer()

    def run(self):
        # Produce for as long as the marketplace is open for business.
        while self.marketplace.is_running:
            for entry in self.products:
                item, quantity, production_time = entry[0], entry[1], entry[2]
                # Publish `quantity` units, pausing `production_time`
                # seconds after each one.
                for _ in range(quantity):
                    self.marketplace.publish(self._id, item)
                    time.sleep(production_time)
                # Cool-down before moving to the next product entry.
                time.sleep(self.republish_wait_time)
|
[
"gogeterobert@yahoo.com"
] |
gogeterobert@yahoo.com
|
35b4d9c5e3ee0f075cd9c9bec33be7a16b2e8eda
|
cde9682e6f02441b2667336ef438d5f5476751ea
|
/Week 1/Hello World/hello world.py
|
857db6e37bb124f4b019b39b94f7cea35f9a818f
|
[] |
no_license
|
TANISHCHHABRA/MIT-s-6.00.01x
|
83319a28effa41edace07184835dac8ed09b9419
|
0abcd2264935f0445e51bbd855ab081805b5e33e
|
refs/heads/master
| 2020-08-28T01:22:41.548740
| 2020-05-09T13:54:19
| 2020-05-09T13:54:19
| 217,545,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26
|
py
|
print("hello world")
None
|
[
"noreply@github.com"
] |
TANISHCHHABRA.noreply@github.com
|
de6fd5366e7dab8e688fc0b811d7a49ab8dec335
|
e0960c86f6ac0a7d2adf1a4a7b44da9c0a18b4aa
|
/Codes/p/p.py
|
fc1ee7553d4a3e8b722ff9664022343fe10be034
|
[] |
no_license
|
wenchao-h/WorkSpace
|
3fd4c5f3c18a8e2062fcc1248ce7d6de191df530
|
6b1abec2d27fef57397e9e8945e7886c93c1d958
|
refs/heads/master
| 2020-03-16T07:03:37.612724
| 2018-04-13T10:51:06
| 2018-04-13T10:51:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,370
|
py
|
# Per-channel power-spectral-density comparison of left- vs right-hand
# motor-imagery EEG trials recorded with OpenViBE.
import loadov
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt

# data: (samples, channels, trials) -- assumed from the [:,:,i] indexing
# below; TODO confirm against loadov. label: one entry per trial (0 = left).
data, label, info = loadov.loadov('D:\Myfiles\openvibefiles\MI-CSP-r1\signals\GH\GH-171225-acquisition-2') #!Path
left=[]
right=[]
ch=info['channal']  # channel names (key spelled as in loadov's output)
# Split trials by class label.
for i in range(0,len(label)):
    if label[i]==0:
        left.append(data[:,:,i])
    else:
        right.append(data[:,:,i])
l=np.array(left)
r=np.array(right)

# One 4x2 grid of subplots per class, one axis per channel.
fig1, ax1 =plt.subplots(nrows=4,ncols=2)
ax1=ax1.flat
fig1.suptitle('Label left')
fig2,ax2 =plt.subplots(nrows=4,ncols=2)
ax2=ax2.flat
fig2.suptitle('Label right')

for i in range(0, 8):  # 8 channels
    # Seed the accumulated PSD with trial 1 (500 Hz sampling, 1 s segments).
    f1,P1=signal.welch(l[1,:,i],500,nperseg=500)
    f2,P2=signal.welch(r[1,:,i],500,nperseg=500)
    ax1[i].set_title(ch[i])
    ax2[i].set_title(ch[i])
    # Accumulate PSDs over trials 1..9 and plot each trial's spectrum
    # restricted to bins 8..39 (~8-39 Hz).
    for j in range(1,10):
        f1,p1=signal.welch(l[j,:,i],500,nperseg=500)
        f2,p2=signal.welch(r[j,:,i],500,nperseg=500)
        P1=P1+p1
        P2=P2+p2
        ax1[i].plot(f1[8:40],p1[8:40],linewidth=0.5)
        ax2[i].plot(f2[8:40],p2[8:40],linewidth=0.5)
    #max=np.max(P1[16:60]) if np.max(P1[16:60])>np.max(P2[16:60]) else np.max(P2[16:60])
    #P1=P1/max
    #P2=P2/max
    ax1[i].set_ybound(upper=0.03)
    ax2[i].set_ybound(upper=0.03)
    # Figure 101: summed left vs right PSD overlaid per channel.
    plt.figure(101)
    plt.subplot(4,2,i+1)
    plt.title(ch[i])
    plt.plot(f1[8:40],P1[8:40],'r',f2[8:40],P2[8:40],'b')
    # Figure 102: left-minus-right PSD difference per channel.
    plt.figure(102)
    plt.subplot(4,2,i+1)
    plt.title(ch[i])
    plt.plot(f1[8:40],P1[8:40]-P2[8:40],'g')
    print ('finish ch'+str(i+1))
plt.show()
|
[
"450472377@qq.com"
] |
450472377@qq.com
|
06c6a6658e179d4299c1d81ea39e70d5a6e23564
|
eedf8b85c65f8732cc66967c5f62d248d7a1d1c1
|
/smart_traffic_video_processing/paths.py
|
819fba02971764cabc43af95a2818abb72e9a3e9
|
[] |
no_license
|
Abuelseaoud/Smart-Traffic-light-control
|
06232a624e0b40fe7c0c4533e00620ba06685f4d
|
b2ec7fa4b67d692d2f486e38e28eb4f605a16f91
|
refs/heads/master
| 2020-05-19T21:29:40.233067
| 2019-05-06T13:45:50
| 2019-05-06T13:45:50
| 185,225,871
| 0
| 0
| null | 2019-05-06T15:42:02
| 2019-05-06T15:42:02
| null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
# Hard-coded input/output locations for the traffic video processing pipeline.
# EW/NS: source videos for the east-west and north-south approaches
# (currently both point at the same sample clip).
EW_VIDEO_PATH = '/Volumes/SSD 1/Smart-Traffic-light-control1/cars3.mp4'
NS_VIDEO_PATH = '/Volumes/SSD 1/Smart-Traffic-light-control1/cars3.mp4'
# Directory where processed frames/results are written.
IMAGE_PATH = "/Volumes/SSD 1/Smart-Traffic-light-control1/output"
|
[
"muhammedmagdy.dev@gmail.com"
] |
muhammedmagdy.dev@gmail.com
|
640dc97dd5255c32176c195d4fdf6b170e999328
|
35b14d71ed97297165fd5bc970c2c465ee9c7a84
|
/FaceDetection/app/FaceDetectionApp.py
|
cc04f5e95f1901e0ca01acf357211797a0b42ea3
|
[] |
no_license
|
PushpaYa/FaceRecognition
|
a9e0f3d4c39410101765fad29d6d932114f3ee27
|
3598b40aa9bde173f17ffe3c052e1ee8890da7d0
|
refs/heads/master
| 2022-02-20T10:10:38.659185
| 2022-02-12T17:28:46
| 2022-02-12T17:28:46
| 249,934,156
| 1
| 2
| null | 2021-01-15T05:55:07
| 2020-03-25T09:16:51
|
Python
|
UTF-8
|
Python
| false
| false
| 7,366
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri March 27 12:23:59 2020
@author: pushpa
"""
from flask import Flask, request, redirect, url_for, send_from_directory,send_file
from flask import Flask, request, jsonify,render_template
from flask_debug import Debug
from flask import Flask, request, jsonify,render_template
from flask_json import FlaskJSON, JsonError, json_response
import logging
import os
import glob
from os.path import basename
from flask import Response
import numpy as np
import pandas
import pandas as pd
from os.path import basename
from numpy import nan
import re
import json
from werkzeug.utils import secure_filename
import cv2
import face_recognition
import glob
from os.path import basename
import os, sys
import numpy as np
from PIL import Image, ImageDraw
import os
import pickle
# Flask application setup plus one-time training: encode every face found in
# train_image/ and pickle the encodings for the recognition endpoint.
cwd = os.getcwd()
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False
# Uploaded class pictures land here.
path =cwd+"/"+"upload_image"+"/"
print(path)
UPLOAD_FOLDER = path
Debug(app)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Extension whitelist is case-sensitive (both lower- and upper-case listed).
ALLOWED_EXTENSIONS = set(['jpeg','jpg','JPG','JPEG'])
img_folder=cwd+"/train_image/"
Files = glob.glob(img_folder+'*.*')
img=[]
face_encoding=[]
known_face_encodings=[]   # one 128-d encoding per successfully trained image
known_face_names=[]       # matching file basenames (identity labels)
#Train model using train images
for file in Files:
    print("file",file)
    filename = os.path.splitext(basename(file))[0]
    FileExt = os.path.splitext(basename(file))[1]
    image = face_recognition.load_image_file(img_folder+filename+FileExt)
    encodings = face_recognition.face_encodings(image)
    if len(encodings) > 0:
        # Keep only the first face found in each training image.
        encoding=encodings[0]
        known_face_encodings.append(encoding)
        known_face_names.append(filename)
    else:
        print("No faces found in the image!")
#dump the trained encoding in pickle file
with open('dataset_faces.dat', 'wb') as f:
    pickle.dump(known_face_encodings, f)
def allowed_file(filename):
    """Return True if *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_file_image():
    """Upload endpoint.

    POST: save the submitted image into UPLOAD_FOLDER (after sanitising the
    filename) and redirect back to this view. GET, or a POST without a valid
    file part, renders a minimal HTML upload form.
    """
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            logging.info('app.app_context')
            return redirect(request.url)
        file = request.files['file']
        # if user does not select file, browser also
        # submit a empty part without filename
        if file.filename == '':
            logging.info('app.app_context')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            # secure_filename strips path separators / unsafe characters.
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            #return "file uploaded successfully"
            return redirect(url_for('upload_file_image',
                                    filename=filename))
    return '''
    <!doctype html>
    <title>Please upload class picture</title>
    <h1>Upload new File</h1>
    <form action="" method=post enctype=multipart/form-data>
      <p><input type=file name=file>
         <input type=submit value=Upload>
    </form>
    '''
# NOTE(review): a module-level `global` statement is a no-op; `frame` is never
# actually defined or used at module scope.
global frame
#take latest timestamp file from uploaded folder
def get_file_name():
    """Return the path of the most recently created file in the upload
    folder, or "" when the folder is empty."""
    Files = glob.glob(path+'/*.*')
    if len(Files) > 0:
        # Newest by creation time.
        latest_file_path = max(Files, key=os.path.getctime)
        filename = os.path.splitext(basename(latest_file_path))[0]
        FileExt = os.path.splitext(basename(latest_file_path))[1]
        # Re-anchor the basename on the upload folder path.
        filename = path+filename+FileExt
    else:
        filename = ""
    return filename
#reconige and tag faces ,return roll no ,name and tagged image
@app.route('/face_recognition_frames', methods=['GET', 'POST'])
def face_recognition_frames():
    """Recognise and tag faces in the most recently uploaded image.

    Loads the trained encodings from dataset_faces.dat, matches every face
    found in the latest upload, draws a labelled box around each one, saves
    the annotated image, and returns it with the matched {name: roll_no}
    map in the 'name' response header.
    """
    face_locations = []
    face_encodings = []
    face_names=[]
    students_records={}   # matched name -> roll number
    frame_number = 0
    # Load face encodings
    with open('dataset_faces.dat', 'rb') as f:
        known_face_encodings = pickle.load(f)
    frame = get_file_name()   # newest file in the upload folder
    print(frame)
    name = "Could not find face check uploded image"
    # Load an image with an unknown face
    unknown_image = face_recognition.load_image_file(frame)
    # Assume the whole image is the location of the face
    height, width, _ = unknown_image.shape
    # Find all the faces and face encodings in the unknown image
    face_locations = face_recognition.face_locations(unknown_image)
    face_encodings = face_recognition.face_encodings(unknown_image, face_locations)
    # Convert the image to a PIL-format image so that we can draw on top of it with the Pillow library
    # See http://pillow.readthedocs.io/ for more about PIL/Pillow
    pil_image = Image.fromarray(unknown_image)
    # Create a Pillow ImageDraw Draw instance to draw with
    draw = ImageDraw.Draw(pil_image)
    # Loop through each face found in the unknown image
    for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
        # See if the face is a match for the known face(s)
        matches = face_recognition.compare_faces(known_face_encodings, face_encoding,tolerance=0.4)
        name = "Unknown"
        rollno="Unknown"
        # See how far apart the test image is from the known faces
        face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
        best_match_index = np.argmin(face_distances)
        print("best_match_index",best_match_index)
        if matches[best_match_index]:
            # Training filenames appear to encode identity as
            # "<first>_<last>_..._<rollno>_<...>" -- TODO confirm convention.
            name_rollno = known_face_names[best_match_index]
            name = name_rollno.split("_")[0]+" "+name_rollno.split("_")[1]
            rollno=name_rollno.split("_")[-2]
        # Draw a box around the face using the Pillow module
        draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))
        # Draw a label with a name below the face
        text_width, text_height = draw.textsize(name)
        draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))
        draw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255))
        face_names.append(name)
        students_records[name]=rollno
    # Remove the drawing library from memory as per the Pillow docs
    del draw
    # Display the resulting image
    #pil_image.show()
    # You can also save a copy of the new image to disk if you want by uncommenting this line
    savepath="/home/pushpa/FaceDetection/app/data/"
    pil_image.save(os.path.join(savepath)+"image_with_boxes"+str(frame_number)+".jpeg")
    frame_number=frame_number+1
    if(len(face_names)==0):
        response = jsonify("No Face Found")
        response.headers['name'] = "No Face"
        response.headers['path'] = "No Image Saved"
    else:
        #image_file=os.path.join(path)+"image_with_boxes"+str(frame_number-1)+".jpeg"
        #response = app.make_response(image_file)
        response = send_file(os.path.join(savepath)+"image_with_boxes"+str(frame_number-1)+".jpeg")
        response.headers['name'] = students_records
        response.headers['path'] = os.path.join(savepath)+"image_with_boxes"+str(frame_number-1)+".jpeg"
    return response
if __name__ == "__main__":
#app.run(debug=True)
app.run(host = '0.0.0.0',port = 5011)
|
[
"pushpa.yadav@bostonlogix.com"
] |
pushpa.yadav@bostonlogix.com
|
b6562cd716423d6c50a73aeb2eccdc5912c75bed
|
796f7a3f214353372df626a9fc26fc84de7ce4d7
|
/lab2/2Task15.py
|
c60282744bf9d8b5477ea8fbfcefdb6ab3f53b83
|
[] |
no_license
|
saipavans/pythonrepo
|
5ec52efb055bb67b87a2301eef8e3c1a8dee571b
|
c22c9d5317568c7fd5c3598601e64426b0738686
|
refs/heads/master
| 2021-07-07T10:19:32.294714
| 2017-10-04T13:37:27
| 2017-10-04T13:37:27
| 103,655,076
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 709
|
py
|
def first(word):
    """Return the first character of *word*."""
    return word[0]

def last(word):
    """Return the last character of *word*."""
    return word[-1]

def middle(word):
    """Return *word* with its first and last characters removed."""
    return word[1:-1]

######## task 15 - 1 ##########
word1 = "ab"
word2 = "a"
word3 = ""
print("testing middle with two letters ", middle(word1))
print("testing middle with one letter", middle(word2))
print("testing middle with empty string", middle(word3))

######## task 15-2 #############
def is_palindrome(word):
    """Recursively decide whether *word* reads the same forwards and backwards.

    Bug fix: the original discarded the return value of the recursive call
    (`is_palindrome(middle(word))` was computed but never used) and its base
    case accepted every string of length <= 2, so inputs such as "ab" or
    "abcxba" were wrongly reported as palindromes.
    """
    # Base case: empty and single-character strings are palindromes.
    if len(word) <= 1:
        return True
    # Mismatched outer characters -> definitely not a palindrome.
    if first(word) != last(word):
        return False
    # Outer characters match: the inner substring decides the answer.
    return is_palindrome(middle(word))

print("checking the word redivider for palindrome", is_palindrome("redivider"))
print("checking the word redividend for palindrome", is_palindrome("redividend"))
|
[
"sp403250@dal.ca"
] |
sp403250@dal.ca
|
c00f60909a37be1910e82edde0016b6d7ac38bd3
|
c263de2879f12e20efdc7044f21edd3b78d8d959
|
/Data_for_analysis.py
|
fb09fd2aff4f610d4053d7c577a1a6fc88b0447f
|
[] |
no_license
|
legovaer/Indoor-positioning-using-machine-learning
|
f55c7aaf559b9534b13eb83aa58224e22492ae44
|
917452622dc65f5585023477b2f4a540ee31e9b5
|
refs/heads/master
| 2021-12-23T04:14:29.045169
| 2017-10-29T04:33:03
| 2017-10-29T04:33:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,386
|
py
|
# Capture 5000 BLE RSSI report lines from a serial port, strip them down to
# "slave_id,rssi" pairs, and split the rows into one CSV per beacon.
import serial
import csv

# NOTE(review): this rebinds the name `serial` from the module to the port
# object, so serial.Serial is unreachable afterwards.
serial = serial.Serial("COM1", 115200)

# Stage 1: dump 5000 raw reports (21 bytes each; byte 17 skipped, byte 21
# replaced by a newline) to data_unfiltered.txt.
with open("data_unfiltered.txt", "w") as f:
    i=1
    while i<=5000:
        for v in range(1,22):
            data = serial.read()
            #print (data)
            if v==21:
                f.write("\n")
            elif v==17:
                pass
            else:
                f.write(str(data))
        i+=1

# Stage 2: slice each raw line into the slave id (cols 11-14) and RSSI
# (cols 16-18) fields, comma separated.
with open("data_unfiltered.txt", "r") as f:
    with open("data_filtered.txt", "w") as f1:
        for s in range(5000):
            data1=f.readline()
            f1.write(data1[11:15]+","+data1[16:19]+"\n")

# Stage 3: convert the filtered text to a headed CSV.
with open('data_filtered.txt', 'r') as f1:
    stripped_data = (line.strip() for line in f1)
    #lines = (line for line in stripped_data if line)
    grouped = [line.split(',') for line in stripped_data]
    with open('data_filtered.csv', 'w') as f2:
        writer = csv.writer(f2)
        writer.writerow(['Slave', 'RSSI'])
        writer.writerows(grouped)

# Stage 4: demultiplex the rows into one file per beacon id. The v/x/y/z
# flags ensure each output file gets its header exactly once.
with open('data_filtered.csv', 'r') as f2:
    reader = csv.reader(f2)
    v=1
    x=1
    y=1
    z=1
    fb1=open('data_filtered_b1.csv', 'w')
    fb2=open('data_filtered_b2.csv', 'w')
    fb3=open('data_filtered_b3.csv', 'w')
    fb4=open('data_filtered_b4.csv', 'w')
    for row in reader:
        for field in row:
            if field=="69DF":
                writer = csv.writer(fb4)
                if v==1:
                    writer.writerow(["Slave","RSSI"])
                    v+=1
                writer.writerow(row)
            elif field=="41F3":
                writer = csv.writer(fb2)
                if x==1:
                    writer.writerow(["Slave","RSSI"])
                    x+=1
                writer.writerow(row)
            elif field=="6CDD":
                writer = csv.writer(fb1)
                if y==1:
                    writer.writerow(["Slave","RSSI"])
                    y+=1
                writer.writerow(row)
            elif field=="459F":
                writer = csv.writer(fb3)
                if z==1:
                    writer.writerow(["Slave","RSSI"])
                    z+=1
                writer.writerow(row)
fb1.close()
fb2.close()
fb3.close()
fb4.close()
# Window-average each per-beacon RSSI file. The original contained four
# copy-pasted 24-line blocks differing only in file names and beacon id;
# they are collapsed into a single helper called once per beacon.
def _average_rssi_file(in_path, out_path, slave_id, window=5):
    """Average consecutive windows of RSSI samples from one beacon file.

    Reads `in_path` (CSV rows of [slave_id, rssi], header row skipped by its
    "RSSI" value), averages each full group of `window` samples (any
    trailing partial group is dropped, as in the original), prints the list
    of averages, and writes them to `out_path` as [slave_id, average] rows
    under a Slave/RSSI header.

    Returns the list of averages as strings (mirrors the printed output).
    """
    values = []
    with open(in_path, 'r') as fin:
        for row in csv.reader(fin):
            if row[1] != "RSSI":        # skip the header row
                values.append(row[1])
    averages = []
    usable = len(values) - (len(values) % window)
    index = 0
    for _ in range(usable // window):
        total = 0
        for _ in range(window):
            total += int(values[index])
            index += 1
        averages.append(str(total / window))
    print(averages)
    with open(out_path, 'w') as fout:
        writer = csv.writer(fout)
        writer.writerow(["Slave", "RSSI"])
        for avg in averages:
            writer.writerow([slave_id, avg])
    return averages

# Beacon id per file matches the demultiplexing stage above.
_average_rssi_file('data_filtered_b1.csv', 'data_filtered_b1_avg.csv', "6CDD")
_average_rssi_file('data_filtered_b2.csv', 'data_filtered_b2_avg.csv', "41F3")
_average_rssi_file('data_filtered_b3.csv', 'data_filtered_b3_avg.csv', "459F")
_average_rssi_file('data_filtered_b4.csv', 'data_filtered_b4_avg.csv', "69DF")
|
[
"noreply@github.com"
] |
legovaer.noreply@github.com
|
b193aa0c060db359de32a986431ab19710b810b9
|
fdd6e734b53666a0deddd6acf295aee189cb6729
|
/prototipo/whois/migrations/0009_remove_linksp_fecha.py
|
db86e234530f02395d2cef274ba5dd8bcba36e95
|
[] |
no_license
|
Tatiana1999/hellow-word
|
75f3bdc23a3ae8d63b059ba736f8f3c1b32f9d76
|
73397755d33c4b8d9cc3d910c6a16d63c105db66
|
refs/heads/master
| 2021-04-30T03:31:58.300498
| 2018-05-28T15:00:09
| 2018-05-28T15:00:09
| 121,519,099
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-04 20:11
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):
    # Auto-generated schema migration: drops the `fecha` field from the
    # `linksp` model. Depends on the migration that created `linksp`.

    dependencies = [
        ('whois', '0008_linksp'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='linksp',
            name='fecha',
        ),
    ]
|
[
"noreply@github.com"
] |
Tatiana1999.noreply@github.com
|
d506c5086780f8d69a61cf9028c0c5f2ddeecc78
|
0b2bf68d50e585afbe46875955cd6472bce0e9ae
|
/train_segment.py
|
4317bd5e542736f5d4ee856b335158b5de8d86f5
|
[] |
no_license
|
tachitachi/CycleConsistency
|
80e194f9b8faad44aff8e76dbeb83681a32d7a37
|
1b5246248f3b88b163a07b940b4cc388773fbafd
|
refs/heads/master
| 2020-03-23T03:01:58.420498
| 2018-07-21T05:15:06
| 2018-07-21T05:15:06
| 141,005,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,059
|
py
|
import argparse
import tensorflow as tf
import data
import numpy as np
from preprocessing import preprocess
from postprocessing import postprocess
import time
import model
from tqdm import tqdm
import os
from labels import labels
def main(args):
    """Train a bidirectional image <-> segmentation cycle-consistency model.

    Builds two transformers (A->B: image to per-class logits, B->A: softmaxed
    segmentation back to image), optimises cycle-reconstruction plus direct
    transform losses with Adam, and periodically writes summaries and
    checkpoints to args.output_dir.
    """
    # get datasets
    dataset = data.get_dataset(args.dataset, args.split, image_size=args.image_size, data_dir=args.data_dir, is_training=True)

    im_x = preprocess(dataset.x, args.preprocessing_a, image_size=args.image_size, output_channels=args.num_channels)
    im_y = preprocess(dataset.y, args.preprocessing_b, image_size=args.image_size)

    im_batch_x, im_batch_y = data.create_batch([im_x, im_y], batch_size=args.batch_size, shuffle=args.shuffle, queue_size=2, min_queue_size=1)

    # build models
    # A->B emits raw logits (output_fn=None); softmax is applied before B->A.
    transformed_x = model.transformer(im_batch_x, output_channels=dataset.num_classes, output_fn=None, scope='model/AtoB')
    transformed_y = model.transformer(im_batch_y, output_channels=args.num_channels, scope='model/BtoA')

    cycled_x = model.transformer(tf.nn.softmax(transformed_x), output_channels=args.num_channels, scope='model/BtoA', reuse=True)
    cycled_y = model.transformer(transformed_y, output_channels=dataset.num_classes, output_fn=None, scope='model/AtoB', reuse=True)

    # create loss functions
    # L1 in image space, cross-entropy in label space.
    cycle_loss_x = tf.losses.absolute_difference(im_batch_x, cycled_x, scope='cycle_loss_x')
    cycle_loss_y = tf.losses.softmax_cross_entropy(im_batch_y, cycled_y, scope='cycle_loss_y')

    transform_loss_xy = tf.losses.absolute_difference(im_batch_x, transformed_y, scope='transform_loss_xy')
    transform_loss_yx = tf.losses.softmax_cross_entropy(im_batch_y, transformed_x, scope='transform_loss_yx')

    total_loss = cycle_loss_x + cycle_loss_y + transform_loss_xy + transform_loss_yx

    optimizer = tf.train.AdamOptimizer(args.learning_rate, args.beta1, args.beta2, args.epsilon)

    # Bump the global step once per train step via the UPDATE_OPS collection.
    inc_global_step = tf.assign_add(tf.train.get_or_create_global_step(), 1)
    tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, inc_global_step)

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_tensor = optimizer.minimize(total_loss)

    # Set up train op to return loss
    with tf.control_dependencies([train_tensor]):
        train_op = tf.identity(total_loss, name='train_op')

    # set up logging
    # Gather initial summaries.
    summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))

    # Add summaries for losses.
    for loss in tf.get_collection(tf.GraphKeys.LOSSES):
        summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))

    # Add summaries for variables.
    for variable in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
        summaries.add(tf.summary.histogram(variable.op.name, variable))

    # Class-index -> RGB colour table for visualising segmentations.
    color_map = np.array(list(map(lambda x: x.color, labels[:dataset.num_classes]))).astype(np.float32)

    segmentation_y = postprocess(tf.argmax(im_batch_y, -1), 'segmentation_to_rgb', dataset.num_classes, color_map)
    segmentation_transformed_x = postprocess(tf.argmax(transformed_x, -1), 'segmentation_to_rgb', dataset.num_classes, color_map)
    segmentation_cycled_y = postprocess(tf.argmax(cycled_y, -1), 'segmentation_to_rgb', dataset.num_classes, color_map)

    summaries.add(tf.summary.image('x', im_batch_x))
    summaries.add(tf.summary.image('y', segmentation_y))
    summaries.add(tf.summary.image('transformed_x', segmentation_transformed_x))
    summaries.add(tf.summary.image('transformed_y', transformed_y))
    summaries.add(tf.summary.image('cycled_x', cycled_x))
    summaries.add(tf.summary.image('cycled_y', segmentation_cycled_y))

    # Merge all summaries together.
    summary_op = tf.summary.merge(list(summaries), name='summary_op')

    # create train loop
    if not os.path.isdir(args.output_dir):
        os.makedirs(args.output_dir)

    saver = tf.train.Saver(var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='model'))
    checkpoint_path = os.path.join(args.output_dir, 'model.ckpt')
    writer = tf.summary.FileWriter(args.output_dir)

    with tf.Session() as sess:
        # Tensorflow initializations
        sess.run(tf.get_collection(tf.GraphKeys.TABLE_INITIALIZERS))
        tf.train.start_queue_runners(sess=sess)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        last_log_time = 0
        last_save_time = 0
        for i in tqdm(range(args.num_batches)):
            # Write summaries at most once per log_every_n_seconds.
            if last_log_time < time.time() - args.log_every_n_seconds:
                last_log_time = time.time()
                summary, loss_val, global_step = sess.run([summary_op, train_op, tf.train.get_global_step()])
                writer.add_summary(summary, global_step)
                writer.flush()
            else:
                loss_val, global_step = sess.run([train_op, tf.train.get_global_step()])

            # Periodic checkpointing, plus a final save after the loop.
            if last_save_time < time.time() - args.save_every_n_seconds:
                last_save_time = time.time()
                saver.save(sess, checkpoint_path, global_step=global_step)

        saver.save(sess, checkpoint_path, global_step=args.num_batches)
if __name__ == '__main__':
    # Command-line configuration; defaults target the KITTI segmentation set.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default='kitti')
    parser.add_argument('--data_dir', type=str, default='/home/aaron/data/datasets/kitti')
    parser.add_argument('--split', type=str, default='train')
    parser.add_argument('--preprocessing_a', type=str, default='simple')
    parser.add_argument('--preprocessing_b', type=str, default=None)
    parser.add_argument('--image_size', type=int, default=256)
    parser.add_argument('--num_channels', type=int, default=3)
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--num_batches', type=int, default=100000)
    parser.add_argument('--shuffle', type=bool, default=True)
    # Default output dir is timestamped so runs never collide.
    parser.add_argument('--output_dir', type=str, default='output/%d' % int(time.time() * 1000))
    parser.add_argument('--log_every_n_seconds', type=int, default=120)
    parser.add_argument('--save_every_n_seconds', type=int, default=600)
    # Adam hyper-parameters.
    parser.add_argument('--learning_rate', type=float, default=1e-4)
    parser.add_argument('--beta1', type=float, default=0.9)
    parser.add_argument('--beta2', type=float, default=0.99)
    parser.add_argument('--epsilon', type=float, default=1e-8)

    args = parser.parse_args()
    main(args)
|
[
"aaronkeech@gatech.edu"
] |
aaronkeech@gatech.edu
|
dd35422618681f9e7ae5ba8f11324048bd7a2807
|
453b3edc0d92207e39cff76a0818026bf050c879
|
/tic_tac_toe.py
|
98540d1252ee682a560e881c871223c07e2b1647
|
[] |
no_license
|
shukl08vk/tic_tac_toe
|
d3f227f4c87535000232af9977a2eea5b1ccbc0e
|
4901e32b74f16a612ae6c131dcb27511cb15d4b6
|
refs/heads/master
| 2023-01-28T19:34:31.590358
| 2020-12-08T17:44:01
| 2020-12-08T17:44:01
| 319,707,384
| 0
| 0
| null | 2020-12-08T17:44:03
| 2020-12-08T17:10:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,123
|
py
|
# The 3x3 board, keyed 1-9 to match a numeric keypad: 7-8-9 is the top row,
# 1-2-3 the bottom. ' ' marks an empty cell.
board={7:' ',8:' ',9:' ',
       6:' ',5:' ',4:' ',
       3:' ',2:' ',1:' '}
def printBoard(board):
    """Render the board to stdout, top row (cells 7-9) first, with a
    '-+-+-' separator printed after every row."""
    for row_start in (7, 4, 1):
        row_cells = (board[row_start], board[row_start + 1], board[row_start + 2])
        print('|'.join(row_cells))
        print("-+-+-")
def winner(board):
    """Return 1 if any row, column or diagonal holds three identical
    non-blank marks, otherwise 0."""
    winning_lines = (
        (1, 4, 7), (2, 5, 8), (3, 6, 9),   # columns
        (1, 2, 3), (4, 5, 6), (7, 8, 9),   # rows
        (1, 5, 9), (7, 5, 3),              # diagonals
    )
    for a, b, c in winning_lines:
        if board[a] == board[b] == board[c] != ' ':
            return 1
    return 0
def game(board):
    """Run one interactive tic-tac-toe match on *board*.

    Players 'x' and 'o' alternate; each turn reads a cell number (1-9) from
    stdin until a valid, empty cell is given. A winner can only exist from
    move 5 onwards; 9 moves without a winner is a tie.
    """
    player='x'
    count=0
    for _ in range(1,10):   # at most 9 moves total
        printBoard(board)
        print("Turn of player <"+player+">"+": which place to move (1-9):")
        # Re-prompt until the player enters an integer naming an empty cell.
        while(1):
            try:
                moveTo=int(input())
                if board[moveTo]==' ':
                    board[moveTo]=player
                    break
                else:
                    print("Please Fill in empty place")
                    print("Turn of player <"+player+">"+" which place to move:")
            except ValueError:
                print('Please enter postion between 1 to 9..according to your num keyboard')
        count+=1
        # A win needs at least 5 moves, so skip the check before then.
        if count>=5:
            printBoard(board)
            if winner(board)==1:
                print("++ Game Over ++")
                print("Won Player"+" "+player)
                return
        if count==9:
            print("++ Game Over ++")
            print("It's a tie game")
            return
        # Alternate turns.
        if player=="x":
            player='o'
        else:
            player='x'
if __name__ == "__main__":
while(1):
isPlay=input('Do you want to start the game: Enter "y" for YES and "n" for NO:')
if isPlay in ['y',"Y"]:
for i in board.keys():
board[i]=' '
game(board)
elif isPlay in ['n',"N"]:
print("Thanks For Playing :")
break
else:
print('Enter "y" for YES and "n" for NO:')
|
[
"21shukla08@gmail.com"
] |
21shukla08@gmail.com
|
c7bc3c2ab170180b665ad50dc5405b76ebec96fd
|
77985a0973731044c7c73873116403ea381c9cfa
|
/Cryptography/break-cert.py
|
34b2a38d137853cfe96ab4ad666ba9ff326db143
|
[] |
no_license
|
samirettali/ctf-scripts
|
1dc7338e9969cf376087717175cb9cef48e971c8
|
27d6c886903e9b4a1958c3868a51bd0c707604c2
|
refs/heads/master
| 2021-07-15T19:28:42.619409
| 2020-09-26T07:41:15
| 2020-09-26T07:41:15
| 192,199,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
#!/usr/bin/env python3
import primefac
import OpenSSL.crypto
from sys import argv
# This script tries to break a PEM certificate that uses weak primes
def main():
if len(argv) != 2:
print('Usage: ./%s <pem_file>')
exit(1)
cert = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM,
open(argv[1]).read()
)
pubkey = cert.get_pubkey().to_cryptography_key().public_numbers()
factors = primefac.factorint(pubkey.n)
factors = [int(n) for n in factors.keys()]
print(factors)
if __name__ == '__main__':
main()
|
[
"ettali.samir@gmail.com"
] |
ettali.samir@gmail.com
|
b9026089df37762b4bd1e3b3ae2b11109493b806
|
1fcdccf5d651b60bfe906f2ddafd6745f4e29860
|
/nufeeb.button/reports/test_instockorderCase.py
|
96c6850aa19dd022b985209874f6696bb87540b3
|
[] |
no_license
|
LimXS/workspace
|
6728d6517a764ef2ac8d47fe784c4dba937a1f1d
|
9669d653f4a7723947da645de526f4c580ddc88b
|
refs/heads/master
| 2021-01-21T06:39:14.126933
| 2017-04-14T03:24:36
| 2017-04-14T03:24:36
| 83,257,374
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,646
|
py
|
#*-* coding:UTF-8 *-*
import unittest
import xml.dom.minidom
import traceback
from common import browserClass
browser=browserClass.browser()
class instockorderreportTest(unittest.TestCase):
u'''报表-进货报表-进货订单统计'''
def setUp(self):
self.driver=browser.startBrowser('chrome')
browser.set_up(self.driver)
cookie = [item["name"] + "=" + item["value"] for item in self.driver.get_cookies()]
#print cookie
self.cookiestr = ';'.join(item for item in cookie)
browser.delaytime(1)
pass
def tearDown(self):
print "test over"
self.driver.close()
pass
def test_instockorderReport(self):
u'''报表-进货报表-进货订单统计'''
header={'cookie':self.cookiestr,"Content-Type": "application/json"}
dom = xml.dom.minidom.parse(r'C:\workspace\nufeeb.button\reports\reportslocation.xml')
module=browser.xmlRead(dom,'module',0)
moduledetail=browser.xmlRead(dom,'moduledetail',0)
moduledd=browser.xmlRead(dom,'moduledd',0)
browser.openModule3(self.driver,module,moduledetail,moduledd)
#页面id
#pageurl=browser.xmlRead(dom,"fixcapsaleurl",0)
#pageid=browser.getalertid(pageurl,header)
try:
browser.exjscommin(self.driver,"关闭")
browser.openModule3(self.driver,module,moduledetail,moduledd)
browser.exjscommin(self.driver,"确定")
#详情
browser.exjscommin(self.driver,"详情")
browser.exjscommin(self.driver,"取消")
browser.exjscommin(self.driver,"详情")
browser.exjscommin(self.driver,"确定")
browser.selectbycon(self.driver,"商品编号")
browser.exjscommin(self.driver,"退出")
#刷新
browser.refreshbutton(self.driver)
#筛选
browser.selectbycon(self.driver,"商品编号")
#查询条件
browser.exjscommin(self.driver,"查询条件")
browser.exjscommin(self.driver,"关闭")
browser.exjscommin(self.driver,"查询条件")
js="$(\"input[id$=edBType]\").last().attr(\"id\",\"conid\")"
browser.delaytime(1)
browser.excutejs(self.driver,js)
conid="conid"
browser.doubleclick(self.driver,conid)
browser.pagechoice(self.driver)
browser.exjscommin(self.driver,"关闭")
browser.doubleclick(self.driver,conid)
browser.exjscommin(self.driver,"进入下级")
browser.exjscommin(self.driver,"查看单位基本信息")
browser.exjscommin(self.driver,"关闭")
browser.exjscommin(self.driver,"返回上级")
browser.exjscommin(self.driver,"选择一类")
browser.doubleclick(self.driver,conid)
browser.exjscommin(self.driver,"选中")
browser.exjscommin(self.driver,"选中")
browser.exjscommin(self.driver,"确定")
#退出
browser.exjscommin(self.driver,"退出")
browser.openModule3(self.driver,module,moduledetail,moduledd)
except:
print traceback.format_exc()
filename=browser.xmlRead(dom,'filename',0)
#print filename+u"常用-单据草稿.png"
#browser.getpicture(self.driver,filename+u"notedraft.png")
browser.getpicture(self.driver,filename+u"报表-进货报表-进货订单统计.png")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
[
"xsx2018@yahoo.com"
] |
xsx2018@yahoo.com
|
c0d3be7cd952fece2cbf790db1b52a51c174be72
|
d5ebb9ec001028eec9ca6448ec306400c1f9c1dc
|
/venv/Scripts/runxlrd.py
|
00cb9080c15c845f6a35ad86ff1497f73ccbd6e4
|
[] |
no_license
|
ysBA2018/Rapp_myRDB
|
9062f0b3fc2e36b8e69b8b721a772a50b23f7bf3
|
a1b7786e641b234cfdef8af1e330d1bd107c447c
|
refs/heads/master
| 2020-05-30T23:05:52.661719
| 2019-10-16T07:20:41
| 2019-10-16T07:20:41
| 190,004,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,343
|
py
|
#!C:\Users\Yannick Simchen\PycharmProjects\Rapp_myRDB\venv\Scripts\python.exe
# Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd
# This script is part of the xlrd package, which is released under a
# BSD-style licence.
from __future__ import print_function
cmd_doc = """
Commands:
2rows Print the contents of first and last row in each sheet
3rows Print the contents of first, second and last row in each sheet
bench Same as "show", but doesn't print -- for profiling
biff_count[1] Print a count of each type of BIFF record in the file
biff_dump[1] Print a dump (char and hex) of the BIFF records in the file
fonts hdr + print a dump of all font objects
hdr Mini-overview of file (no per-sheet information)
hotshot Do a hotshot profile run e.g. ... -f1 hotshot bench bigfile*.xls
labels Dump of sheet.col_label_ranges and ...row... for each sheet
name_dump Dump of each object in book.name_obj_list
names Print brief information for each NAME record
ov Overview of file
profile Like "hotshot", but uses cProfile
show Print the contents of all rows in each sheet
version[0] Print versions of xlrd and Python and exit
xfc Print "XF counts" and cell-type counts -- see code for details
[0] means no file arg
[1] means only one file arg i.e. no glob.glob pattern
"""
options = None
if __name__ == "__main__":
PSYCO = 0
import xlrd
import sys
import time
import glob
import traceback
import gc
from xlrd.timemachine import xrange, REPR
class LogHandler(object):
def __init__(self, logfileobj):
self.logfileobj = logfileobj
self.fileheading = None
self.shown = 0
def setfileheading(self, fileheading):
self.fileheading = fileheading
self.shown = 0
def write(self, text):
if self.fileheading and not self.shown:
self.logfileobj.write(self.fileheading)
self.shown = 1
self.logfileobj.write(text)
null_cell = xlrd.empty_cell
def show_row(bk, sh, rowx, colrange, printit):
if bk.ragged_rows:
colrange = range(sh.row_len(rowx))
if not colrange: return
if printit: print()
if bk.formatting_info:
for colx, ty, val, cxfx in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r, xfx: %s"
% (xlrd.colname(colx), rowx+1, ty, val, cxfx))
else:
for colx, ty, val, _unused in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r" % (xlrd.colname(colx), rowx+1, ty, val))
def get_row_data(bk, sh, rowx, colrange):
result = []
dmode = bk.datemode
ctys = sh.row_types(rowx)
cvals = sh.row_values(rowx)
for colx in colrange:
cty = ctys[colx]
cval = cvals[colx]
if bk.formatting_info:
cxfx = str(sh.cell_xf_index(rowx, colx))
else:
cxfx = ''
if cty == xlrd.XL_CELL_DATE:
try:
showval = xlrd.xldate_as_tuple(cval, dmode)
except xlrd.XLDateError as e:
showval = "%s:%s" % (type(e).__name__, e)
cty = xlrd.XL_CELL_ERROR
elif cty == xlrd.XL_CELL_ERROR:
showval = xlrd.error_text_from_code.get(cval, '<Unknown error code 0x%02x>' % cval)
else:
showval = cval
result.append((colx, cty, showval, cxfx))
return result
def bk_header(bk):
print()
print("BIFF version: %s; datemode: %s"
% (xlrd.biff_text_from_num[bk.biff_version], bk.datemode))
print("codepage: %r (encoding: %s); countries: %r"
% (bk.codepage, bk.encoding, bk.countries))
print("Last saved by: %r" % bk.user_name)
print("Number of data sheets: %d" % bk.nsheets)
print("Use mmap: %d; Formatting: %d; On demand: %d"
% (bk.use_mmap, bk.formatting_info, bk.on_demand))
print("Ragged rows: %d" % bk.ragged_rows)
if bk.formatting_info:
print("FORMATs: %d, FONTs: %d, XFs: %d"
% (len(bk.format_list), len(bk.font_list), len(bk.xf_list)))
if not options.suppress_timing:
print("Load time: %.2f seconds (stage 1) %.2f seconds (stage 2)"
% (bk.load_time_stage_1, bk.load_time_stage_2))
print()
def show_fonts(bk):
print("Fonts:")
for x in xrange(len(bk.font_list)):
font = bk.font_list[x]
font.dump(header='== Index %d ==' % x, indent=4)
def show_names(bk, dump=0):
bk_header(bk)
if bk.biff_version < 50:
print("Names not extracted in this BIFF version")
return
nlist = bk.name_obj_list
print("Name list: %d entries" % len(nlist))
for nobj in nlist:
if dump:
nobj.dump(sys.stdout,
header="\n=== Dump of name_obj_list[%d] ===" % nobj.name_index)
else:
print("[%d]\tName:%r macro:%r scope:%d\n\tresult:%r\n"
% (nobj.name_index, nobj.name, nobj.macro, nobj.scope, nobj.result))
def print_labels(sh, labs, title):
if not labs:return
for rlo, rhi, clo, chi in labs:
print("%s label range %s:%s contains:"
% (title, xlrd.cellname(rlo, clo), xlrd.cellname(rhi-1, chi-1)))
for rx in xrange(rlo, rhi):
for cx in xrange(clo, chi):
print(" %s: %r" % (xlrd.cellname(rx, cx), sh.cell_value(rx, cx)))
def show_labels(bk):
# bk_header(bk)
hdr = 0
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
clabs = sh.col_label_ranges
rlabs = sh.row_label_ranges
if clabs or rlabs:
if not hdr:
bk_header(bk)
hdr = 1
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
print_labels(sh, clabs, 'Col')
print_labels(sh, rlabs, 'Row')
if bk.on_demand: bk.unload_sheet(shx)
def show(bk, nshow=65535, printit=1):
bk_header(bk)
if 0:
rclist = xlrd.sheet.rc_stats.items()
rclist = sorted(rclist)
print("rc stats")
for k, v in rclist:
print("0x%04x %7d" % (k, v))
if options.onesheet:
try:
shx = int(options.onesheet)
except ValueError:
shx = bk.sheet_by_name(options.onesheet).number
shxrange = [shx]
else:
shxrange = range(bk.nsheets)
# print("shxrange", list(shxrange))
for shx in shxrange:
sh = bk.sheet_by_index(shx)
nrows, ncols = sh.nrows, sh.ncols
colrange = range(ncols)
anshow = min(nshow, nrows)
print("sheet %d: name = %s; nrows = %d; ncols = %d" %
(shx, REPR(sh.name), sh.nrows, sh.ncols))
if nrows and ncols:
# Beat the bounds
for rowx in xrange(nrows):
nc = sh.row_len(rowx)
if nc:
sh.row_types(rowx)[nc-1]
sh.row_values(rowx)[nc-1]
sh.cell(rowx, nc-1)
for rowx in xrange(anshow-1):
if not printit and rowx % 10000 == 1 and rowx > 1:
print("done %d rows" % (rowx-1,))
show_row(bk, sh, rowx, colrange, printit)
if anshow and nrows:
show_row(bk, sh, nrows-1, colrange, printit)
print()
if bk.on_demand: bk.unload_sheet(shx)
def count_xfs(bk):
bk_header(bk)
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
nrows = sh.nrows
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
# Access all xfindexes to force gathering stats
type_stats = [0, 0, 0, 0, 0, 0, 0]
for rowx in xrange(nrows):
for colx in xrange(sh.row_len(rowx)):
xfx = sh.cell_xf_index(rowx, colx)
assert xfx >= 0
cty = sh.cell_type(rowx, colx)
type_stats[cty] += 1
print("XF stats", sh._xf_index_stats)
print("type stats", type_stats)
print()
if bk.on_demand: bk.unload_sheet(shx)
def main(cmd_args):
import optparse
global options, PSYCO
usage = "\n%prog [options] command [input-file-patterns]\n" + cmd_doc
oparser = optparse.OptionParser(usage)
oparser.add_option(
"-l", "--logfilename",
default="",
help="contains error messages")
oparser.add_option(
"-v", "--verbosity",
type="int", default=0,
help="level of information and diagnostics provided")
oparser.add_option(
"-m", "--mmap",
type="int", default=-1,
help="1: use mmap; 0: don't use mmap; -1: accept heuristic")
oparser.add_option(
"-e", "--encoding",
default="",
help="encoding override")
oparser.add_option(
"-f", "--formatting",
type="int", default=0,
help="0 (default): no fmt info\n"
"1: fmt info (all cells)\n",
)
oparser.add_option(
"-g", "--gc",
type="int", default=0,
help="0: auto gc enabled; 1: auto gc disabled, manual collect after each file; 2: no gc")
oparser.add_option(
"-s", "--onesheet",
default="",
help="restrict output to this sheet (name or index)")
oparser.add_option(
"-u", "--unnumbered",
action="store_true", default=0,
help="omit line numbers or offsets in biff_dump")
oparser.add_option(
"-d", "--on-demand",
action="store_true", default=0,
help="load sheets on demand instead of all at once")
oparser.add_option(
"-t", "--suppress-timing",
action="store_true", default=0,
help="don't print timings (diffs are less messy)")
oparser.add_option(
"-r", "--ragged-rows",
action="store_true", default=0,
help="open_workbook(..., ragged_rows=True)")
options, args = oparser.parse_args(cmd_args)
if len(args) == 1 and args[0] in ("version", ):
pass
elif len(args) < 2:
oparser.error("Expected at least 2 args, found %d" % len(args))
cmd = args[0]
xlrd_version = getattr(xlrd, "__VERSION__", "unknown; before 0.5")
if cmd == 'biff_dump':
xlrd.dump(args[1], unnumbered=options.unnumbered)
sys.exit(0)
if cmd == 'biff_count':
xlrd.count_records(args[1])
sys.exit(0)
if cmd == 'version':
print("xlrd: %s, from %s" % (xlrd_version, xlrd.__file__))
print("Python:", sys.version)
sys.exit(0)
if options.logfilename:
logfile = LogHandler(open(options.logfilename, 'w'))
else:
logfile = sys.stdout
mmap_opt = options.mmap
mmap_arg = xlrd.USE_MMAP
if mmap_opt in (1, 0):
mmap_arg = mmap_opt
elif mmap_opt != -1:
print('Unexpected value (%r) for mmap option -- assuming default' % mmap_opt)
fmt_opt = options.formatting | (cmd in ('xfc', ))
gc_mode = options.gc
if gc_mode:
gc.disable()
for pattern in args[1:]:
for fname in glob.glob(pattern):
print("\n=== File: %s ===" % fname)
if logfile != sys.stdout:
logfile.setfileheading("\n=== File: %s ===\n" % fname)
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC before open:", n_unreachable, "unreachable objects")
if PSYCO:
import psyco
psyco.full()
PSYCO = 0
try:
t0 = time.time()
bk = xlrd.open_workbook(
fname,
verbosity=options.verbosity, logfile=logfile,
use_mmap=mmap_arg,
encoding_override=options.encoding,
formatting_info=fmt_opt,
on_demand=options.on_demand,
ragged_rows=options.ragged_rows,
)
t1 = time.time()
if not options.suppress_timing:
print("Open took %.2f seconds" % (t1-t0,))
except xlrd.XLRDError as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
continue
except KeyboardInterrupt:
print("*** KeyboardInterrupt ***")
traceback.print_exc(file=sys.stdout)
sys.exit(1)
except BaseException as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
traceback.print_exc(file=sys.stdout)
continue
t0 = time.time()
if cmd == 'hdr':
bk_header(bk)
elif cmd == 'ov': # OverView
show(bk, 0)
elif cmd == 'show': # all rows
show(bk)
elif cmd == '2rows': # first row and last row
show(bk, 2)
elif cmd == '3rows': # first row, 2nd row and last row
show(bk, 3)
elif cmd == 'bench':
show(bk, printit=0)
elif cmd == 'fonts':
bk_header(bk)
show_fonts(bk)
elif cmd == 'names': # named reference list
show_names(bk)
elif cmd == 'name_dump': # named reference list
show_names(bk, dump=1)
elif cmd == 'labels':
show_labels(bk)
elif cmd == 'xfc':
count_xfs(bk)
else:
print("*** Unknown command <%s>" % cmd)
sys.exit(1)
del bk
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC post cmd:", fname, "->", n_unreachable, "unreachable objects")
if not options.suppress_timing:
t1 = time.time()
print("\ncommand took %.2f seconds\n" % (t1-t0,))
return None
av = sys.argv[1:]
if not av:
main(av)
firstarg = av[0].lower()
if firstarg == "hotshot":
import hotshot
import hotshot.stats
av = av[1:]
prof_log_name = "XXXX.prof"
prof = hotshot.Profile(prof_log_name)
# benchtime, result = prof.runcall(main, *av)
result = prof.runcall(main, *(av, ))
print("result", repr(result))
prof.close()
stats = hotshot.stats.load(prof_log_name)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
elif firstarg == "profile":
import cProfile
av = av[1:]
cProfile.run('main(av)', 'YYYY.prof')
import pstats
p = pstats.Stats('YYYY.prof')
p.strip_dirs().sort_stats('cumulative').print_stats(30)
elif firstarg == "psyco":
PSYCO = 1
main(av[1:])
else:
main(av)
|
[
"yannick.simchen@student.mi.hs-rm.de"
] |
yannick.simchen@student.mi.hs-rm.de
|
c1329f935163bf276dc5c80575b1a20f0a14c512
|
9bceba3fdde706db6705fac549e048723e7c56bc
|
/cpepc_server.py
|
156052f7f755c8462ee7120b433a144a901d04e3
|
[] |
no_license
|
qsq522439539/factorytest
|
6a744e96dc632d8395b7e966c3c471aefdca572c
|
a47f300ce668eaa0d0d0e26c8bd24605eb6c5b46
|
refs/heads/master
| 2021-05-11T17:54:18.605443
| 2018-01-19T10:06:55
| 2018-01-19T10:06:55
| 117,809,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,598
|
py
|
#!/usr/bin/env python
#coding=utf-8
from ctypes import *
from socket import *
import paramiko
import subprocess
import multiprocessing
import threading
import time
import json
import sys
import re
import os
import struct
import _cffi_backend
import mul_process_package
hoolog = "D:/Autotest/Logs/hoonetmeter.txt"
class CpePCMain:
port = None
threadDict = {}
mqueue = multiprocessing.Queue()
def __init__(self, port=40001):
self.port = port
def run(self):
if not self.startHooMeter():
print "HooNetMeter does not work normally."
return
print "Server is starting"
serversock = socket(AF_INET, SOCK_STREAM)
serversock.bind(('', self.port))
serversock.listen(1)
quitThread = threading.Thread(target = self._quitapp, args = ('quit',))
quitThread.start()
while True:
conn, address = serversock.accept()
try:
#conn.settimeout(500)
recvThread = threading.Thread(target = self._recvSock, args = (conn, None))
recvThread.setDaemon(True)
recvThread.start()
print "New connection is ready"
while True:
try:
next_msg = self.mqueue.get()
except self.mqueue.Empty:
print " ", conn.getpeername() , 'queue empty'
break
else:
data = json.dumps(next_msg)
header = struct.pack("i",int(len(data)+4))
print "Feedback>>> " , next_msg
conn.send(header+data)
if (next_msg[0]=='quit'):
break
except Exception,e:
print e,'Time out'
break
conn.close()
serversock.close()
def _quitapp(self, test):
while True:
data = raw_input('> ')
if data == 'quit':
self.mqueue.put(['quit'])
os.system("taskkill /f /im HooNetMeter.exe" + " 1>NUL 2>&1")
time.sleep(2)
os._exit(0)
def _recvSock(self, sock, test):
dataBuffer = bytes()
while True:
try:
data = sock.recv(1024)
if data:
dataBuffer += data
while True:
if len(dataBuffer)<4:
break;
length = struct.unpack("i",dataBuffer[0:4])
if len(dataBuffer)<length[0]:
break;
body=dataBuffer[4:length[0]]
if not self.parseCmd(body):
print "Normally close socket."
self.mqueue.put(['quit'])
time.sleep(3)
sock.close()
return;
dataBuffer=dataBuffer[length[0]:]
else:
print "No data received, closing socket."
sock.close()
break
except Exception,e:
#print e
print "Socket Disconnected."
sock.close()
break
def parseCmd(self, data):
print 'Received CMD:',data
try:
command = json.loads(data)
except Exception,e:
print e
return False
if command[0]=='quit':
return False
try:
opProcess = multiprocessing.Process(args=(command,self.mqueue),target=self.processCmd)
opProcess.start()
self.threadDict[command[0]]=opProcess
except Exception,e:
print 'parseCmd:',e
return False
return True
def processCmd(self, command, queue):
try:
result = CpeControl().main(command)
#print 'result to be return:',result
queue.put([command[0],command[1],result])
except Exception,e:
print 'processCmd:',e
return False
def startHooMeter(self,prc_name='HooNetMeter'):
try:
os.system("taskkill /f /im " + prc_name+".exe" + " 1>NUL 2>&1")
time.sleep(2)
if os.path.exists(hoolog):
os.remove(hoolog)
for i in range(5):
if self.checkHooMeter(prc_name):
return True
else:
subprocess.Popen(prc_name+".exe")
time.sleep(1)
return False
except Exception,e:
print e
return False
def checkHooMeter(self,prc_name):
psapi = windll.psapi #PSAPI.DLL
kernel = windll.kernel32 #Kernel32.DLL
arr = c_ulong * 256
lpidProcess= arr()
cb = sizeof(lpidProcess)
cbNeeded = c_ulong()
hModule = c_ulong()
count = c_ulong()
modname = c_buffer(30)
PROCESS_QUERY_INFORMATION = 0x0400
PROCESS_VM_READ = 0x0010
#Call Enumprocesses to get hold of process id's
psapi.EnumProcesses(byref(lpidProcess), cb, byref(cbNeeded))
#Number of processes returned
nReturned = cbNeeded.value/sizeof(c_ulong())
pidProcess = [i for i in lpidProcess][:nReturned]
for pid in pidProcess:
#Get handle to the process based on PID
hProcess = kernel.OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, False, pid)
if hProcess:
psapi.EnumProcessModules(hProcess, byref(hModule), sizeof(hModule), byref(count))
psapi.GetModuleBaseNameA(hProcess, hModule.value, modname, sizeof(modname))
processName = "".join([ i for i in modname if i != '\x00'])
#-- Clean up
for i in range(modname._length_):
modname[i]='\x00'
kernel.CloseHandle(hProcess)
if prc_name in processName:
print processName, "is started."
return True
return False
class CpeControl:
client = None
def main(self, command):
try:
if(command[1]=="ATTACH"):
return self.operation_attach(command[0],command[2])
else:
return self.operataion_service(command[0],command[1],command[2])
except Exception,e:
return [False,repr(e)]
#Attach Operation#
def operation_attach(self, cpeip, params):
#[serverip, cpetype, earfcn, pci]
if not self.accessible(cpeip,300): #5 minutes
return [False,'CPE %s is not accessible'%cpeip]
client = self.sshsession(cpeip)
if not client:
return [False,'CPE %s SSH login fails'%cpeip]
isodu = True if params[1]=='ODU' else False
showac = "at at!=showac;" if isodu else "atcmd /dev/ttyACM0 115200 at!=showac"
atcfun0 = "at at+cfun=0;" if isodu else "atcmd /dev/ttyACM0 115200 at+cfun=0"
atcfun1 = "at at+cfun=1;" if isodu else "atcmd /dev/ttyACM0 115200 at+cfun=1"
forcecell = "at at!=\"forcecell dl-earfcn="+str(params[2])+" pci="+str(params[3])+"\";" if isodu else "atcmd /dev/ttyACM0 115200 at!=\"forcecell dl-earfcn="+str(params[2])+" pci="+str(params[3])+"\""
tag = False
for n in range(1,4):
stdin,stdout,stderr=client.exec_command(atcfun0)
time.sleep(3)
stdin,stdout,stderr=client.exec_command(forcecell)
time.sleep(3)
stdin,stdout,stderr=client.exec_command(atcfun1)
time.sleep(15)
#print "The %sth restart:" %n
for m in range(4):
stdin,stdout,stderrc=client.exec_command(showac)
showac_output = stdout.read()
if "CONNECTED" in showac_output:
#print "CPE Attach Success"
tag = True
break
else:
m =m+1
#print "showac not CONNECTED: %s" % m
time.sleep(10)
if tag: break
if not tag:
return [False,'CPE %s attach fails'%cpeip]
time.sleep(10)
tag = False
stdin,stdout,stderr=client.exec_command("ping -c 10 %s"% params[0])
ping_output = stdout.read()
client.close()
#print ping_output
if "bytes from" in ping_output:
return [True, 'CPE %s Ping|SSH|Attach|Traffic OK'%cpeip]
else:
return [False,'CPE %s traffic fails'%cpeip]
def sshsession(self, cpeip, user='root', passwd='Si8a&2vV9', port=22):
'''打开到CPE的SSH会话'''
try:
client=paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(cpeip, int(port), user, passwd)
except Exception, e:
print e
return False
return client
def accessible(self, cpeip, timeout=600):
'''确认CPE是否在规定时间内可被接入(Ping+SSH)'''
starttime = time.time()
while time.time() - starttime < int(timeout):
if self.accesscheck(cpeip):
#print 'CPE %s ok'%cpeip
return True
time.sleep(10)
return False
def accesscheck(self, cpeip, count=8):
'''单次判断CPE是否可被接入(Ping+SSH)'''
pingcmd = 'ping %s -n %d' %(cpeip, int(count))
try:
p = subprocess.Popen(pingcmd,stdin = subprocess.PIPE,stdout = subprocess.PIPE,stderr = subprocess.PIPE,shell = True)
except Exception,e:
print 'subprocess:',e
out = p.stdout.read()
data = unicode(eval(repr(out)),"gbk")
#print data
result = re.findall(r'Ping(.*)Ping', data, re.M|re.S)
if not result: return False
lines = result[0].split('\r\n')
if len(lines) <= 3: return False
lines = lines[1:-2]
ttls = re.findall(r'TTL=',result[0])
#print "Ping result: %s of %s success." % (len(ttls),len(lines))
if float(len(ttls))/float(len(lines)) < 0.5:
return False
try:
localssh=paramiko.SSHClient()
localssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
localssh.connect(cpeip, 22, 'root', 'Si8a&2vV9')
time.sleep(1)
localssh.close()
except Exception, e:
return False
return True
#Service Operation#
def operataion_service(self, cpeip, svctype, params):
cpeipinfo = cpeip.split('.')
#self.pklfile = "D:/Autotest/Logs/"+cpeipinfo[3]+".process.pkl"
test = FtpPerformance()
pids = test.ftp_service(cpeipinfo[3],svctype,params)
del test
ret = self.check_throughput(int(params[1]))
self.stop_ftp_service(pids)
if not ret[0] and not isinstance(ret[1], list):
return ret
tput = self.get_statistics(ret[1])
print "\nThroughout Info:",tput,"\n"
return [ret[0], tput]
def stop_ftp_service(self, pids, prc_name='curl.exe'):
''' Stop background FTP '''
#pids = self._unpickle_processes()
for onepid in pids:
os.system('taskkill /f /pid %s 1>NUL 2>&1' % onepid)
os.system('taskkill /f /im %s 1>NUL 2>&1' % prc_name)
def check_throughput(self, svctime, interval=10):
result = []
zerocount = 0
lastline = ''
time.sleep(30)
starttime = time.time()
while time.time() - starttime < int(svctime):
time.sleep(int(interval))
newline = self.get_lastline()
if not newline:
zerocount += 1
result.append(['wrong log',0,0])
if zerocount >= 5:
return [False,result]
continue
record = re.findall(r'(\d+\/\d+\/\d+\s+\d+:\d+:\d+)\s+(\d+)\s+(\d+)\s+\d+', newline)
if not record or len(record) != 1:
zerocount += 1
result.append(['wrong log',0,0])
if zerocount >= 5:
return [False,result]
result.append([newline,0,0])
continue
result.append([record[0][0],float(record[0][1])/(interval*1024),float(record[0][2])/(interval*1024)])
if float(record[0][1])/interval < 50*1024: #DL<50kbps
zerocount += 1
if zerocount >= 5:
print 'Throughput too low'
return [False,result]
else:
zerocount = 0
if not lastline:
lastline = newline
continue
if lastline == newline:
continue
lastline = newline
return [True,result]
def get_statistics(self, datalist):
if not datalist: return []
dlmax = 0
dlmin = 1000000
dlsum = 0
ulmax = 0
ulmin = 1000000 #1Gbps
ulsum = 0
for rec in datalist:
if rec[1] > dlmax: dlmax = rec[1]
if rec[1] < dlmin: dlmin = rec[1]
dlsum += rec[1]
if rec[2] > ulmax: ulmax = rec[2]
if rec[2] < ulmin: ulmin = rec[2]
ulsum += rec[2]
return [[round(dlsum/len(datalist)/1024,2),round(dlmax/1024,2),round(dlmin/1024,2)],\
[round(ulsum/len(datalist)/1024,2),round(ulmax/1024,2),round(ulmin/1024,2)]]
def get_lastline(self):
last_line = ''
try:
with open(hoolog, 'r') as f:
off = -50
while True:
#seek(off, 2)表示文件指针:从文件末尾(2)开始向前50个字符(-50)
f.seek(off, 2)
lines = f.readlines()
if len(lines)>=2: #判断是否最后至少有两行,这样保证了最后一行是完整的
last_line = lines[-1]
break
#如果off为50时得到的readlines只有一行,不能保证最后一行是完整的
#所以off翻倍重新运行,直到readlines不止一行
off *= 2
except Exception, e:
print e
return last_line
class FtpPerformance:
def _upload_curl(self, remotefile, localfile, serverip, ftpuser, ftppd):
'''To run curl for FTP upload repeatedly'''
curlargs = '-T %s ftp://%s/%s -u %s:%s -s'%(localfile, serverip, remotefile, ftpuser,ftppd)
print 'Upload: curl %s' % curlargs
while True:
curlprc = subprocess.Popen("curl.exe %s" % curlargs, stderr=subprocess.PIPE)
curlprc.stderr.readline()
def _download_curl(self, remotefile, localfile, serverip, ftpuser, ftppd):
'''To run curl for FTP download repeatedly'''
curlargs = 'ftp://%s/%s -u %s:%s -o %s -s'%(serverip, remotefile,ftpuser,ftppd,localfile)
print 'Download: curl %s' % curlargs
while True:
curlprc = subprocess.Popen("curl.exe %s" % curlargs, stderr=subprocess.PIPE)
curlprc.stderr.readline()
def ftp_service(self,cpeid,svctype,params):
if len(params) <12: return []
serverip = params[0]
#svctime = params[1]
ftpuser = params[2]
ftppd = params[3]
dlthread = params[4]
dlpath_s = params[5]
dlpath_c = params[6]
dlprefix = params[7]
ulthread = params[8]
ulpath_s = params[9]
ulpath_c = params[10]
ulprefix = params[11]
if dlpath_c and dlpath_c[-1]=='/': dlpath_c=dlpath_c[:-1]
if ulpath_c and ulpath_c[-1]=='/': ulpath_c=ulpath_c[:-1]
if int(dlthread)>10 or int(ulthread)>10: return []
if os.path.exists("D:/Autotest/Logs/.result.pkl"):
os.remove("D:/Autotest/Logs/.result.pkl")
pids = []
if svctype == "DOWNLOAD": #Download
for i in range(int(dlthread)):
filename = dlprefix + "%s" %i
remotefile = filename if dlpath_s=='' else dlpath_s+'/'+filename
localfile = dlpath_c+'/'+cpeid+'_'+filename
ftptest = multiprocessing.Process(args=(remotefile, localfile, serverip, ftpuser, ftppd,),target=self._download_curl)
ftptest.start()
time.sleep(2)
pids.append(ftptest.pid)
elif svctype == "UPLOAD": #Upload
for i in range(int(ulthread)):
filename = ulprefix + "%s" %i
remotefile = cpeid+'_'+filename if ulpath_s=='' else ulpath_s+'/'+cpeid+'_'+filename
localfile = ulpath_c+'/'+filename
ftptest = multiprocessing.Process(args=(remotefile, localfile, serverip, ftpuser, ftppd,),target=self._upload_curl)
ftptest.start()
time.sleep(2)
pids.append(ftptest.pid)
else: #Download + Upload
for i in range(int(dlthread)):
filename = dlprefix + "%s" %i
remotefile = filename if dlpath_s=='' else dlpath_s+'/'+filename
localfile = dlpath_c+'/'+cpeid+'_'+filename
ftptest = multiprocessing.Process(args=(remotefile, localfile, serverip, ftpuser, ftppd,),target=self._download_curl)
ftptest.start()
time.sleep(2)
pids.append(ftptest.pid)
for i in range(int(ulthread)):
filename = ulprefix + "%s" %i
remotefile = cpeid+'_'+filename if ulpath_s=='' else ulpath_s+'/'+cpeid+'_'+filename
localfile = ulpath_c+'/'+filename
ftptest = multiprocessing.Process(args=(remotefile, localfile, serverip, ftpuser, ftppd,),target=self._upload_curl)
ftptest.start()
time.sleep(2)
pids.append(ftptest.pid)
return pids
if __name__ == "__main__":
multiprocessing.freeze_support()
if len(sys.argv)>=2: hoolog = sys.argv[1]
print 'HooNetMeter Log:',hoolog
CpePCMain().run()
|
[
"522439539@qq.com"
] |
522439539@qq.com
|
1c1bc1d51d094dbdbef416b89e4502aac2fdd706
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/4/usersdata/146/3817/submittedfiles/swamee.py
|
08dfd15fbc717a20c0e5b5b9831d84fdac069f8b
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
f = input ('Digite o valor de f: ')
l = input ('Digite o valor de L: ')
q = input ('Digite o valor de Q: ')
dh = input ('Digite o valor de delta H: ')
v = input ('Digite o valor de v: ')
e = 0.000002
D = ((8*f*l*(q**2))/(math.pi**2)*9.81*dh)**0.2
Rey = (4*q/(math.pi*d*v))
k = 0.25/(math.log10(e/(3.7*d)+5.74/(Rey**0.9))**2)
print ('D = %.4f' %D)
print ('Rey = %.4f' %Rey)
print ('K = %.4f' %k)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
62007b025aac0170cbd6463ff0983879358ae68c
|
f19383a230662ce5d161cc7f7eb61ec8e4b7f37b
|
/apps/operadores/urls.py
|
398796c4af12acc1a826f0d0db78ae37f5adecf5
|
[] |
no_license
|
juanesduque/EPBA
|
5fb907630f3607c18270233e623926bb109899c8
|
733003801f06d5d1a69466e3ea4f79b65788a0d2
|
refs/heads/master
| 2023-04-09T01:16:57.072765
| 2021-04-26T16:09:33
| 2021-04-26T16:09:33
| 310,967,846
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
from django.urls import path
from . import views
app_name = 'operadores_app'
urlpatterns = [
path('operadores',views.operadores.as_view(),name = 'operadores'),
path('operadores/ejemplo-1',views.ejemplo1.as_view(),name = 'ejemplo1'),
path('operadores/ejemplo-2',views.ejemplo2.as_view(),name = 'ejemplo2'),
path('operadores/ejemplo-3',views.ejemplo3.as_view(),name = 'ejemplo3'),
path('operadores/ejemplo-4',views.ejemplo4.as_view(),name = 'ejemplo4'),
path('operadores/ejemplo-5',views.ejemplo5.as_view(),name = 'ejemplo5'),
path('operadores/ejemplo-6',views.ejemplo6.as_view(),name = 'ejemplo6'),
]
|
[
"j.duque1@utp.edu.co"
] |
j.duque1@utp.edu.co
|
f3c3bb4ec38551ff5d7db0438fc30090c6e2691f
|
9a43cf826910f937213317ba60fbe33471d635ec
|
/Inventario/apps.py
|
5f88e2667fdec227c7af806845f767722dd069d6
|
[] |
no_license
|
JanoVilches/TheAllMightys_ISW
|
9094622a40343761d9b7097d503c3f23fe366516
|
512f691acfda4c05e463ba271c5126784e3b0b0a
|
refs/heads/master
| 2020-03-12T12:16:53.706535
| 2018-08-30T02:45:28
| 2018-08-30T02:45:28
| 130,614,656
| 0
| 1
| null | 2018-08-06T02:53:01
| 2018-04-22T22:58:03
|
Python
|
UTF-8
|
Python
| false
| false
| 100
|
py
|
from django.apps import AppConfig
class InventarioConfig(AppConfig):
name = 'Inventario'
|
[
"vanessa.guzman@sansano.usm.cl"
] |
vanessa.guzman@sansano.usm.cl
|
be48fd61b46fec863128846d2efbdf19eaab87b5
|
5f653640dbae67f9798fb937c9775e80ea5fb118
|
/33 - Tabuada v.2.py
|
32b80d5676d5f6371ee7bd399f9e6327bd996033
|
[] |
no_license
|
GabrielReira/Python-Exercises
|
92f9cdb4b3d927b86ceb434dfebfd0e9bd45c2f2
|
51ea2f3f36e516a7cc33dc9af4f65e844b0e6df4
|
refs/heads/master
| 2023-02-26T19:00:13.048520
| 2021-02-03T18:56:10
| 2021-02-03T18:56:10
| 274,241,568
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
# VERSÃO 2.
print('O programa será encerrado caso você digite um número negativo.')
while True:
num = int(input('Informe um número para saber sua tabuada >>> '))
if num < 0: break
for i in range(1, 10):
print(f'{num} x {i} = {num*i}')
|
[
"gabrielmfreira@gmail.com"
] |
gabrielmfreira@gmail.com
|
b1b3e38c4b2dc01db735731182f547fd1f393fbf
|
dcd70115345e56e4aa27d7093f426313557c690f
|
/main.py
|
0feec3575e47937f55e5f8fde2cae75f4c12cee9
|
[] |
no_license
|
josemariasosa/music-theory
|
a79f9eed24a89e6029a2034600a802aeca689bc4
|
7dbdf4bc7cb81942cf7811abf83ffd2c449aa603
|
refs/heads/master
| 2021-08-06T20:46:16.753667
| 2020-07-02T00:03:24
| 2020-07-02T00:03:24
| 195,426,373
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,295
|
py
|
#!/usr/bin/env python
# coding=utf-8
# ------------------------------------------------------------------------------
# Scales harmonization | Music Theory
# ------------------------------------------------------------------------------
# jose maria sosa
import json
import math
import numpy as np
from root import RootNote
from pprint import pprint
class Harmonization(object):
""" Harmonization of a music scale.
"""
def __init__(self, scale=False):
# 1. Extract the scale parameters.
self.setScaleParameters(scale)
# 2. Define the list of 12 notes.
self.notes = self.importNotes()
# 3. Import the steps.
self.importSteps()
# 4. Default output notation.
self.default_notation = self.setDefaultNotation()
# --------------------------------------------------------------------------
def setScaleParameters(self, scale):
if isinstance(scale, dict):
if 'positions' in scale.keys():
self.positions = scale['positions']
if 'root' in scale.keys():
self.root = scale['root']
if 'scale_name' in scale.keys():
self.scale_name = scale['scale_name']
# --------------------------------------------------------------------------
def importNotes(self):
file_name = "files/notes.json"
with open(file_name, 'r') as f:
notes = json.load(f)['notes']
return notes
# --------------------------------------------------------------------------
def importSteps(self):
file_name = "files/steps.json"
with open(file_name, 'r') as f:
steps = json.load(f)['steps']
self.major3 = steps['major3']
self.minor3 = steps['minor3']
self.major7 = steps['major7']
self.minor7 = steps['minor7']
self.dim7 = steps['dim7']
return steps
# --------------------------------------------------------------------------
def setDefaultNotation(self):
if len(self.root) == 1:
default_notation = 'sharp'
elif self.root[-1] == 'b':
default_notation = 'flat'
elif '#' in self.root:
default_notation = 'sharp'
else:
default_notation = 'sharp'
return default_notation
# --------------------------------------------------------------------------
def importModes(self):
file_name = "files/patterns.json"
with open(file_name, 'r') as f:
modes = json.load(f)['patterns']['modes']
return modes
# --------------------------------------------------------------------------
def formattingSteps(self, distance):
""" If distance == 1.5, then return 1.5, else, if distance == 2.0, then
return 2.
"""
if math.modf(distance)[0] > 0:
return distance
else:
return int(distance)
# --------------------------------------------------------------------------
def stepDistance(self, from_note, to_note):
if to_note > from_note:
semitone = to_note - from_note
elif to_note < from_note:
semitone = (to_note + 12) - from_note
else:
semitone = 0
if semitone % 2 == 0:
return int(semitone / 2)
else:
return semitone / 2
# --------------------------------------------------------------------------
def stepOperations(self, given_note, tones):
semitone = int(tones * 2)
results = given_note + semitone
if results > 11:
while results > 11:
results -= 12
elif results < 0:
while results < 0:
results += 12
return results
# --------------------------------------------------------------------------
def replacePositionNotes(self, scale_or_note):
if isinstance(scale_or_note, list):
return [self.notes[x][self.default_notation] for x in scale_or_note]
else:
return self.notes[scale_or_note][self.default_notation]
# --------------------------------------------------------------------------
def getMode(self, note, scale):
# 1. Import all the modes patterns.
modes = self.importModes()
# 2. Get the position of the current note and sort the scale.
scale = scale[:-1]
note_index = scale.index(note)
sorted_notes = scale[note_index:] + scale[:note_index]
sorted_notes = sorted_notes + [sorted_notes[0]]
# 3. Calculate the step pattern, and compare with the default patterns.
steps = []
for index, value in enumerate(sorted_notes):
if index > 0:
f = sorted_notes[index-1]
t = sorted_notes[index]
steps.append(self.stepDistance(f, t))
for mode in modes.keys():
if all([x==0 for x in np.subtract(steps, modes[mode])]):
return mode
return None
# --------------------------------------------------------------------------
def getTriad(self, note, long_scale):
note_index = long_scale.index(note)
return [
long_scale[note_index],
long_scale[note_index+2],
long_scale[note_index+4]
]
# --------------------------------------------------------------------------
def getQuads(self, note, long_scale):
note_index = long_scale.index(note)
return [
long_scale[note_index],
long_scale[note_index+2],
long_scale[note_index+4],
long_scale[note_index+6]
]
# --------------------------------------------------------------------------
def getStepsList(self, note_list):
steps = []
for ix in range(len(note_list)-1):
steps.append(self.stepDistance(note_list[ix], note_list[ix+1]))
return steps
# --------------------------------------------------------------------------
def nameTriad(self, steps):
if steps[0] == self.major3:
if steps[1] == self.major3:
return 'aug'
elif steps[1] == self.minor3:
return ''
elif steps[0] == self.minor3:
if steps[1] == self.major3:
return 'min'
elif steps[1] == self.minor3:
return 'dim'
return 'unnamed'
# --------------------------------------------------------------------------
def nameQuads(self, steps):
if steps[0] == self.major3:
if steps[1] == self.major3:
if steps[2] == self.major3:
return 'augΔ7'
elif steps[2] == self.minor3:
return 'unnamed'
elif steps[1] == self.minor3:
if steps[2] == self.major3:
return 'Maj7' # Major 7th [M7 - Maj7]
elif steps[2] == self.minor3:
return '7' # Dominant 7th [7 - Δ7]
elif steps[0] == self.minor3:
if steps[1] == self.major3:
if steps[2] == self.major3:
return 'mΔ7'
elif steps[2] == self.minor3:
return 'm7' # Minor 7th [m7 - min7]
elif steps[1] == self.minor3:
if steps[2] == self.major3:
return 'ø7' # Half-diminished 7th [ø7 - min7b5]
elif steps[2] == self.minor3:
return 'o7' # Fully-diminished 7th [o7 - dim7]
return 'unnamed'
# --------------------------------------------------------------------------
def toRoman(self, n):
return {
1: 'i',
2: 'ii',
3: 'iii',
4: 'iv',
5: 'v',
6: 'vi',
7: 'vii'
}[n]
# --------------------------------------------------------------------------
def getNotes(self, root, tones):
results = [root]
accum = 0
for tone in tones:
accum = self.formattingSteps(accum + tone)
results.append(self.stepOperations(root, tones=accum))
return [self.replacePositionNotes(x) for x in results]
# --------------------------------------------------------------------------
def generateAlternatives(self, note, quads_steps):
Note = self.replacePositionNotes(note).title()
results = []
# (3.5) is the sum of a major3 + minor3
seventh = self.major3 + self.minor3 + quads_steps[2]
seventh = self.formattingSteps(seventh)
if seventh == self.major7:
# minor7
alternative_1 = quads_steps.copy()
alternative_1[2] = self.formattingSteps(quads_steps[2] - 0.5)
if ((alternative_1[2] >= self.minor3)
and (alternative_1[2] <= self.major3)):
results.append({
"chord": Note + self.nameQuads(alternative_1),
"tones": alternative_1,
"notes": self.getNotes(note, alternative_1)
})
# dim7
alternative_2 = quads_steps.copy()
alternative_2[2] = self.formattingSteps(quads_steps[2] - 1)
if ((alternative_2[2] >= self.minor3)
and (alternative_2[2] <= self.major3)):
results.append({
"chord": Note + self.nameQuads(alternative_2),
"tones": alternative_2,
"notes": self.getNotes(note, alternative_2)
})
elif seventh == self.minor7:
# major7
alternative_1 = quads_steps.copy()
alternative_1[2] = self.formattingSteps(quads_steps[2] + 0.5)
if ((alternative_1[2] >= self.minor3)
and (alternative_1[2] <= self.major3)):
results.append({
"chord": Note + self.nameQuads(alternative_1),
"tones": alternative_1,
"notes": self.getNotes(note, alternative_1)
})
# dim7
alternative_2 = quads_steps.copy()
alternative_2[2] = self.formattingSteps(quads_steps[2] - 1)
if ((alternative_2[2] >= self.minor3)
and (alternative_2[2] <= self.major3)):
results.append({
"chord": Note + self.nameQuads(alternative_2),
"tones": alternative_2,
"notes": self.getNotes(note, alternative_2)
})
elif seventh == self.dim7:
# major7
alternative_1 = quads_steps.copy()
alternative_1[2] = self.formattingSteps(quads_steps[2] + 1)
if ((alternative_1[2] >= self.minor3)
and (alternative_1[2] <= self.major3)):
results.append({
"chord": Note + self.nameQuads(alternative_1),
"tones": alternative_1,
"notes": self.getNotes(note, alternative_1)
})
# minor7
alternative_2 = quads_steps.copy()
alternative_2[2] = self.formattingSteps(quads_steps[2] + 0.5)
if ((alternative_2[2] >= self.minor3)
and (alternative_2[2] <= self.major3)):
results.append({
"chord": Note + self.nameQuads(alternative_2),
"tones": alternative_2,
"notes": self.getNotes(note, alternative_2)
})
return results
# --------------------------------------------------------------------------
def getChordVariation(self, note, quads_steps):
# Major
if (quads_steps[0], quads_steps[1]) == (self.major3, self.minor3):
results = self.generateAlternatives(note, quads_steps)
# Minor
elif (quads_steps[0], quads_steps[1]) == (self.minor3, self.major3):
results = self.generateAlternatives(note, quads_steps)
# Diminished
elif (quads_steps[0], quads_steps[1]) == (self.minor3, self.minor3):
results = self.generateAlternatives(note, quads_steps)
# Augmented
elif (quads_steps[0], quads_steps[1]) == (self.major3, self.major3):
results = self.generateAlternatives(note, quads_steps)
else:
results = []
return results
# --------------------------------------------------------------------------
def formattingAlts(self, l):
results = []
if len(l) > 0:
for alternative in l["alternatives"]:
results.append({
"notes": "",
"name": alternative['tail']
})
# --------------------------------------------------------------------------
def diatonicHarmonization(self):
scale = self.positions
long_scale = scale[:-1] + scale
harmonized_scale = []
for ix, note in enumerate(scale[:-1]):
mode = self.getMode(note, scale)
triad = self.getTriad(note, long_scale)
quads = self.getQuads(note, long_scale)
triad_steps = self.getStepsList(triad)
triad_tail = self.nameTriad(triad_steps)
quads_steps = self.getStepsList(quads)
quads_tail = self.nameQuads(quads_steps)
quads_chord_variation = self.getChordVariation(note, quads_steps)
formatted_note = self.replacePositionNotes(note).title()
harmonized_scale.append({
"mode": mode,
"grade": self.toRoman(ix+1),
"triad": {
"notes": self.replacePositionNotes(triad),
"chord": formatted_note + triad_tail
},
"quads": {
"notes": self.replacePositionNotes(quads),
"chord": formatted_note + quads_tail,
"tones": quads_steps,
"alternatives": quads_chord_variation
}
})
return {
"harmonization": harmonized_scale
}
# --------------------------------------------------------------------------
def simple(self):
if (len(self.positions) - 1) == 9:
pass
elif (len(self.positions) - 1) == 7:
harmony = self.diatonicHarmonization()
pprint (harmony)
else:
print('not available yet')
# --------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def main():
D_minor = RootNote("C").major
Harmonization(D_minor).simple()
# ------------------------------------------------------------------------------
if __name__ == '__main__':
main()
|
[
"josemaria@rigs.com.mx"
] |
josemaria@rigs.com.mx
|
b7df0160ae164b6c2154ba2eae297e324d81eb9c
|
c2177b7679c38315006d2f477e7d3ac1675678ca
|
/maintenance/bitbucket_issues/issue_56/fixit_7.py
|
9460690b75422a5e8125da88217ee1cd336022c9
|
[] |
no_license
|
sebotic/wikidataBots
|
17a2cc4301770707443d063926c1df90fa36fcf8
|
c444d4aadc4a870ac5cdc472a787dd99c78b0d96
|
refs/heads/feature/metabolites
| 2020-12-27T23:54:36.328120
| 2016-09-17T05:18:53
| 2016-09-17T05:18:53
| 68,481,272
| 1
| 0
| null | 2016-09-17T22:24:37
| 2016-09-17T22:24:37
| null |
UTF-8
|
Python
| false
| false
| 2,008
|
py
|
__author__ = 'andra'
# This is a maintenance bot which removes all occurences where a protein is incorrectly being encoded by another protein
import time
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../../ProteinBoxBot_Core")
import PBB_login
import PBB_settings
import PBB_Core
import pprint
import traceback
from SPARQLWrapper import SPARQLWrapper, JSON
logincreds = PBB_login.WDLogin(PBB_settings.getWikiDataUser(), PBB_settings.getWikiDataPassword())
sparql = SPARQLWrapper("https://query.wikidata.org/bigdata/namespace/wdq/sparql")
sparql.setQuery("""
PREFIX wikibase: <http://wikiba.se/ontology#>
PREFIX wd: <http://www.wikidata.org/entity/>
PREFIX wdt: <http://www.wikidata.org/prop/direct/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX p: <http://www.wikidata.org/prop/>
PREFIX v: <http://www.wikidata.org/prop/statement/>
SELECT ?gene ?uniprot ?protein_aff ?total WHERE {
{
SELECT distinct ?uniprot (count(?uniprot) as ?total) WHERE {
?gene wdt:P703 wd:Q5 ;
wdt:P279 wd:Q7187 ;
wdt:P688 ?protein .
?protein wdt:P352 ?uniprot ;
wdt:P279 wd:Q8054 ;
wdt:P703 wd:Q5 .
}
Group BY ?uniprot
}
?protein_aff wdt:P352 ?uniprot .
?gene wdt:P688 ?protein_aff ;
wdt:P703 wd:Q5 .
FILTER (?total > 1)
}
ORDER BY ?gene
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
# pprint.pprint(results)
counter = 0
for result in results["results"]["bindings"]:
try:
counter = counter + 1
print(result["gene"]["value"])
gene = result["gene"]["value"].replace("http://www.wikidata.org/entity/", "")
data2add = [PBB_Core.WDBaseDataType.delete_statement(prop_nr='P688')]
wdPage = PBB_Core.WDItemEngine(gene, data=data2add, server="www.wikidata.org",
domain="genes")
wdPage.write(logincreds)
except Exception as e:
print(traceback.format_exc())
print(counter)
|
[
"andra@micelio.be"
] |
andra@micelio.be
|
f7d2dc40aafa004affe72771d18597426fa848ff
|
6ea9a54e083a306da263a996ab43f921e2556e7d
|
/Case/migrations/0002_caseoverview_datecreated.py
|
f3c3f4003a49538db023d5056dafdf4cf9242ef9
|
[] |
no_license
|
Kalviz/CaseTracker
|
188c2dffb0132c61f7544545587c8df91125a22b
|
e90a62479f0e709137fc3ae65b72e62ec123ce18
|
refs/heads/master
| 2020-04-05T04:51:08.638250
| 2018-11-07T15:47:11
| 2018-11-07T15:47:11
| 156,569,726
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
# Generated by Django 2.1.1 on 2018-11-05 10:28
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('Case', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='caseoverview',
name='dateCreated',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
|
[
"noreply@github.com"
] |
Kalviz.noreply@github.com
|
9bc8b9a2dae8571371e8b53e846ad5b5291d7df9
|
e8eb4a03f89be52de5b2400b93808beea0b28b7a
|
/quandlExample.py
|
89107721f4fb464a2a49e56d4e5ab153aa6ffc6a
|
[] |
no_license
|
vkrishnam/FinancialAnalysis
|
992e0d6fde3d70e0e5575fb32cf11ed39ecc1a3e
|
253d4f1b128dc8ca44a2b1ba060fee4ca0699228
|
refs/heads/master
| 2023-07-23T06:37:29.073282
| 2023-02-07T16:19:47
| 2023-02-07T16:19:47
| 196,515,414
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
#############################################
## https://www.quandl.com/data/TC1-Indian-Equities-Adjusted-End-of-Day-Prices/usage/quickstart/python
#############################################
import quandl
quandl.ApiConfig.api_key = "1Rw_ovhmRsBYyzx4p3cL"
#mydata = quandl.get("FRED/GDP")
mydata = quandl.get("DEB/INFY_A_PE")
print(mydata)
#data = quandl.get_table('ZACKS/FC', paginate=True)
#print(data.keys())
|
[
"vkrishnam@nvidia.com"
] |
vkrishnam@nvidia.com
|
e900b33635375b77cbe1283be0d3aefab4fe6cbf
|
4e008b002c70f3a7179bee2d86062e7fc79f6dea
|
/ssq.py
|
08ac26393e432f0754c9ddc3c6beacee6c388df9
|
[] |
no_license
|
lixiangheng/learn
|
948632def882d972d39b21faea7a0481671263ad
|
d5c6041f5c1a9f0758e198f4edec5a196c760c06
|
refs/heads/master
| 2021-06-09T13:34:13.429264
| 2021-04-13T02:25:03
| 2021-04-13T02:25:03
| 131,359,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
import random
import sys
i= 0
try:
y = int(input('请输入要选择的注数:'))
# y = int(sys.argv[1])
except ValueError:
print('请输入数字!')
while i < y:
red = random.sample(range(1, 34), 6)
blue = random.sample(range(1, 17), 1)
red.sort()
print("%s - %s" %(str(red), str(blue)))
i +=1
|
[
"noreply@github.com"
] |
lixiangheng.noreply@github.com
|
f412d4ab4d0dd14f5091d8ff033eac1effd5749f
|
e9333f51cf76e94b68cce9d0daf5da545ced54a7
|
/server.py
|
58a5a81c278ed515ed79b123716dda546fa935db
|
[] |
no_license
|
locnguyen14/HAR-data-collection
|
a5be42ebb261bedb3b62dd3b736b7b3c5da27467
|
681ccd94208f624fe224fa7e944c9437ee8d31ca
|
refs/heads/master
| 2022-11-14T03:47:44.158681
| 2020-06-01T20:05:55
| 2020-06-01T20:05:55
| 268,619,084
| 0
| 1
| null | 2022-11-02T05:33:39
| 2020-06-01T19:57:10
|
C
|
UTF-8
|
Python
| false
| false
| 3,211
|
py
|
def listen_once():
import matplotlib.pyplot as plt
import socket
from collections import defaultdict
import json
# Get host from Wireless LAN adapter wi-fi ipv4.
# Standard loopback interface address (localhost)
# HOST = socket.gethostbyname(socket.gethostname())
HOST = '192.168.1.4'
PORT = 60230 # Port to listen on (non-privileged ports are > 1023)
print("Listening on " + str(HOST) + ":" + str(PORT))
x_arr, y_arr, z_arr = [], [], []
accelerometerDict = defaultdict(list)
sampling_point = 0
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen()
conn, addr = s.accept()
fig = plt.figure()
with conn:
print('Connected by client with ip address: ', addr)
while True and sampling_point < 200:
data = conn.recv(1024)
if data.decode('utf-8') != "":
data = data.decode('utf-8')
print("sampling Point {}".format(sampling_point))
print(data)
split = data.replace(")", "").replace(
"(", "").split(",")
try:
x, y, z = float(split[0]), float(
split[1]), float(split[2])
accelerometerDict['x'].append(x)
accelerometerDict['y'].append(y)
accelerometerDict['z'].append(z)
except:
continue
if len(x_arr) > 20:
x_arr.pop(0)
x_arr.append(x)
if len(y_arr) > 20:
y_arr.pop(0)
y_arr.append(y)
if len(z_arr) > 20:
z_arr.pop(0)
z_arr.append(z)
i = list(range(len(x_arr)))
plt.clf()
# Limit the gyro values to max of +/- 1 because we don't want to destroy the wheel
if y < -1:
y = -1
if y > 1:
y = 1
if z < -1:
z = -1
if z > 1:
z = 1
# line = ""
# if y <= 0:
# line += "ACCELERATE %.2f\t" % abs(y)
# if y > 0:
# line += "BREAK %.2f\t" % y
# if z <= 0:
# line += "RIGHT %.2f" % z
# if z > 0:
# line += "LEFT %.2f" % abs(z)
# print(line)
# plt.plot(i, x_arr, 'r-')
plt.plot(i, y_arr, 'b-')
plt.plot(i, z_arr, 'k-')
plt.pause(.02)
sampling_point += 1
conn.close()
s.close()
# Dump it to json file
json_file = json.dumps(accelerometerDict)
f = open("AccelerometerData.json", "w")
f.write(json_file)
f.close()
if __name__ == "__main__":
listen_once()
|
[
"ndxloc1995@gmail.com"
] |
ndxloc1995@gmail.com
|
94507bb96277abb7d1a650690d87d2a5ac9ffdf4
|
d8849023c5d309e8f4ae10f383e1eeda2e37f25e
|
/maximumNumberOfVowels.py
|
358ab20d8655ec310f8ad59fa464d924f6064f75
|
[] |
no_license
|
Darshak1997/leet-Code
|
ba54c0c2e042c437a0fca72902367db3c82a0317
|
075ebadf554dca682466bda3176f68fb1152d7b6
|
refs/heads/master
| 2021-07-09T20:22:42.287957
| 2020-11-12T07:50:55
| 2020-11-12T07:50:55
| 209,810,290
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
class Solution:
def maxVowels(self, s: str, k: int) -> int:
res = j = vowels = 0
for i, c in enumerate(s):
vowels += c in 'aeiou'
if i - j + 1 > k:
vowels -= s[j] in 'aeiou'
j += 1
if i - j + 1 == k:
res = max(res, vowels)
# print(s[i:j+1], res)
return res
|
[
"noreply@github.com"
] |
Darshak1997.noreply@github.com
|
06c66a2feee91220ce7a7719c25f1bd59d258626
|
6f6ef78716d3e2ea335c9e079cb4027e2c921bd9
|
/uart/decrypt.py
|
3223b05c4718ea1dedf0758ef6953f70e3cec1d4
|
[] |
no_license
|
Aithu-Snehith/image_stegenography_fpga
|
eb68ddf0ec1aeec7160490223ff99b7540786a85
|
2ccb81cad3ea495ffd3b6d4a5fb5b79264054b78
|
refs/heads/master
| 2020-04-27T12:44:28.437831
| 2019-04-26T11:46:56
| 2019-04-26T11:46:56
| 174,342,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 909
|
py
|
import numpy as np
import sys
import numpy
numpy.set_printoptions(threshold=sys.maxsize)
from scipy.misc import imread, imsave
def frombits(bits):
chars = []
for b in range(len(bits) / 8):
byte = bits[b*8:(b+1)*8]
# print(''.join([str(bit) for bit in byte]))
chars.append(chr(int(''.join([str(bit) for bit in byte]), 2)))
return ''.join(chars)
#a = np.genfromtxt("output_raspi.txt", skip_header=15)
a = imread("mod.png")
#imsave("regen.png",img_actual)
msg_len = int(10*(a[-1][-2]%10) + a[-1][-1]%10)
print(msg_len)
rows = 31
cols = 29
yeet = []
flet = a.flatten()[-1*int(8*msg_len + 2):-2].astype(int)
# print(len(flet))
bits = list(flet & np.ones_like(flet).astype(int))
# for i in range(int(8*msg_len + 2)):
# bits = [i|1 for i in flet]
# print(flet)
bits.reverse()
print(bits)
message = frombits(list(bits))
print(msg_len)
print("HIDDEN MESSAGE : " + str(message))
|
[
"ee16btech11041@iith.ac.in"
] |
ee16btech11041@iith.ac.in
|
5cc6efa4923287e64173d99b8588aea459f0e66e
|
2176d2e4e2425660ffe0ef14b42e8433ddd8faea
|
/npl/evaluate/logreg_ll.py
|
2832eb580a7daece8b4d3930ea3d40579f2ae0f8
|
[
"BSD-3-Clause"
] |
permissive
|
edfong/npl
|
87e6f72ace4ed9b7658b06b74f029b17a104244d
|
9e94287a7c253a33addcafb431c384be8a7dd8df
|
refs/heads/master
| 2020-05-17T09:34:05.754240
| 2019-05-18T18:01:39
| 2019-05-18T18:01:39
| 183,636,918
| 9
| 2
|
BSD-3-Clause
| 2019-05-02T11:43:47
| 2019-04-26T13:50:32
| null |
UTF-8
|
Python
| false
| false
| 2,310
|
py
|
"""
File used to evaluate predictive performance on test data of posterior samples
"""
import numpy as np
import scipy as sp
#For all:
#beta = posterior coefficient samples with shape (B,D)
#alpha = intercept coefficient samples with shape (D)
#y = test data classification with shape (N)
#x = test data covariates with shape (N,D)
#evaluate log posterior predictive
def logpp(y,x,beta, alpha):
Ns = np.shape(beta)[0]
logpp = np.zeros(Ns)
pred =np.zeros(Ns)
for n in range(Ns):
z = np.dot(x,beta[n]) + alpha[n]
logeta = -np.logaddexp(0,-z)
logneta = -np.logaddexp(0,z)
logpp[n] = np.sum(y * logeta + (1-y)*logneta)
logpp_mean = (sp.special.logsumexp(logpp)) - np.log(Ns)
return logpp_mean
#evaluate LPPD
def lppd(y,x,beta,alpha):
Ns = np.shape(beta)[0]
N =np.shape(y)[0]
lppd = np.zeros((Ns,N))
pred =np.zeros(Ns)
for n in range(Ns):
z = np.dot(x,beta[n]) + alpha[n]
logeta = -np.logaddexp(0,-z)
logneta = -np.logaddexp(0,z)
lppd[n] = y * logeta + (1-y)*logneta
lppd = sp.special.logsumexp(lppd,axis = 0) - np.log(Ns)
lppd_sum = np.sum(lppd)
return lppd_sum
#evaluate classification percentage correct
def predcorrect(y,x,beta,alpha):
Ns = np.shape(beta)[0]
N =np.shape(y)[0]
pred = np.zeros(N)
N_error = np.zeros(Ns)
logeta = np.zeros((Ns,N))
for n in range(Ns):
z = np.dot(x,beta[n]) + alpha[n]
logeta[n] = -np.logaddexp(0,-z)
logeta_mean = sp.special.logsumexp(logeta,axis = 0) - np.log(Ns)
pred[np.exp(logeta_mean) >= 0.5] = 1
N_error = np.sum(np.abs(pred-y))
return (N-N_error)/N
#evaluate MSE
def MSE(y,x,beta,alpha):
Ns = np.shape(beta)[0]
N =np.shape(y)[0]
pred = np.zeros(N)
MSE = np.zeros(Ns)
logeta = np.zeros((Ns,N))
for n in range(Ns):
z = np.dot(x,beta[n]) + alpha[n]
logeta[n] = -np.logaddexp(0,-z)
#average p(ytest | beta) then re-log
logeta_mean = sp.special.logsumexp(logeta,axis = 0) - np.log(Ns)
MSE = np.mean((np.exp(logeta_mean) - y)**2)
return MSE
#check cardinality of beta
def checkcard(beta,epsilon):
Ns = np.shape(beta)[0]
card = np.count_nonzero(np.abs(beta)> epsilon,axis = 1)
card_mean = np.mean(card)
return card_mean
|
[
"noreply@github.com"
] |
edfong.noreply@github.com
|
0cadb46f04ad4bca411d095743cfc2925a262c36
|
6c922b75cd0f7b76d80d1f0e0b4e7e65b2d6ed52
|
/code/plotting/base/movie_buddy.py
|
5c88c63bdf9b8e861ebc23033ae998d152a511d7
|
[] |
no_license
|
mahdikooshkbaghi/stratified_heat_transport_paper2017
|
5f879e690908604272617915a8e2461b09d003a3
|
db5864e37494f271da8cc0d533381e7378944daa
|
refs/heads/master
| 2022-08-26T07:32:43.489533
| 2018-04-27T19:57:27
| 2018-04-27T19:57:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,814
|
py
|
from plot_buddy_base import *
import matplotlib
matplotlib.rcParams.update({'font.size': 11})
from scipy.stats.mstats import mode
import matplotlib.colorbar as colorbar
POSITIVE_DEFINITE = ['enstrophy']
class MovieBuddy(PlotBuddy):
def __init__(self, dir, profile_post_file=None, **kwargs):
super(MovieBuddy, self).__init__(dir, **kwargs)
if self.idle: return
#Figure out the number range of files for this process.
self.file_number_start = self.files_below + 1
self.file_number_end = self.files_below + self.n_files
self.global_total_writes_below = np.sum(self.global_writes_per_file[:self.files_below])
if profile_post_file != None:
self.profile_post_file = profile_post_file
else:
self.profile_post_file = None
def add_subplot(self, field, rindex, cindex, hlabel='x', vlabel='z', \
zlabel=None, ncols=1, nrows=1, cmap='RdYlBu_r',
sub_t_avg=False, bare=False):
'''
Adds a subplot to the list of subplots that the plotter will track.
field -- the field being plottedin this subplot
rindex -- row index of the subplot, starting from zero
cindex -- column index of the subplot, starting from zero
hlabel -- the label for the horizontal axis ('x', 't', or something else)
vlabel -- the label for the vertical axis ('z', or something else)
zlabel -- the label for the third axis (the colormap axis)
colspan -- The number of columns that the subplot spans across
rowspan -- The number of rows that the subplot spans across
cmap -- The colormap of the plot
sub_t_avg -- If True, subtract the time average from each movie frame.
bare -- If bare, this is for a public talk type movie.
'''
if self.idle: return
plot_info = dict()
plot_info['field'] = field
plot_info['position'] = (rindex, cindex)
plot_info['colspan'] = ncols
plot_info['rowspan'] = nrows
plot_info['hlabel'] = hlabel
plot_info['vlabel'] = vlabel
plot_info['zlabel'] = zlabel
plot_info['cmap'] = cmap
plot_info['sub_t_avg']= sub_t_avg
plot_info['bare'] = bare
self.ax.append(plot_info)
def plot_field(self, xs, zs, field, ax, field_name, cmap='RdYlBu_r', min=None, max=None,\
xlims=None, ylims=None, mod=None, plot_title=True, function='colormap',
bare=False):
'''
Plots a colormap of a given field.
xs -- a 2D grid of x-values
zs -- a 2D grid of z-values
field -- a 2D grid of values in the x-z plane for another parameter.
ax -- the Axis subplot on which to plot the field.
field_name -- The physical name that the numbers in 'field' represent
cmap -- the colormap of the plot
min, max -- the min and max values of the colormap to plot
xlims, ylims -- the boundaries on the x- and y- coordinates.
mod -- A modification on the field to alter the movie. Currently accepts:
"up" -- upflows
"down" -- downflows
"pos" -- Positive parts only
"neg" -- Negative parts only
bare -- If bare, then don't plot up any axis information (public talks, etc.)
'''
print(xs.shape, zs.shape, field.shape)
if self.idle: return
if max == None:
max = np.max(field)
if min == None:
min = np.min(field)
if function == 'colormap':
plot = ax.pcolormesh(xs, zs, field, cmap=cmap, vmin=min, vmax=max)
if not bare:
xticks = np.array([1, np.max(xs)/2, np.max(xs)])
yticks = np.array([1, np.max(zs)/2, np.max(zs)])
plt.xticks(xticks, [r'${:1.2f}$'.format(tick) for tick in xticks], fontsize=11)
plt.yticks(yticks, [r'${:1.2f}$'.format(tick) for tick in yticks], fontsize=11)
if self.atmosphere['atmosphere_name'] == 'single polytrope':
plot_label = '$\epsilon='
if self.atmosphere['epsilon'] < 0.1:
plot_label += '10^{'
plot_label += '{:1.0f}'.format(np.log10(self.atmosphere['epsilon']))
plot_label += '}$'
else:
plot_label += '{:1.1f}$'.format(self.atmosphere['epsilon'])
small_eps = True
ra_log = np.log10(self.atmosphere['rayleigh'])
plot_label += ' | $\mathrm{Ra} = 10^{'
if np.floor(ra_log) == ra_log:
plot_label += '{:1.0f}'.format(ra_log)
small_ra = True
else:
plot_label += '{:1.2f}'.format(ra_log)
plot_label += '}$'
plot_label = r'{:s}'.format(plot_label)
else:
print('ERROR: Unknown atmosphere type')
plot_label=''
if max > 0.1:
cbar_label = '$\pm {:1.2f}$'.format(max)
else:
str = '{:1.2e}'.format(max)
if 'e+0' in str:
newstr = str.replace('e+0', '\\times 10^{')
elif 'e-0' in str:
newstr = str.replace('e-0', '\\times 10^{-')
else:
newstr = str.replace('e', '\\times 10^{')
newstr += '}'
if min != 0:
cbar_label = '$\pm {:s}$'.format(newstr)
else:
cbar_label = '$min: 0 max: {:s}$'.format(newstr)
cbar_label += ' ({:s})'.format(plot_label)
cbar_label += ' {:s}'.format(field_name)
divider = make_axes_locatable(ax)
cax, kw = colorbar.make_axes(ax, fraction=0.07, pad=0.03, aspect=5, anchor=(0,0), location='top')
cbar = colorbar.colorbar_factory(cax, plot, **kw)
trans = cax.get_yaxis_transform()
cax.annotate(r'{:s}'.format(cbar_label), (1.02,0.01), size=11, color='black', xycoords=trans)
cax.tick_params(axis=u'both', which=u'both',length=0)
cax.set_xticklabels([])
cax.set_xticks([])
# for label in cax.xaxis.get_ticklabels():
# label.set_visible(False)
# for label in cax.yaxis.get_ticklabels():
# label.set_visible(False)
elif function == 'contour':
field[np.where(field > max)] = max
field[np.where(field < min)] = min
plot = ax.contour(xs, zs, field, 7, cmap=plt.cm.gray)
if xlims == None or len(xlims) != 2:
ax.set_xlim(np.min(xs), np.max(xs))
else:
ax.set_xlim(xlims[0], xlims[1])
if ylims == None or len(ylims) != 2:
ax.set_ylim(np.min(zs), np.max(zs))
else:
ax.set_ylim(ylims[0], ylims[1])
if bare:
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def get_time_avg(self, field):
''' Takes a 3D Time x Width x Height field array and returns the time average o
the vertical profile
TODO generalize this to 3D
'''
if self.idle: return
local_counts = np.zeros(1, np.int16)
global_counts = np.zeros(1, np.int16)
local_counts[0] = field.shape[0]
collapsed_profile = np.mean(field, axis=1)
collapsed_profile = np.sum(collapsed_profile, axis=0)
local_sum = np.zeros(collapsed_profile.shape, dtype=np.float64)
local_sum[:] = collapsed_profile[:]
global_sum = np.zeros(local_sum.shape, dtype=np.float64)
self.comm.Allreduce(local_counts, global_counts, op=MPI.SUM)
self.comm.Allreduce(local_sum, global_sum, op=MPI.SUM)
return global_sum/global_counts[0]
def analyze_subplots(self):
'''
Takes provided information about subplots and gets relevant info on how
to set up those subplots
'''
if self.idle: return
field_names = []
total_width = 0
total_height = 0
for i, ax in enumerate(self.ax):
field_names.append(ax['field'])
if ax['position'][0] + 1 > total_height:
total_height = ax['position'][0] + 1
if ax['position'][1] + 1 > total_width:
total_width = ax['position'][1] + 1
self.plot_dimensions = (total_height, total_width) #rows, ncols
slices_fields = self.grab_whole_profile(self.local_files['slices'],\
self.local_writes_per_file,
subkey=['tasks']*len(field_names), \
profile_name=field_names)
for i, ax in enumerate(self.ax):
ax['data'] = slices_fields[ax['field']]
if ax['sub_t_avg']:
ax['t_avg'] = self.get_time_avg(ax['data'])
ax['data'] -= ax['t_avg']
flattened = np.sort(np.abs(ax['data'].flatten()))
ax['max_val'] = flattened[int(0.98*len(flattened))]
    def make_plots(self, figsize=None, outdir='snapshots', filename='snapshots', write_number_start=1,\
                   dpi=300, cbar_factor=0.2, length_div=1, plot_title=True, n_mode_memory=100):
        '''
        Create all of the plots!

        One PNG is written per local time write into root_dir/outdir/, named
        <filename>_NNNNNN.png with NNNNNN continuing across MPI ranks
        (offset by global_total_writes_below + write_number_start).
        `length_div` rescales the x/z axes; `cbar_factor` and `n_mode_memory`
        are currently unused here.
        '''
        if self.idle: return
        output_directory = self.root_dir + '/' + outdir + '/'
        if figsize == None:
            # Default: 2*aspect_ratio wide per column, 2 tall per row (+title space).
            figsize = (self.plot_dimensions[1]*(2*self.atmosphere['aspect_ratio']), self.plot_dimensions[0]*2+0.5)
        # Only the world's rank-0 process creates the output directory.
        if self.cw_rank == 0 and not os.path.exists('{:s}'.format(output_directory)):
            os.mkdir('{:s}'.format(output_directory))
        logger.info('saving figures to {}'.format(output_directory))
        count = int(self.global_total_writes_below+write_number_start)
        writes = int(np.sum(self.local_total_writes))
        num_in_file = 0  # NOTE(review): unused, as is movie_count below
        for i in range(writes):
            logger.info('Plotting {}/{}'.format(i+1,writes))
            movie_count = 0
            fig = plt.figure(figsize=figsize)
            current_time = self.local_times[i]
            axes = dict()
            #Plot each subplot
            for k, ax in enumerate(self.ax):
                position = ax['position']
                colspan = ax['colspan']
                rowspan = ax['rowspan']
                field = ax['field']
                hlabel = ax['hlabel']
                vlabel = ax['vlabel']
                zlabel = ax['zlabel']
                cmap = ax['cmap']
                bare = ax['bare']
                # Re-use an axis if two subplot specs share a grid position.
                if position in axes.keys():
                    axis = axes[position]
                else:
                    axis = plt.subplot2grid(self.plot_dimensions, position, colspan=colspan, rowspan=rowspan)
                    axes[position] = axis
                field_base = ax['data'][i,:]
                # NOTE: `max`/`min` shadow the builtins for the rest of this loop body.
                max = ax['max_val']
                min = -ax['max_val']
                if field in POSITIVE_DEFINITE:
                    min=0
                if zlabel == None:
                    zlabel = field
                if hlabel == 'x' and vlabel == 'y':
                    # Horizontal (x-y) slice at fixed depth.
                    xs, ys = np.meshgrid(self.x, self.x)
                    field_base = field_base[:,:,0]
                else:
                    # Vertical (x-z) slice; axes optionally rescaled by length_div.
                    xs, ys = self.xs/length_div, self.zs/length_div
                    if len(field_base.shape) == 3:
                        field_base = field_base[:,0,:]
                self.plot_field(xs, ys, field_base, axis, \
                                zlabel, cmap=cmap, min=min, max=max, plot_title=plot_title,\
                                bare=bare)#, extra_label=field_base)
                if length_div != 1:
                    axis.set_xlabel('x / L', fontsize=11)
                    axis.set_ylabel('z / L', fontsize=11)
                else:
                    axis.set_xlabel('x', fontsize=11)
                    axis.set_ylabel('z', fontsize=11)
            # NOTE(review): `bare` here is whatever the LAST subplot set it to --
            # confirm that suppressing the title based on the last spec is intended.
            if plot_title and not bare:
                title_string = 't = {:1.2e}'.format(current_time)
                try:
                    title_string += '; t/t_buoy = {:1.2e}'.format(current_time/self.atmosphere['t_buoy'])
                    title_string += '; t/t_therm = {:1.2e}'.format(current_time/self.atmosphere['t_therm'])
                except:
                    print("ERROR: Cannot find t_buoy or t_therm")
                fig.suptitle(title_string, fontsize=11)
            plt.savefig(output_directory+filename+'_{:06d}.png'.format(count), dpi=dpi, bbox_inches='tight',
                        figsize=figsize)
            plt.close()
            count += 1
class MultiMovieBuddy():
    """Drive several MovieBuddy instances in lockstep so matching frames from
    multiple simulation directories are montaged into one figure per write.
    """

    def __init__(self, dirs, max_files, start_files):
        """Create one MovieBuddy per root directory.

        `dirs`, `max_files` and `start_files` are parallel lists: dirs[i] is
        read with at most max_files[i] files starting at file start_files[i].
        """
        buddies = []
        for i, dir in enumerate(dirs):
            buddies.append(MovieBuddy(dir, max_files=max_files[i], start_file=start_files[i]))
        self.buddies = buddies

    def add_subplot(self, *args, **kwargs):
        """Forward a subplot specification to every buddy unchanged."""
        for buddy in self.buddies:
            buddy.add_subplot(*args, **kwargs)

    def define_plot_grid(self, rows=None, cols=1):
        """Choose the montage grid shape; rows defaults to #buddies / cols."""
        if rows == None:
            # BUG FIX: use floor division -- under Python 3, `/` yields a float,
            # which subplot2grid rejects as a grid dimension.
            rows = len(self.buddies) // cols
        self.rows = rows
        self.cols = cols

    def analyze_movie_subplots(self):
        """Run each buddy's subplot analysis pass."""
        for buddy in self.buddies:
            buddy.analyze_movie_subplots()

    def make_plots(self, outdir, filename='movie_montage', dpi=200, figsize=(10,10), bare=False):
        """Save one montage PNG per time index into `outdir`, one grid cell
        per buddy, using each buddy's first subplot field and its stdev-based
        symmetric color scale.
        """
        plot_slices = []
        for i, buddy in enumerate(self.buddies):
            if not hasattr(buddy, 'local_files'):
                continue  # this MPI rank holds no data for that run
            profs = buddy.grab_whole_profile(buddy.local_files['slices'], buddy.local_writes_per_file,
                                             subkey=['tasks']*len(buddy.fields_to_grab),
                                             profile_name=buddy.fields_to_grab)
            slices = profs[buddy.fields_to_grab[0]]
            try:
                slices -= buddy.ax[0]['t_avg']  # remove stored time average, if any
            except Exception:
                slices *= 1  # best-effort: no time average stored
            plot_slices.append(slices)
        for j in range(plot_slices[0].shape[0]):
            try:
                fig = plt.figure(figsize=figsize)
                save = True
                for i, buddy in enumerate(self.buddies):
                    if not hasattr(buddy, 'local_files') or len(plot_slices) < i+1:
                        save = False
                        break
                    ax = plt.subplot2grid((self.rows, self.cols),
                                          (int(np.floor(i/self.cols)), int(np.mod(i, self.cols))),
                                          colspan=1, rowspan=1)
                    buddy.plot_field(buddy.xs, buddy.zs, plot_slices[i][j,:,:], ax, buddy.ax[0]['field'],
                                     cmap='RdBu_r', min=-buddy.ax[0]['stdev'], max=buddy.ax[0]['stdev'], bare=bare)
                if save:
                    logger.info('saving fig {}/{}'.format(j+1, plot_slices[0].shape[0]))
                    # BUG FIX: the original referenced an undefined `output_directory`;
                    # the NameError was swallowed by the except below, so no file was
                    # ever written. Use the `outdir` argument instead.
                    fig.savefig(outdir+'/'+filename+'_{:06d}.png'.format(j+1+buddy.cw_rank*20),
                                dpi=dpi, figsize=figsize, bbox_inches='tight')
                plt.close()
            except Exception:
                plt.close()
|
[
"evan.anders@colorado.edu"
] |
evan.anders@colorado.edu
|
788fe02433f5803b8af2dbf85c5e7cbd09b6e185
|
081df6c86f261e3d168f0ea39099e28e1e61c01b
|
/Flask_api.py
|
6215d5d00246a893e54b2eb0945b4f44358a4adb
|
[] |
no_license
|
Usamamalik11/Pakistan-Super-League-Score
|
7e050d08673866f8d190d1c826a029e795032b45
|
b8459005cd3088a8c9cee61efc8e651a88a8acc8
|
refs/heads/main
| 2023-01-06T12:35:52.229393
| 2020-11-09T15:18:42
| 2020-11-09T15:18:42
| 310,600,248
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,884
|
py
|
from flask import Flask,request
import pandas as pd
import numpy as np
import dill as pickle
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn import preprocessing
app=Flask(__name__)

# Load the trained model and its preprocessing objects once at startup.
with open('model' ,'rb') as f1:
    loaded_model = pickle.load(f1)  # fitted regressor/classifier

with open('sc','rb') as f2:
    stand = pickle.load(f2)  # scaler fitted at training time (used by /predict_file)

with open('important','rb') as f3:
    encoder = pickle.load(f3)  # feature encoder fitted at training time
@app.route('/')
def welcome():
    """Root endpoint: simple liveness greeting."""
    greeting = "Welcome All"
    return greeting
@app.route('/predict')
def predict_score():
    """Predict the score for a single ball described by query-string
    parameters, returning the model's prediction as a string.

    Query parameters: Over_Ball, Batting_Team, Bowling_Team, Stadium,
    Runs_Scored, Extras, Fallen_Wickets, Cumulative_Runs_Scored.
    """
    Over_Ball=request.args.get("Over_Ball")
    Batting_Team=request.args.get("Batting_Team")
    Bowling_Team=request.args.get("Bowling_Team")
    Stadium=request.args.get("Stadium")
    Runs_Scored=request.args.get("Runs_Scored")
    Extras=request.args.get("Extras")
    Fallen_Wickets=request.args.get("Fallen_Wickets")
    Cumulative_Runs_Scored=request.args.get("Cumulative_Runs_Scored")
    data={'Over_Ball':[Over_Ball],
          'Batting_Team':[Batting_Team],
          'Bowling_Team':[Bowling_Team],
          'Stadium':[Stadium],
          'Runs_Scored':[Runs_Scored],
          'Extras':[Extras],
          'Fallen_Wickets':[Fallen_Wickets],
          'Cumulative_Runs_Scored':[Cumulative_Runs_Scored]
          }
    df = pd.DataFrame (data, columns =['Over_Ball','Batting_Team', 'Bowling_Team','Stadium','Runs_Scored', 'Extras', 'Fallen_Wickets','Cumulative_Runs_Scored'])
    # BUG FIX: apply the encoder fitted at training time instead of re-fitting it
    # on this single request row (fit_transform discards the learned encoding).
    df=encoder.transform(df)
    # NOTE(review): unlike /predict_file, the scaler `stand` is not applied here --
    # confirm whether the model expects scaled features.
    prediction=loaded_model.predict(df)
    return "Hello The answer is"+str(prediction)
@app.route('/predict_file',methods=["POST"])
def predict_score_test():
    """Batch prediction: read an uploaded CSV (form field "file"), apply the
    training-time encoder and scaler, and return the predictions as a string.
    """
    df_test=pd.read_csv(request.files.get("file"))
    # BUG FIX: transform with the fitted preprocessors; fit_transform would
    # re-learn the encoding/scaling from the uploaded data alone, producing
    # features inconsistent with what the model was trained on.
    df_test=encoder.transform(df_test)
    df_test=stand.transform(df_test)
    predictions=loaded_model.predict(df_test)
    return str(list(predictions))
if __name__=='__main__':
    # Development server, bound to localhost only.
    app.run(host = '127.0.0.1')
|
[
"ushakeel.bee16seecs@seecs.edu.pk"
] |
ushakeel.bee16seecs@seecs.edu.pk
|
3c2417f97db9a810dda8d7ab3bebdd355be46d39
|
5b3ddae168daf35a95788269d6f4d97c68cf21a0
|
/CryptoSPN/aby_conversion/ABY.py
|
0758130ff5804081424e49496389c35f5594a90d
|
[
"MIT"
] |
permissive
|
encryptogroup/CryptoSPN
|
0b9bd99f6c862a1c4418e86c46f91f0ee57f4e2d
|
bb4cb6c035eaa02f8ac789ab66321d8e0160304b
|
refs/heads/master
| 2020-12-21T13:35:21.960894
| 2020-04-29T16:18:58
| 2020-04-29T16:18:58
| 236,446,056
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,871
|
py
|
"""
Created on January 9, 2019
@author: Amos Treiber
"""
import subprocess
import tempfile
from spn.structure.Base import Product, Sum, Leaf, get_nodes_by_type
from spn.structure.leaves.parametric.Parametric import Bernoulli, Poisson, Gaussian
from spn.structure.leaves.histogram.Histograms import Histogram
from math import log, e, pi
from .CPPConversion import aby_header, aby_footer, dec_str, Selection, cmake_file
from CryptoSPN.Constants import CRYPTOSPN_DIR, ABY_DIR, COMPILE_TIMEOUT
import logging
logger = logging.getLogger(__name__)
def spn_to_aby(node, bitlen=64, leaves=None, sel=Selection.OSELECTION, aby_inputs="", aby_circuit="",
               aby_declarations=None, nodes_done=None):
    """Recursively emit ABY C++ source for the given SPN node.

    Returns (bitlen, leaves, aby_inputs, aby_circuit, aby_declarations,
    nodes_done): `aby_inputs` accumulates server/client input-gate code,
    `aby_circuit` accumulates circuit construction code, `aby_declarations`
    maps C++ variable names to (type, initializer) snippets, and `nodes_done`
    tracks already-emitted nodes so shared subtrees are generated only once.
    `leaves` maps each Leaf to its oblivious-selection slot (OSELECTION only).
    """
    if nodes_done is None:
        nodes_done = set()
    if aby_declarations is None:
        aby_declarations = {}
    if leaves is None:
        leaves = {}
    if node in nodes_done:
        # Shared subtree already emitted: nothing to add.
        return bitlen, leaves, aby_inputs, aby_circuit, aby_declarations, nodes_done
    # --- Bernoulli leaf: server inputs are log2(p) and log2(1-p). ---
    if isinstance(node, Bernoulli):
        pstr = "std::numeric_limits<" + dec_str(bitlen) + ">::lowest()" if node.p == 0 else str(log(node.p, 2))
        opstr = "std::numeric_limits<" + dec_str(bitlen) + ">::lowest()" if node.p == 1 else str(log(1 - node.p, 2))
        aby_inputs += "    p = " + pstr + ";\n    op = " + opstr + ";\n"
        aby_inputs += "    pptr = (uint" + str(bitlen) + "_t*) &p;\n" + "    opptr = (uint" + str(
            bitlen) + "_t*) &op;\n"
        aby_inputs += "    share* s_p_" + str(node) + " = circ->PutINGate(*pptr, bitlen, SERVER);\n" \
                                                      "    share* s_op_" + str(node) \
                      + " = circ->PutINGate(*opptr, bitlen, SERVER);\n"
        aby_declarations.update({"p": (dec_str(bitlen), ";\n"), "op": (dec_str(bitlen), ";\n"),
                                 "pptr": ("uint" + str(bitlen) + "_t*", ";\n"),
                                 "opptr": ("uint" + str(bitlen) + "_t*", ";\n")})
        if sel == Selection.LINEAR:
            aby_inputs += "    idx = " + str(node.scope[0]) + ";\n"
            # NOTE(review): the generated C++ below contains a trailing comma
            # before the closing parenthesis ("SERVER,)") -- looks like a bug in
            # the emitted code; verify the LINEAR path compiles.
            aby_inputs += "    share* s_idx_" + str(
                node) + " = circ->PutINGate(idx, 32, SERVER,);\n"
            aby_circuit += "    share* s_" + str(node) + " = putBernoulliNode(putInputSelector(s_idx_" + str(
                node) + ", s_V_in, s_ids, circ), s_op_" + str(node) + ", s_p_" + str(node) + ", circ);\n"
            # NOTE(review): the s_ids snippet below contains the literal text
            # `" + str(bitlen) + "` INSIDE the string (it is not interpolated),
            # so the emitted C++ is malformed -- compare with the Gaussian
            # branch, which emits a plain (uint64_t) cast.
            aby_declarations.update({"s_V_in": ("std::vector<share*>", """;
    for (uint64_t b : v_V) {
        s_V_in.push_back(circ->PutINGate(b, 1, CLIENT));
    }\n"""), "s_ids": ("std::vector<share*>", """;
    for (auto i=0; i < v_V.size(); i++) {
        s_ids.push_back(circ->PutCONSGate((uint" + str(bitlen) + "_t) i, 32));
    }
    uint32_t idx;\n""")})
        elif sel == Selection.NONE:
            aby_circuit += "    share* s_" + str(node) + " = putBernoulliNode(s_V_in.at(" + str(
                node.scope[0]) + "), s_op_" + str(node) + ", s_p_" + str(node) + ", circ);\n"
            aby_declarations.update({"s_V_in": ("std::vector<share*>", """;
    for (uint64_t b : v_V) {
        s_V_in.push_back(circ->PutINGate(b, 1, CLIENT));
    }\n""")})
        elif sel == Selection.OSELECTION:
            aby_circuit += "    share* s_" + str(node) + " = putBernoulliNode(sel[" + str(
                leaves[node]) + "], s_op_" + str(node) + ", s_p_" + str(node) + ", circ);\n"
        else:
            raise Exception("Selection Type not recognized")
        nodes_done.add(node)
        return bitlen, leaves, aby_inputs, aby_circuit, aby_declarations, nodes_done
    # --- Gaussian leaf: server inputs are mu, -log2(2*pi*s^2) and log2(e)/(2 s^2). ---
    if isinstance(node, Gaussian):
        log2e = log(e, 2)
        aby_inputs += "    mu = " + str(node.mean) + ";\n"
        aby_inputs += "    log2ps2 = " + str(0.0 - log(2.0 * pi * node.variance, 2)) + ";\n"
        aby_inputs += "    loge2s2 = " + str(log2e / (2.0 * node.variance)) + ";\n"
        aby_inputs += "    muptr = (uint" + str(bitlen) + "_t*) &mu;\n" + "    log2ps2ptr = (uint" + \
                      str(bitlen) + "_t*) &log2ps2;\n" + "    loge2s2ptr = (uint" + str(bitlen) + "_t*) &loge2s2;\n"
        aby_inputs += "    share* s_mu_" + str(node) + " = circ->PutINGate(*muptr, bitlen, SERVER);\n" \
                                                       "    share* s_log2ps2_" + \
                      str(node) + " = circ->PutINGate(*log2ps2ptr, bitlen, SERVER);\n " \
                                  "    share* s_loge2s2_" + str(node) + \
                      " = circ->PutINGate(*loge2s2ptr, bitlen, SERVER);\n"
        aby_declarations.update(
            {"mu": (dec_str(bitlen), ";\n"), "log2ps2": (dec_str(bitlen), ";\n"), "loge2s2": (dec_str(bitlen), ";\n"),
             "muptr": ("uint" + str(bitlen) + "_t*", ";\n"), "log2ps2ptr": ("uint" + str(bitlen) + "_t*", ";\n"),
             "loge2s2ptr": ("uint" + str(bitlen) + "_t*", ";\n")})
        if sel == Selection.LINEAR:
            aby_inputs += "    idx = " + str(node.scope[0]) + ";\n"
            aby_inputs += "    share* s_idx_" + str(node) + \
                          " = circ->PutINGate(idx, 32, SERVER);\n"
            aby_circuit += "    share* s_" + str(node) + " = putLogGaussianNode(putInputSelector(s_idx_" + \
                           str(node) + ", s_V_in, s_ids, circ), s_mu_" + str(node) + ", s_log2ps2_" + str(node) + \
                           ", s_loge2s2_" + str(node) + ", circ);\n"
            aby_declarations.update({"s_ids": ("std::vector<share*>", """;
    for (auto i=0; i < v_V.size(); i++) {
        s_ids.push_back(circ->PutCONSGate((uint64_t) i, 32));
    }
    uint32_t idx;\n"""), "s_V_in": ("std::vector<share*>", """;
    """ + dec_str(bitlen) + """ val;
    for (double k : v_V) {
        val = k;
        s_V_in.push_back(circ->PutINGate(*(uint""" + str(bitlen) + """_t*) &val, bitlen, CLIENT));
    }\n""")})
        elif sel == Selection.NONE:
            aby_circuit += "    share* s_" + str(node) + " = putLogGaussianNode(s_V_in.at(" + str(
                node.scope[0]) + "), s_mu_" + str(node) + ", s_log2ps2_" + str(node) + ", s_loge2s2_" + str(
                node) + ", circ);\n"
            aby_declarations.update({"s_V_in": ("std::vector<share*>", """;
    """ + dec_str(bitlen) + """ val;
    for (double k : v_V) {
        val = k;
        s_V_in.push_back(circ->PutINGate(*(uint""" + str(bitlen) + """_t*) &val, bitlen, CLIENT));
    }\n""")})
        elif sel == Selection.OSELECTION:
            aby_circuit += "    share* s_" + str(node) + " = putLogGaussianNode(sel[" + str(
                leaves[node]) + "], s_mu_" + str(node) + ", s_log2ps2_" + str(node) + ", s_loge2s2_" + str(
                node) + ", circ);\n"
        else:
            raise Exception("Selection Type not recognized")
        nodes_done.add(node)
        return bitlen, leaves, aby_inputs, aby_circuit, aby_declarations, nodes_done
    # --- Poisson leaf: server inputs are log2(lambda) and lambda*log2(e). ---
    if isinstance(node, Poisson):
        log2e = log(e, 2)
        llstr = "std::numeric_limits<" + dec_str(bitlen) + ">::min()" if node.mean == 0 else str(log(node.mean, 2))
        aby_inputs += "    loglambda = " + llstr + ";\n"
        aby_inputs += "    lambdaloge = " + str(node.mean * log2e) + ";\n"
        aby_inputs += "    llptr = (uint" + str(bitlen) + "_t*) &loglambda;\n" + "    lleptr = (uint" + \
                      str(bitlen) + "_t*) &lambdaloge;\n"
        aby_inputs += "    share* s_ll_" + str(node) + \
                      " = circ->PutINGate(*llptr, bitlen, SERVER);\n    share* s_lle_" + \
                      str(node) + " = circ->PutINGate(*lleptr, bitlen, SERVER);\n"
        aby_declarations.update({"loglambda": (dec_str(bitlen), ";\n"), "lambdaloge": (dec_str(bitlen), ";\n"),
                                 "llptr": ("uint" + str(bitlen) + "_t*", ";\n"),
                                 "lleptr": ("uint" + str(bitlen) + "_t*", ";\n")})
        if sel == Selection.LINEAR:
            aby_inputs += "    idx = " + str(node.scope[0]) + ";\n"
            aby_inputs += "    share* s_idx_" + str(node) + \
                          " = circ->PutINGate(idx, 32, SERVER, seed_exp, &expander);\n"
            aby_circuit += "    share* s_" + str(node) + " = putLogPoissonNode(putInputSelector(s_idx_" + \
                           str(node) + ", s_V_in, s_ids, circ), s_ll_" + str(node) + ", putInputSelector(s_idx_" + \
                           str(node) + ", s_V_logfac, s_ids, circ), s_lle_" + str(node) + ", circ);\n"
            # NOTE(review): in the s_V_in snippet below the last PutINGate call
            # reads "bitlen CLIENT" (missing comma) -- looks like a bug in the
            # emitted C++; the NONE branch emits "bitlen, CLIENT".
            aby_declarations.update({"s_ids": ("std::vector<share*>", """;
    for (auto i=0; i < v_V.size(); i++) {
        s_ids.push_back(circ->PutCONSGate((uint64_t) i, 32));
    }
    uint32_t idx;\n"""), "s_V_in": ("std::vector<share*>", """;
    std::vector<share*> s_V_logfac;
    """ + dec_str(bitlen) + """ val, val_logfac;
    for (double k : v_V) {
        val = k;
        val_logfac = log2(tgamma(k + 1));
        s_V_logfac.push_back(circ->PutINGate(*(uint""" + str(bitlen) + """_t*) &val_logfac, bitlen, CLIENT));
        s_V_in.push_back(circ->PutINGate(*(uint""" + str(bitlen) + """_t*) &val, bitlen CLIENT));
    }\n""")})
        elif sel == Selection.NONE:
            aby_circuit += "    share* s_" + str(node) + " = putLogPoissonNode(s_V_in.at(" + str(
                node.scope[0]) + "), s_ll_" + str(node) + ", s_V_logfac.at(" + str(node.scope[0]) + "), s_lle_" + str(
                node) + ", circ);\n"
            aby_declarations.update({"s_V_in": ("std::vector<share*>", """;
    std::vector<share*> s_V_logfac;
    """ + dec_str(bitlen) + """ val, val_logfac;
    for (double k : v_V) {
        val = k;
        val_logfac = log2(tgamma(k + 1));
        s_V_logfac.push_back(circ->PutINGate(*(uint""" + str(bitlen) + """_t*) &val_logfac, bitlen, CLIENT));
        s_V_in.push_back(circ->PutINGate(*(uint""" + str(bitlen) + """_t*) &val, bitlen, CLIENT));
    }\n""")})
        elif sel == Selection.OSELECTION:
            aby_circuit += "    share* s_" + str(node) + " = putLogPoissonNode(sel[" + str(
                leaves[node]) + "], s_ll_" + str(node) + ", sel_logfac[" + str(node.scope[0]) + "], s_lle_" + str(
                node) + ", circ);\n"
            aby_declarations.update({"v_V_logfac": ("std::vector<double>", """;
    """ + dec_str(bitlen) + """ val;
    for (double k : v_V) {
        v_V_logfac.push_back(log2(tgamma(k + 1)));
    }\n""")})
        else:
            raise Exception("Selection Type not recognized")
        nodes_done.add(node)
        return bitlen, leaves, aby_inputs, aby_circuit, aby_declarations, nodes_done
    # --- Histogram leaf: server inputs are bin borders and log2 bin densities. ---
    if isinstance(node, Histogram):
        aby_declarations.update(
            {"v_s_borders": ("std::vector<share*>", ";\n    for (double k : {" + ", ".join(map(str, node.breaks))
                             + """}) {
        """ + dec_str(bitlen) + """ val = k;
        v_s_borders.push_back(circ->PutINGate(*(uint""" + str(bitlen) +
                             """_t*) &val, bitlen, SERVER));
    }\n""")})
        # NOTE(review): log(d, 2) raises ValueError for zero-density bins --
        # confirm histograms are smoothed before conversion.
        aby_inputs += "    std::vector<" + dec_str(bitlen) + "> v_" + str(node) + "_densities = {" + ", ".join(
            map(str, map(lambda d: log(d, 2), node.densities))) + "};\n"
        aby_inputs += "    std::vector<uint" + str(bitlen) + "_t> v_" + str(node) + "_densitiesi = dtoi(v_" + str(
            node) + "_densities);\n"
        aby_inputs += "    std::vector<share*> v_s_" + str(node) + "_densities;\n"
        aby_inputs += "    for(uint" + str(bitlen) + "_t k : v_" + str(node) + """_densitiesi) {
        v_s_""" + str(node) \
                      + """_densities.push_back(circ->PutINGate(k, bitlen, SERVER));
    }\n"""
        if sel == Selection.NONE:
            aby_declarations.update({"s_V_in": ("std::vector<share*>", """;
    for (double in : v_V) {
        """ + dec_str(bitlen) + """ val = in;
        s_V_in.push_back(circ->PutINGate(*(uint""" + str(bitlen)
                                     + """_t*) &val, bitlen, CLIENT));
    }\n""")})
            aby_circuit += "    share* s_" + str(node) + " = putHistogramNode(s_V_in.at(" + \
                           str(node.scope[0]) + "), v_s_borders, v_s_" + str(node) + "_densities, circ);\n"
        elif sel == Selection.LINEAR:
            aby_inputs += "    idx = " + str(node.scope[0]) + ";\n"
            aby_inputs += "    share* s_idx_" + str(
                node) + " = circ->PutINGate(idx, 32, SERVER);\n"
            aby_circuit += "    share* s_" + str(node) + " = putHistogramNode(putInputSelector(s_idx_" + str(
                node) + ", s_V_in, s_ids, circ), v_s_borders, v_s_" + str(node) + "_densities, circ);\n"
            aby_declarations.update({"s_V_in": ("std::vector<share*>", """;
    for (uint64_t in : v_V) {
        """ + dec_str(bitlen) + """ val = in;
        s_V_in.push_back(circ->PutINGate(val, bitlen, CLIENT));
    }\n"""), "s_ids": ("std::vector<share*>", """;
    for (auto i=0; i < v_V.size(); i++) {
        s_ids.push_back(circ->PutCONSGate((uint32_t) i, 32));
    }
    uint32_t idx;\n""")})
        elif sel == Selection.OSELECTION:
            aby_circuit += "    share* s_" + str(node) + " = putHistogramNode(sel[" + str(leaves[node]) + \
                           "], v_s_borders, v_s_" + str(node) + "_densities, circ);\n"
        else:
            raise Exception("Selection Type not recognized")
        nodes_done.add(node)
        return bitlen, leaves, aby_inputs, aby_circuit, aby_declarations, nodes_done
    # --- Inner nodes: recurse into children, then combine with prod/sum gates. ---
    if isinstance(node, Product) or isinstance(node, Sum):
        res_list = list(map(lambda child: spn_to_aby(child, bitlen, leaves, sel, aby_inputs, aby_circuit,
                                                     aby_declarations, nodes_done), node.children))
        aby_inputs = "".join(map(lambda aby: aby[2], res_list))
        aby_circuit = "".join(map(lambda aby: aby[3], res_list))
        # NOTE(review): the two map() calls below are never evaluated under
        # Python 3 (map is lazy). They are redundant anyway -- aby_declarations
        # and nodes_done are shared objects mutated in place by the recursive
        # calls -- and evaluating the second would even raise, since aby[5] is
        # the nodes_done set itself (unhashable).
        map(lambda aby: aby_declarations.update(aby[4]), res_list)
        map(lambda aby: nodes_done.add(aby[5]), res_list)
        if isinstance(node, Product):
            aby_circuit += "    std::vector<share*> v_" + str(node) + "_children = { " + ", ".join(
                map(lambda child: "s_" + str(child), node.children)) + " };\n"
            aby_circuit += "    share* s_" + str(node) + " = putLogProdNode(v_" + str(node) + "_children, circ);\n"
        else:
            # Sum node: weights enter as server inputs in log2 space.
            aby_inputs += "    std::vector<" + dec_str(bitlen) + "> v_" + str(node) + "_weights = { " + ", ".join(
                map(lambda w: str(log(w, 2)), node.weights)) + " };\n"
            aby_inputs += "    std::vector<uint" + str(bitlen) + "_t> v_" + str(
                node) + "_weightsi = dtoi( v_" + str(
                node) + "_weights);\n"
            aby_inputs += "    std::vector<share*> v_" + str(node) + "_sweights;\n"
            aby_inputs += "    for (int j = 0; j < v_" + str(node) + """_weights.size(); ++j) {
        v_""" + str(node) + "_sweights.push_back(circ->PutINGate(v_" + str(node) \
                          + """_weightsi.at(j), bitlen, SERVER));
    }\n"""
            aby_circuit += "    std::vector<share*> v_" + str(node) + "_children = { " + ", ".join(
                map(lambda child: "s_" + str(child), node.children)) + " };\n"
            aby_circuit += "    share* s_" + str(node) + " = putLogSumNode(v_" + str(node) + "_children, v_" + str(
                node) + "_sweights, v_" + str(node) + "_children.size() , circ);\n"
        nodes_done.add(node)
        return bitlen, leaves, aby_inputs, aby_circuit, aby_declarations, nodes_done
    raise Exception("Node type not registered: " + str(type(node)))
def spn_to_aby_file(node, cryptospn_path=CRYPTOSPN_DIR, bitlen=64, filename="spntest.cpp", sel=Selection.OSELECTION):
    """Convert an SPN into a complete, self-contained ABY C++ source file.

    Parameters
    ----------
    node           : root node of the SPN.
    cryptospn_path : path to the CryptoSPN checkout (header/footer templates).
    bitlen         : share bit length.
    filename       : path of the .cpp file to write.
    sel            : input-selection strategy (see Selection).
    """
    logger.info(f"Creating {filename}...")
    aby_head = aby_header(cryptospn_path, bitlen)
    aby_end = aby_footer(node, bitlen, filename)
    if sel == Selection.OSELECTION:
        # Number the leaves so each gets a slot in the oblivious-selection output.
        (bitlen, leaves, aby_inputs, aby_circuit, aby_declarations, nodes_done) = spn_to_aby(node, bitlen,
                                                                                            {node: pos for pos, node in
                                                                                             enumerate(
                                                                                                 get_nodes_by_type(
                                                                                                     node, Leaf))},
                                                                                            sel)
        selection_input = "    std::vector<uint32_t>ids = {"
        # FIX: iterate with a fresh name instead of shadowing the `node` parameter.
        for leaf, pos in leaves.items():
            selection_input += str(leaf.scope[0]) + ", "
        # The client-input conversion depends on the leaf type (all leaves are
        # assumed homogeneous; only the first one is inspected).
        if isinstance(next(iter(leaves.keys())), Poisson):
            selection_input += "};\n    std::vector<uint" + str(
                bitlen) + "_t > v_ConvV = dtoi(v_V);\n    std::vector<uint" + str(
                bitlen) + "_t > v_ConvV_logfac = dtoi(v_V_logfac);\n" \
                          "    share** sel= selection_GC(v_ConvV, ids.size(), ids.data(), circ);\n" \
                          "    share** sel_logfac = selection_GC(v_ConvV_logfac, ids.size(), ids.data(), circ);\n"
        elif isinstance(next(iter(leaves.keys())), Gaussian) or isinstance(next(iter(leaves.keys())), Histogram):
            selection_input += "};\n    std::vector<uint" + str(bitlen) \
                               + "_t > v_ConvV = dtoi(v_V);\n" \
                                 "    share** sel= selection_GC(v_ConvV, ids.size(), ids.data(), circ);\n    "
        else:
            selection_input += "};\n    std::vector<uint" + str(bitlen) \
                               + "_t> v_ConvV(v_V.begin(), v_V.end());\n" \
                                 "    share** sel= selection_GC(v_ConvV, ids.size(), ids.data(), circ);\n"
        aby_inputs = selection_input + aby_inputs
    else:
        (bitlen, _, aby_inputs, aby_circuit, aby_declarations, nodes_done) = spn_to_aby(node, bitlen, sel=sel)
    # Prepend every collected declaration before the input-gate code.
    for key, (before, after) in aby_declarations.items():
        aby_inputs = "    " + before + " " + key + after + aby_inputs
    # FIX: use a context manager so the file is flushed and closed even on error
    # (the original leaked the handle, relying on interpreter shutdown).
    with open(filename, 'w') as f:
        f.write(aby_head + aby_inputs + aby_circuit + aby_end)
    logger.info(f"{filename} created.")
def spn_to_aby_exec(node, aby_path=ABY_DIR, cryptospn_path=CRYPTOSPN_DIR, bitlen=64, name="spntest",
                    sel=Selection.OSELECTION):
    """Generate ABY C++ for the SPN and compile it into an executable `name`.

    Writes `name`.cpp via spn_to_aby_file, emits a temporary CMakeLists.txt,
    then runs the CryptoSPN compile script against the ABY checkout and logs
    the outcome based on the script's exit code.
    """
    spn_to_aby_file(node, cryptospn_path, bitlen, name + ".cpp", sel)
    # NOTE(review): the temp file is reopened by name while still open; this
    # works on POSIX but not on Windows.
    with tempfile.NamedTemporaryFile() as tmpfile:
        cmake_filename = tmpfile.name
        with open(cmake_filename, 'w') as f:
            f.write(cmake_file(name + ".cpp", aby_path, name))
        logger.debug(f"CMakeLists.txt created in {cmake_filename}.")
        logger.info(f"Compiling {name}. This might take some time...")
        if cryptospn_path[-1] != '/':
            cryptospn_path += '/'
        proc = subprocess.Popen([cryptospn_path + 'compiling/compile.sh', cryptospn_path, aby_path, name + '.cpp',
                                 name, cmake_filename])
        # NOTE(review): wait() raises subprocess.TimeoutExpired after
        # COMPILE_TIMEOUT seconds -- callers currently see that exception unhandled.
        ret = proc.wait(timeout=COMPILE_TIMEOUT)
        if ret == 0:
            logger.info(f"Created executable {name}.")
        elif ret == 2:
            # Exit code 2 is the compile script's "ABY not built" signal.
            logger.warning(f"Compilation of {name} failed! Did you forget to build ABY?")
        else:
            logger.warning(f"Compilation of {name} failed!")
|
[
"treiber@encrypto.cs.tu-darmstadt.de"
] |
treiber@encrypto.cs.tu-darmstadt.de
|
0200c688bffd077a67cc58902a058bd882a27bda
|
6ea14d2de6bf34ae66bb35e8d9f11ffda1b65205
|
/notebooks/telegramBot/bot.py
|
cc690eb61a19170feae7ac00838c9a1f06560136
|
[] |
no_license
|
ostapkharysh/AutomatedResponseMessenger
|
f10ec136d7b1a6686d787ccbf93c92fbf9ced180
|
a72ec74f356bdbf187583c000f8f8dc1ea3b2b24
|
refs/heads/master
| 2021-04-06T20:12:19.677054
| 2019-01-31T15:22:49
| 2019-01-31T15:22:49
| 125,359,022
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 710
|
py
|
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters

# NOTE(review): the bot token is hard-coded; anyone with read access to this
# repository can hijack the bot. Consider loading it from an environment variable.
updater = Updater(token='554425042:AAHnLVakFnVxyUAfnO27sMGUxOc8jqg3ylU')
dispatcher = updater.dispatcher
def start_command(bot, update):
    """Reply to the /start command with a short greeting."""
    chat = update.message.chat_id
    bot.send_message(chat_id=chat, text='Whats up')
def text_message(bot, update):
    """Echo any plain-text message back to its sender, prefixed 'Received: '."""
    echoed = 'Received: ' + update.message.text
    bot.send_message(chat_id=update.message.chat_id, text=echoed)
# Route /start to the greeting handler and all plain-text messages to the echo handler.
start_command_handler = CommandHandler('start', start_command)
text_message_handler = MessageHandler(Filters.text, text_message)
dispatcher.add_handler(start_command_handler)
dispatcher.add_handler(text_message_handler)
# Start long-polling; clean=True drops updates accumulated while offline.
updater.start_polling(clean=True)
updater.idle()
|
[
"olha.bakay@eleks.com"
] |
olha.bakay@eleks.com
|
34561a1b7934a0de12ecdb733f78ec540ce47ece
|
72fa9e96e9eeae6ac213e9355407168483f1a447
|
/lab4.py
|
f5105e773abd11b8b98cbefe6056a0b1c6cb1a40
|
[
"MIT"
] |
permissive
|
kendallsmith327/IA-241
|
d65dca1773692730836192725520642c3e1e0c1b
|
7c1492ff635249849c20c083b5d7fc0518c1ab8a
|
refs/heads/main
| 2023-05-02T11:50:35.945176
| 2021-05-04T14:40:08
| 2021-05-04T14:40:08
| 332,523,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
"""
lab 4 dict and tuple
"""
#3.1
my_dict = {
'name': 'Tom',
'id':123
}
print(my_dict)
#3.2
print(my_dict.values())
print(my_dict.keys())
#3.3
my_dict['id']=321
print(my_dict)
#3.4
my_dict.pop('name',None)
print(my_dict)
#3.5
my_tweet = {
"tweet_id":1138,
"coordinates": (-75, 40),
"visited_countries": ["GR", "HK", "MY"]
}
print(my_tweet)
#3.6
print(len(my_tweet["visited_countries"]))
#3.7
my_tweet["visited_countries"].append("CH")
print(my_tweet)
#3.8
print("US" in my_tweet["visited_countries"])
#3.9
#(-81,45)
my_tweet["coordinates"]=(81,45)
print(my_tweet)
|
[
"ec2-user@ip-172-31-54-218.ec2.internal"
] |
ec2-user@ip-172-31-54-218.ec2.internal
|
3aedcff2997d62f490182dff85f9a046504d7ecc
|
ede4380d48abfaafd81d6846255ca8f482d043cc
|
/clustering on Mldata/dbscan/dbscan.py
|
81afbc4c1d117cc8199297968a66bb73edeb0988
|
[] |
no_license
|
harsh4723/Intern_projects-2017-summer-
|
cc16a520b418759c794e68995791aeeb7fadc554
|
06e6e79521c3871c0e4ce06643caccd16e7e0ac7
|
refs/heads/master
| 2021-01-01T19:44:19.809410
| 2017-07-28T15:46:11
| 2017-07-28T15:46:11
| 98,664,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,570
|
py
|
"""
DBSCAN test on multilabel data
"""
import numpy as np
import csv
import math
from sklearn.cluster import DBSCAN
from sklearn import metrics
loc=6
def dataload(filename): #load data and convert into a list
    """Load an ARFF-style CSV file and return its numeric data rows.

    Rows before the '@data' marker are discarded; the marker row itself is
    captured and then dropped via del below. All cells are converted to float.
    (Python 2: the file is opened in 'rb' mode for csv.reader.)
    """
    with open(filename,'rb') as csvfile:
        dataset=csv.reader(csvfile)
        data=list(dataset)
        data.remove([])  # drop the first blank row
    datafinal=[]
    c=0  # capture flag: 1 once '@data' has been seen (reset on blank rows)
    for x in data:
        if(x==[]):
            c=0
        elif(x[0]=='@data'):
            c=1
        if(c==1):
            datafinal.append(x)
    del(datafinal[0])  # remove the captured '@data' marker row itself
    for x in range(len(datafinal)):
        for y in range(len(datafinal[0])):
            datafinal[x][y]=float(datafinal[x][y])
    return datafinal
def eucliddistance(x1,x2):
    """Return the Euclidean distance between two equal-length numeric vectors.

    Rewritten with zip/sum instead of an index loop; behavior is unchanged.
    """
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(x1, x2)))
# Load the multilabel dataset; the last `loc` columns are the labels, so the
# features are the first (lod - loc) columns of each row.
data=dataload('scene-train.csv')
lod=len(data[0])
X=[]
for i in range(len(data)):
    X.append(data[i][:(lod-loc)])
# X is sample data
l= len(X)
# Full pairwise Euclidean distance matrix (O(n^2) -- fine for small datasets).
distmatrix=[]
for i in range(l):
    li=[]
    for j in range(l):
        li.append(eucliddistance(X[i],X[j]))
    distmatrix.append(li)
# For each point, drop one self-distance (0.0) and take its nearest-neighbor
# distance; duplicate points (distance 0.0) are then filtered out entirely.
mins=[]
for x in distmatrix:
    x.remove(0.0)
    mins.append(min(x))
mins = [x for x in mins if x != 0.0]
print min(mins)
d=sum(mins)/float(len(mins)) #taking epsilon as average
# Compute DBSCAN
db = DBSCAN(eps=d, min_samples=3).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
print labels
# Number of clusters in labels, ignoring noise if present.
print len(labels)
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print "no of clusters " +str(n_clusters_)
|
[
"noreply@github.com"
] |
harsh4723.noreply@github.com
|
167d321ae89718b7f15ef4f4ebe01e32629453a9
|
c6b19dba34bfc9259168b56a0e34dedad488eacd
|
/tools/compute_z.py
|
e837bcb5bbc2442dadee0523de6d0d02d52f521f
|
[] |
no_license
|
tspurway/learnyx
|
7a5ddf083b14e5264233a182df247a89edccdf0f
|
4ebdb0e2ecaa421f47114c6b3c245dcef6f8fe65
|
refs/heads/master
| 2016-08-08T15:26:16.480877
| 2015-04-30T00:58:10
| 2015-04-30T00:58:10
| 24,125,218
| 0
| 2
| null | 2015-02-24T03:39:35
| 2014-09-17T01:43:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,744
|
py
|
#!/usr/bin/python
import sys
import re
import logging
from optparse import OptionParser
from math import sqrt
def read_hist(filename):
    """Read a 'label,count' CSV of history-size counts.

    Returns (total, histogram): `total` is the sum of all counts as a float,
    and `histogram` starts with a 0 sentinel so histogram[i] holds the count
    for history size i (rows are assumed ordered by size).
    """
    total = 0.0
    histogram = [0]
    # FIX: use a context manager -- the original never closed the file handle.
    with open(filename, "r") as f:
        for line in f:
            chunk = re.split(',', line)
            count = int(chunk[1]) * 1.0
            total += count
            histogram.append(count)
    return total, histogram
def compute_freq_mean(freq, histogram):
    """Expected number of users who see the site at least once, given a
    per-visit frequency `freq` and a histogram of history sizes
    (histogram[i] = number of users with history length i)."""
    return sum(histogram[i] * (1 - (1 - freq) ** i)
               for i in range(1, len(histogram)))
def estimate_freq(site_count, site, histogram):
    """Binary-search the per-visit frequency whose expected hit count over
    `histogram` matches the observed `site_count`.

    (`site` is accepted for interface compatibility but unused.)
    """
    lo = 0.0000001
    hi = 0.9999999
    freq = 0.5
    while (hi - lo) > 0.0000001:
        if compute_freq_mean(freq, histogram) < site_count:
            lo = freq
        else:
            hi = freq
        freq = (lo + hi) / 2
    return freq
def compute_distrib(site1, site2, sites, histogram):
    """Mean and variance of the expected co-occurrence count of `site2`
    (visited within a one-shorter window after `site1`) under the
    independence model, using the per-site frequencies in `sites`."""
    freq1 = sites[site1]
    freq2 = sites[site2]
    mean = 0
    var = 0
    for i in range(2, len(histogram)):
        p_first = 1 - (1 - freq1) ** i
        p_second = 1 - (1 - freq2) ** (i - 1)
        mean += histogram[i] * p_first * p_second
        var += histogram[i] * p_first * p_second * ((1 - freq2) ** (i - 1))
    return mean, var
def read_sites(filename, histogram):
    """Read a 'site,count' CSV and return {site: estimated per-visit frequency},
    estimating each frequency against the history-size `histogram`."""
    sites = {}
    # FIX: use a context manager -- the original never closed the file handle.
    with open(filename, "r") as f:
        for line in f:
            chunk = re.split(',', line)
            site = chunk[0]
            count = int(chunk[1]) * 1.0
            sites[site] = estimate_freq(count, site, histogram)
    return sites
def read_lines(sites, histogram):
    """Read 'site1,site2,count' triples from stdin; for each pair with both
    sites known, print 'site,site,count,z' for both orderings, where z is the
    observed count standardized against the independence-model distribution.
    (Python 2: uses print statements.)
    """
    for line in sys.stdin:
        try:
            site1, site2, count = re.split(',', line)
            if site1 in sites and site2 in sites:
                mean, var = compute_distrib(site1, site2, sites, histogram)
                count = int(count)
                z = (count - mean) / sqrt(var)
                print "%s,%s,%d,%.2f" % (site1, site2, count, z)
                # Symmetric direction: standardize against (site2, site1).
                mean, var = compute_distrib(site2, site1, sites, histogram)
                z = (count - mean) / sqrt(var)
                print "%s,%s,%d,%.2f" % (site2, site1, count, z)
        except Exception as e:
            # Malformed lines (or var == 0) are logged and skipped.
            logging.error("Error processing line '%s': %s" % (line, e))
def read_args():
    """Parse command-line options: -s/--sites (site counts file) and
    -x/--hist (history-size histogram file)."""
    parser = OptionParser()
    for short_flag, long_flag, dest, help_text in (
            ("-s", "--sites", "site_file", "site counts"),
            ("-x", "--hist", "hist_file", "history size historgram"),
    ):
        parser.add_option(short_flag, long_flag, dest=dest, help=help_text)
    return parser.parse_args()
if __name__ == '__main__':
    # Parse options, load the histogram and per-site frequencies, then
    # stream pair counts from stdin and emit z-scores.
    (options, args) = read_args()
    tot, hist = read_hist(options.hist_file)
    s = read_sites(options.site_file, hist)
    read_lines(s, hist)
#    read_lines(keys, value)
|
[
"tspurway@gmail.com"
] |
tspurway@gmail.com
|
8cd7a52275e204cadbc101ee0dd3a91b5a3cf40b
|
9d5ae8cc5f53f5aee7247be69142d9118769d395
|
/219. Contains Duplicate II.py
|
be4a241a6c98d774f6c7b50f1b12ab279242bda2
|
[] |
no_license
|
BITMystery/leetcode-journey
|
d4c93319bb555a7e47e62b8b974a2f77578bc760
|
616939d1599b5a135747b0c4dd1f989974835f40
|
refs/heads/master
| 2020-05-24T08:15:30.207996
| 2017-10-21T06:33:17
| 2017-10-21T06:33:17
| 84,839,304
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
class Solution(object):
    def containsNearbyDuplicate(self, nums, k):
        """
        Return True iff nums contains two equal values at indices i < j
        with j - i <= k.

        :type nums: List[int]
        :type k: int
        :rtype: bool
        """
        if not nums or k <= 0:
            return False
        last_seen = {}
        for idx, value in enumerate(nums):
            prev = last_seen.get(value)
            if prev is not None and idx - prev <= k:
                return True
            last_seen[value] = idx
        return False
|
[
"noreply@github.com"
] |
BITMystery.noreply@github.com
|
0fa7bd45588c07090cd2cb309e2cbea474f5db0c
|
8a5de861c874190d9503e33fc583402bc90e1dc7
|
/W14/EX2.py
|
3a5d0a36376416abf22cf2adb94e3c8eb03cf568
|
[] |
no_license
|
hj8239ka45/MachineVision
|
7a7b0c00932fcbc88b18dbadea2597d2677fe035
|
2eeb72872f948ecfb403b0a58a118417d0ce15bc
|
refs/heads/master
| 2023-04-13T08:46:36.732849
| 2023-04-05T05:13:56
| 2023-04-05T05:13:56
| 276,903,505
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,087
|
py
|
import cv2
import numpy as np
import glob
def draw(img, corners, imgpts):
    """Draw a wireframe cube on *img* from 8 projected corner points.

    imgpts holds the 4 bottom-face corners followed by the 4 top-face
    corners; the bottom face is filled green, the vertical edges are
    drawn blue, and the top face is outlined red. Returns the image.
    """
    pts = np.int32(imgpts).reshape(-1, 2)
    # bottom face (filled, green)
    img = cv2.drawContours(img, [pts[:4]], -1, (0, 255, 0), -3)
    # vertical edges connecting bottom corner b to top corner t (blue)
    for b, t in zip(range(4), range(4, 8)):
        img = cv2.line(img, tuple(pts[b]), tuple(pts[t]), (255), 3)
    # top face (outlined, red)
    img = cv2.drawContours(img, [pts[4:]], -1, (0, 0, 255), 3)
    return img
# Termination criteria for sub-pixel corner refinement: 30 iterations or
# epsilon 0.001, whichever comes first.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# Inner-corner count of the chessboard pattern (7 x 6).
w = 7
h = 6
# 3-D object points of the board corners in the board's own plane (z = 0).
objp = np.zeros((w*h,3), np.float32)
objp[:,:2] = np.mgrid[0:w,0:h].T.reshape(-1,2)
# NOTE(review): these two lists grow unbounded and are never read after
# the calibrateCamera call below was commented out.
objpoints = []
imgpoints = []
#axis = np.float32([[3,0,0], [0,3,0], [0,0,-3]]).reshape(-1,3)
# Eight cube corners (4 on the board plane, 4 lifted by -3 units) used by draw().
axis = np.float32([[0,0,0], [0,3,0], [3,3,0], [3,0,0],[0,0,-3],[0,3,-3],[3,3,-3],[3,0,-3] ])
# Camera intrinsics and distortion coefficients — presumably from a prior
# calibration run; TODO confirm they match the capture device.
mtx = np.array([[293., 0, 286.], [0, 297., 247], [0, 0, 1]])
dist = np.array([0.03, -0.02, 0.01,-0.01, 0.00])
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    frame = cv2.flip(frame, 1)
    gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (w,h),None)
    if ret == True:
        cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
        objpoints.append(objp)
        imgpoints.append(corners)
        #cv2.drawChessboardCorners(img, (w,h), corners, ret)
        #ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape, None, None)
        # Estimate the board pose from the fixed intrinsics, project the
        # cube, and overlay it plus the camera-to-board distance.
        ret, rvecs, tvecs, inliers = cv2.solvePnPRansac(objp, corners, mtx, dist)
        imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
        img = draw(frame,corners,imgpts)
        # Euclidean norm of the translation vector = distance to the board.
        d = (tvecs[0, 0]**2+tvecs[1, 0]**2+tvecs[2, 0]**2)**0.5
        cv2.putText(frame, "distance = %.3f"%(d), (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
|
[
"hj8239ka45@gmail.com"
] |
hj8239ka45@gmail.com
|
960bc80f0bf50741ad3d776671f7ea2798a1d770
|
26e8c952b4d7235c35fa4d2b5ad668931d3c0ef8
|
/tests/test_eddystone_scanner_example.py
|
88ebac795feeec4430e9a1097a162e20ef74cb19
|
[
"MIT"
] |
permissive
|
ukBaz/python-bluezero
|
fb7a2e1eea4ee4ef8b58dda308884bceaa62ef0d
|
2b0aba891655bae44c1f281852d5669d5dc9db19
|
refs/heads/main
| 2023-08-17T06:33:27.460187
| 2023-08-06T16:19:09
| 2023-08-06T16:19:09
| 49,202,026
| 360
| 134
|
MIT
| 2023-08-06T16:14:33
| 2016-01-07T11:57:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,507
|
py
|
import dbus
import dbusmock
import io
from pathlib import Path
import subprocess
# from gi.repository import GLib
from unittest import mock, skip
from tests.mock_eventloop import MockAsync, run_pending_events
from examples import eddystone_scanner
class TestExampleScanner(dbusmock.DBusTestCase):
    """
    Test mocking bluetoothd
    """

    @classmethod
    def setUpClass(cls):
        # Spawn a mocked BlueZ daemon (from the bluez_scan template) on a
        # private system bus shared by all tests in this class.
        here = Path(__file__).parent
        template = str(here.joinpath('dbusmock_templates', 'bluez_scan.py'))
        cls.start_system_bus()
        cls.dbus_con = cls.get_dbus(True)
        (cls.p_mock, cls.obj_bluez) = cls.spawn_server_template(
            template, {}, stdout=subprocess.PIPE)

    def setUp(self):
        # Reset the mock so every test starts from a clean BlueZ state.
        self.obj_bluez.Reset()
        self.dbusmock = dbus.Interface(self.obj_bluez, dbusmock.MOCK_IFACE)
        self.dbusmock_bluez = dbus.Interface(self.obj_bluez, 'org.bluez.Mock')

    @classmethod
    def tearDownClass(cls):
        # Stop the private bus, then terminate and reap the mock process.
        cls.stop_dbus(cls.system_bus_pid)
        cls.p_mock.terminate()
        cls.p_mock.wait()

    def test_scanner_eddy_url2(self):
        # The mocked adapter advertises an Eddystone URL beacon; the example
        # scanner should print its URL, TX power (8) and RSSI (-61).
        expected = 'Eddystone URL: https://www.bluetooth.com ↗ 8 ↘ -61'
        self.dbusmock_bluez.AddAdapter('hci0', 'My-Test-Device')
        with mock.patch('bluezero.async_tools.EventLoop.run', MockAsync.run):
            with mock.patch('sys.stdout', new=io.StringIO()) as fake_out:
                eddystone_scanner.main()
        self.assertIn(expected,
                      fake_out.getvalue())
|
[
"barry_byford@yahoo.co.uk"
] |
barry_byford@yahoo.co.uk
|
d39ae8b0d3f623d7faa51d9415dce15c13f2d14a
|
c6f001c85a2c0664a00393dd026b2b36851a5ec2
|
/comentarios/api/urls.py
|
8a60dfb3327cac8697b5eae3ef6db6e0ec75e9db
|
[] |
no_license
|
OscarRuiz15/BackendTG
|
061816c3014c81d5f109738f5707efba2961bb2c
|
c254f6f7dd5df7b7364fbf673d9525e3901423dd
|
refs/heads/master
| 2022-12-11T10:42:50.922058
| 2021-04-15T17:49:43
| 2021-04-15T17:49:43
| 147,595,067
| 0
| 0
| null | 2018-09-08T22:16:45
| 2018-09-06T00:29:14
|
Python
|
UTF-8
|
Python
| false
| false
| 240
|
py
|
from django.urls import path
from .views import ComentarioListView, ComentarioView
# Routes for the comments (comentarios) API:
#   ''          -> ComentarioListView (list/create)
#   '<int:id>/' -> ComentarioView (single-comment operations)
urlpatterns = [
    path('', ComentarioListView.as_view(), name='comments-create'),
    path('<int:id>/', ComentarioView.as_view(), name='comments-rud'),
]
|
[
"andres.medina@correounivalle.edu.co"
] |
andres.medina@correounivalle.edu.co
|
131f9a0dcf44ee0c7a6ce552a572bf306f625caa
|
9e4fd9dce2483272a4d6c3feece3f92f7af69f5d
|
/core/utils/table.py
|
01ec5a2105e37cb460042511d721161c9799084d
|
[] |
no_license
|
matavclez/hakuna_datata
|
25f3f9a05adee17bb14c000ef97c6c8b31a0bb72
|
0d6f19e85093e872d4880f1951a208b0a6edd6fa
|
refs/heads/master
| 2021-02-12T17:15:26.157228
| 2020-03-03T14:39:34
| 2020-03-03T14:39:34
| 244,610,861
| 1
| 1
| null | 2020-03-03T14:42:18
| 2020-03-03T10:48:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,495
|
py
|
"""
En este script se define la clase base Table.
Con esto crearemos otras clases que se encargarán de leer y guardar las tablas que generemos.
"""
import pandas as pd
from core.data_pipeline.rutas import schema, relative_paths
class Table:
    """Base class for reading and writing the project's CSV tables.

    Concrete tables override ``_ID_TABLE`` with their key in
    ``relative_paths``; the empty default maps straight to the user's
    base directory.
    """

    # Key into `relative_paths`; subclasses override this.
    _ID_TABLE = ""

    def __init__(self, usuario: str):
        """:param usuario: one of: gabriel, pablo, manuel or matias."""
        self.usuario = usuario
        self.ruta_base = schema[usuario]
        self._relative_paths = relative_paths
        self.ruta_parcial = self._get_ruta_parcial()
        self.ruta_tabla = self._get_ruta_tabla()

    def _get_ruta_parcial(self):
        # No table id -> no relative path component.
        if self._ID_TABLE == "":
            return ""
        return self._relative_paths[self._ID_TABLE]

    def _get_ruta_tabla(self):
        # Full path = base dir, plus the relative component when present.
        if self._ID_TABLE == "":
            return self.ruta_base
        return "{}/{}".format(self.ruta_base, self.ruta_parcial)

    def read(self):
        """Read the table (as a "view") into a DataFrame."""
        return pd.read_csv(self.ruta_tabla)

    def write(self, df):
        """Write DataFrame *df* into the table as CSV (no index column)."""
        return df.to_csv(self.ruta_tabla, index=False)
|
[
"matiasluis.avila@bbvadata.com"
] |
matiasluis.avila@bbvadata.com
|
f2c5f33f36f9726635338f174ba2502274d94595
|
2455c474528f2b73c7190fe2318453d32006bd30
|
/cogs/yags_youtube_message.py
|
aeda7e941c38334c8ef9029fb32cc7c45a02e004
|
[] |
no_license
|
fadedmax/Apple.Py
|
1a6f818eb6d82611cbfb080609ca8103581db6e9
|
454a21afb33db5acc06e939caec8e545d762142e
|
refs/heads/master
| 2023-02-06T02:36:16.970248
| 2021-01-02T01:07:46
| 2021-01-02T01:07:46
| 305,582,687
| 0
| 1
| null | 2020-11-02T10:32:35
| 2020-10-20T03:46:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,009
|
py
|
import discord
import voxelbotutils as utils
class YagsYoutubeMessage(utils.Cog):
    # Hard-coded Discord IDs for the single guild this cog serves.
    GUILD_ID = 208895639164026880
    YAGS_USER_ID = 204255221017214977
    YOUTUBE_UPDATE_ROLE = 731597271690510346

    @utils.Cog.listener()
    async def on_message(self, message:discord.Message):
        """Send a message to people who subscribe via Upgrade.chat"""
        # Only react to the YAGs bot's upload announcements in our guild.
        if message.guild is None or message.guild.id != self.GUILD_ID:
            return
        if message.author.id != self.YAGS_USER_ID:
            return
        if "uploaded a new youtube video" not in message.content:
            return
        # Temporarily make the role mentionable so the ping is delivered,
        # repost the announcement with the mention prepended, delete the
        # original, publish the repost, then restore the mentionable flag.
        role = message.guild.get_role(self.YOUTUBE_UPDATE_ROLE)
        mentionable = role.mentionable
        await role.edit(mentionable=True)
        m = await message.channel.send(f'{role.mention} ' + message.content)
        await message.delete()
        await m.publish()
        await role.edit(mentionable=mentionable)
def setup(bot:utils.Bot):
    """Extension entry point: register this cog on the bot."""
    bot.add_cog(YagsYoutubeMessage(bot))
|
[
"callum@voxelfox.co.uk"
] |
callum@voxelfox.co.uk
|
89db34097f928f54d80cd16ac5675fa925277241
|
55d4c531c6e0454dd5c4d1d5c93e01df4c6c442a
|
/train.py
|
756d04c2276d2c3c367faac4d879a674bfb56876
|
[
"MIT"
] |
permissive
|
freefly518/facenet-tf2
|
6c5c3345e85d92eb812b70f4c02de7b9978892bf
|
b7febab7c235f71a5d9594416916060cb29878d2
|
refs/heads/main
| 2023-07-26T04:38:00.146469
| 2021-09-05T06:36:08
| 2021-09-05T06:36:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,882
|
py
|
from functools import partial
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard
from tensorflow.keras.optimizers import Adam
from nets.facenet import facenet
from nets.facenet_training import FacenetDataset, triplet_loss
from utils.callbacks import (ExponentDecayScheduler, LFW_callback,
ModelCheckpoint)
from utils.LFWdataset import LFWDataset
from utils.utils_fit import fit_one_epoch
def get_num_classes(annotation_path):
    """Count the identity classes listed in the annotation file.

    Each line is '<label>;<image path>'. Labels are assumed to be
    0-indexed, so the class count is max(label) + 1.
    """
    with open(annotation_path) as handle:
        rows = handle.readlines()
    labels = [int(row.split(";")[0]) for row in rows]
    return np.max(labels) + 1
# Enable memory growth on every visible GPU so TensorFlow allocates GPU
# memory on demand instead of grabbing it all at startup.
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
if __name__ == "__main__":
    #----------------------------------------------------#
    #   Whether to train in eager mode.
    #----------------------------------------------------#
    eager = False
    #--------------------------------------#
    #   Input image size.
    #--------------------------------------#
    # input_shape = [112,112,3]
    input_shape = [160,160,3]
    #--------------------------------------#
    #   Backbone feature-extraction network:
    #   mobilenet
    #   inception_resnetv1
    #--------------------------------------#
    backbone = "mobilenet"
    #----------------------------------------------------#
    #   Get the image paths and labels.
    #----------------------------------------------------#
    annotation_path = "./cls_train.txt"
    num_classes = get_num_classes(annotation_path)
    model = facenet(input_shape, num_classes, backbone=backbone, mode="train")
    #------------------------------------------------------------------------------------#
    #   See the README for the weight file (Baidu Netdisk download).
    #   Pretrained weights are necessary in 99% of cases: without them the
    #   initial weights are too random, feature extraction is poor, and
    #   training results suffer. Pretrained backbone weights transfer
    #   across datasets because the extracted features are generic.
    #------------------------------------------------------------------------------------#
    model_path = "model_data/facenet_mobilenet.h5"
    model.load_weights(model_path, by_name=True, skip_mismatch=True)
    #-------------------------------------------------------------------------------#
    #   Training callbacks:
    #   - tensorboard logs go to `logs/`
    #   - checkpoint_period controls weight saving (period = every N epochs)
    #   - reduce_lr sets the learning-rate decay schedule
    #   - early_stopping ends training when val_loss stops improving
    #     (i.e. the model has essentially converged)
    #-------------------------------------------------------------------------------#
    checkpoint_period = ModelCheckpoint('logs/ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
                                        monitor='val_loss', save_weights_only=True, save_best_only=False, period=1)
    reduce_lr = ExponentDecayScheduler(decay_rate = 0.92, verbose = 1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
    tensorboard = TensorBoard(log_dir='logs/')
    #----------------------#
    #   LFW evaluation.
    #----------------------#
    test_loader = LFWDataset(dir="./lfw", pairs_path="model_data/lfw_pair.txt", batch_size=32, image_size=input_shape)
    lfw_callback = LFW_callback(test_loader)
    #-------------------------------------------------------#
    #   5% of the data for validation, 95% for training.
    #-------------------------------------------------------#
    val_split = 0.05
    with open(annotation_path) as f:
        lines = f.readlines()
    # Fixed seed so the train/val split is reproducible across runs.
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines)*val_split)
    num_train = len(lines) - num_val

    # Number of backbone layers to freeze during the first stage.
    if backbone=="mobilenet":
        freeze_layer = 81
    elif backbone=="inception_resnetv1":
        freeze_layer = 440
    else:
        raise ValueError('Unsupported backbone - `{}`, Use mobilenet, inception_resnetv1.'.format(backbone))

    for i in range(freeze_layer):
        model.layers[i].trainable = False
    # ------------------------------------------------------#
    #   Backbone features are generic; freezing them speeds up training
    #   and also protects the weights from being destroyed early on.
    #   Init_Epoch   = starting epoch
    #   Freeze_Epoch = number of frozen-backbone epochs
    #   Epoch        = total training epochs
    #   Reduce Batch_size if you hit OOM / insufficient GPU memory.
    # ------------------------------------------------------#
    if True:
        # Stage 1: frozen backbone, larger batch, higher learning rate.
        Batch_size = 64
        Lr = 1e-3
        Init_epoch = 0
        Freeze_epoch = 50

        epoch_step = num_train // Batch_size
        epoch_step_val = num_val // Batch_size

        if epoch_step == 0 or epoch_step_val == 0:
            raise ValueError('数据集过小,无法进行训练,请扩充数据集。')

        train_dataset = FacenetDataset(input_shape, lines[:num_train], num_train, num_classes, Batch_size)
        val_dataset = FacenetDataset(input_shape, lines[num_train:], num_val, num_classes, Batch_size)

        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, Batch_size))
        if eager:
            # Manual training loop with an exponential LR schedule.
            gen = tf.data.Dataset.from_generator(partial(train_dataset.generate), (tf.float32, tf.float32))
            gen_val = tf.data.Dataset.from_generator(partial(val_dataset.generate), (tf.float32, tf.float32))

            gen = gen.shuffle(buffer_size = Batch_size).prefetch(buffer_size = Batch_size)
            gen_val = gen_val.shuffle(buffer_size = Batch_size).prefetch(buffer_size = Batch_size)

            lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
                initial_learning_rate = Lr, decay_steps = epoch_step, decay_rate=0.92, staircase=True)
            optimizer = tf.keras.optimizers.Adam(learning_rate = lr_schedule)

            for epoch in range(Init_epoch, Freeze_epoch):
                fit_one_epoch(model, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val,
                              Freeze_epoch, triplet_loss(batch_size=Batch_size), test_loader)
        else:
            # Graph-mode training: combined triplet + softmax losses.
            model.compile(
                loss={
                    'Embedding' : triplet_loss(batch_size=Batch_size),
                    'Softmax' : "categorical_crossentropy",
                },
                optimizer = Adam(lr=Lr),
                metrics={
                    'Softmax' : 'accuracy'
                }
            )
            model.fit_generator(
                train_dataset,
                steps_per_epoch = epoch_step,
                validation_data = val_dataset,
                validation_steps = epoch_step_val,
                epochs = Freeze_epoch,
                initial_epoch = Init_epoch,
                callbacks = [checkpoint_period, reduce_lr, early_stopping, tensorboard, lfw_callback]
            )

    # Unfreeze the backbone for stage 2 fine-tuning.
    for i in range(freeze_layer):
        model.layers[i].trainable = True

    if True:
        # Stage 2: whole network trainable, smaller batch, lower LR.
        Batch_size = 32
        Lr = 1e-4
        Freeze_epoch = 50
        Epoch = 100

        epoch_step = num_train // Batch_size
        epoch_step_val = num_val // Batch_size

        if epoch_step == 0 or epoch_step_val == 0:
            raise ValueError('数据集过小,无法进行训练,请扩充数据集。')

        train_dataset = FacenetDataset(input_shape, lines[:num_train], num_train, num_classes, Batch_size)
        val_dataset = FacenetDataset(input_shape, lines[num_train:], num_val, num_classes, Batch_size)

        if eager:
            gen = tf.data.Dataset.from_generator(partial(train_dataset.generate), (tf.float32, tf.float32))
            gen_val = tf.data.Dataset.from_generator(partial(val_dataset.generate), (tf.float32, tf.float32))

            gen = gen.shuffle(buffer_size = Batch_size).prefetch(buffer_size = Batch_size)
            gen_val = gen_val.shuffle(buffer_size = Batch_size).prefetch(buffer_size = Batch_size)

            lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
                initial_learning_rate = Lr, decay_steps = epoch_step, decay_rate=0.92, staircase=True)
            optimizer = tf.keras.optimizers.Adam(learning_rate = lr_schedule)

            for epoch in range(Freeze_epoch, Epoch):
                fit_one_epoch(model, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val,
                              Freeze_epoch, triplet_loss(batch_size=Batch_size), test_loader)
        else:
            model.compile(
                loss={
                    'Embedding' : triplet_loss(batch_size=Batch_size),
                    'Softmax' : "categorical_crossentropy",
                },
                optimizer = Adam(lr=Lr),
                metrics={
                    'Softmax' : 'accuracy'
                }
            )
            print('Train with batch size {}.'.format(Batch_size))
            model.fit_generator(
                train_dataset,
                steps_per_epoch = epoch_step,
                validation_data = val_dataset,
                validation_steps = epoch_step_val,
                epochs = Epoch,
                initial_epoch = Freeze_epoch,
                callbacks = [checkpoint_period, reduce_lr, early_stopping, tensorboard, lfw_callback]
            )
|
[
"noreply@github.com"
] |
freefly518.noreply@github.com
|
04444af37f384c2e6e1ca403a162c4a2e52aa8c9
|
de5ed11c9a33eb732fda7130ad8d2f5bc0f15929
|
/carla_rllib/utils/spectators.py
|
ec9171dca129067162119dcf4ee5adb4c35de548
|
[
"MIT"
] |
permissive
|
50sven/carla_rllib
|
f6eeb44193eb1e88e3e1a37d303b7f6ea63b5998
|
b5f8bd4de884e591f80a7148db60362882af5b37
|
refs/heads/master
| 2020-12-26T08:40:19.701538
| 2020-06-05T08:49:20
| 2020-06-05T08:49:20
| 237,451,207
| 0
| 0
|
MIT
| 2020-01-31T15:36:16
| 2020-01-31T14:53:18
|
Python
|
UTF-8
|
Python
| false
| false
| 8,133
|
py
|
"""Spectator
This script allows the user to operate a spectator camera.
It enables to switch between all agents in the environment.
"""
import time
import os
import queue
import numpy as np
import pygame
import carla
from carla import ColorConverter as cc
from pygame.locals import K_ESCAPE
from pygame.locals import K_LEFT
from pygame.locals import K_RIGHT
from pygame.locals import K_q
class ActorSpectator(object):
    """Pygame window that follows one of the "Agent" vehicles in a CARLA
    world with an attached RGB camera; left/right arrow keys switch
    between vehicles.
    """

    def __init__(self, world, args, integrated=False, recording=False, record_path="~/no_backup/Images/"):
        # world:      carla.World to observe.
        # args:       parsed CLI args (width/height/fov/gamma/location/rotation).
        # integrated: True when a surrounding loop already ticks the world.
        # recording:  if True, every rendered frame is saved under record_path.
        self.world = world
        self.sensor = None
        self.integrated = integrated
        # Camera images arrive here asynchronously via sensor.listen().
        self.queue = queue.Queue()
        self.width = args.width
        self.height = args.height
        self.fov = args.fov
        self.gamma = args.gamma
        self.location = args.location  # (x, y, z) offset relative to the vehicle
        self.rotation = args.rotation  # (yaw, pitch, roll)
        self.surface = None
        self.hud = None  # NOTE(review): never used anywhere in this class
        self.index = 0  # index into self.actors of the followed vehicle
        self.recording = recording
        self.record_path = record_path
        self.file_num = 0
        self._initialize_world()
        self._initialize_pygame()
        self._initialize_blueprint()
        self._set_camera(self.index)

    def _initialize_world(self):
        """Enable rendering if needed and collect all "Agent" vehicles."""
        # Wait for tick
        if not self.integrated:
            self.world.wait_for_tick(5.0)
        # Enable rendering if not yet done
        self.settings = self.world.get_settings()
        if self.settings.no_rendering_mode:
            _ = self.world.apply_settings(carla.WorldSettings(
                no_rendering_mode=False,
                synchronous_mode=self.settings.synchronous_mode,
                fixed_delta_seconds=self.settings.fixed_delta_seconds))
        # Get all agents (vehicles whose role_name contains "Agent"),
        # sorted by id so the arrow-key cycling order is stable.
        self.actors = [actor
                       for actor in self.world.get_actors().filter("vehicle.*")
                       if "Agent" in actor.attributes["role_name"]]
        self.actors = sorted(self.actors, key=lambda x: x.id)

    def _initialize_pygame(self):
        """Initializes the pygame window"""
        pygame.init()
        self.display = pygame.display.set_mode(
            (self.width, self.height),
            pygame.HWSURFACE | pygame.DOUBLEBUF)

    def _initialize_blueprint(self):
        """Initializes the camera blueprint"""
        self.bp = self.world.get_blueprint_library().find('sensor.camera.rgb')
        self.bp.set_attribute('image_size_x', str(self.width))
        self.bp.set_attribute('image_size_y', str(self.height))
        self.bp.set_attribute('fov', str(self.fov))
        if self.bp.has_attribute('gamma'):
            self.bp.set_attribute('gamma', str(self.gamma))

    def _set_camera(self, index):
        """Sets the camera sensor"""
        # Wrap around so cycling past either end is safe.
        index = index % len(self.actors)
        # Destroy the previous camera before attaching a new one.
        if self.sensor is not None:
            self.sensor.destroy()
            self.surface = None
        self.sensor = self.world.spawn_actor(self.bp, carla.Transform(
            carla.Location(x=self.location[0],
                           y=self.location[1],
                           z=self.location[2]),
            carla.Rotation(yaw=self.rotation[0],
                           pitch=self.rotation[1],
                           roll=self.rotation[2])),
            attach_to=self.actors[index],
            attachment_type=carla.AttachmentType.Rigid)
        self.sensor.listen(self.queue.put)
        self.index = index

    def render(self, frame, image=None):
        """Renders a spectator window and allows to display net inputs

        Parameters:
        ----------
            frame: int
                current frame to retrieve correct image
            image: numpy.ndarray
                net_input to be displayed
        """
        # Render spectator window
        array = self._retrieve_data(frame)
        self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
        if self.surface is not None:
            self.display.blit(self.surface, (0, 0))
        # Render net input(s) as an overlay in the corner of the window.
        if image is not None:
            image = image[:, :, 0] if image.shape[-1] == 1 else image
            self.surface_input = pygame.surfarray.make_surface(
                image.swapaxes(0, 1))
            self.display.blit(self.surface_input, (20, 444))
        pygame.display.flip()
        # Save pygame display if you want
        if self.recording:
            self.file_num += 1
            filename = os.path.expanduser(
                self.record_path + "image_%04d.png" % self.file_num)
            pygame.image.save(self.display, filename)

    def parse_events(self):
        """Parse the keyboard inputs"""
        # Returns 1 on quit/ESC, 2 on 'q'; falls through (None) otherwise.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return 1
            elif event.type == pygame.KEYUP:
                if event.key == K_ESCAPE:
                    return 1
                if event.key == K_q:
                    return 2
        if any(x != 0 for x in pygame.key.get_pressed()):
            self._parse_keys(pygame.key.get_pressed())

    def _parse_keys(self, keys):
        """Controls the camera focus"""
        prev_index = self.index
        if keys[K_RIGHT]:
            self.index += 1
        if keys[K_LEFT]:
            self.index -= 1
        if prev_index != self.index:
            self._set_camera(self.index)
            # Debounce so one key press moves exactly one vehicle.
            time.sleep(0.3)

    def _retrieve_data(self, frame):
        """Returns the image data"""
        # Drain the queue until the image matching this tick's frame id
        # arrives; fall back to a black image on timeout.
        # NOTE(review): bare except swallows all errors, and the fallback
        # shape (1024, 720, 3) ignores the configured width/height —
        # confirm intended.
        while True:
            try:
                image = self.queue.get(timeout=0.5)
                if image.frame == frame:
                    self.image = self._preprocess_data(image)
                    return self.image
            except:
                return np.zeros((1024, 720, 3))

    def _preprocess_data(self, image):
        """Process and returns the image data"""
        # BGRA raw buffer -> RGB numpy array (drop alpha, reverse channels).
        image.convert(cc.Raw)
        array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
        array = np.reshape(array, (image.height, image.width, 4))
        array = array[:, :, :3]
        array = array[:, :, ::-1]
        return array

    def destroy(self):
        """Destroys the camera sensor and quits pygame"""
        self.sensor.destroy()
        pygame.quit()
        # Restore the world settings captured in _initialize_world().
        _ = self.world.apply_settings(self.settings)
if __name__ == "__main__":
    import argparse
    argparser = argparse.ArgumentParser(
        description='CARLA Spectator')
    argparser.add_argument(
        '--host',
        metavar='H',
        default='127.0.0.1',
        help='IP of the host server (default: 127.0.0.1)')
    argparser.add_argument(
        '--port',
        metavar='P',
        default=2000,
        type=int,
        help='TCP port to listen to (default: 2000)')
    argparser.add_argument(
        '--res',
        metavar='WIDTHxHEIGHT',
        default='1280x720',
        help='window resolution (default: 1280x720)')
    argparser.add_argument(
        '--fov',
        metavar='FOV',
        default=100.0,
        type=float,
        help='Field of camera view (default: 100.0)')
    argparser.add_argument(
        '--gamma',
        metavar='GAMMA',
        default=2.2,
        type=float,
        help='Gamma correction of the camera (default: 2.2)')
    argparser.add_argument(
        '--location',
        metavar='LOCATION',
        nargs='+',
        default=[-8.0, 0.0, 6.0],
        type=float,
        help='Position of the camera (x, y, z) (default: -8.0 0.0 6.0)')
    argparser.add_argument(
        '--rotation',
        metavar='ROTATION',
        nargs='+',
        default=[0.0, -30.0, 0.0],
        type=float,
        help='Rotation of the camera (yaw, pitch, roll) (default: 0.0 -30.0 0.0)')
    args = argparser.parse_args()
    # Split '1280x720' into integer width/height used by ActorSpectator.
    args.width, args.height = [int(x) for x in args.res.split('x')]
    # NOTE(review): if the client connection or ActorSpectator constructor
    # raises, `spectator` is unbound and the finally block itself raises.
    try:
        client = carla.Client(args.host, args.port)
        client.set_timeout(5.0)
        world = client.get_world()
        spectator = ActorSpectator(world, args)
        # Main loop: tick, handle keyboard, render the followed vehicle.
        while True:
            snapshot = world.wait_for_tick(10.0)
            if spectator.parse_events():
                break
            spectator.render(snapshot.frame)
    finally:
        spectator.destroy()
|
[
"sven.mueller92@gmx.de"
] |
sven.mueller92@gmx.de
|
d3b0c86722a9f79458ce6e73576156a377a39788
|
d970993d08d5a40c6ddaec66ff4a21c7f1929624
|
/myjinja02.py
|
80aebeae6339cc673558e6109f0c9286228b3678
|
[] |
no_license
|
ccmiller214/python_api_class
|
59d0f7a3c236476c7897905b6303bcc85cab722c
|
a284a3d91dfe560a39e85e806d4887be7ea817d6
|
refs/heads/master
| 2020-06-26T03:58:21.185736
| 2019-08-02T18:37:43
| 2019-08-02T18:37:43
| 199,519,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
#!/usr/bin/python3
# Minimal Flask demo: a static landing page plus a personalised greeting.
from flask import Flask
from flask import render_template

app = Flask(__name__)

@app.route("/")
def index():
    # Static landing page.
    return render_template("hellobasic.html")

@app.route("/<username>")
def userbuilder(username):
    # Greeting personalised with the URL path segment.
    return render_template("hellobasic2.html",name = username)

if __name__ == "__main__":
    app.run(port=5006)
|
[
"ccmiller214@gmail.com"
] |
ccmiller214@gmail.com
|
86533b2ee92b41abb4a1952a9198a001a68dcce3
|
1ded6c4aeeee677925d3a951b2c85b4f3e8cb772
|
/Python自动化开发/day作业/03/1.py
|
c81aafba7a85b3d76e33f8729648e8ea3d54f575
|
[] |
no_license
|
zhangyu-yaoshen/Python
|
90ec2aafcfaeabcdf2df66688be2d27e7062a021
|
be7d3e5cc80d4a961fc0fe44e4dbafe318e7fdec
|
refs/heads/master
| 2021-01-18T16:37:51.692730
| 2019-09-16T00:49:51
| 2019-09-16T00:49:51
| 100,464,481
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,466
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
# Part 1: SQL parsing
def sql_parse(sql):
    """Dispatch a raw SQL string to the parser for its leading keyword.

    Returns the parsed command dict, or '' when the first word is not a
    supported command (insert/delete/update/select).
    """
    dispatch = {
        'insert': insert_parse,
        'delete': delete_parse,
        'update': update_parse,
        'select': select_parse,
    }
    tokens = sql.split(' ')
    parser = dispatch.get(tokens[0])
    return parser(tokens) if parser is not None else ''
def insert_parse(sql_l):
    """Parse an INSERT token list into its command dict (executor: insert)."""
    skeleton = {'func': insert, 'insert': [], 'into': [], 'values': []}
    return handle_parse(sql_l, skeleton)
def delete_parse(sql_l):
    """Parse a DELETE token list into its command dict (executor: delete)."""
    skeleton = {'func': delete, 'delete': [], 'from': [], 'where': []}
    return handle_parse(sql_l, skeleton)
def update_parse(sql_l):
    """Parse an UPDATE token list into its command dict (executor: update)."""
    skeleton = {'func': update, 'update': [], 'from': [], 'where': [], 'set': []}
    return handle_parse(sql_l, skeleton)
def select_parse(sql_l):
    """Parse a SELECT token list into its command dict (executor: select)."""
    skeleton = {'func': select, 'select': [], 'from': [], 'where': [], 'limit': []}
    return handle_parse(sql_l, skeleton)
def handle_parse(sql_l,sql_dic):
    """
    Perform the SQL parse: fill sql_dic's keyword lists from the token
    list and return sql_dic.

    Walks the tokens toggling `tag`: a token matching a key of sql_dic
    starts collecting into that key; the next key token switches buckets.
    :param sql_l: list of whitespace-split SQL tokens
    :param sql_dic: command skeleton dict from the *_parse helpers
    :return: the same dict, populated (with 'where' further tokenized)
    """
    tag=False
    for item in sql_l:
        # A keyword while collecting ends the current bucket...
        if tag and item in sql_dic:
            tag=False
        # ...and immediately starts a new one.
        if not tag and item in sql_dic:
            tag=True
            key=item
            continue
        if tag:
            sql_dic[key].append(item)
    # Post-process the raw where tokens into [field, op, value] triples
    # interleaved with and/or/not.
    if sql_dic.get('where'):
        sql_dic['where']=where_parse(sql_dic.get('where'))  #['id>4,'and','id<10']
    return sql_dic
# Assemble the raw WHERE tokens into condition strings
def where_parse(where_l):  # e.g. ['not','id>','4',' ','and','id','<10']
    """Group WHERE tokens into [field, op, value] triples split by the
    logical keywords and/or/not, e.g.
    ['id>','4','and','id<10'] -> [['id','>','4'],'and',['id','<','10']].
    """
    res=[]
    key=['and','or','not']
    char=''
    for i in where_l:
        if len(i)==0:continue
        if i in key:
            # i is a logical operator: flush the accumulated expression
            # (if any) before appending the operator itself.
            if len(char)!=0:
                char=three_parse(char)
                res.append(char)  # char='id>4' --> char=['id','>','4']
            res.append(i)
            char=''
        else:
            char+=i  # keep concatenating pieces of one comparison
    else:
        # for-else: flush the trailing expression after the loop ends.
        char = three_parse(char)
        res.append(char)
    return res
def three_parse(exp_str):
    """Split one comparison string into a [field, operator, value] triple.

    'id<=10' -> ['id', '<=', '10'].  A string containing no comparison
    character is treated as a LIKE expression: 'namelikebob' ->
    ['name', 'like', 'bob'].
    """
    operators = [">", "<", "="]
    tokens = []
    buf = ''       # accumulates the field, then the value
    op = ''        # accumulates the (possibly two-char) operator
    in_op = False  # True while scanning operator characters
    for ch in exp_str:
        if ch in operators:
            in_op = True
            if len(buf) != 0:
                # First operator char: the field name is complete.
                tokens.append(buf)
                buf = ''
            op += ch
        if not in_op:
            buf += ch
        if in_op and ch not in operators:
            # First char after the operator: emit it and start the value.
            in_op = False
            tokens.append(op)
            op = ' '
            buf += ch
    else:
        # for-else: flush the trailing value (or the whole LIKE string).
        tokens.append(buf)
    # No operator found -> interpret as '<field>like<value>'.
    if len(tokens) == 1:
        tokens = tokens[0].split('like')
        tokens.insert(1, 'like')
    return tokens
# Part 2: SQL execution
def sql_action(sql_dic):
    """Run the executor stored under 'func' on the parsed command dict.

    Dispatches to the concrete insert/delete/update/select function.
    """
    executor = sql_dic.get('func')
    return executor(sql_dic)
def insert(sql_dic):
    """Append a new record to the table file named by sql_dic['into'].

    The new record's id is the last record's id + 1; the remaining fields
    come verbatim from sql_dic['values'].
    Returns [['insert successful']].
    """
    db, table = sql_dic.get('into')[0].split('.')
    with open("%s/%s" % (db, table), 'ab+') as fh:
        # Read the last line of the file by seeking backwards from the
        # end, doubling the offset until at least two lines are read.
        offs = -100
        while True:
            fh.seek(offs, 2)
            lines = fh.readlines()
            if len(lines) > 1:
                last = lines[-1]
                break
            offs *= 2
        last = last.decode(encoding='utf-8')
        # Auto-increment: new id = last id + 1.
        last_id = int(last.split(',')[0])
        new_id = last_id + 1
        # e.g.: insert into dbl.emp values <name>,30,18234561234,<dept>,2007-8-1
        record = sql_dic.get('values')[0].split(',')
        record.insert(0, str(new_id))
        record_str = ','.join(record) + '\n'  # list --> csv line
        fh.write(bytes(record_str, encoding='utf-8'))
        fh.flush()
    return [['insert successful']]
def delete(sql_dic):
    """Delete the records matching the WHERE clause from the table file.

    Copies non-matching lines to a backup file, then atomically swaps it
    in place of the original. Returns [[count], ['delete successful']].
    """
    db, table = sql_dic.get('from')[0].split('.')
    bak_file = table + 'bak'
    with open("%s/%s" % (db, table), 'r', encoding='utf-8')as r_file, \
            open("%s/%s" % (db, bak_file), "w", encoding='utf-8')as w_file:
        del_count = 0
        for line in r_file:
            # Fixed schema shared by all executors in this module.
            title = "id,name,age,phone,dept,enroll_data"
            dic = dict(zip(title.split(','), line.split(',')))
            filter_res = logic_action(dic, sql_dic.get('where'))
            # Keep only the lines that do NOT match the WHERE clause.
            if not filter_res:
                w_file.write(line)
            else:
                del_count += 1
        w_file.flush()
    os.remove("%s/%s" % (db, table))
    os.rename("%s/%s" % (db, bak_file), "%s/%s" % (db, table))
    return [[del_count], ['delete successful']]
def update(sql_dic):
    """Apply SET assignments to every record matching the WHERE clause.

    Rewrites the table through a backup file, like delete().
    Returns [[count], ['update successful']].
    """
    # update dbl.emp set name='sb' where id=1
    db, table = sql_dic.get('update')[0].split('.')
    # Parse "k1=v1,k2=v2" into [[k1, v1], [k2, v2]].
    # NOTE(review): `set` shadows the builtin; harmless here but easy to trip on.
    set = sql_dic.get('set')[0].split(',')
    set_l = []
    for i in set:
        set_l.append(i.split('='))
    bak_file = table + 'bak'
    with open("%s/%s" % (db, table), 'r', encoding='utf-8')as r_file, \
            open("%s/%s" % (db, bak_file), 'w', encoding="utf-8") as w_file:
        update_count = 0
        for line in r_file:
            title = "id,name,age,phone,dept,enroll_data"
            dic = dict(zip(title.split(','), line.split(',')))
            filter_res = logic_action(dic, sql_dic.get('where'))
            if filter_res:
                # Apply each assignment to the record dict...
                for i in set_l:
                    k = i[0]
                    v = i[-1].strip(",")
                    dic[k] = v
                # ...then rebuild the csv line in schema order.
                line = []
                for i in title.split(','):
                    line.append(dic[i])
                update_count += 1
                line = ','.join(line)
            w_file.write(line)
        w_file.flush()
    os.remove("%s/%s" % (db, table))
    os.rename("%s/%s" % (db, bak_file), "%s/%s" % (db, table))
    return [[update_count], ['update successful']]
def select(sql_dic):
    """Run a SELECT: filter by WHERE, then LIMIT, then project columns.

    Returns the (fields, records) tuple produced by search_action.
    NOTE(review): the table file handle is never explicitly closed.
    """
    print('from select sql_dic is %s'%sql_dic)
    db,table=sql_dic.get('from')[0].split('.')
    fh=open('%s/%s'%(db,table),'r',encoding='utf-8')
    # Stage 1: WHERE filtering.
    filter_res=where_action(fh,sql_dic.get('where'))
    # for record in filter_res:
    #     print('filter res is %s'%record)
    # Stage 2: LIMIT.
    limit_res=limit_action(filter_res,sql_dic.get('limit'))
    # for record in limit_res:
    #     print('limit res is %s'%record)
    # Stage 3: column projection (SELECT list).
    search_res=search_action(limit_res,sql_dic.get('select'))
    # for record in search_res:
    #     print('search res is %s'%record)
    return search_res
def limit_action(filter_res,limit_l):
    """Apply an optional LIMIT: keep only the first N filtered records.

    limit_l is either empty (no limit) or ['N'] with N as a string.
    """
    if not limit_l:
        return filter_res
    return filter_res[0:int(limit_l[0])]
def search_action(limit_res, select_l):
    """Project the filtered records onto the selected columns.

    :param limit_res: list of records (lists of field strings, schema order)
    :param select_l: ['*'] or ['col1,col2,...']
    :return: (fields, records) — the selected column names and the
             projected (whitespace-stripped) record values
    """
    title = "id,name,age,phone,dept,enroll_data"
    res = []
    if select_l[0] == '*':
        fields_l = title.split(',')
        res = limit_res
    else:
        # Bug fix: the original assigned the selected columns to a
        # misspelled local ('fileds_l') inside the loop, so the returned
        # field list was always empty for explicit column selections.
        # Also hoisted: the split is loop-invariant.
        fields_l = select_l[0].split(',')  # e.g. ['id', 'name']
        for record in limit_res:
            # Zip the fixed schema onto the record to look fields up by name.
            dic = dict(zip(title.split(','), record))
            res.append([dic[field].strip() for field in fields_l])
    return (fields_l, res)
def where_action(fh,where_l):
    """Filter the open table file *fh* by the parsed WHERE clause.

    :param fh: open text handle over the table file
    :param where_l: output of where_parse (triples + logic words)
    :return: list of matching records, each a list of field strings
    NOTE(review): with an empty WHERE the file is drained and nothing is
    returned — a bare SELECT therefore yields no rows; confirm intended.
    """
    # print('\033[41;1m%s\033[0m'%where_l)
    res=[]
    logic_l=['and','or','not']  # NOTE(review): unused
    title="id,name,age,phone,dept,enroll_data"
    if len(where_l) != 0:
        for line in fh:
            # Build one record as a dict keyed by the schema columns.
            dic=dict(zip(title.split(','),line.split(',')))
            # Logical evaluation of the WHERE clause against this record.
            logic_res=logic_action(dic,where_l)
            if logic_res:
                res.append(line.split(','))
    else:
        fh.readlines()
    return res
def logic_action(dic, where_l):
    """Evaluate the parsed WHERE clause against one record.

    :param dic: record as {column: string value}
    :param where_l: mix of [field, op, value] triples and 'and'/'or'/'not'
    :return: bool — whether the record satisfies the clause
    """
    parts = []
    for exp in where_l:
        if type(exp) is list:
            field, opt, target = exp
            # SQL '=' means Python '=='.
            if exp[1] == '=':
                opt = '%s=' % exp[1]
            if dic[field].isdigit():
                # Numeric column: compare as integers.
                left = int(dic[field])
                target = int(target)
            else:
                # String column: quote it so eval compares string literals.
                left = "'%s'" % dic[field].strip()
            if opt != 'like':
                # Evaluate e.g. "5>3" or "'bob'=='alex'" to 'True'/'False'.
                exp = str(eval("%s%s%s" % (left, opt, target)))
            else:
                # LIKE is plain substring containment.
                exp = "True" if target in left else "False"
        # Logic words pass through untouched; triples became 'True'/'False'.
        parts.append(exp)
    # e.g. ['True','or','False'] -> eval('True or False') -> True
    return eval(' '.join(parts))
# Main program: a tiny SQL REPL over the csv-file tables.
if __name__ == '__main__':
    while True:
        sql=input('sql>:').strip()
        if sql=='exit':break
        if len(sql) == 0: continue
        sql_dic=sql_parse(sql)
        # sql_parse returns '' for unsupported commands.
        if not sql_dic:
            print('输入非法,请重新输入!')
            continue
        res=sql_action(sql_dic)
        count = 0
        # res[-1] is the record list for selects, or a status message list.
        # NOTE(review): `tag` is only bound inside the loop, so an empty
        # res[-1] makes the `if tag:` below raise NameError; also `tag` is
        # reset to True every iteration, so the else branch is unreachable
        # for selects — confirm intended.
        for record in res[-1]:
            tag=True
            if 'select' in sql_dic and tag:
                print('符合条件的记录:%s'%record)
                count+=1
            else:
                tag=False
                print(record)
        if tag:
            print('一共查到记录%s条。' % count)
|
[
"494167883@qq.com"
] |
494167883@qq.com
|
117cbdf9377a4d39b7d4177b25af8c3cada3fe34
|
eb2633ab48c0d625f13d44cee9f67f3cf437fd25
|
/Projet 6/projet6_3_8.py
|
5ca27a03dc29f662ac68c6bacb2da4971c76e878
|
[] |
no_license
|
marecmat/Simulation-numerique
|
178f3a7c99fd27da9d071e6bc91b2537ea6d49c1
|
2959c345e4902f4f7a9efd1a2668c49df87976e1
|
refs/heads/master
| 2021-10-09T01:38:28.397862
| 2018-12-19T23:28:39
| 2018-12-19T23:28:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 802
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
# Load the recording: sample rate (Hz) and the raw samples.
rate, data = wavfile.read('Projet 6/PROJET6.3/chirp.wav')
# Time axis matching the samples, in seconds.
t1 = np.linspace(0, len(data)/rate, len(data))
# 2nd chirp occupies t = 1.75 --> 4 s
def find_nearest(array, value):
    """Return the element of *array* whose value is closest to *value*.

    Adapted from https://stackoverflow.com/questions/2566412.
    """
    arr = np.asarray(array)
    return arr[np.abs(arr - value).argmin()]
# Sample times closest to the chirp's start (1.75 s) and end (4 s).
debut = find_nearest(t1, 1.75)
fin = find_nearest(t1, 4)
# Recover the integer sample indices of those two times.
for i in range(len(t1)):
    if debut == t1[i]:
        i_dbt = i
    if fin == t1[i]:
        i_fin = i
# Slice out the chirp and reverse it in time.
chirp = data[i_dbt:i_fin]
reverse_chirp = chirp[::-1]
# NOTE(review): the sample rate is hard-coded to 44100 here; presumably it
# should be `rate` from the input file -- confirm.
wavfile.write('Projet 6/PROJET6.3/prihc.wav', 44100, reverse_chirp)
fig, ax = plt.subplots()
ax.plot(t1[i_dbt:i_fin], reverse_chirp)
ax.grid(True)
plt.show()
|
[
"oupsmajdsl@gmail.com"
] |
oupsmajdsl@gmail.com
|
6a19f05a1a3b2e3f50a4957a1dae14b715edae24
|
7fc0ec7a1e066e696b3151bea4c83d21574f1805
|
/profiles/migrations/0002_auto_20170301_1823.py
|
fc59530a25d169aca3aff73d09ebe11d0fab5ba6
|
[
"MIT"
] |
permissive
|
pyladieshre/pyladies
|
8dd2cd441f87fc25a377b43397b1a1d518346903
|
5cbea02a48ac64b1194d6329c5bc55183142f2ab
|
refs/heads/master
| 2021-01-11T18:51:19.056554
| 2018-01-12T01:12:06
| 2018-01-12T01:12:06
| 79,638,956
| 0
| 9
|
MIT
| 2020-10-01T02:48:39
| 2017-01-21T10:25:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,075
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-03-01 16:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration for the profiles app.

    Replaces `phone_number` with `contact_number`, adds `birth_date` and
    `location`, and adds length limits to `bio`.
    """
    dependencies = [
        ('profiles', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='phone_number',
        ),
        migrations.AddField(
            model_name='profile',
            name='birth_date',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='profile',
            name='contact_number',
            field=models.CharField(blank=True, max_length=16, null=True),
        ),
        migrations.AddField(
            model_name='profile',
            name='location',
            field=models.CharField(blank=True, max_length=30),
        ),
        migrations.AlterField(
            model_name='profile',
            name='bio',
            field=models.TextField(blank=True, max_length=500),
        ),
    ]
|
[
"amakarudze@gmail.com"
] |
amakarudze@gmail.com
|
dc75ff660a6fb321a4d206363f9563b6d1989a29
|
06e7c28feded88780ea8540d1ffbc1cd374643c6
|
/app/main/views.py
|
8eaeb982a58f6f909e226fb1a04a8b8de047270b
|
[] |
no_license
|
Niel2016/FlaskApp
|
16232c89b2394b8a4b6e921408765307922dc074
|
64cec2bd14bc8dab44369b8238132893f04729f8
|
refs/heads/master
| 2020-09-22T05:56:21.982752
| 2016-08-22T09:56:16
| 2016-08-22T09:56:16
| 65,982,291
| 0
| 0
| null | 2016-08-22T09:56:16
| 2016-08-18T09:05:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,080
|
py
|
from datetime import datetime
from flask import render_template, redirect, url_for, session
from . import main
from .. import db
from .forms import NameForm
from ..models import User, Role
from ..email import send_mail
@main.route('/', methods=['GET', 'POST'])
def home():
    """Landing page: greet the user by name and remember whether we know them.

    On a valid POST, creates the User row if the name is new (and sends a
    notification e-mail), then redirects back here (Post/Redirect/Get).
    """
    form = NameForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.name.data).first()
        if user is None:
            # First time we see this name: persist it and notify by e-mail.
            user = User(username=form.name.data)
            db.session.add(user)
            db.session.commit()
            session['known'] = False
            send_mail('christianyang@wistronits.com', 'New User', 'mail/new_user', user=user)
        else:
            session['known'] = True
        session['name'] = form.name.data
        form.name.data = ''
        # Redirect so a browser refresh does not re-submit the form.
        return redirect(url_for('main.home'))
    return render_template('index.html',
                            username=session.get('name'),
                            form=form,
                            known=session.get('known', False),
                            current_time=datetime.utcnow())
|
[
"montainlion2012.cn@gmail.com"
] |
montainlion2012.cn@gmail.com
|
d09ac153152c07aa6b0b24b4f8b0201f963fd8ad
|
ab49f8e9332d477fd6a188899856930872de7828
|
/Dennis/base/urls.py
|
20878b112d6034732cc9bf8abaa22ab4af980d1f
|
[] |
no_license
|
RodolfoMoralesTH/Discord-Clone
|
b7725b29985a4bd8538a71fbfe619a6dd252a151
|
8e9f408d5ee97e9691fa7704945b3766848940b6
|
refs/heads/master
| 2023-08-27T22:53:47.859157
| 2021-11-09T14:59:24
| 2021-11-09T14:59:24
| 425,206,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
from django.urls import path
from . import views
# URL routes for the base app: authentication pages, the home page,
# a room detail view, and the room create/update/delete views.
urlpatterns = [
    path('login/', views.loginPage, name='login'),
    path('logout/', views.logoutUser, name='logout'),
    path('register/', views.registerPage, name='register'),
    path('', views.home, name='home'),
    path('room/<str:pk>/', views.room, name='room'),
    path('create-room/', views.createRoom, name="create-room"),
    path('update-room/<str:pk>/', views.updateRoom, name='update-room'),
    path('delete-room/<str:pk>/', views.deleteRoom, name='delete-room')
]
|
[
"agnesmoralesth@gmail.com"
] |
agnesmoralesth@gmail.com
|
00dbdd43df691997b6371d541be8e7078d570941
|
d561af33f149261e3c75ffa69472f8fe6c01e0bc
|
/hickory/crawler/iextrading/stock_data_import.py
|
75e89ec11cd1edae569b4a110606427c8169f511
|
[] |
no_license
|
eggyolktech/hickoryStrats
|
0131e4053a37d3be2d8990b0371a3192c1f28f30
|
1ca632313d21ba785f0de5a61b88c330df7753c8
|
refs/heads/master
| 2021-01-19T21:22:04.196906
| 2018-05-24T15:27:04
| 2018-05-24T15:27:04
| 88,647,483
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,558
|
py
|
#!/usr/bin/python
from bs4 import BeautifulSoup
import requests
import locale
import json
import re
from hickory.util import stock_util
from hickory.db import stock_us_db
from datetime import datetime
import concurrent.futures
import random
import traceback
import logging
EL = "\n"
DEL = "\n\n"
TAG_RE = re.compile(r'<[^>]+>')
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
def remove_tags(text):
    """Strip everything matching the module-level TAG_RE regex (HTML/XML tags) from *text*."""
    return TAG_RE.sub('', text)
def chunks(l, n):
    """Yield successive slices of *l*, each at most *n* items long."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def download_data(symbol):
    """Fetch five years of daily chart data for *symbol* and save it as JSON.

    Writes the raw response body to
    /usr/local/data/iextrading/chart_<symbol>.json. Returns None.
    """
    surl = "https://api.iextrading.com/1.0/stock/%s/chart/5y" % symbol
    r = requests.get(surl, headers=headers)
    jsondata = r.text
    filename = '/usr/local/data/iextrading/chart_%s.json' % symbol
    with open(filename, 'w') as file:
        file.write(jsondata) # use `json.loads` to do the reverse
    print("%s downloaded" % filename)
def download_list():
    """Return the list of enabled IEX symbol records (dicts).

    NOTE(review): the result is truncated to the first 10 entries below --
    this looks like a leftover debugging limit; confirm before relying on it
    for a full-universe download.
    """
    url = "https://api.iextrading.com/1.0/ref-data/symbols"
    print("URL: [" + url + "]")
    # NOTE(review): quote_result is assigned but never used.
    quote_result = {}
    r = requests.get(url, headers=headers)
    jsondata = r.text
    data = json.loads(jsondata)
    # Keep only enabled symbols; entries containing '#' are filtered out
    # (presumably non-standard issues -- confirm).
    enabledList = [d for d in data if d['isEnabled'] == True and "#" not in d['symbol']]
    enabledList = enabledList[:10]
    print("Enabled List Length: %s" % len(enabledList))
    return enabledList
def main():
    """Download 5y chart data for every listed symbol via a small thread pool."""
    stocks = download_list()
    num_workers = 5
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
        # Map each future back to the stock record it is downloading.
        future_to_manage = {executor.submit(download_data, stock['symbol']): stock for stock in stocks}
        for future in concurrent.futures.as_completed(future_to_manage):
            code = future_to_manage[future]
            try:
                data = future.result()
            except Exception as exc:
                print('%r generated an exception: %s' % (code, exc))
                # BUG FIX: `code` is a dict, so the original
                # `" Error retrieving code: " + code` raised TypeError the
                # moment an exception was being logged. Use lazy %-style
                # logging arguments instead of string concatenation.
                logging.error("Error retrieving code: %s", code)
                logging.error(traceback.format_exc())
            else:
                if (data == False):
                    print('%r result is %s' % (code, data))
if __name__ == "__main__":
    main()
|
[
"eggyolktech@gmail.com"
] |
eggyolktech@gmail.com
|
a18135c296aebdbf4445f38d131edbd7571e458a
|
6f0fb2cec6c32ddd94fa97499b5aa77e43f583dc
|
/getml/placeholder.py
|
a0bee5c25aea3358da700fe0ad85f27f7102fa7f
|
[
"MIT"
] |
permissive
|
srnnkls/getml-python-api
|
22e84056e9e2efd8bf837844f74295a78bcb124f
|
032b2fec19a0e0a519eab480ee61e0d422d63993
|
refs/heads/master
| 2021-01-06T13:40:53.036206
| 2020-02-24T16:48:51
| 2020-02-24T16:48:51
| 241,345,467
| 0
| 0
|
MIT
| 2020-02-18T11:36:16
| 2020-02-18T11:36:15
| null |
UTF-8
|
Python
| false
| false
| 7,128
|
py
|
# Copyright 2019 The SQLNet Company GmbH
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ------------------------------------------------------------------------------
class Placeholder(object):
    """
    Table placeholder
    A Placeholder object represents a table in your data model without
    containing any actual data. They exist to provide a convenient way to
    define your data model. If the data is loaded directly into a
    :class:`~getml.engine.DataFrame` there is no need to specify the role of
    each column.
    Args:
        name (str): Name of the table, as it will appear in the generated SQL code.
        categorical (List[str], optional): Names of the columns that you want to be
            interpreted as categorical variables.
        discrete (List[str], optional): Names of the columns that you want to be
            interpreted as discrete variables.
        numerical (List[str], optional): Names of the columns that you want to
            be interpreted as numerical variables.
        join_keys (List[str], optional): Names of the columns that
            you want to be interpreted as join keys.
        time_stamps (List[str], optional): Names of the columns that
            you want to be interpreted as time stamps.
        targets (List[str], optional): Names of the target variables.
    """
    # -------------------------------------------------------------------------
    def __init__(
        self,
        name,
        categorical=None,
        discrete=None,
        numerical=None,
        join_keys=None,
        time_stamps=None,
        targets=None
    ):
        # All state lives in one plain dict mirroring the C++ representation.
        self.thisptr = dict()
        self.thisptr["name_"] = name
        self.thisptr["join_keys_used_"] = []
        self.thisptr["other_join_keys_used_"] = []
        self.thisptr["time_stamps_used_"] = []
        self.thisptr["other_time_stamps_used_"] = []
        self.thisptr["upper_time_stamps_used_"] = []
        self.thisptr["joined_tables_"] = []
        self.thisptr["categorical_"] = categorical or []
        self.thisptr["discrete_"] = discrete or []
        self.thisptr["numerical_"] = numerical or []
        self.thisptr["join_keys_"] = join_keys or []
        self.thisptr["targets_"] = targets or []
        self.thisptr["time_stamps_"] = time_stamps or []
        # Record this placeholder's creation rank; join() uses it to reject
        # joins that would create a circular dependency.
        self.num = Placeholder.num_placeholders
        Placeholder.num_placeholders += 1
    # -------------------------------------------------------------------------
    # Class-level counter of all Placeholder instances ever created.
    num_placeholders = 0
    # -------------------------------------------------------------------------
    def __find_peripheral(self, name):
        # Depth-first search of the joined-tables tree for the dict whose
        # "name_" matches *name*; returns None if not found.
        for table in self.thisptr["joined_tables_"]:
            if table["name_"] == name:
                return table
        for table in self.thisptr["joined_tables_"]:
            temp = Placeholder(table["name_"])
            temp.thisptr = table
            subtable = temp.__find_peripheral(name)
            if subtable is not None:
                return subtable
        return None
    # -------------------------------------------------------------------------
    def __replace_empty_strings(self):
        """
        C++ will represent empty arrays as empty strings. We need to fix that.
        """
        for key, value in self.thisptr.items():
            if key == "name_":
                continue
            if value == "":
                self.thisptr[key] = []
    # -------------------------------------------------------------------------
    def __repr__(self):
        # Debug representation: just show the underlying state dict.
        return self.thisptr.__repr__()
    # -------------------------------------------------------------------------
    def join(
        self,
        other,
        join_key,
        time_stamp,
        other_join_key=None,
        other_time_stamp=None,
        upper_time_stamp=None,
    ):
        """
        LEFT JOINS another Placeholder object onto this Placeholder object.
        Args:
            other (:class:`~getml.placeholder.Placeholder`): The other Placeholder.
            join_key (str): The name of the join key used for this Placeholder object.
            time_stamp (str): The name of the time stamp used for this Placeholder object.
            other_join_key (str, optional): The name of the join key used for
                the other Placeholder object. If None, then it will be assumed
                to be equal to *join_key*. Default: None.
            other_time_stamp (str): The name of the time stamp used for the
                other Placeholder object. If None, then it will be assumed to
                be equal to *time_stamp*. Default: None.
            upper_time_stamp (str): Optional additional time stamp in the
                joined table that will be used as an upper bound. This is
                useful for data that can cease to be relevant, such as address
                data after people have moved. Technically, this will add the
                condition (t1.time_stamp < t2.upper_time_stamp OR
                t2.upper_time_stamp IS NULL) to the feature. Default: None.
        """
        # Creation order encodes join direction; joining an older placeholder
        # onto a newer one could create a cycle, so it is rejected outright.
        if other.num <= self.num:
            raise Exception(
                "You cannot join a placeholder that was created before the placeholder it is joined to. " +
                "This is to avoid circular dependencies. Please reverse the order in which the placeholders '" +
                other.thisptr["name_"] + "' and '" + self.thisptr["name_"] + "' are created!")
        # Fall back to this table's key/stamp names when none are given.
        other_join_key = other_join_key or join_key
        other_time_stamp = other_time_stamp or time_stamp
        upper_time_stamp = upper_time_stamp or ""
        self.thisptr["join_keys_used_"].append(join_key)
        self.thisptr["other_join_keys_used_"].append(other_join_key)
        self.thisptr["time_stamps_used_"].append(time_stamp)
        self.thisptr["other_time_stamps_used_"].append(other_time_stamp)
        self.thisptr["upper_time_stamps_used_"].append(upper_time_stamp)
        self.thisptr["joined_tables_"].append(other.thisptr)
# ------------------------------------------------------------------------------
|
[
"patrick@sqlnet.ai"
] |
patrick@sqlnet.ai
|
9ff9ef555394dd9af0c6365a29d98a0803cc0f28
|
ecad3cb4d0347bc91f2d9bf0453349f953cb0182
|
/SE01_Dojo_Exercise-2-2_HannaBieri.py
|
627844729073db462f1c1417bd67065be82acfe5
|
[] |
no_license
|
hannabieri/SE01_Dojo
|
9d964e0c4dafe6e4f125c5615a8f7e02cfe43301
|
e1d0b6fa583b80c0ce8f2f4366303601cbc0d1f7
|
refs/heads/main
| 2023-01-19T12:53:36.512200
| 2020-12-01T17:27:38
| 2020-12-01T17:27:38
| 317,202,224
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,617
|
py
|
'''
This was my back-to-version-1 attempt of getting the game logic working.
However
'''
def create_beaker(beaker):
    """Print the three-slot contents of beakers 1-3 (beaker[0] is unused)."""
    for idx in (1, 2, 3):
        units = beaker[idx]
        print("Beaker %d:" % idx, units[0], "|", units[1], "|", units[2])
def parse_input(user_input):
    """Convert the raw input string into an integer beaker index."""
    return int(user_input)
def check_if_legal(move_from, move_into):
    """Check whether pouring from beaker *move_from* into *move_into* is legal.

    Operates on the module-level `beaker` list. Returns True for a legal
    move and (implicitly) None otherwise.

    NOTE(review): the elif branch below can never be taken -- it requires
    top == second-from-top, which the preceding if has already ruled out.
    NOTE(review): when the target beaker is fully empty, 2-count_empty_units
    is -1, so the colour check reads the bottom slot via negative indexing --
    confirm that is intended.
    """
    # Figure out how many units are free in the beaker to move into
    count_empty_units = beaker[move_into].count(0)
    # Figure out how many units are occupied in the beaker to move from
    count_full_units = beaker[move_from].count(1) + beaker[move_from].count(2)
    #Figure out if colors (numbers) match of both beakers by getting index of item on top
    if beaker[move_into][2-count_empty_units] == beaker[move_from][count_full_units-1]:
        # Check if there index next to it is the same value (move two units together)
        if beaker[move_from][count_full_units-1] == beaker[move_from][count_full_units-2]:
            # Check if there is more space than has to be moved
            if count_empty_units >= 2:
                return True
        # Check if there index next to it is the same value (move two three together)
        elif beaker[move_from][count_full_units-1] == beaker[move_from][count_full_units-2] == beaker[move_from][count_full_units-3]:
            # Check if there is more space than has to be moved
            if count_empty_units >= 3:
                return True
        # Move one unit
        else:
            # Check if there is more space than has to be moved
            if count_empty_units >= 1:
                return True
def play_game(beaker):
    """Run one (currently hard-coded) move of the Watersort game.

    Displays the beakers, parses the move, validates it with
    check_if_legal(), and reports the outcome.
    """
    create_beaker(beaker)
    # User input is temporarily hard-coded for debugging.
    #user_input_1 = input("\nEnter the beaker to move liquid from: ")
    user_input_1 = "3"
    #user_input_2 = input("Enter the beaker to move liquid into: ")
    user_input_2 = "1"
    # Calling function for input (sting) into int
    move_from = parse_input(user_input_1)
    move_into = parse_input(user_input_2)
    # BUG FIX: the original read `if check_if_legal(...) = False:` -- a
    # SyntaxError -- and compared against False even though check_if_legal()
    # returns None for an illegal move. `not ...` handles both.
    if not check_if_legal(move_from, move_into):
        print("\nAi, ai... Unfortunately, that move is not possible! :( \n")
    else:
        print("Hooray, legal move!")
        # BUG FIX: first_non_zero() was never defined; compute the index of
        # the first occupied slot in the source beaker inline instead.
        print(next(i for i, x in enumerate(beaker[move_from]) if x != 0))
# Beaker starting state, nested list------------------------
# Index 0 is a dummy so beakers are numbered 1..3; 0 marks an empty slot,
# 1 and 2 are the two liquid colours (each beaker holds three units).
beaker = [[], [1, 0, 0], [2, 1, 2], [2, 1, 0]]
# --------------- Welcome and initialize game ---------------
print("\n\nWelcome to the Watersort game!\n")
play_game(beaker)
|
[
"h.bieri@mbpr-hbieri.local"
] |
h.bieri@mbpr-hbieri.local
|
992fe6ae1eb2ae5f729204e5b8780b9ca27d14f6
|
17e9a8f87c83e5856f8c83882a1d19bc6c072ede
|
/display/data.py
|
ccd478fdf321a5fc57f0dbfc0cd0f69b7578a644
|
[] |
no_license
|
tobygaskell/hello_world
|
a778598ae0fd34b344daa3e5ce08d711f9912c8d
|
ea285118c749c3a013733bd93bc0fdabfa3f605a
|
refs/heads/master
| 2022-12-16T10:15:51.071795
| 2020-09-09T15:42:21
| 2020-09-09T15:42:21
| 292,222,939
| 0
| 0
| null | 2020-09-02T08:42:05
| 2020-09-02T08:22:03
| null |
UTF-8
|
Python
| false
| false
| 77
|
py
|
import json
def display_data(data):
    """Pretty-print *data* as JSON (indent=1) to stdout.

    The parameter was renamed from ``dict``, which shadowed the builtin type;
    positional callers are unaffected.
    """
    print(json.dumps(data, indent = 1))
|
[
"toby96@sky.com"
] |
toby96@sky.com
|
65093a6df3302f004eabd7153db265171358e010
|
91ae95209bad9fbdd9d0efe4c3dee2d1aa902068
|
/Pipe.py
|
95eada45ca54ed4175e7d4c1e1fd582f2ae14b39
|
[] |
no_license
|
carminechoi/Blappy-Fird
|
6f4ae45b53ed63723b6452e52c81705963cd5bc7
|
d7c5e99252902c487cbd7fbe700d02adec7b6004
|
refs/heads/master
| 2020-06-20T15:10:21.602873
| 2019-09-09T20:51:48
| 2019-09-09T20:51:48
| 197,160,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,871
|
py
|
# PIPE Class
import pygame
import random
import Constants
class Pipe:
    """A pair of pipes (upper + lower) with a gap, scrolling left each frame.

    NOTE(review): the rectangle size 52x320 presumably matches the pipe
    sprite dimensions -- confirm against the loaded images.
    """
    def __init__(self, win, lower_pipe, upper_pipe):
        self.lower_pipe = lower_pipe
        self.upper_pipe = upper_pipe
        self.pos_X = 350
        self.gap_height = Constants.PIPE_GAP
        self.pos_upper_Y = -200
        self.pos_lower_Y = self.pos_upper_Y + self.gap_height
        self.win = win
        self.pipe_timer = 0
        self.pipes = list()
        # Pick a random vertical position for this pipe pair.
        self.create_pipe()
    # check for collision using rectangles
    def is_collision(self, bird):
        lower_rect = pygame.Rect(self.pos_X, self. pos_lower_Y, 52, 320)
        upper_rect = pygame.Rect(self.pos_X, self. pos_upper_Y, 52, 320)
        # return true if is collision
        if lower_rect.colliderect(bird.flap_rect):
            return True
        elif upper_rect.colliderect(bird.flap_rect):
            return True
        else:
            return False
    # return a random number for pipe positions
    @staticmethod
    def get_random_number():
        number = random.randint(1, 11)
        return number
    def calculate_position(self, random_number):
        # Maps 1..11 onto upper-pipe Y positions -280..-80 in steps of 20.
        self.pos_upper_Y = random_number * 20 - 300
        self.pos_lower_Y = self.pos_upper_Y + self.gap_height
    def create_pipe(self):
        random_number = self.get_random_number()
        self.calculate_position(random_number)
    # move pipe to the left based on the SPEED
    def update_pipe(self):
        self.pos_X -= Constants.SPEED
    def draw_pipe(self):
        # for testing collision
        # pygame.draw.rect(self.win, (0, 0, 255), (self.pos_X, self.pos_lower_Y, 52, 320))
        # pygame.draw.rect(self.win, (0, 0, 255), (self.pos_X, self.pos_upper_Y, 52, 320))
        self.win.blit(self.upper_pipe, (self.pos_X, self.pos_upper_Y))
        self.win.blit(self.lower_pipe, (self.pos_X, self.pos_lower_Y))
|
[
"carminec@uci.edu"
] |
carminec@uci.edu
|
e1228a7eeadd50e92196f70b679938555cff3a44
|
98f05737c7bbf70351ef7977ae74696d980deac9
|
/PythonGame/classfile.py
|
1dfce75aad18532a4ab625476dcd7dc4326eb0cf
|
[] |
no_license
|
steelbath/gameMitPlebs
|
3b16adf6b19fb2044697538f2ad4c543e6cbb7a8
|
925f9bfe20ff25c12bde180956e0258ae9c75bc3
|
refs/heads/master
| 2020-12-29T09:25:18.450045
| 2020-02-21T17:26:47
| 2020-02-21T17:26:47
| 238,554,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,004
|
py
|
import pygame as pg
import GameFunctions as gf
import numpy as np
from GameFunctions import cmap
class GameSettings():
    """Container for static game configuration (window geometry and colours)."""
    def __init__(self):
        # Window size in pixels and the RGB background fill colour.
        width, height = 1024, 768
        self.screen_width = width
        self.screen_height = height
        self.bg_color = (50, 50, 50)
class Creature():
    """Base class for a movable sprite.

    Tracks a float position and per-frame speed; the sprite wraps around
    the edges of the 1024x768 playfield.
    """
    def __init__(self, screen, image, startpos=(0, 0), speed=(0.0, 0.0)):
        # BUG FIX: the defaults were mutable lists, and the speed default
        # ([0.0]) had only one element, so update() would raise IndexError
        # on speed[1]. Immutable two-element tuples fix both; the values are
        # copied into fresh lists below, so callers may pass lists or tuples.
        self.screen = screen
        self.image = image
        self.rect = self.image.get_rect()
        # position and movement
        self.pos = list(startpos)
        self.speed = list(speed)
        self.maxspeed = 2
        self.rect.centerx = startpos[0]
        self.rect.centery = startpos[1]
    def update(self):
        """Advance the sprite by its speed, wrapping at the screen edges."""
        if self.speed != [0, 0]:
            self.pos[0] += self.speed[0]
            self.pos[1] += self.speed[1]
            self.rect.centerx = int(self.pos[0])
            self.rect.centery = int(self.pos[1])
            # Wrap around the 1024x768 playfield.
            if self.rect.centerx > 1024:
                self.rect.centerx = 0
                self.pos[0] = 0
            elif self.rect.centerx < 0:
                self.rect.centerx = 1024
                self.pos[0] = 1024
            elif self.rect.centery > 768:
                self.rect.centery = 0
                self.pos[1] = 0
            elif self.rect.centery < 0:
                self.rect.centery = 768
                self.pos[1] = 768
    def blitme(self):
        """Draw the sprite at its current rect position."""
        self.screen.blit(self.image, self.rect)
class testmob(Creature):
    """A test mob that bounces off the screen edges and stamps its footprint
    into the shared collision map `cmap`.

    NOTE(review): channel 0 of cmap appears to be an occupancy flag and
    channel 1 the occupant's id (`self.ind`) -- confirm against
    GameFunctions.
    """
    def __init__(self,arrayindex, *args):
        # self.pos=[40,150]
        # self.speed =[2,0]
        # ids start at 1 so 0 can mean "empty" in the collision map
        self.ind=arrayindex+1
        super().__init__(*args)
        # self.rect=self.image.get_rect()
    def update(self):
        if self.speed != [0,0]:
            # Clear this mob's old footprint from the collision map.
            for i in range(self.rect.y,self.rect.y+self.rect.h):
                for j in range(self.rect.x,self.rect.x+self.rect.w):
                    cmap[i,j,0]=0
                    cmap[i,j,1]=0
            self.rect.centerx = int(self.pos[0])
            self.rect.centery = int(self.pos[1])
            # Bounce off the playfield edges by reversing the axis speed.
            if self.rect.right+self.speed[0] >= 1023 or self.rect.left+self.speed[0] <0:
                self.speed[0]*=-1
            elif self.rect.bottom+self.speed[1] >= 767 or self.rect.top+self.speed[1] <= 0:
                self.speed[1]*=-1
            self.pos[0]+=self.speed[0]
            self.pos[1]+=self.speed[1]
            # Stamp the new footprint, leaving already-occupied cells alone.
            for i in range(self.rect.y,self.rect.y+self.rect.h):
                for j in range(self.rect.x,self.rect.x+self.rect.w):
                    if cmap[i,j,0]==0:
                        cmap[i,j,0]=1
                        cmap[i,j,1]=self.ind
                    #else:print('walk into bullet')
class Player(Creature):
    """Player-controlled creature with acceleration-based movement and a
    fixed-size ring buffer of projectiles."""
    # MaxWalkSpeed (pixels per frame)
    MWS=3.5
    # Acceleration / drag applied per frame
    ACCEL=0.15
    def __init__(self, *args):
        # direction: per-axis input (-1/0/1); facing: last movement direction
        self.direction = [0, 0]
        self.facing = [0,-1]
        super().__init__(*args)
        # projectile ring buffer (100 slots of x, y, dx, dy) and write index
        self.projectiles = np.zeros((100,4),dtype=int)
        self.lb = 0
        self.shooting=0
        self.shoottickcount=0
        # tweakables
        self.shootspeed=3
        self.bulletspeed = 6
    def checkKeys(self):
        """Apply per-frame movement acceleration/drag, shooting, and update facing."""
        # Check movement input and apply drag/acceleration on each axis.
        for i in [0,1]:
            if not self.direction[i] and self.speed[i]:
                # No input on this axis but still moving: decelerate toward 0
                # and clamp so we never overshoot past zero.
                if self.speed[i] < 0:
                    self.speed[i] += self.ACCEL
                    if self.speed[i] > 0:
                        self.speed[i] = 0
                else:
                    self.speed[i] -= self.ACCEL
                    if self.speed[i] < 0:
                        self.speed[i] = 0
            elif self.direction[i] and (abs(self.speed[i]) < self.MWS or not gf.csign(self.direction[i],self.speed[i])):
                # Input present and not yet at max walk speed (or turning).
                if gf.csign(self.direction[i],self.speed[i]):
                    self.speed[i] += self.ACCEL*self.direction[i]
                    if abs(self.speed[i])>self.MWS:self.speed[i]=self.MWS*self.direction[i]
                else:
                    # Reversing direction: accelerate twice as hard.
                    self.speed[i] += 2*self.ACCEL*self.direction[i]
        # Shoot if the fire key is held, throttled by shootspeed ticks.
        if self.shooting:
            if self.shoottickcount==0:
                self.shoot(self.facing,self.pos)
                self.shoottickcount=self.shootspeed
            else: self.shoottickcount-=1
        # Update facing to the current movement direction.
        if self.direction[0]:self.facing[0]= self.direction[0]
        elif self.direction[1]:self.facing[0]=0
        if self.direction[1]:self.facing[1]= self.direction[1]
        elif self.direction[0]:self.facing[1]=0
    def shoot(self, sdirection, spos):
        """Spawn a projectile at *spos* moving along *sdirection*, reusing the
        oldest slot once the 100-entry buffer wraps around."""
        i=self.lb
        self.projectiles[i,0] = int (spos[0])
        self.projectiles[i,1] = int(spos[1])
        self.projectiles[i,2] = sdirection[0]*self.bulletspeed
        self.projectiles[i,3] = sdirection[1]*self.bulletspeed
        # Advance the ring-buffer index, wrapping from 99 back to 0.
        if not self.lb<99:self.lb=0
        else: self.lb+=1
|
[
"horizonation@gmx.de"
] |
horizonation@gmx.de
|
25e845e67303200ddfb11f671a2bfbd40396ac48
|
21b79105e8d6f6370cbbd80c75cb2bcdae6e7278
|
/migrations/versions/b4752d1e2173_.py
|
835db85b8f26cc054b16af31853ad6656406108f
|
[] |
no_license
|
rbarket/whereismyshelter
|
d6a4571ff48ccb71d3dff04a2387ea6b27a0340d
|
d8c609c1e5b6a751ceee33e1afb957e6bb1bcd48
|
refs/heads/master
| 2020-04-18T23:51:54.063446
| 2019-01-27T10:41:52
| 2019-01-27T10:41:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,162
|
py
|
"""empty message
Revision ID: b4752d1e2173
Revises:
Create Date: 2019-01-27 02:36:09.711382
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b4752d1e2173'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: lookup tables (ages, genders), shelters,
    accounts, and the in/out head-count tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('ages',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('type', sa.String(length=128), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('genders',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('type', sa.String(length=128), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('shelters',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=128), nullable=False),
    sa.Column('lat', sa.Float(), nullable=False),
    sa.Column('lon', sa.Float(), nullable=False),
    sa.Column('max_capacity', sa.Integer(), nullable=True),
    sa.Column('address', sa.String(length=128), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('modified_at', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('accounts',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('first_name', sa.String(length=128), nullable=False),
    sa.Column('last_name', sa.String(length=128), nullable=False),
    sa.Column('email', sa.String(length=128), nullable=False),
    sa.Column('password', sa.String(length=128), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('modified_at', sa.DateTime(), nullable=True),
    sa.Column('shelter_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['shelter_id'], ['shelters.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email')
    )
    op.create_table('inHeadCounts',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('shelter_id', sa.Integer(), nullable=False),
    sa.Column('age_id', sa.Integer(), nullable=False),
    sa.Column('gender_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['age_id'], ['ages.id'], ),
    sa.ForeignKeyConstraint(['gender_id'], ['genders.id'], ),
    sa.ForeignKeyConstraint(['shelter_id'], ['shelters.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('outHeadCounts',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('modified_at', sa.DateTime(), nullable=True),
    sa.Column('shelter_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['shelter_id'], ['shelters.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop all tables created by upgrade(), children before parents."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('outHeadCounts')
    op.drop_table('inHeadCounts')
    op.drop_table('accounts')
    op.drop_table('shelters')
    op.drop_table('genders')
    op.drop_table('ages')
    # ### end Alembic commands ###
|
[
"hurjun1995@gmail.com"
] |
hurjun1995@gmail.com
|
bd08997a4a71ca78417b912f995788e96df2fc64
|
ccc92254d0aa153bee94b6f4de9e12f368c5b342
|
/mgrbic_HW2_3.34.py.py
|
367baec3207fc43834465e0dff1ffd3af05ea4e3
|
[] |
no_license
|
mgrbic12/i210
|
dc03147b3385a3861336da0b603a31e4d4490f33
|
1cf1abc9fe4f10325670488b7f8e6892633b37dc
|
refs/heads/master
| 2020-04-13T00:46:28.498585
| 2018-12-23T01:46:13
| 2018-12-23T01:46:13
| 162,854,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
def pay(wage, hrs_worked):
    """Compute weekly pay: straight time up to 40 hours, time-and-a-half beyond."""
    if hrs_worked <= 40:
        return wage * hrs_worked
    overtime_hours = hrs_worked - 40
    return wage * 40 + wage * 1.5 * overtime_hours
# SECURITY FIX: the original used eval(input(...)), which executes arbitrary
# code typed by the user; parse the numbers explicitly instead.
wage = float(input("Please enter your hourly wage: "))
hrs_worked = float(input("Please enter how many hours were worked in a week: "))
total_pay = pay(wage, hrs_worked)
print("Your total pay for this week is!: ", total_pay)
|
[
"noreply@github.com"
] |
mgrbic12.noreply@github.com
|
21d4123a395d8ecbfbf790f39be30e7bdab5ed45
|
36afe806f4dd97b1673fe915436116df578c8049
|
/bin/easy_install-2.7
|
b0e5056cb67443b57d4b941bebc4de9037867bd3
|
[] |
no_license
|
cpbnicholson/scrapper
|
7511b987264626442a39a0323a7215f7b0d38633
|
4ffefe2130704b8b8c105f302cf619418e0552bc
|
refs/heads/master
| 2021-01-10T13:12:40.880394
| 2016-01-30T09:09:14
| 2016-01-30T09:09:14
| 47,851,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
7
|
#!/Users/cnicholson/PycharmProjects/Scrapper/scrapper/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==0.9.8','console_scripts','easy_install-2.7'
# NOTE: auto-generated console-script stub created by setuptools; do not edit by hand.
__requires__ = 'setuptools==0.9.8'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    sys.exit(
        load_entry_point('setuptools==0.9.8', 'console_scripts', 'easy_install-2.7')()
    )
|
[
"cpbnicholson@gmail.com"
] |
cpbnicholson@gmail.com
|
cf4e6af66b7f89f75baf7df1ed489f55253040f1
|
a75d9050638778ae72260cb9d88fc74ce2ba33a2
|
/XFouth course/7th semester/Information_Security/Lab5 - Digital signature/main.py
|
1d19e049187bb6479cbe62333d75594d90186f26
|
[
"MIT"
] |
permissive
|
gHuwk/University
|
aeacc242bd1d5fe588a84878bd8abd8eb349d615
|
9a0196a45c9cf33ac58018d636c3e4857eba0330
|
refs/heads/master
| 2023-06-11T02:24:24.459902
| 2021-07-03T12:56:03
| 2021-07-03T12:56:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,664
|
py
|
import rsa
import sys
import os
from Crypto.Hash import SHA256
def hash_file(message_file):
    """Return the SHA-256 digest (as bytes) of the file at *message_file*."""
    digest = SHA256.new()
    with open(message_file, "rb") as stream:
        # iter() with a b"" sentinel reads fixed-size chunks until EOF.
        for chunk in iter(lambda: stream.read(1024), b""):
            digest.update(chunk)
    return digest.digest()
def make_signature(message_file, key):
    """Create a signature file for *message_file* and return its filename.

    NOTE(review): the digest is "encrypted" with rsa.encrypt using the
    private key -- an unconventional signing scheme; rsa.sign/rsa.verify
    would be the standard API. Confirm before changing.
    """
    h = hash_file(message_file)
    # Encrypt the digest with the (private) key to form the signature.
    signature = rsa.encrypt(h, key)
    # Write the signature to a user-chosen file.
    signature_file_name = input("signature filename:")
    with open(signature_file_name, "wb") as f:
        f.write(signature)
    print("Signature saved in file '{0}'".format(signature_file_name))
    return signature_file_name
# check
def check_signature(message_file, signature_file, key):
    """Return True iff the signature in *signature_file* matches *message_file*.

    Decrypts the stored signature with *key* and compares it against a
    freshly computed SHA-256 digest of the message file.
    """
    # Hash the message file again.
    h1 = hash_file(message_file)
    # Recover the digest embedded in the signature.
    signature = None
    with open(signature_file, "rb") as f:
        signature = f.read()
    try:
        h2 = rsa.decrypt(signature, key)
    except rsa.pkcs1.DecryptionError:
        # Signature was not produced with the matching key.
        return False
    return (h1 == h2)
def main():
    """Sign the file given on the command line, then verify the signature."""
    try:
        message_file = sys.argv[1]
    except IndexError:
        print("input error")
        return
    if not os.path.exists(message_file):
        print("file doesn't exist")
        return
    # Generate a fresh 2048-bit RSA key pair for this run.
    (privkey, pubkey) = rsa.newkeys(2048)
    signature_file = make_signature(message_file, privkey)
    # FIX: `signature_file` was previously assigned but never used; fall back
    # to the just-created signature file when the user enters nothing.
    filename = input("file with signature:") or signature_file
    is_valid = check_signature(message_file, filename, pubkey)
    if is_valid:
        print("All it's okay.")
    else:
        print("Wrong signature.")
if __name__ == '__main__':
    main()
|
[
"noreply@github.com"
] |
gHuwk.noreply@github.com
|
b1fd6a4b81ebcc306a54f38c27a4223b2c68fbd8
|
90814aea40ee3864c30a70bbbcab4790f91f8f70
|
/chap05/quick_sort.py
|
ae27e80b010fee6c8745a0f3e8b691f8bf44220b
|
[] |
no_license
|
chc1129/python_algorithm
|
408fcd33bc11d8cb4f31b96ab9353833379b0f23
|
b097fbc1e15379a05590b83db4307f15c1c475ff
|
refs/heads/master
| 2022-04-05T15:12:05.471796
| 2020-02-22T10:01:57
| 2020-02-22T10:01:57
| 240,675,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
data = [6, 15, 4, 2, 8, 5, 11, 9, 7, 13]
def quick_sort(data):
    """Return a sorted copy of *data* using a three-way (fat-pivot) quicksort."""
    if len(data) <= 1:
        return data
    pivot = data[0]  # first element serves as the pivot
    # Partition into strictly-smaller and strictly-larger elements;
    # everything equal to the pivot is accounted for by the count below.
    smaller = [x for x in data if x < pivot]
    larger = [x for x in data if x > pivot]
    equal_count = len(data) - len(smaller) - len(larger)
    # Recurse on both sides and splice the pivot copies back in the middle.
    return quick_sort(smaller) + [pivot] * equal_count + quick_sort(larger)
print(quick_sort(data))
|
[
"chc1129@gmail.com"
] |
chc1129@gmail.com
|
b3b37db72e75824ce9edb42e2a654fc899b43922
|
3e321a7c381bf77d7563869b494a6d3a84816e75
|
/Back-end/Abogados/detalleusuario/migrations/0001_initial.py
|
49055f1b4b10174211f015336741a1612507e1eb
|
[] |
no_license
|
MrDavidAlv/Abogados
|
865541efe223582f1eff42ae65d7295a5cc3407f
|
e1f2c4d7957e5686a6035f468f72d1b3fe6d2182
|
refs/heads/master
| 2023-03-18T22:14:07.010015
| 2021-03-08T15:06:38
| 2021-03-08T15:06:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
# Generated by Django 3.0.4 on 2020-05-25 03:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates ``Detalleusuario``, a table linking a
    ``Usuario`` to a ``Rol`` via two cascading foreign keys."""

    initial = True

    dependencies = [
        ('usuario', '0001_initial'),
        ('rol', '0002_rol_nombrerol'),
    ]

    operations = [
        migrations.CreateModel(
            name='Detalleusuario',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # CASCADE: deleting a Rol or Usuario removes its detail rows.
                ('Rol', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rol.Rol')),
                ('Usuario', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='usuario.Usuario')),
            ],
        ),
    ]
|
[
"efnaranjo6@misena.edu.co"
] |
efnaranjo6@misena.edu.co
|
f0e7ce7611a4fcc3f21a0921054fa88f1d015553
|
3b227fb1b724314d069cd7eb8197ed33ad9282e3
|
/grocery_list/apps.py
|
0af6fbb49f0c4e2ce113b4fe16518f34b405a815
|
[] |
no_license
|
mansiverma28/Grocery
|
5494d41542d6cffe441916025653df723e519003
|
9c76a1bb14770354662a761762c3b11a33e326ad
|
refs/heads/master
| 2023-07-12T01:29:04.635376
| 2021-08-14T13:00:38
| 2021-08-14T13:00:38
| 396,008,293
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
from django.apps import AppConfig
class GroceryListConfig(AppConfig):
    """Django application configuration for the ``grocery_list`` app."""
    # Use 64-bit auto-incrementing primary keys for models in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'grocery_list'
|
[
"mansiverma805@gmail.com"
] |
mansiverma805@gmail.com
|
d3111715d70c7120db24a4557cf52fadd9bca899
|
895f5d2ad563f1e8ecad1c6c308e2addf1d9a70f
|
/Algorithms/Birthday_Cake_Candles.py
|
32f81178bf425e52de81e959c85d9ad60de69a21
|
[
"MIT"
] |
permissive
|
JavaGarcia/HackerRank
|
6b056e724a33d3178061564d87617215a6c92b1a
|
a5797627181ee2d970f45e91639a88ddd47c3a54
|
refs/heads/master
| 2022-11-22T17:51:14.110116
| 2020-07-27T01:52:48
| 2020-07-27T01:52:48
| 282,374,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
#!/bin/python
#Javier Garcia
import math
import os
import random
import re
import sys
# Complete the birthdayCakeCandles function below.
def birthdayCakeCandles(ar):
    """Return how many candles in *ar* share the maximum height."""
    tallest = max(ar)
    return sum(1 for height in ar if height == tallest)
if __name__ == '__main__':
    # NOTE(review): raw_input exists only on Python 2; under Python 3 this
    # script raises NameError (and map() would return an iterator, which
    # ar.count() cannot consume). Keeping as-is per the original target.
    # HackerRank convention: results are written to the path in OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # First input line is the candle count; it only advances stdin here.
    ar_count = int(raw_input())
    ar = map(int, raw_input().rstrip().split())
    result = birthdayCakeCandles(ar)
    fptr.write(str(result) + '\n')
    fptr.close()
|
[
"javier.garcia454@gmail.com"
] |
javier.garcia454@gmail.com
|
6cffb35c85d7adc498df112da42d1e4f44c2bf56
|
3ae3dc8fd39a98e14ae68853b801d57077ba406c
|
/intermediate/beyond-basics/1-organizing_programs/executable_directories/reader/reader/reader.py
|
a1bd61abf3b820c459fe2951eac213a4ae0b9f7d
|
[] |
no_license
|
diegogcc/py-pluralsight
|
e2047621af2f56ef145dd06af246da5bbd23db00
|
128568228d03f0cba12bd409a495e2e66e2e2505
|
refs/heads/master
| 2020-09-26T06:51:44.553133
| 2020-05-27T03:22:28
| 2020-05-27T03:22:28
| 226,195,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
import os
from reader.compressed import bzipped, gzipped
# Map file extension -> opener callable for compressed files.
# Extensions not listed here fall back to the built-in open() (see Reader).
extension_map = {
    '.bz2' : bzipped.opener,
    '.gz' : gzipped.opener,
}
class Reader:
    """Open *filename* in text mode with an opener chosen from
    ``extension_map`` by its extension (plain ``open`` otherwise),
    exposing ``read`` and ``close`` on the underlying file object."""

    def __init__(self, filename):
        _, extension = os.path.splitext(filename)
        self.f = extension_map.get(extension, open)(filename, 'rt')

    def close(self):
        """Close the underlying file."""
        self.f.close()

    def read(self):
        """Return the entire file contents."""
        return self.f.read()
|
[
"diegoc906@gmail.com"
] |
diegoc906@gmail.com
|
9d2b8983f65c85b28c4249d3c748c5c51ba90fb3
|
259c57ac5ec9d5b58d3a5bfaff184a167801aeec
|
/do_eval.py
|
52d41e1193fac16e155de67aabe69e3d52629547
|
[] |
no_license
|
dongzhuoyao/py_img_seg_eval
|
d654ddcfce9330d9076bfe02d91de2913b386200
|
5b321b87c4f9952f34c323419b875307c9edb04b
|
refs/heads/master
| 2021-01-13T03:27:39.053081
| 2016-12-29T06:38:06
| 2016-12-29T06:38:06
| 77,545,314
| 0
| 0
| null | 2016-12-28T16:13:30
| 2016-12-28T16:13:29
| null |
UTF-8
|
Python
| false
| false
| 1,123
|
py
|
import argparse
import os,glob
import cv2
import eval_segm as es
# Import arguments
# Parse command-line arguments: ground-truth dir, prediction dir, and a
# text file listing one image path per line.
parser = argparse.ArgumentParser()
parser.add_argument('--gt_dir', type=str, required=True)
parser.add_argument('--result_dir', type=str, required=True)
parser.add_argument('--list_path', type=str, required=True)
args = parser.parse_args()

gt_dir = args.gt_dir
result_dir = args.result_dir
list_path = args.list_path

# Use a context manager so the list file is always closed; the previous
# version opened it and never closed the handle.
with open(list_path, "r") as f_list:
    lines = f_list.readlines()

mIoU = 0
pixel_acc = 0
mean_acc = 0
for line in lines:
    # Each line is a path; keep only the final path component as the
    # file name shared by the ground-truth and result directories.
    file_name = line.rsplit("/", 1)[1]
    file_name = file_name.strip("\n")

    gt_img_path = os.path.join(gt_dir, file_name)
    gt_img = cv2.imread(gt_img_path, 0)  # 0 -> load as grayscale

    result_img_path = os.path.join(result_dir, file_name)
    result_img = cv2.imread(result_img_path, 0)

    # Accumulate per-image segmentation metrics; averaged after the loop.
    mIoU += es.mean_IU(result_img, gt_img)
    pixel_acc += es.pixel_accuracy(result_img, gt_img)
    mean_acc += es.mean_accuracy(result_img, gt_img)

# NOTE(review): an empty list file would divide by zero here — assumed
# non-empty by the original; confirm with callers.
length = len(lines)
mIoU = mIoU / length
pixel_acc = pixel_acc / length
mean_acc = mean_acc / length

print("mIoU: %f" % (mIoU))
print("pixel_acc: %f" % (pixel_acc))
print("mean_acc: %f" % (mean_acc))
|
[
"929109550@qq.com"
] |
929109550@qq.com
|
f3837420701692e028a0ce0ea143fda81311e84b
|
f8d3f814067415485bb439d7fe92dc2bbe22a048
|
/cv2samples/python/plane_tracker.py
|
b092d48dc8e8b80ac8191a1e8088ec0adbfb681e
|
[] |
no_license
|
gmonkman/python
|
2f9ab8f159c01f6235c86cb0cd52062cd3fdedd3
|
9123aa6baf538b662143b9098d963d55165e8409
|
refs/heads/master
| 2023-04-09T15:53:29.746676
| 2022-11-26T20:35:21
| 2022-11-26T20:35:21
| 60,254,898
| 0
| 2
| null | 2023-03-24T22:58:39
| 2016-06-02T10:25:27
|
Python
|
UTF-8
|
Python
| false
| false
| 6,110
|
py
|
#!/usr/bin/env python
'''
Multitarget planar tracking
==================
Example of using features2d framework for interactive video homography matching.
ORB features and FLANN matcher are used. This sample provides PlaneTracker class
and an example of its usage.
video: http://www.youtube.com/watch?v=pzVbhxx6aog
Usage
-----
plane_tracker.py [<video source>]
Keys:
SPACE - pause video
c - clear targets
Select a textured planar object to track by drawing a box with a mouse.
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
import cv2
# built-in modules
from collections import namedtuple
# local modules
import video
import common
from video import presets
FLANN_INDEX_KDTREE = 1
FLANN_INDEX_LSH = 6
flann_params = dict(algorithm=FLANN_INDEX_LSH,
table_number=6, # 12
key_size=12, # 20
multi_probe_level=1) # 2
MIN_MATCH_COUNT = 10
'''
image - image to track
rect - tracked rectangle (x1, y1, x2, y2)
keypoints - keypoints detected inside rect
descrs - their descriptors
data - some user-provided data
'''
PlanarTarget = namedtuple(
'PlaneTarget', 'image, rect, keypoints, descrs, data')
'''
target - reference to PlanarTarget
p0 - matched points coords in target image
p1 - matched points coords in input frame
H - homography matrix from p0 to p1
quad - target bounary quad in input frame
'''
TrackedTarget = namedtuple('TrackedTarget', 'target, p0, p1, H, quad')
class PlaneTracker:
    """Track multiple planar targets across video frames.

    ORB features detected in each frame are matched against stored
    targets with a FLANN (LSH) matcher, and a RANSAC homography maps
    each target's rectangle into the current frame.
    """

    def __init__(self):
        self.detector = cv2.ORB_create(nfeatures=1000)
        # bug : need to pass empty dict (#1329)
        self.matcher = cv2.FlannBasedMatcher(flann_params, {})
        self.targets = []       # registered PlanarTarget instances
        self.frame_points = []  # keypoints of the most recent frame

    def add_target(self, image, rect, data=None):
        '''Add a new tracking target.'''
        x0, y0, x1, y1 = rect
        raw_points, raw_descrs = self.detect_features(image)
        points, descs = [], []
        # Keep only the keypoints that fall inside the selected rectangle.
        for kp, desc in zip(raw_points, raw_descrs):
            x, y = kp.pt
            if x0 <= x <= x1 and y0 <= y <= y1:
                points.append(kp)
                descs.append(desc)
        descs = np.uint8(descs)
        self.matcher.add([descs])
        target = PlanarTarget(image=image, rect=rect,
                              keypoints=points, descrs=descs, data=data)
        self.targets.append(target)

    def clear(self):
        '''Remove all targets'''
        self.targets = []
        self.matcher.clear()

    def track(self, frame):
        '''Returns a list of detected TrackedTarget objects'''
        self.frame_points, frame_descrs = self.detect_features(frame)
        if len(self.frame_points) < MIN_MATCH_COUNT:
            return []
        # 2-NN match + Lowe ratio test (0.75) to drop ambiguous matches.
        matches = self.matcher.knnMatch(frame_descrs, k=2)
        matches = [m[0] for m in matches if len(
            m) == 2 and m[0].distance < m[1].distance * 0.75]
        if len(matches) < MIN_MATCH_COUNT:
            return []
        # Bucket matches by the target (imgIdx) they belong to.
        matches_by_id = [[] for _ in xrange(len(self.targets))]
        for m in matches:
            matches_by_id[m.imgIdx].append(m)
        tracked = []
        for imgIdx, matches in enumerate(matches_by_id):
            if len(matches) < MIN_MATCH_COUNT:
                continue
            target = self.targets[imgIdx]
            p0 = [target.keypoints[m.trainIdx].pt for m in matches]
            p1 = [self.frame_points[m.queryIdx].pt for m in matches]
            p0, p1 = np.float32((p0, p1))
            # Robust homography estimate; `status` flags RANSAC inliers.
            H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
            status = status.ravel() != 0
            if status.sum() < MIN_MATCH_COUNT:
                continue
            p0, p1 = p0[status], p1[status]

            # Project the target rectangle into the current frame.
            x0, y0, x1, y1 = target.rect
            quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
            quad = cv2.perspectiveTransform(
                quad.reshape(1, -1, 2), H).reshape(-1, 2)

            track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad)
            tracked.append(track)
        # Best-supported targets (most inlier points) first.
        tracked.sort(key=lambda t: len(t.p0), reverse=True)
        return tracked

    def detect_features(self, frame):
        '''detect_features(self, frame) -> keypoints, descrs'''
        keypoints, descrs = self.detector.detectAndCompute(frame, None)
        if descrs is None:  # detectAndCompute returns descs=None if not keypoints found
            descrs = []
        return keypoints, descrs
class App:
    """Interactive demo: draw a rectangle with the mouse to register it
    as a planar target, then show tracking overlays on each frame."""

    def __init__(self, src):
        self.cap = video.create_capture(src, presets['book'])
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv2.namedWindow('plane')
        self.rect_sel = common.RectSelector('plane', self.on_rect)

    def on_rect(self, rect):
        # Mouse-selection callback: register the chosen region as a target.
        self.tracker.add_target(self.frame, rect)

    def run(self):
        """Main loop: grab frames, track, draw overlays, handle keys
        (SPACE = pause, 'c' = clear targets, ESC = quit)."""
        while True:
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
                ret, frame = self.cap.read()
                if not ret:
                    break
                self.frame = frame.copy()

            vis = self.frame.copy()
            if playing:
                tracked = self.tracker.track(self.frame)
                for tr in tracked:
                    # Outline the tracked quad and mark the inlier points.
                    cv2.polylines(vis, [np.int32(tr.quad)],
                                  True, (255, 255, 255), 2)
                    for (x, y) in np.int32(tr.p1):
                        cv2.circle(vis, (x, y), 2, (255, 255, 255))

            self.rect_sel.draw(vis)
            cv2.imshow('plane', vis)
            ch = cv2.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == ord('c'):
                self.tracker.clear()
            if ch == 27:
                break
if __name__ == '__main__':
    print(__doc__)

    # Narrowed from a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt; only a missing argv entry should trigger the
    # fallback. (The redundant local `import sys` was dropped — sys is
    # already imported at module level.)
    try:
        video_src = sys.argv[1]
    except IndexError:
        # No source given on the command line: fall back to camera 0.
        video_src = 0
    App(video_src).run()
|
[
"gmonkman@mistymountains.biz"
] |
gmonkman@mistymountains.biz
|
2feeb385b52e6c3b40f419f07977b3255425a8c6
|
8d18b10db6f001856f6aa6cc9b4367318d933db2
|
/functional/hello.py
|
724b2ae6fd004347224e24ad9053aa0fdd16284e
|
[] |
no_license
|
Aanjansai/Python-Programs
|
39463f47c6fbff3eaeb993cf175529eac8a33ad1
|
d7afe955c1530fe7e18e6f3e0591d0b794626fa4
|
refs/heads/master
| 2020-04-17T10:56:31.542628
| 2019-03-18T07:06:19
| 2019-03-18T07:06:19
| 166,519,709
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22
|
py
|
# Print a greeting to standard output.
print("hello!!! sai")
|
[
"aanjan567@gmail.com"
] |
aanjan567@gmail.com
|
8782ef8c6b5cc795c1320286692f127ef513d023
|
404ffbd9f7cddf4c81f95b6e1c74009ee4ec7d39
|
/auction/urls.py
|
36cb9df7fb84f1f31e5bb58d70d669a3fa4f7d95
|
[] |
no_license
|
anindyamanna/enguru-auction
|
0274f3acb2a0e7f8acf56d46b04f252158b6b0f7
|
e92ca9328bfff1d0c514403b697f98bb5a5cabf6
|
refs/heads/master
| 2022-01-12T10:29:18.552035
| 2018-10-08T05:52:46
| 2018-10-08T05:52:46
| 151,985,270
| 0
| 0
| null | 2022-01-06T22:27:46
| 2018-10-07T21:04:41
|
Python
|
UTF-8
|
Python
| false
| false
| 747
|
py
|
from django.contrib.auth.decorators import login_required
from django.urls import path
from rest_framework import routers
from rest_framework.authtoken import views
from auction.views import ListAuctionItems, AuctionItemDetails, ListBids, SubmitBid
# DRF router; no viewsets are registered here, but its URL list is still
# appended below.
router = routers.DefaultRouter()
# Namespace used when reversing these URLs (e.g. 'auction:list-bids').
app_name = 'auction'
urlpatterns = [
    # DRF token login endpoint.
    path('api-token-auth/', views.obtain_auth_token),
    path('auction-items/<int:item_id>', AuctionItemDetails.as_view(), name='auction-item-details'),
    path('auction-items/', ListAuctionItems.as_view(), name='list-auction-items'),
    # Bid endpoints require an authenticated user.
    path('list-bids/', login_required(ListBids.as_view()), name='list-bids'),
    path('submit-bid/', login_required(SubmitBid.as_view()), name='submit-bid'),
]
urlpatterns += router.urls
|
[
"anindyamanna92@gmail.com"
] |
anindyamanna92@gmail.com
|
40f2c10bf97d4438619050d8e63c3e51319d17b5
|
2e1935b4eda33f86f20487987fa2e1f5d7a61473
|
/BE_Project-2015-08-15/BE Project/Sentiment Analysis using Machine Learning/html_helper.py
|
8978053ed4263edc696c1d3c002be5fa11665eff
|
[] |
no_license
|
SumitAnglekar/SentimentalAnalysis
|
246cb7675cdd7d07247598fa5b9e1ae59cdd63f9
|
23656faa8546f365878f18aae9dbdac52388d307
|
refs/heads/master
| 2022-01-05T08:22:04.968562
| 2019-05-11T05:22:38
| 2019-05-11T05:22:38
| 186,091,206
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,984
|
py
|
import urllib
import datetime
from datetime import timedelta
class HTMLHelper:
    """Builds the HTML pages (search form and result page, including the
    Google Charts JavaScript) for the sentiment-analysis web app."""

    #start __init__
    def __init__(self):
        """Precompute display labels ("%b %d") for today and the six
        previous days, most recent first, in self.weekDates."""
        self.html = ''
        self.currDate = datetime.datetime.now()
        self.weekDates = []
        #self.weekDates.append(self.currDate.strftime("%Y-%m-%d"))
        self.weekDates.append(self.currDate.strftime("%b %d"))
        for i in range(1,7):
            dateDiff = timedelta(days=-i)
            newDate = self.currDate + dateDiff
            #self.weekDates.append(newDate.strftime("%Y-%m-%d"))
            self.weekDates.append(newDate.strftime("%b %d"))
    #end

    #start getDefaultHTML
    def getDefaultHTML(self, error = 0):
        """Return the landing-page HTML. `error` selects an optional
        error banner: 1 = Twitter API failure, 2 = unknown method."""
        html = '''
        <html>
        <head><title>Sentiment Analysis</title>
        <!--link rel="stylesheet" type="text/css" href="http://yui.yahooapis.com/3.4.1/build/cssgrids/grids-min.css" /-->
        <link rel="stylesheet" type="text/css" href="static/styles.css" />
        </head>
        <body>
        <div class="yui3-g" id="doc">
          <div class="yui3-u" id="hd">
            <h2>Sentiment Analysis</h2>
          </div>
          <div class="yui3-u" id="bd">
            <form name="keyform" id="key-form" method="get" onSubmit="return checkEmpty(this);">
              <p><input type="text" value="" name="keyword" id="keyword"/><input type="submit" value="Submit" id="sub"/></p>
              <div id="timeframe">
                <input type="radio" name="time" id="today" value="today" checked="true">Today</input>
                <input type="radio" name="time" id="lastweek" value="lastweek">Last 7 days</input>
              </div>
              <div id="choice">
                <input type="radio" name="method" id="baseline" value="baseline" checked="true">Baseline</input>
                <input type="radio" name="method" id="naivebayes" value="naivebayes">Naive Bayes</input>
                <input type="radio" name="method" id="maxentropy" value="maxentropy">Maximum Entropy</input>
                <!--input type="radio" name="method" id="svm" value="svm">Support Vector Machine</input-->
              </div>
            </form>
        '''
        if(error == 1):
            html += '<div id="error">Unable to fetch TWitter API data. Please try again later.</div>'
        elif(error == 2):
            html += '<div id="error">Unrecognized Method of Classfication, please choose one from above.</div>'
        html += '''
          </div>
          <div id='ft'>&copy; GPS</div>
          <script type="text/javascript">
            function checkEmpty(f) {
              if (f.keyword.value === "") {
                alert('Please enter a valid keyword');
                return false;
              }else{
                f.submit();
                return true;
              }
            }
            var _gaq = _gaq || [];
            _gaq.push(['_setAccount', 'UA-31119754-1']);
            _gaq.push(['_trackPageview']);
            window['ga-disable-UA-31119754-1'] = true;
            (function() {
              var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
              ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
              var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
            })();
          </script>
        </body>
        </html>
        '''
        return html
    #end

    #start getResultHTML
    def getResultHTML(self, keyword, results, time, pos_count, neg_count, neut_count, checked):
        """Return the results-page HTML.

        results maps a day index -> {tweet index -> {'label', 'tweet'}};
        pos/neg/neut_count are per-day tallies; `time` is 'today' or
        'lastweek' (selects bar chart vs. 7-day line chart); `checked`
        is the classification-method radio id to re-select.
        NOTE(review): urllib.unquote is Python 2 only.
        """
        keyword = urllib.unquote(keyword.replace("+", " "))
        html = '''
        <html>
        <head><title>Sentiment Analysis</title>
        <!--link rel="stylesheet" type="text/css" href="http://yui.yahooapis.com/3.4.1/build/cssgrids/grids-min.css" /-->
        <link rel="stylesheet" type="text/css" href="static/styles.css" />
        </head>
        <body>
        <div class="yui3-g" id="doc">
          <div class="yui3-u" id="hd">
            <h2>Sentiment Analyzer </h2>
          </div>
          <div class="yui3-u" id="bd">
            <form name="keyform" id="key-form" method="get" onSubmit="return checkEmpty(this);">
              <p><input type="text" value="" name="keyword" id="keyword"/><input type="submit" value="Search" id="sub"/></p>
              <div id="timeframe">
                <input type="radio" name="time" id="today" value="today" checked="true">Today</input>
                <input type="radio" name="time" id="lastweek" value="lastweek">Last 7 days</input>
              </div>
              <div id="choice">
                <input type="radio" name="method" id="baseline" value="baseline">Baseline</input>
                <input type="radio" name="method" id ="naivebayes" value="naivebayes">Naive Bayes</input>
                <input type="radio" name="method" id="maxentropy" value="maxentropy">Maximum Entropy</input>
                <!-- input type="radio" name="method" id="svm" value="svm">Support Vector Machine</input-->
              </div>
            </form>
            <div id="results">
        '''
        # Chart container differs by timeframe (bar vs. line chart).
        if(time == 'today'):
            html += '<div id="result-chart"></div>'
        elif(time == 'lastweek'):
            html += '<div id="result-big-chart"></div>'
        html += '<div id="content">'
        # Three columns of tweets, one per sentiment label.
        left = '<div id="left"><h3>Positive</h3><ul>'
        right = '<div id="right"><h3>Negative</h3><ul>'
        middle = '<div id="middle"><h3>Neutral</h3><ul>'
        for i in results:
            res= results[i]
            for j in res:
                item = res[j]
                if(item['label'] == 'positive'):
                    left += '<li title="'+self.weekDates[i]+'">' + item['tweet'] + '</li>'
                elif(item['label'] == 'neutral'):
                    middle+= '<li title="'+self.weekDates[i]+'">' + item['tweet'] + '</li>'
                elif(item['label'] == 'negative'):
                    right += '<li title="'+self.weekDates[i]+'">' + item['tweet'] + '</li>'
            #end innerloop
        #end outerloop
        left += '</ul></div>'
        right += '</ul></div>'
        middle += '</ul></div>'
        html += left + middle + right + '</div>'
        if(time == 'today'):
            # Single-day view: Google Charts column chart of the counts.
            html += '''
            </div>
          </div>
          <script type="text/javascript" src="https://www.google.com/jsapi"></script>
          <script type="text/javascript">
            google.load("visualization", "1", {packages:["corechart"]});
            google.setOnLoadCallback(drawChart);
            function drawChart() {
              var data = google.visualization.arrayToDataTable([
            '''
            html += "['Sentiment', 'Count'],"
            html += "['Positive', "+ str(pos_count[0]) + "],"
            html += "['Neutral', "+ str(neut_count[0]) + "],"
            html += "['Negative', "+ str(neg_count[0]) + "]"
            html += '''
              ]);
              var options = {
                'title': 'Sentiment Classification',
                'titleTextStyle': {'fontSize': 15},
                'hAxis': {'textStyle': {'fontSize': 15}},
                'vAxis': {'textStyle': {'fontSize': 15}},
                'legend' : {'position' : 'none'}
              };
              var chart = new google.visualization.ColumnChart(document.getElementById('result-chart'));
              chart.draw(data, options);
            }
            '''
        elif(time == 'lastweek'):
            # Week view: line chart with one row per day, oldest first.
            html += '''
            </div>
          </div>
          <script type="text/javascript" src="https://www.google.com/jsapi"></script>
          <script type="text/javascript">
            google.load("visualization", "1", {packages:["corechart"]});
            google.setOnLoadCallback(drawChart);
            function drawChart() {
              var data = google.visualization.arrayToDataTable([
            '''
            html += "['Date', 'Positive', 'Neutral', 'Negative'],"
            l = len(pos_count)
            for i in range(l-1, 0, -1):
                html += "['"+ self.weekDates[i] + "', "+ str(pos_count[i]) + "," \
                        + str(neut_count[i]) + "," + str(neg_count[i]) + "],"
            #last one
            html += "['"+ self.weekDates[0] + "', "+ str(pos_count[0]) + "," \
                    + str(neut_count[0]) + "," + str(neg_count[0]) + "]"
            html += '''
              ]);
              var options = {
                'title': 'Sentiment Classification',
                'colors': ['#04B404', '#6E6E6E', '#FF0000'],
                'titleTextStyle': {'fontSize': 15},
                'vAxis': {'textStyle': {'fontSize': 15}},
                'hAxis': {'textStyle': {'fontSize': 15}, 'slantedText': true, 'slantedTextAngle': 30}
              };
              var chart = new google.visualization.LineChart(document.getElementById('result-big-chart'));
              chart.draw(data, options);
            }
            '''
        # Re-select the method/time radios and restore the search text.
        checked1 = 'document.getElementById("'+checked+'").checked=true;'
        checked2 = 'document.getElementById("'+time+'").checked=true;'
        textValue = 'document.getElementById("keyword").value="'+keyword+'";'
        html += checked1 + checked2 + textValue
        html += '''
            function checkEmpty(f) {
              if (f.keyword.value === "") {
                alert('Please enter a valid keyword');
                return false;
              }else{
                f.submit();
                return true;
              }
            }
            var _gaq = _gaq || [];
            _gaq.push(['_setAccount', 'UA-31119754-1']);
            _gaq.push(['_trackPageview']);
            window['ga-disable-UA-31119754-1'] = true;
            (function() {
              var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
              ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
              var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
            })();
          </script>
        </body>
        </html>
        '''
        return html
    #end
#end class
|
[
"anglekar.s@husky.neu.edu"
] |
anglekar.s@husky.neu.edu
|
839db5b423c7fa092c7942c19df2fd38d5f2f3a2
|
5a864dcc6a5875ed46b8dc5a502dde528aab0733
|
/Views/wechat_applet_UI/venv/Scripts/easy_install-script.py
|
36b435c0818a5d3ab7d4d50a89b965855ae9880f
|
[] |
no_license
|
ZhouRuiXiang/Flask_Note
|
bd4d345bbc57cce49506fa2d14d02550df54d07c
|
9b20c5e40a30f6ae6f0b9d06fcceb0749d3ea13a
|
refs/heads/master
| 2020-07-24T13:55:18.331731
| 2020-02-11T05:22:46
| 2020-02-11T05:22:46
| 202,512,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
#!F:\wechat_applet_UI\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
# Auto-generated setuptools entry-point wrapper: locates and runs the
# easy_install console script from the pinned setuptools distribution.
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix so argv[0] matches the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
    )
|
[
"1215686977@qq.com"
] |
1215686977@qq.com
|
2dff885dc58e3d09f9288861c11cedb3521ffaae
|
9c6f045d3c6fc767e05318e72bf9ed3af25452dd
|
/zvt/recorders/eastmoney/finance/base_china_stock_finance_recorder.py
|
682c681274c7ba44e193f304bd7daf9c459fec13
|
[
"MIT"
] |
permissive
|
wuhgogo/zvt
|
5f1230aebd8d87113453c375ba2251b4fb6f21b2
|
6ffd15e3030fafeeedd2afbbc17e89194c20714a
|
refs/heads/master
| 2023-06-25T21:51:01.702178
| 2020-10-10T04:18:04
| 2020-10-10T04:18:04
| 295,753,879
| 1
| 0
|
MIT
| 2020-09-15T14:27:50
| 2020-09-15T14:27:49
| null |
UTF-8
|
Python
| false
| false
| 8,417
|
py
|
# -*- coding: utf-8 -*-
import pandas as pd
from jqdatapy.api import get_fundamentals
from zvt.api.quote import to_jq_report_period
from zvt.contract.api import get_data
from zvt.domain import FinanceFactor
from zvt.recorders.eastmoney.common import company_type_flag, get_fc, EastmoneyTimestampsDataRecorder, \
call_eastmoney_api, get_from_path_fields
from zvt.recorders.joinquant.common import to_jq_entity_id
from zvt.utils.pd_utils import index_df
from zvt.utils.pd_utils import pd_is_not_null
from zvt.utils.time_utils import to_time_str, to_pd_timestamp
class BaseChinaStockFinanceRecorder(EastmoneyTimestampsDataRecorder):
    """Base recorder for Chinese stock financial statements fetched from
    the Eastmoney API, with optional back-fill of the true publish date
    (timestamp) from joinquant."""

    # Set by subclasses, e.g. 'LiRunBiaoList' (income statement) or
    # 'XianJinLiuLiangBiaoList' (cash-flow statement).
    finance_report_type = None

    data_type = 1

    timestamps_fetching_url = 'https://emh5.eastmoney.com/api/CaiWuFenXi/GetCompanyReportDateList'
    timestamp_list_path_fields = ['CompanyReportDateList']
    timestamp_path_fields = ['ReportDate']

    def __init__(self, entity_type='stock', exchanges=['sh', 'sz'], entity_ids=None, codes=None, batch_size=10,
                 force_update=False, sleeping_time=5, default_size=2000, real_time=False,
                 fix_duplicate_way='add', start_timestamp=None, end_timestamp=None, close_hour=0,
                 close_minute=0) -> None:
        # NOTE(review): `exchanges` is a mutable default argument; it is
        # only passed through here, but callers should not mutate it.
        super().__init__(entity_type, exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
                         default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
                         close_minute)

        # NOTE(review): the assignment below cannot raise, so the except
        # branch is dead — presumably a jq login/availability check was
        # removed here; confirm against project history.
        try:
            self.fetch_jq_timestamp = True
        except Exception as e:
            self.fetch_jq_timestamp = False
            self.logger.warning(
                f'joinquant account not ok,the timestamp(publish date) for finance would be not correct', e)

    def init_timestamps(self, entity):
        """Fetch the list of report dates for *entity* from Eastmoney and
        return them as pandas Timestamps."""
        param = {
            "color": "w",
            "fc": get_fc(entity),
            "DataType": self.data_type
        }

        # Income and cash-flow statements additionally require ReportType=1.
        if self.finance_report_type == 'LiRunBiaoList' or self.finance_report_type == 'XianJinLiuLiangBiaoList':
            param['ReportType'] = 1

        timestamp_json_list = call_eastmoney_api(url=self.timestamps_fetching_url,
                                                 path_fields=self.timestamp_list_path_fields,
                                                 param=param)

        if self.timestamp_path_fields:
            timestamps = [get_from_path_fields(data, self.timestamp_path_fields) for data in timestamp_json_list]

        return [to_pd_timestamp(t) for t in timestamps]

    def generate_request_param(self, security_item, start, end, size, timestamps):
        """Build the Eastmoney request payload; with more than 10 known
        report dates, page from the 11th-newest date in batches of 10."""
        if len(timestamps) <= 10:
            param = {
                "color": "w",
                "fc": get_fc(security_item),
                "corpType": company_type_flag(security_item),
                # 0 means get all types
                "reportDateType": 0,
                "endDate": '',
                "latestCount": size
            }
        else:
            param = {
                "color": "w",
                "fc": get_fc(security_item),
                "corpType": company_type_flag(security_item),
                # 0 means get all types
                "reportDateType": 0,
                "endDate": to_time_str(timestamps[10]),
                "latestCount": 10
            }

        if self.finance_report_type == 'LiRunBiaoList' or self.finance_report_type == 'XianJinLiuLiangBiaoList':
            param['reportType'] = 1

        return param

    def generate_path_fields(self, security_item):
        """Map the company-type flag (bank/insurer/broker/enterprise) to
        the JSON path holding its statements."""
        comp_type = company_type_flag(security_item)

        if comp_type == "3":
            return ['{}_YinHang'.format(self.finance_report_type)]
        elif comp_type == "2":
            return ['{}_BaoXian'.format(self.finance_report_type)]
        elif comp_type == "1":
            return ['{}_QuanShang'.format(self.finance_report_type)]
        elif comp_type == "4":
            return ['{}_QiYe'.format(self.finance_report_type)]

    def record(self, entity, start, end, size, timestamps):
        # different with the default timestamps handling
        param = self.generate_request_param(entity, start, end, size, timestamps)
        self.logger.info('request param:{}'.format(param))

        return self.api_wrapper.request(url=self.url, param=param, method=self.request_method,
                                        path_fields=self.generate_path_fields(entity))

    def get_original_time_field(self):
        # Eastmoney rows are keyed by report date, not publish date.
        return 'ReportDate'

    def fill_timestamp_with_jq(self, security_item, the_data):
        # get report published date from jq
        try:
            df = get_fundamentals(table='indicator', code=to_jq_entity_id(security_item), columns='pubDate',
                                  date=to_jq_report_period(the_data.report_date), count=None, parse_dates=['pubDate'])
            if pd_is_not_null(df):
                the_data.timestamp = to_pd_timestamp(df['pubDate'][0])
                self.logger.info(
                    'jq fill {} {} timestamp:{} for report_date:{}'.format(self.data_schema, security_item.id,
                                                                           the_data.timestamp,
                                                                           the_data.report_date))
                self.session.commit()
        except Exception as e:
            # Best-effort: a jq failure leaves timestamp == report_date.
            self.logger.error(e)

    def on_finish_entity(self, entity):
        """After recording an entity, replace timestamp (== report_date
        placeholder) with the real publish date, preferring values
        already stored on FinanceFactor and falling back to jq."""
        super().on_finish_entity(entity)

        if not self.fetch_jq_timestamp:
            return

        # fill the timestamp for report published date
        the_data_list = get_data(data_schema=self.data_schema,
                                 provider=self.provider,
                                 entity_id=entity.id,
                                 order=self.data_schema.timestamp.asc(),
                                 return_type='domain',
                                 session=self.session,
                                 filters=[self.data_schema.timestamp == self.data_schema.report_date,
                                          self.data_schema.timestamp >= to_pd_timestamp('2005-01-01')])
        if the_data_list:
            if self.data_schema == FinanceFactor:
                for the_data in the_data_list:
                    self.fill_timestamp_with_jq(entity, the_data)
            else:
                # Reuse publish dates already resolved on FinanceFactor
                # for the same report-date range.
                df = FinanceFactor.query_data(entity_id=entity.id,
                                              columns=[FinanceFactor.timestamp, FinanceFactor.report_date,
                                                       FinanceFactor.id],
                                              filters=[FinanceFactor.timestamp != FinanceFactor.report_date,
                                                       FinanceFactor.timestamp >= to_pd_timestamp('2005-01-01'),
                                                       FinanceFactor.report_date >= the_data_list[0].report_date,
                                                       FinanceFactor.report_date <= the_data_list[-1].report_date, ])

                if pd_is_not_null(df):
                    index_df(df, index='report_date', time_field='report_date')

                for the_data in the_data_list:
                    if (df is not None) and (not df.empty) and the_data.report_date in df.index:
                        the_data.timestamp = df.at[the_data.report_date, 'timestamp']
                        self.logger.info(
                            'db fill {} {} timestamp:{} for report_date:{}'.format(self.data_schema, entity.id,
                                                                                   the_data.timestamp,
                                                                                   the_data.report_date))
                        self.session.commit()
                    else:
                        # self.logger.info(
                        #     'waiting jq fill {} {} timestamp:{} for report_date:{}'.format(self.data_schema,
                        #                                                                    security_item.id,
                        #                                                                    the_data.timestamp,
                        #                                                                    the_data.report_date))

                        self.fill_timestamp_with_jq(entity, the_data)
|
[
"5533061@qq.com"
] |
5533061@qq.com
|
9e9d7761fbb90394ad7d3014e79b73f6fa262659
|
916eb769fae98b941602dcad774af7267b4f3025
|
/lg/globals.py
|
f653316b0c9c799a2ef302d928936fe7986502c0
|
[] |
no_license
|
joeedh/implicit-surface-nodes
|
1630dfe9fdbb4b99489b450aa35cb37b36183c0a
|
60ea0617848fb661b33b6a3d0dafbdb486ca39bf
|
refs/heads/master
| 2021-07-14T05:51:17.897665
| 2021-03-06T00:48:17
| 2021-03-06T00:48:17
| 40,988,561
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22
|
py
|
# Module-level registries shared across the package; contents are
# managed elsewhere (presumably timer handles and queued jobs — confirm
# against callers).
timers = []
jobs = []
|
[
"joeedh@gmail.com"
] |
joeedh@gmail.com
|
03397b0fc672d4553685521d098acb895c9a0ad8
|
b7c93b3b012fd3a8e1eed5c3454088c941315383
|
/src/hybrid_circuits.py
|
5def7b58eb69a2d6a793bb75cc2338822663fa95
|
[
"MIT"
] |
permissive
|
00mjk/FT_qRAM_Circuits
|
49e44eeabe361f8d35bdb87c7befb71f015d7eda
|
c3bba4f19e5cd6ad1adf1a6055282b02a97f8962
|
refs/heads/master
| 2023-03-19T05:59:32.861876
| 2019-12-16T21:35:55
| 2019-12-16T21:35:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,730
|
py
|
# -*- coding: utf-8 -*-
#
# hybrid_circuits.py: Hybrid qRAM circuits and parallelized versions.
#
# © 2018 Olivia Di Matteo (odimatte@uwaterloo.ca)
#
# This file is part of the project FT_qRAM_Circuits.
# Licensed under MIT License.
from circuit import *
class Hybrid(qRAMCircuit):
    """ Hybrid circuits: control first on the output of first k bits, then use
        outputs to control on valid address on the last n - k bits. Perform both
        "tiers" of the circuit in series with no parallelization. """

    def __init__(self, n, q, k):
        """Populate self.params with worst-case resource counts (qubit
        count, depth, T-count/depth, H and CNOT counts) for the fully
        serial hybrid circuit split at bit k."""
        super().__init__(n, q, k)

        self.params["name"] = "Hybrid"

        if k < q: # Worst case: 2^k k-controlled, 2^q (n-k+1)-controlled
            self.params["n_qubits"] = n + pow(2,k) + 1 + max(k-1, n-k)
            self.params["depth"] = 2*pow(2,k)*depth(k) + pow(2,q)*depth(n-k+1)
            self.params["t_count"] = 2*pow(2,k)*t_c(k) + pow(2,q)*t_c(n-k+1)
            self.params["t_depth"] = 2*pow(2,k)*t_d(k) + pow(2,q)*t_d(n-k+1)
            self.params["h_count"] = 2*pow(2,k)*h_c(k) + pow(2,q)*h_c(n-k+1)
            self.params["cnot_count"] = 2*pow(2,k)*cnot_c(k) + pow(2,q)*cnot_c(n-k+1)
        else: # Worst case: 2^q k-controlled, 2^q (n-k+1)-controlled (no common substrings on the bits)
            self.params["n_qubits"] = n + pow(2,q) + 1 + max(k-1, n-k)
            self.params["depth"] = 2*pow(2,q)*depth(k) + pow(2,q)*depth(n-k+1)
            self.params["t_count"] = 2*pow(2,q)*t_c(k) + pow(2,q)*t_c(n-k+1)
            self.params["t_depth"] = 2*pow(2,q)*t_d(k) + pow(2,q)*t_d(n-k+1)
            self.params["h_count"] = 2*pow(2,q)*h_c(k) + pow(2,q)*h_c(n-k+1)
            self.params["cnot_count"] = 2*pow(2,q)*cnot_c(k) + pow(2,q)*cnot_c(n-k+1)

        # Total Clifford gates = Hadamards + CNOTs.
        self.params["cliffords"] = self.params["h_count"] + self.params["cnot_count"]
class Hybrid_Tier1Parallel(qRAMCircuit):
    """ Hybrid circuit, but with first tier done in parallel, and second tier in series. """

    def __init__(self, n, q, k):
        """Populate self.params with worst-case resource counts when the
        first (k-bit) tier runs in parallel and the second tier in
        series; parallelism trades extra qubits for lower (T-)depth."""
        super().__init__(n, q, k)

        self.params["name"] = "Hybrid_Tier1Parallel"

        if k < q:
            self.params["n_qubits"] = (k+1)*pow(2,k) + n - k + 1 + max(n-k, pow(2,k)*(k-1))
            self.params["depth"] = 2*(k + depth(k)) + pow(2,q)*depth(n-k+1)
            self.params["t_count"] = 2*pow(2,k)*t_c(k) + pow(2,q)*t_c(n-k+1)
            self.params["t_depth"] = 2*t_d(k) + pow(2,q)*t_d(n-k+1)
            self.params["h_count"] = 2*pow(2,k)*h_c(k) + pow(2,q)*h_c(n-k+1)
            self.params["cnot_count"] = 2*(k*(pow(2,k)-1) + pow(2,k)*cnot_c(k)) + pow(2,q)*cnot_c(n-k+1)
        else: # Worst case: 2^q k-controlled, 2^q (n-k+1)-controlled (no common substrings on the bits)
            self.params["n_qubits"] = (k+1)*pow(2,q) + n - k + 1 + max(n-k, pow(2,q)*(k-1))
            self.params["depth"] = 2*(q + depth(k)) + pow(2,q)*depth(n-k+1)
            self.params["t_count"] = 2*pow(2,q)*t_c(k) + pow(2,q)*t_c(n-k+1)
            self.params["t_depth"] = 2*(t_d(k)) + pow(2,q)*t_d(n-k+1)
            self.params["h_count"] = 2*pow(2,q)*h_c(k) + pow(2,q)*h_c(n-k+1)
            self.params["cnot_count"] = 2*( k*(pow(2,q)-1) + pow(2,q)*cnot_c(k)) + pow(2,q)*cnot_c(n-k+1)

        # Total Clifford gates = Hadamards + CNOTs.
        self.params["cliffords"] = self.params["h_count"] + self.params["cnot_count"]
class Hybrid_Tier2Parallel(qRAMCircuit):
    """ Hybrid circuit, but with second tier done in parallel, and first tier in series. """

    def __init__(self, n, q, k):
        """Populate self.params with worst-case resource counts when the
        second ((n-k+1)-controlled) tier runs in parallel and the first
        tier in series."""
        super().__init__(n, q, k)

        self.params["name"] = "Hybrid_Tier2Parallel"

        if k < q:
            # In the worst case, one of the 2^k outputs must do 2^(q-1) + 1 of the n-k+1-controlled gates,
            # and all the remaining ones must do 1; assume then that one of the registers must perform
            # enough CNOTs to copy down to 2^(q-1) + 1 registers.
            # Address fanout of the bottom tier can be done in tandem with MPMCTs of the top tier;
            # whichever depth is less is the one we go with, so lots of max calculations here.
            # See p.157 of nb.
            self.params["n_qubits"] = k + (n-k+2)*pow(2,q) + max(k-1, pow(2,q)*(n-k)) + 1
            # Top tier + fanout + bottom tier in parallel + max(parity compute/uncompute, fanout + top tier)
            self.params["depth"] = 2*pow(2,k)*depth(k) + q + depth(n-k+1) + max(2*q + 2, q + pow(2,k)*depth(k))
            self.params["t_count"] = 2*pow(2,k)*t_c(k) + pow(2,q)*t_c(n-k+1)
            self.params["t_depth"] = 2*pow(2,k)*t_d(k) + t_d(n-k+1)
            self.params["h_count"] = 2*pow(2,k)*h_c(k) + pow(2,q)*h_c(n-k+1)
            # CNOTs for first 2^q k + CNOTs to copy outputs + CNOTs to copy (n-k) in parallel + CNOTs for 2^q (n-k+1) controlled + parity calculation
            self.params["cnot_count"] = 2*(pow(2,k)*cnot_c(k) + pow(2,q-1) + (n-k)*(pow(2,q)-1)) + pow(2,q)*cnot_c(n-k+1) + 2*(pow(2,q)-1) + 2
        else: # Worst case: 2^q k-controlled, 2^q (n-k+1)-controlled (no common substrings on the bits)
            self.params["n_qubits"] = k + (n-k+2)*pow(2,q) + max(k-1, pow(2,q)*(n-k)) + 1
            self.params["depth"] = 2*pow(2,q)*depth(k) + q + depth(n-k+1) + max(2*q + 2, q + pow(2,q)*depth(k))
            self.params["t_count"] = 2*pow(2,q)*t_c(k) + pow(2,q)*t_c(n-k+1)
            self.params["t_depth"] = 2*pow(2,q)*t_d(k) + t_d(n-k+1)
            self.params["h_count"] = 2*pow(2,q)*h_c(k) + pow(2,q)*h_c(n-k+1)
            self.params["cnot_count"] = 2*(pow(2,q)*cnot_c(k) + pow(2,q-1) + (n-k)*(pow(2,q)-1)) + pow(2,q)*cnot_c(n-k+1) + 2*(pow(2,q)-1) + 2

        # Total Clifford gates = Hadamards + CNOTs.
        self.params["cliffords"] = self.params["h_count"] + self.params["cnot_count"]
class Hybrid_Parallel(qRAMCircuit):
    """ Hybrid circuit with both tiers done in parallel (one after the other, as the
    results of the second tier depend on those of the first. """

    def __init__(self, n, q, k):
        # n: total address bits; k: bits handled by the first tier;
        # q: appears as log2 of the number of queried addresses (inferred from
        # the k < q split -- TODO confirm against the qRAMCircuit base class).
        super().__init__(n, q, k)
        self.params["name"] = "Hybrid_Parallel"
        if k < q: # Similar worst case as above; one of the 2^k top-tier outputs must be copied down to 2^(q-1) + 1 fresh outputs
            self.params["n_qubits"] = k*pow(2,k) + (n-k+2)*pow(2,q) + max(pow(2,k)*(k-1), pow(2,q)*(n-k)) + 1
            # Both tiers parallel: depth is fanout + one gate depth per tier,
            # plus the max(parity work, fanout + top tier) overlap term.
            self.params["depth"] = k + depth(k) + q + depth(n-k+1) + max(2*q+2, q + depth(k) + k)
            self.params["t_count"] = 2*pow(2,k)*t_c(k) + pow(2,q)*t_c(n-k+1)
            # Parallel execution collapses T-depth to one gate per tier (x2 for uncompute of tier 1).
            self.params["t_depth"] = 2*t_d(k) + t_d(n-k+1)
            self.params["h_count"] = 2*pow(2,k)*h_c(k) + pow(2,q)*h_c(n-k+1)
            self.params["cnot_count"] = 2 * ( k*(pow(2,k)-1) + pow(2,k)*cnot_c(k) + pow(2,q-1) + (n-k)*(pow(2,q)-1)) + pow(2,q)*cnot_c(n-k+1) + 2*(pow(2,q)-1) + 2
        else:
            # Worst case: 2^q k-controlled and 2^q (n-k+1)-controlled gates.
            self.params["n_qubits"] = k*pow(2,q) + (n-k+2)*pow(2,q) + max(pow(2,q)*(k-1), pow(2,q)*(n-k)) + 1
            self.params["depth"] = q + depth(k) + q + depth(n-k+1) + max(2*q+2, q + depth(k) + q)
            self.params["t_count"] = 2*pow(2,q)*t_c(k) + pow(2,q)*t_c(n-k+1)
            self.params["t_depth"] = 2*t_d(k) + t_d(n-k+1)
            self.params["h_count"] = 2*pow(2,q)*h_c(k) + pow(2,q)*h_c(n-k+1)
            self.params["cnot_count"] = 2*(k*(pow(2,q)-1) + pow(2,q)*cnot_c(k) + pow(2,q-1) + (n-k)*(pow(2,q)-1)) + pow(2,q)*cnot_c(n-k+1) + 2*(pow(2,q)-1) + 2
        # Clifford total = Hadamards + CNOTs (T gates are counted separately).
        self.params["cliffords"] = self.params["h_count"] + self.params["cnot_count"]
|
[
"dimatteo.olivia@gmail.com"
] |
dimatteo.olivia@gmail.com
|
efd1ec744696d9e1102ffc2cffcbb803a95ddcf2
|
9c8804de344b74bb54f53e5092aa30a19c000a08
|
/sum-ex.py
|
7e27897bfccd8cf17fbec23a7c8a7a5f91cb998e
|
[] |
no_license
|
ajauntor/sum-ex
|
37e3f77ed50855c5d50339a6ff6feb44ba0f6dde
|
65ba46bc923b678262320a5c79ab49476cd1d1cf
|
refs/heads/master
| 2023-03-22T17:19:27.308672
| 2021-03-16T05:38:10
| 2021-03-16T05:38:10
| 348,226,368
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25
|
py
|
# Minimal greeting script: prints a fixed message and exits.
print("Hello, AJ Auntor")
|
[
"auntorknocks@gmail.com"
] |
auntorknocks@gmail.com
|
09fc2b083153bca04544201a232010d562c456ce
|
f5548f2219ac7a5530ddbbb5789ef9a8616399c9
|
/horovod/__init__.py
|
41666401d9c3bb48618db348f773be8f555a9f2e
|
[
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
jcchai/horovod
|
8b237c923b804e2d582c1ffbf293a509e9ee09f7
|
1480e75d088a49c1b3cb93e6abfcaff5bb5db7b0
|
refs/heads/master
| 2023-05-22T17:03:02.211125
| 2021-06-10T13:18:27
| 2021-06-10T13:18:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 55
|
py
|
from horovod.runner import run  # re-export so callers can use `horovod.run` directly
__version__ = '0.22.0'  # package version string
|
[
"noreply@github.com"
] |
jcchai.noreply@github.com
|
0feb4ae8bc4b0fec63d2204c90cc43ac43e639ba
|
5fa17eeebf24cf1973693de992352ef283f86d31
|
/Todolist_Backend/API/models.py
|
0bb19c4fab0d0cbe9ad2b587e1b468188024a870
|
[] |
no_license
|
jadonbarnes/Todolist-backend
|
a0acf3fae3272d716c90fcf000ef9161ceab5b33
|
fc2875d70bfd28e1ceba5aa1b6f037a1e2e65fa9
|
refs/heads/master
| 2020-08-29T17:38:33.615658
| 2020-01-06T14:28:42
| 2020-01-06T14:28:42
| 218,114,091
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,487
|
py
|
from django.db import models
from django.contrib.auth.models import User
import re
import datetime
class Task(models.Model):
    """A single to-do item, optionally nested under a parent task and a project."""
    #Fixed fields
    Name = models.CharField(max_length=100)
    Content = models.TextField(blank=True, null=True)
    Notes = models.TextField(blank=True, null=True)
    Due_Date = models.DateField(blank=True, null=True)
    # These are choice field they allow the user to pick from any of the items listed in the choices list
    # NOTE: the choices tuple is deliberately reassigned to the model field of
    # the same name below, so only the field survives on the class.
    Priority = (
        ('1','1'),
        ('2','2'),
        ('3','3'),
        ('4','4')
    )
    Priority = models.CharField(max_length=1, choices=Priority,null=True, blank=True)
    # this is the list that represents the system tags, this can be updated at any time (even without reloding the server)
    System_Tags = (
        ('heading','heading'),
        ('parent','parent')
    )
    System_Tags = models.CharField(max_length=30, choices=System_Tags, null=True, blank=True)
    #links to foreign objects
    Parent = models.ForeignKey('self', on_delete=models.CASCADE, blank=True, null=True)  # sub-task nesting
    Project = models.ForeignKey('Project', on_delete=models.CASCADE, blank=True, null=True)
    Tags = models.ManyToManyField('Tag', blank=True)
    Creator = models.ForeignKey(User, on_delete=models.CASCADE,default=1)

    def __str__(self):
        # Human-readable representation used by the admin and shell.
        return self.Name
class Project(models.Model):
    """A named grouping of tasks, owned by a user and optionally tagged."""
    #Fixed fields
    Name = models.CharField(max_length=100)
    #relationships
    Creator = models.ForeignKey(User, on_delete=models.CASCADE,default=1)
    Tag = models.ManyToManyField('Tag', blank=True)

    def __str__(self):
        # Human-readable representation used by the admin and shell.
        return self.Name
class Tag(models.Model):
    """A user-owned label that can be attached to tasks and projects."""
    #Fixed fields
    Name = models.CharField(max_length=100)
    #relationships
    Creator = models.ForeignKey(User, on_delete=models.CASCADE, default=1)

    def __str__(self):
        # Human-readable representation used by the admin and shell.
        return self.Name
class Filter(models.Model):
    """A saved, named filter expression.

    ``Filter_Command`` holds a mini query language ('|'/'&' combinators,
    '!' negation, and prefixes like 'P:', '@', '#', 'Today', 'After:',
    'Before:') which the ``Filter`` method translates into a string of
    Django ORM queryset code.
    """
    # Fixed fields
    Name = models.CharField(max_length=100)
    Filter_Command = models.TextField()
    # Relationships
    Creator = models.ForeignKey(User, on_delete=models.CASCADE, default=1)

    def __str__(self):
        # Human-readable representation used by the admin and shell.
        return self.Name

    def Filter(self):
        """Translate ``self.Filter_Command`` into a queryset-code string.

        Returns the translated command as a single string; the boolean
        operators '|' and '&' are passed through unchanged between the
        generated ``Task.objects.filter(...)`` / ``.exclude(...)`` terms.
        """
        Command = self.Filter_Command
        # Split on the boolean operators while keeping them as list items.
        Command = re.split('([|&])', Command)
        print(Command)
        for Position in range(0, len(Command)):
            if "(" in Command[Position]:
                Command[Position] = Command[Position].replace('(', "", 1)
                Command[Position] = Command[Position].replace(')', "", 1)
                # FIXME: this resolves to the *class* Filter (the model
                # constructor), not this method, so recursive parsing of
                # parenthesised sub-expressions cannot work as written.
                # It needs a redesign (e.g. a standalone helper function).
                Command[Position] = Filter(Command[Position])
            # BUG FIX: the original condition was `!= '|' or '&'`, which is
            # always true because the string '&' is truthy, so the operator
            # tokens themselves were being rewritten as filter terms.
            if Command[Position] not in ('|', '&'):
                Function = 'filter'
                if "!" in Command[Position]:
                    # Leading '!' negates the term: use exclude() instead of filter().
                    Command[Position] = Command[Position].replace('!', "", 1)
                    Function = 'exclude'
                if "P:" in Command[Position]:
                    # Priority term, e.g. "P:1".
                    Command[Position] = Command[Position].replace('P:', "", 1)
                    Command[Position] = "Task.objects." + Function + "(Priority = " + Command[Position] + ")"
                elif "@" in Command[Position]:
                    # Tag term, e.g. "@home". str(...pk): concatenating the
                    # model instance itself would raise TypeError.
                    Command[Position] = Command[Position].replace('@', "", 1)
                    Tag_Primary_Key = Tag.objects.get(Name=Command[Position])
                    Command[Position] = "Task.objects." + Function + "(Tags = " + str(Tag_Primary_Key.pk) + ")"
                elif "#" in Command[Position]:
                    # Project term, e.g. "#work".
                    Command[Position] = Command[Position].replace('#', "", 1)
                    Project_Primary_Key = Project.objects.get(Name=Command[Position])
                    Command[Position] = "Task.objects." + Function + "(Project = " + str(Project_Primary_Key.pk) + ")"
                elif "Today" in Command[Position]:
                    Command[Position] = "Task.objects." + Function + "(Due_Date = datetime.date.today())"
                elif "After:" in Command[Position]:
                    Command[Position] = Command[Position].replace('After:', "", 1)
                    Command[Position] = "Task.objects." + Function + "(Due_Date__gte = " + Command[Position] + ")"
                elif "Before:" in Command[Position]:
                    Command[Position] = Command[Position].replace('Before:', "", 1)
                    Command[Position] = "Task.objects." + Function + "(Due_Date__lte = " + Command[Position] + ")"
        # Always restrict the result to the requesting user's own tasks.
        # NOTE(review): the generated code references `request`, which must be
        # in scope wherever this string is evaluated -- confirm at the caller.
        Command.append("&")
        Command.append("Task.objects.filter(Creator = request.user.id)")
        Command = ''.join(Command)
        return Command
|
[
"email@jadonbarnes.co.uk"
] |
email@jadonbarnes.co.uk
|
a89b03b60fbefdde9bbbdde0ef520e40a92121a5
|
034a61353c94713a926fb601bc7aefee81644636
|
/model/urls.py
|
2dbc1efe6f0667cbba5178f8771d1de05ead3c1c
|
[] |
no_license
|
TaeWonJin/reexample
|
07469a5528617727b38b892dd55c53a1f7b371a9
|
feba0dbaf800dba3dbcd94c154d27046584262ae
|
refs/heads/master
| 2023-05-04T00:22:51.514733
| 2019-07-29T20:15:24
| 2019-07-29T20:15:24
| 195,457,967
| 0
| 0
| null | 2023-04-21T20:33:26
| 2019-07-05T19:25:40
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,326
|
py
|
"""model URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path , include
import modelapp.views
import portfolio.views
import account.views
from django.conf import settings
from django.conf.urls.static import static
# URL routing table: admin, blog app, portfolio, and account auth views.
# static(...) appends media-file serving routes (intended for development).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',modelapp.views.home, name='home'),
    path('blog/',include('modelapp.urls')),
    path('portfolio/',portfolio.views.portfolio,name="portfolio"),
    path('account/signup/', account.views.signup, name='signup'),
    path('account/login/' , account.views.login, name='login'),
    path('account/logout/', account.views.logout_request, name='logout'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"wlsxodnjs1@naver.com"
] |
wlsxodnjs1@naver.com
|
53d20df8348f4ae59a429f2409ae483ae8302815
|
b94790631da3d9c98f2edf495e79eded77a71b2c
|
/the_wall/apps/wall_app/migrations/0002_auto_20180719_2232.py
|
e2e8a15b4a86c3bd0c5e079caa084ea713af07d4
|
[] |
no_license
|
estarlight/coding_dojo_django
|
c7894c2a5f310b5cb6f072aca60bf2834de6a510
|
577a714512c3b1bd5e280a987c20666dfd7504ed
|
refs/heads/master
| 2020-03-23T11:01:29.056778
| 2018-09-25T18:21:10
| 2018-09-25T18:21:10
| 141,477,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-07-19 22:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: replaces Comment.messages (M2M removed in
    0001's successor state) with a nullable Comment.message foreign key."""

    dependencies = [
        ('wall_app', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='comment',
            name='messages',
        ),
        migrations.AddField(
            model_name='comment',
            name='message',
            # Nullable so existing Comment rows remain valid after the change.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='message_comment', to='wall_app.Message'),
        ),
    ]
|
[
"noreply@github.com"
] |
estarlight.noreply@github.com
|
ce50b97bc6950b8198d59226e4f3d69dc58249f9
|
96e66a2ec795104b7e75a8bc42fe499397ec804a
|
/restwebservice/api/controllers/v1/root.py
|
eb34b84c8a592fac505582f6be0271c609e1ba26
|
[] |
no_license
|
aratim/REST-Web-Service
|
eaf5631f05bfc83a7058b47981699d534ac270e0
|
7a8b5916cb4669c5d24dcac2c8ad3b837b6702e8
|
refs/heads/master
| 2020-04-23T17:40:33.062835
| 2014-06-16T16:48:17
| 2014-06-16T16:48:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
from pecan import expose
from webob.exc import status_map
from restwebservice.api.controllers.v1 import fibonacci
class RootController(object):
    """Pecan root controller: mounts the /fibonacci sub-controller and
    serves the index and error pages."""

    fibonacci = fibonacci.FibonacciController()

    @expose('json')
    def index(self):
        # GET / -- plain welcome message, rendered as JSON.
        msg = "Welcome to REST Web service"
        return msg

    @expose('error.html')
    def error(self, status):
        """Render the error page for the given HTTP status code.

        ``status`` arrives as a string from the URL; non-numeric values
        fall back to 500.
        """
        try:
            status = int(status)
        except ValueError:  # pragma: no cover
            status = 500
        # Look up webob's canonical explanation text for the status, if any.
        message = getattr(status_map.get(status), 'explanation', '')
        return dict(status=status, message=message)
|
[
"arati.mahimane@rackspace.com"
] |
arati.mahimane@rackspace.com
|
8ad0b231772083ee7d66c657a27e361c086667e2
|
00cfd0283816234879f480071f3b3952930a243b
|
/Apps/Droplet/build/bdist.macosx-10.7-intel/python2.7-standalone/app/temp/fcntl.py
|
8366b51aae8b174c3e8a80d61ffb3920bba3de57
|
[] |
no_license
|
rwal127/Qube
|
aa94788133e20396f0fd36d4cee8a6d718cf3929
|
41264283dc764f55080d6d4e7538b9955fe3812d
|
refs/heads/master
| 2021-05-27T04:34:01.923205
| 2012-04-27T16:26:49
| 2012-04-27T16:26:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
# py2app bootstrap shim: locates the compiled fcntl.so inside the frozen
# app's lib-dynload directory and loads it in place of this stub module.
def __load():
    import imp, os, sys  # NOTE: `imp` is deprecated; kept as generated by py2app
    ext = 'fcntl.so'
    # Search sys.path for the frozen app's lib-dynload directory.
    for path in sys.path:
        if not path.endswith('lib-dynload'):
            continue
        ext = os.path.join(path, ext)
        if os.path.exists(ext):
            #print "py2app extension module", __name__, "->", ext
            # Replace this stub with the real C extension module.
            mod = imp.load_dynamic(__name__, ext)
            #mod.frozen = 1
            break
        else:
            raise ImportError(repr(ext) + " not found")
    else:
        # for/else: no sys.path entry ended in 'lib-dynload'.
        raise ImportError("lib-dynload not found")
__load()
del __load  # keep the module namespace clean after bootstrapping
|
[
"brennan.chapman@gmail.com"
] |
brennan.chapman@gmail.com
|
4007b809d87f4b517e582bfcc8d53fa884bce8d0
|
97062249c6eb04069c6fb01e71d06bc334c828e1
|
/desktop/core/src/desktop/management/commands/clean_history_docs.py
|
7d8e2273128e023c9b6c2efbd0867977b6776dd3
|
[
"Apache-2.0"
] |
permissive
|
Albertsss/hue
|
1c8b31c64cc420a029f5b5b80712fb3d0c6cbd6e
|
454d320dd09b6f7946f3cc05bc97c3e2ca6cd485
|
refs/heads/master
| 2021-07-08T17:21:13.237871
| 2018-05-30T06:03:21
| 2018-05-30T06:03:21
| 135,386,450
| 0
| 1
|
Apache-2.0
| 2020-07-25T13:36:58
| 2018-05-30T04:06:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,457
|
py
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from datetime import datetime, timedelta
from django.core.management.base import BaseCommand
from desktop.models import Document2
LOG = logging.getLogger(__name__)
DEFAULT_EXPIRY_DAYS = 120
class Command(BaseCommand):
    """
    Clean up (delete) history documents without a parent or that are older than N number of days.
    e.g.
    build/env/bin/hue clean_history_docs 30
    0 history documents deleted.
    """
    args = '<age_in_days> (default is %s)' % DEFAULT_EXPIRY_DAYS
    help = 'Delete history documents older than %s days.' % DEFAULT_EXPIRY_DAYS

    def handle(self, *args, **options):
        # Running total of deleted documents, reported at the end.
        count = 0
        # First positional arg is the age threshold in days, if provided.
        days = int(args[0]) if len(args) >= 1 else DEFAULT_EXPIRY_DAYS
        # Clean up orphan history documents (excluding query docs)
        orphans = Document2.objects.exclude(type__startswith='query-').filter(is_history=True).filter(dependents=None)
        if orphans.count() > 0:
            count += orphans.count()
            self.stdout.write('Deleting %d orphan history documents...' % orphans.count())
            orphans.delete()
        else:
            self.stdout.write('No orphan history documents found.')
        # Clean up old history documents
        old_history_docs = Document2.objects.filter(is_history=True).filter(last_modified__lte=datetime.today() - timedelta(days=days))
        if old_history_docs.count() > 0:
            count += old_history_docs.count()
            self.stdout.write('Deleting %d history documents older than %d days...' % (old_history_docs.count(), days))
            old_history_docs.delete()
        else:
            self.stdout.write('No history documents older than %d days found.' % days)
        self.stdout.write('%d total history documents deleted.' % count)
|
[
"540227148@qq.com"
] |
540227148@qq.com
|
08b2d8c34d5b15fed1f3df058fa68c95b1438f4a
|
83a0eed4b2a792161b46519bf217f0cf5c2cde95
|
/ws_serv.py
|
95c5994c6d6c0b033d8fc4ec8f210e8e99f41d8c
|
[] |
no_license
|
ning412128/Lntelligent-Toy
|
03c52e208a63d5d5ca12eaa9c51ad886e36d28d6
|
6ce9ba35bd18f2d704f8dc7979ea7f0480c24d26
|
refs/heads/master
| 2020-04-30T23:24:54.489322
| 2019-03-22T13:27:30
| 2019-03-22T13:27:30
| 177,143,562
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,419
|
py
|
from flask import Flask,request
import json
from geventwebsocket.handler import WebSocketHandler
from gevent.pywsgi import WSGIServer
from geventwebsocket.websocket import WebSocket
ws_serv = Flask(__name__)
# Connected clients (toys and apps) keyed by their id; shared by both routes.
user_socket_dict = {}
@ws_serv.route("/toy/<toy_id>")
def toy(toy_id):
    """WebSocket endpoint for a toy client.

    Registers the toy's socket under ``toy_id``, then relays every incoming
    JSON message to the peer named by its ``to_user`` field.
    """
    user_socket = request.environ.get("wsgi.websocket")  # type: WebSocket
    if user_socket:
        user_socket_dict[toy_id] = user_socket
        while 1:
            message = user_socket.receive()
            # BUG FIX: receive() returns None when the peer disconnects;
            # the original crashed in json.loads and leaked the dict entry.
            if message is None:
                user_socket_dict.pop(toy_id, None)
                break
            msg_dict = json.loads(message)
            print(msg_dict)
            # e.g. {"to_user": "123", "music": "2739568486.mp3", "from_user": "345"}
            app_socket = user_socket_dict.get(msg_dict.pop("to_user"))
            # BUG FIX: guard against the target not being connected --
            # .get() returns None and .send() would raise AttributeError.
            if app_socket is not None:
                app_socket.send(json.dumps(msg_dict))
@ws_serv.route("/app/<app_id>")
def app(app_id):
    """WebSocket endpoint for an app client.

    Registers the app's socket under ``app_id``, then relays every incoming
    JSON message to the peer named by its ``to_user`` field.
    """
    user_socket = request.environ.get("wsgi.websocket")  # type: WebSocket
    if user_socket:
        user_socket_dict[app_id] = user_socket
        while 1:
            message = user_socket.receive()
            # BUG FIX: receive() returns None when the peer disconnects;
            # the original crashed in json.loads and leaked the dict entry.
            if message is None:
                user_socket_dict.pop(app_id, None)
                break
            msg_dict = json.loads(message)
            print(msg_dict)
            # e.g. {"to_user": "123", "music": "2739568486.mp3", "from_user": "345"}
            toy_socket = user_socket_dict.get(msg_dict.pop("to_user"))
            # BUG FIX: guard against the target not being connected --
            # .get() returns None and .send() would raise AttributeError.
            if toy_socket is not None:
                toy_socket.send(json.dumps(msg_dict))
if __name__ == '__main__':
    # Serve on all interfaces, port 8000, with WebSocket upgrade support.
    http_serv = WSGIServer(("0.0.0.0",8000),ws_serv,handler_class=WebSocketHandler)
    http_serv.serve_forever()
|
[
"you@example.com"
] |
you@example.com
|
1726be3204bcc16f66b21039a4e1959f3a1a66db
|
de8f894f3e82c7581ec64ce7767ef6f2c97592b5
|
/popex/test/test_popex_objects_PoPEx.py
|
346efba3adb97b47ca40379db40ae9d95e8aa60f
|
[] |
no_license
|
pjuda/PoPEx-1
|
d553ec1762ea2c71567b0a52c210dd95e6cda8a1
|
ae79192507fc91ad2a579c4be83e4e5bf156b794
|
refs/heads/master
| 2022-04-27T06:58:56.377658
| 2020-04-16T12:52:25
| 2020-04-16T12:52:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,938
|
py
|
""" TEST FILE
Some tests are run to check the behaviour of the class
- 'PoPEx'
in 'popex.popex_objects'. Some toy models are created and added to the PoPEx
structure. The tests are all formulated in terms of the 'assert' method.
"""
# -------------------------------------------------------------------------
# Authors: Christoph Jaeggli, Julien Straubhaar and Philippe Renard
# Year: 2018
# Institut: University of Neuchatel
#
# Copyright (c) 2018 Christoph Jaeggli
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# General imports
import unittest
import numpy as np
import os
# Package imports
from popex.popex_objects import PoPEx, CatParam, ContParam
# Fixture directory for the PoPEx tests: '<package_root>/test/test_dir/'.
PATH_TEST = os.path.dirname(os.path.dirname(__file__)) + '/'\
    + 'test/test_dir/'
# Unit test class
# Unit test class
class TestPoPEx(unittest.TestCase):
    """Tests for popex.popex_objects.PoPEx: attribute presence, container
    types, and bookkeeping after add_model() calls."""

    def setUp(self):
        # Toy categorical model 1: six values over three category bins.
        param_val1 = np.array([0.5, 1.5, 2, 0.75, 1, 2.5])
        categories1 = [[(0.25, 0.75), (2.25, 2.75)],
                       [(0.75, 1.0)],
                       [(1.0, 2.25)]]
        mod11 = CatParam(param_val=param_val1, categories=categories1)
        # Toy categorical model 2: note 2.249 sits just below the 2.25 bin edge.
        param_val2 = np.array([1.75, 2.25, 2.249, 0.25])
        categories2 = [[(0.25, 0.75), (2.25, 2.75)],
                       [(0.75, 2.25)]]
        mod12 = CatParam(param_val=param_val2, categories=categories2)
        # Toy continuous model.
        param_val3 = np.array([1., 2., 3.4, 5, 6])
        mod13 = ContParam(param_val=param_val3)
        # One model tuple with nmtype = 3 parameter types.
        self.mod1 = (mod11, mod12, mod13)
        self.p_lik1 = 2.5
        self.cmp_plik1 = True
        self.log_p_pri1 = -2.5
        self.log_p_gen1 = -1.5
        self.nc1 = (1, 2, 0)
        self.ncmax = (10, 20, 0)
        self.nmtype = 3
        self.nmc = 1

    def test_PoPEx(self):
        popex = PoPEx(ncmax=self.ncmax,
                      nmtype=self.nmtype,
                      path_res=PATH_TEST)
        # A fresh PoPEx exposes all bookkeeping attributes.
        self.assertTrue(isinstance(popex, PoPEx))
        self.assertTrue(hasattr(popex, 'model'))
        self.assertTrue(hasattr(popex, 'log_p_lik'))
        self.assertTrue(hasattr(popex, 'cmp_log_p_lik'))
        self.assertTrue(hasattr(popex, 'log_p_pri'))
        self.assertTrue(hasattr(popex, 'log_p_gen'))
        self.assertTrue(hasattr(popex, 'nc'))
        self.assertTrue(hasattr(popex, 'path_res'))
        # After the first add_model, arrays/lists hold one entry each.
        popex.add_model(0, self.mod1, self.p_lik1, self.cmp_plik1,
                        self.log_p_pri1, self.log_p_gen1, self.nc1)
        self.assertTrue(isinstance(popex.model, list))
        self.assertTrue(isinstance(popex.log_p_lik, np.ndarray))
        self.assertTrue(isinstance(popex.cmp_log_p_lik, np.ndarray))
        self.assertTrue(isinstance(popex.log_p_pri, np.ndarray))
        self.assertTrue(isinstance(popex.log_p_gen, np.ndarray))
        self.assertTrue(isinstance(popex.nc, list))
        self.assertEqual(popex.cmp_log_p_lik.dtype, 'bool')
        self.assertEqual(popex.nmod, 1)
        # Second add_model (with cmp flag False) grows every container to 2.
        popex.add_model(1, self.mod1, 0, False,
                        -12., -14.5, (15, 12, 0))
        self.assertTrue(isinstance(popex.model, list))
        self.assertTrue(isinstance(popex.log_p_lik, np.ndarray))
        self.assertTrue(isinstance(popex.cmp_log_p_lik, np.ndarray))
        self.assertTrue(isinstance(popex.log_p_pri, np.ndarray))
        self.assertTrue(isinstance(popex.log_p_gen, np.ndarray))
        self.assertTrue(isinstance(popex.nc, list))
        self.assertEqual(popex.cmp_log_p_lik.dtype, 'bool')
        self.assertEqual(popex.nmod, 2)
        self.assertEqual(len(popex.model), 2)
        self.assertEqual(len(popex.log_p_lik), 2)
        self.assertEqual(len(popex.cmp_log_p_lik), 2)
        self.assertEqual(len(popex.log_p_pri), 2)
        self.assertEqual(len(popex.log_p_gen), 2)
        self.assertEqual(len(popex.nc), 2)
if __name__ == '__main__':
    # Run the tests with per-test output when executed directly.
    unittest.main(verbosity=2)
|
[
"christoph.jaeggli@unine.ch"
] |
christoph.jaeggli@unine.ch
|
4ec91cb675a6bcfd705ef5ff70da47d5fdd5929d
|
a043207e157e745eb0d005fc658b57f77c202e5f
|
/yunServer/fee/tests.py
|
449a050748e83707eccf48af53e1fc367194975b
|
[] |
no_license
|
seanlian/lian
|
2f485fafd954eeeababf3897d1ba5c2f187de7a1
|
aa4c1f624c5d82032a31bb8f1570d87567c902e9
|
refs/heads/master
| 2020-04-07T20:37:33.975609
| 2018-12-04T14:19:17
| 2018-12-04T14:19:17
| 158,696,682
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
from django.test import TestCase
import time

# Create your tests here.
# NOTE(review): scratch experiments with time.strftime, not real test cases.
print (time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()))
# strftime already returns a str; the str() wrapper is redundant.
str1=str(time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()))
str1=str1.split(" ")
print(str1)
# Extract just the date part ("YYYY-MM-DD") of the current timestamp.
then_time=time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()).split(" ")[0]
print(then_time)
|
[
"maryclearlove@163.com"
] |
maryclearlove@163.com
|
a506bd9f67613ce45f4e61a147bed302494deecc
|
927b5041c54830c60bb764fc0e4098bed2617eca
|
/entity_recognition/Entity_recognition_json_utry_v2/tornado_server.py
|
53da704758341aceb2239c621fab7d06d31b8102
|
[] |
no_license
|
jxz542189/model_house
|
ab625297962bb941f79cf5584a9437cad3b3efed
|
1fd3d360a2a980a0eddb5b3ee600f2976d1a8dba
|
refs/heads/master
| 2021-07-18T09:33:08.196544
| 2019-01-22T10:49:24
| 2019-01-22T10:49:24
| 144,448,043
| 10
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
#coding=utf-8
# Wrap the WSGI (Flask) entity-recognition app in a Tornado HTTP server.
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from entity_server import app

http_server = HTTPServer(WSGIContainer(app))
# Listen on port 6000. (Original comment said "Flask's default port",
# but Flask's built-in default is actually 5000.)
http_server.listen(6000)
IOLoop.instance().start()
|
[
"1318394945@qq.com"
] |
1318394945@qq.com
|
bed79e548883a0b395c3d0dee2bf341e7b9efcbc
|
e6c65e2e354336a4bea5b6a4ccbccd3682915fe2
|
/out-bin/py/google/fhir/models/model_test.runfiles/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/eager/test.py
|
8cc80312cb261946dc8a72d181a77028ee729bfb
|
[
"Apache-2.0"
] |
permissive
|
rasalt/fhir-datalab
|
c30ab773d84983dd04a37e9d0ddec8bf2824b8a4
|
3e329fc8b4226d3e3a4a7c23c306a86e7a9ea0de
|
refs/heads/master
| 2021-10-09T05:51:04.593416
| 2018-12-21T18:11:03
| 2018-12-22T05:38:32
| 162,744,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
/home/rkharwar/.cache/bazel/_bazel_rkharwar/0ddaa3627472ad9d1367a008236ce2f5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/eager/test.py
|
[
"ruchika.kharwar@gmail.com"
] |
ruchika.kharwar@gmail.com
|
56edc8600a3adfcba9494a4bebbaa47a16fef0fd
|
62a36ff985ba59766fe7f74e0df92f574372b982
|
/meds/current_session.py
|
0da738e45b4c00f4ef4a0445578ec0fd64f34a93
|
[] |
no_license
|
brandones/ces-metadata-util
|
389ca5324ab2b2761795e0cbd42928f9a0739357
|
88f5f5b88bd3930ffb7a995bb5482d8187f2525a
|
refs/heads/master
| 2021-06-26T04:08:36.474764
| 2019-07-04T05:24:24
| 2019-07-04T05:24:24
| 148,912,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,544
|
py
|
# coding: utf-8
# NOTE(review): this file is a saved IPython session transcript (%save), not a
# runnable script. Several lines are typos or invalid syntax that the author
# retried interactively (e.g. pd.load_csv, `!`-negation, mismatched parens,
# .starts_with / .startswith on a Series). Kept verbatim as a record.
import pandas as dp
import pandas as pd
pd.load_csv('results/meds-ces.csv')
pd.read_csv('results/meds-ces.csv')
ces = pd.read_csv('results/meds-ces.csv')
extant_ciel = pd.read_csv('input/ciel-in-concepts-dict.csv')
extant_ciel
any(extant_ciel[1])
extant_ciel[1]
extant_ciel[0]
extant_ciel.columns.values
extant_ciel = pd.read_csv('input/ciel-in-concepts-dict.csv')
extant_ciel.columns.values
extant_ciel["voided"]
list(extant_ciel["voided"])
any(list(extant_ciel["voided"]))
# Keep only non-voided CIEL concepts.
extant_ciel = extant_ciel[extant_ciel["voided"] == 0]
extant_ciel
ces = ces[ces["concept"].starts_with("CIEL")]
ces = ces[ces.concept.starts_with("CIEL")]
ces = ces[ces.concept.startswith("CIEL")]
ces = ces[ces.concept.str.startswith("CIEL")]
ces = ces[!ces["concept"].isna())
ces = ces[~ces["concept"].isna())
ces = ces[~ces["concept"].isna()]
ces = ces[ces.concept.startswith("CIEL")]
ces = ces[ces.concept.str.startswith("CIEL")]
ces
# CES drugs whose CIEL id is not already in the concept dictionary.
ces_not_in_dict = ces[ces.concept.str.replace('CIEL:', '') not in extant_ciel]
ces_not_in_dict = ces[~ces.concept.str.replace('CIEL:', '').isin(extant_ciel["ID"])]
ces_not_in_dict
ces_not_in_dict.to_csv('results/ces-drugs-from-ciel.csv')
ssa = ssa[~ssa['concept'].isna()]
ssa
ssa = pd.read_csv('results/meds-ssa.csv')
ssa
ssa = ssa[~ssa['concept'].isna()]
ssa = ssa[ssa.concept.str.startswith('CIEL')]
ssa
ssa.concept
ssa_not_in_dict = ssa[~ssa.concept.str.replace("CIEL:", "").isin(extant_ciel.ID)]
ssa_not_in_dict
ssa_not_in_dict.to_csv("results/ssa-drugs-from-ciel.csv")
ces_not_in_dict.concept.unique()
ces_not_in_dict.concept.unique().to_csv()
pd.DataFrame(ces_not_in_dict.concept.unique()).to_csv('results/ssa-ciel-needs.csv')
pd.DataFrame(ces_not_in_dict.concept.unique()).to_csv('results/ces-ciel-needs.csv')
pd.DataFrame(ssa_not_in_dict.concept.unique()).to_csv('results/ssa-ciel-needs.csv')
get_ipython().run_line_magic('save', 'current_session')
get_ipython().run_line_magic('save', 'current_session 1-48')
# Combine the missing-concept lists from both sources.
all_not_in_dict = ces_not_in_dict["concept"] + ssa_not_in_dict["concept"]
all_not_in_dict
all_not_in_dict = ces_not_in_dict["concept"].concat(ssa_not_in_dict["concept"])
all_not_in_dict = pd.concat([ces_not_in_dict["concept"], ssa_not_in_dict["concept"]])
all_not_in_dict
all_not_in_dict.unique()
all_not_in_dict.unique()
pd.DataFrame(all_not_in_dict.unique()).to_csv("results/ciel-needs.csv")
# "Ready" rows: those whose concepts already exist in the dictionary.
ces_ready = ces[~ces["concept"].isin(all_not_in_dict)]
ces_ready
ces_ready.to_csv("results/ces-ready.csv")
ssa_ready = ssa[~ssa["concept"].isin(all_not_in_dict)]
ssa_ready.to_csv("results/ssa-ready.csv")
|
[
"bistenes@gmail.com"
] |
bistenes@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.