| blob_id (string, 40) | directory_id (string, 40) | path (string, 2-616) | content_id (string, 40) | detected_licenses (list, 0-69) | license_type (string, 2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, 2-10.3M) | authors (list, 1) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
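A minimal sketch of the header schema above as a Python mapping from column name to dtype, for readers who want to check rows programmatically; the type strings mirror the Arrow-style names in the header and are illustrative only, not an official loader for this dataset.
# Illustrative only: column -> dtype map transcribed from the header row above.
ROW_SCHEMA = {
    "blob_id": "string",                  # 40-char id
    "directory_id": "string",             # 40-char id
    "path": "string",                     # 2-616 chars
    "content_id": "string",               # 40-char id
    "detected_licenses": "list<string>",  # 0-69 entries
    "license_type": "string",             # 2 classes (e.g. "permissive", "no_license")
    "repo_name": "string",
    "snapshot_id": "string",
    "revision_id": "string",
    "branch_name": "string",
    "visit_date": "timestamp[us]",
    "revision_date": "timestamp[us]",
    "committer_date": "timestamp[us]",
    "github_id": "int64",                 # nullable
    "star_events_count": "int64",
    "fork_events_count": "int64",
    "gha_license_id": "string",           # 23 classes, nullable
    "gha_event_created_at": "timestamp[us]",
    "gha_created_at": "timestamp[us]",
    "gha_language": "string",             # 213 classes, nullable
    "src_encoding": "string",             # 30 classes
    "language": "string",                 # single class ("Python" in the rows below)
    "is_vendor": "bool",
    "is_generated": "bool",
    "length_bytes": "int64",
    "extension": "string",                # 246 classes
    "content": "string",                  # full source file text
    "authors": "list<string>",            # one entry per row
    "author_id": "string",
}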
88765ffc6d2e5ee1831ec76273004b16ccb63d99
|
4890476a3327f961088f84fc55070a9fdfe9bf28
|
/Globals/InterpolatorConfig.py
|
8fe764b73b9a908bd8f58fb3c04980043e9acf3e
|
[] |
no_license
|
HeylonNHP/RIFE-Colab
|
57e96dca1bbf595df617537bdf1dd6c26b2e1459
|
4cc5e2a7f6592b638e7bf1285b5b31297b008c8f
|
refs/heads/main
| 2023-04-28T08:56:15.895203
| 2022-12-01T12:11:47
| 2022-12-01T12:11:47
| 324,138,048
| 41
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,370
|
py
|
class InterpolatorConfig:
_interpolators = ["RIFE"]
_interpolator = "RIFE"
_interpolationFactor = 2
_loopable = False
_mode = 1
_clearPngs = True
_nonLocalPngs = True
_scenechangeSensitivity = 0.20
_mpdecimateSensitivity = "64*12,64*8,0.33"
_enableMpdecimate = True
_useAccurateFPS = True
_accountForDuplicateFrames = False
_UhdScaleFactor: float = 0.5
_mode3TargetFPSEnabled: bool = False
_mode3TargetFPSValue: float = 60
_backupThreadStartLimit = -1
_exitOnBackupThreadLimit = False
def setInterpolationFactor(self, interpolationFactor: int):
self._interpolationFactor = interpolationFactor
def getInterpolationFactor(self):
return self._interpolationFactor
def setLoopable(self, loopable: bool):
self._loopable = loopable
def getLoopable(self):
return self._loopable
def setMode(self, mode: int):
modes = [1, 3, 4]
assert mode in modes
self._mode = mode
def getMode(self):
return self._mode
def setClearPngs(self, clearPngs: bool):
self._clearPngs = clearPngs
def getClearPngs(self):
return self._clearPngs
def setNonlocalPngs(self, nonlocalpngs: bool):
self._nonLocalPngs = nonlocalpngs
def getNonlocalPngs(self):
return self._nonLocalPngs
def setScenechangeSensitivity(self, sensitivity: float):
assert 1 >= sensitivity >= 0
self._scenechangeSensitivity = sensitivity
def getScenechangeSensitivity(self):
return self._scenechangeSensitivity
def enableMpdecimate(self, enable):
self._enableMpdecimate = enable
def getMpdecimatedEnabled(self):
return self._enableMpdecimate
def setMpdecimateSensitivity(self, sensitivity: str):
self._mpdecimateSensitivity = sensitivity
def getMpdecimateSensitivity(self):
return self._mpdecimateSensitivity
def setUseAccurateFPS(self, enable: bool):
self._useAccurateFPS = enable
def getUseAccurateFPS(self):
return self._useAccurateFPS
def setAccountForDuplicateFrames(self, enable: bool):
self._accountForDuplicateFrames = enable
def getAccountForDuplicateFrames(self):
return self._accountForDuplicateFrames
def setUhdScale(self, scaleFactor: float):
self._UhdScaleFactor = scaleFactor
def getUhdScale(self):
return self._UhdScaleFactor
def setMode3TargetFPS(self, enable: bool, value: float):
self._mode3TargetFPSEnabled = enable
self._mode3TargetFPSValue = value
def getMode3TargetFPSEnabled(self):
return self._mode3TargetFPSEnabled
def getMode3TargetFPSValue(self):
return self._mode3TargetFPSValue
def setInterpolator(self, interpolator: str):
self._interpolator = interpolator
def getInterpolator(self):
return self._interpolator
def setBackupThreadStartLimit(self, limit: int):
self._backupThreadStartLimit = limit
def getBackupThreadStartLimit(self):
return self._backupThreadStartLimit
def setExitOnBackupThreadLimit(self, exit: bool):
self._exitOnBackupThreadLimit = exit
if not exit:
self.setBackupThreadStartLimit(-1)
def getExitOnBackupThreadLimit(self):
return self._exitOnBackupThreadLimit
|
[
"heylon96@hotmail.com"
] |
heylon96@hotmail.com
|
bd37cf16ee9e512ade1ab66de0a4fc96f3ee351e
|
0ba3a5afc0530ed33a40ccd91c16c61759ae6eaa
|
/src/manage.py
|
9fa5e680576cc736659086628a4c8ef6090e66d6
|
[] |
no_license
|
dusano/skram-si
|
c7e15e65548b2a65701eb25693be997c5cded112
|
8efc8723c16219913137de8ac59ff673880b4a64
|
refs/heads/master
| 2016-09-05T17:02:01.690806
| 2009-10-20T14:02:20
| 2009-10-20T14:02:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
#!/usr/bin/env python
from appengine_django import InstallAppengineHelperForDjango
from django.core.management import execute_manager
InstallAppengineHelperForDjango()
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
|
[
"dusano@10.2.10.129"
] |
dusano@10.2.10.129
|
fc66ce7eef4204e29af5b1ed9877349c975bd6e5
|
714ed39eacf82ea38f88049004d5f4a33b36b2e6
|
/app01/admin.py
|
a633ec7062e47583ae9e0bc605857feed3c99012
|
[] |
no_license
|
000ze/blogplus
|
084321edcdafef6699ecc2b5b66022133400e94c
|
4fafedcdf0d4051a31672bfaf57a7975ca58290b
|
refs/heads/master
| 2020-07-06T12:54:49.103609
| 2019-08-18T15:45:11
| 2019-08-18T15:45:11
| 203,024,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 237
|
py
|
from django.contrib import admin
# Register your models here.
from app01 import models
admin.site.register(models.UserInfo)
admin.site.register(models.Article)
admin.site.register(models.Blog)
admin.site.register(models.ArticleDetail)
|
[
"1413511414@qq.com"
] |
1413511414@qq.com
|
06fa170ccd418451d8d7b406afe2929d0fbb78fc
|
cb16a721c2c1323fbaa76e97c9e29c5b45cf6cd9
|
/SCRIPTS/spherical_grating_calculator1.py
|
aa485989b1f54baf6ae62f004c3f03563ddcb1be
|
[
"MIT"
] |
permissive
|
JoelBNU/ShadowOui-Tutorial
|
c11a1907dfded9233910aaf0c7993daf492e70dd
|
4629d896c1f02f811d5a1b491f6fca1a7c67a70e
|
refs/heads/master
| 2022-04-19T07:39:16.338099
| 2020-04-16T19:50:57
| 2020-04-16T19:50:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,028
|
py
|
import numpy as np
#
#
#
r = 30.0
theta_deg = 88.2
d0 = 1.0/800
E0 = 1000.0
m = -1
#
# inputs
#
theta = theta_deg * np.pi / 180
print("------------- INPUTS ----------------------")
print("theta = %f deg = %f rad"%(theta_deg,theta))
print("1/d0 = %f lines/mm"%(1/d0))
print("r = %f m"%(r))
print("order = %d m"%(m))
print("photon energy = %f eV"%(E0))
#
# calculations
#
lambda_A = 12398.0/E0
lambda_mm = lambda_A * 1e-7
beta = np.arcsin(m*lambda_mm/2/d0/np.cos(theta)) - theta
alpha = 2*theta + beta
R = r / np.cos(alpha)
rp = R * np.cos(beta)
#
# results
#
print("------------- OUTPUTS ----------------------")
print("Lambda = %f A = %g mm "%(lambda_A,lambda_mm))
print("alpha=%f deg, beta=%f deg"%(alpha*180/np.pi,beta*180/np.pi))
print("R=%f, r=%f, r'=%f"%(R,r,rp))
deltaLambda = d0/m*35e-3/(rp*1e3)*np.cos(beta)
print("estimated Delta Lambda = %f A"%(deltaLambda*1e7))
print("Resolving power = %f "%(lambda_mm/deltaLambda))
print("estimated focal size FWHM = %f um"%(2.35*15*np.cos(alpha)/np.cos(beta)*rp/r))
|
[
"srio@esrf.eu"
] |
srio@esrf.eu
|
3672d80c0bc812c42ce0a7dbdfdb3012e013ef46
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nndrift.py
|
cad8168819fddaa2056bd7c0c319f562f787d4d2
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619
| 2015-09-23T11:54:06
| 2015-09-23T11:54:06
| 42,749,205
| 2
| 3
| null | 2015-09-23T11:54:07
| 2015-09-18T22:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,213
|
py
|
ii = [('EmerRN.py', 1), ('CookGHP3.py', 1), ('LyelCPG2.py', 23), ('CoolWHM2.py', 2), ('GodwWSL2.py', 1), ('RennJIT.py', 1), ('AubePRP2.py', 2), ('MarrFDI3.py', 1), ('BailJD2.py', 1), ('ChalTPW2.py', 3), ('FitzRNS3.py', 6), ('ClarGE2.py', 4), ('CarlTFR.py', 6), ('LyttELD.py', 1), ('AinsWRR3.py', 1), ('KiddJAE.py', 1), ('BailJD1.py', 3), ('CoolWHM.py', 1), ('CrokTPS.py', 2), ('ClarGE.py', 1), ('BuckWGM.py', 32), ('LyelCPG.py', 61), ('GilmCRS.py', 1), ('CrocDNL.py', 3), ('MedwTAI.py', 4), ('FerrSDO2.py', 1), ('NewmJLP.py', 5), ('GodwWLN.py', 1), ('KirbWPW2.py', 1), ('SoutRD2.py', 1), ('BackGNE.py', 132), ('LeakWTI.py', 1), ('MedwTAI2.py', 1), ('BuckWGM2.py', 3), ('MereHHB3.py', 1), ('HowiWRL2.py', 5), ('BailJD3.py', 1), ('WilkJMC.py', 6), ('MartHRW.py', 1), ('FitzRNS4.py', 25), ('CoolWHM3.py', 17), ('FitzRNS.py', 16), ('LyttELD3.py', 1), ('FerrSDO.py', 1), ('RoscTTI.py', 1), ('StorJCC.py', 1), ('LewiMJW.py', 2), ('BellCHM.py', 1), ('AinsWRR2.py', 1), ('ClarGE3.py', 1), ('RogeSIP.py', 2), ('MartHRW2.py', 1), ('FitzRNS2.py', 40), ('DwigTHH.py', 1), ('NortSTC.py', 3), ('BowrJMM2.py', 1), ('LyelCPG3.py', 33), ('BeckWRE.py', 1), ('TaylIF.py', 3), ('ChalTPW.py', 1), ('KirbWPW.py', 1), ('HowiWRL.py', 1)]
|
[
"varunwachaspati@gmail.com"
] |
varunwachaspati@gmail.com
|
f8d1df13a32b152ef6e5c1e221284e1850dc7044
|
5deac722170f2935eded633e817e82c7560df23e
|
/env/bin/pip3.5
|
49101450dc50d9646e32e7f41846c9487837d95f
|
[] |
no_license
|
ajalascuna/midtermlascuna
|
dd3f260d98ba0d2d10ab6aa6e1f986438f660d4f
|
73aee29b62222b7caa66d9595148b162d0203715
|
refs/heads/master
| 2020-04-19T04:13:43.215282
| 2019-01-09T19:18:31
| 2019-01-09T19:18:31
| 167,956,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
5
|
#!/home/ajalascuna/midtermquiz/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"ajalascuna@addu.edu.ph"
] |
ajalascuna@addu.edu.ph
|
c30abdde76f9a5210d02716031d5b3093b7290f9
|
87cfca0f1062fb14633d00c5683a45e9d6e6b4ff
|
/05.Function.py
|
29cc1bf35380872fdc062aded5262bb82b721556
|
[
"Apache-2.0"
] |
permissive
|
r569594043/PythonBeginner
|
2d00604b7165c5d4cad9f5ce27831acfaf09c3f1
|
69b38e3bc99de50530353809c3d501db000314ae
|
refs/heads/master
| 2020-09-12T14:34:48.322057
| 2014-01-15T05:58:29
| 2014-01-15T05:58:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,343
|
py
|
#-*- coding: utf-8 -*-
'''
Functions
See Also: http://docs.python.org/3/tutorial/controlflow.html#defining-functions
http://docs.python.org/3/tutorial/controlflow.html#more-on-defining-functions
'''
# Error
#def func():
def func():
pass
def func(num, num1=1, num2=2):
print(num, num1, num2)
func(1, 3, 4) # 1 3 4
func(5) # 5 1 2
# Error
#func()
def func(**args):
for k, v in args.items():
print('key: ' + k, 'value: ' + v)
for k in args.keys():
print('key: ' + k, 'value: ' + args[k])
func(name = "rxb", age = "24")
func(**{"name": "rxb", "age": "24"})
def func(name, age):
print('name: ' + name, 'age: ' + age)
people = {"name": "rxb", "age": "24"}
func(**people) # name: rxb age: 24
def func(num, *args):
print(num)
for a in args:
print(a)
func(1, 2, 3, 4, 5, 6)
def func(num, num1):
print(num, num1)
func(num1 = 2, num = 1) # 1 2
d = {
"num": 3,
"num1": 4
}
func(**d) # 3 4
t = (4, 5)
func(*t) # 4 5
def func():
'''
The documentation of the func
'''
print("func")
print(func.__doc__)
l = lambda num1, num2: num1 + num2
print(l(2, 3)) # 5
def func2(func, num1, num2):
return func(num1, num2)
def func(num1, num2):
return num1 + num2
print(func2(func, 3, 4)) # 7
print(func2(lambda a, b: a - b, 7, 4)) # 3
|
[
"rxb123b@qq.com"
] |
rxb123b@qq.com
|
8036381aacef7ace854f8b2a4789c058d384c77f
|
788dd0c4d3a72bf259b09130edb9cd8a3c5b5dfd
|
/animation.py
|
e2592f7539f19843cb7957a64c85cadc43f12751
|
[] |
no_license
|
pacmancode/oldcode-python27
|
8c6eb9676420f17655ea20db39872c60a7416c38
|
0ce4ca0c1664663f388c60055108014a6199d471
|
refs/heads/master
| 2020-07-31T01:08:04.663785
| 2019-09-23T19:12:46
| 2019-09-23T19:12:46
| 210,428,272
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,243
|
py
|
class Animation(object):
def __init__(self, name):
self.name = name
self.frames = []
self.col = 0
self.forward = True
self.speed = 0
self.dt = 0
self.finished = False
def addFrame(self, frame):
self.frames.append(frame)
def getFrame(self):
return self.frames[self.col]
def nextFrame(self, dt):
self.dt += dt
if self.dt >= 1.0 / self.speed:
if self.forward:
self.col += 1
else:
self.col -= 1
self.dt = 0
def loop(self, dt):
self.nextFrame(dt)
if self.forward:
if self.col == len(self.frames):
self.col = 0
else:
if self.col == -1:
self.col = len(self.frames) - 1
def onePass(self, dt):
self.nextFrame(dt)
if self.forward:
if self.col == len(self.frames):
self.col = len(self.frames) - 1
self.finished = True
else:
if self.col == -1:
self.col = 0
self.finished = True
def ping(self, dt):
self.nextFrame(dt)
if self.col == len(self.frames):
self.forward = False
self.col -= 2
elif self.col == -1:
self.forward = True
self.col = 1
class AnimationGroup(object):
def __init__(self):
self.animations = []
self.animation = None
self.col = 0
def add(self, animation):
self.animations.append(animation)
def setAnimation(self, name, col):
self.animation = self.getAnimation(name)
self.animation.col = col
def getAnimation(self, name):
for anim in self.animations:
if anim.name == name:
return anim
return None
def getImage(self, frame):
return self.animation.frames[frame]
def loop(self, dt):
self.animation.loop(dt)
return self.animation.getFrame()
def onePass(self, dt):
self.animation.onePass(dt)
return self.animation.getFrame()
def ping(self, dt):
self.animation.ping(dt)
return self.animation.getFrame()
|
[
"jrichards@secureopensolutions.com"
] |
jrichards@secureopensolutions.com
|
1281775fb7304ddf8b2eb0ea08b2564a95409475
|
63935c746e3c34162842f9320a90b58bf7ba0d23
|
/pyFileFinder/__init__.py
|
a0134e944835b1d680fb3126ea082d55d1213369
|
[
"MIT"
] |
permissive
|
20centcroak/pyFileFinder
|
b62b73c4c58a4ddb3bbad1ec5591f68a963135e9
|
bb9499f8ba9f2803e862373ebc97d3bb6c0f4b97
|
refs/heads/main
| 2023-08-10T19:51:17.742819
| 2021-09-28T15:37:31
| 2021-09-28T15:37:31
| 301,203,570
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38
|
py
|
from pyFileFinder.finder import Finder
|
[
"vpaveau@outook.com"
] |
vpaveau@outook.com
|
7aeb425aab67befbdb125816c194891556d4f865
|
7a86b717b9783ed11813375297b02bd8ccdca1ba
|
/bin/fdh.py
|
9c2f5aa9fd9e7582aded32cd9a6dca34e3c735b7
|
[] |
no_license
|
zaurky/flickr-download-helper
|
c5a16a42e833fce0fc3a37a88e2b6f231193fd35
|
543a00a7a40ba21755e8e17cdc5450d7e9e038b6
|
refs/heads/master
| 2021-01-10T20:24:56.214285
| 2014-05-30T16:04:14
| 2014-05-30T16:04:14
| 1,831,446
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,001
|
py
|
#!/usr/bin/python
"""
flickr_download_helper.py
This small program was developed to help retrieve a full batch of photos from flickr.
For a description of the parameters, please read flickr_download_helper.config or start the program with the --help flag.
"""
import sys
import traceback
import flickr_download_helper
from flickr_download_helper.logger import Logger
from flickr_download_helper.config import OPT
if __name__ == "__main__":
try:
ret, num = flickr_download_helper.main()
sys.exit(ret)
except:
info = sys.exc_info()
if OPT.debug:
try:
Logger().error(info[1])
Logger().print_tb(info[2])
except:
print info
print info[1]
traceback.print_tb(info[2])
else:
try:
Logger().error(info[1])
except:
print info[1]
traceback.print_tb(info[2])
sys.exit(-1)
|
[
"zaurky@zeb.re"
] |
zaurky@zeb.re
|
e4e3961fcf038e77f898d134bfcfaa27cd7b62ed
|
52e4f0785bccebe1f862c59e2c49027c4cfaed4d
|
/049_group_anagrams.py
|
f5ebf57f5d5f64752b5a8e6c8fbc34d01e2efec7
|
[] |
no_license
|
toddbryant/leetcode
|
fab5ee07351dbf6fbdb8c8d68b87489ad94e78ea
|
fb96ab3162f8ee2063b48f4c74d1c1c3b44ed0c3
|
refs/heads/master
| 2022-09-12T01:07:59.553735
| 2022-07-22T20:48:23
| 2022-07-22T20:48:23
| 218,164,688
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 987
|
py
|
"""
Given an array of strings strs, group the anagrams together. You can return the answer in any order.
An Anagram is a word or phrase formed by rearranging the letters of a different word or phrase, typically using all the original letters exactly once.
"""
from typing import List
class Solution:
def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
        # Strategy:
        # build a key from each word's character counts (a sorted copy of the
        # word would also work as the key; see the commented-out line below)
        # map key --> list of original words
words_by_key = {}
def make_key(word):
counts = [0] * 26
for c in word:
counts[ord(c) - ord('a')] += 1
return tuple(counts)
for word in strs:
key = make_key(word)
# key = ''.join(sorted(word))
if key not in words_by_key:
words_by_key[key] = []
words_by_key[key].append(word)
return [word_list for _, word_list in words_by_key.items()]
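# Illustrative usage sketch (the example values are hypothetical, in the spirit of the
# problem statement above):
#   Solution().groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"])
#   -> [["eat", "tea", "ate"], ["tan", "nat"], ["bat"]]   (any order is accepted)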
|
[
"toddbryant@live.com"
] |
toddbryant@live.com
|
fa070f1b9885fd3f07e93a37204795baecfb1784
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_18178.py
|
c0fa06963601f1748528156c7d1517b0475862d4
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
# convert dictionary key and value to unicode
d = {'firstname' : 'Foo', 'lastname' : 'Bar'}
d = {unicode(k):unicode(v) for k,v in d.items() }
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
66980e80f0d2389c12e9fe0a0102aa543f9e2e3c
|
4f5ecb8e2f57453b0b47dbf97689ed14860df88a
|
/mysite/settings.py
|
4c9acbf551bcc9fd4fefd481593b35dfc64817e4
|
[] |
no_license
|
jonmid/mysite
|
dd438d6adccbaf72f703184dd4f72cb08d7979f6
|
46cabbfb1cfed1a620386db0e63eb9a0c8854e85
|
refs/heads/master
| 2020-03-28T13:20:22.652974
| 2018-09-17T16:38:54
| 2018-09-17T16:38:54
| 148,385,631
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,117
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j$#u6a^%*_!e7inla_k(ei%w@d)km$d*e*uz9(huncqmt!eveq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls.apps.PollsConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"jamideros@hotmail.com"
] |
jamideros@hotmail.com
|
2027c4c2eed0772be86f773eff59f08150b097fd
|
6a92ef61c9d7f43d9fbc1e78ced7ee9f91d73040
|
/tests/helpers/boring_model.py
|
6ef2518bbef11b52b539fb70c0f389dbdf4ec53a
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
kkoutini/pytorch-lightning
|
98af7b144835943f39fa998e0cf072b9bce72e82
|
e98b2d072966536f568df65d2e2c1b8d8f36a327
|
refs/heads/master
| 2021-12-15T01:40:10.085980
| 2021-03-25T13:53:56
| 2021-03-25T13:53:56
| 230,491,888
| 0
| 1
|
Apache-2.0
| 2019-12-27T18:05:24
| 2019-12-27T18:05:24
| null |
UTF-8
|
Python
| false
| false
| 4,924
|
py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torch.utils.data import DataLoader, Dataset, Subset
from pytorch_lightning import LightningDataModule, LightningModule
class RandomDictDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
a = self.data[index]
b = a + 2
return {'a': a, 'b': b}
def __len__(self):
return self.len
class RandomDictStringDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return {"id": str(index), "x": self.data[index]}
def __len__(self):
return self.len
class RandomDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return self.len
class BoringModel(LightningModule):
def __init__(self):
"""
Testing PL Module
Use as follows:
- subclass
- modify the behavior for what you want
class TestModel(BaseTestModel):
def training_step(...):
# do your own thing
or:
model = BaseTestModel()
model.training_epoch_end = None
"""
super().__init__()
self.layer = torch.nn.Linear(32, 2)
def forward(self, x):
return self.layer(x)
def loss(self, batch, prediction):
# An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls
return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))
def step(self, x):
x = self(x)
out = torch.nn.functional.mse_loss(x, torch.ones_like(x))
return out
def training_step(self, batch, batch_idx):
output = self(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def training_step_end(self, training_step_outputs):
return training_step_outputs
def training_epoch_end(self, outputs) -> None:
torch.stack([x["loss"] for x in outputs]).mean()
def validation_step(self, batch, batch_idx):
output = self(batch)
loss = self.loss(batch, output)
return {"x": loss}
def validation_epoch_end(self, outputs) -> None:
torch.stack([x['x'] for x in outputs]).mean()
def test_step(self, batch, batch_idx):
output = self(batch)
loss = self.loss(batch, output)
return {"y": loss}
def test_epoch_end(self, outputs) -> None:
torch.stack([x["y"] for x in outputs]).mean()
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
def train_dataloader(self):
return DataLoader(RandomDataset(32, 64))
def val_dataloader(self):
return DataLoader(RandomDataset(32, 64))
def test_dataloader(self):
return DataLoader(RandomDataset(32, 64))
class BoringDataModule(LightningDataModule):
def __init__(self, data_dir: str = './'):
super().__init__()
self.data_dir = data_dir
self.non_picklable = None
self.checkpoint_state: Optional[str] = None
def prepare_data(self):
self.random_full = RandomDataset(32, 192)
def setup(self, stage: Optional[str] = None):
if stage == "fit" or stage is None:
self.random_train = Subset(self.random_full, indices=range(64))
self.dims = self.random_train[0].shape
if stage in ("fit", "validate") or stage is None:
self.random_val = Subset(self.random_full, indices=range(64, 128))
if stage == "test" or stage is None:
self.random_test = Subset(self.random_full, indices=range(128, 192))
self.dims = getattr(self, "dims", self.random_test[0].shape)
def train_dataloader(self):
return DataLoader(self.random_train)
def val_dataloader(self):
return DataLoader(self.random_val)
def test_dataloader(self):
return DataLoader(self.random_test)
|
[
"noreply@github.com"
] |
kkoutini.noreply@github.com
|
30500571d05223db576fdd6001f47f192e21d484
|
11b9c2c8320dc2116f714835764c3c8d8586da93
|
/tools/tundra2-txml-converter.py
|
708fba2e05f97561308b0a419f69994eafc50cab
|
[
"Apache-2.0"
] |
permissive
|
Ilikia/naali
|
42e57a3ac01469ea66ace11e9020cfc9b824b8ea
|
9a2d06fb6b00cc5b5645794e9a5d4beab61bf56f
|
refs/heads/master
| 2021-01-15T16:11:08.557582
| 2011-08-22T07:37:01
| 2011-08-22T07:37:01
| 2,153,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,026
|
py
|
import sys
import os
"""
For this script to work you need the jack.mesh from /media/models in /data/assets.
You will have to manually put your txml file name down in the __main__ func:
--> fileName = "putFileNameHere.txml" <--
1. Run this script on your tundra 1.x txml.
2. This will create a new file with "_" appended before the .txml extension.
3. Open this file in your tundra 2.x server.
4. Find the "Jack" entity; he will be lurking at pos 0,0,0.
5. Shift+E for the entity editor, select Jack; if you don't see the manipulation visual aids, hit "tilde" (next to the "1" key).
6. Open the Placeable component and rotate as you like; the whole scene will rotate with Jack. -90 on x seems to do the trick.
7. Shift+S for the scene editor, right click -> save scene as...
Problems with this technique:
* You cannot remove or edit the grandparent Jack's placeable component;
  its scale needs to stay 1,1,1 or it will scale the whole scene (which might be handy if that is what you want).
* The Jack grandparent always needs to be there or the scene will flip back.
There are also some experimental placeable Transform manipulations that you can do to the txml, but they probably
won't work on every scene, as everything needs a common pivot point for the rotation.
So this is a temporary hack. The proper way is to export your models from e.g. Blender with the correct axis flip
built in and import again.
"""
def getFileContent(filePath):
try:
f = open(filePath, 'r')
c = f.read()
f.close()
return c
except IOError as e:
print "IOError on input file:", filePath
print e
return None
def saveNewContent(filePath, newContent):
try:
f = open(filePath, "w+")
f.write(newContent)
f.close()
except IOError as e:
print "IOError on writing to file:", filePath
print e
class Transform:
def __init__(self, value):
splitValue = value.split(",")
self.pos = {}
self.pos["x"] = splitValue[0]
self.pos["y"] = splitValue[1]
self.pos["z"] = splitValue[2]
self.rot = {}
self.rot["x"] = splitValue[3]
self.rot["y"] = splitValue[4]
self.rot["z"] = splitValue[5]
self.scale = {}
self.scale["x"] = splitValue[6]
self.scale["y"] = splitValue[7]
self.scale["z"] = splitValue[8]
def flip(self, vec, first, second):
temp = vec[first]
vec[first] = vec[second]
vec[second] = temp
def rotate(self, vec, axis, deg):
curDeg = float(vec[axis])
curDeg += deg
vec[axis] = str(curDeg)
def getNewValue(self):
line = self.pos["x"] + "," + self.pos["y"] + "," + self.pos["z"]
line += "," + self.rot["x"] + "," + self.rot["y"] + "," + self.rot["z"]
line += "," + self.scale["x"] + "," + self.scale["y"] + "," + self.scale["z"]
return line
if __name__ == "__main__":
fileName = "putFileNameHere.txml"
newFileName = fileName[:fileName.index(".txml")] + "_.txml"
c = getFileContent(fileName)
lines = c.splitlines(True)
parentName = "GeneratedGrandParentEntity"
parentEntXml = """ <entity id="1">
<component type="EC_Mesh" sync="1">
<attribute value="0,0,0,0,0,0,1,1,1" name="Transform"/>
<attribute value="local://Jack.mesh" name="Mesh ref"/>
<attribute value="" name="Skeleton ref"/>
<attribute value="" name="Mesh materials"/>
<attribute value="0" name="Draw distance"/>
<attribute value="false" name="Cast shadows"/>
</component>
<component type="EC_Placeable" sync="1">
<attribute value="0,0,-20,0,0,0,1,1,1" name="Transform"/>
<attribute value="false" name="Show bounding box"/>
<attribute value="true" name="Visible"/>
<attribute value="1" name="Selection layer"/>
<attribute value="" name="Parent entity ref"/>
<attribute value="" name="Parent bone name"/>
</component>
<component type="EC_Name" sync="1">
<attribute value="GeneratedGrandParentEntity" name="name"/>
<attribute value="" name="description"/>
</component>
</entity>
"""
out = ""
totalIndex = 0
expectParentAttr = False
for line in lines:
totalIndex += len(line)
if line.count("<scene>") > 0:
out += line
out += parentEntXml
continue
if line.count("component type=\"EC_Placeable\"") > 0:
out += line
compEnd = c.find("</component>", totalIndex)
iPlaceableEnd = c.find("name=\"Parent entity ref\"", totalIndex, compEnd)
# Found existing, update
if iPlaceableEnd > 0:
expectParentAttr = True
# did not find, generate
else:
out += " <attribute value=\"" + parentName + "\" name=\"Parent entity ref\"/>\n"
elif expectParentAttr:
if line.count("name=\"Parent entity ref\"") > 0:
expectParentAttr = False
start = line.find("\"")
end = line.find("\"", start+1)
value = line[start+1:end]
if value == "":
out += " <attribute value=\"" + parentName + "\" name=\"Parent entity ref\"/>\n"
else:
newLine = line[:start+1] + parentName + line[end:]
out += newLine
else:
out += line
else:
out += line
"""
if line.count("name=\"Transform\"") <= 0:
out += line
continue
start = line.find("\"")
if start == -1:
out += line
continue
end = line.find("\"", start+1)
value = line[start+1:end]
t = Transform(value)
t.flip(t.rot, "y", "z")
newValue = t.getNewValue()
out += line.replace(value, newValue)
"""
saveNewContent(newFileName, out)
|
[
"jonne.nauha@evocativi.com"
] |
jonne.nauha@evocativi.com
|
c340f4cdf3f2b24c8b031f33a536adccafda34b9
|
d0666366463ec75f12b8516f2aa3cb97adec2f8d
|
/python_codes/leetcode/weakest_row.py
|
b5be6af522f54ed019cf4e6f3702e974c45242ea
|
[] |
no_license
|
RamSinha/MyCode_Practices
|
f45fcd447dc5efd456da5cedee3f3d8a843d2a57
|
403a6130ee791927dfdc2f2a46c1a112271bccb0
|
refs/heads/master
| 2022-12-12T11:13:03.073748
| 2022-05-06T09:35:01
| 2022-05-06T09:35:01
| 20,994,924
| 0
| 1
| null | 2022-12-06T00:03:46
| 2014-06-19T09:27:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,097
|
py
|
#!/usr/bin/python
def kWeakestRows(mat, k):
    # Note that there is a more concise solution just below. This code
    # avoids the use of advanced language features.
m = len(mat)
n = len(mat[0])
# Calculate the strength of each row.
strengths = []
for i, row in enumerate(mat):
strength = 0
for j in range(n):
if row[j] == 0: break
strength += 1
strengths.append((strength, i))
# Sort all the strengths. This will sort firstly by strength
# and secondly by index.
strengths.sort()
# Pull out and return the indexes of the smallest k entries.
indexes = []
for i in range(k):
indexes.append(strengths[i][1])
return indexes
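# The "more concise solution" referred to above is not included in this file; a sketch of
# what it could look like (illustrative, not from the original source):
def kWeakestRowsConcise(mat, k):
    # Soldiers (1s) precede civilians (0s), so a row's strength is simply its sum;
    # sorting the row indices by (strength, index) and slicing gives the k weakest rows.
    return sorted(range(len(mat)), key=lambda i: (sum(mat[i]), i))[:k]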
if __name__ == '__main__':
row = int(raw_input("enter number of rows "))
col = int(raw_input("enter number of columns "))
rows = []*row
for i in range(0, row):
rowvalues = map(lambda x : int(x), raw_input("enter values for rows {i}".format(i = i)).split(","))
rows.append(rowvalues)
print kWeakestRows(rows, 2)
|
[
"ram.sinha@careem.com"
] |
ram.sinha@careem.com
|
11a2b132db710a54b271d93c567961b45820f1d5
|
fd3193cd703656ea7f553209f69b366499f432fb
|
/NeuralNet/tf_SGD.py
|
eb09da3d0fc76a818fe4c88e9e1ef02156a466d7
|
[] |
no_license
|
mlomnitz/TensorFlow
|
812bf19549edad6e6b98ae00dff7830dd36e48fa
|
fb8fcb7c7523a07351c9ff9942ef146f9a576e09
|
refs/heads/master
| 2020-04-05T12:32:30.548507
| 2017-06-30T17:53:29
| 2017-06-30T17:53:29
| 95,128,615
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,174
|
py
|
# *******************************************************************************
# **
# ** Author: Michael Lomnitz (mrlomnitz@lbl.gov)
# ** Python module defining stochastic gradient descent model for use in
# ** tensorflow classifier
# **
# *******************************************************************************
# Import relevant modules
import numpy as np
import tensorflow as tf
class SGD(object):
def __init__(self,image_size,num_labels):
self.weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
self.biases = tf.Variable(tf.zeros([num_labels]))
#
def train(self,x, y):
logits = tf.matmul(x, self.weights) + self.biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
# Optimizer.
# We are going to find the minimum of this loss using gradient descent.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
return logits, loss, optimizer
def predict(self,x):
return tf.nn.softmax( tf.matmul(x,self.weights) + self.biases)
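# Illustrative usage sketch (hypothetical sizes; assumes the TF1 graph/session style the
# module above is written for):
#   x = tf.placeholder(tf.float32, shape=(None, 28 * 28))
#   y = tf.placeholder(tf.float32, shape=(None, 10))
#   model = SGD(image_size=28, num_labels=10)
#   logits, loss, optimizer = model.train(x, y)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       _, batch_loss = sess.run([optimizer, loss], feed_dict={x: x_batch, y: y_batch})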
|
[
"michaellomnitz@Michaels-MacBook-Pro.local"
] |
michaellomnitz@Michaels-MacBook-Pro.local
|
494754f3bba4e0f79b281e6380671c97500bbc39
|
ffab2010f61aa362a9adb98c3fe47a3d7bd6dd09
|
/cartridge/shop/management/commands/product_db.py
|
aaf2ae0a9f03fbf95c89b4485844a664abfa7394
|
[] |
no_license
|
aleksey-zhigulin/fireplace_shop_mezzanine_and_cartridge
|
d8a9e2b986857cc45da1a583b34009a6d0bfed7e
|
2d33e2cf31169f6e095418a31835f63b2b439025
|
refs/heads/master
| 2020-05-25T09:52:28.841786
| 2014-10-20T13:23:47
| 2014-10-20T13:23:47
| 19,139,936
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,908
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import unicodecsv as csv
import xlrd, xlwt
import os
import shutil
import sys
import datetime
import random
from optparse import make_option
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.db.models.fields import FieldDoesNotExist
from django.utils.translation import ugettext as _
from django.db.utils import IntegrityError
from mezzanine.conf import settings
from cartridge.shop.models import Product
from cartridge.shop.models import ProductOption
from cartridge.shop.models import ProductImage
from cartridge.shop.models import ProductVariation
from cartridge.shop.models import ProductTopka
from cartridge.shop.models import Category
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
# images get copied from this directory
LOCAL_IMAGE_DIR = settings.PROJECT_ROOT + "/img"
# images get copied to this directory under STATIC_ROOT
IMAGE_SUFFIXES = [".jpg", ".JPG", ".jpeg", ".JPEG", ".tif", ".gif", ".GIF", ".png", ".PNG"]
EMPTY_IMAGE_ENTRIES = ["Please add", "N/A", ""]
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M"
PRODUCT_TYPE = "ProductTopka"
IMAGE = "Изображения"
SITE_MEDIA_IMAGE_DIR = _("product")
PRODUCT_IMAGE_DIR = os.path.join(settings.MEDIA_ROOT, SITE_MEDIA_IMAGE_DIR)
TYPE_CHOICES = {choice:id for id, choice in settings.SHOP_OPTION_TYPE_CHOICES}
# TODO: Make sure no options conflict with other fieldnames.
fieldnames = TYPE_CHOICES.keys()
class Command(BaseCommand):
args = '--import/--export <csv_file>'
help = _('Import/Export products from a csv file.')
option_list = BaseCommand.option_list + (
make_option('--import-xls',
action='store_true',
dest='import-xls',
default=False,
help=_('Import products from xls file.')),
make_option('--export-xls',
action='store_true',
dest='export-xls',
default=False,
help=_('Export products to xls file.')),
make_option('--export-csv',
action='store_true',
dest='export-csv',
default=False,
help=_('Export products to csv file.')),
make_option('--import-csv',
action='store_true',
dest='import-csv',
default=False,
help=_('Import products from csv file.')),
)
def handle(self, *args, **options):
if sys.version_info[0] == 3:
raise CommandError("Python 3 not supported")
try:
file = args[0]
except IndexError:
raise CommandError(_("Please provide csv or xls file to import"))
if options['import-csv']:
import_csv(file)
elif options['export-csv']:
export_products(file)
elif options['import-xls']:
import_xls(file)
elif options['export-xls']:
export_xls(file)
def _product_from_row(row, value):
# TODO: title
product, created = eval("%s.objects.get_or_create(title='%s')" % (PRODUCT_TYPE, value('title')))
product.content = value('content')
# product.description = value('description')
# TODO: set the 2 below from spreadsheet.
product.status = CONTENT_STATUS_PUBLISHED
product.available = True
extra_fields = [(f.name, eval("%s._meta.get_field('%s').verbose_name.title()" % (PRODUCT_TYPE, f.name)))
for f in product._meta.fields if f not in Product._meta.fields]
for name, verbose in extra_fields:
if name != 'product_ptr':
exec "product.%s = value('%s')" % (name, name)
for category in row['Категория'].split(","):
parent_category, created = Category.objects.get_or_create(title=category.split(" / ")[0])
for sub_category in category.split(" / ")[1:]:
cat, created = Category.objects.get_or_create(title=sub_category, parent=parent_category)
parent_category = cat
product.categories.add(parent_category)
return product
def _make_image(image_str, product):
# if image_str in EMPTY_IMAGE_ENTRIES:
# return None
# root, suffix = os.path.splitext(image_str)
# if suffix not in IMAGE_SUFFIXES:
# raise CommandError("INCORRECT SUFFIX: %s" % image_str)
# image_path = os.path.join(LOCAL_IMAGE_DIR, image_str)
# if not os.path.exists(image_path):
# raise CommandError("NO FILE %s" % image_path)
# shutil.copy(image_path, PRODUCT_IMAGE_DIR)
image, created = ProductImage.objects.get_or_create(
file="%s" % (os.path.join(SITE_MEDIA_IMAGE_DIR, image_str)),
description=image_str, # TODO: handle column for this.
product=product)
return image
def import_xls(xls_file):
if settings.DEBUG:
while Category.objects.count():
ids = Category.objects.values_list('pk', flat=True)[:100]
Category.objects.filter(pk__in = ids).delete()
while Product.objects.count():
ids = Product.objects.values_list('pk', flat=True)[:100]
Product.objects.filter(pk__in = ids).delete()
while ProductVariation.objects.count():
ids = ProductVariation.objects.values_list('pk', flat=True)[:100]
ProductVariation.objects.filter(pk__in = ids).delete()
while ProductImage.objects.count():
ids = ProductImage.objects.values_list('pk', flat=True)[:100]
ProductImage.objects.filter(pk__in = ids).delete()
while ProductOption.objects.count():
ids = ProductOption.objects.values_list('pk', flat=True)[:100]
ProductOption.objects.filter(pk__in = ids).delete()
eval("%s.objects.all().delete()" % PRODUCT_TYPE)
print(_("Importing .."))
for sheet in xlrd.open_workbook(xls_file).sheets():
for row_index in range(1, sheet.nrows):
row = {k: v for k, v in zip(
(sheet.cell(0, col_index).value for col_index in xrange(sheet.ncols)),
(sheet.cell(row_index, col_index).value for col_index in xrange(sheet.ncols))
)}
value = lambda s: row[eval("%s._meta.get_field('%s').verbose_name.title()" % (PRODUCT_TYPE, s))]
product = _product_from_row(row, value)
variation = ProductVariation.objects.create(
product=product,
)
variation.num_in_stock = 1000
if value('currency'):
variation.currency = value('currency')
if value('unit_price'):
variation.unit_price = value('unit_price')
for option in TYPE_CHOICES:
if row[option]:
name = "option%s" % TYPE_CHOICES[option]
setattr(variation, name, row[option])
new_option, created = ProductOption.objects.get_or_create(
type=TYPE_CHOICES[option],
name=row[option]
)
variation.save()
image = ''
for img in row[IMAGE].split(','):
try:
image = _make_image(img.strip()+'.jpg', product)
except CommandError:
print("CommandError: %s" % row[IMAGE])
if image:
variation.image = image
try:
product.variations.manage_empty()
product.variations.set_default_images([])
product.copy_default_variation()
product.save()
except IndexError:
print(value('title'))
print("Variations: %s" % ProductVariation.objects.all().count())
print("Products: %s" % eval("%s.objects.all().count()" % PRODUCT_TYPE))
# def export_xls(xls_file):
# print(_("Exporting .."))
# xls = xlwt.Workbook(encoding='utf-8')
# xls_sheet = xls.add_sheet('1')
#
# for field in fieldnames:
# xls_sheet.write(0, COLUMN[field], field)
# for row_index, pv in enumerate(ProductVariation.objects.all(), start=1):
# xls_sheet.write(row_index, COLUMN[TITLE], pv.product.title)
# xls_sheet.write(row_index, COLUMN[CONTENT], pv.product.content.strip('<p>').strip('</p>'))
# xls_sheet.write(row_index, COLUMN[DESCRIPTION], pv.product.description)
# xls_sheet.write(row_index, COLUMN[SKU], pv.sku)
# xls_sheet.write(row_index, COLUMN[IMAGE], unicode(pv.image))
# xls_sheet.write(row_index, COLUMN[CATEGORY] , max([unicode(i) for i in pv.product.categories.all()]))
#
# for option in TYPE_CHOICES:
# xls_sheet.write(row_index, COLUMN[option], getattr(pv, "option%s" % TYPE_CHOICES[option]))
#
# xls_sheet.write(row_index, COLUMN[NUM_IN_STOCK], pv.num_in_stock)
# xls_sheet.write(row_index, COLUMN[UNIT_PRICE], pv.unit_price)
# xls_sheet.write(row_index, COLUMN[SALE_PRICE], pv.sale_price)
# try:
# xls_sheet.write(row_index, COLUMN[SALE_START_DATE], pv.sale_from.strftime(DATE_FORMAT))
# xls_sheet.write(row_index, COLUMN[SALE_START_TIME], pv.sale_from.strftime(TIME_FORMAT))
# except AttributeError:
# pass
# try:
# xls_sheet.write(row_index, COLUMN[SALE_END_DATE], pv.sale_to.strftime(DATE_FORMAT))
# xls_sheet.write(row_index, COLUMN[SALE_END_TIME], pv.sale_to.strftime(TIME_FORMAT))
# except AttributeError:
# pass
# xls.save(xls_file)
#
# def export_csv(csv_file):
# print(_("Exporting .."))
# filehandle = open(csv_file, 'w')
# writer = csv.DictWriter(filehandle, delimiter=';', encoding='cp1251', fieldnames=fieldnames)
# headers = dict()
# for field in fieldnames:
# headers[field] = field
# writer.writerow(headers)
# for pv in ProductVariation.objects.all():
# row = dict()
# row[TITLE] = pv.product.title
# row[CONTENT] = pv.product.content.strip('<p>').strip('</p>')
# row[DESCRIPTION] = pv.product.description
# row[SKU] = pv.sku
# row[IMAGE] = pv.image
# row[CATEGORY] = ','.join([unicode(i) for i in pv.product.categories.all()])
#
# for option in TYPE_CHOICES:
# row[option] = getattr(pv, "option%s" % TYPE_CHOICES[option])
#
# row[NUM_IN_STOCK] = pv.num_in_stock
# row[UNIT_PRICE] = pv.unit_price
# row[SALE_PRICE] = pv.sale_price
# try:
# row[SALE_START_DATE] = pv.sale_from.strftime(DATE_FORMAT)
# row[SALE_START_TIME] = pv.sale_from.strftime(TIME_FORMAT)
# except AttributeError:
# pass
# try:
# row[SALE_END_DATE] = pv.sale_to.strftime(DATE_FORMAT)
# row[SALE_END_TIME] = pv.sale_to.strftime(TIME_FORMAT)
# except AttributeError:
# pass
# writer.writerow(row)
# filehandle.close()
#
|
[
"a.a.zhigulin@yandex.ru"
] |
a.a.zhigulin@yandex.ru
|
6a25d4d6d328e9c9e20a919af3c29624807e564e
|
31fb7c74b94e46a325e6b05501c6972a401cf423
|
/PYTHON/BASIC_PYTHON/수업내용/06/06-022.py
|
1acb02ba0306bc46a2fb34757a430458c72b4e81
|
[] |
no_license
|
superf2t/TIL
|
f2dacc30d6b89f3717c0190ac449730ef341f6a4
|
cadaaf952c44474bed9b8af71e70754f3dbf86fa
|
refs/heads/master
| 2022-04-10T13:55:24.019310
| 2019-12-12T11:15:31
| 2019-12-12T11:15:31
| 268,215,746
| 1
| 0
| null | 2020-05-31T05:32:46
| 2020-05-31T05:32:46
| null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
#06-022.py
import re
pattern = re.compile(r'[a-z]{2}')
ret = pattern.search('123abc123')
#ret = pattern.search('abcXde')
if ret:
print('Matched : ' + ret.group())
else:
print('NOT matched')
|
[
"noreply@github.com"
] |
superf2t.noreply@github.com
|
a488e2a0e9def915d59b71868f6f2957a4f6ebf6
|
5a9159cba858b007ec8946948e6badd0234fe429
|
/aocday23/aoc23a.py
|
dde2266c37bc3c01eb9e3a82418d9c5ec5a28cf4
|
[] |
no_license
|
chakradhar123/aoc2020
|
b9c7a7b6497a508603d132046a729d167ab8dc1e
|
c6dcd2db9b51e92e5453728a069817346d05d9df
|
refs/heads/main
| 2023-02-04T19:18:48.273202
| 2020-12-25T06:04:11
| 2020-12-25T06:04:11
| 317,446,950
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 791
|
py
|
s=[int(x) for x in list(input())]
moves=0
i=0
n=len(s)
m=max(s)
while moves!=100:
curr=s[i]
temparr=[s[(i+1)%n],s[(i+2)%n],s[(i+3)%n]]
p=i
s.pop((p+1)%len(s))
p=s.index(curr)
s.pop((p+1)%len(s))
p=s.index(curr)
s.pop((p+1)%len(s))
tempcurr=curr
while(True):
curr-=1
if(curr==0):
curr=m
if curr in s and curr not in temparr:
pos=s.index(curr)
s.insert((pos+1),temparr[2])
pos=s.index(curr)
s.insert((pos+1),temparr[1])
pos=s.index(curr)
s.insert((pos+1),temparr[0])
break
i=(s.index(tempcurr)+1)%len(s)
moves+=1
pos1=s.index(1)
print(''.join([str(x) for x in s[pos1+1:]])+''.join([str(x) for x in s[:pos1]]))
|
[
"chakradharvasurama@gmail.com"
] |
chakradharvasurama@gmail.com
|
11de8e05cca61b6ffd595bf66f7fe24c18151278
|
ea3f25d71d2bc15674f1222a7948764775b5d2e6
|
/lambada/tests/common.py
|
1fdeb4695102c0c0c956d3820cf748fdf73d704f
|
[
"Apache-2.0"
] |
permissive
|
Superpedestrian/lambada
|
bb671ffd8ed5e111e6a0a39b41df3ee658046eb9
|
adc4fad618f8e5383ca5cd9122e42f89079550f9
|
refs/heads/master
| 2021-07-12T02:45:43.257226
| 2016-10-24T14:55:22
| 2016-10-24T14:55:22
| 69,257,822
| 6
| 2
|
Apache-2.0
| 2021-03-25T21:39:47
| 2016-09-26T14:20:11
|
Python
|
UTF-8
|
Python
| false
| false
| 505
|
py
|
"""
Common test functions used across test files.
"""
import os
def make_fixture_path(folder, filename='lambda.py'):
"""
Make path to fixture using given args.
folder (str): Folder name that contains fixture(s)
filename (str): Filename to pass, if ``None`` will return folder
"""
path = os.path.abspath(
os.path.join(
os.path.dirname(__file__), 'fixtures', folder
)
)
if filename:
path = os.path.join(path, filename)
return path
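# Illustrative examples (folder name is hypothetical):
#   make_fixture_path('sample')                 -> .../tests/fixtures/sample/lambda.py
#   make_fixture_path('sample', filename=None)  -> .../tests/fixtures/sample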
|
[
"x@carsongee.com"
] |
x@carsongee.com
|
b29bd9952d1e42eb245ca6d98e7ca2b04729ec47
|
86e8fa4b5b3ef494c32efc8d9f92c27247317860
|
/Backspace String Compare-optimal.py
|
c81facf43f754a3b5dc5f149dc81a4adaa0febe7
|
[] |
no_license
|
hemeshwarkonduru/leetcode
|
21135b7585c6bbaf25351d4e8edaacdd5a8c1699
|
a8afa93ffb6f8e788ef5f9711e5dd2648c363043
|
refs/heads/master
| 2022-11-04T21:38:27.685658
| 2020-08-03T16:41:11
| 2020-08-03T16:41:11
| 284,752,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
class Solution:
def backspaceCompare(self, S: str, T: str) -> bool:
i=len(S)-1
j=len(T)-1
skip1=skip2=0
while(i>-1 or j>-1):
c=S[i] if i>=0 else ""
c1=T[j] if j>=0 else ""
if(c=='#'):
skip1+=1
i-=1
continue
if(c1=='#'):
skip2+=1
j-=1
continue
if(skip1>0):
skip1-=1
i-=1
continue
if(skip2>0):
skip2-=1
j-=1
continue
if(c!=c1):
return False
i-=1
j-=1
return (i<0 and j<0)
'''
This is the two-pointer approach: scan both strings from the end.
The skip counters track how many pending '#' backspaces still need to delete a character.
'''
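# Illustrative checks: "ab#c" and "ad#c" both reduce to "ac" -> True; "a#c" and "b"
# reduce to "c" and "b" -> False.
#   Solution().backspaceCompare("ab#c", "ad#c")  # True
#   Solution().backspaceCompare("a#c", "b")      # False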
|
[
"noreply@github.com"
] |
hemeshwarkonduru.noreply@github.com
|
1002d041306ded41191f4bc17fe4371066c97883
|
21a98cb39b51607fa150459d6e2afc79c2818cf0
|
/python_practice/class_python/python_oo.py
|
d4867c7807572c3ee6046c300f39354b022a6472
|
[] |
no_license
|
z1069867141/hogwarts_lg
|
bc038b39d688ce99357d24ed41fe05a63db06bfa
|
faf530b4a81e5c6aae0cf97628b085be708b913f
|
refs/heads/master
| 2023-05-28T20:17:23.864529
| 2021-06-16T02:33:05
| 2021-06-16T02:33:05
| 285,639,680
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
# Object-oriented programming
class House:
    # Static attribute -> a variable, i.e. a class variable: defined inside the class, outside any method
door = "red"
floor = "white"
    # Constructor: executed directly when the class is instantiated
def __init__(self):
        # Instance variables are created when the class is instantiated and are defined as "self.<name>"; their scope is every method of this class
self.door
self.yangtai = "大"
    # Dynamic attribute -> a method (function)
def sleep(self):
        # An ordinary (local) variable lives inside the class, inside a method, and is not prefixed with self.
print('房子是用来睡觉的')
def cook(self):
print("房子可以做饭吃")
# Instantiation -> variable = Class()
north_house = House()
china_house = House()
# Access the class variable
print(House.door)
House.door = "white"
north_house.door = "black"
print(north_house.door)
# What color is the blueprint's (the class House's) door?
print(House.door)
# What color is china_house.door?
print(china_house.door)
|
[
"919824370@qq.com"
] |
919824370@qq.com
|
21f724acdad82167ac50071d25b664cab8ce963a
|
adb4f695d8c392c62e005dda67a41dd5ab1fcb6f
|
/subida/tree/preprocessing.py
|
865e4d21d2f0e571e379fe9ad64fe6173d3fcbe3
|
[] |
no_license
|
luisbalru/PumpItUp-PC
|
93e3549856c48a7afc89c324ff6dbc30f5ee9d03
|
381d6d1802407e2db9f3d9c67ce6809443ac99d4
|
refs/heads/master
| 2022-04-05T13:55:53.660532
| 2020-02-18T17:33:11
| 2020-02-18T17:33:11
| 234,961,917
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,804
|
py
|
import numpy as np
import pandas as pd
## Data loading
train_dataset = pd.read_csv("../../data/training.csv")
train_labels = pd.read_csv("../../data/training-labels.csv")
test_dataset = pd.read_csv("../../data/test.csv")
## Conversion of date recorded to date data type
train_dataset['date_recorded'] = pd.to_datetime(train_dataset['date_recorded'])
test_dataset['date_recorded'] = pd.to_datetime(test_dataset['date_recorded'])
## Extraction of year and month of recording
train_dataset['year_recorded'] = train_dataset['date_recorded'].map(
lambda x: x.year
)
test_dataset['year_recorded'] = test_dataset['date_recorded'].map(
lambda x: x.year
)
train_dataset['month_recorded'] = train_dataset['date_recorded'].map(
lambda x: x.month
)
test_dataset['month_recorded'] = test_dataset['date_recorded'].map(
lambda x: x.month
)
## Selection of categorical vars (non numeric)
categorical_vars = train_dataset.select_dtypes(exclude=np.number)
## FEATURE ELIMINATION
## Variables selected a priori to be deleted
variables_to_drop = [
"scheme_name",
"recorded_by",
"region_code",
'amount_tsh',
'num_private'
]
## If a column has more than 100 different categories, it is discarded
for col in categorical_vars.columns:
if len(train_dataset[col].unique()) > 100:
variables_to_drop.append(col)
## Variable dropping
train_dataset.drop(columns=variables_to_drop, inplace=True)
test_dataset.drop(columns=variables_to_drop, inplace=True)
## MISSING VALUES IMPUTATION
## If the column is numeric, imputation with mean
## If the column is nominal, imputation with mode
fill_values = {}
for col in train_dataset.columns:
if np.issubdtype(train_dataset[col].dtype, np.number):
fill_val = np.mean(train_dataset[col])
else:
fill_val = train_dataset[col].mode()[0]
fill_values[col] = fill_val
train_dataset = train_dataset.fillna(value=fill_values)
test_dataset = test_dataset.fillna(value=fill_values)
## Imputation by class for construction year
train_dataset = pd.merge(train_dataset, train_labels)
## Mean of values greater than 0
fill_1 = np.mean(train_dataset.loc[
(train_dataset['construction_year'] > 0) &
(train_dataset['status_group'] == "functional"),
"construction_year"
])
fill_2 = np.mean(train_dataset.loc[
(train_dataset['construction_year'] > 0) &
(train_dataset['status_group'] == "non functional"),
"construction_year"
])
fill_3 = np.mean(train_dataset.loc[
(train_dataset['construction_year'] > 0) &
(train_dataset['status_group'] == "functional needs repair"),
"construction_year"
])
## Substitution of zeroes with the mean value
train_dataset.loc[
(train_dataset['construction_year'] == 0) &
(train_dataset['status_group'] == "functional"),
"construction_year"
] = fill_1
train_dataset.loc[
(train_dataset['construction_year'] == 0) &
(train_dataset['status_group'] == "non functional"),
"construction_year"
] = fill_2
train_dataset.loc[
(train_dataset['construction_year'] == 0) &
(train_dataset['status_group'] == "functional needs repair"),
"construction_year"
] = fill_3
## Precomputed values for test construction year with a trained model
test_construction_year = pd.read_csv("construction_year_test.csv")
test_dataset.loc[
test_dataset['construction_year'] == 0, 'construction_year'
] = test_construction_year['construction_year']
## Calculation of fountain age from year recorded and construction year
train_dataset['age'] = train_dataset['year_recorded'] - train_dataset[
'construction_year'
]
test_dataset['age'] = test_dataset['year_recorded'] - test_dataset[
'construction_year'
]
## Storing of data for training
train_dataset.to_csv("train-preprocessed.csv")
test_dataset.to_csv("test-preprocessed.csv")
|
[
"fluque1995@gmail.com"
] |
fluque1995@gmail.com
|
48a38fed26e0f936067d8ea8bc0bea6ac00ca437
|
e8e7438518680fd0db80cb1c467c745c8db740f2
|
/portCheck.py
|
7f793845bd60f5c531b15d9d8df06c42e36a341e
|
[] |
no_license
|
joerod/python
|
ef5c5184c4acd673e409fded329a4647875d1162
|
d8218338d43abdb6f6f552e234030aaeb93cdcac
|
refs/heads/master
| 2022-06-21T08:05:43.074238
| 2022-05-23T02:24:49
| 2022-05-23T02:24:49
| 23,096,216
| 1
| 1
| null | 2020-12-23T23:53:41
| 2014-08-19T03:58:53
|
Python
|
UTF-8
|
Python
| false
| false
| 663
|
py
|
import socket
import argparse
parser = argparse.ArgumentParser(
description='Python command line tool to check for open ports on a host',
epilog="python portCheck.py --host 192.168.1.221 --port 3389"
)
parser.add_argument("--host", required=True, help="IP of machine running to check")
parser.add_argument("--port", required=True, type=int, help="Port of machine to check")
args = parser.parse_args()
a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result_of_check = a_socket.connect_ex((args.host,args.port))
if result_of_check == 0:
print(f"Port {args.port} is open")
else:
print(f"Port {args.port} is not open")
a_socket.close()
|
[
"noreply@github.com"
] |
joerod.noreply@github.com
|
d6842d9077daa9def21f14f9375efd1e66cb673a
|
69ad085dc6bab4d48c4db336ccc2dee8589143b1
|
/predicting_stock_price/predicted_vs_reality.py
|
40610589059decb5736c29386bba16a1ee205e71
|
[] |
no_license
|
razisamuely/predict
|
9e1e88885aedcc1393fa49ac8b258f4f5ca66b9d
|
65e1a22bf62d1c5360d62849486e0b6d67b38ffc
|
refs/heads/main
| 2023-03-22T10:44:27.983779
| 2021-02-23T15:50:56
| 2021-02-23T15:50:56
| 305,181,446
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,264
|
py
|
# %%
from datetime import date, timedelta, datetime
from feature_engineering import featureEng
from data_generation import get_data
from train import Train, ClfTrain
import pandas as pd
import numpy as np
pd.set_option('display.max_rows', 500)
import pickle
from predict import Predict
from general_functions import next_week_full_train_behavior, weekly_correlations_to_csv
import matplotlib.pyplot as plt
import copy
import time
from general_functions import weekly_correlations_to_csv
import json
import seaborn as sns
sns.set_style("darkgrid")
# Read configs
configs = json.loads(open('configs.json', 'r').read())
for k, v in configs.items():
exec(f"{k} = '{v}'") if type(v) == str else exec(f"{k} = {v}")
NAME = configs["NAME"]
random_state_plot = configs["random_state_plot"]
test_size = configs["test_size"]
years_back = configs["years_back"]
threshold_days_in_week = configs["threshold_days_in_week"]
min_percentage_data = configs["min_percentage_data"]
min_days_in_week = configs["min_days_in_week"]
corr_threshold = configs["corr_threshold"]
corr_inter_threshold = configs["corr_inter_threshold"]
days_interval = configs["days_interval"]
models_path = configs["models_path"]
corr_inter_threshold_main = configs["corr_inter_threshold_main"]
date_reference = configs["date_reference"]
date_reference_end = configs["date_reference_end"]
correlation_path = "./weekly_diff_test"
# Read ticker list
df_tickers = pd.read_csv('./symbols/israeli_symbols_names.csv')
tickers = list(df_tickers['Symbol'])
with open("./symbols/clean_ticker_list.txt", "rb") as fp:
clean_ticker_list = pickle.load(fp)
print(f'number of tickers at the beginning {len(tickers)}')
print(f'number of tickers after validation {len(clean_ticker_list)}')
print(f'diff is = {len(tickers) - len(clean_ticker_list)}')
measurements_l = ['date_reference',
'next_week_price_full_train',
'predicted_stock_class',
'r2_test',
'r2_train',
'r2_train_full_train',
'rmse_train_full_train',
'False_p',
'True_p',
'predicted_diff_full_train',
'percentage_change_full_train'
]
measurements = {i: [] for i in measurements_l}
measurements
dfr = get_data(ticker_name=NAME, data_from_csv=1, path_from='raw_data')
dfc = get_data(ticker_name=NAME, data_from_csv=1, path_from='data')
df_weekly = get_data(NAME,
data_from_csv=True,
path_from='weekly_diff',
set_as_index=['first_day_in_week', 'last_day_in_week'],
index_type='object')
# %%
while date_reference < date_reference_end:
print(date_reference)
d = featureEng(NAME,
date_reference=date_reference,
years_back=years_back,
data_from_csv=True,
path_from='data')
# df = d.daily_diff()
# df = d.weekly_mean(ticker_name=NAME,
# df=df,
# start_date=date_reference,
# days_interval=days_interval,
# threshold_days_in_week=threshold_days_in_week
# )
    print(f'df_weekly.shape {df_weekly.shape}\ndf_weekly.index[-1] {df_weekly.index[-1]}\n'
          f'df_weekly.index[0] {df_weekly.index[0]}')
# Retrieve rows from given time period - (Cutting upper and lower tails)
same_date_last_year = str(datetime.strptime(date_reference, "%Y-%m-%d") - timedelta(days=round(years_back * 365)))
dates_first = df_weekly.reset_index()['first_day_in_week']
dates_last = df_weekly.reset_index()['last_day_in_week']
lower = dates_first[dates_first >= same_date_last_year].index[0]
upper = dates_last[dates_last <= date_reference].index[-1] + 1 # since its started from 0
df_weekly = df_weekly.iloc[lower:upper, :]
with open("./symbols/clean_ticker_list.txt", "rb") as fp: # Unpickling
clean_ticker_list = pickle.load(fp)
new_list = weekly_correlations_to_csv(tickers_list=clean_ticker_list,
years_back_data_generation=years_back,
start_date=date_reference,
days_interval=days_interval,
threshold_days_in_week=threshold_days_in_week,
path='./weekly_diff')
with open("new_list_rolled_tickes.txt", "w") as file:
file.write(str(new_list))
with open("new_list_rolled_tickes.txt", "r") as file:
clean_ticker_list = eval(file.readline())
df_corr, low_week_sampels_dict = d.weekly_correlation(df_weekly_mean=df_weekly,
tickers_list=clean_ticker_list,
date_reference=date_reference,
min_prcnt_data=min_percentage_data,
threshold=min_days_in_week,
path_from='weekly_diff',
set_as_index=['first_day_in_week', 'last_day_in_week']
)
start_time = time.time()
df_reg_full = d.reg_df(
ticker_name=NAME,
df_weekly=df_weekly,
df_corr=df_corr,
start_date=date_reference,
threshold=corr_threshold,
# activate_automated_rolling=True
)
print("\n--- %s seconds ---" % (time.time() - start_time), 'df_reg_full.shape = ', df_reg_full.shape)
start_time = time.time()
try:
df_reg_full = d.df_reg_int(df_reg=df_reg_full,
corr_inter_threshold=corr_inter_threshold,
corr_inter_threshold_main=corr_inter_threshold_main)
df_reg = copy.copy(df_reg_full[:-1])
inter_columns = [inter for inter in df_reg.columns if 'INT' in inter]
number_of_inter = len(inter_columns)
start = time.time()
train = Train(NAME,
df_reg=df_reg,
test_size=test_size,
path=models_path)
train_dict = train.df_filtered_dict
dict_reg_results = {'r2_test': train_dict['r2_test'],
'r2_train': train_dict['r2_train'],
'alpha': train_dict['alpha'],
'rmse_train': train_dict['rmse_train'],
'corra_mean': train_dict['corra_mean'],
'predictor_num': train_dict['predictor_num']
}
# class
clftrain = ClfTrain(tick_name=NAME)
clf = clftrain.fit_lr_gridsearch_cv()
summary_dict = clftrain.generate_clf_summary(clf, classifire_type='lr')
df_pred_actual = clftrain.predict_actual_diffs(clf)
reg = train.reg
colsl = train_dict['current_corrs_str']
df_reg_full = df_reg_full[colsl]
target_column_name = f'{NAME}_main_symb_weekly_mean_diff'
predict = Predict(reg,
target_column_name,
df_reg=df_reg_full,
date_reference=date_reference,
cols=colsl,
days_interval=days_interval)
next_week_behavior = predict.next_week_behavior(df=d._df,
date_reference=date_reference)
next_week_class = predict.next_week_class(clf)
r = next_week_full_train_behavior(main_ticker=NAME,
df_reg_full=df_reg_full,
df_raw=d._df,
cols=colsl,
train_object=train,
clftrain_object=clftrain,
days_interval=days_interval,
date_reference=date_reference
)
df_reg = df_reg[colsl]
# Results from 'train_dict' = partial train object
r2_test = train_dict['r2_test']
r2_train = train_dict['r2_train']
# Results from 'r' = full train object
r2_train_full_train = r['r2_train_full_train']
predicted_diff_full_train = r['predicted_diff_full_train']
percentage_change_full_train = r['percentage_change_full_train']
rmse_train_full_train = r['rmse_train_full_train']
next_week_price_full_train = r['next_week_price_full_train']
predicted_stock_class = r['class']
False_p = r['False_p']
True_p = r['True_p']
for k in measurements.keys():
measurements[k].append(eval(k))
except:
if df_reg_full.shape[1] == 2:
for k in measurements.keys():
measurements[k].append(None) if k != 'date_reference' else measurements[k].append(date_reference)
else:
            print('df_reg_full has more than 2 columns; something else went wrong')
break
date_reference = str(datetime.strptime(date_reference, "%Y-%m-%d").date() + timedelta(days=days_interval + 1))
print(measurements)
df_measurements = pd.DataFrame(measurements)
df_measurements.to_csv('df_measurements_nons.csv')
# %%
df_measurements = pd.read_csv('df_measurements.csv').drop('Unnamed: 0', axis=1)
# %%
# Adding: actual prices for the period after, actual dates for the period after, and a rolling std over the previous n periods
n = 10
df_measurements = df_measurements.set_index('date_reference')
dict = {'actual_date': [], 'actual_close_price': [], 'measurments_df_index': []}
# %%
dates = [str(datetime.strptime(df_measurements.index[0], "%Y-%m-%d").date() - timedelta(days=(days_interval + 1) * i))
for i in
range(1, n)][::-1] + list(df_measurements.index)
# %%
list(df_measurements.index)
# %%
for i, j in zip(dates[:-1], dates[1:]):
j = str(datetime.strptime(j, "%Y-%m-%d").date() - timedelta(days=1))
max_exist_date = max(dfc.index[dfc.index <= datetime.strptime(j, "%Y-%m-%d")])
dict['actual_date'].append(max_exist_date)
dict['actual_close_price'].append(round(dfc.loc[max_exist_date, 'close'], 2))
dict['measurments_df_index'].append(i)
df_stat = pd.DataFrame(dict).set_index('measurments_df_index')
df_stat['std'] = df_stat.actual_close_price.rolling(n).std()
df_stat = df_stat.iloc[n - 1:, :]
df_measurements = pd.concat([df_measurements, df_stat], axis=1)
# multiply number of days in week in predicted average diff
df_measurements_index = df_measurements.actual_date.astype(str)
for i in df_measurements_index[:-1]:
f = df_weekly.index.get_level_values('first_day_in_week') >= i
l = df_weekly.index.get_level_values('last_day_in_week') <= i
if any(f == l):
if i in df_measurements_index.to_list():
df_measurements.loc[df_measurements_index == i, 'days_in_week'] = df_weekly[f == l].days_in_week[0]
# metrics of the predicted week: std, number of days in week
df_measurements.loc[df_measurements.index[-1], 'days_in_week'] = 5
df_measurements.loc[df_measurements.index[-1], 'std'] = df_stat['std'][-1]
df_measurements[
'next_week_price_full_train_mult'] = df_measurements.next_week_price_full_train + df_measurements.predicted_diff_full_train * (
df_measurements.days_in_week - 1)
df_measurements['predicted_diff_full_train_mult'] = df_measurements.predicted_diff_full_train * (
df_measurements.days_in_week - 1)
# Adding actual diff
df_measurements['actual_diff'] = df_measurements['actual_close_price'].diff(1)
# Adding upper & lower CI bounds
df_measurements['upper'] = df_measurements.next_week_price_full_train_mult + 1.645 * (
df_measurements['std'] / np.sqrt(n))
df_measurements['lower'] = df_measurements.next_week_price_full_train_mult - 1.645 * (
df_measurements['std'] / np.sqrt(n))
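# Added for clarity (not in the original script): 1.645 is the two-sided 90% normal quantile,
# e.g. scipy.stats.norm.ppf(0.95) ~= 1.6449, so `upper`/`lower` form an approximate 90%
# confidence band around the prediction, using the rolling std of the last n closes over sqrt(n).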
# %% Plot
# df_pred_vs_actual = df_pred_vs_actual.set_index(['actual_date'], drop=True)
columns_to_plot1 = ['actual_close_price',
'predicted_diff_full_train_mult',
'actual_diff']
d = df_measurements[columns_to_plot1]
ax = d.plot(style='-o', color=['C1', 'C3', 'C4'])
columns_to_plot2 = 'next_week_price_full_train_mult'
df_predicted = df_measurements[columns_to_plot2]
predicted_color, predicted_alpha = 'C0', .25
ax = df_predicted.plot(ax=ax, style='-o', color=[predicted_color], alpha=predicted_alpha)
plt.fill_between(x=df_measurements.index,
y1=df_measurements['upper'],
y2=df_measurements['lower'],
color='C0',
alpha=predicted_alpha)
ax.legend()
plt.show()
# %% TODO: add try/except for the case where there is no df_reg for the model, decide which prediction to use for that date, and whether to mark those predictions
fig = ax.get_figure()
fig.savefig('matplotlmatib_figure.png') # save the figure to file
plt.close(fig)
|
[
"raz.shmuely@ironsrc.com"
] |
raz.shmuely@ironsrc.com
|
08ffcd1b33e03a0e636914a804faf0bca35a9280
|
95fe1c74e48b38886aa6a0f30aca5f531ac3e4ab
|
/graph_main.py
|
89642c32d64e5d5fd5d25ff54b83ba5a5feb7a64
|
[
"MIT"
] |
permissive
|
svenhsia/Entropic-Wasserstein-Embedding
|
24f7ee16b261b9a56d0de98ba0a14c52f27ca4a0
|
db837b92759cb5c921e7c06b2357861ec687e9de
|
refs/heads/master
| 2021-07-13T09:08:44.369781
| 2020-07-21T12:28:18
| 2020-07-21T12:28:18
| 181,919,633
| 10
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,524
|
py
|
import os
import sys
from time import time
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
import numpy as np
import tensorflow as tf
import networkx as nx
from graph_generator import GraphGenerator
from utils import *
# graph_id = sys.argv[1]
embed_dims = [2, 5, 10, 20, 30, 40]
n_epochs = 5
num_nodes = 64
for graph_id in range(1, 11):
g = nx.read_gpickle("./graphs/scale_free_{}_{}.pickle".format(num_nodes, graph_id))
graph_name = 'scale_free_{}_{}'.format(num_nodes, graph_id)
logging.info("Load graph {} from local file".format(graph_id))
node_pairs = g.get_node_pairs()
obj_distances = g.get_obj_distances()
logging.info("node pairs shape: {}, obj_distances shape: {}".format(
node_pairs.shape, obj_distances.shape))
batch_size = node_pairs.shape[0] # full batch
for embed_dim in embed_dims:
# Euclidean
logging.info("Running Euclidean embedding, embed dim={}".format(embed_dim))
embeddings, loss_history, time_history, embed_distances, jac = train(
node_pairs, obj_distances, embedding_type='Euc', embed_dim=embed_dim,
learning_rate=0.1, n_epochs=n_epochs, nodes=num_nodes, batch_size=batch_size)
np.savez('./results/{}_{}_{}'.format(graph_name, 'Euclidean', embed_dim),
embeddings=embeddings, loss=loss_history, time=time_history,
embed_distances=embed_distances)
# Hyperbolic
logging.info("Running Hyperbolic embedding, embed dim={}".format(embed_dim))
while True:
try:
embeddings, loss_history, time_history, embed_distances, jac = train(
node_pairs, obj_distances, embedding_type='Hyper', embed_dim=embed_dim,
learning_rate=0.01, n_epochs=n_epochs, nodes=num_nodes, batch_size=batch_size)
break
except RuntimeError:
logging.warning("Got Loss NaN")
continue
np.savez('./results/{}_{}_{}'.format(graph_name, 'Hyperbolic', embed_dim),
embeddings=embeddings, loss=loss_history, time=time_history,
embed_distances=embed_distances)
# Wass R2
logging.info("Running Wasserstein R2 embedding, embed dim={}".format(embed_dim))
embeddings, loss_history, time_history, embed_distances, jac = train(
node_pairs, obj_distances, embedding_type='Wass', embed_dim=embed_dim,
learning_rate=0.1, n_epochs=n_epochs, ground_dim=2, nodes=num_nodes, batch_size=batch_size)
np.savez('./results/{}_{}_{}'.format(graph_name, 'WassR2', embed_dim),
embeddings=embeddings, loss=loss_history, time=time_history,
embed_distances=embed_distances)
# Wass R3
logging.info("Running Wasserstein R3 embedding, embed dim={}".format(embed_dim))
embeddings, loss_history, time_history, embed_distances, jac = train(
node_pairs, obj_distances, embedding_type='Wass', embed_dim=embed_dim,
learning_rate=0.1, n_epochs=n_epochs, ground_dim=3, nodes=num_nodes, batch_size=batch_size)
np.savez('./results/{}_{}_{}'.format(graph_name, 'WassR3', embed_dim),
embeddings=embeddings, loss=loss_history, time=time_history,
embed_distances=embed_distances)
# # Wass R4
# logging.info("Running Wasserstein R4 embedding, embed dim={}".format(embed_dim))
# embeddings, loss_history, time_history, embed_distances, jac = train(
# node_pairs, obj_distances, embedding_type='Wass', embed_dim=embed_dim,
# learning_rate=0.1, n_epochs=n_epochs, ground_dim=4, nodes=num_nodes, batch_size=batch_size)
# np.savez('./results/{}_{}_{}'.format(graph_name, 'WassR4', embed_dim),
# embeddings=embeddings, loss=loss_history, time=time_history,
# embed_distances=embed_distances)
# KL
logging.info("Running KL embedding, embed dim={}".format(embed_dim))
embeddings, loss_history, time_history, embed_distances, jac = train(
node_pairs, obj_distances, embedding_type='KL', embed_dim=embed_dim,
learning_rate=0.01, n_epochs=n_epochs, nodes=num_nodes, batch_size=batch_size)
np.savez('./results/{}_{}_{}'.format(graph_name, 'KL', embed_dim),
embeddings=embeddings, loss=loss_history, time=time_history,
embed_distances=embed_distances)
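# Added sketch (not in the original script): the while/except loop above retries forever when
# the hyperbolic loss turns NaN; a bounded variant like this one fails loudly instead. The
# helper name and the retry limit are assumptions.
def train_with_retries(train_fn, max_retries=5, **train_kwargs):
    for attempt in range(max_retries):
        try:
            return train_fn(**train_kwargs)
        except RuntimeError:
            logging.warning("Got Loss NaN (attempt %d/%d)", attempt + 1, max_retries)
    raise RuntimeError("training failed {} times in a row".format(max_retries))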
|
[
"cheng.zhang@polytechnique.edu"
] |
cheng.zhang@polytechnique.edu
|
576d4f1cf0e88629805b3c8cf300bbcc50a634db
|
7b6563dffdd349426395935acafa7bc970d19bad
|
/finchcollector/main_app/migrations/0006_finch_user.py
|
0dd89eb302edcb7c889251e23d3b55f76e83266b
|
[] |
no_license
|
rashmika13/Finch-Collector-App--Django
|
706c01d52ea263533a5339411a941a2d5a4545ae
|
067d980f1ac2e2eb40a1036de801069333219425
|
refs/heads/master
| 2022-11-12T23:23:18.426818
| 2020-07-03T17:54:07
| 2020-07-03T17:54:07
| 275,243,877
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
# Generated by Django 3.0.7 on 2020-07-03 16:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main_app', '0005_photo'),
]
operations = [
migrations.AddField(
model_name='finch',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
|
[
"50966008+rashmika13@users.noreply.github.com"
] |
50966008+rashmika13@users.noreply.github.com
|
70b345612778f35358858da39926f532da20da86
|
db8b789b6f985ae49021c41f421ecef60c2f9265
|
/models/shopify_discount_program.py
|
23aba8f3a9a0ca386d7af77034e9e68fe57dad97
|
[] |
no_license
|
Kanta-sill/shopify_app
|
da04f850c46f474accb581865f975778f5f59321
|
432a887727b739dbb193421e95616348ebe902c6
|
refs/heads/main
| 2023-06-08T06:12:35.079435
| 2021-06-10T08:12:20
| 2021-06-10T08:12:20
| 375,590,917
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,168
|
py
|
import werkzeug
from odoo import api, fields, models, _, tools
import shopify
class ShopifyDiscountProgram(models.Model):
_name = "shopify.discount.program"
_description = "Discount Program"
def get_discount_shop(self):
user_current = self.env['res.users'].search([('id', '=', self._uid)])
shop_current = self.env['shopify.shop'].search([('base_url', '=', user_current.login)])
if shop_current:
return shop_current.id
else:
return None
name = fields.Char(string='Name')
shop_id = fields.Many2one('shopify.shop', string='Shop ID', default=get_discount_shop)
cus_ids = fields.One2many('shopify.discount.program.customer', 'discount_id', string='Discount Customer ID')
pro_ids = fields.One2many('shopify.discount.program.product', 'discount_id', string='Discount Product ID')
@api.depends('shop_id')
def update_shopify_product(self):
if self.shop_id:
shop_app_id = self.env['shopify.shop.app'].sudo().search([('shop', '=', self.shop_id.id)])
app_id = self.env['shopify.app'].sudo().search([('id', '=', shop_app_id.app.id)])
API_KEY = app_id.api_key
API_SECRET = app_id.secret_key
api_version = app_id.api_version
shop_url = self.shop_id.base_url
TOKEN = shop_app_id.token
shopify.Session.setup(api_key=API_KEY, secret=API_SECRET)
shopify_session = shopify.Session(shop_url, api_version, token=TOKEN)
shopify.ShopifyResource.activate_session(shopify_session)
pr = shopify.Product.find(limit=50)
for product in pr:
pro_vals = {
'name': product.title,
'price': product.variants[0].price,
'product_id': product.id,
'variant_id': product.variants,
# 'image_1920': product.images[0].src,
'shop_id': self.shop_id.id
}
check_product = self.env['shopify.product.load'].sudo().search(
[('product_id', '=', product.id)])
if check_product:
check_product.sudo().write(pro_vals)
else:
self.env['shopify.product.load'].sudo().create(pro_vals)
def open_discount_check_product(self):
self.update_shopify_product()
discount_vals = {
'discount_id': self.id,
}
new_discount = self.env['shopify.discount.choose.product'].sudo().create(discount_vals)
if self.shop_id:
pro_list = self.env['shopify.product.load'].sudo().search([('shop_id', '=', self.shop_id.id)], limit=50)
create_pro_ids = []
for pro in pro_list:
create_pro_ids.append((0, 0, {
'discount_id': new_discount.id,
'product_id': pro.id,
}))
pro_vals = {
'pro_ids': create_pro_ids
}
new_discount.write(pro_vals)
view_id = self.env.ref('shopify_app.discount_choose_product_view_form').id
return {
'type': 'ir.actions.act_window',
'name': 'Choose Product for Discount',
'res_model': 'shopify.discount.choose.product',
'views': [[view_id, 'form']],
'res_id': new_discount.id,
'target': 'new'
}
def open_customer(self):
cus_list = self.env['res.partner'].search(
[('shop_id', '=', self.shop_id.id), ('is_company', '=', False)], limit=50)
for cus in cus_list:
check_customer = self.env['shopify.discount.program.customer'].search(
[('discount_id', '=', self.id), ('customer_id', '=', cus.id)])
if not check_customer:
cus_vals = {
'discount_id': self.id,
'customer_id': cus.id,
}
self.env['shopify.discount.program.customer'].create(cus_vals)
class ShopifyDiscountProgramProduct(models.Model):
_name = "shopify.discount.program.product"
discount_id = fields.Many2one('shopify.discount.program', string='Discount ID', ondelete='cascade')
product_id = fields.Many2one('shopify.product.load', string='Product ID', ondelete='cascade')
shop_product_id = fields.Char(related='product_id.product_id', string='Variant ID')
name = fields.Char(related='product_id.name', string='Name')
price = fields.Float(related='product_id.price')
discount_amount = fields.Float(string='Discount Amount')
check_product = fields.Boolean(string='Check')
quantity = fields.Integer(string='Quantity', default=1)
class ShopifyDiscountProgramCustomer(models.Model):
_name = "shopify.discount.program.customer"
discount_id = fields.Many2one('shopify.discount.program', string='Discount ID', ondelete='cascade')
customer_id = fields.Many2one('res.partner', string='Customer ID', ondelete='cascade')
email = fields.Char(related='customer_id.email')
check_person = fields.Boolean(string='Choose Person', default=True)
|
[
"duc200032@gmail.com"
] |
duc200032@gmail.com
|
bc0060f516bc7d3faaa17cf838512f5acc8c5414
|
73c9761bb59609f96625595cc05271fd664b6ba9
|
/self
|
6b46c41182d56095e82844414c5ecd57c5910be2
|
[] |
no_license
|
tahinpekmez/MyPython
|
82bd32afdca246e258a438396e0904b7fcb50f6e
|
c26b7fbd36859e5d5b7d34b0f177f9428900827b
|
refs/heads/master
| 2023-01-07T14:21:40.104874
| 2020-11-05T22:03:21
| 2020-11-05T22:03:21
| 277,694,236
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
#!/usr/bin/python
class Myclass():
i = 123
def __init__(self):
self.i = 345
a = Myclass()
print(a.i)
b = Myclass()
print(b.i)
|
[
"noreply@github.com"
] |
tahinpekmez.noreply@github.com
|
|
bc431979c5f0465fa37fddcd96b6854a989da6ea
|
c82e62c44ae6716532c06b12ad2f28d225e69ddb
|
/loader/cifar10C_loader.py
|
56ef1b1a344ff1d53f39fe12aa12eed3cbca2928
|
[
"MIT"
] |
permissive
|
ag027592/Geometric-Sensitivity-Decomposition
|
d38f2d84d05453555b9cfd60eaa29d8be70131b9
|
3770449735a2fd976edb22b644cc0846a860c1f7
|
refs/heads/main
| 2023-08-24T13:47:15.996656
| 2021-10-29T00:37:28
| 2021-10-29T00:37:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,600
|
py
|
from PIL import Image
import os
import os.path
import numpy as np
import pickle
from typing import Any, Callable, Optional, Tuple
from torchvision.datasets.vision import VisionDataset
from torchvision.datasets.utils import check_integrity, download_and_extract_archive
import torch
class CIFAR10C(VisionDataset):
"""`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
Args:
root (string): Root directory of dataset where directory
``cifar-10-batches-py`` exists or will be saved to if download is set to True.
train (bool, optional): If True, creates dataset from training set, otherwise
creates from test set.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
url = "https://zenodo.org/record/2535967/files/CIFAR-10-C.tar?download=1" #"https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
filename = "cifar-10-c-python.tar"
tgz_md5 = '56bf5dcef84df0e2308c6dcbcbbd8499'
def __init__(
self,
root: str,
train: bool = True,
dgrd = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super(CIFAR10C, self).__init__(root, transform=transform,
target_transform=target_transform)
        self.train = train  # stored so extra_repr() can report the split
        if download:
            self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
self.targets = np.load(self.root+'/CIFAR-10-C/labels.npy')
data = np.load(self.root +'/CIFAR-10-C/' +dgrd['type'] +'.npy')
self.data = data[(dgrd['value']-1)*10000:dgrd['value']*10000]
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], self.targets[index]
img = Image.fromarray(img)
# import ipdb;ipdb.set_trace()
# doing this so that it is consistent with all other datasets
# to return a PIL Image
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
        return img.type(torch.float), target.astype(int)  # plain int: np.int is deprecated in recent NumPy
def __len__(self) -> int:
return len(self.data)
def _check_integrity(self) -> bool:
fpath = os.path.join(self.root, self.filename)
if not check_integrity(fpath, self.tgz_md5):
return False
return True
def download(self) -> None:
if self._check_integrity():
print('Files already downloaded and verified')
return
download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5)
def extra_repr(self) -> str:
return "Split: {}".format("Train" if self.train is True else "Test")
|
[
"jtian73@gatech.edu"
] |
jtian73@gatech.edu
|
bf5604ca5b513c7fa3cb1c95f48ebed455079e86
|
026f77d3b55be99116eef25278ec13d01a97f469
|
/contact/views.py
|
d5405b4fbbb945b1c0c966eb95f0de68c61d7572
|
[] |
no_license
|
Aleksandr-yask/Django-online-store
|
195dc22f6b878743a6be77c1162fbfb15bdefb5b
|
fd8aa1ca4c8d907859e8280b61333593632112f2
|
refs/heads/master
| 2021-04-13T01:03:09.077950
| 2020-03-22T06:33:16
| 2020-03-22T06:33:16
| 249,122,408
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 120
|
py
|
from django.shortcuts import render
def contact(request):
return render(request, 'contact/contact.html', locals())
|
[
"yasakov.org@gmail.com"
] |
yasakov.org@gmail.com
|
f63b099e702b81e6e40d28709f906a8297067d54
|
dc7de1db263cb661d7c473c9bfcb444cfe730d9f
|
/rel/RE_run2.py
|
47dcc3d39cea5af5e3d18825dc31bc489a1515aa
|
[] |
no_license
|
BBBigBang/PM
|
8e0bb0a4b1c6468f2b2ccd4a51fefd6cca43609e
|
6d5288a10b7929327cac5b3ef731a30d7cb7a344
|
refs/heads/master
| 2020-03-24T04:00:02.071355
| 2018-12-06T03:26:23
| 2018-12-06T03:26:23
| 142,439,877
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,940
|
py
|
import Corpus_PM
#import LSTM_RE_Predict
import LSTM_RE_Predict_v2
import FileUtil
import subprocess
import os, sys
import Keyword_Extraction
from Constants import home_dir, threshold
import logging;
import time;
model_file = home_dir + 'data/liver.in.train.h5'
def run2(abstract, entity_list, eval, rep):
#{{{
"""
    A function to extract relations from one abstract's NER results.
@param:
abstract:
entity_list:
eval:
rep:
@return:
"""
logger=logging.getLogger("relation_run:");
#print 'Generate instances from previous NER results'
startTime=time.time();
insts_embed_list, insts_offset_list = Corpus_PM.gen_insts_from_one_abstract(abstract, entity_list)
    logger.debug('Generate instance done, elapsed:{}s'.format(time.time()-startTime));
if len(insts_embed_list) == 0:
return [],[]
insts_embed_list, insts_offset_list = Corpus_PM.filter_possible_negatives(insts_embed_list, insts_offset_list)
    logger.debug('Filter possible negative instances done, elapsed:{}s'.format(time.time()-startTime));
if len(insts_embed_list) == 0:
return [],[]
#print 'Predict relations between the various biomedical entities'
startTime=time.time();
#answer_array_test, filtered_index = LSTM_RE_Predict.binary_relation_extraction(insts_embed_list, eval, rep)
answer_array_test, filtered_index = LSTM_RE_Predict_v2.binary_relation_extraction(insts_embed_list, eval, rep)
    logger.debug('Predict relation done, elapsed:{}s'.format(time.time()-startTime));
#print answer_array_test
if len(answer_array_test)==0:
return [],[]
#print 'Extract the relation entity pairs'
startTime=time.time();
#true_insts_offset = LSTM_RE_Predict.get_true_insts(insts_offset_list, answer_array_test, filtered_index, threshold)
true_insts_offset = LSTM_RE_Predict_v2.get_true_insts(insts_offset_list, answer_array_test, filtered_index, threshold)
    logger.debug('Extract relation done, elapsed:{}s'.format(time.time()-startTime));
if len(true_insts_offset)==0:
return [],[]
    #print 'Parsing corresponding sentences for interaction word extraction'
startTime=time.time();
inst_index_list, sent_list = Corpus_PM.process_insts_4_parser(true_insts_offset)
print('###########################################################################')
print(' length of sent_list ')
print(len(sent_list))
print(' max length of element in sent_list ')
e_length = [len(k) for k in sent_list]
print(max(e_length))
e_all = [len(k.split(' ')) for k in sent_list]
print(' max word quantity of element in sent_list ')
print(max(e_all))
print('###########################################################################')
FileUtil.writeStrLines(home_dir + 'tempt2.sent', sent_list)
    logger.debug('Parsing interaction done, elapsed:{}s'.format(time.time()-startTime));
startTime=time.time();
retval = subprocess.call('java -mx3g -cp "' + home_dir + 'stanford-parser/*"' + ' edu.stanford.nlp.parser.lexparser.LexicalizedParser ' ' -nthreads 15 ' \
'-outputFormat "wordsAndTags,typedDependencies" -sentences newline' + \
' edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz ' + \
home_dir + 'tempt2.sent > ' + home_dir + 'tempt2.sent.par',stdout=subprocess.PIPE,stderr=subprocess.PIPE, shell=True)
    logger.debug('Stanford parse done, elapsed:{}s'.format(time.time()-startTime));
assert retval == 0
#print 'Keyword Extraction'
startTime=time.time();
nodes_list, edges_list, triple_list = Keyword_Extraction.extraction_mid_version(inst_index_list, home_dir + 'tempt2.sent.par', home_dir + 'outHtml/out.html')
    logger.debug('Keyword extraction done, elapsed:{}s'.format(time.time()-startTime));
#print '#############################################'
#print 'Keyword Extraction'
#print '#############################################'
    print('Find ' + str(len(triple_list)) + ' triples successfully.')
    print('triple_list..................')
    print(triple_list)
return nodes_list, edges_list, triple_list
#}}}
def toJson(nodes,edges):
"""
convert relation to Json for cytoscape.js
@param:
nodes: list, NOTE: should be changed!
edges: list, NOTE: should be changed!
@return:
result: string, json format
"""
import output_json;
    #NOTE: generate_JSONfile returns a python dict, NOT a string, which is not
    #      what we want here, so we convert it to a string with json.dumps.
import json;
return json.dumps(output_json.generate_JSONfile(nodes,edges));
|
[
"136481981@qq.com"
] |
136481981@qq.com
|
7f18cf4075648d529d0b0c1e30b44536723806cd
|
991dacb980ffbd1485bb824258148f39c0aea192
|
/Python2.7/ichowdhury_2.3.py
|
851bca620892c7a1c11974604cb944d2ec75ff24
|
[] |
no_license
|
iac-nyc/Python
|
cc8accca9aa16a710de6004343728540deb3e501
|
d39d05ffc45d51e8ca1d260ad9fb7dd659fb0c08
|
refs/heads/master
| 2020-03-30T16:31:30.253188
| 2018-11-28T13:41:49
| 2018-11-28T13:41:49
| 151,413,265
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
# Name : Iftekhar Chowdhury
# Date : Oct 31, 2015
# Homework 2.3
sample_text = """ And since you know how you cannot see yourself,
so well as by reflection, I, your glass,
will modestly discover to yourself,
that of yourself which you yet know not of. """
search_string = raw_input ('Please enter a text to replace:')
new_string = raw_input ('Please enter the replacement text: ')
count = sample_text.count(search_string)
print "{} replacements made".format(count)
new_text = sample_text.replace(search_string, new_string)
print new_text
|
[
"007ifte@gmail.com"
] |
007ifte@gmail.com
|
73278f19d0a296b6c7ef34fbfc7a1b5ef6391db3
|
183d2d3f74997d98b6c8e38ddf146a0975464445
|
/gewaraSpider/Config.py
|
2609236b32e77bceb7275c02e916d324b3f571d5
|
[] |
no_license
|
TopcoderWuxie/gewaraSpider
|
a8545b8e1ba196e95416d7205247ff31efcac78c
|
d28b49e2f90b662d0d75c77f0a5f5025a8046c71
|
refs/heads/master
| 2020-03-06T16:03:53.274427
| 2018-03-27T10:01:45
| 2018-03-27T10:01:45
| 101,733,083
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,231
|
py
|
#coding: utf-8
PATH = r"moviesUrls.txt"
import pymysql
conn = pymysql.connect(
host= "59.110.17.233",
port= 6306,
user= "aliPa",
password= "6y3*p*o$Uj>1s$H",
database= "ysali",
charset= 'utf8',
)
# Base URL used when building page links
BaseUrl = "http://www.gewara.com"
# URL that the comment-query requests are POSTed to
BasePostUrl = "http://www.gewara.com/activity/ajax/sns/qryComment.xhtml?"
# Reserved fields for future use
base_url1 = "http://www.gewara.com/movie/searchMovieStore.xhtml?pageNo=0&movietime=all&movietype="
base_url2 = "&order=releasedate"
base_url3 = ""
base_url4 = ""
maxThread = 10
# All movie categories
classification = ['动作', '喜剧', '爱情', '科幻', '奇幻', '灾难', '恐怖', '纪录', '犯罪', '战争', '冒险', '动画', '剧情', '其他']
# post data
DATA = {
'pageNumber' : 0,
'relatedid' : None,
'topic' : '',
'issue' : 'false',
'hasMarks' : 'true',
'isCount' : 'true',
'tag' : 'movie',
'isPic' : 'true',
'isVideo' : 'false',
'userLogo' : '',
'newWalaPage' : 'true',
'isShare' : 'false',
'isNew' : 'true',
'maxCount' : 1500,
'isWide' : 'true',
'isTicket' : 'false',
'effect' : '',
'flag' : ''
}
# headers
HEADERS = {
'Accept' : 'text/html, application/xml, text/xml, */*',
'Accept-Encoding' : 'gzip, deflate',
'Accept-Language' : 'zh-CN,zh;q=0.8',
'Cache-Control' : 'no-cache,no-store',
'Cookie' : 'citycode=110000; _gwtc_=1499049758746_3zAF_0b5d; Hm_lvt_8bfee023e1e68ac3a080fa535c651f00=1499049759,1499130597',
'Host' : 'www.gewara.com',
'If-Modified-Since' : '0',
'Proxy-Connection' : 'keep-alive',
'Referer' : 'http://www.gewara.com/movie/',
'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
'X-Requested-With' : 'XMLHttpRequest',
}
# free PROXIES
# http://dev.kuaidaili.com/api/getproxy?orderid=949187989849476&num=100&kps=1
from Proxies import getProxies
PROXIES = {'http': 'http://120.24.216.121:16816'}
PROXIES1 = {'http': 'http://61.147.103.207:16816'}
PROXIES2 = {'http': 'http://211.149.189.148:16816'}
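# Added usage sketch (assumes the `requests` library; the spider's real request code lives
# elsewhere in the repo and may differ). `relatedid` must be filled with a movie id first:
# import requests
# DATA['relatedid'] = '12345'
# resp = requests.post(BasePostUrl, data=DATA, headers=HEADERS, proxies=PROXIES)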
|
[
"top.wuxie@gmail.com"
] |
top.wuxie@gmail.com
|
8e2a04db0d7e5ceb1c9cf1d397ba155439e61a9c
|
e6a875704e32276bd9a7da63520ae5ba18ed27d5
|
/app.py
|
b424a3c98aa0349713b21c5e163b121cfe744693
|
[] |
no_license
|
xLightless/IOWrapper
|
83685c8a44977d263349faebfd0a877c9f75764d
|
c7b1d8453ef7f193f7438ad6112770ec798b4dff
|
refs/heads/master
| 2023-08-31T06:05:00.274036
| 2021-10-25T03:14:21
| 2021-10-25T03:14:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,827
|
py
|
import os
import time
import pandas as pd
import json
file = "credentials.txt"
class Client(object):
def __init__(self,auth=False):
""" Wraps the authentication process of the HTTP requests into a managable system """
self.auth = auth
self.credentials = {}
def set_additional_creds(self):
""" Appends new credentials to a memory/file location """
pass
def set_values_to_new_creds(self):
""" Sets new values for the appended credentials in '#set_additional_creds()' """
pass
def get_keys_and_values(self):
""" Lets the user get authentication information to be used. If k/v else None """
pass
def _update_cred_keys(self):
""" Updates the credentials of the client if the value exists, else return None """
pass
def _update_cred_values(self):
""" Updates the credentials of the client if the key exists, else return None """
d = self.credentials
msg = input(f">> Enter a Key to update it's value (i.e. 'discord_username'): ")
if msg in d:
            for k in d:  # compare against the keys themselves
uv = input(">> Enter the new value of this item: ")
if msg==k:
print("Old Value: ",k,d.get(k))
d[k] = [uv]
print("New Value: ",k,d.get(k))
break
with open(file,"w") as f:
f.write(str(dict(d.items())))
f.close()
else: print("The key you entered could not be found, please try again.")
def _run_builder(self):
building = True
count = 0
print("Please carefully enter the correct values below...")
while building:
try:
key = input(">> Enter a key for the dictionary: ")
value = input(f">> Enter a value for {key}: ")
if key != '': self.credentials[key] = [value]
if key == '': raise KeyError
except KeyError:
count=(count+1)
print(f"Invalid ['k({key}):value'].\n")
if count==2:
print(">> Too many false attempts to set a key|value, terminating...\n")
print(f"Writing valid keys and values to memory/file. Please check '{file}'")
building = False
if building == False:
with open(file,"w") as f:
f.write(str(dict(self.credentials.items())))
f.close()
def build(self):
if self.auth == False:
print("Building Application...",
"If you need to use explicit features you will be asked to enter credentials.")
elif self.auth == True:
floc = os.path.exists(file)
            try:
                if not floc:
                    # create the credentials file on first run
                    create_file = open(file, "x")
                    return create_file
            except Exception:
                pass
            if floc == True:
                with open(file, "r") as f:
                    # the file stores a str(dict); swap the quotes so it parses as JSON
                    data = f.read()
                    fdata = data.replace("'", '"')
                    d = json.loads(fdata) if fdata.strip() else {}
if os.stat(file).st_size != 0:
print(d)
ans = input("Do you want to update your credentials?: ").lower()
if ans=="y":
pass
else:
print("Everything is good, Authentication Over! :D")
exit()
else:
self._run_builder()
client = Client(auth=True)
client.build()
|
[
"lightlessgaming@gmail.com"
] |
lightlessgaming@gmail.com
|
026d15229905e121cbef24b485b49a5ea8634d13
|
4f55d730827f07c1f54262d35ac916957fa4c409
|
/dynamicsynapse/DynamicSynapsePhasePotraitSimple.py
|
8aab57c2f510840f69727bb036ed84b53d1d5eb7
|
[] |
no_license
|
InsectRobotics/DynamicSynapsePublic
|
be84eae967e76496dc92c8a3e01ba55d9c272d1a
|
18b000b7344bb82ecbc31045ec3b5bb72234b8c9
|
refs/heads/master
| 2020-03-22T20:43:18.044297
| 2018-07-11T22:24:24
| 2018-07-11T22:24:24
| 140,625,783
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 42,969
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 1 18:05:14 2017
@author: chitianqilin
"""
import numpy as np
import copy
import matplotlib as mpl
import matplotlib.pyplot as plt
import time
import dill
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backends.backend_pdf import PdfPages
from multiprocessing import Pool, TimeoutError
import logging, sys, traceback
from cycler import cycler
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
def rk4(h, y, inputs, Parameters, f):
k1 = f(y, inputs, Parameters)
# print(y)
# print(h)
# print(k1)
# print(y + 0.5*h*k1)
k2 = f(y + 0.5*h*k1, inputs, Parameters)
k3 = f(y + 0.5*h*k2, inputs, Parameters)
k4 = f(y + k3*h, inputs, Parameters)
return y + (k1 + 2*(k2 + k3) + k4)*h/6.0
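# Added usage sketch for the rk4() helper above (the demo ODE is an assumption, not part of
# the model): integrating dy/dt = -y for one time unit should land near exp(-1) ~= 0.3679.
def _rk4_demo(steps=100, h=0.01):
    decay = lambda y, inputs, parameters: -y
    y = 1.0
    for _ in range(steps):
        y = rk4(h, y, None, None, decay)
    return y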
class DynamicSynapseArray:
def __init__(self, NumberOfSynapses = 5, CW = 100, tauWV = 40, aWV = 100, rWV = 5000, scale=10, \
WeightersCentral = None , WeighterVarDamping = None, WeighteAmountPerSynapse = 1, \
Weighters = None, WeighterVarRates = None, WeightersCentralUpdateRate = 0.000012,\
DampingUpdateRate = 0.0000003/100 , WeightersCentralUpdateCompensate =0, MaxDamping = 10):
#self.NumberOfNeuron=NumberOfSynapses[0]
self.NumberOfSynapses = NumberOfSynapses#[1]
self.CW = CW
self.tauWV = tauWV
self.aWV = aWV
self.rWV = rWV
self.scale =scale
self.WeightersCentral = WeightersCentral if WeightersCentral is not None else np.ones(NumberOfSynapses)/2+[[0.1, 0.1,0.1, 0.1, 0.1]]*NumberOfSynapses[0] #[0.2, 0.1, 0]
self.WeighterVarDamping = WeighterVarDamping if WeighterVarDamping is not None else np.ones(NumberOfSynapses) * [[2,2,2,2,2]]*NumberOfSynapses[0] #[2,2,2]
self.DampingUpdateRate = DampingUpdateRate
self.GetParameters = lambda: [self.CW, self.tauWV, self.aWV , self.rWV, self.scale, self.WeightersCentral,self.WeighterVarDamping]
self.Parameters = [self.CW, self.tauWV, self.aWV , self.rWV, self.scale, self.WeightersCentral,self.WeighterVarDamping]
self.WeighteAmountPerSynapse = WeighteAmountPerSynapse
self.WeightersLast = Weighters if Weighters is not None else 0.5*np.ones(NumberOfSynapses) +0.001*np.random.rand(NumberOfSynapses)
self.Weighters = self.WeightersLast
self.WeighterInAxon = self.WeighteAmountPerSynapse* self.NumberOfSynapses[1] - self.WeightersLast
self.WeighterInAxonConcentration = self.WeighterInAxon/self.NumberOfSynapses[1]
self.WeighterVarRatesLast = WeighterVarRates if WeighterVarRates is not None else np.zeros(NumberOfSynapses)
self.WeighterVarRates = self.WeighterVarRatesLast
self.EquivalentVolume = (1+(2*self.WeightersCentral-(self.WeighterInAxonConcentration+self.WeightersLast))/((self.WeighterInAxonConcentration+self.WeightersLast)-self.WeightersCentral))
self.WeightersCentralUpdateRate = WeightersCentralUpdateRate
self.WeightersCentralUpdateCompensate = WeightersCentralUpdateCompensate
self.MaxDamping = MaxDamping
def Derivative (self, state=None , inputs=None, Parameters=None):
if state is not None:
WeightersLast, WeighterVarRatesLast = state
else:
WeightersLast, WeighterVarRatesLast = self.WeightersLast, self.WeighterVarRatesLast
if inputs is not None:
WeighterInAxonConcentration=inputs
else:
WeighterInAxonConcentration=self.WeighterInAxonConcentration
if Parameters is not None:
CW, tauWV, aWV, rWV, scale, WeightersCentral,WeighterVarDamping = Parameters
else:
CW, tauWV, aWV, rWV, scale, WeightersCentral,WeighterVarDamping = self.Parameters()
#print(WeighterVarRatesLast , WeighterInAxonConcentration , WeightersLast , self.scale)
EquivalentVolume = (1+(2*WeightersCentral-(WeighterInAxonConcentration+WeightersLast))/((WeighterInAxonConcentration+WeightersLast)-WeightersCentral))
self.EquivalentVolume = EquivalentVolume
# DWeighters = WeighterVarRatesLast * WeighterInAxonConcentration * WeightersLast /self.scale*2
# DWeighters = WeighterVarRatesLast /self.scale/2
# CW = 100
# tauWV = 17.8#40#17.8#40 if flow rate times w*v choose 40, if flow rate times w or v choose 20
# aWV = 100
# rWV = 5000
# scale=1
# tauWV =500#40#19#17.8#40#35#50#40#17.8#40 if flow rate times w*v choose 40, if flow rate times w or v choose 20
# aWV = 170#130 100
# rWV = 7000 #7000
# scale=1
# damping =2
# CW = 100
# SimulationTimeInterval = 10
# DWeighters = WeighterVarRatesLast * ( WeighterInAxonConcentration + WeightersLast/EquivalentVolume +np.sign(WeighterVarRatesLast)*(WeighterInAxonConcentration - WeightersLast/self.EquivalentVolume)) /2 /self.scale
# DWeighterVarRates = ( (WeighterInAxonConcentration - WeightersLast/EquivalentVolume \
# +aWV * np.sign(WeighterVarRatesLast)*np.power(np.abs(WeighterVarRatesLast), 0.5))/ rWV - WeighterVarDamping*WeighterVarRatesLast ) / tauWV /scale
DWeighters = WeighterVarRatesLast * ( WeighterInAxonConcentration + WeightersLast/EquivalentVolume +np.sign(WeighterVarRatesLast)*(WeighterInAxonConcentration - WeightersLast/EquivalentVolume)) /2 /self.scale
DWeighterVarRates = ( (tauWV*(WeighterInAxonConcentration - (WeightersLast/EquivalentVolume)**2 ) \
+aWV * np.sign(WeighterVarRatesLast)*np.power(np.abs(WeighterVarRatesLast), 0.5)) - WeighterVarDamping*WeighterVarRatesLast ) /scale / rWV
# print('DWeighters, DWeighterVarRates')
# print(DWeighters, DWeighterVarRates)
## DWeighters = WeighterVarRatesLast * WeighterInAxonConcentration * WeightersLast /self.scale*2
## DWeighters = WeighterVarRatesLast /self.scale/2
## CW = 100
## tauWV = 17.8#40#17.8#40 if flow rate times w*v choose 40, if flow rate times w or v choose 20
## aWV = 100
## rWV = 5000
## scale=1
# DWeighters = WeighterVarRatesLast * ( WeighterInAxonConcentration + WeightersLast/self.EquivalentVolume +np.sign(WeighterVarRatesLast)*(WeighterInAxonConcentration - WeightersLast/self.EquivalentVolume)) /2 /self.scale
# DWeighterVarRates = ( (WeighterInAxonConcentration - WeightersLast/self.EquivalentVolume \
# +aWV * np.sign(WeighterVarRatesLast)*np.power(np.abs(WeighterVarRatesLast), 0.5))/ rWV - WeighterVarDamping*WeighterVarRatesLast ) / tauWV /self.scale
#very chaos, no distinguish between pump rate and transport speed
# CW = 100
# tauWV = 17.8--50#40 if flow rate times w*v choose 40, if flow rate times w or v choose 20
# aWV = 100
# rWV = 5000
# scale=1
# DWeighters = WeighterVarRatesLast /self.scale
# DWeighterVarRates = ( (WeighterInAxonConcentration - WeightersLast/self.EquivalentVolume \
# +aWV * np.sign(WeighterVarRatesLast)*np.power(np.abs(WeighterVarRatesLast), 0.5))/ rWV - WeighterVarDamping*WeighterVarRatesLast ) / tauWV * ( WeighterInAxonConcentration + WeightersLast +np.sign(WeighterVarRatesLast)*(WeighterInAxonConcentration - WeightersLast)) /2 /self.scale
#instantious catch-up lateral mobility resistance, no pump resistance
## CW = 100
## tauWV = 40#50#40#17.8#40 if flow rate times w*v choose 40, if flow rate times w or v choose 20
## aWV = 0.0005#0.02 #100
## rWV = 200000
## scale=1
# DWeighters = WeighterVarRatesLast + (WeighterInAxonConcentration - WeightersLast/self.EquivalentVolume)/ rWV /self.scale
# DWeighterVarRates = ( \
# (aWV * np.sign(WeighterVarRatesLast)*np.power(np.abs(WeighterVarRatesLast), 0.5) - WeighterVarDamping*WeighterVarRatesLast ) * ( WeighterInAxonConcentration + WeightersLast +np.sign(WeighterVarRatesLast)*(WeighterInAxonConcentration - WeightersLast)) /2 )/self.scale
#original
# DWeighters = WeighterVarRatesLast * WeighterInAxonConcentration * WeightersLast /self.scale
# DWeighterVarRates = ( (WeighterInAxonConcentration - WeightersLast/(1+(2*WeightersCentral-(WeighterInAxonConcentration+WeightersLast))/((WeighterInAxonConcentration+WeightersLast)-WeightersCentral) ) \
# +aWV * np.sign(WeighterVarRatesLast)*np.power(np.abs(WeighterVarRatesLast), 0.5))/ rWV - WeighterVarDamping*WeighterVarRatesLast ) / tauWV /self.scale
return np.array([DWeighters, DWeighterVarRates])
#def SynapseDerivative(WeightersLast, WeighterVarRatesLast, WeighterInAxon, Parameters):
# tauW, tauWV, aWV = Parameters
# DWeightersLast = WeighterVarRatesLast
# DWeighterVarRatesLast = aWV * ( WeighterInAxon - WeightersLast + np.sign(WeighterVarRatesLast)*np.power(WeighterVarRatesLast, 0.5))
# return DWeightersLast, DWeighterVarRatesLast
def Jacobian(self, state=None , inputs=None, Parameters=None):
if state is not None:
WeightersLast, WeighterVarRatesLast = state
else:
WeightersLast, WeighterVarRatesLast = self.WeightersLast, self.WeighterVarRatesLast
if inputs is not None:
WeighterInAxonConcentration=inputs
else:
WeighterInAxonConcentration=self.WeighterInAxonConcentration
if Parameters is not None:
CW, tauWV, aWV, rWV, scale, WeightersCentral,WeighterVarDamping = Parameters
else:
CW, tauWV, aWV, rWV, scale, WeightersCentral,WeighterVarDamping = self.Parameters()
DDWDW = WeighterVarRatesLast * ( 1 -np.sign(WeighterVarRatesLast) ) /2 /self.scale
DDWDWV = ( WeighterInAxonConcentration + WeightersLast +np.sign(WeighterVarRatesLast)*(WeighterInAxonConcentration - WeightersLast)) /2 /self.scale
DDWVDW = ( (WeighterInAxonConcentration - WeightersLast/(1+(2*WeightersCentral-(WeighterInAxonConcentration+WeightersLast))/((WeighterInAxonConcentration+WeightersLast)-WeightersCentral) ) \
+aWV * np.sign(WeighterVarRatesLast)*np.power(np.abs(WeighterVarRatesLast), 0.5))/ rWV - WeighterVarDamping*WeighterVarRatesLast ) / tauWV /self.scale
DDWVDDWV = ( (WeighterInAxonConcentration - WeightersLast/(1+(2*WeightersCentral-(WeighterInAxonConcentration+WeightersLast))/((WeighterInAxonConcentration+WeightersLast)-WeightersCentral) ) \
+aWV * np.sign(WeighterVarRatesLast)*np.power(np.abs(WeighterVarRatesLast), 0.5))/ rWV - WeighterVarDamping*WeighterVarRatesLast ) / tauWV /self.scale
        # return the partial derivatives computed above as a 2x2 Jacobian block
        return np.array([[DDWDW, DDWDWV], [DDWVDW, DDWVDDWV]])
def StepSynapseDynamics(self, dt, ModulatorAmount):
# ModulatorAmount=np.array(ModulatorAmount)
# ModulatorAmount=ModulatorAmount.reshape(np.append(ModulatorAmount.shape,1).astype(int))
self.Parameters = [self.CW, self.tauWV, self.aWV , self.rWV, self.scale, self.WeightersCentral,self.WeighterVarDamping]
self.Weighters, self.WeighterVarRates = rk4(dt, np.array([self.WeightersLast, self.WeighterVarRatesLast]), self.WeighterInAxonConcentration, self.Parameters, self.Derivative)
# print('self.Weighters')
# print(self.Weighters)
# if np.isnan(self.Weighters):
# pass
self.WeighterInAxon = self.WeighteAmountPerSynapse* self.NumberOfSynapses[1] - self.WeightersLast
#print(self.WeighterInAxon, self.WeighteAmountPerSynapse, self.NumberOfSynapses[1] , self.WeightersLast.sum(axis=1,keepdims=True))
self.WeighterInAxonConcentration = self.WeighterInAxon/self.NumberOfSynapses[1]
self.WeightersCentral += (self.Weighters-self.WeightersCentral)*ModulatorAmount *self.WeightersCentralUpdateRate*dt*(1+self.WeightersCentralUpdateCompensate*(self.Weighters>self.WeightersCentral)) #0.000015##0.00002
# self.WeightersCentral += (self.Weighters-self.WeightersCentral)*ModulatorAmount *self.WeightersCentralUpdateRate*dt #0.000015##0.00002
#print(self.WeightersCentral)
self.WeighterVarDamping += (self.MaxDamping-self.WeighterVarDamping)*self.WeighterVarDamping*ModulatorAmount*self.DampingUpdateRate *dt #
#print(self.WeighterVarDamping)
return self.Weighters, self.WeighterVarRates, self.WeighterInAxon, self.WeighterInAxonConcentration
def StateUpdate(self):
self.WeightersLast, self.WeighterVarRatesLast = self.Weighters, self.WeighterVarRates
def InitRecording(self, lenth, SampleStep = 1):
self.RecordingState = True
self.RecordingLenth = lenth
self.RecordingInPointer = 0
self.Trace = {'Weighters':np.zeros(np.append(lenth,self.NumberOfSynapses).ravel()), \
'WeightersCentral' : np.zeros(np.append(lenth,self.NumberOfSynapses).ravel()), \
'WeighterVarRates' : np.zeros(np.append(lenth,self.NumberOfSynapses).ravel()), \
'WeighterVarDamping' : np.zeros(np.append(lenth,self.NumberOfSynapses).ravel()),\
'WeighterInAxonConcentration' : np.zeros(np.append(np.append(lenth,self.NumberOfSynapses[0]),1).ravel()), \
'EquivalentVolume':np.zeros(np.append(lenth,self.NumberOfSynapses).ravel()) \
}
def Recording(self):
Temp = None
for key in self.Trace:
exec("Temp = self.%s" % (key))
# print ("Temp = self.%s" % (key))
# print(Temp)
self.Trace[key][self.RecordingInPointer, :] = Temp
self.RecordingInPointer += 1
if self.RecordingInPointer>= self.RecordingLenth:
self.RecordingInPointer = 0
#%%
def PlotPhasePortrait(self, xlim, ylim, fig=None, ax=None, inputs=None, Parameters=None):
# fig2 = plt.figure(figsize=(10, 20))
# ax4 = fig2.add_subplot(1,1,1)
if fig == None or ax == None:
fig, ax = plt.subplots(1,sharex=False , figsize=(20, 12))#
if Parameters is not None:
CW, tauWV, aWV, rWV, scale, WeightersCentral,WeighterVarDamping = Parameters
else:
CW, tauWV, aWV, rWV, scale, WeightersCentral,WeighterVarDamping = self.GetParameters()
if inputs is not None:
d, h = WeighterInAxonConcentration, WeightersCentral=inputs
else:
d = WeighterInAxonConcentration=self.WeighterInAxonConcentration
a= aWV / tauWV
b= WeighterVarDamping / tauWV
h = WeightersCentral
s=1
Wis=np.linspace(xlim[0],xlim[1],num=10000)
w= Wis
vis = np.linspace(ylim[0],ylim[1],num=10000)
d=self.WeighteAmountPerSynapse-Wis
colors=['r','c']
# temp1 = 2 * (b*h*s)**2
# temp2 = a * (h*s)**(3/2)
# temp3 = a**2 * h * s
# temp4 = 4 * (b*d*h*s - b*d*s*w + b*h*w - b*w**2)
# temp5 = (a*h*s)**2
# temp6 = 2*b*d*(h*s)**2 - 2*b*d*h*s**2*w +2*b*h**2*s*w - 2*b*h*s*w**2
# ax.plot( Wis, (-temp2 * np.sqrt(temp3+temp4) + temp5 + temp6)/ temp1, lw=2, label='v-nullcline 0' )
# ax.plot( Wis, (temp2 * np.sqrt(temp3+temp4) + temp5 + temp6)/ temp1, lw=2, label='v-nullcline 1' )
# ax.plot( Wis, (-temp2 * np.sqrt(temp3-temp4) - temp5 + temp6)/ temp1, lw=2, label='v-nullcline 2' )
# ax.plot( Wis, (temp2 * np.sqrt(temp3-temp4) - temp5 + temp6)/ temp1, lw=2, label='v-nullcline 3' )
# temp1 = 2 * b**2
# temp2 = a
# temp3 = a**2
# temp4 = 4 * (b*d - b*w)
# temp5 = a**2
# temp6 = 2*b*d - 2*b*w
# ax.plot( Wis, (-temp2 * np.sqrt(temp3+temp4) + temp5 + temp6)/ temp1, lw=2, label='v-nullcline 0' )
# ax.plot( Wis, (temp2 * np.sqrt(temp3+temp4) + temp5 + temp6)/ temp1, lw=2, label='v-nullcline 1' )
# ax.plot( Wis, (-temp2 * np.sqrt(temp3-temp4) - temp5 + temp6)/ temp1, lw=2, label='v-nullcline 2' )
# ax.plot( Wis, (temp2 * np.sqrt(temp3-temp4) - temp5 + temp6)/ temp1, lw=2, label='v-nullcline 3' )
# ax.plot( Wis,np.zeros(Wis.shape) , lw=2, label='w-nullcline' )
# ax.plot(a*np.sign(vis)*np.sqrt(np.abs(vis))-b*vis+d, vis, lw=2, label='v-nullcline 4' )
temp1 = 2 * b**2
temp2 = a
temp3 = a**2
temp4 = 4 * (b*d - b*w**2)
temp5 = a**2
temp6 = 2*b*d - 2*b*w**2
ax.plot( Wis, (-temp2 * np.sqrt(temp3+temp4) + temp5 + temp6)/ temp1, lw=2, label='v-nullcline 0' )
ax.plot( Wis, (temp2 * np.sqrt(temp3+temp4) + temp5 + temp6)/ temp1, lw=2, label='v-nullcline 1' )
ax.plot( Wis, (-temp2 * np.sqrt(temp3-temp4) - temp5 + temp6)/ temp1, lw=2, label='v-nullcline 2' )
ax.plot( Wis, (temp2 * np.sqrt(temp3-temp4) - temp5 + temp6)/ temp1, lw=2, label='v-nullcline 3' )
ax.plot( Wis,np.zeros(Wis.shape) , lw=2, label='w-nullcline' )
ax.plot(np.sqrt(a*np.sign(vis)*np.sqrt(np.abs(vis))-b*vis+d), vis, lw=2, label='v-nullcline 4' )
ax.set_ylim (ylim)
# ax.axvline( 0 , lw=2, label='w-nullcline' ) #
Wispace=np.linspace(xlim[0],xlim[1], num=30)
Vspace=np.linspace(ylim[0],ylim[1], num=20)
Wistep=Wispace[1]-Wispace[0]
Vstep=Vspace[1]-Vspace[0]
W1 , V1 = np.meshgrid(Wispace, Vspace)
print(W1)
print(V1)
DW1, DV1=self.Derivative(state=[W1, V1] , inputs=self.WeighteAmountPerSynapse-W1, Parameters=[CW, tauWV, aWV, rWV, scale, WeightersCentral,WeighterVarDamping] )
# VectorZ=DW1+ DV1*1j
# M = np.log(np.hypot(DW1, DV1))
M = np.greater(DV1, 0)
ax.quiver(W1 , V1, DW1, DV1, (M), width=0.002, angles='xy')#pivot='mid')
#ax.legend(bbox_to_anchor=(0.6, 0.2), loc=2, borderaxespad=0.,prop={'size':12})
ax.legend(prop={'size':12})
ax.grid()
return [fig, ax]
def plot(TimOfRecording, Traces = None, path='', savePlots = False, StartTimeRate=0.3, DownSampleRate=10,linewidth =1, FullScale=False):
# plt.rc('axes', prop_cycle=(cycler('color',['C0','C1','C2','C3','C4','C5','C6','C7','C8','C9','b','k'])))
mpl.rcParams['axes.prop_cycle']=cycler('color',['#1f77b4','#ff7f0e','#2ca02c','#d62728','#9467bd','#8c564b','#e377c2','#7f7f7f','#bcbd22','#17becf','b','k'])
# mpl.rcParams['axes.prop_cycle']=cycler(color='category20')
if Traces is not None:
Tracet, TraceWeighters, TraceWeighterVarRates, TraceWeighterInAxonConcentration, TraceWeightersCentral,TraceWeighterVarDamping,TraceEquivalentVolume = Traces
# else:
# for key in self.Trace:
# exec("Trace%s = self.Trace[%s]" % (key,key))
TracetInS=Tracet.astype(float)/1000
NumberOfSteps = len(TracetInS)
if StartTimeRate == 0:
StartStep = 0
else:
StartStep = NumberOfSteps - int(NumberOfSteps*StartTimeRate)
figure1 = plt.figure()
labels = [str(i) for i in range(TraceWeighters.shape[1])]
figure1lines = plt.plot(TracetInS, TraceWeighters, label=labels, linewidth= linewidth)
plt.legend(figure1lines, labels)
plt.xlabel('Time (s)')
plt.title('Instantaneous Synaptic Strength')
figure2 = plt.figure();
plt.plot(TracetInS, TraceWeighterVarRates,linewidth= linewidth)
plt.xlabel('Time (s)')
plt.title("'Pump' rate")
figure3 = plt.figure()
ConcentrationLines =plt.plot(TracetInS[::DownSampleRate], TraceWeighters[::DownSampleRate]/TraceEquivalentVolume[::DownSampleRate],linewidth= linewidth)
AxonConcentrationLines=plt.plot(TracetInS, TraceWeighterInAxonConcentration,linewidth= linewidth)
plt.legend([ConcentrationLines,AxonConcentrationLines], [labels,'Axon'])
plt.xlabel('Time (s)')
plt.title('Receptor Concentration')
X=TraceWeighters[StartStep:NumberOfSteps,0][::DownSampleRate]
Y=TraceWeighters[StartStep:NumberOfSteps,1][::DownSampleRate]
Z=TraceWeighters[StartStep:NumberOfSteps,2][::DownSampleRate]
figure4 = plt.figure()
plt.plot(X,Y)
plt.xlabel('Time (s)')
plt.title('2 Instantaneous Synaptic Strength')
plt.xlabel('Instantaneous Synaptic Strength 0')
plt.ylabel('Instantaneous Synaptic Strength 1')
figure5 = plt.figure()
ax = figure5.add_subplot(111, projection='3d')
ax.plot(X,Y,Z)
ax.set_xlabel('Instantaneous Synaptic Strength 0')
ax.set_ylabel('Instantaneous Synaptic Strength 1')
ax.set_zlabel('Instantaneous Synaptic Strength 2')
# Create cubic bounding box to simulate equal aspect ratio
max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max()
Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(X.max()+X.min())
Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(Y.max()+Y.min())
Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(Z.max()+Z.min())
# Comment or uncomment following both lines to test the fake bounding box:
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w',linewidth= linewidth)
if FullScale:
ax.set_xlim(0,1)
ax.set_ylim(0,1)
ax.set_zlim(0,1)
figure6 = plt.figure()
figure6lines = plt.plot(TracetInS[::DownSampleRate], TraceWeightersCentral[::DownSampleRate], label=labels, linewidth= linewidth)
plt.legend(figure6lines, labels)
plt.title('Center of Synaptic Strength Oscillation')
plt.xlabel('Time (s)')
figure7 = plt.figure()
figure7lines = plt.plot(TracetInS[::DownSampleRate], TraceWeighterVarDamping[::DownSampleRate], label=labels, linewidth= linewidth)
plt.legend(figure7lines, labels)
plt.title('Damping factor')
plt.xlabel('Time (s)')
figure8 = plt.figure()
figure8lines = plt.plot(TracetInS[::DownSampleRate], TraceEquivalentVolume[::DownSampleRate], label=labels, linewidth= linewidth)
plt.legend(figure7lines, labels)
plt.xlabel('Time (s)')
plt.title('Receptor Storage Capacity')
#%
figure9 = plt.figure()
figure9ax1 = figure9 .add_subplot(111)
points0,points1 = CrossAnalysis(TraceWeighters[:,0],TraceWeightersCentral[:,0],TraceWeighters,TracetInS)
if FullScale:
figure9ax1.set_xlim(0,1)
figure9ax1.set_ylim(0,1)
# points0={'t':[],'points':[]}
# points1={'t':[],'points':[]}
# GreaterThanCentre=(TraceWeighters[0,0]>TraceWeightersCentral[0,0])
# print(TraceWeighters[0,0])
# print(TraceWeightersCentral[0,0])
# for i1 in range(len(TraceWeighters)):
## print(TraceWeighters[i1,0])
## print(TraceWeightersCentral[i1,0])
# if GreaterThanCentre == True:
# if TraceWeighters[i1,0]<TraceWeightersCentral[i1,0]:
# #print(TraceWeighters[i1,0])
# points0['points'].append(TraceWeighters[i1])
# points0['t'].append(TracetInS[i1])
# GreaterThanCentre = False
# elif GreaterThanCentre == False:
# if TraceWeighters[i1,0]>TraceWeightersCentral[i1,0]:
# #print(TraceWeighters[i1,0])
# points1['points'].append(TraceWeighters[i1])
# points1['t'].append(TracetInS[i1])
# GreaterThanCentre = True
# #c = np.empty(len(m[:,0])); c.fill(megno)
# points0['points']=np.array(points0['points'])
# points1['points']=np.array(points1['points'])
print('points0')
print(points0['points'])
print('points1')
print(points1['points'])
pointsploted0 = figure9ax1.scatter(points0['points'][:,1],points0['points'][:,2],c=points0['t'], cmap=plt.cm.get_cmap('Greens'), marker=".", edgecolor='none') #c=c, , cmap=cm
pointsploted1 = figure9ax1.scatter(points1['points'][:,1],points1['points'][:,2],c=points1['t'], cmap=plt.cm.get_cmap('Blues'), marker=".", edgecolor='none')
#plt.legend(figure7lines, labels)
plt.colorbar(pointsploted0)
plt.colorbar(pointsploted1)
plt.title('Poincare map')
plt.xlabel('Instantaneous Synaptic Strength 1')
plt.ylabel('Instantaneous Synaptic Strength 2')
#%
figure1.tight_layout()
figure2.tight_layout()
figure3.tight_layout()
figure4.tight_layout()
figure5.tight_layout()
figure6.tight_layout()
figure7.tight_layout()
figure8.tight_layout()
figure9.tight_layout()
if savePlots == True:
pp = PdfPages(path+"DynamicSynapse"+TimOfRecording+'.pdf')
figure1.savefig(pp, format='pdf')
figure2.savefig(pp, format='pdf')
figure3.savefig(pp, format='pdf')
figure4.savefig(pp, format='pdf')
figure5.savefig(pp, format='pdf')
figure6.savefig(pp, format='pdf')
figure7.savefig(pp, format='pdf')
figure8.savefig(pp, format='pdf')
figure9.savefig(pp, format='pdf')
pp.close()
# Figures = {'TraceWeighters':figure1, 'TraceWeighterVarRates':figure2, 'TraceWeighterInAxon':figure3, '2TraceWeighters':figure4, '3DTraceWeighters':figure5, 'WeightersCentral':figure6, 'Damping':figure7,'EquivalentVolume':figure8,'Poincare map':figure9}
# with open(path+"DynamicSynapse"+TimOfRecording+'.pkl', 'wb') as pkl:
# dill.dump(Figures, pkl)
return [figure1,figure2,figure3,figure4,figure5, figure6, figure7, figure8,figure9, ax]
#%%
def CrossAnalysis(Oscillate,Reference,OscillateArray,Tracet):
points0={'t':[],'points':[]}
points1={'t':[],'points':[]}
GreaterThanCentre=(Oscillate[0]>Reference[0])
print(Oscillate[0])
print(Reference[0])
for i1 in range(len(Oscillate)):
# print(Oscillates[i1,0])
# print(References[i1,0])
if GreaterThanCentre == True:
if Oscillate[i1]<Reference[i1]:
#print(GreaterThanCentre)
#print(Oscillates[i1,0])
points0['points'].append(OscillateArray[i1])
points0['t'].append(Tracet[i1])
GreaterThanCentre = False
elif GreaterThanCentre == False:
if Oscillate[i1]>Reference[i1]:
#print (GreaterThanCentre)
#print(Oscillates[i1,0])
points1['points'].append(OscillateArray[i1])
points1['t'].append(Tracet[i1])
GreaterThanCentre = True
#c = np.empty(len(m[:,0])); c.fill(megno)
points0['points']=np.array(points0['points'])
points1['points']=np.array(points1['points'])
points0['t']=np.array(points0['t'])
points1['t']=np.array(points1['t'])
return points0, points1
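# --- Editor's note: the sketch below is illustrative and not part of the original script. It
# --- exercises CrossAnalysis() on a synthetic oscillation around a constant reference so the
# --- meaning of the two returned crossing sets is visible; the helper name and test signal are
# --- made up for illustration.
def _cross_analysis_demo():
    import numpy as np  # numpy is assumed to be available, as elsewhere in this module
    t = np.linspace(0, 10, 1001)                    # time axis, seconds
    osc = 0.5 + 0.4 * np.sin(2 * np.pi * 0.5 * t)   # oscillation around 0.5
    ref = 0.5 * np.ones_like(t)                     # constant reference (the "central" value)
    osc_array = np.column_stack([osc, osc, osc])    # stand-in for the per-synapse trace array
    points0, points1 = CrossAnalysis(osc, ref, osc_array, t)
    # points0 holds downward crossings, points1 upward crossings; each entry carries the crossing time
    print(len(points0['t']), len(points1['t']))
    return points0, points1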
def NearestFinder(array,value):
idx = np.argmin(np.abs(array-value))
return idx
def DistanceFinder(Data):
try:
ADSA, dt, NumberOfSteps, Arg0, Arg1, Index0, Index1, phase = Data
ADSA, Traces = SimulationLoop(ADSA, dt, NumberOfSteps, Arg0, Arg1, phase, Index0, Index1)
points0,points1 = CrossAnalysis(Traces[1][:,0],Traces[4][:,0],Traces[1],Traces[0])
Distance = np.zeros([1])
DistanceAv = np.zeros([1])
DistanceAcc = np.zeros([1])
DistanceAccAv = np.zeros([1])
print(points0)
if len(points0['t'] )>Traces[-1]/30000/3:
Distance0=np.array(points0['points'][1:,:])-np.array(points0['points'][0:-1,:])
Distance0=np.vstack((np.zeros(Distance0[0].shape), Distance0) )
# Distance1=np.array(points1['points'][1:,:])-np.array(points1['points'][0:-1,:])
# Distance1=np.append(np.zeros(Distance1[0].shape), Distance1 )
print('Distance0'+str(Distance0))
Distance=np.linalg.norm(Distance0,axis=1)
DistanceAv = np.average(Distance)
DistanceAcc=Distance[1:]-Distance[0:-1]
DistanceAcc=np.append(np.zeros(DistanceAcc[0].shape), DistanceAcc)
DistanceAccAv=np.average(DistanceAcc)
except:
traceback.print_exc(file=sys.stderr)
return Index0, Index1, ADSA, Arg0 , Arg1, Distance, DistanceAv, DistanceAcc, DistanceAccAv
#def ParameterOptimizer(AMBONs,gm , TDm):
def DataGenerator(ADSA,dt, NumberOfSteps, Arg0 , Arg1 , phase):
Index0=0
Index1=0
while Index0<len(Arg0)and Index1<len(Arg1[0]):
data=[ADSA,dt, NumberOfSteps,Arg0[Index0,Index1], Arg1[Index0,Index1],Index0,Index1,phase]
yield data
if Index1<len(Arg0[0])-1:
Index1 +=1
else:
#if Index0<len(gm)-1:
Index1=0
Index0 +=1
def SimulationLoop(ADSA,dt, NumberOfSteps, Arg0 , Arg1 , phase=0,Index0=0,Index1=0):
ADSA.tauWV = Arg0
ADSA.aWV = Arg1
ADSA.InitRecording(NumberOfSteps)
Tracet = np.zeros(NumberOfSteps)
for step in range(NumberOfSteps):
# WeightersLast = copy.deepcopy(Weighters)
# WeighterVarRatesLast = copy.deepcopy(WeighterVarRates)
ADSA.StateUpdate()
ADSA.StepSynapseDynamics( SimulationTimeInterval,0)
if ADSA.RecordingState:
ADSA.Recording()
Tracet[step] = step*SimulationTimeInterval
#%%
if step%(100000./dt)<1:
print ('phase=%s,Index0=%d, Index1=%d, tauWV=%s, aWV=%s, step=%s'%(phase,Index0,Index1,ADSA.tauWV, ADSA.aWV,step))
# Tracet, TraceWeighters, TraceWeighterVarRates, TraceWeighterInAxon, traceWeightersCentral,traceWeighterVarDamping
NeuronNumber=1
newSlice= [slice(None)]*3
newSlice[1]=NeuronNumber
Traces = Tracet, ADSA.Trace['Weighters'][tuple(newSlice)], ADSA.Trace['WeighterVarRates'][tuple(newSlice)], ADSA.Trace['WeighterInAxon'][tuple(newSlice)], ADSA.Trace['WeightersCentral'][tuple(newSlice)], ADSA.Trace['WeighterVarDamping'][tuple(newSlice)], ADSA.Trace['EquivalentVolume'][tuple(newSlice)]
return ADSA, Traces
if __name__=="__main__":
InintialDS =1
SearchParameters =0
InintialSearch =1
SingleSimulation = 1
PlotPhasePotrait = 1
TimOfRecording=time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
if InintialDS:
NumberOfNeuron=1
NumberOfSynapses = 1# N =3 tauWV =50; #N = 6 tauWV = 25
Weighters= 0.25#np.random.rand(NumberOfNeuron,NumberOfSynapses) #0.5*np.ones(NumberOfSynapses) +0.001*np.random.rand(NumberOfSynapses) #
WeighteAmountPerSynapse = 1
WeighterInAxon = WeighteAmountPerSynapse* NumberOfSynapses - Weighters
WeighterVarRates = 0#np.zeros((NumberOfNeuron,NumberOfSynapses))
# TraceWeighters = np.zeros((NumberOfSteps,NumberOfNeuron,NumberOfSynapses))
# TraceWeighterVarRates = np.zeros((NumberOfSteps,NumberOfNeuron,NumberOfSynapses))
# TraceWeighterInAxon = np.zeros((NumberOfSteps,NumberOfNeuron))
# CW = 100
# tauWV =60#40#19#17.8#40#35#50#40#17.8#40 if flow rate times w*v choose 40, if flow rate times w or v choose 20
# aWV = 170#130 100
# rWV = 7000 #7000
# scale=1
# damping =2
# CW = 100
# SimulationTimeInterval = 30
# CW = 100
### ratio of integration of positive value oscillation and negative value oscillation is low
# tauWV =500#40#19#17.8#40#35#50#40#17.8#40 if flow rate times w*v choose 40, if flow rate times w or v choose 20
# aWV = 170#130 100
# rWV = 7000 #7000
# scale=1
# damping =2
# CW = 100
# SimulationTimeInterval = 10
## oscillation with periods of 300 to 500 seconds
#
# tauWV =0.1#40#19#17.8#40#35#50#40#17.8#40 if flow rate times w*v choose 40, if flow rate times w or v choose 20
# aWV = 34#170#130 100
# rWV = 7000*500*100#7000
# scale=1
# damping =2*7000
# CW = 100
# SimulationTimeInterval = 100
#
## oscillation with periods of 20 seconds *** when receptor amount is 10
# tauWV =0.1#40#19#17.8#40#35#50#40#17.8#40 if flow rate times w*v choose 40, if flow rate times w or v choose 20
# aWV = 100#170#130 100
# rWV = 7000*500*300#7000
# scale=1
# damping =2*3000
# CW = 100
## oscillation with periods of 50 seconds *** when receptor amount is 1
#
tauWV =0.5#40#19#17.8#40#35#50#40#17.8#40 if flow rate times w*v choose 40, if flow rate times w or v choose 20
aWV = 100#170#130 100
rWV = 7000*500#7000
scale=1
damping =2*7000
CW = 100
SimulationTimeInterval = 100
# tauWV =500#40#19#17.8#40#35#50#40#17.8#40 if flow rate times w*v choose 40, if flow rate times w or v choose 20
# aWV = 85#130 100
# rWV = 3500 #7000
# scale=1
# damping =2
# CW = 100
# SimulationTimeInterval = 10
# tauWV = 20#40#19#17.8#40#35#50#40#17.8#40 if flow rate times w*v choose 40, if flow rate times w or v choose 20
# aWV = 130#100
# rWV = 5000
# scale=1
# damping =2
# SimulationTimeInterval = 30
# CW = 100
# tauWV = 40#19#17.8#40#35#50#40#17.8#40 if flow rate times w*v choose 40, if flow rate times w or v choose 20
# aWV = 100
# rWV = 10000
# scale=1
# damping =2
# SimulationTimeInterval = 30
# CW = 100
# tauWV = 40#50#40#17.8#40 if flow rate times w*v choose 40, if flow rate times w or v choose 20
# aWV = 0.0005#0.02 #100
# rWV = 200000
# scale=1
#0.4 0.05
WeightersCentral = 0.5#(NumberOfSynapses) #* np.random.rand(NumberOfNeuron,NumberOfSynapses) # 0.6 * np.random.rand(NumberOfNeuron,NumberOfSynapses) #np.array([4, 1, 1, 1, 1])#np.ones(NumberOfSynapses)*0.4 + 0.3 * np.random.rand(NumberOfSynapses) #np.ones(NumberOfSynapses)/2 + [0.8, 0.1,0.1, 0.1, 0] #[0.2, 0.1, 0]
WeighterVarDamping = damping #np.array([10, 2, 2, 2, 2]) #[10,2,2,2,2] #[2,2,2]
# WeighterVarDamping[0,1] = 4
Parameters = [CW, tauWV, aWV , rWV, WeightersCentral,WeighterVarDamping]
ADSA=DynamicSynapseArray( NumberOfSynapses = [NumberOfNeuron, NumberOfSynapses], CW = CW, tauWV = tauWV, aWV = aWV, rWV = rWV,scale=scale, \
WeightersCentral = WeightersCentral , WeighterVarDamping = WeighterVarDamping, WeighteAmountPerSynapse = WeighteAmountPerSynapse, \
Weighters = Weighters, WeighterVarRates = WeighterVarRates,WeightersCentralUpdateCompensate = 0)
# ADSA.DampingUpdateRate=0
SimulationTimeLength = 60*60*1000
dt = SimulationTimeInterval
NumberOfSteps = int(SimulationTimeLength/SimulationTimeInterval)
Tracet = np.zeros(NumberOfSteps)
ADSA.InitRecording(NumberOfSteps)
if SearchParameters :
if InintialSearch:
searchSamples=[10,10]
centralSearchSamples=np.floor(np.array(searchSamples)/2).astype(int)
DistanceAvLastTime=0
traceDistanceAccAv=np.zeros(searchSamples)
traceDistanceAv=np.zeros(searchSamples)
Arg0Indexs=[]
Arg0Maxs=[]
Arg1Maxs=[]
Scales=[]
phase=1
ax1=[None for i in range(phase)]
fig=[None for i in range(phase)]
img=[None for i in range(phase)]
DistanceAccAvThisTime=0
numberOfProcess=15
DistanceAccAv=np.zeros(searchSamples)
DistanceMax=np.zeros(searchSamples)
pool = Pool(processes=numberOfProcess)
#_unordered
Arg0spaceLim=[10,40]
#gspaceLim=[0.7,0.72]
Arg0Max=np.average(Arg0spaceLim)
Arg1spaceLim=[100,200]
#Arg1spaceLim=[38, 38.6]
Arg1Max=np.average(Arg1spaceLim)
Arg0space=np.linspace(Arg0spaceLim[0],Arg0spaceLim[1], num=searchSamples[0])
Arg1space=np.linspace(Arg1spaceLim[0],Arg1spaceLim[1], num=searchSamples[1])
Scale=np.array([Arg0spaceLim[1]-Arg0spaceLim[0],Arg1spaceLim[1]-Arg1spaceLim[0]])
for i1 in range(phase):
Arg0space=np.linspace(Arg0Max-Scale[0]/2,Arg0Max+Scale[0]/2, num=searchSamples[0])
Arg1space=np.linspace(Arg1Max-Scale[1]/2,Arg1Max+Scale[1]/2, num=searchSamples[1])
randSearchRate0=(np.random.random_sample(Arg0space.shape)-0.5)*0#.1
randSearchRate0[centralSearchSamples[0]]=0
randSearchRate1=(np.random.random_sample(Arg1space.shape)-0.5)*0#.1
randSearchRate1[centralSearchSamples[1]]=0
Arg0m , Arg1m = np.meshgrid(Arg0space*(1+randSearchRate0), Arg1space*(1+randSearchRate1))
print (Arg0m, Arg1m)
# traceV,traceU,traceI,Iss=SimulationLoop(AMBONs,gm , Arg1m )
iTestResultsOfTests= pool.imap_unordered(DistanceFinder, DataGenerator(ADSA, dt, NumberOfSteps, Arg0m , Arg1m, i1)) #
for AResult in iTestResultsOfTests:
Index0, Index1, ADSA, Arg0 , Arg1, Distance, DistanceAv, DistanceAcc, DistanceAccAv =AResult
traceDistanceAv[Index0, Index1]=DistanceAv
traceDistanceAccAv[Index0, Index1]=DistanceAccAv
# traceDistanceAccAv.append(DistanceAccAv)
# traceDistanceAv.append(DistanceAv)
print('Distance:'+str(Distance))
# MaxIndex=np.unravel_index(np.argmax(traceDistanceAv),traceDistanceAv.shape)
MaxIndex=np.unravel_index(np.argmax(traceDistanceAv),traceDistanceAccAv.shape)
# Arg0Index=int(MaxIndex % searchSamples[0])
Arg0Max=Arg0space[MaxIndex[0]]
# Arg0Indexs.append(Arg0Index)
# Arg0Maxs.append(Arg0Max)
# Arg1Index=np.floor(MaxIndex/searchSamples[0]).astype(int)
Arg1Max=Arg1space[MaxIndex[1]]
# Arg1Maxs.append(Arg1Max)
DistanceAvThisTime=traceDistanceAv[MaxIndex]
Improve=DistanceAvThisTime-DistanceAvLastTime
if Improve <-0.1:
print("negative improve")
break
else:
DistanceAvLastTime=DistanceAvThisTime
Scale=Scale*0.5#/np.average(searchSamples)*2
Scales.append(Scale)
# for i1 in range(10):
# print ("Arg0Max=%64f, Arg1Max=%64f, i=%s,DistanceAccAvThisTime=%64f, Improve=%64f"%(Arg0Max,Arg1Max, i1,DistanceAvThisTime,Improve))
#plt.imshow(traceDistanceAccAv)
print ("Arg0Max=%f, Arg1Max=%f, i=%s,DistanceAccAvThisTime=%f, Improve=%f"%(Arg0Max,Arg1Max, i1,DistanceAvThisTime,Improve))
#%%
fig[i1]=plt.figure()
x,y=np.mgrid[slice(Arg0space[0],Arg0space[-1],searchSamples[0]*1j),slice(Arg1space[-1],Arg1space[0],searchSamples[1]*1j)]
ax1[i1] = fig[i1].add_subplot(111)
img[i1]=ax1[i1].pcolormesh(x,y,traceDistanceAv)
fig[i1].colorbar(img[i1],ax=ax1[i1])
fig[i1].show()
#%%
print (traceDistanceAccAv,traceDistanceAv)
ADSA.tauWV = Arg0Max
ADSA.aWV = Arg1Max
#%%
if SingleSimulation:
#%%
# DampingUpdateRateCache = ADSA.DampingUpdateRate
for step in range(NumberOfSteps):
# WeightersLast = copy.deepcopy(Weighters)
# WeighterVarRatesLast = copy.deepcopy(WeighterVarRates)
# if step * SimulationTimeInterval<60*60*1000:
# ADSA.DampingUpdateRate=0
# else:
# ADSA.DampingUpdateRate= DampingUpdateRateCache
ADSA.StateUpdate()
# ADSA.StepSynapseDynamics( SimulationTimeInterval,0)
ADSA.StepSynapseDynamics( SimulationTimeInterval,0)
if ADSA.RecordingState:
ADSA.Recording()
Tracet[step] = step*SimulationTimeInterval
#%
if step % 1000 == 0:
print('%d of %d steps'%(step,NumberOfSteps))
# Tracet, TraceWeighters, TraceWeighterVarRates, TraceWeighterInAxon, traceWeightersCentral,traceWeighterVarDamping
NeuronNumber=0
newSlice= [slice(None)]*3
newSlice[1]=NeuronNumber
Traces = Tracet, ADSA.Trace['Weighters'][tuple(newSlice)], ADSA.Trace['WeighterVarRates'][tuple(newSlice)], ADSA.Trace['WeighterInAxonConcentration'][tuple(newSlice)], ADSA.Trace['WeightersCentral'][tuple(newSlice)], ADSA.Trace['WeighterVarDamping'][tuple(newSlice)], ADSA.Trace['EquivalentVolume'][tuple(newSlice)]
#%%
UpHalfWeightSum= (ADSA.Trace['Weighters'][newSlice]-WeightersCentral)[ADSA.Trace['Weighters'][newSlice]>WeightersCentral].sum()
UpHalfTime=ADSA.Trace['Weighters'][newSlice][ADSA.Trace['Weighters'][newSlice]>WeightersCentral].shape[0]*dt
DownHalfWeightSum= (WeightersCentral-ADSA.Trace['Weighters'][newSlice])[ADSA.Trace['Weighters'][newSlice]<WeightersCentral].sum()
DownHalfTime=ADSA.Trace['Weighters'][newSlice][ADSA.Trace['Weighters'][newSlice]<WeightersCentral].shape[0]*dt
print("UpHalfWeightSum: %f, UpHalfTime: %f"%(UpHalfWeightSum, UpHalfTime))
print("DownHalfWeightSum:%f, DownHalfTime:%f"%(DownHalfWeightSum, DownHalfTime))
print("UDWeightRate:%f, UDTimeRate:%f"%(UpHalfWeightSum/DownHalfWeightSum, float(UpHalfTime)/DownHalfTime))
#%%
# figure1,figure2,figure3,figure4,figure5, figure6, figure7,figure8,figure9,ax = plot(TimOfRecording, Traces, path='/media/archive2T/chitianqilin/SimulationResult/DynamicSynapse/Plots/', savePlots=True, linewidth= 0.2) #path=
#%%
# f = open("I:/OneDrive - University of Edinburgh/Documents/MushroomBody Model/DynamicSynapse/DynamicSynapse"+TimOfRecording+'.pkl', 'wb')
#
# dill.dump(figure5,f )
#%%
# for angle in range(0, 360):
# ax.view_init(30, angle)
# #plt.draw()
# plt.pause(.0001)
if PlotPhasePotrait :
ViewScale=40
PhasePotraitfig, PhasePotraitax = ADSA.PlotPhasePortrait( xlim=[0,1], ylim=[-0.00010, 0.00010], fig=None, ax=None, inputs=[WeighteAmountPerSynapse- WeightersCentral, WeightersCentral], Parameters=None)
# PhasePotraitfig, PhasePotraitax = ADSA.PlotPhasePortrait( xlim=[0.0,1.5], ylim=[-0.000005*ViewScale, 0.000005*ViewScale], fig=None, ax=None, inputs=[1, WeightersCentral], Parameters=None)
#PhasePotraitax.plot(ADSA.Trace['Weighters'][:,0,0],ADSA.Trace['WeighterVarRates'][:,0,0])
points = np.array([ADSA.Trace['Weighters'][:,0,0], ADSA.Trace['WeighterVarRates'][:,0,0]]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
DistancePerStep=np.linalg.norm(points[1:]- points[:-1], axis=2).ravel()
norm = mpl.colors.Normalize(vmin=DistancePerStep.min(), vmax=DistancePerStep.max(), clip=True)
mapper = mpl.cm.ScalarMappable(norm=norm, cmap=mpl.cm.hot)
color=mapper.to_rgba(DistancePerStep)
lc = LineCollection(segments, cmap=plt.get_cmap('copper'),colors=color)
PhasePotraitax.add_collection(lc)
|
[
"chitianqilin@163.com"
] |
chitianqilin@163.com
|
67dd5b06d0a10a8ed2e4e11b0a3227b995768ab7
|
557544d64ea0e3017769e2c8e0efbb89df59dbcc
|
/ae.py
|
0f8a59531a1bd95759dd7f1bd3bff8bf9a15863f
|
[] |
no_license
|
SameepYadav/Processing-of-missing-data-by-neural-networks
|
b3a2131e52c52bb86e6cb630b5e06df4863fa2b6
|
47060bb45efefd48086926a3bc381ae9aa5ba46b
|
refs/heads/master
| 2020-06-10T17:07:27.273515
| 2019-04-16T19:08:46
| 2019-04-16T19:08:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,476
|
py
|
import os
from datetime import datetime
from time import time
import numpy as np
import tensorflow as tf
from sklearn.impute import SimpleImputer
from sklearn.mixture import GaussianMixture
from tqdm import tqdm
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
RANDOM_SEED = 42
tf.set_random_seed(RANDOM_SEED)
# Training Parameters
learning_rate = 0.01
n_epochs = 250
batch_size = 64
# Network Parameters
num_hidden_1 = 256 # 1st layer num features
num_hidden_2 = 128 # 2nd layer num features
num_hidden_3 = 64 # 3rd layer num features (the latent dim)
num_input = 784 # MNIST data input (img shape: 28*28)
n_distribution = 5 # number of Gaussian mixture components
width_mask = 13 # size of window mask
# tf Graph input (only pictures)
X = tf.placeholder("float", [None, num_input])
initializer = tf.contrib.layers.variance_scaling_initializer()
weights = {
'encoder_h1': tf.Variable(initializer([num_input, num_hidden_1])),
'encoder_h2': tf.Variable(initializer([num_hidden_1, num_hidden_2])),
'encoder_h3': tf.Variable(initializer([num_hidden_2, num_hidden_3])),
'decoder_h1': tf.Variable(initializer([num_hidden_3, num_hidden_2])),
'decoder_h2': tf.Variable(initializer([num_hidden_2, num_hidden_1])),
'decoder_h3': tf.Variable(initializer([num_hidden_1, num_input])),
}
biases = {
'encoder_b1': tf.Variable(tf.random_normal([num_hidden_1])),
'encoder_b2': tf.Variable(tf.random_normal([num_hidden_2])),
'encoder_b3': tf.Variable(tf.random_normal([num_hidden_3])),
'decoder_b1': tf.Variable(tf.random_normal([num_hidden_2])),
'decoder_b2': tf.Variable(tf.random_normal([num_hidden_1])),
'decoder_b3': tf.Variable(tf.random_normal([num_input])),
}
def random_mask(width_window, margin=0):
margin_left = margin
margin_right = margin
margin_top = margin
margin_bottom = margin
start_width = margin_top + np.random.randint(28 - width_window - margin_top - margin_bottom)
start_height = margin_left + np.random.randint(28 - width_window - margin_left - margin_right)
return np.concatenate([28 * i + np.arange(start_height, start_height + width_window) for i in
np.arange(start_width, start_width + width_window)], axis=0).astype(np.int32)
def data_with_mask(x, width_window=10):
h = width_window
for i in range(x.shape[0]):
if width_window <= 0:
h = np.random.randint(8, 20)
maska = random_mask(h)
x[i, maska] = np.nan
return x
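# --- Editor's note: a small illustrative check, not part of the original script. It shows the
# --- effect of data_with_mask(): a random width_window x width_window square of each flattened
# --- 28x28 image is overwritten with NaN to mark those pixels as missing.
def _mask_demo():
    dummy = np.ones((3, 28 * 28))           # three fake flattened images
    masked = data_with_mask(dummy, width_window=10)
    print(np.isnan(masked).sum(axis=1))     # expected: 100 NaNs per image (a 10 x 10 window)
    return masked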
# nr() returns the closed-form expectation E[relu(X)] for X ~ N(mu, sigma), where sigma holds the per-unit variances
def nr(mu, sigma):
non_zero = tf.not_equal(sigma, 0.)
new_sigma = tf.where(non_zero, sigma, tf.fill(tf.shape(sigma), 1e-20))
sqrt_sigma = tf.sqrt(new_sigma)
w = tf.div(mu, sqrt_sigma)
nr_values = sqrt_sigma * (tf.div(tf.exp(tf.div(-tf.square(w), 2.)), np.sqrt(2 * np.pi)) +
tf.multiply(tf.div(w, 2.), 1 + tf.erf(tf.div(w, np.sqrt(2)))))
nr_values = tf.where(non_zero, nr_values, (mu - tf.abs(mu)) / 2.)
return nr_values
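# --- Editor's note: an illustrative numpy-only sanity check of the formula implemented in nr()
# --- above (the closed-form expectation of relu(X) for X ~ N(mu, variance)). It is not part of
# --- the original training script; the helper name and default values are arbitrary.
def _expected_relu_check(mu=0.3, var=0.04, n_samples=200000, seed=0):
    from math import erf, exp, pi, sqrt
    rng = np.random.RandomState(seed)
    w = mu / sqrt(var)
    # closed form: sqrt(var) * pdf(w) + mu * cdf(w), the same expression nr() builds in TensorFlow
    analytic = sqrt(var) * (exp(-w * w / 2.0) / sqrt(2.0 * pi)) + mu * 0.5 * (1.0 + erf(w / sqrt(2.0)))
    empirical = np.maximum(0.0, rng.normal(mu, sqrt(var), size=n_samples)).mean()
    print('analytic E[relu(X)] = %.5f, Monte Carlo estimate = %.5f' % (analytic, empirical))
    return analytic, empirical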
def conv_first(x, means, covs, p, gamma):
gamma_ = tf.abs(gamma)
# gamma_ = tf.cond(tf.less(gamma_[0], 1.), lambda: gamma_, lambda: tf.square(gamma_))
covs_ = tf.abs(covs)
p_ = tf.nn.softmax(p, axis=0)
check_isnan = tf.is_nan(x)
check_isnan = tf.reduce_sum(tf.cast(check_isnan, tf.int32), 1)
x_miss = tf.gather(x, tf.reshape(tf.where(check_isnan > 0), [-1])) # data with missing values
x = tf.gather(x, tf.reshape(tf.where(tf.equal(check_isnan, 0)), [-1])) # data without missing values
# data without missing values
layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))
# data with missing values
where_isnan = tf.is_nan(x_miss)
where_isfinite = tf.is_finite(x_miss)
size = tf.shape(x_miss)
weights2 = tf.square(weights['encoder_h1'])
# Collect distributions
distributions = tf.TensorArray(dtype=x.dtype, size=n_distribution)
q_collector = tf.TensorArray(dtype=x.dtype, size=n_distribution)
# Each loop iteration calculates all per component
def calculate_component(i, collect1, collect2):
data_miss = tf.where(where_isnan, tf.reshape(tf.tile(means[i, :], [size[0]]), [-1, size[1]]), x_miss)
miss_cov = tf.where(where_isnan, tf.reshape(tf.tile(covs_[i, :], [size[0]]), [-1, size[1]]),
tf.zeros([size[0], size[1]]))
layer_1_m = tf.add(tf.matmul(data_miss, weights['encoder_h1']), biases['encoder_b1'])
layer_1_m = nr(layer_1_m, tf.matmul(miss_cov, weights2))
norm = tf.subtract(data_miss, means[i, :])
norm = tf.square(norm)
q = tf.where(where_isfinite,
tf.reshape(tf.tile(tf.add(gamma_, covs_[i, :]), [size[0]]), [-1, size[1]]),
tf.ones_like(x_miss))
norm = tf.div(norm, q)
norm = tf.reduce_sum(norm, axis=1)
q = tf.log(q)
q = tf.reduce_sum(q, axis=1)
q = tf.add(q, norm)
norm = tf.cast(tf.reduce_sum(tf.cast(where_isfinite, tf.int32), axis=1), tf.float32)
norm = tf.multiply(norm, tf.log(2 * np.pi))
q = tf.add(q, norm)
q = -0.5 * q
return i + 1, collect1.write(i, layer_1_m), collect2.write(i, q)
i = tf.constant(0)
_, final_distributions, final_q = tf.while_loop(lambda i, c1, c2: i < n_distribution, calculate_component,
loop_vars=(i, distributions, q_collector),
swap_memory=True, parallel_iterations=1)
distrib = final_distributions.stack()
log_q = final_q.stack()
log_q = tf.add(log_q, tf.log(p_))
r = tf.nn.softmax(log_q, axis=0)
layer_1_miss = tf.multiply(distrib, r[:, :, tf.newaxis])
layer_1_miss = tf.reduce_sum(layer_1_miss, axis=0)
# join the layer for data with missing values and the layer for data without missing values
layer_1 = tf.concat((layer_1, layer_1_miss), axis=0)
return layer_1
# Building the encoder
def encoder(x, means, covs, p, gamma):
layer_1 = conv_first(x, means, covs, p, gamma)
# Encoder Hidden layer with sigmoid activation
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']), biases['encoder_b2']))
layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_h3']), biases['encoder_b3']))
return layer_3
# Building the decoder
def decoder(x):
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']), biases['decoder_b2']))
layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['decoder_h3']), biases['decoder_b3']))
return layer_3
def prep_x(x):
check_isnan = tf.is_nan(x)
check_isnan = tf.reduce_sum(tf.cast(check_isnan, tf.int32), 1)
x_miss = tf.gather(x, tf.reshape(tf.where(check_isnan > 0), [-1]))
x = tf.gather(x, tf.reshape(tf.where(tf.equal(check_isnan, 0)), [-1]))
return tf.concat((x, x_miss), axis=0)
t0 = time()
mnist = tf.keras.datasets.mnist
try:
with np.load('./data/mnist.npz') as f:
x_train, y_train = f['x_train'], f['y_train']
x_test, y_test = f['x_test'], f['y_test']
except FileNotFoundError:
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
print("Read data done in %0.3fs." % (time() - t0))
data_train = x_train
# choose test images nn * 10
nn = 100
data_test = x_test[np.where(y_test == 0)[0][:nn], :]
for i in range(1, 10):
data_test = np.concatenate([data_test, x_test[np.where(y_test == i)[0][:nn], :]], axis=0)
data_test = np.random.permutation(data_test)
del mnist
# change background to white
data_train = 1. - data_train.reshape(-1, num_input)
data_test = 1. - data_test.reshape(-1, num_input)
# create missing data for the train set
data_train = data_with_mask(data_train, width_mask)
# create missing data for the test set
data_test = data_with_mask(data_test, width_mask)
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
data = imp.fit_transform(data_train)
t0 = time()
gmm = GaussianMixture(n_components=n_distribution, covariance_type='diag').fit(data)
print("GMM done in %0.3fs." % (time() - t0))
p = tf.Variable(initial_value=np.log(gmm.weights_.reshape((-1, 1))), dtype=tf.float32)
means = tf.Variable(initial_value=gmm.means_, dtype=tf.float32)
covs = tf.Variable(initial_value=gmm.covariances_, dtype=tf.float32)
gamma = tf.Variable(initial_value=tf.random_normal(shape=(1,), mean=1., stddev=1.), dtype=tf.float32)
del data, gmm
# Construct model
encoder_op = encoder(X, means, covs, p, gamma)
decoder_op = decoder(encoder_op)
y_pred = decoder_op # prediction
y_true = prep_x(X) # Targets (Labels) are the input data.
where_isnan = tf.is_nan(y_true)
y_pred = tf.where(where_isnan, tf.zeros_like(y_pred), y_pred)
y_true = tf.where(where_isnan, tf.zeros_like(y_true), y_true)
# Define loss and optimizer, minimize the squared error
loss = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
trn_summary = [[] for _ in range(5)]
trn_imgs = [[] for _ in range(2)]
with tf.name_scope('train'):
trn_summary[0] = tf.summary.scalar('loss', loss)
trn_summary[1] = tf.summary.histogram("p", tf.nn.softmax(p, axis=0))
for i in range(n_distribution):
trn_summary[2].append(tf.summary.histogram("mean/{:d}".format(i), means[i]))
trn_summary[3].append(tf.summary.histogram("cov/{:d}".format(i), tf.abs(covs[i])))
trn_summary[4] = tf.summary.scalar('gamma', tf.abs(gamma)[0])
image_grid = tf.contrib.gan.eval.image_grid(tf.gather(prep_x(X), np.arange(25)), (5, 5), (28, 28), 1)
trn_imgs[0] = tf.summary.image('input', image_grid, 1)
image_grid = tf.contrib.gan.eval.image_grid(tf.gather(decoder_op, np.arange(25)), (5, 5), (28, 28), 1)
trn_imgs[1] = tf.summary.image('output', image_grid, 1)
tst_summary = [[] for _ in range(3)]
with tf.name_scope('test'):
tst_summary[0] = tf.summary.scalar('loss', loss)
image_grid = tf.contrib.gan.eval.image_grid(tf.gather(prep_x(X), np.arange(25)), (5, 5), (28, 28), 1)
tst_summary[1] = tf.summary.image('input', image_grid, 1)
image_grid = tf.contrib.gan.eval.image_grid(tf.gather(decoder_op, np.arange(25)), (5, 5), (28, 28), 1)
tst_summary[2] = tf.summary.image('output', image_grid, 1)
current_date = datetime.now()
current_date = current_date.strftime('%d%b_%H%M%S')
with tf.Session() as sess:
train_writer = tf.summary.FileWriter('./log/{}'.format(current_date), sess.graph)
sess.run(init) # run the initializer
res = sess.run([*trn_summary], feed_dict={X: data_test[:25]})
train_writer.add_summary(res[1], -1)
for i in range(n_distribution):
train_writer.add_summary(res[2][i], -1)
train_writer.add_summary(res[3][i], -1)
train_writer.add_summary(res[4], -1)
epoch_tqdm = tqdm(range(1, n_epochs + 1), desc="Loss", leave=False)
for epoch in epoch_tqdm:
n_batch = data_train.shape[0] // batch_size
for iteration in tqdm(range(n_batch), desc="Batches", leave=False):
batch_x = data_train[(iteration * batch_size):((iteration + 1) * batch_size), :]
# Run optimization op (backprop) and cost op (to get loss value)
res = sess.run([optimizer, loss, *trn_summary, *trn_imgs], feed_dict={X: batch_x})
train_writer.add_summary(res[-2], n_batch * (epoch - 1) + iteration)
train_writer.add_summary(res[-1], n_batch * (epoch - 1) + iteration)
train_writer.add_summary(res[2], n_batch * (epoch - 1) + iteration)
train_writer.add_summary(res[3], n_batch * (epoch - 1) + iteration)
for i in range(n_distribution):
train_writer.add_summary(res[4][i], n_batch * (epoch - 1) + iteration)
train_writer.add_summary(res[5][i], n_batch * (epoch - 1) + iteration)
train_writer.add_summary(res[6], n_batch * (epoch - 1) + iteration)
epoch_tqdm.set_description("Loss: {:.5f}".format(res[1]))
tst_loss, tst_input, tst_output = sess.run([*tst_summary], feed_dict={X: data_test[:25]})
train_writer.add_summary(tst_loss, epoch)
train_writer.add_summary(tst_input, epoch)
train_writer.add_summary(tst_output, epoch)
|
[
"struski@ii.uj.edu.pl"
] |
struski@ii.uj.edu.pl
|
b4adbee7dc1a76265d5a9b2eba55bc3f1aced083
|
d96dc8f7dc72de011b7d73745994a1c3cb8f748a
|
/services/__init__.py
|
ebf1484b2a72250748154a4142d6c98348ddc1ec
|
[] |
no_license
|
xChuCx/RetailHero-Recommender
|
5fe12cf9d271b97c1ab9cd2d382fdfc5a0903200
|
abaa16b4522cc4725bd2c142e3af93fd84bba67d
|
refs/heads/master
| 2022-04-05T06:55:28.333399
| 2020-02-24T08:35:22
| 2020-02-24T08:35:22
| 236,315,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
# services/__init__.py
import os
from flask import Flask
from services.api.RecSystem import RecSystem_blueprint
def create_app(script_info=None):
app = Flask(__name__)
# config
app_settings = os.getenv('APP_SETTINGS')
app.config.from_object(app_settings)
app.register_blueprint(RecSystem_blueprint)
return app
|
[
"xChuCx@gmail.com"
] |
xChuCx@gmail.com
|
677d0ea08eaa32b52af16f400ea56ea987e0451f
|
5a965b99a698bae7b2ade1bc3541380bfbe0c59e
|
/21.Functions_As_Objects.py
|
006be0a3b8f8c6539f4679e0af015a316b735527
|
[] |
no_license
|
sanjay-3129/Python-Tutorial
|
29b03368db10140af39883e3ceef32ffe3710d64
|
ba505f0ef1e79a6190fddb7159249e28acaf8ae6
|
refs/heads/master
| 2021-10-23T22:50:11.773143
| 2021-10-23T08:00:12
| 2021-10-23T08:00:12
| 230,701,854
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
"""
Functions can be assigned and reassigned to variables and later referenced by those names.
"""
#(i)
def multiply(a,b):
return a*b
opr = multiply
print(opr(5,6))
#(ii)
def multiply(a,b):
return a*b
opr = multiply(5,6)
print(opr)
#(iii) - functions can also be used as arguments of other functions
def add(x, y):
return x + y
def do_twice(func, x, y):
return func(func(x, y), func(x, y))
a = 5
b = 10
print(do_twice(add, a, b))
|
[
"noreply@github.com"
] |
sanjay-3129.noreply@github.com
|
10d59197b17d10daa02d8b52a2e5f4754e2b561f
|
833147941be211a1743dded2fd5ae294e5cd6edf
|
/ctapipe/io/tests/conftest.py
|
27705d2c05a44188f63b846b24ee63cb7da950fb
|
[
"BSD-3-Clause"
] |
permissive
|
mireianievas/ctapipe
|
a1b852713c1829a17de51449265deaf7cf237a93
|
70f29b1c6b9be1366721609454b03d788419ce01
|
refs/heads/master
| 2022-05-02T16:42:40.520869
| 2022-03-16T15:28:43
| 2022-03-16T15:28:43
| 338,308,440
| 0
| 0
|
BSD-3-Clause
| 2021-02-12T12:09:08
| 2021-02-12T12:09:07
| null |
UTF-8
|
Python
| false
| false
| 840
|
py
|
import pytest
from ctapipe.io import EventSource, DataWriter
from ctapipe.utils import get_dataset_path
@pytest.fixture(scope="session")
def r1_path(tmp_path_factory):
return tmp_path_factory.mktemp("r1")
@pytest.fixture(scope="session")
def r1_hdf5_file(r1_path):
source = EventSource(
get_dataset_path("gamma_LaPalma_baseline_20Zd_180Az_prod3b_test.simtel.gz"),
max_events=5,
allowed_tels=[1, 2, 3, 4],
)
path = r1_path / "test_r1.h5"
writer = DataWriter(
event_source=source,
output_path=path,
write_parameters=False,
write_images=False,
write_stereo_shower=False,
write_mono_shower=False,
write_raw_waveforms=False,
write_waveforms=True,
)
for e in source:
writer(e)
writer.finish()
return path
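# --- Editor's note: an illustrative sketch (not part of ctapipe) of how a test module could
# --- consume the session-scoped fixture above; the test name is hypothetical.
def test_r1_hdf5_file_is_written(r1_hdf5_file):
    # the fixture should have produced a non-empty HDF5 file on disk
    assert r1_hdf5_file.exists()
    assert r1_hdf5_file.stat().st_size > 0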
|
[
"maximilian.noethe@tu-dortmund.de"
] |
maximilian.noethe@tu-dortmund.de
|
3e82565d895cbfaf27bde201ca3c75de23def36f
|
9f73bf74e1f090c69f0401aecba84d8ce1c2c11c
|
/linear_series/src/linear_series/class_base_points.py
|
6d301d5aa6a7a6801340c5dce006e109857068fc
|
[
"MIT"
] |
permissive
|
niels-lubbes/linear_series
|
3ae6d7465919aa212b46e0a278c496f47117660c
|
51cb24d34fcb822ee120930c6d7da006a349e08c
|
refs/heads/master
| 2020-12-02T17:46:30.263662
| 2019-12-13T17:58:15
| 2019-12-13T17:58:15
| 96,425,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,210
|
py
|
'''
Use of this source code is governed by a MIT-style license that can be found in the LICENSE file.
Created on Aug 4, 2016
@author: Niels Lubbes
This file declares 2 classes: "BasePointTree" and "BasePoint".
'''
class BasePointTree():
'''
This class represents a tree of base points of a linear series of
curves.
'''
# If True then the string representation of this object is short.
short = True
def __init__( self, chart_lst = ['z', 'x', 'y'] ):
'''
Constructor.
'''
# Linear series whose base points
# are represented by this object.
#
self.ls = None
# A list of charts where a chart is denoted by a String.
# See documentation of "get_base_point_tree.in_previous_chart()"
# for specification of chart strings.
#
self.chart_lst = chart_lst
# Dictionary where a key is a chart
# and a value is a list of BasePoint objects.
# "self.chart_lst" is an ordered list of keys of this
# dictionary.
#
self.chart_tree_dct = {}
def add( self, chart, sol, mult ):
'''
INPUT:
- "self" -- BasePointTree object.
- "chart" -- A String denoting a chart.
- "sol" -- A 2-tuple of elements in "PolyRing.num_field"
representing a base point in a chart.
- "mult" -- An integer representing the multiplicity of the base point.
OUTPUT:
- Adds a base point to "self[chart]"
- Return the added base point.
'''
bp = BasePoint( 0, chart, None )
bp.sol = sol
bp.mult = mult
self[chart] += [bp]
return bp
# operator overloading for []
def __getitem__( self, chart ):
if chart not in self.chart_lst:
raise ValueError( 'The chart key should be an element in:', self.chart_lst )
if chart not in self.chart_tree_dct:
self.chart_tree_dct[chart] = []
return self.chart_tree_dct[chart]
# operator overloading for []
def __setitem__( self, chart, item ):
if chart not in self.chart_lst:
raise ValueError( 'The chart key should be an element in:', self.chart_lst )
self.chart_tree_dct[chart] = item
# overloads str(): human readable string representation of object
def __str__( self ):
'''
OUTPUT:
- Human readable string representation of object.
The String consists of lines with the following format
chart=[C], depth=[Integer], mult=[Integer], sol=[P], [LinearSeries]
where
* C = An element of "self.chart_lst".
* P = A 2-tuple: ( [PolyRing.num_field], [PolyRing.num_field] )
and where
* sol : A point P in the zeroset of the linear series.
* chart: The current chart of the point P.
* mult : The multiplicity of P as a root.
* depth: The depth when considering P as an infinitely near point
in a tree structure.
* For each blowup chart we also depict the corresponding [LinearSeries].
Note that the lines represent a tree structure.
Below we see an example.
EXAMPLE:
- sage: ls = LinearSeries( ['x^2+y^2', 'x*z+y^2'], PolyRing( 'x,y,z', True ) )
sage: print( ls.get_bp_tree() )
out :
{ 2, <<x^2 + y^2, y^2 + x*z>>, QQ( <a0|t^2 + 1> )[x, y, z] }
chart=z, depth=0, mult=1, sol=(0, 0), { 2, <<x^2 + y^2, y^2 + x>>, QQ( <a0|t^2 + 1> )[x, y] }
chart=t, depth=1, mult=1, sol=(0, 0), { 2, <<x^2*y + y, x + y>>, QQ( <a0|t^2 + 1> )[x, y] }
chart=z, depth=0, mult=1, sol=(1, (-a0)), { 2, <<x^2 + y^2, y^2 + x>>, QQ( <a0|t^2 + 1> )[x, y] }
chart=z, depth=0, mult=1, sol=(1, (a0)), { 2, <<x^2 + y^2, y^2 + x>>, QQ( <a0|t^2 + 1> )[x, y] }
'''
tree_str = ''
if self.ls != None:
tree_str += '\n' + str( self.ls )
for chart in self.chart_lst:
for bp in self[chart]:
tree_str += str( bp )
return tree_str.replace( '\n', '\n\t' )
def alt_str( self ):
'''
This method can be useful for testing.
OUTPUT:
- A string representation without the linear series.
'''
tree_str = ''
for chart in self.chart_lst:
for bp in self[chart]:
tree_str += bp.alt_str()
return tree_str.replace( '\n', '\n\t' )
class BasePoint():
'''
This class represents a binary tree of base points.
If a base point has an infinitely near base point then
its 2 leaves represent two charts 's' and 't' of the blowup at these
base points.
'''
def __init__( self , depth, chart, ls ):
'''
Constructor.
'''
# Depth of base point tree.
#
self.depth = int( depth )
# Chart denoted by a string.
# See docs of "get_base_point_tree.in_previous_chart()"
#
self.chart = chart
# LinearSeries
#
self.ls = ls
# Base point represented as a 2-tuple of
# elements in a number field.
#
self.sol = None
# Multiplicity of a solution
# (0=no solution, -1=overlapping chart)
#
self.mult = 0
# lists of base points
#
self.bp_lst_t = []
self.bp_lst_s = []
def add( self, chart_st, sol, mult ):
'''
INPUT:
- "self" --
- "chart_st" -- 's' or 't'
- "sol" -- A 2-tuple of elements in "PolyRing.num_field"
representing an infinitely near base point in a blowup chart.
- "mult" -- An integer representing the multiplicity of the base point.
OUTPUT:
- Adds a base point to either "self.bp_lst_s" or "self.bp_lst_t".
- Return the added infinitely near base point.
'''
if chart_st not in ['s', 't']:
raise ValueError( 'Expecting "chart_st" to be either "s" or "t":', chart_st )
bp = BasePoint( self.depth + 1, chart_st, None )
bp.sol = sol
bp.mult = mult
dct = {'s':self.bp_lst_s, 't':self.bp_lst_t}
dct[chart_st] += [bp]
return bp
def __str__( self ):
'''
OUTPUT:
- Human readable string representation of object.
See "BasePointTree.__str__()".
'''
if BasePointTree.short == True and self.mult in [0, -1]:
return ''
bp_str = ''
bp_str += '\n' + 4 * self.depth * ' ' + 'chart=' + self.chart + ', '
if self.mult == -1:
bp_str += '(overlapping chart)' + ', '
if self.mult == 0:
bp_str += '(no solution)' + ', '
bp_str += 'depth=' + str( self.depth ) + ', '
bp_str += 'mult=' + str( self.mult ) + ', '
bp_str += 'sol=' + str( self.sol ) + ', '
bp_str += str( self.ls )
for bp in self.bp_lst_t:
bp_str += str( bp )
for bp in self.bp_lst_s:
bp_str += str( bp )
return bp_str
def alt_str( self ):
'''
This method can be useful for testing.
OUTPUT:
- A string representation without the linear series.
'''
if self.mult in [0, -1]:
return ''
bp_str = ''
bp_str += '\n' + 4 * self.depth * ' '
bp_str += 'chart=' + self.chart + ', '
bp_str += 'depth=' + str( self.depth ) + ', '
bp_str += 'mult=' + str( self.mult ) + ', '
bp_str += 'sol=' + str( self.sol ) + ', '
for bp in self.bp_lst_t:
bp_str += bp.alt_str()
for bp in self.bp_lst_s:
bp_str += bp.alt_str()
return bp_str
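# --- Editor's note: a minimal illustrative sketch, not part of the original module, showing how
# --- the two classes above combine: base points are added to the tree per chart, and infinitely
# --- near base points are attached to them with BasePoint.add().
def _base_point_tree_demo():
    bpt = BasePointTree()              # default charts: ['z', 'x', 'y']
    bp = bpt.add( 'z', ( 0, 0 ), 2 )   # a base point of multiplicity 2 in chart 'z'
    bp.add( 't', ( 1, 0 ), 1 )         # an infinitely near base point in the blowup chart 't'
    print( bpt.alt_str() )             # compact representation without the linear series
    return bpt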
|
[
"niels.lubbes@noadds"
] |
niels.lubbes@noadds
|
47255733e840182e4df902b7997de36140721620
|
62c8d299cffa3211d3a04378005c5f3523cd58bd
|
/studentMain.py
|
8640b45883d55b0272299fdf809b9775b5af79c9
|
[] |
no_license
|
Shanjiv/Linear-Regression
|
a5ae794be19c56679364d5417882248f54141752
|
d1b12b1becbae344ff932037935b97b71756363e
|
refs/heads/master
| 2021-01-10T15:37:11.181517
| 2016-01-27T16:14:34
| 2016-01-27T16:14:34
| 50,519,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 737
|
py
|
#!/usr/bin/python
import numpy
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from studentRegression import studentReg
from class_vis import prettyPicture, output_image
from ages_net_worths import ageNetWorthData
ages_train, ages_test, net_worths_train, net_worths_test = ageNetWorthData()
reg = studentReg(ages_train, net_worths_train)
plt.clf()
plt.scatter(ages_train, net_worths_train, color="b", label="train data")
plt.scatter(ages_test, net_worths_test, color="r", label="test data")
plt.plot(ages_test, reg.predict(ages_test), color="black")
plt.legend(loc=2)
plt.xlabel("ages")
plt.ylabel("net worths")
plt.savefig("test.png")
output_image("test.png", "png", open("test.png", "rb").read())
|
[
"shan_ratnam@yahoo.de"
] |
shan_ratnam@yahoo.de
|
1098889d8bb98070a11efb5e7df7e42a2f322918
|
bd8967aaea29ebfe9ec5264d927816de53472a86
|
/config/test_settings.py
|
a5fa23ed10f25fdc648c429f2ced40b2e4b7defa
|
[] |
no_license
|
MaksimLion/django-graphql-react-simpleapp
|
1067c0ef4d67824370301562dc8edfca5c4a7ac3
|
cca1153934fcf58690c44c86d80365051bee547b
|
refs/heads/master
| 2020-05-17T02:05:23.359156
| 2019-04-25T14:01:52
| 2019-04-25T14:01:52
| 183,445,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
from .settings import *
DATABASES = {
'default' : {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
}
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
DEFAULT_FILE_STORAGE = 'inmemorystorage.InMemoryStorage'
|
[
"maxim226356@mail.ru"
] |
maxim226356@mail.ru
|
e96d13937dcfa94ceeaed7298fcf4cb23198c36f
|
d33516802a7c4674dacd7f1335097abc16d7a211
|
/SharedParking_Group_4_code/王文浩-接口自动化代码/start/start_api.py
|
31df0a39f0dc95847149d2aa89708053a2422b79
|
[] |
no_license
|
wangwenhaohao/shareparking_G4
|
f16be52da03d20dbaf066a51a40ba9355e6f193d
|
60fefa75435c31e3dbfc4446e5fd93dedeaee22b
|
refs/heads/main
| 2023-01-14T17:16:13.976424
| 2020-11-26T03:23:37
| 2020-11-26T03:23:37
| 313,612,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 708
|
py
|
from HTMLTestRunner_cn import HTMLTestRunner
from sharedParkingPlace1111.tools.parse import FileParse
import unittest
from sharedParkingPlace1111.testcase.owner_parking_api import TestParkingAPI
class AllStart:
def start(self, path):
suite = unittest.TestSuite()
loader = unittest.TestLoader()
test_case_info = FileParse.get_txt(path)
print(test_case_info)
tests = loader.loadTestsFromTestCase(test_case_info)
suite.addTests(tests)
with open('report.html', 'w') as file:
runner = HTMLTestRunner(stream=file, verbosity=2)
runner.run(suite)
if __name__ == '__main__':
AllStart().start("..\\conf\\case_class_path.conf")
|
[
"1500115788@qq.com"
] |
1500115788@qq.com
|
d2ef7781954aa600215bead6ed1537f201d90757
|
2ef014463a30fca56ae045884465fd8672c47b3d
|
/django_pro/Users/models.py
|
221731f0416cdce15eaa45f9bd47cc471e5467d7
|
[] |
no_license
|
Jiangkai2018/django_pro
|
e226aa5eaa2b712093fb5eb81490790ab891ee73
|
a5c4408a4317b9be384b4f5216b00d4d4ed4b7c7
|
refs/heads/master
| 2020-04-12T02:52:52.752835
| 2018-12-18T08:29:35
| 2018-12-18T08:29:38
| 162,256,468
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
from django.db import models
# Create your models here.
class Users(models.Model):
username = models.CharField("用户名",max_length=20)
password = models.CharField("密码",max_length=20)
realname = models.CharField("真实姓名", max_length=255)
sex = models.CharField("性别", max_length=10)
email = models.EmailField("电子 邮箱", blank=True)
def __str__(self):
return self.username
|
[
"1637213781@qq.com"
] |
1637213781@qq.com
|
3afe179639c2001840cd2dbc07063d1cad265419
|
d3a5b967b16aa4e0a0e296e62cbd43a8fd780363
|
/DataFrame/itertuples().py
|
f25e4f6ed945b7cc4a6f925edd4c3e7843deb008
|
[] |
no_license
|
andrew5205/My_Pandas
|
690dc054d69968c390128c5fea7dd9f5d971a1aa
|
35a7cda2f9fbd4150114888f12d8f3e29c07cbf1
|
refs/heads/master
| 2023-02-12T15:45:42.694933
| 2021-01-16T04:29:45
| 2021-01-16T04:29:45
| 326,882,206
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
# df.itertuples(index=True, name=Pandas) - Iterate over DataFrame rows as namedtuples.
import pandas as pd
df = pd.DataFrame({
'num_legs': [4, 2],
'num_wings': [0, 2],},
index=['dog', 'hawk']
)
# print(df)
# # num_legs num_wings
# # dog 4 0
# # hawk 2 2
print(df.itertuples()) # <map object at 0x7fcf7b2d89a0>
for row in df.itertuples():
print(row)
# Pandas(Index='dog', num_legs=4, num_wings=0)
# Pandas(Index='hawk', num_legs=2, num_wings=2)
for row in df.itertuples(index=False):
print(row)
# Pandas(num_legs=4, num_wings=0)
# Pandas(num_legs=2, num_wings=2)
for row in df.itertuples(name='Animals'):
print(row)
# Animals(Index='dog', num_legs=4, num_wings=0)
# Animals(Index='hawk', num_legs=2, num_wings=2)
|
[
"andrewchung11@gmail.com"
] |
andrewchung11@gmail.com
|
a5fd28c7ffef396eaa2b4349febc201f7c44d61b
|
50621c0cbfc7e9c23957c557b4e21971ec11fcf6
|
/vehicle counting/main.py
|
9d7a43eff073ebcf1c9c7851592ddc8b353f84f2
|
[] |
no_license
|
Hashmeet229/Vehicle_count_mdel
|
2701a2078ea3012108faaa6a33a8f3975a298956
|
e067b1a7e5830711a8008c42c639edcc6effad22
|
refs/heads/master
| 2023-04-01T00:54:12.144324
| 2021-03-29T10:46:54
| 2021-03-29T10:46:54
| 352,604,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,093
|
py
|
import cv2
import numpy as np
from time import sleep
largura_min=80 # minimum rectangle width
altura_min=80 # minimum rectangle height
offset=6 # allowed pixel error around the counting line
pos_linha=550 # position of the counting line
delay= 60 # video FPS
detec = []
carros= 0
def pega_centro(x, y, w, h):
x1 = int(w / 2)
y1 = int(h / 2)
cx = x + x1
cy = y + y1
return cx,cy
cap = cv2.VideoCapture('night.mp4')
subtracao = cv2.bgsegm.createBackgroundSubtractorMOG()
while True:
ret , frame1 = cap.read()
tempo = float(1/delay)
sleep(tempo)
grey = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(grey,(3,3),5)
img_sub = subtracao.apply(blur)
dilat = cv2.dilate(img_sub,np.ones((5,5)))
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
dilatada = cv2.morphologyEx (dilat, cv2. MORPH_CLOSE , kernel)
dilatada = cv2.morphologyEx (dilatada, cv2. MORPH_CLOSE , kernel)
contorno,h=cv2.findContours(dilatada,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
cv2.line(frame1, (25, pos_linha), (1200, pos_linha), (255,127,0), 3)
for(i,c) in enumerate(contorno):
(x,y,w,h) = cv2.boundingRect(c)
validar_contorno = (w >= largura_min) and (h >= altura_min)
if not validar_contorno:
continue
cv2.rectangle(frame1,(x,y),(x+w,y+h),(0,255,0),2)
centro = pega_centro(x, y, w, h)
detec.append(centro)
cv2.circle(frame1, centro, 4, (0, 0,255), -1)
for (x,y) in detec:
if y<(pos_linha+offset) and y>(pos_linha-offset):
carros+=1
cv2.line(frame1, (25, pos_linha), (1200, pos_linha), (0,127,255), 3)
detec.remove((x,y))
print("car is detected : "+str(carros))
cv2.putText(frame1, "VEHICLE COUNT : "+str(carros), (450, 70), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255),5)
cv2.imshow("Video Original" , frame1)
cv2.imshow("Detectar",dilatada)
if cv2.waitKey(1) == 27:
break
cv2.destroyAllWindows()
cap.release()
|
[
"hashmeetsingh409@yahoo.com"
] |
hashmeetsingh409@yahoo.com
|
766f3f730b71b9f62fb1644f7c289f4b0e1fda4b
|
fbbae3b182f601670b6568f295a23cd7b7f7f620
|
/scripts/pick_ref_from_paf.py
|
51125bc62dbb6d670e5041f35d2edeaa4253b325
|
[
"MIT"
] |
permissive
|
jmeppley/np_read_clustering
|
4a74896973892bd917516736335ebdd6376838d2
|
88a23c729f42f2249926d2e3420f3c30e01bfddd
|
refs/heads/main
| 2023-04-19T20:23:13.104017
| 2021-05-14T00:50:31
| 2021-05-14T00:50:31
| 351,255,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 921
|
py
|
import pandas
from hit_tables import parse_blast_m8, PAF
from Bio import SeqIO
# pick a best read
hits = parse_blast_m8(str(snakemake.input.paf),format=PAF)
hit_matches = hits.groupby(['hit','query']).agg({'matches':sum})
mean_matches = {r:hit_matches.query(f'hit != query and (hit == "{r}" or query == "{r}")').matches.mean()
for r in set(i[0] for i in hit_matches.index).union(i[1] for i in hit_matches.index)}
best_matches = sorted(mean_matches.keys(), key=lambda r: mean_matches[r], reverse=True)
ref_read = best_matches[0]
# write out to 2 files
with open(str(snakemake.output.ref), 'wt') as ref_out:
with open(str(snakemake.output.others), 'wt') as others_out:
for read in SeqIO.parse(str(snakemake.input.fasta), 'fasta'):
if read.id == ref_read:
ref_out.write(read.format('fasta'))
else:
others_out.write(read.format('fasta'))
|
[
"jmeppley@gmail.com"
] |
jmeppley@gmail.com
|
4f2f99004a0e686eef2bc53c51e5ed5b221a0cb3
|
288a00d2ab34cba6c389b8c2444455aee55a8a95
|
/tests/data23/recipe-578221.py
|
be4002651ea48b719e8744eeded95287e6f3f139
|
[
"BSD-2-Clause"
] |
permissive
|
JohannesBuchner/pystrict3
|
ffd77b7bbc378bd4d8f21b5c6bd69a0d64a52ddb
|
18b0dd369082422f9bf0f89c72e7acb53a49849c
|
refs/heads/master
| 2023-08-14T06:37:37.954880
| 2023-07-13T11:16:38
| 2023-07-13T11:16:38
| 268,571,175
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,507
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Recipe
# ----------------------------------------------------------------------------
class recipe:
@staticmethod
def enter_event(notified_f):
def wrapper(f):
def caller(obj, *args, **kargs):
notified_f(obj, *args, **kargs)
ret = f(obj, *args, **kargs)
return ret
return caller
return wrapper
@staticmethod
def exit_event(notified_f):
def wrapper(f):
def caller(obj, *args, **kargs):
# Start of diff between enter_event
ret = f(obj, *args, **kargs)
notified_f(obj, *args, **kargs)
# End of diff between enter_event
return ret
return caller
return wrapper
# Tests
# ----------------------------------------------------------------------------
class c:
def notify_entering(self, *args, **kargs):
print(' - function notify_entering() is triggered :')
print(' - self : [%s]' % self)
print(' - args : %s' % repr(args))
print(' - kargs : %s' % repr(kargs))
print()
def notify_exiting(self, *args, **kargs):
print(' - function notify_exiting() is triggered :')
print(' - self : [%s]' % self)
print(' - args : %s' % repr(args))
print(' - kargs : %s' % repr(kargs))
print()
# Method
@recipe.enter_event(notify_entering)
@recipe.exit_event(notify_exiting)
def f(self, x):
print(' - inside o.f() ...')
print(' - self = [%s]' % self)
print(' - x = [%s]' % x)
print()
# Class method
@classmethod
@recipe.enter_event(notify_entering)
@recipe.exit_event(notify_exiting)
def fclass(cls, x):
print(' - inside o.fclass() ...')
print(' - cls = [%s]' % cls)
print(' - x = [%s]' % x)
print()
# Static method
@staticmethod
@recipe.enter_event(notify_entering)
@recipe.exit_event(notify_exiting)
def fstatic(x):
print(' - inside o.fstatic() ...')
print(' - x = [%s]' % x)
print()
if __name__ == '__main__':
o = c()
print('-' * 78)
print('- calling o.f(123) ...')
o.f(123)
print('-' * 78)
print('- calling o.fclass(234) ...')
o.fclass(234)
print('-' * 78)
print('- calling o.fstatic(345) ...')
o.fstatic(345)
|
[
"johannes.buchner.acad@gmx.com"
] |
johannes.buchner.acad@gmx.com
|
6f506575a29cec64e93559190ea52f8a7ae7108d
|
2d209ce06ce527d29997ed9af9938e5bfd049903
|
/tests/test_app.py
|
8e9f53fe3be83d8f5269c45c2a10342ba409fe5d
|
[
"MIT"
] |
permissive
|
martinmaina/flaskWeb
|
d925ccc237fb62511c062e2e42feb260a303e369
|
c3bf6fe63ae76d6168a4e6c7c2ae17c33b41f31a
|
refs/heads/master
| 2022-12-11T12:24:56.402365
| 2019-03-15T00:44:03
| 2019-03-15T00:44:03
| 175,322,492
| 0
| 0
|
MIT
| 2022-12-08T01:41:54
| 2019-03-13T01:19:28
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,752
|
py
|
import unittest
from app import app
class TestingApp(unittest.TestCase):
#Does the app load up correctly
def test_index(self):
tester = app.test_client(self)
resp = tester.get('/', content_type='html/text')
self.assertEqual(resp.status_code, 200)
#Does the app require login when one wants to add a post
def test_requiresLoginBeforePost(self):
tester = app.test_client(self)
resp = tester.get('/addPost', data=dict(username="username1", password="password1"), follow_redirects=True)
self.assertIn(b'You are trying', resp.data)
'''
#If the user visits an unknown page, does the error page load
def test_unknownPageVisited(self):
tester = app.test_client(self)
resp = tester.post('/posts/maina', data=dict(username="user", password="pass"), follow_redirects=True)
self.assertIn(b'Sorry', resp.data)
#Correct logins
def test_correctLogin(self):
tester = app.test_client(self)
resp = tester.post('/login', data=dict(username='user', password='pass'), follow_redirects=True)
self.assertIn(b'You are logged in', resp.data)
#Incorrect Logins
def test_incorrectLogin(self):
tester = app.test_client(self)
resp = tester.post('/login', data=dict(username='username', password='passwoird'), follow_redirects=True)
self.assertIn(b'Please try again', resp.data)
#Is the user able to add a post
def test_addPost(self):
tester = app.test_client(self)
resp = tester.post('/addpost', data=dict(username='username', password='passwoird'), follow_redirects=True)
self.assertEqual(resp.status_code, 200)
'''
if __name__ == '__main__':
unittest.main()
|
[
"m0Lzixs3m0qy@gmail.com"
] |
m0Lzixs3m0qy@gmail.com
|
bf77d76c896cc754fd8b930a615cc17a6d6ceb5d
|
6d13de0d1ca89badfb76c677ffa8d7e2829677cb
|
/beaconWeb/apps/beacon/models/deal_place.py
|
d89319726a19ca74e04f52af68c266a76337c0c7
|
[] |
no_license
|
jasjitsingh85/beaconweb
|
08a2b97346aea6db87dd19567c39a0d99f383ae8
|
269c6683f759fd7e75d13ea9eec8ad63ee24df53
|
refs/heads/master
| 2021-01-13T03:43:09.308401
| 2016-12-24T16:12:15
| 2016-12-24T16:12:15
| 77,268,841
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,527
|
py
|
from django.db import models
from beaconWeb.common_utils import smart_format
from beaconWeb.apps.beacon.common.constants.place_sales_status import PLACE_SALES_STATUS
from point_of_sale import PointOfSale
class DealPlace(models.Model):
longitude = models.FloatField(db_index=True)
latitude = models.FloatField(db_index=True)
name = models.CharField(max_length=128)
street_address = models.CharField(max_length=128)
phone = models.CharField(max_length=20, blank=True, null=True)
source_image_url = models.CharField(max_length=400, blank=True, null=True)
# image_url = models.CharField(max_length=400, blank=True, null=True)
yelp_id = models.CharField(max_length=128, blank=True, null=True)
yelp_rating_image_url = models.CharField(max_length=256, blank=True, null=True)
yelp_review_count = models.IntegerField(blank=True, null=True)
foursquare_id = models.CharField(max_length=128, blank=True, null=True)
facebook_id = models.CharField(max_length=128, blank=True, null=True)
instagram_id = models.CharField(max_length=128, blank=True, null=True)
twitter_id = models.CharField(max_length=128, blank=True, null=True)
place_description = models.TextField(blank=True, null=True)
place_type = models.CharField(max_length=150, blank=True, null=True)
neighborhood = models.CharField(max_length=150, blank=True, null=True)
email = models.CharField(max_length=40, blank=True, null=True)
website = models.CharField(max_length=500, blank=True, null=True)
events_url = models.CharField(max_length=250, blank=True, null=True)
closed = models.BooleanField(default=False)
date_updated = models.DateTimeField("Date Updated", auto_now=True)
in_review = models.BooleanField(default=False)
pipeline_status = models.CharField(max_length=10, choices=PLACE_SALES_STATUS.ENUM, blank=True, null=True)
point_of_sale = models.OneToOneField(PointOfSale, blank=True, null=True)
class Meta:
app_label = 'beacon'
def __unicode__(self):
return smart_format("{0}, {1}", self.name, self.street_address)
@property
def image_url(self):
if self.source_image_url:
return self.source_image_url
else:
index = (self.id % 9) + 1
url = "https://s3-us-west-2.amazonaws.com/hotspot-venue-images/placeholder{0}.png".format(index)
return url
@property
def has_pos(self):
if self.point_of_sale:
return True
else:
return False
|
[
"jazjit.singh@gmail.com"
] |
jazjit.singh@gmail.com
|
59124cc9eaca6ffaa9d4578175f23e410831ad22
|
d36ed6850556e159594156f069280c6bb4aa9e13
|
/manage.py
|
29f5145a6288c75313bf20bc66b772edd16fc6a3
|
[
"MIT"
] |
permissive
|
underflow101/intraviewAI
|
ab85e2211e839ece1af1de57c72f277c53a3bdf5
|
0775fe532d650c53d3248a3ccf5b6d67b948ebbe
|
refs/heads/master
| 2022-09-10T17:44:39.594494
| 2020-05-29T02:38:51
| 2020-05-29T02:38:51
| 265,801,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'intraviewAI.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"ikarus125@gmail.com"
] |
ikarus125@gmail.com
|
470e383186c84807b83b797a5d3fe2a8f83a3797
|
9ed9ee7b72c2aecefac44b24ddf0172607ebe86b
|
/venv3/bin/django-admin
|
e7be1a88a44490e7be83d227fd10b593a2319af8
|
[
"MIT"
] |
permissive
|
vict0rl/lovealldogs
|
e414497be388ed1df7bdc4af73e10e2a7f08bc9b
|
8ed5f4376cd2b692004fe445f4b4fe2919885d11
|
refs/heads/master
| 2021-09-23T10:09:21.036242
| 2020-02-07T00:05:48
| 2020-02-07T00:05:48
| 238,802,365
| 0
| 0
|
MIT
| 2021-09-22T18:31:47
| 2020-02-06T23:01:29
|
Python
|
UTF-8
|
Python
| false
| false
| 307
|
#!/home/pypi/Desktop/cleanup/loveofalldogs/venv3/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
"leungvb@gmail.com"
] |
leungvb@gmail.com
|
|
903717c0239cdbaab433ca713f2762dd4090b147
|
1e90b086510e82ece55c75036c89cc8961b6c1c4
|
/client.py
|
40708145c3215f117ca0471c414907e1d48801e4
|
[] |
no_license
|
saurabh1120/python-chat-application
|
363072684a910505ed906e4c9b1e60c79d5031ee
|
b8f387a4125ef11bae38d9e8ec042925165d7623
|
refs/heads/main
| 2023-01-05T18:49:15.649127
| 2020-10-25T18:30:41
| 2020-10-25T18:30:41
| 307,165,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
import socket
UDP_IP = "127.0.0.1"
UDP_PORT = 5018
print("IP address",UDP_IP)
print("Port number ",UDP_PORT)
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
try:
while True:
message=input("enter message to be sent ").encode()
if message in [b"Q",b"q"]:
break;
sock.sendto(message,(UDP_IP, UDP_PORT))
data, addr = sock.recvfrom(1024)
if data in [b"Q",b"q"]:
break;
print(data.decode())
except Exception as e:
print(e)
|
[
"noreply@github.com"
] |
saurabh1120.noreply@github.com
|
c9916da70d4b249b2cffcb7bc6636e200540d8ba
|
a73cb19f3e20f8205552052397b5aee1040bc119
|
/edx_ws/build/catkin_generated/order_packages.py
|
a58c31d83cf7496f24b54419fba20d73e5053b46
|
[] |
no_license
|
aabs7/ROS
|
bfa5d01c125afb3811f9af42475be7cb8922901b
|
ed037520c940715b909350b9793e944d2207775e
|
refs/heads/master
| 2022-12-08T10:24:57.466726
| 2020-08-23T04:10:29
| 2020-08-23T04:10:29
| 288,091,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
# generated from catkin/cmake/template/order_packages.context.py.in
source_root_dir = '/home/abhish/tutorial_ws/edx_ws/src'
whitelisted_packages = ''.split(';') if '' != '' else []
blacklisted_packages = ''.split(';') if '' != '' else []
underlay_workspaces = '/home/abhish/tutorial_ws/edx_ws/devel;/opt/ros/melodic'.split(';') if '/home/abhish/tutorial_ws/edx_ws/devel;/opt/ros/melodic' != '' else []
|
[
"abheeshkhanal@gmail.com"
] |
abheeshkhanal@gmail.com
|
bdb95ee4505887d95877ca3f592d4b8b5565ae5f
|
7db4fe5ccdc894536870a913cc49c5740845257b
|
/exercicios_curso_em_video/ex078.py
|
d62ad7de3b38046e13324b5c6885bce6c73b167c
|
[] |
no_license
|
ErickJonesA7X/py_lessons2
|
b62ea2f6d9e238d3a1cd67c6c933512e707dfac2
|
6472a21c7c2cb3e772eec3dd3d1e75be05f8608b
|
refs/heads/master
| 2022-12-08T09:25:14.669312
| 2020-07-17T14:57:37
| 2020-07-17T14:57:37
| 252,834,258
| 0
| 0
| null | 2021-06-02T02:31:46
| 2020-04-03T20:30:36
|
Python
|
UTF-8
|
Python
| false
| false
| 700
|
py
|
valores = list()
maior = menor = 0
for c in range(0, 5):
valores.append(int(input(f'Digite um valor para a posição {c}: ')))
if c == 0:
maior = menor = valores[c]
else:
if valores[c] > maior:
maior = valores[c]
if valores[c] < menor:
menor = valores[c]
print('=-'*30)
print(f'Você digitou os valores {valores}')
print(f'O maior valor digitado foi {maior} nas posições ', end='')
for i, v in enumerate(valores):
if v == maior:
print(f'{i}... ', end='')
print()
print(f'O menor valor digitado foi {menor} nas posições ', end='')
for i, v in enumerate(valores):
if v == menor:
print(f'{i}...', end='')
print()
|
[
"jonescomercial@gmail.com"
] |
jonescomercial@gmail.com
|
7c9ddf86c654c30a83b0d6faa6ebd8e0e03e5c96
|
3d7afb98400180c74b3b642e95f107e9f2639e6e
|
/02_api_server/app_name/models.py
|
a6eba645ff72878c406bdfbc6b9c9f581b39cbe1
|
[] |
no_license
|
parkeunsang/django_blog
|
6cd96e9be5a62c3ed519ceb95157f29bc39a642b
|
3bba2137de05a35b64af605753815b91ee5eaa53
|
refs/heads/master
| 2023-06-24T10:22:37.352454
| 2021-07-18T08:04:27
| 2021-07-18T08:04:27
| 374,155,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
from django.db import models
class Question(models.Model):
name = models.CharField(max_length=5)
age = models.IntegerField()
|
[
"dislive@naver.com"
] |
dislive@naver.com
|
c73d9c7f05e17c3588c3b21d88860513158f2b14
|
4d0d8d045d3a104a87a50c3dfc4569faac095254
|
/cementerio/forms.py
|
f3b8c755535bf6c2e3a9a65f46535bc0a662f1d1
|
[] |
no_license
|
tapiaw38/obraspublicas
|
07dedadda38e5e33f4bac3760974cd251b4e253c
|
fc1721ecf421402ca3d6b746cf733a6da47a8694
|
refs/heads/master
| 2020-12-29T12:08:52.301814
| 2020-04-29T04:31:44
| 2020-04-29T04:31:44
| 238,602,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,177
|
py
|
from django import forms
from cementerio.models import Cementerio
# Create your forms
class cementerioForm(forms.ModelForm):
class Meta:
model = Cementerio
fields = [
'fecha_compra',
'usuario',
'construccion',
'lote',
'lote_num',
'lote_cuadro',
'dimension',
'precio_lote',
'vigencia',
'dias_concesion',
'prorroga_iniciar',
'prorroga_trabajo',
'abono_anual',
'contrato',
]
labels = {
'fecha_compra':'Fecha solicitud',
'usuario':'Usuario',
'construccion':'Tipo de construcción',
'lote':'Lote',
'lote_num':'Número',
'lote_cuadro':'Cuadro',
'dimension':'Dimensión',
'precio_lote':'Precio',
'vigencia':'Años del Contrato',
'dias_concesion':'Plazo en dias para renovación de contratos',
'prorroga_iniciar':'Plazo en días para iniciar construcción',
'prorroga_trabajo':'Plazo en días para terminar construcción',
'abono_anual':'Precio del pago Anual',
'contrato':'Imagen del contrato',
}
TIEMPO_CONTRATO = (
(1, '1'),
(2, '2'),
(3, '3'),
(4, '4'),
)
TIPO = (
('fosa', 'Fosa'),
('nicho', 'Nicho'),
('mansuleo', 'Mansuleo'),
)
widgets = {
'fecha_compra': forms.DateTimeInput(attrs={'class':'form-control'}),
'usuario': forms.Select(attrs={'class':'form-control'}),
'construccion':forms.Select(choices=TIPO,attrs={'class':'form-control'}),
'lote': forms.TextInput(attrs={'class':'form-control'}),
'lote_num': forms.NumberInput(attrs={'class':'form-control'}),
'lote_cuadro':forms.TextInput(attrs={'class':'form-control'}),
'dimension':forms.TextInput(attrs={'class':'form-control'}),
'precio_lote':forms.NumberInput(attrs={'class':'form-control'}),
'vigencia':forms.Select(choices=TIEMPO_CONTRATO,attrs={'class':'form-control'}),
'dias_concesion':forms.NumberInput(attrs={'class':'form-control'}),
'prorroga_iniciar':forms.NumberInput(attrs={'class':'form-control'}),
'prorroga_trabajo':forms.NumberInput(attrs={'class':'form-control'}),
'abono_anual':forms.NumberInput(attrs={'class':'form-control'}),
'contrato': forms.ClearableFileInput(attrs={'class':'form-control'}),
}
class anualForm_1(forms.ModelForm):
class Meta:
model = Cementerio
fields = [
'monto_anual1',
]
labels = {
'monto_anual1':'Monto a pagar cuota N°1',
}
widgets = {
'monto_anual1': forms.NumberInput(attrs={'class':'form-control'}),
}
class anualForm_2(forms.ModelForm):
class Meta:
model = Cementerio
fields = [
'monto_anual2',
]
labels = {
'monto_anual2': 'Monto a pagar cuota N°2',
}
widgets = {
'monto_anual2': forms.NumberInput(attrs={'class': 'form-control'}),
}
class anualForm_3(forms.ModelForm):
class Meta:
model = Cementerio
fields = [
'monto_anual3',
]
labels = {
'monto_anual3': 'Monto a pagar cuota N°3',
}
widgets = {
'monto_anual3': forms.NumberInput(attrs={'class': 'form-control'}),
}
class anualForm_4(forms.ModelForm):
class Meta:
model = Cementerio
fields = [
'monto_anual4',
]
labels = {
'monto_anual4': 'Monto a pagar cuota N°4',
}
widgets = {
'monto_anual4': forms.NumberInput(attrs={'class': 'form-control'}),
}
class permisoForm(forms.ModelForm):
class Meta:
model = Cementerio
fields = [
'fecha_inicio',
'metro',
'precio_metro',
'prorroga_trabajo',
# Cambiar estado a True en la vista
]
labels = {
'fecha_inicio':'Inicio de Construcción',
'metro':'Metros cuadrados',
'precio_metro':'Precio por metro cuadrado',
'prorroga_trabajo':'Dias de prorroga para finalizar construcción',
}
widgets = {
'fecha_inicio':forms.TextInput(attrs={'class': 'form-control'}),
'metro':forms.NumberInput(attrs={'class': 'form-control'}),
'precio_metro':forms.NumberInput(attrs={'class': 'form-control'}),
'prorroga_trabajo': forms.TextInput(attrs={'class': 'form-control'}),
}
class fintrabajoForm(forms.ModelForm):
class Meta:
model = Cementerio
fields = [
'trabajo_final',
]
labels = {
'trabajo_final':'Finalizacíon de obra',
}
widgets = {
'trabajo_final':forms.NullBooleanSelect(),
}
|
[
"tapiaw38@gmail.com"
] |
tapiaw38@gmail.com
|
24425531c7148d11ec8d15bbc859b75ade65dd3b
|
c95acbeffa00fee3efbc1b290ad7b1fa15769025
|
/server.py
|
fb3a1a9fa2bb00fdd3fabd438fe79801a6c3e8b4
|
[] |
no_license
|
saugatsthapit/FTP-server
|
07f6c87400bf679b9bdfd098e93598e5c407e19a
|
6f7d580ecae213d21bb447fe7d6014dac076e5d6
|
refs/heads/master
| 2020-04-05T19:05:52.875834
| 2018-11-25T21:39:57
| 2018-11-25T21:39:57
| 157,118,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35,032
|
py
|
#!/usr/bin/env python3
# --*-- coding: utf-8 --*--
import socket
import threading
import os
import stat
import sys
import time
import grp
import pwd
import ssl
import codecs
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
try:
HOST = '127.0.0.1' # socket.gethostbyname(socket.gethostname())
except socket.error:
HOST = '127.0.0.1'
PORT = 8443 # command port
CWD = os.path.abspath('.') # os.getenv('HOME')
allow_delete = True # used to indicate if it's allowed to delete files or not
logfile = os.getcwd() + r'/socket-server.log' # name of the log file
config_file_path = "ftpserverd.conf"
''' Reads the settings from the digital_ocean.ini file '''
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/' + config_file_path)
def log(func, cmd, client_address=None):
if client_address is not None:
client = "%s, %s" % client_address
logmsg = time.strftime("%Y-%m-%d %H-%M-%S [-] [" + client + "] " + func)
else:
logmsg = time.strftime("%Y-%m-%d %H-%M-%S [-] " + func)
print(logmsg, cmd)
# Write log to file
    with open(logfile, 'a+') as f:  # 'a+' appends to an existing file or creates it
        f.write(logmsg + " {}\n".format(cmd))  # write the text to the logfile and move to next line
# Load config properties
try:
# if config.has_option('server_options', 'port_mode'):
# self.api_token = config.get('server_options', 'port_mode')
port_mode = config.get('server_options', 'port_mode').encode('utf-8')
pasv_mode = config.get('server_options', 'pasv_mode').encode('utf-8')
except Exception as err:
log('Config ERR', err)
# List of available commands
COMMANDS = ["CDUP", "CWD", "EPRT", "EPSV", "HELP", "LIST", "PASS",
"PASV", "PORT", "PWD", "QUIT", "RETR", "STOR", "SYST", "TYPE", "USER",
"NLIST", "DELE", "MKD", "RMD", "RNFR", "RNTO", "REST", "APPE"]
class FtpServerProtocol(threading.Thread):
def __init__(self, conn, address):
threading.Thread.__init__(self)
self.authenticated = False
self.banned_username = False
self.pasv_mode = False
self.rest = False
self.cwd = CWD
self.commSock = conn # communication socket as command channel
self.address = address
self.dataSockAddr = HOST
self.dataSockPort = PORT
self._epsvall = False # used for EPSV
self._af = socket.AF_INET # address_family
def run(self):
"""
receive commands from client and execute commands
"""
self.sendWelcome()
while True:
try:
# Receive the data in small chunks and retransmit it
data = self.commSock.recv(1024).rstrip()
try:
cmd = data.decode('utf-8')
log('Received data from client: ', cmd, self.address)
except AttributeError:
cmd = data
# if received data is empty or not exists break this loop
if not cmd or cmd is None:
break
            except socket.error as err:
                log('Receive', err)
                break  # stop serving this client if the control connection fails
try:
cmd, arg = cmd[:4].strip().upper(), cmd[4:].strip() or None
if cmd not in COMMANDS:
self.sendCommand('Not valid command\r\n')
continue
if not self.authenticated and cmd not in ["USER", "PASS", "HELP"]:
self.sendCommand('530 User not logged in.\r\n')
continue
func = getattr(self, cmd)
func(arg)
except Exception as err:
self.sendCommand('500 Syntax error, command unrecognized. '
'This may include errors such as command line too long.\r\n')
log('Error while trying to call command based on received data', err)
#-------------------------------------#
# # Create Ftp data transport channel ##
#-------------------------------------#
def startDataSock(self):
log('startDataSock', 'Opening a data channel')
try:
self.dataSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.pasv_mode:
self.dataSock, self.address = self.serverSock.accept()
else:
self.dataSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.dataSock.connect((self.dataSockAddr, self.dataSockPort))
except socket.error as err:
log('startDataSock', err)
def stopDataSock(self):
log('stopDataSock', 'Closing a data channel')
try:
if hasattr(self, 'dataSock') and self.dataSock is not None:
self.dataSock.close()
if self.pasv_mode:
self.serverSock.close()
except socket.error as err:
log('stopDataSock', err)
def sendCommand(self, cmd):
self.commSock.send(cmd.encode('utf-8'))
def sendData(self, data):
self.dataSock.send(data.encode('utf-8'))
def sendWelcome(self):
"""
when connection created with client will send a welcome message to the client
"""
self.sendCommand('220 Welcome.\r\n')
def _make_epasv(self, extmode=False):
"""Initialize a passive data channel with remote client which
issued a PASV or EPSV command.
If extmode argument is True we assume that client issued EPSV in
which case extended passive mode will be used (see RFC-2428).
"""
# close established data connections, if any
if hasattr(self, 'dataSock') and self.dataSock is not None:
self.stopDataSock()
# open data channel
try:
self.pasv_mode = True # extmode
self._af = self.getIpVersion(HOST, PORT)
self.serverSock = socket.socket(self._af, socket.SOCK_STREAM)
self.serverSock.bind((HOST, 0))
self.serverSock.listen(5) # Enable a server to accept connections.
addr, port = self.serverSock.getsockname()
            ipnum = socket.inet_aton(addr)  # packed 32-bit form of the address (not used below)
            log("EPSV", 'Address: ' + addr)
if extmode:
self.sendCommand("229 Entering Extended Passive Mode (|||" + str(port) + "|)")
log("EPSV", 'Open socket Address: ' + addr + " and Port: " + str(port))
else:
self.sendCommand('227 Entering Passive Mode (%s,%u,%u).\r\n' % (','.join(addr.split('.')), port >> 8 & 0xFF, port & 0xFF))
log("PASV", 'Open socket Address: ' + addr + " and Port: " + str(port))
except:
self.sendCommand("500 (EPSV) Failed to create data socket.")
#-------------------------------------#
# # Create FTP utilities functions ##
#-------------------------------------#
def validateCredentials(self):
if not self.authenticated:
for line in open("accounts.txt", "r").readlines(): # checks whether username/password is in the file
info = line.split() # splits a string into a list. Default separator is any whitespace.
if self.username == info[0] and self.passwd == info[1]:
self.authenticated = True
self.sendCommand('230 User logged in, proceed.\r\n')
self.saveAuthentication(True)
break
if not self.authenticated:
self.sendCommand('Provided credentials are not found.\r\n')
# Function used to save all authentication data together with number of tries to authenticate
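    # Each line of ftpserver.secure is expected to look like "username:password:attempt_count"
    # (inferred from the parsing below; the format is not documented in the original).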
def saveAuthentication(self, resset):
if self.username is not None and self.passwd is not None:
user_founded = False
# Read authentication saved data
file = open('ftpserver.secure', 'r+') # open the file:
lines = file.readlines() # get all your lines from the file
file.close() # close the file
file = open('ftpserver.secure', 'w') # reopen it in write mode
for line in lines:
if line.startswith(self.username): # username found
user_founded = True
cnt_auth = int(line.split(":")[2])
if cnt_auth > 3:
self.banned_username = True
if resset:
file.write(self.username + ":" + self.passwd + ":%d" % (1))
else:
file.write(self.username + ":" + self.passwd + ":%d" % (cnt_auth + 1))
else:
file.write(line) # write your lines back
file.close() # close the file again
# means credentials will be inserted into file
if not user_founded:
# open a file for writing and create it if does not exist
with open('ftpserver.secure', 'a+') as f:
f.write(self.username + ":" + self.passwd + ":%d" % (1))
def checkBlockedUsername(self):
if hasattr(self, 'username') and self.username is not None:
file = open('ftpserver.secure', 'r+') # open the file:
lines = file.readlines() # get all your lines from the file
for line in lines:
if line.startswith(self.username): # username found
cnt_auth = int(line.split(":")[2])
if cnt_auth > 3:
self.banned_username = True
return True
return False
def _support_hybrid_ipv6(self):
"""Return True if it is possible to use hybrid IPv6/IPv4 sockets on this platform.
"""
# Note: IPPROTO_IPV6 constant is broken on Windows, see:
# http://bugs.python.org/issue6926
try:
if not socket.has_ipv6:
return False
return not self.serverSock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY)
except:
return False
def fileProperty(self, filepath):
"""
return information from given file, like this "-rw-r--r-- 1 User Group 312 Aug 1 2014 filename"
"""
st = os.stat(filepath)
file_message = [ ]
def _getFileMode():
modes = [
stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR,
stat.S_IRGRP, stat.S_IWGRP, stat.S_IXGRP,
stat.S_IROTH, stat.S_IWOTH, stat.S_IXOTH,
]
mode = st.st_mode
fullmode = ''
fullmode += os.path.isdir(filepath) and 'd' or '-'
for i in range(9):
fullmode += bool(mode & modes[i]) and 'rwxrwxrwx'[i] or '-'
return fullmode
def _getFilesNumber():
return str(st.st_nlink)
def _getUser():
return pwd.getpwuid(st.st_uid).pw_name
def _getGroup():
return grp.getgrgid(st.st_gid).gr_name
def _getSize():
return str(st.st_size)
def _getLastTime():
return time.strftime('%b %d %H:%M', time.gmtime(st.st_mtime))
        for func in (_getFileMode, _getFilesNumber, _getUser, _getGroup, _getSize, _getLastTime):
            file_message.append(func())
file_message.append(os.path.basename(filepath))
return ' '.join(file_message)
#------------------------------#
# # Ftp services and functions ##
#------------------------------#
# Change the working directory to the parent directory.
def CDUP(self, cmd):
log('CDUP', self.cwd)
try:
self.cwd = os.path.abspath(os.path.join(self.cwd, '..'))
self.sendCommand('200 CDUP Command successful.\r\n' + self.cwd + '\r\n')
except Exception as err:
log('CDUP', err)
# Change the working directory
def CWD(self, dirpath):
try:
pathname = dirpath.endswith(os.path.sep) and dirpath or os.path.join(self.cwd, dirpath)
log('CWD', pathname)
if not os.path.exists(pathname) or not os.path.isdir(pathname):
self.sendCommand('550 CWD failed Directory not exists.\r\n')
return
self.cwd = pathname
self.sendCommand('250 CWD Command successful.' + self.cwd + '\r\n')
except Exception as err:
log('CWD', err)
# Specifies an extended port to which the server should connect.
def EPRT(self, line):
'''Send a EPRT command with the current host and the given port number.'''
log('EPRT', line)
try:
"""Start an active data channel by choosing the network
protocol to use (IPv4/IPv6) as defined in RFC-2428. """
if self._epsvall:
self.sendCommand("501 EPRT not allowed after EPSV ALL.\r\n")
return
# Parse EPRT request for getting protocol, IP and PORT.
# Request comes in as:
# <d>proto<d>ip<d>port<d>
# ...where <d> is an arbitrary delimiter character (usually "|") and
# <proto> is the network protocol to use (1 for IPv4, 2 for IPv6).
try:
af, ip, port = line.split(line[0])[1:-1]
port = int(port)
if not 0 <= port <= 65535:
raise ValueError
except (ValueError, IndexError, OverflowError):
self.sendCommand("501 Invalid EPRT format.\r\n")
return
if af == "1":
# test if AF_INET6 and IPV6_V6ONLY
if (self._af == socket.AF_INET6 and not self._support_hybrid_ipv6()):
self.sendCommand('522 Network protocol not supported (use 2).\r\n')
else:
try:
octs = list(map(int, ip.split('.')))
if len(octs) != 4:
raise ValueError
for x in octs:
if not 0 <= x <= 255:
raise ValueError
except (ValueError, OverflowError):
self.sendCommand("501 Invalid EPRT format.\r\n")
else:
self.dataSockAddr = ip
self.dataSockPort = port
# self.startDataSock()
elif af == "2":
if self._af == socket.AF_INET:
self.sendCommand('522 Network protocol not supported (use 1).\r\n')
else:
self.dataSockAddr = ip
self.dataSockPort = port
# self.startDataSock()
else:
if self._af == socket.AF_INET:
self.sendCommand('501 Unknown network protocol (use 1).\r\n')
else:
self.sendCommand('501 Unknown network protocol (use 2).\r\n')
# The format of EPRT is: EPRT<space><d><net-prt><d><net-addr><d><tcp-port><d>
# <net-prt>:
# AF Number Protocol
# --------- --------
# 1 Internet Protocol, Version 4 [Pos81a]
# 2 Internet Protocol, Version 6 [DH96]
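            # Illustrative example (not from the original code): the request
            # "EPRT |1|132.235.1.2|6275|" asks for an IPv4 data connection to
            # 132.235.1.2 on TCP port 6275.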
self.sendCommand('200 Success: '
+"EPRT |" + af + "|" + self.dataSockAddr + "|" + str(self.dataSockPort) + "|\r\n")
except Exception as err:
log('EPRT', err)
# Set passive data connection over IPv4 or IPv6 (RFC-2428 - FTP Extensions for IPv6 and NATs)
def EPSV(self, cmd):
log('EPSV', cmd)
try:
log('EPSV', cmd)
"""Start a passive data channel by using IPv4 or IPv6 as defined in RFC-2428. """
# RFC-2428 specifies that if an optional parameter is given,
# we have to determine the address family from that otherwise
# use the same address family used on the control connection.
# In such a scenario a client may use IPv4 on the control channel
# and choose to use IPv6 for the data channel.
# But how could we use IPv6 on the data channel without knowing
# which IPv6 address to use for binding the socket?
            # Unfortunately RFC-2428 does not provide satisfying information
            # on how to do that. The assumption is that we don't have any way
            # to know which address to use, hence we just use the same address
# family used on the control connection.
if not cmd:
self._make_epasv(extmode=True)
# IPv4
elif cmd == "1":
if self._af != socket.AF_INET:
self.sendCommand('522 Network protocol not supported (use 2).\r\n')
else:
self._make_epasv(extmode=True)
# IPv6
elif cmd == "2":
if self._af == socket.AF_INET:
self.sendCommand('522 Network protocol not supported (use 1).\r\n')
else:
self._make_epasv(extmode=True)
elif cmd.lower() == 'all':
self._epsvall = True
self.sendCommand('220 Other commands other than EPSV are now disabled.\r\n')
else:
if self._af == socket.AF_INET:
self.sendCommand('501 Unknown network protocol (use 1).\r\n')
else:
self.sendCommand('501 Unknown network protocol (use 2).\r\n')
except Exception as err:
log('EPSV', err)
# A HELP request asks for human-readable information from the server.
# The server may accept this request with code 211 or 214, or reject it with code 502.
def HELP(self, arg):
help = """
214
CDUP Changes the working directory on the remote host to the parent of the current directory.
'Syntax: CDUP (go to parent directory).'
CWD Type a directory path to change working directory.
'Syntax: CWD [<SP> dir-name] (change working directory).'
EPRT Initiate a data connection required to transfer data (such as directory listings or files) between the client and server.
Is required during IPv6 active mode transfers.
'Syntax: EPRT <SP> |protocol|ip|port| (extended active mode).'
EPSV Tells the server to enter a passive FTP session rather than Active. (Its use is required for IPv6.)
This allows users behind routers/firewalls to connect over FTP when they might not be able to connect over an
Active (PORT/EPRT) FTP session. EPSV mode has the server tell the client where to connect for the data port on the server.
'Syntax: EPSV [<SP> proto/"ALL"] (extended passive mode).'
HELP Displays help information.
'Syntax: HELP [<SP> cmd] (show help).'
LIST [dirpath or filename] This command allows the server to send the list to the passive DTP. If
the pathname specifies a path or The other set of files, the server sends a list of files in
the specified directory. Current information if you specify a file path name, the server will
send the file.
'Syntax: LIST [<SP> path] (list files).'
PASS [password], Its argument is used to specify the user password string.
'Syntax: PASS [<SP> password] (set user password).'
PASV The directive requires server-DTP in a data port.
'Syntax: PASV (open passive data connection).'
PORT [h1, h2, h3, h4, p1, p2] The command parameter is used for the data connection data port
'Syntax: PORT <sp> h,h,h,h,p,p (open active data connection).'
PWD Get current working directory.
'Syntax: PWD (get current working directory).'
QUIT This command terminates a user, if not being executed file transfer, the server will shut down
Control connection
'Syntax: QUIT (quit current session).'
RETR This command allows server-FTP send a copy of a file with the specified path name to the data
connection The other end.
'Syntax: RETR <SP> file-name (retrieve a file).'
STOR This command allows server-DTP to receive data transmitted via a data connection, and data is
stored as A file server site.
'Syntax: STOR <SP> file-name (store a file).'
SYST This command is used to find the server's operating system type.
'Syntax: SYST (get operating system type).'
USER [name], Its argument is used to specify the user's string. It is used for user authentication.
'Syntax: USER <SP> user-name (set username).'
\r\n.
"""
self.sendCommand(help)
# Asks the server to send the contents of a directory over the data connection already established
def LIST(self, dirpath):
if not self.authenticated:
self.sendCommand('530 User not logged in.\r\n')
return
if not dirpath:
pathname = os.path.abspath(os.path.join(self.cwd, '.'))
elif dirpath.startswith(os.path.sep):
pathname = os.path.abspath(dirpath)
else:
pathname = os.path.abspath(os.path.join(self.cwd, dirpath))
log('LIST', pathname)
if not self.authenticated:
self.sendCommand('530 User not logged in.\r\n')
elif not os.path.exists(pathname):
self.sendCommand('550 LIST failed Path name not exists.\r\n')
else:
self.sendCommand('150 Listing content.\r\n')
self.startDataSock()
if not os.path.isdir(pathname):
file_message = self.fileProperty(pathname)
                self.sendData(file_message + '\r\n')
else:
for file in os.listdir(pathname):
file_message = self.fileProperty(os.path.join(pathname, file))
self.sendData(file_message + '\r\n')
self.stopDataSock()
self.sendCommand('226 List done.\r\n')
# Set password for current user used to authenticate
def PASS(self, passwd):
log("PASS", passwd)
if passwd is None or not passwd:
self.sendCommand('501 Syntax error in parameters or arguments.\r\n')
elif not hasattr(self, 'username') or not self.username:
self.sendCommand('503 The username is not available. '
'Please set username first calling the function "USER".\r\n')
else:
self.checkBlockedUsername()
if self.banned_username:
log('PASS', "The username: " + self.username + " is blocked. You should unlock username first.")
else:
self.passwd = passwd
self.saveAuthentication(False)
self.validateCredentials()
# Asks the server to accept a data connection on a new TCP port selected by the server.
# PASV parameters are prohibited
def PASV(self, cmd):
if pasv_mode is not None and pasv_mode.lower().decode() == "yes":
log("PASV", cmd)
self.pasv_mode = True
self.serverSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.serverSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.serverSock.bind((HOST, 0))
self.serverSock.listen(5)
addr, port = self.serverSock.getsockname()
self.sendCommand('227 Entering Passive Mode (%s,%u,%u).\r\n' %
(','.join(addr.split('.')), port >> 8 & 0xFF, port & 0xFF))
else:
log("PASV", "PASV function is disabled by config file")
# Use a different mechanism of creating a data connection. The PORT request has a parameter in the form:
# h1,h2,h3,h4,p1,p2 : meaning that the client is listening for connections on TCP port p1*256+p2
# at IP address h1.h2.h3.h4
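    # Worked example (added for illustration): "PORT 192,168,1,2,7,138" means the client
    # listens at 192.168.1.2 on TCP port 7*256+138 = 1930.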
def PORT(self, cmd):
if port_mode is not None and port_mode.lower().decode() == "yes" :
"""Start an active data channel by using IPv4."""
log("PORT: ", cmd)
if self.pasv_mode:
            self.serverSock.close()
self.pasv_mode = False
l = cmd[5:].split(',')
self.dataSockAddr = '.'.join(l[:4])
self.dataSockPort = (int(l[4]) << 8) + int(l[5])
self.sendCommand('200 Get port.\r\n')
else:
log("PORT", "PORT function is disabled by config file")
# Return current working directory
def PWD(self, cmd):
log('PWD', cmd)
self.sendCommand('257 "%s".\r\n' % self.cwd)
def QUIT(self, arg):
log('QUIT', arg)
self.authenticated = False
self.username = None
self.passwd = None
self.sendCommand('221 Goodbye.\r\n')
# Send the contents of a file over the data connection already established
def RETR(self, filename):
pathname = os.path.join(self.cwd, filename)
log('RETR', pathname)
if not os.path.exists(pathname):
return
try:
if self.mode == 'I':
file = open(pathname, 'rb')
else:
file = open(pathname, 'r')
        except OSError as err:
            log('RETR', err)
            return
self.sendCommand('150 Opening data connection.\r\n')
if self.rest:
file.seek(self.pos)
self.rest = False
self.startDataSock()
while True:
data = file.read(1024)
if not data: break
self.sendData(data)
file.close()
self.stopDataSock()
self.sendCommand('226 Transfer complete.\r\n')
# Read the contents of a file and upload to server
def STOR(self, filename):
if not self.authenticated:
self.sendCommand('530 STOR failed User not logged in.\r\n')
return
pathname = os.path.join(self.cwd, filename)
log('STOR', pathname)
try:
if self.mode == 'I':
file = open(pathname, 'wb')
else:
file = open(pathname, 'w')
        except OSError as err:
            log('STOR', err)
            return
self.sendCommand('150 Opening data connection.\r\n')
self.startDataSock()
while True:
data = self.dataSock.recv(1024)
if not data: break
file.write(data)
file.close()
self.stopDataSock()
self.sendCommand('226 Transfer completed.\r\n')
# Sets the transfer mode (ASCII/Binary).
def TYPE(self, type):
log('TYPE', type)
self.mode = type
if self.mode == 'I':
self.sendCommand('200 Binary mode.\r\n')
elif self.mode == 'A':
self.sendCommand('200 Ascii mode.\r\n')
# Information about the server's operating system
def SYST(self, arg):
log('SYST', arg)
self.sendCommand('215 %s type.\r\n' % sys.platform)
# Set the username required to authenticate
def USER(self, user):
log("USER", user)
if not user:
self.sendCommand('501 Syntax error in parameters or arguments.\r\n')
else:
if self.banned_username:
log('USER', "This username is blocked: " + user)
else:
self.sendCommand('331 User name okay, need password.\r\n')
self.username = user
# # Optional functions ##
def NLIST(self, dirpath):
self.LIST(dirpath)
def DELE(self, filename):
pathname = filename.endswith(os.path.sep) and filename or os.path.join(self.cwd, filename)
log('DELE', pathname)
if not self.authenticated:
self.sendCommand('530 User not logged in.\r\n')
elif not os.path.exists(pathname):
self.sendCommand('550 DELE failed File %s not exists.\r\n' % pathname)
elif not allow_delete:
self.sendCommand('450 DELE failed delete not allow.\r\n')
else:
os.remove(pathname)
self.sendCommand('250 File deleted.\r\n')
def MKD(self, dirname):
pathname = dirname.endswith(os.path.sep) and dirname or os.path.join(self.cwd, dirname)
log('MKD', pathname)
if not self.authenticated:
self.sendCommand('530 User not logged in.\r\n')
else:
try:
os.mkdir(pathname)
self.sendCommand('257 Directory created.\r\n')
except OSError:
self.sendCommand('550 MKD failed Directory "%s" already exists.\r\n' % pathname)
def RMD(self, dirname):
import shutil
pathname = dirname.endswith(os.path.sep) and dirname or os.path.join(self.cwd, dirname)
log('RMD', pathname)
if not self.authenticated:
self.sendCommand('530 User not logged in.\r\n')
elif not allow_delete:
            self.sendCommand('450 RMD failed delete not allowed.\r\n')
elif not os.path.exists(pathname):
self.sendCommand('550 RMDIR failed Directory "%s" not exists.\r\n' % pathname)
else:
shutil.rmtree(pathname)
self.sendCommand('250 Directory deleted.\r\n')
def RNFR(self, filename):
pathname = filename.endswith(os.path.sep) and filename or os.path.join(self.cwd, filename)
log('RNFR', pathname)
if not os.path.exists(pathname):
self.sendCommand('550 RNFR failed File or Directory %s not exists.\r\n' % pathname)
else:
self.rnfr = pathname
def RNTO(self, filename):
pathname = filename.endswith(os.path.sep) and filename or os.path.join(self.cwd, filename)
log('RNTO', pathname)
        if not hasattr(self, 'rnfr') or not os.path.exists(self.rnfr):
            self.sendCommand('550 RNTO failed File or Directory %s not exists.\r\n' % pathname)
else:
try:
os.rename(self.rnfr, pathname)
except OSError as err:
log('RNTO', err)
def REST(self, pos):
self.pos = int(pos)
log('REST', self.pos)
self.rest = True
        self.sendCommand('250 File position reset.\r\n')
def APPE(self, filename):
if not self.authenticated:
self.sendCommand('530 APPE failed User not logged in.\r\n')
return
pathname = filename.endswith(os.path.sep) and filename or os.path.join(self.cwd, filename)
log('APPE', pathname)
self.sendCommand('150 Opening data connection.\r\n')
self.startDataSock()
if not os.path.exists(pathname):
if self.mode == 'I':
file = open(pathname, 'wb')
else:
file = open(pathname, 'w')
while True:
data = self.dataSock.recv(1024)
if not data:
break
file.write(data)
else:
n = 1
            while os.path.exists(pathname):  # pick a name that does not collide with an existing file
filename, extname = os.path.splitext(pathname)
pathname = filename + '(%s)' % n + extname
n += 1
if self.mode == 'I':
file = open(pathname, 'wb')
else:
file = open(pathname, 'w')
while True:
data = self.dataSock.recv(1024)
if not data:
break
file.write(data)
file.close()
self.stopDataSock()
self.sendCommand('226 Transfer completed.\r\n')
def serverListener():
''' AF_INET refers to the address family ipv4 '''
''' SOCK_STREAM means connection oriented TCP protocol '''
listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_sock.bind((HOST, PORT))
listen_sock.listen(5) # put the socket into listening mode
log('Server started', 'Listen on: %s, %s' % listen_sock.getsockname())
''' a forever loop until we interrupt it or an error occurs '''
while True:
connection, address = listen_sock.accept() # Establish connection with client.
f = FtpServerProtocol(connection, address)
f.start()
log('Accept', 'Created a new connection %s, %s' % address)
if __name__ == "__main__":
# if config file is not configured properly the stop the server
if port_mode.lower().decode() == "no" and pasv_mode.lower().decode() == "no":
log('Server stop', "PortMode and PasvMode can't be both disabled. Please check config file")
sys.exit()
# the program should have 2 arguments: `1- log file; 2- port number
if len(sys.argv) == 3: # Should be check for 3 because the first argument is the running filename
arg_log_file = sys.argv[1] # This should be the path file to write logs
arg_port = int(sys.argv[2]) # This should be the port number to run the server
if os.path.exists(os.path.dirname(arg_log_file)):
logfile = arg_log_file
else:
logfile = os.getcwd() + r'/' + arg_log_file
if not 0 <= arg_port <= 65535:
log('Server stop', 'The port number should be between 0 and 65535')
sys.exit()
else:
PORT = arg_port
log('Start ftp server:', 'Enter q or Q to stop ftpServer...')
        listener = threading.Thread(target=serverListener)
        listener.daemon = True  # allow the process to exit while the listener blocks on accept()
        listener.start()
        if sys.version_info[0] < 3:
            input = raw_input
        if input().lower() == "q":
            log('Server stop', 'Server closed')
            sys.exit()
else: # send error
log('Server stop', 'To start the socket server you should pass 2 arguments')
log('Server stop', 'First is the log file and the Second is the port which the program will be running')
log('Server stop', 'Syntax: python ftp_server_v0.1 socket-server.log 8888.')
sys.exit()
|
[
"noreply@github.com"
] |
saugatsthapit.noreply@github.com
|
50aa3d3fde4f45e299e57f5470df22dd1506ecc9
|
4b59ace76840cbeb28f0fac19f128cd3959a7c3a
|
/frontend/run.py
|
d381fa5212fed8cb580617aea4b4eea3d3db4729
|
[] |
no_license
|
JoshuadeJong/ecommerce-microservice
|
2a8f4d2f091c459dc9bcb47c01904f21c478cf91
|
246c6d0eb014df2946874cafcddebea1e0eaa97d
|
refs/heads/master
| 2023-03-25T23:57:15.369285
| 2021-03-19T15:47:32
| 2021-03-19T15:47:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 99
|
py
|
from app.main import create_app
app = create_app()
app.run(host="0.0.0.0", port=5600, debug=True)
|
[
"joshuakdejong@gmail.com"
] |
joshuakdejong@gmail.com
|
dcc0392f6613bccd534f30864e88e1035d08ff51
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_229/ch119_2020_04_01_13_58_16_858102.py
|
a55a95c429d1eaf3d9aa548c9c3726d2275bef4b
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
import math
def calcula_euler(i,s,n,x):
i = 0
s = 0
for i in range(n+1):
s += (x**i)/math.factorial(i)
print(s)
return s
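# The loop above sums the first n+1 terms of the Taylor series for e**x;
# for example, n=10 and x=1 give roughly 2.7182818, close to Euler's number e.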
print(calcula_euler(0, 0, 10, 1))  # illustrative call: 10 terms of the series for e**1
|
[
"you@example.com"
] |
you@example.com
|
d268b8cd4ef4affc04c41df7e087074e54334cf2
|
07519ddf6395a765f484464dab0e8ba0305486ae
|
/rango/migrations/0001_initial.py
|
47130fea4d3da9a31111ce705601aea8599ef9d3
|
[] |
no_license
|
AdamClarkCode/tango_with_django_project
|
57e95d5d798a97b274c27926e98fd935253a87c5
|
19a2ba5deb59437448e4ba840f6b46ab694708e4
|
refs/heads/main
| 2023-03-03T08:41:58.571591
| 2021-02-11T22:19:08
| 2021-02-11T22:19:08
| 329,870,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
# Generated by Django 2.2.17 on 2021-01-29 10:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True)),
],
),
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=128)),
('url', models.URLField()),
('views', models.IntegerField(default=0)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rango.Category')),
],
),
]
|
[
"2468460C@student.gla.ac.uk"
] |
2468460C@student.gla.ac.uk
|
14eb245a021e3d508f9db7fff54caa3161fff836
|
76fb402dec3e1f9cb6c67f1fae81d3b3864fc120
|
/phoenix/modules/mkpasswd.py
|
573d0169b099764a848ec85d0b3accfebb47ca80
|
[
"Apache-2.0"
] |
permissive
|
IsaPeter/PythonProjects
|
3021dbbd15398254f647bc8f1ddc13fd540df22a
|
62885fa6d4180e7b2c83fbb67541dc3fc3e29489
|
refs/heads/master
| 2023-04-21T13:53:09.114918
| 2021-05-14T09:09:24
| 2021-05-14T09:09:24
| 312,594,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,750
|
py
|
#!/usr/bin/env python3
import crypt
import os,sys
# append base path to sys.path
runpath = os.path.dirname(os.path.realpath(__file__))
approot = os.path.abspath(os.path.join(runpath, os.pardir))
sys.path.append(os.path.join(runpath,'..'))
sys.path.append(approot)
import lib.commandparser as cp
# python3 -c 'import crypt; print(crypt.crypt("test", crypt.mksalt(crypt.METHOD_SHA512)))'
module_name ="Make Password"
module_id = "mkpasswd"
module_description="Create UNIX like passwords"
module_type = 'module'
variables = {'password':'','type':'sha512',}
args = None
parser = None
def parse_arguments(command):
global args, parser
parser = cp.commandParser(command)
parser.add_argument('-t','--type',name='hashtype',help='The type of the used HASH algo.')
parser.add_argument('-p','--password',name='password',help='The password string')
parser.add_argument('-l','--list',name='listtypes',hasvalue=False,help='list the available hash types')
parser.add_argument('-h','--help',name='help',hasvalue=False,help='Show the help menu')
args = parser.parse()
return args
def help():
pass
def run(arguments=''):
makepass = True
hashtype = 'sha512'
password = ''
if arguments == '':
password = variables['password']
if password != '':
enc = crypt.crypt(password, crypt.mksalt(crypt.METHOD_SHA512))
print(enc)
return enc
else:
print("Please specify a Password string")
else:
args = parse_arguments(arguments)
if parser.exists('password'): password = parser.get_value('password')
if parser.exists('hashtype'): hashtype = parser.get_value('hashtype').lower()
if parser.exists('listtypes'):
            makepass = False
print("Blowfish; Crypt; MD5; SHA256; SHA512")
return None
if parser.exists('help'):
parser.print_help()
if makepass:
if password != '':
if hashtype == 'blowfish':
enc = crypt.crypt(password, crypt.mksalt(crypt.METHOD_BLOWFISH))
elif hashtype == 'crypt':
enc = crypt.crypt(password, crypt.mksalt(crypt.METHOD_CRYPT))
elif hashtype == 'md5':
enc = crypt.crypt(password, crypt.mksalt(crypt.METHOD_MD5))
elif hashtype == 'sha256':
enc = crypt.crypt(password, crypt.mksalt(crypt.METHOD_SHA256))
elif hashtype == 'sha512':
enc = crypt.crypt(password, crypt.mksalt(crypt.METHOD_SHA512))
print(enc)
return enc
#run('programneme --help')
|
[
"venom@kali.local"
] |
venom@kali.local
|
00bae78e87a3f76100a2e8f156bf108f25544378
|
aafa649d1faae4bc9ab703ea20d605c921f7e953
|
/fib_sum_last.py
|
d8894c6706f1d91d6487bfd0e5d31ec0a5f1304c
|
[] |
no_license
|
sivakumarmanoharan/Algorithms-WarmUp
|
010424b57138b1c41dd0a516762510d9f245b5fd
|
ae2da1f35a100b74e04b70151430224a8db34851
|
refs/heads/master
| 2022-11-27T18:49:28.247010
| 2020-08-08T06:50:23
| 2020-08-08T06:50:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
a=[0,1]
n=int(input())
if n==0:
print(a[0])
elif n==1:
print(a[1])
else:
for i in range(2,n+1):
fib=a[i-1]+a[i-2]
a.append(fib)
add=sum(a)
print(add%10)
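# Example (added for illustration): n = 10 gives Fibonacci numbers F0..F10 summing to 143,
# so the program prints 3 (the last digit of the sum).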
|
[
"noreply@github.com"
] |
sivakumarmanoharan.noreply@github.com
|
d9f56c3577f1d257f62e56961bf26a6d25353d16
|
ca31add88a0cc32e2a52f7369fd90c9a37121e44
|
/Code.py
|
147fd8c585b49d2df5099adcad2014a89c61bb28
|
[
"MIT"
] |
permissive
|
sumitsantape30/The-Random-Guess
|
852359f398c32b50e69d3cf56ba664bea92b4558
|
39b2ada17bd23bf45eabba7eebd0734c3f9108fa
|
refs/heads/main
| 2023-01-18T17:18:36.615272
| 2020-11-21T03:01:49
| 2020-11-21T03:01:49
| 314,531,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 710
|
py
|
import random
randNumber= random.randint(1,100)
userGuess= None
guesses= 0
while( userGuess != randNumber):
userGuess= int(input("Enter your guess"))
guesses += 1
if( userGuess == randNumber):
print("You guressed it right!")
else:
if(userGuess > randNumber):
print("You guessed it wrong! Enter the smaller Number")
else:
print("You guessed it wrong! Enter the larger Number")
print(f"You guessed the number in {guesses} guesses.")
with open("hiscore.txt","r") as f:
hiscore = int(f.read())
if(guesses < hiscore):
print("You have just broken the high Score")
with open("hiscore.txt","w") as f:
f.write(str(guesses))
|
[
"noreply@github.com"
] |
sumitsantape30.noreply@github.com
|
aef02d6ee5b4300fff588f020b1734f514e9c784
|
1691d4ad8e91db36573238724034e9e82e60e484
|
/encoder_decoder_model/enet_decoder.py
|
789bf7b0f10e05e8d42ea832cac4cdbfedb8f95a
|
[] |
no_license
|
lzb863/Enet
|
7276c6f992b403c0712c922e418295b190662c7d
|
69cc8c6fe31004284c80de0782f9fcf2709d7cd6
|
refs/heads/master
| 2022-01-11T13:18:05.423742
| 2019-07-09T02:44:36
| 2019-07-09T02:44:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,340
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2018.09.06 14:48
# @Author : Aaron Ran
# @IDE: PyCharm Community Edition
"""
Implements a feature encoding class based on the Enet encoder
"""
from collections import OrderedDict
import tensorflow as tf
from encoder_decoder_model.ENet import Enet_model
class Enet_decoder(Enet_model):
"""
    Implements an ENet-based feature decoding class
"""
def __init__(self):
"""
"""
super(Enet_decoder, self).__init__()
def decode_seg(self, input_tensor, later_drop_prob,
pooling_indices_1, pooling_indices_2, scope):
ret = OrderedDict()
with tf.variable_scope(scope):
# Encoder_3_seg
print("####### Encoder_3_seg")
network_seg = self.encoder_bottleneck_regular(x=input_tensor, output_depth=128,
drop_prob=later_drop_prob,
scope="seg_bottleneck_3_1")
network_seg = self.encoder_bottleneck_dilated(x=network_seg, output_depth=128,
drop_prob=later_drop_prob,
scope="seg_bottleneck_3_2", dilation_rate=2)
network_seg = self.encoder_bottleneck_asymmetric(x=network_seg, output_depth=128,
drop_prob=later_drop_prob,
scope="seg_bottleneck_3_3")
network_seg = self.encoder_bottleneck_dilated(x=network_seg, output_depth=128,
drop_prob=later_drop_prob,
scope="seg_bottleneck_3_4", dilation_rate=4)
network_seg = self.encoder_bottleneck_regular(x=network_seg, output_depth=128,
drop_prob=later_drop_prob,
scope="seg_bottleneck_3_5")
network_seg = self.encoder_bottleneck_dilated(x=network_seg, output_depth=128,
drop_prob=later_drop_prob,
scope="seg_bottleneck_3_6", dilation_rate=8)
network_seg = self.encoder_bottleneck_asymmetric(x=network_seg, output_depth=128,
drop_prob=later_drop_prob,
scope="seg_bottleneck_3_7")
network_seg = self.encoder_bottleneck_dilated(x=network_seg, output_depth=128,
drop_prob=later_drop_prob,
scope="seg_bottleneck_3_8", dilation_rate=16)
ret['stage3_seg'] = dict()
ret['stage3_seg']['data'] = network_seg
ret['stage3_seg']['shape'] = network_seg.get_shape().as_list()
# decoder
# # Decoder_1_seg
print("####### # # Decoder_1_seg")
network_seg = self.decoder_bottleneck(x=network_seg, output_depth=64,
scope="seg_bottleneck_4_0", upsampling=True,
pooling_indices=pooling_indices_2)
network_seg = self.decoder_bottleneck(x=network_seg, output_depth=64,
scope="seg_bottleneck_4_1")
network_seg = self.decoder_bottleneck(x=network_seg, output_depth=64,
scope="seg_bottleneck_4_2")
ret['stage4_seg'] = dict()
ret['stage4_seg']['data'] = network_seg
ret['stage4_seg']['shape'] = network_seg.get_shape().as_list()
# # Decoder_2_seg
print("####### # # Decoder_2_seg")
network_seg = self.decoder_bottleneck(x=network_seg, output_depth=16,
scope="seg_bottleneck_5_0", upsampling=True,
pooling_indices=pooling_indices_1)
network_seg = self.decoder_bottleneck(x=network_seg, output_depth=16,
scope="seg_bottleneck_5_1")
ret['stage5_seg'] = dict()
ret['stage5_seg']['data'] = network_seg
ret['stage5_seg']['shape'] = network_seg.get_shape().as_list()
# segmentation
# # arg[1] = 2: in semantic segmentation branch
# # arg[1] = 3: in embedding branch
network_seg = tf.contrib.slim.conv2d_transpose(network_seg, 2,
[2, 2], stride=2, scope="seg_fullconv", padding="SAME")
print("################ total output = %s" % network_seg.get_shape().as_list())
ret['fullconv_seg'] = dict()
            ret['fullconv_seg']['data'] = network_seg  # the output binary segmentation map
ret['fullconv_seg']['shape'] = network_seg.get_shape().as_list()
return ret
def decode_emb(self, input_tensor, later_drop_prob,
pooling_indices_1, pooling_indices_2, scope):
ret = OrderedDict()
with tf.variable_scope(scope):
# Encoder_3_emb
network_emb = self.encoder_bottleneck_regular(x=input_tensor, output_depth=128,
drop_prob=later_drop_prob,
scope="emb_bottleneck_3_1")
network_emb = self.encoder_bottleneck_dilated(x=network_emb, output_depth=128,
drop_prob=later_drop_prob,
scope="emb_bottleneck_3_2", dilation_rate=2)
network_emb = self.encoder_bottleneck_asymmetric(x=network_emb, output_depth=128,
drop_prob=later_drop_prob,
scope="emb_bottleneck_3_3")
network_emb = self.encoder_bottleneck_dilated(x=network_emb, output_depth=128,
drop_prob=later_drop_prob,
scope="emb_bottleneck_3_4", dilation_rate=4)
network_emb = self.encoder_bottleneck_regular(x=network_emb, output_depth=128,
drop_prob=later_drop_prob,
scope="emb_bottleneck_3_5")
network_emb = self.encoder_bottleneck_dilated(x=network_emb, output_depth=128,
drop_prob=later_drop_prob,
scope="emb_bottleneck_3_6", dilation_rate=8)
network_emb = self.encoder_bottleneck_asymmetric(x=network_emb, output_depth=128,
drop_prob=later_drop_prob,
scope="emb_bottleneck_3_7")
network_emb = self.encoder_bottleneck_dilated(x=network_emb, output_depth=128,
drop_prob=later_drop_prob,
scope="emb_bottleneck_3_8", dilation_rate=16)
ret['stage3_emb'] = dict()
ret['stage3_emb']['data'] = network_emb
ret['stage3_emb']['shape'] = network_emb.get_shape().as_list()
# decoder
# # Decoder_1_emb
network_emb = self.decoder_bottleneck(x=network_emb, output_depth=64,
scope="emb_bottleneck_4_0", upsampling=True,
pooling_indices=pooling_indices_2)
network_emb = self.decoder_bottleneck(x=network_emb, output_depth=64,
scope="emb_bottleneck_4_1")
network_emb = self.decoder_bottleneck(x=network_emb, output_depth=64,
scope="emb_bottleneck_4_2")
ret['stage4_emb'] = dict()
ret['stage4_emb']['data'] = network_emb
ret['stage4_emb']['shape'] = network_emb.get_shape().as_list()
# # Decoder_2_emb
network_emb = self.decoder_bottleneck(x=network_emb, output_depth=16,
scope="emb_bottleneck_5_0", upsampling=True,
pooling_indices=pooling_indices_1)
network_emb = self.decoder_bottleneck(x=network_emb, output_depth=16,
scope="emb_bottleneck_5_1")
ret['stage5_emb'] = dict()
ret['stage5_emb']['data'] = network_emb
ret['stage5_emb']['shape'] = network_emb.get_shape().as_list()
# embedding
# # arg[1] = 1: in semantic segmentation branch
# # arg[1] = 3: in embedding branch
network_emb = tf.contrib.slim.conv2d_transpose(network_emb, 3,
[2, 2], stride=2, scope="emb_fullconv", padding="SAME")
ret['fullconv_emb'] = dict()
ret['fullconv_emb']['data'] = network_emb
ret['fullconv_emb']['shape'] = network_emb.get_shape().as_list()
return ret
if __name__ == '__main__':
input_tensor = tf.placeholder(tf.float32, shape=[1, 90, 160, 128], name="input_tensor")
later_drop_prob_ph = tf.placeholder(tf.float32, name="later_drop_prob_ph")
inputs_shape_1 = tf.placeholder(tf.float32, shape=[1, 360, 640, 16], name="inputs_shape_1")
inputs_shape_2 = tf.placeholder(tf.float32, shape=[1, 180, 320, 64], name="inputs_shape_2")
pooling_indices_1 = tf.placeholder(tf.float32, shape=[1, 180, 320, 16], name="pooling_indices_1")
pooling_indices_2 = tf.placeholder(tf.float32, shape=[1, 90, 160, 64], name="pooling_indices_2")
decoder = Enet_decoder()
seg = decoder.decode_seg(input_tensor=input_tensor, later_drop_prob=later_drop_prob_ph,
pooling_indices_1=pooling_indices_1, pooling_indices_2=pooling_indices_2,
scope="decode_seg")
for layer_name, layer_info in seg.items():
print('layer name: {:s} shape: {}'.format(layer_name, layer_info['shape']))
emb = decoder.decode_emb(input_tensor=input_tensor, later_drop_prob=later_drop_prob_ph,
pooling_indices_1=pooling_indices_1, pooling_indices_2=pooling_indices_2,
scope="decode_emb")
for layer_name, layer_info in emb.items():
print('layer name: {:s} shape: {}'.format(layer_name, layer_info['shape']))
|
[
"654053334@qq.com"
] |
654053334@qq.com
|
8d258ed0b77093840a7c7eeaeadb01c70954d23e
|
ddce82b1d34fb613d0fa251d4aed93ce232703df
|
/wrappedAlgorithms/PISWAP/networkx/algorithms/flow/tests/test_mincost.py
|
ae0dac623c416aa4e11de81fb2b6c685678d3d5f
|
[] |
no_license
|
waynebhayes/SANA
|
64906c7a7761a07085e2112a0685fa9fbe7313e6
|
458cbc5e83d0541717184a5ff0930d7003c3e3ef
|
refs/heads/SANA2
| 2023-08-21T20:25:47.376666
| 2023-08-10T19:54:57
| 2023-08-10T19:54:57
| 86,848,250
| 20
| 99
| null | 2023-03-07T19:43:10
| 2017-03-31T18:21:01
|
Game Maker Language
|
UTF-8
|
Python
| false
| false
| 10,348
|
py
|
# -*- coding: utf-8 -*-
import networkx as nx
from nose.tools import assert_equal, assert_raises
class TestNetworkSimplex:
def test_simple_digraph(self):
G = nx.DiGraph()
G.add_node('a', demand = -5)
G.add_node('d', demand = 5)
G.add_edge('a', 'b', weight = 3, capacity = 4)
G.add_edge('a', 'c', weight = 6, capacity = 10)
G.add_edge('b', 'd', weight = 1, capacity = 9)
G.add_edge('c', 'd', weight = 2, capacity = 5)
flowCost, H = nx.network_simplex(G)
soln = {'a': {'b': 4, 'c': 1},
'b': {'d': 4},
'c': {'d': 1},
'd': {}}
assert_equal(flowCost, 24)
assert_equal(nx.min_cost_flow_cost(G), 24)
assert_equal(H, soln)
assert_equal(nx.min_cost_flow(G), soln)
assert_equal(nx.cost_of_flow(G, H), 24)
def test_negcycle_infcap(self):
G = nx.DiGraph()
G.add_node('s', demand = -5)
G.add_node('t', demand = 5)
G.add_edge('s', 'a', weight = 1, capacity = 3)
G.add_edge('a', 'b', weight = 3)
G.add_edge('c', 'a', weight = -6)
G.add_edge('b', 'd', weight = 1)
G.add_edge('d', 'c', weight = -2)
G.add_edge('d', 't', weight = 1, capacity = 3)
assert_raises(nx.NetworkXUnbounded, nx.network_simplex, G)
def test_sum_demands_not_zero(self):
G = nx.DiGraph()
G.add_node('s', demand = -5)
G.add_node('t', demand = 4)
G.add_edge('s', 'a', weight = 1, capacity = 3)
G.add_edge('a', 'b', weight = 3)
G.add_edge('a', 'c', weight = -6)
G.add_edge('b', 'd', weight = 1)
G.add_edge('c', 'd', weight = -2)
G.add_edge('d', 't', weight = 1, capacity = 3)
assert_raises(nx.NetworkXUnfeasible, nx.network_simplex, G)
def test_no_flow_satisfying_demands(self):
G = nx.DiGraph()
G.add_node('s', demand = -5)
G.add_node('t', demand = 5)
G.add_edge('s', 'a', weight = 1, capacity = 3)
G.add_edge('a', 'b', weight = 3)
G.add_edge('a', 'c', weight = -6)
G.add_edge('b', 'd', weight = 1)
G.add_edge('c', 'd', weight = -2)
G.add_edge('d', 't', weight = 1, capacity = 3)
assert_raises(nx.NetworkXUnfeasible, nx.network_simplex, G)
def test_transshipment(self):
G = nx.DiGraph()
G.add_node('a', demand = 1)
G.add_node('b', demand = -2)
G.add_node('c', demand = -2)
G.add_node('d', demand = 3)
G.add_node('e', demand = -4)
G.add_node('f', demand = -4)
G.add_node('g', demand = 3)
G.add_node('h', demand = 2)
G.add_node('r', demand = 3)
G.add_edge('a', 'c', weight = 3)
G.add_edge('r', 'a', weight = 2)
G.add_edge('b', 'a', weight = 9)
G.add_edge('r', 'c', weight = 0)
G.add_edge('b', 'r', weight = -6)
G.add_edge('c', 'd', weight = 5)
G.add_edge('e', 'r', weight = 4)
G.add_edge('e', 'f', weight = 3)
G.add_edge('h', 'b', weight = 4)
G.add_edge('f', 'd', weight = 7)
G.add_edge('f', 'h', weight = 12)
G.add_edge('g', 'd', weight = 12)
G.add_edge('f', 'g', weight = -1)
G.add_edge('h', 'g', weight = -10)
flowCost, H = nx.network_simplex(G)
soln = {'a': {'c': 0},
'b': {'a': 0, 'r': 2},
'c': {'d': 3},
'd': {},
'e': {'r': 3, 'f': 1},
'f': {'d': 0, 'g': 3, 'h': 2},
'g': {'d': 0},
'h': {'b': 0, 'g': 0},
'r': {'a': 1, 'c': 1}}
assert_equal(flowCost, 41)
assert_equal(nx.min_cost_flow_cost(G), 41)
assert_equal(H, soln)
assert_equal(nx.min_cost_flow(G), soln)
assert_equal(nx.cost_of_flow(G, H), 41)
def test_max_flow_min_cost(self):
G = nx.DiGraph()
G.add_edge('s', 'a', bandwidth = 6)
G.add_edge('s', 'c', bandwidth = 10, cost = 10)
G.add_edge('a', 'b', cost = 6)
G.add_edge('b', 'd', bandwidth = 8, cost = 7)
G.add_edge('c', 'd', cost = 10)
G.add_edge('d', 't', bandwidth = 5, cost = 5)
soln = {'s': {'a': 5, 'c': 0},
'a': {'b': 5},
'b': {'d': 5},
'c': {'d': 0},
'd': {'t': 5},
't': {}}
flow = nx.max_flow_min_cost(G, 's', 't', capacity = 'bandwidth',
weight = 'cost')
assert_equal(flow, soln)
assert_equal(nx.cost_of_flow(G, flow, weight = 'cost'), 90)
def test_digraph1(self):
# From Bradley, S. P., Hax, A. C. and Magnanti, T. L. Applied
# Mathematical Programming. Addison-Wesley, 1977.
G = nx.DiGraph()
G.add_node(1, demand = -20)
G.add_node(4, demand = 5)
G.add_node(5, demand = 15)
G.add_edges_from([(1, 2, {'capacity': 15, 'weight': 4}),
(1, 3, {'capacity': 8, 'weight': 4}),
(2, 3, {'weight': 2}),
(2, 4, {'capacity': 4, 'weight': 2}),
(2, 5, {'capacity': 10, 'weight': 6}),
(3, 4, {'capacity': 15, 'weight': 1}),
(3, 5, {'capacity': 5, 'weight': 3}),
(4, 5, {'weight': 2}),
(5, 3, {'capacity': 4, 'weight': 1})])
flowCost, H = nx.network_simplex(G)
soln = {1: {2: 12, 3: 8},
2: {3: 8, 4: 4, 5: 0},
3: {4: 11, 5: 5},
4: {5: 10},
5: {3: 0}}
assert_equal(flowCost, 150)
assert_equal(nx.min_cost_flow_cost(G), 150)
assert_equal(H, soln)
assert_equal(nx.min_cost_flow(G), soln)
assert_equal(nx.cost_of_flow(G, H), 150)
def test_digraph2(self):
# Example from ticket #430 from mfrasca. Original source:
# http://www.cs.princeton.edu/courses/archive/spr03/cs226/lectures/mincost.4up.pdf, slide 11.
G = nx.DiGraph()
G.add_edge('s', 1, capacity=12)
G.add_edge('s', 2, capacity=6)
G.add_edge('s', 3, capacity=14)
G.add_edge(1, 2, capacity=11, weight=4)
G.add_edge(2, 3, capacity=9, weight=6)
G.add_edge(1, 4, capacity=5, weight=5)
G.add_edge(1, 5, capacity=2, weight=12)
G.add_edge(2, 5, capacity=4, weight=4)
G.add_edge(2, 6, capacity=2, weight=6)
G.add_edge(3, 6, capacity=31, weight=3)
G.add_edge(4, 5, capacity=18, weight=4)
G.add_edge(5, 6, capacity=9, weight=5)
G.add_edge(4, 't', capacity=3)
G.add_edge(5, 't', capacity=7)
G.add_edge(6, 't', capacity=22)
flow = nx.max_flow_min_cost(G, 's', 't')
soln = {1: {2: 6, 4: 5, 5: 1},
2: {3: 6, 5: 4, 6: 2},
3: {6: 20},
4: {5: 2, 't': 3},
5: {6: 0, 't': 7},
6: {'t': 22},
's': {1: 12, 2: 6, 3: 14},
't': {}}
assert_equal(flow, soln)
def test_digraph3(self):
"""Combinatorial Optimization: Algorithms and Complexity,
Papadimitriou Steiglitz at page 140 has an example, 7.1, but that
admits multiple solutions, so I alter it a bit. From ticket #430
by mfrasca."""
G = nx.DiGraph()
G.add_edge('s', 'a', {0: 2, 1: 4})
G.add_edge('s', 'b', {0: 2, 1: 1})
G.add_edge('a', 'b', {0: 5, 1: 2})
G.add_edge('a', 't', {0: 1, 1: 5})
G.add_edge('b', 'a', {0: 1, 1: 3})
G.add_edge('b', 't', {0: 3, 1: 2})
"PS.ex.7.1: testing main function"
sol = nx.max_flow_min_cost(G, 's', 't', capacity=0, weight=1)
flow = sum(v for v in sol['s'].values())
assert_equal(4, flow)
assert_equal(23, nx.cost_of_flow(G, sol, weight=1))
assert_equal(sol['s'], {'a': 2, 'b': 2})
assert_equal(sol['a'], {'b': 1, 't': 1})
assert_equal(sol['b'], {'a': 0, 't': 3})
assert_equal(sol['t'], {})
def test_zero_capacity_edges(self):
"""Address issue raised in ticket #617 by arv."""
G = nx.DiGraph()
G.add_edges_from([(1, 2, {'capacity': 1, 'weight': 1}),
(1, 5, {'capacity': 1, 'weight': 1}),
(2, 3, {'capacity': 0, 'weight': 1}),
(2, 5, {'capacity': 1, 'weight': 1}),
(5, 3, {'capacity': 2, 'weight': 1}),
(5, 4, {'capacity': 0, 'weight': 1}),
(3, 4, {'capacity': 2, 'weight': 1})])
G.node[1]['demand'] = -1
G.node[2]['demand'] = -1
G.node[4]['demand'] = 2
flowCost, H = nx.network_simplex(G)
soln = {1: {2: 0, 5: 1},
2: {3: 0, 5: 1},
3: {4: 2},
4: {},
5: {3: 2, 4: 0}}
assert_equal(flowCost, 6)
assert_equal(nx.min_cost_flow_cost(G), 6)
assert_equal(H, soln)
assert_equal(nx.min_cost_flow(G), soln)
assert_equal(nx.cost_of_flow(G, H), 6)
def test_digon(self):
"""Check if digons are handled properly. Taken from ticket
#618 by arv."""
nodes = [(1, {}),
(2, {'demand': -4}),
(3, {'demand': 4}),
]
edges = [(1, 2, {'capacity': 3, 'weight': 600000}),
(2, 1, {'capacity': 2, 'weight': 0}),
(2, 3, {'capacity': 5, 'weight': 714285}),
(3, 2, {'capacity': 2, 'weight': 0}),
]
G = nx.DiGraph(edges)
G.add_nodes_from(nodes)
flowCost, H = nx.network_simplex(G)
soln = {1: {2: 0},
2: {1: 0, 3: 4},
3: {2: 0}}
assert_equal(flowCost, 2857140)
assert_equal(nx.min_cost_flow_cost(G), 2857140)
assert_equal(H, soln)
assert_equal(nx.min_cost_flow(G), soln)
assert_equal(nx.cost_of_flow(G, H), 2857140)
def test_multidigraph(self):
"""Raise an exception for multidigraph."""
G = nx.MultiDiGraph()
G.add_weighted_edges_from([(1, 2, 1), (2, 3, 2)], weight='capacity')
assert_raises(nx.NetworkXError, nx.network_simplex, G)
|
[
"palmere@uci.edu"
] |
palmere@uci.edu
|
44210d0cd422a15c1cab58501594a0d005acabfa
|
ac409a5f1ffc1362a937ec14604244425586cadf
|
/ngcccbase/p2ptrade/tests/test_comm.py
|
a966f9b5296d3469effa04c67be2f75622d6daaa
|
[
"MIT"
] |
permissive
|
petertodd/ngcccbase
|
c7209bf913bab7a6bd4e340739013a3319628604
|
9e346eebaf4461589e5fdce098c8d0c5793c9461
|
refs/heads/master
| 2021-01-18T07:19:50.884524
| 2014-03-11T15:50:22
| 2014-03-11T15:50:22
| 17,777,682
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,139
|
py
|
#!/usr/bin/env python
import SocketServer
import SimpleHTTPServer
import threading
import time
import unittest
from ngcccbase.p2ptrade.comm import HTTPComm, ThreadedComm, CommThread
class MockAgent(object):
def dispatch_message(self, m):
pass
class TestServer(threading.Thread):
def __init__(self, address, port):
super(TestServer, self).__init__()
self.httpd = SocketServer.TCPServer((address, port), TestHandler)
def run(self):
self.httpd.serve_forever()
def shutdown(self):
self.httpd.shutdown()
self.httpd.socket.close()
class TestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_response(self, response):
self.send_response(200)
self.send_header("Content-type", "application/json")
self.send_header("Content-length", len(response))
self.end_headers()
self.wfile.write(response)
def do_POST(self):
self.do_response("Success")
def do_GET(self):
self.do_response('[{"content": {"msgid":1, "a":"blah"}, "serial": 1}]')
class TestComm(unittest.TestCase):
def setUp(self):
self.config = {"offer_expiry_interval": 30, "ep_expiry_interval": 30}
self.hcomm = HTTPComm(self.config)
self.msg = {"msgid": 2, "a": "b"}
self.httpd = TestServer("localhost", 8080)
self.httpd.start()
self.tcomm = ThreadedComm(self.hcomm)
self.tcomm.add_agent(MockAgent())
def tearDown(self):
self.httpd.shutdown()
def test_post_message(self):
self.assertTrue(self.hcomm.post_message(self.msg))
def test_poll_and_dispatch(self):
self.hcomm.poll_and_dispatch()
self.assertEqual(self.hcomm.lastpoll, 1)
self.hcomm.poll_and_dispatch()
self.assertEqual(self.hcomm.lastpoll, 1)
def test_threadcomm(self):
self.tcomm.start()
time.sleep(2)
self.hcomm.post_message(self.msg)
self.tcomm.post_message(self.msg)
self.tcomm.poll_and_dispatch()
time.sleep(2)
self.tcomm.stop()
if __name__ == '__main__':
unittest.main()
|
[
"jaejoon@gmail.com"
] |
jaejoon@gmail.com
|
b50412b4ddb40ffff3c5e8ec5fa4a2555b606caa
|
66223c313c447d048412ea582ad93dc2db34d64b
|
/rnn-lstm-model/train_pred.py
|
7083da5a77b5e7eb5ed4b3695120db536aae7720
|
[] |
no_license
|
vishal-k9/Stance_Detection_Task
|
c571b3b4a3d4597449fb73a1cc610b4bb8a34979
|
fb8fe76544bc1f4ad6417cc781fcc3953fbac6d8
|
refs/heads/master
| 2021-08-24T01:51:59.602088
| 2017-12-07T14:41:32
| 2017-12-07T14:41:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,478
|
py
|
#!/usr/bin/python
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.rnn import GRUCell
from tensorflow.python.ops.rnn import dynamic_rnn as rnn
from tensorflow.python.ops.rnn import bidirectional_dynamic_rnn as bi_rnn
from keras.datasets import imdb
from attention import attention
from utils import *
from tabulate import tabulate
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from gensim.models.word2vec import Word2Vec
from collections import Counter, defaultdict
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import StratifiedShuffleSplit
from itertools import izip
from nltk.tokenize import TweetTokenizer
from sklearn.cluster import KMeans
train_data= "../train dataset/Tweet.csv"
train_topic= "../train dataset/Target.csv"
train_label= "../train dataset/Stance.csv"
test_data= "../test dataset/Tweet.csv"
test_topic= "../test dataset/Target.csv"
test_label= "../test dataset/Stance.csv"
target_dict={}
stance_dict={}
inv_target_dict={}
inv_stance_dict={}
x=set()
with open("../train dataset/Target.csv", "rb") as f:
for row in f:
x.add(row.strip())
x=list(x)
i=0
for tar in x:
target_dict[tar]=i
inv_target_dict[i]=tar
i+=1
x=set()
with open("../train dataset/Stance.csv", "rb") as f:
for row in f:
x.add(row.strip())
x=list(x)
i=0
for tar in x:
stance_dict[tar]=i
inv_stance_dict[i]=tar
i+=1
# print target_dict,stance_dict
tknzr=TweetTokenizer()
x_train, y_train = [[] for i in range(5)], [[] for i in range(5)]
X_train, Y_train = [[] for i in range(5)], [[] for i in range(5)]
with open("../train dataset/Tweet.csv", "rb") as f1, open("../train dataset/Target.csv", "rb") as f2, open("../train dataset/Stance.csv", "rb") as f3:
for l1,l2,l3 in izip(f1,f2,f3):
tweet=tknzr.tokenize(l1.strip())
x_train[target_dict[l2.strip()]].append(tweet)
y_train[target_dict[l2.strip()]].append(l3.strip())
x_dev, y_dev = [[] for i in range(5)], [[] for i in range(5)]
X_dev, Y_dev = [[] for i in range(5)], [[] for i in range(5)]
with open("../dev dataset/Tweet.csv", "rb") as f1, open("../dev dataset/Target.csv", "rb") as f2, open("../dev dataset/Stance.csv", "rb") as f3:
for l1,l2,l3 in izip(f1,f2,f3):
tweet=tknzr.tokenize(l1.strip())
x_dev[target_dict[l2.strip()]].append(tweet)
y_dev[target_dict[l2.strip()]].append(l3.strip())
x_test, y_test = [[] for i in range(5)], [[] for i in range(5)]
X_test, Y_test = [[] for i in range(5)], [[] for i in range(5)]
with open("../test dataset/Tweet.csv", "rb") as f1, open("../test dataset/Target.csv", "rb") as f2, open("../test dataset/Stance.csv", "rb") as f3:
for l1,l2,l3 in izip(f1,f2,f3):
tweet=tknzr.tokenize(l1.strip())
x_test[target_dict[l2.strip()]].append(tweet)
y_test[target_dict[l2.strip()]].append(l3.strip())
all_words=[set(w for sen in x_train[i] for w in sen) for i in range(5)]
word_idx=[{} for i in range(5)]
for i in xrange(5):
    j = 0
for word in all_words[i]:
word_idx[i][word]=j
j+=1
NUM_WORDS = 10000
INDEX_FROM = 3
SEQUENCE_LENGTH = 250
EMBEDDING_DIM = 100
HIDDEN_SIZE = 150
ATTENTION_SIZE = 50
KEEP_PROB = 0.8
BATCH_SIZE = 20
NUM_EPOCHS = 10000
DELTA = 0.5
learning_rate=0.05
vocabulary_size=[None for _ in range(5)]
f=open("Prediction.csv","wb")
from random import shuffle
def classifier(X):
pred=np.array([[x] for x in X])
kmeans = KMeans(n_clusters=3, random_state=0).fit(pred)
    centres = np.sort(kmeans.cluster_centers_, axis=None)  # flatten so the three centre values are truly sorted
res=[]
for elem in X:
val=0
dist=float("inf")
for i in xrange(3):
if(abs(elem-centres[i])<dist):
dist=abs(elem-centres[i])
val=i
res.append(val)
return np.array(res)
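# Hedged illustration (not part of the original pipeline): classifier() above
# buckets scalar scores into three clusters with KMeans and, because the
# cluster centres are sorted, low scores map to label 0, mid-range scores to 1
# and high scores to 2. The helper below uses made-up scores purely to show
# that mapping; it is never called by the training loop.
def _classifier_demo():
    demo_scores = [0.05, 0.10, 0.48, 0.52, 0.90, 0.95]  # hypothetical sigmoid outputs
    return classifier(demo_scores)  # expected to return [0, 0, 1, 1, 2, 2]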
for i in xrange(5):
x_train[i]=convert_into_idx(x_train[i], word_idx[i])
vocabulary_size[i] = get_vocabulary_size(x_train[i])
x_test[i] = fit_in_vocabulary(x_test[i],vocabulary_size[i], word_idx[i])
x_dev[i] = fit_in_vocabulary(x_dev[i],vocabulary_size[i], word_idx[i])
X_train[i] = zero_pad(x_train[i], SEQUENCE_LENGTH)
X_dev[i] = zero_pad(x_dev[i], SEQUENCE_LENGTH)
X_test[i] = zero_pad(x_test[i], SEQUENCE_LENGTH)
Y_train[i] = encoding(y_train[i],stance_dict)
Y_dev[i] = encoding(y_dev[i],stance_dict)
Y_test[i] = encoding(y_test[i],stance_dict)
batch_ph = tf.placeholder(tf.int32, [None, SEQUENCE_LENGTH])
target_ph = tf.placeholder(tf.float32, [None])
seq_len_ph = tf.placeholder(tf.int32, [None])
keep_prob_ph = tf.placeholder(tf.float32)
# Embedding layer
embeddings_var = tf.Variable(tf.random_uniform([vocabulary_size[i], EMBEDDING_DIM], -1.0, 1.0), trainable=True)
batch_embedded = tf.nn.embedding_lookup(embeddings_var, batch_ph)
# (Bi-)RNN layer(-s)
with tf.variable_scope(str(i)):
rnn_outputs, _ = bi_rnn(GRUCell(HIDDEN_SIZE), GRUCell(HIDDEN_SIZE),
inputs=batch_embedded, sequence_length=seq_len_ph, dtype=tf.float32)
# Attention layer
attention_output, alphas = attention(rnn_outputs, ATTENTION_SIZE, return_alphas=True)
drop = tf.nn.dropout(attention_output, keep_prob_ph)
W = tf.Variable(tf.truncated_normal([drop.get_shape()[1].value, 1], stddev=0.1))
b = tf.Variable(tf.constant(0., shape=[1]))
y_hat = tf.nn.xw_plus_b(drop, W, b)
y_hat = tf.squeeze(y_hat)
# Cross-entropy loss and optimizer initialization
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_hat, labels=target_ph))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
# Accuracy metric
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(tf.sigmoid(y_hat)), target_ph), tf.float32))
# Actual lengths of sequences
seq_len_dev = np.array([list(x).index(0) + 1 for x in X_dev[i]])
seq_len_test = np.array([list(x).index(0) + 1 for x in X_test[i]])
seq_len_train = np.array([list(x).index(0) + 1 for x in X_train[i]])
# Batch generators
train_batch_generator = batch_generator(X_train[i], Y_train[i], BATCH_SIZE)
test_batch_generator = batch_generator(X_test[i], Y_test[i], BATCH_SIZE)
dev_batch_generator = batch_generator(X_dev[i], Y_dev[i], BATCH_SIZE)
saver = tf.train.Saver()
if __name__ == "__main__":
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print("Start learning...")
for epoch in range(NUM_EPOCHS):
loss_train = 0
loss_test = 0
accuracy_train = 0
accuracy_test = 0
print("epoch: {}\t".format(epoch), end="")
# Training
num_batches = X_train[i].shape[0] / BATCH_SIZE
for b in range(num_batches):
x_batch, y_batch = train_batch_generator.next()
seq_len = np.array([list(x).index(0) + 1 for x in x_batch]) # actual lengths of sequences
temp=x_batch
loss_tr, acc, _ = sess.run([loss, accuracy, optimizer], feed_dict={batch_ph: x_batch, target_ph: y_batch, seq_len_ph: seq_len, keep_prob_ph: KEEP_PROB})
accuracy_train += acc
loss_train = loss_tr * DELTA + loss_train * (1 - DELTA)
accuracy_train /= num_batches
# Testing
num_batches = X_dev[i].shape[0] / BATCH_SIZE
for b in range(num_batches):
x_batch, y_batch = dev_batch_generator.next()
temp=x_batch
seq_len = np.array([list(x).index(0) + 1 for x in x_batch]) # actual lengths of sequences
y_hatv, loss_test_batch, acc = sess.run([y_hat, loss, accuracy], feed_dict={batch_ph: x_batch, target_ph: y_batch, seq_len_ph: seq_len, keep_prob_ph: 1.0})
accuracy_test += acc
loss_test += loss_test_batch
accuracy_test /= num_batches
loss_test /= num_batches
print("loss: {:.3f}, val_loss: {:.3f}, acc: {:.3f}, val_acc: {:.3f}".format(
loss_train, loss_test, accuracy_train, accuracy_test
))
c = list(zip(X_test[i],Y_test[i]))
shuffle(c)
X_test[i],Y_test[i]=zip(*c)
for j in xrange(0, len(X_test[i]), 10):
x_batch_test, y_batch_test = X_test[i][j:j+10], Y_test[i][j:j+10]
seq_len_test = np.array([list(x).index(0) + 1 for x in x_batch_test])
alphas_test, y_hatv = sess.run([alphas,y_hat], feed_dict={batch_ph: x_batch_test, target_ph: y_batch_test,seq_len_ph: seq_len_test, keep_prob_ph: 1.0})
pred=classifier(y_hatv)
for p,r in izip(pred,y_batch_test):
f.write(inv_stance_dict[p]+"\t"+inv_stance_dict[r]+"\n")
saver.save(sess, "model_"+ str(i))
f.close()
|
[
"vishal.ku86@gmail.com"
] |
vishal.ku86@gmail.com
|
3ba85f8874dcec36bc766b4fc1d150cb2916954c
|
56210435afa0c80f8d3d7a56de2c430b49826b73
|
/classify_fb.py
|
b4efc07e11d3d305c81209671f9be62ddd46c36f
|
[] |
no_license
|
iamKunal/gucci_gang
|
f6417b9fda989b76b41556fe2d0f7126a15e4e01
|
2ad8b7771c84cf93c14407aaa19a63ea482da66a
|
refs/heads/master
| 2021-03-19T17:42:10.292894
| 2018-03-22T20:04:11
| 2018-03-22T20:04:11
| 117,331,223
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,649
|
py
|
from facepy import GraphAPI
import time
from datetime import datetime
from calendar import timegm
import requests
import json
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
import sys
posts = []
images = []
videos = []
fb = {}
id_list = []
TMP_TOKEN = 'EAACEdEose0cBACAOvrU9C0hZALmVacUDgvWQW94kzw2ZBv9wvGcWqFphtulwLvxtSLaMjjtYEo0ilO9DZBvJr0cjKAImfN9FCkegxLtHyfvRygydbEQkf1f6VaVE1Ag6ZBm4TlzCvwwBZCxCdqGU8q9BfpljGh2WCuZCb9MZAVFs3WZAT3ApYGiDZCRZBCyaOqqXkP8aH7PTM6IgZDZD'
PER_TOKEN = 'EAACEdEose0cBACvYlc3kFWuOMotBFZBlCKkCmFoPt2BxT2GSzKAp5pqd7ImD7pku75rBft4ZCD9qDmDjhoiA7MoTrbkYpdNhAiZBssOQUy2A3g8vLZAyaOhn4eoSZBTaF4tMPe6uc1UvFhbVMZAPKMjNnN2SEbtxEYJoUjQ0dBXGsOTPyRIx1MD0ynZC0x2gJVMzUIWdRafvx2ugraX6gzR'
from HTMLParser import HTMLParser
from boilerpipe.extract import Extractor
def text_extractor(URL):
extractor = Extractor(extractor='ArticleExtractor', url=URL).getText()
return extractor
def classify(text):
try:
client = language.LanguageServiceClient()
document = types.Document(
content=text,
type=enums.Document.Type.PLAIN_TEXT)
response = client.classify_text(
document=document
)
return response.categories
except:
return [None]
class GGFB():
url = None
seconds = None
no_of_posts = None
time_gap = None
graph = GraphAPI(oauth_token=TMP_TOKEN, version='2.10')
posts = []
images = []
videos = []
fb = {}
def fun_feeds(self):
json = self.graph.get(self.url + '/feed?limit=' + str(self.no_of_posts) + '&since=' + str(
self.time_gap) + "&fields=created_time,reactions.limit(0).summary(total_count),comments.limit(0).summary(total_count),message,link")[
"data"]
for feed in json:
post = {}
post['reactions'] = int(feed["reactions"]["summary"]["total_count"])
post['comments'] = int(feed["comments"]["summary"]["total_count"])
post['timestamp'] = timegm(time.strptime(feed['created_time'], '%Y-%m-%dT%H:%M:%S+0000'))
post['caption'] = feed.get('message')
post['id'] = feed['id']
post['channel'] = "fb"
post["attachment"] = feed.get("link")
post['weight'] = 'None'
post['type'] = "post"
post['url'] = 'www.facebook.com' + str(feed['id'])
            post['embed'] = ('https://www.facebook.com/plugins/post.php?href=https%3A%2F%2Fwww.facebook.com%2F'
                             + self.url + '%2Fposts%2F' + str(feed['id'].split('_')[1]))
self.posts.append(post.copy())
def fun_images(self):
json = self.graph.get(self.url + '/photos?limit=' + str(self.no_of_posts) + '&since=' + str(
self.time_gap) + "&fields=created_time,reactions.limit(0).summary(total_count),comments.limit(0).summary(total_count),message,link")[
"data"]
for photo in json:
image = {}
image['reactions'] = int(photo['reactions']['summary']['total_count'])
image['comments'] = int(photo['comments']['summary']['total_count'])
image['timestamp'] = timegm(time.strptime(photo['created_time'], '%Y-%m-%dT%H:%M:%S+0000'))
image['id'] = photo['id']
image['channel'] = "fb"
image["attachment"] = photo.get('link')
image['weight'] = 'None'
image['type'] = 'photo'
image['url'] = 'www.facebook.com' + str(photo['id'])
self.images.append(image)
def fun_videos(self):
json = self.graph.get(self.url + '/videos?limit=' + str(self.no_of_posts) + '&since=' + str(
self.time_gap) + "&fields=created_time,reactions.limit(0).summary(total_count),comments.limit(0).summary(total_count),description,link")[
"data"]
for vid in json:
video = {}
video['reactions'] = int(vid['reactions']['summary']['total_count'])
video['comments'] = int(vid['comments']['summary']['total_count'])
video['timestamp'] = timegm(time.strptime(vid['created_time'], '%Y-%m-%dT%H:%M:%S+0000'))
            video['caption'] = vid.get('description')  ## very few captions are available
video['id'] = vid['id']
video["attachment"] = vid.get('link')
video['channel'] = "fb"
video['weight'] = 'None'
video['type'] = 'video'
video['url'] = 'www.facebook.com' + str(vid['id'])
            video['embed'] = ('https://www.facebook.com/plugins/video.php?href=https%3A%2F%2Fwww.facebook.com%2F'
                              + self.url + '%2Fvideos%2F' + str(vid['id']) + '%2F')
self.videos.append(video)
def __init__(self, url, seconds, no_of_posts):
self.posts = []
self.images = []
self.videos = []
self.fb = {}
self.url = url
self.seconds = seconds
self.no_of_posts = no_of_posts
self.time_gap = timegm(datetime.utcnow().utctimetuple()) - self.seconds
def fun_all(self):
self.fun_feeds()
self.fun_images()
self.fun_videos()
final_data = {'post': self.posts,
'photo': self.images,
'video': self.videos}
return final_data
def classify(self, text):
try:
client = language.LanguageServiceClient()
document = types.Document(
content=text,
type=enums.Document.Type.PLAIN_TEXT)
response = client.classify_text(
document=document
)
return response.categories[0].name
except:
return None
def f(x):
categories = classify(text_extractor(x["attachment"]))
print x["attachment"], categories
if __name__ == '__main__':
# start = 0
# while True:
# fb_class = GGFB('WittyFeed', 60 * 60 * 24 * 30, 25)
# final_data = fb_class.fun_all() ##this is to be finally returned
# for posts_class in final_data:
# # for post in final_data[posts_class]:
# # if bool(bool(post["id"] not in id_list) and bool(
# # post["attachment"] and len(post["attachment"].strip()) > 0)):
text = text_extractor(sys.argv[1])
# print text
categories = classify(text)
# print sys.argv[1], categories
for category in categories:
if (category):
print category.name.split('/')[-1]
|
[
"kunal.gupta@myself.com"
] |
kunal.gupta@myself.com
|
025f7c0a625c99f0fcba414638161fe8afd276dd
|
86e85c922fa1db10ec40ad816f35211335639cfe
|
/absolute/script/__main__.py
|
9e792fa28b039cbb6ebc541fda73c0d2d3656269
|
[] |
no_license
|
niconico25/import
|
c873c7da812d277b90d582ecd59100c4408614fe
|
2ddd6881c346161aa72fbfbca3d0da6503b0d246
|
refs/heads/master
| 2020-04-13T06:57:22.231143
| 2018-12-26T09:20:38
| 2018-12-26T09:20:38
| 163,035,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
# ~/import/absolute/script/__main__.py
import sys
print('---')
print(sys.path[0])
print(__file__)
print(__package__)
print(__name__)
import module_a
import sub_package
|
[
"shotaroh19850701@gmail.com"
] |
shotaroh19850701@gmail.com
|
c8cc54dc0e053b341498757a0bff85b8f3776b48
|
570c5e469188970b1192c03636ac01cc0d2c0a6a
|
/1327/A.py
|
435e6f1233ce996748015e82beca8cae7a0bf2a8
|
[] |
no_license
|
koustav-dhar/Competitive-Programming
|
f08d79d7eeaaa07b7fa1845e1b127e0eba53e37c
|
e375281861e5a7e1a0faa01296aa5183964ac014
|
refs/heads/master
| 2022-07-05T11:05:01.951609
| 2020-05-13T21:29:35
| 2020-05-13T21:29:35
| 263,733,180
| 1
| 0
| null | 2020-05-13T21:24:53
| 2020-05-13T20:12:50
|
C++
|
UTF-8
|
Python
| false
| false
| 486
|
py
|
# your code goes here
import math
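# For each test case the answer is "YES" exactly when m has the same parity as
# n and m is at most floor(sqrt(n)); otherwise it is "NO".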
for _ in range(int(input())):
n,m=map(int,input().split())
if n%2==0:
if m%2==0:
if m<=((int)(math.sqrt(n))):
print("YES")
else:
print("NO")
else:
print("NO")
else:
if m%2!=0:
if m<=((int)(math.sqrt(n))):
print("YES")
else:
print("NO")
else:
print("NO")
|
[
"noreply@github.com"
] |
koustav-dhar.noreply@github.com
|
217db26224589a242aa1990575abacb18a884ee4
|
4a3fa55358b9d70ffad6f2f7d152a2a6258adf41
|
/kutil.py
|
51fb8a6aa13b9ce7c45bed42ac61ab100b27f7e2
|
[] |
no_license
|
SooRyu7/git_test
|
6f9c8cfe55dae6f52f8a6fac194940e2d7676809
|
3ba055ee0139eea8e3ccd4258710522b2008a086
|
refs/heads/master
| 2023-04-12T15:38:55.284581
| 2021-05-21T06:52:23
| 2021-05-21T06:52:23
| 369,440,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 884
|
py
|
import random
def nrandom(start, end, n, duplicated=False):
    '''
    Generate n random integers between start and end and return them sorted.
    Arguments
        start: first integer of the range
        end: last integer of the range
        n: number of random values to generate
        duplicated: whether duplicates are allowed; defaults to no duplicates,
            if True duplicates are allowed
    '''
    lst = []  # list of random numbers to return
    if duplicated:
        for _ in range(n):
            lst.append(random.randint(start, end))  # e.g. 1 up to 45
    else:
        # sample n distinct values; range stops one short of its upper bound,
        # so end+1 is used to make the range include end
        lst = list(random.sample(range(start, end + 1), n))
    # sort everything before returning
    return sorted(lst)
if __name__ == '__main__':
    print('Lottery ticket: ', nrandom(1, 45, 6))
    print('Three dice rolls: ', nrandom(1, 6, 3, True))  # dice allow duplicates
|
[
"80309470+SooRyu7@users.noreply.github.com"
] |
80309470+SooRyu7@users.noreply.github.com
|
d6d90fa95dbb4368d84692d2aefe55f2d253c92d
|
9c5a76d423a79a9b926199099dfe2b676364ee7e
|
/get_features_for_sheet.py
|
94ef230bb7677a45bedae964394597b3f5486286
|
[] |
no_license
|
Gin93/excel_data_extraction_max_entropy
|
a9cff992ea552c02c161f4bed0f25f854cccd83f
|
2dd68e7c5127d0644c2eed8353dacc4bb11a8f89
|
refs/heads/master
| 2021-09-01T18:40:57.769238
| 2017-12-20T03:30:11
| 2017-12-20T03:30:11
| 114,588,698
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,298
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 13 16:43:04 2017
@author: cisdi
"""
import xlrd
import string
import pandas as pd
import csv
import os
import random
class Sheet:
def __init__(self, file_path):
try:
self.sheet = xlrd.open_workbook(file_path, formatting_info=True).sheets()[0]
except:
self.sheet = xlrd.open_workbook(file_path).sheets()[0]
self.rows = self.sheet.nrows
self.cols = self.sheet.ncols
self.merged = self.sheet.merged_cells
self.features = []
# self.neighbors = ['up','down','left','right','up2','down2','left2','right2','upl','upr','downl','downr']
self.neighbors = ['up','down','left','right','up2','down2','left2','right2']
    def filling(self):
        '''
        Fill every merged cell with the value of its top-left cell.
        A pseudo-fill is sufficient (a hedged sketch follows below):
            if merged:
                data = cell_value(x1, y1)
        '''
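    # A minimal sketch of the pseudo-fill described above (an assumption about
    # the intended behaviour, not part of the original class). It maps every
    # (row, col) inside a merged range to the value of the range's top-left
    # cell, so callers could consult the dict instead of the blank cells that
    # xlrd reports for merged regions.
    def filling_sketch(self):
        filled = {}
        for r1, r2, c1, c2 in self.merged:  # xlrd gives (rlo, rhi, clo, chi), upper bounds exclusive
            value = self.sheet.cell_value(r1, c1)  # the top-left cell holds the data
            for r in range(r1, r2):
                for c in range(c1, c2):
                    filled[(r, c)] = value
        return filled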
def selflens(self,data):
# l = len(str(data))
# if l <=3:
# return 'datashort'
# elif l <= 6:
# return 'datanormallen'
# elif l <= 9:
# return 'datalong'
# elif l <= 15:
# return 'dataverylong'
# else:
# return 'dataextremelylong'
count_en = count_dg = count_sp = count_zh = count_pu = 0
for c in str(data):
if c in string.ascii_letters:
count_en += 1
elif c.isdigit():
count_dg += 1
elif c.isspace():
count_sp += 1
elif c.isalpha():
count_zh += 1
else:
count_pu += 1
l = count_en + count_zh + count_pu
if l < 1:
return 'dataveryshort'
elif l == 1:
return 'datashort'
elif l <= 6:
return 'datanormallen'
elif l <= 12:
return 'datalong'
elif l <= 20:
return 'dataverylong'
else:
return 'dataextremelylong'
    def valid(self, x, y, rows, cols):  # could be optimised
if x < 0 or y < 0 or x > rows-1 or y > cols-1:
return False
return True
    def ismerged(self, x, y):
        '''
        Report whether cell (x, y) lies inside a merged region; if it does,
        also return the region's value, its size/type and the position of the
        cell within the region.
        '''
merged_cells = self.merged
for a,b,c,d in merged_cells:
if x >= a and x < b and y >= c and y < d:
r_s = b-a
c_s = d-c
space = r_s * c_s
# if space <= 4:
# m_type = 'small'
# elif c_s - r_s >= 5:
# m_type = 'verylong'
# else:
# m_type = 'huge'
if r_s == 1 and (c_s == 2 or c_s == 3):
m_type = 'shortlong'
elif c_s == 1 and (r_s == 2 or r_s == 3):
m_type = 'thin'
elif c_s - r_s >= 5:
m_type = 'verylong'
elif space <= 12:
m_type = 'midsize'
else:
m_type = 'huge'
            ### determine the cell's position within the merged region
            if c_s == 1: # a one-column vertical strip: features split into highup & highdown
if x == a:
m_pos = 'highup'
elif x == b - 1:
m_pos = 'highdown'
else:
m_pos = 'highmid'
            elif r_s == 1: # a one-row horizontal strip: features split into longleft & longright
if y == c:
m_pos = 'longleft'
elif y == d - 1:
m_pos = 'longright'
else:
m_pos = 'longmid'
else:
if(x,y) == (a,c):
m_pos = 'topleft'
elif(x,y) == (b-1,c):
m_pos = 'botleft'
elif(x,y) == (a,d-1):
m_pos = 'topright'
elif(x,y) == (b-1,d-1):
m_pos = 'botright'
else:
m_pos = 'middle'
return (True,self.sheet.cell_value(a,c),m_type,m_pos)
return (False,'','','')
    def cell_type(self, data):
        '''
        input: str, the current cell data
        output: str, the data type of this cell: positive integer, negative
        number, decimal, pure string, mix of letters and digits, contains
        special symbols, empty value, or empty value caused by merging
        (the cell length is handled separately and is used for feature
        extraction, not for training).
        Returned as one of integer/decimal/neg/string/mixed/special/null.
        Empty values outside the table are computed in a later function;
        merged cells are checked beforehand.
        '''
def mixed(a):
for i in a:
try:
float(i)
return True
except :
pass
return False
def puncuation(a):
for i in a:
if (str(i) in string.punctuation):
return True
return False
if data == '' or data == '\n':
return 'null'
if isinstance(data,int):
return 'integer'
elif isinstance(data,float):
return 'decimal'
try:
if float(data) < 0:
                return 'neg' # beyond this point negative numbers are no longer possible
if data.count('.') == 1:
return 'decimal'
else:
                return 'integer' # beyond this point there are no more plain numbers
except:#string/mixed/special
if puncuation(data):
return 'special'
elif mixed(data):
return 'mixed'
else:
return 'string'
def lens(self,x,y):
'''
input:str,str
output:str
'''
x = str(x)
y = str(y)
if len(x) == len(y):
return 'same'
elif 0 < len(x) - len(y) < 3 :
return 'more'
elif -3 < len(x) - len(y) < 0 :
return 'less'
elif len(x) - len(y) >= 3:
return 'muchmore'
elif len(x) - len(y) <= -3:
return 'muchless'
else:
print(x)
print(y)
print(len(x) - len(y))
print('asdasd')
def neighbor(self,location,row,col,rows,cols):
        #### determine which neighbouring position to look up
'''
f1: if valid
f2: if merged
f3: cell data type
'''
if location == 'up':
r = row - 2
c = col - 1
elif location == 'down':
r = row
c = col - 1
elif location == 'left':
r = row - 1
c = col - 2
elif location == 'right':
r = row - 1
c = col
elif location == 'up2':
r = row - 3
c = col - 1
elif location == 'down2':
r = row + 1
c = col - 1
elif location == 'left2':
r = row - 1
c = col - 3
elif location == 'right2':
r = row - 1
c = col + 1
elif location == 'upl':
r = row - 2
c = col - 2
elif location == 'upr':
r = row - 2
c = col
elif location == 'downl':
r = row
c = col - 2
elif location == 'downr':
r = row
c = col
        if self.valid(r, c, rows, cols):
            data = self.sheet.cell_value(r, c)  # data of the neighbour being looked up
            f1 = 'valid'
            center_data = self.sheet.cell_value(row - 1, col - 1)  # data of the centre cell
            boolean, merged_data, merged_type, merged_pos = self.ismerged(r, c)
            if boolean:  # the cell is part of a merged region
                f2 = 'merged'
                f3 = self.cell_type(merged_data)
                f5 = merged_type
                f6 = merged_pos
                # for merged cells, compare the cell that actually holds the data
                # against the centre cell
                f4 = self.lens(center_data, merged_data)
                f7 = self.selflens(merged_data)
            else:  # the cell is not merged
                f2 = 'single'
                f3 = self.cell_type(data)
                f5 = 'singletype'
                f6 = 'singlepos'
                f4 = self.lens(center_data, data)  # for non-merged cells, compare the cell itself against the centre
                f7 = self.selflens(data)
        else:
            f1 = f2 = f3 = f4 = f5 = f6 = f7 = 'invalid'
        ls = location
        return [ls + f1, ls + f2, ls + f3, ls + f5, ls + f6, ls + f7]  # f4 was removed
'''
def get_lens_features(self,f1,f2,location):
# input: feature1 feature2
# f1: current_cell
# f2: neighbor_cell
# output: the dif of lens
if f1 in f2:
return [location + self.lens(f1,f2)]
else:
return [location + 'diftype']
'''
    def get_features(self, current_pos):
        '''
        Given the position to extract features for, and its neighbours,
        return the feature list.
        current_pos: (row, col)
        neighbors: locations ['up', 'down', 'left', 'right', ...]
        '''
r , c = current_pos
r = r+1
c = c+1
f = []
        # f1 = self.cell_type(self.sheet.cell_value(r-1,c-1)) #### could be made tidier
boolean, merged_data,merged_type,merged_pos = self.ismerged(r-1,c-1)
if boolean:
f1 = self.cell_type(merged_data)
f2 = 'merged'
f3 = merged_type
f4 = merged_pos
f5 = self.selflens(merged_data)
else:
f1 = self.cell_type(self.sheet.cell_value(r-1,c-1))
f2 = f3 = f4 = 'single'
f5 = self.selflens(self.sheet.cell_value(r-1,c-1))
f = f + [f1,f2,f3,f4,f5]
neighbors = self.neighbors
for i in neighbors:
fff = self.neighbor(i,r,c,self.rows,self.cols)
f = f + fff
return f
    def get_features_map(self):  # could be optimised with a numpy array
        '''
        Extract the features of every position in the whole sheet.
        Features for empty values are not considered for now.
        '''
d = {}
for i in range(self.rows):
for j in range(self.cols):
d[(i,j)] = self.get_features( (i,j) )
return d
'''
Example usage (p is the path to an .xls workbook):
a = Sheet(p)
x = a.get_features_map()
'''
|
[
"noreply@github.com"
] |
Gin93.noreply@github.com
|
20e666c46149cfd35b42bf3ec8f0e913d5cc848c
|
e10cec4cc5d45a69c4f2eaa3d85565553458bcbe
|
/emptyeffi/tracker_resolution_plots_28.8196160799.py
|
37b5cf3a0b9dd0eee96c8d1600913a348b700cfa
|
[] |
no_license
|
jnugent42/mcsframework
|
7f3ba2c11b96175d014e53f61bea292a5e0e3b03
|
2c75f6e071229ab0585cf8d594fa26d7b5794388
|
refs/heads/master
| 2022-04-28T07:34:32.076223
| 2022-04-07T16:14:17
| 2022-04-07T16:14:17
| 91,683,266
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 76,003
|
py
|
#!/usr/bin/env python
# This file is part of MAUS: http://micewww.pp.rl.ac.uk/projects/maus
#
# MAUS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAUS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAUS. If not, see <http://www.gnu.org/licenses/>.
#
"""
This script loads tracker Recon and MC data and compares the two to produce
plots of the reconstruction resolution and residuals.
Script Algorithm:
- Create Virtual Plane - Tracker Plane lookup
- Load Recon and MC Event
- Find trackpoints in each tracker plane
- Look for the nearest virtual planes in Z
- Create lookup dictionary
- Analyse all events
- Bin Recon histograms
- Bin Residual histograms
- Bin residuals in bins of Pt
- Bin residuals in bins of Pz
- Calculate resolutions from histograms
- Save all plots to a single root file (options PDF output)
"""
# pylint: disable = W0311, E1101, W0613, W0621, C0103, C0111, W0702, W0611
# pylint: disable = R0914, R0912, R0915, W0603, W0612, C0302
# Import MAUS Framework (Required!)
import MAUS
# Generic Python imports
import sys
import os
import argparse
import math
from math import sqrt
import array
# Third Party library import statements
import json
import event_loader
import analysis
from analysis import tools
from analysis import covariances
from analysis import hit_types
import ROOT
# Useful Constants and configuration
RECON_STATION = 1
RECON_PLANE = 0
SEED_STATION = 1
SEED_PLANE = 0
EXPECTED_STRAIGHT_TRACKPOINTS = 9
EXPECTED_HELIX_TRACKPOINTS = 12
REQUIRE_DATA = True
P_VALUE_CUT = 0.0
MUON_PID = [13, -13]
RECON_TRACKERS = [0, 1]
REQUIRE_ALL_PLANES = True
P_MIN = 0.0
P_MAX = 1000.0
MAX_GRADIENT = 2.0
PT_MIN = 0.0
PT_MAX = 100.0
PT_BIN = 10
PT_BIN_WIDTH = 10.0
PZ_MIN = 120.0
PZ_MAX = 260.0
PZ_BIN = 14
PZ_BIN_WIDTH = 10.0
ALIGNMENT_TOLERANCE = 0.01
RESOLUTION_BINS = 10
EFFICIENCY_BINS = 10
TRACK_ALGORITHM = 1
ENSEMBLE_SIZE = 2000
TOF_ul = 29.0196160799
TOF_ll = 28.8196160799
#TOF_ul = 28.5365596446
#TOF_ll = 28.3365596446
meanp = 90
sigmap = 0.012
#UP_COV_MC = covariances.CovarianceMatrix()
#DOWN_COV_MC = covariances.CovarianceMatrix()
#UP_COV_RECON = covariances.CovarianceMatrix()
#DOWN_COV_RECON = covariances.CovarianceMatrix()
UP_COV_MC = []
DOWN_COV_MC = []
UP_COV_RECON = []
DOWN_COV_RECON = []
UP_CORRECTION = covariances.CorrectionMatrix()
DOWN_CORRECTION = covariances.CorrectionMatrix()
VIRTUAL_PLANE_DICT = None
INVERSE_PLANE_DICT = {}
TRACKER_PLANE_RADIUS = 150.0
SELECT_EVENTS = False
GOOD_EVENTS = None
def get_pz_bin(pz) :
offset = pz - PZ_MIN
return int(offset/PZ_BIN_WIDTH)
def init_plots_data() :
"""
    Initialise all the plots in a dictionary that is passed around to the
    other functions.
"""
global UP_COV_MC
global DOWN_COV_MC
global UP_COV_RECON
global DOWN_COV_RECON
global PZ_BIN
global PT_BIN
PZ_BIN = int(((PZ_MAX-PZ_MIN) / PZ_BIN_WIDTH) + 0.5)
PT_BIN = int(((PT_MAX-PT_MIN) / PT_BIN_WIDTH) + 0.5)
UP_COV_MC = [ covariances.CovarianceMatrix() for _ in range(PZ_BIN) ]
DOWN_COV_MC = [ covariances.CovarianceMatrix() for _ in range(PZ_BIN) ]
UP_COV_RECON = [ covariances.CovarianceMatrix() for _ in range(PZ_BIN) ]
DOWN_COV_RECON = [ covariances.CovarianceMatrix() for _ in range(PZ_BIN) ]
plot_dict = {'upstream' : {}, 'downstream' : {}, \
'missing_tracks' : {}, 'pulls' : {}}
for tracker in [ 'upstream', 'downstream' ] :
tracker_dict = {}
tracker_dict['ntp'] = ROOT.TH1F(tracker+'_ntp', \
'No. TrackPoints', 15, 0.5, 15.5 )
tracker_dict['xy'] = ROOT.TH2F( tracker+'_xy', \
'Position', 500, -200.0, 200.0, 500, -200.0, 200.0 )
tracker_dict['pxpy'] = ROOT.TH2F(tracker+'_pxpy', \
'Momentum', 500, -200.0, 200.0, 500, -200.0, 200.0 )
tracker_dict['pt'] = ROOT.TH1F( tracker+'_pt', \
                           'Transverse Momentum', 500, -0.0, 200.0 )
tracker_dict['pz'] = ROOT.TH1F( tracker+'_pz', \
'Longitudinal Momentum', 500, 100.0, 300.0 )
tracker_dict['L'] = ROOT.TH1F( tracker+'_L', \
'Angular Momentum', 1000, -25000.0, 25000.0 )
tracker_dict['L_canon'] = ROOT.TH1F( tracker+'_L_canon', \
'Canonical Angular Momentum', 1000, -25000.0, 25000.0 )
tracker_dict['L_r'] = ROOT.TH2F( tracker+'_L_r', "L in r", \
6000, -30000.0, 30000.0, 300, 0.0, 200.0 )
tracker_dict['L_canon_r'] = ROOT.TH2F( \
tracker+'_L_canon_r', "L_{canon} in r", \
6000, -30000.0, 30000.0, 300, 0.0, 200.0 )
tracker_dict['mc_xy'] = ROOT.TH2F( tracker+'_mc_xy', \
'MC Position', 500, -200.0, 200.0, 500, -200.0, 200.0 )
tracker_dict['mc_pxpy'] = ROOT.TH2F( tracker+'_mc_pxpy', \
'MC Momentum', 500, -200.0, 200.0, 500, -200.0, 200.0 )
tracker_dict['mc_pt'] = ROOT.TH1F( tracker+'_mc_pt', \
                           'MC Transverse Momentum', 500, -0.0, 200.0 )
tracker_dict['mc_pz'] = ROOT.TH1F( tracker+'_mc_pz', \
'MC Longitudinal Momentum', 500, 100.0, 300.0 )
tracker_dict['mc_L'] = ROOT.TH1F( tracker+'_mc_L', \
'MC Angular Momentum', 1000, -25000.0, 25000.0 )
tracker_dict['mc_L_canon'] = ROOT.TH1F( tracker+'_mc_L_canon', \
'MC Canonical Angular Momentum', 1000, -25000.0, 25000.0 )
tracker_dict['mc_L_r'] = ROOT.TH2F( tracker+'_mc_L_r', "L_{mc} in r", \
6000, -30000.0, 30000.0, 300, 0.0, 200.0 )
tracker_dict['mc_L_canon_r'] = ROOT.TH2F( \
tracker+'_mc_L_canon_r', "L_{canon} in r", \
6000, -30000.0, 30000.0, 300, 0.0, 200.0 )
tracker_dict['residual_xy'] = ROOT.TH2F( tracker+'_residual_xy', \
'Residual Position', 800, -20.0, 20.0, 800, -20.0, 20.0 )
tracker_dict['residual_mxmy'] = ROOT.TH2F( tracker+'_residual_mxmy', \
'Residual Gradient', 500, -0.5, 0.5, 500, -0.5, 0.5 )
tracker_dict['residual_pxpy'] = ROOT.TH2F( tracker+'_residual_pxpy', \
'Residual Momentum', 500, -50.0, 50.0, 500, -50.0, 50.0 )
tracker_dict['residual_pt'] = ROOT.TH1F( tracker+'_residual_pt', \
"p_{t} Residuals", 500, -50.0, 50.0 )
tracker_dict['residual_pz'] = ROOT.TH1F( tracker+'_residual_pz', \
"p_{z} Residuals", 500, -50.0, 50.0 )
tracker_dict['residual_L'] = ROOT.TH1F( tracker+'_residual_L', \
"L Residuals", 1000, -1000.0, 1000.0 )
tracker_dict['residual_L_canon'] = ROOT.TH1F( tracker+'_residual_L_canon', \
"L Residuals", 1000, -1000.0, 1000.0 )
tracker_dict['ntp_pt'] = ROOT.TH2F( \
tracker+'_ntp_pt', "No. Trackpoints in P_{t}", \
PT_BIN, PT_MIN, PT_MAX, 15, 0.5, 15.5 )
tracker_dict['ntp_mc_pt'] = ROOT.TH2F( \
tracker+'_ntp_mc_pt', "No. MC Trackpoints in P_{t}", \
PT_BIN, PT_MIN, PT_MAX, 15, 0.5, 15.5 )
tracker_dict['ntp_pz'] = ROOT.TH2F( \
tracker+'_ntp_pz', "No. Trackpoints in P_{z}", \
PZ_BIN, PZ_MIN, PZ_MAX, 15, 0.5, 15.5 )
tracker_dict['ntp_mc_pz'] = ROOT.TH2F( \
tracker+'_ntp_mc_pz', "No. MC Trackpoints in P_{z}", \
PZ_BIN, PZ_MIN, PZ_MAX, 15, 0.5, 15.5 )
tracker_dict['trackpoint_efficiency'] = ROOT.TEfficiency( \
tracker+'_trackpoint_efficiency', \
"Track Point Efficiency in P_{z} and P_{#perp}", \
PZ_BIN, PZ_MIN, PZ_MAX, PT_BIN, PT_MIN, PT_MAX )
tracker_dict['trackpoint_efficiency_pt'] = ROOT.TEfficiency( \
tracker+'_trackpoint_efficiency_pt', \
"Track Point Efficiency in P_{#perp}", \
PT_BIN, PT_MIN, PT_MAX )
tracker_dict['trackpoint_efficiency_pz'] = ROOT.TEfficiency( \
tracker+'_trackpoint_efficiency_pz', \
"Track Point Efficiency in P_z", \
PZ_BIN, PZ_MIN, PZ_MAX )
tracker_dict['ntracks_pt'] = ROOT.TH1F( \
tracker+'_ntracks_pt', "No. Tracks in P_{#perp}", \
PT_BIN, PT_MIN, PT_MAX )
tracker_dict['ntracks_mc_pt'] = ROOT.TH1F( \
tracker+'_ntracks_mc_pt', "No. MC Tracks in P_{#perp}", \
PT_BIN, PT_MIN, PT_MAX )
tracker_dict['ntracks_pz'] = ROOT.TH1F( \
tracker+'_ntracks_pz', "No. Tracks in P_{z}", \
PZ_BIN, PZ_MIN, PZ_MAX )
tracker_dict['ntracks_mc_pz'] = ROOT.TH1F( \
tracker+'_ntracks_mc_pz', "No. MC Tracks in P_{z}", \
PZ_BIN, PZ_MIN, PZ_MAX )
tracker_dict['track_efficiency'] = ROOT.TEfficiency( \
tracker+'_track_efficiency', "Track Efficiency in P_z and P_{#perp}", \
PZ_BIN, PZ_MIN, PZ_MAX, PT_BIN, PT_MIN, PT_MAX )
tracker_dict['track_efficiency_pt'] = ROOT.TEfficiency( \
tracker+'_track_efficiency_pt', "Track Efficiency in P_{#perp}", \
PT_BIN, PT_MIN, PT_MAX )
tracker_dict['track_efficiency_pz'] = ROOT.TEfficiency( \
tracker+'_track_efficiency_pz', "Track Efficiency in P_z", \
PZ_BIN, PZ_MIN, PZ_MAX )
tracker_dict['track_efficiency_L_canon'] = ROOT.TEfficiency( \
tracker+'_track_efficiency_L_canon', "Track Efficiency in L_{canon}", \
200, -100.0, 100.0 )
tracker_dict['L_residual_r'] = ROOT.TH2F( \
tracker+'_L_residual_r', "L Residuals in r", \
1000, -250.0, 250.0, 300, 0.0, 150.0 )
tracker_dict['L_canon_residual_r'] = ROOT.TH2F( \
tracker+'_L_canon_residual_r', "L_{canon} Residuals in r", \
1000, -250.0, 250.0, 300, 0.0, 150.0 )
tracker_dict['x_residual_p'] = ROOT.TH2F( \
tracker+'_x_residual_p', "X Residuals in p", \
PZ_BIN, PZ_MIN, PZ_MAX, 500, -20.0, 20.0 )
tracker_dict['y_residual_p'] = ROOT.TH2F( \
tracker+'_y_residual_p', "Y Residuals in p", \
PZ_BIN, PZ_MIN, PZ_MAX, 500, -20.0, 20.0 )
tracker_dict['r_residual_p'] = ROOT.TH2F( \
tracker+'_r_residual_p', "Radius Residuals in p", \
PZ_BIN, PZ_MIN, PZ_MAX, 500, 0.0, 50.0 )
tracker_dict['px_residual_p'] = ROOT.TH2F( \
tracker+'_px_residual_p', "p_{x} Residuals in p", \
PZ_BIN, PZ_MIN, PZ_MAX, 500, -50.0, 50.0 )
tracker_dict['py_residual_p'] = ROOT.TH2F( \
tracker+'_py_residual_p', "p_{y} Residuals in p", \
PZ_BIN, PZ_MIN, PZ_MAX, 500, -50.0, 50.0 )
tracker_dict['pt_residual_p'] = ROOT.TH2F( \
                      tracker+'_pt_residual_p', "p_{t} Residuals in p", \
PZ_BIN, PZ_MIN, PZ_MAX, 500, -50.0, 50.0 )
tracker_dict['pz_residual_p'] = ROOT.TH2F( \
tracker+'_pz_residual_p', "p_{z} Residuals in p", \
PZ_BIN, PZ_MIN, PZ_MAX, 500, -50.0, 50.0 )
tracker_dict['p_residual_p'] = ROOT.TH2F( \
tracker+'_p_residual_p', "p Residuals in p", \
PZ_BIN, PZ_MIN, PZ_MAX, 500, -50.0, 50.0 )
tracker_dict['x_residual_pt'] = ROOT.TH2F( \
tracker+'_x_residual_pt', "X Residuals in p_{t}", \
PT_BIN, PT_MIN, PT_MAX, 500, -20.0, 20.0 )
tracker_dict['y_residual_pt'] = ROOT.TH2F( \
tracker+'_y_residual_pt', "Y Residuals in p_{t}", \
PT_BIN, PT_MIN, PT_MAX, 500, -20.0, 20.0 )
tracker_dict['r_residual_pt'] = ROOT.TH2F( \
tracker+'_r_residual_pt', "Radius Residuals in p_{t}", \
PT_BIN, PT_MIN, PT_MAX, 500, 0.0, 50.0 )
tracker_dict['px_residual_pt'] = ROOT.TH2F( \
tracker+'_px_residual_pt', "p_{x} Residuals in p_{t}", \
PT_BIN, PT_MIN, PT_MAX, 500, -50.0, 50.0 )
tracker_dict['py_residual_pt'] = ROOT.TH2F( \
tracker+'_py_residual_pt', "p_{y} Residuals in p_{t}", \
PT_BIN, PT_MIN, PT_MAX, 500, -50.0, 50.0 )
tracker_dict['pt_residual_pt'] = ROOT.TH2F( \
tracker+'_pt_residual_pt', "p_{t} Residuals in p_{t}", \
PT_BIN, PT_MIN, PT_MAX, 500, -50.0, 50.0 )
tracker_dict['pz_residual_pt'] = ROOT.TH2F( \
tracker+'_pz_residual_pt', "p_{z} Residuals in p_{t}", \
PT_BIN, PT_MIN, PT_MAX, 500, -50.0, 50.0 )
tracker_dict['p_residual_pt'] = ROOT.TH2F( \
tracker+'_p_residual_pt', "p Residuals in p_{t}", \
PT_BIN, PT_MIN, PT_MAX, 500, -50.0, 50.0 )
tracker_dict['x_residual_pz'] = ROOT.TH2F( \
tracker+'_x_residual_pz', "X Residuals in p_{z}", \
PZ_BIN, PZ_MIN, PZ_MAX, 500, -20.0, 20.0 )
tracker_dict['y_residual_pz'] = ROOT.TH2F( \
tracker+'_y_residual_pz', "Y Residuals in p_{z}", \
PZ_BIN, PZ_MIN, PZ_MAX, 500, -20.0, 20.0 )
tracker_dict['r_residual_pz'] = ROOT.TH2F( \
tracker+'_r_residual_pz', "Radius Residuals in p_{z}", \
PZ_BIN, PZ_MIN, PZ_MAX, 500, 0.0, 50.0 )
tracker_dict['mx_residual_pz'] = ROOT.TH2F( \
tracker+'_mx_residual_pz', "m_{x} Residuals in p_{z}", \
PZ_BIN, PZ_MIN, PZ_MAX, 500, -0.5, 0.5 )
tracker_dict['my_residual_pz'] = ROOT.TH2F( \
tracker+'_my_residual_pz', "m_{y} Residuals in p_{z}", \
PZ_BIN, PZ_MIN, PZ_MAX, 500, -0.5, 0.5 )
tracker_dict['px_residual_pz'] = ROOT.TH2F( \
tracker+'_px_residual_pz', "p_{x} Residuals in p_{z}", \
PZ_BIN, PZ_MIN, PZ_MAX, 500, -50.0, 50.0 )
tracker_dict['py_residual_pz'] = ROOT.TH2F( \
tracker+'_py_residual_pz', "p_{y} Residuals in p_{z}", \
PZ_BIN, PZ_MIN, PZ_MAX, 500, -50.0, 50.0 )
tracker_dict['pt_residual_pz'] = ROOT.TH2F( \
tracker+'_pt_residual_pz', "p_{t} Residuals in p_{z}", \
PZ_BIN, PZ_MIN, PZ_MAX, 500, -50.0, 50.0 )
tracker_dict['pz_residual_pz'] = ROOT.TH2F( \
tracker+'_pz_residual_pz', "p_{z} Residuals in p_{z}", \
PZ_BIN, PZ_MIN, PZ_MAX, 500, -50.0, 50.0 )
tracker_dict['p_residual_pz'] = ROOT.TH2F( \
tracker+'_p_residual_pz', "p Residuals in pz", \
PZ_BIN, PZ_MIN, PZ_MAX, 500, -50.0, 50.0 )
tracker_dict['mc_alpha'] = ROOT.TH2F( tracker+'_mc_alpha', \
"MC Alpha Reconstruction Pz", PZ_BIN, PZ_MIN, PZ_MAX, \
200, -2.0, 2.0 )
tracker_dict['mc_beta'] = ROOT.TH2F( tracker+'_mc_beta', \
"MC Beta Reconstruction Pz", PZ_BIN, PZ_MIN, PZ_MAX, \
1000, 0.0, 2500.0 )
tracker_dict['mc_emittance'] = ROOT.TH2F( tracker+'_mc_emittance', \
"MC Emittance Reconstruction Pz", PZ_BIN, PZ_MIN, PZ_MAX, \
500, 0.0, 20.0 )
tracker_dict['mc_momentum'] = ROOT.TH2F( \
tracker+'_mc_momentum', "MC Momentum Pz", \
PZ_BIN, PZ_MIN, PZ_MAX, 200, -10.0, 10.0 )
tracker_dict['recon_alpha'] = ROOT.TH2F( tracker+'_recon_alpha', \
"Alpha Reconstruction Pz", PZ_BIN, PZ_MIN, PZ_MAX, \
200, -2.0, 2.0 )
tracker_dict['recon_beta'] = ROOT.TH2F( tracker+'_recon_beta', \
"Beta Reconstruction Pz", PZ_BIN, PZ_MIN, PZ_MAX, \
1000, 0.0, 2500.0 )
tracker_dict['recon_emittance'] = ROOT.TH2F( \
tracker+'_recon_emittance', "Emittance Reconstruction Pz", \
PZ_BIN, PZ_MIN, PZ_MAX, 500, 0.0, 20.0 )
tracker_dict['recon_momentum'] = ROOT.TH2F( \
tracker+'_recon_momentum', "Recon Momentum Pz", \
PZ_BIN, PZ_MIN, PZ_MAX, 200, -10.0, 10.0 )
tracker_dict['residual_alpha'] = ROOT.TH2F( \
tracker+'_residual_alpha', "Alpha Residual Pz", PZ_BIN, \
PZ_MIN, PZ_MAX, 200, -1.0, 1.0 )
tracker_dict['residual_beta'] = ROOT.TH2F( \
tracker+'_residual_beta', "Beta Residual Pz", \
PZ_BIN, PZ_MIN, PZ_MAX, 200, -100.0, 100.0 )
tracker_dict['residual_emittance'] = ROOT.TH2F( \
tracker+'_residual_emittance', "Emittance Residual Pz", \
PZ_BIN, PZ_MIN, PZ_MAX, 200, -10.0, 10.0 )
tracker_dict['residual_momentum'] = ROOT.TH2F( \
tracker+'_residual_momentum', "Momentum Residual Pz", \
PZ_BIN, PZ_MIN, PZ_MAX, 200, -10.0, 10.0 )
for component in ['x', 'y', 'px', 'py', 'pt'] :
tracker_dict['seed_'+component+'_residual'] = \
ROOT.TH1F( tracker+'_patrec_seed_'+component+'_residual', \
"Residual: "+component, 201, -10.05, 10.05 )
tracker_dict['seed_mx_residual'] = ROOT.TH1F( \
tracker+'_patrec_seed_mx_residual', "Residual: m_{x}", 501, -0.5, 0.5 )
tracker_dict['seed_my_residual'] = ROOT.TH1F( \
tracker+'_patrec_seed_my_residual', "Residual: m_{y}", 501, -0.5, 0.5 )
tracker_dict['seed_pz_residual'] = ROOT.TH1F( \
tracker+'_patrec_seed_pz_residual', "Residual: pz", 501, -50.1, 50.1 )
tracker_dict['seed_p_residual'] = ROOT.TH1F( \
tracker+'_patrec_seed_p_residual', "Residual: p", 501, -50.1, 50.1 )
tracker_dict['seed_pz_residual_pz'] = ROOT.TH2F( \
tracker+'_patrec_seed_pz-pz', "True p_{z} - Seed p_{z}", \
PZ_BIN, PZ_MIN, PZ_MAX, 200, -50.0, 50.0 )
tracker_dict['seed_pt_residual_pt'] = ROOT.TH2F( \
tracker+'_patrec_seed_pt-pt', "True p_{#perp} - Seed p_{#perp}", \
PT_BIN, PT_MIN, PT_MAX, 200, -50.0, 50.0 )
tracker_dict['seed_pz_residual_pt'] = ROOT.TH2F( \
tracker+'_patrec_seed_pz-pt', "True p_{z} - Seed p_{#perp}", \
PT_BIN, PT_MIN, PT_MAX, 200, -50.0, 50.0 )
tracker_dict['seed_pt_residual_pz'] = ROOT.TH2F( \
tracker+'_patrec_seed_pt-pz', "True p_{#perp} - Seed p_{z}", \
PZ_BIN, PZ_MIN, PZ_MAX, 200, -50.0, 50.0 )
tracker_dict['seed_p_residual_p'] = ROOT.TH2F( \
tracker+'_patrec_seed_p-p', "True p - Seed p", \
PZ_BIN, PZ_MIN, PZ_MAX, 200, -50.0, 50.0 )
tracker_dict['recon_theta_x'] = ROOT.TH1F(tracker+'_recon_theta_x', \
'recon_theta_x', 47, -0.0705, 0.0705 )
tracker_dict['MC_theta_x'] = ROOT.TH1F(tracker+'_MC_theta_x', \
'MC_theta_x', 47, -0.0705, 0.0705 )
tracker_dict['efficiency_scat_x'] = ROOT.TEfficiency()
tracker_dict['recon_theta_y'] = ROOT.TH1F(tracker+'_recon_theta_y', \
'recon_theta_y', 47, -0.0705, 0.0705 )
tracker_dict['MC_theta_y'] = ROOT.TH1F(tracker+'_MC_theta_Y', \
'MC_theta_y', 47, -0.0705, 0.0705 )
tracker_dict['efficiency_scat_y'] = ROOT.TH1F(tracker+'efficiency_y', \
'efficiency_Y', 47, -0.0705, 0.0705 )
tracker_dict['recon_theta_scatt'] = ROOT.TH1F(tracker+'_recon_theta_scatt', \
'recon_theta_scatt', 47, 0., 0.0705 )
tracker_dict['MC_theta_scatt'] = ROOT.TH1F(tracker+'_MC_theta_scatt', \
'MC_theta_scatt', 47, 0., 0.0705 )
tracker_dict['efficiency_scat_scatt'] = ROOT.TH1F(tracker+'efficiency_scatt', \
'efficiency_scatt', 47, 0., 0.0705 )
tracker_dict['recon_theta_2scatt'] = ROOT.TH1F(tracker+'_recon_theta_2scatt', \
'recon_theta_2scatt', 47, 0., 0.004 )
tracker_dict['MC_theta_2scatt'] = ROOT.TH1F(tracker+'_MC_theta_2scatt', \
'MC_theta_2scatt', 47, 0., 0.004 )
tracker_dict['efficiency_scat_2scatt'] = ROOT.TH1F(tracker+'efficiency_2scatt', \
'efficiency_2scatt', 47, 0., 0.004 )
plot_dict[tracker] = tracker_dict
missing_tracks = {}
for tracker in [ 'upstream', 'downstream' ] :
missing_tracker = {}
missing_tracker['x_y'] = ROOT.TH2F(tracker+'_x_y_missing', \
"Missing Tracks x:y", 400, -200.0, 200.0, 400, -200.0, 200.0 )
missing_tracker['px_py'] = ROOT.TH2F(tracker+'_px_py_missing', \
"Missing Tracks p_{x}:p_{y}", 400, -200.0, 200.0, 400, -200.0, 200.0 )
missing_tracker['x_px'] = ROOT.TH2F(tracker+'_x_px_missing', \
"Missing Tracks x:p_{x}", 400, -200.0, 200.0, 400, -200.0, 200.0 )
missing_tracker['y_py'] = ROOT.TH2F(tracker+'_y_py_missing', \
"Missing Tracks y:p_{y}", 400, -200.0, 200.0, 400, -200.0, 200.0 )
missing_tracker['x_py'] = ROOT.TH2F(tracker+'_x_py_missing', \
"Missing Tracks x:p_{y}", 400, -200.0, 200.0, 400, -200.0, 200.0 )
missing_tracker['y_px'] = ROOT.TH2F(tracker+'_y_px_missing', \
"Missing Tracks y:p_{x}", 400, -200.0, 200.0, 400, -200.0, 200.0 )
missing_tracker['pt'] = ROOT.TH1F(tracker+'_pt_missing', \
"Missing Tracks pt", PT_BIN, PT_MIN, PT_MAX )
missing_tracker['pz'] = ROOT.TH1F(tracker+'_pz_missing', \
"Missing Tracks pz", PZ_BIN, PZ_MIN, PZ_MAX )
missing_tracker['pz_pt'] = ROOT.TH2F(tracker+'_pz_pt_missing', \
"Missing Tracks pz", PZ_BIN, PZ_MIN, PZ_MAX, PT_BIN, PT_MIN, PT_MAX )
missing_tracks[tracker] = missing_tracker
plot_dict['missing_tracks'] = missing_tracks
for pl_id in range( -15, 0 ) + range( 1, 16 ) :
pull_plot_name = 'kalman_pulls_{0:02d}'.format(pl_id)
plot_dict['pulls'][pull_plot_name] = ROOT.TH1F( \
pull_plot_name, "Kalman Pulls", 101, -5.05, 5.05 )
data_dict = { 'counters' : {'upstream' : {}, 'downstream' : {} }, \
'data' : {} }
data_dict['counters']['number_events'] = 0
for tracker in ['upstream', 'downstream'] :
data_dict['counters'][tracker]['number_virtual'] = 0
data_dict['counters'][tracker]['missing_virtuals'] = 0
data_dict['counters'][tracker]['number_tracks'] = 0
data_dict['counters'][tracker]['number_candidates'] = 0
data_dict['counters'][tracker]['found_tracks'] = 0
data_dict['counters'][tracker]['wrong_track_type'] = 0
data_dict['counters'][tracker]['p_value_cut'] = 0
data_dict['counters'][tracker]['superfluous_track_events'] = 0
data_dict['counters'][tracker]['missing_tracks'] = 0
data_dict['counters'][tracker]['missing_reference_hits'] = 0
data_dict['counters'][tracker]['momentum_cut'] = 0
data_dict['counters'][tracker]['gradient_cut'] = 0
data_dict['counters'][tracker]['found_pairs'] = 0
return plot_dict, data_dict
def create_virtual_plane_dict(file_reader) :
"""
Matches up scifitrackpoints to virtual planes to make a lookup dictionary
"""
virtual_plane_dict = {}
for num in range( -15, 0, 1 ) :
virtual_plane_dict[ num ] = ( -1, (ALIGNMENT_TOLERANCE * 100.0) )
for num in range( 1, 16, 1 ) :
virtual_plane_dict[ num ] = ( -1, (ALIGNMENT_TOLERANCE * 100.0) )
while file_reader.next_event() :
scifi_event = file_reader.get_event( 'scifi' )
mc_event = file_reader.get_event( 'mc' )
tracks = scifi_event.scifitracks()
for track in tracks :
if track.tracker() not in RECON_TRACKERS :
continue
trackpoints = track.scifitrackpoints()
for trkpt in trackpoints :
z_pos = trkpt.pos().z()
plane_id = analysis.tools.calculate_plane_id(\
trkpt.tracker(), trkpt.station(), trkpt.plane())
for vhit_num in xrange(mc_event.GetVirtualHitsSize()) :
vhit = mc_event.GetAVirtualHit(vhit_num)
diff = math.fabs(vhit.GetPosition().z() - z_pos)
if diff < virtual_plane_dict[ plane_id ][1] :
virtual_plane_dict[ plane_id ] = ( vhit.GetStationId(), diff )
done = True
for tracker in RECON_TRACKERS :
for station in [1, 2, 3, 4, 5] :
for plane in [0, 1, 2] :
plane_id = analysis.tools.calculate_plane_id( \
tracker, station, plane )
if virtual_plane_dict[plane_id][1] > ALIGNMENT_TOLERANCE :
#print plane_id, virtual_plane_dict[plane_id]
done = False
if done :
break
else :
if REQUIRE_ALL_PLANES :
print
print virtual_plane_dict
raise ValueError("Could not locate all virtuals planes")
file_reader.reset()
return virtual_plane_dict
def inverse_virtual_plane_dict(virtual_plane_dict) :
"""
Create the inverse lookup.
"""
inverse_dict = {}
for num in range( -15, 0, 1 ) :
inverse_dict[virtual_plane_dict[num][0]] = num
for num in range( 1, 16, 1 ) :
inverse_dict[virtual_plane_dict[num][0]] = num
return inverse_dict
def get_expected_tracks(mc_event, virtual_plane_dict, scifi_event, tofevent) :
upstream_planes = [ virtual_plane_dict[i][0] for i in range(-15, 0)]
downstream_planes = [ virtual_plane_dict[i][0] for i in range(1, 16)]
upstream_track = None
downstream_track = None
upstream_hits = {}
downstream_hits = {}
u = []
w = []
d = []
USX = 0
USY = 0
tof0 = 0
tof1 = 0
tof2 = 0
rawTOF2HitTime = -1
rawTOF1HitTime = -1
rawTOF0HitTime = -1
if len(tofevent.GetTOFEventSpacePoint().GetTOF2SpacePointArray()) > 0 :
rawTOF2HitTime = tofevent.GetTOFEventSpacePoint().GetTOF2SpacePointArray()[0].GetTime()
if len(tofevent.GetTOFEventSpacePoint().GetTOF1SpacePointArray()) > 0 :
rawTOF1HitTime = tofevent.GetTOFEventSpacePoint().GetTOF1SpacePointArray()[0].GetTime()
if len(tofevent.GetTOFEventSpacePoint().GetTOF0SpacePointArray()) > 0 :
    rawTOF0HitTime = tofevent.GetTOFEventSpacePoint().GetTOF0SpacePointArray()[0].GetTime()
dt = 100.0
if rawTOF0HitTime != -1 and rawTOF1HitTime != -1 :
dt = rawTOF1HitTime - rawTOF0HitTime
jUS=-1
jDS=-1
kUS=-1
kDS=-1
abspos = 0
phi = 0
zdiff = 0
xabs = 0
yabs = 0
thX = 0
thY = 0
tof0 = 0
tof1 = 0
for vhit_num in xrange(mc_event.GetVirtualHitsSize()) :
vhit = mc_event.GetAVirtualHit(vhit_num)
tofhit = mc_event.GetTOFHits()
for i in xrange(len(tofhit)) :
if tofhit[i].GetPosition().Z() - 5287 < 20 and tofhit[i].GetPosition().Z() - 5287 > -20 :
tof0 = tofhit[i].GetTime()
if tofhit[i].GetPosition().Z() - 12929 < 20 and tofhit[i].GetPosition().Z() - 12929 > -20 :
tof1 = tofhit[i].GetTime()
if tofhit[i].GetPosition().Z() - 21138 < 20 and tofhit[i].GetPosition().Z() - 21138 > -20 :
tof2 = tofhit[i].GetTime()
if (tof1-tof0) < TOF_ul and (tof1-tof0) > TOF_ll :
station_id = vhit.GetStationId()
if station_id in upstream_planes :
plane_id = INVERSE_PLANE_DICT[station_id]
upstream_hits[plane_id] = vhit
if station_id in downstream_planes :
plane_id = INVERSE_PLANE_DICT[station_id]
downstream_hits[plane_id] = vhit
# print station_id, vhit.GetPosition().Z()
if station_id == 46 :
kUS = 1
jUS = 1
USdXdz = vhit.GetMomentum().Px()/vhit.GetMomentum().Pz()
USdYdz = vhit.GetMomentum().Py()/vhit.GetMomentum().Pz()
USnorm = 1./sqrt(1 + USdXdz*USdXdz + USdYdz*USdYdz)
USX = vhit.GetPosition().X()
USY = vhit.GetPosition().Y()
USZ = vhit.GetPosition().Z()
u = [USdXdz*USnorm, USdYdz*USnorm, USnorm]
w = [-u[0]*u[1], (u[0]*u[0] + u[2]*u[2]), -u[1]*u[2]]
Wnorm = 1./sqrt(w[0]*w[0] + w[1]*w[1] + w[2]*w[2])
w[0] *= Wnorm
w[1] *= Wnorm
w[2] *= Wnorm
abspos = 16952.5
zdiff = math.fabs(16952.5 - USZ)
USXproj = (USdXdz) * zdiff + USX
USYproj = (USdYdz) * zdiff + USY
#z0 = abspos+549.95
z0 = 19948.8
phi = math.atan2(USdYdz, USdXdz)
zdiff = math.fabs(z0 - 16952.5)
#USXproj = (USdXdz + sigmap*math.cos(phi)) * zdiff + USXproj
#USYproj = (USdYdz + sigmap*math.sin(phi)) * zdiff + USYproj
USXproj = USdXdz * zdiff + USXproj
USYproj = USdYdz * zdiff + USYproj
zdi = 13620-USZ
USXdiff = (USdXdz) * zdi + USX
USYdiff = (USdYdz) * zdi + USY
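      # The block below projects the downstream MC direction d onto the
      # orthonormal frame (u, w) built from the upstream direction, giving the
      # projected scattering angles theta_x and theta_y and the total space
      # angle theta_scatt.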
if (station_id == 55 and len(w)==3 and len(u)==3 and sqrt(USXproj*USXproj + USYproj*USYproj)<meanp and sqrt(USXdiff*USXdiff + USYdiff*USYdiff)<90) :
projTheta = []
DSdXdz = vhit.GetMomentum().Px()/vhit.GetMomentum().Pz()
DSdYdz = vhit.GetMomentum().Py()/vhit.GetMomentum().Pz()
DSnorm = 1./sqrt(1 + DSdXdz*DSdXdz + DSdYdz*DSdYdz)
d = [DSdXdz*DSnorm, DSdYdz*DSnorm, DSnorm]
projTheta.append( math.atan( (d[0]*w[0] + d[1]*w[1] + d[2]*w[2])/(d[0]*u[0] + d[1]*u[1] + d[2]*u[2]) ))
        projTheta.append( math.atan( (d[0]*u[2] - u[0]*d[2])/(d[0]*u[0] + d[1]*u[1] + d[2]*u[2])*1./sqrt(u[2]*u[2] + u[0]*u[0])) )
projTheta.append( math.acos( ( (1 + USdXdz * DSdXdz + USdYdz * DSdYdz )/
sqrt(1 + USdXdz*USdXdz + USdYdz*USdYdz)/
sqrt(1 + DSdXdz*DSdXdz + DSdYdz*DSdYdz))) )
# if (sqrt(projTheta[0]*projTheta[0]+projTheta[1]*projTheta[1])<0.190):
if (sqrt(projTheta[0]*projTheta[0]+projTheta[1]*projTheta[1])<0.075):
plot_dict['downstream']['MC_theta_y'].Fill(projTheta[1])
plot_dict['downstream']['MC_theta_x'].Fill(projTheta[0])
plot_dict['downstream']['MC_theta_scatt'].Fill(projTheta[2])
plot_dict['downstream']['MC_theta_2scatt'].Fill(projTheta[2]*projTheta[2])
if (len(scifi_event.scifitracks()) == 2):
'''
X = scifi_event.scifitracks()[0].scifitrackpoints()[0].pos().x()
Y = scifi_event.scifitracks()[0].scifitrackpoints()[0].pos().y()
Z = scifi_event.scifitracks()[0].scifitrackpoints()[0].pos().z()
dXdz = scifi_event.scifitracks()[0].scifitrackpoints()[0].gradient().x() + math.tan(math.atan(1.)/45.0)
dYdz = scifi_event.scifitracks()[0].scifitrackpoints()[0].gradient().y() + math.tan(math.atan(1.)/45.0)
pz = pz
px = scifi_event.scifitracks()[0].scifitrackpoints()[0].mom().x() + math.tan(math.atan(1.)/45.0) * pz
py = scifi_event.scifitracks()[0].scifitrackpoints()[0].mom().y() + math.tan(math.atan(1.)/45.0) * pz
abspos = 16952.5
phi = math.atan2(dYdz, dXdz)
z0 = abspos+549.95
dXdz += sigmap*math.cos(phi)
dYdz += sigmap*math.sin(phi)
px += sigmap*math.cos(phi)*pz
py += sigmap*math.sin(phi)*pz
X = px/pz * (z0 - Z) + X
Y = py/pz * (z0 - Z) + Y
Z = z0
#if ( sqrt(X*X + Y*Y) < 140):
'''
plot_dict['downstream']['recon_theta_y'].Fill(projTheta[1])
plot_dict['downstream']['recon_theta_x'].Fill(projTheta[0])
plot_dict['downstream']['recon_theta_scatt'].Fill(projTheta[2])
plot_dict['downstream']['recon_theta_2scatt'].Fill(projTheta[2]*projTheta[2])
u = []
w = []
d = []
if TRACK_ALGORITHM == 1 :
if len(upstream_hits) > EXPECTED_HELIX_TRACKPOINTS :
upstream_track = upstream_hits
if len(downstream_hits) > EXPECTED_HELIX_TRACKPOINTS :
downstream_track = downstream_hits
elif TRACK_ALGORITHM == 0 :
if len(upstream_hits) > EXPECTED_STRAIGHT_TRACKPOINTS :
upstream_track = upstream_hits
if len(downstream_hits) > EXPECTED_STRAIGHT_TRACKPOINTS :
downstream_track = downstream_hits
else:
raise ValueError("Unknown track algorithm found!")
return upstream_track, downstream_track
def calculate_efficiency(plot_dict) :
"""
    Scattering analysis efficiency plots and numbers.
"""
t1 = ROOT.TText(0.28,0.185,"MICE preliminary [simulation]")
t2 = ROOT.TText(0.28,0.15,"ISIS Cycle 2015/04")
t1.SetNDC(1)
t1.SetTextSize(0.04)
t1.SetTextFont(42)
t2.SetNDC(1)
t2.SetTextSize(0.03)
t2.SetTextFont(42)
f = ROOT.TFile("tracker_resolution_plots_"+str(TOF_ll)+".root","RECREATE")
pEffx = ROOT.TEfficiency()
pEffx = ROOT.TEfficiency(plot_dict['downstream']['recon_theta_x'],plot_dict['downstream']['MC_theta_x'])
c5 = ROOT.TCanvas()
plot_dict['downstream']['efficiency_scat_x'] = pEffx.CreateGraph()
pEffx_graph = pEffx.CreateGraph()
pEffx_graph.SetName("Effx_graph")
#pEffx_graph.SetTitle("Acceptance plot #theta_x")
pEffx_graph.GetXaxis().SetTitle("#theta_{x} (mrad)")
pEffx_graph.GetYaxis().SetTitle("Efficiency")
pEffx_graph.Draw("ap")
f1 = ROOT.TF1("f1","pol2",-0.040,0.040)
#f1.SetParameters(1,1)
#pEffx_graph.Fit("f1","R")
#pEffx_graph.SetRangeUser(-0.60,0.60)
t1.Draw("same")
t2.Draw("same")
t1.Paint()
t2.Paint()
c5.SaveAs("pEff_x.pdf")
pEffx_graph.Write()
pEffy = ROOT.TEfficiency()
pEffy = ROOT.TEfficiency(plot_dict['downstream']['recon_theta_y'],plot_dict['downstream']['MC_theta_y'])
c7 = ROOT.TCanvas()
plot_dict['downstream']['efficiency_scat_y'] = pEffy.CreateGraph()
pEffy_graph = pEffy.CreateGraph()
pEffy_graph.SetName("Effy_graph")
#pEffy_graph.SetTitle("Acceptance plot #theta_y")
pEffy_graph.GetXaxis().SetTitle("#theta_{y} (mrad)")
pEffy_graph.GetYaxis().SetTitle("Efficiency")
pEffy_graph.Draw("ap")
#pEffy_graph.Fit("f1","R")
t1.Draw("same")
t2.Draw("same")
t1.Paint()
t2.Paint()
c7.SaveAs("pEff_y.pdf")
pEffy_graph.Write()
pEffscatt = ROOT.TEfficiency()
pEffscatt = ROOT.TEfficiency(plot_dict['downstream']['recon_theta_scatt'],plot_dict['downstream']['MC_theta_scatt'])
c17 = ROOT.TCanvas()
plot_dict['downstream']['efficiency_scat_scatt'] = pEffscatt.CreateGraph()
pEffscatt.Draw()
c17.SaveAs("pEff_scatt.pdf")
pEffscatt_graph = pEffscatt.CreateGraph()
pEffscatt_graph.SetName("Effscatt_graph")
pEffscatt_graph.Write()
pEff2scatt = ROOT.TEfficiency()
pEff2scatt = ROOT.TEfficiency(plot_dict['downstream']['recon_theta_2scatt'],plot_dict['downstream']['MC_theta_2scatt'])
c17 = ROOT.TCanvas()
plot_dict['downstream']['efficiency_scat_2scatt'] = pEff2scatt.CreateGraph()
pEff2scatt.Draw()
c17.SaveAs("pEff_2scatt.pdf")
pEff2scatt_graph = pEff2scatt.CreateGraph()
pEff2scatt_graph.SetName("Eff2scatt_graph")
pEff2scatt_graph.Write()
f.Close()
'''
c3 = ROOT.TCanvas()
plot_dict['downstream']['efficiency_scat'].Divide(plot_dict['downstream']['recon_theta'].Draw(),ROOT.TH1(plot_dict['downstream']['MC_theta'].Draw()))
plot_dict['downstream']['recon_theta'].Draw()
line = ROOT.TLine(-0.0705,1,0.0705,1)
line.SetLineColor(22)
line.Draw()
c3.SaveAs('effi.pdf')
'''
c1 = ROOT.TCanvas()
plot_dict['downstream']['recon_theta_x'].Draw()
plot_dict['downstream']['MC_theta_x'].Draw("SAMES")
plot_dict['downstream']['MC_theta_x'].SetLineColor(2)
plot_dict['downstream']['MC_theta_x'].SetLineStyle(2)
c1.SaveAs('recon_theta.pdf')
c1.Clear()
c4= ROOT.TCanvas()
#recon_theta_hist = plot_dict['downstream']['recon_theta'].Draw()
#recon_theta_hist.Sumw2()
#c4.SaveAs('recon_theta_hist.pdf')
c2 = ROOT.TCanvas()
MC_theta = plot_dict['downstream']['MC_theta_x'].Draw()
c2.SaveAs('MC_theta.pdf')
#plot_dict['downstream']['efficiency_scat'] = recon_theta_hist
#plot_dict['downstream']['efficiency_scat'].Divide(MC_theta)
#plot_dict['downstream']['efficiency_scat'].Draw()
def get_found_tracks(scifi_event, plot_dict, data_dict, tofevent) :
"""
Find all the single tracks that pass the cuts.
"""
upstream_tracks = []
downstream_tracks = []
tracks = scifi_event.scifitracks()
for track in tracks :
if track.tracker() == 0 :
tracker = "upstream"
else :
tracker = "downstream"
data_dict['counters'][tracker]['number_tracks'] += 1
if track.GetAlgorithmUsed() != TRACK_ALGORITHM :
data_dict['counters'][tracker]['wrong_track_type'] += 1
continue
if track.P_value() < P_VALUE_CUT :
data_dict['counters'][tracker]['p_value_cut'] += 1
continue
data_dict['counters'][tracker]['number_candidates'] += 1
if track.tracker() == 0 :
upstream_tracks.append(track)
if track.tracker() == 1 :
downstream_tracks.append(track)
if len(upstream_tracks) > 1 :
data_dict['counters']['upstream']['superfluous_track_events'] += 1
if len(downstream_tracks) > 1 :
data_dict['counters']['downstream']['superfluous_track_events'] += 1
if len(upstream_tracks) == 1 :
upstream_track = upstream_tracks[0]
data_dict['counters']['upstream']['found_tracks'] += 1
else :
upstream_track = None
if len(downstream_tracks) == 1 :
downstream_track = downstream_tracks[0]
data_dict['counters']['downstream']['found_tracks'] += 1
else :
downstream_track = None
rawTOF1HitTime = -1
rawTOF0HitTime = -1
if len(tofevent.GetTOFEventSpacePoint().GetTOF1SpacePointArray()) > 0 :
rawTOF1HitTime = tofevent.GetTOFEventSpacePoint().GetTOF1SpacePointArray()[0].GetTime()
if len(tofevent.GetTOFEventSpacePoint().GetTOF0SpacePointArray()) > 0 :
rawTOF0HitTime = tofevent.GetTOFEventSpacePoint().GetTOF0SpacePointArray()[0].GetTime()
dt = 100.0
if rawTOF1HitTime != -1 and rawTOF0HitTime != -1 :
dt = rawTOF1HitTime - rawTOF0HitTime
if dt < TOF_ul and dt > TOF_ll :
jUS=-1
jDS=-1
kUS=-1
kDS=-1
thX = 0
thY = 0
j = 0
for track in tracks:
maxUS=0.0
minDS=44000
tracker = track.tracker()
trkpoints = track.scifitrackpoints()
for trkpoint in trkpoints :
zpos = trkpoint.pos().z()
if(tracker==0 and zpos > maxUS):
maxUS = zpos
kUS = 1
jUS = 1
USdXdz = trkpoint.gradient().x() + math.tan(thX*math.atan(1.)/45.0)
USdYdz = trkpoint.gradient().y() + math.tan(thY*math.atan(1.)/45.0)
if(tracker==1 and zpos < minDS):
minDS = zpos
kDS = 1
jDS = 1
DSdXdz = trkpoint.gradient().x() + math.tan(thX*math.atan(1.)/45.0)
DSdYdz = trkpoint.gradient().y() + math.tan(thY*math.atan(1.)/45.0)
if (jUS != -1 and kUS != -1 and jDS != -1 and kDS != -1) :
projTheta = []
USnorm = 1./sqrt(1 + USdXdz*USdXdz + USdYdz*USdYdz)
u = [USdXdz*USnorm, USdYdz*USnorm, USnorm]
w = [-u[0]*u[1], (u[0]*u[0] + u[2]*u[2]), -u[1]*u[2]]
Wnorm = 1./sqrt(w[0]*w[0] + w[1]*w[1] + w[2]*w[2])
w[0] *= Wnorm
w[1] *= Wnorm
w[2] *= Wnorm
DSnorm = 1./sqrt(1 + DSdXdz*DSdXdz + DSdYdz*DSdYdz)
d = [DSdXdz*DSnorm, DSdYdz*DSnorm, DSnorm]
projTheta.append( math.atan( (d[0]*w[0] + d[1]*w[1] + d[2]*w[2])/
(d[0]*u[0] + d[1]*u[1] + d[2]*u[2]) ))
projTheta.append( math.atan( (d[0]*u[2] - u[0]*d[2])\
/(d[0]*u[0] + d[1]*u[1] + d[2]*u[2]) *
1./sqrt(u[2]*u[2] + u[0]*u[0])) )
projTheta.append( math.acos( ( (1 + USdXdz * DSdXdz + USdYdz * DSdYdz )/
sqrt(1 + USdXdz*USdXdz + USdYdz*USdYdz)/
sqrt(1 + DSdXdz*DSdXdz + DSdYdz*DSdYdz))) )
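# Reading of the geometry above (explanatory note, not from the original
# author): u is the unit vector along the upstream track direction, w is a
# unit vector constructed orthogonal to u, and d is the unit vector along the
# downstream track. projTheta[0] and projTheta[1] are the two projected
# scattering angles (atan of a transverse component of d over its component
# along u), while projTheta[2] is the full space scattering angle, acos(u.d).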
#print "projTheta[0]",projTheta[0]
#plot_dict['downstream']['recon_theta'].Fill(projTheta[0])
return upstream_track, downstream_track
def make_scifi_mc_pairs(plot_dict, data_dict, virtual_plane_dict, \
scifi_event, mc_event, tofevent) :
"""
Make pairs of SciFiTrackpoints and MC VirtualHits
"""
paired_hits = []
paired_seeds = []
expected_up, expected_down = get_expected_tracks(mc_event, virtual_plane_dict, scifi_event, tofevent)
found_up, found_down = get_found_tracks(scifi_event, plot_dict, data_dict, tofevent)
downstream_pt = 0.0
downstream_pz = 0.0
data_dict['counters']['number_events'] += 1
for tracker_num, tracker, scifi_track, virtual_track in \
[ (0, "upstream", found_up, expected_up), \
(1, "downstream", found_down, expected_down) ] :
if virtual_track is None :
continue
ref_plane = tools.calculate_plane_id(tracker_num,
RECON_STATION, RECON_PLANE)
seed_plane = tools.calculate_plane_id(tracker_num,
SEED_STATION, SEED_PLANE)
virtual_pt = 0.0
virtual_pz = 0.0
virtual_field = 0.0
virtual_charge = 0.0
virtual_L = 0.0
virtual_X_0 = 0.0
virtual_Y_0 = 0.0
virtual_radius = 0.0
virtual_hits = 0
scifi_hits = 0
seed_virt = None
reference_virt = None
reference_scifi = None
for plane in virtual_track :
if virtual_track[plane] is not None :
hit = virtual_track[plane]
px = hit.GetMomentum().x()
py = hit.GetMomentum().y()
pt = math.sqrt( px**2 + py**2)
field = hit.GetBField().z() * 1000.0
q = hit.GetCharge()
virtual_pt += pt
virtual_pz += hit.GetMomentum().z()
virtual_field += field
virtual_charge += q
virtual_L += hit.GetPosition().x()*py - \
hit.GetPosition().y()*px
if field != 0:
virtual_radius += pt/(q*field)
virtual_X_0 += hit.GetPosition().x() - py / (q*field)
virtual_Y_0 += hit.GetPosition().y() + px / (q*field)
virtual_hits += 1
if plane == ref_plane :
reference_virt = virtual_track[plane]
if plane == seed_plane :
seed_virt = virtual_track[plane]
virtual_pt /= virtual_hits
virtual_pz /= virtual_hits
virtual_field /= virtual_hits
virtual_charge /= virtual_hits
virtual_L /= virtual_hits
virtual_p = math.sqrt( virtual_pt**2 + virtual_pz**2 )
virtual_X_0 /= virtual_hits
virtual_Y_0 /= virtual_hits
virtual_radius /= virtual_hits
if field != 0:
rho = virtual_pt / (virtual_charge*virtual_field)
else:
rho = 0
C = virtual_charge*(virtual_field * rho*rho) / 2.0
virtual_L_canon = virtual_L + C
# virtual_L_canon = virtual_radius**2 - (virtual_X_0**2 + virtual_Y_0**2)
# print
# print
# print virtual_pt
# print virtual_pz
# print virtual_field
# print virtual_charge
# print virtual_L
# print virtual_L_canon
# print virtual_X_0
# print virtual_Y_0
# print virtual_radius
# print
# print
if virtual_p > P_MAX or virtual_p < P_MIN :
data_dict['counters'][tracker]['momentum_cut'] += 1
continue
elif virtual_pt / virtual_p > MAX_GRADIENT :
data_dict['counters'][tracker]['gradient_cut'] += 1
continue
else :
data_dict['counters'][tracker]['number_virtual'] += 1
plot_dict[tracker]['ntracks_mc_pt'].Fill( virtual_pt )
plot_dict[tracker]['ntracks_mc_pz'].Fill( virtual_pz )
plot_dict[tracker]['ntp_mc_pt'].Fill( virtual_pt, virtual_hits )
plot_dict[tracker]['ntp_mc_pz'].Fill( virtual_pz, virtual_hits )
if scifi_track is None :
plot_dict[tracker]['track_efficiency'].Fill(False, virtual_pz, virtual_pt)
plot_dict[tracker]['track_efficiency_pt'].Fill(False, virtual_pt)
plot_dict[tracker]['track_efficiency_pz'].Fill(False, virtual_pz)
data_dict['counters'][tracker]['missing_tracks'] += 1
# for i in range(virtual_hits) :
# plot_dict[tracker]['trackpoint_efficiency'].Fill(False, virtual_pz,\
# virtual_pt)
# plot_dict[tracker]['trackpoint_efficiency_pt'].Fill(False, virtual_pt)
# plot_dict[tracker]['trackpoint_efficiency_pz'].Fill(False, virtual_pz)
if reference_virt is not None :
plot_dict['missing_tracks'][tracker]['x_y'].Fill( \
reference_virt.GetPosition().x(), reference_virt.GetPosition().y())
plot_dict['missing_tracks'][tracker]['px_py'].Fill( \
reference_virt.GetMomentum().x(), reference_virt.GetMomentum().y())
plot_dict['missing_tracks'][tracker]['x_px'].Fill( \
reference_virt.GetPosition().x(), reference_virt.GetMomentum().x())
plot_dict['missing_tracks'][tracker]['y_py'].Fill( \
reference_virt.GetPosition().y(), reference_virt.GetMomentum().y())
plot_dict['missing_tracks'][tracker]['x_py'].Fill( \
reference_virt.GetPosition().x(), reference_virt.GetMomentum().y())
plot_dict['missing_tracks'][tracker]['y_px'].Fill( \
reference_virt.GetPosition().y(), reference_virt.GetMomentum().x())
plot_dict['missing_tracks'][tracker]['pz'].Fill( virtual_pz )
plot_dict['missing_tracks'][tracker]['pt'].Fill( virtual_pt )
plot_dict['missing_tracks'][tracker]['pz_pt'].Fill( \
virtual_pz, virtual_pt )
continue # Can't do anything else without a scifi track
for scifi_hit in scifi_track.scifitrackpoints() :
if scifi_hit.has_data() :
scifi_hits += 1
pl_id = analysis.tools.calculate_plane_id(scifi_hit.tracker(), \
scifi_hit.station(), scifi_hit.plane())
plot_name = 'kalman_pulls_{0:02d}'.format(pl_id)
plot_dict['pulls'][plot_name].Fill( scifi_hit.pull() )
if scifi_hit.station() == RECON_STATION and \
scifi_hit.plane() == RECON_PLANE :
reference_scifi = scifi_hit
plot_dict[tracker]['track_efficiency'].Fill(True, virtual_pz, virtual_pt)
plot_dict[tracker]['track_efficiency_pt'].Fill(True, virtual_pt)
plot_dict[tracker]['track_efficiency_pz'].Fill(True, virtual_pz)
plot_dict[tracker]['track_efficiency_L_canon'].Fill(True, virtual_L_canon)
plot_dict[tracker]['ntracks_pt'].Fill( virtual_pt )
plot_dict[tracker]['ntracks_pz'].Fill( virtual_pz )
plot_dict[tracker]['ntp'].Fill( scifi_hits )
plot_dict[tracker]['ntp_pt'].Fill( virtual_pt, scifi_hits )
plot_dict[tracker]['ntp_pz'].Fill( virtual_pz, scifi_hits )
if scifi_hits >= virtual_hits :
for i in range(virtual_hits) :
plot_dict[tracker]['trackpoint_efficiency'].Fill(True, \
virtual_pz, virtual_pt)
plot_dict[tracker]['trackpoint_efficiency_pt'].Fill(True, virtual_pt)
plot_dict[tracker]['trackpoint_efficiency_pz'].Fill(True, virtual_pz)
else :
for i in range( virtual_hits - scifi_hits ) :
plot_dict[tracker]['trackpoint_efficiency'].Fill(False, \
virtual_pz, virtual_pt)
plot_dict[tracker]['trackpoint_efficiency_pt'].Fill(False, virtual_pt)
plot_dict[tracker]['trackpoint_efficiency_pz'].Fill(False, virtual_pz)
for i in range( scifi_hits ) :
plot_dict[tracker]['trackpoint_efficiency'].Fill(True, \
virtual_pz, virtual_pt)
plot_dict[tracker]['trackpoint_efficiency_pt'].Fill(True, virtual_pt)
plot_dict[tracker]['trackpoint_efficiency_pz'].Fill(True, virtual_pz)
if reference_virt is None :
data_dict['counters'][tracker]['missing_virtuals'] += 1
if reference_scifi is None :
data_dict['counters'][tracker]['missing_reference_hits'] += 1
if reference_virt is not None and reference_scifi is not None :
paired_hits.append( (reference_scifi, reference_virt) )
data_dict['counters'][tracker]['found_pairs'] += 1
if seed_virt is not None and scifi_track is not None :
paired_seeds.append( (scifi_track, seed_virt))
return paired_hits, paired_seeds
def fill_plots(plot_dict, data_dict, hit_pairs) :
"""
Fill Plots with Track and Residual Data
"""
for scifi_hit, virt_hit in hit_pairs :
tracker_num = scifi_hit.tracker()
pz_bin = get_pz_bin( virt_hit.GetMomentum().z() )
if pz_bin >= PZ_BIN or pz_bin < 0 :
continue
mc_cov = None
recon_cov = None
correction_matrix = None
if tracker_num == 0 :
tracker = 'upstream'
mc_cov = UP_COV_MC[pz_bin]
recon_cov = UP_COV_RECON[pz_bin]
correction_matrix = UP_CORRECTION
else :
tracker = 'downstream'
mc_cov = DOWN_COV_MC[pz_bin]
recon_cov = DOWN_COV_RECON[pz_bin]
correction_matrix = DOWN_CORRECTION
tracker_plots = plot_dict[tracker]
mc_cov.add_hit(hit_types.AnalysisHit(virtual_track_point=virt_hit))
recon_cov.add_hit(hit_types.AnalysisHit(scifi_track_point=scifi_hit))
correction_matrix.add_hit(\
hit_types.AnalysisHit(scifi_track_point=scifi_hit), \
hit_types.AnalysisHit(virtual_track_point=virt_hit))
scifi_pos = [scifi_hit.pos().x(), scifi_hit.pos().y(), scifi_hit.pos().z()]
scifi_mom = [scifi_hit.mom().x(), scifi_hit.mom().y(), scifi_hit.mom().z()]
virt_pos = [virt_hit.GetPosition().x(), \
virt_hit.GetPosition().y(), virt_hit.GetPosition().z()]
virt_mom = [virt_hit.GetMomentum().x(), \
virt_hit.GetMomentum().y(), virt_hit.GetMomentum().z()]
res_pos = [ scifi_pos[0] - virt_pos[0], \
scifi_pos[1] - virt_pos[1], \
scifi_pos[2] - virt_pos[2] ]
res_mom = [ scifi_mom[0] - virt_mom[0], \
scifi_mom[1] - virt_mom[1], \
scifi_mom[2] - virt_mom[2] ]
res_gra = [ scifi_mom[0]/scifi_mom[2] - virt_mom[0]/virt_mom[2], \
scifi_mom[1]/scifi_mom[2] - virt_mom[1]/virt_mom[2] ]
Pt_mc = math.sqrt( virt_mom[0] ** 2 + virt_mom[1] ** 2 )
Pz_mc = virt_mom[2]
P_mc = math.sqrt(Pz_mc**2 +Pt_mc**2)
L_mc = virt_pos[0]*virt_mom[1] - virt_pos[1]*virt_mom[0]
Pt_recon = math.sqrt( scifi_mom[0] ** 2 + scifi_mom[1] ** 2 )
P_recon = math.sqrt(Pt_recon**2 + scifi_mom[2]**2)
L_recon = scifi_pos[0]*scifi_mom[1] - scifi_pos[1]*scifi_mom[0]
Pt_res = Pt_recon - Pt_mc
P_res = P_recon - P_mc
B_field = virt_hit.GetBField().z() * 1000.0
q = virt_hit.GetCharge()
rho = sqrt(scifi_pos[0]**2 + scifi_pos[1]**2)
rho_mc = sqrt(virt_pos[0]**2 + virt_pos[1]**2)
C = q*(B_field * rho*rho) / 2.0
C_mc = q*(B_field * rho_mc*rho_mc) / 2.0
tracker_plots['xy'].Fill(scifi_pos[0], scifi_pos[1])
tracker_plots['pxpy'].Fill(scifi_mom[0], scifi_mom[1])
tracker_plots['pt'].Fill(Pt_recon)
tracker_plots['pz'].Fill(scifi_mom[2])
tracker_plots['L'].Fill(L_recon)
tracker_plots['L_canon'].Fill(L_recon + C)
tracker_plots['mc_xy'].Fill(virt_pos[0], virt_pos[1])
tracker_plots['mc_pxpy'].Fill(virt_mom[0], virt_mom[1])
tracker_plots['mc_pt'].Fill(Pt_mc)
tracker_plots['mc_pz'].Fill(Pz_mc)
tracker_plots['mc_L'].Fill(L_mc)
tracker_plots['mc_L_canon'].Fill(L_mc + C_mc)
tracker_plots['L_r'].Fill( L_recon, sqrt(scifi_pos[0]**2 + scifi_pos[1]**2))
tracker_plots['L_canon_r'].Fill( L_recon+C,
sqrt(scifi_pos[0]**2 + scifi_pos[1]**2))
tracker_plots['mc_L_r'].Fill( L_mc, sqrt(virt_pos[0]**2 + virt_pos[1]**2))
tracker_plots['mc_L_canon_r'].Fill( L_mc+C_mc, \
sqrt(virt_pos[0]**2 + virt_pos[1]**2))
tracker_plots['residual_xy'].Fill(res_pos[0], res_pos[1])
tracker_plots['residual_pxpy'].Fill(res_mom[0], res_mom[1])
tracker_plots['residual_mxmy'].Fill(res_gra[0], res_gra[1])
tracker_plots['residual_pt'].Fill(Pt_res)
tracker_plots['residual_pz'].Fill(res_mom[2])
tracker_plots['residual_L'].Fill(L_recon-L_mc)
tracker_plots['residual_L_canon'].Fill((L_recon+C)-(L_mc+C_mc))
tracker_plots['L_residual_r'].Fill( L_recon-L_mc, \
sqrt(virt_pos[0]**2 + virt_pos[1]**2))
tracker_plots['L_canon_residual_r'].Fill( (L_recon+C)-(L_mc+C_mc), \
sqrt(virt_pos[0]**2 + virt_pos[1]**2))
tracker_plots['x_residual_pt'].Fill( Pt_mc, res_pos[0] )
tracker_plots['y_residual_pt'].Fill( Pt_mc, res_pos[1] )
tracker_plots['r_residual_pt'].Fill( Pt_mc, \
sqrt(res_pos[0]**2 + res_pos[1]**2) )
tracker_plots['px_residual_pt'].Fill( Pt_mc, res_mom[0] )
tracker_plots['py_residual_pt'].Fill( Pt_mc, res_mom[1] )
tracker_plots['pt_residual_pt'].Fill( Pt_mc, Pt_res )
tracker_plots['pz_residual_pt'].Fill( Pt_mc, res_mom[2] )
tracker_plots['p_residual_pt'].Fill( Pt_mc, P_res )
tracker_plots['x_residual_p'].Fill( P_mc, res_pos[0] )
tracker_plots['y_residual_p'].Fill( P_mc, res_pos[1] )
tracker_plots['r_residual_p'].Fill( P_mc, \
sqrt(res_pos[0]**2 + res_pos[1]**2) )
tracker_plots['px_residual_p'].Fill( P_mc, res_mom[0] )
tracker_plots['py_residual_p'].Fill( P_mc, res_mom[1] )
tracker_plots['pt_residual_p'].Fill( P_mc, Pt_res )
tracker_plots['pz_residual_p'].Fill( P_mc, res_mom[2] )
tracker_plots['p_residual_p'].Fill( P_mc, P_res )
tracker_plots['x_residual_pz'].Fill( Pz_mc, res_pos[0] )
tracker_plots['y_residual_pz'].Fill( Pz_mc, res_pos[1] )
tracker_plots['r_residual_pz'].Fill( Pz_mc, \
sqrt(res_pos[0]**2 + res_pos[1]**2) )
tracker_plots['mx_residual_pz'].Fill( Pz_mc, res_gra[0] )
tracker_plots['my_residual_pz'].Fill( Pz_mc, res_gra[1] )
tracker_plots['px_residual_pz'].Fill( Pz_mc, res_mom[0] )
tracker_plots['py_residual_pz'].Fill( Pz_mc, res_mom[1] )
tracker_plots['pt_residual_pz'].Fill( Pz_mc, Pt_res )
tracker_plots['pz_residual_pz'].Fill( Pz_mc, res_mom[2] )
tracker_plots['p_residual_pz'].Fill( Pz_mc, P_res )
if mc_cov.length() == ENSEMBLE_SIZE :
pz = mc_cov.get_mean('pz')
tracker_plots['mc_alpha'].Fill(pz, mc_cov.get_alpha(['x','y']))
tracker_plots['mc_beta'].Fill(pz, mc_cov.get_beta(['x','y']))
tracker_plots['mc_emittance'].Fill(pz, mc_cov.get_emittance(\
['x','px','y','py']))
tracker_plots['mc_momentum'].Fill(pz, mc_cov.get_momentum())
tracker_plots['recon_alpha'].Fill(pz, recon_cov.get_alpha(\
['x','y']))
tracker_plots['recon_beta'].Fill(pz, recon_cov.get_beta(\
['x','y']))
tracker_plots['recon_emittance'].Fill(pz, \
recon_cov.get_emittance(['x','px','y','py']))
tracker_plots['recon_momentum'].Fill(pz, \
recon_cov.get_momentum())
tracker_plots['residual_alpha'].Fill(pz, \
recon_cov.get_alpha(['x','y']) - mc_cov.get_alpha(['x','y']))
tracker_plots['residual_beta'].Fill(pz, \
recon_cov.get_beta(['x','y']) - mc_cov.get_beta(['x','y']))
tracker_plots['residual_emittance'].Fill(pz, \
recon_cov.get_emittance(['x','px','y','py']) - \
mc_cov.get_emittance(['x','px','y','py']))
tracker_plots['residual_momentum'].Fill(pz, \
recon_cov.get_momentum() - mc_cov.get_momentum())
mc_cov.clear()
recon_cov.clear()
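# Note (reading of the block above): the alpha, beta, emittance and momentum
# histograms are only filled once a full ensemble of ENSEMBLE_SIZE hits has
# accumulated in the covariance objects; both accumulators are then cleared so
# the next ensemble starts fresh.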
def fill_plots_seeds(plot_dict, data_dict, hit_pairs) :
"""
Fill Plots with Track and Residual Data
"""
for scifi_track, virt_hit in hit_pairs :
tracker_num = scifi_track.tracker()
pz_bin = get_pz_bin( virt_hit.GetMomentum().z() )
if pz_bin >= PZ_BIN or pz_bin < 0 :
continue
if tracker_num == 0 :
tracker = 'upstream'
else :
tracker = 'downstream'
tracker_plots = plot_dict[tracker]
scifi_pos = [scifi_track.GetSeedPosition().x(), \
scifi_track.GetSeedPosition().y(), scifi_track.GetSeedPosition().z()]
scifi_mom = [scifi_track.GetSeedMomentum().x(), \
scifi_track.GetSeedMomentum().y(), scifi_track.GetSeedMomentum().z()]
virt_pos = [virt_hit.GetPosition().x(), \
virt_hit.GetPosition().y(), virt_hit.GetPosition().z()]
virt_mom = [virt_hit.GetMomentum().x(), \
virt_hit.GetMomentum().y(), virt_hit.GetMomentum().z()]
res_pos = [ scifi_pos[0] - virt_pos[0], \
scifi_pos[1] - virt_pos[1], \
scifi_pos[2] - virt_pos[2] ]
res_mom = [ scifi_mom[0] - virt_mom[0], \
scifi_mom[1] - virt_mom[1], \
scifi_mom[2] - virt_mom[2] ]
res_gra = [ scifi_mom[0]/scifi_mom[2] - virt_mom[0]/virt_mom[2], \
scifi_mom[1]/scifi_mom[2] - virt_mom[1]/virt_mom[2] ]
Pt_mc = math.sqrt( virt_mom[0] ** 2 + virt_mom[1] ** 2 )
P_mc = math.sqrt( virt_mom[0] ** 2 + virt_mom[1] ** 2 + virt_mom[2] ** 2 )
Pz_mc = virt_mom[2]
Pt_recon = math.sqrt( scifi_mom[0] ** 2 + scifi_mom[1] ** 2 )
P_recon = math.sqrt( scifi_mom[0] ** 2 + scifi_mom[1] ** 2 + \
scifi_mom[2] ** 2 )
Pt_res = Pt_recon - Pt_mc
P_res = P_recon - P_mc
tracker_plots['seed_x_residual'].Fill(res_pos[0])
tracker_plots['seed_y_residual'].Fill(res_pos[1])
tracker_plots['seed_px_residual'].Fill(res_mom[0])
tracker_plots['seed_py_residual'].Fill(res_mom[1])
tracker_plots['seed_pz_residual'].Fill(res_mom[2])
tracker_plots['seed_mx_residual'].Fill(res_gra[0])
tracker_plots['seed_my_residual'].Fill(res_gra[1])
tracker_plots['seed_pt_residual'].Fill(Pt_res)
tracker_plots['seed_p_residual'].Fill(P_res)
tracker_plots['seed_pz_residual_pz'].Fill(Pz_mc, res_mom[2])
tracker_plots['seed_pt_residual_pt'].Fill(Pt_mc, Pt_res)
tracker_plots['seed_pz_residual_pt'].Fill(Pt_mc, res_mom[2])
tracker_plots['seed_pt_residual_pz'].Fill(Pz_mc, Pt_res)
tracker_plots['seed_p_residual_p'].Fill(P_mc, P_res)
def analyse_plots(plot_dict, data_dict) :
"""
Use existing plots to perform some useful analysis
"""
# Print out some simple stats
print
print "There were:"
print " {0:0.0f} Events".format( data_dict['counters']['number_events'] )
print " {0:0.0f} Upstream Tracks".format( \
data_dict['counters']['upstream']['number_tracks'] )
print " {0:0.0f} Downstream Tracks".format( \
data_dict['counters']['downstream']['number_tracks'] )
print " {0:0.0f} Upstream Vitual Tracks".format( \
data_dict['counters']['upstream']['number_virtual'] )
print " {0:0.0f} Downstream Virtual Tracks".format( \
data_dict['counters']['upstream']['number_virtual'] )
print " Excluded {0:0.0f} Upstream Tracks outside momentum window".format( \
data_dict['counters']['upstream']['momentum_cut'] )
print " Excluded {0:0.0f} Downstream Tracks outside momentum window".format(\
data_dict['counters']['downstream']['momentum_cut'] )
print
print "Found {0:0.0f} Upstream Tracks of the wrong type".format( \
data_dict['counters']['upstream']['wrong_track_type'] )
print "Found {0:0.0f} Downstream Tracks of the wrong type".format( \
data_dict['counters']['downstream']['wrong_track_type'] )
print "Cut {0:0.0f} Upstream Tracks (P-Value Cut)".format( \
data_dict['counters']['upstream']['p_value_cut'] )
print "Cut {0:0.0f} Downstream Tracks (P-Value Cut)".format( \
data_dict['counters']['downstream']['p_value_cut'] )
print
print "{0:0.0f} Upstream Tracks for analysis".format( \
data_dict['counters']['upstream']['number_candidates'] )
print "{0:0.0f} Downstream Tracks for analysis".format( \
data_dict['counters']['downstream']['number_candidates'] )
print
print "Missed {0:0.0f} Upstream Virtual Hits".format( \
data_dict['counters']['upstream']['missing_virtuals'] )
print "Missed {0:0.0f} Downstream Virtual Hits".format( \
data_dict['counters']['downstream']['missing_virtuals'] )
print "Missed {0:0.0f} Upstream Reference Plane Hits".format( \
data_dict['counters']['upstream']['missing_reference_hits'] )
print "Missed {0:0.0f} Downstream Reference Plane Hits".format( \
data_dict['counters']['downstream']['missing_reference_hits'] )
print "Missed {0:0.0f} Upstream Tracks".format( \
data_dict['counters']['upstream']['missing_tracks'] )
print "Missed {0:0.0f} Downstream Tracks".format( \
data_dict['counters']['downstream']['missing_tracks'] )
print
print "Matched {0:0.0f} Upstream Tracks".format( \
data_dict['counters']['upstream']['found_tracks'] )
print "Matched {0:0.0f} Downstream Tracks".format( \
data_dict['counters']['downstream']['found_tracks'] )
print
print "Found {0:0.0f} Upstream Superfluous Track Events".format( \
data_dict['counters']['upstream']['superfluous_track_events'] )
print "Found {0:0.0f} Downstream Superfluous Track Events".format( \
data_dict['counters']['downstream']['superfluous_track_events'] )
print
# Make the pretty plots
for tracker in [ "upstream", "downstream" ] :
for component in [ "x_", "y_", "r_", "px_", "py_", "pt_", "pz_", "p_" ] :
for plot_axis in [ "residual_pt", "residual_pz", "residual_p" ] :
plot = plot_dict[tracker][component+plot_axis]
rms_error = array.array( 'd' )
bin_size = array.array( 'd' )
bins = array.array( 'd' )
rms = array.array( 'd' )
mean = array.array( 'd' )
mean_error = array.array( 'd' )
width = plot.GetXaxis().GetBinWidth(1)
for i in range( 0, plot.GetXaxis().GetNbins() ) :
projection = plot.ProjectionY( \
tracker+component+plot_axis+'_pro_'+str(i), i, (i+1) )
plot_mean = plot.GetXaxis().GetBinCenter( i ) + width
pro_mean, pro_mean_err, pro_std, pro_std_err = \
analysis.tools.fit_gaussian(projection)
bin_size.append( width*0.5 )
bins.append( plot_mean )
rms.append( pro_std )
rms_error.append( pro_std_err )
mean.append( pro_mean )
mean_error.append( pro_mean_err )
if len(bins) != 0 :
resolution_graph = ROOT.TGraphErrors( len(bins), \
bins, rms, bin_size, rms_error )
bias_graph = ROOT.TGraphErrors( len(bins), \
bins, mean, bin_size, mean_error )
else :
resolution_graph = None
bias_graph = None
plot_dict[tracker][component+plot_axis+'_resolution'] = \
resolution_graph
plot_dict[tracker][component+plot_axis+'_bias'] = bias_graph
for tracker in [ "upstream", "downstream" ] :
# for component in [ "pt_", "pz_", ] :
# for plot_axis in [ "residual_pt", "residual_pz" ] :
for plot_name in [ "pt_residual_pt", "pt_residual_pz", "pz_residual_pt", \
"pz_residual_pz", "p_residual_p" ] :
plot = plot_dict[tracker]['seed_'+plot_name]
rms_error = array.array( 'd' )
bin_size = array.array( 'd' )
bins = array.array( 'd' )
rms = array.array( 'd' )
mean = array.array( 'd' )
mean_error = array.array( 'd' )
width = plot.GetXaxis().GetBinWidth(1)
for i in range( 0, plot.GetXaxis().GetNbins() ) :
projection = plot.ProjectionY( \
tracker+plot_name+'_pro_'+str(i), i, (i+1) )
plot_mean = plot.GetXaxis().GetBinCenter( i ) + width
pro_mean, pro_mean_err, pro_std, pro_std_err = \
analysis.tools.fit_gaussian(projection)
bin_size.append( width*0.5 )
bins.append( plot_mean )
rms.append( pro_std )
rms_error.append( pro_std_err )
mean.append( pro_mean )
mean_error.append( pro_mean_err )
if len(bins) != 0 :
resolution_graph = ROOT.TGraphErrors( len(bins), \
bins, rms, bin_size, rms_error )
bias_graph = ROOT.TGraphErrors( len(bins), \
bins, mean, bin_size, mean_error )
else :
resolution_graph = None
bias_graph = None
plot_dict[tracker]['seed_'+plot_name+'_resolution'] = resolution_graph
plot_dict[tracker]['seed_'+plot_name+'_bias'] = bias_graph
return data_dict
if __name__ == "__main__" :
ROOT.gROOT.SetBatch( True )
ROOT.gErrorIgnoreLevel = ROOT.kError
parser = argparse.ArgumentParser( description='An example script showing '+\
'some basic data extraction and analysis routines' )
parser.add_argument( 'maus_root_files', nargs='+', help='List of MAUS '+\
'output root files containing reconstructed straight tracks')
parser.add_argument( '-N', '--max_num_events', type=int, \
help='Maximum number of events to analyse.')
parser.add_argument( '-O', '--output_filename', \
default='tracker_resolution_plots', help='Set the output filename')
parser.add_argument( '-D', '--output_directory', \
default='./', help='Set the output directory')
parser.add_argument( '-V', '--virtual_plane_dictionary', default=None, \
help='Specify a json file containing a dictionary of the '+\
'virtual plane lookup' )
parser.add_argument( '-P', '--print_plots', action='store_true', \
help="Flag to save the plots as individual pdf files" )
parser.add_argument( '--cut_number_trackpoints', type=int, default=0, \
help="Specify the minumum number of trackpoints required per track" )
parser.add_argument( '--cut_p_value', type=float, default=0.0, \
help="Specify the P-Value below which tracks are removed from the analysis" )
parser.add_argument( '--track_algorithm', type=int, default=1, \
help="Specify the track reconstruction algorithm. "+\
"1 for Helical Tracks and 0 for Straight Tracks" )
parser.add_argument( '--ensemble_size', type=int, default=2000, \
help="Specify the size of the ensemble of particles "+\
"to consider per emittance measurement." )
parser.add_argument( '--pz_bin', type=float, default=PZ_BIN_WIDTH, \
help="Specify the size of the Pz bins which are used to select "+\
"particles for the reconstruction of optical functions." )
parser.add_argument( '--pz_window', type=float, nargs=2, \
default=[PZ_MIN, PZ_MAX], help="Specify the range of Pz to consider "+\
"for the reconstruction of optical functions." )
parser.add_argument( '--pt_bin', type=float, default=PT_BIN_WIDTH, \
help="Specify the size of the Pt bins which are used to select "+\
"particles for the reconstruction of optical functions." )
parser.add_argument( '--pt_window', type=float, nargs=2, \
default=[PT_MIN, PT_MAX], help="Specify the range of Pt to consider "+\
"for the reconstruction of optical functions." )
parser.add_argument( '--trackers', type=int, default=RECON_TRACKERS, \
nargs='+', help="Specifies the trackers to analyse" )
parser.add_argument( '--p_window', type=float, nargs=2, \
default=[P_MIN, P_MAX], help="Specify the range of the total " + \
"momentum to consider for analysis." )
parser.add_argument( '--max_gradient', type=float, default=MAX_GRADIENT, \
help='Specify the maximum gradient to analyse.' + \
' This eliminates non-physical muons' )
parser.add_argument( '-C', '--save_corrections', action='store_true', \
help="Flag to create the correction matrix files" )
parser.add_argument( '--selection_file', default=None, \
help='Name of a JSON file containing the events to analyse' )
parser.add_argument( '--not_require_all_planes', action="store_true", \
help="Don't require all the virtual planes to be located" )
parser.add_argument( '--not_require_cluster', action="store_true", \
help="Don't require a cluster in the reference plane" )
# parser.add_argument( '-C', '--configuration_file', help='Configuration '+\
# 'file for the reconstruction. I need the geometry information' )
try :
namespace = parser.parse_args()
EXPECTED_HELIX_TRACKPOINTS = namespace.cut_number_trackpoints
EXPECTED_STRAIGHT_TRACKPOINTS = namespace.cut_number_trackpoints
P_VALUE_CUT = namespace.cut_p_value
TRACK_ALGORITHM = namespace.track_algorithm
ENSEMBLE_SIZE = namespace.ensemble_size
if namespace.not_require_cluster :
REQUIRE_DATA = False
if namespace.not_require_all_planes :
REQUIRE_ALL_PLANES = False
RECON_TRACKERS = namespace.trackers
P_MIN = namespace.p_window[0]
P_MAX = namespace.p_window[1]
MAX_GRADIENT = namespace.max_gradient
PZ_MIN = namespace.pz_window[0]
PZ_MAX = namespace.pz_window[1]
PZ_BIN_WIDTH = namespace.pz_bin
PT_MIN = namespace.pt_window[0]
PT_MAX = namespace.pt_window[1]
PT_BIN_WIDTH = namespace.pt_bin
if namespace.selection_file is not None :
SELECT_EVENTS = True
with open(namespace.selection_file, 'r') as infile :
GOOD_EVENTS = json.load(infile)
else :
SELECT_EVENTS = False
if namespace.virtual_plane_dictionary is not None :
VIRTUAL_PLANE_DICT = analysis.tools.load_virtual_plane_dict( \
namespace.virtual_plane_dictionary )
except BaseException as ex:
raise
else :
##### 1. Load MAUS globals and geometry. - NOT NECESSARY AT PRESENT
# geom = load_tracker_geometry(namespace.configuration_file)
##### 2. Initialise plots #####################################################
print
sys.stdout.write( "\n- Initialising Plots : Running\r" )
sys.stdout.flush()
plot_dict, data_dict = init_plots_data()
sys.stdout.write( "- Initialising Plots : Done \n" )
file_reader = event_loader.maus_reader(namespace.maus_root_files)
file_reader.set_max_num_events(1000)
##### 3. Initialise Plane Dictionary ##########################################
if VIRTUAL_PLANE_DICT is None :
sys.stdout.write( "\n- Finding Virtual Planes : Running\r" )
sys.stdout.flush()
virtual_plane_dictionary = create_virtual_plane_dict(file_reader)
VIRTUAL_PLANE_DICT = virtual_plane_dictionary
sys.stdout.write( "- Finding Virtual Planes : Done \n" )
INVERSE_PLANE_DICT = inverse_virtual_plane_dict(VIRTUAL_PLANE_DICT)
file_reader.select_events(GOOD_EVENTS)
file_reader.set_max_num_events(namespace.max_num_events)
file_reader.set_print_progress('spill')
##### 4. Load Events ##########################################################
print "\n- Loading Spills...\n"
try :
while file_reader.next_selected_event() :
try :
tofevent = file_reader.get_event( 'tof' )
scifi_event = file_reader.get_event( 'scifi' )
mc_event = file_reader.get_event( 'mc' )
##### 5. Extract tracks and Fill Plots ########################################
paired_hits, seed_pairs = make_scifi_mc_pairs(plot_dict, data_dict, \
VIRTUAL_PLANE_DICT, scifi_event, mc_event, \
tofevent)
fill_plots(plot_dict, data_dict, paired_hits)
fill_plots_seeds(plot_dict, data_dict, seed_pairs)
except ValueError as ex :
print "An Error Occured: " + str(ex)
print "Skipping Event: " +\
str(file_reader.get_current_event_number()) + " In Spill: " + \
str(file_reader.get_current_spill_number()) + " In File: " + \
str(file_reader.get_current_filenumber()) + "\n"
continue
except KeyboardInterrupt :
print
print " ### Keyboard Interrupt ###"
print
print "- {0:0.0f} Spills Loaded ".format( \
file_reader.get_total_num_spills())
calculate_efficiency(plot_dict)
##### 6. Analysing Plots ######################################################
print"\n- Analysing Data...\n"
analyse_plots(plot_dict, data_dict)
##### 7. Saving Plots and Data ################################################
sys.stdout.write( "\n- Saving Plots and Data : Running\r" )
sys.stdout.flush()
# save_pretty(plot_dict, namespace.output_directory )
# save_plots(plot_dict, namespace.output_directory, \
# namespace.output_filename, namespace.print_plots)
filename = os.path.join(namespace.output_directory, \
namespace.output_filename)
analysis.tools.save_plots(plot_dict, filename+'.root')
if namespace.save_corrections :
UP_CORRECTION.save_full_correction(filename+'_up_correction.txt')
DOWN_CORRECTION.save_full_correction(filename+'_down_correction.txt')
UP_CORRECTION.save_R_matrix(filename+'_up_correction-R.txt')
UP_CORRECTION.save_C_matrix(filename+'_up_correction-C.txt')
DOWN_CORRECTION.save_R_matrix(filename+'_down_correction-R.txt')
DOWN_CORRECTION.save_C_matrix(filename+'_down_correction-C.txt')
sys.stdout.write( "- Saving Plots and Data : Done \n" )
print
print "Complete."
print
|
[
"john.nugent@glasgow.ac.uk"
] |
john.nugent@glasgow.ac.uk
|
b4f2d03ca0e8a1c1d98f717ae376a1a058ea197c
|
0ce6774ce9ac8c05bd3a07e34244afb67cf1967b
|
/scripts/dict_chars_ponykapi/chars.py
|
c445366447715270f4056f86c835c7c7d9c22ec9
|
[
"WTFPL"
] |
permissive
|
Shadybloom/amber-in-the-dark
|
ecb38b6883f167569239410117f6c1fc4eea80b3
|
35925fd31ad749d37542372986e9a140b453d8aa
|
refs/heads/master
| 2022-11-25T22:24:34.545442
| 2022-11-03T01:12:49
| 2022-11-03T01:12:49
| 96,470,745
| 0
| 2
|
WTFPL
| 2018-06-28T10:39:56
| 2017-07-06T20:48:02
|
Python
|
UTF-8
|
Python
| false
| false
| 15,295
|
py
|
# Character cards in the style of Fallout 2.
##----
# Heroes
metadict_army['Персонажи'] = {
'Джина':1,
'Квинта':1,
}
metadict_army['Джина'] = {
# Джина-алугви
'Trait (Jinxed)':1,
'Trait (Small Frame)':1,
'Джина (параметры)':1,
'Джина (жизнь в Кладже)':1,
}
metadict_army['Квинта'] = {
# Квинта Сидни
'Trait (Sex Appeal)':1,
'Trait (Good Natured)':1,
'Квинта (параметры)':1,
'Квинта (жизнь в Кладже)':1,
}
metadict_army['Кумо'] = {
# Кумо-керети
'Trait (Gifted)':1,
'Кумо (параметры)':1,
'Кумо (жизнь в Кладже)':1,
}
metadict_army['Тето'] = {
# Тето-арройо
'Trait (Kamikaze)':1,
'Trait (Fast Shot)':1,
'Тето (параметры)':1,
'Тето (жизнь в Кладже)':1,
}
metadict_army['Арики'] = {
# Арики-арройо
'Trait (Skilled)':1,
'Арики (параметры)':1,
'Арики (жизнь в Кладже)':1,
}
##----
# Characteristics and parameters:
metadict_army['Джина (параметры)'] = {
'Базовые способности':1,
'Strenght (сила)':6,
'Perception (восприятие)':7,
'Endurance (выносливость)':6,
'Charisma (харизма)':3,
'Intelligence (интеллект)':7,
'Agility (ловкость)':9,
'Luck (удача)':2,
}
metadict_army['Квинта (параметры)'] = {
'Базовые способности':1,
'Strenght (сила)':6,
'Perception (восприятие)':4,
'Endurance (выносливость)':5,
'Charisma (харизма)':9,
'Intelligence (интеллект)':5,
'Agility (ловкость)':7,
'Luck (удача)':4,
}
metadict_army['Кумо (параметры)'] = {
'Базовые способности':1,
'Strenght (сила)':4,
'Perception (восприятие)':7,
'Endurance (выносливость)':4,
'Charisma (харизма)':5,
'Intelligence (интеллект)':8,
'Agility (ловкость)':7,
'Luck (удача)':5,
}
metadict_army['Тето (параметры)'] = {
'Базовые способности':1,
'Strenght (сила)':7,
'Perception (восприятие)':5,
'Endurance (выносливость)':7,
'Charisma (харизма)':4,
'Intelligence (интеллект)':5,
'Agility (ловкость)':7,
'Luck (удача)':5,
}
metadict_army['Арики (параметры)'] = {
'Базовые способности':1,
'Strenght (сила)':5,
'Perception (восприятие)':5,
'Endurance (выносливость)':5,
'Charisma (харизма)':5,
'Intelligence (интеллект)':10,
'Agility (ловкость)':5,
'Luck (удача)':5,
}
##----
# Characteristics --> skills:
metadict_army['Базовые способности'] = {
'Action Points':5,
'Hit Points (base)':15,
'Melee Damage':-5,
'Skill (Small Guns)':5,
'Skill (Unarmed)':30,
'Skill (Melee Weapons)':20,
'Skill (Doctor)':5,
'Skill (Sneak)':5,
'Skill (Lockpick)':10,
'Skill (Traps)':10,
'Skill (Science)':10,
}
metadict_army['Strenght (сила)'] = {
# hp: 15 + Strenght + (2 * Endurance)
# Unarmed
'-Strenght':1,
'Hit Points (base)':1,
# The foals are small: 15 lbs of carry weight per point of Strength (instead of the usual 25)
#'--Weight-max (lbs)':25,
'--Weight-max (lbs)':15,
'Melee Damage':1,
'Skill (Unarmed)':2,
'Skill (Melee Weapons)':2,
}
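# Worked example of the hit-point formula noted above (illustration only):
# Джина has Strength 6 and Endurance 6, so her base HP come out as
# 15 + 6 + 2*6 = 33, accumulated from 'Базовые способности' (15),
# 'Strenght (сила)' (+1 per point) and 'Endurance (выносливость)' (+2 per point).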
metadict_army['Perception (восприятие)'] = {
# https://fallout.fandom.com/wiki/Sequence
'Sequence':2,
'-Perception':1,
'Skill (First Aid)':2,
'Skill (Doctor)':1,
'Skill (Lockpick)':1,
'Skill (Traps)':1,
'Skill (Pilot)':2,
}
metadict_army['Endurance (выносливость)'] = {
'-Endurance':1,
'Hit Points (base)':2,
'Skill (Outdoorsman)':2,
}
metadict_army['Charisma (харизма)'] = {
'-Charisma':1,
'Skill (Speech)':5,
'Skill (Barter)':4,
}
metadict_army['Intelligence (интеллект)'] = {
'-Intelligence':1,
'Skill (First Aid)':2,
'Skill (Doctor)':1,
'Skill (Science)':2,
'Skill (Repair)':3,
'Skill (Outdoorsman)':2,
}
metadict_army['Agility (ловкость)'] = {
'-Agility':1,
'Action Points':1/2,
'Skill (Small Guns)':4,
'Skill (Big Guns)':2,
'Skill (Energy Weapons)':2,
'Skill (Unarmed)':2,
'Skill (Melee Weapons)':2,
'Skill (Throwing)':4,
'Skill (Sneak)':3,
'Skill (Lockpick)':1,
'Skill (Steal)':3,
'Skill (Traps)':1,
'Skill (Pilot)':2,
}
metadict_army['Luck (удача)'] = {
'-Luck':1,
'Critical Chance (%)':1,
'Skill (Gambling)':5,
}
##----
# Traits
metadict_army['Trait (Heavy Handed)'] = {
# https://fallout.fandom.com/wiki/Fallout_2_traits
'Melee Damage':+4,
'-Trait (Heavy Handed)':1,
}
metadict_army['Trait (Jinxed)'] = {
# https://fallout.fandom.com/wiki/Jinxed
'-Trait (Jinxed)':1,
}
metadict_army['Trait (Small Frame)'] = {
'-Trait (Small Frame)':1,
'Agility (ловкость)':+1,
}
metadict_army['Trait (Fast Shot)'] = {
# All throwing and firearm attacks cost 1 less AP
# Cannot aim attacks
'-Trait (Fast Shot)':1,
}
metadict_army['Trait (Kamikaze)'] = {
'Sequence':+5,
'-Trait (Kamikaze)':1,
}
metadict_army['Trait (Sex Appeal)'] = {
'-Trait (Sex Appeal)':1,
}
metadict_army['Trait (Gifted)'] = {
# https://fallout.fandom.com/wiki/Gifted
# +1 to all seven stats
# -10% to all skills
# 5 less skill Points per level
'-Trait (Gifted)':1,
'Strenght (сила)':+1,
'Perception (восприятие)':+1,
'Endurance (выносливость)':+1,
'Charisma (харизма)':+1,
'Intelligence (интеллект)':+1,
'Agility (ловкость)':+1,
'Luck (удача)':+1,
# -10% to all skills
'Skill (Small Guns)':-10,
'Skill (Big Guns)':-10,
'Skill (Energy Weapons)':-10,
'Skill (Unarmed)':-10,
'Skill (Melee Weapons)':-10,
'Skill (Throwing)':-10,
'Skill (First Aid)':-10,
'Skill (Doctor)':-10,
'Skill (Sneak)':-10,
'Skill (Lockpick)':-10,
'Skill (Steal)':-10,
'Skill (Traps)':-10,
'Skill (Science)':-10,
'Skill (Repair)':-10,
'Skill (Speech)':-10,
'Skill (Barter)':-10,
'Skill (Gambling)':-10,
'Skill (Outdoorsman)':-10,
}
metadict_army['Trait (Skilled)'] = {
# +5 skill Points per level
# +1 Perk rate
'-Trait (Skilled)':1,
}
metadict_army['Trait (Good Natured)'] = {
# +15% to First Aid, Doctor, Speech, and Barter
'-Trait (Good Natured)':1,
'Skill (First Aid)':+15,
'Skill (Doctor)':+15,
'Skill (Speech)':+15,
'Skill (Barter)':+15,
# -10% to Big Guns, Small Guns, Energy Weapons, Throwing, Melee Weapons, and Unarmed
'Skill (Big Guns)':-10,
'Skill (Small Guns)':-10,
'Skill (Energy Weapons)':-10,
'Skill (Throwing)':-10,
'Skill (Melee Weapons)':-10,
'Skill (Unarmed)':-10,
}
metadict_army['Trait (Small Frame)'] = {
# +1 Agility
# Carry Weight = 25 + (15 x your Strength)
'-Trait (Small Frame)':1,
'Agility (ловкость)':+1,
}
##----
# Perks
metadict_army['Perk (Healer)'] = {
# PE 7, IN 5, AG 6, First Aid 40%
# 4-10 more hit points healed when using First Aid or Doctor skills
'-Perk (Healer)':1,
}
metadict_army['Perk (Kama Sutra Master)'] = {
'-Perk (Kama Sutra Master)':1,
}
metadict_army['Perk (Thief)'] = {
# +10% to skills: Sneak, Lockpick, Steal and Traps
'Skill (Sneak)':+10,
'Skill (Lockpick)':+10,
'Skill (Steal)':+10,
'Skill (Traps)':+10,
'-Perk (Thief)':1,
}
##----
# Simplifications:
metadict_army['Hit Points (base)'] = {
'--HP':1,
}
metadict_army['Hit Points (level)'] = {
'--HP':1,
}
##----
# Item bonuses:
metadict_army['Book (First Aid Book)'] = {
# https://fallout.fandom.com/wiki/Fallout_2_skill_books
# In Fallout 2, the number of skill points gained per book is
# (100 - current skill level) / 10, rounded down.
# Thus, the highest a skill can be raised by reading books is 91%.
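# Worked example of the formula above (hypothetical numbers, illustration
# only): with Small Guns currently at 35%, one book gives
# floor((100 - 35) / 10) = 6 points; at 91% a book gives floor(9/10) = 0,
# which is why 91% is the cap.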
'|Book (First Aid Book)':1,
'Skill (First Aid)':10,
'--Weight':2,
}
metadict_army['Book (Big Book of Science)'] = {
'|Book (Big Book of Science)':1,
'Skill (Science)':10,
'--Weight':5,
}
metadict_army['Book (Dean\'s Electronics)'] = {
'|Book (Dean\'s Electronics)':1,
'Skill (Repair)':10,
'--Weight':2,
}
metadict_army['Book (Scout Handbook)'] = {
'|Book (Scout Handbook)':1,
'Skill (Outdoorsman)':10,
'--Weight':3,
}
metadict_army['Book (Guns and Bullets)'] = {
'|Book (Guns and Bullets)':1,
'Skill (Small Guns)':10,
'--Weight':3,
}
##----
# Item bonuses:
metadict_army['Item (First Aid Kit)'] = {
'|Item (First Aid Kit) (usage)':3,
'Skill (First Aid)':20,
}
metadict_army['Item (Doctor\'s bag)'] = {
'|Item (Doctor\'s bag) (usage)':3,
'Skill (Doctor)':20,
}
metadict_army['Item (Tool)'] = {
'|Item (Tool)':1,
'Skill (Repair)':20,
}
metadict_army['Item (Lock picks)'] = {
'|Item (Lock picks)':1,
'Skill (Lockpick)':20,
}
metadict_army['Item (Motion sensor)'] = {
'|Item (Motion sensor)':1,
'Skill (Outdoorsman)':20,
}
metadict_army['Item (Assault rifle)'] = {
'|Item (Assault rifle)':1,
'--Weight':7,
}
metadict_army['Item (10mm SMG)'] = {
'|Item (10mm SMG)':1,
'--Weight':5,
}
metadict_army['Item (Laser rifle)'] = {
'|Item (Laser rifle)':1,
'--Weight':7,
}
metadict_army['Item (Light support weapon)'] = {
'|Item (Light support weapon)':1,
'--Weight':20,
}
metadict_army['Item (Combat armor)'] = {
# https://fallout.fandom.com/wiki/Combat_armor_(Fallout)
# https://fallout.fandom.com/wiki/Armor_Class
'|Item (Combat armor)':1,
'--Weight':20,
}
metadict_army['Item (Spectacles)'] = {
'|Item (Spectacles)':1,
# Квинта's spectacles
}
metadict_army['Item (Camera)'] = {
'|Item (Camera)':1,
# Джина's camera
}
##----
# Character progression:
# https://fallout.fandom.com/wiki/Level
##----
# Джина-алугви:
metadict_army['Джина (жизнь в Кладже)'] = {
'Джина lvl 1':1,
'Book (First Aid Book)':1,
'Item (First Aid Kit)':1,
'Item (Doctor\'s bag)':1,
'Item (10mm SMG)':1,
'Item (Camera)':1,
}
##----
#
metadict_army['Джина lvl 1'] = {
# Skill Points: 5 + INT x 2
# Hit Points: 2 + END / 2
#'Hit Points (level)':2 + 6/2,
#'Skill Points':5 + 7*2,
'Skill (Doctor)':20,
'Skill (Sneak)':20,
'Skill (Small Guns)':20,
}
##----
# Квинта Сидни
metadict_army['Квинта (жизнь в Кладже)'] = {
# https://fallout.fandom.com/wiki/Pilot
'Квинта lvl 1':1,
'Book (First Aid Book)':1,
'Item (First Aid Kit)':1,
'Item (Spectacles)':1,
}
##----
#
metadict_army['Квинта lvl 1'] = {
#'Hit Points (level)':2 + 5/2,
#'Skill Points':5 + 5*2,
'Skill (Speech)':20,
'Skill (Barter)':20,
'Skill (Pilot)':20,
}
##----
# Кумо-керети
metadict_army['Кумо (жизнь в Кладже)'] = {
# Helped Нир with the radio station.
'Кумо lvl 1':1,
'Book (Scout Handbook)':1,
'Book (Guns and Bullets)':1,
'Item (Assault rifle)':1,
'Item (Lock picks)':1,
}
##----
#
metadict_army['Кумо lvl 1'] = {
#'Hit Points (level)':2 + 5/2,
#'Skill Points':9*2,
'Skill (Outdoorsman)':20,
'Skill (Lockpick)':20,
'Skill (Small Guns)':20,
}
##----
# Тето-арройо
metadict_army['Тето (жизнь в Кладже)'] = {
'Тето lvl 1':1,
'Item (Combat armor)':1,
'Item (Light support weapon)':1,
}
##----
#
metadict_army['Тето lvl 1'] = {
# https://fallout.fandom.com/wiki/Unarmed#Fallout_2_and_Fallout_Tactics
# The fancy unarmed attacks only unlock at level 5-6, but never mind, Джин kicks painfully hard anyway:
# Hammer Punch
# Unarmed 75%, Agility 6, Strength 5, level 6
# +5 DMG, +5% crit
# Jab
# Unarmed 75%, Agility 7, Strength 5, level 5
# +3 DMG, +10% crit
# Snap Kick
# Unarmed 60%, Agility 6, Level 6
# +7 DMG
# Hip Kick
# Unarmed 60%, Agility 7, Strength 6, Level 6
# +7 DMG
#'Hit Points (level)':2 + 7/2,
#'Skill Points':5 + 5*2,
'Skill (Unarmed)':20,
'Skill (Throwing)':20,
'Skill (Big Guns)':20,
}
##----
# Арики-арройо
metadict_army['Арики (жизнь в Кладже)'] = {
'Арики lvl 1':1,
'Book (Big Book of Science)':1,
'Item (Laser rifle)':1,
'Item (Tool)':1,
}
##----
#
metadict_army['Арики lvl 1'] = {
#'Hit Points (level)':2 + 5/2,
#'Skill Points':10 + 10*2,
'Skill (Science)':20,
'Skill (Repair)':20,
'Skill (Energy Weapons)':20,
}
|
[
"celestia@safe-mail.net"
] |
celestia@safe-mail.net
|
5006a9e571a17c043f30217b05bb0a9f2eff2cd5
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/MY_REPOS/INTERVIEW-PREP-COMPLETE/Leetcode/189.py
|
82da4095255e1cbfd69a405b3eb408b4b5321f61
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
class Solution:
def rotate(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: void Do not return anything, modify nums in-place instead.
"""
if not nums:
return
n = len(nums)
k = k % n
nums[:] = nums[-k:] + nums[:-k]
# nums[:] = nums[n-k:] + nums[:n-k]
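# Quick sanity check (hypothetical usage, not part of the original submission):
if __name__ == "__main__":
    nums = [1, 2, 3, 4, 5, 6, 7]
    Solution().rotate(nums, 3)
    assert nums == [5, 6, 7, 1, 2, 3, 4]  # rotated right by 3 positions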
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
610ee9aef21da8a8601ead05f788e0a1a93d62da
|
57d2af4d831f396959950fb1251a1d6d443abdce
|
/pywxgrideditmixin.py
|
5bacabd38e876ef41a118a9ce465d32fbf74e444
|
[
"Apache-2.0"
] |
permissive
|
MA0R/Swerlein-Algorithm
|
b9e8227bb9075a7cc84bb628fffb356f490a8c82
|
7c642ef137e772ac74b94cd1f23059ea5b132c12
|
refs/heads/master
| 2021-01-19T13:41:50.024494
| 2017-05-10T21:36:19
| 2017-05-10T21:36:19
| 88,101,478
| 1
| 1
| null | 2017-09-23T10:30:55
| 2017-04-12T22:24:43
|
Python
|
UTF-8
|
Python
| false
| false
| 10,892
|
py
|
import wx
import wx.grid
"""
Mixin for wx.grid to implement cut/copy/paste and undo/redo.
Keystroke handlers are in the OnMixinKeypress method below. Other handlers (e.g., menu, toolbar) should call the Copy/OnPaste/OnCut/Delete/Undo/Redo methods directly.
https://github.com/wleepang/MMFitter/blob/master/pywxgrideditmixin.py
"""
class PyWXGridEditMixin():
""" A Copy/Paste and undo/redo mixin for wx.grid. Undo/redo is per-table, not yet global."""
def __init_mixin__(self):
"""caller must invoke this method to enable keystrokes, or call these handlers if they are overridden."""
wx.EVT_KEY_DOWN(self, self.OnMixinKeypress)
wx.grid.EVT_GRID_CELL_CHANGE(self, self.Mixin_OnCellChange)
wx.grid.EVT_GRID_EDITOR_SHOWN(self, self.Mixin_OnCellEditor)
self._undoStack = []
self._redoStack = []
self._stackPtr = 0
def OnMixinKeypress(self, event):
"""Keystroke handler."""
key = event.GetKeyCode()
if key == ord(" ") and event.ShiftDown and not event.ControlDown:
self.SelectRow(self.GetGridCursorRow())
return
if not event.ControlDown: return
if key == ord("C"): self.Copy()
elif key == ord("V"): self.OnPaste()
elif key == ord("X"): self.OnCut()
elif key == wx.WXK_DELETE: self.Delete()
elif key == ord("Z"): self.Undo()
elif key == ord("Y"): self.Redo()
elif key == ord(" "): self.SelectCol(self.GetGridCursorCol())
elif key: event.Skip()
def Mixin_OnCellEditor(self, evt=None):
"""this method saves the value of cell before it's edited (when that value disappears)"""
top, left, rows, cols = self.GetSelectionBox()[0]
v = self.GetCellValue(top, left)
self._editOldValue = v+"\n"
def Mixin_OnCellChange(self, evt):
"""Undo/redo handler Use saved value from above for undo."""
box = self.GetSelectionBox()[0]
newValue = self.GetCellValue(*box[:2])
self.AddUndo(undo=(self.Paste, (box, self._editOldValue)),
redo=(self.Paste, (box, newValue)))
self._editOldValue = None
def GetSelectionBox(self):
"""Produce a set of selection boxes of the form (top, left, nrows, ncols)"""
#For wxGrid, blocks, cells, rows and cols all have different selection notations.
#This captures them all into a single "box" tuple (top, left, rows, cols)
gridRows = self.GetNumberRows()
gridCols = self.GetNumberCols()
tl, br = self.GetSelectionBlockTopLeft(), self.GetSelectionBlockBottomRight()
# need to reorder based on what should get copy/pasted first
boxes = []
# collect top, left, rows, cols in boxes for each selection
for blk in range(len(tl)):
boxes.append((tl[blk][0], tl[blk][1], br[blk][0] - tl[blk][0]+1, br[blk][1]-tl[blk][1]+1))
for row in self.GetSelectedRows():
boxes.append((row, 0, 1, gridCols))
for col in self.GetSelectedCols():
boxes.append((0, col, gridRows, 1))
# if not selecting rows, cols, or blocks, add the current cursor (this is not picked up in GetSelectedCells
if len(boxes) ==0:
boxes.append((self.GetGridCursorRow(), self.GetGridCursorCol(), 1, 1))
for (top, left) in self.GetSelectedCells():
boxes.append((top, left, 1, 1)) # single cells are 1x1 rowsxcols.
return boxes
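# Illustration of the box notation (hypothetical values): selecting the block
# B2:D4 yields (1, 1, 3, 3); selecting the whole of row 5 yields
# (4, 0, 1, GetNumberCols()); with nothing selected the current cursor cell
# gives (row, col, 1, 1).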
def Copy(self):
"""Copy selected range into clipboard. If more than one range is selected at a time, only the first is copied"""
top, left, rows,cols = self.GetSelectionBox()[0]
data = self.Box2String(top, left, rows, cols, True, True)
# Create text data object for use by TheClipboard
clipboard = wx.TextDataObject()
clipboard.SetText(data)
# Put the data in the clipboard
if wx.TheClipboard.Open():
wx.TheClipboard.SetData(clipboard)
wx.TheClipboard.Close()
else:
print "Can't open the clipboard"
def Box2String(self, top, left, rows, cols, getRowLabels=False, getColLabels=False):
"""Return values in a selected cell range as a string. This is used to pass text to clipboard."""
data = '' # collect strings in grid for clipboard
# Tabs '\t' separate cols and '\n' separate rows
# retrieve the row and column labels
# WLP: added options to retrieve row and column labels
if getColLabels:
colLabels = [self.GetColLabelValue(c) for c in range(left, left+cols)]
colLabels = str.join('\t', colLabels) + '\n'
if getRowLabels:
colLabels = '\t' + colLabels
data += colLabels
for r in range(top, top+rows):
rowAsString = [str(self.GetCellValue(r, c)) for c in range(left, left+cols) if self.CellInGrid(r,c)]
rowAsString = str.join('\t', rowAsString) + '\n'
if getRowLabels:
rowAsString = self.GetRowLabelValue(r) + '\t' + rowAsString
data += rowAsString
return data
def OnPaste(self):
"""Event handler to paste from clipboard into grid. Data assumed to be separated by tab (columns) and "\n" (rows)."""
clipboard = wx.TextDataObject()
if wx.TheClipboard.Open():
wx.TheClipboard.GetData(clipboard)
wx.TheClipboard.Close()
else:
print "Can't open the clipboard"
data = clipboard.GetText()
table = [r.split('\t') for r in data.splitlines()] # convert to array
#Determine the paste area given the size of the data in the clipboard (clipBox) and the current selection (selBox)
top, left, selRows,selCols = self.GetSelectionBox()[0]
if len(table) ==0 or type(table[0]) is not list: table = [table]
pBox = self._DeterminePasteArea(top, left, len(table), len(table[0]), selRows, selCols)
self.AddUndo(undo=(self.Paste, (pBox, self.Box2String(*pBox))),
redo=(self.Paste, (pBox, data)))
self.Paste(pBox, data)
def _DeterminePasteArea(self, top, left, clipRows, clipCols, selRows, selCols):
"""paste area rules: if 1-d selection (either directon separately) and 2-d clipboard, use clipboard size, otherwise use selection size"""
pRows = selRows ==1 and clipRows > 1 and clipRows or selRows
pCols = selCols ==1 and clipCols > 1 and clipCols or selCols
return top, left, pRows, pCols
# Note: the alternative constraint below is unreachable (it sits after the
# return above) and is kept only as commented-out reference.
#if clipRows ==1 and clipCols ==1: # constrain paste range by what's in clipboard
#pRows, pCols = clipRows, clipCols
#else: # constrain paste range by current selection
#pRows, pCols = selRows, selCols
#return top, left, pRows, pCols # the actual area we'll paste into
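# Illustration of the paste-area rule (hypothetical sizes): a single selected
# cell with a 3x4 block on the clipboard pastes as 3x4 anchored at the cursor,
# whereas a 5x2 selection with a 3x4 clipboard block pastes into the 5x2
# selection, tiling the clipboard data with the modulo logic in Paste below.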
def Paste(self, box, dataString):
top, left, rows, cols = box
data = [r.split('\t') for r in dataString.splitlines()]
if len(data) ==0 or type(data[0]) is not list: data = [data]
# get sizes (rows, cols) of both clipboard and current selection
dataRows, dataCols = len(data), len(data[0])
for r in range(rows):
row = top + r
for c in range(cols):
col = left + c
if self.CellInGrid(row, col): self.SetCellValue(row, col, data[r %dataRows][c % dataCols])
return
def CellInGrid(self, r, c): # only paste data that actually falls on the table
return r >=0 and c >=0 and r < self.GetNumberRows() and c < self.GetNumberCols()
def OnCut(self):
"""Cut cells from grid into clipboard"""
box = self.GetSelectionBox()[0]
self.Copy()
self.Delete() #this takes care of undo/redo
def Delete(self):
"""Clear Cell contents"""
boxes = self.GetSelectionBox()
for box in boxes: #allow multiple selection areas to be deleted
# first, save data in undo stack
self.AddUndo(undo=(self.Paste, (box, self.Box2String(*box))),
redo=(self.Paste, (box, "\n")))
self.Paste(box, "\n")
def AddUndo(self, undo, redo):
"""Add an undo/redo combination to the respective stack"""
(meth, parms) = undo
#print self._stackPtr, "set undo: ",parms, "redo=",redo[1]
self._undoStack.append((meth, parms))
(meth, parms) = redo
self._redoStack.append((meth, parms))
self._stackPtr+= 1
# remove past undos beyond the current one.
self._undoStack = self._undoStack[:self._stackPtr]
self._redoStack = self._redoStack[:self._stackPtr]
def Undo(self, evt = None):
if self._stackPtr > 0:
self._stackPtr -= 1
(funct, params) = self._undoStack[self._stackPtr]
#print "UNdoing:"+`self._stackPtr`+"=",`params[0]`
funct(*params)
# set cursor at loc and selection if block
top, left, rows, cols = params[0]
self.SelectBlock(top, left, top+rows-1, left+cols-1)
self.SetGridCursor(top,left)
def Redo(self, evt = None):
if self._stackPtr < len(self._redoStack):
(funct, params) = self._redoStack[self._stackPtr]
#print "REdoing:"+`self._stackPtr`+"=",`params[0]`
funct(*params)
# set cursor at loc
top, left, rows, cols = params[0]
self.SetGridCursor(top, left)
self.SelectBlock(top, left, top+rows-1, left+cols-1)
self._stackPtr += 1
if __name__ == '__main__':
import sys
app = wx.PySimpleApp()
frame = wx.Frame(None, -1, size=(700,500), title = "wx.Grid example")
grid = wx.grid.Grid(frame)
grid.CreateGrid(20,6)
# To add capability, mix it in, then set the key handler, or add a call to grid.OnMixinKeypress() in your own handler
wx.grid.Grid.__bases__ += (PyWXGridEditMixin,)
grid.__init_mixin__()
grid.SetDefaultColSize(70, 1)
grid.EnableDragGridSize(False)
grid.SetCellValue(0,0,"Col is")
grid.SetCellValue(1,0,"Read Only")
grid.SetCellValue(1,1,"hello")
grid.SetCellValue(2,1,"23")
grid.SetCellValue(4,3,"greren")
grid.SetCellValue(5,3,"geeges")
# make column 1 multiline, autowrap
cattr = wx.grid.GridCellAttr()
cattr.SetEditor(wx.grid.GridCellAutoWrapStringEditor())
#cattr.SetRenderer(wx.grid.GridCellAutoWrapStringRenderer())
grid.SetColAttr(1, cattr)
frame.Show(True)
app.MainLoop()
|
[
"noreply@github.com"
] |
MA0R.noreply@github.com
|
4d6fdaef6df0a43f5897937e3ed223e6025aadcf
|
f58833787321241a23c2ca7a067000af46d55363
|
/.local/bin/launch_instance
|
7c7dbe8049852865f0124f34fc50f63e22df7a30
|
[] |
no_license
|
shwetaguj/assignment_emp
|
2216b1ed0dc0ffb3e3b6d80459be04a20ea32805
|
476c838f1561f5a02c6605e085227ec3b94b342c
|
refs/heads/master
| 2021-05-25T20:20:02.326178
| 2020-04-07T20:29:08
| 2020-04-07T20:29:08
| 253,904,314
| 0
| 1
| null | 2020-07-26T02:10:47
| 2020-04-07T20:27:24
|
Python
|
UTF-8
|
Python
| false
| false
| 10,602
|
#!/usr/bin/python
# Copyright (c) 2009 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Utility to launch an EC2 Instance
#
VERSION="0.2"
CLOUD_INIT_SCRIPT = """#!/usr/bin/env python
f = open("/etc/boto.cfg", "w")
f.write(\"\"\"%s\"\"\")
f.close()
"""
import boto.pyami.config
import boto.utils
import re, os
from boto.compat import ConfigParser
class Config(boto.pyami.config.Config):
"""A special config class that also adds import abilities
Directly in the config file. To have a config file import
another config file, simply use "#import <path>" where <path>
is either a relative path or a full URL to another config
"""
def __init__(self):
ConfigParser.__init__(self, {'working_dir' : '/mnt/pyami', 'debug' : '0'})
def add_config(self, file_url):
"""Add a config file to this configuration
:param file_url: URL for the file to add, or a local path
:type file_url: str
"""
if not re.match("^([a-zA-Z0-9]*:\/\/)(.*)", file_url):
if not file_url.startswith("/"):
file_url = os.path.join(os.getcwd(), file_url)
file_url = "file://%s" % file_url
(base_url, file_name) = file_url.rsplit("/", 1)
base_config = boto.utils.fetch_file(file_url)
base_config.seek(0)
for line in base_config.readlines():
match = re.match("^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line)
if match:
self.add_config("%s/%s" % (base_url, match.group(1)))
base_config.seek(0)
self.readfp(base_config)
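# Hypothetical example of the "#import" mechanism handled above (file names
# and section contents are made up for illustration):
#
#   #import common.cfg
#   [Instance]
#   instance-type = m1.small
#
# The referenced common.cfg is fetched relative to the importing file (or via
# any URL scheme boto.utils.fetch_file understands) and merged into this
# configuration before the importing file itself is read.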
def add_creds(self, ec2):
"""Add the credentials to this config if they don't already exist"""
if not self.has_section('Credentials'):
self.add_section('Credentials')
self.set('Credentials', 'aws_access_key_id', ec2.aws_access_key_id)
self.set('Credentials', 'aws_secret_access_key', ec2.aws_secret_access_key)
def __str__(self):
"""Get config as string"""
from StringIO import StringIO
s = StringIO()
self.write(s)
return s.getvalue()
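# Illustrative only, not part of the original script: a hypothetical config file
# using the "#import" directive handled by Config.add_config() above. Assuming a
# local file named "base.cfg" exists, the config passed to this tool could read:
#
#   #import base.cfg
#   [Credentials]
#   aws_access_key_id = AKIA...EXAMPLE
#   aws_secret_access_key = wJalr...EXAMPLE
#
# add_config() resolves "base.cfg" relative to the importing file, recursively
# merges any imported configs first, and only then parses the remaining lines.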
SCRIPTS = []
def scripts_callback(option, opt, value, parser):
arg = value.split(',')
if len(arg) == 1:
SCRIPTS.append(arg[0])
else:
SCRIPTS.extend(arg)
setattr(parser.values, option.dest, SCRIPTS)
def add_script(scr_url):
"""Read a script and any scripts that are added using #import"""
base_url = '/'.join(scr_url.split('/')[:-1]) + '/'
script_raw = boto.utils.fetch_file(scr_url)
script_content = ''
for line in script_raw.readlines():
match = re.match("^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line)
#if there is an import
if match:
#Read the other script and put it in that spot
script_content += add_script("%s/%s" % (base_url, match.group(1)))
else:
#Otherwise, add the line and move on
script_content += line
return script_content
if __name__ == "__main__":
try:
import readline
except ImportError:
pass
import sys
import time
import boto
from boto.ec2 import regions
from optparse import OptionParser
from boto.mashups.iobject import IObject
parser = OptionParser(version=VERSION, usage="%prog [options] config_url")
parser.add_option("-c", "--max-count", help="Maximum number of this type of instance to launch", dest="max_count", default="1")
parser.add_option("--min-count", help="Minimum number of this type of instance to launch", dest="min_count", default="1")
parser.add_option("--cloud-init", help="Indicates that this is an instance that uses 'CloudInit', Ubuntu's cloud bootstrap process. This wraps the config in a shell script command instead of just passing it in directly", dest="cloud_init", default=False, action="store_true")
parser.add_option("-g", "--groups", help="Security Groups to add this instance to", action="append", dest="groups")
parser.add_option("-a", "--ami", help="AMI to launch", dest="ami_id")
parser.add_option("-t", "--type", help="Type of Instance (default m1.small)", dest="type", default="m1.small")
parser.add_option("-k", "--key", help="Keypair", dest="key_name")
parser.add_option("-z", "--zone", help="Zone (default us-east-1a)", dest="zone", default="us-east-1a")
parser.add_option("-r", "--region", help="Region (default us-east-1)", dest="region", default="us-east-1")
parser.add_option("-i", "--ip", help="Elastic IP", dest="elastic_ip")
parser.add_option("-n", "--no-add-cred", help="Don't add a credentials section", default=False, action="store_true", dest="nocred")
parser.add_option("--save-ebs", help="Save the EBS volume on shutdown, instead of deleting it", default=False, action="store_true", dest="save_ebs")
parser.add_option("-w", "--wait", help="Wait until instance is running", default=False, action="store_true", dest="wait")
parser.add_option("-d", "--dns", help="Returns public and private DNS (implicates --wait)", default=False, action="store_true", dest="dns")
parser.add_option("-T", "--tag", help="Set tag", default=None, action="append", dest="tags", metavar="key:value")
parser.add_option("-s", "--scripts", help="Pass in a script or a folder containing scripts to be run when the instance starts up, assumes cloud-init. Specify scripts in a list specified by commas. If multiple scripts are specified, they are run lexically (A good way to ensure they run in the order is to prefix filenames with numbers)", type='string', action="callback", callback=scripts_callback)
parser.add_option("--role", help="IAM Role to use, this implies --no-add-cred", dest="role")
(options, args) = parser.parse_args()
if len(args) < 1:
parser.print_help()
sys.exit(1)
file_url = os.path.expanduser(args[0])
cfg = Config()
cfg.add_config(file_url)
for r in regions():
if r.name == options.region:
region = r
break
else:
print("Region %s not found." % options.region)
sys.exit(1)
ec2 = boto.connect_ec2(region=region)
if not options.nocred and not options.role:
cfg.add_creds(ec2)
iobj = IObject()
if options.ami_id:
ami = ec2.get_image(options.ami_id)
else:
ami_id = options.ami_id
l = [(a, a.id, a.location) for a in ec2.get_all_images()]
ami = iobj.choose_from_list(l, prompt='Choose AMI')
if options.key_name:
key_name = options.key_name
else:
l = [(k, k.name, '') for k in ec2.get_all_key_pairs()]
key_name = iobj.choose_from_list(l, prompt='Choose Keypair').name
if options.groups:
groups = options.groups
else:
groups = []
l = [(g, g.name, g.description) for g in ec2.get_all_security_groups()]
g = iobj.choose_from_list(l, prompt='Choose Primary Security Group')
while g != None:
groups.append(g)
l.remove((g, g.name, g.description))
g = iobj.choose_from_list(l, prompt='Choose Additional Security Group (0 to quit)')
user_data = str(cfg)
# If it's a cloud init AMI,
# then we need to wrap the config in our
# little wrapper shell script
if options.cloud_init:
user_data = CLOUD_INIT_SCRIPT % user_data
scriptuples = []
if options.scripts:
scripts = options.scripts
scriptuples.append(('user_data', user_data))
for scr in scripts:
scr_url = scr
if not re.match("^([a-zA-Z0-9]*:\/\/)(.*)", scr_url):
if not scr_url.startswith("/"):
scr_url = os.path.join(os.getcwd(), scr_url)
try:
newfiles = os.listdir(scr_url)
for f in newfiles:
#put the scripts in the folder in the array such that they run in the correct order
scripts.insert(scripts.index(scr) + 1, scr.split("/")[-1] + "/" + f)
except OSError:
scr_url = "file://%s" % scr_url
try:
scriptuples.append((scr, add_script(scr_url)))
except Exception as e:
pass
user_data = boto.utils.write_mime_multipart(scriptuples, compress=True)
shutdown_proc = "terminate"
if options.save_ebs:
shutdown_proc = "save"
instance_profile_name = None
if options.role:
instance_profile_name = options.role
r = ami.run(min_count=int(options.min_count), max_count=int(options.max_count),
key_name=key_name, user_data=user_data,
security_groups=groups, instance_type=options.type,
placement=options.zone, instance_initiated_shutdown_behavior=shutdown_proc,
instance_profile_name=instance_profile_name)
instance = r.instances[0]
if options.tags:
for tag_pair in options.tags:
name = tag_pair
value = ''
if ':' in tag_pair:
name, value = tag_pair.split(':', 1)
instance.add_tag(name, value)
if options.dns:
options.wait = True
if not options.wait:
sys.exit(0)
while True:
instance.update()
if instance.state == 'running':
break
time.sleep(3)
if options.dns:
print("Public DNS name: %s" % instance.public_dns_name)
print("Private DNS name: %s" % instance.private_dns_name)
|
[
"shwetaguj1989@gmail.com"
] |
shwetaguj1989@gmail.com
|
|
1ce7dea0fd7552ceedae5741ff1131aac9e99da4
|
6cb05a891514514ce94d80992c8eb2e80176c3b9
|
/aiohue/lights.py
|
37c5c26fc659fc21c3e49c2c39e86e0dd1816355
|
[] |
no_license
|
kampfschlaefer/aiohue
|
0b96310146afb5062678398355887dbbfc1cfd09
|
d6326466d3dc4d093c81ee3b4c66b5570c03785f
|
refs/heads/master
| 2020-03-07T15:57:08.509052
| 2018-03-26T20:43:06
| 2018-03-26T20:43:06
| 127,568,759
| 0
| 0
| null | 2018-03-31T20:29:08
| 2018-03-31T20:29:08
| null |
UTF-8
|
Python
| false
| false
| 1,773
|
py
|
from .api import APIItems
class Lights(APIItems):
"""Represents Hue Lights.
https://developers.meethue.com/documentation/lights-api
"""
def __init__(self, raw, request):
super().__init__(raw, request, 'lights', Light)
class Light:
"""Represents a Hue light."""
def __init__(self, id, raw, request):
self.id = id
self.raw = raw
self._request = request
@property
def uniqueid(self):
return self.raw['uniqueid']
@property
def manufacturername(self):
return self.raw['manufacturername']
@property
def name(self):
return self.raw['name']
@property
def state(self):
return self.raw['state']
@property
def type(self):
return self.raw['type']
async def set_state(self, on=None, bri=None, hue=None, sat=None, xy=None,
ct=None, alert=None, effect=None, transitiontime=None,
bri_inc=None, sat_inc=None, hue_inc=None, ct_inc=None,
xy_inc=None):
"""Change state of a light."""
data = {
key: value for key, value in {
'on': on,
'bri': bri,
'hue': hue,
'sat': sat,
'xy': xy,
'ct': ct,
'alert': alert,
'effect': effect,
'transitiontime': transitiontime,
'bri_inc': bri_inc,
'sat_inc': sat_inc,
'hue_inc': hue_inc,
'ct_inc': ct_inc,
'xy_inc': xy_inc,
}.items() if value is not None
}
await self._request('put', 'lights/{}/state'.format(self.id),
json=data)
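# Minimal usage sketch (illustrative, not part of the original module). It assumes
# `bridge_request` is an async callable with the same (method, path, **kwargs)
# signature used above, and `raw` is the JSON dict the Hue bridge returns for a
# single light:
#
#   light = Light('1', raw, bridge_request)
#   await light.set_state(on=True, bri=128)
#
# Only keyword arguments that are not None end up in the PUT body sent to
# "lights/1/state", so the call above just switches the light on and sets its
# brightness.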
|
[
"paulus@paulusschoutsen.nl"
] |
paulus@paulusschoutsen.nl
|
c59b42d597c1e95f261ae28f9ba59ed424761e4e
|
7673794038fc0d12588c45ebb8922ab00871949b
|
/ichnaea/api/locate/tests/conftest.py
|
6ff80a2b369e413c4e12b9bc3eb0aa7c04de418a
|
[
"Apache-2.0"
] |
permissive
|
BBOXX/ichnaea
|
a7fb816f5eca6f07391a487ead59a4d4b12585f1
|
15362d5b4d2a45d28cdf4864a89c9d3fa62b8c28
|
refs/heads/bboxx
| 2021-09-13T19:16:29.303460
| 2018-05-03T10:52:34
| 2018-05-03T10:52:34
| 103,156,005
| 1
| 1
|
Apache-2.0
| 2018-05-03T11:08:35
| 2017-09-11T15:50:23
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,493
|
py
|
import pytest
from ichnaea.api.locate.tests.base import DummyModel
@pytest.fixture(scope='class')
def cls_source(request, data_queues, geoip_db, http_session,
raven_client, redis_client, stats_client):
source = request.cls.Source(
geoip_db=geoip_db,
raven_client=raven_client,
redis_client=redis_client,
stats_client=stats_client,
data_queues=data_queues,
)
yield source
@pytest.fixture(scope='function')
def source(cls_source, raven, redis, stats):
yield cls_source
@pytest.fixture(scope='session')
def bhutan_model(geoip_data):
bhutan = geoip_data['Bhutan']
yield DummyModel(
lat=bhutan['latitude'],
lon=bhutan['longitude'],
radius=bhutan['radius'],
code=bhutan['region_code'],
name=bhutan['region_name'],
ip=bhutan['ip'])
@pytest.fixture(scope='session')
def london_model(geoip_data):
london = geoip_data['London']
yield DummyModel(
lat=london['latitude'],
lon=london['longitude'],
radius=london['radius'],
code=london['region_code'],
name=london['region_name'],
ip=london['ip'])
@pytest.fixture(scope='session')
def london2_model(geoip_data):
london = geoip_data['London2']
yield DummyModel(
lat=london['latitude'],
lon=london['longitude'],
radius=london['radius'],
code=london['region_code'],
name=london['region_name'],
ip=london['ip'])
|
[
"hanno@hannosch.eu"
] |
hanno@hannosch.eu
|
3246b5ca210e7f3305d2f91f378ef2974217d3fb
|
65fdf5839d7b017930f223819bbafccc742fe066
|
/human_detection_z5221116/human_detectors.py
|
5d178f4f5b79d73100591cbb261765983f6b1087
|
[] |
no_license
|
denzilsaldanha/comp9517
|
f5a366da8a41eecbb1eb2aa11451fa9695276137
|
bce350fb438d9f3154570155cd1c2aea61a9e784
|
refs/heads/master
| 2022-04-27T10:38:43.752791
| 2020-04-24T14:04:04
| 2020-04-24T14:04:04
| 254,309,655
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,201
|
py
|
import numpy as np
import cv2
class Human_Detectors(object):
# Class to detect objects in a frame
def __init__(self):
self.sub = cv2.createBackgroundSubtractorMOG2()
def Detect(self, frame):
"""
Detects objects in the single video frame with the following steps:
1. Convert frame in gray scale
2. Apply background subtraction
3. Apply some morphology techniques
4. Get contours
5. Get centroid of the contours using cv2.Moments
6. Draw rectangle around the contour.
7. Collect all the center points in a list and return the list
"""
# Convert BGR to GRAY
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        #apply background subtraction to the greyscale image
fgmask = self.sub.apply(gray)
        # initialize a kernel to apply a morphological transformation to reduce noise
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
# Closing is reverse of Opening, Dilation followed by Erosion
closing = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
# Opening is just another name of erosion followed by dilation
opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)
# increases the white region in the image or size of foreground object increases
dilation = cv2.dilate(opening, kernel)
# setting all pixel values above 220 to be 255 - shadow removal
retvalbin, bins = cv2.threshold(dilation, 220, 255, cv2.THRESH_BINARY)
_, contours, hierarchy = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
minimum_area = 400
maximum_area = 50000
centers = []
# goes through all contours in a single frame
for x in range(len(contours)):
# checks only for the parent contour
if hierarchy[0, x, 3] == -1:
#calculate area for each contour to place the bounding box
contour_area = cv2.contourArea(contours[x])
if minimum_area<contour_area<maximum_area:
#cont_num+=1
cont = contours[x]
# compute the centre of the contour
M = cv2.moments(cont)
cx = int(M['m10'] / M['m00'])
cy = int(M['m01'] / M['m00'])
centroid = (cx,cy)
b = np.array([[cx], [cy]])
centers.append(np.round(b))
# find coordinats of straight bounding rectangle of a contour
x_coord, y_coord, width, height = cv2.boundingRect(cont)
# draw a rectangle around the contour
cv2.rectangle(frame, (x_coord, y_coord), (x_coord + width, y_coord + height), (0, 255, 0), 2)
cv2.putText(frame, str(cx) + "," + str(cy), (cx + 10, cy + 10), cv2.FONT_HERSHEY_SIMPLEX,.3, (0, 0, 255), 1)
cv2.drawMarker(frame, (cx, cy), (0, 255, 255), cv2.MARKER_SQUARE, markerSize=6, thickness=2,line_type=cv2.LINE_8)
return centers
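# Minimal usage sketch (illustrative, not part of the original file; the video
# path is hypothetical):
#
#   cap = cv2.VideoCapture("pedestrians.mp4")
#   detector = Human_Detectors()
#   ok, frame = cap.read()
#   while ok:
#       centers = detector.Detect(frame)  # list of 2x1 arrays of centroid coordinates
#       cv2.imshow("detections", frame)   # Detect() draws the boxes/markers on `frame` in place
#       if cv2.waitKey(30) & 0xFF == 27:  # Esc to quit
#           break
#       ok, frame = cap.read()
#   cap.release()
#   cv2.destroyAllWindows()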
|
[
"noreply@github.com"
] |
denzilsaldanha.noreply@github.com
|
e206ea87f9a2bf71479d06b81a9cc8424ea22e8c
|
3c76e51f90e8875f823fda2795b9041465d3afd6
|
/Douglas_Peucker.py
|
e56cdeafe16c7a33327cf8ccee3bb58dc95c14fb
|
[] |
no_license
|
hansintheair/Douglas_Peucker_Geoprocessing
|
087c6d37e5b88f8a1fef20c59ed16969b2805bae
|
40e21d7a8ca8e60d3573850d80cd631e8586165c
|
refs/heads/master
| 2020-05-01T12:15:46.288607
| 2019-03-28T00:11:31
| 2019-03-28T00:11:31
| 177,461,800
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,364
|
py
|
# --Douglas-Peucker Algorithm--
#
# Program by: Hannes Ziegler
# Date: 12/1/2013 (updated 3/24/2019)
#
# --Description--
# The Douglas-Peucker algorithm takes as input a set of
# x and y coordinates and reduces the number of vertices
# within the given set of points.
#
#-------------------------------------------------------
# -- Import Libraries -- #
import os, math, arcpy
# Set arcpy environment settings
arcpy.env.overwriteOutput = True
# -- Function Definitions -- #
class Line:
def __init__(self, point_a, point_b):
self._xa = float(point_a[0])
self._ya = float(point_a[1])
self._xb = float(point_b[0])
self._yb = float(point_b[1])
def __repr__(self):
return 'Line ({!r}, {!r}), ({!r}, {!r})'.format(self._xa, self._ya, self._xb, self._yb)
def slope(self): # Returns m (slope) of two points (xa, ya) and (xb, yb).
return (self._ya- self._yb)/(self._xa- self._xb)
def distance(self): # Returns d (distance) between two points (xa, ya) and (xb, yb).
return math.hypot(self._xa - self._xb, self._ya - self._yb)
def y_intercept(self): # Returns b (y-intercept) of a line with slope m and point (x, y).
return self._ya - (self.slope()*self._xa)
def slope_reciprocal(self): # Returns the reciprocal of slope, the slope of a line perpendicular to this line.
return -1/self.slope()
def intersect(self, other): #Returns the point where a second line (given as other) intersects this line (given as self)
self_b, other_b, self_m, other_m = self.y_intercept(), other.y_intercept(), self.slope(), other.slope()
point_x = (other_b - self_b) / (self_m - other_m)
point_y = (other_m * point_x) + other_b
return [point_x, point_y]
def line_of_perp_offset(self, offset_point):
self_b, self_m, perp_m = self.y_intercept(), self.slope(), self.slope_reciprocal()
perp_b = offset_point[1] - (perp_m * offset_point[0])
intersect_x = (perp_b - self_b) / (self_m - perp_m)
intersect_y = (perp_m * intersect_x) + perp_b
intersect_point = [intersect_x, intersect_y]
return Line(offset_point, intersect_point)
# perpendicular_distances(point_list): Selects the first and last point on the line as key points,
#                                       finds the perpendicular distances of all points in between
#                                       the key points to the line created by the key points,
#                                       and returns the list of those distances (enumerate_max
#                                       below is used to pick out the largest distance and the
#                                       index of the point it corresponds to). point_list must
#                                       contain at least three points.
def perpendicular_distances(point_list):
#Create Line object from first and last point in point_list.
trunk = Line(point_list[0], point_list[-1])
#Create a list of perpendicular distances to trunk line from all points between first and last points in point list
return [trunk.line_of_perp_offset(offset_point).distance() for offset_point in point_list[1:-1]]
def enumerate_max(values):
max_val = max(values)
i = values.index(max_val)
return i, max_val
# Douglas_Peucker_Algorithm(point_list, tolerance): Implements the Douglas-Peucker Algorithm to reduce the number of vertices
# in a set of points.
def douglas_peucker_algorithm(point_list, tolerance):
point_list_copy = [point_list]
x = 0
while x < len(point_list_copy): #Enter a while loop to iterate over the recursively expanding list of split and reduced lines.
if len(point_list_copy[x]) <= 2: #When a line has been reduced to two points, skip over it and move on to the next line.
pass
else: #Otherwise ->
perp_distances = perpendicular_distances(point_list_copy[x]) #find the perpendicular distances of all points between the first and last point.
            i, largest = enumerate_max(perp_distances) #find the largest perpendicular distance and note its index in the list.
i+=1
if largest >= float(tolerance): #If the largest distance is longer than the tolerance, split the line at the noted index of largest perpendicular distance.
point_list_copy.insert(x+1, point_list_copy[x][i:])
point_list_copy.insert(x+1, point_list_copy[x][:i+1])
point_list_copy.remove(point_list_copy[x]) # remove the initial list and (previous two statements) add the two split lines in its place.
x-=1
            else: #If the largest distance is shorter than the tolerance, remove all the points in between the first and last points.
point_list_copy.insert(x+1, [point_list_copy[x][0], point_list_copy[x][-1]])
                point_list_copy.remove(point_list_copy[x]) #remove the initial list and place the shortened list in its place.
x+=1
point_list = point_list_copy[0] #Re-format the raw list of points returned by the previous operation into the actual remaining points.
for item in point_list_copy[1:]:
point_list.append(item[-1])
return point_list
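# Worked example (illustrative, not part of the original tool): with a tolerance
# of 0.5 the vertex at (2, 2.1) lies within tolerance of the segment joining its
# neighbouring key points and is dropped, while (1, 0.1) deviates from the line
# through the endpoints by more than the tolerance and is kept:
#
#   >>> douglas_peucker_algorithm([[0, 0], [1, 0.1], [2, 2.1], [3, 3]], 0.5)
#   [[0, 0], [1, 0.1], [3, 3]]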
# -- Begin Main Body -- #
infc = arcpy.GetParameterAsText(0) #input featureclass (must be a polyline)
infc_path = arcpy.Describe(infc).catalogPath
outpath = arcpy.GetParameterAsText(1) #output featureclass
outdir, outname = os.path.split(outpath)
tolerance = float(arcpy.GetParameterAsText(2)) #set the tolerance which is used to determine how much noise is removed from the line.
outfc = arcpy.management.CreateFeatureclass(outdir, outname) #create output featureclass
arcpy.management.Copy(infc_path, outfc, "Datasets") #copy output features
#arcpy.AddMessage("Tolerance: " + str(tolerance)) ##DEBUG##
with arcpy.da.UpdateCursor(outfc, "SHAPE@") as cursor:
for row in cursor:
for part in row[0]:
line = [[point.X, point.Y] for point in part]
line = douglas_peucker_algorithm(line, tolerance)
array = arcpy.Array()
for xy in line:
array.append(arcpy.Point(xy[0], xy[1]))
polyline = arcpy.Polyline(array)
row[0] = polyline
cursor.updateRow(row)
|
[
"hannesz1@gmail.com"
] |
hannesz1@gmail.com
|
8187122813a07a9a2b003bec6922431e0665d295
|
cc687705b763653a325347315739536ff5fc348a
|
/day06/day06.py
|
4d3d8798062d05b061adbc7957c2ecd6a4008c8a
|
[] |
no_license
|
t-ah/adventofcode-2016
|
0220a2a5f847a756d6816c790aa13e343fc24cc8
|
3c860f23cc36a47b0d9b97f504524bc6d99be954
|
refs/heads/master
| 2021-04-26T23:34:59.445632
| 2016-12-25T08:15:02
| 2016-12-25T08:15:02
| 123,824,083
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
#!/usr/bin/env python
from collections import Counter
counters = []
for i in range(8):
counters.append(Counter())
with open("input.txt", "r") as f:
for line in f:
for i in range(8):
counters[i][line[i]] += 1
result, result2 = "", ""
for i in range(8):
result += counters[i].most_common(1)[0][0]
result2 += counters[i].most_common()[:-2:-1][0][0]
print "Part One:", result, "\nPart Two:", result2
|
[
"ta10@tu-clausthal.de"
] |
ta10@tu-clausthal.de
|
2acf11f1e923f7d443ab75e7ae81e857adcfdd51
|
6c35789131e3f934538f4a65970fd8668ca64bc7
|
/app/seeds/users.py
|
00492a70ed7ede2d693f9181537951fe47e57237
|
[] |
no_license
|
AmberJolieH/CollabHERative-React-Flask-Python-Web-App
|
368552f1387d822b9ecd79af9b9e30b54319c9bd
|
8ba5ab13f217dfe59ab38d2618b674c229ed1dc6
|
refs/heads/main
| 2023-04-06T13:27:58.121287
| 2021-04-22T01:15:28
| 2021-04-22T01:15:28
| 343,210,035
| 10
| 0
| null | 2021-04-19T21:03:52
| 2021-02-28T20:41:38
|
Python
|
UTF-8
|
Python
| false
| false
| 3,364
|
py
|
from werkzeug.security import generate_password_hash
from app.models import db, User
# Adds a demo user, you can add other users here if you want
def seed_users():
demo = User(username='Demo', email='demo@aa.io',
password='password', firstname="Demo", lastname="User",
techcategoryid=1, imgurl='https://collabherative.s3.us-east-2.amazonaws.com/demo_user.png')
Amber = User(username='Amberjolie', email='amberjolieh@gmail.com',
password='password', firstname='Amber', lastname='Horn',
techcategoryid=3,
imgurl="https://collabherative.s3.us-east-2.amazonaws.com/ambernew_Profile.png")
Courtney = User(username='CJNewcomer',
email='courtney@test.com', password='password',
firstname='Courtney', lastname='Newcomer',
techcategoryid=3, imgurl="https://collabherative.s3.us-east-2.amazonaws.com/Courtney_Profile.png")
Arianna = User(username='AriannaJ', email='arianna@test.com',
password='password',
firstname='Arianna', lastname='Johnson', techcategoryid=3,
imgurl='https://collabherative.s3.us-east-2.amazonaws.com/Arianna_Profile-3.png')
Nicole = User(username='NicoleL', email='Nicole@test.com',
password='password', firstname='Nicole', lastname='Loescher',
techcategoryid=3, imgurl='https://collabherative.s3.us-east-2.amazonaws.com/Nicole_Profile.png')
Kristen = User(username='KristenF', email='kristen@test.com',
password='password', firstname='Kristen',
lastname='Florey', techcategoryid=1, imgurl='https://collabherative.s3.us-east-2.amazonaws.com/Kristen_Profile-2.png'
)
Zoe = User(username='ZoeD', email='Zoe@test.com',
password='password', firstname='Zoe', lastname='D',
techcategoryid=4, imgurl='https://collabherative.s3.us-east-2.amazonaws.com/Zoe_Profile.png')
Valarie = User(username='ValarieB', email='valarie@test.com',
password='password', firstname='Valarie', lastname='B',
techcategoryid=5, imgurl='https://collabherative.s3.us-east-2.amazonaws.com/Valarie_Profile.png')
Tara = User(username='TaraK', email='tara@test.com', password='password',
firstname='Tara', lastname='K',
techcategoryid=6, imgurl='https://collabherative.s3.us-east-2.amazonaws.com/Tara_Profile.png')
Sarah = User(username='SarahT', email='sarah@test.com', password='password',
                 firstname='Sarah', lastname='T', techcategoryid=7, imgurl='https://collabherative.s3.us-east-2.amazonaws.com/Sarah_Profile.png')
db.session.add(demo)
db.session.add(Amber)
db.session.add(Courtney)
db.session.add(Arianna)
db.session.add(Nicole)
db.session.add(Kristen)
db.session.add(Zoe)
db.session.add(Valarie)
db.session.add(Tara)
db.session.add(Sarah)
db.session.commit()
# Uses a raw SQL query to TRUNCATE the users table.
# SQLAlchemy doesn't have a built in function to do this
# TRUNCATE Removes all the data from the table, and resets
# the auto incrementing primary key
def undo_users():
db.session.execute('TRUNCATE users RESTART IDENTITY CASCADE;')
db.session.commit()
|
[
"amberjolieh@gmail.com"
] |
amberjolieh@gmail.com
|
e7ef8060788cf4a52389f687dd1b722418b978b0
|
96d53a5a1264487e51a5271298115c2d810298b2
|
/url_open_sssss.py
|
0285e7d472c924fff21a300d3ef38938c81b6116
|
[] |
no_license
|
wangyeon-Lee/Yeon
|
2bac1db96762b6e249d30e694b96aa9aa4d73956
|
4a83bed3d2eb6aa762a1d6b351f119b50d64b7b6
|
refs/heads/master
| 2020-09-23T00:26:17.112200
| 2019-12-23T09:12:41
| 2019-12-23T09:12:41
| 225,353,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
import requests
from bs4 import BeautifulSoup
url = "https://bp.eosgo.io/"
result = requests.get(url =url)
bs_obj = BeautifulSoup(result.content, "html.parser")
lf_items = bs_obj.findAll("div", {"class":"lf-item"})
print(lf_items)
hrefs = [div.find("a")['href'] for div in lf_items]
print(len(hrefs[0:5]))
print(hrefs[0:5])
|
[
"noreply@github.com"
] |
wangyeon-Lee.noreply@github.com
|
fa50e89a704e23243f033a882d261bf04cd5a1f2
|
cbfc607f93a3e17762ef0bf861d83a1805d7f234
|
/tambahan/config/docs.py
|
79e611b7bd9df370a1fc8395f735039ee81040d7
|
[
"MIT"
] |
permissive
|
zeta17/tambahan
|
ce5c0ff1b04d471209372c6addc3f136791edac0
|
d7b0378f78a889544830e350f3b06f93afea0b4f
|
refs/heads/master
| 2021-01-10T01:46:17.710544
| 2017-11-18T06:38:02
| 2017-11-18T06:38:02
| 53,401,529
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
"""
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/tambahan"
# docs_base_url = "https://[org_name].github.io/tambahan"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "Tambahan"
|
[
"hendrik.zeta@gmail.com"
] |
hendrik.zeta@gmail.com
|
9c1824153db15e45e28d51dd4e53c8e05177ac26
|
ebbc2ebe88e90c212e21819a966218fab48a2c5e
|
/simple_lr.py
|
7bd3a1c9e727ad70a498e353245323a5c2bdf6f8
|
[] |
no_license
|
kushal-07/DS281220-ML
|
75bc2b78466b592058a70e474ef002438c918d97
|
842dd86f0363ee2bb149ca7ce80294329f2d73cb
|
refs/heads/main
| 2023-07-03T14:53:55.519038
| 2021-08-08T14:54:30
| 2021-08-08T14:54:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 23 20:04:55 2021
@author: RISHBANS
"""
import pandas as pd
dataset = pd.read_csv("Company_Profit.csv")
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 1].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(X_train, y_train)
print(lr.score(X_train, y_train))
y_pred = lr.predict(X_train)
#Training Set
import matplotlib.pyplot as plt
plt.scatter(X_train, y_train, color = 'orange')
plt.plot(X_train, y_pred, color = 'red')
plt.scatter(X_train, y_pred, color = 'blue')
plt.title("training set")
plt.xlabel("years in operation")
plt.ylabel("profit")
plt.show()
#Test Set
plt.scatter(X_test, y_test, color = 'orange')
plt.plot(X_test, lr.predict(X_test), color = 'red')
plt.scatter(X_test, lr.predict(X_test), color = 'blue')
plt.title("Test Set")
plt.xlabel("years in operation")
plt.ylabel("profit")
plt.show()
|
[
"rishibansal02@gmail.com"
] |
rishibansal02@gmail.com
|
bfd6412dc1be79d80a09398afbb60d97a7e669a7
|
73ea8e2cb158cb363acad15ae5d410760c6c0970
|
/rc4_debug.py
|
826c52a3a82ea954bb2464c0d70ac8f8886615c2
|
[] |
no_license
|
bytemare/wep_rc4_chop_chop
|
5d6edd65302e40de8bd1d89ef839cbaec44f3ae1
|
eaf4439305a77ad391415e30e8240f3a97504fba
|
refs/heads/master
| 2021-05-11T00:14:52.822118
| 2019-01-14T23:43:59
| 2019-01-14T23:43:59
| 118,300,436
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,663
|
py
|
import sys
from os import urandom
from sys import version_info
from bitstring import Bits, BitArray
fun_name = sys._getframe().f_code.co_name
if version_info[0] < 3:
raise Exception("Python 3 or a more recent version is required.")
class Frame:
def __init__(self, iv, crc, payload):
self.iv = iv
        self.crc = crc # CRC32, kept in the clear
        self.payload = payload # encrypted
def is_valid(self, key: Bits, verbose=True):
"""
(copy) Reduced function of below "rc4_decrypt"
Returns True or False whether the Frame is valid, i.e. its crc32 is coherent to the message transported
:param verbose:
:param key:
:return: True or False
"""
ivk = wep_make_ivk(key, self.iv)
if verbose:
debug(verbose, fun_name + " : ivk = " + str(ivk))
decrypted = rc4_crypt(self.payload, ivk, verbose)
if verbose:
debug(verbose, fun_name + " : decrypted = " + str(ivk))
decrypted_message = decrypted[:-len(self.crc)]
if verbose:
debug(verbose, fun_name + " : decrypted_message = " + str(decrypted_message))
decrypted_crc = decrypted[-len(self.crc):]
if verbose:
debug(verbose, fun_name + " : decrypted_crc = " + str(decrypted_crc))
int_computed_crc, computed_crc = crc32(decrypted_message)
if verbose:
debug(verbose, fun_name + " : computed_crc = " + str(computed_crc))
debug(verbose, fun_name + " : computed_crc = " + str(int_computed_crc))
debug(verbose, fun_name + " : frame_crc = " + str(self.crc))
return decrypted_crc == computed_crc
def __iter__(self):
yield self.iv
yield self.crc
yield self.payload
def __str__(self):
return "Initialisation Vector : " + str(self.iv) + "\nCRC32 : " + str(
self.crc) + "\nEncrypted payload : " + str(self.payload)
def wep_make_ivk(key: Bits, iv: Bits, order="key+iv"):
"""
Given a key and initialisation vector, returns the concatenation of both,
depending on the order given by order (never sure what order it is)
Default is to append vi to key.
:param key:
:param iv:
:param order:
:param debug:
:return:
"""
if order == "key+iv":
return key + iv
elif order == "iv+key":
return iv + key
else:
raise ValueError("Unhandled value for argument 'orrder' : " + order + ". Try 'key+iv' or 'iv+key'.")
def debug(state, message):
"""
If state is set to True, then message is printed. If not, nothing happens.
:param state:
:param message:
:return:
"""
if state:
print(message)
def crc32(data: Bits):
"""
Calculates the CRC32 value of message m
:param data:
:return: bytearray
"""
m = bytearray(data.tobytes())
remainder = int("0xFFFFFFFF", 16)
# qx = int("0x04C11DB7", 16)
qx = int("0xEDB88320", 16)
for i in range(len(m) * 8):
bit = (m[i // 8] >> (i % 8)) & 1
remainder ^= bit
if remainder & 1:
multiple = qx
else:
multiple = 0
remainder >>= 1
remainder ^= multiple
result = ~remainder % (1 << 32)
return result, Bits(uint=result, length=32)
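# Added note (not in the original file): the bit loop above is the bit-serial form
# of the standard reflected CRC-32 (polynomial 0xEDB88320, init and final XOR of
# 0xFFFFFFFF), so it is expected to agree with zlib/binascii; e.g.
# crc32(Bits(b"123456789"))[0] should give the usual check value 0xCBF43926.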
def rc4_extended_crc32(m: Bits):
"""
Given a message m, returns encoding of (as by X^32 . m(X)) and the CRC32 of m
:param m:
:return:
"""
return m + crc32(m)[1]
def rc4_ksa(key_bits: Bits):
"""
Key-Scheduling Algorithm
Given a key, returns the RC4 register after initialisation phase.
:param key_bits:
:return r: rc4 initialised register
"""
key = bytearray(key_bits.tobytes())
w = 256
r = list(range(w))
keylength = len(key)
j = 0
for i in range(w):
j = (j + r[i] + key[i % keylength]) % w
r[i], r[j] = r[j], r[i]
return r
def rc4_prga(r, t: int):
"""
Pseudo-random generation algorithm
Given a register R and an integer t, returns a RC4 cipher stream of length t
:param stream:
:param r:
:type t: int
:return:
"""
w = 256
i = j = 0
s = BitArray()
print("CHANGE THE STREAM LENGTH HERE !!!")
t = t // 8
for l in range(t):
i = (i + 1) % w
j = (j + r[i]) % w
r[i], r[j] = r[j], r[i]
k = r[(r[i] + r[j]) % w]
s += Bits(bytearray(k.to_bytes(1, byteorder='big')))
debug(True, fun_name + " : stream = " + str(s))
return s
def rc4_crypt(m: Bits, k: Bits, verbose=True):
"""
RC4 Encryption
Can be used for encryption and decryption
Given a message m and key k, returns the rc4 de/encryption of m with key k
:param verbose:
:type m: Bits
:type k: Bits
:return:
"""
length = len(m)
r = rc4_ksa(k)
debug(verbose, fun_name + " : length = " + str(length))
debug(verbose, fun_name + " : m (= " + str(m.len) + ") : " + str(m))
debug(verbose, fun_name + " : r = " + str(r))
stream = rc4_prga(r, length)
debug(verbose, fun_name + " : cipherstream (" + str(stream.len) + ") : " + str(stream))
"""
s = Bits()
a = bytearray()
for l in range(length):
n = next(stream)
t = bytearray(n.to_bytes(1, byteorder='big'))
a.extend(t)
s += Bits(t)
debug(verbose, fun_name + " : cipherstream(generator) = " + str(s))
debug(verbose, fun_name + " : cipherstream(generator) = " + str(Bits(a)))
"""
retained_stream = stream
result = m ^ retained_stream
debug(verbose, fun_name + " : key = " + str(k))
debug(verbose, fun_name + " : stream = " + str(retained_stream))
debug(verbose, fun_name + " : message = " + str(m))
debug(verbose, fun_name + " : result = " + str(result))
return result
def random_iv(length=24):
"""
Returns a list of random bits, with default length 24.
:param length:
:return: Bits
"""
n_bytes = -(-length // 8) # round up by upside down floor division
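    # e.g. -(-24 // 8) == 3 and -(-25 // 8) == 4, so bit lengths that are not a
    # multiple of 8 are rounded up to a whole number of random bytes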
return Bits(urandom(n_bytes))
def wep_rc4_encrypt(m: Bits, k: Bits, verbose=True):
"""
RC4 Encryption in WEP mode
Given a message m and key k, returns the WEP implementation of the rc4 encryption of m with key k
:type m
:param k:
:return:
"""
iv = random_iv()
debug(verbose, fun_name + " : iv = " + str(iv))
ivk = wep_make_ivk(k, iv)
debug(verbose, fun_name + " : ivk = " + str(ivk))
cipher = rc4_crypt(m, ivk)
debug(verbose, fun_name + " : cipher = " + str(cipher))
return iv, cipher
def wep_make_frame(m: Bits, key: Bits, verbose=True):
"""
FR : Trame
Given a message m and a key k, returns a frame, i.e. :
- an IV, associated to the frame
- a CRC32 of m (noted crc)
- a WEP RC4 cipher of m || crc
:param m:
:param key:
:return: IV, CRC, Cipher
"""
crc = crc32(m)[1]
debug(verbose, fun_name + " : crc = " + str(crc))
m_and_crc = m + crc
debug(verbose, fun_name + " : m_and_crc = " + str(m_and_crc))
iv, cipher = wep_rc4_encrypt(m_and_crc, key)
return Frame(iv, crc, cipher)
def rc4_decrypt(k: Bits, frame: Frame, verbose=True):
"""
Given a key k and frame f, decrypts frame with key and returns cleartext.
An error is raised if frame is not a valid frame.
:type k: bytearray
:type frame: Frame
:return:
"""
    # Prepare key for decryption
ivk = wep_make_ivk(k, frame.iv)
debug(verbose, fun_name + " : ivk = " + str(ivk))
# Decrypt
decrypted_payload = rc4_crypt(frame.payload, ivk)
debug(verbose, fun_name + " : decrypted_payload = " + str(decrypted_payload))
# Get the cleartext and the crc that were in the encrypted packet
cleartext_msg = decrypted_payload[:-len(frame.crc)]
decrypted_crc = decrypted_payload[-len(frame.crc):]
debug(verbose, fun_name + " : cleartext_msg = " + str(cleartext_msg))
debug(verbose, fun_name + " : decrypted_crc = " + str(decrypted_crc))
# Compute crc32 from decrypted message
computed_crc = crc32(cleartext_msg)[1]
debug(verbose, fun_name + " : computed_crc = " + str(computed_crc))
# Check if Frame is valid by verifying crc32 fingerprints
try:
assert decrypted_crc == computed_crc
except AssertionError:
return "[ERROR] MAC ERROR. Invalid Frame (possibly corrupted). Cause : crc32 invalidation."
debug(verbose, fun_name + " : Frame is valid.")
return cleartext_msg
def mix_crc(a: Bits, b: Bits, c: Bits, verbose=True):
"""
Given 3 bytearrays, returns the xor between the 3 crcs of the input data
:param a:
:param b:
:param c:
:return:
"""
i_a_crc, _ = crc32(a)
i_b_crc, _ = crc32(b)
i_c_crc, _ = crc32(c)
xor = i_a_crc ^ i_b_crc ^ i_c_crc
debug(verbose, fun_name + " : crc(a) ^ crc(b) ^ crc(c) = " + str(xor))
return xor
def prepend_zeros(data: bytes, length: int):
"""
Given a bytes type input, returns it prepended with length '0'
:param data:
:param length:
:return:
"""
print("prepend " + str(length))
return length * b"0" + data
def bin_inject(m_prime: Bits, m: Bits, frame, key: Bits, verbose=True):
"""
Given two messages m1 and m2, and the frame associated with m2 (as by the return values of wep_frame()),
returns a valid frame for m1^m2
=== Trick ===
Base :
crc(m1^m2^m3) = crc(m1) ^ crc(m2) ^ crc(m3)
if you take m3 = 0, xoring the messages is like m1^m2.
Hence,
crc(m1^m2) = crc(m1) ^ crc(m2) ^ crc(0)
Therefore :
rc4(k||iv) ^ ( (m1^m2) || crc(m1^m2) )
= rc4(k||iv) ^ (m1 || crc(m1)) ^ (m2 || crc(m2) ^ ( crc(0) ))
Conclusion :
To inject, you simply xor the encrypted payload with
inject_message || ( crc(inject_message) ^ crc(0) )
In decryption, this would give the following
decrypted payload : ( inject_message ^ m ) || ( crc(inject_message) ^ crc(m) ^ crc(0) )
Since we have
crc(inject_message ^ m) = crc(inject_message) ^ crc(m) ^ crc(0)
The decrypted message is considered valid.
=============
    What we will do here is, given a frame for message m, inject the message 'inject_message'.
:param m_prime:
:param m:
:param frame:
:return:
"""
reference_length = len(frame.payload) - len(frame.crc)
debug(verbose, fun_name + " : reference_length = " + str(reference_length))
inject = m_prime
debug(verbose, fun_name + " : inject length = " + str(inject.len))
inject_crc_bits = crc32(m_prime)[1]
zero_bits = Bits((reference_length // 8) * b"\0")
debug(verbose, fun_name + " : zero length = " + str(zero_bits.len))
zero_crc_bits = crc32(zero_bits)[1]
debug(verbose, fun_name + " : zero_bits = " + str(zero_bits))
debug(verbose, fun_name + " : zero_crc_bits = " + str(zero_crc_bits))
debug(verbose, fun_name + " : inject = " + str(inject))
debug(verbose, fun_name + " : inject^0 = " + str(inject ^ zero_bits))
m_crc_bits = crc32(m)[1]
inject_crc_suffix = inject_crc_bits ^ zero_crc_bits
debug(verbose, fun_name + " : inject_crc_suffix = " + str(inject_crc_suffix))
resulting_crc = inject_crc_suffix ^ m_crc_bits
debug(verbose, fun_name + " : resulting_crc = " + str(resulting_crc))
xored_payload_without_zero = m ^ inject
xored_payload_with_zero = m ^ inject ^ zero_bits
debug(verbose, fun_name + " : xored_payload_wo_zero = " + str(xored_payload_without_zero))
debug(verbose, fun_name + " : xored_payload_w_zero = " + str(xored_payload_with_zero))
computed_crc_wo_zero = crc32(xored_payload_without_zero)[1]
computed_crc_w_zero = crc32(xored_payload_with_zero)[1]
debug(verbose, fun_name + " : computed_crc_wo_zero = " + str(computed_crc_wo_zero))
debug(verbose, fun_name + " : computed_crc_w_zero = " + str(computed_crc_w_zero))
debug(verbose, fun_name + " : inject_crc_suffix = " + str(inject_crc_suffix))
result_payload = frame.payload ^ (inject + inject_crc_suffix)
debug(verbose, fun_name + "### Verification ...")
ivk = wep_make_ivk(key, frame.iv)
r = rc4_ksa(ivk)
# stream = rc4_prga(r, len(m))
# cipherstream = frame.payload ^ (m + m_crc_bits)
return Frame(frame.iv, resulting_crc, result_payload)
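def crc_linearity_check(m1: Bits, m2: Bits, verbose=True):
    """
    Illustrative helper, not part of the original exercise: sanity-checks the CRC32
    property that bin_inject relies on, i.e. for equal-length messages
    crc(m1 ^ m2) == crc(m1) ^ crc(m2) ^ crc(0...0).
    Example: crc_linearity_check(Bits(b"secret"), Bits(b"000yay")) should return True.
    :param m1:
    :param m2:
    :param verbose:
    :return: True if the property holds for these two messages
    """
    zero = Bits(bytes(len(m1) // 8))
    lhs = crc32(m1 ^ m2)[0]
    rhs = crc32(m1)[0] ^ crc32(m2)[0] ^ crc32(zero)[0]
    debug(verbose, "crc(m1^m2) = " + str(lhs) + " ; crc(m1)^crc(m2)^crc(0) = " + str(rhs))
    return lhs == rhs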
if __name__ == '__main__':
# Variables (You would want to play here and change the values)
# plaintext = "My cleartext"
# secret_key = "Key"
# inject_message = "is modified!"
plaintext = b"000yay"
secret_key = b"c"
inject_message = b"secret"
print("=== Test Run ===")
print("=> Plaintext : " + str(plaintext))
print("=> secret : " + str(secret_key))
print("=> injection message : " + str(inject_message))
print("")
print("### Setting parameters ...")
# Plaintext
plain = bytearray()
plain.extend(plaintext)
# Secret
key = bytearray()
key.extend(secret_key)
injection = bytearray()
injection.extend(inject_message)
print("")
print("### 1. Executing CRC32:=proc(M) ###")
print("CRC32(plaintext) = " + str(crc32(Bits(plain))[0]))
print("")
print("### 2. Executing RC4KSA:=proc(K) ###")
r = rc4_ksa(Bits(key))
print("RC4KSA(key) = " + str(r))
print("")
print("### 3. Executing RC4PRGA:=proc(R, t) ###")
stream = list(rc4_prga(r, len(plaintext)))
print("RC4PRGA(R, t) = " + str(stream))
print("")
print("### 4. Executing RC4:=proc(M, K) ###")
rc4 = rc4_crypt(Bits(plain), Bits(key))
print("RC4(M, K) = " + str(rc4))
print("")
print("### 5. Executing RandomIV:=proc() ###")
iv = random_iv()
print("RandomIV() = " + str(iv))
print("")
print("### 6. Executing Trame:=proc(M, K) ###")
f_iv, f_crc, f_cipher = frame = wep_make_frame(Bits(plain), Bits(key), verbose=True)
print(frame)
print("Frame Validity : " + str(frame.is_valid(Bits(key))))
print("")
print("### 7. Executing Decrypt:=proc(K, T) ###")
clear = rc4_decrypt(Bits(key), frame)
if clear == plaintext:
print("Success !")
else:
print("Failed to correctly decrypt :(")
print("Decrypted payload : " + str(clear))
print("")
print("### 8. Executing Inject:=proc(K, T) ###")
try:
assert len(plain) == len(injection)
except AssertionError:
print("For now only injection messages of same length as plaintext are accepted. Injection Aborted.")
exit(0)
# new_frame = bin_inject(Bits(injection), Bits(plain), frame, Bits(key), True)
# print("New Frame :")
# print(new_frame)
# print("Frame Validity : " + str(new_frame.is_valid(key, True)))
bin_frame = bin_inject(Bits(injection), Bits(plain), frame, Bits(key), True)
print("Injected Frame :")
print(bin_frame)
print("Injected Frame Validity : " + str(bin_frame.is_valid(Bits(key), True)))
clear = rc4_decrypt(Bits(key), bin_frame)
try:
print("decrypted : " + str(clear))
except TypeError:
print(clear)
compare = bytearray()
for i in range(max(len(plain), len(injection))):
if i >= len(plain):
print("correct this") # compare.extend(inject[i:i + 1])
else:
if i >= len(injection):
compare.extend(plain[i:i + 1])
else:
compare.extend((plain[i] ^ injection[i]).to_bytes(1, byteorder='big'))
if bin_frame.is_valid(Bits(key)) and clear == compare:
print("Successfull injection !")
else:
print("Injection failed :(")
exit(0)
|
[
"daniel@bourdrez.fr"
] |
daniel@bourdrez.fr
|
f41057ef9b525c5494e975869cc5f9d1b52837ae
|
4329df9aa5306d83cd74e4923cc2dfca1f06f2f0
|
/src/mountaincar/train.py
|
3b0d1b430464c57571f81b8a342e5cc0eed3abc4
|
[] |
no_license
|
erzhu419/Transfer-Learning-for-RL
|
01d410a94f930eec68ee445c2a6c7da782b40f0c
|
de6649db6f6d13ec9463a1f7780deaf24bd8bc47
|
refs/heads/master
| 2022-03-14T16:48:45.051816
| 2019-11-26T18:45:12
| 2019-11-26T18:45:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,892
|
py
|
"""
DQN in PyTorch
"""
import os
import cv2
import torch
import torch.nn as nn
import numpy as np
import datetime
import random
from collections import namedtuple
from collections import deque
from typing import List, Tuple
import matplotlib.pyplot as plt
from tensorboardX import SummaryWriter
import gym
from gym import spaces
from gym.utils import seeding
from arguments import args
from autoencoders.autoencoder import AutoencoderConv
from mountaincar import MountainCarEnv
# from autoencoders.config import args
# CUDA compatability
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# seed
np_r, seed = seeding.np_random(None)
if args.tensorboard:
print('Init tensorboardX')
writer = SummaryWriter(log_dir='runs/{}'.format(datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")))
def preprocessImg(img):
'''
Convert to [1,c,h,w] from [h,w,c]
'''
# img = img.astype(np.float64)
img = cv2.resize(img,(300,200))
img = np.transpose(img, (2,0,1))
img = np.expand_dims(img,0)
img = img/255
return img
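# e.g. a raw frame of shape (400, 600, 3) comes back as a float array of shape
# (1, 3, 200, 300) with pixel values scaled to [0, 1]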
class DQN(nn.Module):
def __init__(self, input_dim: int, output_dim: int, hidden_dim: int) -> None:
"""DQN Network
Args:
input_dim (int): `state` dimension.
`state` is 2-D tensor of shape (n, input_dim)
output_dim (int): Number of actions.
Q_value is 2-D tensor of shape (n, output_dim)
hidden_dim (int): Hidden dimension in fc layer
"""
super(DQN, self).__init__()
self.layer1 = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.BatchNorm1d(hidden_dim),
nn.PReLU()
)
self.layer2 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.BatchNorm1d(hidden_dim),
nn.PReLU()
)
self.final = nn.Linear(hidden_dim, output_dim)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Returns a Q_value
Args:
x (torch.Tensor): `State` 2-D tensor of shape (n, input_dim)
Returns:
torch.Tensor: Q_value, 2-D tensor of shape (n, output_dim)
"""
x = self.layer1(x)
x = self.layer2(x)
x = self.final(x)
return x
Transition = namedtuple("Transition",
field_names=["state", "action", "reward", "next_state", "done"])
States_Buffer = namedtuple("States",field_names=["orig_state","new_state"])
class ReplayMemory(object):
def __init__(self, capacity: int) -> None:
"""Replay memory class
Args:
capacity (int): Max size of this memory
"""
self.capacity = capacity
self.cursor = 0
self.memory = []
def push(self,
state: np.ndarray,
action: int,
reward: int,
next_state: np.ndarray,
done: bool) -> None:
"""Creates `Transition` and insert
Args:
state (np.ndarray): 1-D tensor of shape (input_dim,)
action (int): action index (0 <= action < output_dim)
reward (int): reward value
next_state (np.ndarray): 1-D tensor of shape (input_dim,)
done (bool): whether this state was last step
"""
if len(self) < self.capacity:
self.memory.append(None)
self.memory[self.cursor] = Transition(state,
action, reward, next_state, done)
self.cursor = (self.cursor + 1) % self.capacity
def pop(self, batch_size: int) -> List[Transition]:
"""Returns a minibatch of `Transition` randomly
Args:
            batch_size (int): Size of mini-batch
Returns:
List[Transition]: Minibatch of `Transition`
"""
return random.sample(self.memory, batch_size)
def __len__(self) -> int:
"""Returns the length """
return len(self.memory)
class stateBuffer(object):
'''
Buffer for storing states for autoencoders
'''
def __init__(self, capacity: int) -> None:
"""Replay memory class
Args:
capacity (int): Max size of this memory
"""
self.capacity = capacity
self.cursor = 0
self.memory = []
def push(self,
orig_state: np.ndarray,
new_state : np.ndarray,
) -> None:
"""Creates `Transition` and insert
Args:
orig_state (np.ndarray): 3-D tensor of shape (input_dim,)
            new_state (np.ndarray): 3-D tensor of shape (input_dim,)
"""
if len(self) < self.capacity:
self.memory.append(None)
self.memory[self.cursor] = States_Buffer(orig_state, new_state)
self.cursor = (self.cursor + 1) % self.capacity
def pop(self, batch_size: int) -> List[States_Buffer]:
"""Returns a minibatch of `Transition` randomly
Args:
            batch_size (int): Size of mini-batch
Returns:
List[Transition]: Minibatch of `Transition`
"""
return random.sample(self.memory, batch_size)
def __len__(self) -> int:
"""Returns the length """
return len(self.memory)
class Agent(object):
def __init__(self, input_dim: int, output_dim: int, hidden_dim: int) -> None:
"""Agent class that choose action and train
Args:
input_dim (int): input dimension
output_dim (int): output dimension
hidden_dim (int): hidden dimension
"""
self.dqn = DQN(input_dim, output_dim, hidden_dim)
self.input_dim = input_dim
self.output_dim = output_dim
self.loss_fn = nn.MSELoss()
self.optim = torch.optim.Adam(self.dqn.parameters())
def _to_variable(self, x: np.ndarray) -> torch.Tensor:
"""torch.Variable syntax helper
Args:
x (np.ndarray): 2-D tensor of shape (n, input_dim)
Returns:
torch.Tensor: torch variable
"""
return torch.autograd.Variable(torch.Tensor(x))
def get_action(self, states: np.ndarray, eps: float) -> int:
"""Returns an action
Args:
states (np.ndarray): 2-D tensor of shape (n, input_dim)
eps (float): 𝜺-greedy for exploration
Returns:
int: action index
"""
if np.random.rand() < eps:
return np.random.choice(self.output_dim)
else:
self.dqn.train(mode=False)
scores = self.get_Q(states)
_, argmax = torch.max(scores.data, 1)
return int(argmax.numpy())
def get_Q(self, states: np.ndarray) -> torch.FloatTensor:
"""Returns `Q-value`
Args:
states (np.ndarray): 2-D Tensor of shape (n, input_dim)
Returns:
torch.FloatTensor: 2-D Tensor of shape (n, output_dim)
"""
states = self._to_variable(states.reshape(-1, self.input_dim))
self.dqn.train(mode=False)
return self.dqn(states)
def train(self, Q_pred: torch.FloatTensor, Q_true: torch.FloatTensor) -> float:
"""Computes `loss` and backpropagation
Args:
Q_pred (torch.FloatTensor): Predicted value by the network,
2-D Tensor of shape(n, output_dim)
Q_true (torch.FloatTensor): Target value obtained from the game,
2-D Tensor of shape(n, output_dim)
Returns:
float: loss value
"""
# print("Training RL agent")
self.dqn.train(mode=True)
self.optim.zero_grad()
loss = self.loss_fn(Q_pred, Q_true)
loss.backward()
self.optim.step()
return loss
class Darling(object):
# DisentAngled Representation LearnING
# Parody of DARLA :P
def __init__(self,tensorboard=0):
self.autoencoder1 = AutoencoderConv()
self.autoencoder2 = AutoencoderConv()
self.criterion = nn.MSELoss()
self.optimizer1 = torch.optim.Adam(self.autoencoder1.parameters(), lr=1e-3, weight_decay=1e-5)
self.optimizer2 = torch.optim.Adam(self.autoencoder2.parameters(), lr=1e-3, weight_decay=1e-5)
self.losses = []
self.tensorboard = tensorboard
        self.loss = 0  # kept so main() can read autoencoder_agent.loss without an AttributeError (the per-batch update below is left commented out)
self.epoch = 0
def train(self,minibatch: List[Transition]):
# print('Training Autoencoder')
orig_states = np.vstack([x.orig_state for x in minibatch])
new_states = np.vstack([x.new_state for x in minibatch])
orig_states = torch.FloatTensor(orig_states)
new_states = torch.FloatTensor(new_states)
s1,z1 = self.autoencoder1(orig_states)
s2,z2 = self.autoencoder2(new_states)
reconstruction_loss1 = self.criterion(orig_states,s1)
reconstruction_loss2 = self.criterion(new_states,s2)
latent_loss = self.criterion(z1,z2)
if args.loss_type == 'total':
loss = args.alpha_latent*latent_loss + args.alpha_recon1*reconstruction_loss1 + args.alpha_recon2*reconstruction_loss2
elif args.loss_type == 'seperate':
loss1 = args.alpha_latent*latent_loss + args.alpha_recon1*reconstruction_loss1
loss2 = args.alpha_latent*latent_loss + args.alpha_recon2*reconstruction_loss2
if args.tensorboard:
writer.add_scalar('Autoencoder_1_Loss',args.alpha_recon1*reconstruction_loss1.item(),self.epoch)
writer.add_scalar('Autoencoder_2_Loss',args.alpha_recon2*reconstruction_loss2.item(),self.epoch)
writer.add_scalar('Latent_Loss',args.alpha_latent*latent_loss.item(),self.epoch)
if args.loss_type == 'total':
writer.add_scalar('Total_Loss',loss.item(),self.epoch)
elif args.loss_type == 'seperate':
writer.add_scalar('Loss1',loss1.item(),self.epoch)
writer.add_scalar('Loss2',loss2.item(),self.epoch)
print('Recon Loss 1:{:5} \t Recon Loss 2:{:5}\t Latent Loss:{:5}'.format(\
args.alpha_recon1*reconstruction_loss1.item(),\
args.alpha_recon2*reconstruction_loss2.item(),\
args.alpha_latent*latent_loss.item()))
# self.losses.append(loss.detach().numpy())
# self.loss = np.copy(loss.detach().numpy())
# print('Backward 1')
if args.loss_type == 'total':
self.optimizer1.zero_grad()
loss.backward(retain_graph=True)
self.optimizer1.step()
# print('Backward 2')
self.optimizer2.zero_grad()
loss.backward()
self.optimizer2.step()
elif args.loss_type == 'seperate':
self.optimizer1.zero_grad()
loss1.backward(retain_graph=True)
self.optimizer1.step()
self.optimizer2.zero_grad()
loss2.backward()
self.optimizer2.step()
self.epoch += 1
# print('Done Traininf')
def save(self,args):
print('Saving Weights')
if not os.path.exists('./Weights'):
os.makedirs('./Weights')
torch.save({
'model_state_dict': self.autoencoder1.state_dict(),
'optimizer_state_dict': self.optimizer1.state_dict(),
}, args.weight_paths[0])
torch.save({
'model_state_dict': self.autoencoder2.state_dict(),
'optimizer_state_dict': self.optimizer2.state_dict(),
}, args.weight_paths[1])
def train_helper(agent: Agent, minibatch: List[Transition], gamma: float) -> float:
"""Prepare minibatch and train them
Args:
agent (Agent): Agent has `train(Q_pred, Q_true)` method
minibatch (List[Transition]): Minibatch of `Transition`
gamma (float): Discount rate of Q_target
Returns:
float: Loss value
"""
states = np.vstack([x.state for x in minibatch])
actions = np.array([x.action for x in minibatch])
rewards = np.array([x.reward for x in minibatch])
next_states = np.vstack([x.next_state for x in minibatch])
done = np.array([x.done for x in minibatch])
Q_predict = agent.get_Q(states)
Q_target = Q_predict.clone().data.numpy()
Q_target[np.arange(len(Q_target)), actions] = rewards + gamma * np.max(agent.get_Q(next_states).data.numpy(), axis=1) * ~done
Q_target = agent._to_variable(Q_target)
return agent.train(Q_predict, Q_target)
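# Note on the update above (descriptive only): for each sampled transition
# (s, a, r, s', done) the target starts as the network's own predictions and the
# entry of the taken action is overwritten with the one-step Q-learning backup
#   Q_target(s, a) = r + gamma * max_a' Q(s', a')   (the max term is zeroed when done)
# so the MSE loss in Agent.train() only pushes on the action that was actually taken.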
def play_episode(orig_env: MountainCarEnv,
new_env: MountainCarEnv,
agent: Agent,
autoencoder_agent: Darling,
replay_memory: ReplayMemory,
state_memory: stateBuffer,
eps: float,
batch_size: int) -> int:
"""Play an epsiode and train
    Args:
        orig_env (MountainCarEnv): source-domain environment
        new_env (MountainCarEnv): recoloured copy of the environment, stepped in lockstep
        agent (Agent): agent will train and get action
        autoencoder_agent (Darling): paired autoencoders trained on frames from both environments
        replay_memory (ReplayMemory): trajectory is saved here
        state_memory (stateBuffer): frame pairs for the autoencoders are saved here
        eps (float): 𝜺-greedy for exploration
        batch_size (int): batch size
Returns:
int: reward earned in this episode
"""
init_state = np.array([np_r.uniform(low=-0.6, high=-0.4), 0])
# initialise both envs to same state
s = orig_env.reset(init_state)
new_env.reset(init_state)
done = False
total_reward = 0
while not done:
a = agent.get_action(s, eps)
s2, r, done, _ = orig_env.step(a)
_,_,_,_ = new_env.step(a)
# get frames for both environments
orig_img = orig_env.render(mode='rgb_array')
new_img = new_env.render(mode='rgb_array')
orig_img = preprocessImg(orig_img)
new_img = preprocessImg(new_img)
total_reward += r
if done:
r = -1
replay_memory.push(s, a, r, s2, done)
# state_memory.push(s,np.flip(s))
# push frames for both envs in buffer
state_memory.push(orig_img,new_img)
if len(replay_memory)%batch_size == 0:
minibatch = replay_memory.pop(batch_size)
train_helper(agent, minibatch, args.gamma)
if len(replay_memory)%1000 ==0:
for i in range(int(1000/batch_size)):
print('Update: ',i)
minibatch_autoencoder = state_memory.pop(batch_size)
autoencoder_agent.train(minibatch_autoencoder)
autoencoder_agent.save(args)
s = s2
return total_reward
def get_env_dim(env: gym.Env) -> Tuple[int, int]:
"""Returns input_dim & output_dim
Args:
env (gym.Env): gym Environment (CartPole-v0)
Returns:
int: input_dim
int: output_dim
"""
input_dim = env.observation_space.shape[0]
output_dim = env.action_space.n
return input_dim, output_dim
def epsilon_annealing(episode: int, max_episode: int, min_eps: float) -> float:
"""Returns 𝜺-greedy
1.0---|\
| \
| \
min_e +---+------->
|
max_episode
Args:
        episode (int): Current episode (0 <= episode)
max_episode (int): After max episode, 𝜺 will be `min_eps`
min_eps (float): 𝜺 will never go below this value
Returns:
float: 𝜺 value
"""
slope = (min_eps - 1.0) / max_episode
    return max(slope * episode + 1.0, min_eps)
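# Illustrative values (not part of the original file): with max_episode=100 and
# min_eps=0.1, episode 0 gives 1.0, episode 50 gives roughly 0.55, and every
# episode from 100 onwards is clamped to min_eps.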
def main():
"""Main
"""
try:
# env = gym.make(FLAGS.env)
orig_env = MountainCarEnv()
new_env = MountainCarEnv(color=[1,0,0,0.5])
# env = gym.wrappers.Monitor(env, directory="monitors", force=True)
rewards = deque(maxlen=100)
input_dim, output_dim = get_env_dim(orig_env)
agent = Agent(input_dim, output_dim, args.hidden_dim)
replay_memory = ReplayMemory(args.capacity)
state_memory = stateBuffer(args.capacity)
autoencoder_agent = Darling()
for i in range(args.n_episode):
eps = epsilon_annealing(i, args.max_episode, args.min_eps)
r = play_episode(orig_env, new_env, agent,autoencoder_agent, replay_memory,state_memory, eps, args.batch_size)
autoencoder_loss = autoencoder_agent.loss
print("[Episode: {:5}] Reward: {:5} 𝜺-greedy: {:5.2f} Autoencoder Loss: {:5}".format(i + 1, r, eps,autoencoder_loss))
rewards.append(r)
            if args.tensorboard:
                writer.add_scalar('Agent Reward', r, i)
if len(rewards) == rewards.maxlen:
if np.mean(rewards) >= 200:
print("Game cleared in {} games with {}".format(i + 1, np.mean(rewards)))
break
autoencoder_agent.save(args)
# plt.plot(autoencoder_agent.losses)
# plt.grid()
# plt.show()
finally:
orig_env.close()
new_env.close()
if __name__ == '__main__':
main()
|
[
"siddharthnayak98@gmail.com"
] |
siddharthnayak98@gmail.com
|
281b5ead6e1f177a9c3115e9da75be8f64bd2d08
|
fa91c2e77648a84b15b1bc741dcfc2c243cc5c21
|
/LostAndFound/forms.py
|
a7d363eacfc00011ec63e47e341e2eb011e41a97
|
[] |
no_license
|
kmu-fringles/zisae-project
|
015867fc521f4b9e7af64634f3f13d9dac06ce36
|
9b3a3825683220b72c41935754f895bfc9345b8d
|
refs/heads/master
| 2020-06-17T00:29:57.748391
| 2019-07-20T06:54:28
| 2019-07-20T06:54:28
| 195,743,954
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
from .models import LostAndFound, Comment
from django import forms
class CommentForm(forms.ModelForm):
#text = forms.TextInput(label = '댓글')
class Meta:
model = Comment
fields = ['comment_writer', 'comment_text']
|
[
"gus7Wn@gmail.com"
] |
gus7Wn@gmail.com
|
0aef00a48547dbd3801bb30ce5a3d51f492ebac1
|
7c996d94d5f7cbd6a94bf103579cc50d811adc93
|
/migrations/versions/cdf84c62b8b6_.py
|
8e0322584d15e2568a7a5c1a8c009956efdef89f
|
[] |
no_license
|
cfurukawa6/core
|
ca0a1d9b1d582bc7a642518f41c3ac071ce5cf71
|
8dafe2d14e0823cddb15aa11cf0a56967cc23126
|
refs/heads/master
| 2020-07-28T09:23:35.064693
| 2019-09-13T06:27:27
| 2019-09-13T06:27:27
| 173,833,913
| 0
| 0
| null | 2019-03-04T22:48:14
| 2019-03-04T22:48:14
| null |
UTF-8
|
Python
| false
| false
| 1,434
|
py
|
"""empty message
Revision ID: cdf84c62b8b6
Revises:
Create Date: 2019-04-17 02:42:36.886679
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cdf84c62b8b6'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('sheet',
sa.Column('sheet_label', sa.String(), nullable=False),
sa.Column('avgmoe', sa.DECIMAL(), nullable=True),
sa.Column('avgsg', sa.DECIMAL(), nullable=True),
sa.Column('avgmc', sa.DECIMAL(), nullable=True),
sa.Column('avgvel', sa.DECIMAL(), nullable=True),
sa.Column('avgupt', sa.DECIMAL(), nullable=True),
sa.Column('pkdensity', sa.DECIMAL(), nullable=True),
sa.Column('effvel', sa.ARRAY(sa.DECIMAL()), nullable=True),
sa.Column('lvel', sa.ARRAY(sa.DECIMAL()), nullable=True),
sa.Column('rvel', sa.ARRAY(sa.DECIMAL()), nullable=True),
sa.Column('lupt', sa.ARRAY(sa.DECIMAL()), nullable=True),
sa.Column('rupt', sa.ARRAY(sa.DECIMAL()), nullable=True),
sa.Column('sg', sa.ARRAY(sa.DECIMAL()), nullable=True),
sa.Column('mc', sa.ARRAY(sa.DECIMAL()), nullable=True),
sa.PrimaryKeyConstraint('sheet_label')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('sheet')
# ### end Alembic commands ###
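# A hedged sketch (not used by Alembic) of the ORM model that the 'sheet' table
# created above could map to; the class name and the location of the
# declarative Base are assumptions for illustration only.
#
# from sqlalchemy import Column, String, DECIMAL, ARRAY
# from sqlalchemy.ext.declarative import declarative_base
#
# Base = declarative_base()
#
# class Sheet(Base):
#     __tablename__ = 'sheet'
#     sheet_label = Column(String, primary_key=True)
#     avgmoe = Column(DECIMAL)
#     avgsg = Column(DECIMAL)
#     avgmc = Column(DECIMAL)
#     avgvel = Column(DECIMAL)
#     avgupt = Column(DECIMAL)
#     pkdensity = Column(DECIMAL)
#     effvel = Column(ARRAY(DECIMAL))
#     lvel = Column(ARRAY(DECIMAL))
#     rvel = Column(ARRAY(DECIMAL))
#     lupt = Column(ARRAY(DECIMAL))
#     rupt = Column(ARRAY(DECIMAL))
#     sg = Column(ARRAY(DECIMAL))
#     mc = Column(ARRAY(DECIMAL))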
|
[
"langbuana.yuka@hotmail.com"
] |
langbuana.yuka@hotmail.com
|
07e06ed4f9d0da01270e8ac320fa79d404150d1a
|
57008b377d6c926123b22bc7c530576e794c64f0
|
/htmap/_startup.py
|
4326d074f8d9d0454d134bedabc1162665023009
|
[
"Apache-2.0"
] |
permissive
|
ChristinaLK/htmap
|
75ec276a9e18d08ad61f5e93b45082fce9bdc003
|
fdc4c0a09418b976c9e66f781a76f8f18f9aa1a0
|
refs/heads/master
| 2020-07-01T22:36:57.012816
| 2019-08-01T00:11:29
| 2019-08-01T00:11:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,495
|
py
|
import os
import logging
from logging import handlers
from pathlib import Path
from . import settings, names
logger = logging.getLogger('htmap')
LOGS_DIR_PATH = Path(settings['HTMAP_DIR']) / names.LOGS_DIR
def setup_internal_file_logger():
LOGS_DIR_PATH.mkdir(parents = True, exist_ok = True)
LOG_FILE = LOGS_DIR_PATH / 'htmap.log'
_logfile_handler = handlers.RotatingFileHandler(
filename = LOG_FILE,
mode = 'a',
maxBytes = 10 * 1024 * 1024, # 10 MB
backupCount = 4,
)
_fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
_logfile_handler.setFormatter(_fmt)
_logfile_handler.setLevel(logging.DEBUG)
logger.addHandler(_logfile_handler)
def ensure_htmap_dir_exists():
from . import names as _names
_htmap_dir = Path(settings['HTMAP_DIR'])
try:
did_not_exist = not _htmap_dir.exists()
dirs = (
_htmap_dir,
_htmap_dir / _names.MAPS_DIR,
_htmap_dir / _names.TAGS_DIR,
_htmap_dir / _names.REMOVED_TAGS_DIR
)
for dir in dirs:
dir.mkdir(parents = True, exist_ok = True)
if did_not_exist:
logger.debug(f'created HTMap dir at {_htmap_dir}')
except PermissionError as e:
raise PermissionError(f'the HTMap directory ({_htmap_dir}) needs to be writable') from e
if os.getenv('HTMAP_ON_EXECUTE') != '1':
ensure_htmap_dir_exists()
setup_internal_file_logger()
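# A minimal sketch, assuming a console handler is wanted alongside the rotating
# file handler configured above; nothing calls this helper, it is illustrative.
def _example_add_console_handler(level = logging.INFO):
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    console_handler.setLevel(level)
    logger.addHandler(console_handler)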
|
[
"noreply@github.com"
] |
ChristinaLK.noreply@github.com
|
181479fad7eed6028469449cf9d77b1e5ce1468f
|
8e689176bba5607dabaeef57ad312661220e5f14
|
/Python/pyrenn/SavedNN/example_all_run.py
|
ffeb7f599990cb0d5df94e34757e29bcd81ff56b
|
[] |
no_license
|
arnoplaetinck/Masterproef
|
bc53c10cf427fd917ee703226d26a2cfeda0c9eb
|
f0be6811d1318409c3257d10b402f331e3b3be59
|
refs/heads/master
| 2020-07-18T22:03:29.491467
| 2020-05-01T13:27:41
| 2020-05-01T13:27:41
| 206,317,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,203
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
from numpy import genfromtxt
import pyrenn as prn
import csv
import time
from statistics import mean
import psutil
import functools
import pandas as pd
import numpy as np
import tensorflow as tf
import os
from tensorflow import keras
cores = []
cpu_percent = []
virtual_mem = []
time_start = []
time_stop = []
time_diff = []
time_total = 0
iterations = 1
labels = ["compair", "friction", "narendra4", "pt2",
"P0Y0_narendra4", "P0Y0_compair", "gradient", "Text Classificatie", "Totaal"]
###
# Creating a filename
seconds = time.time()
local_time = time.ctime(seconds)
naam2 = local_time.split()
naam = "MP_NN_ALL_RUN_PC"
for i in range(len(naam2)):
naam += "_" + naam2[i]
naam = naam.replace(':', '_')
def review_encode(s):
encoded = [1]
for word in s:
if word.lower() in word_index:
encoded.append(word_index[word.lower()])
else:
encoded.append(2)
return encoded
def decode_review(text):
return " ".join([reverse_word_index.get(i, "?") for i in text])
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# first time calling cpu percent to get rid of 0,0
psutil.cpu_percent(interval=None, percpu=True)
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# example_compair.py
for i in range(iterations):
# Read Example Data
df = genfromtxt('example_data_compressed_air.csv', delimiter=',')
P = np.array([df[1], df[2], df[3]])
Y = np.array([df[4], df[5]])
Ptest = np.array([df[6], df[7], df[8]])
Ytest = np.array([df[9], df[10]])
# Load saved NN from file
net = prn.loadNN("./SavedNN/compair.csv")
psutil.cpu_percent(interval=None, percpu=True)
time_start.append(time.time())
# Calculate outputs of the trained NN for train and test data
y = prn.NNOut(P, net)
ytest = prn.NNOut(Ptest, net)
time_stop.append(time.time())
cores.append(psutil.cpu_percent(interval=None, percpu=True))
virtual_mem.append(psutil.virtual_memory())
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# example_friction.py
# This is an example of a static system with one output and one input
for i in range(iterations):
# Read Example Data
df = genfromtxt('example_data_friction.csv', delimiter=',')
P = df[1]
Y = df[2]
Ptest = df[3]
Ytest = df[4]
# Load saved NN from file
net = prn.loadNN("./SavedNN/friction.csv")
psutil.cpu_percent(interval=None, percpu=True)
time_start.append(time.time())
# Calculate outputs of the trained NN for train and test data
y = prn.NNOut(P, net)
ytest = prn.NNOut(Ptest, net)
time_stop.append(time.time())
cores.append(psutil.cpu_percent(interval=None, percpu=True))
virtual_mem.append(psutil.virtual_memory())
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# example_narendra4.py
# This is an example of a dynamic system with one output and one delayed input
for i in range(iterations):
# Read Example Data
df = genfromtxt('example_data_narendra4.csv', delimiter=',')
P = df[1]
Y = df[2]
Ptest = df[3]
Ytest = df[4]
# Load saved NN from file
net = prn.loadNN("./SavedNN/narendra4.csv")
psutil.cpu_percent(interval=None, percpu=True)
time_start.append(time.time())
# Calculate outputs of the trained NN for train and test data
y = prn.NNOut(P, net)
ytest = prn.NNOut(Ptest, net)
time_stop.append(time.time())
cores.append(psutil.cpu_percent(interval=None, percpu=True))
virtual_mem.append(psutil.virtual_memory())
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# example_pt2.py
# This is an example of a dynamic system with one input and one output
for i in range(iterations):
# Read Example Data
df = genfromtxt('example_data_friction.csv', delimiter=',')
P = df[1]
Y = df[2]
Ptest = df[3]
Ytest = df[4]
# Load saved NN from file
net = prn.loadNN("./SavedNN/pt2.csv")
psutil.cpu_percent(interval=None, percpu=True)
time_start.append(time.time())
# Calculate outputs of the trained NN for train and test data
y = prn.NNOut(P, net)
ytest = prn.NNOut(Ptest, net)
time_stop.append(time.time())
cores.append(psutil.cpu_percent(interval=None, percpu=True))
virtual_mem.append(psutil.virtual_memory())
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# example_using_P0Y0_narendra4.py
for i in range(iterations):
# Read Example Data
df = genfromtxt('example_data_narendra4.csv', delimiter=',')
P = df[1]
Y = df[2]
Ptest_ = df[3]
Ytest_ = df[4]
# define the first 3 timesteps t=[0,1,2] of Test Data as previous (known) data P0test and Y0test
P0test = Ptest_[0:3]
Y0test = Ytest_[0:3]
# Use the timesteps t = [3..99] as Test Data
Ptest = Ptest_[3:100]
Ytest = Ytest_[3:100]
# Load saved NN from file
net = prn.loadNN("./SavedNN/using_P0Y0_narendra4.csv")
psutil.cpu_percent(interval=None, percpu=True)
time_start.append(time.time())
# Calculate outputs of the trained NN for test data with and without previous input P0 and output Y0
ytest = prn.NNOut(Ptest, net)
y0test = prn.NNOut(Ptest, net, P0=P0test, Y0=Y0test)
time_stop.append(time.time())
cores.append(psutil.cpu_percent(interval=None, percpu=True))
virtual_mem.append(psutil.virtual_memory())
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# example__using_P0Y0_compair.py
# This is an example of a dynamic system with 2 outputs and 3 inputs
for i in range(iterations):
# Read Example Data
df = genfromtxt('example_data_compressed_air.csv', delimiter=',')
P = np.array([df[1], df[2], df[3]])
Y = np.array([df[4], df[5]])
Ptest_ = np.array([df[6], df[7], df[8]])
Ytest_ = np.array([df[9], df[10]])
# define the first timestep t=0 of Test Data as previous (known) data P0test and Y0test
P0test = Ptest_[:, 0:1]
Y0test = Ytest_[:, 0:1]
# Use the timesteps t = [1..99] as Test Data
Ptest = Ptest_[:, 1:100]
Ytest = Ytest_[:, 1:100]
# Load saved NN from file
net = prn.loadNN("./SavedNN/using_P0Y0_compair.csv")
psutil.cpu_percent(interval=None, percpu=True)
time_start.append(time.time())
# Calculate outputs of the trained NN for test data with and without previous input P0 and output Y0
ytest = prn.NNOut(Ptest, net)
y0test = prn.NNOut(Ptest, net, P0=P0test, Y0=Y0test)
time_stop.append(time.time())
cores.append(psutil.cpu_percent(interval=None, percpu=True))
virtual_mem.append(psutil.virtual_memory())
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# example_gradient.py
for i in range(iterations):
df = genfromtxt('example_data_pt2.csv', delimiter=',')
P = df[1]
Y = df[2]
# Load saved NN from file
net = prn.loadNN("./SavedNN/gradient.csv")
psutil.cpu_percent(interval=None, percpu=True)
time_start.append(time.time())
# Prepare input Data for gradient calculation
data, net = prn.prepare_data(P, Y, net)
# Real Time Recurrent Learning
J, E, e = prn.RTRL(net, data)
g_rtrl = 2 * np.dot(J.transpose(), e) # calculate g from Jacobian and error vector
# Back Propagation Through Time
g_bptt, E = prn.BPTT(net, data)
# Compare
# print('\n\n\nComparing Methods:')
# print('Time RTRL: ', (t1_rtrl - t0_rtrl), 's')
# print('Time BPTT: ', (t1_bptt - t0_bptt), 's')
# if not np.any(np.abs(g_rtrl - g_bptt) > 1e-9):
# print('\nBoth methods showing the same result!')
# print('g_rtrl/g_bptt = ', g_rtrl / g_bptt)
time_stop.append(time.time())
cores.append(psutil.cpu_percent(interval=None, percpu=True))
virtual_mem.append(psutil.virtual_memory())
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# text classification example (Keras IMDB model)
data = keras.datasets.imdb
(train_data, train_labels), (test_data, test_labels) = data.load_data(num_words=10000)
print(train_data[0])
word_index = data.get_word_index()
word_index = {k: (v + 3) for k, v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2
word_index["<UNUSED>"] = 3
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
# preprocessing data to make it consistent (different lengths for different reviews)
train_data = keras.preprocessing.sequence.pad_sequences(train_data, value=word_index["<PAD>"], padding="post",
maxlen=250)
test_data = keras.preprocessing.sequence.pad_sequences(test_data, value=word_index["<PAD>"], padding="post", maxlen=250)
for i in range(iterations):
model = keras.models.load_model("model.h5")
psutil.cpu_percent(interval=None, percpu=True)
time_start.append(time.time())
with open("test.txt", encoding="utf-8") as f:
for line in f.readlines():
nline = line.replace(",", "").replace(".", "").replace("(", "").replace(")", "").replace(":", "").replace(
"\"",
"").strip(
" ")
encode = review_encode(nline)
            # Pad the encoded review (not the whole test set) so it matches the model's expected input length
            encode = keras.preprocessing.sequence.pad_sequences([encode], value=word_index["<PAD>"], padding="post",
                                                                maxlen=250)
predict = model.predict(encode)
time_stop.append(time.time())
cores.append(psutil.cpu_percent(interval=None, percpu=True))
virtual_mem.append(psutil.virtual_memory())
cores.append(psutil.cpu_percent(interval=None, percpu=True))
virtual_mem.append(psutil.virtual_memory())
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Logging data
for i in range(iterations*(len(labels)-1)):
time_diff.append(round(time_stop[i] - time_start[i], 10))
time_total += time_stop[i] - time_start[i]
time_diff.append(round(time_total/iterations, 10))
i = 0
for core in cores:
cpu_percent.append(mean(cores[i]))
i += 1
i = 0
with open('./logging/' + naam + ".csv", mode='w') as results_file:
fieldnames = ['Naam', 'CPU Percentage', 'timediff', 'virtual mem']
file_writer = csv.DictWriter(results_file, fieldnames=fieldnames)
file_writer.writeheader()
for i in range(iterations*(len(labels)-1)+1):
j = int(i/iterations)
file_writer.writerow({'Naam': labels[j], 'CPU Percentage': str(cpu_percent[i]), 'timediff': str(time_diff[i]),
'virtual mem': str(virtual_mem[i])})
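# A hedged sketch for inspecting the log afterwards with pandas (already
# imported above); the path mirrors the file written in the block above.
# Left commented out so the benchmark script's behaviour is unchanged.
#
# results = pd.read_csv('./logging/' + naam + ".csv")
# print(results[['Naam', 'CPU Percentage', 'timediff']])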
|
[
"arno.plaetinck@hotmail.com"
] |
arno.plaetinck@hotmail.com
|
0f486160c87953554de09581004fbb09a73078c1
|
141346be61f39c2b7a1645cdec278f23b7137b9f
|
/src/stand_alone.py
|
2efd9e9d1dff4d985dca17a504b1293c2bf9ffd6
|
[] |
no_license
|
drimyus/facs_with_eeg_python
|
443bbcd0922e5cc8ea92bf6ec17e404bf484a71a
|
c9b4bc7ebcca363a8ee73a3117b3469bb7252f6a
|
refs/heads/master
| 2021-08-30T12:23:44.056485
| 2017-12-16T05:55:57
| 2017-12-16T05:55:57
| 113,594,180
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,348
|
py
|
from sklearn.svm import SVC
from sklearn.externals import joblib
import numpy as np
import tkinter
import tkinter.filedialog
import dlib
import cv2
import sys
import os
import random
from PIL import Image, ExifTags
from src.face import Face
FACE_DATASET = "crop"
TRAIN_DATASET = "train"
TEST_DATASET = "test"
class StandAlone:
def __init__(self, dataset, model_path, stand_flag=False):
# location of classifier model
self.model = None
self.model_path = model_path
# location of dataset
self.dataset = dataset
self.crop_dataset = os.path.join(dataset, FACE_DATASET)
self.train_dataset = os.path.join(dataset, TRAIN_DATASET)
self.test_dataset = os.path.join(dataset, TEST_DATASET)
self.clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
self.ensmeble_ratio = 0.75 # trains : tests = 3 : 1
self.stand_flag = stand_flag
self.dlib_face = Face('dlib')
self.haar_face = Face('haar')
self.face_width = 151
self.face_height = 151
self.labels = ["angry", "contemp", "disgust", "fear", "happy", "neutral", "sadness", "suprise"]
self.rect_color = (0, 255, 0)
self.text_color = (255, 0, 255)
# load the model
sys.stdout.write("Loading the model.\n")
success = self.load()
# if not success:
# # No exist trained model, so training...
# self.train_model()
def load(self):
if os.path.isfile(self.model_path):
try:
# loading
self.model = joblib.load(self.model_path)
return True
except Exception as ex:
print(ex)
else:
sys.stdout.write(" No exist Model {}, so it should be trained\n".format(self.model_path))
def load_image(self, file_path):
try:
image = Image.open(file_path)
orientation = None
for key in ExifTags.TAGS.keys():
if ExifTags.TAGS[key] == 'Orientation':
orientation = key
break
exif = dict(image._getexif().items())
if exif[orientation] == 3:
image = image.rotate(180, expand=True)
elif exif[orientation] == 6:
image = image.rotate(270, expand=True)
elif exif[orientation] == 8:
image = image.rotate(90, expand=True)
# image.save(file_path)
# image.close()
cv_img = np.array(image)
cv_img = cv_img[:, :, ::-1].copy()
return np.array(cv_img)
except (AttributeError, KeyError, IndexError):
# cases: image don't have getexif
cv_img = cv2.imread(file_path)
return cv_img
def calib_orientation(self, image):
face = self.dlib_face
max_rects = []
max_image = image
for rotate_code in range(3):
rot_image = cv2.rotate(image, rotateCode=rotate_code)
rects = face.detect_face(rot_image)
if len(rects) > len(max_rects):
max_rects = rects
max_image = rot_image
return max_rects, max_image
def ensemble_data(self):
crop_dataset = self.crop_dataset
train_dataset = self.train_dataset
if not os.path.isdir(train_dataset):
os.mkdir(train_dataset)
test_dataset = self.test_dataset
if not os.path.isdir(test_dataset):
os.mkdir(test_dataset)
sys.stdout.write("Ensembiling the data.\n")
if not os.path.isdir(crop_dataset):
sys.stderr.write("\tNo exist such directory: {}\n".format(crop_dataset))
sys.exit(1)
sys.stdout.write("\tdataset: {}\n".format(crop_dataset))
""" counting """
sys.stdout.write("\tCount the # files(faces) on dataset.\n")
persons = []
cnts = []
for dirpath, dirnames, filenames in os.walk(crop_dataset):
dirnames.sort()
for subdirname in dirnames:
subdirpath = os.path.join(dirpath, subdirname)
cnts.append(len(os.listdir(subdirpath)))
persons.append(subdirname)
sys.stdout.write("\t\tperson: {}, images: {}\n".format(subdirname, len(os.listdir(subdirpath))))
break
""" ensembling """
sys.stdout.write("\tBalance the dataset.\n")
min_cnt = min(cnts)
for person in persons:
subdirpath = os.path.join(crop_dataset, person)
files = os.listdir(subdirpath)
samples = random.sample(files, min_cnt) # pickle the random items from the list
for sample in samples:
src = os.path.join(subdirpath, sample)
if samples.index(sample) <= self.ensmeble_ratio * len(samples): # for training
new_subdirpath = os.path.join(train_dataset, person)
if not os.path.isdir(new_subdirpath):
os.mkdir(new_subdirpath)
dst = os.path.join(new_subdirpath, sample)
else: # for testing
new_subdirpath = os.path.join(test_dataset, person)
if not os.path.isdir(new_subdirpath):
os.mkdir(new_subdirpath)
dst = os.path.join(new_subdirpath, sample)
crop = cv2.imread(src)
if self.stand_flag:
stand_img = self.standardize_face(crop)
else:
stand_img = crop
cv2.imwrite(dst, stand_img)
cv2.imshow("face", crop)
cv2.imshow("stand", stand_img)
sys.stdout.write("\nEnsembled!\n")
def standardize_face(self, face):
gray = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
clahe_image = self.clahe.apply(gray)
stand = cv2.cvtColor(clahe_image, cv2.COLOR_GRAY2BGR)
return stand
def image_descriptions(self, image, face):
if image.shape[:2] == (self.face_height, self.face_width):
rects = [dlib.rectangle(int(0), int(0), int(image.shape[1]), int(image.shape[0]))]
else:
rects = face.detect_face(image)
descriptions = []
calib_image = None
if len(rects) == 0:
_, calib_image = self.calib_orientation(image)
else:
calib_image = image
for rect in rects:
            crop = calib_image[max(0, rect.top()): min(image.shape[0], rect.bottom()),
                               max(rect.left(), 0):min(rect.right(), image.shape[1])]
if self.stand_flag:
stand_face = self.standardize_face(crop)
else:
stand_face = crop
resize = cv2.resize(stand_face, (self.face_width, self.face_height))
description = self.dlib_face.recog_description(resize)
descriptions.append(description)
return calib_image, descriptions, rects
def train_data(self, raw_data):
dataset = self.crop_dataset
if not os.path.isdir(dataset):
os.mkdir(dataset)
sys.stdout.write("Preparing the face dataset from the raw images.\n")
if not os.path.isdir(raw_data):
sys.stderr.write("\tCan not find source directory: {}\n".format(raw_data))
if not os.path.isdir(dataset):
sys.stdout.write("\tNo exist destination director, so will create new directory: {}\n".format(dataset))
os.mkdir(dataset)
sys.stdout.write("\tsource: {}\n".format(raw_data))
sys.stdout.write("\tdestination: {}\n".format(dataset))
sys.stdout.write("\tScaning the source directory.\n")
for dirpath, dirnames, filenames in os.walk(raw_data):
dirnames.sort()
for subdirname in dirnames:
subdirpath = os.path.join(dirpath, subdirname)
new_subdirpath = os.path.join(dataset, subdirname)
if not os.path.isdir(new_subdirpath):
os.mkdir(new_subdirpath)
for filename in os.listdir(subdirpath):
sys.stdout.write("\r\t\t{} / {}".format(subdirname, filename))
sys.stdout.flush()
crop = None
image = self.load_image(os.path.join(subdirpath, filename))
if image.shape[:2] == (self.face_height, self.face_width):
crop = image
else:
# cropping the face from the image and resizing
rects = self.dlib_face.detect_face(image)
if len(rects) == 0: # find the correct orientation
rects, image = self.calib_orientation(image)
if len(rects) != 0:
(x, y, w, h) = (rects[0].left(), rects[0].top(), rects[0].right() - rects[0].left(),
rects[0].bottom() - rects[0].top())
height, width = image.shape[:2]
crop = image[max(0, y): min(y + h, height), max(0, x):min(width, x + w)]
if self.stand_flag:
crop = self.standardize_face(crop)
if crop is not None:
cv2.imwrite(filename=os.path.join(new_subdirpath, filename), img=crop)
cv2.imshow("face", crop)
cv2.waitKey(1)
else:
sys.stdout.write("\t no face: {} / {}\n".format(subdirpath, filename))
sys.stdout.write("\nCropped!\n")
def train_model(self, model_path=None):
if model_path is None:
model_path = self.model_path
else:
self.model_path = model_path
dataset = self.train_dataset
sys.stdout.write("Training the model.\n")
if not os.path.isdir(dataset):
sys.stderr.write("\tNo exist Dataset for training{}\n".format(dataset))
exit(1)
# initialize the data matrix and labels list
data = []
labels = []
"""-----------------------------------------------------------------------------------------"""
sys.stdout.write("\tScanning the dataset.\n")
# loop over the input images
for dirpath, dirnames, filenames in os.walk(dataset):
dirnames.sort()
for subdirname in dirnames:
subject_path = os.path.join(dirpath, subdirname)
for filename in os.listdir(subject_path):
sys.stdout.write("\r\t\tscanning: {} {}".format(subject_path, filename))
sys.stdout.flush()
img = self.load_image(os.path.join(subject_path, filename))
_, descriptions, rects = self.image_descriptions(img, self.dlib_face)
if len(descriptions) == 0:
continue
label, hist = subdirname, descriptions[0] # get label, histogram
data.append(hist)
labels.append(label)
"""-----------------------------------------------------------------------------------------"""
sys.stdout.write("\nConfigure the SVM model.\n")
# Configure the model : linear SVM model with probability capabilities
"""'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or a callable."""
model = SVC(C=1.0, kernel='poly', degree=3, gamma='auto', coef0=0.0, shrinking=True, probability=True,
tol=0.001, cache_size=200, class_weight='balanced', verbose=False, max_iter=-1,
decision_function_shape='ovr', random_state=None)
# model = SVC(C=1.0, kernel='linear')
# model = SVC(probability=True)
# Train the model
model.fit(data, labels)
joblib.dump(model, self.model_path)
"""-----------------------------------------------------------------------------------------"""
sys.stdout.write("\tFinished the training.\n")
self.model = model
def check_precision(self, dir_path):
confuse_mat = []
total = 0
num_pos = 0
num_neg = 0
sys.stdout.write("Check the precision with dataset {}\n".format(dir_path))
if not os.path.isdir(dir_path):
sys.stderr.write("\tCan not find such directory: {}\n".format(dir_path))
sys.exit(1)
sys.stdout.write("\tsource: {}\n".format(dir_path))
sys.stdout.write("\tScaning the source directory.\n")
for dirpath, dirnames, filenames in os.walk(dir_path):
dirnames.sort()
for subdirname in dirnames:
prec_line = np.zeros(len(dirnames), dtype=np.uint8).tolist()
subdirpath = os.path.join(dirpath, subdirname)
for filename in os.listdir(subdirpath):
sys.stdout.write("\r\t\tscanning: {} {}".format(subdirname, filename))
sys.stdout.flush()
img = self.load_image(os.path.join(subdirpath, filename))
_, descriptions, rects = self.image_descriptions(img, self.dlib_face)
if len(descriptions) == 0:
continue
fid, idx, probs = self.classify_description(descriptions[0])
if fid is not None:
prec_line[idx] += 1
if idx == dirnames.index(subdirname):
num_pos += 1
else:
num_neg += 1
total += 1
prec_line.append(subdirname)
prec_line.append(len(os.listdir(subdirpath)))
confuse_mat.append(prec_line)
sys.stdout.write(
"\ntotal: {}, positive: {}, negative: {}, precision:{}\n".format(total, num_pos, num_neg,
float(num_pos) / float(total)))
for line in confuse_mat:
print(line)
def classify_description(self, description):
try:
description = description.reshape(1, -1)
# Get a prediction from the model including probability:
probab = self.model.predict_proba(description)
max_ind = np.argmax(probab)
# Rearrange by size
sort_probab = np.sort(probab, axis=None)[::-1]
            # The prediction is treated as ambiguous when the runner-up probability is too close to the best one
            if sort_probab[1] / sort_probab[0] > 0.7:
                predlbl = "Unknown"
else:
predlbl = self.model.classes_[max_ind]
return predlbl, max_ind, probab
except Exception as e:
sys.stdout.write(str(e) + "\n")
pass
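    # A hedged convenience sketch: classify a single image file end-to-end with
    # the helpers above. The method name is an assumption and nothing in the
    # __main__ workflow below calls it.
    def classify_image_file_example(self, image_path):
        image = self.load_image(image_path)
        _, descriptions, rects = self.image_descriptions(image, self.dlib_face)
        if len(descriptions) == 0:
            return None
        return self.classify_description(descriptions[0])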
def test_image_file(self):
root = tkinter.Tk()
root.withdraw()
select_file = (tkinter.filedialog.askopenfile(initialdir='.', title='Please select a image file'))
image_path = select_file.name
root.update()
try:
# Loop to recognize faces
image = self.load_image(image_path)
calib_image, descriptions, rects = self.image_descriptions(image, self.haar_face)
if len(descriptions) == 0:
sys.stdout.write("No face image\n")
sys.exit(1)
else:
for i in range(len(rects)):
description = descriptions[i]
rect = rects[i]
fid, idx, probs = self.classify_description(description)
cv2.rectangle(calib_image, (rect.left(), rect.top()), (rect.right(), rect.bottom()), self.rect_color, 3)
cv2.putText(calib_image, fid, (rect.left(), rect.top()), cv2.FONT_HERSHEY_SIMPLEX, 1, self.text_color, 3)
show_image = cv2.resize(calib_image, (int(max(calib_image.shape[1] / 4, 130)), int(max(calib_image.shape[0] / 4, 150))))
cv2.imshow(image_path[-20:], show_image)
sys.stdout.write("[{}] id: {}\n".format(fid, str(idx)))
print(probs)
cv2.waitKey(0)
except Exception as e:
sys.stdout.write(str(e) + "\n")
def live_video(self, video_path):
cap = cv2.VideoCapture(video_path)
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
ret, frame = cap.read()
while ret:
ret, frame = cap.read()
frame = cv2.resize(frame, (int(width/2.5), int(height/2.5)))
rects = self.dlib_face.detect_face(frame)
for rect in rects:
                crop = frame[max(0, rect.top()): min(frame.shape[0], rect.bottom()),
                             max(rect.left(), 0):min(rect.right(), frame.shape[1])]
if self.stand_flag:
stand_face = self.standardize_face(crop)
else:
stand_face = crop
resize = cv2.resize(stand_face, (self.face_width, self.face_height))
description = self.dlib_face.recog_description(resize)
fid, idx, probs = self.classify_description(description)
frame = self.show_result(frame, rect, probs)
cv2.imshow("frame", frame)
cur_pos = cap.get(cv2.CAP_PROP_POS_FRAMES)
cap.set(cv2.CAP_PROP_POS_FRAMES, cur_pos + 250)
key = cv2.waitKey(5000)
if key == ord('q'):
break
elif key == ord('n'):
cur_pos = cap.get(cv2.CAP_PROP_POS_FRAMES)
cap.set(cv2.CAP_PROP_POS_FRAMES, cur_pos + 500)
cap.release()
cv2.destroyAllWindows()
def show_result(self, image, rect, probs):
cv2.rectangle(image, (rect.left(), rect.top()), (rect.right(), rect.bottom()), self.rect_color, 1)
cv2.circle(image, (rect.left(), rect.top()), 1, self.rect_color, -1)
cv2.circle(image, (rect.left(), rect.bottom()), 1, self.rect_color, -1)
cv2.circle(image, (rect.right(), rect.top()), 1, self.rect_color, -1)
cv2.circle(image, (rect.right(), rect.bottom()), 1, self.rect_color, -1)
sum = 0.0
for i in range(len(probs[0])):
cv2.putText(image, "{:}:{:1.2f}".format(self.labels[i], probs[0][i]), (0, 10 + 20 * i),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, self.text_color, 2)
cv2.line(image, (100, 10 + 20 * i), (int(100 + probs[0][i] * 200), 10 + 20 * i), self.text_color, 3)
sum += probs[0][i]
        print(sum)
return image
if __name__ == '__main__':
model = "../model/classifier/model.pkl"
root_dataset = "../dataset"
st = StandAlone(dataset=root_dataset, model_path=model)
# raw_images_folder = "../dataset/raw_data"
# st.train_data(raw_data=raw_images_folder)
# st.ensemble_data()
# st.train_model()
# check_dataset = "../dataset/test";
# st.check_precision(check_dataset)
#
# st.test_image_file()
st.live_video("../data/THE FINAL MANUP 20171080P.mp4")
|
[
"dreamyouth@engineer.com"
] |
dreamyouth@engineer.com
|
2f2600b2bb687234a23788b3695b66ef8445a9fd
|
1ce2cd99bee3c5abf9c51138684efb73762174a5
|
/python/examples/example_grid_layout.py
|
6d726be732359f6a16e34851ad6aa3d1f545c6b2
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
BrainCOGS/neuroglancer
|
2c27d500c9cddec1472c7de760946067b791baa1
|
b0db084a167e60009ed8ae1ec787557a4b1ae626
|
refs/heads/master
| 2023-08-30T10:13:37.495165
| 2021-05-18T15:26:15
| 2021-05-18T15:26:15
| 290,582,565
| 2
| 1
|
Apache-2.0
| 2020-08-26T19:08:45
| 2020-08-26T19:08:44
| null |
UTF-8
|
Python
| false
| false
| 943
|
py
|
from __future__ import print_function
import webbrowser
import neuroglancer
viewer = neuroglancer.Viewer()
with viewer.txn() as s:
s.layers['image'] = neuroglancer.ImageLayer(
source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
)
s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
)
s.layout = neuroglancer.row_layout([
neuroglancer.column_layout([
neuroglancer.LayerGroupViewer(layers=['image', 'ground_truth']),
neuroglancer.LayerGroupViewer(layers=['image', 'ground_truth']),
]),
neuroglancer.column_layout([
neuroglancer.LayerGroupViewer(layers=['ground_truth']),
neuroglancer.LayerGroupViewer(layers=['ground_truth']),
]),
])
print(viewer.state)
print(viewer)
webbrowser.open_new(viewer.get_viewer_url())
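# A hedged variation on the 2x2 grid above: a single row of panels can be built
# the same way by putting LayerGroupViewer entries directly in row_layout.
# Shown commented out so the grid configured above is left unchanged.
#
# with viewer.txn() as s:
#     s.layout = neuroglancer.row_layout([
#         neuroglancer.LayerGroupViewer(layers=['image']),
#         neuroglancer.LayerGroupViewer(layers=['image', 'ground_truth']),
#         neuroglancer.LayerGroupViewer(layers=['ground_truth']),
#     ])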
|
[
"jbms@google.com"
] |
jbms@google.com
|
ed50f20c4a664cdb4ac6f1c80a23dbc3833ed00e
|
15d015714f73ce97ba2ffe84924b94fcd3af7890
|
/Atividade-27.03.2020/mqtt2.py
|
c2665a7f1860a203d4d8cc193ab8b192087f1cb7
|
[] |
no_license
|
MGStigger/PI_V
|
90ee2efc7b8476801bcb5c42849f1f5c4fc65840
|
fd96d62501dc6abb8bc68bd197ab47b29bb627d6
|
refs/heads/master
| 2021-05-21T22:57:27.103338
| 2020-07-08T06:10:55
| 2020-07-08T06:10:55
| 252,844,586
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
# Ensures paho is in PYTHONPATH
import context
# Import the publish helper from paho-mqtt
import paho.mqtt.publish as publish
# Publish a single message
publish.single("mgstigger", "Olá Mundo!", hostname="mqtt.eclipse.org")
|
[
"noreply@github.com"
] |
MGStigger.noreply@github.com
|
56e7149515588bdc3a1f36861b7a5690a7c1b104
|
dd116fe1e94191749ab7a9b00be25bfd88641d82
|
/cairis/cairis/ExceptionListCtrl.py
|
70ab9eaa008416236f63a7f1c15b501471b4ec42
|
[
"Apache-2.0"
] |
permissive
|
RobinQuetin/CAIRIS-web
|
fbad99327707ea3b995bdfb4841a83695989e011
|
4a6822db654fecb05a09689c8ba59a4b1255c0fc
|
HEAD
| 2018-12-28T10:53:00.595152
| 2015-06-20T16:53:39
| 2015-06-20T16:53:39
| 33,935,403
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,415
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#$URL$ $Id: ExceptionListCtrl.py 337 2010-11-07 23:58:53Z shaf $
import wx
import armid
import ARM
from Borg import Borg
from ExceptionDialog import ExceptionDialog
import ObstacleFactory
class ExceptionListCtrl(wx.ListCtrl):
def __init__(self,parent,envName,stepGrid):
wx.ListCtrl.__init__(self,parent,armid.USECASE_LISTEXCEPTIONS_ID,size=wx.DefaultSize,style=wx.LC_REPORT)
self.stepGrid = stepGrid
self.theEnvironmentName = envName
self.theLastSelection = ''
self.InsertColumn(0,'Exception')
self.SetColumnWidth(0,250)
self.theSelectedIdx = -1
self.theExcMenu = wx.Menu()
self.theExcMenu.Append(armid.DIMLIST_MENUADD_ID,'Add')
self.theExcMenu.Append(armid.DIMLIST_MENUDELETE_ID,'Delete')
self.theExcMenu.Append(armid.DIMLIST_MENUGENERATE_ID,'Generate Obstacle')
self.Bind(wx.EVT_RIGHT_DOWN,self.OnRightDown)
wx.EVT_MENU(self.theExcMenu,armid.DIMLIST_MENUADD_ID,self.onAddException)
wx.EVT_MENU(self.theExcMenu,armid.DIMLIST_MENUDELETE_ID,self.onDeleteException)
wx.EVT_MENU(self.theExcMenu,armid.DIMLIST_MENUGENERATE_ID,self.onGenerateObstacle)
self.Bind(wx.EVT_LIST_ITEM_SELECTED,self.OnItemSelected)
self.Bind(wx.EVT_LIST_ITEM_DESELECTED,self.OnItemDeselected)
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED,self.onExceptionActivated)
def setEnvironment(self,envName):
self.theEnvironmentName = envName
def OnItemSelected(self,evt):
self.theSelectedIdx = evt.GetIndex()
self.theLastSelection = self.GetItemText(self.theSelectedIdx)
def OnItemDeselected(self,evt):
self.theSelectedIdx = -1
def OnRightDown(self,evt):
self.PopupMenu(self.theExcMenu)
def onAddException(self,evt):
dlg = ExceptionDialog(self,self.theEnvironmentName)
if (dlg.ShowModal() == armid.EXCEPTION_BUTTONCOMMIT_ID):
exc = dlg.parameters()
pos = self.stepGrid.GetGridCursorRow()
table = self.stepGrid.GetTable()
currentStep = table.steps[pos]
currentStep.addException(exc)
table.steps[pos] = currentStep
self.InsertStringItem(0,exc[0])
def onDeleteException(self,evt):
if (self.theSelectedIdx == -1):
dlg = wx.MessageDialog(self,'No exception selected','Delete exception',wx.OK)
dlg.ShowModal()
dlg.Destroy()
else:
excName = self.GetItemText(self.theSelectedIdx)
self.DeleteItem(self.theSelectedIdx)
pos = self.stepGrid.GetGridCursorRow()
table = self.stepGrid.GetTable()
currentStep = table.steps[pos]
currentStep.deleteException(excName)
table.steps[pos] = currentStep
def onExceptionActivated(self,evt):
self.theSelectedIdx = evt.GetIndex()
excName = self.GetItemText(self.theSelectedIdx)
exc = self.stepGrid.stepException(excName)
dlg = ExceptionDialog(self,self.theEnvironmentName,exc[0],exc[1],exc[2],exc[3],exc[4])
if (dlg.ShowModal() == armid.EXCEPTION_BUTTONCOMMIT_ID):
updExc = dlg.parameters()
self.stepGrid.setStepException(self.theSelectedIdx,excName,updExc)
self.SetStringItem(self.theSelectedIdx,0,updExc[0])
def onGenerateObstacle(self,evt):
obsParameters = ObstacleFactory.build(self.theEnvironmentName,self.stepGrid.stepException(self.theLastSelection))
b = Borg()
obsId = b.dbProxy.addObstacle(obsParameters)
obsDict = b.dbProxy.getObstacles(obsId)
obsName = (obsDict.keys())[0]
dlg = wx.MessageDialog(self,'Generated obstacle: ' + obsName,'Generate obstacle',wx.OK)
dlg.ShowModal()
def load(self,excs):
self.DeleteAllItems()
for ex in excs:
idx = self.GetItemCount()
self.InsertStringItem(idx,ex)
|
[
"shamal.faily@googlemail.com"
] |
shamal.faily@googlemail.com
|
b9fcca818f418d7a6017eb3122da668dc4416005
|
f85f09e2b1971d60a9d5ca4063fc99a41702d5b9
|
/summarypython/summarypython/urls.py
|
cc646bf193dd5e2c9dc1ac69435e35de7ea76c0e
|
[] |
no_license
|
architsehgal019/Multimode-Summarized-Text-to-Speech-Conversion
|
9f47dbea1caa8137c45afafaba3bc9fc5361e9e8
|
858b589cefe33db6daf677f8e259288e090aa56e
|
refs/heads/master
| 2020-12-19T06:10:22.246185
| 2020-01-22T19:17:38
| 2020-01-22T19:17:38
| 235,640,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 755
|
py
|
"""summarypython URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
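# A hedged sketch following the docstring's recipe for a function view; the
# `summarizer` app and its `views.home` are assumptions for illustration and do
# not exist in this project, so the lines are left commented out.
#
# from summarizer import views
#
# urlpatterns = [
#     path('admin/', admin.site.urls),
#     path('', views.home, name='home'),
# ]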
|
[
"archit_150@yahoo.co.in"
] |
archit_150@yahoo.co.in
|