Dataset schema (column name: type, observed value range):

blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 2-616)
content_id: string (length 40)
detected_licenses: list (length 0-69)
license_type: string (2 classes)
repo_name: string (length 5-118)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (length 4-63)
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 (2.91k-686M, nullable)
star_events_count: int64 (0-209k)
fork_events_count: int64 (0-110k)
gha_license_id: string (23 classes)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string (213 classes)
src_encoding: string (30 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (2-10.3M)
extension: string (246 classes)
content: string (length 2-10.3M)
authors: list (length 1)
author_id: string (length 0-212)
blob_id: 19e5914084188d5755d4c357569877b84a3db38f | directory_id: 5532a4b50e29cc114a439ee851640abcff8f5c1f
path: /baikeSpider/spiderMain.py
content_id: b482a72122424c12cace2af668327d389b3e8bf0
detected_licenses: [] | license_type: no_license
repo_name: xingjia05/crawler | snapshot_id: c998620af1b90a1643bccdbc5d4a6a96d2f03c78 | revision_id: 2ecd0420330feb5b63b206973fcabcbffbea09ba | branch_name: refs/heads/master
visit_date: 2022-11-19T19:32:18.917684 | revision_date: 2020-07-25T15:58:46 | committer_date: 2020-07-25T15:58:46
github_id: 282476394 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1129 | extension: py
content:
# coding:utf8
import urlManager, htmlDownloader, htmlParser, htmlOutputer


class SpiderMain(object):
    def __init__(self):
        self.urls = urlManager.UrlManager()
        self.downloader = htmlDownloader.HtmlDownloader()
        self.parser = htmlParser.HtmlParser()
        self.outputer = htmlOutputer.HtmlOutputer()

    def craw(self, root_url):
        count = 1
        # seed with the parameter; the original passed the module-level
        # global rootUrl here, which only worked when run as a script
        self.urls.addNewUrl(root_url)
        while self.urls.hasNewUrl():
            # try:
            newUrl = self.urls.getNewUrl()
            print('craw %d:%s' % (count, newUrl))
            htmlContent = self.downloader.download(newUrl)
            newUrls, newData = self.parser.parser(newUrl, htmlContent)
            self.urls.addNewUrls(newUrls)
            self.outputer.collectData(newData)
            if count == 5:
                break
            count = count + 1
            # except:
            #     print("craw failed")
        self.outputer.outputHtml()


if __name__ == "__main__":
    rootUrl = "https://baike.baidu.com/item/Python/407313"
    objSpider = SpiderMain()
    objSpider.craw(rootUrl)
authors: ["xingjiazhang@192.168.1.4"] | author_id: xingjiazhang@192.168.1.4

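The four collaborator modules (urlManager, htmlDownloader, htmlParser, htmlOutputer) are not part of this record, so SpiderMain is not runnable on its own. A minimal stand-in for the UrlManager interface it calls could look like the sketch below; only the method names come from the calls above, the set-based implementation is an assumption.

class UrlManager(object):
    """Hypothetical sketch: tracks pending vs. crawled URLs with two sets."""

    def __init__(self):
        self.new_urls = set()   # discovered but not yet crawled
        self.old_urls = set()   # already crawled

    def addNewUrl(self, url):
        if url and url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def addNewUrls(self, urls):
        for url in urls or []:
            self.addNewUrl(url)

    def hasNewUrl(self):
        return len(self.new_urls) != 0

    def getNewUrl(self):
        url = self.new_urls.pop()
        self.old_urls.add(url)
        return url
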
blob_id: 9929b559634ed099ff922aaf1f3e3d1868db8a59 | directory_id: b438c197c2c564cce2c6c6525fe048e8804a5b6d
path: /setup.py
content_id: 87c32fc998759e89874d5055790d290efefe706e
detected_licenses: [] | license_type: no_license
repo_name: serge-m/dlibfacedetector | snapshot_id: 06101bf4d7e3dc732c8857b1dc42fdb9c63ffb8c | revision_id: 0bc96d6ddea3660f670aa3029e7f515709c3d2a2 | branch_name: refs/heads/master
visit_date: 2021-01-20T18:15:25.239639 | revision_date: 2017-07-29T09:52:02 | committer_date: 2017-07-29T09:52:02
github_id: 90914347 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 421 | extension: py
content:
#!/usr/bin/env python
# setuptools (not distutils.core) understands install_requires;
# distutils silently ignores that keyword
from setuptools import setup

with open('requirements.txt') as f:
    required = f.read().splitlines()

setup(name='dlibfaceextractor',
      version='0.0.1',
      description='Face extractor based on dlib',
      author='sergem',
      author_email='sbmatyunin@gmail.com',
      url='https://serge-m.github.io',
      install_requires=required,
      packages=['dlibfaceextractor'],
      )
authors: ["sbmatyunin@gmail.com"] | author_id: sbmatyunin@gmail.com

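One fragile spot in the script above: f.read().splitlines() forwards blank lines and comment lines from requirements.txt straight into install_requires. A slightly more defensive variant of that parsing (an editorial sketch, not from the repo):

with open('requirements.txt') as f:
    # keep only non-empty, non-comment lines
    required = [line.strip() for line in f
                if line.strip() and not line.lstrip().startswith('#')]
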
blob_id: 7269fba168be92f031d45efcf6db471198159283 | directory_id: 3acc2ba3aedf9ec54ef70384d8481a6f1ef8f9d8
path: /habgnab_art.py
content_id: 6aa22731e8478c137052fb08fb4cc1c32a6dd6fa
detected_licenses: [] | license_type: no_license
repo_name: Aknexad/hangman | snapshot_id: df8f7fce2bec5bcf191b6b2a62ec10dbe67b51d4 | revision_id: 736f174a9652062410a0275a3853ad5b43a5d1e9 | branch_name: refs/heads/main
visit_date: 2023-05-06T20:45:21.265140 | revision_date: 2021-06-02T19:10:18 | committer_date: 2021-06-02T19:10:18
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 533 | extension: py
content:
stage = ['''
  +---+
  |   |
  O   |
 /|\  |
 / \  |
      |
=========''',
'''
  +---+
  |   |
  O   |
 /|\  |
 /    |
      |
=========''',
'''
  +---+
  |   |
  O   |
 /|\  |
      |
      |
=========''',
'''
  +---+
  |   |
  O   |
 /|   |
      |
      |
=========''',
'''
  +---+
  |   |
  O   |
  |   |
      |
      |
=========''',
'''
  +---+
  |   |
  O   |
      |
      |
      |
=========''',
'''
  +---+
  |   |
      |
      |
      |
      |
=========''']
authors: ["noreply@github.com"] | author_id: Aknexad.noreply@github.com

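The list runs from the completed figure (stage[0], no lives left) down to the empty gallows (stage[6]). A game loop would typically index it with a lives counter; a small illustration of that mapping (usage assumed, not part of this record):

# print the gallows matching the number of lives remaining
for lives in range(6, -1, -1):
    print("Lives left: %d" % lives)
    print(stage[lives])   # stage[0] is the fully drawn (lost) figure
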
blob_id: c636e5b018738670e0f6b6f4b1801954dca8e009 | directory_id: a85ad1cb4744755f8320319e154bca64fa84b889
path: /my_modules.py
content_id: c359669fbc7db8d085f62f126b7a04b1d4301500
detected_licenses: [] | license_type: no_license
repo_name: vfedotovs/text-based-password-generator | snapshot_id: fc3946c7e443010f36bf8c2576a14a8a290f248d | revision_id: ed8d3778370fa733b6b4fc1ea39e2bc44b693d6a | branch_name: refs/heads/master
visit_date: 2022-11-29T04:50:48.052416 | revision_date: 2020-07-25T21:18:51 | committer_date: 2020-07-25T21:18:51
github_id: 268333049 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4249 | extension: py
content:
import random

spec_chars = ['!', '"', '£', '$', '%', '&', '*', '(', ')', '_', '+']
numbers = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
           'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']


def gen_random_index_list(count: int, range_len: int) -> list:
    "Generate a list of random indexes into a character pool."
    index_list = []
    for i in range(count):
        # randrange(range_len) can return 0; the original randrange(1, range_len)
        # silently excluded the first element of every pool
        x = random.randrange(range_len)
        index_list.append(x)
    return index_list


def shuffle_pass(str_list: list) -> str:
    "Take a raw character list and return a shuffled character string."
    random.shuffle(str_list)
    pass_char_list = ""
    for pass_char in str_list:
        pass_char_list += str(pass_char)
    return pass_char_list


def get_pwd_len() -> int:
    """Collect the password length from STDIN and validate that
    the value is an int and > 4 and < 30.
    Returns int: pwd_len."""
    min_len = 4
    max_len = 30
    len_valid = True
    format_valid = True
    while format_valid:
        try:
            pwd_len = int(input("Choose password length (characters): "))
            format_valid = False
        except ValueError:
            print("Password length should be a digit")
    while len_valid:
        if pwd_len > min_len and pwd_len < max_len:
            return pwd_len
        if pwd_len < min_len:
            print("Minimum password length is 4, try again")
            pwd_len = int(input("Choose password length (characters): "))
        if pwd_len > max_len:
            print("Max password length is 30 characters, try again")
            pwd_len = int(input("Choose password length (characters): "))


def get_char_types() -> list:
    """Collect y/n answers from STDIN.
    No error checking for invalid inputs; TODO fix that."""
    req_list = []
    include_letter_up = str(
        input("Do you want include upper case letters (y/n)?:"))
    include_letter_low = str(
        input("Do you want include lower case letters (y/n)?:"))
    include_nums = str(input("Do you want include numbers (y/n)?:"))
    include_specials = str(
        input("Do you want include special characters (y/n)?:"))
    req_list.append(include_letter_up)
    req_list.append(include_letter_low)
    req_list.append(include_nums)
    req_list.append(include_specials)
    return req_list


def calc_sect_len(req_list: list) -> list:
    "Calculate the section length for each character type."
    # req_list[0] is expected to hold the requested password length;
    # the remaining entries are boolean include-this-type flags
    sect_count = 0
    count_len_diff = []
    pass_len = req_list[0]
    for req in req_list:
        if req is True:
            sect_count += 1
    sect_len = pass_len // sect_count
    diff = pass_len - (sect_count * sect_len)
    count_len_diff.append(sect_count)
    count_len_diff.append(sect_len)
    count_len_diff.append(diff)
    return count_len_diff


def pass_generator(requirements: list, count_len_diff: list) -> list:
    # sect_count = count_len_diff[0]
    sect_len = count_len_diff[1]
    diff = count_len_diff[2]
    final_pass = []
    # pad with random upper case letters when the length does not divide evenly
    if diff > 0:
        let_idx_list = gen_random_index_list(diff, len(letters))
        for index in let_idx_list:
            final_pass.append(letters[index])
    # enabled_numbers
    if requirements[3]:
        num_idx_list = gen_random_index_list(sect_len, len(numbers))
        for index in num_idx_list:
            final_pass.append(numbers[index])
    # enabled_letters_up
    if requirements[1]:
        let_idx_list = gen_random_index_list(sect_len, len(letters))
        for index in let_idx_list:
            final_pass.append(letters[index])
    # enabled_letters_low
    if requirements[2]:
        let_idx_list = gen_random_index_list(sect_len, len(letters))
        for index in let_idx_list:
            low_str = letters[index]
            low = low_str.lower()
            final_pass.append(low)
    # enabled_specials
    if requirements[4]:
        spec_idx_list = gen_random_index_list(sect_len, len(spec_chars))
        for index in spec_idx_list:
            final_pass.append(spec_chars[index])
    return final_pass
authors: ["vtrader@inbox.lv"] | author_id: vtrader@inbox.lv

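The module's driver script is not included in this record. Judging by the indexing in calc_sect_len and pass_generator (req_list[0] used as the total length, flags at positions 1-4), the expected requirements layout is [pwd_len, upper, lower, digits, specials]. An end-to-end sketch on that assumption only:

# Hypothetical driver; the [pwd_len, up, low, nums, specials] layout is
# inferred from the indexes used in calc_sect_len()/pass_generator().
pwd_len = get_pwd_len()
answers = get_char_types()                           # four 'y'/'n' strings
flags = [ans.lower().startswith('y') for ans in answers]
requirements = [pwd_len] + flags                     # real booleans, so `req is True` holds
count_len_diff = calc_sect_len(requirements)
raw_chars = pass_generator(requirements, count_len_diff)
print(shuffle_pass(raw_chars))
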
blob_id: 623e0cd2ec63ea3b4ba362632a976b5f02d1847c | directory_id: 582be7636d99fa5d4523b9bfe2b1d99d2a79bd47
path: /PostModeling.py
content_id: 4d13205b074b1b0d1b5a31d0b3a4fcfc912a7931
detected_licenses: [] | license_type: no_license
repo_name: srobles09/COVIDvaccineAllocationIP2021 | snapshot_id: d4ac3b11b95576467c6bb11b379197f83307a34c | revision_id: d0f08f74893a99e197ad418dc690a29e9c43f712 | branch_name: refs/heads/master
visit_date: 2023-04-17T10:24:59.914576 | revision_date: 2021-05-04T19:34:03 | committer_date: 2021-05-04T19:34:03
github_id: 357250644 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2450 | extension: py
content:
#import os
import pandas as pd
import numpy as np
# Plotting packages
import matplotlib.pyplot as plt
#import matplotlib.lines as mlines
#from matplotlib.colors import ListedColormap
#import seaborn as sns
# Geospatial packages
import geopandas as gpd
import fiona
from shapely.geometry import Point # Shapely for converting latitude/longitude to geometry
#from shapely.wkt import loads as load_wkt # Get centroids from shapely file
## Read in data
shp_path = "D:/Sandy Oaks/Documents/Grad School/S21_MATH-7594/Project/COVIDvaccineAllocationIP2021/shp_data/american_community_survey_tracts_2015_2019.shp"
mod_path = "D:/Sandy Oaks/Documents/Grad School/S21_MATH-7594/Project/COVIDvaccineAllocationIP2021/Final Problem Solutions.xlsx"
mod = pd.read_excel(mod_path,sheet_name='TRACTS')
fiona.open(shp_path)  # opens the shapefile; the returned collection is unused
denver_tracts = gpd.read_file(shp_path)
denver_tracts.reset_index(inplace=True)
servprov_path = "D:/Sandy Oaks/Documents/Grad School/S21_MATH-7594/Project/COVIDvaccineAllocationIP2021/Data_Denver_Vaccination_Sites.xlsx"
serv_prov = pd.read_excel(servprov_path,sheet_name='Service Provider Sites')
## Handle service providers
lat_long = serv_prov[['Lat','Long']].to_numpy()
geometry = [Point(xy) for xy in zip(serv_prov['Long'], serv_prov['Lat'])]
#crs = {'init': 'epsg:4326'} # In degrees
crs = {'init': 'epsg:3857'} # In meters
serv_lat_long = gpd.GeoDataFrame(serv_prov, crs = crs, geometry = geometry)
mod['GEO_NAME'] = denver_tracts['GEO_NAME'] # save me later
denver_tracts['social_metric'] = mod['SVI']
denver_tracts['Perc Vac iter'] = mod['Perc Vac iter']
denver_tracts['Perc Vac Full'] = mod['Perc Vac Full']
#Nice pink: PuRd
plt.rcParams['axes.titlesize'] = 50
ax = denver_tracts.plot(column='social_metric', cmap='Blues', linewidth=0.8, edgecolor='black', figsize=(30, 18))
x, y = serv_prov['Long'].values, serv_prov['Lat'].values
ax.scatter(x,y, marker="o", color='r')
plt.title('SVI Metric')
plt.show()
ax = denver_tracts.plot(column='Perc Vac Full', cmap='Blues', linewidth=0.8, edgecolor='black', figsize=(30, 18))
x, y = serv_prov['Long'].values, serv_prov['Lat'].values
ax.scatter(x,y, marker="o", color='r')
plt.title('One batch distribution')
plt.show()
ax = denver_tracts.plot(column='Perc Vac iter', cmap='Blues', linewidth=0.8, edgecolor='black', figsize=(30, 18))
x, y = serv_prov['Long'].values, serv_prov['Lat'].values
ax.scatter(x,y, marker="o", color='r')
plt.title('Multi-batch distribution')
plt.show()
authors: ["69159760+srobles09@users.noreply.github.com"] | author_id: 69159760+srobles09@users.noreply.github.com

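The zip(Long, Lat) ordering above matters: shapely Points take (x, y), i.e. (longitude, latitude). A toy, self-contained illustration of the same GeoDataFrame construction, using made-up coordinates and the modern crs string syntax rather than the project's data:

import pandas as pd
import geopandas as gpd
from shapely.geometry import Point

df = pd.DataFrame({'site': ['A', 'B'],
                   'Lat': [39.74, 39.76],
                   'Long': [-104.99, -104.90]})
geometry = [Point(xy) for xy in zip(df['Long'], df['Lat'])]  # (x=lon, y=lat)
gdf = gpd.GeoDataFrame(df, crs='EPSG:4326', geometry=geometry)
print(gdf.geometry.iloc[0])  # POINT (-104.99 39.74)
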
blob_id: abd99effc8066d0f99b112d3383ed33be7e2b560 | directory_id: 2d732fa72d31bbbc3654037c8f9e8d73a2b34855
path: /CalcBWMatrix.py
content_id: 34171373fc551d35efe8dae408c9af72c70e80e0
detected_licenses: [] | license_type: no_license
repo_name: seshadrs/EC2Tools | snapshot_id: 5380fe532355404353a08b53fe4b80da68c15d1b | revision_id: b8ff5f8676bf2404796e914442c85848ed00fbcd | branch_name: refs/heads/master
visit_date: 2016-09-06T07:05:01.755735 | revision_date: 2013-02-17T21:38:53 | committer_date: 2013-02-17T21:38:53
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3462 | extension: py
content:
"""
Author : Seshadri Sridharan
A script that calculates the Bandwidth matrix for all EC2 instances using 'netconf'
Input : Instances txt file with each line in the format "<ami-id> <public-dns>"
"""
import subprocess
import sys
class EC2Instance:
"""
lets you ssh to an EC2 instance using subprocess, execute a command, return result.
"""
def __init__(self, uname, host, pemfile=None):
"""uname, host and key file(optional)"""
self.UNAME = uname
self.HOST = host
self.KEY = pemfile
def execute(self, command):
"""executes shell command and returns result as a string"""
ssh = None
if self.KEY:
ssh = subprocess.Popen(["ssh", "-i "+ self.KEY, self.UNAME+"@"+self.HOST, command],
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
else:
ssh = subprocess.Popen(["ssh", self.UNAME+"@"+self.HOST, command],
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result = ssh.stdout.readlines()
if result == []:
error = ssh.stderr.readlines()
print >> sys.stderr, "ERROR: %s" % error
return ''.join(result)
def executeMulti(self, commands):
""" executes multiple commands.
takes as input a list of commands to execute
returns the nested-list of the result string """
results=[]
ssh = None
for command in commands:
result = self.execute(command)
results.append(result)
return results
def processorType(self):
"""
returns a list containing the instance's processor type and the cpuinfo file content
"""
cpuinfo = self.execute("cat /proc/cpuinfo")
lines = cpuinfo.split('\n')
for l in lines:
if l[:10]=="model name":
return [l.split(':')[1].strip(), cpuinfo]
return [None,cpuinfo]
def get_bw(netconfRes):
"""
Extracts n/w bandwidth from netconf result
"""
bw = netconfRes.strip().split(' ')[-1]
return bw
if __name__ == "__main__":
print "# OBTAINING ALL INSTANCES FORM instances txt file\n"
IPs=[]
for l in open(sys.argv[1]).readlines():
l=l.strip()
if l:
IPs.append(l.split(' ')[-1].strip())
instances = [EC2Instance("ubuntu",ip) for ip in IPs]
print "\n".join(IPs)
# print "# STARTING NETSERVER ON ALL INSTANCES\n"
# for instance in instances:
# print instance.execute("sudo netserver") #start the netserver in all
#run netconf test on all A-B pair-combinations of instances
print "# RUNNING NETCONF SERVER ON ALL INSTANCES\n"
combinations ={} #holds result for all A-B combinations
for a in range(len(instances)):
for b in range(len(instances)):
combination = tuple(sorted([a,b]))
if b!=a and combination not in combinations:
print "=> RUNNING COMBINATION ",a,b, IPs[a], IPs[b]
res = instances[a].execute("netperf -H "+IPs[b]) #A as client, B as server
bw = get_bw(res)
combinations[combination] = bw
print bw, res
print "# COMPLETED RUNNING ON ", len(combinations), " COMBINATIONS\n"
print "# CONSTRUCTING BANDWIDTH MATRIX\n"
bwmat=[["" for x in range(len(instances))] for y in range(len(instances))]
for i in range(len(instances)):
for j in range(len(instances)):
if j==i:
bwmat[i][i]="0"
elif j>i:
combination = tuple(sorted([i,j]))
bwmat[i][j]=combinations[combination]
else:
bwmat[i][j]=""
print "#BANDWIDTH MATRIX:\n"
for i in range(len(instances)):
print ", ".join(bwmat[i])
authors: ["seshadrs@cs.cmu.edu"] | author_id: seshadrs@cs.cmu.edu

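For reference, the instances file the script expects carries one "<ami-id> <public-dns>" pair per line, and only the last whitespace-separated token is used. A hypothetical mini-check of that same parsing logic (Python 3 here for illustration; the script itself is Python 2, and the AMI ids and hostnames are made up):

lines = [
    "ami-0abc123 ec2-1-2-3-4.compute-1.amazonaws.com",
    "ami-0def456 ec2-5-6-7-8.compute-1.amazonaws.com",
]
ips = [l.strip().split(' ')[-1].strip() for l in lines if l.strip()]
print(ips)  # the public DNS names the EC2Instance objects are built from
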
blob_id: d2e9df288e273a43ccb5fec56e34bdea637cba51 | directory_id: ac5e52a3fc52dde58d208746cddabef2e378119e
path: /exps-gsn-edf/gsn-edf_ut=3.0_rd=0.5_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=10/params.py
content_id: 9a7717a916f586bbc8ca9cafff2ceb6bd92ebf1a
detected_licenses: [] | license_type: no_license
repo_name: ricardobtxr/experiment-scripts | snapshot_id: 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | revision_id: 7bcebff7ac2f2822423f211f1162cd017a18babb | branch_name: refs/heads/master
visit_date: 2023-04-09T02:37:41.466794 | revision_date: 2021-04-25T03:27:16 | committer_date: 2021-04-25T03:27:16
github_id: 358926457 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 254 | extension: py
content:
{'cpus': 4,
 'duration': 30,
 'final_util': '3.021810',
 'max_util': '3.0',
 'periods': 'harmonic-2',
 'release_master': False,
 'res_distr': '0.5',
 'res_nmb': '4',
 'res_weight': '0.06',
 'scheduler': 'GSN-EDF',
 'trial': 10,
 'utils': 'uni-medium-3'}
authors: ["ricardo.btxr@gmail.com"] | author_id: ricardo.btxr@gmail.com

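The file body is a bare Python dict literal, so an experiment harness can read it without importing it as a module. One safe way to do that (an assumption about how such files are consumed, not something shown in this record):

import ast

with open('params.py') as f:
    params = ast.literal_eval(f.read())  # parses the literal; executes no code
print(params['scheduler'], params['cpus'])  # GSN-EDF 4
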
blob_id: d8b4a8e03d911c074131883c717616a071878443 | directory_id: 1bd299b05b2c0769d9dfd77ecd336a103bf969fe
path: /Q2a.py
content_id: e0d8e349cd4259bd101cc6d11c0b4e6068c691e0
detected_licenses: [] | license_type: no_license
repo_name: xueyiyao/cs165a_hw1 | snapshot_id: 89954e37c9412c5805a4961145072faf2a8ab07a | revision_id: a1b0d0187d9cc2651783e4d831bb80166dbcf107 | branch_name: refs/heads/master
visit_date: 2020-12-20T04:57:40.982470 | revision_date: 2020-01-25T00:09:08 | committer_date: 2020-01-25T00:09:08
github_id: 235968703 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 424 | extension: py
content:
import numpy as np

# Note: Should use numpy array for A and B
# A: 5x4
# B: 4x3
A = np.arange(1, 21).reshape(5, 4)
print(A)
B = np.arange(1, 13).reshape(4, 3)
print(B)
AxB = A.dot(B)
print(AxB)

"""
Output example:
[[ 1  2  3  4]
 [ 5  6  7  8]
 [ 9 10 11 12]
 [13 14 15 16]
 [17 18 19 20]]
[[ 1  2  3]
 [ 4  5  6]
 [ 7  8  9]
 [10 11 12]]
[[ 70  80  90]
 [158 184 210]
 [246 288 330]
 [334 392 450]
 [422 496 570]]
"""
authors: ["xueyiyao@umail.ucsb.edu"] | author_id: xueyiyao@umail.ucsb.edu

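As a sanity check on the printed result: a (5x4) times (4x3) product has shape (5, 3), and its top-left entry is the dot product of A's first row with B's first column, 1*1 + 2*4 + 3*7 + 4*10 = 70, matching the output block. The same product via the @ operator, which is equivalent to .dot for 2-D arrays:

import numpy as np

A = np.arange(1, 21).reshape(5, 4)
B = np.arange(1, 13).reshape(4, 3)
assert (A @ B == A.dot(B)).all()  # @ and .dot agree for 2-D arrays
assert (A @ B)[0, 0] == 70        # 1*1 + 2*4 + 3*7 + 4*10
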
blob_id: c95e855e9dbebcb86c55c09cf4e937203dc39f61 | directory_id: 39a0b2d123198782fa39ae8f37855a7d312e2600
path: /prime.py
content_id: 6dc64b70bac969d8aeebe20edc623f02dd4b312d
detected_licenses: [] | license_type: no_license
repo_name: ddchristian/Prime-Inventory | snapshot_id: 10effb95562e468dc89f35d078b95a215fb8ceeb | revision_id: 2230904f70b3711e11318f21c045710504fc11d3 | branch_name: refs/heads/master
visit_date: 2021-04-09T11:23:59.866737 | revision_date: 2019-02-03T03:28:57 | committer_date: 2019-02-03T03:28:57
github_id: 125563068 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2389 | extension: py
content:
from startup import check_startup
from queryMethods import getDevice

startup_vars = check_startup(spark_bot=False)
print('From __main__: startup_vars=', startup_vars)
print('new startup_vars =', startup_vars)

query_options = {'A': 'serialNumberQ', 'B': 'ipAddrQ', 'C': 'macAddrQ', 'D': 'deviceTypeQ', 'E': 'softwareQ'}
mac_options = {'A': 'network', 'B': 'client'}
searchOp_options = {'A': 'eq', 'B': 'startsWith', 'C': 'contains'}
software_options = {'A': 'IOS', 'B': 'IOS-XE', 'C': 'NX-OS', 'D': 'Cisco Controller'}
queryType = {'serialNumberQ': 'Serial Number', 'ipAddrQ': 'IP Address', 'macAddrQ': 'MAC Address', 'deviceTypeQ': 'Device Type', 'softwareQ': 'Software Type'}

print('\nA Serial Number \nB IP Address \nC MAC Address \nD Device Type \nE Software Version')
query = input('Select the query type (A, B, C, etc.): ')
query = query_options[query.upper()]

option = ''
if query == 'macAddrQ':
    mac = input('Select the MAC lookup type (A or B):\nA Network MAC \nB Client MAC')
    option = mac_options[mac.upper()]
elif query == 'deviceTypeQ':
    searchOp = input('Select the search option for Device Type (A, B or C):\nA Equal \nB Starts With \nC Contains')
    option = searchOp_options[searchOp.upper()]
elif query == 'softwareQ':
    print('\nA IOS \nB IOS-XE \nC NX-OS \nD Cisco Controller')
    software = input('Select the software type (A, B, C or D):')
    option = software_options[software.upper()]

searchValue = ''
if query != 'softwareQ':
    searchValue = input('\nEnter search value:\n').upper().strip()

print('query is:', query)
print('option is:', option)
print('searchValue is:', searchValue)

result = getDevice(startup_vars, query, searchValue, option)

if query in ['serialNumberQ', 'ipAddrQ', 'macAddrQ']:
    print('\n\nSummary details for search with', queryType[query], ':', searchValue, '!')
    for key, value in result.items():
        print(key, ' : ', value)

if query in ['softwareQ', 'deviceTypeQ']:
    print('Total records found: ', len(result), '\n\n')
    for record in range(len(result)):
        print('Record Number: ', record + 1)
        print('----------------------------------------\n')
        for key, value in result[record].items():
            print(key, ':', value)
        print('\n\n')

if not result:
    print('Nothing returned from search. Item', searchValue, 'not found in Prime database.')
authors: ["dchristi@cisco.com"] | author_id: dchristi@cisco.com

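The menu handling above is a dict-dispatch pattern: the user's letter is upper-cased and used directly as a key, so an unrecognised letter raises KeyError. A small hardened variant of that same lookup (editorial sketch; only the option dict is reused from the script):

# Hypothetical defensive variant of the menu lookup used above
query_options = {'A': 'serialNumberQ', 'B': 'ipAddrQ', 'C': 'macAddrQ',
                 'D': 'deviceTypeQ', 'E': 'softwareQ'}

choice = input('Select the query type (A, B, C, etc.): ').strip().upper()
query = query_options.get(choice)
if query is None:
    raise SystemExit('Unknown option %r; expected one of %s'
                     % (choice, sorted(query_options)))
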
blob_id: f51c4db2199af5db649ce0d2dd84febdacf977fe | directory_id: f0edb1fdfc89e3b01b82eca668b833c8e996c919
path: /src/meanvar.py
content_id: 9127ddd84417a583b3f5b811ea7fd63d49fc1704
detected_licenses: [] | license_type: no_license
repo_name: SixByNine/gwdetect | snapshot_id: 420c14b595662485b735490323440d007f00e129 | revision_id: 9cba6713d09e7442e1cbb2b04668f653c1688d6e | branch_name: refs/heads/master
visit_date: 2021-01-02T09:39:04.249855 | revision_date: 2017-06-29T09:37:10 | committer_date: 2017-06-29T09:37:10
github_id: 10794507 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 931 | extension: py
content:
#!/usr/bin/python
# Python 2 script (print statements).
from sys import argv
from numpy import *
import argparse

parser = argparse.ArgumentParser(description='Mean, variance, other useful things.')
parser.add_argument('file')
parser.add_argument('col', type=int)
parser.add_argument('-m', '--median', action="store_const", const='median')
parser.add_argument('-w', '--weights', type=int, default=-1)

args = parser.parse_args()

w = list()
vals = list()
f = open(args.file)
col = int(args.col) - 1
wcol = 0
if args.weights > 0:
    wcol = args.weights

for line in f:
    elems = line.split()
    vals.append(float(elems[col]))
    if wcol > 0:
        # weight column holds 1-sigma errors; use inverse-variance weights
        s = float(elems[wcol - 1])
        w.append(1.0 / pow(s, 2))
    else:
        w.append(1.0)

w = array(w)
w /= sum(w)
ovals = array(vals)
vals = ovals * w
m = sum(vals)  # weighted mean (weights already normalised)
vals = (ovals - m)
rms = sum(w * w * vals * vals) / sum(w * w)

if args.median == "median":
    # 84th percentile minus the median approximates 1 sigma for a Gaussian
    p = percentile(ovals, 84)
    med = median(ovals)
    print med, p, p - med
else:
    print m, rms, sqrt(rms)
authors: ["mkeith@pulsarastronomy.net"] | author_id: mkeith@pulsarastronomy.net

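The statistics here are inverse-variance weighted: with -w pointing at a column of 1-sigma errors, each point gets weight w_i = 1/s_i^2, the weights are normalised, and the mean is sum(w_i * x_i). A quick check of that weighted mean on toy numbers (not taken from the script):

import numpy as np

x = np.array([10.0, 12.0, 11.0])
s = np.array([1.0, 2.0, 1.0])   # per-point 1-sigma errors
w = 1.0 / s**2
w /= w.sum()                    # normalise, as the script does
print(np.sum(w * x))            # inverse-variance weighted mean, ~10.667
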
blob_id: 504a3b7543dcbf0a72d82f024bc05d10b5648ab0 | directory_id: 84907367f182ef7d5708c4232d9e41ea82a5c725
path: /Print_masterfile.py
content_id: 77d3d22ff21d26afb09e7dbaa7cd42c2388078ce
detected_licenses: [] | license_type: no_license
repo_name: Paulina-Panek/BiopythonScripts | snapshot_id: db91a53ffadd2bb7be006c9eca1379c1afd82e1c | revision_id: b5e49b782684346a66e196b9ee63a6e6bf3a87c8 | branch_name: refs/heads/master
visit_date: 2021-03-17T09:31:08.708612 | revision_date: 2020-05-01T00:06:27 | committer_date: 2020-05-01T00:06:27
github_id: 246980284 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5545 | extension: py
content:
# Paulina Panek
# April 2020
# Script parsing result GenPept file (.gp) to get a csv file with all results

from Bio import Entrez
Entrez.email = "ppanek@hpu.edu"
from Bio import SeqIO
from Bio import Align
from Bio.SubsMat.MatrixInfo import gonnet
from PercentIdentity import *  # imports all functions from PercentIdentity.py


def numberRecords(ListRecords):
    # Function prints number of records
    records = list(SeqIO.parse(ListRecords, "genbank"))
    print("Found %i records in initial file " % len(records))


def CheckIfDuplicate(first_sequence_name, second_sequence_name, first_sequence, second_sequence):
    # returns 0 (same sequences), 1 (not same sequences), or 3 (something went wrong, function didn't work)
    return_value = 3
    # if same species AND the sequence is the same, flag as duplicate
    if first_sequence_name == second_sequence_name:
        if first_sequence == second_sequence:
            return_value = 0  # same sequences
        else:
            return_value = 1
    else:
        return_value = 1
    return return_value


def RemoveLike(protein_name):
    # if the protein has the word "like" in its name, returns 1
    ret_val = 0
    if "like" in protein_name:
        ret_val = 1
    return ret_val


def unknown_aas(sequence):
    # returns the number of unknown amino acids (X) in the sequence;
    # the original incremented a counter at most once, so it never counted past 1
    return sequence.count('X')


numberRecords("arc_sequences_04202020.gp")
file = open("allResults_classified.csv", "w")


def MakeExcel(ListRecords):
    # assigns group, writes it with the sequence to a file,
    # (in progress) remove duplicate sequences or unknown XXXX
    counter = 0
    counterRecs = 0
    duplicates = 0
    old_sequence_name = "empty"
    old_sequence = "no sequence yet"
    new_sequence_name = "empty2"
    sequence_title = "error! check what happened here"

    for seq_record in SeqIO.parse(ListRecords, "gb"):  # for every record in the list
        # setting up initial variables
        new_sequence_name = seq_record.annotations["source"]
        new_sequence_length = len(seq_record)
        new_sequence = str(seq_record.seq)
        assignment = "UNASSIGNED FIX ME"
        Number_of_X = unknown_aas(new_sequence)
        prot_name = seq_record.description

        # if (CheckIfDuplicate(new_sequence_name, old_sequence_name, new_sequence, old_sequence) == 1) and (Number_of_X == 0) and RemoveLike(prot_name) == 0:
        #     (disabled) if not a duplicate, no unknown aas (X) and no "like" in the protein name, continue

        # Classification block begins ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        if seq_record.annotations["taxonomy"][2] == "Ecdysozoa":  # classify as invertebrate
            assignment = "Invertebrate"
        elif seq_record.annotations["taxonomy"][6] == "Amphibia":  # classify as amphibian
            assignment = "Amphibian"
        elif seq_record.annotations["taxonomy"][6] == "Actinopterygii":  # classify as fish
            assignment = "Fish"
        elif seq_record.annotations["taxonomy"][6] == "Archelosauria":  # classify as reptile or bird
            if seq_record.annotations["taxonomy"][11] == "Coelurosauria" or seq_record.annotations["taxonomy"][11] == "Aves":  # bird
                assignment = "Bird"
            else:
                assignment = "Reptile"
        elif seq_record.annotations["taxonomy"][6] == "Archosauria":  # classify as bird
            if seq_record.annotations["taxonomy"][11] == "Aves":  # bird
                assignment = "Bird"
            else:
                counter = counter + 1
        elif seq_record.annotations["taxonomy"][6] == "Lepidosauria" or seq_record.annotations["taxonomy"][6] == "Testudines + Archosauria group":
            assignment = "Reptile"
        elif seq_record.annotations["taxonomy"][6] == "Mammalia":
            if seq_record.annotations["taxonomy"][9] == "Primates":
                assignment = "Primate"
            else:
                assignment = "Mammal"
        else:
            assignment = "UNCLASSIFIED FIX ME\n"
            counter = counter + 1
        # end of classification block ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        counterRecs = counterRecs + 1  # counter of records
        recNumber = str(counterRecs)
        printname = seq_record.description

        comment = " "
        if CheckIfDuplicate(new_sequence_name, old_sequence_name, new_sequence, old_sequence) == 0:
            comment = "*REMOVED* Duplicate "
        elif RemoveLike(prot_name) == 1:
            comment = "*REMOVED* Like-protein "
        elif Number_of_X != 0:
            comment = "*REMOVED* Unknown AAs "

        file.write(recNumber + "," + printname + "," + seq_record.annotations["source"] + "," + assignment + "," + str(new_sequence_length) + "," + seq_record.id + "," + comment + "\n")

        old_sequence_length = new_sequence_length
        old_sequence_name = new_sequence_name
        old_sequence = new_sequence

    print("Number of unclassified species:", counter)
    print("Number of records written to file: ", counterRecs)
    file.close()


MakeExcel("arc_sequences_04202020.gp")

# for seq_record in SeqIO.parse("arc_sequences_04202020.gp", "gb"):  # uses GenPept file
#     print(seq_record.description)  # protein name [organism]
#     print(seq_record.seq)  # sequence
#     print(seq_record.annotations["source"])  # name (common name)
#     print(seq_record.annotations["taxonomy"][0])
#     print(seq_record.annotations)
#     print(len(seq_record))  # length of sequence
authors: ["noreply@github.com"] | author_id: Paulina-Panek.noreply@github.com

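The helper functions are easy to exercise without a GenPept file; a few toy checks of the filtering logic (the strings are made up, not project data):

# Toy checks of the record-filtering helpers defined above
assert CheckIfDuplicate("Mus musculus", "Mus musculus", "MKV", "MKV") == 0  # duplicate
assert CheckIfDuplicate("Mus musculus", "Rattus rattus", "MKV", "MKV") == 1
assert RemoveLike("activity-regulated cytoskeleton-like protein") == 1
assert unknown_aas("MKXXAX") == 3  # with the count() fix above
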
blob_id: dc9824e46b5a8cc9a859a0acb9b5bcb1772b368b | directory_id: 21c788e7d1a7b9b00eb355af223ede47fd877094
path: /stefan/RC4_stefan.py
content_id: f5f614bce21774f0a1afe5dbad2d849d4f7ea435
detected_licenses: [] | license_type: no_license
repo_name: StefanB7/EHN-410-Practical-2-AES-3DES-RC4 | snapshot_id: b97382a42924ef3208f071f9b77f1ff0370674ae | revision_id: da232b6457a8ca53bcc12f2ad29bbd9881cadeb3 | branch_name: refs/heads/main
visit_date: 2023-05-30T20:55:51.364034 | revision_date: 2021-06-12T18:30:10 | committer_date: 2021-06-12T18:30:10
github_id: 362843729 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 9146 | extension: py
content:
# EHN 410 - Practical 2 - 2021
# RC4 Encryption and Decryption
# Group 7
# Created: 14 May 2021 by Stefan Buys

import numpy as np
import copy
from PIL import Image


##### MAIN CIPHER FUNCTIONS #####

def RC4_Enctrypt(inspect_mode, plaintext, key):

    # Generate the required stream generation variables:
    S = bytearray(256)
    T = bytearray(256)

    # Stores the current state of the S table:
    Sarchive = []

    # Transform key to bytearray:
    keyBytes = bytearray(len(key))
    for i in range(len(key)):
        keyBytes[i] = ord(key[i])

    # Initialization:
    for i in range(256):
        S[i] = i
        T[i] = keyBytes[i % len(key)]

    # Perform a permutation on S (key scheduling); RC4's KSA visits all 256
    # entries (the original looped range(255) and skipped the last swap):
    temp = 0
    index = 0
    for i in range(256):
        index = (index + S[i] + T[i]) % 256
        temp = S[i]
        S[i] = S[index]
        S[index] = temp

    ### Plaintext Encoding ###

    # If the plaintext is a string to be encrypted:
    if isinstance(plaintext, str):
        cipherText = bytearray(len(plaintext))

        # Transform the plaintext input into a bytearray:
        plaintextBytes = bytearray(len(plaintext))
        for i in range(len(plaintext)):
            plaintextBytes[i] = ord(plaintext[i])

        # Encrypt the plaintext:
        i = 0
        j = 0
        for index in range(len(plaintextBytes)):
            # Generate the next stream element:
            i = (i + 1) % 256
            j = (j + S[i]) % 256
            temp = S[i]
            S[i] = S[j]
            S[j] = temp
            streamElementIndex = (S[i] + S[j]) % 256
            streamElement = S[streamElementIndex]

            cipherText[index] = plaintextBytes[index] ^ streamElement

            # If inspect mode, add the S table to Sarchive:
            if inspect_mode:
                Sarchive.append(makeBoxS(S))

        cipherTextString = ''
        for i in range(len(cipherText)):
            cipherTextString = cipherTextString + chr(cipherText[i])

        if inspect_mode:
            return {"S-table": Sarchive, "Ciphertext": cipherTextString}
        else:
            return cipherTextString

    # If the plaintext is an image (ndarray) that needs to be encrypted:
    if isinstance(plaintext, np.ndarray):
        # Check the plaintext's dimensions:
        numRows = plaintext.shape[0]
        numColumns = plaintext.shape[1]
        numLayers = plaintext.shape[2]

        # Test if there is an alpha layer:
        bAlphaLayer = False
        if numLayers > 3:
            bAlphaLayer = True
            numLayers = 3
            alpha_layer = np.array(plaintext[:, :, 3])

        # Ciphertext variable:
        cipherText = np.zeros((numRows, numColumns, numLayers), dtype='u1')

        # Variables used in the stream cipher should persist over different layer encryption:
        i = 0
        j = 0

        for layer in range(numLayers):
            # Create an input plaintext bytearray for the current layer.
            # row/col are used for the copy loops; the original reused i/j,
            # which clobbered the persistent stream state declared above:
            index = 0
            plaintextBytes = bytearray(numRows * numColumns)
            cipherTextBytes = bytearray(numRows * numColumns)
            for row in range(numRows):
                for col in range(numColumns):
                    plaintextBytes[index] = plaintext[row][col][layer]
                    index += 1

            # Encrypt the plaintext:
            for index in range(len(plaintextBytes)):
                # Generate the next stream element:
                i = (i + 1) % 256
                j = (j + S[i]) % 256
                temp = S[i]
                S[i] = S[j]
                S[j] = temp
                streamElementIndex = (S[i] + S[j]) % 256
                streamElement = S[streamElementIndex]

                cipherTextBytes[index] = plaintextBytes[index] ^ streamElement

                # If inspect mode, add the S table to Sarchive:
                if inspect_mode:
                    Sarchive.append(makeBoxS(S))

            # Transfer the calculated output to the ciphertext image ndarray variable:
            index = 0
            for row in range(numRows):
                for col in range(numColumns):
                    cipherText[row][col][layer] = cipherTextBytes[index]
                    index += 1

        if bAlphaLayer:
            cipherText = np.dstack((cipherText, alpha_layer))

        if inspect_mode:
            return {"S-table": Sarchive, "Ciphertext": cipherText.astype(int)}
        else:
            return cipherText.astype(int)


def RC4_Decrypt(inspect_mode, ciphertext, key):

    # Generate the required stream generation variables:
    S = bytearray(256)
    T = bytearray(256)

    # Stores the current state of the S table:
    Sarchive = []

    # Transform key to bytearray:
    keyBytes = bytearray(len(key))
    for i in range(len(key)):
        keyBytes[i] = ord(key[i])

    # Initialization:
    for i in range(256):
        S[i] = i
        T[i] = keyBytes[i % len(key)]

    # Perform a permutation on S (key scheduling, mirroring RC4_Enctrypt):
    temp = 0
    index = 0
    for i in range(256):
        index = (index + S[i] + T[i]) % 256
        temp = S[i]
        S[i] = S[index]
        S[index] = temp

    ### Text Decoding ###

    # If the ciphertext is a string to be decrypted:
    if isinstance(ciphertext, str):
        plainText = bytearray(len(ciphertext))

        # Transform the ciphertext input into a bytearray:
        ciphertextBytes = bytearray(len(ciphertext))
        for i in range(len(ciphertext)):
            ciphertextBytes[i] = ord(ciphertext[i])

        # Decrypt the ciphertext:
        i = 0
        j = 0
        for index in range(len(ciphertextBytes)):
            # Generate the next stream element:
            i = (i + 1) % 256
            j = (j + S[i]) % 256
            temp = S[i]
            S[i] = S[j]
            S[j] = temp
            streamElementIndex = (S[i] + S[j]) % 256
            streamElement = S[streamElementIndex]

            plainText[index] = ciphertextBytes[index] ^ streamElement

            # If inspect mode, add the S table to Sarchive:
            if inspect_mode:
                Sarchive.append(makeBoxS(S))

        plainTextString = ''
        for i in range(len(plainText)):
            plainTextString = plainTextString + chr(plainText[i])

        if inspect_mode:
            return {"S-table": Sarchive, "Ciphertext": plainTextString}
        else:
            return plainTextString

    # If the ciphertext is an image (ndarray) that needs to be decrypted:
    if isinstance(ciphertext, np.ndarray):
        # Check the ciphertext's dimensions:
        numRows = ciphertext.shape[0]
        numColumns = ciphertext.shape[1]
        numLayers = ciphertext.shape[2]

        # Test if there is an alpha layer:
        bAlphaLayer = False
        if numLayers > 3:
            bAlphaLayer = True
            numLayers = 3
            alpha_layer = np.array(ciphertext[:, :, 3])

        # Plaintext variable:
        plainText = np.zeros((numRows, numColumns, numLayers), dtype='u1')

        # Variables used in the stream cipher should persist over different layer encryption:
        i = 0
        j = 0

        for layer in range(numLayers):
            # Create an input ciphertext bytearray for the current layer:
            index = 0
            cipherTextBytes = bytearray(numRows * numColumns)
            plainTextBytes = bytearray(numRows * numColumns)
            for row in range(numRows):
                for col in range(numColumns):
                    cipherTextBytes[index] = ciphertext[row][col][layer]
                    index += 1

            # Decrypt the ciphertext:
            for index in range(len(cipherTextBytes)):
                # Generate the next stream element:
                i = (i + 1) % 256
                j = (j + S[i]) % 256
                temp = S[i]
                S[i] = S[j]
                S[j] = temp
                streamElementIndex = (S[i] + S[j]) % 256
                streamElement = S[streamElementIndex]

                plainTextBytes[index] = cipherTextBytes[index] ^ streamElement

                # If inspect mode, add the S table to Sarchive:
                if inspect_mode:
                    Sarchive.append(makeBoxS(S))

            # Transfer the calculated output to the plaintext image ndarray variable:
            index = 0
            for row in range(numRows):
                for col in range(numColumns):
                    plainText[row][col][layer] = plainTextBytes[index]
                    index += 1

        if bAlphaLayer:
            # reattach the alpha layer to the recovered image; the original
            # assigned this to cipherText and then returned plainText without it
            plainText = np.dstack((plainText, alpha_layer))

        if inspect_mode:
            return {"S-table": Sarchive, "Ciphertext": plainText.astype("int")}
        else:
            return plainText.astype("int")


# This function returns a 16x16 numpy array consisting of the hex values of each byte in S_table (bytearray)
def makeBoxS(S_table):
    S_temp = [['' for i in range(16)] for j in range(16)]
    index = 0
    singleByte = bytearray(1)
    for row in range(16):
        for column in range(16):
            singleByte[0] = S_table[index]
            S_temp[row][column] = singleByte.hex().upper()
            index += 1
    return np.array(S_temp)
authors: ["22056509+StefanB7@users.noreply.github.com"] | author_id: 22056509+StefanB7@users.noreply.github.com

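Because encryption and decryption share the same key schedule and keystream, the two public functions round-trip; a quick usage check (the function names, including the Enctrypt spelling, are exactly as defined above, and the key string is made up):

ciphertext = RC4_Enctrypt(False, "attack at dawn", "EHN410key")
recovered = RC4_Decrypt(False, ciphertext, "EHN410key")
assert recovered == "attack at dawn"

# inspect_mode=True also returns the S-table snapshot taken after each byte
detail = RC4_Enctrypt(True, "hi", "EHN410key")
print(len(detail["S-table"]))  # 2 snapshots, one per plaintext byte
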
blob_id: f0fb2f80c33baa93c5b997b44cf6feaf76ebabe8 | directory_id: eccffae4ff27bccaab1d2c46f1fcd21751e3ad15
path: /aayudh/scanner.py
content_id: ced96a79d3a0364b8c8151c7086a4e9665a8d286
detected_licenses: [] | license_type: no_license
repo_name: pl0mo/aayudh | snapshot_id: 8430718dcf8d136c6bc9250c17ca082b2d55d021 | revision_id: 18314e4e2276fa8402b2122e4cb18b99c85160ad | branch_name: refs/heads/master
visit_date: 2021-12-22T10:56:26.374085 | revision_date: 2017-10-13T06:38:55 | committer_date: 2017-10-13T06:38:55
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 41015 | extension: py
content:
# -*- coding: utf-8 -*-
import re
import logging.config
import pygal
from pygal import Config
from pygal.style import LightColorizedStyle, RedBlueStyle, CleanStyle
import yara
import pylibemu
from external import utilitybelt
import utils
class Scanner:
def __init__(self, config={}):
self.logger = logging.getLogger(__name__)
self.config = config
self.online_reports = {
"AlienVault": "http://www.alienvault.com/apps/rep_monitor/ip/{{host}}",
"Fortiguard": "http://www.fortiguard.com/ip_rep/index.php?data={{host}}&lookup=Lookup",
"FreeGeoIP": "http://freegeoip.net/json/{{host}}",
"IP-API": "http://ip-api.com/#{{host}}",
"IPVoid": "http://www.ipvoid.com/scan/{{host}}",
"MalwareDomainList": "http://www.malwaredomainlist.com/mdl.php?search={{host}}&colsearch=All&quantity=50",
"Robtex": "https://robtex.com/{{host}}",
"VirusTotal": "https://www.virustotal.com/en/ip-address/{{host}}/information/",
"Google Safe Browsing": "http://safebrowsing.clients.google.com/safebrowsing/diagnostic?site={{host}}",
"Arin Whois": "http://whois.arin.net/rest/nets;q={{host}}?showDetails=true",
"Yandex": "https://yandex.com/infected?l10n=en&url={{host}}",
"URLVoid": "http://www.urlvoid.com/ip/{{host}}",
"Mnemonic PDNS": "http://passivedns.mnemonic.no/search/?query={{host}}&method=exact",
"BGP HE": "http://bgp.he.net/ip/{{host}}"
}
self.regexes = {
"info": {
#0: {
# "regex": re.compile(r"\w{10}", re.I | re.S | re.M),
# "description": "TEST/IGNORE"
#},
100: {
"regex": re.compile(r"((https?|ftps?|gopher|telnet|file|notes|ms-help):((//)|(\\\\))+[\w\d:#@%/;$()~_?\+-=\\\.&]*)", re.I | re.S | re.M),
"description": "Detects a URL over HTTP, HTTPS, FTP, Gopher, Telnet, File, Notes, MS-Help"
},
#101: {
# "regex": re.compile(r"(https?:\/\/)?(www.)?(youtube\.com\/watch\?v=|youtu\.be\/|youtube\.com\/watch\?feature=player_embedded&v=)([A-Za-z0-9_-]*)(\&\S+)?(\?\S+)?", re.I | re.S | re.M),
# "description": "Detects YouTube links"
#},
#102: {
# "regex": re.compile(r"https?:\/\/(www.)?vimeo\.com\/([A-Za-z0-9._%-]*)((\?|#)\S+)?", re.I | re.S | re.M),
# "description": "Detects Vimeo links"
#},
105: {
"regex": re.compile(r"\W([\w-]+\.)(docx|doc|csv|pdf|xlsx|xls|rtf|txt|pptx|ppt)", re.I | re.S | re.M),
"description": "Detects MS Office filenames via extension"
},
106: {
"regex": re.compile(r"\W([\w-]+\.)(html|php|js)", re.I | re.S | re.M),
"description": "Detects HTML, PHP or JS filenames via extension"
},
#107: {
# "regex": re.compile(r"\W([\w-]+\.)(exe|dll|jar)", re.I | re.S | re.M),
# "description": "Detects EXE, DLL or JAR filenames via extension"
#},
108: {
"regex": re.compile(r"\W([\w-]+\.)(zip|zipx|7z|rar|tar|gz)", re.I | re.S | re.M),
"description": "Detects ZIP, ZIPX, 7Z, RAR, TAR or GZ archive filenames via extension"
},
109: {
"regex": re.compile(r"\W([\w-]+\.)(jpeg|jpg|gif|png|tiff|bmp)", re.I | re.S | re.M),
"description": "Detects JPEG, JPG, GIF, PNG, TIFF or BMP image filenames via extension"
},
110: {
"regex": re.compile(r"\W([\w-]+\.)(flv|swf)", re.I | re.S | re.M),
"description": "Detects FLV or SWF filenames via extension"
},
111: {
"regex": re.compile(r"\\b[a-f0-9]{32}\\b", re.I | re.S | re.M),
"description": "Detects MD5 hash strings"
},
112: {
"regex": re.compile(r"\\b[a-f0-9]{40}\\b", re.I | re.S | re.M),
"description": "Detects SHA1 hash strings"
},
113: {
"regex": re.compile(r"\\b[a-f0-9]{64}\\b", re.I | re.S | re.M),
"description": "Detects SHA256 hash strings"
},
114: {
"regex": re.compile(r"\\b[a-f0-9]{128}\\b", re.I | re.S | re.M),
"description": "Detects SHA512 hash strings"
},
115: {
"regex": re.compile(r"\\b\\d{2}:[A-Za-z0-9/+]{3,}:[A-Za-z0-9/+]{3,}\\b", re.I | re.S | re.M),
"description": "Detects SSDEEP fuzzy hash strings"
},
116: {
"regex": re.compile('(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)', re.I | re.S | re.M),
"description": "Detects an IPv4 address"
},
118: {
"regex": re.compile('(?=^.{4,255}$)(^((?!-)[a-zA-Z0-9-]{1,63}(?<!-)\.)+[a-zA-Z]{2,63}$)', re.I | re.S | re.M),
"description": "Detects a FQDN string"
},
119: {
"regex": re.compile(r"(CVE-(19|20)\\d{2}-\\d{4,7})", re.I | re.S | re.M),
"description": "Detects a CVE string identifier"
},
120: {
"regex": re.compile(r"(((([01]? d?\\d)|(2[0-5]{2}))\\.){3}(([01]?\\d?\\d)|(2[0-5]{2})))|(([A-F0-9]){4}(:|::)){1,7}(([A-F0-9]){4})", re.I | re.S | re.M),
"description": "Detects an IPv6 addrss"
},
121: {
"regex": re.compile(r"([a-zA-Z0-9\.-_]+@)([a-zA-Z0-9-]+\.)(com|net|biz|cat|aero|asia|coop|info|int|jobs|mobi|museum|name|org|post|pre|tel|travel|xxx|edu|gov|mil|br|cc|ca|uk|ch|co|cx|de|fr|hk|jp|kr|nl|nr|ru|tk|ws|tw)\W", re.I | re.S | re.M),
"description": "Detects an email address - 1"
},
122: {
"regex": re.compile(r"(?<=^|(?<=[^a-zA-Z0-9-_\.]))(@)([A-Za-z]+[A-Za-z0-9]+){4}", re.I | re.S | re.M),
"description": "Detects a Twitter handle"
}
},
"low": {
200: {
"regex": re.compile(r"(\d{3}\-\d{2}\-\d{3})|(\d{3}\s\d{2}\s\d{3})", re.I | re.S | re.M),
"description": "Detects a Social Security Number"
},
201: { # http://stackoverflow.com/questions/7165056/regex-to-match-email-addresses-and-common-obfuscations
"regex": re.compile(r"^[A-Z0-9\._%+-]+(@|\s*\[\s*at\s*\]\s*)[A-Z0-9\.-]+(\.|\s*\[\s*dot\s*\]\s*)[a-z]{2,6}$", re.I | re.S | re.M),
"description": "Detects an obfuscated email address"
},
202: { # https://www.sans.org/security-resources/idfaq/snort-detect-credit-card-numbers.php
"regex": re.compile(r"4\d{3}(\s|-)?\d{4}(\s|-)?\d{4}(\s|-)?\d{4}", re.I | re.S | re.M),
"description": "Detects a VISA Credit Card number"
},
203: { # https://www.sans.org/security-resources/idfaq/snort-detect-credit-card-numbers.php
"regex": re.compile(r"5\d{3}(\s|-)?\d{4}(\s|-)?\d{4}(\s|-)?\d{4}", re.I | re.S | re.M),
"description": "Detects a Master Card number"
},
204: { # https://www.sans.org/security-resources/idfaq/snort-detect-credit-card-numbers.php
"regex": re.compile(r"6011(\s|-)?\d{4}(\s|-)?\d{4}(\s|-)?\d{4}", re.I | re.S | re.M),
"description": "Detects a Discover Credit Card number"
},
205: { # https://www.sans.org/security-resources/idfaq/snort-detect-credit-card-numbers.php
"regex": re.compile(r"3\d{3}(\s|-)?\d{6}(\s|-)?\d{5}", re.I | re.S | re.M),
"description": "Detects an American Express Credit Card number"
}
},
"medium": {
#300: {
# "regex": re.compile(r"e.{0,2}v.{0,2}a.{0,2}l", re.I | re.S | re.M),
# "description": "Detects obfuscated calls to JavaScript eval method"
#},
301: {
"regex": re.compile(r"u.{0,2}n.{0,2}e.{0,2}s.{0,2}c.{0,2}a.{0,2}p.{0,1}e", re.I | re.S | re.M),
"description": "Detects obfuscated calls to JavaScript unescape method"
},
302: {
"regex": re.compile(r"s.{0,4}u.{0,4}b.{0,4}s.{0,4}t.{0,4}r.{0,4}", re.I | re.S | re.M),
"description": "Detects obfuscated calls to JavaScript substr method"
},
303: {
"regex": re.compile(r"[zrtypqsdfghjklmwxcvbnZRTYPQSDFGHJKLMWXCVBN]{6,}", re.I | re.S | re.M),
"description": "Detects 6 or more consecutive occurences of consonants"
},
304: { # https://community.emc.com/community/connect/rsaxchange/netwitness/blog/2013/03/19/detecting-malicious-and-suspicious-user-agent-strings
"regex": re.compile(r"funwebproducts", re.I | re.S),
"description": "Probable Funwebproduct Adware BHO generated traffic"
},
305: { # https://community.emc.com/community/connect/rsaxchange/netwitness/blog/2013/03/19/detecting-malicious-and-suspicious-user-agent-strings
"regex": re.compile(r"(maar|btrs|searchtoolbar|fctb|cpntdf|talwinhttpclient|bsalsa)", re.I | re.S),
"description": "Probable Adware generated traffic"
}
},
"high": {
400: {
"regex": re.compile(r"\xeb.*\x31.*\x20\x8b.*\x74\x07\xeb.*\xe8.*\xff\xff\xff", re.I | re.S | re.M),
"description": "This regex detects presence of CLET encoded byte sequences"
},
401: { # http://networkraptor.blogspot.in/2015/01/user-agent-strings.html, http://networkraptor.blogspot.in/p/user-agent-strings.html
"regex": re.compile(r"((\w+)|(\W+))((<\|>)|(\\))((\w+)|(\W+))((<\|>)|(\\))((\w+)|(\W+))((<\|>)|(\\))[^<|\\]+((<\|>)|(\\))((\w+)|(\W+))[^<|\\]+((<\|>)|(\\))[^<|\\]+((\w+)|(\W+))((\w+)|(\W+))+", re.I | re.S),
"description": "Probable Houdini/Iniduoh/njRAT malware generated traffic"
},
402: { # http://networkraptor.blogspot.in/2015/01/user-agent-strings.html, http://networkraptor.blogspot.in/p/user-agent-strings.html
"regex": re.compile(r"nsis_inetc\s\(mozilla\)", re.I | re.S),
"description": "Probable Zero Access malware generated traffic"
},
403: { # http://networkraptor.blogspot.in/2015/01/user-agent-strings.html, http://networkraptor.blogspot.in/p/user-agent-strings.html
"regex": re.compile(r"Mozilla\/5\.0\sWinInet", re.I | re.S),
"description": "Probable Generic Trojan generated traffic"
},
404: { # http://networkraptor.blogspot.in/2015/01/user-agent-strings.html, http://networkraptor.blogspot.in/p/user-agent-strings.html
"regex": re.compile(r"Wget\/1\.9\+cvs-stable\s\(Red\sHat\smodified\)", re.I | re.S),
"description": "Probable Dyre/Upatre malware generated traffic"
},
405: { # http://networkraptor.blogspot.in/2015/01/user-agent-strings.html, http://networkraptor.blogspot.in/p/user-agent-strings.html
"regex": re.compile(r"RookIE\/1\.0", re.I | re.S),
"description": "Probable generic password stealing trojan generated traffic"
},
406: { # http://networkraptor.blogspot.in/2015/01/user-agent-strings.html, http://networkraptor.blogspot.in/p/user-agent-strings.html
"regex": re.compile(r"Mozilla\/4\.0\s\(compatible;\sMSIE\s8\.0;\sWindows\sNT\s5\.1;\sTrident\/4\.0\)", re.I | re.S),
"description": "Probable Egamipload malware generated traffic"
},
407: { # http://networkraptor.blogspot.in/2015/01/user-agent-strings.html, http://networkraptor.blogspot.in/p/user-agent-strings.html
"regex": re.compile(r"Mozilla\/4\.0\s\(compatible;\sMSIE\s6\.0;\sWindows\sNT\s5\.1;\sSV1\)", re.I | re.S),
"description": "Probable Botnet/Adware generated traffic"
},
408: { # http://networkraptor.blogspot.in/2015/01/user-agent-strings.html, http://networkraptor.blogspot.in/p/user-agent-strings.html
"regex": re.compile(r"Mozilla\/4\.0\s\(compatible;MSIE\s7\.0;Windows\sNT\s6\.0\)", re.I | re.S),
"description": "Probable Yakes malware generated traffic"
},
409: { # http://networkraptor.blogspot.in/2015/01/user-agent-strings.html, http://networkraptor.blogspot.in/p/user-agent-strings.html
"regex": re.compile(r"^AutoIt$", re.I | re.S),
"description": "Probable Tupym malware generated traffic"
},
410: { # http://networkraptor.blogspot.in/2015/01/user-agent-strings.html, http://networkraptor.blogspot.in/p/user-agent-strings.html
"regex": re.compile(r"^M$", re.I | re.S),
"description": "Probable HkMain malware generated traffic"
},
411: { # http://networkraptor.blogspot.in/2015/01/user-agent-strings.html, http://networkraptor.blogspot.in/p/user-agent-strings.html
"regex": re.compile(r"^InetAll$", re.I | re.S),
"description": "Probable Pennonec malware generated traffic"
},
412: { # http://networkraptor.blogspot.in/2015/01/user-agent-strings.html, http://networkraptor.blogspot.in/p/user-agent-strings.html
"regex": re.compile(r"Opera\/9\.80", re.I | re.S),
"description": "Probable Andromeda malware generated traffic"
},
413: { # http://networkraptor.blogspot.in/2015/01/user-agent-strings.html, http://networkraptor.blogspot.in/p/user-agent-strings.html
"regex": re.compile(r"Mozilla/4\.0\s\(compatible;\sMSIE;\sWin32\)", re.I | re.S),
"description": "Probable Bandoo adware generated traffic"
},
414: { # http://networkraptor.blogspot.in/2015/01/user-agent-strings.html, http://networkraptor.blogspot.in/p/user-agent-strings.html
"regex": re.compile(r"Mozilla/4\.0\s\(compatible;\sMSIE\s8\.0;\sWindows\sNT\s6\.0\)", re.I | re.S),
"description": "Probable IRCbot malware generated traffic"
},
415: { # http://networkraptor.blogspot.in/2015/01/user-agent-strings.html, http://networkraptor.blogspot.in/p/user-agent-strings.html
"regex": re.compile(r"(Mozilla/5\.0\s\(compatible;\sMSIE\s9\.0;\sWindows\sNT\s7\.1;\sTrident/5\.0\)|Mozilla/5\.0\s\(Windows;\sU;\sMSIE\s7\.0;\sWindows\sNT\s6\.0;\sen-US\))", re.I | re.S),
"description": "Probable Geodo/Feodo malware generated traffic"
},
416: { # http://networkraptor.blogspot.in/2015/01/user-agent-strings.html, http://networkraptor.blogspot.in/p/user-agent-strings.html
"regex": re.compile(r"Mozilla/5\.0\s\(windows\snt\s6\.1;\swow64;\srv:25\.0\)\sGecko/20100101\sfirefox/25\.0", re.I | re.S),
"description": "Probable Kuluoz malware generated traffic"
},
417: { # http://networkraptor.blogspot.in/2015/01/user-agent-strings.html, http://networkraptor.blogspot.in/p/user-agent-strings.html
"regex": re.compile(r"Mozilla/4\.0\s\(compatible;\sMSIE\s6\.0;\sWindows\sNT\s5\.1;\sSV1;\s\.NET\sCLR\s1\.0\.1(288|975)\)", re.I | re.S),
"description": "Probable Symml malware generated traffic"
}
}
}
self.matchdict = {}
def inspect(self, report, filetype):
if self.config['enable_yara']:
report = self.inspect_yara(report, filetype)
if self.config['enable_shellcode']:
report = self.inspect_shellcode(report, filetype)
if self.config['enable_regex']:
report = self.inspect_regex(report, filetype)
if self.config['enable_heuristics']:
report = self.inspect_heuristics(report, filetype)
self.logger.info('Running post-inspection cleanup tasks upon report dict')
for k in sorted(report['flows'].keys()):
proto = k.split(' - ')[2]
if 'currtid' in report['flows'][k].keys():
del report['flows'][k]['currtid']
if 'transactions' in report['flows'][k].keys() and report['flows'][k]['transactions']:
for tid in sorted(report['flows'][k]['transactions'].keys()):
if proto == 'UDP':
if 'yara' not in report['flows'][k]['transactions'][tid].keys():
report['flows'][k]['transactions'][tid]['yara'] = {
'buf': None
}
if 'shellcode' not in report['flows'][k]['transactions'][tid].keys():
report['flows'][k]['transactions'][tid]['shellcode'] = {
'buf': None
}
if 'regex' not in report['flows'][k]['transactions'][tid].keys():
report['flows'][k]['transactions'][tid]['regex'] = {
'buf': None
}
if 'heuristics' not in report['flows'][k]['transactions'][tid].keys():
report['flows'][k]['transactions'][tid]['heuristics'] = {
'buf': None
}
if proto == 'TCP':
if 'yara' not in report['flows'][k]['transactions'][tid].keys():
report['flows'][k]['transactions'][tid]['yara'] = {
'cts': None,
'stc': None
}
if 'shellcode' not in report['flows'][k]['transactions'][tid].keys():
report['flows'][k]['transactions'][tid]['shellcode'] = {
'cts': None,
'stc': None
}
if 'regex' not in report['flows'][k]['transactions'][tid].keys():
report['flows'][k]['transactions'][tid]['regex'] = {
'cts': None,
'stc': None
}
if 'heuristics' not in report['flows'][k]['transactions'][tid].keys():
report['flows'][k]['transactions'][tid]['heuristics'] = {
'cts': None,
'stc': None
}
stats = None
if proto == 'UDP' and report['flows'][k]['transactions'][tid]['buf']:
stats = utils.entropy_compression_stats_buf(report['flows'][k]['transactions'][tid]['buf'])
report['flows'][k]['transactions'][tid]['bufcompressionratio'] = float(stats['compressionratio'])
report['flows'][k]['transactions'][tid]['bufentropy'] = float(stats['shannonentropy'])
# if entropy falls within the 0 - 1 or 7 - 8 range, categorize as suspicious
if (report['flows'][k]['transactions'][tid]['bufentropy'] > 0 and report['flows'][k]['transactions'][tid]['bufentropy'] < 1) or report['flows'][k]['transactions'][tid]['bufentropy'] > 7:
report['flows'][k]['transactions'][tid]['bufentropy_category'] = 'SUSPICIOUS'
else:
report['flows'][k]['transactions'][tid]['bufentropy_category'] = 'NORMAL'
report['flows'][k]['transactions'][tid]['bufmindatasize'] = stats['mindatasize']
stats = None
if proto == 'TCP' and report['flows'][k]['transactions'][tid]['ctsbuf']:
stats = utils.entropy_compression_stats_buf(report['flows'][k]['transactions'][tid]['ctsbuf'])
report['flows'][k]['transactions'][tid]['ctsbufcompressionratio'] = float(stats['compressionratio'])
report['flows'][k]['transactions'][tid]['ctsbufentropy'] = float(stats['shannonentropy'])
# if entropy falls within the 0 - 1 or 7 - 8 range, categorize as suspicious
if (report['flows'][k]['transactions'][tid]['ctsbufentropy'] > 0 and report['flows'][k]['transactions'][tid]['ctsbufentropy'] < 1) or report['flows'][k]['transactions'][tid]['ctsbufentropy'] > 7:
report['flows'][k]['transactions'][tid]['ctsbufentropy_category'] = 'SUSPICIOUS'
else:
report['flows'][k]['transactions'][tid]['ctsbufentropy_category'] = 'NORMAL'
report['flows'][k]['transactions'][tid]['ctsbufmindatasize'] = stats['mindatasize']
stats = None
if proto == 'TCP' and report['flows'][k]['transactions'][tid]['stcbuf']:
stats = utils.entropy_compression_stats_buf(report['flows'][k]['transactions'][tid]['stcbuf'])
report['flows'][k]['transactions'][tid]['stcbufcompressionratio'] = float(stats['compressionratio'])
report['flows'][k]['transactions'][tid]['stcbufentropy'] = float(stats['shannonentropy'])
# if entropy falls within the 0 - 1 or 7 - 8 range, categorize as suspicious
if (report['flows'][k]['transactions'][tid]['stcbufentropy'] > 0 and report['flows'][k]['transactions'][tid]['stcbufentropy'] < 1) or report['flows'][k]['transactions'][tid]['stcbufentropy'] > 7:
report['flows'][k]['transactions'][tid]['stcbufentropy_category'] = 'SUSPICIOUS'
else:
report['flows'][k]['transactions'][tid]['stcbufentropy_category'] = 'NORMAL'
report['flows'][k]['transactions'][tid]['stcbufmindatasize'] = stats['mindatasize']
for host in report['hosts'].keys():
if utilitybelt.is_rfc1918(host) or utilitybelt.is_reserved(host):
report['hosts'][host]['is_private'] = True
report['hosts'][host]['online_reports'] = None
else:
report['hosts'][host]['is_private'] = False
report['hosts'][host]['online_reports'] = self.online_reports
for key, value in report['hosts'][host]['online_reports'].iteritems():
report['hosts'][host]['online_reports'][key] = re.sub(r"{{host}}", host, value)
return dict(report)
def inspect_yara(self, report, filetype):
if filetype == 'PCAP':
self.logger.info('Loading yara rules from %s' % self.config['yara_rules_dir'])
rulefiles = []
rulefiles = utils.find_files(search_dir=self.config['yara_rules_dir'], regex=r"*.yar") + utils.find_files(search_dir=self.config['yara_rules_dir'], regex=r"*.yara")
rulefiles = sorted(rulefiles)
self.logger.debug('Found %d yara rule files in %s' % (len(rulefiles), self.config['yara_rules_dir']))
self.logger.info('Testing all rules found in %d files over %d sessions' % (len(rulefiles), len(report['flows'].keys())))
for k in sorted(report['flows'].keys()):
proto = k.split(' - ')[2]
for f in rulefiles:
match = None
y = yara.compile(f)
if 'transactions' in report['flows'][k].keys() and report['flows'][k]['transactions']:
for tid in sorted(report['flows'][k]['transactions']):
if 'yara' not in report['flows'][k]['transactions'][tid].keys():
if proto == 'TCP':
report['flows'][k]['transactions'][tid]['yara'] = {
'cts': None,
'stc': None
}
elif proto == 'UDP':
report['flows'][k]['transactions'][tid]['yara'] = {
'buf': None
}
if proto == 'UDP' and report['flows'][k]['transactions'][tid]['buf']:
if self.config['inspect_udp_depth'] > 0:
scanbuf = report['flows'][k]['transactions'][tid]['buf'][:self.config['inspect_udp_depth']]
else:
scanbuf = report['flows'][k]['transactions'][tid]['buf']
matches = None
try:
matches = y.match(
data=scanbuf,
timeout=self.config['yara_match_timeout'])
except Exception, e:
pass
if matches:
rulefile = f.rpartition('/')[2]
self.logger.debug('%s (UDP, Trans: #%d) matches %d rules from %s' % (k, tid, len(matches), rulefile))
for m in matches:
rulename = m.rule.encode('utf-8').strip()
if not report['flows'][k]['transactions'][tid]['yara']['buf']:
report['flows'][k]['transactions'][tid]['yara']['buf'] = {
rulefile: {
rulename: {
'tags': None,
'description': None,
'strings': None,
'namespace': None
}
}
}
elif rulefile not in report['flows'][k]['transactions'][tid]['yara']['buf'].keys():
report['flows'][k]['transactions'][tid]['yara']['buf'][rulefile] = {
rulename: {
'tags': None,
'description': None,
'strings': None,
'namespace': None
}
}
elif rulename not in report['flows'][k]['transactions'][tid]['yara']['buf'][rulefile]:
report['flows'][k]['transactions'][tid]['yara']['buf'][rulefile][rulename] = {
'tags': None,
'description': None,
'strings': None,
'namespace': None
}
if len(m.tags) is not 0:
report['flows'][k]['transactions'][tid]['yara']['buf'][rulefile][rulename]['tags'] = []
for tag in m.tags:
report['flows'][k]['transactions'][tid]['yara']['buf'][rulefile][rulename]['tags'].append(tag.upper())
if 'description' in m.meta.keys():
report['flows'][k]['transactions'][tid]['yara']['buf'][rulefile][rulename]['description'] = m.meta['description']
if len(m.strings) is not 0:
report['flows'][k]['transactions'][tid]['yara']['buf'][rulefile][rulename]['strings'] = []
for offset, var, val in m.strings:
report['flows'][k]['transactions'][tid]['yara']['buf'][rulefile][rulename]['strings'].append("Found %s @ offset 0x%x" % (var, int(offset)))
report['flows'][k]['transactions'][tid]['yara']['buf'][rulefile][rulename]['namespace'] = m.namespace
if proto == 'TCP' and report['flows'][k]['transactions'][tid]['ctsbuf']:
if self.config['inspect_cts_depth'] > 0:
scanbuf = report['flows'][k]['transactions'][tid]['ctsbuf'][:self.config['inspect_cts_depth']]
else:
scanbuf = report['flows'][k]['transactions'][tid]['ctsbuf']
matches = None
try:
matches = y.match(
data=scanbuf,
timeout=self.config['yara_match_timeout'])
except Exception, e:
pass
if matches:
rulefile = f.rpartition('/')[2]
self.logger.debug('%s (CTS, Trans: #%d) matches %d rules from %s' % (k, tid, len(matches), rulefile))
for m in matches:
rulename = m.rule.encode('utf-8').strip()
if not report['flows'][k]['transactions'][tid]['yara']['cts']:
report['flows'][k]['transactions'][tid]['yara']['cts'] = {
rulefile: {
rulename: {
'tags': None,
'description': None,
'strings': None,
'namespace': None
}
}
}
elif rulefile not in report['flows'][k]['transactions'][tid]['yara']['cts'].keys():
report['flows'][k]['transactions'][tid]['yara']['cts'][rulefile] = {
rulename: {
'tags': None,
'description': None,
'strings': None,
'namespace': None
}
}
elif rulename not in report['flows'][k]['transactions'][tid]['yara']['cts'][rulefile]:
report['flows'][k]['transactions'][tid]['yara']['cts'][rulefile][rulename] = {
'tags': None,
'description': None,
'strings': None,
'namespace': None
}
if len(m.tags) is not 0:
report['flows'][k]['transactions'][tid]['yara']['cts'][rulefile][rulename]['tags'] = []
for tag in m.tags:
report['flows'][k]['transactions'][tid]['yara']['cts'][rulefile][rulename]['tags'].append(tag.upper())
if 'description' in m.meta.keys():
report['flows'][k]['transactions'][tid]['yara']['cts'][rulefile][rulename]['description'] = m.meta['description']
if len(m.strings) is not 0:
report['flows'][k]['transactions'][tid]['yara']['cts'][rulefile][rulename]['strings'] = []
for offset, var, val in m.strings:
report['flows'][k]['transactions'][tid]['yara']['cts'][rulefile][rulename]['strings'].append("Found %s @ offset 0x%x" % (var, int(offset)))
report['flows'][k]['transactions'][tid]['yara']['cts'][rulefile][rulename]['namespace'] = m.namespace
if proto == 'TCP' and report['flows'][k]['transactions'][tid]['stcbuf']:
if self.config['inspect_stc_depth'] > 0:
scanbuf = report['flows'][k]['transactions'][tid]['stcbuf'][:self.config['inspect_stc_depth']]
else:
scanbuf = report['flows'][k]['transactions'][tid]['stcbuf']
                                matches = None
                                try:
matches = y.match(
data=scanbuf,
timeout=self.config['yara_match_timeout'])
except Exception, e:
pass
if matches:
rulefile = f.rpartition('/')[2]
self.logger.debug('%s (STC, Trans: #%d) matches %d rules from %s' % (k, tid, len(matches), rulefile))
for m in matches:
rulename = m.rule.encode('utf-8').strip()
if not report['flows'][k]['transactions'][tid]['yara']['stc']:
report['flows'][k]['transactions'][tid]['yara']['stc'] = {
rulefile: {
rulename: {
'tags': None,
'description': None,
'strings': None,
'namespace': None
}
}
}
elif rulefile not in report['flows'][k]['transactions'][tid]['yara']['stc'].keys():
report['flows'][k]['transactions'][tid]['yara']['stc'][rulefile] = {
rulename: {
'tags': None,
'description': None,
'strings': None,
'namespace': None
}
}
elif rulename not in report['flows'][k]['transactions'][tid]['yara']['stc'][rulefile]:
report['flows'][k]['transactions'][tid]['yara']['stc'][rulefile][rulename] = {
'tags': None,
'description': None,
'strings': None,
'namespace': None
}
                                        if len(m.tags) != 0:
report['flows'][k]['transactions'][tid]['yara']['stc'][rulefile][rulename]['tags'] = []
for tag in m.tags:
report['flows'][k]['transactions'][tid]['yara']['stc'][rulefile][rulename]['tags'].append(tag.upper())
if 'description' in m.meta.keys():
report['flows'][k]['transactions'][tid]['yara']['stc'][rulefile][rulename]['description'] = m.meta['description']
                                        if len(m.strings) != 0:
report['flows'][k]['transactions'][tid]['yara']['stc'][rulefile][rulename]['strings'] = []
for offset, var, val in m.strings:
report['flows'][k]['transactions'][tid]['yara']['stc'][rulefile][rulename]['strings'].append("Found %s @ offset 0x%x" % (var, int(offset)))
report['flows'][k]['transactions'][tid]['yara']['stc'][rulefile][rulename]['namespace'] = m.namespace
return dict(report)
def inspect_shellcode(self, report, filetype):
if filetype == 'PCAP':
self.logger.info('Invoking shellcode detection on input buffers')
for k in sorted(report['flows'].keys()):
proto = k.split(' - ')[2]
if 'transactions' in report['flows'][k].keys() and report['flows'][k]['transactions']:
for tid in sorted(report['flows'][k]['transactions']):
if 'shellcode' not in report['flows'][k]['transactions'][tid].keys():
if proto == 'TCP':
report['flows'][k]['transactions'][tid]['shellcode'] = {
'cts': None,
'stc': None
}
elif proto == 'UDP':
report['flows'][k]['transactions'][tid]['shellcode'] = {
'buf': None
}
if proto == 'UDP' and report['flows'][k]['transactions'][tid]['buf']:
if self.config['inspect_udp_depth'] > 0:
scanbuf = report['flows'][k]['transactions'][tid]['buf'][:self.config['inspect_udp_depth']]
else:
scanbuf = report['flows'][k]['transactions'][tid]['buf']
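                            # libemu GetPC heuristic: shellcode_getpc_test()
                            # returns a candidate offset into the buffer,
                            # test() emulates from there, and
                            # emu_profile_output is populated only when the
                            # emulation profiles like real shellcode.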
e = pylibemu.Emulator()
offset = e.shellcode_getpc_test(scanbuf)
e.test()
profile = e.emu_profile_output
if profile: # shellcode found!
self.logger.debug('%s (UDP, Trans: #%d) has shellcode @ offset %d' % (k, tid, offset))
report['flows'][k]['transactions'][tid]['shellcode']['buf'] = {
'offset': offset,
                                'buf': scanbuf[offset:len(scanbuf)],
'profile': profile
}
if proto == 'TCP' and report['flows'][k]['transactions'][tid]['ctsbuf']:
if self.config['inspect_cts_depth'] > 0:
scanbuf = report['flows'][k]['transactions'][tid]['ctsbuf'][:self.config['inspect_cts_depth']]
else:
scanbuf = report['flows'][k]['transactions'][tid]['ctsbuf']
e = pylibemu.Emulator()
offset = e.shellcode_getpc_test(scanbuf)
e.test()
profile = e.emu_profile_output
if profile: # shellcode found!
self.logger.debug('%s (CTS, Trans: #%d) has shellcode @ offset %d' % (k, tid, offset))
report['flows'][k]['transactions'][tid]['shellcode']['cts'] = {
'offset': offset,
'buf': scanbuf[offset:len(scanbuf)],
'profile': profile
}
if proto == 'TCP' and report['flows'][k]['transactions'][tid]['stcbuf']:
if self.config['inspect_stc_depth'] > 0:
scanbuf = report['flows'][k]['transactions'][tid]['stcbuf'][:self.config['inspect_stc_depth']]
else:
scanbuf = report['flows'][k]['transactions'][tid]['stcbuf']
e = pylibemu.Emulator()
offset = e.shellcode_getpc_test(scanbuf)
e.test()
profile = e.emu_profile_output
if profile: # shellcode found!
self.logger.debug('%s (STC, Trans: #%d) has shellcode @ offset %d' % (k, tid, offset))
report['flows'][k]['transactions'][tid]['shellcode']['stc'] = {
'offset': offset,
'buf': scanbuf[offset:len(scanbuf)],
'profile': profile
}
return dict(report)
def inspect_regex(self, report, filetype):
if filetype == 'PCAP':
self.logger.info('Invoking regex detection on input buffers')
for k in sorted(report['flows'].keys()):
proto = k.split(' - ')[2]
if 'transactions' in report['flows'][k].keys() and report['flows'][k]['transactions']:
for tid in sorted(report['flows'][k]['transactions']):
if 'regex' not in report['flows'][k]['transactions'][tid].keys():
if proto == 'TCP':
report['flows'][k]['transactions'][tid]['regex'] = {
'cts': None,
'stc': None
}
elif proto == 'UDP':
report['flows'][k]['transactions'][tid]['regex'] = {
'buf': None
}
for severity in ['info', 'low', 'medium', 'high']:
for rid in self.regexes[severity]:
if proto == 'UDP' and report['flows'][k]['transactions'][tid]['buf']:
if self.config['inspect_udp_depth'] > 0:
scanbuf = report['flows'][k]['transactions'][tid]['buf'][:self.config['inspect_udp_depth']]
else:
scanbuf = report['flows'][k]['transactions'][tid]['buf']
match = self.regexes[severity][rid]['regex'].search(scanbuf)
if match:
self.logger.info("%s (Trans: #%d) %08x: Found %s match" % (k, tid, match.start(), utils.size_string(match.end() - match.start())))
if 'buf' not in report['flows'][k]['transactions'][tid]['regex'].keys() or not report['flows'][k]['transactions'][tid]['regex']['buf']:
report['flows'][k]['transactions'][tid]['regex']['buf'] = {}
report['flows'][k]['transactions'][tid]['regex']['buf'][rid] = {
'offset': match.start(),
'size': match.end() - match.start(),
'severity': severity,
'description': self.regexes[severity][rid]['description'],
'match': scanbuf[match.start():match.end()]
}
if proto == 'TCP' and report['flows'][k]['transactions'][tid]['ctsbuf']:
if self.config['inspect_cts_depth'] > 0:
scanbuf = report['flows'][k]['transactions'][tid]['ctsbuf'][:self.config['inspect_cts_depth']]
else:
scanbuf = report['flows'][k]['transactions'][tid]['ctsbuf']
match = self.regexes[severity][rid]['regex'].search(scanbuf)
if match:
self.logger.info("%s (CTS, Trans: #%d) %08x: Found %s match" % (k, tid, match.start(), utils.size_string(match.end() - match.start())))
if not report['flows'][k]['transactions'][tid]['regex']['cts']:
report['flows'][k]['transactions'][tid]['regex']['cts'] = {}
report['flows'][k]['transactions'][tid]['regex']['cts'][rid] = {
'offset': match.start(),
'size': match.end() - match.start(),
'severity': severity,
'description': self.regexes[severity][rid]['description'],
'match': scanbuf[match.start():match.end()]
}
if proto == 'TCP' and report['flows'][k]['transactions'][tid]['stcbuf']:
if self.config['inspect_stc_depth'] > 0:
scanbuf = report['flows'][k]['transactions'][tid]['stcbuf'][:self.config['inspect_stc_depth']]
else:
scanbuf = report['flows'][k]['transactions'][tid]['stcbuf']
match = self.regexes[severity][rid]['regex'].search(scanbuf)
if match:
self.logger.info("%s (STC, Trans: #%d) %08x: Found %s match" % (k, tid, match.start(), utils.size_string(match.end() - match.start())))
if not report['flows'][k]['transactions'][tid]['regex']['stc']:
report['flows'][k]['transactions'][tid]['regex']['stc'] = {}
report['flows'][k]['transactions'][tid]['regex']['stc'][rid] = {
'offset': match.start(),
'size': match.end() - match.start(),
'severity': severity,
'description': self.regexes[severity][rid]['description'],
'match': scanbuf[match.start():match.end()]
}
return dict(report)
def inspect_heuristics(self, report, filetype):
if filetype == 'PCAP':
self.logger.info('Invoking heuristics detection on input buffers')
for k in sorted(report['flows'].keys()):
proto = k.split(' - ')[2]
if 'transactions' in report['flows'][k].keys() and report['flows'][k]['transactions']:
for tid in sorted(report['flows'][k]['transactions']):
if 'heuristics' not in report['flows'][k]['transactions'][tid].keys():
if proto == 'TCP':
report['flows'][k]['transactions'][tid]['heuristics'] = {
'cts': None,
'stc': None
}
elif proto == 'UDP':
report['flows'][k]['transactions'][tid]['heuristics'] = {
'buf': None
}
return dict(report)
|
[
"7h3rAm@gmail.com"
] |
7h3rAm@gmail.com
|
b558a5f18f55c7197200c0d002e7a91b389c657a
|
f18152728f6cc3ad6379181b303dc516cbd184ad
|
/actions.py
|
ce975520f8005426229fbb77a4e419e070707d81
|
[] |
no_license
|
spirosavlonitis/sentiment_predictor
|
5d168cbd23b03786af1da4ea722139a3897e58df
|
5ef4e1c318db7ba35e7102ba56e2191c8aa8e1b3
|
refs/heads/master
| 2020-04-11T10:02:29.142794
| 2018-12-14T07:02:39
| 2018-12-14T07:02:39
| 161,700,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 730
|
py
|
import numpy as np
from vectorizer import vect
import sqlite3
def classify(clf, document):
"""Classify user input."""
label = ['negative', 'positive']
X = vect.transform([document])
y = clf.predict(X)[0]
proba = np.max(clf.predict_proba(X))
return proba, label[y]
def train(clf, document, label):
"""Train model using user input."""
X = vect.transform([document])
clf.partial_fit(X, [label])
def update_db(document, label):
"""Add user input to review_db."""
conn = sqlite3.connect('reviews.sqlite')
c = conn.cursor()
c.execute('INSERT INTO review_db (review, sentiment, date) '\
'VALUES (?,?, DATETIME("now"))', (document, label))
conn.commit()
conn.close()
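

# A minimal usage sketch; the classifier pickle path below is hypothetical
# and assumes a model exposing predict() and predict_proba():
if __name__ == '__main__':
    import pickle
    with open('classifier.pkl', 'rb') as f:
        clf = pickle.load(f)
    proba, sentiment = classify(clf, 'I loved this movie')
    print('%s (%.2f%%)' % (sentiment, proba * 100))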
|
[
"spirosa84@hotmail.com"
] |
spirosa84@hotmail.com
|
3116f1dbaf91e026539a62162fa063db593b5337
|
017916cc98e583ee7f0973220056fe5a6c5292b7
|
/basic_grammar/classs.py
|
f9da5db19a0581960ce4284a0d75a098fc017e75
|
[] |
no_license
|
DongChanKIM2/JS
|
ae0674427be44b5459bb4cae1ed157d492c1925e
|
92ff17268e7831895a5909d404ddc2c7d9931430
|
refs/heads/main
| 2023-08-25T23:31:37.393520
| 2021-10-24T03:28:10
| 2021-10-24T03:28:10
| 362,272,742
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 548
|
py
|
class Car:
def __init__(self, options):
self.title = options.get('title')
def drive(self):
return '부릉부릉'
options = {'title': '세단', 'color': 'blue'}
car = Car(options)
car.title  # '세단'
car.drive() # 부릉 부릉
class Mercedes(Car):
def __init__(self, options):
super().__init__(options)
self.color = options.get('color')
def honk(self):
return '빵빵'
eclass = Mercedes(options)
print(
eclass.title,
eclass.color,
eclass.drive(),
eclass.honk(),
)
|
[
"fromecha@gmail.com"
] |
fromecha@gmail.com
|
f6c55bbbbe2cfb9bba709afc8dc0371c8106a589
|
d78145457f180d4b6535f5590ed794e1da26b070
|
/fig5.py
|
981ab3bdf3a5b84a36e3a107f8e1c1f3f0b5cf11
|
[] |
no_license
|
bolongz/MC_Telegraph_Equations
|
2c0953e10e013748bf0f4a4761c18519547dcf89
|
3c9f067262d8b2ab84c94bca7f28f921a43f7254
|
refs/heads/master
| 2020-03-28T19:40:01.649127
| 2018-09-18T00:14:40
| 2018-09-18T00:14:40
| 148,997,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,648
|
py
|
# This is an code for the example at 4.1.1 to generate Figure 5
import math
import random as rd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.legend_handler import HandlerLine2D
import matplotlib
matplotlib.rc('xtick', labelsize=18)
matplotlib.rc('ytick', labelsize=18)
import time
a = 1.5
c = 1.
N_MC = 200001
ksi = -a + math.sqrt(a**2 - c**2)
ksibar = -a - math.sqrt(a**2 - c**2)
A = 1.
B = - 1.
def phi(x):
return 0.0
# Solution to the wave equation with same conditions
def opsi(x):
return (ksi - ksibar) * math.cos(x)
def Psi(x):
return (ksi - ksibar) * math.sin(x)
def sol(t, x):
return (phi(x + t) + phi(x - t)) / 2. + (Psi(x + t) - Psi(x - t)) / (2.*c)
# Solution of the telegrapher's equation / For comparing purposes.
def real_sol(t, x):
return (math.exp(ksi * t) - math.exp(ksibar * t )) * math.cos(x)
# argument l is a single target time (n is fixed to 1 below)
def randtimes(l):
T = 0
n = 1
result = -1. * np.ones(n)
u = -1
i = 0
r = 0
while (i < n ) and (result[i] < 0):
s = np.random.exponential(1./a)
T += s
u = -u
r += u * s
while (i < n) and T > l: # Need to use while loop because we could jump multiple epochs in one step
result[i] = c*(r - u * (T - l))
i += 1
return result
def route(l):
n = 1
T = 0
u = -1
r = 0
i = 0
result = -1. * np.ones(n)
length = 0.0 * np.ones(n)
k = 0
while (i < n) and (result[i] < 0):
s = np.random.exponential(1./a)
T += s
u = -u
r += u * s
k = k +1
while( i < n) and T > l:
result[i] = c * (r - u * (T - l))
length[i] = k
i+= 1
return result, length
def jcp_op(t, x, l):
f = 0.0
b = 0.0
'''
if l % 2 == 1:
return (phi(x + c * t) + phi(x - c * t)) / 2. + (Psi(x + c * t) - Psi(x - c * t)) / (2.*c)
else:
return (phi(x + c * t) + phi(x - c * t)) / 2. - (Psi(x + c * t) - Psi(x - c * t)) / (2.*c)
'''
if l % 2 == 1:
f = phi(x - t) - 1. / c * Psi(x - t)
b = phi(x + t) + 1. / c * Psi(x + t)
else:
f = phi(x + t) - 1. / c * Psi(x + t)
b = phi(x - t) + 1. / c * Psi(x - t)
return (f + b)/2
# Evaluates the solution for one random generation of the time !
def one_eval(times, points):
newtimes = randtimes(times)
n = len(points)
result = np.empty(n)
for i in range(n):
result[i] = sol(newtimes, points[i])
return result
def mc_method(times, points, err):
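    # Monte Carlo driver: accumulate running sums and squared sums, estimate
    # the relative standard error every 2000 samples, and stop once it falls
    # below err (with a minimum of 1000 iterations).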
n = len(points)
stderr = np.zeros(n)
sum1 = np.zeros(n)
sum2 = np.zeros(n)
t = 1000.
    i = 1
while( t > err or i < 1001):
res = one_eval(times, points)
sum1 = sum1 + res
sum2 = sum2 + np.square(res)
if(i % 2000 == 0):
stderr = np.abs(sum2- (np.square(sum1) / (i))) / ( i - 1.)
t = max(np.vectorize(math.sqrt)(stderr/(i)) / (np.abs(sum1/i)))
print i, t
i = i + 1
return sum1 / (i - 1), i - 1 #, int_conf
def jcp_opsi(times, points):
n = len(points)
#n = 1
result = np.empty(n)
newtimes, length = route(times)
for i in range(n):
result[i]= jcp_op(newtimes, points[i], length)
return result
def jcp_sol(times, points, err):
n = len(points)
stderr = np.zeros(n)
sum1 = np.zeros(n)
sum2 = np.zeros(n)
t = 1000.
    i = 1
while( t > err or i < 1001):
res = jcp_opsi(times, points)
sum1 = sum1 + res
sum2 = sum2 + np.square(res)
if(i % 2000 == 0):
stderr = np.abs(sum2- (np.square(sum1) / (i))) / ( i - 1.)
t = max(np.vectorize(math.sqrt)(stderr/(i)) / (np.abs(sum1/i)))
print i, t
i = i + 1
return sum1/ (i - 1), i - 1 #, int_conf
tab = [0.05, 0.5, 1., 2.]
max_deviations = 0.0
max_deviations2 = 0.0
err = 0.01
for i, t in enumerate(tab):
print "****************"
plt.clf()
nx = 12 # number of positive space points
h = 0.5 #step between two space points
times = t #* np.ones(2 * nx + 1)
points = (np.arange(2 * nx + 1 ) - nx) * h
start_time = time.time()
mean, steps = mc_method(times, points,err)
kacs_time = time.time() - start_time
line1 = plt.scatter(points, mean,s = 40, marker = 'o', color = 'r', label =
'Presented algorithm' )
start_time2 = time.time()
mean1, steps2 = jcp_sol(times, points,err)
jcp_time = time.time() - start_time2
line2 = plt.scatter(points, mean1,s = 40, marker = '^', color = 'b', label = 'NMC' )
plt.ylim((-2.5, 2.5))
plt.xlim((-6, 6))
plt.ylabel('u(t,x)', size = 20)
plt.xlabel('x', size = 20)
plt.title('t = '+str(t),fontweight='bold', size = 20)
plt.legend(scatterpoints=1,loc='upper right', numpoints = 1,
prop={'size':15})
nx = 100
h = 0.06
points = (np.arange(2 * nx + 1 ) - nx) * h
real_points = np.vectorize(lambda x: real_sol(t, x))(points)
line3, = plt.plot(points, real_points, color = 'g', label='Accurate solution',
linewidth = 2)
plt.legend(handler_map={line3: HandlerLine2D(numpoints=2)}, prop =
{'size':15})
plt.savefig("img/t="+str(t)+".pdf", bbox_inches='tight')
plt.clf()
print t
print max_deviations
print max_deviations2
print "kac based time"
print kacs_time
print "kac based steps"
print steps
print "jcp based time"
print jcp_time
print "jcp based steps"
print steps2
|
[
"bolongz"
] |
bolongz
|
86b50466e909a8234bcdbc3ac7b0ff8bc7f50dd6
|
56bf1dbfa5d23257522fb03906e13c597a829ed3
|
/plugins/YouMustBuildABoatGameAgentPlugin/files/you_must_build_a_boat_game_agent.py
|
07e82ecfd2b6fa01e7c341a9e7693b38e4526d2a
|
[
"MIT"
] |
permissive
|
fendaq/SerpentAI
|
0417777bbc0fccb50df456d0ced1bce839aa3211
|
e9c147f33a790a9cd3e4ee631ddbf6bbf91c3921
|
refs/heads/master
| 2021-07-23T02:04:15.977726
| 2017-08-26T23:31:59
| 2017-08-26T23:31:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,236
|
py
|
from lib.game_agent import GameAgent
from lib.machine_learning.context_classification.context_classifiers import CNNInceptionV3ContextClassifier
from lib.sprite import Sprite
import lib.cv
import lib.ocr
from .helpers.ocr import preprocess as ocr_preprocess
from .helpers.game import parse_game_board, generate_game_board_deltas, score_game_board, score_game_board_vector, generate_boolean_game_board_deltas, display_game_board
import offshoot
import numpy as np
import h5py
import xtermcolor
import skimage.io
import sklearn.linear_model
from datetime import datetime, timedelta
import time
import uuid
import random
import collections
import pickle
import os
import subprocess
import shlex
class YouMustBuildABoatGameAgent(GameAgent):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.frame_handlers["PLAY"] = self.handle_play
self.frame_handlers["PLAY_BOT"] = self.handle_play_bot
self.frame_handlers["PLAY_RANDOM"] = self.handle_play_random
self.frame_handler_setups["PLAY"] = self.setup_play
self.frame_handler_setups["PLAY_BOT"] = self.setup_play_bot
self.analytics_client = None
@property
def game_contexts(self):
return dict(
)
@property
def rows(self):
return ["A", "B", "C", "D", "E", "F"]
@property
def columns(self):
return [1, 2, 3, 4, 5, 6, 7, 8]
@property
def match_milestone_sfx_mapping(self):
return {
10: "/home/serpent/SFX/first_blood.wav",
20: "/home/serpent/SFX/Double_Kill.wav",
30: "/home/serpent/SFX/Killing_Spree.wav",
40: "/home/serpent/SFX/Dominating.wav",
50: "/home/serpent/SFX/MegaKill.wav",
60: "/home/serpent/SFX/Unstoppable.wav",
70: "/home/serpent/SFX/WhickedSick.wav",
80: "/home/serpent/SFX/MonsterKill.wav",
90: "/home/serpent/SFX/GodLike.wav",
100: "/home/serpent/SFX/Combowhore.wav"
}
def setup_play(self):
plugin_path = offshoot.config["file_paths"]["plugins"]
ocr_classifier_path = f"{plugin_path}/YouMustBuildABoatGameAgentPlugin/files/ml_models/you_must_build_a_boat_ocr.model"
self.machine_learning_models["ocr_classifier"] = self.load_machine_learning_model(ocr_classifier_path)
context_classifier_path = f"{plugin_path}/YouMustBuildABoatGameAgentPlugin/files/ml_models/you_must_build_a_boat_context.model"
context_classifier = CNNInceptionV3ContextClassifier(input_shape=(384, 512, 3))
context_classifier.prepare_generators()
context_classifier.load_classifier(context_classifier_path)
self.machine_learning_models["context_classifier"] = context_classifier
self.ocr_policy = lib.ocr.OCRPolicy(
ocr_classifier=self.machine_learning_models["ocr_classifier"],
character_window_shape="rectangle",
character_window_size=(7, 2),
word_window_shape="rectangle",
word_window_size=(1, 10),
preprocessing_function=ocr_preprocess,
preprocessing_options=dict(
contrast_stretch_percentiles=(80, 100)
)
)
self.game_board = np.zeros((6, 8))
self.previous_game_board = np.zeros((6, 8))
self.mode = "PREDICT" # "RANDOM"
self.current_run = 0
self.current_run_started_at = None
self.current_attempts = 0
self.current_matches = 0
self.last_run_duration = 0
self.last_attempts = 0
self.last_matches = 0
self.record_random_duration = 0
self.record_random_duration_run = 0
self.record_random_matches = 0
self.record_random_matches_run = 0
self.record_random_duration_values = collections.deque(maxlen=1000)
self.record_random_matches_values = collections.deque(maxlen=1000)
self.record_predict_duration = 0
self.record_predict_duration_run = 0
self.record_predict_matches = 0
self.record_predict_matches_run = 0
self.record_predict_duration_values = collections.deque(maxlen=10)
self.record_predict_matches_values = collections.deque(maxlen=10)
self.game_boards = list()
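        # Warm-start the matching model from a previous run's pickle if one
        # exists; otherwise fall back to a fresh SGDRegressor, which can be
        # updated incrementally between runs via partial_fit().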
if os.path.isfile("datasets/ymbab_matching.model"):
with open("datasets/ymbab_matching.model", "rb") as f:
self.model = pickle.loads(f.read())
else:
self.model = sklearn.linear_model.SGDRegressor()
def setup_play_bot(self):
plugin_path = offshoot.config["file_paths"]["plugins"]
ocr_classifier_path = f"{plugin_path}/YouMustBuildABoatGameAgentPlugin/files/ml_models/you_must_build_a_boat_ocr.model"
self.machine_learning_models["ocr_classifier"] = self.load_machine_learning_model(ocr_classifier_path)
context_classifier_path = f"{plugin_path}/YouMustBuildABoatGameAgentPlugin/files/ml_models/you_must_build_a_boat_context.model"
context_classifier = CNNInceptionV3ContextClassifier(input_shape=(384, 512, 3))
context_classifier.prepare_generators()
context_classifier.load_classifier(context_classifier_path)
self.machine_learning_models["context_classifier"] = context_classifier
self.ocr_policy = lib.ocr.OCRPolicy(
ocr_classifier=self.machine_learning_models["ocr_classifier"],
character_window_shape="rectangle",
character_window_size=(7, 2),
word_window_shape="rectangle",
word_window_size=(1, 10),
preprocessing_function=ocr_preprocess,
preprocessing_options=dict(
contrast_stretch_percentiles=(80, 100)
)
)
self.game_board = np.zeros((6, 8))
self.previous_game_board = np.zeros((6, 8))
def handle_play(self, game_frame):
context = self.machine_learning_models["context_classifier"].predict(game_frame.frame)
if context is None:
return
if context == "game_over":
self.last_run_duration = (datetime.utcnow() - self.current_run_started_at).seconds if self.current_run_started_at else 0
self.last_attempts = self.current_attempts if self.current_attempts > 0 else 1
self.last_matches = self.current_matches
if self.current_run > 0:
if self.mode == "RANDOM":
self.record_random_duration_values.appendleft(self.last_run_duration)
self.record_random_matches_values.appendleft(self.last_matches)
if self.last_run_duration > self.record_random_duration:
self.record_random_duration = self.last_run_duration
self.record_random_duration_run = self.current_run
if self.last_matches > self.record_random_matches:
self.record_random_matches = self.last_matches
self.record_random_matches_run = self.current_run
elif self.mode == "PREDICT":
self.record_predict_duration_values.appendleft(self.last_run_duration)
self.record_predict_matches_values.appendleft(self.last_matches)
record = False
if self.last_run_duration > self.record_predict_duration:
record = True
self.record_predict_duration = self.last_run_duration
self.record_predict_duration_run = self.current_run
if self.last_matches > self.record_predict_matches:
record = True
self.record_predict_matches = self.last_matches
self.record_predict_matches_run = self.current_run
if record:
subprocess.Popen(shlex.split(f"play -v 0.45 /home/serpent/SFX/HolyShit_F.wav"))
if self.last_matches < 10:
subprocess.Popen(shlex.split(f"play -v 0.45 /home/serpent/SFX/Humiliating_defeat.wav"))
print("\033c")
game_board_vector_data = list()
scores = list()
if len(self.game_boards):
print(f"GENERATING TRAINING DATASETS: 0 / 1")
print(f"NEXT RUN: {self.current_run + 1}")
game_board_deltas = generate_game_board_deltas(self.game_boards[-1])
boolean_game_board_deltas = generate_boolean_game_board_deltas(game_board_deltas)
for game_move, boolean_game_boards in boolean_game_board_deltas.items():
for boolean_game_board in boolean_game_boards:
for i in range(6):
row = boolean_game_board[i, :]
game_board_vector_data.append(row)
scores.append(score_game_board_vector(row))
for i in range(8):
column = boolean_game_board[:, i]
column = np.append(column, [0, 0])
game_board_vector_data.append(column)
scores.append(score_game_board_vector(column))
print("\033c")
print(f"GENERATING TRAINING DATASETS: 1 / 1")
print(f"NEXT RUN: {self.current_run + 1}")
with h5py.File(f"datasets/ymbab/ymbab_run_{self.current_run}.h5", "w") as f:
for index, data in enumerate(game_board_vector_data):
f.create_dataset(f"{index}", data=data)
for index, data in enumerate(scores):
f.create_dataset(f"{index}_score", data=data)
self.game_boards = list()
self.current_run += 1
if self.current_run % 10 == 0:
self.mode = "PREDICT"
print("\033c")
print("UPDATING MODEL WITH LATEST COLLECTED DATA...")
print(f"NEXT RUN: {self.current_run}")
for i in range(9 if self.current_run <= 10 else 10):
data_file_path = f"datasets/ymbab/ymbab_run_{self.current_run - (i + 1)}.h5"
data = list()
scores = list()
with h5py.File(data_file_path, "r") as f:
count = len(f.items()) // 2
for ii in range(count):
data.append(f[f"{ii}"][:])
scores.append(f[f"{ii}_score"].value)
if len(data):
self.model.partial_fit(data, scores)
serialized_model = pickle.dumps(self.model)
with open("datasets/ymbab_matching.model", "wb") as f:
f.write(serialized_model)
else:
self.mode = "PREDICT"
print("\033c")
self.input_controller.click_screen_region(screen_region="GAME_OVER_RUN_AGAIN", game=self.game)
time.sleep(2)
self.current_run_started_at = datetime.utcnow()
self.current_attempts = 0
self.current_matches = 0
elif context.startswith("level_"):
self.previous_game_board = self.game_board
self.game_board = parse_game_board(game_frame.frame)
unknown_tile_coordinates = np.argwhere(self.game_board == 0)
if 0 < unknown_tile_coordinates.size <= 10:
coordinates = random.choice(unknown_tile_coordinates)
tile_screen_region = f"GAME_BOARD_{self.rows[coordinates[0]]}{self.columns[coordinates[1]]}"
self.input_controller.click_screen_region(screen_region=tile_screen_region, game=self.game)
self.current_attempts += 1
game_board_deltas = generate_game_board_deltas(self.game_board)
if self.game_board[self.game_board == 0].size < 3:
self.game_boards.append(self.game_board)
if self.mode == "PREDICT":
boolean_game_board_deltas = generate_boolean_game_board_deltas(game_board_deltas, obfuscate=False)
top_game_move_score = -10
top_game_move = None
game_move_scores = dict()
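                # Score every candidate move: for each post-move board, run
                # the affected row/column vectors (columns zero-padded to
                # length 8) through the regressor, take the best prediction
                # per board, and keep the move with the highest total.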
for game_move, boolean_game_boards in boolean_game_board_deltas.items():
split_game_move = game_move.split(" to ")
axis = "ROW" if split_game_move[0][0] == split_game_move[1][0] else "COLUMN"
total_score = 0
for boolean_game_board in boolean_game_boards:
input_vectors = list()
if axis == "ROW":
row_index = self.rows.index(split_game_move[0][0])
row = boolean_game_board[row_index, :]
input_vectors.append(row)
for ii in range(8):
column = boolean_game_board[:, ii]
column = np.append(column, [False, False])
input_vectors.append(column)
elif axis == "COLUMN":
for ii in range(6):
row = boolean_game_board[ii, :]
input_vectors.append(row)
column_index = self.columns.index(int(split_game_move[0][1]))
column = boolean_game_board[:, column_index]
column = np.append(column, [False, False])
input_vectors.append(column)
prediction = self.model.predict(input_vectors)
total_score += max(prediction)
game_move_scores[game_move] = total_score
if total_score > top_game_move_score:
top_game_move_score = total_score
top_game_move = game_move
if top_game_move is None:
return False
start_coordinate, end_coordinate = top_game_move.split(" to ")
start_screen_region = f"GAME_BOARD_{start_coordinate}"
end_screen_region = f"GAME_BOARD_{end_coordinate}"
elif self.mode == "RANDOM":
axis = random.choice(["ROW", "COLUMN"])
if axis == "ROW":
row = random.choice(self.rows)
column = 1
end_column = 1 + (random.choice(range(7)) + 1)
start_screen_region = f"GAME_BOARD_{row}{column}"
end_screen_region = f"GAME_BOARD_{row}{end_column}"
else:
column = random.choice(self.columns)
row = "A"
end_row = self.rows[random.choice(range(5)) + 1]
start_screen_region = f"GAME_BOARD_{row}{column}"
end_screen_region = f"GAME_BOARD_{end_row}{column}"
start_coordinate = start_screen_region.split('_')[-1]
end_coordinate = end_screen_region.split('_')[-1]
game_board_key = f"{start_coordinate} to {end_coordinate}"
game_board_delta = None
for board_delta in game_board_deltas:
if board_delta[0] == game_board_key:
game_board_delta = board_delta[1]
break
if score_game_board(game_board_delta) > 0:
self.current_matches += 1
if self.current_matches in self.match_milestone_sfx_mapping:
subprocess.Popen(shlex.split(f"play -v 0.45 {self.match_milestone_sfx_mapping[self.current_matches]}"))
print("\033c")
print(f"CURRENT RUN: {self.current_run}")
print(f"CURRENT MODE: {self.mode}\n")
print("BOARD STATE:\n")
display_game_board(self.game_board)
print("")
print(xtermcolor.colorize(f" Moving {game_board_key}... ", ansi=0, ansi_bg=39))
print(f"\nCurrent Run Duration: {(datetime.utcnow() - self.current_run_started_at).seconds} seconds")
print(f"Current Run Matches (Approximate): {self.current_matches}/{self.current_attempts}")
print(f"\nLast Run Duration: {self.last_run_duration} seconds")
print(f"Last Run Matches (Approximate): {self.last_matches}/{self.last_attempts}")
print("")
print(xtermcolor.colorize(" RECORDS ", ansi=29, ansi_bg=15))
print("")
# print(f"Duration (RANDOM): {self.record_random_duration} seconds (Run #{self.record_random_duration_run})")
print(f"Duration (PREDICT): {self.record_predict_duration} seconds (Run #{self.record_predict_duration_run})")
# print(f"Matches (RANDOM - Approximate): {self.record_random_matches} (Run #{self.record_random_matches_run})")
print(f"Matches (PREDICT - Approximate): {self.record_predict_matches} (Run #{self.record_predict_matches_run})")
print("")
print(xtermcolor.colorize(" PREDICT AVERAGES (Last 10 runs)", ansi=29, ansi_bg=15))
print("")
print(f"Duration: {round(np.mean(self.record_predict_duration_values), 2)} seconds")
print(f"{', '.join([str(v) for v in list(self.record_predict_duration_values)])}")
print(f"\nMatches (Approximate): {np.mean(self.record_predict_matches_values)}")
print(f"{', '.join([str(int(v)) for v in list(self.record_predict_matches_values)])}")
game_move_direction = "ROW" if self.game.screen_regions[start_screen_region][0] == self.game.screen_regions[end_screen_region][0] else "COLUMN"
if game_move_direction == "ROW":
game_move_distance = int(end_coordinate[1]) - int(start_coordinate[1])
else:
game_move_distance = self.rows.index(end_coordinate[0]) - self.rows.index(start_coordinate[0])
self.input_controller.drag_screen_region_to_screen_region(
start_screen_region=start_screen_region,
end_screen_region=end_screen_region,
duration=(0.1 + (game_move_distance * 0.05)),
game=self.game
)
def handle_play_bot(self, game_frame):
context = self.machine_learning_models["context_classifier"].predict(game_frame.frame)
if context is None:
return
# if context == "game_over":
# self.input_controller.click_screen_region(screen_region="GAME_OVER_RUN_AGAIN", game=self.game)
# time.sleep(2)
# elif context.startswith("level_"):
# print("\033c")
# print(context)
# print("BOARD STATE:\n")
#
# self.previous_game_board = self.game_board
# self.game_board = parse_game_board(game_frame.frame)
# print(self.game_board)
#
# # Click the Unknown Tiles
# unknown_tile_coordinates = np.argwhere(self.game_board == 0)
#
# if 0 < unknown_tile_coordinates.size <= 10:
# coordinates = random.choice(unknown_tile_coordinates)
# tile_screen_region = f"GAME_BOARD_{self.rows[coordinates[0]]}{self.columns[coordinates[1]]}"
#
# self.input_controller.click_screen_region(screen_region=tile_screen_region, game=self.game)
#
# if not np.array_equal(self.game_board, self.previous_game_board):
# return
#
# game_board_deltas = generate_game_board_deltas(self.game_board)
# game_board_delta_matches = detect_game_board_delta_matches(game_board_deltas)
#
# game_move = None
#
# for i in [5, 4, 3]:
# if not len(game_board_delta_matches[i]):
# continue
#
# game_move = random.choice(game_board_delta_matches[i])
# break
#
# if game_move is None:
# time.sleep(0.1)
# return
#
# game_move_start_cell, game_move_end_cell = game_move.split(" to ")
#
# start_screen_region = f"GAME_BOARD_{game_move_start_cell}"
# end_screen_region = f"GAME_BOARD_{game_move_end_cell}"
#
# game_move_direction = "ROW" if self.game.screen_regions[start_screen_region][0] == self.game.screen_regions[end_screen_region][0] else "COLUMN"
#
# if game_move_direction == "ROW":
# game_move_distance = int(game_move_end_cell[1]) - int(game_move_start_cell[1])
# else:
# game_move_distance = self.rows.index(game_move_end_cell[0]) - self.rows.index(game_move_start_cell[0])
#
# print(f"\nMoving {game_move_start_cell} to {game_move_end_cell}...")
#
# print(game_board_delta_matches)
#
# self.input_controller.drag_screen_region_to_screen_region(
# start_screen_region=start_screen_region,
# end_screen_region=end_screen_region,
# duration=(0.1 + (game_move_distance * 0.05)),
# game=self.game
# )
def handle_play_random(self, game_frame):
rows = ["A", "B", "C", "D", "E", "F"]
columns = [1, 2, 3, 4, 5, 6, 7, 8]
row = random.choice(rows)
column = random.choice(columns)
start_screen_region = f"GAME_BOARD_{row}{column}"
axis = "row" if random.randint(0, 1) else "column"
if axis == "row":
end_column = random.choice(columns)
while end_column == column:
end_column = random.choice(columns)
end_screen_region = f"GAME_BOARD_{row}{end_column}"
else:
end_row = random.choice(rows)
while end_row == row:
end_row = random.choice(rows)
end_screen_region = f"GAME_BOARD_{end_row}{column}"
print(f"\nMoving {start_screen_region.split('_')[-1]} to {end_screen_region.split('_')[-1]}...")
self.input_controller.drag_screen_region_to_screen_region(
start_screen_region=start_screen_region,
end_screen_region=end_screen_region,
duration=0.3,
game=self.game
)
time.sleep(1)
def handle_collect_characters(self, game_frame):
frame_uuid = str(uuid.uuid4())
skimage.io.imsave(f"datasets/ocr/frames/frame_{frame_uuid}.png", game_frame.frame)
preprocessed_frame = ocr_preprocess(game_frame.frame, **self.ocr_policy.preprocessing_options)
objects = lib.ocr.detect_image_objects_closing(preprocessed_frame, window_shape="rectangle", window_size=(7, 2))
normalized_objects = lib.ocr.normalize_objects(preprocessed_frame, objects)
lib.ocr.save_objects("datasets/ocr/characters", objects, normalized_objects, frame_uuid)
time.sleep(self.config.get("collect_character_interval") or 1)
|
[
"info@nicholasbrochu.com"
] |
info@nicholasbrochu.com
|
1e4e61cf5e7e169794e46582487559adf479515b
|
abf605ecb315c256243c242eaac4cd392973c6e2
|
/GeoDocs/GeoDocs/cuentas/models.py
|
3c9402a5c01122b0cf9eb077c890e7e700466d74
|
[] |
no_license
|
patriciozapata/Python-django-postgresql
|
67f0cceea399939fe1d36bcf4b8682c9909b06c6
|
2fbd8142da0fa661f04ec166dac43a2ed6c6d9be
|
refs/heads/tesis
| 2022-11-05T19:30:05.521923
| 2019-05-02T21:01:23
| 2019-05-02T21:01:23
| 184,653,172
| 0
| 1
| null | 2022-11-03T16:04:34
| 2019-05-02T21:21:03
| null |
UTF-8
|
Python
| false
| false
| 2,324
|
py
|
from django.db import models
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser
)
class UserManager(BaseUserManager):
def create_user(self, email, nombre, apellido, perfil, password=None):
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
)
user.nombre = nombre
user.apellido = apellido
perfil = Perfil.objects.get(pk=perfil)
user.perfil = perfil
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, nombre, apellido, perfil,staff,admin, password):
user = self.create_user(email, nombre, apellido, perfil,password=password)
user.staff = staff
user.admin = admin
user.save(using=self._db)
return user
class Perfil(models.Model):
perfil = models.CharField(max_length=50)
def __str__(self): # __unicode__ on Python 2
return '{}'.format(self.perfil)
# hook in the New Manager to our Model
class User(AbstractBaseUser):
email = models.EmailField(
verbose_name='email address',
max_length=255,
unique=True,
)
is_active = models.BooleanField(default=True)
staff = models.BooleanField(default=False) #Necesario para administrador debido a que pregunta "is_staff"
admin = models.BooleanField(default=False) # a superuser
nombre = models.CharField(max_length=50)
apellido = models.CharField(max_length=50)
imagen = models.ImageField(upload_to='perfil_image',blank=True)
perfil = models.ForeignKey(Perfil, on_delete=models.CASCADE)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['nombre', 'apellido','perfil','staff','admin'] # Email & Password are required by default.
    def __str__(self):              # __unicode__ on Python 2
        return self.email
def has_perm(self, perm, obj=None):
return True
def has_module_perms(self, app_label):
return True
@property
def is_staff(self):
return self.staff
@property
def is_admin(self):
return self.admin
objects = UserManager()
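
# A minimal usage sketch (hypothetical values; assumes a Perfil row exists):
#   perfil = Perfil.objects.create(perfil='administrador')
#   User.objects.create_user('ana@example.com', 'Ana', 'Diaz', perfil.pk,
#                            password='secret')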
|
[
"patrix_malito@hotmail.com"
] |
patrix_malito@hotmail.com
|
be44bbc064d7280037111833692d80e31717c8e1
|
24f2deae78a3f5fa8b5b3a53baff5637e0ea80ff
|
/sinoera/tst/sinozodiac/test_monkeyclever.py
|
93fc4197c860d2006f56188460f678c512b554e0
|
[
"Apache-2.0"
] |
permissive
|
sinotradition/sinoera
|
02b979a7dbca81594eed8862fa86671856b91e2e
|
1e93482c0a56a8917bc7ceebeef5b63b24ca3651
|
refs/heads/master
| 2021-01-10T03:20:20.231752
| 2015-12-14T15:13:42
| 2015-12-14T15:13:42
| 47,981,945
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
#!/usr/bin/python
#coding=utf-8
'''This is test module
@author: sheng
@contact: sinotradition@gmail.com
@copyright: License according to the project license.
'''
import unittest
from sinoera.sinozodiac import monkeyclever
class TestMonkeycleverFunctions(unittest.TestCase):
def setUp(self):
pass
def test_XXX(self):
pass
if __name__ == "__main__":
unittest.main()
|
[
"smlh.sheng@gmail.com"
] |
smlh.sheng@gmail.com
|
66a4d92f7ec31b6e8c4973554647c227fbf1f327
|
197d1e555430b8524b2f8ef62539a65005d53f44
|
/hw1_tagging/venv/bin/easy_install
|
40711d02ecb909628a5d0027230116ae3a089950
|
[] |
no_license
|
yw778/COMS4705_nlp
|
b721aa5c20d7620d826cd39887b6f16699f51efb
|
b66dedf26af7e746454e6becfa01a314c5bf664e
|
refs/heads/master
| 2021-03-27T19:57:36.796081
| 2018-05-26T22:21:16
| 2018-05-26T22:21:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
#!/Users/yuwang/nlp_1/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"yuwang95320@gmail.com"
] |
yuwang95320@gmail.com
|
|
dc2c0bcd950dff0ecd0af63cc4a6cdd8ea869cd3
|
e82b073e9b92499b35b2d45c1b5384609969778a
|
/CSE316/Assignment 2/Source Code/src/server.py
|
6bef0307749b867e565de2ad6711c358ee2a4c1a
|
[] |
no_license
|
wjxhhhhh/XJTLU
|
2a6dbba2ec48231b299393ea10e7f58c957a9e82
|
a812446706e5d575d154035c338f58757ce0771f
|
refs/heads/master
| 2023-04-25T00:00:34.473128
| 2020-07-09T23:21:04
| 2020-07-09T23:21:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 860
|
py
|
# Sahand Sabour - 1614650
# Basic implementation of the CoAP server
# 1) Change self.SERVER to IP address of your choosing.
# 2) Run this file via:
# python src/server.py
from coapthon.server.coap import CoAP
from tools.Resources import COAPResource
class CoAPServer(CoAP):
def __init__(self):
self.SERVER = "192.168.0.3" # Access IP address
self.PORT = 5683 # Access port
# Bind IP and port to the CoAP server
CoAP.__init__(self, (self.SERVER, self.PORT))
# Declare the available resources and their indexes
self.add_resource("index/", COAPResource())
print("Server listening on %s:%d" % (self.SERVER, self.PORT))
if __name__ == "__main__":
server = CoAPServer()
try:
server.listen(5)
except KeyboardInterrupt:
server.close()
print("Server Shutdown")
|
[
"karimiali0022@yahoo.com"
] |
karimiali0022@yahoo.com
|
59bd8f6d04bda0023fa0410fd105f1e5594bf584
|
b45b8fdc2daf1138e45dc3aa6adc05737a171e52
|
/votingapp/election/admin.py
|
4a5b2d62dad3d6150f3b077fb5d443fb2efb4abf
|
[] |
no_license
|
princevanani9/SEPP
|
badfb5e8d13c049995d0ed295141c3c310f744da
|
ea8cc4ef43f9c0e59fd4e73f75ccc459e3d15497
|
refs/heads/master
| 2023-04-03T10:16:43.533755
| 2021-04-02T07:46:48
| 2021-04-02T07:46:48
| 353,250,327
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
from django.contrib import admin
from .models import CreateElection
@admin.register(CreateElection)
class AdminCreateElecion(admin.ModelAdmin):
list_display = ['name','type']
# Register your models here.
|
[
"vananiprince9@gmail.com"
] |
vananiprince9@gmail.com
|
03e0ce534ea35cb85aa6b762ecc79460b25841f8
|
de2c194da09e8a00f67dfc81e4d2526598699ffe
|
/autoencoder_duong/test_keras_install.py
|
b9fffb9489ef8a7c4ebade37d2612768db0534d0
|
[] |
no_license
|
vutienduong/CNNTensorFlow
|
972d0cc9468030d4837ea6fec31d1c6453b843c4
|
322434668334b524b2479135decc48cf762c6713
|
refs/heads/master
| 2021-01-12T08:23:36.699712
| 2017-05-25T03:00:30
| 2017-05-25T03:00:30
| 76,561,139
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,107
|
py
|
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
import keras.utils as keut
# Generate dummy data
import numpy as np
x_train = np.random.random((1000, 20))
y_train = keut.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)
x_test = np.random.random((100, 20))
y_test = keut.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
model = Sequential()
# Dense(64) is a fully-connected layer with 64 hidden units.
# in the first layer, you must specify the expected input data shape:
# here, 20-dimensional vectors.
model.add(Dense(64, activation='relu', input_dim=20))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
model.fit(x_train, y_train,
epochs=20,
batch_size=128)
score = model.evaluate(x_test, y_test, batch_size=128)
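# With the metrics compiled above, score is [test loss, test accuracy].
print(score)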
|
[
"lucifervtd@gmail.com"
] |
lucifervtd@gmail.com
|
fe4ea25f10a6b7dc123b6983849f78f880cd66ff
|
9b3576dc1a0c8d9b778859372b8f8f4caa70d61d
|
/config.py
|
f9833ffe4f6025e3323868918f85ee9fee25ef9c
|
[] |
no_license
|
dittohead/googlespeedtestchecker
|
8eb37a95432cf741906c7167ac530a4f80c9d788
|
f55d7cc2347f3e034558520d67ed598d607f8b65
|
refs/heads/master
| 2020-05-21T08:51:56.394729
| 2017-05-03T11:57:14
| 2017-05-03T11:57:14
| 70,039,971
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
api_key = 'AIzaSyCRkic6tlrQqHESMQ6EnDubrZDrpML43xE'
pagespeedHost = "https://www.googleapis.com/pagespeedonline/v2/runPagespeed?url="
sitemap = 'sitemap.txt'
|
[
"noreply@github.com"
] |
dittohead.noreply@github.com
|
00de5095f39191fb4f1bd56a30416f2450ba71fb
|
f7330e58eddabd7d4fee5a4a9688d3515f8b9dd4
|
/checkpt_restart/run_with_restart/run_with_restart.py
|
8ed8540c1f34e7154796ccf35e1eca9b5bea714a
|
[
"BSD-3-Clause"
] |
permissive
|
clawpack/apps
|
9960265fa818794e6ebec7b9b5b65aa8e188a27a
|
f520ff1bb5271cc565bd6d4370331b5530c0d257
|
refs/heads/master
| 2023-04-07T03:42:11.531257
| 2023-03-30T19:07:51
| 2023-03-30T19:07:51
| 3,307,858
| 8
| 36
|
NOASSERTION
| 2023-03-30T09:05:16
| 2012-01-30T19:16:21
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,062
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
Script to run a code or restart it from a checkpoint file automatically.
Needs to be cleaned up and made more general.
Assumes code is compiled already.
python run_with_restart.py
Will do the following:
- Check whether _output exists with data from a run that appeared to
complete (by checking last line of fort.amr -- more robust way?).
 - If so, it quits.
- If _output exists with at least one checkpoint file, it will use the
more recent one to restart the code.
- If _output does not exist, it runs the code from scratch.
So you should be able to type the command above, hit Ctrl-C, and repeat this
process an arbitrary number of times and eventually the full output will be
generated.
Notes:
- The setrun.py file is used also for the restart. The clawdata.restart
value is set to True and written to claw.data explicitly from this script.
"""
from __future__ import print_function
from __future__ import absolute_import
import subprocess
import os, sys
sys.path.append('.')
from setrun import setrun
outdir = '_output'
# set any desired environment flags:
env = os.environ
#env['FFLAGS'] = '-O2 -fopenmp' # currently assume code is already compiled.
# runtime environment variables:
env['OMP_NUM_THREADS'] = '3'
# The next line insures that stdout is not buffered so if the code dies
# the output sent to run_output.txt so the error message is visible:
env['GFORTRAN_UNBUFFERED_PRECONNECTED'] = 'y'
def examine_outdir(outdir='_output'):
"""
Check the outdir to see if the code has already run to completion
(in which case nothing is done) or needs to be restarted.
If outdir does not exist, run from scratch.
"""
from numpy import Inf
fortamr = os.path.join(outdir,'fort.amr')
try:
f = open(fortamr).readlines()
finished = ('end of' in f[-1]) # examine last line for ending message
except:
finished = False
try:
cfile = os.path.join(outdir,'fort.tckaaaaa')
f = open(cfile).readlines()
ta = float(f[0][29:])
except:
ta = -Inf
try:
cfile = os.path.join(outdir,'fort.tckbbbbb')
f = open(cfile).readlines()
tb = float(f[0][29:])
except:
tb = -Inf
if (ta == -Inf) and (tb == -Inf):
print("Could not read fort.tckaaaaa or fort.tckbbbbb in outdir %s" \
% outdir)
latest = None
t_latest = None
elif ta > tb:
latest = 'aaaaa'
t_latest = ta
else:
latest = 'bbbbb'
t_latest = tb
return finished, latest, t_latest
def run_code_or_restart():
import time
tm = time.localtime()
year = str(tm[0]).zfill(4)
month = str(tm[1]).zfill(2)
day = str(tm[2]).zfill(2)
hour = str(tm[3]).zfill(2)
minute = str(tm[4]).zfill(2)
second = str(tm[5]).zfill(2)
timestamp = '%s-%s-%s-%s%s%s' % (year,month,day,hour,minute,second)
finished, latest, t_latest = examine_outdir(outdir)
if finished:
print("Code has finished running, remove %s to run again" % outdir)
return
restart = (latest is not None)
fname_output = 'run_output.txt'
fname_errors = 'run_errors.txt'
if restart:
print("Will attempt to restart using checkpoint file %s at t = %s" \
% (latest, t_latest))
print("Appending output stream to %s" % fname_output)
access = 'a'
else:
print("Will run code -- no restart")
print("Writing output stream to %s" % fname_output)
access = 'w'
fout = open(fname_output, access)
ferr = open(fname_errors, access)
if restart:
fout.flush()
fout.write("\n=========== RESTART =============\n" + \
"Local time: %s\n" % timestamp + \
"Will attempt to restart using checkpoint file %s at t = %s\n" \
% (latest, t_latest))
fout.flush()
make_args = ['make','output','RESTART=True']
else:
make_args = ['make','output']
#if restart:
# No longer need to do this since new restart now adds to gauge*.txt files
# fortgauge = os.path.join(outdir,'fort.gauge')
# fortgauge2 = os.path.join(outdir,'fort.gauge_%s' % timestamp)
# os.system("mv %s %s" % (fortgauge,fortgauge2))
# fout.write("Moving %s to %s \n" % (fortgauge,fortgauge2))
# fout.flush()
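    # rundata.write() regenerates claw.data with the restart flag and
    # checkpoint file name set, so the 'make output' below resumes from the
    # newest checkpoint (fort.chkaaaaa or fort.chkbbbbb).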
rundata = setrun('amrclaw')
rundata.clawdata.restart = restart
rundata.clawdata.restart_file = 'fort.chk' + str(latest)
if restart:
rundata.clawdata.output_t0 = False # to avoid plotting at restart times
rundata.write()
job = subprocess.Popen(make_args, stdout=fout,stderr=ferr,env=env)
return_code = job.wait()
if return_code == 0:
print("Successful run\n")
else:
print("Problem running code\n")
print("See %s and %s" % (fname_output,fname_errors))
fout.close()
ferr.close()
if __name__ == "__main__":
run_code_or_restart()
|
[
"rjl@uw.edu"
] |
rjl@uw.edu
|
b0990df1a25a8c8ff0efe04b40609e2c15b42bb8
|
bdfd839c3324feff6f474f006a32b66c01784b92
|
/ros_vive_driver/src/ros_vive_driver/vrwrapper/VRTrackedDevice.py
|
195b7ddebe023c68cf8143ce1a74bb3a88f25eeb
|
[] |
no_license
|
jdewaen/ros_vive
|
fddf3c7e84d9f588517d27592d97a16d80d1f4db
|
5e20c3cd9934c9fd87784fb1e1a05139b673b2f3
|
refs/heads/master
| 2020-08-09T22:07:25.801070
| 2019-09-30T09:09:46
| 2019-09-30T09:09:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,048
|
py
|
import math
import openvr
class VRTrackedDevice(object):
TRACKING_RESULT = {
1: "Uninitialized",
100: "Calibrating_InProgress",
101: "Calibrating_OutOfRange",
200: "Running_OK",
201: "Running_OutOfRange"
}
def __init__(self, vr_obj, index, device_class, name):
self.device_class = device_class
self.index = index
self.vr = vr_obj
self.pose_cache = openvr.TrackedDevicePose_t()
self.name = name
def __del__(self):
self.shutdown()
def shutdown(self):
pass
def get_serial(self):
return self.vr.getStringTrackedDeviceProperty(self.index, openvr.Prop_SerialNumber_String).decode('utf-8')
def get_model(self):
return self.vr.getStringTrackedDeviceProperty(self.index, openvr.Prop_ModelNumber_String).decode('utf-8')
def get_pose(self, force_update=False):
pose = self.pose_cache
if force_update:
pose = self.vr.getDeviceToAbsoluteTrackingPose(openvr.TrackingUniverseStanding, 0,
openvr.k_unMaxTrackedDeviceCount)[self.index]
if not pose.bPoseIsValid:
return None
return VRTrackedDevice.convert_matrix_to_pose(pose.mDeviceToAbsoluteTracking)
def get_velocity(self, force_update=False):
pose = self.pose_cache
if force_update:
pose = self.vr.getDeviceToAbsoluteTrackingPose(openvr.TrackingUniverseStanding, 0,
openvr.k_unMaxTrackedDeviceCount)[self.index]
if not pose.bPoseIsValid:
return None
velocity = pose.vVelocity
angular_velocity = pose.vAngularVelocity
return (velocity[0], velocity[1], velocity[2]), (angular_velocity[0], angular_velocity[1], angular_velocity[2])
def has_valid_pose(self, force_update=False):
pose = self.pose_cache
if force_update:
pose = self.vr.getDeviceToAbsoluteTrackingPose(openvr.TrackingUniverseStanding, 0,
openvr.k_unMaxTrackedDeviceCount)[self.index]
return pose.bPoseIsValid == 1
def is_connected(self, force_update=False):
pose = self.pose_cache
if force_update:
pose = self.vr.getDeviceToAbsoluteTrackingPose(openvr.TrackingUniverseStanding, 0,
openvr.k_unMaxTrackedDeviceCount)[self.index]
return pose.bDeviceIsConnected == 1
def get_tracking_result(self, force_update=False):
pose = self.pose_cache
if force_update:
pose = self.vr.getDeviceToAbsoluteTrackingPose(openvr.TrackingUniverseStanding, 0,
openvr.k_unMaxTrackedDeviceCount)[self.index]
if pose.eTrackingResult not in VRTrackedDevice.TRACKING_RESULT:
return "Unknown"
return VRTrackedDevice.TRACKING_RESULT[pose.eTrackingResult]
def update_pose(self, poses):
self.pose_cache = poses[self.index]
@staticmethod
def convert_matrix_to_pose(pose_mat):
# Changed from triad_openvr version since that one could crash due to divide by 0 error.
# This calculation comes from issue #3
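        # Branchless rotation-matrix -> quaternion conversion: max(0, ...)
        # guards each square root against small negative values caused by
        # floating point error, and copysign() recovers the component signs
        # from the antisymmetric part of the matrix.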
r_w = math.sqrt(max(0, 1 + pose_mat[0][0] + pose_mat[1][1] + pose_mat[2][2])) * 0.5
r_x = math.sqrt(max(0, 1 + pose_mat[0][0] - pose_mat[1][1] - pose_mat[2][2])) * 0.5
r_y = math.sqrt(max(0, 1 - pose_mat[0][0] + pose_mat[1][1] - pose_mat[2][2])) * 0.5
r_z = math.sqrt(max(0, 1 - pose_mat[0][0] - pose_mat[1][1] + pose_mat[2][2])) * 0.5
r_x *= math.copysign(1, r_x * (pose_mat[2][1] - pose_mat[1][2]))
r_y *= math.copysign(1, r_y * (pose_mat[0][2] - pose_mat[2][0]))
r_z *= math.copysign(1, r_z * (pose_mat[1][0] - pose_mat[0][1]))
x = pose_mat[0][3]
y = pose_mat[1][3]
z = pose_mat[2][3]
return (x, y, z), (r_x, r_y, r_z, r_w)
|
[
"noreply@github.com"
] |
jdewaen.noreply@github.com
|
d59fa7860f877cedd7b28103192d75cb60a9b3b2
|
9ee94226f283d601b27ae0b97bcfa178b51b6514
|
/catkin_velodyne/build/sbg_ros_rec/catkin_generated/pkg.installspace.context.pc.py
|
d95db19ade852a50c6c45313259fae3fe71fe4cd
|
[] |
no_license
|
zhang-quanzhe/navigation_car
|
ecaba1f385093ec3dc2ae433ce1ade96f278a466
|
2643a7827fcdc0660b11900f4118bb94be2291ee
|
refs/heads/master
| 2023-07-14T02:24:57.932726
| 2021-08-21T09:27:17
| 2021-08-21T09:27:17
| 398,517,655
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "sbg_driver_2"
PROJECT_SPACE_DIR = "/home/ustb/catkin_velodyne/install"
PROJECT_VERSION = "1.0.7"
|
[
"9418372+zhangquanzhe@user.noreply.gitee.com"
] |
9418372+zhangquanzhe@user.noreply.gitee.com
|
e6fc9450a00f5c5945c80ab8beda73f5a5c6c6fc
|
aaf14b75a281d0d230591a503dafb49bcfa62be8
|
/params.py
|
b179fa5e43038c20cef058a5eb6c2db225c4b56c
|
[] |
no_license
|
jialyu/MCIT582
|
6524021493a439ae130e5a145bacdc9236a96086
|
bbdf8840a4199094170ea113598ef4c84ee3d75e
|
refs/heads/main
| 2023-07-16T09:59:56.501976
| 2021-08-14T04:42:43
| 2021-08-14T04:42:43
| 367,925,781
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 315
|
py
|
g=9698439374675276823390863679085787099821687964590973421364530706908301367232976242960049544758496470828675688981570819335336142993170304256564916507021997
p=17485407687035251201370420829093858071027518631263552549047038216080132036645437679594890870680904087373138192057582526597149370808367592630377967178132719
|
[
"jialyuz@seas.upenn.edu"
] |
jialyuz@seas.upenn.edu
|
ba84f991060139b6c585752c2838f6d0855c1e5d
|
aaab39a353d12f182e107d1cf74f6b3a45d65bcd
|
/1/second.py
|
da9a718543e4aba6d31a3c9af9e43270fef52c99
|
[] |
no_license
|
myonov/advent_of_code_2018
|
c27c1983d5479f0650fd303046d1ae6b115012fd
|
902c59d13c8119d976ed2c05a55a572852227fd5
|
refs/heads/master
| 2020-04-09T23:52:12.347631
| 2018-12-22T13:58:45
| 2018-12-22T13:58:45
| 160,669,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
s = 0
f = {0}
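# Repeatedly cycle through the frequency deltas, tracking every running sum
# in the set f; the first sum reached twice is printed as the answer.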
with open('input.txt', 'r') as fin:
d = [int(line) for line in fin]
q = True
while q:
for item in d:
s += item
if s in f:
print(s)
q = False
break
f.add(s)
|
[
"myonov@gmail.com"
] |
myonov@gmail.com
|
c581ade6b59cd845c34b70586cfc5b5cab2022fd
|
f559b4d607cfdd3f192daed155ed8b0d263c71b2
|
/env/bin/django-admin.py
|
2832c6575b235beacbbadfac18c346f0dd30d38c
|
[] |
no_license
|
chris-baby/WbOnline
|
6270015e9a7897b413a3fe97e2aca8a33f744995
|
91425f677d2e7c2a0ac9aeb8c1ee47d75f9b9321
|
refs/heads/master
| 2022-07-14T23:57:56.162760
| 2020-05-11T13:56:22
| 2020-05-11T13:56:22
| 263,056,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 162
|
py
|
#!/Users/tongtong/Desktop/root/bwOnline/env/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"918128078@qq.com"
] |
918128078@qq.com
|
1bd6b6e60863db9b9d7ea35156c0117ede3c8461
|
0d118386f5bf841864d2679461624b4ad0d488fb
|
/day_three/solution.py
|
e4af81f085b546f2a5f565f5c8d6defc9e540352
|
[] |
no_license
|
sandrohp88/adventofcode2018
|
6604d56f5d67a889b6f1327888e9b9861c0bd72b
|
7cc0a05f836ce4d29c482f18559ed74ea0d31498
|
refs/heads/master
| 2020-04-09T09:32:37.437124
| 2018-12-09T22:37:37
| 2018-12-09T22:37:37
| 160,237,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,178
|
py
|
import re
def part1(claims):
# rows, columns = find_matrix_size(claims)
matrix = [['.']*1000 for _ in range(1000)]
overlaps = 0
for id_, l, t, w, h in claims:
for i in range(t, t+h):
for j in range(l, l+w):
if matrix[i][j] == '.':
matrix[i][j] = id_
else:
matrix[i][j] = 'X'
for row in matrix:
for cell in row:
if cell == 'X':
overlaps += 1
print('overlaps:', overlaps)
return matrix
def part2(matrix, claims):
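    # A claim survives iff none of its cells were overwritten with 'X',
    # i.e. the number of cells still holding its id equals its w * h area.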
id_count = dict()
for row in matrix:
for id_ in row:
if id_ != '.' and id_ != 'X':
id_count[id_] = id_count.get(id_, 0) + 1
for id_, _, _, w, h in claims:
area = w * h
if id_ in id_count.keys() and id_count[id_] == area:
print('no-overlapped id:', id_)
break
# load data
file_handler = open('input')
claims = list()
for line in file_handler:
claim = list(map(int, re.findall(r'-?\d+', line)))
claims.append(claim)
matrix = part1(claims) # expected output 106501
part2(matrix, claims) # expected output 632
|
[
"sandrohp88@gmail.com"
] |
sandrohp88@gmail.com
|
95025f13d1081fcd737d395850c6ddefe735a83f
|
19a4b144e8afa70931d7a6e03bad2acea3d58fec
|
/venv/Lib/site-packages/uiutil/frame/dynamic.py
|
a4b996a817ca562efe709e2ab8ab6b40f4e40a0d
|
[
"Apache-2.0"
] |
permissive
|
avim2809/CameraSiteBlocker
|
29505f8d2d36f7d402787a032a916bd0fc2c4941
|
bfc0434e75e8f3f95c459a4adc86b7673200816e
|
refs/heads/master
| 2020-11-24T17:01:05.842820
| 2019-12-26T12:55:34
| 2019-12-26T12:55:34
| 228,260,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,887
|
py
|
# encoding: utf-8
import json
import logging_helper
from future.utils import iteritems
from uiutil.frame.frame import BaseFrame
from configurationutil import Configuration, cfg_params
from .._metadata import __version__, __authorshort__, __module_name__
from ..resources import templates, schema
from ._dynamic_widget import DynamicWidgetFrame
from ._dynamic_scroll import DynamicScrollFrame
from ._dynamic_base import EXPANDING_ROWS, EXPANDING_COLUMNS
from ..helper.dynamic_variable import handle_variable
# Register Config details (These are expected to be overwritten by an importing app)
cfg_params.APP_NAME = __module_name__
cfg_params.APP_AUTHOR = __authorshort__
cfg_params.APP_VERSION = __version__
# Set the config initialisation parameters
LAYOUT_CFG = u'ui_layout'
TEMPLATE = templates.ui_layout
logging = logging_helper.setup_logging()
# ConfigFrame keys
CONFIG_FRAMES = u'frames'
CONFIG_VARS = u'variables'
# config frame keys
CONFIG_CLASS = u'class'
CONFIG_KEY = u'key'
CONFIG_ROW = u'row'
CONFIG_COLUMN = u'column'
CONFIG_STICKY = u'sticky'
CONFIG_VERTICAL = u'scroll_vertical'
CONFIG_HORIZONTAL = u'scroll_horizontal'
# Layout types
ROOT_LAYOUT = u'root_layouts'
WIDGET_LAYOUT = u'widget_layouts'
def _add_layout(layout_type,
layout_name,
layout):
cfg = Configuration()
# Register configuration
cfg.register(config=LAYOUT_CFG,
config_type=cfg_params.CONST.json,
template=TEMPLATE,
schema=schema.ui_layout)
key = u'{c}.{t}.{n}'.format(c=LAYOUT_CFG,
t=layout_type,
n=layout_name)
cfg[key] = layout
def add_root_layout(layout_name,
layout):
_add_layout(ROOT_LAYOUT,
layout_name,
layout)
def add_widget_layout(layout_name,
layout):
_add_layout(WIDGET_LAYOUT,
layout_name,
layout)
def add_layout_config(layout_cfg):
if isinstance(layout_cfg, (str, unicode)):
layout_cfg = json.load(open(layout_cfg))
root_layouts = layout_cfg.get(ROOT_LAYOUT)
widget_layouts = layout_cfg.get(WIDGET_LAYOUT)
if root_layouts is not None:
for layout_name, layout in iteritems(root_layouts):
add_root_layout(layout_name,
layout)
if widget_layouts is not None:
for layout_name, layout in iteritems(widget_layouts):
add_widget_layout(layout_name,
layout)
class DynamicFrame(BaseFrame):
# Add the available frame classes
FRAME_CLASSES = {
u'DynamicWidgetFrame': DynamicWidgetFrame,
u'DynamicScrollFrame': DynamicScrollFrame
}
def __init__(self,
layout_key,
item_dict=None,
item_list=None,
selected=None,
*args,
**kwargs):
self.key = layout_key
self.item_dict_name = u''
self.item_dict = item_dict if item_dict is not None else {}
self.item_list = item_list if item_list is not None else []
self.selected = self.string_var(value=u'' if selected is None else selected)
self.default = self.string_var()
self.cfg = Configuration()
# Register configuration
self.cfg.register(config=LAYOUT_CFG,
config_type=cfg_params.CONST.json,
template=TEMPLATE,
schema=schema.ui_layout)
self.frames = {}
self.variables = {}
self.layout = self.cfg[self.key]
BaseFrame.__init__(self,
padx=0,
pady=0,
*args,
**kwargs)
self.init_variables()
self.before_draw()
self.draw_frames()
def init_variables(self):
for var_config in self.layout.get(CONFIG_VARS, []):
var = handle_variable(frame=self,
var_config=var_config)
self.variables[var.name] = var
def draw_frames(self):
for frame_name, frame_config in iteritems(self.layout.get(CONFIG_FRAMES, {})):
self.draw_frame(frame_name, frame_config)
# Configure columns that are allowed to expand
for column in self.layout.get(EXPANDING_COLUMNS, []):
self.columnconfigure(column, weight=1)
# Configure rows that are allowed to expand
for row in self.layout.get(EXPANDING_ROWS, []):
self.rowconfigure(row, weight=1)
def draw_frame(self,
name,
config):
frame_class = self.FRAME_CLASSES[config[CONFIG_CLASS]]
frame_kwargs = {
u'parent': self,
u'key': config[CONFIG_KEY],
u'row': config[CONFIG_ROW],
u'column': config[CONFIG_COLUMN]
}
# If we are using a scroll frame check on scrollbar specific params
# TODO: We should convert this to kwargs at some point.
if config[CONFIG_CLASS] == u'DynamicScrollFrame':
scroll_v = config.get(CONFIG_VERTICAL)
scroll_h = config.get(CONFIG_HORIZONTAL)
if scroll_v is not None:
frame_kwargs[u'vbar'] = scroll_v
if scroll_h is not None:
frame_kwargs[u'hbar'] = scroll_h
frame = frame_class(**frame_kwargs)
sticky = config.get(CONFIG_STICKY)
if sticky is not None:
frame.grid(sticky=sticky)
self.frames[name] = frame
def refresh(self):
logging.debug(u'REFRESH FRAMES')
# Destroy existing frames
for name, frame in iteritems(self.frames):
frame.destroy()
# Reset self.frames
self.frames = {}
# Re-draw
self.init_variables()
self.before_draw()
self.draw_frames()
# re-size window
self.parent.parent.update_geometry()
def update_layout(self,
layout):
# Change the DynamicFrame layout to use the add/edit layout
self.layout = self.cfg[layout]
# Remove variables specific to this layout
for var_name in self.variables.keys():
if hasattr(self, var_name):
delattr(self, var_name)
# Call refresh to redraw with new layout
self.refresh()
def return_to_root_layout(self):
self.selected.set(u'')
self.default.set(u'')
self.item_dict_name = u''
self.item_dict = {}
self.item_list = []
self.update_layout(self.key)
def close(self):
self.parent.parent.destroy()
def before_draw(self):
""" Override this to run any extra steps before UI is drawn """
|
[
"avim2809@gmail.com"
] |
avim2809@gmail.com
|
97fa9c0f4789f947ef64dd6ce8e5760526b25b7e
|
e3cf0d1f7ee2f3320f96b4ff5567e11b4155814f
|
/blog_nsi/urls.py
|
fe20079bb2cb7ff858d918813f2514645aa57226
|
[] |
no_license
|
noumou2019/projets_nsi
|
6ef21e634126411a61ccdddd3fd436ea60c65774
|
e9c816d7fc8f8847c93d9d1cad6f40ed0eae0935
|
refs/heads/master
| 2023-07-29T13:42:26.057217
| 2020-04-24T20:37:25
| 2020-04-24T20:37:25
| 256,851,995
| 0
| 0
| null | 2021-09-22T18:54:14
| 2020-04-18T20:58:56
|
Python
|
UTF-8
|
Python
| false
| false
| 318
|
py
|
from . import views
from django.urls import path
from django.conf.urls import url
urlpatterns = [
# path('', views.PostList.as_view(), name='home'),
path('', views.post),
url(r'^post/(?P<id>[0-9]+)$',views.show_post),
#path('<slug:slug>/', views.PostDetail.as_view(), name='post_detail'),
]
|
[
"noumou diakhate"
] |
noumou diakhate
|
013e3a057b78e9d567beb91e013b38d35609ed24
|
ba6cf7dc7403d475121460684a01e7aebeda38b1
|
/ag_Cog_Testing_Pred/ag_Cog_Testing_Pred/q_scripts/TL_DRM_IDA_Traget_restAPI_test.py
|
4f49f9e857c5405f50bcf55b8121f67e8036cb42
|
[] |
no_license
|
ilanjaya/predictive-defect-management-1479716573136
|
a1d53a6297c52af14c4c429572e4cd4f79764808
|
1bf9f5a6b59041f771dbbe22cbb85358d77c844b
|
refs/heads/master
| 2023-07-09T11:23:18.159748
| 2023-06-27T11:56:35
| 2023-06-27T11:56:35
| 74,347,471
| 0
| 0
| null | 2016-11-22T07:35:56
| 2016-11-21T09:30:25
|
Java
|
UTF-8
|
Python
| false
| false
| 5,612
|
py
|
# -*- coding: utf-8 -*-
"""
@authors: Cognitive Development Team (Nilesh, Mandar, Abhishek, Gomathy, Rahul, Anil)
"""
from __future__ import print_function
import sys
import os
import requests
import time
# User Inputs -----------------------------------------------------------------
path_base = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# Set Base Path----------------------------------------------------------------
os.chdir(path_base)
sys.path.append(path_base)
from q_scripts.a_class_func_dir import DirStructure
# Read Config File: -----------------------------------------------------------
fld = DirStructure('config.ini')
def test():
# http://169.47.19.170:5004/CI_DRM_IDC_Target_noTP?title=new path order got kicked error code 999&problem_description=steps reproduce 1 initiate order new path 2 add wl configure 3 add dsl telco company fibe internet 15 10 configure offer offer nc targeted 9 m1 12 52 95 4 add telco company&resolution_description=there is not resolution for this test
orig_url = "http://169.47.19.170:6000/CI_DRM_IDC_Target_noTP"
r = requests.post(orig_url,
data={'title': 'new path order got kicked error code 999',
'problem_description': 'steps reproduce 1 initiate order new path 2 add wl configure 3 add dsl telco company fibe internet 15 10 configure offer offer nc targeted 9 m1 12 52 95 4 add telco company',
'resolution_description': 'there is not resolution for this test'})
print(r.status_code, r.reason)
print(r.text[:300] + '...')
r = requests.post("http://169.47.19.170:6000/CI_DRM_IDC_Target_noTP",
data={'title': '',
'problem_description': '',
'resolution_description': ''})
print(r.status_code, r.reason)
print(r.text[:300] + '...')
r = requests.post("http://169.47.19.170:6000/CI_DRM_IDC_Target_noTP",
data={'title': 'and',
'problem_description': 'the',
'resolution_description': 'is'})
print(r.status_code, r.reason)
print(r.text[:300] + '...')
params = {}
params['title'] = "the "
params['problem_description'] = "and it is the important"
params['resolution_description'] = "and it is the important "
r = requests.post(orig_url, data=params)
print (r.text)
def timing(f):
def wrap(*args):
time1 = time.time()
ret = f(*args)
time2 = time.time()
print('%s function took %0.3f ms' % (f.__name__, (time2-time1)*1000.0))
return ret
return wrap
@timing
def test_timings():
orig_url = "http://169.47.19.170:6000/CI_DRM_IDC_Target_noTP"
params = {}
params['title'] = "Functional Requirement ID is missing"
params['problem_description'] = "The last Functional Requirement List entry does not have a Functional Requirement ID."
params['resolution_description'] = "Functional requirement ID is provided"
r = requests.post(orig_url, data=params)
print (r.text)
def test_local():
orig_url = "http://localhost:6000/CI_DRM_IDC_Target_noTP"
params = {}
params['title'] = "IESA_DEV_Renewals_Unable to save/transact the quote"
params['problem_description'] = """Renewals: Unable to save the quote or renew in devbeta4
We are unable to do renew/save quote. Also, unable to recalculate price and export to excel by unselecting some of the licenses.
Steps to reproduce:
1) Open devbeta4
Url:http://devbeta4.citrite.net/MyCitrix/EmployeeMyCitrix/Frameset.aspx
2)Click on Samri and login.
3)Go to quote work sheet page
4)Click on renew or save quote
Test data:
TestData:
Login: gplachn987
Customer Information
Org ID: 45441738
Please find the attachment for more details
Navy/HP Inc"""
params['resolution_description'] = ""
r = requests.post(orig_url, data=params)
print (r.text) # Data #-- failed
params['title'] = "Opp2Create: The Opp2Create Process is failing to create opportunities"
params['problem_description'] = """Issue: Error Description: System.ServiceModel.Security.MessageSecurityException: The HTTP request is
unauthorized with client authentication scheme 'Anonymous'. The authentication header received from the server
was ''. ---> System.Net.WebException: The remote server returned an error: (401) Unauthorized.
Tried for APAC, EMEA, Americas(NA) Customers.
Please see the attachment."""
params['resolution_description'] = ""
r = requests.post(orig_url, data=params)
print (r.text) # Environment #-- pass
#%% Start the port
if __name__ == "__main__":
if True:
test()
test_timings()
else:
test_local()
|
[
"noreply@github.com"
] |
ilanjaya.noreply@github.com
|
834213ba2753b49b7973cec9cd548f30bae2ec2a
|
323cd948429b9e9e6b0e287be9e352cb4c78eebd
|
/MyCareer/mycareers/migrations/0014_auto_20190626_1718.py
|
d7e7673cd29c0d57445782ca9fe95c0f2a331a94
|
[] |
no_license
|
Jiwon0801/MyCareer
|
2267aebc6295ae1ae68f2441da7c9b45a034e135
|
dabed70b885a4147d283f5aafa75ff29b1cb39ef
|
refs/heads/master
| 2022-11-16T23:39:34.631636
| 2019-06-27T05:33:40
| 2019-06-27T05:33:40
| 194,028,875
| 0
| 1
| null | 2022-11-03T16:24:10
| 2019-06-27T05:32:49
|
Python
|
UTF-8
|
Python
| false
| false
| 521
|
py
|
# Generated by Django 2.2.2 on 2019-06-26 17:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mycareers', '0013_auto_20190626_1655'),
]
operations = [
migrations.RenameField(
model_name='tb_project',
old_name='project_start',
new_name='start',
),
migrations.RenameField(
model_name='tb_reward',
old_name='reward_start',
new_name='start',
),
]
|
[
"ssintico88@naver.com"
] |
ssintico88@naver.com
|
897d586112f0ec222933a592cf1333bb33fa2f21
|
3615a5c3ef1a534630c07cdc128f258f505bbfdb
|
/CONNECT/settings.py
|
4a486906b9064b30564a19fa8b5a2f4e634b6200
|
[] |
no_license
|
ashwinmendhe/Django-CONNECT
|
9310736c2092b6cf0e470f76e139f59bbb6a59a3
|
619c3f5817b66320bec9d486b899ca7481f70a3c
|
refs/heads/master
| 2023-08-21T10:31:24.314729
| 2021-10-28T05:38:30
| 2021-10-28T05:38:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,200
|
py
|
"""
Django settings for CONNECT project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-u)__h6*1#2m8xt)41*c(y0_^h0u@%^!61&udt6ul(hlo649qyv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['connect-1106.herokuapp.com', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'CONNECT.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CONNECT.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Note: this placeholder block is immediately overridden by the sqlite
# configuration below, then updated from DATABASE_URL via dj_database_url.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'database name',
'USER':'database user',
'PASSWORD':'database password',
'HOST':'database endpoint',
'PORT':'database port',
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql',
# 'NAME': 'CONNECT',
# 'USER':'postgres',
# 'PASSWORD':'1234',
# 'HOST':'localhost',
# 'PORT':'5432',
# }
# }
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=600)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
PROJECT_ROOT = os.path.realpath(os.path.dirname(__file__))
MEDIA_ROOT = PROJECT_ROOT + '/static/'
MEDIA_URL = '/media/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"ashwin.mendhe11@gmail.com"
] |
ashwin.mendhe11@gmail.com
|
724b4ac56c91b6848db180ec241a9630281cc4cf
|
d1276973203b51a7891d54c061d268dcce5686a8
|
/django-udemy/src/blog/migrations/0005_comment.py
|
a55c2b7f563d5df1d302ec02c05720af1a8a8db5
|
[] |
no_license
|
srinumadhavv/django_restaurant
|
f576ef2c52cebaebff9155d6f1d33dba0e66ca61
|
39087916cdb804d30fca3197936d3902f5d00a14
|
refs/heads/master
| 2021-05-21T20:41:05.184948
| 2020-04-03T17:25:48
| 2020-04-03T17:25:48
| 252,791,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 965
|
py
|
# Generated by Django 3.0.4 on 2020-04-01 17:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blog', '0004_post_tags'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField()),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Post')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"srinumadhavv@gmail.com"
] |
srinumadhavv@gmail.com
|
8a6ae4b1914dbad07b3f5757c33c8c2f26c55fe0
|
418f984fd7adc3f0c167b7b306cc5d7752149883
|
/LearnScrapy/books.toscrape.com/settings.py
|
4868c7fafbaefb54d503879a76561ae65a9c6c14
|
[] |
no_license
|
shuihu001/Python3-Spiders
|
1c056fa6451973cac40651a23d8bce5d546f83a0
|
5e425010450cee73f3f45b0b5d4dffa51393adb3
|
refs/heads/master
| 2020-03-09T12:45:44.374079
| 2018-03-24T06:33:23
| 2018-03-24T06:33:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,149
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for ToscrapeBooks project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'ToscrapeBooks'
SPIDER_MODULES = ['ToscrapeBooks.spiders']
NEWSPIDER_MODULE = 'ToscrapeBooks.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'ToscrapeBooks (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'ToscrapeBooks.middlewares.ToscrapebooksSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'ToscrapeBooks.middlewares.ToscrapebooksDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'ToscrapeBooks.pipelines.ToscrapebooksPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"noreply@github.com"
] |
shuihu001.noreply@github.com
|
cd1aea6d076c2a8cb9f8340f9ffca0fa4ba1c63b
|
db5aac17b476308e291fc8ca6dc642d6a5ecb105
|
/gui/bin/wheel
|
2aa36ddfa2123ed46a53557291c9f4fb95cad95c
|
[] |
no_license
|
maknetwork/windows_gui
|
2f93c6a3b5ff8a1581e1e3852fcb117835cf3cc0
|
6318371db12fd9aaa25a5a579e700f4e1a1c1a66
|
refs/heads/master
| 2023-08-14T23:54:13.254629
| 2020-04-18T10:18:20
| 2020-04-18T10:18:20
| 256,722,790
| 0
| 0
| null | 2023-07-23T12:22:47
| 2020-04-18T10:15:37
|
Python
|
UTF-8
|
Python
| false
| false
| 229
|
#!/home/arif/aarzeon/gui/gui/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"maknetwork"
] |
maknetwork
|
|
79eeaa901ce0d9bd1a86ba03f05e476efff338c2
|
58ca1aedfd2c2c43ce3f71e7877f92c51d41adf8
|
/confusion_matrix.py
|
a98fcffcb0e3224af90edcaa6902019e8e7cc655
|
[] |
no_license
|
seaun163/DeepSLAM
|
00d88ee00367987cb4b7a57db3b0bedafeeb4e68
|
a038772bd7de897fb8253214813bfab09e31d62f
|
refs/heads/master
| 2021-01-25T08:19:28.198277
| 2016-10-18T19:11:32
| 2016-10-18T19:11:32
| 93,752,917
| 1
| 0
| null | 2017-06-08T13:32:24
| 2017-06-08T13:32:24
| null |
UTF-8
|
Python
| false
| false
| 15,726
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
import cPickle as pickle
import math
import h5py
import getpass
import sys
import overfeat
from scipy.ndimage import imread
from scipy.misc import imresize
from copy import deepcopy
# Smush the 5 images together into one, otherwise treat them separately
smush = True
# Create the full confusion matrix, including sections not needed
full = True
# Whether or not the images have a colour channel
colour = False
# The type of pre-trained deep network to get the features from
net_type = 'GoogLeNet'
#net_type = 'AlexNet'
#net_type = 'CaffeNet'
#net_type = 'OverFeat'
#net_type = 'Cifar10'
#net_type = 'Cifar10Full'
#net_type = 'Cifar10SoftLIF'
net_type = 'VGG16'
net_type = 'VGG19'
# Check the username, so the same code can work on all of our computers
user = getpass.getuser()
if user == 'ctnuser':
caffe_root = '/home/ctnuser/saubin/src/caffe/'
overfeat_root = '/home/ctnuser/saubin/src/OverFeat/'
path_prefix = '/home/ctnuser/saubin/src/datasets/DatasetEynsham/Images/'
elif user == 'bjkomer':
caffe_root = '/home/bjkomer/caffe/'
overfeat_root = '/home/bjkomer/OverFeat/'
path_prefix = '/home/bjkomer/deep_learning/datasets/DatasetEynsham/Images/'
elif user == 'saubin': #TODO: put in Sean's actual path, I just guessed for now
caffe_root = '/home/saubin/src/caffe/'
overfeat_root = '/home/saubin/src/OverFeat/'
path_prefix = '/home/saubin/src/datasets/DatasetEynsham/Images/'
else:
caffe_root = '/home/ctnuser/saubin/src/caffe/'
overfeat_root = '/home/ctnuser/saubin/src/OverFeat/'
path_prefix = '/home/ctnuser/saubin/src/datasets/DatasetEynsham/Images/'
sys.path.insert(0, caffe_root + 'python')
import caffe
# Open an IPython session if an exception is found
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose', color_scheme='Linux', call_pdb=1)
# Stuff for optional plotting
plt.rcParams['figure.figsize'] = (10, 10)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# take an array of shape (n, height, width) or (n, height, width, channels)
# and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)
def vis_square(data, padsize=1, padval=0):
data -= data.min()
data /= data.max()
# force the number of filters to be square
n = int(np.ceil(np.sqrt(data.shape[0])))
padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))
# tile the filters into an image
data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
plt.imshow(data)
plt.figure()
#plt.show()
def smush_images(im_list):
return np.concatenate( map(lambda x: caffe.io.load_image(path_prefix + x), im_list) )
def process_overfeat_image(image):
# resize and crop into a 231x231 image
h0 = image.shape[0]
w0 = image.shape[1]
d0 = float(min(h0, w0))
# TODO: make this less hacky and more legit (if possible)
if not colour:
# Copy the monochrome image to all three channels to make OverFeat happy
image = image.reshape(h0,w0,1)
image = np.concatenate([image, image, image], axis=2)
image = image[int(round((h0-d0)/2.)):int(round((h0-d0)/2.)+d0),
int(round((w0-d0)/2.)):int(round((w0-d0)/2.)+d0), :]
image = imresize(image, (231, 231)).astype(np.float32)
# numpy loads image with colors as last dimension, transpose tensor
h = image.shape[0]
w = image.shape[1]
c = image.shape[2]
image = image.reshape(w*h, c)
image = image.transpose()
image = image.reshape(c, h, w)
return image
def load_overfeat_image(im):
# read image
return process_overfeat_image(imread(path_prefix + im))
def smush_overfeat_images(im_list):
return process_overfeat_image(np.concatenate( map(lambda x:
imread(path_prefix + x),
im_list) ))
index_mat = sio.loadmat(path_prefix + 'IndexToFilename.mat')['IndexToFilename'][0]
# MATLAB code uses 4789 as the split point, and this seems to match the data beter
# The dataset itself claims 4804 is the split point, but this looks to be incorrect
if full:
training_start_index = 0
training_end_index = len(index_mat)
testing_start_index = 0
testing_end_index = len(index_mat)
else:
training_start_index = 0
training_end_index = 4789 #4804
testing_start_index = 4789 #4804
testing_end_index = len(index_mat)
training_images = []
testing_images = []
if smush:
# TODO: make sure concatenation is along the correct axis
for i in range(training_start_index, training_end_index):
training_images.append([ index_mat[i][0,0][0],
index_mat[i][0,1][0],
index_mat[i][0,2][0],
index_mat[i][0,3][0],
index_mat[i][0,4][0],
])
for i in range(testing_start_index, testing_end_index):
testing_images.append([ index_mat[i][0,0][0],
index_mat[i][0,1][0],
index_mat[i][0,2][0],
index_mat[i][0,3][0],
index_mat[i][0,4][0],
])
else:
for i in range(training_start_index, training_end_index):
for j in range(5):
training_images.append(index_mat[i][0,j][0])
for i in range(testing_start_index, testing_end_index):
for j in range(5):
testing_images.append(index_mat[i][0,j][0])
# TODO: use something better than a list
training_features = []
# OverFeat does not use caffe
if net_type == 'OverFeat':
# OverFeat has 22 layers, including original image
num_layers = 22
# For filename purposes
layer = 'all'
layer = 10
if layer == 'all':
# Put all layers into one stacked confusion matrix
confusion_matrix = np.zeros((num_layers, len(training_images), len(testing_images)))
else:
# Make the confusion matrix for a single layer
confusion_matrix = np.zeros((len(training_images), len(testing_images)))
overfeat.init(overfeat_root + 'data/default/net_weight_0', 0)
for i in range(len(training_images)):
print("Training Image %s of %s" % (i, len(training_images)))
if smush:
image = smush_overfeat_images(training_images[i])
else:
image = load_overfeat_image(training_images[i])
b = overfeat.fprop(image)
if layer == 'all':
# Calculate features for all layers at once
features = []
for n in range(num_layers):
features.append(deepcopy(overfeat.get_output(n)))
training_features.append(features)
else:
training_features.append(deepcopy(overfeat.get_output(layer)))
for i in range(len(testing_images)):
print("Testing Image %s of %s" % (i, len(testing_images)))
if smush:
image = smush_overfeat_images(testing_images[i])
else:
image = load_overfeat_image(testing_images[i])
b = overfeat.fprop(image)
for j in range(len(training_images)):
if layer == 'all':
for n in range(num_layers):
feat = overfeat.get_output(n)
confusion_matrix[n,j,i] = np.linalg.norm(feat - training_features[j][n])
else:
feat = overfeat.get_output(layer)
confusion_matrix[j,i] = np.linalg.norm(feat - training_features[j])
# Convert to string in case it is a layer number, for use in the filename
layer = str(layer)
# Use caffe for all other models
else:
confusion_matrix = np.zeros((len(training_images), len(testing_images)))
if user == 'ctnuser':
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()
if net_type == 'GoogLeNet':
net = caffe.Net(caffe_root + 'models/bvlc_googlenet/deploy.prototxt',
caffe_root + 'models/bvlc_googlenet/bvlc_googlenet.caffemodel',
caffe.TEST)
elif net_type == 'CaffeNet':
net = caffe.Net(caffe_root + 'models/bvlc_reference_caffenet/deploy.prototxt',
caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',
caffe.TEST)
elif net_type == 'AlexNet':
net = caffe.Net(caffe_root + 'models/bvlc_alexnet/deploy.prototxt',
caffe_root + 'models/bvlc_alexnet/bvlc_alexnet.caffemodel',
caffe.TEST)
elif net_type == 'Cifar10':
net = caffe.Net(caffe_root + 'examples/cifar10/cifar10_quick.prototxt',
caffe_root + 'examples/cifar10/cifar10_quick_iter_5000.caffemodel.h5',
caffe.TEST)
elif net_type == 'Cifar10Full':
net = caffe.Net(caffe_root + 'examples/cifar10/cifar10_full.prototxt',
caffe_root + 'examples/cifar10/cifar10_full_iter_70000.caffemodel.h5',
caffe.TEST)
elif net_type == 'Cifar10SoftLIF':
net = caffe.Net(caffe_root + 'examples/cifar10/cifar10_quick_softlif.prototxt',
caffe_root + 'examples/cifar10/cifar10_quick_softlif_iter_5000.caffemodel.h5',
caffe.TEST)
elif net_type == 'Cifar10FullSoftLIF':
net = caffe.Net(caffe_root + 'examples/cifar10/cifar10_full_softlif.prototxt',
caffe_root + 'examples/cifar10/cifar10_full_softlif_iter_70000.caffemodel.h5',
caffe.TEST)
elif net_type == 'VGG16':
net = caffe.Net(caffe_root + 'models/vgg/VGG_ILSVRC_16_layers_deploy.prototxt',
caffe_root + 'models/vgg/VGG_ILSVRC_16_layers.caffemodel',
caffe.TEST)
elif net_type == 'VGG19':
net = caffe.Net(caffe_root + 'models/vgg/VGG_ILSVRC_19_layers_deploy.prototxt',
caffe_root + 'models/vgg/VGG_ILSVRC_19_layers.caffemodel',
caffe.TEST)
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2,0,1))
transformer.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)) # mean pixel
transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]
transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB
# AlexNet can do a batch_size of 50
# GoogLeNet needs a smaller batch_size, 10 works
# They also have different names for each layer
if net_type == 'GoogLeNet':
batch_size = 10
#layer = 'inception_3a/output'
layer = 'inception_3b/output'
#layer = 'inception_4a/output'
layer = 'inception_4b/output'
layer = 'inception_4c/output'
layer = 'inception_4d/output'
layer = 'inception_4e/output'
#layer = 'inception_5a/output'
layer = 'inception_5b/output'
layer = 'prob'
net.blobs['data'].reshape(batch_size,3,224,224) # GoogLeNet uses 224x224
elif 'VGG' in net_type:
batch_size = 10
layer = 'conv4_4'
net.blobs['data'].reshape(batch_size,3,224,224) # VGG uses 224x224
elif net_type == 'AlexNet' or net_type == 'CaffeNet':
batch_size = 50
layer = 'conv3'
net.blobs['data'].reshape(batch_size,3,227,227) # AlexNet uses 227*227
if 'Cifar10' in net_type:
batch_size = 10
layer = 'conv1'
net.blobs['data'].reshape(batch_size,3,32,32) # Cifar10Net uses 32x32
# Get all the features for the training images
for batch in range(int(len(training_images) / batch_size)):
if smush:
net.blobs['data'].data[...] = map(lambda x: transformer.preprocess('data',
smush_images(x)),
training_images[batch*batch_size:(batch+1)*batch_size])
else:
net.blobs['data'].data[...] = map(lambda x: transformer.preprocess('data',
caffe.io.load_image(path_prefix + x)),
training_images[batch*batch_size:(batch+1)*batch_size])
out = net.forward()
print("Training Batch %i of %i" % (batch, int(len(training_images) / batch_size)))
for bi in range(batch_size):
feat = net.blobs[layer].data[bi]
#vis_square(feat, padval=0.5)
training_features.append(deepcopy(feat))
# Run the last partial batch if needed
extra = len(training_images) % batch_size
if extra != 0:
if smush:
net.blobs['data'].data[:extra,...] = map(lambda x: transformer.preprocess('data',
smush_images(x)),
training_images[-extra:])
else:
net.blobs['data'].data[:extra,...] = map(lambda x: transformer.preprocess('data',
caffe.io.load_image(path_prefix + x)),
training_images[-extra:])
out = net.forward()
print("Training Overflow Batch")
for bi in range(extra):
feat = net.blobs[layer].data[bi]
training_features.append(deepcopy(feat))
j = 0
for batch in range(int(len(testing_images) / batch_size)):
if smush:
net.blobs['data'].data[...] = map(lambda x: transformer.preprocess('data',
smush_images(x)),
testing_images[batch*batch_size:(batch+1)*batch_size])
else:
net.blobs['data'].data[...] = map(lambda x: transformer.preprocess('data',
caffe.io.load_image(path_prefix + x)),
testing_images[batch*batch_size:(batch+1)*batch_size])
out = net.forward()
print("Testing Batch %i of %i" % (batch, int(len(testing_images) / batch_size)))
for bi in range(batch_size):
feat = net.blobs[layer].data[bi]
for i in range(len(training_images)):
confusion_matrix[i,j] = np.linalg.norm(feat - training_features[i])
j += 1
# Run the last partial batch if needed
extra = len(testing_images) % batch_size
if extra != 0:
if smush:
net.blobs['data'].data[:extra,...] = map(lambda x: transformer.preprocess('data',
smush_images(x)),
testing_images[-extra:])
else:
net.blobs['data'].data[:extra,...] = map(lambda x: transformer.preprocess('data',
caffe.io.load_image(path_prefix + x)),
testing_images[-extra:])
out = net.forward()
print("Testing Overflow Batch")
for bi in range(extra):
feat = net.blobs[layer].data[bi]
for i in range(len(training_images)):
confusion_matrix[i,j] = np.linalg.norm(feat - training_features[i])
j += 1
# Remove any slashes from layer name
layer = layer.replace('/','-')
# Optional plotting of features
#for i in range(len(training_images)):
# vis_square(training_features[i], padval=0.5)
#plt.show()
print( confusion_matrix )
# Construct file name
fname = 'conf_mat'
if smush:
fname += '_smush'
if full:
fname += '_full'
fname += '_' + net_type.lower() + '_' + layer + '.h5'
# Save to HDF5 format
print( "Saving Confusion Matrix for %s to HDF5 File..." % layer )
h5f = h5py.File(fname, 'w')
h5f.create_dataset('dataset', data=confusion_matrix)
h5f.close()
print( "Saving Complete!" )
|
[
"brent.komer@gmail.com"
] |
brent.komer@gmail.com
|
b9a7c26b69d0c4bee348283b34b00c53ac903ea8
|
c157dc447672f47f2e4aef2291c25beea6d71cf0
|
/geGL/scripts/subscripts/glExtensionFilter.py
|
f6b45412710eee580962cd33b8e7885e56887db1
|
[] |
no_license
|
Rendering-FIT/GPUEngine
|
a1b38299adb9ee972a3b0011ad3bfb31b9da9fab
|
a5f486d3dfdc7c4430d90cb6cf0ccdba6da37844
|
refs/heads/master
| 2022-04-29T17:50:56.736207
| 2022-04-29T09:59:10
| 2022-04-29T10:09:39
| 81,936,720
| 11
| 8
| null | 2019-10-16T07:15:04
| 2017-02-14T11:04:27
|
C++
|
UTF-8
|
Python
| false
| false
| 492
|
py
|
#!/usr/bin/python
# Python 2 filter: drops GL extension lines that carry a vendor suffix.
import sys
import re
import os
import fileinput
from subprocess import Popen, PIPE

data0 = ""
for line in fileinput.input():
    data0 += line
#ext = re.compile(r"AMD|NV|ATI|IBM|HP|EXT|ARB|OES|SUN|SGI|MESA|INTEL|APPLE|3DFX|GREMEDY|OVR|PGI|INGR|KHR|\[")
ext = re.compile(r"AMD|NV|ATI|IBM|HP|ARB|OES|SUN|SGI|MESA|INTEL|APPLE|3DFX|GREMEDY|OVR|PGI|INGR|KHR|\[")
# Keep only non-empty lines that mention none of the vendor tags above.
for i in data0.split("\n"):
    if ext.findall(i) != []:
        continue
    if i == "":
        continue
    print i
|
[
"imilet@fit.vutbr.cz"
] |
imilet@fit.vutbr.cz
|
51a76d49337dfbcacbf3e19f4ad22b413a2df96f
|
c68f4ef6b038d54489593efdbc5829793d9bc620
|
/official/urls.py
|
bae2e51e7f7ae08e6a372850ae95bc9c4881a99e
|
[] |
no_license
|
sivanZhang/official
|
8aec0e0e56421c098847a66813943fc162bfff04
|
255c3baa04a1920d46607434819d0dd8899c4f1c
|
refs/heads/master
| 2021-09-16T00:15:25.199375
| 2018-06-13T14:27:54
| 2018-06-13T14:27:54
| 139,970,003
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,284
|
py
|
"""official URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from official import views
from official import en_views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^product/', include('product.urls', namespace="product")),
url(r'^pic/', include('piclab.urls', namespace="piclab")),
url(r'^$', views.home, name='home'),
url(r'^users/', include('appuser.urls', namespace="users")),
url(r'^ckeditor/', include('ckeditor_uploader.urls')),
url(r'^category/', include('category.urls', namespace="category")),
url(r'^sitecontent/', include('sitecontent.urls', namespace="sitecontent")),
url(r'^page/', include('page.urls', namespace="page")),
url(r'^book/', include('book.urls', namespace="book")),
url(r'^bussiness/', include('bussiness.urls', namespace="bussiness")),
url(r'^area/', include('area.urls', namespace="area")),
url(r'^dept/', include('dept.urls', namespace="dept")),
url(r'^subscribe/', include('subscribe.urls', namespace="subscribe")),
    url(r'^$', views.home, name='home'),  # duplicate of the root pattern above; Django uses the first match
url(r'^en/home$', en_views.home, name='home'),
url(r'^en/watch$', en_views.watch, name='watch'),
url(r'^en/aboutus$', en_views.aboutus, name='aboutus'),
url(r'^en/contactus$', en_views.contactus, name='contactus'),
url(r'^en/accessories$', en_views.accessories, name='accessories'),
url(r'^en/parameters$', en_views.parameters, name='parameters'),
url(r"^pay/", include('pay.urls', namespace="pay")),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"461882709@qq.com"
] |
461882709@qq.com
|
b45ff9e709d93a0c576a76a3dfe00c1a545240a4
|
c74e1e14535b6343a27ddbd9fe130a327ba75716
|
/shop/migrations/0005_auto_20200503_0226.py
|
1d6c6a34b6964241ac006bb5efa30cd12507c825
|
[] |
no_license
|
parmaryash49/Django-Simple-Ecommerce-Site
|
7736bbbf331df0c80bf1029ee3496f8e577b7bdd
|
cd0bf17fb70e35ecbf7db05d637fd80b77244889
|
refs/heads/master
| 2022-12-17T23:56:42.908596
| 2020-05-31T19:35:40
| 2020-05-31T19:35:40
| 299,948,894
| 0
| 0
| null | 2020-09-30T14:39:34
| 2020-09-30T14:36:51
| null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
# Generated by Django 3.0.5 on 2020-05-02 20:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0004_orders'),
]
operations = [
migrations.RenameField(
model_name='orders',
old_name='item_json',
new_name='items_Json',
),
migrations.AddField(
model_name='orders',
name='phone',
field=models.CharField(default='', max_length=111),
),
]
|
[
"chintakd999@gmail.com"
] |
chintakd999@gmail.com
|
4a3aa4ea3c917d43b650b80ecfdaa059e4256042
|
003b6b60d7b89779a304ac05612a83364f01bbe4
|
/TrialOne/EComWeb/website/migrations/0011_remove_order_start_date.py
|
5d15e3e40014a0711e4db2bb5764ddbc9ab7fe8b
|
[] |
no_license
|
Shrenik99/DBMS_Python_project
|
7d8ab2f04e31054139f7eb8948dad37dc3aa891b
|
ed26f1bfadc42252e0a43b1efb91a1b476344aaf
|
refs/heads/main
| 2023-03-27T04:35:48.273102
| 2021-03-21T11:10:58
| 2021-03-21T11:10:58
| 350,084,267
| 2
| 0
| null | 2021-03-21T18:27:24
| 2021-03-21T18:27:24
| null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
# Generated by Django 3.1.5 on 2021-03-17 11:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('website', '0010_orderitem_ordered'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='start_date',
),
]
|
[
"arya21kapoor@gmail.com"
] |
arya21kapoor@gmail.com
|
544248a2f00936d0a98efc4feaa3d18b67957fb1
|
eb720d10b398f08a08770599c091f483c8504fc3
|
/src/state_definitions.py
|
e81b1bcb9043a067d1f3cc9692a3d086123429a1
|
[] |
no_license
|
colepeterson4/Wall-Follower
|
deeb90e5d773f6de396b6b81ddd895c9dbaf2de9
|
3c5ed49428e69a5a8612e83f9d14b3142ca54c12
|
refs/heads/master
| 2023-03-13T05:08:38.481887
| 2021-03-05T04:07:49
| 2021-03-05T04:07:49
| 344,686,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
#Here is where you can define all of your states in a clever way to make your code more readable.
#Think of all the states your robot could be in if thrown randomly into a world.
#Currently the code expects each state to be a named constant bound to an integer, like:
#FOLLOW_LEFT = 1
#FOLLOW_RIGHT = 2
#TURN_LEFT = 3
#TURN_RIGHT = 4
#WANDERING = 5
#for each of your states.
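#A hedged alternative sketch (not required by the original code, which
#expects plain integer module constants): the same states as an IntEnum,
#keeping the integer values while adding readable names.
from enum import IntEnum

class RobotState(IntEnum):
    FOLLOW_LEFT = 1
    FOLLOW_RIGHT = 2
    TURN_LEFT = 3
    TURN_RIGHT = 4
    WANDERING = 5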
|
[
"colepeterson4@gmail.com"
] |
colepeterson4@gmail.com
|
d6e187a43b57948110fc80cd494a67623b5d163b
|
816ef1dccf6212505fe4eb9579f2e121010a3c8e
|
/mysite/settings.py
|
8692deeaaf1b2f369ddc2dfb878059c0a8741afc
|
[] |
no_license
|
ginamb/my-first-blog1
|
fd82c66e1340d000c52bb665ed2de6ded6346d72
|
606e2c1cc072b82552e298c77c4b23a69ab9055b
|
refs/heads/master
| 2020-12-30T13:40:00.175766
| 2017-06-13T17:12:42
| 2017-06-13T17:12:42
| 91,241,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,200
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.10.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3_kj#(kt3j8e5a!r8afe72x-!w%5a@!s7ffox51@%t%d*+=cwh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Vilnius'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"ambrozaityte.g@gmail.com"
] |
ambrozaityte.g@gmail.com
|
c263e9bdb7f6f66dd4cc071975d148c40f35b7c8
|
5436ef13bec2476f5abe9d8f1f586a90d87119d6
|
/wsgi_adapter/request.py
|
9ef2791a22d337b0dea5ec33b9f16606eb322561
|
[
"MIT"
] |
permissive
|
carltongibson/azure-functions-wsgi-adapter
|
392f2890880882b3c7740d5a9ae1159a977d677b
|
0dc0a45aa1b6b72155c62dcb1388ff43b21a1c1d
|
refs/heads/master
| 2023-08-26T15:19:27.753700
| 2020-04-12T13:12:48
| 2020-04-12T13:12:48
| 157,197,174
| 9
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
import os
class AzureRequestAdapter:
def __init__(self, azure_request):
self.azure_request = azure_request
def as_dict(self):
"""WSGI environ MUST be a plain Python dict."""
req = self.azure_request
path_info = req.route_params.get('path_info', '')
path_info = '/' + os.getenv('FUNCTIONS_MOUNT_POINT') + '/' + path_info
environ = {'HTTP_' + k.upper().replace('-', '_'): v for k, v in req.headers.items()}
environ.update({
"REQUEST_METHOD": req.method,
"wsgi.input": req.get_body(), # Wrap in io.BytesIO?
"SERVER_NAME": "localhost",
"SERVER_PORT": "7071",
"PATH_INFO": path_info,
})
return environ
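# Hedged usage sketch with a stub request; the attribute names (method,
# headers, route_params, get_body) are exactly the ones the adapter reads
# above, but every value here is invented for illustration.
class _StubRequest(object):
    method = 'GET'
    headers = {'Content-Type': 'text/plain'}
    route_params = {'path_info': 'hello'}

    @staticmethod
    def get_body():
        return b''

if __name__ == '__main__':
    os.environ.setdefault('FUNCTIONS_MOUNT_POINT', 'app')  # the adapter requires this env var
    print(AzureRequestAdapter(_StubRequest()).as_dict()['PATH_INFO'])  # -> /app/hello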
|
[
"carlton.gibson@noumenal.es"
] |
carlton.gibson@noumenal.es
|
084e6fcc39a7e35823dbbce5f8f327f6cc08ee9e
|
b93321838d339c9354e735cd691bbc4bfa5b7e4e
|
/website/urls.py
|
c4d34bd6180baf470bac0c204b7c6f1bf8271ee6
|
[] |
no_license
|
deviank/firstDjangoSite
|
45a68759515ff5b5b31bfe5427495f270dee69b2
|
efd33f49878a8e495913b41b6358af383ad12be5
|
refs/heads/master
| 2021-01-11T02:05:51.631143
| 2017-05-18T15:20:36
| 2017-05-18T15:20:36
| 70,809,280
| 0
| 1
| null | 2017-03-29T07:10:17
| 2016-10-13T13:28:25
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,115
|
py
|
"""website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^music/', include('music.urls')),
url(r'^', include('music.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"noreply@github.com"
] |
deviank.noreply@github.com
|
b6eb439655a13dd067a935ed76756f4f55fc72b6
|
baeca56d705aab0325fe6c2fc5fbd0b8c9883303
|
/递归/3.py
|
584f48796718a0023c0086352b63cac3e9aa6283
|
[] |
no_license
|
lixiangwang/SEU---Data-structure-and-algorithm-design
|
69411225934636ea791ced494b598d610224b48f
|
cbc03bff7e885a196dfd6a6a75de112e8b6a3aa5
|
refs/heads/master
| 2020-11-23T22:46:19.928513
| 2019-12-23T11:06:14
| 2019-12-23T11:06:14
| 227,852,147
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
# encoding: utf-8

def LCS(a, b):
    # Longest common subsequence, recursing on the last characters.
    if a == '' or b == '':
        return ''
    elif a[-1] == b[-1]:
        return LCS(a[:-1], b[:-1]) + a[-1]
    else:
        sol_a = LCS(a[:-1], b)
        sol_b = LCS(a, b[:-1])
        if len(sol_a) > len(sol_b):
            return sol_a
        return sol_b

if __name__ == "__main__":
    a = 'abdebcbb'
    print('sequence a:', a)
    b = 'adacbcb'
    print('sequence b:', b)
    print('longest common subsequence of a and b:', LCS(a, b))
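# Hedged aside: the recursion above re-solves overlapping subproblems and is
# exponential in the worst case. A minimal memoized sketch with the same
# logic and tie-breaking, using functools.lru_cache:
from functools import lru_cache

@lru_cache(maxsize=None)
def lcs_memo(a, b):
    if a == '' or b == '':
        return ''
    if a[-1] == b[-1]:
        return lcs_memo(a[:-1], b[:-1]) + a[-1]
    sol_a = lcs_memo(a[:-1], b)
    sol_b = lcs_memo(a, b[:-1])
    return sol_a if len(sol_a) > len(sol_b) else sol_b

assert lcs_memo('abdebcbb', 'adacbcb') == LCS('abdebcbb', 'adacbcb')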
|
[
"47591862+lixiangwang@users.noreply.github.com"
] |
47591862+lixiangwang@users.noreply.github.com
|
86aa76b0f91c8917deb6bc800a3a98983bb2bb02
|
51f887286aa3bd2c3dbe4c616ad306ce08976441
|
/pybind/nos/v6_0_2f/overlay_gateway/attach/rbridge_id/__init__.py
|
444af2f5f0cc31eaaf8279728fd9afc3fcb242cc
|
[
"Apache-2.0"
] |
permissive
|
b2220333/pybind
|
a8c06460fd66a97a78c243bf144488eb88d7732a
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
refs/heads/master
| 2020-03-18T09:09:29.574226
| 2018-04-03T20:09:50
| 2018-04-03T20:09:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,382
|
py
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class rbridge_id(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-tunnels - based on the path /overlay-gateway/attach/rbridge-id. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__rb_add','__rb_remove',)
_yang_name = 'rbridge-id'
_rest_name = 'rbridge-id'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__rb_remove = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9]+(-[0-9]+)?(,[0-9]+(-[0-9]+)?)*'}), is_leaf=True, yang_name="rb-remove", rest_name="remove", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'alt-name': u'remove', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='comn:ui32-range', is_config=True)
self.__rb_add = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9]+(-[0-9]+)?(,[0-9]+(-[0-9]+)?)*'}), is_leaf=True, yang_name="rb-add", rest_name="add", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'alt-name': u'add', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='comn:ui32-range', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'overlay-gateway', u'attach', u'rbridge-id']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'overlay-gateway', u'attach', u'rbridge-id']
def _get_rb_add(self):
"""
Getter method for rb_add, mapped from YANG variable /overlay_gateway/attach/rbridge_id/rb_add (comn:ui32-range)
"""
return self.__rb_add
def _set_rb_add(self, v, load=False):
"""
Setter method for rb_add, mapped from YANG variable /overlay_gateway/attach/rbridge_id/rb_add (comn:ui32-range)
If this variable is read-only (config: false) in the
source YANG file, then _set_rb_add is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rb_add() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9]+(-[0-9]+)?(,[0-9]+(-[0-9]+)?)*'}), is_leaf=True, yang_name="rb-add", rest_name="add", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'alt-name': u'add', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='comn:ui32-range', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """rb_add must be of a type compatible with comn:ui32-range""",
'defined-type': "comn:ui32-range",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9]+(-[0-9]+)?(,[0-9]+(-[0-9]+)?)*'}), is_leaf=True, yang_name="rb-add", rest_name="add", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'alt-name': u'add', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='comn:ui32-range', is_config=True)""",
})
self.__rb_add = t
if hasattr(self, '_set'):
self._set()
def _unset_rb_add(self):
self.__rb_add = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9]+(-[0-9]+)?(,[0-9]+(-[0-9]+)?)*'}), is_leaf=True, yang_name="rb-add", rest_name="add", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'alt-name': u'add', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='comn:ui32-range', is_config=True)
def _get_rb_remove(self):
"""
Getter method for rb_remove, mapped from YANG variable /overlay_gateway/attach/rbridge_id/rb_remove (comn:ui32-range)
"""
return self.__rb_remove
def _set_rb_remove(self, v, load=False):
"""
Setter method for rb_remove, mapped from YANG variable /overlay_gateway/attach/rbridge_id/rb_remove (comn:ui32-range)
If this variable is read-only (config: false) in the
source YANG file, then _set_rb_remove is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rb_remove() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9]+(-[0-9]+)?(,[0-9]+(-[0-9]+)?)*'}), is_leaf=True, yang_name="rb-remove", rest_name="remove", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'alt-name': u'remove', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='comn:ui32-range', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """rb_remove must be of a type compatible with comn:ui32-range""",
'defined-type': "comn:ui32-range",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9]+(-[0-9]+)?(,[0-9]+(-[0-9]+)?)*'}), is_leaf=True, yang_name="rb-remove", rest_name="remove", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'alt-name': u'remove', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='comn:ui32-range', is_config=True)""",
})
self.__rb_remove = t
if hasattr(self, '_set'):
self._set()
def _unset_rb_remove(self):
self.__rb_remove = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9]+(-[0-9]+)?(,[0-9]+(-[0-9]+)?)*'}), is_leaf=True, yang_name="rb-remove", rest_name="remove", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'alt-name': u'remove', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='comn:ui32-range', is_config=True)
rb_add = __builtin__.property(_get_rb_add, _set_rb_add)
rb_remove = __builtin__.property(_get_rb_remove, _set_rb_remove)
_pyangbind_elements = {'rb_add': rb_add, 'rb_remove': rb_remove, }
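# Usage sketch (illustrative, not generated code; assumes the enclosing
# pyangbind container is instantiated as usual). Both leaves take a
# ui32-range string matched against the pattern above:
#   cfg.rb_add = "1-3,5"      # accepted: ids and ranges, comma separated
#   cfg.rb_remove = "7"       # accepted: a single id
#   cfg.rb_add = "one,two"    # rejected: the setter raises ValueError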
|
[
"badaniya@brocade.com"
] |
badaniya@brocade.com
|
2372188140a1e8060d79cb303a3e257bee50b3c8
|
c718f1694f84b96ee45ca348045b3e0a11c38444
|
/account_profile/migrations/0004_auto_20141207_0003.py
|
edc8931a8d22583289fb24831177cd385ea02008
|
[] |
no_license
|
damianpv/realtymplace
|
26a3ad48ad132d7993b3741fb6c351c376b5d9f2
|
7e84d46ddb82066c4b1b1d73abcb7df038f05dc3
|
refs/heads/master
| 2020-03-18T02:54:22.686441
| 2018-05-21T03:28:17
| 2018-05-21T03:28:17
| 134,212,955
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('account_profile', '0003_auto_20141207_0002'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='avatar',
field=models.ImageField(null=True, upload_to=b'avatar', blank=True),
),
]
|
[
"damian.adriel@gmail.com"
] |
damian.adriel@gmail.com
|
92bfa05bea61a869b6d79595e5b28f340350f31c
|
c4f4499e19a80381be650d01fb4f0c176f07cbb9
|
/Python/buttonLED.py
|
f5b8938628c1a699c19fa4b2e78f0e5fd69b5600
|
[] |
no_license
|
dcmid/doodads
|
6af8f61b3182707bd69f925507675b2ab2de2008
|
d48cfe3fc27407ba0e89dda741c24ec80e6337e9
|
refs/heads/master
| 2021-01-22T11:38:38.848244
| 2015-05-27T16:02:26
| 2015-05-27T16:02:26
| 33,454,497
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 488
|
py
|
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
GPIO.setup(15, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(40, GPIO.OUT)
while True:
    try:
        startTime=time.time()
        # With the pull-up enabled, pin 15 reads low (False) while the button
        # is held down, so this loop waits out the press, polling every 50 ms.
        while not GPIO.input(15):
            time.sleep(.05)
        elapsedTime=time.time()-startTime
        # Ignore presses shorter than 50 ms (debounce), then blink the LED on
        # pin 40 for roughly as long as the button was held.
        if elapsedTime>.05:
            print elapsedTime
            startTime=time.time()
            while time.time()<startTime+elapsedTime:
                GPIO.output(40,1)
                time.sleep(.05)
                GPIO.output(40,0)
                time.sleep(.05)
    except:
        # Any exception (typically Ctrl-C) ends the loop so cleanup can run.
        break
GPIO.cleanup()
|
[
"d.claymidkiff@gmail.com"
] |
d.claymidkiff@gmail.com
|
863dda01ba3e7e7715f7baace8a7169afe070a36
|
58bcdda18a727374830c8b536ca413714a137a36
|
/plebian/news/tests.py
|
c1ddda7b126ec9de362c9e7838cb3781e7570d89
|
[] |
no_license
|
alanzoppa/django-plebian
|
c337fa120090a46966791e69e41b2d5497b3bd87
|
eb3f861abf1da1ebee0550a774923fc4a7122294
|
refs/heads/master
| 2016-09-09T22:52:23.121713
| 2010-11-19T23:14:18
| 2010-11-19T23:14:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
import random
import json
from django.test.client import Client
from django.test import TestCase
from django.conf import settings
from django.core.cache import cache
from django.core.urlresolvers import reverse
from plebian.news.models import *
class SimpleTest(TestCase):
def test_validity_of_urls(self):
c = Client()
for i in NewsItem.objects.filter(published=True):
print 'testing:', i.get_absolute_url()
page = c.get(i.get_absolute_url())
            self.assertEqual(page.status_code, 200)
|
[
"azoppa@Manifest-Digitals-MacBook-Pro-5.local"
] |
azoppa@Manifest-Digitals-MacBook-Pro-5.local
|
2293fb2827ff3dc063a3779584f029916bbccf17
|
a700e0378dcb2409aaea43f671a242e7d747fd8a
|
/gui/Table.py
|
351b5db048854a55c1384208d88e5f5195a024a8
|
[] |
no_license
|
razzaksr/PythonAnnamalai
|
da8d3a09a97489e43cd880a8114e24412ba36362
|
4f0e431fcd0fe5cd22075c9348c9fff954785c89
|
refs/heads/master
| 2023-02-02T00:54:42.694291
| 2020-12-22T09:58:15
| 2020-12-22T09:58:15
| 281,579,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,741
|
py
|
# Getting records
from tkinter import *
from tkinter import messagebox
from tkinter.ttk import Combobox
from pymysql import *
class record(Tk):
def __init__(self):
Tk.__init__(self)
self.title("Getting records")
self.geometry("500x400")
p1 = PhotoImage(file="C:\\Users\\DOLL\\PycharmProjects\\MorningBatch\\eventmanage\\bday.ico")
self.iconphoto(False, p1)
        self.head=Label(self,text="Fetch by the following options")#,font=('Times New Roman',30))
self.head.grid(row=0,column=0)
self.spec = Combobox(self)
self.spec['values'] = ['edate', 'eid', 'ename', 'edept', 'eorg', 'prize', 'winner',
'participants']
self.spec.grid(row=2, column=0)
self.men=Entry(self)
self.men.grid(row=2,column=1)
self.bt = Button(self, text="GetOne", command=self.read)
self.bt.grid(row=2, column=2)
def read(self):
self.f1=font=('Times New Roman',12,'bold')
self.f2 = font = ('Times New Roman', 11, 'italic')
self.h1=Entry(self,font=self.f1);self.h1.insert(END,"Event Id");self.h1.grid(row=5,column=0)
self.h2 = Entry(self, font=self.f1);self.h2.insert(END, "Event Name");self.h2.grid(row=5, column=1)
self.h3 = Entry(self, font=self.f1);self.h3.insert(END, "Event Date");self.h3.grid(row=5, column=2)
self.h4 = Entry(self, font=self.f1);self.h4.insert(END, "Event Department");self.h4.grid(row=5, column=3)
self.h5 = Entry(self, font=self.f1);self.h5.insert(END, "Event Organizer");self.h5.grid(row=5, column=4)
self.h6 = Entry(self, font=self.f1);self.h6.insert(END, "Event Participants");self.h6.grid(row=5, column=5)
self.h7 = Entry(self, font=self.f1);self.h7.insert(END, "Event Winner");self.h7.grid(row=5, column=6)
self.h8 = Entry(self, font=self.f1);self.h8.insert(END, "Event Prize");self.h8.grid(row=5, column=7)
try:
con = connect("localhost", "root", "", "avscollege")
cur = con.cursor()
            # The column name comes from the fixed Combobox list above, so it is
            # safe to interpolate; the user-typed value is bound as a parameter
            # to guard against SQL injection.
            if self.spec.get()!='participants':
                qry="select * from events where "+self.spec.get()+"=%s"
                cur.execute(qry,(self.men.get(),))
            else:
                qry="select * from events where "+self.spec.get()+" like %s"
                cur.execute(qry,('%'+self.men.get()+'%',))
ware = cur.fetchall()
lin=6
for rows in range(len(ware)):
for each in range(len(ware[rows])):
self.data=Entry(self,font=self.f2)
self.data.insert(END,ware[rows][each])
self.data.grid(row=lin,column=each)
lin+=1
con.close()
except Exception as e:messagebox.showinfo("Error",e)
rec=record()
rec.mainloop()
|
[
"razzaksr@gmail.com"
] |
razzaksr@gmail.com
|
061c8009fa7bb28dc55261adf8cd2fd22130be4c
|
08bac92b1741c0b2e106935bab47ff65b309123c
|
/0x0F-python-object_relational_mapping/5-filter_cities.py
|
7ec7e23ceb57945823f543d7cfa6fe82316dd399
|
[] |
no_license
|
RoMalms10/holbertonschool-higher_level_programming
|
5702dbcc17156b66b472df79eddb55baac2613aa
|
aebff20e55c7fe07e9e3fb1ff33dd65d17d8ee1f
|
refs/heads/master
| 2021-09-14T10:10:56.680309
| 2018-05-11T17:59:40
| 2018-05-11T17:59:40
| 113,100,806
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 812
|
py
|
#!/usr/bin/python3
''' This module queries the cities table joined with states
and filters rows by the state name given as input
'''
if __name__ == "__main__":
import MySQLdb
from sys import argv
db = MySQLdb.connect(host="localhost", port=3306,
user=argv[1], passwd=argv[2], db=argv[3])
cur = db.cursor()
cur.execute("SELECT cities.name \
FROM cities \
INNER JOIN states \
ON cities.state_id = states.id \
WHERE states.name=%s \
ORDER BY cities.id ASC", (argv[4], ))
query = cur.fetchall()
for row in range(len(query)):
if row != len(query) - 1:
print(query[row][0], end=", ")
else:
print(query[row][0], end="")
print()
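    # Equivalent one-liner for the loop above (a sketch, same output):
    # print(", ".join(row[0] for row in query))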
cur.close()
db.close()
|
[
"156@holbertonschool.com"
] |
156@holbertonschool.com
|
3d5181261fd7d8e2a51740ad8383f25860efab5c
|
67a7ebf702ce3fd3b7d198313e3c5d444ca7ac0c
|
/modules/entry.py
|
a1dfbae9f114c76eb68865cd32324d7dd1b2372d
|
[
"Apache-2.0"
] |
permissive
|
syslock/ems
|
82b959efbdbba48da28d781afe5bfb66dfc3b072
|
241a93b90ef866200f78c023236921ab8f8c115c
|
refs/heads/master
| 2021-01-17T02:23:20.858297
| 2020-10-10T23:15:27
| 2020-10-10T23:15:27
| 4,173,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,298
|
py
|
import json
from lib import db_object
from lib import entry
from lib import errors
from modules import get as get_module
def process( app ):
query = app.query
obj_id = query.parms["id"]
if not app.user.can_read( obj_id ):
raise errors.PrivilegeError( "%d cannot read %d" % (app.user.id, obj_id) )
obj = db_object.DBObject.create_typed_object( app=app, object_id=obj_id )
result = {}
if type(obj)==entry.Entry:
if query.parms["method"] == "create_draft":
draft = obj.create_draft()
result = { "succeeded" : True, "draft" : get_module.get(app=app, object_ids=[draft.id], recursive=(True,True))[0] }
else:
raise errors.ParameterError( "Unsupported method for type" )
elif type(obj)==entry.Draft:
if query.parms["method"] == "publish":
entry_id = obj.publish()
result = { "succeeded" : True, "entry" : get_module.get(app=app, object_ids=[entry_id], recursive=(True,True))[0] }
elif query.parms["method"] == "merge_to_parent":
entry_id = obj.merge_to_parent()
result = { "succeeded" : True, "entry" : get_module.get(app=app, object_ids=[entry_id], recursive=(True,True))[0] }
else:
raise errors.ParameterError( "Unsupported method for type" )
else:
raise errors.ParameterError( "Object with unsupported type" )
app.response.output = json.dumps( result )
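# Illustrative request/response shapes, read off the branches above (the
# surrounding framework is assumed):
#   id=<entry id>, method=create_draft    -> {"succeeded": true, "draft": {...}}
#   id=<draft id>, method=publish         -> {"succeeded": true, "entry": {...}}
#   id=<draft id>, method=merge_to_parent -> {"succeeded": true, "entry": {...}}
# Any other method raises ParameterError; unreadable ids raise PrivilegeError.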
|
[
"syslock@gmx.de"
] |
syslock@gmx.de
|
d456c30b907b604c4ef16dcf5f3a4f3774303ec4
|
7979c7d4162d75c749d6e22c4914c3ff7f92dd5a
|
/我手敲的代码(中文注释)/chapter11/volatility/plugins/dumpfiles.py
|
afac695daf92e55f1fe01a5845fe3d6d6eb6c276
|
[] |
no_license
|
giantbranch/python-hacker-code
|
11f2bc491c43d20d754cefd7084057af47b3f62c
|
addbc8c73e7e6fb9e4fcadcec022fa1d3da4b96d
|
refs/heads/master
| 2023-08-05T01:46:19.582299
| 2021-11-25T06:17:40
| 2021-11-25T06:17:40
| 52,247,436
| 400
| 179
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 52,457
|
py
|
# Volatility
# Copyright (C) 2012-13 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation. You may not use, modify or
# distribute this program under any other version of the GNU General
# Public License.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
# Notwithstanding any rights to use the Software granted by the foregoing,
# if entities or individuals have received a Cease & Desist letter from
# the Volatility Project, the Volatility Foundation, or its copyright holders
# for violating the terms of the GPL version 2, those entities (their employees,
# subcontractors, independent contractors, and affiliates) and / or persons
# are granted no such rights and any use by any one or more of them is
# expressly prohibited, in accordance with Section 4 of the GPL version 2.
# Any rights granted to such entities and / or persons by earlier license
# agreements have been previously terminated as to them.
#pylint: disable-msg=C0111
import os
import re
import math
import volatility.obj as obj
import volatility.utils as utils
import volatility.debug as debug
import volatility.win32.tasks as tasks_mod
import volatility.win32.modules as modules
import volatility.plugins.common as common
import volatility.plugins.taskmods as taskmods
import json
#--------------------------------------------------------------------------------
# Constants
#--------------------------------------------------------------------------------
PAGE_SIZE = 0x1000
PAGE_MASK = PAGE_SIZE - 1
IMAGE_EXT = "img"
DATA_EXT = "dat"
FILEOFFSET_MASK = 0xFFFFFFFFFFFF0000
VACB_BLOCK = 0x40000
VACB_ARRAY = 0x80
VACB_OFFSET_SHIFT = 18
VACB_LEVEL_SHIFT = 7
VACB_SIZE_OF_FIRST_LEVEL = 1 << (VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT)
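# Sanity sketch (added for illustration): each VACB maps one 256 KB view
# (VACB_BLOCK), a single-level index array holds 128 pointers (VACB_ARRAY),
# so one level of the index covers 128 * 256 KB = 32 MB.
assert VACB_SIZE_OF_FIRST_LEVEL == VACB_ARRAY * VACB_BLOCK == 0x2000000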
class _CONTROL_AREA(obj.CType):
def extract_ca_file(self, unsafe = False):
""" Extracts a file from a specified CONTROL_AREA
Attempts to extract the memory resident pages pertaining to a
particular CONTROL_AREA object.
Args:
control_area: Instance of a CONTROL_AREA object
unsafe: Relax safety constraints for more data
Returns:
mdata: List of pages, (physoffset, fileoffset, size) tuples, that are memory resident
            zpad: List of pages, (offset, size) tuples, that are not memory resident
Raises:
"""
zpad = []
mdata = []
# Depending on the particular address space being used we need to
# determine if the MMPTE will be either 4 or 8 bytes. The x64
        # and IA32_PAE both use 8 byte PTEs, whereas IA32 uses 4 byte
# PTE entries.
memory_model = self.obj_vm.profile.metadata.get('memory_model', '32bit')
pae = self.obj_vm.pae
if pae:
mmpte_size = self.obj_vm.profile.get_obj_size("_MMPTEPA")
else:
mmpte_size = self.obj_vm.profile.get_obj_size("_MMPTE")
# Calculate the size of the _CONTROL_AREA object. It is used to find
# the correct offset for the SUBSECTION object and the size of the
# CONTROL_AREA can differ between versions of Windows.
control_area_size = self.size()
# The segment is used to describe the physical view of the
# file. We also use this as a semantic check to see if
# the processing should continue. If the Segment address
# is invalid, then we return.
Segment = self.Segment
if not Segment.is_valid():
return mdata, zpad
# The next semantic check validates that the _SEGMENT object
# points back to the appropriate _CONTROL_AREA object. If the
# check is invalid, then we return.
if (self.obj_offset != Segment.ControlArea):
return mdata, zpad
# This is a semantic check added to make sure the Segment.SizeOfSegment value
        # is consistent with the Segment.TotalNumberOfPtes. Inconsistencies occur
        # frequently when traversing CONTROL_AREA objects (~5%), often leading to
# impossible values. Thus, to be conservative we do not proceed if the
# Segment does not seem sound.
if Segment.SizeOfSegment != (Segment.TotalNumberOfPtes * PAGE_SIZE):
return mdata, zpad
# The _SUBSECTION object is typically found immediately following
# the CONTROL_AREA object. For Image Section Objects, the SUBSECTIONS
        # typically correspond with the sections found in the PE. On the other hand,
# for Data Section Objects, there is typically only a single valid SUBSECTION.
subsection_offset = self.obj_offset + control_area_size
#subsection = obj.Object("_SUBSECTION", subsection_offset, self.kaddr_space)
subsection = obj.Object("_SUBSECTION", subsection_offset, self.obj_vm)
# This was another check which was inspired by Ruud's code. It
# verifies that the first SubsectionBaase (Mmst) never starts
# at the beginning of a page. The UNSAFE option allows us to
# ignore this constraint. This was necessary for dumping file data
# for file objects found with filescan (ie $Mft)
SubsectionBase = subsection.SubsectionBase
if (SubsectionBase & PAGE_MASK == 0x0) and not unsafe:
return mdata, zpad
# We obtain the Subsections associated with this file
# by traversing the singly linked list. Ideally, this
        # list should be null (0) terminated. Upon occasion
        # we have seen instances where the link pointers are
        # undefined (XXX). If we hit an invalid pointer, then
        # we exit the traversal.
while subsection.is_valid() and subsection.v() != 0x0:
if not subsection:
break
# This constraint makes sure that the _SUBSECTION object
# points back to the associated CONTROL_AREA object. Otherwise,
# we exit the traversal.
if (self.obj_offset != subsection.ControlArea):
break
# Extract subsection meta-data into local variables
# this helps with performance and not having to do
# repetitive lookups.
PtesInSubsection = subsection.PtesInSubsection
SubsectionBase = subsection.SubsectionBase
NextSubsection = subsection.NextSubsection
            # The offset into the file is stored implicitly
# based on the PTE's location within the Subsection.
StartingSector = subsection.StartingSector
SubsectionOffset = StartingSector * 0x200
# This was another check based on something Ruud
            # had done. We also saw instances where DataSectionObjects
            # would hit a SubsectionBase that was page aligned
# and hit strange data. In those instances, the
# MMPTE SubsectionAddress would not point to the associated
# Subsection. (XXX)
if (SubsectionBase & PAGE_MASK == 0x0) and not unsafe:
break
ptecount = 0
while (ptecount < PtesInSubsection):
pteoffset = SubsectionBase + (mmpte_size * ptecount)
FileOffset = SubsectionOffset + ptecount * 0x1000
# The size of MMPTE changes depending on if it is IA32 (4 bytes)
# or IA32_PAE/AMD64 (8 bytes).
objname = "_MMPTE"
if pae:
objname = "_MMPTEPA"
mmpte = obj.Object(objname, offset = pteoffset, vm = \
subsection.obj_vm)
if not mmpte:
ptecount += 1
continue
# First we check if the entry is valid. If the entry is valid
# then we get the physical offset. The valid entries are actually
# handled by the hardware.
if mmpte.u.Hard.Valid == 0x1:
# There are some valid Page Table entries where bit 63
# is used to specify if the page is executable. This is
# maintained by the processor. If it is not executable,
# then the bit is set. Within the Intel documentation,
# this is known as the Execute-disable (XD) flag. Regardless,
# we will use the get_phys_addr method from the address space
# to obtain the physical address.
### Should we check the size of the PAGE? Haven't seen
# a hit for LargePage.
#if mmpte.u.Hard.LargePage == 0x1:
# print "LargePage"
physoffset = mmpte.u.Hard.PageFrameNumber << 12
mdata.append([physoffset, FileOffset, PAGE_SIZE])
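                    # e.g. (illustrative) a PageFrameNumber of 0x1a2b3 places this
                    # file page at physical offset 0x1a2b3000 (frame number << 12).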
ptecount += 1
continue
elif mmpte.u.Soft.Prototype == 0x1:
# If the entry is not a valid physical address then
# we check if it contains a pointer back to the SUBSECTION
# object. If so, the page is in the backing file and we will
                    # need to pad to maintain spatial integrity of the file. This
                    # check needs to be performed before looking for the transition flag.
# The prototype PTEs are initialized as MMPTE_SUBSECTION with the
# SubsectionAddress.
# On x86 systems that use 4 byte MMPTE , the MMPTE_SUBSECTION
# stores an "encoded" version of the SUBSECTION object address.
# The data is relative to global variable (MmSubsectionBase or
# MmNonPagedPoolEnd) depending on the WhichPool member of
# _SUBSECTION. This applies to x86 systems running ntoskrnl.exe.
# If bit 10 is set then it is prototype/subsection
if (memory_model == "32bit") and not pae:
SubsectionOffset = \
((mmpte.u.Subsect.SubsectionAddressHigh << 7) |
(mmpte.u.Subsect.SubsectionAddressLow << 3))
#WhichPool = mmpte.u.Subsect.WhichPool
#print "mmpte 0x%x ptecount 0x%x sub-32 0x%x pteoffset 0x%x which 0x%x subdelta 0x%x"%(mmpte.u.Long,ptecount,subsection_offset,pteoffset,WhichPool,SubsectionOffset)
zpad.append([FileOffset, PAGE_SIZE])
ptecount += 1
continue
if memory_model == "64bit" or pae:
SubsectionAddress = mmpte.u.Subsect.SubsectionAddress
else:
SubsectionAddress = mmpte.u.Long
if SubsectionAddress == subsection.obj_offset:
# sub proto/prot 4c0 420
#print "mmpte 0x%x ptecount 0x%x sub 0x%x offset 0x%x"%(mmpte.u.Long,ptecount,SubsectionAddress,pteoffset)
zpad.append([FileOffset, PAGE_SIZE])
ptecount += 1
continue
elif (SubsectionAddress == (subsection.obj_offset + 4)):
# This was a special case seen on IA32_PAE systems where
# the SubsectionAddress pointed to subsection.obj_offset+4
# (0x420, 0x460, 0x4a0)
#print "mmpte 0x%x ptecount 0x%x sub+4 0x%x offset 0x%x"%(mmpte.u.Long,ptecount,SubsectionAddress,pteoffset)
zpad.append([FileOffset, PAGE_SIZE])
ptecount += 1
continue
else:
#print "mmpte 0x%x ptecount 0x%x sub_unk 0x%x offset 0x%x suboffset 0x%x"%(mmpte.u.Long,ptecount,SubsectionAddress,pteoffset,subsection.obj_offset)
zpad.append([FileOffset, PAGE_SIZE])
ptecount += 1
continue
# Check if the entry is a DemandZero entry.
elif (mmpte.u.Soft.Transition == 0x0):
if ((mmpte.u.Soft.PageFileLow == 0x0) and
(mmpte.u.Soft.PageFileHigh == 0x0)):
# Example entries include: a0,e0
#print "mmpte 0x%x ptecount 0x%x zero offset 0x%x subsec 0x%x"%(mmpte.u.Long,ptecount,pteoffset,subsection.obj_offset)
zpad.append([FileOffset, PAGE_SIZE])
ptecount += 1
else:
#print "mmpte 0x%x ptecount 0x%x paged offset 0x%x subsec 0x%x file 0x%x offset 0x%x"%(mmpte.u.Long,ptecount,pteoffset,subsection.obj_offset,mmpte.u.Soft.PageFileLow,mmpte.u.Soft.PageFileHigh)
zpad.append([FileOffset, PAGE_SIZE])
ptecount += 1
# If the entry is not a valid physical address then
# we also check to see if it is in transition.
elif mmpte.u.Trans.Transition == 0x1:
physoffset = mmpte.u.Trans.PageFrameNumber << 12
#print "mmpte 0x%x ptecount 0x%x transition 0x%x offset 0x%x"%(mmpte.u.Long,ptecount,physoffset,pteoffset)
mdata.append([physoffset, FileOffset, PAGE_SIZE])
ptecount += 1
continue
else:
# This is a catch all for all the other entry types.
# sub proto/pro 420,4e0,460,4a0 (x64 +0x28)(x32 +4)
# other a0,e0,0, (20,60)
# 0x80000000
#print "mmpte 0x%x ptecount 0x%x other offset 0x%x subsec 0x%x"%(mmpte.u.Long,ptecount,pteoffset,subsection.obj_offset)
zpad.append([FileOffset, PAGE_SIZE])
ptecount += 1
# Traverse the singly linked list to its next member.
subsection = NextSubsection
return (mdata, zpad)
class _SHARED_CACHE_MAP(obj.CType):
def is_valid(self):
if not obj.CType.is_valid(self):
return False
# Added a semantic check to make sure the data is in a sound state. It's better
# to catch it early.
FileSize = self.FileSize.QuadPart
ValidDataLength = self.ValidDataLength.QuadPart
SectionSize = self.SectionSize.QuadPart
#print "SectionSize 0x%x < 0 or FileSize < 0x%x ValidDataLength 0x%x"%(SectionSize,FileSize,ValidDataLength)
#if SectionSize < 0 or (FileSize < ValidDataLength):
if SectionSize < 0 or ((FileSize < ValidDataLength) and (ValidDataLength != 0x7fffffffffffffff)):
return False
return True
def process_index_array(self, array_pointer, level, limit, vacbary = None):
""" Recursively process the sparse multilevel VACB index array
Args:
array_pointer: The address of a possible index array
shared_cache_map: The associated SHARED_CACHE_MAP object
level: The current level
limit: The level where we abandon all hope. Ideally this is 7
vacbary: An array of collected VACBs
Returns:
vacbary: Collected VACBs
"""
if vacbary is None:
vacbary = []
if level > limit:
return []
# Create an array of VACB entries
VacbArray = obj.Object("Array", offset = array_pointer, \
vm = self.obj_vm, count = VACB_ARRAY, \
targetType = "address", parent = self)
# Iterate through the entries
for _i in range(0, VACB_ARRAY):
# Check if the VACB entry is in use
if VacbArray[_i] == 0x0:
continue
Vacbs = obj.Object("_VACB", offset = int(VacbArray[_i]), vm = self.obj_vm)
# Check if this is a valid VACB entry by verifying
# the SharedCacheMap member.
if Vacbs.SharedCacheMap == self.obj_offset:
# This is a VACB associated with this cache map
vacbinfo = self.extract_vacb(Vacbs, VACB_BLOCK)
if vacbinfo:
vacbary.append(vacbinfo)
else:
#Process the next level of the multi-level array
vacbary = self.process_index_array(VacbArray[_i], level + 1, limit, vacbary)
#vacbary = vacbary + _vacbary
return vacbary
def extract_vacb(self, vacbs, size):
""" Extracts data from a specified VACB
Attempts to extract the memory resident data from a specified
VACB.
Args:
vacbs: The VACB object
size: How much data should be read from the VACB
shared_cache_map: The associated SHARED_CACHE_MAP object
Returns:
vacbinfo: Extracted VACB meta-information
"""
# This is used to collect summary information. We will eventually leverage this
# when creating the externally exposed APIs.
vacbinfo = {}
# Check if the Overlay member of _VACB is resident
# The Overlay member stores information about the FileOffset
# and the ActiveCount. This is just another proactive check
# to make sure the objects are seemingly sound.
if not vacbs.Overlay:
return vacbinfo
# We should add another check to make sure that
# the SharedCacheMap member of the VACB points back
# to the corresponding SHARED_CACHE_MAP
if vacbs.SharedCacheMap != self.v():
return vacbinfo
# The FileOffset member of VACB is used to denote the
# offset within the file where the view begins. Since all
# views are 256 KB in size, the bottom 16 bits are used to
# store the number of references to the view.
FileOffset = vacbs.Overlay.FileOffset.QuadPart
if not FileOffset:
return vacbinfo
ActiveCount = vacbs.Overlay.ActiveCount
FileOffset = FileOffset & FILEOFFSET_MASK
BaseAddress = vacbs.BaseAddress.v()
vacbinfo['foffset'] = int(FileOffset)
vacbinfo['acount'] = int(ActiveCount)
vacbinfo['voffset'] = int(vacbs.obj_offset)
vacbinfo['baseaddr'] = int(BaseAddress)
vacbinfo['size'] = int(size)
return vacbinfo
def extract_scm_file(self):
""" Extracts a file from a specified _SHARED_CACHE_MAP
Attempts to extract the memory resident pages pertaining to a
particular _SHARED_CACHE_MAP object.
Args:
shared_cache_map: Instance of a _SHARED_CACHE_MAP object
Returns:
vacbary: List of collected VACB meta information.
Raises:
"""
vacbary = []
if self.obj_offset == 0x0:
return
# Added a semantic check to make sure the data is in a sound state.
#FileSize = shared_cache_map.FileSize.QuadPart
#ValidDataLength = shared_cache_map.ValidDataLength.QuadPart
SectionSize = self.SectionSize.QuadPart
# Let's begin by determining the number of Virtual Address Control
# Blocks (VACB) that are stored within the cache (nonpaged). A VACB
        # represents one 256-KB view in the system cache. There are a couple of
# options to use for the data size: ValidDataLength, FileSize,
# and SectionSize.
full_blocks = SectionSize / VACB_BLOCK
left_over = SectionSize % VACB_BLOCK
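        # e.g. (illustrative) SectionSize = 0x50000 (320 KB) yields one full
        # 256 KB view plus a 0x10000 byte (64 KB) spill-over view.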
# As an optimization, the shared cache map object contains a VACB index
# array of four entries. The VACB index arrays are arrays of pointers
# to VACBs, that track which views of a given file are mapped in the cache.
# For example, the first entry in the VACB index array refers to the first
# 256 KB of the file. The InitialVacbs can describe a file up to 1 MB (4xVACB).
iterval = 0
while (iterval < full_blocks) and (full_blocks <= 4):
Vacbs = self.InitialVacbs[iterval]
vacbinfo = self.extract_vacb(Vacbs, VACB_BLOCK)
if vacbinfo: vacbary.append(vacbinfo)
iterval += 1
# We also have to account for the spill over data
# that is not found in the full blocks. The first case to
# consider is when the spill over is still in InitialVacbs.
if (left_over > 0) and (full_blocks < 4):
Vacbs = self.InitialVacbs[iterval]
vacbinfo = self.extract_vacb(Vacbs, left_over)
if vacbinfo: vacbary.append(vacbinfo)
        # If the file is larger than 1 MB, a separate VACB index array
# needs to be allocated. This is based on how many 256 KB blocks
# would be required for the size of the file. This newly allocated
# VACB index array is found through the Vacbs member of
# SHARED_CACHE_MAP.
Vacbs = self.Vacbs
if not Vacbs or (Vacbs.v() == 0):
return vacbary
# There are a number of instances where the initial value in
        # InitialVacb will also be the first entry in Vacbs. Thus we
# ignore, since it was already processed. It is possible to just
# process again as the file offset is specified for each VACB.
if self.InitialVacbs[0].obj_offset == Vacbs.v():
return vacbary
        # If the file is less than 32 MB then it can be found in
# a single level VACB index array.
size_of_pointer = self.obj_vm.profile.get_obj_size("address")
if not SectionSize > VACB_SIZE_OF_FIRST_LEVEL:
ArrayHead = Vacbs.v()
_i = 0
for _i in range(0, full_blocks):
vacb_addr = ArrayHead + (_i * size_of_pointer)
vacb_entry = obj.Object("address", offset = vacb_addr, vm = Vacbs.obj_vm)
# If we find a zero entry, then we proceed to the next one.
# If the entry is zero, then the view is not mapped and we
# skip. We do not pad because we use the FileOffset to seek
# to the correct offset in the file.
if not vacb_entry or (vacb_entry.v() == 0x0):
continue
Vacb = obj.Object("_VACB", offset = vacb_entry.v(), vm = self.obj_vm)
vacbinfo = self.extract_vacb(Vacb, VACB_BLOCK)
if vacbinfo:
vacbary.append(vacbinfo)
if left_over > 0:
vacb_addr = ArrayHead + ((_i + 1) * size_of_pointer)
vacb_entry = obj.Object("address", offset = vacb_addr, vm = Vacbs.obj_vm)
if not vacb_entry or (vacb_entry.v() == 0x0):
return vacbary
Vacb = obj.Object("_VACB", offset = vacb_entry.v(), vm = self.obj_vm)
vacbinfo = self.extract_vacb(Vacb, left_over)
if vacbinfo:
vacbary.append(vacbinfo)
# The file is less than 32 MB, so we can
# stop processing.
return vacbary
        # If we get to this point, then we know that the SectionSize is greater than
        # VACB_SIZE_OF_FIRST_LEVEL (32 MB). Then we have a sparse multilevel index
        # array where each VACB index array is made up of 128 entries. We no
# longer assume the data is sequential. (Log2 (32 MB) - 18)/7
#tree_depth = math.ceil((math.ceil(math.log(file_size, 2)) - 18)/7)
level_depth = math.ceil(math.log(SectionSize, 2))
level_depth = (level_depth - VACB_OFFSET_SHIFT) / VACB_LEVEL_SHIFT
level_depth = math.ceil(level_depth)
limit_depth = level_depth
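        # Worked example (illustrative): a 64 MB section has SectionSize = 2**26,
        # so level_depth = ceil((26 - 18) / 7.0) = ceil(1.14...) = 2 index levels.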
if SectionSize > VACB_SIZE_OF_FIRST_LEVEL:
# Create an array of 128 entries for the VACB index array
VacbArray = obj.Object("Array", offset = Vacbs.v(), \
vm = self.obj_vm, count = VACB_ARRAY, \
targetType = "address", parent = self)
# We use a bit of a brute force method. We walk the
# array and if any entry points to the shared cache map
# object then we extract it. Otherwise, if it is non-zero
# we attempt to traverse to the next level.
for _i in range(0, VACB_ARRAY):
if VacbArray[_i] == 0x0:
continue
Vacb = obj.Object("_VACB", offset = int(VacbArray[_i]), vm = self.obj_vm)
if Vacb.SharedCacheMap == self.obj_offset:
vacbinfo = self.extract_vacb(Vacb, VACB_BLOCK)
if vacbinfo:
vacbary.append(vacbinfo)
else:
# The Index is a pointer
#Process the next level of the multi-level array
# We set the limit_depth to be the depth of the tree
# as determined from the size and we initialize the
# current level to 2.
vacbary = self.process_index_array(VacbArray[_i], 2, limit_depth, vacbary)
#vacbary = vacbary + _vacbary
return vacbary
class ControlAreaModification(obj.ProfileModification):
conditions = {'os': lambda x: x == 'windows'}
def modification(self, profile):
profile.object_classes.update({
'_CONTROL_AREA': _CONTROL_AREA,
'_SHARED_CACHE_MAP': _SHARED_CACHE_MAP,
})
#--------------------------------------------------------------------------------
# VTypes
#--------------------------------------------------------------------------------
# Windows x86 symbols for ntkrnlpa
ntkrnlpa_types_x86 = {
'__ntkrnlpa' : [ 0x8, {
'Long' : [ 0x0, ['unsigned long long']],
'VolatileLong' : [ 0x0, ['unsigned long long']],
'Hard' : [ 0x0, ['_MMPTE_HARDWARE_64']],
'Flush' : [ 0x0, ['_HARDWARE_PTE']],
'Proto' : [ 0x0, ['_MMPTE_PROTOTYPE']],
'Soft' : [ 0x0, ['_MMPTE_SOFTWARE_64']],
'TimeStamp' : [ 0x0, ['_MMPTE_TIMESTAMP']],
'Trans' : [ 0x0, ['_MMPTE_TRANSITION_64']],
'Subsect' : [ 0x0, ['_MMPTE_SUBSECTION_64']],
'List' : [ 0x0, ['_MMPTE_LIST']],
} ],
'_MMPTEPA' : [ 0x8, {
'u' : [ 0x0, ['__ntkrnlpa']],
} ],
'_MMPTE_SUBSECTION_64' : [ 0x8, {
'Valid' : [ 0x0, ['BitField', dict(start_bit = 0, end_bit = 1, native_type = 'unsigned long long')]],
'Unused0' : [ 0x0, ['BitField', dict(start_bit = 1, end_bit = 5, native_type = 'unsigned long long')]],
'Protection' : [ 0x0, ['BitField', dict(start_bit = 5, end_bit = 10, native_type = 'unsigned long long')]],
'Prototype' : [ 0x0, ['BitField', dict(start_bit = 10, end_bit = 11, native_type = 'unsigned long long')]],
'Unused1' : [ 0x0, ['BitField', dict(start_bit = 11, end_bit = 32, native_type = 'unsigned long long')]],
'SubsectionAddress' : [ 0x0, ['BitField', dict(start_bit = 32, end_bit = 64, native_type = 'long long')]],
} ],
'_MMPTE_TRANSITION_64' : [ 0x8, {
'Valid' : [ 0x0, ['BitField', dict(start_bit = 0, end_bit = 1, native_type = 'unsigned long long')]],
'Write' : [ 0x0, ['BitField', dict(start_bit = 1, end_bit = 2, native_type = 'unsigned long long')]],
'Owner' : [ 0x0, ['BitField', dict(start_bit = 2, end_bit = 3, native_type = 'unsigned long long')]],
'WriteThrough' : [ 0x0, ['BitField', dict(start_bit = 3, end_bit = 4, native_type = 'unsigned long long')]],
'CacheDisable' : [ 0x0, ['BitField', dict(start_bit = 4, end_bit = 5, native_type = 'unsigned long long')]],
'Protection' : [ 0x0, ['BitField', dict(start_bit = 5, end_bit = 10, native_type = 'unsigned long long')]],
'Prototype' : [ 0x0, ['BitField', dict(start_bit = 10, end_bit = 11, native_type = 'unsigned long long')]],
'Transition' : [ 0x0, ['BitField', dict(start_bit = 11, end_bit = 12, native_type = 'unsigned long long')]],
'PageFrameNumber' : [ 0x0, ['BitField', dict(start_bit = 12, end_bit = 48, native_type = 'unsigned long long')]],
'Unused' : [ 0x0, ['BitField', dict(start_bit = 48, end_bit = 64, native_type = 'unsigned long long')]],
}],
'_MMPTE_HARDWARE_64' : [ 0x8, {
'Valid' : [ 0x0, ['BitField', dict(start_bit = 0, end_bit = 1, native_type = 'unsigned long long')]],
'Dirty1' : [ 0x0, ['BitField', dict(start_bit = 1, end_bit = 2, native_type = 'unsigned long long')]],
'Owner' : [ 0x0, ['BitField', dict(start_bit = 2, end_bit = 3, native_type = 'unsigned long long')]],
'WriteThrough' : [ 0x0, ['BitField', dict(start_bit = 3, end_bit = 4, native_type = 'unsigned long long')]],
'CacheDisable' : [ 0x0, ['BitField', dict(start_bit = 4, end_bit = 5, native_type = 'unsigned long long')]],
'Accessed' : [ 0x0, ['BitField', dict(start_bit = 5, end_bit = 6, native_type = 'unsigned long long')]],
'Dirty' : [ 0x0, ['BitField', dict(start_bit = 6, end_bit = 7, native_type = 'unsigned long long')]],
'LargePage' : [ 0x0, ['BitField', dict(start_bit = 7, end_bit = 8, native_type = 'unsigned long long')]],
'Global' : [ 0x0, ['BitField', dict(start_bit = 8, end_bit = 9, native_type = 'unsigned long long')]],
'CopyOnWrite' : [ 0x0, ['BitField', dict(start_bit = 9, end_bit = 10, native_type = 'unsigned long long')]],
'Unused' : [ 0x0, ['BitField', dict(start_bit = 10, end_bit = 11, native_type = 'unsigned long long')]],
'Write' : [ 0x0, ['BitField', dict(start_bit = 11, end_bit = 12, native_type = 'unsigned long long')]],
'PageFrameNumber' : [ 0x0, ['BitField', dict(start_bit = 12, end_bit = 48, native_type = 'unsigned long long')]],
'reserved1' : [ 0x0, ['BitField', dict(start_bit = 48, end_bit = 52, native_type = 'unsigned long long')]],
'SoftwareWsIndex' : [ 0x0, ['BitField', dict(start_bit = 52, end_bit = 63, native_type = 'unsigned long long')]],
'NoExecute' : [ 0x0, ['BitField', dict(start_bit = 63, end_bit = 64, native_type = 'unsigned long long')]],
} ],
'_MMPTE_SOFTWARE_64' : [ 0x8, {
'Valid' : [ 0x0, ['BitField', dict(start_bit = 0, end_bit = 1, native_type = 'unsigned long long')]],
'PageFileLow' : [ 0x0, ['BitField', dict(start_bit = 1, end_bit = 5, native_type = 'unsigned long long')]],
'Protection' : [ 0x0, ['BitField', dict(start_bit = 5, end_bit = 10, native_type = 'unsigned long long')]],
'Prototype' : [ 0x0, ['BitField', dict(start_bit = 10, end_bit = 11, native_type = 'unsigned long long')]],
'Transition' : [ 0x0, ['BitField', dict(start_bit = 11, end_bit = 12, native_type = 'unsigned long long')]],
'UsedPageTableEntries' : [ 0x0, ['BitField', dict(start_bit = 12, end_bit = 22, native_type = 'unsigned long long')]],
'InStore' : [ 0x0, ['BitField', dict(start_bit = 22, end_bit = 23, native_type = 'unsigned long long')]],
'Reserved' : [ 0x0, ['BitField', dict(start_bit = 23, end_bit = 32, native_type = 'unsigned long long')]],
'PageFileHigh' : [ 0x0, ['BitField', dict(start_bit = 32, end_bit = 64, native_type = 'unsigned long long')]],
} ],
}
class DumpFilesVTypesx86(obj.ProfileModification):
"""This modification applies the vtypes for all
versions of 32bit Windows."""
before = ['WindowsObjectClasses']
conditions = {'os': lambda x: x == 'windows',
'memory_model': lambda x : x == '32bit'}
def modification(self, profile):
profile.vtypes.update(ntkrnlpa_types_x86)
class DumpFiles(common.AbstractWindowsCommand):
"""Extract memory mapped and cached files"""
def __init__(self, config, *args, **kwargs):
common.AbstractWindowsCommand.__init__(self, config, *args, **kwargs)
self.kaddr_space = None
self.filters = []
config.add_option('REGEX', short_option = 'r',
help = 'Dump files matching REGEX',
action = 'store', type = 'string')
config.add_option('IGNORE-CASE', short_option = 'i',
help = 'Ignore case in pattern match',
action = 'store_true', default = False)
config.add_option('OFFSET', short_option = 'o', default = None,
help = 'Dump files for Process with physical address OFFSET',
action = 'store', type = 'int')
config.add_option('PHYSOFFSET', short_option = 'Q', default = None,
help = 'Dump File Object at physical address PHYSOFFSET',
action = 'store', type = 'int')
config.add_option('DUMP-DIR', short_option = 'D', default = None,
cache_invalidator = False,
help = 'Directory in which to dump extracted files')
config.add_option('SUMMARY-FILE', short_option = 'S', default = None,
cache_invalidator = False,
help = 'File where to store summary information')
config.add_option('PID', short_option = 'p', default = None,
help = 'Operate on these Process IDs (comma-separated)',
action = 'store', type = 'str')
config.add_option('NAME', short_option = 'n',
help = 'Include extracted filename in output file path',
action = 'store_true', default = False)
config.add_option('UNSAFE', short_option = 'u',
help = 'Relax safety constraints for more data',
action = 'store_true', default = False)
# Possible filters include:
# SharedCacheMap,DataSectionObject,ImageSectionObject,HandleTable,VAD
config.add_option("FILTER", short_option = 'F', default = None,
help = 'Filters to apply (comma-separated)')
def filter_tasks(self, tasks):
""" Reduce the tasks based on the user selectable PIDS parameter.
Returns a reduced list or the full list if config.PIDS not specified.
"""
if self._config.PID is None:
return tasks
try:
pidlist = [int(p) for p in self._config.PID.split(',')]
except ValueError:
debug.error("Invalid PID {0}".format(self._config.PID))
return [t for t in tasks if t.UniqueProcessId in pidlist]
def audited_read_bytes(self, vm, vaddr, length, pad):
""" This function provides an audited zread capability
It performs a similar function to zread, in that it will
pad "invalid" pages. The main difference is that it allows
us to collect auditing information about which pages were actually
present and which ones were padded.
Args:
vm: The address space to read the data from.
vaddr: The virtual address to start reading the data from.
length: How many bytes to read
pad: This argument controls if the unavailable bytes are padded.
Returns:
ret: Data that was read
mdata: List of pages that are memory resident
            zpad: List of pages that are not memory resident
Raises:
"""
zpad = []
mdata = []
vaddr, length = int(vaddr), int(length)
ret = ''
while length > 0:
chunk_len = min(length, PAGE_SIZE - (vaddr % PAGE_SIZE))
buf = vm.read(vaddr, chunk_len)
if vm.vtop(vaddr) is None:
zpad.append([vaddr, chunk_len])
if pad:
buf = '\x00' * chunk_len
else:
buf = ''
else:
mdata.append([vaddr, chunk_len])
ret += buf
vaddr += chunk_len
length -= chunk_len
return ret, mdata, zpad
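        # Usage sketch (addresses are illustrative):
        #   data, present, padded = self.audited_read_bytes(vm, 0x401800, 0x3000, True)
        # walks the range page chunk by page chunk; 'present' and 'padded' each
        # collect [vaddr, chunk_len] pairs for resident and missing pages.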
def calculate(self):
""" Finds all the requested FILE_OBJECTS
Traverses the VAD and HandleTable to find all requested
FILE_OBJECTS
"""
# Initialize containers for collecting artifacts.
control_area_list = []
shared_maps = []
procfiles = []
# These lists are used for object collecting files from
# both the VAD and handle tables
vadfiles = []
handlefiles = []
# Determine which filters the user wants to see
self.filters = []
if self._config.FILTER:
self.filters = self._config.FILTER.split(',')
# Instantiate the kernel address space
self.kaddr_space = utils.load_as(self._config)
# Check to see if the physical address offset was passed for a
# particular process. Otherwise, use the whole task list.
if self._config.OFFSET != None:
tasks_list = [taskmods.DllList.virtual_process_from_physical_offset(
self.kaddr_space, self._config.OFFSET)]
else:
# Filter for the specified processes
tasks_list = self.filter_tasks(tasks_mod.pslist(self.kaddr_space))
# If a regex is specified, build it.
if self._config.REGEX:
try:
if self._config.IGNORE_CASE:
file_re = re.compile(self._config.REGEX, re.I)
else:
file_re = re.compile(self._config.REGEX)
except re.error, e:
debug.error('Error parsing regular expression: {0:s}'.format(e))
# Check to see if a specific physical address was specified for a
# FILE_OBJECT. In particular, this is useful for FILE_OBJECTS that
# are found with filescan that are not associated with a process
# For example, $Mft.
if self._config.PHYSOFFSET:
file_obj = obj.Object("_FILE_OBJECT", self._config.PHYSOFFSET, self.kaddr_space.base, native_vm = self.kaddr_space)
procfiles.append((None, [file_obj]))
#return
# Iterate through the process list and collect all references to
# FILE_OBJECTS from both the VAD and HandleTable. Each open handle to a file
# has a corresponding FILE_OBJECT.
if not self._config.PHYSOFFSET:
for task in tasks_list:
pid = task.UniqueProcessId
# Extract FILE_OBJECTS from the VAD
if not self.filters or "VAD" in self.filters:
for vad in task.VadRoot.traverse():
if vad != None:
try:
control_area = vad.ControlArea
if not control_area:
continue
file_object = vad.FileObject
if file_object:
vadfiles.append(file_object)
except AttributeError:
pass
if not self.filters or "HandleTable" in self.filters:
# Extract the FILE_OBJECTS from the handle table
if task.ObjectTable.HandleTableList:
for handle in task.ObjectTable.handles():
otype = handle.get_object_type()
if otype == "File":
file_obj = handle.dereference_as("_FILE_OBJECT")
handlefiles.append(file_obj)
# Append the lists of file objects
#allfiles = handlefiles + vadfiles
procfiles.append((pid, handlefiles + vadfiles))
for pid, allfiles in procfiles:
for file_obj in allfiles:
if not self._config.PHYSOFFSET:
offset = file_obj.obj_offset
else:
offset = self._config.PHYSOFFSET
name = None
if file_obj.FileName:
name = str(file_obj.file_name_with_device())
# Filter for specific FILE_OBJECTS based on user defined
# regular expression.
if self._config.REGEX:
if not name:
continue
if not file_re.search(name):
continue
# The SECTION_OBJECT_POINTERS structure is used by the memory
# manager and cache manager to store file-mapping and cache information
# for a particular file stream. We will use it to determine what type
# of FILE_OBJECT we have and how it should be parsed.
if file_obj.SectionObjectPointer:
DataSectionObject = \
file_obj.SectionObjectPointer.DataSectionObject
SharedCacheMap = \
file_obj.SectionObjectPointer.SharedCacheMap
ImageSectionObject = \
file_obj.SectionObjectPointer.ImageSectionObject
# The ImageSectionObject is used to track state information for
# an executable file stream. We will use it to extract memory
# mapped binaries.
if not self.filters or "ImageSectionObject" in self.filters:
if ImageSectionObject and ImageSectionObject != 0:
summaryinfo = {}
                            # It points to an image section object (_CONTROL_AREA)
control_area = \
ImageSectionObject.dereference_as('_CONTROL_AREA')
if not control_area in control_area_list:
control_area_list.append(control_area)
# The format of the filenames: file.<pid>.<control_area>.[img|dat]
ca_offset_string = "0x{0:x}".format(control_area.obj_offset)
if self._config.NAME and name != None:
fname = name.split("\\")
ca_offset_string += "." + fname[-1]
file_string = ".".join(["file", str(pid), ca_offset_string, IMAGE_EXT])
of_path = os.path.join(self._config.DUMP_DIR, file_string)
(mdata, zpad) = control_area.extract_ca_file(self._config.UNSAFE)
summaryinfo['name'] = name
summaryinfo['type'] = "ImageSectionObject"
if pid:
summaryinfo['pid'] = int(pid)
else:
summaryinfo['pid'] = None
summaryinfo['present'] = mdata
summaryinfo['pad'] = zpad
summaryinfo['fobj'] = int(offset)
summaryinfo['ofpath'] = of_path
yield summaryinfo
# The DataSectionObject is used to track state information for
# a data file stream. We will use it to extract artifacts of
# memory mapped data files.
if not self.filters or "DataSectionObject" in self.filters:
if DataSectionObject and DataSectionObject != 0:
summaryinfo = {}
# It points to a data section object (CONTROL_AREA)
control_area = DataSectionObject.dereference_as('_CONTROL_AREA')
if not control_area in control_area_list:
control_area_list.append(control_area)
# The format of the filenames: file.<pid>.<control_area>.[img|dat]
ca_offset_string = "0x{0:x}".format(control_area.obj_offset)
if self._config.NAME and name != None:
fname = name.split("\\")
ca_offset_string += "." + fname[-1]
file_string = ".".join(["file", str(pid), ca_offset_string, DATA_EXT])
of_path = os.path.join(self._config.DUMP_DIR, file_string)
(mdata, zpad) = control_area.extract_ca_file(self._config.UNSAFE)
summaryinfo['name'] = name
summaryinfo['type'] = "DataSectionObject"
if pid:
summaryinfo['pid'] = int(pid)
else:
summaryinfo['pid'] = None
summaryinfo['present'] = mdata
summaryinfo['pad'] = zpad
summaryinfo['fobj'] = int(offset)
summaryinfo['ofpath'] = of_path
yield summaryinfo
# The SharedCacheMap is used to track views that are mapped to the
# data file stream. Each cached file has a single SHARED_CACHE_MAP object,
# which has pointers to slots in the system cache which contain views of the file.
# The shared cache map is used to describe the state of the cached file.
if self.filters and "SharedCacheMap" not in self.filters:
continue
if SharedCacheMap:
vacbary = []
summaryinfo = {}
#The SharedCacheMap member points to a SHARED_CACHE_MAP object.
shared_cache_map = SharedCacheMap.dereference_as('_SHARED_CACHE_MAP')
if shared_cache_map.obj_offset == 0x0:
continue
# Added a semantic check to make sure the data is in a sound state. It's better
# to catch it early.
if not shared_cache_map.is_valid():
continue
if not shared_cache_map.obj_offset in shared_maps:
shared_maps.append(shared_cache_map.obj_offset)
else:
continue
shared_cache_map_string = ".0x{0:x}".format(shared_cache_map.obj_offset)
if self._config.NAME and name != None:
fname = name.split("\\")
shared_cache_map_string = shared_cache_map_string + "." + fname[-1]
of_path = os.path.join(self._config.DUMP_DIR, "file." + str(pid) + shared_cache_map_string + ".vacb")
vacbary = shared_cache_map.extract_scm_file()
summaryinfo['name'] = name
summaryinfo['type'] = "SharedCacheMap"
if pid:
summaryinfo['pid'] = int(pid)
else:
summaryinfo['pid'] = None
summaryinfo['fobj'] = int(offset)
summaryinfo['ofpath'] = of_path
summaryinfo['vacbary'] = vacbary
yield summaryinfo
def render_text(self, outfd, data):
"""Renders output for the dumpfiles plugin.
This includes extracting the file artifacts from memory
to the specified dump directory.
Args:
outfd: The file descriptor to write the text to.
data: (summaryinfo)
"""
# Summary file object
summaryfo = None
summaryinfo = data
if self._config.DUMP_DIR == None:
debug.error("Please specify a dump directory (--dump-dir)")
if not os.path.isdir(self._config.DUMP_DIR):
debug.error(self._config.DUMP_DIR + " is not a directory")
if self._config.SUMMARY_FILE:
summaryfo = open(self._config.SUMMARY_FILE, 'wb')
for summaryinfo in data:
if summaryinfo['type'] == "DataSectionObject":
outfd.write("DataSectionObject {0:#010x} {1:<6} {2}\n".format(summaryinfo['fobj'], summaryinfo['pid'], summaryinfo['name']))
if len(summaryinfo['present']) == 0:
continue
of = open(summaryinfo['ofpath'], 'wb')
for mdata in summaryinfo['present']:
rdata = None
if not mdata[0]:
continue
try:
rdata = self.kaddr_space.base.read(mdata[0], mdata[2])
except (IOError, OverflowError):
debug.debug("IOError: Pid: {0} File: {1} PhysAddr: {2} Size: {3}".format(summaryinfo['pid'], summaryinfo['name'], mdata[0], mdata[2]))
if not rdata:
continue
of.seek(mdata[1])
of.write(rdata)
continue
# XXX Verify FileOffsets
#for zpad in summaryinfo['pad']:
# of.seek(zpad[0])
# of.write("\0" * zpad[1])
if self._config.SUMMARY_FILE:
json.dump(summaryinfo, summaryfo)
of.close()
elif summaryinfo['type'] == "ImageSectionObject":
outfd.write("ImageSectionObject {0:#010x} {1:<6} {2}\n".format(summaryinfo['fobj'], summaryinfo['pid'], summaryinfo['name']))
if len(summaryinfo['present']) == 0:
continue
of = open(summaryinfo['ofpath'], 'wb')
for mdata in summaryinfo['present']:
rdata = None
if not mdata[0]:
continue
try:
rdata = self.kaddr_space.base.read(mdata[0], mdata[2])
except (IOError, OverflowError):
debug.debug("IOError: Pid: {0} File: {1} PhysAddr: {2} Size: {3}".format(summaryinfo['pid'], summaryinfo['name'], mdata[0], mdata[2]))
if not rdata:
continue
of.seek(mdata[1])
of.write(rdata)
continue
# XXX Verify FileOffsets
#for zpad in summaryinfo['pad']:
# print "ZPAD 0x%x"%(zpad[0])
# of.seek(zpad[0])
# of.write("\0" * zpad[1])
if self._config.SUMMARY_FILE:
json.dump(summaryinfo, summaryfo)
of.close()
elif summaryinfo['type'] == "SharedCacheMap":
outfd.write("SharedCacheMap {0:#010x} {1:<6} {2}\n".format(summaryinfo['fobj'], summaryinfo['pid'], summaryinfo['name']))
of = open(summaryinfo['ofpath'], 'wb')
for vacb in summaryinfo['vacbary']:
if not vacb:
continue
(rdata, mdata, zpad) = self.audited_read_bytes(self.kaddr_space, vacb['baseaddr'], vacb['size'], True)
### We need to update the mdata,zpad
if rdata:
try:
of.seek(vacb['foffset'])
of.write(rdata)
except IOError:
# TODO: Handle things like write errors (not enough disk space, etc)
continue
vacb['present'] = mdata
vacb['pad'] = zpad
if self._config.SUMMARY_FILE:
json.dump(summaryinfo, summaryfo)
of.close()
else:
return
if self._config.SUMMARY_FILE:
summaryfo.close()
|
[
"493254599@qq.com"
] |
493254599@qq.com
|
605bd581f662bb2525fc236b7fb33a2086e737a5
|
8780bc7f252f14ff5406ce965733c099034920b7
|
/pyCode/MongoToMysql/ods_mongodb_enterprise.py
|
58d9f7ddcd5e4fab7187e43a6bedc2822c48e69c
|
[] |
no_license
|
13661892653/workspace
|
5e4e458d31b9355c67d67ba7d9faccbcc1ac9f6b
|
17960becabb3b4f0fc30009c71a11c4f7a5f8330
|
refs/heads/master
| 2020-12-24T20:00:15.541432
| 2018-08-14T13:56:15
| 2018-08-14T13:56:15
| 86,225,975
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,727
|
py
|
#coding=utf-8
#Version:python3.5.2
#Tools:Pycharm
#Date:
"""
Data warehouse job: moves enterprise records from MongoDB into MySQL.
"""
__author__ = "Colby"
import pymongo
import pymysql
#-------------------------- open the database connection ------------------------------
def start_MySQL():
conn = pymysql.connect(host='localhost', user='root', passwd='root', db='youboy', charset='utf8')
cur = conn.cursor()
myConn_list = [conn, cur]
print('success',myConn_list)
return myConn_list
#---------------------------------------------------------------------
#-------------------------- close the database --------------------------------
def close_MySQL(cur,conn):
cur.close()
conn.commit()
conn.close()
#------------------------------------------------------------------
if __name__ == "__main__":
client = pymongo.MongoClient('localhost', 27017)
TempleSpider = client['youboy']
enterprise_collect = TempleSpider['enterprise']
print('enterprise_collect',enterprise_collect)
myConn_list = start_MySQL()
cur = myConn_list[1]
conn = myConn_list[0]
sqli = "replace into ods_mongodb_enterprise(" \
"_id" \
",catagory_1_Name" \
",catagory_1_Url" \
",catagory_2_Name" \
",catagory_2_Url" \
",catagory_3_Name" \
",catagory_3_Url" \
",cityName,cityUrl" \
",contactPerson" \
",enterpriseAddr" \
",enterpriseFax" \
",enterpriseMobile" \
",enterpriseName" \
",enterprisePhone" \
",enterpriseUrl" \
",provinceName" \
",url) " \
"values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#print('sqli',sqli)
dataList=[]
for temple in enterprise_collect.find():
print(temple['_id'])
#print('temple',temple)
data=(str(temple['_id']),
temple['catagory_1_Name'],
temple['catagory_1_Url'],
temple['catagory_2_Name'],
temple['catagory_2_Url'],
temple['catagory_3_Name'],
temple['catagory_3_Url'],
temple['cityName'],
temple['cityUrl'],
temple['contactPerson'],
temple['enterpriseAddr'],
temple['enterpriseFax'],
temple['enterpriseMobile'],
temple['enterpriseName'],
temple['enterprisePhone'],
temple['enterpriseUrl'],
temple['provinceName'],
temple['url'])
dataList.append(data)
#print('dataList', dataList)
cur.executemany(sqli,dataList)
#conn.commit()
close_MySQL(cur, conn)
|
[
"470563152@qq.com"
] |
470563152@qq.com
|
3b5c3590e555db3f87a54ef3874064e2167e7ede
|
8c922f362fdb28782502eb91176e686df8142087
|
/src/novaposhta/views.py
|
1747e1c8e0bd35d9e525700883a05c24ebc09d3d
|
[] |
no_license
|
JaVood/pasta_family
|
94d981088b8183845e8f12b3fab4c99bd651e3bf
|
fc6698e388c5e4ac5a0c58a93a610cbf4000e58a
|
refs/heads/master
| 2020-05-02T12:35:02.403867
| 2019-03-27T09:37:51
| 2019-03-27T09:37:51
| 177,957,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 646
|
py
|
from django.http import JsonResponse, HttpResponse
from django.contrib.auth.decorators import login_required
from novaposhta.lib import search_warehouses, refresh_warehouses, refresh_areas, refresh_cities, refresh_all
@login_required
def refresh(request):
refresh_warehouses()
refresh_areas()
refresh_cities()
refresh_all()
return HttpResponse('Warehouses were successfully refreshed')
def autocomplete(request):
query = request.GET.get('query')
suggestions = [w.full_name for w in search_warehouses(query, limit=10)]
return JsonResponse({
'query': query,
'suggestions': suggestions
})
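# Example exchange (illustrative values): GET /autocomplete?query=Kyiv returns
# {"query": "Kyiv", "suggestions": ["Kyiv Warehouse 1", ...]}, with at most
# ten suggestions per the limit above.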
|
[
"javood@JaVood.local"
] |
javood@JaVood.local
|
f3b263e9f663d8a6408e7f735bb694cdad3097e4
|
8b9989ea9e96b20eecaf2132fff1a7d3ef22aad9
|
/Length_of_Last_Word.py
|
67668bc7c2fe7a5c6514a02be5f998d1d7f9d076
|
[] |
no_license
|
useyourfeelings/leetcode
|
421a113e3b46208e98b573f83b4518ad06251856
|
4847f7f2d50d82f56932491426e4a948687d82ea
|
refs/heads/master
| 2021-01-01T20:06:22.766922
| 2015-02-17T05:53:41
| 2015-02-17T05:53:41
| 30,801,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
class Solution:
# @param s, a string
# @return an integer
def lengthOfLastWord(self, s):
s = s.rstrip()
length = len(s)
if length == 0:
return 0
        return len(s.split(' ')[-1].lstrip())
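# Example (illustrative): Solution().lengthOfLastWord("Hello World  ") == 5;
# trailing spaces are stripped first, then the last token is measured.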
|
[
"raidercodebear@gmail.com"
] |
raidercodebear@gmail.com
|
545da36839f6b07f36ef3b328c0fdcda8d0d1c9f
|
5d680ec506efe6b6e743fd3fb0ba7554a341028e
|
/andreasmusic/pitches.py
|
cb49af2170184da88293b5062e3c35449d562f27
|
[] |
no_license
|
andreasjansson/andreasmusic
|
200e0252fe3ca33b1e58dc5a1ccb460fbc061212
|
f3906a9588e066b6aceb271613fcf674b4b55890
|
refs/heads/master
| 2021-01-10T19:01:52.006316
| 2014-08-22T06:07:17
| 2014-08-22T06:07:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,867
|
py
|
import sys
import numpy as np
from collections import namedtuple
Note = namedtuple('Note', ['name', 'fq', 'midi_pitch'])
NOTE_NAMES = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
ENHARMONIC_EQUIVALENTS = {
'C#': 'Db',
'Db': 'C#',
'D#': 'Eb',
'Eb': 'D#',
'E' : 'Fb',
'Fb': 'E',
'E#': 'F',
'F' : 'E#',
'F#': 'Gb',
'Gb': 'F#',
'G#': 'Ab',
'Ab': 'G#',
'A#': 'Bb',
'Bb': 'A#',
'B' : 'Cb',
'Cb': 'B',
'B#': 'C',
'C' : 'B#',
}
MIDI_FREQS = {}
def _setup():
for octave in range(0, 7):
for i, note_name in enumerate(NOTE_NAMES):
dist_from_a = (octave - 3) * 12 + i - 9
fq = 440 * np.power(2, dist_from_a / 12.0)
midi_pitch = (octave + 1) * 12 + i
MIDI_FREQS[midi_pitch] = fq
            note_names = [note_name]
            if note_name in ENHARMONIC_EQUIVALENTS:
                note_names.append(ENHARMONIC_EQUIVALENTS[note_name])
            for n in note_names:
name = '%s%d' % (n, octave)
note = Note(name, fq, midi_pitch)
setattr(sys.modules[__name__], name.replace('#', '_'), note)
_setup()
class UnknownNote(Exception): pass
def note_number(note_name):
if note_name in NOTE_NAMES:
return NOTE_NAMES.index(note_name)
elif note_name in ENHARMONIC_EQUIVALENTS:
return NOTE_NAMES.index(ENHARMONIC_EQUIVALENTS[note_name])
raise UnknownNote(note_name)
def note_name(note_number):
if note_number < 0:
raise UnknownNote(note_number)
name = NOTE_NAMES[note_number % 12]
octave = int(note_number / 12)
return '%s%d' % (name, octave)
def pitch_to_freq(pitch):
return MIDI_FREQS[pitch]
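# Usage sketch (values follow this module's own numbering, in which octave 3,
# note A is the 440 Hz reference: dist_from_a == 0 there):
#   A3.fq == 440.0 and A3.midi_pitch == 57, so pitch_to_freq(57) == 440.0
#   note_number('Bb') == note_number('A#') == 10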
|
[
"andreas.s.t.jansson@gmail.com"
] |
andreas.s.t.jansson@gmail.com
|
bc96466615bc972f927d839ba7be56459ff8f060
|
2ecfe901f9b955d9f1ce32c80d5342f345e7f986
|
/py3oauth2/tests/test_refreshtokengrant.py
|
c8e2350fafa78b73c78713d59cbcb6daaa4f1252
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
putyta/py3oauth2
|
731b6f6919be6b98703bcded0e0d2659fcc2020f
|
060dc6f896382ae74842126df011e92bb9fb7146
|
refs/heads/master
| 2021-01-14T08:03:52.853376
| 2014-12-03T07:26:03
| 2014-12-03T07:26:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,233
|
py
|
# -*- coding: utf-8 -*-
import contextlib
import uuid
from examples.models import (
Owner,
Store,
)
from py3oauth2.refreshtokengrant import RefreshTokenRequest
from py3oauth2.tests import (
BlindAuthorizationProvider,
mock,
TestBase,
)
class TestRefreshTokenRequest(TestBase):
def setUp(self):
self.store = Store()
self.client = self.make_client()
self.store.persist_client(self.client)
self.owner = Owner(str(uuid.uuid4()))
self.access_token = self.store.issue_access_token(self.client,
self.owner,
{'view', 'write'})
def test_answer_access_denied(self):
from py3oauth2.errors import AccessDenied
req = RefreshTokenRequest()
req.update({
'grant_type': 'refresh_token',
'refresh_token': 'unknown_refresh_token',
})
provider = BlindAuthorizationProvider(self.store)
with self.assertRaises(AccessDenied):
req.answer(provider, self.owner)
def test_answer_unauthorized_client(self):
from py3oauth2.errors import UnauthorizedClient
from py3oauth2.provider import AuthorizationProvider
req = RefreshTokenRequest()
req.update({
'grant_type': 'refresh_token',
'refresh_token': self.access_token.get_refresh_token(),
})
provider = AuthorizationProvider(self.store)
with contextlib.ExitStack() as stack:
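            # ExitStack stacks the mock patch and the assertRaises context manager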
stack.enter_context(mock.patch.object(provider, 'authorize_client',
return_value=False))
stack.enter_context(self.assertRaises(UnauthorizedClient))
req.answer(provider, self.owner)
def test_answer_store_raises_error_exception(self):
from py3oauth2.errors import AccessDenied
req = RefreshTokenRequest()
req.update({
'grant_type': 'refresh_token',
'refresh_token': self.access_token.get_refresh_token(),
})
self.store.issue_access_token = mock.Mock(side_effect=AccessDenied())
provider = BlindAuthorizationProvider(self.store)
try:
req.answer(provider, self.owner)
except AccessDenied as why:
self.assertIs(why.request, req)
else:
self.fail()
def test_answer(self):
req = RefreshTokenRequest()
req.update({
'grant_type': 'refresh_token',
'refresh_token': self.access_token.get_refresh_token(),
})
provider = BlindAuthorizationProvider(self.store)
resp = req.answer(provider, self.owner)
self.assertIsInstance(resp, req.response)
token = self.store.get_access_token(resp.access_token)
self.assertIsNotNone(token)
self.assertEqual(resp.token_type, token.get_type())
self.assertEqual(resp.expires_in, token.get_expires_in())
self.assertEqual(provider.normalize_scope(resp.scope),
token.get_scope())
def test_answer_invalid_scope_1(self):
from py3oauth2.errors import InvalidScope
provider = BlindAuthorizationProvider(self.store)
req = RefreshTokenRequest()
req.update({
'grant_type': 'refresh_token',
'refresh_token': self.access_token.get_refresh_token(),
'scope': 'view write admin',
})
with self.assertRaises(InvalidScope):
req.answer(provider, self.owner)
def test_answer_invalid_scope_2(self):
from py3oauth2.errors import InvalidScope
access_token = self.store.issue_access_token(self.client,
self.owner,
{'write'})
provider = BlindAuthorizationProvider(self.store)
req = RefreshTokenRequest()
req.update({
'grant_type': 'refresh_token',
'refresh_token': access_token.get_refresh_token(),
'scope': 'view',
})
with self.assertRaises(InvalidScope):
req.answer(provider, self.owner)
|
[
"kohei.yoshida@gehirn.co.jp"
] |
kohei.yoshida@gehirn.co.jp
|
745923603c9c69d1a0fb4c0aaea41d9fd536dda3
|
8dbf1dd411a4f1b4c9b3c6c5a5cdbe40404aa3ae
|
/polls/migrations/0001_initial.py
|
d1e1b2ee96c7de3f72901a3820f02cd7e6f30587
|
[
"BSD-3-Clause"
] |
permissive
|
marcaurele/debian-packaging-for-django
|
31ac3005cd820979a472827ad2422ec365b7bf83
|
b0b8431f801a8cc5f068a6570d30939d40d333e7
|
refs/heads/master
| 2021-08-23T09:10:58.761508
| 2017-12-04T12:50:02
| 2017-12-04T12:50:02
| 112,287,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,230
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-27 16:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
),
]
|
[
"m@brothier.org"
] |
m@brothier.org
|
48b9d26fb186dac78386c6dfe0f315b7e5cdd08d
|
662232c0bd3aa31dd0f80b2a07c7d6342b672e68
|
/src/image_crawler.py
|
64b6684fe3f0a970f4047af8df08f1022199fcb9
|
[
"MIT"
] |
permissive
|
quqixun/ImageCrawler
|
4373c01b6afc79d5163f385e1c17d25df6a7e456
|
98205ae74f5e32ac90a04b902c0f49eb165d5a63
|
refs/heads/master
| 2022-12-16T00:02:02.000676
| 2021-03-19T08:02:53
| 2021-03-19T08:02:53
| 164,621,794
| 25
| 7
|
MIT
| 2022-12-07T03:03:38
| 2019-01-08T10:13:11
|
Python
|
UTF-8
|
Python
| false
| false
| 3,285
|
py
|
import os
import ssl
import json
import time
import pandas as pd
from tqdm import tqdm
from selenium import webdriver
class ImageCrawler(object):
SETTINGS = {
'baidu': {'url': 'https://image.baidu.com/search/index?tn=baiduimage&word=${KEYWORD}',
'see_more': None,
'xpath': '//div[@id="imgContainer"]//li[@class="imgitem"]',
'item': 'data-objurl',
'item_attr': None},
'bing': {'url': 'https://www.bing.com/images/search?q=${KEYWORD}',
'see_more': '//div[@class="mm_seemore"]/a[@class="btn_seemore"]',
'xpath': '//div[@class="imgpt"]/a[@class="iusc"]',
'item': 'm',
'item_attr': 'murl'},
'google': {'url': 'https://www.google.com.hk/search?q=${KEYWORD}&source=lnms&tbm=isch',
'see_more': '//*[@id="smb"]',
'xpath': '//div[contains(@class,"rg_meta")]',
'item': 'innerHTML',
'item_attr': 'ou'}
}
def __init__(self, engine='google'):
self.engine = engine
self.url = self.SETTINGS[engine]['url']
self.see_more = self.SETTINGS[engine]['see_more']
self.xpath = self.SETTINGS[engine]['xpath']
self.item = self.SETTINGS[engine]['item']
self.item_attr = self.SETTINGS[engine]['item_attr']
self.image_links = set()
self._init_ssl()
return
def run(self, keyword, n_scroll):
self.n_scroll = n_scroll
self.keyword = keyword
print('Searching keyword: ', keyword)
print('Searching engine: ', self.engine)
self._generate_links()
print()
return
def save_links(self, save_dir, file_name):
self._create_dir(save_dir)
links_file = os.path.join(save_dir, file_name)
links_df = pd.DataFrame(data=list(self.image_links),
columns=['links'])
links_df.to_csv(links_file, index=False)
return
    def _init_ssl(self):
        # disable certificate verification for HTTPS image hosts;
        # assign the context factory itself, not the context it returns
        ssl._create_default_https_context = \
            ssl._create_unverified_context
def _generate_links(self):
browser_driver = webdriver.Chrome()
browser_driver.get(self.url.replace('${KEYWORD}', self.keyword))
for _ in tqdm(range(self.n_scroll), ncols=70):
browser_driver.execute_script('window.scrollBy(0, 1000000)')
time.sleep(1)
if self.see_more is not None:
try:
browser_driver.find_element_by_xpath(self.see_more).click()
except Exception as e:
print('Error:', str(e))
image_blocks = browser_driver.find_elements_by_xpath(self.xpath)
for image_block in image_blocks:
image_link = image_block.get_attribute(self.item)
if self.item_attr is not None:
try:
image_link = json.loads(image_link)[self.item_attr]
except Exception as e:
print('Error:', str(e))
self.image_links.add(image_link)
browser_driver.quit()
return
def _create_dir(self, dir_path):
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
|
[
"quqixun@icarbonx.com"
] |
quqixun@icarbonx.com
|
9710326985b7fbf02113f33f4cf94e5cdee47478
|
3c5d86d087ce526ac3456f2fa443c71cc79f0e35
|
/qa/rpc-tests/fundrawtransaction-hd.py
|
c844e7b3ce3d68c3afd1a094aca48c31ffbb0b23
|
[
"MIT"
] |
permissive
|
perfectblockchain/coin-core
|
937577b2836fb032283fa5e90f0dac9abf524f9d
|
b79deef27798fb51f0140541f2493807efa893b2
|
refs/heads/master
| 2020-03-25T04:34:54.900739
| 2018-09-12T10:44:52
| 2018-09-12T10:44:52
| 143,402,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,445
|
py
|
#!/usr/bin/env python2
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(4, self.options.tmpdir, [['-usehd=1'], ['-usehd=1'], ['-usehd=1'], ['-usehd=1']])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
print "Mining blocks..."
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
        # If the fee's positive delta is higher than this value the tests will fail;
        # a negative delta always fails the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
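        # e.g. a relay fee of 0.00001 per kB gives feeTolerance = 2 * 0.00001 / 1000 = 0.00000002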
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(2000)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
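        # watch-only: node3 learns the pubkey (and rescans) but never holds the private key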
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 22 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26, self.nodes[1].getnewaddress() : 25 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(50) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
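        # chars 82-83 hold the scriptSig length varint ("00" for an unsigned input);
        # "0100" sets that length to 1 and makes the scriptSig a single 0x00 byte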
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60, self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 10}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
raise AssertionError("Spent more than available")
except JSONRPCException as e:
assert("Insufficient" in e.error['message'])
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():11,self.nodes[1].getnewaddress():12,self.nodes[1].getnewaddress():1,self.nodes[1].getnewaddress():13,self.nodes[1].getnewaddress():2,self.nodes[1].getnewaddress():3}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 12 RIZ to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 12)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('11.0000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.nodes.pop(1)
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(4, self.options.tmpdir, [['-usehd=1'], ['-usehd=1'], ['-usehd=1'], ['-usehd=1']])
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
try:
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('Keypool ran out' in e.error['message'])
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(2) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
try:
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 12)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('walletpassphrase' in e.error['message'])
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 100)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
        # make sure funds are received at node0
assert_equal(oldBalance+Decimal('511.0000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('500.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
if __name__ == '__main__':
RawTransactionsTest().main()
|
[
"coinbuildhelp@gmail.com"
] |
coinbuildhelp@gmail.com
|
6b93d035eb85106993e327a9d8ba4d5830bc4b5d
|
6ab3d02c6b5426cd122b3d3c7b31faee7ea917d4
|
/hashmap_uncommonChar.py
|
16b0e41f244203ec15510a06985977b9d8972898
|
[] |
no_license
|
AishwaryalakshmiSureshKumar/DS-Algo
|
e54967ed24c641059fe15b286359f1b71141eeff
|
a624b29182c92b5fa8017aae597eb4ad2475deae
|
refs/heads/main
| 2023-04-21T17:17:10.342833
| 2021-04-18T18:03:57
| 2021-04-18T18:03:57
| 356,888,335
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,148
|
py
|
#code
def uncommonChar(str1, str2):
ll = []
for i in str1:
if i not in str2 and i not in ll:
ll.append(i)
for i in str2:
if i not in str1 and i not in ll:
ll.append(i)
ll = sorted(ll)
for i in ll:
        print(i, end='')
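# The commented block below is an alternative that marks characters in a fixed
# 26-slot array, running in O(len(str1) + len(str2)) instead of nested scans: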
'''MAX_CHAR = 26
def uncommonChar(str1, str2):
result = [0]*MAX_CHAR
for i in range(0, MAX_CHAR):
result[i] = 0
l1 = len(str1)
l2 = len(str2)
for i in range(l1):
result[ord(str1[i])-ord('a')]=1
for i in range(l2):
if result[ord(str2[i])-ord('a')]==1 or result[ord(str2[i])-ord('a')]==-1:
result[ord(str2[i])-ord('a')]=-1
else:
result[ord(str2[i])-ord('a')]=2
for i in range(0, MAX_CHAR):
if result[i]==1 or result[i]==2:
print(chr(i + ord('a')), end=' ')'''
case = int(input())
for i in range(case):
str1 = str(input())
str2 = str(input())
uncommonChar(str1, str2)
|
[
"noreply@github.com"
] |
AishwaryalakshmiSureshKumar.noreply@github.com
|
50c787eff65d485ac370b05518c45b8ed17ebf5f
|
9e103392e152873fcad9e3d0f0c18ca9507a0ddb
|
/accounts/admin.py
|
b5f060489948390e62ac6e006dd83507575c5a5d
|
[] |
no_license
|
do324/instaclone
|
9911a300809df1d4d7acbdf69902310b4829ccc4
|
2e07cd68d0849b8d27cc3b46917a2930ec56bd36
|
refs/heads/master
| 2022-05-28T17:45:13.398274
| 2020-05-06T08:46:15
| 2020-05-06T08:46:15
| 260,907,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
from django.contrib import admin
from .models import Profile, Follow
# Register your models here.
class FollowInline(admin.TabularInline):
model = Follow
fk_name = 'from_user'
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
list_display = ['id', 'nickname', 'user']
list_display_links = ['nickname', 'user']
search_fields = ['nickname']
inlines = [FollowInline,]
@admin.register(Follow)
class FollowAdmin(admin.ModelAdmin):
list_display = ['from_user', 'to_user', 'created_at']
list_display_links = ['from_user', 'to_user', 'created_at']
|
[
"doyun5114@gmail.com"
] |
doyun5114@gmail.com
|
fb1355a16b36ac4d9c03852ba9890a86a3dc94af
|
7fe6407014fcfbab69c2ef6a56e6864227f66e2c
|
/Game.py
|
7faab7b0b0dd6a6b0ffc3456dbba01712fc8f560
|
[
"MIT"
] |
permissive
|
enosal/jubal-pygame
|
3951effc18f723052824d6d44f78163c29c5a903
|
331bf2be2f3e1165653be8a4dc02fbf0c6277da3
|
refs/heads/master
| 2021-01-17T16:15:03.037529
| 2014-04-12T04:18:59
| 2014-04-12T04:18:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,483
|
py
|
#!/usr/bin/env python
import pygame, sys, pyganim, os
from pygame.locals import *
from Player import Player
from Input import Input
from Tile import Tile
pygame.init()
FPS = 30 # frames per second settings
fpsClock = pygame.time.Clock()
DURATION = 0.1
# Screen size
SCREEN_X=400
SCREEN_Y=400
# This is the length of the sprite
LEN_SPRT_X=64
LEN_SPRT_Y=64
# This is where the sprite is found on the sheet
SPRT_RECT_X=0
SPRT_RECT_Y=LEN_SPRT_Y
def main():
# Determine assets
sprite_asset, bullet_sound_asset = DetermineAssets()
# Load sprite assets
IMAGESDICT, animObjs = LoadSpriteAssets(sprite_asset)
# Main game surface
DISPLAYSURF = pygame.display.set_mode((SCREEN_X, SCREEN_Y)) #Make the screen
# Colors
BLACK = (0,0,0)
# Calculate starting position of player
startX = SCREEN_X - LEN_SPRT_X
startY = SCREEN_Y - LEN_SPRT_Y
# Hold info on keys pressed, held, released
keyinput = Input()
# Initialize gamemap and Player
player = Player(IMAGESDICT, animObjs, bullet_sound_asset)
player.rect.topleft = startX, startY
# Add tiles
startx = 0
starty = SCREEN_Y - LEN_SPRT_Y/2
tile = Tile(IMAGESDICT['ground'])
tile.rect.topleft = startx, starty
# Sprite Groups
allsprites = pygame.sprite.RenderPlain(player)
environment = pygame.sprite.RenderPlain(tile)
# Start game loop
while True:
# Clear key info
keyinput.clearKeys()
# Draw screen black
DISPLAYSURF.fill(BLACK)
# Check for game events
for event in pygame.event.get():
# Reset player direction
if event.type == QUIT:
pygame.quit()
sys.exit()
elif event.type == KEYDOWN:
# Handle key presses
keyinput.keyDownEvent(event.key)
elif event.type == KEYUP:
keyinput.keyUpEvent(event.key)
# Player horizontal logic
if keyinput.isKeyHeld(K_LEFT) and keyinput.isKeyHeld(K_RIGHT):
player.stopMoving()
elif keyinput.isKeyHeld(K_LEFT):
player.moveLeft()
elif keyinput.isKeyHeld(K_RIGHT):
player.moveRight()
elif keyinput.wasKeyPressed(K_SPACE):
# Play player shooting animation
player.shoot()
elif keyinput.wasKeyPressed(K_ESCAPE):
pygame.quit()
sys.exit()
elif not player.shooting:
player.stopMoving()
# Vertical logic
if keyinput.wasKeyPressed(K_UP):
player.jump()
# Check for collisions between player and environment
collision_list = pygame.sprite.spritecollide(player, environment, False)
# Update
allsprites.update()
environment.update()
# Draw
allsprites.draw(DISPLAYSURF)
environment.draw(DISPLAYSURF)
pygame.display.update()
fpsClock.tick(FPS)
# Retrieves graphic and sound assets; os.path.join keeps the paths portable
# across Windows and Unix/Linux
def DetermineAssets():
    sheet_path = os.path.join('assets', 'jubal_64.png')
    sound_path = os.path.join('assets', 'bullet.wav')
    if not os.path.isfile(sheet_path):
        # fail loudly instead of hitting a NameError on the return below
        raise IOError('Missing sprite sheet: %s' % sheet_path)
    SHEET = pygame.image.load(sheet_path)
    bullet_sound = pygame.mixer.Sound(sound_path)
    return SHEET, bullet_sound
def LoadSpriteAssets(SHEET):
# Global dictionary that contains all static images
IMAGESDICT = {
'j_normal': SHEET.subsurface(pygame.Rect(0, 0, LEN_SPRT_X, LEN_SPRT_Y)),
'j_rightface': SHEET.subsurface(pygame.Rect(SPRT_RECT_X, SPRT_RECT_Y, LEN_SPRT_X, LEN_SPRT_Y)),
'j_leftface': SHEET.subsurface(pygame.Rect(SPRT_RECT_X+(LEN_SPRT_X*5), SPRT_RECT_Y, LEN_SPRT_X, LEN_SPRT_Y)),
'bullet': SHEET.subsurface(pygame.Rect(SPRT_RECT_X+(LEN_SPRT_X*8), SPRT_RECT_Y*3, 2, 2)),
'ground': SHEET.subsurface(pygame.Rect(LEN_SPRT_X*4, SPRT_RECT_Y*5, LEN_SPRT_X, LEN_SPRT_Y/2)),
}
# Define the different animation types
animTypes = 'right_walk left_walk shoot_right shoot_left jump_right jump_left right_face left_face normal'.split()
# These tuples contain (base_x, base_y, numOfFrames)
# numOfFrames is in the x-direction
animTypesInfo = {
'right_walk': (SPRT_RECT_X+LEN_SPRT_X, SPRT_RECT_Y, 4),
'left_walk': (SPRT_RECT_X+(LEN_SPRT_X*6), SPRT_RECT_Y, 4),
'shoot_right': (LEN_SPRT_X, 0, 4),
'shoot_left': (LEN_SPRT_X*5, 0, 4),
'jump_right': (0, LEN_SPRT_Y*2, 7),
'jump_left': (0, LEN_SPRT_Y*3, 7),
'normal': (0, 0, 1),
'right_face': (0, LEN_SPRT_Y, 1),
'left_face': (0, LEN_SPRT_Y*3, 1)
}
animObjs = {}
for animType in animTypes:
xbase = (animTypesInfo[animType])[0]
ybase = (animTypesInfo[animType])[1]
numFrames = (animTypesInfo[animType])[2]
imagesAndDurations = [(SHEET.subsurface(pygame.Rect(xbase+(LEN_SPRT_X*num), ybase, LEN_SPRT_X, LEN_SPRT_Y)), DURATION) for num in range(numFrames)]
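        # each frame is shown for DURATION seconds (0.1 s, i.e. 10 frames per second)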
loopforever = True
if(animType == 'shoot_right' or animType == 'shoot_left'):
loopforever = False
animObjs[animType] = pyganim.PygAnimation(imagesAndDurations, loop=loopforever)
return IMAGESDICT, animObjs
if __name__ == "__main__":
main()
|
[
"aavina2@gmail.com"
] |
aavina2@gmail.com
|
789f0bc4c608fc5a4c4e46b52cb7634a18ec644f
|
fee6bb5e775c41d7c9e820a10ba785c526cb5fbf
|
/PythonAPI/CrudApp/serializers.py
|
69405f83dda5afbe713496488fb7a7276f2b56dd
|
[] |
no_license
|
jaovw/pbl7
|
c6d898d06086fc506f9cfa984a06e226a4ad4269
|
76b9541246de5f7c9e27cd8f0e1500d6d86f6d14
|
refs/heads/main
| 2023-08-30T00:32:05.254036
| 2021-11-02T22:27:54
| 2021-11-02T22:27:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 600
|
py
|
from rest_framework import serializers
from CrudApp.models import Editora, EditoraLivro, Livro
class EditoraSerializer(serializers.ModelSerializer):
class Meta:
model = Editora
fields = ('EditoraId', 'NomeEditora', 'LocalEditora')
class LivroSerializer(serializers.ModelSerializer):
class Meta:
model = Livro
        fields = ('LivroId',)  # trailing comma makes this a one-element tuple, not a string
class EditoraLivroSerializer(serializers.ModelSerializer):
class Meta:
model = EditoraLivro
fields = ('Id_Editora', 'Id_Livro')
|
[
"gabriel_fsbs2@hotmail.com"
] |
gabriel_fsbs2@hotmail.com
|
3ec85d43915036c75522d11ebe231715c5e0a19c
|
b08a5bb3f0b570236774a85230533532c0343389
|
/swami-control/usr/lib/python2.7/dist-packages/swami_startupapps/swami_startupapps.py
|
527df86ffbc8f7596ea1a21e755a53da5be0af52
|
[] |
no_license
|
BodhiDev/bodhi5packages
|
007d262b9367f698159ae41fe4ba8e4fa4a0e3ce
|
2581afa2dcf7145fc683cb5275c2a012b9c687ac
|
refs/heads/master
| 2022-10-08T20:28:10.470593
| 2022-09-23T05:36:54
| 2022-09-23T05:36:54
| 134,073,827
| 1
| 3
| null | 2021-03-31T11:42:21
| 2018-05-19T15:24:05
|
Python
|
UTF-8
|
Python
| false
| false
| 11,254
|
py
|
#Moksha startup applications module for the Swami Control Panel
import os
from efl.evas import EVAS_HINT_EXPAND, EVAS_HINT_FILL
from efl import elementary
from efl.elementary.button import Button
from efl.elementary.box import Box
from efl.elementary.entry import Entry
from efl.elementary.icon import Icon
from efl.elementary.image import Image
from efl.elementary.list import List, ListItem
from efl.elementary.frame import Frame
from efl.elementary.flip import Flip, ELM_FLIP_ROTATE_YZ_CENTER_AXIS
from efl.elementary.popup import Popup
from elmextensions import StandardButton, SearchableList
EXPAND_BOTH = EVAS_HINT_EXPAND, EVAS_HINT_EXPAND
EXPAND_HORIZ = EVAS_HINT_EXPAND, 0.0
FILL_BOTH = EVAS_HINT_FILL, EVAS_HINT_FILL
FILL_HORIZ = EVAS_HINT_FILL, 0.5
ALIGN_CENTER = 0.5, 0.5
UserHome = os.path.expanduser("~")
StartupApplicationsFile = "%s/.e/e/applications/startup/.order"%UserHome
StartupCommandsFile = "%s/.e/e/applications/startup/startupcommands"%UserHome
ApplicationPaths = [ "/usr/share/applications/",
"%s/.local/share/applications/"%UserHome]
class SwamiModule(Box):
def __init__(self, rent):
Box.__init__(self, rent)
self.parent = rent
#This appears on the button in the main swmai window
self.name = "Startup Applications"
#The section in the main window the button is added to
self.section = "Applications"
#Search terms that this module should appear for
self.searchData = ["startup", "command", "applications", "apps"]
#Command line argument to open this module directly
self.launchArg = "--startupapps"
#Should be none by default. This value is used internally by swami
self.button = None
self.icon = Icon(self, size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
#Use FDO icons -> http://standards.freedesktop.org/icon-naming-spec/latest/ar01s04.html
self.icon.standard_set('system-run')
self.icon.show()
self.mainBox = Box(self, size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
self.mainBox.show()
buttonBox = Box(self, size_hint_weight = EXPAND_HORIZ, size_hint_align = FILL_BOTH)
buttonBox.horizontal = True
buttonApply = StandardButton(self, "Apply", "ok", self.applyPressed)
buttonApply.show()
buttonFlip = StandardButton(self, "Startup Commands", "preferences-system", self.flipPressed)
buttonFlip.show()
buttonReturn = StandardButton(self, "Back", "go-previous", self.returnPressed)
buttonReturn.show()
buttonBox.pack_end(buttonApply)
buttonBox.pack_end(buttonFlip)
buttonBox.pack_end(buttonReturn)
buttonBox.show()
startupApplications = []
with open(StartupApplicationsFile) as startupFile:
for line in startupFile:
startupApplications.append(line.rstrip())
desktopFiles = []
for ourPath in ApplicationPaths:
desktopFiles += [os.path.join(dp, f) for dp, dn, filenames in os.walk(ourPath) for f in filenames if os.path.splitext(f)[1] == '.desktop']
self.startupList = startupList = List(self, size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
self.applicationsList = applicationsList = SearchableList(self, size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
startupToAdd = []
applicationsToAdd = []
for d in desktopFiles:
try:
with open(d) as desktopFile:
fileName = d.split("/")[-1]
icon = None
for line in desktopFile:
if line[:5] == "Name=":
name = line[5:][:-1]
if line[:5] == "Icon=":
icon = line[5:].strip()
try:
iconObj = Icon(self, standard=icon, size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
except:
iconObj = Icon(self, standard="preferences-system", size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
icon = None
if fileName in startupApplications:
startupToAdd.append([name, iconObj, fileName, icon])
else:
applicationsToAdd.append([name, iconObj, fileName, icon])
except IOError:
print('Warning Startup Apps: Unable to open {}'.format(d))
startupToAdd.sort()
applicationsToAdd.sort()
for s in startupToAdd:
ourItem = startupList.item_append(s[0], s[1])
ourItem.data["file"] = s[2]
ourItem.data["icon"] = s[3]
#ourItem.append_to(startupList)
#startupList.item_append(ourItem)
for a in applicationsToAdd:
ourItem = applicationsList.item_append(a[0], a[1])
ourItem.data["file"] = a[2]
ourItem.data["icon"] = a[3]
#ourItem.append_to(applicationsList.ourList)
#applicationsList.item_append(a[0], a[1])
startupList.callback_clicked_double_add(self.startupAppRemove)
applicationsList.callback_clicked_double_add(self.startupAppAdd)
startupList.go()
startupList.show()
applicationsList.show()
startupFrame = Frame(self, size_hint_weight = EXPAND_BOTH, size_hint_align=FILL_BOTH)
startupFrame.text = "Startup Applications"
startupFrame.content_set(startupList)
startupFrame.show()
otherFrame = Frame(self, size_hint_weight = EXPAND_BOTH, size_hint_align=FILL_BOTH)
otherFrame.text = "Other Applications"
otherFrame.content_set(applicationsList)
otherFrame.show()
self.mainBox.pack_end(startupFrame)
self.mainBox.pack_end(otherFrame)
self.backBox = Box(self, size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
self.backBox.show()
self.commandsList = commandsList = List(self, size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
with open(StartupCommandsFile) as scf:
for line in scf:
if line.rstrip()[-3:] == "| \\":
commandsList.item_append(line.rstrip()[:-3])
else:
commandsList.item_append(line.rstrip())
commandsList.callback_clicked_right_add(self.commandRightClicked)
commandsList.go()
commandsList.show()
commandBox = Box(self, size_hint_weight=EXPAND_HORIZ, size_hint_align=(1, 0.5))
commandBox.horizontal = True
commandBox.show()
self.newCommandEntry = newCommandEntry = Entry(self, size_hint_weight = EXPAND_HORIZ, size_hint_align = FILL_BOTH)
newCommandEntry.single_line = True
newCommandEntry.text = "<i>Type command here</i>"
newCommandEntry.data["default text"] = True
newCommandEntry.callback_clicked_add(self.entryClicked)
newCommandEntry.show()
newCommandButton = StandardButton(self, "Add Command", "add", self.newCmdPressed)
newCommandButton.show()
delCommandButton = StandardButton(self, "Delete Command", "exit", self.delCmdPressed)
delCommandButton.show()
commandBox.pack_end(newCommandButton)
commandBox.pack_end(delCommandButton)
newCommandFrame = Frame(self, size_hint_weight = EXPAND_HORIZ, size_hint_align = FILL_BOTH)
newCommandFrame.text = "Add Startup Command:"
newCommandFrame.content_set(newCommandEntry)
newCommandFrame.show()
self.backBox.pack_end(commandsList)
self.backBox.pack_end(newCommandFrame)
self.backBox.pack_end(commandBox)
self.flip = Flip(self, size_hint_weight=EXPAND_BOTH,
size_hint_align=FILL_BOTH)
self.flip.part_content_set("front", self.mainBox)
self.flip.part_content_set("back", self.backBox)
self.flip.show()
self.pack_end(self.flip)
self.pack_end(buttonBox)
def startupAppRemove(self, lst, itm):
text = itm.text
dataFile = itm.data["file"]
dataIcon = itm.data["icon"]
itm.delete()
if dataIcon:
iconObj = Icon(self, standard=dataIcon, size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
else:
iconObj = Icon(self, standard="preferences-system", size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
ourItem = self.applicationsList.item_append(text, iconObj)
ourItem.data["file"] = dataFile
ourItem.data["icon"] = dataIcon
self.applicationsList.ourList.go()
def startupAppAdd(self, lst, itm):
text = itm.text
dataFile = itm.data["file"]
dataIcon = itm.data["icon"]
itm.delete()
if dataIcon:
iconObj = Icon(self, standard=dataIcon, size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
else:
iconObj = Icon(self, standard="preferences-system", size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
ourItem = self.startupList.item_append(text, iconObj)
ourItem.data["file"] = dataFile
ourItem.data["icon"] = dataIcon
self.startupList.go()
def flipPressed(self, btn):
if btn.text == "Startup Commands":
btn.text = "Startup Applications"
else:
btn.text = "Startup Commands"
self.flip.go(ELM_FLIP_ROTATE_YZ_CENTER_AXIS)
    def commandRightClicked(self, lst, itm):
        # delCmdMenu is never created in this module; guard the lookup so a
        # right click does not raise AttributeError
        if hasattr(self, 'delCmdMenu'):
            self.delCmdMenu.move(1, 1)
            self.delCmdMenu.show()
def entryClicked(self, entry):
if entry.data["default text"]:
entry.data["default text"] = False
entry.text = ""
def newCmdPressed(self, btn):
self.commandsList.item_append(self.newCommandEntry.text)
self.newCommandEntry.text = ""
self.commandsList.go()
def delCmdPressed(self, btn):
selectedCommand = self.commandsList.selected_item_get()
selectedCommand.delete()
def applyPressed(self, btn):
with open(StartupApplicationsFile, 'w') as saf:
for i in self.startupList.items_get():
saf.write(i.data["file"])
saf.write("\n")
with open(StartupCommandsFile, 'w') as scf:
lastI = self.commandsList.last_item_get()
for i in self.commandsList.items_get():
if i != lastI:
scf.write(i.text + " | \\ \n")
else:
scf.write(i.text)
p = Popup(self, size_hint_weight=EXPAND_BOTH, timeout=3.0)
p.text = "Changes Successfully Applied"
p.show()
def returnPressed(self, btn):
self.parent.returnMain()
|
[
"ylee@bodhilinux.com"
] |
ylee@bodhilinux.com
|
db782fe99f4bfbce673138a8c29e635dd5b552ad
|
d24a6e0be809ae3af8bc8daa6dacfc1789d38a84
|
/ABC/ABC251-300/ABC290/F.py
|
c84a613c4d9104d5bde110858967956466e6c904
|
[] |
no_license
|
k-harada/AtCoder
|
5d8004ce41c5fc6ad6ef90480ef847eaddeea179
|
02b0a6c92a05c6858b87cb22623ce877c1039f8f
|
refs/heads/master
| 2023-08-21T18:55:53.644331
| 2023-08-05T14:21:25
| 2023-08-05T14:21:25
| 184,904,794
| 9
| 0
| null | 2023-05-22T16:29:18
| 2019-05-04T14:24:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,140
|
py
|
MOD = 998244353
def solve(t, case_list):
n = 2 * max(case_list)
factorial = [1] * (n + 1)
factorial_inv = [1] * (n + 1)
for i in range(1, n + 1):
factorial[i] = (factorial[i - 1] * i) % MOD
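    # modular inverse via Fermat's little theorem: x**(MOD - 2) % MOD, valid since MOD is prime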
factorial_inv[-1] = pow(factorial[-1], MOD - 2, MOD)
for i in range(n, 0, -1):
factorial_inv[i - 1] = (factorial_inv[i] * i) % MOD
res_list = []
for k in case_list:
if k >= 3:
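            # binomial coefficients mod p: C(2k-3, k-1) and k * C(2k-4, k-1)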
res = factorial[2 * k - 3] * factorial_inv[k - 1] * factorial_inv[k - 2]
res %= MOD
res += k * factorial[2 * k - 4] * factorial_inv[k - 1] * factorial_inv[k - 3]
res %= MOD
res_list.append(res)
else:
res_list.append(1)
# print(res_list)
return res_list
def main():
t = int(input())
case_list = [int(input()) for _ in range(t)]
res = solve(t, case_list)
for r in res:
print(r)
def test():
assert solve(10, [2, 3, 5, 8, 13, 21, 34, 55, 89, 144]) == [
1, 6, 110, 8052, 9758476, 421903645, 377386885, 881422708, 120024839, 351256142
]
if __name__ == "__main__":
test()
main()
|
[
"cashfeg@gmail.com"
] |
cashfeg@gmail.com
|
09ada11b6a8f3cef5e9f809a0c7cac1bf266055b
|
859e3c2582d38d4bf76363f7695b6003513707ed
|
/Alphabet_Rangoli.py
|
7d3b575c2a6d0c11e5c6880cabf17dfc28caa5a5
|
[] |
no_license
|
tiptop-crazy/hackerrank
|
a1612038eb52e1c9a04c436e05d92619ff014867
|
53fc624db5da822afd875ef58cba5e360ed56dc5
|
refs/heads/master
| 2020-12-04T06:02:08.937639
| 2020-01-04T11:45:03
| 2020-01-04T11:45:03
| 231,645,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,729
|
py
|
"""
You are given an integer, N. Your task is to print an alphabet rangoli of size N. (Rangoli is a form of Indian folk art based on creation of patterns.)
Different sizes of alphabet rangoli are shown below:
#size 3
----c----
--c-b-c--
c-b-a-b-c
--c-b-c--
----c----
#size 5
--------e--------
------e-d-e------
----e-d-c-d-e----
--e-d-c-b-c-d-e--
e-d-c-b-a-b-c-d-e
--e-d-c-b-c-d-e--
----e-d-c-d-e----
------e-d-e------
--------e--------
#size 10
------------------j------------------
----------------j-i-j----------------
--------------j-i-h-i-j--------------
------------j-i-h-g-h-i-j------------
----------j-i-h-g-f-g-h-i-j----------
--------j-i-h-g-f-e-f-g-h-i-j--------
------j-i-h-g-f-e-d-e-f-g-h-i-j------
----j-i-h-g-f-e-d-c-d-e-f-g-h-i-j----
--j-i-h-g-f-e-d-c-b-c-d-e-f-g-h-i-j--
j-i-h-g-f-e-d-c-b-a-b-c-d-e-f-g-h-i-j
--j-i-h-g-f-e-d-c-b-c-d-e-f-g-h-i-j--
----j-i-h-g-f-e-d-c-d-e-f-g-h-i-j----
------j-i-h-g-f-e-d-e-f-g-h-i-j------
--------j-i-h-g-f-e-f-g-h-i-j--------
----------j-i-h-g-f-g-h-i-j----------
------------j-i-h-g-h-i-j------------
--------------j-i-h-i-j--------------
----------------j-i-j----------------
------------------j------------------
The center of the rangoli has the first alphabet letter a, and the boundary has the Nth alphabet letter (in alphabetical order).
"""
"""
You are given an integer, N. Your task is to print an alphabet rangoli of size N. (Rangoli is a form of Indian folk art based on creation of patterns.)
Different sizes of alphabet rangoli are shown below:
#size 3
----c----
--c-b-c--
c-b-a-b-c
--c-b-c--
----c----
#size 5
--------e--------
------e-d-e------
----e-d-c-d-e----
--e-d-c-b-c-d-e--
e-d-c-b-a-b-c-d-e
--e-d-c-b-c-d-e--
----e-d-c-d-e----
------e-d-e------
--------e--------
#size 10
------------------j------------------
----------------j-i-j----------------
--------------j-i-h-i-j--------------
------------j-i-h-g-h-i-j------------
----------j-i-h-g-f-g-h-i-j----------
--------j-i-h-g-f-e-f-g-h-i-j--------
------j-i-h-g-f-e-d-e-f-g-h-i-j------
----j-i-h-g-f-e-d-c-d-e-f-g-h-i-j----
--j-i-h-g-f-e-d-c-b-c-d-e-f-g-h-i-j--
j-i-h-g-f-e-d-c-b-a-b-c-d-e-f-g-h-i-j
--j-i-h-g-f-e-d-c-b-c-d-e-f-g-h-i-j--
----j-i-h-g-f-e-d-c-d-e-f-g-h-i-j----
------j-i-h-g-f-e-d-e-f-g-h-i-j------
--------j-i-h-g-f-e-f-g-h-i-j--------
----------j-i-h-g-f-g-h-i-j----------
------------j-i-h-g-h-i-j------------
--------------j-i-h-i-j--------------
----------------j-i-j----------------
------------------j------------------
The center of the rangoli has the first alphabet letter a, and the boundary has the Nth alphabet letter (in alphabetical order).
"""
import sys
import string
def matrix(N=1, M=1, a='-'):
sp = []
for i in range(N):
sp.append([a] * M)
return sp
def printMatrix(matr, N=1, M=1):
for i in range(N):
for j in range(M):
sys.stdout.write(matr[i][j])
        print()
def print_rangoli(size):
# your code goes here
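    # the grid is (2*size - 1) rows by (4*size - 3) columns; letters sit on the
    # even columns and the pattern is mirrored across both center lines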
alph = list(string.ascii_lowercase)
matr = matrix(2 * size - 1, 4 * size - 3, '-')
for i in range(size):
for j in range(2 * size - 1):
if (j % 2 == 0 and 2 * i + j >= 2 * size - 2):
matr[i][j] = alph[2 * size - 2 - (i + j // 2)]
matr[2 * size - 2 - i][4 * size - 4 - j] = alph[2 * size - 2 - (i + j // 2)]
matr[i][4 * size - 4 - j] = alph[2 * size - 2 - (i + j // 2)]
matr[2 * size - 2 - i][j] = alph[2 * size - 2 - (i + j // 2)]
printMatrix(matr, 2 * size - 1, 4 * size - 3)
if __name__ == '__main__':
n = int(input("Enter positive number:"))
print_rangoli(n)
|
[
"andrei.shewko@gmail.com"
] |
andrei.shewko@gmail.com
|
3e9b8378b946a2f7f7bff6cca9a458bb921a97ab
|
94dcc6470f46734e033dea761e48028f5cf9d3b2
|
/backend/apps/httpproxy/models.py
|
16dfb92768740bb2e93c0ac8a008e2a32b3485f7
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
cnap-cobre/synapse
|
08adf02fb22166b26846265511a800ce92d9bbf1
|
edb850412b6f95d1d4e057674e5cd899ee0b444e
|
refs/heads/master
| 2021-03-24T09:25:01.365514
| 2018-12-12T22:41:38
| 2018-12-12T22:41:38
| 108,348,892
| 1
| 2
|
MIT
| 2018-12-25T09:52:49
| 2017-10-26T02:02:02
|
CSS
|
UTF-8
|
Python
| false
| false
| 3,856
|
py
|
from django.db import models
from django.utils.six.moves import urllib
from django.utils.translation import ugettext as _
class Request(models.Model):
"""
An HTTP request recorded in the database.
Used by the :class:`~httpproxy.recorder.ProxyRecorder` to record all
identifying aspects of an HTTP request for matching later on when playing
back the response.
Request parameters are recorded separately, see
:class:`~httpproxy.models.RequestParameter`.
"""
method = models.CharField(_('method'), max_length=20)
domain = models.CharField(_('domain'), max_length=100)
port = models.PositiveSmallIntegerField(default=80)
path = models.CharField(_('path'), max_length=250)
date = models.DateTimeField(auto_now=True)
querykey = models.CharField(_('query key'), max_length=255, editable=False)
@property
def querystring(self):
"""
The URL-encoded set of request parameters.
"""
return self.parameters.urlencode()
def querystring_display(self):
maxlength = 50
if len(self.querystring) > maxlength:
            return '%s [...]' % self.querystring[:maxlength]
else:
return self.querystring
querystring_display.short_description = 'querystring'
def __unicode__(self):
output = u'%s %s:%d%s' % \
(self.method, self.domain, self.port, self.path)
if self.querystring:
output += '?%s' % self.querystring
        return output[:50]  # TODO: add an ellipsis if truncating
class Meta:
verbose_name = _('request')
verbose_name_plural = _('requests')
unique_together = ('method', 'domain', 'port', 'path', 'querykey')
get_latest_by = 'date'
class RequestParameterManager(models.Manager):
def urlencode(self):
output = []
for param in self.values('name', 'value'):
output.extend([urllib.parse.urlencode(
{param['name']: param['value']}
)])
return '&'.join(output)
class RequestParameter(models.Model):
"""
A single HTTP request parameter for a :class:`~httpproxy.models.Request`
object.
"""
REQUEST_TYPES = (
('G', 'GET'),
('P', 'POST'),
)
request = models.ForeignKey(
Request, verbose_name=_('request'),
related_name='parameters', on_delete=models.CASCADE)
type = models.CharField(max_length=1, choices=REQUEST_TYPES, default='G')
order = models.PositiveSmallIntegerField(default=1)
name = models.CharField(_('name'), max_length=100)
value = models.CharField(_('value'), max_length=250, null=True, blank=True)
objects = RequestParameterManager()
def __unicode__(self):
return u'%d %s=%s' % (self.pk, self.name, self.value)
class Meta:
ordering = ('order',)
verbose_name = _('request parameter')
verbose_name_plural = _('request parameters')
class Response(models.Model):
"""
The response that was recorded in response to the corresponding
:class:`~httpproxy.models.Request`.
"""
request = models.OneToOneField(
Request,
verbose_name=_('request'),
on_delete=models.CASCADE
)
status = models.PositiveSmallIntegerField(default=200)
content_type = models.CharField(_('content type'), max_length=200)
content = models.TextField(_('content'))
@property
def request_domain(self):
return self.request.domain
@property
def request_path(self):
return self.request.path
@property
def request_querystring(self):
return self.request.querystring
def __unicode__(self):
return u'Response to %s (%d)' % (self.request, self.status)
class Meta:
verbose_name = _('response')
verbose_name_plural = _('responses')
|
[
"kevin.dice1@gmail.com"
] |
kevin.dice1@gmail.com
|
bbfe78db4af645877df6f330d30b8cc4bebf4d84
|
75d6c8bc41d3228139c58f9360b18fdd2306cf55
|
/scrapy_app/youtube/youtube/items.py
|
b3d36b8004dfb0f700e5d4f33b4cfd824a6e81ca
|
[] |
no_license
|
memadd/youtubecrawler
|
b5f372176f6516e23621a7e53569cec6f6f72b30
|
bec5b22bf104405c579f044b569dedc0e285cabf
|
refs/heads/main
| 2023-04-30T14:04:53.064650
| 2021-05-03T22:39:06
| 2021-05-03T22:39:06
| 363,543,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class YoutubeItem(scrapy.Item):
# define the fields for your item here like:
url = scrapy.Field()
title = scrapy.Field()
duration = scrapy.Field()
views = scrapy.Field()
thumbnail_url = scrapy.Field()
images_url = scrapy.Field()
|
[
"memad632@gmail.com"
] |
memad632@gmail.com
|
e868a94e30c3ecc3ee54bc639d661943d16f2dc2
|
642759c2986011e1ead0093d3217567b9c6bcb51
|
/dictionary.py
|
551526b571ad2c9b4078b1b715ef5efab6bdafad
|
[] |
no_license
|
VashStampede/LRU_cache
|
7249a039aa57ac41e09acedf407906f1557dea65
|
c7ff66f754f4fac75be8d75f94bfe13a2c8820e6
|
refs/heads/master
| 2020-09-13T07:15:17.743276
| 2019-11-20T16:25:31
| 2019-11-20T16:25:31
| 222,691,807
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 884
|
py
|
import random
QUOTES = [
"There's a huge amount of faith and confidence in the stunt team. ...",
"All of the stunt men - these are the unsung heroes. ...",
"Revenge is a caustic thing. ...",
"When I'm getting ready for a movie, let's just say my diet is 'The Antisocial Diet.'",
"High risk is high adrenaline",
"Without music, life would be a mistake.",
"It is not a lack of love, but a lack of friendship that makes unhappy marriages.",
"That which does not kill us makes us stronger.",
"I'm not upset that you lied to me, I'm upset that from now on I can't believe you.",
"And those who were seen dancing were thought to be insane by those who could not hear the music." ,
"There is always some madness in love. But there is also always some reason in madness." ,
]
def get_random_quote():
return random.choice(QUOTES)
if __name__ == '__main__':
print(get_random_quote())
|
[
"ilyshameal@gmail.com"
] |
ilyshameal@gmail.com
|
8200dd9efd8cd574e153819ddf517425d1c2d3d5
|
e74ccc836611b5131356f6f446f21b2e76c41247
|
/cifar10_classification.py
|
c42749ad11afcb4bbf16e03c06d322727a787d8f
|
[] |
no_license
|
Bruces1998/NeuralNetwork
|
5754096c1be7403b23d12ad7832f939ffcd8b1d6
|
b6ba2c61c0c5671eaa5aac9890101fbb61cb1aa0
|
refs/heads/master
| 2020-06-29T12:27:19.803675
| 2019-08-27T12:51:23
| 2019-08-27T12:51:23
| 200,535,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
import numpy as np
from keras.datasets import cifar10
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import SGD
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report
import matplotlib.pyplot as pyplot
print("[INFO] loading CIFAR-10 data.....")
((trainX, trainY),(testX, testY))=cifar10.load_data()
trainX = trainX.astype("float") / 255.0
testX = testX.astype("float") / 255.0
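# flatten each 32x32x3 image into a 3072-dimensional vector so it can be
# fed to the fully-connected network below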
trainX = trainX.reshape((trainX.shape[0], 3072))
testX = testX.reshape((testX.shape[0], 3072))
le = LabelBinarizer()
trainY = le.fit_transform(trainY)
testY = le.fit_transform(testY)
labelNames = ["airplane", "automobile", "bird", "cat", "deer","dog", "frog", "horse", "ship", "truck"]
model=Sequential()
model.add(Dense(1024, input_shape=(3072,), activation="relu"))
model.add(Dense(512, activation="relu"))
model.add(Dense(10, activation="softmax"))
print("[INFO] training network....")
sgd = SGD(0.1)
model.compile(loss = "categorical_crossentropy", optimizer=sgd, metrics=["accuracy"])
H = model.fit(trainX, trainY, validation_data=(testX, testY), epochs=50, batch_size=32)
print("[INFO] evaluating network....")
predictions = model.predict(testX, batch_size=32)
print(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1), target_names=labelNames))
|
[
"bruces1998@gmail.com"
] |
bruces1998@gmail.com
|
f71e67ea97ed8728bbb2893bbcb69248df256a18
|
b6d6062570f02cb278d6189ab4135cc17e2cc4d4
|
/django_tctip/admin.py
|
62937195148f71f4e88f81d00064bfbbe8517e9a
|
[
"MIT"
] |
permissive
|
mooremok/django-tctip
|
92fb190cb059d8cb2dfc56231fb250914b414978
|
d15a47aab0f9f3e18d407df38fbd81f296cc4bfe
|
refs/heads/master
| 2022-11-23T10:20:06.830429
| 2020-07-20T02:25:17
| 2020-07-20T02:25:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,192
|
py
|
from django.contrib import admin
from .models import Tip
# Register your models here.
@admin.register(Tip)
class TipAdmin(admin.ModelAdmin):
fieldsets = (
('基本设置', {'fields': (('is_on',),
('name', 'minScreenSize'),
('headText', 'siderText'),
('siderTextTop', 'siderBgcolor', 'siderTop'))}),
('公告栏设置', {'fields': (('notice_flag',),
('notice_name', 'notice_title'),
('notice_text',))}),
('支付宝栏设置', {'fields': (('alipay_flag',),
('alipay_name', 'alipay_title'),
('alipay_desc', 'alipay_qrimg'))}),
('微信栏设置', {'fields': (('weixin_flag',),
('weixin_name', 'weixin_title'),
('weixin_desc', 'weixin_qrimg'))}),
('微信群设置', {'fields': (('wechat_flag',),
('wechat_name', 'wechat_title'),
('wechat_desc', 'wechat_qrimg'),
('wechat_icon',))})
)
|
[
"zlwork2014@163.com"
] |
zlwork2014@163.com
|
3836c2b712e9e2ef692b159a8849ce4fafa9da48
|
a6f91bafaca735998fa2400c930aee121918623a
|
/sif/migrations/0002_auto_20160525_1204.py
|
bde59ac09571ce957d1a6a16866ca280877ca1d7
|
[] |
no_license
|
ramonvg/shareitfast
|
0ae99b395710eec3329a822b65e906e126cb4e70
|
761eb4a102dbe7b53ce39822ebb575f444daea73
|
refs/heads/master
| 2020-12-24T19:04:09.148997
| 2016-05-25T14:05:34
| 2016-05-25T14:05:34
| 59,667,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-25 12:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sif', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='file',
name='file',
field=models.FileField(upload_to=''),
),
]
|
[
"rvg@openmailbox.org"
] |
rvg@openmailbox.org
|
31c63861b0e0320ebbcb5ece22a79a725f3fbd66
|
7f3b81cca74eac23f270a9f8e956d0c5a47728e1
|
/Final/Shop/services.py
|
bc101202fb0ba8e99de475ba81083e3664e89dec
|
[] |
no_license
|
dhtien95/cuoiky
|
2540c987a0e3202a292d4ca8bb8c0460e8eda1cd
|
c6566d1ababc2f33e7f3a5d7cd5f2c275012eb05
|
refs/heads/master
| 2020-03-19T23:51:36.116939
| 2018-06-12T06:08:26
| 2018-06-12T06:08:26
| 137,022,923
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,006
|
py
|
import os
from mimetypes import MimeTypes
from django.contrib.auth.models import User
from Shop.models import Song
from api import drive_api
from api.drive_api import downloads_path,createFolder
from sys import argv
downloads_path = os.path.expanduser((os.sep.join(["~","Downloads"])))
class EncodeWAV:
def __init__(self):
self.header_offset =44
self.DELIMITER='$'
self.error_msg=""
self.encoded_file = ''
self.processing_byte_ord = self.header_offset
def encode_file(self, file_path,msg,file_name):
origin_file = open(file_path, 'rb').read()
origin_file = bytearray(origin_file)
self.encoded_file=origin_file
#if b"WAE" in origin_file[8:11]:
        msg_len_str = str(len(msg))  # should also check the case len(msg) > file size
self.hide(msg_len_str + self.DELIMITER) # Insert Len of Msg
self.hide(msg) # Insert Msg
full_path = downloads_path + os.sep + file_name
fh = open(full_path,'wb')
fh.write(self.encoded_file)
fh.close()
return full_path
def hide(self,msg):
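        # embed each character bit-by-bit into the low bits of successive
        # samples, starting right after the 44-byte WAV header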
for c in msg:
c_in_binary = '{0:08b}'.format(ord(c))
for b in c_in_binary:
new_value = self.encoded_file[self.processing_byte_ord] + self.encoded_file[
self.processing_byte_ord] % 2 + int(b)
self.encoded_file[self.processing_byte_ord] = new_value % 256
self.processing_byte_ord += 1
class DecodeWAV:
def __init__(self):
self.msg = ''
self.header_offset = 44
self.DELIMITER = '$'
self.error_msg = ""
self.len_hidden_msg = ""
self.processing_byte_ord = self.header_offset
def decode_file(self, file_path):
encoded_file = open(file_path, 'rb').read()
encoded_file = bytearray(encoded_file)
processing_byte_ord = self.header_offset
# if b"WAV" in encoded_file[8:11]:
# get msg length
temp_byte = ""
while True:
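            # read 8 sample-LSBs per character until the '$' delimiter,
            # which terminates the embedded message-length prefix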
for b in encoded_file[processing_byte_ord:processing_byte_ord + 8]:
temp_byte += (str(b % 2))
decrypted_char = chr(int(temp_byte, 2))
self.msg += decrypted_char
temp_byte = ""
processing_byte_ord += 8
if decrypted_char == '$':
try:
self.len_hidden_msg = int(self.msg[:-1]) # Ignore '$' char at the end
self.msg = ""
break
except ValueError:
return "This file has no Signature"
for i in range(0, self.len_hidden_msg):
for b in encoded_file[processing_byte_ord:processing_byte_ord + 8]:
temp_byte += (str(b % 2))
decrypted_char = chr(int(temp_byte, 2))
self.msg += decrypted_char
temp_byte = ""
processing_byte_ord += 8
return self.msg
# endif
def upload_new_song(user, song_id, file_path, signature=None):
# Get song info
print("Upload new song \nSong path: ", file_path)
song = Song.objects.get(pk=song_id)
name = song.name
author = song.author
price = song.price
    extension = song.extension = file_path.rsplit('.', 1)[1]  # open(file_path).name is just file_path; avoid leaking a file handle
print("Extension: ", extension)
mime_type = MimeTypes()
    content_type = mime_type.guess_type(file_path)[0]  # guess_type (not guess_extension) maps a filename to its MIME type
print("Mime type: ", content_type)
if not user.is_superuser: # if normal user, upload to their own directory
if user.profile.drive_folder_id:
folder_id = user.profile.drive_folder_id
else:
folder_id = drive_api.createFolder(user.username)
user.profile.drive_folder_id = folder_id
user.profile.save()
else: # if superuser upload to shiro store directory
folder_id = drive_api.shiro_store_folder_id
output_filename = name + " - " + author + "." + extension
file_id = drive_api.uploadFile(output_filename, file_path, content_type, folder_id=folder_id)
# Build new song with info
new_song = Song(id=file_id, name=name, author=author, extension=extension, price=price)
if signature:
new_song.signature = signature
if not user.is_superuser:
new_song.owner = user
new_song.save()
user.profile.songs.add(new_song) # Update Archived Song to Profile
user.profile.save()
else:
new_song.save()
return file_id
|
[
"38153453+dhtien95@users.noreply.github.com"
] |
38153453+dhtien95@users.noreply.github.com
|
3f35d71922b13339d2c2ab0ce88a514fd571b483
|
8872abd7028ea45cc84074a4657650261a92fbbc
|
/VigenereCipher/vigenercipher1.py
|
9e2ab10d3b903943da67405ced48ba6dc1c3451f
|
[] |
no_license
|
MatteoGardini1988/opencodes
|
92e775b5d6de238633a777257cd77faaddfdd9c4
|
ee0c9b20e96728aee420b15c3ad0ec503938a6d8
|
refs/heads/main
| 2023-08-24T13:52:15.821362
| 2021-10-13T08:47:21
| 2021-10-13T08:47:21
| 415,842,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,557
|
py
|
import constants as c
class VigenereCipher:
def __init__(self, msg, alphabeth="LATIN"):
# Choose the alphabeth
        assert alphabeth in c.ALPHABETHS.keys(), f"The alphabeth '{alphabeth}' does not exist."
chosen_alphabeth = c.ALPHABETHS[alphabeth]
        # Initialize the message to cipher
check_msg = msg.replace(" ", "")
        assert self.only_admissible_letters(check_msg, chosen_alphabeth), 'You have entered an invalid message. You have to use only letters from the Latin alphabet'
self.msg = msg
self.__key = "" # This is a private content
self.vigmatrix = self.create_vig_cipher_matrix(chosen_alphabeth)
self.cripedmsg = ""
self.was_upper = [x.isupper() for x in self.msg]
@property
def key(self):
return self.__key
    @key.setter
    def key(self, key_val):
        # reject keys with symbols outside the alphabet (assumes the Latin
        # alphabet, matching the original hard-coded check) and require the
        # key to be upper case; the original condition was inverted
        if not self.only_admissible_letters(key_val, c.ALPHABETHS["LATIN"]):
            raise Exception("The key must contain values only from the alphabeth")
        elif key_val.islower():
            raise Exception("The key must be upper case")
        else:
            self.__key = key_val
@staticmethod
def only_admissible_letters(text, alphabeth):
# Check if a text contains only latin-letters
# text is a string
# alphabeth is a list containining symbols of an alphabeth
        # turn the text into upper case
text = text.upper()
# Turn the alphabeth into a set
myalphabeth = set(alphabeth)
mytext = set(text)
return mytext.issubset(myalphabeth)
@staticmethod
def create_vig_cipher_matrix(alphabeth):
# Create the vigenere matrix from a given alphabeth
n = len(alphabeth)
vig_cipher = [None]*n
for i in range(n):
vig_cipher[i] = alphabeth[i:] + alphabeth[0:i]
return vig_cipher
def encriptmsg(self):
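        # Vigenere encryption: for each plaintext letter, the current key
        # letter selects a row of the tabula recta and the plaintext letter
        # selects a column; spaces pass through unchanged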
n = len(self.msg)
msg_to_encript = self.msg.upper()
nkey = len(self.__key)
d = self.vigmatrix[0] # This is my dictionary
j = 0 # This is needed to run over the ciphring string
for i in range(n):
if msg_to_encript[i] == ' ':
ciphred = ' '
else:
val2cip = msg_to_encript[i]
p = j % nkey
j += 1
col = d.index(val2cip)
row = d.index(self.__key[p])
ciphred = self.vigmatrix[row][col]
self.cripedmsg += ciphred
def decipher(self):
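        # decryption walks the same matrix in reverse: the key letter picks
        # the row, the ciphertext letter is located in that row, and its
        # column index gives back the plaintext letter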
n = len(self.msg)
nkey = len(self.__key)
d = self.vigmatrix[0] # This is my dictionary
decripted_msg = ""
j = 0
for i in range(n):
if self.cripedmsg[i] == ' ':
# if there is an empty space you don't have anything to cipher
deciphred = ' '
else:
val2dec = self.cripedmsg[i]
# get the current position of the crypto string
p = j % nkey
j += 1
row = d.index(self.__key[p])
col = self.vigmatrix[row].index(val2dec.upper())
deciphred = self.vigmatrix[0][col]
# Restyle: this respect the input upper/lower case
if not self.was_upper[i]:
deciphred = deciphred.lower()
decripted_msg += deciphred
return decripted_msg
|
[
"noreply@github.com"
] |
MatteoGardini1988.noreply@github.com
|
0a7a15e03741213c91a5d2b06212b2b10de010e8
|
71f39b722f1204738b53e90d8566bcf6da99d494
|
/apps/utils/yunpian.py
|
efd6bd4e99d5ed743b872b2e50e3c07ff167dd63
|
[] |
no_license
|
kingvern/txplatform
|
cd9fc36fe3ba536b7578d734f520d0f091db4b22
|
235465b742d0ba13132f872e0f3818990f232888
|
refs/heads/master
| 2022-12-17T00:03:50.675329
| 2018-11-16T10:02:35
| 2018-11-16T10:02:35
| 149,862,235
| 0
| 0
| null | 2022-11-22T02:53:29
| 2018-09-22T09:17:06
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 732
|
py
|
# _*_ coding: utf-8 _*_
import requests
class YunPian(object):
def __init__(self, api_key):
self.api_key = api_key
self.single_send_url = 'https://sms.yunpian.com/v2/sms/single_send.json'
def send_sms(self, code, mobile):
parmas = {
'apikey': self.api_key,
'mobile': mobile,
'text': '【王远欣】您的验证码是{code}'.format(code=code)
}
        # The text must exactly match the template configured in the Yunpian console, otherwise the SMS cannot be sent!
r = requests.post(self.single_send_url, data=parmas)
print(r)
if __name__ == '__main__':
yun_pian = YunPian('460b7e12332b41a211c21ab4dd4b6481')
yun_pian.send_sms('123456', '18801272770')
|
[
"kingvern@foxmail.com"
] |
kingvern@foxmail.com
|
c5912fceef6b9dcb1f1cc8b305861e72abef1b77
|
20b0eb954615e6953b641cf9122c2212a2b32e32
|
/shoot/account/forms.py
|
d10a7615d32d87d2428cd2cf15f58dbe64c22f7f
|
[] |
no_license
|
divyeshbhatt/sports
|
254f160cb7731b89fe78d8cfbd44680f75125a89
|
029a30c76255bebd6103335b89721303edb61a1c
|
refs/heads/master
| 2020-03-09T22:36:39.399852
| 2018-04-11T06:28:47
| 2018-04-11T06:28:47
| 129,038,184
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,174
|
py
|
from django import forms
from django.contrib.auth import (
authenticate,
get_user_model,
login,
logout,
)
User = get_user_model()
class UserLoginForm(forms.Form):
username = forms.CharField(widget=forms.TextInput(attrs={'placeholder':'username'}))
password = forms.CharField(widget=forms.PasswordInput(attrs={'placeholder':'password'}))
class UserRegistrationForm(forms.ModelForm):
username = forms.CharField(widget=forms.TextInput(attrs={'placeholder':'User Name'}))
email = forms.EmailField(widget=forms.EmailInput(attrs={'placeholder':'Email'}))
email2 = forms.EmailField(widget=forms.EmailInput(attrs={'placeholder':'Confirm Email'}))
password = forms.CharField(widget=forms.PasswordInput(attrs={'placeholder':'Password'}))
class Meta:
model = User
fields = ('username', 'email','email2', 'password')
def clean_email2(self):
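        # ensure both email fields match and the address is not already taken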
email = self.cleaned_data.get('email')
email2 = self.cleaned_data.get('email2')
if email != email2:
            raise forms.ValidationError('Emails do not match.')
email_qs = User.objects.filter(email = email)
if email_qs.exists():
raise forms.ValidationError('Email already exists..!')
return email2
|
[
"divyesh171975@gmail.com"
] |
divyesh171975@gmail.com
|
a3706a1097368c223b6395e2a2418e3ff40dc558
|
04662a4b20227f8ab7446cce71a77e2bd7fbfffb
|
/flask-aws/bin/rst2odt_prepstyles.py
|
b60558395fdfc271bbe3400b2be1d2bb9f46d108
|
[] |
no_license
|
poonesh/flask-aws-tutorial
|
d77511290cf2b293c01dd9ac93a46678f4dd9d12
|
ccd2b829f9f381d0fdb84c1c995ce7aaeb7ea2ed
|
refs/heads/master
| 2021-01-25T14:39:39.219960
| 2018-03-03T21:34:19
| 2018-03-03T21:34:19
| 123,721,060
| 0
| 0
| null | 2018-03-03T18:43:18
| 2018-03-03T18:43:18
| null |
UTF-8
|
Python
| false
| false
| 1,744
|
py
|
#!/Users/Pooneh/projects/flask-aws-tutorial/flask-aws/bin/python
# $Id: rst2odt_prepstyles.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
"""
#
# Author: Michael Schutte <michi@uiae.at>
from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os
NAMESPACES = {
"style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
"fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}
def prepstyle(filename):
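    # drop every fo:* (page size) attribute from the style:page-layout-properties
    # elements, then rewrite the .odt zip with the patched styles.xml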
zin = zipfile.ZipFile(filename)
styles = zin.read("styles.xml")
root = etree.fromstring(styles)
for el in root.xpath("//style:page-layout-properties",
namespaces=NAMESPACES):
for attr in el.attrib:
if attr.startswith("{%s}" % NAMESPACES["fo"]):
del el.attrib[attr]
tempname = mkstemp()
zout = zipfile.ZipFile(os.fdopen(tempname[0], "w"), "w",
zipfile.ZIP_DEFLATED)
for item in zin.infolist():
if item.filename == "styles.xml":
zout.writestr(item, etree.tostring(root))
else:
zout.writestr(item, zin.read(item.filename))
zout.close()
zin.close()
shutil.move(tempname[1], filename)
def main():
args = sys.argv[1:]
if len(args) != 1:
print >> sys.stderr, __doc__
print >> sys.stderr, "Usage: %s STYLE_FILE.odt\n" % sys.argv[0]
sys.exit(1)
filename = args[0]
prepstyle(filename)
if __name__ == '__main__':
main()
# vim:tw=78:sw=4:sts=4:et:
|
[
"poonehshooshtari@gmail.com"
] |
poonehshooshtari@gmail.com
|
bf340fac36686131acd709efb2fff3a1258101c7
|
ecec5645d7cc552565e466b2f233275294bab108
|
/lab2_v9/code.py
|
f28ef7ec803e23f78f7de1d6353ae1c2b0230471
|
[] |
no_license
|
stepatron/MobileDevicesManagement
|
677b3cffe7956040f69cf98182c3b3708f32c042
|
66690443cc6007b67b42e763f46cd098b9a887de
|
refs/heads/master
| 2022-04-14T11:52:47.487215
| 2022-02-10T18:09:17
| 2022-02-10T18:09:17
| 255,310,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,502
|
py
|
import os
import re
import sys
import math
import matplotlib.pyplot as plt
table = []
table_1 = []
mbit_rate = 0.5
kbit_free_total = 1000
byte_total = bill_total = 0
ip_client = '192.168.250.3'
# os.system("nfdump -r nfcapd.202002251200 >> nfcapd202002251200.txt")
file_in = open('nfcapd202002251200.txt', 'r')
[table_1.append(string.rstrip()) for string in file_in]
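# nfdump prints large byte counts such as '1.5 M'; expand the '.x M' suffix
# (e.g. '1.5 M' becomes '1500000') so every row splits into uniform columns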
for i in range(len(table_1)):
table_1[i] = re.sub(r'\.\d\sM', '500000', table_1[i])
[table.append(re.sub(r'\s+', ' ', elem).split(' ')) for elem in table_1[1:-4]]
[row.remove('->') for row in table]
[row.remove('->') for row in table]
X_unsort = []
[X_unsort.append(re.sub(r':\d{2}\.{1}\d{3}', '', row[1])) for row in table]
Y_unsort = []
[Y_unsort.append(int(row[9])) for row in table]
XY_unsort = zip(X_unsort,Y_unsort)
XY = sorted(XY_unsort, key=lambda tup: tup[0])
X = [XY[0] for XY in XY]
Y = [XY[1] for XY in XY]
# merge rows that fall in the same minute; a while loop is needed because
# elements are deleted during iteration (reassigning i inside a for loop
# has no effect, so runs of 3+ equal timestamps were merged incompletely)
i = 0
while i < len(X) - 1:
    if X[i] == X[i+1]:
        Y[i] += Y[i+1]
        del X[i+1]
        del Y[i+1]
    else:
        i += 1
for row in table:
if ip_client in row[5]:
byte_total += int(row[9])
bill_total = math.ceil((byte_total*8 - kbit_free_total*1024)/(1024*1024)) * mbit_rate
print ('Затраты абонента', ip_client, 'составляют:', bill_total, 'руб.')
fig, ax = plt.subplots()
ax.bar(X, Y)
plt.ylabel('Объем трафика (бит)')
plt.xlabel('Время (поминутно)')
plt.show()
os.system("pause")
sys.exit(0)
|
[
"noreply@github.com"
] |
stepatron.noreply@github.com
|
c9673f0f6fd4aecb76d02fa3a8f44211346f42e5
|
b32fdf5e74c46bcde51c12c152e6762a92e272e0
|
/Login/urls.py
|
e5d4c36a606fd87dc4459f8c29d1a3c2ea44f235
|
[] |
no_license
|
INHDI/demo_sql
|
172722f6a7122bec0d103aee7e62cb65925e4aa1
|
d77f7a796bf3d406f1881f0753cd4278948a5b74
|
refs/heads/main
| 2023-07-20T04:32:24.296373
| 2021-08-19T04:36:08
| 2021-08-19T04:36:08
| 397,814,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('login/', views.login, name='home'),
path("them/", views.them, name="them"),
path("sua/<str:pk>", views.sua, name="sua"),
path("xoa/<str:pk>", views.xoa, name="xoa")
]
|
[
"dang12.10.1999@gmail.com"
] |
dang12.10.1999@gmail.com
|
bf59a8d1751a59bb7477c25729c5179e6bdb4d4d
|
1daabb9079a80fdf24f1e27d750b7bd53ac1c4c3
|
/pybot/plugins/perms.py
|
135f8d20745484fe1e6cc8faf9c1ac71c70d6986
|
[
"MIT"
] |
permissive
|
jkent/pybot
|
ea01bd0d9b19b65a30a50f429b563e3ff39d1ee9
|
0c70a7c29caa709413e04a411a5fdb22a8dbdb12
|
refs/heads/master
| 2023-07-14T20:22:56.612958
| 2021-08-20T04:49:05
| 2021-08-20T04:49:05
| 12,857,889
| 0
| 1
|
MIT
| 2021-05-02T00:36:54
| 2013-09-16T03:42:14
|
Python
|
UTF-8
|
Python
| false
| false
| 4,918
|
py
|
# -*- coding: utf-8 -*-
# vim: set ts=4 et
import json
import os
import sqlite3
from pybot.plugin import *
class Plugin(BasePlugin):
default_level = 1000
def on_load(self):
self.db = sqlite3.connect(os.path.join(self.bot.core.data_path,
'perms.db'))
self.cur = self.db.cursor()
self.cur.execute('''CREATE TABLE IF NOT EXISTS allow
(mask TEXT PRIMARY KEY, rules TEXT)''')
self.cur.execute('''CREATE TABLE IF NOT EXISTS deny
(mask TEXT PRIMARY KEY, rules TEXT)''')
self.db.commit()
self.load_rules()
def on_unload(self):
self.save_rules()
self.db.close()
def load_rules(self):
self.bot.allow_rules = {}
self.bot.deny_rules = {}
self.cur.execute('SELECT COUNT(*) FROM allow')
count = self.cur.fetchone()[0]
if count == 0:
self.bot.allow_rules['*'] = {'ANY': 1}
else:
for mask, rules in self.cur.execute('SELECT mask, rules ' \
'FROM allow'):
self.bot.allow_rules[mask] = json.loads(rules)
for mask, rules in self.cur.execute('SELECT mask, rules FROM ' \
'deny'):
self.bot.deny_rules[mask] = json.loads(rules)
superuser = self.config.get('superuser')
if superuser:
self.bot.allow_rules[superuser] = {'ANY': 1000}
def save_rules(self):
for mask, rules in list(self.bot.allow_rules.items()):
rules = json.dumps(rules)
self.cur.execute('INSERT OR REPLACE INTO allow (mask, rules) ' \
'VALUES (?, ?)', (mask, rules))
for mask, rules in list(self.bot.deny_rules.items()):
rules = json.dumps(rules)
self.cur.execute('INSERT OR REPLACE INTO deny (mask, rules) ' \
'VALUES (?, ?)', (mask, rules))
self.db.commit()
@hook
def perms_list_trigger(self, msg, args, argstr):
msg.reply('Allow:')
for mask, rules in list(self.bot.allow_rules.items()):
line = ' ' + mask
for plugin, level in list(rules.items()):
line += ' %s=%s' % (plugin, level)
msg.reply(line)
msg.reply('Deny:')
for mask, rules in list(self.bot.deny_rules.items()):
line = ' ' + mask
for plugin, level in list(rules.items()):
line += ' %s=%s' % (plugin, level)
msg.reply(line)
@hook
def perms_allow_trigger(self, msg, args, argstr):
if len(args) < 2:
msg.reply('a prefix mask is required')
return
mask = args[1]
if mask.startswith('-'):
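            # a leading '-' on the mask removes the whole allow entry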
if len(args) != 2:
msg.reply('only one argument expected')
mask = mask[1:]
if mask in self.bot.allow_rules:
del self.bot.allow_rules[mask]
self.cur.execute('DELETE FROM allow WHERE mask=?', (mask,))
self.db.commit()
return
rules = self.bot.allow_rules.setdefault(mask, {})
for arg in args[2:]:
if arg.startswith('-'):
plugin = arg[1:]
try:
del rules[plugin]
except:
msg.reply('no rule exists for plugin "%s"' % plugin)
return
else:
try:
plugin, level = arg.split('=', 1)
level = int(level)
except:
msg.reply('invalid syntax, "plugin=level" format required')
return
rules[plugin] = level
@hook
def perms_deny_trigger(self, msg, args, argstr):
if len(args) < 2:
msg.reply('a prefix mask is required')
return
mask = args[1]
if mask.startswith('-'):
if len(args) != 2:
msg.reply('only one argument expected')
mask = mask[1:]
if mask in self.bot.deny_rules:
del self.bot.deny_rules[mask]
self.cur.execute('DELETE FROM deny WHERE mask=?', (mask,))
self.db.commit()
return
rules = self.bot.deny_rules.setdefault(mask, {})
for arg in args[2:]:
if arg.startswith('-'):
plugin = arg[1:]
try:
del rules[plugin]
except:
msg.reply('no rule exists for plugin "%s"' % plugin)
return
else:
try:
plugin, level = arg.split('=', 1)
level = int(level)
except:
msg.reply('invalid syntax, "plugin=level" format required')
return
rules[plugin] = level
|
[
"jeff@jkent.net"
] |
jeff@jkent.net
|
f5a46d1bbb3b34ce030ff8beb152402b946804ea
|
e188b95dd14ec5de77056be87c30ccc322f87d7a
|
/Week2/command line/command-line/SNParrray2fasta.py
|
b7ce71fb3b5408ab4e7385968507cafa51fdeb64
|
[] |
no_license
|
anuhanovic/BIOF475Spring2018
|
f0db901b45c7bac4d736f2ced93ec1e2b794dfa7
|
fdeafbea61b58d90399c4ae0b09bbefe3a705ea6
|
refs/heads/master
| 2021-05-08T13:56:17.463030
| 2019-07-12T18:13:24
| 2019-07-12T18:13:24
| 120,050,640
| 7
| 7
| null | 2018-12-04T19:41:58
| 2018-02-03T01:56:19
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,096
|
py
|
#!/usr/bin/python
#
"""
Modified to deal with tab-delimited rows of IDs and nucleotides.
Did this to parse a SNP array into FASTA format.
Author: R. Burke Squires, BCBB
"""
# __version__: 0.1.1
def converter(args):
"""
This method takes the command line arguments and converts the input SNP file
    into a FASTA file for further analysis.
"""
import os
if not args.output_file:
input_filename, file_extension = os.path.splitext(args.input_file)
file_extension = 'fasta'
output_file = "%s.%s" % (input_filename, file_extension)
else:
output_file = '%s.fasta' % args.output_file
output = open(output_file, 'w')
    with open(args.input_file, 'r') as f:  # 'U' mode is deprecated; 'r' gives universal newlines in Python 3
for line in f:
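            # each line is: identifier<TAB>base<TAB>base...; the bases are
            # joined into one sequence string for the FASTA record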
data = line.split('\t')
identifier = data[0]
sequence = ''.join(data[1:])
output.write(">%s\n%s" % (identifier, sequence))
output.close()
if __name__ == '__main__':
"""
This is the main function of the program and what is run first. This sets up
    the arguments and then feeds them into the converter method when run.
"""
import argparse
PARSER = argparse.ArgumentParser(prog='SNParray2fasta.py',
usage='%(prog)s -in (SNP file)\n',
description='Create FASTA file from tab delimited SNP data.',
formatter_class=lambda prog:
argparse.HelpFormatter(prog, max_help_position=15),
add_help=False)
REQUIRED = PARSER.add_argument_group('Required')
REQUIRED.add_argument('-in', '--input_file', required=True, help='The input SNP file.')
OPTIONAL = PARSER.add_argument_group('Options')
OPTIONAL.add_argument('-out', '--output_file', help='The output FASTA file.')
OPTIONAL.add_argument('-h', '--help', action='help', help='show this help message & exit')
OPTIONAL.add_argument('-path', default='.', help=argparse.SUPPRESS)
ARGS = PARSER.parse_args()
converter(ARGS)
|
[
"noreply@github.com"
] |
anuhanovic.noreply@github.com
|
2508cc3d31dd4401b54226b20e67701253be7c4d
|
547018e2fb9b178aacfe2ceabcab4313647ffb79
|
/test_cases/general/primitive_type/time/py/test_generate/parse.py
|
edef0527fe4b9af76816443699824edcbb8c8aa6
|
[
"MIT"
] |
permissive
|
Parquery/mapry
|
2eb22862494342f71ca4254e7b2b53df84bd666f
|
93515307f9eba8447fe64b0ac7cc68b2d07205a7
|
refs/heads/master
| 2021-06-11T09:57:17.764387
| 2021-06-02T14:19:52
| 2021-06-02T14:19:52
| 165,482,780
| 11
| 3
|
MIT
| 2021-06-02T14:19:53
| 2019-01-13T08:31:08
|
C++
|
UTF-8
|
Python
| false
| false
| 2,269
|
py
|
# File automatically generated by mapry. DO NOT EDIT OR APPEND!
"""provides general structures and functions for parsing."""
import typing
import some.graph
class Error:
"""represents an error occurred while parsing."""
def __init__(self, ref: str, message: str) -> None:
"""
initializes the error with the given values.
:param ref: references the cause (e.g., a reference path)
:param message: describes the error
"""
self.ref = ref
self.message = message
class Errors:
"""
collects errors capped at a certain quantity.
If the capacity is full, the subsequent surplus errors are ignored.
"""
def __init__(self, cap: int) -> None:
"""
initializes the error container with the given cap.
:param cap: maximum number of contained errors
"""
self.cap = cap
self._values = [] # type: typing.List[Error]
def add(self, ref: str, message: str) -> None:
"""
adds an error to the container.
:param ref: references the cause (e.g., a reference path)
:param message: describes the error
"""
if len(self._values) < self.cap:
self._values.append(Error(ref=ref, message=message))
def full(self) -> bool:
"""gives True when there are exactly ``cap`` errors contained."""
return len(self._values) == self.cap
def empty(self) -> bool:
"""gives True when there are no errors contained."""
return len(self._values) == 0
def count(self) -> int:
"""returns the number of errors."""
return len(self._values)
def values(self) -> typing.Iterable[Error]:
"""gives an iterator over the errors."""
return iter(self._values)
def placeholder_some_graph() -> some.graph.SomeGraph:
"""
creates a placeholder instance of SomeGraph.
Placeholders are necessary so that we can pre-allocate class registries
    during parsing. All the attributes of the placeholder are set to None.
Consider a placeholder an empty shell to be filled out during parsing.
:return: empty shell
"""
return some.graph.SomeGraph( # type: ignore
some_time=None,
formatless_time=None)
|
[
"noreply@github.com"
] |
Parquery.noreply@github.com
|
f2a93f6eb0f2a4b12c9f597dd58d59b49426ecb9
|
efe53f7c0b0439bd9dcdcf49847222c87aad6c51
|
/study_algorithm/python/Recursion/Fibonnaci.py
|
34d5b4ce62808109109459c9e944593ed16230ef
|
[
"MIT"
] |
permissive
|
AlphaSunny/study
|
f13eca7bfa830dcdcb395fb05e9c2006b86190ad
|
4e65127fefa9078b7ae6b9db92369c93e61e4327
|
refs/heads/master
| 2020-04-09T07:57:30.685004
| 2019-03-21T08:40:29
| 2019-03-21T08:40:29
| 160,177,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
# Instantiate Cache information
n = 10
cache = [None] * (n + 1)
def fib_dyn(n):
# Base Case
if n == 0 or n == 1:
return n
# Check cache
    if cache[n] is not None:
return cache[n]
# Keep setting cache
cache[n] = fib_dyn(n-1) + fib_dyn(n-2)
return cache[n]
fib_dyn(10)
|
[
"epsilonsunny@gmai.com"
] |
epsilonsunny@gmai.com
|
cc0b92b1e557e4310fac89d5ba17a487c4827dce
|
ecdbfdcc607f7c58e2728f76b7819790d6ab51c7
|
/Machine Learning Lab 1.py
|
a0c5c6a83a3972fd64ffdbc052081ebd8dd34f3d
|
[] |
no_license
|
NischalKothariM-1CE17CS074/ML-LAB-7TH-SEM_NISCHAL
|
0908143c6ef7568195868d10c34df854ec8d4aef
|
5122da8109536c7aa437fcfe4a5b73b2075bf99e
|
refs/heads/main
| 2023-02-20T22:48:55.111346
| 2021-01-27T15:31:26
| 2021-01-27T15:31:26
| 329,391,518
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 958
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[6]:
import csv
num_attribute=6
a=[]
with open('pro1.csv', 'r') as csvfile:
reader=csv.reader(csvfile)
for row in reader:
a.append(row)
print(row)
print("\n The total number of training instances are : ",len(a))
num_attribute = len(a[0])-1
print("\n The initial hypothesis is : ")
hypothesis = ['0']*num_attribute
print(hypothesis)
for j in range(0,num_attribute):
hypothesis[j]=a[0][j]
print("\n Find-S: Finding maximally specific Hypothesis\n")
for i in range(0,len(a)):
if a[i][num_attribute]=='Yes':
for j in range(0,num_attribute):
if a[i][j]!=hypothesis[j]:
hypothesis[j]='?'
else:
hypothesis[j]=a[i][j]
print("\n For training Example No:{0} the hypothesis is".format(i),hypothesis)
print("\n The Maximally specific hypothesis for the training instance is ")
print(hypothesis)
# In[ ]:
|
[
"noreply@github.com"
] |
NischalKothariM-1CE17CS074.noreply@github.com
|
b168b5b02efedee13bf8554f7c91b90784e428ae
|
96cc90fd90b838c55f08605d0096e8a91f35a797
|
/Django_ben6/urls.py
|
2a7e5261e84508cc217896c4a2f57316d36a1db7
|
[] |
no_license
|
obt817/Django_ben6
|
832165818b05c07e013b2309c270a4ced0234001
|
8ab3e3671ee571777e4a8e0dcb4a3c512cc17d68
|
refs/heads/master
| 2023-01-30T15:39:41.183016
| 2020-12-04T14:39:08
| 2020-12-04T14:39:08
| 316,948,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 798
|
py
|
"""Django_ben6 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/dev/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include("app6.urls")),
]
|
[
"obatayouhei@obatayouheinoMacBook-Air.local"
] |
obatayouhei@obatayouheinoMacBook-Air.local
|
1d6b32455392c0bcd4af1a417953e142dfb8d24e
|
367e63b0a34178713613b737d8a8ca59dc039acc
|
/Sorting/Insertion-sort.py
|
99dcd274283d4b31f257203cc4d528ce0614f95f
|
[] |
no_license
|
ossayed/hackerrank-problems
|
1d92d13750cdfc7420c284001772b093992d5714
|
53fcbbdef97527cbca8b79119436c826b0c2edc3
|
refs/heads/master
| 2020-03-28T05:08:39.559407
| 2018-09-07T02:19:28
| 2018-09-07T02:19:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 731
|
py
|
#!/bin/python3
import sys
#https://www.hackerrank.com/challenges/insertionsort2/problem
def insertionSort2(n, arr):
# Complete this function
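    # for each position x, walk backwards swapping sort_num toward the front
    # until it is no longer smaller than the element before it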
for x in range(1,n):
sort_num = arr[x]
for y in range(1,x + 1):
if sort_num < arr[x-y]:
arr[(x-y)+1] = arr[x-y]
arr[(x-y)] = sort_num
#print(arr,"a")
if sort_num > arr[x-y]:
arr[(x-y)+1]= sort_num
#print(arr,"b")
break
arr_str =" ".join(str(v) for v in arr)
print(arr_str)
if __name__ == "__main__":
n = int(input().strip())
arr = list(map(int, input().strip().split(' ')))
insertionSort2(n, arr)
|
[
"ossayedkh@gmail.com"
] |
ossayedkh@gmail.com
|
a1f5abefe278e255ac9bb7768c3d39e92105bc60
|
c074fb834cb4a8ac75d107146df10f9496590792
|
/users/urls.py
|
df7702c6046b6ab2fde4eb8a1c751b9e23eb5677
|
[
"Unlicense"
] |
permissive
|
jmhubbard/quote_of_the_day_custom_user
|
4d5ffd4183d7e6290161b84cae2aa1f7ad621a99
|
27024b2953c1c94fd2970563c3ab31ad444912b6
|
refs/heads/master
| 2023-02-19T00:59:27.372671
| 2021-01-10T02:45:56
| 2021-01-10T02:45:56
| 293,443,918
| 1
| 0
|
Unlicense
| 2020-12-03T17:59:59
| 2020-09-07T06:41:25
|
Python
|
UTF-8
|
Python
| false
| false
| 771
|
py
|
from django.urls import path, include
from users.views import UserCreate, UserDeleteView, useraccountview
from main.views import UserLoginView, UserLogoutView
# from django.contrib.auth import views as auth_views
urlpatterns = [
path('login/', UserLoginView.as_view(), name='login'),
path('add/', UserCreate.as_view(), name='user-add'), #Create user page
path('logout/' , UserLogoutView.as_view(), name='logout'),
path('delete_account/<int:pk>/', UserDeleteView.as_view(), name='delete_account'),
path('account_page/', useraccountview, name='account_page')
# path('', include('django.contrib.auth.urls')), #accounts/login = login page
# path('accounts/', include('django.contrib.auth.urls')), #accounts/login = login page
]
|
[
"jasonhubb@gmail.com"
] |
jasonhubb@gmail.com
|
e98c78de8aedaf45e00b38c10dbe13f7032afb9b
|
1684a619346926098eb66e818dfc7fda160d7062
|
/src/Profiles/migrations/0006_alter_profile_user.py
|
b8588138e874d998425c096983088ed79d928cfb
|
[] |
no_license
|
mah-di/social
|
e5fc0a531d8f5cc66074e1fd100077524ad48d01
|
d1f10b15872e8a015f7bbc8180eec67657e7efd2
|
refs/heads/main
| 2023-07-04T18:02:54.574786
| 2021-08-28T06:01:32
| 2021-08-28T06:01:32
| 400,711,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
# Generated by Django 3.2.5 on 2021-07-27 15:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Profiles', '0005_alter_profile_friends'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL),
),
]
|
[
"mahdiiqbal37@gmail.com"
] |
mahdiiqbal37@gmail.com
|
a6467223350da4911eb89579ea7f11c2fb8744a8
|
c34dd2c2d8c0b5916743bb7014e5011a5197dfd2
|
/projects/NLR_MEG/PSI_session1_ROI_3.py
|
82e54b1d6ee9f8032cd6fa7313485480352d2c14
|
[
"BSD-3-Clause"
] |
permissive
|
yeatmanlab/BrainTools
|
c6858c2a2623ee4ec1160ef98872f15e2ad05dad
|
890db4256b0290918045e53cd3c6fd6197fcbb4e
|
refs/heads/master
| 2021-05-22T03:47:32.162046
| 2021-04-14T17:56:12
| 2021-04-14T17:56:12
| 46,300,396
| 5
| 7
|
BSD-3-Clause
| 2019-06-18T21:22:46
| 2015-11-16T20:25:44
|
Python
|
UTF-8
|
Python
| false
| false
| 102,702
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 15 12:05:40 2016
@author: sjjoo
"""
#%%
import sys
import mne
import matplotlib.pyplot as plt
import imageio
from mne.utils import run_subprocess, logger
import os
from os import path as op
import copy
import shutil
import numpy as np
from numpy.random import randn
from scipy import stats as stats
import scipy.io as sio
import time
from functools import partial
from mne.stats import (spatio_temporal_cluster_1samp_test,
summarize_clusters_stc)
from mne import set_config
set_config('MNE_MEMMAP_MIN_SIZE', '1M')
set_config('MNE_CACHE_DIR', '.tmp')
mne.set_config('MNE_USE_CUDA', 'true')
this_env = copy.copy(os.environ)
fs_dir = '/mnt/diskArray/projects/avg_fsurfer'
this_env['SUBJECTS_DIR'] = fs_dir
raw_dir = '/mnt/scratch/NLR_MEG3'
os.chdir(raw_dir)
subs = ['NLR_102_RS','NLR_103_AC','NLR_110_HH','NLR_127_AM',
'NLR_130_RW','NLR_132_WP','NLR_133_ML','NLR_145_AC','NLR_151_RD',
'NLR_152_TC','NLR_160_EK','NLR_161_AK','NLR_163_LF','NLR_164_SF',
'NLR_170_GM','NLR_172_TH','NLR_174_HS','NLR_179_GM','NLR_180_ZD',
'NLR_187_NB','NLR_203_AM','NLR_204_AM','NLR_205_AC','NLR_206_LM',
'NLR_207_AH','NLR_211_LB','NLR_150_MG'
] # 'NLR_202_DD','NLR_105_BB','NLR_150_MG','NLR_201_GS',
brs = [87, 102, 78, 115,
91, 121, 77, 91, 93,
88, 75, 90, 66, 59,
81, 84, 81, 72, 71,
121,75, 66, 90, 93,
101, 56, 93] #75 101, 93,
brs = np.array(brs)
age = [125, 132, 138, 109,
138, 108, 98, 105, 87,
131, 123, 95, 112, 133,
152, 103, 89, 138, 93,
117, 122, 109, 90, 111,
86, 147]
age = np.divide(age, 12)
# Session 1
# subs are synced up with session1 folder names...
#
session1 = ['102_rs160618','103_ac150609',
'110_hh160608','127_am161004','130_rw151221',
'132_wp160919','133_ml151124','145_ac160621',
'151_rd160620','152_tc160422','160_ek160627',
'161_ak160627','163_lf160707',
'164_sf160707','170_gm160613','172_th160614',
'174_hs160620','179_gm160701','180_zd160621',
'187_nb161017','203_am150831',
'204_am150829','205_ac151208','206_lm151119',
'207_ah160608','211_lb160617','150_mg160606'
] #'202_dd150919'(# of average is zero) '105_bb150713'(# of average is less than 10)
#,(# of average is less than 20) '201_gs150729'(# of average is less than 10)
n_subjects = len(subs)
c_table = ( (0.6510, 0.8078, 0.8902), # Blue, Green, Red, Orange, Purple, yellow
(0.1216, 0.4706, 0.7059),
(0.6980, 0.8745, 0.5412),
(0.2000, 0.6275, 0.1725),
(0.9843, 0.6039, 0.6000),
(0.8902, 0.1020, 0.1098),
(0.9922, 0.7490, 0.4353),
(1.0000, 0.4980, 0),
(0.7922, 0.6980, 0.8392),
(0.4157, 0.2392, 0.6039),
(1.0000, 1.0000, 0.6000),
(0.6941, 0.3490, 0.1569))
method = "dSPM"
snr = 3.
lambda2 = 1. / snr ** 2
fname_data = op.join(raw_dir, 'session1_data.npy')
m1 = np.transpose(brs) >= 90
m2 = np.logical_not(m1)
#m1[19] = False
m2[12] = False
m2[16] = False
m1[26] = False
m2[26] = False
#m2[15] = False
good_readers = np.where(m1)[0]
poor_readers = np.where(m2)[0]
all_subject = []
all_subject.extend(good_readers)
all_subject.extend(poor_readers)
all_subject.sort()
poor_subs = []
for n in np.arange(0,len(poor_readers)):
poor_subs.append(subs[poor_readers[n]])
#m1 = np.transpose(age) > 9
#
#m2 = np.logical_not(m1)
#
#m2[12] = False
#m2[16] = False
#m2[26] = False
#old_readers = np.where(m1)[0]
#young_readers = np.where(m2)[0]
#
#all_readers = []
#all_readers.extend(good_readers)
#all_readers.extend(poor_readers)
#all_readers.sort()
#%%
"""
Here we do the real deal...
"""
# Session 1
load_data = False
method = "dSPM"
snr = 3.
lambda2 = 1. / snr ** 2
#conditions1 = [0, 2, 4, 6, 8, 16, 18, 20, 22, 24] # Lets compare word vs. scramble
conditions1 = ['word_c254_p20_dot', 'word_c254_p50_dot', 'word_c137_p20_dot',
'word_c254_p80_dot', 'word_c137_p80_dot', #'bigram_c254_p20_dot',
# 'bigram_c254_p50_dot', 'bigram_c137_p20_dot',
'word_c254_p20_word', 'word_c254_p50_word', 'word_c137_p20_word',
'word_c254_p80_word', 'word_c137_p80_word', #'bigram_c254_p20_word',
# 'bigram_c254_p50_word', 'bigram_c137_p20_word'
]
# conditions2 = [16, 22] # Lets compare word vs. scramble
X13 = np.empty((20484, 601, n_subjects, len(conditions1)))
#word_data = np.empty((20484, 421, n_subjects, len(conditions1[8:])))
fs_vertices = [np.arange(10242)] * 2
n_epochs = np.empty((n_subjects,len(conditions1)))
""" Read HCP labels """
labels = mne.read_labels_from_annot('fsaverage', parc='HCPMMP1', surf_name='white', subjects_dir=fs_dir)
PHT_label_lh = [label for label in labels if label.name == 'L_PHT_ROI-lh'][0]
PHT_label_rh = [label for label in labels if label.name == 'R_PHT_ROI-rh'][0]
TE1p_label_lh = [label for label in labels if label.name == 'L_TE1p_ROI-lh'][0]
TE1p_label_rh = [label for label in labels if label.name == 'R_TE1p_ROI-rh'][0]
TE2p_label_lh = [label for label in labels if label.name == 'L_TE2p_ROI-lh'][0]
TE2p_label_rh = [label for label in labels if label.name == 'R_TE2p_ROI-rh'][0]
PH_label_lh = [label for label in labels if label.name == 'L_PH_ROI-lh'][0]
PH_label_rh = [label for label in labels if label.name == 'R_PH_ROI-rh'][0]
FFC_label_lh = [label for label in labels if label.name == 'L_FFC_ROI-lh'][0]
FFC_label_rh = [label for label in labels if label.name == 'R_FFC_ROI-rh'][0]
IFSp_label_lh = [label for label in labels if label.name == 'L_IFSp_ROI-lh'][0]
IFSp_label_rh = [label for label in labels if label.name == 'R_IFSp_ROI-rh'][0]
IFJp_label_lh = [label for label in labels if label.name == 'L_IFJp_ROI-lh'][0]
IFJp_label_rh = [label for label in labels if label.name == 'R_IFJp_ROI-rh'][0]
IFJa_label_lh = [label for label in labels if label.name == 'L_IFJa_ROI-lh'][0]
IFJa_label_rh = [label for label in labels if label.name == 'R_IFJa_ROI-rh'][0]
a45_label_lh = [label for label in labels if label.name == 'L_45_ROI-lh'][0]
a45_label_rh = [label for label in labels if label.name == 'R_45_ROI-rh'][0]
a44_label_lh = [label for label in labels if label.name == 'L_44_ROI-lh'][0]
a44_label_rh = [label for label in labels if label.name == 'R_44_ROI-rh'][0]
PGi_label_lh = [label for label in labels if label.name == 'L_PGi_ROI-lh'][0]
PGi_label_rh = [label for label in labels if label.name == 'R_PGi_ROI-rh'][0]
PGs_label_lh = [label for label in labels if label.name == 'L_PGs_ROI-lh'][0]
PGs_label_rh = [label for label in labels if label.name == 'R_PGs_ROI-rh'][0]
STSvp_label_lh = [label for label in labels if label.name == 'L_STSvp_ROI-lh'][0]
STSvp_label_rh = [label for label in labels if label.name == 'R_STSvp_ROI-rh'][0]
STSdp_label_lh = [label for label in labels if label.name == 'L_STSdp_ROI-lh'][0]
STSdp_label_rh = [label for label in labels if label.name == 'R_STSdp_ROI-rh'][0]
TPOJ1_label_lh = [label for label in labels if label.name == 'L_TPOJ1_ROI-lh'][0]
TPOJ1_label_rh = [label for label in labels if label.name == 'R_TPOJ1_ROI-rh'][0]
V1_label_lh = [label for label in labels if label.name == 'L_V1_ROI-lh'][0]
V1_label_rh = [label for label in labels if label.name == 'R_V1_ROI-rh'][0]
if load_data == False:
for n, s in enumerate(session1):
os.chdir(os.path.join(raw_dir,session1[n]))
os.chdir('inverse')
fn = 'Conditions_40-sss_eq_'+session1[n]+'-ave.fif'
fn_inv = session1[n] + '-inv.fif'
inv = mne.minimum_norm.read_inverse_operator(fn_inv, verbose=None)
evoked = mne.read_evokeds(fn, condition=None,
baseline=(None,0), kind='average', proj=True)
stc = mne.minimum_norm.apply_inverse(evoked[0],inv,lambda2, method=method, pick_ori="normal")
s_label = IFSp_label_lh.morph(subject_from='fsaverage', subject_to=subs[n], subjects_dir=fs_dir,
n_jobs=18) #grade=[np.arange(10242), np.arange(10242)]
stc_label = stc.in_label(s_label)
os.chdir(os.path.join(raw_dir,session1[n]))
os.chdir('epochs')
fn = 'All_40-sss_'+session1[n]+'-epo.fif'
epochs = mne.read_epochs(fn)
eid = epochs.events[:,2] == 101
epo = epochs[eid]
epo.crop(0., 0.7)
snr = 1.0 # use lower SNR for single epochs
lambda2 = 1.0 / snr ** 2
stcs = mne.minimum_norm.apply_inverse_epochs(epo, inv, lambda2, method,
pick_ori="normal", return_generator=True)
os.chdir(os.path.join(fs_dir,subs[n]))
os.chdir('bem')
fn = subs[n] + '-ico-5-src.fif'
src = mne.read_source_spaces(fn, patch_stats=False, verbose=None)
src = inv['src']
label = mne.read_label('TE2p_label_lh')
seed_ts = mne.extract_label_time_course(stcs, label, src, #labels_parc
mode='mean_flip',
allow_empty=True,
return_generator=False)
comb_ts = zip(seed_ts, stcs)
#Construct indices to estimate connectivity between the label time course
# and all source space time courses
vertices = [src[i]['vertno'] for i in range(2)]
n_signals_tot = 1 + len(vertices[0]) + len(vertices[1])
indices = mne.connectivity.seed_target_indices([0], np.arange(1, n_signals_tot))
# read colors
node_colors = [label.color for label in labels]
# Compute the PSI in the frequency range 8Hz..30Hz. We exclude the baseline
# period from the connectivity estimation
fmin = 8.
fmax = 30.
tmin_con = 0.
sfreq = 600 # the sampling frequency
psi, freqs, times, n_epochs, _ = mne.connectivity.phase_slope_index(
comb_ts, mode='multitaper', indices=indices, sfreq=sfreq,
fmin=fmin, fmax=fmax, tmin=tmin_con)
# Generate a SourceEstimate with the PSI. This is simple since we used a single
# seed (inspect the indices variable to see how the PSI scores are arranged in
# the output)
psi_stc = mne.SourceEstimate(psi, vertices=vertices, tmin=0, tstep=1,
subject='sample')
# Now we can visualize the PSI using the plot method. We use a custom colormap
# to show signed values
v_max = np.max(np.abs(psi))
brain = psi_stc.plot(surface='inflated', hemi='lh',
time_label='Phase Slope Index (PSI)',
subjects_dir=subjects_dir,
clim=dict(kind='percent', pos_lims=(95, 97.5, 100)))
brain.show_view('medial')
brain.add_label(fname_label, color='green', alpha=0.7)
os.chdir(raw_dir)
np.save(fname_data, X13)
np.save('session1_times.npy',times)
np.save('session1_tstep.npy',tstep)
np.save('session1_n_averages.npy',n_epochs)
else:
os.chdir(raw_dir)
X13 = np.load(fname_data)
times = np.load('session1_times.npy')
tstep = np.load('session1_tstep.npy')
n_epochs3 = np.load('session1_n_averages.npy')
tmin = -0.1
#%%
""" Read HCP labels """
labels = mne.read_labels_from_annot('fsaverage', parc='HCPMMP1', surf_name='white', subjects_dir=fs_dir) #regexp=aparc_label_name
#aparc_label_name = 'PHT_ROI'#'_IP'#'IFSp_ROI'#'STSvp_ROI'#'STSdp_ROI'#'PH_ROI'#'TE2p_ROI' #'SFL_ROI' #'IFSp_ROI' #'TE2p_ROI' #'inferiortemporal' #'pericalcarine'
# anat_label = mne.read_labels_from_annot('fsaverage', parc='aparc',surf_name='white',
# subjects_dir=fs_dir, regexp=aparc_label_name)
#%%
#labels = mne.read_labels_from_annot('fsaverage', 'HCPMMP1', 'lh', subjects_dir=fs_dir)
#aud_label = [label for label in labels if label.name == 'L_A1_ROI-lh'][0]
#brain.add_label(aud_label, borders=False)
""" Task effects """
#TE2p_mask_lh = mne.Label.get_vertices_used(TE2p_label[0])
#TE2p_mask_rh = mne.Label.get_vertices_used(TE2p_label[1])
k = 1
#tmin = 0
tp1 = [0.08, 0.13, 0.15, 0.20, 0.30]
tp2 = [0.12, 0.17, 0.19, 0.24, 0.35]
mask = times == 0.15
PHT_label_lh = [label for label in labels if label.name == 'L_PHT_ROI-lh'][0]
PHT_label_rh = [label for label in labels if label.name == 'R_PHT_ROI-rh'][0]
TE1p_label_lh = [label for label in labels if label.name == 'L_TE1p_ROI-lh'][0]
TE1p_label_rh = [label for label in labels if label.name == 'R_TE1p_ROI-rh'][0]
TE2p_label_lh = [label for label in labels if label.name == 'L_TE2p_ROI-lh'][0]
TE2p_label_rh = [label for label in labels if label.name == 'R_TE2p_ROI-rh'][0]
PH_label_lh = [label for label in labels if label.name == 'L_PH_ROI-lh'][0]
PH_label_rh = [label for label in labels if label.name == 'R_PH_ROI-rh'][0]
FFC_label_lh = [label for label in labels if label.name == 'L_FFC_ROI-lh'][0]
FFC_label_rh = [label for label in labels if label.name == 'R_FFC_ROI-rh'][0]
IFSp_label_lh = [label for label in labels if label.name == 'L_IFSp_ROI-lh'][0]
IFSp_label_rh = [label for label in labels if label.name == 'R_IFSp_ROI-rh'][0]
IFJp_label_lh = [label for label in labels if label.name == 'L_IFJp_ROI-lh'][0]
IFJp_label_rh = [label for label in labels if label.name == 'R_IFJp_ROI-rh'][0]
IFJa_label_lh = [label for label in labels if label.name == 'L_IFJa_ROI-lh'][0]
IFJa_label_rh = [label for label in labels if label.name == 'R_IFJa_ROI-rh'][0]
a45_label_lh = [label for label in labels if label.name == 'L_45_ROI-lh'][0]
a45_label_rh = [label for label in labels if label.name == 'R_45_ROI-rh'][0]
a44_label_lh = [label for label in labels if label.name == 'L_44_ROI-lh'][0]
a44_label_rh = [label for label in labels if label.name == 'R_44_ROI-rh'][0]
PGi_label_lh = [label for label in labels if label.name == 'L_PGi_ROI-lh'][0]
PGi_label_rh = [label for label in labels if label.name == 'R_PGi_ROI-rh'][0]
PGs_label_lh = [label for label in labels if label.name == 'L_PGs_ROI-lh'][0]
PGs_label_rh = [label for label in labels if label.name == 'R_PGs_ROI-rh'][0]
STSvp_label_lh = [label for label in labels if label.name == 'L_STSvp_ROI-lh'][0]
STSvp_label_rh = [label for label in labels if label.name == 'R_STSvp_ROI-rh'][0]
STSdp_label_lh = [label for label in labels if label.name == 'L_STSdp_ROI-lh'][0]
STSdp_label_rh = [label for label in labels if label.name == 'R_STSdp_ROI-rh'][0]
TPOJ1_label_lh = [label for label in labels if label.name == 'L_TPOJ1_ROI-lh'][0]
TPOJ1_label_rh = [label for label in labels if label.name == 'R_TPOJ1_ROI-rh'][0]
V1_label_lh = [label for label in labels if label.name == 'L_V1_ROI-lh'][0]
V1_label_rh = [label for label in labels if label.name == 'R_V1_ROI-rh'][0]
new_data = X13[:,:,poor_readers,:]
data1 = np.subtract(np.mean(new_data[:,:,:,[5]],axis=3), np.mean(new_data[:,:,:,[8]],axis=3))
#data1 = np.mean(new_data[:,:,:,[3]],axis=3)
del new_data
data11 = data1[:,:,:]
del data1
data11 = np.transpose(data11,[2,1,0])
#""" Spatiotemporal clustering """
#p_threshold = 0.001
#t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)
#print('Clustering.')
#connectivity = mne.spatial_tris_connectivity(mne.grade_to_tris(5))
#T_obs, clusters, cluster_p_values, H0 = clu = \
# spatio_temporal_cluster_1samp_test(data11, connectivity=connectivity, n_jobs=12,
# threshold=t_threshold)
## Now select the clusters that are sig. at p < 0.05 (note that this value
## is multiple-comparisons corrected).
#good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
#
#print('Visualizing clusters.')
## Now let's build a convenient representation of each cluster, where each
## cluster becomes a "time point" in the SourceEstimate
#stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
# vertices=fs_vertices,
# subject='fsaverage')
#
## Let's actually plot the first "time point" in the SourceEstimate, which
## shows all the clusters, weighted by duration
#brain = stc_all_cluster_vis.plot(hemi='both', views='lateral',
# time_label='Duration significant (ms)')
#brain.save_image('clusters.png')
#stat_fun = partial(mne.stats.ttest_1samp_no_p,sigma=1e-3)
stat_fun = partial(mne.stats.ttest_1samp_no_p)
temp3 = mne.SourceEstimate(np.transpose(stat_fun(data11)), fs_vertices, tmin,
tstep,subject='fsaverage')
brain3_1 = temp3.plot(hemi='lh', subjects_dir=fs_dir, views = ['lat','ven'], #views=['lat','ven','med'], #transparent = True,
initial_time=0.15, clim=dict(kind='value', pos_lims=[2.3, 2.4, 4.3])) #np.max(temp3.data[:,:])]))
brain3_1.add_label(PHT_label_lh, borders=True, color=c_table[0])
brain3_1.add_label(TE2p_label_lh, borders=True, color=c_table[1])
brain3_1.add_label(PH_label_lh, borders=True, color=c_table[2])
brain3_1.add_label(FFC_label_lh, borders=True, color=c_table[3])
brain3_1.add_label(TE1p_label_lh, borders=True, color=c_table[4])
brain3_1.add_label(IFSp_label_lh, borders=True, color=c_table[5])
brain3_1.add_label(IFJp_label_lh, borders=True, color=c_table[6])
brain3_1.add_label(IFJa_label_lh, borders=True, color=c_table[7])
brain3_1.add_label(a45_label_lh, borders=True, color=c_table[8])
brain3_1.add_label(a44_label_lh, borders=True, color=c_table[8])
brain3_1.add_label(PGi_label_lh, borders=True, color=c_table[9])
brain3_1.add_label(PGs_label_lh, borders=True, color=c_table[9])
brain3_1.add_label(STSvp_label_lh, borders=True, color=c_table[11])
brain3_1.add_label(STSdp_label_lh, borders=True, color=c_table[11])
brain3_1.add_label(V1_label_lh, borders=True, color='k')
brain3_1.save_movie('DotTask_LowNoise-HighNoise_GR_lh.mp4',time_dilation = 4.0,framerate = 30)
brain3_2 = temp3.plot(hemi='rh', subjects_dir=fs_dir, views='lat',
clim=dict(kind='value', lims=[2.9, 3, np.max(temp3.data[:,:])]),
initial_time=0.15)
brain3_2.add_label(PHT_label_rh, borders=True, color=c_table[0])
brain3_2.add_label(TE2p_label_rh, borders=True, color=c_table[1])
brain3_2.add_label(PH_label_rh, borders=True, color=c_table[2])
brain3_2.add_label(FFC_label_rh, borders=True, color=c_table[3])
brain3_2.add_label(TE1p_label_rh, borders=True, color=c_table[4])
brain3_2.add_label(IFSp_label_rh, borders=True, color=c_table[5])
brain3_2.add_label(IFJp_label_rh, borders=True, color=c_table[6])
brain3_2.add_label(IFJa_label_rh, borders=True, color=c_table[7])
brain3_2.add_label(a45_label_rh, borders=True, color=c_table[8])
""" Frontal """
temp = temp3.in_label(a44_label_lh)
broca_vertices = temp.vertices[0]
temp = temp3.in_label(a45_label_lh)
broca_vertices = np.unique(np.append(broca_vertices, temp.vertices[0]))
temp = temp3.in_label(IFSp_label_lh)
broca_vertices = np.unique(np.append(broca_vertices, temp.vertices[0]))
#temp = temp3.in_label(IFJp_label_lh)
#broca_vertices = np.unique(np.append(broca_vertices, temp.vertices[0]))
#temp = temp3.in_label(IFJa_label_lh)
#broca_vertices = np.unique(np.append(broca_vertices, temp.vertices[0]))
""" Ventral """
temp = temp3.in_label(TE2p_label_lh)
ventral_vertices = temp.vertices[0]
#temp = temp3.in_label(PH_label_lh)
#ventral_vertices = np.unique(np.append(ventral_vertices, temp.vertices[0]))
#
#temp = temp3.in_label(FFC_label_lh)
#ventral_vertices = np.unique(np.append(ventral_vertices, temp.vertices[0]))
""" Temporal """
temp = temp3.in_label(STSvp_label_lh)
w_vertices = temp.vertices[0]
temp = temp3.in_label(STSdp_label_lh)
w_vertices = np.unique(np.append(w_vertices, temp.vertices[0]))
temp = temp3.in_label(TPOJ1_label_lh)
w_vertices = np.unique(np.append(w_vertices, temp.vertices[0]))
""" V1 """
temp = temp3.in_label(V1_label_lh)
v1_vertices = temp.vertices[0]
#temp = temp3.in_label(IFSp_label_lh)
#broca_vertices = np.unique(np.append(broca_vertices, temp.vertices[0]))
""" Just to visualize the new ROI """
mask = np.logical_and(times >= tp1[k], times <= tp2[k])
lh_label = temp3.in_label(TE2p_label_lh)
data = np.max(lh_label.data[:,mask],axis=1)
lh_label.data[data < 1.5] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = temp3.in_label(temp_labels)
ven_vertices = temp.vertices[0]
lh_label = temp3.in_label(FFC_label_lh)
data = np.max(lh_label.data[:,mask],axis=1)
lh_label.data[data < 1.5] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = temp3.in_label(temp_labels)
ven_vertices = np.unique(np.append(ven_vertices, temp.vertices[0]))
new_label = mne.Label(ven_vertices, hemi='lh')
brain3_1.add_label(new_label, borders=True, color='k')
""" Overwrite functional with anatomical ROIs """
lh_label = temp3.in_label(PHT_label_lh)
data = np.mean(lh_label.data[:,:],axis=1)
#lh_label.data[data < 1.5] = 0.
func_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
                                  subjects_dir=fs_dir, connected=False)
ven_vertices = func_labels.vertices
# Figures
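# The subplot blocks below all repeat the same mean-trace + error-band
# pattern. A small helper like this (a sketch; the condition column index
# and the group size n come from the surrounding script) expresses one curve
# per call. Note that, following the script's convention, the band is the
# std of the group-mean time course divided by sqrt(n), not a true
# between-subject SEM (that would require keeping the per-subject traces).
def plot_mean_with_band(t, M, cond, style, color, n, label=None):
    m = np.mean(M[:, [cond]], axis=1)   # group-mean time course
    yerr = np.std(m) / np.sqrt(n)       # error band per the script's convention
    plt.plot(t, m, style, color=color, label=label)
    plt.fill_between(t, m - yerr, m + yerr, facecolor=color, alpha=0.2,
                     edgecolor='none')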
#%%
""" All subjects """
plt.figure(1)
plt.clf()
X11 = X13[ventral_vertices,:,:,:]
M = np.mean(np.mean(X11[:,:,all_subject,:],axis=0),axis=1)
plt.subplot(3,2,1)
plt.hold(True)
plt.plot(times, np.mean(M[:,[0]],axis=1),'--',color=c_table[5],label='Word-No noise')
yerr = np.std(np.mean(M[:,[0]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[0]],axis=1)-yerr, np.mean(M[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M[:,[1]],axis=1),'--',color=c_table[3],label='Word-Med noise')
yerr = np.std(np.mean(M[:,[1]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[1]],axis=1)-yerr, np.mean(M[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M[:,[3]],axis=1),'--',color=c_table[1],label='Noise')
yerr = np.std(np.mean(M[:,[3]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[3]],axis=1)-yerr, np.mean(M[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.title('Dot task: Ventral')
plt.subplot(3,2,2)
plt.hold(True)
plt.plot(times, np.mean(M[:,[5]],axis=1),'-',color=c_table[5],label='Word-No noise')
yerr = np.std(np.mean(M[:,[5]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[5]],axis=1)-yerr, np.mean(M[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M[:,[6]],axis=1),'-',color=c_table[3],label='Word-Med noise')
yerr = np.std(np.mean(M[:,[6]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[6]],axis=1)-yerr, np.mean(M[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M[:,[8]],axis=1),'-',color=c_table[1],label='Noise')
yerr = np.std(np.mean(M[:,[8]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[8]],axis=1)-yerr, np.mean(M[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.title('Lexical task: Ventral')
X11 = X13[broca_vertices,:,:,:]
M = np.mean(np.mean(X11[:,:,all_subject,:],axis=0),axis=1)
plt.subplot(3,2,3)
plt.hold(True)
plt.plot(times, np.mean(M[:,[0]],axis=1),'--',color=c_table[5],label='Word-No noise')
yerr = np.std(np.mean(M[:,[0]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[0]],axis=1)-yerr, np.mean(M[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M[:,[1]],axis=1),'--',color=c_table[3],label='Word-Med noise')
yerr = np.std(np.mean(M[:,[1]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[1]],axis=1)-yerr, np.mean(M[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M[:,[3]],axis=1),'--',color=c_table[1],label='Noise')
yerr = np.std(np.mean(M[:,[3]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[3]],axis=1)-yerr, np.mean(M[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Dot task: Frontal')
plt.subplot(3,2,4)
plt.hold(True)
plt.plot(times, np.mean(M[:,[5]],axis=1),'-',color=c_table[5],label='Word-No noise')
yerr = np.std(np.mean(M[:,[5]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[5]],axis=1)-yerr, np.mean(M[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M[:,[6]],axis=1),'-',color=c_table[3],label='Word-Med noise')
yerr = np.std(np.mean(M[:,[6]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[6]],axis=1)-yerr, np.mean(M[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M[:,[8]],axis=1),'-',color=c_table[1],label='Noise')
yerr = np.std(np.mean(M[:,[8]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[8]],axis=1)-yerr, np.mean(M[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Lexical task: Frontal')
X11 = X13[w_vertices,:,:,:]
M = np.mean(np.mean(X11[:,:,all_subject,:],axis=0),axis=1)
plt.subplot(3,2,5)
plt.hold(True)
plt.plot(times, np.mean(M[:,[0]],axis=1),'--',color=c_table[5],label='Word-No noise')
yerr = np.std(np.mean(M[:,[0]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[0]],axis=1)-yerr, np.mean(M[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M[:,[1]],axis=1),'--',color=c_table[3],label='Word-Med noise')
yerr = np.std(np.mean(M[:,[1]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[1]],axis=1)-yerr, np.mean(M[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M[:,[3]],axis=1),'--',color=c_table[1],label='Noise')
yerr = np.std(np.mean(M[:,[3]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[3]],axis=1)-yerr, np.mean(M[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Dot task: Temporal')
plt.subplot(3,2,6)
plt.hold(True)
plt.plot(times, np.mean(M[:,[5]],axis=1),'-',color=c_table[5],label='Word-No noise')
yerr = np.std(np.mean(M[:,[5]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[5]],axis=1)-yerr, np.mean(M[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M[:,[6]],axis=1),'-',color=c_table[3],label='Word-Med noise')
yerr = np.std(np.mean(M[:,[6]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[6]],axis=1)-yerr, np.mean(M[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M[:,[8]],axis=1),'-',color=c_table[1],label='Noise')
yerr = np.std(np.mean(M[:,[8]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[8]],axis=1)-yerr, np.mean(M[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Lexical task: Temporal')
#%%
""" V1 """
plt.figure(2)
plt.clf()
X11 = X13[v1_vertices,:,:,:]
M = np.mean(np.mean(X11[:,:,all_subject,:],axis=0),axis=1)
plt.subplot(3,2,1)
plt.hold(True)
plt.plot(times, np.mean(M[:,[0]],axis=1),'--',color=c_table[5],label='Word-No noise')
yerr = np.std(np.mean(M[:,[0]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[0]],axis=1)-yerr, np.mean(M[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M[:,[1]],axis=1),'--',color=c_table[3],label='Word-Med noise')
yerr = np.std(np.mean(M[:,[1]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[1]],axis=1)-yerr, np.mean(M[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M[:,[3]],axis=1),'--',color=c_table[1],label='Noise')
yerr = np.std(np.mean(M[:,[3]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[3]],axis=1)-yerr, np.mean(M[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Dot task: V1')
plt.subplot(3,2,2)
plt.hold(True)
plt.plot(times, np.mean(M[:,[5]],axis=1),'-',color=c_table[5],label='Word-No noise')
yerr = np.std(np.mean(M[:,[5]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[5]],axis=1)-yerr, np.mean(M[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M[:,[6]],axis=1),'-',color=c_table[3],label='Word-Med noise')
yerr = np.std(np.mean(M[:,[6]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[6]],axis=1)-yerr, np.mean(M[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M[:,[8]],axis=1),'-',color=c_table[1],label='Noise')
yerr = np.std(np.mean(M[:,[8]],axis=1)) / np.sqrt(len(all_subject))
plt.fill_between(times, np.mean(M[:,[8]],axis=1)-yerr, np.mean(M[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Lexical task: V1')
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 2, 3)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M1[:,[0]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[0]],axis=1)-yerr, np.mean(M1[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M1[:,[1]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[1]],axis=1)-yerr, np.mean(M1[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M1[:,[3]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[3]],axis=1)-yerr, np.mean(M1[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Dot task (GR): V1')
plt.subplot(3, 2, 4)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M1[:,[5]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[5]],axis=1)-yerr, np.mean(M1[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M1[:,[6]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[6]],axis=1)-yerr, np.mean(M1[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M1[:,[8]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[8]],axis=1)-yerr, np.mean(M1[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Lexical task (GR): V1')
plt.subplot(3, 2, 5)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M2[:,[0]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[0]],axis=1)-yerr, np.mean(M2[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M2[:,[1]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[1]],axis=1)-yerr, np.mean(M2[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M2[:,[3]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[3]],axis=1)-yerr, np.mean(M2[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Dot task (PR): V1')
plt.subplot(3, 2, 6)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M2[:,[5]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[5]],axis=1)-yerr, np.mean(M2[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M2[:,[6]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[6]],axis=1)-yerr, np.mean(M2[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M2[:,[8]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[8]],axis=1)-yerr, np.mean(M2[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Lexical task (PR): V1')
""" Plot individual V1 responses """
#for iSub in np.arange(0,len(poor_readers)):
# plt.figure(100+iSub)
# plt.clf()
# plt.subplot(1,2,1)
# plt.hold(True)
# plt.plot(times, np.mean(X1[v1_vertices,:,poor_readers[iSub],0],axis=0), '--', color=c_table[5])
# plt.plot(times, np.mean(X1[v1_vertices,:,poor_readers[iSub],1],axis=0), '--', color=c_table[3])
# plt.plot(times, np.mean(X1[v1_vertices,:,poor_readers[iSub],3],axis=0), '--', color=c_table[1])
# plt.plot([0.1, 0.1],[0, 8],'-',color='k')
# plt.title(subs[poor_readers[iSub]])
# plt.subplot(1,2,2)
# plt.hold(True)
# plt.plot(times, np.mean(X1[v1_vertices,:,poor_readers[iSub],5],axis=0), '-', color=c_table[5])
# plt.plot(times, np.mean(X1[v1_vertices,:,poor_readers[iSub],6],axis=0), '-', color=c_table[3])
# plt.plot(times, np.mean(X1[v1_vertices,:,poor_readers[iSub],8],axis=0), '-', color=c_table[1])
# plt.plot([0.1, 0.1],[0, 8],'-',color='k')
# plt.title(subs[poor_readers[iSub]])
#%%
""" Good readers """
plt.figure(3)
plt.clf()
X11 = X13[ventral_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 2, 1)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M1[:,[0]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[0]],axis=1)-yerr, np.mean(M1[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M1[:,[1]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[1]],axis=1)-yerr, np.mean(M1[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M1[:,[3]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[3]],axis=1)-yerr, np.mean(M1[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Dot task (GR): Ventral')
plt.subplot(3, 2, 2)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M1[:,[5]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[5]],axis=1)-yerr, np.mean(M1[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M1[:,[6]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[6]],axis=1)-yerr, np.mean(M1[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M1[:,[8]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[8]],axis=1)-yerr, np.mean(M1[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Lexical task (GR): Ventral')
X11 = X13[broca_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 2, 3)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M1[:,[0]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[0]],axis=1)-yerr, np.mean(M1[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M1[:,[1]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[1]],axis=1)-yerr, np.mean(M1[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M1[:,[3]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[3]],axis=1)-yerr, np.mean(M1[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Dot task (GR): Frontal')
plt.subplot(3, 2, 4)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M1[:,[5]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[5]],axis=1)-yerr, np.mean(M1[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M1[:,[6]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[6]],axis=1)-yerr, np.mean(M1[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M1[:,[8]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[8]],axis=1)-yerr, np.mean(M1[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Lexical task (GR): Frontal')
X11 = X13[w_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 2, 5)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M1[:,[0]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[0]],axis=1)-yerr, np.mean(M1[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M1[:,[1]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[1]],axis=1)-yerr, np.mean(M1[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M1[:,[3]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[3]],axis=1)-yerr, np.mean(M1[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Dot task (GR): Temporal')
plt.subplot(3, 2, 6)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M1[:,[5]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[5]],axis=1)-yerr, np.mean(M1[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M1[:,[6]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[6]],axis=1)-yerr, np.mean(M1[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M1[:,[8]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[8]],axis=1)-yerr, np.mean(M1[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Lexical task (GR): Temporal')
#%%
""" Poor readers """
plt.figure(4)
plt.clf()
X11 = X13[ventral_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 2, 1)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M2[:,[0]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[0]],axis=1)-yerr, np.mean(M2[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M2[:,[1]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[1]],axis=1)-yerr, np.mean(M2[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M2[:,[3]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[3]],axis=1)-yerr, np.mean(M2[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Dot task (PR): Ventral')
plt.subplot(3, 2, 2)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M2[:,[5]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[5]],axis=1)-yerr, np.mean(M2[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M2[:,[6]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[6]],axis=1)-yerr, np.mean(M2[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M2[:,[8]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[8]],axis=1)-yerr, np.mean(M2[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Lexical task (PR): Ventral')
X11 = X13[broca_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 2, 3)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M2[:,[0]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[0]],axis=1)-yerr, np.mean(M2[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M2[:,[1]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[1]],axis=1)-yerr, np.mean(M2[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M2[:,[3]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[3]],axis=1)-yerr, np.mean(M2[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Dot task (PR): Frontal')
plt.subplot(3, 2, 4)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M2[:,[5]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[5]],axis=1)-yerr, np.mean(M2[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M2[:,[6]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[6]],axis=1)-yerr, np.mean(M2[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M2[:,[8]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[8]],axis=1)-yerr, np.mean(M2[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Lexical task (PR): Frontal')
X11 = X13[w_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 2, 5)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M2[:,[0]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[0]],axis=1)-yerr, np.mean(M2[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M2[:,[1]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[1]],axis=1)-yerr, np.mean(M2[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M2[:,[3]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[3]],axis=1)-yerr, np.mean(M2[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Dot task (PR): Temporal')
plt.subplot(3, 2, 6)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M2[:,[5]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[5]],axis=1)-yerr, np.mean(M2[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M2[:,[6]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[6]],axis=1)-yerr, np.mean(M2[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M2[:,[8]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[8]],axis=1)-yerr, np.mean(M2[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Lexical task (PR): Temporal')
#%%
""" Dot task: Good vs. Poor """
plt.figure(5)
plt.clf()
X11 = X1[ventral_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 3, 1)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[0]],axis=1),'--',color=c_table[4])
yerr = np.std(np.mean(M2[:,[0]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[0]],axis=1)-yerr, np.mean(M2[:,[0]],axis=1)+yerr, facecolor=c_table[4], alpha=0.3, edgecolor='none')
plt.plot(times, np.mean(M1[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M1[:,[0]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[0]],axis=1)-yerr, np.mean(M1[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('No Noise (GR vs. PR): Ventral')
plt.subplot(3, 3, 2)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[1]],axis=1),'--',color=c_table[2])
yerr = np.std(np.mean(M2[:,[1]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[1]],axis=1)-yerr, np.mean(M2[:,[1]],axis=1)+yerr, facecolor=c_table[2], alpha=0.3, edgecolor='none')
plt.plot(times, np.mean(M1[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M1[:,[1]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[1]],axis=1)-yerr, np.mean(M1[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Med Noise (GR vs. PR): Ventral')
plt.subplot(3, 3, 3)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[3]],axis=1),'--',color=c_table[0])
yerr = np.std(np.mean(M2[:,[3]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[3]],axis=1)-yerr, np.mean(M2[:,[3]],axis=1)+yerr, facecolor=c_table[0], alpha=0.3, edgecolor='none')
plt.plot(times, np.mean(M1[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M1[:,[3]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[3]],axis=1)-yerr, np.mean(M1[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Noise (GR vs. PR): Ventral')
X11 = X1[broca_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 3, 4)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[0]],axis=1),'--',color=c_table[4])
yerr = np.std(np.mean(M2[:,[0]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[0]],axis=1)-yerr, np.mean(M2[:,[0]],axis=1)+yerr, facecolor=c_table[4], alpha=0.3, edgecolor='none')
plt.plot(times, np.mean(M1[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M1[:,[0]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[0]],axis=1)-yerr, np.mean(M1[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('No Noise (GR vs. PR): Frontal')
plt.subplot(3, 3, 5)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[1]],axis=1),'--',color=c_table[2])
yerr = np.std(np.mean(M2[:,[1]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[1]],axis=1)-yerr, np.mean(M2[:,[1]],axis=1)+yerr, facecolor=c_table[2], alpha=0.3, edgecolor='none')
plt.plot(times, np.mean(M1[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M1[:,[1]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[1]],axis=1)-yerr, np.mean(M1[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Med Noise (GR vs. PR): Frontal')
plt.subplot(3, 3, 6)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[3]],axis=1),'--',color=c_table[0])
yerr = np.std(np.mean(M2[:,[3]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[3]],axis=1)-yerr, np.mean(M2[:,[3]],axis=1)+yerr, facecolor=c_table[0], alpha=0.3, edgecolor='none')
plt.plot(times, np.mean(M1[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M1[:,[3]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[3]],axis=1)-yerr, np.mean(M1[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Noise (GR vs. PR): Frontal')
X11 = X1[w_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 3, 7)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[0]],axis=1),'--',color=c_table[4])
yerr = np.std(np.mean(M2[:,[0]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[0]],axis=1)-yerr, np.mean(M2[:,[0]],axis=1)+yerr, facecolor=c_table[4], alpha=0.3, edgecolor='none')
plt.plot(times, np.mean(M1[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M1[:,[0]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[0]],axis=1)-yerr, np.mean(M1[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('No Noise (GR vs. PR): Temporal')
plt.subplot(3, 3, 8)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[1]],axis=1),'--',color=c_table[2])
yerr = np.std(np.mean(M2[:,[1]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[1]],axis=1)-yerr, np.mean(M2[:,[1]],axis=1)+yerr, facecolor=c_table[2], alpha=0.3, edgecolor='none')
plt.plot(times, np.mean(M1[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M1[:,[1]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[1]],axis=1)-yerr, np.mean(M1[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Med Noise (GR vs. PR): Temporal')
plt.subplot(3, 3, 9)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[3]],axis=1),'--',color=c_table[0])
yerr = np.std(np.mean(M2[:,[3]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[3]],axis=1)-yerr, np.mean(M2[:,[3]],axis=1)+yerr, facecolor=c_table[0], alpha=0.3, edgecolor='none')
plt.plot(times, np.mean(M1[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M1[:,[3]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[3]],axis=1)-yerr, np.mean(M1[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Noise (GR vs. PR): Temporal')
#%%
""" Lexical task: Good vs. Poor """
plt.figure(6)
plt.clf()
X11 = X1[ventral_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 3, 1)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[5]],axis=1),'-',color=c_table[4])
yerr = np.std(np.mean(M2[:,[5]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[5]],axis=1)-yerr, np.mean(M2[:,[5]],axis=1)+yerr, facecolor=c_table[4], alpha=0.3, edgecolor='none')
plt.plot(times, np.mean(M1[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M1[:,[5]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[5]],axis=1)-yerr, np.mean(M1[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('No Noise (GR vs. PR): Ventral')
plt.subplot(3, 3, 2)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[6]],axis=1),'-',color=c_table[2])
yerr = np.std(np.mean(M2[:,[6]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[6]],axis=1)-yerr, np.mean(M2[:,[6]],axis=1)+yerr, facecolor=c_table[2], alpha=0.3, edgecolor='none')
plt.plot(times, np.mean(M1[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M1[:,[6]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[6]],axis=1)-yerr, np.mean(M1[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Med Noise (GR vs. PR): Ventral')
plt.subplot(3, 3, 3)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[8]],axis=1),'-',color=c_table[0])
yerr = np.std(np.mean(M2[:,[8]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[8]],axis=1)-yerr, np.mean(M2[:,[8]],axis=1)+yerr, facecolor=c_table[0], alpha=0.3, edgecolor='none')
plt.plot(times, np.mean(M1[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M1[:,[8]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[8]],axis=1)-yerr, np.mean(M1[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Noise (GR vs. PR): Ventral')
X11 = X1[broca_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 3, 4)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[5]],axis=1),'-',color=c_table[4])
yerr = np.std(np.mean(M2[:,[5]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[5]],axis=1)-yerr, np.mean(M2[:,[5]],axis=1)+yerr, facecolor=c_table[4], alpha=0.3, edgecolor='none')
plt.plot(times, np.mean(M1[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M1[:,[5]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[5]],axis=1)-yerr, np.mean(M1[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('No Noise (GR vs. PR): Frontal')
plt.subplot(3, 3, 5)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[6]],axis=1),'-',color=c_table[2])
yerr = np.std(np.mean(M2[:,[6]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[6]],axis=1)-yerr, np.mean(M2[:,[6]],axis=1)+yerr, facecolor=c_table[2], alpha=0.3, edgecolor='none')
plt.plot(times, np.mean(M1[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M1[:,[6]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[6]],axis=1)-yerr, np.mean(M1[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Med Noise (GR vs. PR): Frontal')
plt.subplot(3, 3, 6)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[8]],axis=1),'-',color=c_table[0])
yerr = np.std(np.mean(M2[:,[8]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[8]],axis=1)-yerr, np.mean(M2[:,[8]],axis=1)+yerr, facecolor=c_table[0], alpha=0.3, edgecolor='none')
plt.plot(times, np.mean(M1[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M1[:,[8]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[8]],axis=1)-yerr, np.mean(M1[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Noise (GR vs. PR): Frontal')
X11 = X1[w_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 3, 7)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[5]],axis=1),'-',color=c_table[4])
yerr = np.std(np.mean(M2[:,[5]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[5]],axis=1)-yerr, np.mean(M2[:,[5]],axis=1)+yerr, facecolor=c_table[4], alpha=0.3, edgecolor='none')
plt.plot(times, np.mean(M1[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M1[:,[5]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[5]],axis=1)-yerr, np.mean(M1[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('No Noise (GR vs. PR): Temporal')
plt.subplot(3, 3, 8)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[6]],axis=1),'-',color=c_table[2])
yerr = np.std(np.mean(M2[:,[6]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[6]],axis=1)-yerr, np.mean(M2[:,[6]],axis=1)+yerr, facecolor=c_table[2], alpha=0.3, edgecolor='none')
plt.plot(times, np.mean(M1[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M1[:,[6]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[6]],axis=1)-yerr, np.mean(M1[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Med Noise (GR vs. PR): Temporal')
plt.subplot(3, 3, 9)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[8]],axis=1),'-',color=c_table[0])
yerr = np.std(np.mean(M2[:,[8]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[8]],axis=1)-yerr, np.mean(M2[:,[8]],axis=1)+yerr, facecolor=c_table[0], alpha=0.3, edgecolor='none')
plt.plot(times, np.mean(M1[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M1[:,[8]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[8]],axis=1)-yerr, np.mean(M1[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('Noise (GR vs. PR): Temporal')
#%%
""" Task effects """
plt.figure(7)
plt.clf()
X11 = X1[ventral_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 3, 1)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M1[:,[0]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[0]],axis=1)-yerr, np.mean(M1[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M1[:,[5]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[5]],axis=1)-yerr, np.mean(M1[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('GR: Ventral (no noise)')
plt.subplot(3, 3, 2)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M1[:,[1]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[1]],axis=1)-yerr, np.mean(M1[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M1[:,[6]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[6]],axis=1)-yerr, np.mean(M1[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('GR: Ventral (med noise)')
plt.subplot(3, 3, 3)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M1[:,[3]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[3]],axis=1)-yerr, np.mean(M1[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M1[:,[8]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[8]],axis=1)-yerr, np.mean(M1[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('GR: Ventral (noise)')
X11 = X1[broca_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 3, 4)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M1[:,[0]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[0]],axis=1)-yerr, np.mean(M1[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M1[:,[5]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[5]],axis=1)-yerr, np.mean(M1[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('GR: Frontal (no noise)')
plt.subplot(3, 3, 5)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M1[:,[1]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[1]],axis=1)-yerr, np.mean(M1[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M1[:,[6]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[6]],axis=1)-yerr, np.mean(M1[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('GR: Frontal (med noise)')
plt.subplot(3, 3, 6)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M1[:,[3]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[3]],axis=1)-yerr, np.mean(M1[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M1[:,[8]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[8]],axis=1)-yerr, np.mean(M1[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('GR: Frontal (noise)')
X11 = X1[w_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 3, 7)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M1[:,[0]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[0]],axis=1)-yerr, np.mean(M1[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M1[:,[5]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[5]],axis=1)-yerr, np.mean(M1[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('GR: Temporal (no noise)')
plt.subplot(3, 3, 8)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M1[:,[1]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[1]],axis=1)-yerr, np.mean(M1[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M1[:,[6]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[6]],axis=1)-yerr, np.mean(M1[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('GR: Temporal (med noise)')
plt.subplot(3, 3, 9)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M1[:,[3]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[3]],axis=1)-yerr, np.mean(M1[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M1[:,[8]],axis=1)) / np.sqrt(len(good_readers))
plt.fill_between(times, np.mean(M1[:,[8]],axis=1)-yerr, np.mean(M1[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('GR: Temporal (noise)')
#%%
""" Task effects """
plt.figure(8)
plt.clf()
X11 = X1[ventral_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 3, 1)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M2[:,[0]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[0]],axis=1)-yerr, np.mean(M2[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M2[:,[5]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[5]],axis=1)-yerr, np.mean(M2[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('PR: Ventral (no noise)')
plt.subplot(3, 3, 2)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M2[:,[1]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[1]],axis=1)-yerr, np.mean(M2[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M2[:,[6]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[6]],axis=1)-yerr, np.mean(M2[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('PR: Ventral (med noise)')
plt.subplot(3, 3, 3)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M2[:,[3]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[3]],axis=1)-yerr, np.mean(M2[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M2[:,[8]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[8]],axis=1)-yerr, np.mean(M2[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('PR: Ventral (noise)')
X11 = X1[broca_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 3, 4)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M2[:,[0]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[0]],axis=1)-yerr, np.mean(M2[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M2[:,[5]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[5]],axis=1)-yerr, np.mean(M2[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('PR: Frontal (no noise)')
plt.subplot(3, 3, 5)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M2[:,[1]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[1]],axis=1)-yerr, np.mean(M2[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M2[:,[6]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[6]],axis=1)-yerr, np.mean(M2[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('PR: Frontal (med noise)')
plt.subplot(3, 3, 6)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M2[:,[3]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[3]],axis=1)-yerr, np.mean(M2[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M2[:,[8]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[8]],axis=1)-yerr, np.mean(M2[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('PR: Frontal (noise)')
X11 = X1[w_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 3, 7)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M2[:,[0]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[0]],axis=1)-yerr, np.mean(M2[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M2[:,[5]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[5]],axis=1)-yerr, np.mean(M2[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('PR: Temporal (no noise)')
plt.subplot(3, 3, 8)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M2[:,[1]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[1]],axis=1)-yerr, np.mean(M2[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M2[:,[6]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[6]],axis=1)-yerr, np.mean(M2[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('PR: Temporal (med noise)')
plt.subplot(3, 3, 9)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M2[:,[3]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[3]],axis=1)-yerr, np.mean(M2[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M2[:,[8]],axis=1)) / np.sqrt(len(poor_readers))
plt.fill_between(times, np.mean(M2[:,[8]],axis=1)-yerr, np.mean(M2[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('PR: Temporal (noise)')
#%%
""" Right h """
temp = temp3.in_label(a44_label_rh)
broca_vertices = temp.vertices[1]
temp = temp3.in_label(a45_label_rh)
broca_vertices = np.unique(np.append(broca_vertices, temp.vertices[1]))
temp = temp3.in_label(TE2p_label_rh)
ventral_vertices1 = temp.vertices[1]
temp = temp3.in_label(PH_label_rh)
ventral_vertices1 = np.unique(np.append(ventral_vertices1, temp.vertices[1]))
temp = temp3.in_label(PHT_label_rh)
ventral_vertices1 = np.unique(np.append(ventral_vertices1, temp.vertices[1]))
temp = temp3.in_label(TE1p_label_rh)
ventral_vertices1 = np.unique(np.append(ventral_vertices1, temp.vertices[1]))
plt.figure(20)
plt.clf()
plt.subplot(2,1,1)
plt.hold(True)
X11 = X1[ventral_vertices1,:,:,:]
M = np.mean(np.mean(X11[:,:,:,:],axis=0),axis=1)
plt.plot(times, np.mean(M[:,[0]],axis=1),'--',color=c_table[5])
plt.plot(times, np.mean(M[:,[5]],axis=1),'-',color=c_table[5],label='Word-No noise')
plt.plot(times, np.mean(M[:,[1]],axis=1),'--',color=c_table[3])
plt.plot(times, np.mean(M[:,[6]],axis=1),'-',color=c_table[3],label='Word-Med noise')
plt.plot(times, np.mean(M[:,[3]],axis=1),'--',color=c_table[1])
plt.plot(times, np.mean(M[:,[8]],axis=1),'-',color=c_table[1],label='Noise')
plt.subplot(2,1,2)
plt.hold(True)
X11 = X1[broca_vertices,:,:,:]
M = np.mean(np.mean(X11[:,:,:,:],axis=0),axis=1)
plt.plot(times, np.mean(M[:,[0]],axis=1),'--',color=c_table[5])
plt.plot(times, np.mean(M[:,[5]],axis=1),'-',color=c_table[5],label='Word-No noise')
plt.plot(times, np.mean(M[:,[1]],axis=1),'--',color=c_table[3])
plt.plot(times, np.mean(M[:,[6]],axis=1),'-',color=c_table[3],label='Word-Med noise')
plt.plot(times, np.mean(M[:,[3]],axis=1),'--',color=c_table[1])
plt.plot(times, np.mean(M[:,[8]],axis=1),'-',color=c_table[1],label='Noise')
X11 = X1[ventral_vertices1,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.figure(30)
plt.clf()
plt.subplot(2, 1, 1)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[0]],axis=1),'--',color=c_table[5])
plt.plot(times, np.mean(M1[:,[5]],axis=1),'-',color=c_table[5],label='Word-No noise')
plt.plot(times, np.mean(M1[:,[1]],axis=1),'--',color=c_table[3])
plt.plot(times, np.mean(M1[:,[6]],axis=1),'-',color=c_table[3],label='Word-Med noise')
plt.plot(times, np.mean(M1[:,[3]],axis=1),'--',color=c_table[1])
plt.plot(times, np.mean(M1[:,[8]],axis=1),'-',color=c_table[1],label='Noise')
plt.ylabel('%s value' % method)
plt.title('Good readers')
plt.legend(loc='upper left', shadow=True)
plt.subplot(2, 1, 2)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[0]],axis=1),'--',color=c_table[5])
plt.plot(times, np.mean(M2[:,[5]],axis=1),'-',color=c_table[5],label='Word-No noise')
plt.plot(times, np.mean(M2[:,[1]],axis=1),'--',color=c_table[3])
plt.plot(times, np.mean(M2[:,[6]],axis=1),'-',color=c_table[3],label='Word-Med noise')
plt.plot(times, np.mean(M2[:,[3]],axis=1),'--',color=c_table[1])
plt.plot(times, np.mean(M2[:,[8]],axis=1),'-',color=c_table[1],label='Noise')
#plt.ylim(0, 4)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
plt.title('Poor readers')
plt.legend(loc='upper left', shadow=True)
plt.show()
X11 = X1[broca_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.figure(40)
plt.clf()
plt.subplot(2, 1, 1)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[0]],axis=1),'--',color=c_table[5])
plt.plot(times, np.mean(M1[:,[5]],axis=1),'-',color=c_table[5],label='Word-No noise')
plt.plot(times, np.mean(M1[:,[1]],axis=1),'--',color=c_table[3])
plt.plot(times, np.mean(M1[:,[6]],axis=1),'-',color=c_table[3],label='Word-Med noise')
plt.plot(times, np.mean(M1[:,[3]],axis=1),'--',color=c_table[1])
plt.plot(times, np.mean(M1[:,[8]],axis=1),'-',color=c_table[1],label='Noise')
plt.ylabel('%s value' % method)
plt.title('Good readers')
plt.legend(loc='upper left', shadow=True)
plt.subplot(2, 1, 2)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[0]],axis=1),'--',color=c_table[5])
plt.plot(times, np.mean(M2[:,[5]],axis=1),'-',color=c_table[5],label='Word-No noise')
plt.plot(times, np.mean(M2[:,[1]],axis=1),'--',color=c_table[3])
plt.plot(times, np.mean(M2[:,[6]],axis=1),'-',color=c_table[3],label='Word-Med noise')
plt.plot(times, np.mean(M2[:,[3]],axis=1),'--',color=c_table[1])
plt.plot(times, np.mean(M2[:,[8]],axis=1),'-',color=c_table[1],label='Noise')
#plt.ylim(0, 4)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
plt.title('Poor readers')
plt.legend(loc='upper left', shadow=True)
plt.show()
#%%
k = 1
tp1 = [0.08, 0.13, 0.15, 0.20, 0.30]
tp2 = [0.12, 0.17, 0.19, 0.24, 0.35]
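# k selects one of five analysis windows; k = 1 gives 0.13-0.17 s.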
mask = np.logical_and(times >= tp1[k], times <= tp2[k])
""" Plot scatter """
temp_diff = np.subtract(np.mean(X1[:,:,:,[0,1]],axis=3), np.mean(X1[:,:,:,[3]],axis=3))
data_diff = np.mean(temp_diff[vwfa_labels,:,:], axis = 0)
#data1 = data1.reshape((data1.shape[1],data1.shape[0],data1.shape[2]))
vwfa_response = np.mean(data_diff[mask,:],axis=0)
plt.figure(4)
plt.clf()
ax = plt.subplot()
ax.scatter(brs, vwfa_response, s=30, c='k', alpha=0.5)
for i, txt in enumerate(subs):
ax.annotate(txt, (brs[i],vwfa_response[i]))
np.corrcoef(vwfa_response,brs)
#%%
""" Individual """
for iSub, s in enumerate(subs):
plt.figure(100+iSub)
plt.clf()
plt.subplot(2,2,1)
plt.hold(True)
plt.plot(times, np.mean(X1[ven_vertices,:,iSub,0],axis=0), '--', color=c_table[5])
plt.plot(times, np.mean(X1[ven_vertices,:,iSub,3],axis=0), '--', color=c_table[1])
plt.subplot(2,2,2)
plt.hold(True)
plt.plot(times, np.mean(X1[ven_vertices,:,iSub,5],axis=0), '-', color=c_table[5])
plt.plot(times, np.mean(X1[ven_vertices,:,iSub,8],axis=0), '-', color=c_table[1])
plt.subplot(2,2,3)
plt.hold(True)
plt.plot(times, np.mean(X1[ven_vertices,:,iSub,5],axis=0), '-', color=c_table[5])
plt.plot(times, np.mean(X1[ven_vertices,:,iSub,0],axis=0), '--', color=c_table[5])
plt.subplot(2,2,4)
plt.hold(True)
plt.plot(times, np.mean(X1[ven_vertices,:,iSub,8],axis=0), '-', color=c_table[1])
plt.plot(times, np.mean(X1[ven_vertices,:,iSub,3],axis=0), '--', color=c_table[1])
plt.title(s)
#%%
""" Good readers vs. poor readers """
k = 1
#tmin = 0
tp1 = [0.08, 0.13, 0.15, 0.20, 0.30]
tp2 = [0.12, 0.17, 0.19, 0.24, 0.35]
mask = times == 0.15
PHT_label_lh = [label for label in labels if label.name == 'L_PHT_ROI-lh'][0]
PHT_label_rh = [label for label in labels if label.name == 'R_PHT_ROI-rh'][0]
TE1p_label_lh = [label for label in labels if label.name == 'L_TE1p_ROI-lh'][0]
TE1p_label_rh = [label for label in labels if label.name == 'R_TE1p_ROI-rh'][0]
TE2p_label_lh = [label for label in labels if label.name == 'L_TE2p_ROI-lh'][0]
TE2p_label_rh = [label for label in labels if label.name == 'R_TE2p_ROI-rh'][0]
PH_label_lh = [label for label in labels if label.name == 'L_PH_ROI-lh'][0]
PH_label_rh = [label for label in labels if label.name == 'R_PH_ROI-rh'][0]
FFC_label_lh = [label for label in labels if label.name == 'L_FFC_ROI-lh'][0]
FFC_label_rh = [label for label in labels if label.name == 'R_FFC_ROI-rh'][0]
IFSp_label_lh = [label for label in labels if label.name == 'L_IFSp_ROI-lh'][0]
IFSp_label_rh = [label for label in labels if label.name == 'R_IFSp_ROI-rh'][0]
IFJp_label_lh = [label for label in labels if label.name == 'L_IFJp_ROI-lh'][0]
IFJp_label_rh = [label for label in labels if label.name == 'R_IFJp_ROI-rh'][0]
IFJa_label_lh = [label for label in labels if label.name == 'L_IFJa_ROI-lh'][0]
IFJa_label_rh = [label for label in labels if label.name == 'R_IFJa_ROI-rh'][0]
a45_label_lh = [label for label in labels if label.name == 'L_45_ROI-lh'][0]
a45_label_rh = [label for label in labels if label.name == 'R_45_ROI-rh'][0]
a44_label_lh = [label for label in labels if label.name == 'L_44_ROI-lh'][0]
a44_label_rh = [label for label in labels if label.name == 'R_44_ROI-rh'][0]
stv_label_lh = [label for label in labels if label.name == 'L_STV_ROI-lh'][0]
stv_label_rh = [label for label in labels if label.name == 'R_STV_ROI-rh'][0]
#new_data = X1[:,:,all_subject,:]
good_data = X1[:,:,good_readers,:]
poor_data = X1[:,:,poor_readers,:]
data1 = np.subtract(np.mean(good_data[:,:,:,[6]],axis=3), np.mean(poor_data[:,:,:,[6]],axis=3))
#del new_data
data11 = data1[:,:,:]
del data1, good_data, poor_data
#stat_fun = partial(mne.stats.ttest_1samp_no_p,sigma=1e-3)
data11 = np.transpose(data11,[2,1,0])
stat_fun = partial(mne.stats.ttest_1samp_no_p)
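# ttest_1samp_no_p runs a one-sample t-test across subjects (axis 0 after the
# transpose above) at every time point and vertex; wrapping the resulting
# t-map in a SourceEstimate lets it be plotted on fsaverage like any other STC.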
temp3 = mne.SourceEstimate(np.transpose(stat_fun(data11)), fs_vertices, tmin,
tstep,subject='fsaverage')
brain3_1 = temp3.plot(hemi='lh', subjects_dir=fs_dir, views = ['lat','ven','med'], #views=['lat','ven','med'], #transparent = True,
initial_time=0.15) # clim=dict(kind='value', lims=[2.0, 2.1, np.max(temp3.data[:,:])])
brain3_1.add_label(PHT_label_lh, borders=True, color=c_table[0])
brain3_1.add_label(TE2p_label_lh, borders=True, color=c_table[1])
brain3_1.add_label(PH_label_lh, borders=True, color=c_table[2])
brain3_1.add_label(FFC_label_lh, borders=True, color=c_table[3])
brain3_1.add_label(TE1p_label_lh, borders=True, color=c_table[4])
brain3_1.add_label(IFSp_label_lh, borders=True, color=c_table[5])
brain3_1.add_label(IFJp_label_lh, borders=True, color=c_table[6])
brain3_1.add_label(IFJa_label_lh, borders=True, color=c_table[7])
brain3_1.add_label(a45_label_lh, borders=True, color=c_table[8])
brain3_1.add_label(a44_label_lh, borders=True, color=c_table[8])
brain3_1.add_label(stv_label_lh, borders=True, color=c_table[9])
brain3_1.save_movie('GoodPoor_Lexical_MedNoise_lh.mp4',time_dilation = 4.0,framerate = 24)
#%%
""" Single condition """
k = 1
#tmin = 0
tp1 = [0.08, 0.13, 0.15, 0.20, 0.30]
tp2 = [0.12, 0.17, 0.19, 0.24, 0.35]
mask = times == 0.15
PHT_label_lh = [label for label in labels if label.name == 'L_PHT_ROI-lh'][0]
PHT_label_rh = [label for label in labels if label.name == 'R_PHT_ROI-rh'][0]
TE1p_label_lh = [label for label in labels if label.name == 'L_TE1p_ROI-lh'][0]
TE1p_label_rh = [label for label in labels if label.name == 'R_TE1p_ROI-rh'][0]
TE2p_label_lh = [label for label in labels if label.name == 'L_TE2p_ROI-lh'][0]
TE2p_label_rh = [label for label in labels if label.name == 'R_TE2p_ROI-rh'][0]
PH_label_lh = [label for label in labels if label.name == 'L_PH_ROI-lh'][0]
PH_label_rh = [label for label in labels if label.name == 'R_PH_ROI-rh'][0]
FFC_label_lh = [label for label in labels if label.name == 'L_FFC_ROI-lh'][0]
FFC_label_rh = [label for label in labels if label.name == 'R_FFC_ROI-rh'][0]
IFSp_label_lh = [label for label in labels if label.name == 'L_IFSp_ROI-lh'][0]
IFSp_label_rh = [label for label in labels if label.name == 'R_IFSp_ROI-rh'][0]
IFJp_label_lh = [label for label in labels if label.name == 'L_IFJp_ROI-lh'][0]
IFJp_label_rh = [label for label in labels if label.name == 'R_IFJp_ROI-rh'][0]
IFJa_label_lh = [label for label in labels if label.name == 'L_IFJa_ROI-lh'][0]
IFJa_label_rh = [label for label in labels if label.name == 'R_IFJa_ROI-rh'][0]
a45_label_lh = [label for label in labels if label.name == 'L_45_ROI-lh'][0]
a45_label_rh = [label for label in labels if label.name == 'R_45_ROI-rh'][0]
a44_label_lh = [label for label in labels if label.name == 'L_44_ROI-lh'][0]
a44_label_rh = [label for label in labels if label.name == 'R_44_ROI-rh'][0]
stv_label_lh = [label for label in labels if label.name == 'L_STV_ROI-lh'][0]
stv_label_rh = [label for label in labels if label.name == 'R_STV_ROI-rh'][0]
#new_data = X1[:,:,all_subject,:]
good_data = X1[:,:,good_readers,:]
poor_data = X1[:,:,poor_readers,:]
data1 = np.mean(good_data[:,:,:,[6]],axis=3)
#del new_data
data11 = data1[:,:,:]
del data1, good_data, poor_data
#stat_fun = partial(mne.stats.ttest_1samp_no_p,sigma=1e-3)
data11 = np.transpose(data11,[2,1,0])
stat_fun = partial(mne.stats.ttest_1samp_no_p)
temp3 = mne.SourceEstimate(np.transpose(stat_fun(data11)), fs_vertices, tmin,
tstep,subject='fsaverage')
brain3_1 = temp3.plot(hemi='lh', subjects_dir=fs_dir, views = ['lat','ven','med'], #views=['lat','ven','med'], #transparent = True,
initial_time=0.15) # clim=dict(kind='value', lims=[2.0, 2.1, np.max(temp3.data[:,:])])
brain3_1.add_label(PHT_label_lh, borders=True, color=c_table[0])
brain3_1.add_label(TE2p_label_lh, borders=True, color=c_table[1])
brain3_1.add_label(PH_label_lh, borders=True, color=c_table[2])
brain3_1.add_label(FFC_label_lh, borders=True, color=c_table[3])
brain3_1.add_label(TE1p_label_lh, borders=True, color=c_table[4])
brain3_1.add_label(IFSp_label_lh, borders=True, color=c_table[5])
brain3_1.add_label(IFJp_label_lh, borders=True, color=c_table[6])
brain3_1.add_label(IFJa_label_lh, borders=True, color=c_table[7])
brain3_1.add_label(a45_label_lh, borders=True, color=c_table[8])
brain3_1.add_label(a44_label_lh, borders=True, color=c_table[8])
brain3_1.add_label(stv_label_lh, borders=True, color=c_table[9])
brain3_1.save_movie('Single_Lexical_MedNoise_GR_lh.mp4',time_dilation = 4.0,framerate = 24)
#%%
""" VWFA TE2p """
aparc_label_name = 'TE2p_ROI'#'TE2p_ROI' #'TE2p_ROI' #'inferiortemporal' #'pericalcarine'
#anat_label1 = mne.read_labels_from_annot('fsaverage', parc='aparc',surf_name='white',
# subjects_dir=fs_dir, regexp=aparc_label_name)
anat_label1 = mne.read_labels_from_annot('fsaverage', parc='HCPMMP1', surf_name='white',
subjects_dir=fs_dir, regexp=aparc_label_name)
vertices_mask_lh = mne.Label.get_vertices_used(anat_label1[0])
vertices_mask_rh = mne.Label.get_vertices_used(anat_label1[1])
#aparc_label_name = 'PH_ROI' #'TE2p_ROI' #'inferiortemporal' #'pericalcarine'
## anat_label = mne.read_labels_from_annot('fsaverage', parc='aparc',surf_name='white',
## subjects_dir=fs_dir, regexp=aparc_label_name)
#anat_label2 = mne.read_labels_from_annot('fsaverage', parc='HCPMMP1', surf_name='white',
# subjects_dir=fs_dir, regexp=aparc_label_name)
#vertices_mask_lh2 = mne.Label.get_vertices_used(anat_label2[0])
#vertices_mask_rh2 = mne.Label.get_vertices_used(anat_label2[1])
#
#vertices_mask_lh = np.append(vertices_mask_lh1,vertices_mask_lh2)
#vertices_mask_rh = np.append(vertices_mask_rh1,vertices_mask_rh2)
data1 = np.subtract(np.mean(X1[:,:,:,[1,6]],axis=3), np.mean(X1[:,:,:,[3,8]],axis=3))
data11 = data1[:,:,:]
#stat_fun = partial(mne.stats.ttest_1samp_no_p,sigma=1e-3)
data11 = np.transpose(data11,[2,1,0])
stat_fun = partial(mne.stats.ttest_1samp_no_p)
temp3 = mne.SourceEstimate(np.transpose(stat_fun(data11)), fs_vertices, tmin,
tstep,subject='fsaverage')
##
brain3_1 = temp3.plot(hemi='lh', subjects_dir=fs_dir, views='lat', #transparent = True,
clim=dict(kind='value', lims=[0, 1.5, np.max(temp3.data[vertices_mask_lh,:])]),
initial_time=0.15) # background='white', size=(800, 600)
brain3_1.add_label(anat_label1[0], borders=True, color='k')
#brain3_1.add_label(anat_label2[0], borders=True, color='k')
brain3_2 = temp3.plot(hemi='rh', subjects_dir=fs_dir, views='lat',
clim=dict(kind='value', lims=[0, 1.5, np.max(temp3.data[vertices_mask_rh,:])]),
initial_time=0.15)
brain3_2.add_label(anat_label1[1], borders=True, color='k')
#brain3_2.add_label(anat_label2[1], borders=True, color='k')
k = 1
#tmin = 0
tp1 = [0.08, 0.13, 0.15, 0.20, 0.30]
tp2 = [0.12, 0.17, 0.19, 0.24, 0.35]
#mask = np.logical_and(times >= tp1[k], times <= tp2[k])
mask = times == 0.15
""" Left """
lh_label = temp3.in_label(anat_label1[0])
data = np.mean(lh_label.data[:,mask],axis=1)
lh_label.data[data < 1.5] = 0.
func_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
brain3_1.add_label(func_labels, borders=True, color='b')
#brain3_1.save_image('l_TE2p.png')
""" Right """
rh_label = temp3.in_label(anat_label1[1])
#data2 = rh_label.data
#rh_label.data[data2 < 1.5] = 0.
_, func_labels2 = mne.stc_to_label(rh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
brain3_2.add_label(func_labels2, borders=True, color='b')
vwfa_labels = func_labels.vertices
X11 = X1[vwfa_labels,:,:,:]
X11 = X11[:,:,:,:]
#X11 = np.delete(X11,16,2)
#M = np.mean(np.mean(X1[vwfa_labels,:,:,:],axis=0),axis=1)
M = np.mean(np.mean(X11[:,:,:,:],axis=0),axis=1)
plt.figure(2)
plt.clf()
plt.subplot(2,1,1)
plt.hold(True)
#plt.plot(times, np.mean(M[:,[0]],axis=1),'--',color=c_table[5])
plt.plot(times, np.mean(M[:,[5]],axis=1),'-',color=c_table[5],label='Word-No noise')
#plt.plot(times, np.mean(M[:,[1]],axis=1),'--',color=c_table[3])
plt.plot(times, np.mean(M[:,[6]],axis=1),'-',color=c_table[3],label='Word-Med noise')
#plt.plot(times, np.mean(M[:,[3]],axis=1),'--',color=c_table[1])
plt.plot(times, np.mean(M[:,[8]],axis=1),'-',color=c_table[1],label='Noise')
plt.plot([0.15, 0.15],[0, 4],'-',color='k')
#plt.ylim(0, 4)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
#plt.title('High contrast')
plt.legend(loc='upper left', shadow=True)
plt.subplot(2,1,2)
plt.hold(True)
plt.plot(times, np.mean(M[:,[0]],axis=1),'--',color=c_table[5],label='Word-No noise')
#plt.plot(times, np.mean(M[:,[5]],axis=1),'-',color=c_table[5],label='Word')
plt.plot(times, np.mean(M[:,[1]],axis=1),'--',color=c_table[3],label='Word-Med noise')
#plt.plot(times, np.mean(M[:,[6]],axis=1),'-',color=c_table[3],label='Word')
plt.plot(times, np.mean(M[:,[3]],axis=1),'--',color=c_table[1],label='Noise')
#plt.plot(times, np.mean(M[:,[8]],axis=1),'-',color=c_table[1],label='Noise')
#plt.plot(times, np.mean(M[:,[2]],axis=1),'--',color=c_table[5])
#plt.plot(times, np.mean(M[:,[7]],axis=1),'-',color=c_table[5],label='Word')
#
#plt.plot(times, np.mean(M[:,[4]],axis=1),'--',color=c_table[1])
#plt.plot(times, np.mean(M[:,[9]],axis=1),'-',color=c_table[1],label='Noise')
plt.plot([0.15, 0.15],[0, 4],'-',color='k')
#plt.ylim(0, 4)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
#plt.title('Low contrast')
plt.legend(loc='upper left', shadow=True)
plt.show()
m1 = np.transpose(brs) >= 85
M1 = np.mean(np.mean(X11[:,:,m1,:],axis=0),axis=1)
m2 = np.logical_not(m1)
M2 = np.mean(np.mean(X11[:,:,m2,:],axis=0),axis=1)
plt.figure(4)
plt.clf()
plt.subplot(2, 1, 1)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[1]],axis=1),'--',color=c_table[5])
plt.plot(times, np.mean(M1[:,[6]],axis=1),'-',color=c_table[5],label='Word')
#plt.plot(times, np.mean(M1[:,[1]],axis=1),'--',color=c_table[3])
#plt.plot(times, np.mean(M1[:,[6]],axis=1),'-',color=c_table[3],label='Word')
plt.plot(times, M1[:,3],'--',color=c_table[1])
plt.plot(times, M1[:,8],'-',color=c_table[1],label='Noise')
plt.ylabel('%s value' % method)
plt.title('Good readers')
plt.legend(loc='upper left', shadow=True)
plt.subplot(2, 1, 2)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[1]],axis=1),'--',color=c_table[5])
plt.plot(times, np.mean(M2[:,[6]],axis=1),'-',color=c_table[5],label='Word')
#plt.plot(times, np.mean(M2[:,[1]],axis=1),'--',color=c_table[3])
#plt.plot(times, np.mean(M2[:,[6]],axis=1),'-',color=c_table[3],label='Word')
plt.plot(times, M2[:,3],'--',color=c_table[1])
plt.plot(times, M2[:,8],'-',color=c_table[1],label='Noise')
#plt.ylim(0, 4)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
plt.title('Poor readers')
plt.legend(loc='upper left', shadow=True)
plt.show()
#data = {
# 'X11': X11,
# 'brs': brs
# }
#sio.savemat('R21.mat',{'data':data})
#%%
k = 1
#tmin = 0
tp1 = [0.08, 0.13, 0.15, 0.20, 0.30]
tp2 = [0.12, 0.17, 0.19, 0.24, 0.35]
mask = np.logical_and(times >= tp1[k], times <= tp2[k])
""" Plot scatter """
temp_diff = np.subtract(np.mean(X1[:,:,:,[0,1]],axis=3), np.mean(X1[:,:,:,[3]],axis=3))
data_diff = np.mean(temp_diff[vwfa_labels,:,:], axis = 0)
#data1 = data1.reshape((data1.shape[1],data1.shape[0],data1.shape[2]))
vwfa_response = np.mean(data_diff[mask,:],axis=0)
plt.figure(4)
plt.clf()
ax = plt.subplot()
ax.scatter(brs, vwfa_response, s=30, c='k', alpha=0.5)
for i, txt in enumerate(subs):
ax.annotate(txt, (brs[i],vwfa_response[i]))
np.corrcoef(vwfa_response,brs)
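# np.corrcoef returns r only; a minimal sketch for r with a p-value
# (assumes scipy is installed; `vwfa_response` and `brs` come from above)
from scipy import stats
r_val, p_val = stats.pearsonr(vwfa_response, brs)
print('VWFA response vs. reading score: r = %.2f, p = %.3f' % (r_val, p_val))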
for iSub, s in enumerate(subs):
plt.figure(100+iSub)
plt.clf()
plt.subplot(1,2,1)
plt.hold(True)
plt.plot(times, np.mean(X1[vwfa_labels,:,iSub,0],axis=0), '--', color=c_table[5])
plt.plot(times, np.mean(X1[vwfa_labels,:,iSub,3],axis=0), '--', color=c_table[1])
plt.subplot(1,2,2)
plt.hold(True)
plt.plot(times, np.mean(X1[vwfa_labels,:,iSub,5],axis=0), '-', color=c_table[5])
plt.plot(times, np.mean(X1[vwfa_labels,:,iSub,8],axis=0), '-', color=c_table[1])
plt.title(s)
#%%
#temp3 = mne.SourceEstimate(np.mean(X1[:,:,:,0],axis=2), fs_vertices, tmin,
# tstep,subject='fsaverage')
##vertno_max, time_max = temp1.get_peak(hemi='lh',mode='pos')
#temp3.plot(hemi='lh', subjects_dir=fs_dir, views='lat', #transparent = True,
# clim=dict(kind='value', lims=[0.5, 2, 4]),
# initial_time=0.1)
""" V1 V1 """
#k = 0
#tmin = 0
#
#tp1 = [0.08, 0.13, 0.15, 0.20, 0.30]
#tp2 = [0.12, 0.17, 0.19, 0.24, 0.35]
aparc_label_name = '_V1_ROI' #'inferiortemporal' #'pericalcarine'
# anat_label = mne.read_labels_from_annot('fsaverage', parc='aparc',surf_name='white',
# subjects_dir=fs_dir, regexp=aparc_label_name)
anat_label = mne.read_labels_from_annot('fsaverage', parc='HCPMMP1', surf_name='white',
subjects_dir=fs_dir, regexp=aparc_label_name)
vertices_mask_lh = mne.Label.get_vertices_used(anat_label[0])
vertices_mask_rh = mne.Label.get_vertices_used(anat_label[1])
#mask = np.logical_and(times >= tp1[k], times <= tp2[k])
#data1 = temp_avg1[:,mask,:]
#data1 = np.mean(data1, axis = 2)
data1 = np.subtract(np.mean(X1[:,:,:,[0,5]],axis=3), np.mean(X1[:,:,:,[2,7]],axis=3))
data11 = data1[:,:,:]
#data11 = data11[:,mask,:]
#data1 = np.mean(data1,axis=2)
#stat_fun = partial(mne.stats.ttest_1samp_no_p,sigma=1e-3)
data11 = np.transpose(data11,[2,1,0])
stat_fun = partial(mne.stats.ttest_1samp_no_p)
temp3 = mne.SourceEstimate(np.transpose(stat_fun(data11)), fs_vertices, tmin,
tstep,subject='fsaverage')
#vertno_max, time_max = temp3.get_peak(hemi='lh',mode='pos')
##
brain3_1 = temp3.plot(hemi='lh', subjects_dir=fs_dir, views=['lat','ven','med'], #transparent = True,
clim=dict(kind='value', lims=[2, 3, np.max(temp3.data[vertices_mask_lh,:])]),
initial_time=0.10) # background='white', size=(800, 600)
#brain3_1.save_movie('test.mp4',time_dilation =8.0,framerate = 30)
brain3_1.add_label(anat_label[0], borders=True, color='k')
brain3_2 = temp3.plot(hemi='rh', subjects_dir=fs_dir, views='lat',
clim=dict(kind='value', lims=[2, np.max(temp3.data[vertices_mask_lh,:])*.7, np.max(temp3.data[vertices_mask_lh,:])]),
initial_time=0.1)
brain3_2.add_label(anat_label[1], borders=True, color='k')
""" Left """
lh_label = temp3.in_label(anat_label[0])
#data = lh_label.data
#
#lh_label.data[data < np.max(data)*0.9] = 0.
func_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
brain3_1.add_label(func_labels, borders=True, color='b')
#brain3_1.save_image('l_TE2p.png')
""" Right """
rh_label = temp3.in_label(anat_label[1])
#data2 = rh_label.data
#
#rh_label.data[data2 < np.max(data2)*0.9] = 0.
_, func_labels2 = mne.stc_to_label(rh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
brain3_2.add_label(func_labels2, borders=True, color='b')
v1_labels = func_labels.vertices
v1_labels2 = func_labels2.vertices
M = np.mean(np.mean(X1[v1_labels,:,:,:],axis=0),axis=1)
plt.figure(20)
plt.clf()
plt.hold(True)
plt.plot(times, M[:,0],'--',color=c_table[5])
plt.plot(times, M[:,5],'-',color=c_table[5],label='High')
plt.plot(times, M[:,2],'--',color=c_table[1])
plt.plot(times, M[:,7],'-',color=c_table[1],label='Low')
plt.legend(loc='upper left', shadow=True)
plt.show()
plt.figure(300)
plt.clf()
plt.hold(True)
plt.plot(times, M[:,0],'--',color=c_table[5])
plt.plot(times, M[:,5],'-',color=c_table[5],label='High')
plt.plot(times, M[:,1],'--',color=c_table[1])
plt.plot(times, M[:,6],'-',color=c_table[1],label='Med')
plt.plot(times, M[:,3],'--',color=c_table[3])
plt.plot(times, M[:,8],'-',color=c_table[3],label='Noise')
plt.legend(loc='lower right', shadow=True)
k = 0
tp1 = [0.08, 0.13, 0.15, 0.20, 0.30]
tp2 = [0.12, 0.17, 0.19, 0.24, 0.35]
mask = np.logical_and(times >= tp1[k], times <= tp2[k])
""" Plot scatter """
temp_diff = np.subtract(np.mean(X1[:,:,:,[0,5]],axis=3), np.mean(X1[:,:,:,[2,7]],axis=3))
data_diff = np.mean(temp_diff[v1_labels,:,:], axis = 0)
#data1 = np.subtract(X1[v1_labels,:,:,[0,5]], X1[v1_labels,:,:,[3,7]])
#data1 = data1.reshape((data1.shape[1],data1.shape[0],data1.shape[2]))
v1_response = np.mean(data_diff[mask,:],axis=0)
fig = plt.figure(40)
plt.clf()
ax1 = plt.subplot()
ax1.scatter(brs, v1_response, s=30, c='k', alpha=0.5)
for i, txt in enumerate(subs):
ax1.annotate(txt, (brs[i],v1_response[i]))
np.corrcoef(v1_response,brs)
""" Plot individual V1 responses """
for iSub, s in enumerate(subs):
plt.figure(100+iSub)
plt.clf()
plt.subplot(1,2,1)
plt.hold(True)
plt.plot(times, np.mean(X1[v1_labels,:,iSub,0],axis=0), '--', color=c_table[5])
plt.plot(times, np.mean(X1[v1_labels,:,iSub,1],axis=0), '--', color=c_table[3])
plt.plot(times, np.mean(X1[v1_labels,:,iSub,3],axis=0), '--', color=c_table[1])
plt.plot([0.1, 0.1],[0, 8],'-',color='k')
plt.title(s)
plt.subplot(1,2,2)
plt.hold(True)
plt.plot(times, np.mean(X1[v1_labels,:,iSub,5],axis=0), '-', color=c_table[5])
plt.plot(times, np.mean(X1[v1_labels,:,iSub,6],axis=0), '-', color=c_table[3])
plt.plot(times, np.mean(X1[v1_labels,:,iSub,8],axis=0), '-', color=c_table[1])
plt.plot([0.1, 0.1],[0, 8],'-',color='k')
plt.title(s)
#%%
#connectivity = mne.spatial_tris_connectivity(mne.grade_to_tris(5))
#p_threshold = 0.02
#t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)
#
#T_obs, clusters, cluster_p_values, H0 = clu = \
# mne.stats.spatio_temporal_cluster_1samp_test(data1, connectivity=connectivity, n_jobs=18,
# threshold=t_threshold)
#good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
#
#stc_all_cluster_vis1 = mne.stats.summarize_clusters_stc(clu, tstep=tstep,
# vertices=fs_vertices,
# subject='fsaverage')
#brain1_1 = stc_all_cluster_vis1.plot(hemi='lh', views='lateral',
# subjects_dir=fs_dir,
# time_label='Duration significant (ms)')
#brain1_2 = stc_all_cluster_vis1.plot(hemi='rh', views='lateral',
# subjects_dir=fs_dir,
# time_label='Duration significant (ms)')
#""
#aparc_label_name = 'lateraloccipital'#'inferiortemporal'#'fusiform'#'lingual' #fusiform'#'pericalcarine' # lateraloccipital
## tmin, tmax = 0.080, 0.12
#tmin, tmax = 0.13, 0.18
## tmin, tmax = 0.10, 0.15
#
#stc_mean = stc.crop(tmin, tmax).mean()
#label = mne.read_labels_from_annot(subs[0], parc='aparc',surf_name='white',
# subjects_dir=fs_dir,
# regexp=aparc_label_name)
#
#stc_mean_label = stc_mean.in_label(label[0])
#data = np.abs(stc_mean_label.data)
#stc_mean_label.data[data < 0.6 * np.max(data)] = 0.
#
#func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=True,
# subjects_dir=fs_dir, connected=True)
#func_label = func_labels[0]
#
#anat_label = mne.read_labels_from_annot(subs[0], parc='aparc',
# subjects_dir=fs_dir,
# regexp=aparc_label_name)
#
## extract the anatomical time course for each label
#stc_anat_label = stc.in_label(anat_label[0])
#pca_anat = stc.extract_label_time_course(anat_label[0], src, mode='pca_flip')[0]
#
#stc_func_label = stc.in_label(func_label)
#pca_func = stc.extract_label_time_course(func_label, src, mode='pca_flip')[0]
#
## flip the pca so that the max power between tmin and tmax is positive
#pca_anat *= np.sign(pca_anat[np.argmax(np.abs(pca_anat))])
#pca_func *= np.sign(pca_func[np.argmax(np.abs(pca_anat))])
#
#plt.figure()
#plt.plot(1e3 * stc_anat_label.times, pca_anat, 'k',
# label='Anatomical %s' % aparc_label_name)
#plt.plot(1e3 * stc_func_label.times, pca_func, 'b',
# label='Functional %s' % aparc_label_name)
#plt.legend()
#plt.show()
#
#brain = stc_mean.plot(hemi='lh', subjects_dir=fs_dir,
# clim=dict(kind='value', lims=[3, 5, 10]))
#brain.show_view('lateral')
#
## show both labels
#brain.add_label(anat_label[0], borders=True, color='k')
#brain.add_label(func_label, borders=True, color='b')
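# The ROI traces above all repeat the same reduction (average over ROI
# vertices, then over subjects). A small helper, sketched here with the
# arrays already defined in this script, keeps that pattern in one place:
def roi_timecourse(X, vertices, subjects=None):
    """Mean time course per condition from X of shape
    (n_vertices, n_times, n_subjects, n_conditions)."""
    data = X[vertices, :, :, :]
    if subjects is not None:
        data = data[:, :, subjects, :]
    return np.mean(np.mean(data, axis=0), axis=1)  # -> (n_times, n_conditions)

# e.g. M = roi_timecourse(X1, vwfa_labels) reproduces the nested np.mean calls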
|
[
"sjjoo@utexas.edu"
] |
sjjoo@utexas.edu
|
addf62c3fc65ce49d477e0cfdd502a0fd8dedf6e
|
3c3b8c27c3478a52bb7b68caf8fecd6431d7a786
|
/backend/swagger_server/controllers/document/info.py
|
8313e530913cac567e5dd311e1b0174b9a3731fb
|
[
"MIT"
] |
permissive
|
Lend88/libresign
|
d7116aa71904de011c48700bff65f06cde2853d4
|
9537f39a696fa5f3433052406329d77d528b6cf9
|
refs/heads/master
| 2020-04-20T07:02:13.623754
| 2018-08-24T19:56:02
| 2018-08-24T19:56:02
| 145,740,805
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,232
|
py
|
import json
from uuid import UUID
from flask import Response, jsonify
from flask_jwt_extended import jwt_required, get_jwt_identity
from ...decorators import produces
from ...db import Session
from ...models import ErrorMessage
from ...mappings import Document, Field, FileUsage, FieldUsage
from ... import config
from ...helpers import verify_permission, type_check
@type_check
def get_filled(session, doc_id: UUID):
''' Get all fields that have been filled '''
subquery = (
session
.query(FieldUsage)
.filter(FieldUsage.field_id == Field.id)
.filter(FieldUsage.fieldusage_type == config.FIELD_USAGE_TYPE["filled"])
)
return (
session
.query(Field)
.filter(Field.document_id == doc_id.bytes)
.filter(subquery.exists())
.with_entities(Field.id)
.all()
)
@jwt_required
@produces('application/json')
def info_get(docId: str):
''' Fetch information about the document. This
information is intended to be used by
applications showing the fields to users
and includes the location, size and status
of various fields in the document and the
dimensions of all pages within the document.
Note that all sizes/locations are in PDF
units.
Arguments:
docId (str): The document ID
Response:
If successful, this endpoint will respond
with HTTP 200 and JSON describing the
document fields/pages. The only fields
carried within the document are those
that the current user should sign. See
the swagger specification for a schema
of the returned JSON.
    If an error occurs this endpoint will
respond with a 4XX error code and a
JSON body describing the error.
'''
uid = UUID(hex=get_jwt_identity())
doc_id = None
try:
doc_id = UUID(hex=docId)
except ValueError:
return ErrorMessage("Not a valid document ID"), 400
session = Session()
if not verify_permission(session, doc_id):
return ErrorMessage("Not Authorized"), 401
field_data = (
session
.query(FileUsage)
.filter(FileUsage.document_id == doc_id.bytes)
.filter(FileUsage.fileusage_type == config.FILE_USAGE_TYPES['describe-fields'])
.with_entities(FileUsage.data)
.order_by(FileUsage.timestamp.asc())
.first()
)
doc_title = (
session
.query(Document)
.filter(Document.id == doc_id.bytes)
.with_entities(Document.title)
.one()
)[0]
if not field_data:
# If the field data hasn't been created yet, then
# return a 503 to indicate that the client should
# retry at a later time
return Response(
json.dumps({'msg':"Field data is still being generated"}),
headers={
# This should hopefully be in the right area
'Retry-After': 30
}
), 503
else:
field_data = json.loads(field_data[0])
# Assert on properties of json data
assert isinstance(field_data, dict)
assert 'fields' in field_data
assert 'pages' in field_data
assert isinstance(field_data['fields'], list)
assert isinstance(field_data['pages'], list)
fields_for_user = dict(
session
.query(Field)
.filter(Field.document_id == doc_id.bytes)
.filter(Field.user_id == uid.bytes)
.with_entities(Field.field_name, Field.id)
.all()
)
filled = set(UUID(bytes=x[0]) for x in get_filled(session, doc_id))
print(filled)
filtered = []
for field in field_data['fields']:
if field['name'] in fields_for_user:
field_id = UUID(bytes=fields_for_user[field['name']])
field['id'] = field_id.hex
field['filled'] = field_id in filled
field['optional'] = False
filtered.append(field)
field_data['fields'] = filtered
field_data['title'] = doc_title
return jsonify(field_data), 200
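# A minimal client-side sketch using Flask's test client. The application
# import path, route template and token value are assumptions here, not part
# of this module:
#
#   from swagger_server.__main__ import app  # hypothetical import
#   client = app.test_client()
#   resp = client.get('/document/' + doc_id_hex + '/info',
#                     headers={'Authorization': 'Bearer ' + jwt_token})
#   if resp.status_code == 503:
#       # field data is still being generated; honor Retry-After and retry
#       wait_s = int(resp.headers.get('Retry-After', 30))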
|
[
"sean@lend88.com"
] |
sean@lend88.com
|
7cc2cd33d853705b4649625af3ff61164575e74b
|
1eab20f73802746572c2a4a1996ce7355afca276
|
/app.py
|
97d97d79cd999c0cd9a9708e1ce190b02b71ee36
|
[] |
no_license
|
gretelup/bbb
|
5b57f1732a23e1edb59b0aadb443eceef53c8f1d
|
ab5157e3a46561f773d990cee0acfd3e7375e519
|
refs/heads/master
| 2023-03-20T21:29:38.100908
| 2021-03-20T13:13:36
| 2021-03-20T13:13:36
| 277,312,251
| 0
| 0
| null | 2021-03-20T04:26:59
| 2020-07-05T13:55:13
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,097
|
py
|
import os
import pandas as pd
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
from flask import Flask, jsonify, render_template
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
#################################################
# Database Setup
#################################################
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///db/bellybutton.sqlite"
db = SQLAlchemy(app)
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(db.engine, reflect=True)
# Save references to each table
Samples_Metadata = Base.classes.sample_metadata
Samples = Base.classes.samples
@app.route("/")
def index():
"""Return the homepage."""
return render_template("index.html")
@app.route("/names")
def names():
"""Return a list of sample names."""
# Use Pandas to perform the sql query
stmt = db.session.query(Samples).statement
df = pd.read_sql_query(stmt, db.session.bind)
# Return a list of the column names (sample names)
return jsonify(list(df.columns)[2:])
@app.route("/metadata/<sample>")
def sample_metadata(sample):
"""Return the MetaData for a given sample."""
sel = [
Samples_Metadata.sample,
Samples_Metadata.ETHNICITY,
Samples_Metadata.GENDER,
Samples_Metadata.AGE,
Samples_Metadata.LOCATION,
Samples_Metadata.BBTYPE,
Samples_Metadata.WFREQ,
]
results = db.session.query(*sel).filter(Samples_Metadata.sample == sample).all()
# Create a dictionary entry for each row of metadata information
sample_metadata = {}
for result in results:
sample_metadata["sample"] = result[0]
sample_metadata["ETHNICITY"] = result[1]
sample_metadata["GENDER"] = result[2]
sample_metadata["AGE"] = result[3]
sample_metadata["LOCATION"] = result[4]
sample_metadata["BBTYPE"] = result[5]
sample_metadata["WFREQ"] = result[6]
print(sample_metadata)
return jsonify(sample_metadata)
@app.route("/samples/<sample>")
def samples(sample):
"""Return `otu_ids`, `otu_labels`,and `sample_values`."""
stmt = db.session.query(Samples).statement
df = pd.read_sql_query(stmt, db.session.bind)
# Filter the data based on the sample number and
# only keep rows with values above 1
sample_data = df.loc[df[sample] > 1, ["otu_id", "otu_label", sample]]
# Format the data to send as json
data = {
"otu_ids": sample_data.otu_id.values.tolist(),
"sample_values": sample_data[sample].values.tolist(),
"otu_labels": sample_data.otu_label.tolist(),
}
return jsonify(data)
@app.route("/wfreq/<sample>")
def wfreq(sample):
"""Return washing frequency for sample."""
result = db.session.query(Samples_Metadata.WFREQ).\
filter(Samples_Metadata.sample == sample).all()
sample_wfreq = result[0]
return jsonify(sample_wfreq)
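# A quick smoke test with Flask's test client (the sample ID '940' is an
# assumption; use any name returned by /names):
#
#   with app.test_client() as client:
#       print(client.get('/names').json[:5])
#       print(client.get('/metadata/940').json)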
if __name__ == "__main__":
app.run()
|
[
"gretelup@gmail.com"
] |
gretelup@gmail.com
|
c24c4e3521e3a3b230daaaf0d2f590d8e5fe952d
|
e7e5ac71c941e3daf82781249ae6d32d8614f78e
|
/2017/day-04/part1.py
|
10683698d8af82bdb1eaa74118c13286e071c8aa
|
[
"MIT"
] |
permissive
|
amochtar/adventofcode
|
7f952ebee6b41aa5147cc788710fb054579742e7
|
292e7f00a1e19d2149d00246b0a77fedfcd3bd08
|
refs/heads/master
| 2022-07-14T22:46:21.175533
| 2021-12-15T08:14:17
| 2021-12-15T08:28:43
| 222,647,709
| 1
| 0
|
MIT
| 2022-06-22T04:45:13
| 2019-11-19T08:36:02
|
Python
|
UTF-8
|
Python
| false
| false
| 144
|
py
|
with open("input.txt", "r") as f:
lines = f.readlines()
print("Part 1:", len([l for l in lines if len(set(l.split())) == len(l.split())]))
|
[
"amochtar@xebia.com"
] |
amochtar@xebia.com
|
ed65d2fbf91fb60c18f53d2cb981e925bd6c5fdc
|
3a702d84cb15e76dc0eebf9d9d1939e0334c57c8
|
/Site/list/models.py
|
d49a066c028e24d410ff9d5f0ff95309f12c0098
|
[] |
no_license
|
DanilParunov/SiteAgLib
|
3fa99007e0bdb411f075d8464474cf96168a8bbb
|
f16544f8c4d86b172d241b3710949a568f5b8a2a
|
refs/heads/master
| 2023-02-22T12:28:42.325291
| 2021-01-17T20:57:52
| 2021-01-17T20:57:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,158
|
py
|
from django.db import models
from django.urls import reverse
class Articles(models.Model):
    title = models.CharField('Title', max_length=50)
    anons = models.CharField('Author', max_length=250)
    text = models.TextField('Text')
    date = models.DateField('Publication date')
def __str__(self):
return self.title
def get_absolute_url(self):
        return reverse('book', kwargs={'book_id': self.pk})
class Meta:
        verbose_name = 'Reading list'
        verbose_name_plural = 'Reading list'
class CustomersBooks(models.Model):
customer = models.ForeignKey(to='accounts.Customers', on_delete=models.CASCADE)
article = models.ForeignKey(Articles, on_delete=models.CASCADE)
class Library(models.Model):
    title = models.CharField('Title', max_length=50)
    addres = models.CharField('Address', max_length=250)
    text = models.TextField('Text')
def __str__(self):
return self.title
class Meta:
        verbose_name = 'Library list'
        verbose_name_plural = 'Library list'
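# The reverse('book', ...) call above assumes a URL pattern named 'book'
# taking a 'book_id' kwarg; a hypothetical urls.py entry that would satisfy it
# (the view name is an assumption):
#
#   path('book/<int:book_id>/', views.book_detail, name='book'),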
|
[
"danil19056@gmail.com"
] |
danil19056@gmail.com
|
3288c028cd4b3fe106a6c3cdfd5ea08541dd253e
|
3fffc40cf16672cb36b4ce5da6b8a6fc3fe3f848
|
/digitRecognizer.py
|
f1b49241574c4d1c26cc905c86652987a82c892d
|
[] |
no_license
|
pranauv1/Digit_Recognizer
|
04dfec8dcb113fa987d9f1b760c5de42d7b4825c
|
f303f49fdd8ea1064092b21c0c8afb0c033f26db
|
refs/heads/main
| 2023-07-05T15:38:12.791482
| 2021-08-20T16:18:24
| 2021-08-20T16:18:24
| 398,330,081
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,362
|
py
|
#Get the dataset (shell commands: run with the "!" prefix in a notebook cell,
#or without it in a terminal)
# kaggle competitions download -c digit-recognizer
#Unzip them
# unzip /path/test.csv.zip
# unzip /path/train.csv.zip
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization
from sklearn.model_selection import train_test_split
#Load CSV files
test = pd.read_csv('/content/test.csv')
train = pd.read_csv('/content/train.csv')
sample = pd.read_csv('/content/sample_submission.csv')
#Analysing Dataframes
test.head()
train.head()
sample.head()
#We can directly jump into splitting the data
#Defining train, test and labels
x_train = train.drop('label', axis=1)
y_train = train['label']
x_test = test
#Will check the shapes
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
#Converting them into an array
x_train = x_train.values.reshape(-1, 28, 28, 1)/255
x_test = x_test.values.reshape(-1, 28, 28, 1)/255
#One hot encoding the labels
y_train = to_categorical(y_train,10)
#Check the shapes again after reshaping and one-hot encoding
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
#Creating the model
model = Sequential()
model.add(Conv2D(32, (3,3), activation="relu", input_shape=(28, 28, 1)))
model.add(Conv2D(32, (3,3), activation='relu'))
model.add(MaxPool2D(pool_size = (2,2)))
model.add(Conv2D(64, (3,3), activation='relu', padding='same'))
model.add(Conv2D(64, (3,3), activation='relu', padding='same'))
model.add(MaxPool2D(pool_size = (2,2), strides = (2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.summary()
#Compile and train the model
model.compile(loss="categorical_crossentropy", optimizer="Adam", metrics=["acc"])
history = model.fit(x_train, y_train, epochs=20, batch_size=50, verbose=2)
#Save the model
model.save('digit_recognizer.h5')
#Predicting using the given test dataset
results = model.predict(x_test)
results = np.argmax(results, axis=1)
results = pd.Series(results, name='label')
#Submission(Kaggle)
submission = pd.concat([pd.Series(range(1,28001), name='ImageId'), results], axis=1)
submission.head()
#CSV to submit
submission.to_csv('submission.csv', index=False)
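#Reload the saved model and classify one test image (a minimal sanity check
#using the arrays already defined above)
from keras.models import load_model
reloaded = load_model('digit_recognizer.h5')
print('First test image predicted as:', np.argmax(reloaded.predict(x_test[:1]), axis=1)[0])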
|
[
"noreply@github.com"
] |
pranauv1.noreply@github.com
|