| blob_id (string) | directory_id (string) | path (string) | content_id (string) | detected_licenses (list) | license_type (2 classes) | repo_name (string) | snapshot_id (string) | revision_id (string) | branch_name (string) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64) | star_events_count (int64) | fork_events_count (int64) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64) | extension (246 classes) | content (string) | authors (list) | author_id (string) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
72e8de4775a020325d71742130df47bd07f4ff98
|
78299c77361930ff5576762c4e848179dd98af20
|
/Configuration/python/varparsing.py
|
09a1d6653b7dde333cd0eb09a08520bb91a8b21f
|
[] |
no_license
|
yduh/URAnalysis
|
1f54bc3f81a7835ba343fc454c3408ce9f0c423b
|
0d80413cc0e3d06926fb3d3b1a7c8b635065dcaf
|
refs/heads/master
| 2021-05-19T22:06:18.762821
| 2019-02-25T14:56:29
| 2019-02-25T14:56:29
| 33,153,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 578
|
py
|
from FWCore.ParameterSet.VarParsing import VarParsing
options = VarParsing("analysis")
# inputFiles, outputFile, and maxEvents
# options come for free with VarParsing
options.register(
'globalTag',
'',
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
'global tag to be used'
)
options.register(
'isMC',
False,
VarParsing.multiplicity.singleton,
VarParsing.varType.bool,
    'is this sample MC (True) or data (False)'
)
options.register(
'reportEvery',
100,
VarParsing.multiplicity.singleton,
VarParsing.varType.int,
'Verbosity of message logs'
)
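# A minimal usage sketch (an addition, not part of the original file): the
# driving CMSSW config would parse the command line after the register() calls,
# e.g. `cmsRun cfg.py globalTag=94X isMC=True reportEvery=1000`:
#     options.parseArguments()
#     print(options.globalTag, options.isMC, options.reportEvery)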
|
[
"mverzett@cern.ch"
] |
mverzett@cern.ch
|
7e15e46db4a4b3c07e4208d690942153ba38118a
|
df3975a6d594f2111b490de60b0a12da990d78e9
|
/totalimpactwebapp/product_deets.py
|
429633a745291371c6d3649b70ff9b605578ebf6
|
[
"MIT"
] |
permissive
|
dhalperi/total-impact-webapp
|
d366cefb9b365baf860b73d56c7ea236f5e327de
|
4580cc092c11c83679084a85e080d7769aa28928
|
refs/heads/master
| 2020-12-24T11:09:34.940748
| 2014-08-15T06:38:17
| 2014-08-15T06:38:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,637
|
py
|
import logging
import datetime
from sqlalchemy.exc import IntegrityError, DataError, InvalidRequestError
from totalimpactwebapp import db
from totalimpactwebapp.json_sqlalchemy import JSONAlchemy
logger = logging.getLogger("webapp.totalimpactwebapp.product_deets")
class ProductDeets(db.Model):
id = db.Column(db.Integer, primary_key=True)
profile_id = db.Column(db.Integer)
url_slug = db.Column(db.Text)
tiid = db.Column(db.Text)
genre = db.Column(db.Text)
host = db.Column(db.Text)
year = db.Column(db.Text)
mendeley_discipline = db.Column(db.Text)
has_metrics = db.Column(db.Text)
title = db.Column(db.Text)
deets_collected_date = db.Column(db.DateTime())
run_id = db.Column(db.Text)
def __init__(self, **kwargs):
# print(u"new ProductDeets {kwargs}".format(
# kwargs=kwargs))
self.deets_collected_date = datetime.datetime.utcnow()
super(ProductDeets, self).__init__(**kwargs)
def __repr__(self):
return u'<ProductDeets {url_slug} {tiid}>'.format(
url_slug=self.url_slug,
tiid=self.tiid)
def populate_product_deets(profile, product):
product_deets = ProductDeets(
profile_id = profile.id,
url_slug = profile.url_slug,
tiid = product.tiid,
genre = product.genre,
host = product.host,
year = product.year,
mendeley_discipline = product.mendeley_discipline,
has_metrics = str(product.has_metrics),
title = product.biblio.display_title,
)
return product_deets
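# Usage sketch (hypothetical, not in the original module): persist the deets
# through the Flask-SQLAlchemy session exposed here as `db`:
#     deets = populate_product_deets(profile, product)
#     db.session.add(deets)
#     db.session.commit()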
|
[
"hpiwowar@gmail.com"
] |
hpiwowar@gmail.com
|
42c2eba44de99005e20b3810fabb99903161f6d3
|
2b97e9ddcae50aed62a359b42e4a5fbc209b30db
|
/Deep_Learning_Algorithme_Non_Supervisé/AutoEncoders/ae_test.py
|
e9be9f769fbc22136c401bcba8af2433db11298d
|
[] |
no_license
|
GMDFr/Deep_Learning_Algorithme_Non_Supervise
|
4f69bbe4d0667a33a6cd56d52ccebae2b2d87bba
|
9b308fb0145dffd61509e23a293071a48be82418
|
refs/heads/master
| 2023-02-03T22:58:07.879941
| 2020-12-16T13:31:07
| 2020-12-16T13:31:07
| 267,247,574
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,360
|
py
|
# -*- coding: utf-8 -*-
"""
Spyder editor
This is a temporary script file.
"""
# Libraries
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
# Load the dataset
movies = pd.read_csv("ml-1m/movies.dat",
sep="::",
header=None,
engine="python",
encoding="latin-1")
users = pd.read_csv("ml-1m/users.dat",
sep="::",
header=None,
engine="python",
encoding="latin-1")
ratings = pd.read_csv("ml-1m/ratings.dat",
sep="::",
header=None,
engine="python",
encoding="latin-1")
# Prepare the training and test sets
training_set = pd.read_csv("ml-100k/u1.base", delimiter="\t", header=None)  # \t = tab separator
training_set = np.array(training_set, dtype="int")
test_set = pd.read_csv("ml-100k/u1.test", delimiter="\t", header=None)  # \t = tab separator
test_set = np.array(test_set, dtype="int")
# Get the number of users and the number of movies
nb_users = int(max(max(training_set[:,0]), max(test_set[:,0])))
nb_movies = int(max(max(training_set[:,1]), max(test_set[:,1])))
# Convert the data into a users-by-movies rating matrix.
def convert(data):
new_data =[]
for id_users in range (1, nb_users + 1):
id_movies = data[data[:,0] == id_users,1]
id_ratings = data[data[:,0] == id_users,2]
ratings = np.zeros(nb_movies)
ratings[id_movies-1] = id_ratings
new_data.append(list(ratings))
return new_data
training_set = convert(training_set)
test_set = convert(test_set)
# Convert the data into torch tensors
training_set = torch.FloatTensor(training_set)
test_set = torch.FloatTensor(test_set)
# Autoencoder architecture
class SAE(nn.Module):  # inherits from nn.Module
    def __init__(self):
        super(SAE,self).__init__()  # pull in all the machinery nn.Module provides
self.fc1 = nn.Linear(nb_movies, 20)
self.fc2 = nn.Linear(20,10)
self.fc3 = nn.Linear(10,20)
self.fc4 = nn.Linear(20, nb_movies)
self.activation = nn.Sigmoid()
def forward(self,x):
x = self.activation(self.fc1(x))
x = self.activation(self.fc2(x))
x = self.activation(self.fc3(x))
x = self.fc4(x)
return x
sae = SAE()
criterion = nn.MSELoss()
optimizer = optim.RMSprop(sae.parameters(), lr = 0.01, weight_decay = 0.5)
# Train the autoencoder
nb_epochs = 200
for epoch in range(1, nb_epochs + 1):
train_loss = 0
s = 0.
for id_user in range (nb_users):
input = Variable(training_set[id_user]).unsqueeze(0)
target = input.clone()
if torch.sum(target.data > 0) > 0:
output = sae(input)
            target.requires_grad = False
            output[target == 0] = 0
            loss = criterion(output, target)
            mean_corrector = nb_movies / float(torch.sum(target.data > 0) + 1e-10)  # rescale the loss to the rated movies only
            optimizer.zero_grad()  # clear gradients left over from the previous user
            loss.backward()
            train_loss += np.sqrt(loss.item() * mean_corrector)
            s += 1.
            optimizer.step()  # apply the weight update
print("epoch: " + str(epoch) + " loss: " + str(train_loss / s))
# Test the autoencoder
test_loss = 0
s = 0.
for id_user in range(nb_users):
    input = Variable(training_set[id_user]).unsqueeze(0)
    target = Variable(test_set[id_user])
    if torch.sum(target.data > 0) > 0:
        output = sae(input)
        target.requires_grad = False
        output[target == 0] = 0
        loss = criterion(output, target)
        mean_corrector = nb_movies / float(torch.sum(target.data > 0) + 1e-10)  # rescale the loss to the rated movies only
        test_loss += np.sqrt(loss.item() * mean_corrector)
        s += 1.  # counter
print("test loss: " + str(test_loss / s))
|
[
"noreply@github.com"
] |
GMDFr.noreply@github.com
|
5cb0dfddc9eda3a6aefd7a6e337a1a5c1dfc2130
|
7bad3261a6995496d0c93ab06e6d5f538f3901c7
|
/Project 2/venv/bin/easy_install
|
8501bec41c8554d9a44117cbbc450fe6ad7191bf
|
[] |
no_license
|
bpayne915/bpayne915.github.io
|
63167d0b41aae8ac0f0aa7cd9010dc4764914ffa
|
d3b4110ecbc0d10235337d8bbd0cdca7556212f3
|
refs/heads/main
| 2023-07-10T04:41:30.757254
| 2021-08-14T15:15:48
| 2021-08-14T15:15:48
| 377,582,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
#!/Users/Barbara/PycharmProjects/Assignment3/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
|
[
"bpayne915@knights.ucf.edu"
] |
bpayne915@knights.ucf.edu
|
|
59e2ef7356faef4622769acca68c00dec074fac9
|
a507b1c1b825ec68c140514f7b91fc30b61f1f7a
|
/WordPairsSim/storeWordsSimilarity.py
|
5cce3dcfb8c8410e703f7f6b9ce359521fc081aa
|
[] |
no_license
|
lunafeng/ELTDS
|
ee621da596b4f24b9d304a2e7031aac00ea353c4
|
d247e69c8272475bc8d97f2096c57f06872e3aa2
|
refs/heads/master
| 2021-01-19T09:31:21.300311
| 2017-02-16T05:27:04
| 2017-02-16T05:27:04
| 82,120,787
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
#!/usr/bin/python
import time
import MySQLdb
def main(storeValues):
db_mysql = MySQLdb.connect('141.117.3.92','lunafeng','luna222','WordsDisambiguation_b4')
db_mysql.ping()
cursor = db_mysql.cursor()
sql1 = "INSERT IGNORE INTO WordsSimilarityNew(Word1,Word2,Similarity) VALUES " + storeValues
sql2 = "INSERT IGNORE INTO WordsSimilarityNew(Word2,Word1,Similarity) VALUES " + storeValues
cursor.execute(sql1)
db_mysql.commit()
cursor.execute(sql2)
db_mysql.commit()
cursor.close()
db_mysql.close()
time.sleep(0.05)
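# A safer variant (a sketch, not in the original file) would let the driver
# quote values instead of concatenating SQL; with rows as (word1, word2, sim)
# tuples it becomes:
#     cursor.executemany(
#         "INSERT IGNORE INTO WordsSimilarityNew(Word1,Word2,Similarity) VALUES (%s,%s,%s)",
#         rows)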
|
[
"lunafeng123@gmail.com"
] |
lunafeng123@gmail.com
|
740a5281897126ded86c9c1ba4b08f7ee9734385
|
9364e309573c49526ba296707a18ad5239719c1e
|
/myenv/lib/python3.9/site-packages/typeshed_client/resolver.py
|
d34458ab6d217c3ce41a36f5a3ad1c74b890c170
|
[] |
no_license
|
prashant-ux/django
|
626337484981578e43dab8e8fe4986d8cd233633
|
0d1c01e70e8c6da4be681b81ba2ad9e86506fc9a
|
refs/heads/master
| 2023-08-28T21:22:48.715707
| 2021-11-08T06:39:44
| 2021-11-08T06:39:44
| 419,332,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,582
|
py
|
"""Module responsible for resolving names to the module they come from."""
from typing import Dict, NamedTuple, Optional, Union
from .finder import SearchContext, get_search_context, ModulePath
from . import parser
class ImportedInfo(NamedTuple):
source_module: ModulePath
info: parser.NameInfo
ResolvedName = Union[None, ModulePath, ImportedInfo, parser.NameInfo]
class Resolver:
def __init__(self, search_context: Optional[SearchContext] = None) -> None:
if search_context is None:
search_context = get_search_context()
self.ctx = search_context
self._module_cache: Dict[ModulePath, Module] = {}
def get_module(self, module_name: ModulePath) -> "Module":
if module_name not in self._module_cache:
names = parser.get_stub_names(
".".join(module_name), search_context=self.ctx
)
if names is None:
names = {}
self._module_cache[module_name] = Module(names, self.ctx)
return self._module_cache[module_name]
def get_name(self, module_name: ModulePath, name: str) -> ResolvedName:
module = self.get_module(module_name)
return module.get_name(name, self)
def get_fully_qualified_name(self, name: str) -> ResolvedName:
"""Public API."""
*path, tail = name.split(".")
return self.get_name(ModulePath(tuple(path)), tail)
class Module:
def __init__(self, names: parser.NameDict, ctx: SearchContext) -> None:
self.names = names
self.ctx = ctx
self._name_cache: Dict[str, ResolvedName] = {}
def get_name(self, name: str, resolver: Resolver) -> ResolvedName:
if name not in self._name_cache:
self._name_cache[name] = self._uncached_get_name(name, resolver)
return self._name_cache[name]
def _uncached_get_name(self, name: str, resolver: Resolver) -> ResolvedName:
if name not in self.names:
return None
info = self.names[name]
if not isinstance(info.ast, parser.ImportedName):
return info
# TODO prevent infinite recursion
import_info = info.ast
if import_info.name is not None:
resolved = resolver.get_name(import_info.module_name, import_info.name)
if isinstance(resolved, parser.NameInfo):
return ImportedInfo(import_info.module_name, resolved)
else:
# TODO: preserve export information
return resolved
else:
return import_info.module_name
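# Usage sketch (not part of the module): resolve a dotted name to its stub entry.
#     resolver = Resolver()
#     info = resolver.get_fully_qualified_name("os.path.join")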
|
[
"prashant.tiwari_ec16@gla.ac.in"
] |
prashant.tiwari_ec16@gla.ac.in
|
05fac2e34179bb8925ea9021dd3c191348810736
|
e8f7910a5bce25860177de4f9009a1ac9886cb3a
|
/merge_nucleotides.py
|
ab89fa07ec92d32b493c65124a694459ecea9c63
|
[] |
no_license
|
isaureCdB/Scripts
|
e89109928fd9d41e0ab9fa269433f427e15174a8
|
e66589b1d561479ba0f9eada25dc5c16a561b908
|
refs/heads/master
| 2023-02-23T10:06:31.941434
| 2023-02-07T13:00:02
| 2023-02-07T13:00:02
| 187,597,229
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,007
|
py
|
#!/usr/bin/env python3
'''
average atom coordinates on overlapping parts of nucl steps
usage: chain2rna.py <chains_file> --nat <nat1 nat2 nat3>
--coor <step1.npy step2.npy step3.npy>
'''
import sys, argparse, numpy as np
from npy import npy2to3, npy3to2
#######################################
parser =argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('chains_file', help="txt file, one line per structure, \
each line containing the list of fragment indices (from 1)\
at each position of the structure, with space separator.")
parser.add_argument('--nat', help="list of the numbers of atoms per nucleotide", nargs='+', type=int)
# avoid loading twice the same motif.npy, which is time consuming
parser.add_argument('--coor', help="coordinates of one step for all structures,\
(one file per step), in npy format with shape (nstruct, ncoor)\
with ncoor = 3*nat", nargs='+')
parser.add_argument('--outp')
args = parser.parse_args()
#######################################
chainfile = args.chains_file # txt file. One chain per line, one index per column
try:
steps = np.loadtxt(chainfile, dtype=int) - 1 ### changed 9-02_17:30
if len(steps.shape) == 1:
steps = np.reshape(steps,(1,steps.shape[0]))
except Exception:
    print("np.loadtxt failed; parsing chain file manually")
cc = [ [int(i)-1 for i in l.split()] for l in open(chainfile)]
steps = np.array(cc)
nfrag = steps.shape[1]
nat = args.nat
print(nat, file=sys.stderr)
assert nfrag == len(nat) + 1, (nfrag, len(nat))
coor = [ npy2to3(np.load(i)) for i in args.coor ] # one np.array per step
outp = args.outp
assert outp != args.chains_file, "ERROR: output is same as input"
#initialise merged structure
max_atom = sum([n.shape[1] for n in coor]) #max nb of atoms in final chain
rna = np.zeros( (len(steps), max_atom, 3) )
count = 0
#First atoms unchanged
len_step = coor[0].shape[1] #nb of atoms in 1st step
n = len_step - nat[0] #nb of atom to not merge in 1st step
rna[:,:n] = coor[0][steps[:, 0], :n]
count += n
for i in range(nfrag-1):
#Merge overlapping atoms
print("merge nucl %i"%(i+2), file=sys.stderr)
coor1 = coor[i][steps[:, i], -nat[i]: ] # last atoms of previous step
coor2 = coor[i+1][steps[:, i+1], :nat[i] ] # first atoms of next step
rna[:, count:count+nat[i]] = 0.5*(coor1+coor2)
count += nat[i]
if i < nfrag-2:
#add non-overlapping atoms of next step
len_step = coor[i+1].shape[1]
n = len_step - nat[i] - nat[i+1]
print(coor[i+1].shape)
rna[:, count:count+n] = coor[i+1][steps[:, i+1], nat[i]:nat[i]+n]
count += n
#Last atoms unchanged
len_step = coor[-1].shape[1] #nb of atoms in last step
n = len_step - nat[-1] #nb of atoms to not merge in last step
rna[:,count:count+n] = coor[-1][steps[:, -1], -n:]
count += n
rna = rna[:, :count]
np.save(outp, rna)
|
[
"isaure@debeauchene.fr"
] |
isaure@debeauchene.fr
|
445fb5b6badfff86722d8e44d138eb67ccd93677
|
1add27072ef164ed8fc08a25d7c75a0560d41c8c
|
/有选择的广播client.py
|
4d6684677c76a23640afea959ce272418c203332
|
[] |
no_license
|
so1so2so/RbmqTest
|
fbe8aef3be7b5d7ec20340db238188a6c4c4903d
|
c6bd8ddc2c2e450462f15def833e6a3cdda06f4b
|
refs/heads/master
| 2020-12-02T05:24:23.296418
| 2017-07-11T15:52:57
| 2017-07-11T15:52:57
| 96,901,849
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
import pika
import sys
connection = pika.BlockingConnection(pika.ConnectionParameters(
host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='direct_logs',
                         exchange_type='direct')
severity = sys.argv[1] if len(sys.argv) > 1 else 'info'
message = ' '.join(sys.argv[2:]) or 'Hello World!'
channel.basic_publish(exchange='direct_logs',
routing_key=severity,
body=message)
print(" [x] Sent %r:%r" % (severity, message))
connection.close()
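# Matching consumer sketch (hypothetical; mirrors the RabbitMQ direct-exchange
# tutorial this publisher follows, and would run as a separate script):
#     connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
#     channel = connection.channel()
#     channel.exchange_declare(exchange='direct_logs', exchange_type='direct')
#     queue_name = channel.queue_declare(queue='', exclusive=True).method.queue
#     for severity in sys.argv[1:] or ['info']:
#         channel.queue_bind(exchange='direct_logs', queue=queue_name, routing_key=severity)
#     channel.basic_consume(queue=queue_name, on_message_callback=lambda ch, m, p, body: print(body), auto_ack=True)
#     channel.start_consuming()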
|
[
"1037930435@qq.com"
] |
1037930435@qq.com
|
12f196384762afd78595f3d69010b05ce6cabdf9
|
af34436be48e3792599ff3466cc38bdd7fff5290
|
/avg of numbers in a LIST.py
|
8da4d8b7f463ccb3e65cc2211abfa316804f6710
|
[] |
no_license
|
masoom-A/Python-codes-for-Beginners
|
12d2924160b84ca3cf6d6e68e1f6fca74116c3c2
|
b439a29cbdc10ab881ef0ccd6458c431e5372927
|
refs/heads/master
| 2022-12-06T04:23:43.755022
| 2020-08-27T20:58:26
| 2020-08-27T20:58:26
| 234,271,631
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
numbers=[20,30,40,50]
total=0
for num in numbers:
total=total+num
avg=total/len(numbers)
print("the avg of the given numbers is :{}".format(avg))
|
[
"noreply@github.com"
] |
masoom-A.noreply@github.com
|
b49ac091b335ee1df84bca85aa6360db8c7f5049
|
17cc8bffed3fadb413506f1545c455d7b9406ed6
|
/parts/zodiac/pyramid/tests/test_config/test_assets.py
|
e051b89ba17edc45c561570df498743cd80122bf
|
[] |
no_license
|
stinett/zodiac
|
f7a4f788942930fa217e7e1c7d525b82a557258f
|
22b247719694b0f5aa5135b3cb68c1e84aaf7629
|
refs/heads/master
| 2020-05-21T01:14:59.949571
| 2014-01-13T15:53:47
| 2014-01-13T15:53:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 92
|
py
|
/home/stine/myenv/zodiac/eggs/pyramid-1.4-py2.7.egg/pyramid/tests/test_config/test_assets.py
|
[
"stine@funkydesktop.(none)"
] |
stine@funkydesktop.(none)
|
56ac3caa3cdeed7efb3a5bb57d1814faf753d732
|
afe15879dbbbb6780fd10691df1bb3ed75c1a9a5
|
/setup.py
|
9e5708bcfc505228d0281043a6217ea4d963a8f9
|
[] |
no_license
|
pintman/fizzbuzz
|
8e75315db4c65500afb4d5a5779a769373ccd7d8
|
1598212388294dde959d2fd67e3e5b68bad5b56b
|
refs/heads/master
| 2021-11-09T07:14:11.323327
| 2021-11-07T12:14:51
| 2021-11-07T12:14:51
| 88,208,440
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
from setuptools import setup, find_packages
setup(
name = "fizzbuzz_katas",
version = "0.1",
description = "Exercises with the famous FizzBuzz-Game.",
packages = find_packages()
)
|
[
"marco@bakera.de"
] |
marco@bakera.de
|
ee6a72eda676c44336313abdc90596d4f4f40c87
|
9fb2139bf41e2301f9ee9069d649c5afe8e7735c
|
/python/Algorithms/Implementation/Append and Delete.py
|
158f4de415ababe3c12518eb8a61830524553308
|
[] |
no_license
|
codewithgauri/HacktoberFest
|
9bc23289b4d93f7832271644a2ded2a83aa22c87
|
8ce8f687a4fb7c3953d1e0a5b314e21e4553366e
|
refs/heads/master
| 2023-01-02T07:20:51.634263
| 2020-10-26T07:02:34
| 2020-10-26T07:02:34
| 307,285,210
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
def appendAndDelete(s, t, k):
    # length of the common prefix of s and t
    common = 0
    while common < min(len(s), len(t)) and s[common] == t[common]:
        common += 1
    # deletions plus appends needed to turn s into t
    needed = (len(s) - common) + (len(t) - common)
    if k >= len(s) + len(t):
        print("Yes")  # enough moves to delete all of s and retype t
    elif k >= needed and (k - needed) % 2 == 0:
        print("Yes")  # spare moves pair up as one delete plus one append
    else:
        print("No")
if __name__ == '__main__':
s = input()
t = input()
k = int(input())
appendAndDelete(s, t, k)
|
[
"08sandysk@gmail.com"
] |
08sandysk@gmail.com
|
46bd0280b432d71e1c209039b3f1a16c0a0c9e9d
|
95eba049c8a97ee806d573afdb3115313acf48af
|
/feeding-and-consuming-apis/consume_gitlab.py
|
c152ee68cb07017416dc8da30eeaf0414fd2be45
|
[] |
no_license
|
AlfredoPardo-zz/python-for-devsecops
|
ca5bc7dca498e1830578528aef5d205d261a43c3
|
9ace3271df173509999d80ee6c6a7ccf46424f5f
|
refs/heads/main
| 2022-12-31T09:00:58.195245
| 2020-10-15T21:06:47
| 2020-10-15T21:06:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,607
|
py
|
import requests
import urllib3
from pprintjson import pprintjson as ppjson
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
api_url = "https://gitlab.com/api/v4"
private_token = "your-personal-access-token-goes-here"
separator = "*" * 80
project_id = "999999999"
group_id = "999999999"
commit_id = "999999999"
pipeline_id = "999999999"
# Groups
response = requests.get("{}/groups?private_token={}".format(api_url,
private_token), verify=False)
print("{}\n{}\n{}".format(separator,"Gitlab Groups",separator))
print("{}\n{}".format("\tHeaders",separator))
for header_key, header_value in response.headers.items():
print("\t{}: {}".format(header_key, header_value))
print("{}\n\t{}\n{}".format(separator,"Response",separator))
ppjson(response.json())
# Group Members
response = requests.get("{}/groups/{}/members?private_token={}".format(api_url,
group_id, private_token), verify=False)
print("{}\n{}\n{}".format(separator,"GitLab Group Members",separator))
print("{}\n{}".format("\tHeaders",separator))
for header_key, header_value in response.headers.items():
print("\t{}: {}".format(header_key, header_value))
print("{}\n\t{}\n{}".format(separator,"Response",separator))
ppjson(response.json())
# Projects
response = requests.get("{}/projects/{}?private_token={}".format(api_url,
project_id, private_token), verify=False)
print("{}\n{}\n{}".format(separator,"GitLab Projects",separator))
print("{}\n{}".format("\tHeaders",separator))
for header_key, header_value in response.headers.items():
print("\t{}: {}".format(header_key, header_value))
print("{}\n\t{}\n{}".format(separator,"Response",separator))
ppjson(response.json())
# Project Members
response = requests.get("{}/projects/{}/members?private_token={}".format(api_url,
project_id, private_token), verify=False)
print("{}\n{}\n{}".format(separator,"GitLab Project Members",separator))
print("{}\n{}".format("\tHeaders",separator))
for header_key, header_value in response.headers.items():
print("\t{}: {}".format(header_key, header_value))
print("{}\n\t{}\n{}".format(separator,"Response",separator))
ppjson(response.json())
# Commits
response = requests.get("{}/projects/{}/repository/commits?private_token={}".\
format(api_url, project_id, private_token), verify=False)
print("{}\n{}\n{}".format(separator,"GitLab Repository Commits",separator))
print("{}\n{}".format("\tHeaders",separator))
for header_key, header_value in response.headers.items():
print("\t{}: {}".format(header_key, header_value))
print("{}\n\t{}\n{}".format(separator,"Response",separator))
ppjson(response.json())
# Commit Details
response = requests.get("{}/projects/{}/repository/commits/{}/diff?private_token={}".\
format(api_url, project_id, commit_id, private_token),
verify=False)
print("{}\n{}\n{}".format(separator,"GitLab Repository Commit Details",separator))
print("{}\n{}".format("\tHeaders",separator))
for header_key, header_value in response.headers.items():
print("\t{}: {}".format(header_key, header_value))
print("{}\n\t{}\n{}".format(separator,"Response",separator))
ppjson(response.json())
# Merge Requests
response = requests.get("{}/projects/{}/merge_requests?private_token={}".\
format(api_url, project_id, private_token), verify=False)
print("{}\n{}\n{}".format(separator,"GitLab Project Merge Requests",separator))
print("{}\n{}".format("\tHeaders",separator))
for header_key, header_value in response.headers.items():
print("\t{}: {}".format(header_key, header_value))
print("{}\n\t{}\n{}".format(separator,"Response",separator))
ppjson(response.json())
# Pipelines
response = requests.get("{}/projects/{}/pipelines?private_token={}".\
format(api_url, project_id, private_token), verify=False)
print("{}\n{}\n{}".format(separator,"GitLab Project Pipelines",separator))
print("{}\n{}".format("\tHeaders",separator))
for header_key, header_value in response.headers.items():
print("\t{}: {}".format(header_key, header_value))
print("{}\n\t{}\n{}".format(separator,"Response",separator))
ppjson(response.json())
# Jobs
response = requests.get("{}/projects/{}/pipelines/{}/jobs?private_token={}".\
format(api_url, project_id, pipeline_id, private_token), verify=False)
print("{}\n{}\n{}".format(separator,"GitLab Project Pipeline Jobs",separator))
print("{}\n{}".format("\tHeaders",separator))
for header_key, header_value in response.headers.items():
print("\t{}: {}".format(header_key, header_value))
print("{}\n\t{}\n{}".format(separator,"Response",separator))
ppjson(response.json())
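# The blocks above repeat one fetch-and-print pattern; a refactoring sketch
# (not in the original script) makes that explicit:
def dump_endpoint(title, url):
    response = requests.get(url, verify=False)
    print("{}\n{}\n{}".format(separator, title, separator))
    print("{}\n{}".format("\tHeaders", separator))
    for header_key, header_value in response.headers.items():
        print("\t{}: {}".format(header_key, header_value))
    print("{}\n\t{}\n{}".format(separator, "Response", separator))
    ppjson(response.json())
# e.g. dump_endpoint("Gitlab Groups", "{}/groups?private_token={}".format(api_url, private_token))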
|
[
"noreply@github.com"
] |
AlfredoPardo-zz.noreply@github.com
|
3f8ed73f895f604b8451900b88af990b06e4f71c
|
6f0dd6dcfd12aff7f2e14b97b2152ca4e04c932f
|
/JumptoPython/05.2.py
|
55255a4b340ec58dc6ee6771da068db7c86c279e
|
[] |
no_license
|
mozell/MyPython
|
45799f032dcfb68b4053ad22a83123276a72a7c7
|
c86f4db6dd5cf42c64690f377f323c9b97dcb735
|
refs/heads/master
| 2021-05-03T14:44:25.791400
| 2018-04-06T14:01:47
| 2018-04-06T14:01:47
| 120,463,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
import mod1_05
print(mod1_05.safe_sum(3,4))
print(mod1_05.safe_sum(3,'a'))
import mod2_05
print(mod2_05.PI)
a = mod2_05.Math()
print(a.solv(2))
print(mod2_05.sum(mod2_05.PI, 4.4))
result = mod2_05.sum(3,4)
print(result)
|
[
"mozellatto@gmail.com"
] |
mozellatto@gmail.com
|
8546ed251b75c2520ca53febc6071240ba38f398
|
c0083bc593015b6786e18b32ca4cd304b72ffb06
|
/porsche/porsche/settings.py
|
74688f75b1a5e5b041b0962559072aad2406be4b
|
[] |
no_license
|
MASk1999/PorsheForLife
|
e3967ba7a88a886f186c93fe09b58d398038a805
|
5815309a60fb24396d04558cf9327f141131ef2c
|
refs/heads/master
| 2022-05-28T13:07:46.347764
| 2020-05-02T14:51:33
| 2020-05-02T14:51:33
| 260,621,643
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,121
|
py
|
"""
Django settings for porsche project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')@m#y6ue=h%9@bgg-t6_2o%h5wl-vu(s6d-mfmdt)k+4nok@mi'
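# Hardening sketch (not part of the generated file): in production, read the
# key from the environment instead, e.g.
#     SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)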
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'taycan.apps.TaycanConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'porsche.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'porsche.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR,'taycan.db'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"59138913+MASk1999@users.noreply.github.com"
] |
59138913+MASk1999@users.noreply.github.com
|
51b75b9efa968facb5c13f9ca53c5e6363897eca
|
23d6fdd50d6c124c3bf57da2c9ee464afe1920a0
|
/app/models/other.py
|
448b8b8b071c133f7c9d9bbf24123660d40a6f8e
|
[] |
no_license
|
nxgycf/app-breeding
|
89ca27327c1bd7040d34520e42fcad398154e538
|
c82d5b7916d0dff176edba2e52aacf0cd4305d29
|
refs/heads/master
| 2020-03-27T17:02:30.116277
| 2019-04-30T09:57:54
| 2019-04-30T09:57:54
| 146,825,239
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,454
|
py
|
# -*- coding: utf-8 -*-
'''
Created on 2018-07-25
@author: shuai.chen
'''
import datetime
from sqlalchemy import *
from base.sql import SqlModel
class AvatarInfo(SqlModel):
"""
    avatar info
"""
__tablename__ = "avatar_info"
__connection_name__ = "default"
id = Column('id',BigInteger,primary_key=True,nullable=False)
filename = Column('filename',String(32),nullable=False)
type = Column('type',SmallInteger,nullable=False, default=0)
path = Column('path',String(128),nullable=False, default='')
create_date = Column('create_date',DateTime,nullable=False,default=datetime.datetime.now,server_default=text('CURRENT_TIMESTAMP'))
update_time = Column('update_time',DateTime,nullable=False,default=datetime.datetime.now,server_default=text('CURRENT_TIMESTAMP'))
class Region(SqlModel):
"""
region
"""
__tablename__ = "region"
__connection_name__ = "default"
id = Column('id',Integer,primary_key=True,nullable=False)
region_code = Column('region_code',String(8),nullable=False)
region_name = Column('region_name',String(32),nullable=False,default='')
region_level = Column('region_level',SmallInteger,nullable=False,default=0)
city_code = Column('city_code',String(6),nullable=False,default='')
center = Column('center',String(32),nullable=False,default='')
parent_id = Column('parent_id',Integer,nullable=False,default=1,index=True)
|
[
"shuai.chen@moji.com"
] |
shuai.chen@moji.com
|
f94c9381245296bdb0486ee9db0077141a8f8354
|
14fb176790d5875478fcf213241318cfc9596842
|
/app/urls.py
|
70b3949f8cdcb75fd502a7999f5e44a6d2f98858
|
[] |
no_license
|
Qouagga/django_drf_ajax_example
|
c6a23b33128ea4597ced6a97d7d634c03d97902f
|
3161c7a16db74b2bd358fec38b21222184e5aeb5
|
refs/heads/master
| 2023-03-18T05:44:26.949883
| 2020-07-23T15:33:50
| 2020-07-23T15:33:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.list_jobs, name='list_jobs'),
path('api/get_jobs/', views.get_jobs)
]
|
[
"orion@system76-pc.localdomain"
] |
orion@system76-pc.localdomain
|
84a39ac88c5b9ccdc8ca396c14d24bb942443192
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/abc016/A/4893746.py
|
ae0ea3833c64764491dded4fc67bc407ff1c00df
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 87
|
py
|
M, D =map(int, input().split())
if M % D == 0:
print('YES')
else:
print('NO')
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
24738a4c8b661a5b63edae14b1694f6e933a9d9d
|
cf4d7225765a68e71ed4d799f9c27b1c0ec0d38a
|
/data_norm.py
|
18f1388bfa9fbb42935b2badeac5b3b8a539543e
|
[
"Apache-2.0"
] |
permissive
|
liuhongbing1220/360finance
|
f16fe53e8968f66189cf507ff82afa5dc2555d85
|
907a3ea455ea16bb3249aee7605899d318329c70
|
refs/heads/master
| 2020-03-30T05:40:16.930474
| 2018-10-10T13:50:50
| 2018-10-10T13:50:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,544
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 29 09:47:35 2018
@author: zbj
"""
import pandas as pd
import numpy as np
root= "E:\\liuhongbing\\360finance\\open_data_train_valid1\\open_data_train_valid\\train\\";
train_1 = pd.read_table(root+'train_1.txt', sep='\t')
train_2 = pd.read_table(root+'train_2.txt', sep='\t',header=None)
train_3 = pd.read_table(root+'train_3.txt', sep='\t',header=None)
train_4 = pd.read_table(root+'train_4.txt', sep='\t',header=None)
train_5 = pd.read_table(root+'train_5.txt', sep='\t',header=None)
train_sum = pd.concat([train_2, train_3, train_4, train_5])
train_sum.columns = train_1.columns
train_sum = pd.concat([train_sum, train_1])
data_info_bak = train_sum.describe(percentiles=[.25,.5,.95,.98])
data_info_bak.loc['count_un_null'] = data_info_bak.loc['count']/100000
## Drop features that are NULL in 98% of rows
def getColumnsByUnNull():
tmp = (data_info_bak.loc['count_un_null']>0.2)
tmp1 = tmp[tmp.values]
return tmp1.index
colByUnNull = getColumnsByUnNull()
train_byUnNull = train_sum[colByUnNull]
data_info_byUnNull = data_info_bak[colByUnNull]
'''
colByUnNullList = colByUnNull.tolist()
colByUnNull = pd.DataFrame({'colu':colByUnNull, 'value':np.arange(len(colByUnNull))})
colByUnNull.to_csv(root+"train\\colByUnNull.txt", sep='\t',index=None)
'''
## Drop features that are zero in 98% of rows
def getColumnsByZero():
tmp = data_info_byUnNull.loc['98%']>0
tmp1 = tmp[tmp.values]
return tmp1.index
colByZero = getColumnsByZero()
train_byZero = train_byUnNull[colByZero]
## Record the retained features
colByZero = pd.DataFrame({'col':colByZero, 'value':np.arange(len(colByZero))})
colByZero.to_csv(root+"colByZero_0929.txt", sep='\t',index=None)
train_byZero.head()
## Samples that carry a tag
train_byZero_tag_0 = train_byZero[train_byZero['tag']==0]
train_byZero_tag_1 = train_byZero[train_byZero['tag']==1]
train_byZero.to_csv(root+'train_sample.txt', sep='\t',index=None)
'''
## Count samples with/without overdue labels
## Labeled samples
train_sample = train_byZero[~np.isnan(train_byZero.label)]
train_sample_positive = train_sample[train_sample.label==0]
train_sample_negative = train_sample[train_sample.label==1]
print('positive/negative sample ratio:', len(train_sample_positive.index)/len(train_sample_negative.index))
rateposneg = len(train_sample_positive.index)/len(train_sample_negative.index)
print('positive/negative sample ratio:', rateposneg)
train_sample_tag_0 = train_sample[train_sample.tag==0]
train_sample_t0_posi = train_sample_tag_0[train_sample_tag_0.label==0]
train_sample_t0_nega = train_sample_tag_0[train_sample_tag_0.label==1]
print('approved users positive/negative samples:', len(train_sample_t0_posi)/len(train_sample_t0_nega))
train_sample_tag_1 = train_sample[train_sample.tag==1]
train_sample_t1_posi = train_sample_tag_1[train_sample_tag_1.label==0]
train_sample_t1_nega = train_sample_tag_1[train_sample_tag_1.label==1]
print('rejected users positive/negative samples:', len(train_sample_t1_posi)/len(train_sample_t1_nega))
len(train_sample_positive)/len(train_sample)
len(train_sample_tag_0)
len(train_sample_tag_1)
train_byZero.to_csv(root+"train_sample.txt", sep='\t',index=None)
train_sample_bytag = train_byZero[~np.isnan(train_byZero.tag)]
train_sample_bytag_posi = train_sample_bytag[train_sample_bytag.tag==0]
train_sample_bytag_nega = train_sample_bytag[train_sample_bytag.tag==1]
train_sample_bytag_posi.to_csv(root+'train_sample_bytag_posi', sep='\t',index=None)
'''
|
[
"liuhongbing@zbj.com"
] |
liuhongbing@zbj.com
|
db020d29ae1ba752081c622d0712e06d8a5e71ae
|
90e1a82946baf33c980a98a7d71dd304ed338a56
|
/rev/una_acies/una_acies_admin.py
|
0bdce0550511c160c54363579b29b7a9fd74eb2f
|
[
"MIT"
] |
permissive
|
MikeSE314/sdctf-2021
|
d866d32603c169066377abfe8a91823c6f292164
|
fcddb506f5f798a264fc17e5588c0f5b7d5fbb2c
|
refs/heads/main
| 2023-04-20T16:14:07.678265
| 2021-05-17T20:31:25
| 2021-05-17T20:31:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 762
|
py
|
#! /usr/bin/env python2
from __future__ import print_function
import sys
def ok(test):
x = 5
for i in range(43):
x = x | i ^ i*x + x
return x
def test2():
return str(ok(2))
def no():
k = 3
for a in range(23):
k = k | a*a << a%k*a ^ a*k
return str(k)
def why(a,b):
return "".join([chr(ord(x) ^ ord(y)) for x,y in zip(str(a),b)])
def test(what):
b = no()
d = ok(5)
j = test2()
return ("nope", flag)[why(why(what,no()),test2()) == secret]
secret = "7<`+'eX#&QcZ1zVrr2's`%>}B7"
flag = open("flag.txt").read()
try:
while(True):
print("Enter key: ", end="")
sys.stdout.flush()
u = raw_input()
print(test(u))
sys.stdout.flush()
except EOFError:
pass
|
[
"nick@knox.codes"
] |
nick@knox.codes
|
24770049cd30942b074fa505b228b4083a815357
|
98700cdeee27f21c995b07d9c7a4289f522c3fab
|
/tests/test_graph_alg.py
|
e773fde2b673dd4bd2436ad0b4a6966ba011d90e
|
[
"MIT"
] |
permissive
|
KirovVerst/qparallel
|
a122e89d32d9f382f6767a83db873252fa193b98
|
4fc1265819f7db31ce63320dbaead6ca4e7798ba
|
refs/heads/master
| 2021-07-09T04:36:07.717940
| 2019-01-01T18:33:26
| 2019-01-01T18:33:26
| 133,990,820
| 1
| 0
|
MIT
| 2019-01-01T18:33:27
| 2018-05-18T18:31:29
|
Python
|
UTF-8
|
Python
| false
| false
| 374
|
py
|
__author__ = 'Maria Khodorchenko'
def test_graph_alg():
from qparallel.graph import Graph
graph = Graph([[0, 1, 8], [0, 2, 5], [2, 0, 4], [1, 0, 3], [2, 1, 2], [1, 2, 3], [2, 3, 1], [3, 2, 2]], n_proc=3)
graph.find_shortest_path()
print(graph._I)
# assert graph._I.tolist() == [[0., 7., 5.], [3., 0., 8.], [5., 2., 0.]]
print(graph.color_graph())
|
[
"mariyaxod@yandex.ru"
] |
mariyaxod@yandex.ru
|
65a4f7594820266f1286cfc085ae8c42a0bb83d8
|
14a0975bfef2a69761efb4fa0c748df0028c887b
|
/bangazon_project_pt_1/urls.py
|
3a5c2b4c2c9ca1f045a630ddf54c79c19fe1a51b
|
[] |
no_license
|
kirksudduth/bangazon_practice
|
5e647f61a41a1c2259e7b8539cfd39ac54a944a8
|
98fadf58d477a0b3b151556e331a028243d76ebd
|
refs/heads/master
| 2022-11-28T05:39:42.540505
| 2020-08-10T02:09:50
| 2020-08-10T02:09:50
| 286,353,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 763
|
py
|
"""bangazon_project_pt_1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
[
"kirksudduth@gmail.com"
] |
kirksudduth@gmail.com
|
e0d9c1c53993d956a49154ddc4f380fba244cece
|
572837549c481f2076d6cb02c23a011a8b82d06e
|
/pedigreetools/pedigree/formats/csv.py
|
468b86170a27fcfdbeb703f6aaf33962d507740b
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
rvandijke/pedigree-tools
|
205af1cc7a5233366c9b3773c873b3d9d6d8ea9f
|
19daf492c3cbdceac5998d8e0b71891f15a695f7
|
refs/heads/master
| 2020-04-21T08:20:03.200381
| 2019-02-06T14:32:14
| 2019-02-06T14:32:14
| 169,416,468
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,384
|
py
|
from .store import Store
from ..models import Individual
from ..models import Sex
import csv
import os.path
from collections import OrderedDict
class CSV(Store):
@staticmethod
def default_column_names():
return {
Store.Column.FAMILY_ID: "FID",
Store.Column.INDIVIDUAL_ID: "IndivID",
Store.Column.FATHER_ID: "FathID",
Store.Column.MOTHER_ID: "MothID",
Store.Column.SEX: "Sex 1=m / 2=v"
}
def __init__(self, filename, column_names = None):
self.column_names = column_names if column_names else CSV.default_column_names()
self.filename = filename
if os.path.isfile(filename):
fieldnames = self.get_fieldnames()
self.individuals = list(map(lambda row: self.__deserialize(row, fieldnames.copy()), self.__get_all_rows()))
self.individuals = list(filter(lambda individual: len(individual.identifier) > 0, self.individuals))
def get_fieldnames(self):
file = open(self.filename, "r", encoding="utf-8-sig")
dialect = self._get_dialect_from_file(file)
fieldnames = list(filter(lambda fieldname: len(fieldname) > 0, csv.DictReader(file, dialect=dialect).fieldnames))
file.close()
return fieldnames + ["original_row"]
def _get_dialect_from_file(self, file):
sniffer = csv.Sniffer()
dialect = sniffer.sniff(file.read(1024))
file.seek(0)
return dialect
def number_of_families(self):
return len(self.get_all_families())
def get_all_families(self):
families = OrderedDict()
for individual in self.individuals:
families[individual.family_identifier] = "0"
return families.keys()
def number_of_individuals(self):
return len(self.individuals)
def get_all_individuals(self, family_identifier):
individuals = []
for individual in self.individuals:
if individual.family_identifier == family_identifier:
individuals.append(individual)
return individuals
def get_individual(self, index):
return [individual for idx, individual in enumerate(self.individuals) if idx == index][0]
def set_individuals(self, individuals):
self.individuals = sorted(individuals, key=lambda k: k.get_attribute("original_row"))
if len(individuals) == 0:
## TODO, clear file
return
fieldnames = [
self.column_names[Store.Column.FAMILY_ID],
self.column_names[Store.Column.INDIVIDUAL_ID],
self.column_names[Store.Column.FATHER_ID],
self.column_names[Store.Column.MOTHER_ID],
self.column_names[Store.Column.SEX]
]
fieldnames = fieldnames + list(individuals[0].get_attribute_names())
if "original_row" in fieldnames:
fieldnames.remove("original_row")
file = open(self.filename, "w", encoding="utf-8-sig")
writer = csv.DictWriter(file, fieldnames)
writer.writeheader()
rows = map(lambda individual: self.__serialize(individual), self.individuals)
writer.writerows(rows)
file.close()
def __get_all_rows(self):
file = open(self.filename, "r", encoding="utf-8-sig")
dialect = self._get_dialect_from_file(file)
reader = csv.DictReader(file, dialect=dialect)
rows = list(reader)
i = 2
for row in rows:
row["original_row"] = i
i = i + 1
file.close()
return rows
def __deserialize(self, row, fieldnames):
# filter empty keys out
row = {key:value for (key, value) in row.items() if key}
identifier = row[self.column_names[Store.Column.INDIVIDUAL_ID]]
family_identifier = row[self.column_names[Store.Column.FAMILY_ID]]
father_identifier = row[self.column_names[Store.Column.FATHER_ID]]
mother_identifier = row[self.column_names[Store.Column.MOTHER_ID]]
sex = row[self.column_names[Store.Column.SEX]]
if sex == "1":
sex = Sex.MALE
elif sex == "2":
sex = Sex.FEMALE
else:
sex = Sex.UNKNOWN
for key, value in self.column_names.items():
fieldnames.remove(value)
attributes = OrderedDict()
for key in fieldnames:
attributes[key] = row[key]
individual = Individual(
identifier,
family_identifier,
father_identifier,
mother_identifier,
sex,
attributes
)
return individual
def __serialize(self, individual):
data = dict()
data[self.column_names[Store.Column.FAMILY_ID]] = individual.family_identifier
data[self.column_names[Store.Column.INDIVIDUAL_ID]] = individual.identifier
data[self.column_names[Store.Column.FATHER_ID]] = individual.father_identifier
data[self.column_names[Store.Column.MOTHER_ID]] = individual.mother_identifier
sex = "0"
if individual.sex == Sex.MALE:
sex = "1"
if individual.sex == Sex.FEMALE:
sex = "2"
data[self.column_names[Store.Column.SEX]] = sex
data.update(individual.attributes)
del data["original_row"]
return data
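# Usage sketch (hypothetical file name, not part of the module): load a
# pedigree CSV and list its families.
#     store = CSV("pedigree.csv")
#     print(store.number_of_families(), list(store.get_all_families()))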
|
[
"robinvandijke@gmail.com"
] |
robinvandijke@gmail.com
|
c555eff5b2a54b0c84668d7cea80f39b8e42ec2c
|
e5b4708219fab86c6f158d99a5bd8a2a31c6ad84
|
/TestcaseSelect/TestcaseSelector.py
|
fab69956ba30f87b5a2a01216427778798ea6820
|
[] |
no_license
|
Whaplescr/TestcaseSelector
|
c2d08e2a7480a90d9b31079cdf7091c8ced2d65c
|
4afbb0e5846336b5c75b2933ddce7bfa2185aed9
|
refs/heads/master
| 2021-09-12T14:22:55.898143
| 2018-04-17T14:59:05
| 2018-04-17T14:59:05
| 114,696,313
| 0
| 0
| null | 2018-02-02T14:30:30
| 2017-12-18T23:03:28
|
Python
|
UTF-8
|
Python
| false
| false
| 4,737
|
py
|
from tkinter import *
from tkinter.ttk import Treeview, Style
import unittest
class TestcaseSelector:
def start(self):
# Create TK window
self.root = Tk()
# Create Style object for TK window
self.root.style = Style()
# Set default frame size to 800x640
self.root.geometry('800x640')
# Set Treeview row height to 40 so there's no overlap
self.root.style.configure('Treeview',rowheight=40)
# Set title and window size
self.root.wm_title("Select Testcases to Run")
# Create a frame for the treeview
self.testcase_frame = Frame(self.root)
# Create scrollbar for treeview
scrollbar = Scrollbar(self.root)
scrollbar.pack(side=RIGHT,fill=Y)
# Create Treeview
self.treeView = Treeview(self.testcase_frame)
self.treeView.pack(expand=1,fill=BOTH)
# Attach scrollbar to Treeview
self.treeView.config(yscrollcommand=scrollbar.set)
scrollbar.config(command=self.treeView.yview)
# Get testcase name dictionary to use for filling out treeview
testcase_dictionary = get_testcase_name_dictonary()
self.testcase_data = {}
self.testcase_run_data = {}
for key in testcase_dictionary.keys():
subsection = testcase_dictionary[key]
self.testcase_data[key] = subsection
s_p = self.treeView.insert('', END, text=key)
for test in subsection:
                testcase_name = test._testMethodName
self.treeView.insert(s_p, END, text=testcase_name)
self.testcase_run_data[testcase_name] = test
self.webData = self.testcase_run_data
# Create buttons for cancel and run tests
run_button = Button(self.testcase_frame, text="Run", fg="green",command=self._save_selection,width=25,height=5)
run_button.pack(side=LEFT,expand=1,fill=BOTH)
quit_button = Button(self.testcase_frame, text="Cancel", fg="red", command=self.treeView.quit,width=25,height=5)
quit_button.pack(side=RIGHT,expand=1,fill=BOTH)
# Pack the rest of the frame and tell it to scale on both x and y axis
self.testcase_frame.pack(expand=1,fill=BOTH)
def get_tests_from_selected_names(self,names):
ret_tests = {}
for name in names:
ret_tests[name] = self.webData[name]
return ret_tests
def _save_selection(self):
selected_tests = self.treeView.selection()
output=[]
for selection in selected_tests:
item_text = self.treeView.item(selection,'text')
if 'test_' in item_text:
if item_text not in output:
output.append(item_text)
else:
pass
elif 'Tests' in item_text:
for test in self.testcase_data[item_text]:
output.append(test._testMethodName)
# output = output + self.testSectionData[item_text]
self.testcases = self.get_tests_from_selected_names(output)
self.root.quit()
def get_testcases(self):
self.start()
self.root.mainloop()
self.root.destroy()
# Try/Except to fail gracefully
try:
return self.testcases
except:
exit(0)
def test_name(parent):
tns = []
if hasattr(parent, '_testMethodName'):
return parent
elif hasattr(parent, '_tests'):
for t in parent._tests:
tn = test_name(t)
if tn:
tns.append(tn)
return tns
def get_all_automated_tests():
# Create a unittest test loader
loader = unittest.TestLoader()
    # Discover test files in the "Tests" directory whose names end in Tests.py
tests = loader.discover('Tests', pattern='*Tests.py')
# Parses entries in *Tests.py file for unittests in classes and methods
tcs = [y for x in [y for x in test_name(tests) for y in x] for y in x]
return tcs
def get_testcase_name_dictonary():
# Get collection of automated tests
all_tests = get_all_automated_tests()
section_dict = {}
# Sort tests into groups by their parent class
for test in all_tests:
testcase_name = test
test_section = type(test).__name__
if test_section in section_dict:
section_dict[test_section].append(testcase_name)
else:
section_dict[test_section] = [testcase_name]
return section_dict
# # Testing code -- needs a Tests directory with a .py file containing unit-test to work
# tcs = TestcaseSelector()
# tests = tcs.get_testcases()
# print(1)
|
[
"whaples2011@gmail.com"
] |
whaples2011@gmail.com
|
afbf3af6e58dca36bf647dd5795d4696ecab44ca
|
30e0c169ff906855b1cb975bf1d730a11c5281e9
|
/mysite/SolarModel/urls.py
|
d69c20ff999b6ca560574f98a1c45923dc3dbbeb
|
[] |
no_license
|
hansonsa12/Final_Project
|
65db782f3f8d49099d81f7744f61fea301e27219
|
569089e270161fc0d0fe79f74e9e451a3c2d5054
|
refs/heads/master
| 2023-01-21T19:49:54.388468
| 2020-12-07T06:24:40
| 2020-12-07T06:24:40
| 314,090,857
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
from . import views
from django.urls import path
app_name = 'SolarModel'
urlpatterns = [
path('', views.IndexClassView.as_view(), name="index"),
path('<int:pk>', views.PlanetDetail.as_view(), name="detail"),
path('add', views.CreateItem.as_view(), name="create_item"),
]
|
[
"samuelhanson@mail.weber.edu"
] |
samuelhanson@mail.weber.edu
|
46e654b1076d802a9b995b7f7a19daf1a77493c6
|
660803ed1e0997e2de304be574198ef09384ccdd
|
/myfunctions.py
|
c675db3c8cdb3a3643162c775fdd98478b7c0079
|
[] |
no_license
|
toanphan1302/Room_Scanner
|
7fd5c174f2611d71108a32652a09453444af8b17
|
e0d0121a288eb3a157f4d227fe60ec1276c031a9
|
refs/heads/main
| 2023-06-09T11:50:17.258543
| 2021-06-25T03:37:00
| 2021-06-25T03:37:00
| 380,114,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,524
|
py
|
from machine import UART
def processPkt(pkt: bytes):
"""
packet = [0x59, 0x59, distL, distH, strL, strH, reserved, integration time, checksum]
return True if there is a problem
Note: the integration time always seems to be 0
"""
# turn string data into array of bytes
# pkt = list(map(ord, pkt))
    problem = False
    if pkt is None or len(pkt) != 9:
        print("packet is not 9 bytes")
        return True  # a short packet cannot be indexed safely below
# check header
if pkt[0] != 0x59 or pkt[1] != 0x59:
print("bad headers")
problem = True
# calculate checksum
cs = sum(pkt[:8])
cs &= 0xff
# print('cs', cs, pkt[8])
if pkt[8] != cs:
problem = True
print("bad checksum")
# print('L {} H {}'.format(pkt[2], pkt[3]))
#strength = pkt[4] + (pkt[5] << 8)
# q = pkt[7]
# print('ans',dist, st, q)
return problem
def getdistance(uart: UART):
'''
Input: uart port from pi pico
***********
Output: return the distance read from uart sensor (for tfmini only)
'''
# flush serial
while uart.any() > 0:
uart.read(uart.any())
data = uart.read(9)
while processPkt(data):
# re_read data
while uart.any() > 0:
uart.read(uart.any())
data = uart.read(9)
print(data)
dist = (data[2] + (data[3] << 8))/100
return dist
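# Hypothetical wiring sketch (not in the original module): a TFMini on a
# Raspberry Pi Pico, UART0 at the sensor's default 115200 baud; pin numbers
# are assumptions.
#     from machine import Pin
#     uart = UART(0, baudrate=115200, tx=Pin(0), rx=Pin(1))
#     print(getdistance(uart), "m")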
|
[
"noreply@github.com"
] |
toanphan1302.noreply@github.com
|
b991346f4afbc80c4686cd4b0f4f73405bf20e70
|
debdf93849027455e660eaaeeea4b3fa24e91795
|
/Assignment 7/sample_pystan.py
|
253d4aaaecef9b3879593f45988b06c7a2111e2b
|
[] |
no_license
|
hyllevask/ABDA
|
774862b9c2ea1f7d022349fd08c19fe173553f50
|
c24d2a2e96a0fe3d28e0b1c64d21b637a0cf5a83
|
refs/heads/master
| 2020-08-01T17:49:04.479993
| 2020-01-23T20:25:13
| 2020-01-23T20:25:13
| 211,066,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,211
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 30 13:21:38 2019
sample_stan
@author: johohm
"""
########################## DATA ############################################
import pickle
import numpy as np
import matplotlib.pyplot as plt
import pystan
y = np.array([607, 583, 521, 494, 369, 782, 570, 678, 467, 620, 425, 395, 346, 361, 310,
300, 382, 294, 315, 323, 421, 339, 398, 328, 335, 291, 329, 310, 294, 321,
286, 349, 279, 268, 293, 310, 259, 241, 243, 272, 247, 275, 220, 245, 268,
357, 273, 301, 322, 276, 401, 368, 149, 507, 411, 362, 358, 355, 362, 324,
332, 268, 259, 274, 248, 254, 242, 286, 276, 237, 259, 251, 239, 247, 260,
237, 206, 242, 361, 267, 245, 331, 357, 284, 263, 244, 317, 225, 254, 253,
251, 314, 239, 248, 250, 200, 256, 233, 427, 391, 331, 395, 337, 392, 352,
381, 330, 368, 381, 316, 335, 316, 302, 375, 361, 330, 351, 186, 221, 278,
244, 218, 126, 269, 238, 194, 384, 154, 555, 387, 317, 365, 357, 390, 320,
316, 297, 354, 266, 279, 327, 285, 258, 267, 226, 237, 264, 510, 490, 458,
425, 522, 927, 555, 550, 516, 548, 560, 545, 633, 496, 498, 223, 222, 309,
244, 207, 258, 255, 281, 258, 226, 257, 263, 266, 238, 249, 340, 247, 216,
241, 239, 226, 273, 235, 251, 290, 473, 416, 451, 475, 406, 349, 401, 334,
446, 401, 252, 266, 210, 228, 250, 265, 236, 289, 244, 327, 274, 223, 327,
307, 338, 345, 381, 369, 445, 296, 303, 326, 321, 309, 307, 319, 288, 299,
284, 278, 310, 282, 275, 372, 295, 306, 303, 285, 316, 294, 284, 324, 264,
278, 369, 254, 306, 237, 439, 287, 285, 261, 299, 311, 265, 292, 282, 271,
268, 270, 259, 269, 249, 261, 425, 291, 291, 441, 222, 347, 244, 232, 272,
264, 190, 219, 317, 232, 256, 185, 210, 213, 202, 226, 250, 238, 252, 233,
221, 220, 287, 267, 264, 273, 304, 294, 236, 200, 219, 276, 287, 365, 438,
420, 396, 359, 405, 397, 383, 360, 387, 429, 358, 459, 371, 368, 452, 358, 371])
ind = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7,
7, 7, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 18, 18,
18, 18, 18, 19, 19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21, 21,
21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 25, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28,
28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 30, 30, 30, 30,
30, 30, 31, 31, 31, 31, 31, 32, 32, 32, 32, 32, 33, 34, 34, 34, 34, 34,
34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34])
# Add the kids data
child_j = np.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0])
child_i = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
# Add the attempt number!
x = np.array([1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1,
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1, 2, 3,
4, 5, 6, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5,
1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 1, 2,
3, 4, 5, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18])
########################################################################################
logy = np.log(y)
#Stan definitions
stan_model = """
data {int<lower=0> NInd; //Number of participants
int<lower=0> Samples; // number of samples, integer with a lower bound = 0
real<lower=0> logy[Samples]; // Vector of samples, y with length "Samples" of real values
int Ind[Samples]; // Index specifying each individual
int Child_Ind[Samples]; // Index specyfying if an individual is a child or not
int x[Samples]; // Attempt number
}
parameters {
real mu0; // Group intercept
real mu1; // Group slope
real<lower=0> tau0; // Std of the individual theta0s
real<lower=0> tau1; // Std of the individual theta1s
real eta0[NInd]; // Individuals mean intercepts
real eta1[NInd]; // Individuals mean slopes
real<lower=0> sigma; // Variation of data
real phi0; // Child addition to the mean
real phi1;
}
transformed parameters {
}
model {
mu0 ~ uniform(-10000, 10000); // Flat prior distribution on mu
mu1 ~ uniform(-10000, 10000); // Flat prior distribution on mu
tau0 ~ uniform(0, 10000); // Flat prior distribution on tau
tau1 ~ uniform(0, 10000); // Flat prior distribution on tau
sigma ~ uniform(0, 10000); // Flat prior distribution on sigma
phi0 ~ uniform(-10000,10000); // Flat prior distribution on phi
phi1 ~ uniform(-10000,10000); // Flat prior distribution on phi
for (j in 1:NInd){
    eta0[j] ~ normal(0,1); // Standard-normal priors on the non-centered individual effects
    eta1[j] ~ normal(0,1);
}
for (i in 1:Samples)
logy[i] ~ normal(eta0[Ind[i]] * tau0 + mu0 + phi0*Child_Ind[i] + (eta1[Ind[i]] * tau1 + mu1 + phi1*Child_Ind[i]) * x[i], sigma); // likelihood: non-centered intercept and slope per individual, each shifted by a child effect
}
"""
stan_data = {
'Ind': ind,
'NInd': np.max(ind),
'Samples': np.size(y),
'logy': logy,
'Child_Ind': child_i,
'x':x
}
compiled_model = pystan.StanModel(model_code=stan_model) #Build the model
fit = compiled_model.sampling(data=stan_data,iter=10000,warmup=1000, chains=1)
results=fit.extract(permuted=True)
filename = 'Assignment7_test'
outfile = open(filename,'wb')
pickle.dump(results,outfile)
outfile.close()
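# --- Added usage sketch (not part of the original script) ---
# Reload the pickled draws and summarize the group-level parameters; this only
# assumes the file written above and the parameter names declared in the Stan
# model, all of which are in scope here.
infile = open(filename, 'rb')
draws = pickle.load(infile)
infile.close()
for name in ('mu0', 'mu1', 'phi0', 'phi1', 'sigma'):
    print('%s: mean=%.3f, sd=%.3f' % (name, np.mean(draws[name]), np.std(draws[name])))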
|
[
"johan.ohman@ltu.se"
] |
johan.ohman@ltu.se
|
73f3068400bf196bd17b2584fb51fddac747f640
|
e608436347dc7455cb62db5eb473729832d4f9dc
|
/f5_bigip_asm_healthreport.py
|
11cef299987f9590cb634765207f8d0533d1aec2
|
[] |
no_license
|
cjenison/f5_bigip_asm_healthreport
|
276796531305e2028eb34f59de0a38e641dd2f98
|
2aaf7f423706975cf7b17d9629172275909436c6
|
refs/heads/master
| 2021-04-03T09:35:00.946455
| 2018-03-23T14:07:35
| 2018-03-23T14:07:35
| 124,674,658
| 0
| 0
| null | 2018-03-23T14:07:36
| 2018-03-10T16:31:58
|
Python
|
UTF-8
|
Python
| false
| false
| 26,945
|
py
|
#!/usr/bin/python
# Home: https://github.com/cjenison/f5_bigip_asm_healthreport
# Author: Chad Jenison (c.jenison@f5.com)
import argparse
import sys
import requests
import json
import getpass
import re
import xlsxwriter
from time import sleep
parser = argparse.ArgumentParser(description='A tool to give summary/health data on one or more BIG-IP ASM systems')
parser.add_argument('--user', '-u', help='username to use for authentication', required=True)
mode = parser.add_mutually_exclusive_group(required=True)  # one of --bigip/--systemlistfile must be supplied
mode.add_argument('--bigip', '-b', help='IP or hostname of BIG-IP Management or Self IP')
mode.add_argument('--systemlistfile', '-s', help='Input file containing IPs or hostnames of BIG-IP systems (Format: pairName: ipOrHostname1, ipOrHostname2)')
passwdoption = parser.add_mutually_exclusive_group()
passwdoption.add_argument('--password', '-p', help='Supply Password as command line argument (dangerous due to shell history)')
passwdoption.add_argument('--passfile', '-pf', help='Obtain password from a text file (with password string as the only contents of file)')
parser.add_argument('--hostport', '-hp', help='List of host,port pairs that will be checked with nc (Example: logserver,514) for reachability', nargs='*')
parser.add_argument('--xlsx', '-x', help='Produce XLSX Output File')
args = parser.parse_args()
def query_yes_no(question, default="no"):
valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
    if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while 1:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid.keys():
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
def get_confirmed_password(bigip, username, password):
bip = requests.session()
bip.verify = False
bip.auth = (username, password)
credentialsValidated = False
while not credentialsValidated:
testRequest = bip.get('https://%s/mgmt/tm/sys/' % (bigip))
if testRequest.status_code == 200:
credentialsValidated = True
return password
elif testRequest.status_code == 401:
if args.password or args.passfile:
print ('Invalid credentials passed via command line argument or as file content; Exiting...')
quit()
else:
                print ('Invalid credentials for user %s' % (username))
passwordRetryQuery = 'Retry with new password (No to exit)?'
if query_yes_no(passwordRetryQuery, default="yes"):
                    password = getpass.getpass('Re-enter Password for %s' % (username))
bip.auth = (username, password)
else:
print('Exiting due to invalid authentication credentials')
quit()
else:
print('Unexpected Error from test request to validate credentials')
print('Status Code: %s' % (testRequest.status_code))
print('Body: %s' % (testRequest.content))
print('Exiting due to unexpected error condition')
quit()
def get_system_info(bigip, username, password):
bip = requests.session()
bip.verify = False
systemInfo = dict()
token = get_auth_token(bigip, username, password)
if token == 'Fail':
systemInfo['authFail'] = True
return systemInfo
elif token == 'ConnectionError':
systemInfo['unreachable'] = True
return systemInfo
else:
systemInfo['authToken'] = token
if systemInfo['authToken']:
systemInfo['authHeader'] = {'X-F5-Auth-Token': systemInfo['authToken']}
bip.headers.update(systemInfo['authHeader'])
else:
bip.auth = (username, password)
systemInfo['authHeader'] = None
systemInfo['ipOrHostname'] = bigip
systemInfo['user'] = username
systemInfo['pass'] = password
versionRaw = bip.get('https://%s/mgmt/tm/sys/version/' % (bigip))
    if versionRaw.status_code == 401:
print ('Invalid Basic Authentication Credentials')
systemInfo['authFail'] = True
return systemInfo
version = versionRaw.json()
if version.get('nestedStats'):
systemInfo['version'] = version['entries']['https://localhost/mgmt/tm/sys/version/0']['nestedStats']['entries']['Version']['description']
else:
volumes = bip.get('https://%s/mgmt/tm/sys/software/volume' % (bigip)).json()
for volume in volumes['items']:
if volume.get('active'):
if volume['active'] == True:
systemInfo['version'] = volume['version']
systemInfo['shortVersion'] = float('%s.%s' % (systemInfo['version'].split('.')[0], systemInfo['version'].split('.')[1]))
#bip.headers.update(authHeader)
globalSettings = bip.get('https://%s/mgmt/tm/sys/global-settings/' % (bigip)).json()
hardware = bip.get('https://%s/mgmt/tm/sys/hardware/' % (bigip)).json()
partitions = list()
partitionCollection = bip.get('https://%s/mgmt/tm/auth/partition/' % (bigip)).json()
for partition in partitionCollection['items']:
partitions.append(partition['fullPath'])
provision = bip.get('https://%s/mgmt/tm/sys/provision/' % (bigip)).json()
systemInfo['provision'] = provision
provisionedModules = list()
for module in provision['items']:
if module.get('level'):
if module['level'] != 'none':
provisionedModules.append(module['name'])
print ('Provisioned Modules: %s' % (json.dumps(provisionedModules)))
systemInfo['provisionedModules'] = provisionedModules
systemInfo['baseMac'] = hardware['entries']['https://localhost/mgmt/tm/sys/hardware/platform']['nestedStats']['entries']['https://localhost/mgmt/tm/sys/hardware/platform/0']['nestedStats']['entries']['baseMac']['description']
systemInfo['marketingName'] = hardware['entries']['https://localhost/mgmt/tm/sys/hardware/platform']['nestedStats']['entries']['https://localhost/mgmt/tm/sys/hardware/platform/0']['nestedStats']['entries']['marketingName']['description']
systemDatePayload = {'command':'run', 'utilCmdArgs': '-c \'date +%Y%m%d\''}
systemInfo['systemDate'] = bip.post('https://%s/mgmt/tm/util/bash' % (bigip), headers=contentJsonHeader, data=json.dumps(systemDatePayload)).json()['commandResult'].strip()
syncStatusPayload = {'command':'run', 'utilCmdArgs': '-c \'tmsh show cm sync-status\''}
systemInfo['syncStatus'] = bip.post('https://%s/mgmt/tm/util/bash' % (bigip), headers=contentJsonHeader, data=json.dumps(syncStatusPayload)).json()['commandResult']
    syncStatusRegex = re.search(r'Color\s+(\S+)', systemInfo['syncStatus'])
systemInfo['syncStatusColor'] = syncStatusRegex.group(1)
systemInfo['hostname'] = globalSettings['hostname']
devices = bip.get('https://%s/mgmt/tm/cm/device' % (bigip)).json()
for device in devices['items']:
if device['selfDevice'] == 'true':
systemInfo['failoverState'] = device['failoverState']
if device.get('unicastAddress'):
systemInfo['unicastAddresses'] = device['unicastAddress']
systemInfo['configsyncIp'] = device['configsyncIp']
systemInfo['timeLimitedModules'] = device['timeLimitedModules']
return systemInfo
### BOX GLOBAL HEALTH CHECK
def bigip_asm_device_check(bigip):
global systemsRow
bip = requests.session()
bip.verify = False
if bigip['authHeader']:
bip.headers.update(bigip['authHeader'])
else:
bip.auth = (bigip['user'], bigip['pass'])
if bigip['syncStatusColor'] != 'green':
print ('System Out of Sync with Peer - Status:')
        print(bigip['syncStatus'])
else:
print ('System In Sync with Peer')
licenseCheckPayload = {'command':'run', 'utilCmdArgs': '-c \'grep trust /config/bigip.license\''}
licenseCheck = bip.post('https://%s/mgmt/tm/util/bash' % (bigip['ipOrHostname']), headers=contentJsonHeader, data=json.dumps(licenseCheckPayload)).json()
bigip['ipIntelligenceLicensed'] = False
if licenseCheck['commandResult'] != '':
bigip['ipIntelEnd'] = licenseCheck['commandResult'].split('_')[0]
if int(bigip['systemDate']) > int(bigip['ipIntelEnd']):
print ('IP Intelligence License Appears to be Expired - End Date: %s - System Date: %s' % (bigip['ipIntelEnd'], bigip['systemDate']))
else:
print ('IP Intelligence License Appears Valid - End Date: %s - System Date: %s' % (bigip['ipIntelEnd'], bigip['systemDate']))
bigip['ipIntelligenceLicensed'] = True
if int(bigip['systemDate']) + 14 > int(bigip['ipIntelEnd']):
print ('IP Intelligence Licensed Expiring within 14 days')
if bigip['ipIntelligenceLicensed']:
checkBrightCloudPayload = {'command': 'run', 'utilCmdArgs': '-c \'nc -z -w3 vector.brightcloud.com 443\''}
checkBrightCloud = bip.post('https://%s/mgmt/tm/util/bash' % (bigip['ipOrHostname']), headers=contentJsonHeader, data=json.dumps(checkBrightCloudPayload)).json()
if checkBrightCloud.get('commandResult'):
if 'getaddrinfo' in checkBrightCloud['commandResult'] or 'name resolution' in checkBrightCloud['commandResult']:
print ('Unsuccessful attempt to reach Brightcloud due to name resolution problem')
bigip['checkBrightCloud'] = False
elif 'succeeded' in checkBrightCloud['commandResult']:
print ('Successfully Reached Brightcloud')
bigip['checkBrightCloud'] = True
else:
print ('Unknown Error in reaching Brightcloud: %s' % (checkBrightCloud['commandResult']))
bigip['checkBrightCloud'] = False
else:
print ('Unsuccessful Attempt to Reach Brightcloud')
bigip['checkBrightCloud'] = False
else:
print ('BIG-IP IP Intelligence not licensed')
if args.hostport:
bigip['hostPortCheck'] = list()
for hostPort in args.hostport:
print ('Checking Host: %s Port: %s for reachability' % (hostPort.split(',')[0], hostPort.split(',')[1]))
checkHostPayload = {'command': 'run', 'utilCmdArgs': '-c \'nc -z -w3 %s %s\'' % (hostPort.split(',')[0], hostPort.split(',')[1])}
checkHost = bip.post('https://%s/mgmt/tm/util/bash' % (bigip['ipOrHostname']), headers=contentJsonHeader, data=json.dumps(checkHostPayload)).json()
if checkHost.get('commandResult'):
if 'getaddrinfo' in checkHost['commandResult']:
print ('Unsuccessful attempt to reach: %s %s due to name resolution problem' % (hostPort.split(',')[0], hostPort.split(',')[1]))
bigip['hostPortCheck'].append({hostPort : False})
elif 'succeeded' in checkHost['commandResult']:
print ('Successfully Reached Host: %s %s' % (hostPort.split(',')[0], hostPort.split(',')[1]))
bigip['hostPortCheck'].append({hostPort : True})
elif checkHost['commandResult'] == '':
print ('Unsuccessful Attempt to Reach Host: %s %s' % (hostPort.split(',')[0], hostPort.split(',')[1]))
bigip['hostPortCheck'].append({hostPort : False})
else:
                    print ('Unknown Error in reaching %s %s: %s' % (hostPort.split(',')[0], hostPort.split(',')[1], checkHost['commandResult']))
bigip['hostPortCheck'].append({hostPort : False})
else:
print ('Unknown Error in reaching %s %s' % (hostPort.split(',')[0], hostPort.split(',')[1]))
bigip['hostPortCheck'].append({hostPort : False})
if args.xlsx:
systemsSheet.write(systemsRow, 0, bigip['hostname'])
systemsSheet.write(systemsRow, 1, bigip['failoverState'])
systemsSheet.write(systemsRow, 2, bigip['marketingName'])
systemsSheet.write(systemsRow, 3, bigip['version'])
systemsSheet.write(systemsRow, 4, json.dumps(bigip['provisionedModules']))
if bigip['ipIntelligenceLicensed']:
systemsSheet.write(systemsRow, 5, bigip['ipIntelligenceLicensed'])
else:
systemsSheet.write(systemsRow, 5, bigip['ipIntelligenceLicensed'], redbg)
        if bigip.get('ipIntelEnd'):
            systemsSheet.write(systemsRow, 6, bigip['ipIntelEnd'])
        else:
            systemsSheet.write(systemsRow, 6, 'N/A')  # no end date exists when IP Intelligence is unlicensed
        if bigip.get('checkBrightCloud'):
            systemsSheet.write(systemsRow, 7, bigip['checkBrightCloud'])
        else:
            systemsSheet.write(systemsRow, 7, bigip.get('checkBrightCloud', False), redbg)
if bigip['syncStatusColor'] == 'green':
systemsSheet.write(systemsRow, 8, bigip['syncStatusColor'])
else:
systemsSheet.write(systemsRow, 8, bigip['syncStatusColor'], redbg)
column = 9
if args.hostport:
for hostPortStatus in bigip['hostPortCheck']:
hostPortKey = hostPortStatus.keys()[0]
if hostPortStatus.get(hostPortKey):
systemsSheet.write(systemsRow, column, hostPortStatus.get(hostPortKey))
else:
systemsSheet.write(systemsRow, column, hostPortStatus.get(hostPortKey), redbg)
column += 1
systemsRow += 1
def bigip_asm_virtual_report(bigip):
global asmVirtualsRow
global noAsmVirtualsRow
bip = requests.session()
bip.verify = False
if bigip['authHeader']:
bip.headers.update(bigip['authHeader'])
else:
bip.auth = (bigip['user'], bigip['pass'])
ltmVirtualDict = {}
ltmVirtuals = bip.get('https://%s/mgmt/tm/ltm/virtual' % (bigip['ipOrHostname'])).json()
for virtual in ltmVirtuals['items']:
print ('Reading virtual: %s' % (virtual['fullPath']))
ltmVirtualDict[virtual['fullPath']] = virtual
    asmPolicyDict = {}  # initialized unconditionally so the lookups below are safe when ASM is not provisioned
    if 'asm' in bigip['provisionedModules']:
        asmPolicies = bip.get('https://%s/mgmt/tm/asm/policies' % (bigip['ipOrHostname'])).json()
        for policy in asmPolicies['items']:
            for virtualServer in policy['virtualServers']:
                asmPolicyDict[virtualServer] = policy
    else:
        print ('ASM Module Not Provisioned')
ltmVirtualsWithoutAsm = []
for virtual in ltmVirtualDict.keys():
if asmPolicyDict.get(virtual):
print('--\nLTM Virtual: %s - FullPath: %s\nDestination: %s' % (ltmVirtualDict[virtual]['name'], virtual, ltmVirtualDict[virtual]['destination'].split("/")[-1]))
print('ASM Policy Name: %s\nEnforcement Mode: %s' % (asmPolicyDict[virtual]['name'], asmPolicyDict[virtual]['enforcementMode']))
print('ASM Policy Last Change: %s' % (asmPolicyDict[virtual]['versionDatetime']))
policyBuilderSettings = bip.get('https://%s/mgmt/tm/asm/policies/%s/policy-builder/' % (bigip['ipOrHostname'], asmPolicyDict[virtual]['id'])).json()
if bigip['shortVersion'] >= 12.0:
if policyBuilderSettings.get('learningMode'):
if policyBuilderSettings['learningMode'] == 'automatic':
print ('Policy Builder Enabled in Automatic Mode')
elif policyBuilderSettings['learningMode'] == 'manual':
print ('Policy Builder Enabled in Manual Mode')
else:
print ('Policy Builder Disabled')
else:
if policyBuilderSettings['enablePolicyBuilder']:
print ('Policy Builder Enabled')
else:
print ('Policy Builder Disabled')
asmPolicyGeneralSettings = bip.get('https://%s/mgmt/tm/asm/policies/%s/general' % (bigip['ipOrHostname'], asmPolicyDict[virtual]['id'])).json()
if asmPolicyGeneralSettings.get('code') != 501:
if asmPolicyGeneralSettings['trustXff']:
print('Trust XFF enabled')
if asmPolicyGeneralSettings.get('customXffHeaders'):
for customXff in asmPolicyGeneralSettings.get('customXffHeaders'):
print('Custom XFF Header: %s' % (customXff))
else:
print('Trust XFF disabled')
else:
if asmPolicyDict[virtual]['trustXff']:
print('Trust XFF enabled')
if asmPolicyDict.get('customXffHeaders'):
for customXff in asmPolicyDict.get('customXffHeaders'):
print('Custom XFF Header: %s' % (customXff))
else:
print('Trust XFF disabled')
maliciousIpBlock = False
maliciousIpAlarm = False
violationsBlocking = bip.get('https://%s/mgmt/tm/asm/policies/%s/blocking-settings/violations/' % (bigip['ipOrHostname'], asmPolicyDict[virtual]['id'])).json()
for violation in violationsBlocking['items']:
if violation['description'] == 'Access from malicious IP address':
if violation['block']:
print ('Access from malicious IP Address - Blocking Enabled')
maliciousIpBlock = True
else:
print ('Access from malicious IP Address - Blocking Disabled')
if violation['alarm']:
print ('Access from malicious IP Address - Alarm Enabled')
maliciousIpAlarm = True
else:
print ('Access from malicious IP Address - Alarm Disabled')
if ltmVirtualDict[virtual].get('securityLogProfiles'):
for logProfile in ltmVirtualDict[virtual]['securityLogProfiles']:
print('Log Profile: %s' % (logProfile))
else:
print('Log Profile Not Attached')
if args.xlsx:
asmVirtualsSheet.write(asmVirtualsRow, 0, bigip['hostname'])
asmVirtualsSheet.write(asmVirtualsRow, 1, ltmVirtualDict[virtual]['name'])
asmVirtualsSheet.write(asmVirtualsRow, 2, ltmVirtualDict[virtual]['fullPath'])
asmVirtualsSheet.write(asmVirtualsRow, 3, ltmVirtualDict[virtual]['destination'])
asmVirtualsSheet.write(asmVirtualsRow, 4, asmPolicyDict[virtual]['name'])
asmVirtualsSheet.write(asmVirtualsRow, 5, asmPolicyDict[virtual]['enforcementMode'])
if ltmVirtualDict[virtual].get('securityLogProfiles'):
if len(ltmVirtualDict[virtual]['securityLogProfiles']) == 1:
asmVirtualsSheet.write(asmVirtualsRow, 6, ltmVirtualDict[virtual]['securityLogProfiles'][0])
else:
asmVirtualsSheet.write(asmVirtualsRow, 6, json.dumps(ltmVirtualDict[virtual]['securityLogProfiles']))
else:
asmVirtualsSheet.write(asmVirtualsRow, 6, 'None')
if bigip['shortVersion'] < 12.0:
asmVirtualsSheet.write(asmVirtualsRow, 7, policyBuilderSettings['enablePolicyBuilder'])
elif bigip['shortVersion'] >= 12.0:
asmVirtualsSheet.write(asmVirtualsRow, 7, policyBuilderSettings['learningMode'])
if asmPolicyDict[virtual].get('trustXff'):
asmVirtualsSheet.write(asmVirtualsRow, 8, asmPolicyDict[virtual]['trustXff'])
elif asmPolicyGeneralSettings.get('trustXff'):
asmVirtualsSheet.write(asmVirtualsRow, 8, asmPolicyGeneralSettings['trustXff'])
else:
asmVirtualsSheet.write(asmVirtualsRow, 8, False)
asmVirtualsSheet.write(asmVirtualsRow, 9, maliciousIpBlock)
asmVirtualsSheet.write(asmVirtualsRow, 10, maliciousIpAlarm)
asmVirtualsRow += 1
else:
ltmVirtualsWithoutAsm.append(virtual)
if ltmVirtualsWithoutAsm:
print ('--\nLTM Virtuals Without an ASM Policy')
for virtual in ltmVirtualsWithoutAsm:
            print(virtual)
if args.xlsx:
noAsmVirtualsSheet.write(noAsmVirtualsRow, 0, bigip['hostname'])
noAsmVirtualsSheet.write(noAsmVirtualsRow, 1, ltmVirtualDict[virtual]['name'])
noAsmVirtualsSheet.write(noAsmVirtualsRow, 2, ltmVirtualDict[virtual]['fullPath'])
noAsmVirtualsSheet.write(noAsmVirtualsRow, 3, ltmVirtualDict[virtual]['destination'])
noAsmVirtualsRow += 1
def get_auth_token(bigip, username, password):
authbip = requests.session()
authbip.verify = False
payload = {}
payload['username'] = username
payload['password'] = password
payload['loginProviderName'] = 'tmos'
authurl = 'https://%s/mgmt/shared/authn/login' % (bigip)
try:
authPost = authbip.post(authurl, headers=contentJsonHeader, auth=(username, password), data=json.dumps(payload), timeout=5)
except requests.exceptions.RequestException as error:
print ('Connection Error for %s' % (error))
token = 'ConnectionError'
return token
#print ('authPost.status_code: %s' % (authPost.status_code))
if authPost.status_code == 404:
print ('attempt to obtain authentication token failed; will fall back to basic authentication; remote LDAP auth will require configuration of local user account')
token = None
elif authPost.status_code == 401:
print ('attempt to obtain authentication token failed due to invalid credentials')
token = 'Fail'
elif authPost.json().get('token'):
token = authPost.json()['token']['token']
print ('Got Auth Token: %s' % (token))
else:
print ('Unexpected error attempting POST to get auth token')
quit()
return token
requests.packages.urllib3.disable_warnings()
if args.password:
unverifiedPassword = args.password
elif args.passfile:
with open(args.passfile, 'r') as file:
unverifiedPassword = file.read().strip()
else:
unverifiedPassword = getpass.getpass('Enter Password for: %s: ' % (args.user))
contentJsonHeader = {'Content-Type': "application/json"}
if args.xlsx:
workbook = xlsxwriter.Workbook(args.xlsx)
systemsRow = 0
asmVirtualsRow = 0
noAsmVirtualsRow = 0
if args.xlsx:
bold = workbook.add_format({'bold': True})
redbg = workbook.add_format()
redbg.set_bg_color('red')
systemsSheet = workbook.add_worksheet('Systems')
systemsSheet.write('A1', 'Hostname', bold)
systemsSheet.write('B1', 'HA Status', bold)
    systemsSheet.write('C1', 'Model', bold)
systemsSheet.write('D1', 'Version', bold)
systemsSheet.write('E1', 'Provisioned Modules', bold)
systemsSheet.write('F1', 'IP Intelligence Licensed', bold)
systemsSheet.write('G1', 'IP Intelligence License End Date', bold)
systemsSheet.write('H1', 'IP Intelligence BrightCloud Reachable', bold)
systemsSheet.write('I1', 'Sync Status', bold)
column = 9
if args.hostport:
for hostPort in args.hostport:
systemsSheet.write(0, column, 'Check for %s' % (hostPort), bold)
column += 1
systemsRow = 1
asmVirtualsSheet = workbook.add_worksheet('Virtual Servers with ASM')
asmVirtualsSheet.write('A1', 'Hostname', bold)
asmVirtualsSheet.write('B1', 'Virtual Name', bold)
asmVirtualsSheet.write('C1', 'Virtual FullPath', bold)
asmVirtualsSheet.write('D1', 'Virtual Destination', bold)
asmVirtualsSheet.write('E1', 'ASM Policy Name', bold)
asmVirtualsSheet.write('F1', 'ASM Enforcement Mode', bold)
asmVirtualsSheet.write('G1', 'ASM Logging Profile', bold)
asmVirtualsSheet.write('H1', 'ASM Policy Builder', bold)
asmVirtualsSheet.write('I1', 'ASM Trust XFF', bold)
asmVirtualsSheet.write('J1', 'ASM Malicious IP Blocking', bold)
asmVirtualsSheet.write('K1', 'ASM Malicious IP Alarm', bold)
#asmVirtualsSheet.write()
asmVirtualsRow = 1
noAsmVirtualsSheet = workbook.add_worksheet('Virtual Servers without ASM')
noAsmVirtualsSheet.write('A1', 'Hostname', bold)
noAsmVirtualsSheet.write('B1', 'Virtual Name', bold)
noAsmVirtualsSheet.write('C1', 'Virtual FullPath', bold)
noAsmVirtualsSheet.write('D1', 'Virtual Destination', bold)
#noAsmVirtualsSheet.write()
noAsmVirtualsRow = 1
if args.bigip:
singleBigip = get_system_info(args.bigip, args.user, unverifiedPassword)
    if singleBigip.get('authFail') or singleBigip.get('unreachable'):
print ('Device: %s unreachable or invalid credentials' % (args.bigip))
else:
bigip_asm_device_check(singleBigip)
bigip_asm_virtual_report(singleBigip)
else:
with open(args.systemlistfile, 'r') as systems:
for line in systems:
pairName = line.split(':')[0]
bigipAaddr = line.split(':')[1].split(',')[0].strip()
bigipBaddr = line.split(':')[1].split(',')[1].strip()
bigipA = get_system_info(bigipAaddr, args.user, unverifiedPassword)
activeBigip = None
standbyBigip = None
if bigipA.get('authFail') or bigipA.get('unreachable'):
print ('Device: %s unreachable or invalid credentials' % (bigipAaddr))
else:
if bigipA['failoverState'] == 'active':
activeBigip = bigipA
else:
standbyBigip = bigipA
bigipB = get_system_info(bigipBaddr, args.user, unverifiedPassword)
if bigipB.get('authFail') or bigipB.get('unreachable'):
print ('Device: %s unreachable or invalid credentials' % (bigipBaddr))
else:
if bigipB['failoverState'] == 'active':
activeBigip = bigipB
else:
standbyBigip = bigipB
            print ('Pair Name: %s - Active BIG-IP: %s - Standby BIG-IP: %s' % (pairName, activeBigip.get('hostname') if activeBigip else 'None', standbyBigip.get('hostname') if standbyBigip else 'None'))
if activeBigip:
print ('--Active System Info--')
bigip_asm_device_check(activeBigip)
if standbyBigip:
print ('--Standby System Info--')
bigip_asm_device_check(standbyBigip)
if activeBigip:
print ('--Active Virtual(s) Report--')
bigip_asm_virtual_report(activeBigip)
if args.xlsx:
workbook.close()
#if query_yes_no('Ready to Proceed with restoration of ASM Policy to Virtuals?', default="no"):
# pass
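# --- Added usage note (not part of the original script) ---
# Example invocations, with hypothetical addresses/paths:
#   python f5_bigip_asm_healthreport.py --user admin --bigip 192.0.2.10 --xlsx report.xlsx
#   python f5_bigip_asm_healthreport.py --user admin --systemlistfile pairs.txt --hostport logserver,514
# where each line of pairs.txt follows the format expected above, e.g.:
#   pair1: 192.0.2.10, 192.0.2.11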
|
[
"c.jenison@f5.com"
] |
c.jenison@f5.com
|
a47addf1127f1ae6f88b3fb457bb8b59903f4498
|
1fe720adffeca631224d0c3c6887612a35fa962c
|
/setup.py
|
41dee6a82fbcbe05867b8d45c304b7501e17bdcc
|
[
"MIT"
] |
permissive
|
bob-stupak/mytestproj
|
36624e3a30f999bd07a53408434f2fddc32d8c31
|
e8c889e9e44feb578261c3cfdc0646bcb76a722d
|
refs/heads/main
| 2023-01-23T19:41:36.117992
| 2020-12-11T23:32:29
| 2020-12-11T23:32:29
| 320,708,233
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 802
|
py
|
#
#
from setuptools import setup
import os
import sys
if sys.version_info[0] < 3:
with open('README.rst') as f:
long_description = f.read()
else:
with open('README.rst', encoding='utf-8') as f:
long_description = f.read()
version={}
with open(os.path.join('mytestproj','_version.py')) as f:
exec(f.read(),version)
setup(name='mytestproj',
version=version['__version__'],
description='Test function for learning setup',
long_description=long_description,
url='http://github.com/bob-stupak/testpackage',
author='Bob Stupak',
author_email='bob.stupak@noirlab.edu',
license='MIT',
packages=['mytestproj'],
install_requires=['numpy==1.11.2',
'matplotlib==1.5.2',
],
zip_safe=False)
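# --- Added usage note (not part of the original file) ---
# Typical ways to build/install this package from the repository root:
#   pip install .             # install into the active environment
#   python setup.py sdist     # build a source distribution under ./dist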
|
[
"stupakro@yahoo.com,stupak@noao.edu"
] |
stupakro@yahoo.com,stupak@noao.edu
|
5ee646099335af62968ecfb56fdadebab42d1e4f
|
db186156e6119d2a704077a8b3416202e482af90
|
/examples/management_api/declare_queue.py
|
75d085fdd18cececca87c30d24f1bf0e5f4f6aa9
|
[
"MIT"
] |
permissive
|
MeggyCal/amqpstorm
|
07a48100397c3d159944ad4935c62b927bd55efa
|
3682b835b256297fc3c4d52e71a4d0d15cb0719b
|
refs/heads/main
| 2023-04-12T23:40:30.100240
| 2021-04-19T13:40:19
| 2021-04-19T13:40:19
| 359,470,597
| 0
| 0
|
MIT
| 2021-04-19T13:31:22
| 2021-04-19T13:31:20
| null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
from amqpstorm.management import ApiConnectionError
from amqpstorm.management import ApiError
from amqpstorm.management import ManagementApi
if __name__ == '__main__':
API = ManagementApi('http://localhost:15672', 'guest', 'guest')
try:
API.queue.declare('my_queue', virtual_host='/')
except ApiConnectionError as why:
print('Connection Error: %s' % why)
except ApiError as why:
print('Failed to create queue: %s' % why)
else:
print("Queue created...")
|
[
"me@eandersson.net"
] |
me@eandersson.net
|
0391d615e732cc068f6e1c90dbd00e72aa03bdb6
|
94c87557a47f5f32ab32b2aabf673e15278c48eb
|
/banco_do_brasil/trailer/__init__.py
|
c2fae625631cb207ad277d4a0e0d7eeafa3c930a
|
[] |
no_license
|
arannasousa/pyfebraban
|
993c54b4d193e0c941675083eca71d2394880133
|
08b8e481b10f2075d3d4c560e20728ff929f3384
|
refs/heads/master
| 2021-01-20T15:18:15.265245
| 2017-05-09T13:34:09
| 2017-05-09T13:34:09
| 90,746,167
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 797
|
py
|
# coding=utf-8
"""
Developer: asantos
E-mail: Arannã Sousa Santos
Month: 04
Year: 2015
Company: TINS - SOLUCOES CORPORATIVAS
"""
__author__ = u'asantos'
from ...febraban.base import (TagCaracter, TXT)
from ...febraban.trailer import ArquivoTrailer as ArquivoTrailer_febraban
from .controle import ArquivoTrailerControle
class ArquivoTrailer(ArquivoTrailer_febraban):
def __init__(self):
super(ArquivoTrailer, self).__init__()
# --------------------------------------------------------------------------------------
# tipo_registro, codigo, nome, de, ate, valor=None, descricao=None, alertas=[], comentario=None, segumento=None, operacao=None
# --------------------------------------------------------------------------------------
self.controle = ArquivoTrailerControle()
|
[
"arannasousa@hotmail.com"
] |
arannasousa@hotmail.com
|
f2e133e603e1081efb9a2559b6915c3ecdb29d14
|
e00d24c4272d36d27303c13fb0383a72640a22ff
|
/previous_CODE/alerts.py
|
5d036cf8e97f906fbfa7e671d7be3b42935e5825
|
[
"Apache-2.0"
] |
permissive
|
jaumecolom/hackeseiaat
|
c55e97ba5f63d203ae57355cdf57c2e2d635e05c
|
fe6777fc95d26ed88949c4b9f2992414a7e54335
|
refs/heads/master
| 2021-08-23T02:48:39.198944
| 2017-12-02T17:32:18
| 2017-12-02T17:32:18
| 112,831,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
import requests as r
import cv2
import base64
from threading import Thread
def async_post(x, y, frame):
img = encode_image(frame)
r.post("http://openarms-alerts.000webhostapp.com/post-alert.php", data={'coor_x': x, 'coor_y': y, 'image': img})
def post_alert(x, y, frame):
thread = Thread(target=async_post, args=(x, y, frame))
thread.start()
def encode_image(image):
img = cv2.imencode('.jpg', image)[1]
return base64.b64encode(img)
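# --- Added usage sketch (not part of the original module) ---
# Grab one frame from the default webcam and fire an alert with hypothetical
# coordinates; assumes a camera is available at index 0.
if __name__ == '__main__':
    capture = cv2.VideoCapture(0)
    ok, frame = capture.read()
    capture.release()
    if ok:
        post_alert(41.3851, 2.1734, frame)  # hypothetical x/y values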
|
[
"oriolcscr@gmail.com"
] |
oriolcscr@gmail.com
|
630a237bbf40c008aa96ba4127df47eccbcf94d4
|
9089e7afd65ba2c6d5607999b7b7c7eacbc59b61
|
/python3/apaxianparent.py
|
527eb85140d015e2508aac0d910010b976ef47b6
|
[] |
no_license
|
TheChickenBoy/kattis
|
9e9fb88436991db9c2f290064c70881f46a13440
|
fdeb99faf5f630607c5179c678b03270869bae75
|
refs/heads/master
| 2023-03-19T10:18:10.084193
| 2021-03-18T14:53:34
| 2021-03-18T14:53:34
| 217,545,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
w = input().split()
if w[0][-1] == 'e':
print(w[0]+'x'+w[1])
elif w[0][-1] in ["a","i","o","u"]:
    print(w[0][:-1]+'ex'+w[1])
elif w[0][-1] == 'x' and w[0][-2] == 'e':
print(w[0]+w[1])
else:
print(w[0]+'ex'+w[1])
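# --- Added example trace (not part of the original solution) ---
# With made-up input "alice bob": the name ends in 'e', so the first branch
# prints "alicexbob". With "marco polo": 'o' is a vowel, so the second branch
# drops it and prints "marcexpolo".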
|
[
"c16gws@cs.umu.se"
] |
c16gws@cs.umu.se
|
aca2d24b1b809b5d8fca28cdff0fa4a378de9210
|
b512166a4378adfcdae308c0670e411d5cb7475e
|
/huggablehound.py
|
185c7c2aea7cb705d8e641c78b091fa6a4d88146
|
[] |
no_license
|
HebeHH/HuggableHusky
|
66af207b422111bfad0abfe705d87e558198ba0b
|
d3911e3123a330a815bcf2ea90a9f02c6a88cbfd
|
refs/heads/master
| 2020-05-24T12:49:12.996571
| 2019-05-17T20:24:27
| 2019-05-17T20:24:27
| 187,276,338
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,499
|
py
|
import facebookhugs
import RPi.GPIO as GPIO
import time
import random
def main():
GPIO.setmode(GPIO.BOARD)
print "GPIO setup"
GPIO.setup(3, GPIO.IN)
print "current signal: %r" %GPIO.input(3)
hour_count = 0
total_count = 0
last_hug_time = time.time()
print "hour count set to: %d \ntotal count set to: %d" %(hour_count, total_count)
    while True:
        print "in infinite loop"
        time.sleep(0.1)  # brief pause so the GPIO polling loop doesn't peg the CPU
if GPIO.input(3):
print "teddy hugged!"
hour_count+=1
total_count+=1
last_hug_time=time.time()
if total_count%3==0:
facebookhugs.main("I just got hugged!!")
elif total_count%3==1:
facebookhugs.main("Thank you for your love :) :)")
elif total_count%3==2:
facebookhugs.main("Another hug down!")
print "facebook message sent!"
while GPIO.input(3):
time.sleep(3)
print "still pressed..."
        if int(time.time()) % (60*60) == 0:  # modulo, not division: fires at the top of each hour
            print "it's the hour!"
            msg = "In the past hour, I've been hugged %d times!" %hour_count
            facebookhugs.main(msg)
            print "facebook announcement sent"
            hour_count = 0
if total_count == 100:
facebookhugs.main("I just received my hundredth hug! If this was from you, please visit XXXXXX to collect your prize!")
print "finally been hugged a hundred times!"
        if time.time()-last_hug_time>60*random.randint(20,50):
            msg = "Aaw, I haven't been hugged in %d minutes and %d seconds. I'm sad. Please come give me love!" %(
                (time.time()-last_hug_time)/60,
                (time.time()-last_hug_time)%60)
            facebookhugs.main(msg)  # the original composed this message but never sent it
            last_hug_time = time.time()  # reset so the sad message isn't posted on every loop iteration
if __name__ == "__main__":
main()
|
[
"hebehh@gmail.com"
] |
hebehh@gmail.com
|
24998598f986add28ac59a904b631e20e2884641
|
30fd3d665bc948ba6b5189662a9692a00779d6e3
|
/Schlaff_Final.py
|
7194f1af75b0b746c35ab1a99088b4a3de7890ff
|
[] |
no_license
|
Schlaff/soothing-forest
|
9e3866738a6fcd0a21bed3004b9fcef5c7f79f72
|
dce6b78da6c91da385f257e117545abf0863c2bc
|
refs/heads/main
| 2023-03-02T12:52:23.189481
| 2021-02-14T00:20:53
| 2021-02-14T00:20:53
| 313,816,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,756
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 17 16:47:32 2020
@author: brian
"""
### Brian Schlaff
### Comp Sci 450.2 Final
#Import Packages
import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load CSV data for 2017 income and 2014 Tree canopy cover into dataframes
TreeDF = pd.read_csv (r'C:\Users\brian\OneDrive\Desktop\Exploratory Data Analysis and Visualization\Final\Tree_Canopy_in_Urban_and_Non-Urban_LA_County__2014.csv')
IncomeDF = pd.read_csv (r'C:\Users\brian\OneDrive\Desktop\Exploratory Data Analysis and Visualization\Final\Living_Wage__2017_.csv')
#establish empty dataframe with columns for census tract number, medium income, and canopy area, in order to store data that is linked through common tract numbers
column_names = ["Census Tract","Medium Income","Can_P"]
CombinedDF = pd.DataFrame(columns = column_names)
# Loop through the census tract column of each datasets, link them, and append to empty dataframe
total = (len(TreeDF))*(len(IncomeDF))
count = 0
for i in range(0,len(IncomeDF),1):
IncID = IncomeDF.iloc[i]['CENSUS_TRACT']
#nested for loop to match data of the same census tract data
for j in range(0,len(TreeDF),1):
TreeID = TreeDF.iloc[j]['CENSUS_TRACT']
count += 1
percent = 100*(count/(total))
percent = round(percent,3)
print (count, 'Completion:',percent,'%')
if IncID == TreeID:
newRow = {"Census Tract":(IncomeDF.iloc[i]['CENSUS_TRACT']),"Medium Income":(IncomeDF.iloc[i]['MedIncome']),"Can_P":(TreeDF.iloc[j]['Can_P'])}
CombinedDF = CombinedDF.append(newRow,ignore_index=True)
# Export dataframe to csv
CombinedDF.to_csv('Combined.csv')
# Reload CombinedDF so you don't need to rerun nested for loop after closing program
Combined = pd.read_csv(r'C:\Users\brian\OneDrive\Desktop\Exploratory Data Analysis and Visualization\Final\Combined.csv')
column_names = ["Census Tract","Medium Income","Can_P"]
CombinedDF = pd.DataFrame(columns = column_names)
# Remove extra index column
Combined = Combined.drop('Unnamed: 0', axis = 1)
#Create a dictionary to reduce repeated Medium Income Values and average their corresponding canopy area percentages.
income_dict = {}  # renamed from 'dict' to avoid shadowing the built-in
for k in range(0,len(Combined),1):
    key = Combined.iloc[k]['Medium Income']
    if key not in income_dict:
        ttl = 0
        cnt = 0
        for ii in range(k,len(Combined),1):
            if Combined.iloc[ii]['Medium Income'] == key:
                ttl += Combined.iloc[ii]['Can_P']
                cnt += 1
        Avg_Can = ttl/cnt
        income_dict[key] = Avg_Can
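# Note (added): the dedupe-and-average loop above is equivalent to a one-line
# pandas groupby, which also avoids the quadratic scan:
#   income_dict = Combined.groupby('Medium Income')['Can_P'].mean().to_dict()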
#Create a new dataframe with averaged canopy area
DictDF = pd.DataFrame.from_dict(income_dict, orient='index')
#Convert dictionary to csv, and reimport it as a dataframe in order to change keys from index position to column position.
DictDF.to_csv('DictDF.csv')
# reimport Dictionary csv and load it into a dataframe
UpdateDF = pd.read_csv(r'C:\Users\brian\OneDrive\Desktop\Exploratory Data Analysis and Visualization\Final\DictDF.csv')
UpdateDF.rename(columns={"Unnamed: 0":"Medium Income ($)", "0":"Canopy Area (%)"}, inplace= True)
# Add a new Income Bracket column to the dataframe by examining the Medium Income value and assigning a bracket to it
# Empty Brackets to count the amount of values in each income bracket
Brckt1 = 0
Brckt2 = 0
Brckt3 = 0
Brckt4 = 0
Brckt5 = 0
Brckt6 = 0
Brckt7 = 0
#Empty dataframe to store the new column
IncomeBracket = pd.DataFrame()
#loop through medium income column and add an income bracket to the data
for III in range(0,len(UpdateDF),1):
Income = UpdateDF.iloc[III]['Medium Income ($)']
if Income <= 25000:
Brckt1 += 1
AddRow = {'Income Bracket':'0 - 25K ($)'}
IncomeBracket = IncomeBracket.append( AddRow, ignore_index=True)
    elif 25000 < Income <= 50000:  # boundary fixed so incomes just above 25000 aren't skipped
Brckt2 += 1
AddRow = {"Income Bracket":"25K - 50K ($)"}
IncomeBracket = IncomeBracket.append( AddRow, ignore_index=True)
    elif 50000 < Income <= 75000:
Brckt3 += 1
AddRow = {"Income Bracket":"50K - 75K ($)"}
IncomeBracket = IncomeBracket.append( AddRow,ignore_index=True)
    elif 75000 < Income <= 100000:
Brckt4 += 1
AddRow = {"Income Bracket":"75K - 100K ($)"}
IncomeBracket = IncomeBracket.append( AddRow,ignore_index=True)
    elif 100000 < Income <= 125000:
Brckt5 += 1
AddRow = {"Income Bracket":"100K - 125K ($)"}
IncomeBracket = IncomeBracket.append( AddRow,ignore_index=True)
elif 125000 < Income <= 150000:
Brckt6 += 1
AddRow = {"Income Bracket":"125K to 150K ($)"}
IncomeBracket = IncomeBracket.append( AddRow,ignore_index=True)
else:
Brckt7 += 1
AddRow = {"Income Bracket":"150K + ($)"}
IncomeBracket = IncomeBracket.append( AddRow,ignore_index=True)
# Append IncomeBracket Column to UpdateDF
UpdateDF = pd.concat([UpdateDF,IncomeBracket], axis=1)
#Export dataframe to csv file for Visualization in Tableau
UpdateDF.to_csv('Schlaff_Final.csv')
##### Analyze and plot the data
#convert data frame rows into arrays to be used in numpy
X = UpdateDF.iloc[:, 0].values
Y = UpdateDF.iloc[:, 1].values
#run a least squares regression.
#Find the mean of X and Y
MeanX = np.mean(X)
MeanY = np.mean(Y)
#establish and set the numerator and denominator equal to 0
numer = 0
denomer = 0
for ix in range(0,len(X),1):
numer += (X[ix] - MeanX)*(Y[ix] - MeanY)
denomer += (X[ix] - MeanX)**2
#Calculate the slope of line of best fit (m), the Y intercept (c), and establish the equation
m = numer / denomer
c = MeanY - (m*MeanX)
Y_Predicted = c + X*m
# Equation to calculate the r^2 value of the line of best fit
num = 0
den = 0
for iy in range(0,len(Y),1):
den += (Y[iy] - MeanY)**2
num += (Y_Predicted[iy] - MeanY)**2
R2 = num/den
print(R2)
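# Cross-check (added): numpy's polyfit performs the same degree-1 least-squares
# fit, so it can be used to sanity-check the hand-rolled slope and intercept:
#   m_np, c_np = np.polyfit(X, Y, 1)
#   assert np.isclose(m, m_np) and np.isclose(c, c_np)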
#Plot the data
plt.xlabel('Medium Income ($)')
plt.ylabel('Canopy Area (%)')
plt.title('Scatter Plot')
plt.scatter(X, Y, marker='x')
plt.plot([min(X),max(X)],[min(Y_Predicted),max(Y_Predicted)], color = 'red')
|
[
"noreply@github.com"
] |
Schlaff.noreply@github.com
|
61fd754894edcab290ff25bf8a89dc7d62e30f4b
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/elastic/list_vm_host.py
|
f34a81517ec4ba204241c707e33ea9c30c6da3f1
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,117
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'ListVMHostResult',
'AwaitableListVMHostResult',
'list_vm_host',
'list_vm_host_output',
]
@pulumi.output_type
class ListVMHostResult:
"""
Response of a list operation.
"""
def __init__(__self__, next_link=None, value=None):
if next_link and not isinstance(next_link, str):
raise TypeError("Expected argument 'next_link' to be a str")
pulumi.set(__self__, "next_link", next_link)
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="nextLink")
def next_link(self) -> Optional[str]:
"""
Link to the next Vm resource Id, if any.
"""
return pulumi.get(self, "next_link")
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.VMResourcesResponse']]:
"""
Results of a list operation.
"""
return pulumi.get(self, "value")
class AwaitableListVMHostResult(ListVMHostResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListVMHostResult(
next_link=self.next_link,
value=self.value)
def list_vm_host(monitor_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListVMHostResult:
"""
Response of a list operation.
API Version: 2020-07-01.
:param str monitor_name: Monitor resource name
:param str resource_group_name: The name of the resource group to which the Elastic resource belongs.
"""
__args__ = dict()
__args__['monitorName'] = monitor_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:elastic:listVMHost', __args__, opts=opts, typ=ListVMHostResult).value
return AwaitableListVMHostResult(
next_link=__ret__.next_link,
value=__ret__.value)
@_utilities.lift_output_func(list_vm_host)
def list_vm_host_output(monitor_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListVMHostResult]:
"""
Response of a list operation.
API Version: 2020-07-01.
:param str monitor_name: Monitor resource name
:param str resource_group_name: The name of the resource group to which the Elastic resource belongs.
"""
...
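# --- Added usage sketch (not part of the generated file) ---
# Inside a Pulumi program the invoke can be called directly; the names below
# are hypothetical placeholders:
#
#   result = list_vm_host(monitor_name='my-elastic-monitor',
#                         resource_group_name='my-resource-group')
#   print(result.next_link)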
|
[
"noreply@github.com"
] |
bpkgoud.noreply@github.com
|
6315e80b826d9c46d3d908dea6a08c5d291e41e0
|
5d59a3c3ba56afdc06a2b1419d5086179b8d83cd
|
/chatbrick/brick/mailer.py
|
15b6780b8e3cc72366667a6ea8c9b9d1f0eea501
|
[
"MIT"
] |
permissive
|
BluehackRano/cb-wh
|
d45cf07c081dea5b8034ddd887af9fb98777823c
|
ecf11100ad83df71eac9d56f6abbd59ceeda9d83
|
refs/heads/master
| 2020-04-26T23:04:15.065837
| 2019-03-05T07:17:23
| 2019-03-05T07:17:23
| 173,891,214
| 0
| 0
|
MIT
| 2019-03-05T06:50:11
| 2019-03-05T06:50:11
| null |
UTF-8
|
Python
| false
| false
| 9,737
|
py
|
import requests
import time
import logging
import os
import smtplib
from email.mime.text import MIMEText
import blueforge.apis.telegram as tg
from blueforge.apis.facebook import Message, GenericTemplate, TemplateAttachment, ImageAttachment, PostBackButton, \
Element, QuickReply, QuickReplyTextItem
from chatbrick.util import save_a_log_to_server
logger = logging.getLogger(__name__)
BRICK_DEFAULT_IMAGE = 'https://www.chatbrick.io/api/static/brick/img_brick_03_001.png'
BRICK_GENERIC_TEMPLATE_IMAGE = 'https://www.chatbrick.io/api/static/brick/img_brick_03_002.png'
class Mailer(object):
def __init__(self, fb, brick_db):
self.brick_db = brick_db
self.fb = fb
self.smtp = smtplib.SMTP('smtp.gmail.com', 587)
self.sender_email = os.environ['SENDER_EMAIL']
self.sender_password = os.environ['SENDER_PASSWORD']
def __del__(self):
self.smtp.close()
async def facebook(self, command):
if command == 'get_started':
# send_message = [
# Message(
# attachment=ImageAttachment(
# url=BRICK_DEFAULT_IMAGE
# )
# ),
# Message(
# text='블루핵에서 제공하는 "메일보내기 서비스"예요.'
# ),
# Message(
# attachment=TemplateAttachment(
# payload=GenericTemplate(
# elements=[
# Element(title='메일전송',
# subtitle='챗봇에서 메일을 보낼 수 있어요',
# image_url=BRICK_GENERIC_TEMPLATE_IMAGE,
# buttons=[
# PostBackButton(
# title='메일보내기',
# payload='brick|mailer|show_data'
# )
# ])
# ]
# )
# )
# )
# ]
send_message = [
Message(
attachment=TemplateAttachment(
payload=GenericTemplate(
elements=[
Element(image_url=BRICK_DEFAULT_IMAGE,
title='메일보내기 서비스',
subtitle='블루핵에서 제공하는 "메일보내기 서비스"예요.')
]
)
)
),
Message(
attachment=TemplateAttachment(
payload=GenericTemplate(
elements=[
Element(title='메일전송',
subtitle='챗봇에서 메일을 보낼 수 있어요',
image_url=BRICK_GENERIC_TEMPLATE_IMAGE,
buttons=[
PostBackButton(
title='메일보내기',
payload='brick|mailer|show_data'
)
])
]
)
)
)
]
await self.fb.send_messages(send_message)
elif command == 'show_data':
await self.brick_db.save()
elif command == 'cancel':
await self.brick_db.delete()
await self.fb.send_message(
message=Message(
text='메일 보내기를 취소했어요.',
quick_replies=QuickReply(
quick_reply_items=[
QuickReplyTextItem(
title='새 메일보내기',
payload='brick|mailer|show_data'
)
]
)
))
elif command == 'final':
input_data = await self.brick_db.get()
msg = MIMEText(input_data['store'][2]['value'])
msg['Subject'] = '%s로부터 이메일입니다.' % input_data['store'][0]['value']
msg['To'] = input_data['store'][1]['value']
result = '메일 보내기가 완료되었어요.'
if self.fb.log_id is None:
self.fb.log_id = 'FBSendMessage|%d' % int(time.time() * 1000)
try:
self.smtp.ehlo()
self.smtp.starttls()
self.smtp.login(self.sender_email, self.sender_password)
self.smtp.sendmail(self.sender_email, input_data['store'][1]['value'], msg.as_string())
            except Exception:  # narrowed from a bare except so KeyboardInterrupt/SystemExit still propagate
result = '메일 전송을 실패했습니다.\n잠시 후 다시 시도해주세요.'
save_a_log_to_server({
'log_id': self.fb.log_id,
'user_id': self.fb.user_id,
'os': '',
'application': 'facebook',
'api_code': 'mail',
'api_provider_code': 'chatbrick',
'origin': 'webhook_server',
'end': int(time.time() * 1000),
'remark': 'SMTP 통신으로 이메일 전송함'
})
await self.fb.send_message(
message=Message(
text=result,
quick_replies=QuickReply(
quick_reply_items=[
QuickReplyTextItem(
title='연속하여 메일보내기',
payload='brick|mailer|show_data'
)
]
)
))
await self.brick_db.delete()
return None
async def telegram(self, command):
if command == 'get_started':
send_message = [
tg.SendPhoto(
photo=BRICK_DEFAULT_IMAGE
),
tg.SendMessage(
text='블루핵에서 제공하는 "메일보내기 서비스"예요.',
reply_markup=tg.MarkUpContainer(
inline_keyboard=[
[
tg.CallbackButton(
text='메일 보내기',
callback_data='BRICK|mailer|show_data'
)
]
]
)
)
]
await self.fb.send_messages(send_message)
elif command == 'show_data':
await self.brick_db.save()
elif command == 'cancel':
await self.brick_db.delete()
await self.fb.send_message(
tg.SendMessage(
text='메일보내기를 취소했습니다.',
reply_markup=tg.MarkUpContainer(
inline_keyboard=[
[
tg.CallbackButton(
text='메일 보내기',
callback_data='BRICK|mailer|show_data'
)
]
]
)
)
)
elif command == 'final':
input_data = await self.brick_db.get()
msg = MIMEText(input_data['store'][2]['value'])
msg['Subject'] = '%s로부터 이메일입니다.' % input_data['store'][0]['value']
msg['To'] = input_data['store'][1]['value']
result = '메일 보내기가 완료되었어요.'
if self.fb.log_id is None:
self.fb.log_id = 'SendMessage|%d' % int(time.time() * 1000)
try:
self.smtp.ehlo()
self.smtp.starttls()
self.smtp.login(self.sender_email, self.sender_password)
self.smtp.sendmail(self.sender_email, input_data['store'][1]['value'], msg.as_string())
            except Exception:  # narrowed from a bare except so KeyboardInterrupt/SystemExit still propagate
result = '메일 전송을 실패했습니다.\n잠시 후 다시 시도해주세요.'
save_a_log_to_server({
'log_id': self.fb.log_id,
'user_id': self.fb.user_id,
'os': '',
'application': 'telegram',
'api_code': 'mail',
'api_provider_code': 'chatbrick',
'origin': 'webhook_server',
'end': int(time.time() * 1000),
'remark': 'SMTP 통신으로 이메일 전송함'
})
await self.fb.send_message(
tg.SendMessage(
text=result,
reply_markup=tg.MarkUpContainer(
inline_keyboard=[
[
tg.CallbackButton(
text='연속하여 메일보내기',
callback_data='BRICK|mailer|show_data'
)
]
]
)
)
)
await self.brick_db.delete()
return None
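# --- Added deployment note (not part of the original module) ---
# The class reads its SMTP credentials from the environment (see __init__), so
# a deployment needs something like the following, with hypothetical values:
#   export SENDER_EMAIL=bot@example.com
#   export SENDER_PASSWORD=app-specific-password
# Gmail over smtp.gmail.com:587 typically requires an app password when
# two-factor authentication is enabled on the sending account.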
|
[
"wow5468@naver.com"
] |
wow5468@naver.com
|
99975723c664b8f27a566545623df1ee56c6c0ea
|
5537eec7f43098d216d2b550678c8d10b2a26f09
|
/venv/ansible/lib/python2.7/site-packages/requests_kerberos/kerberos_.py
|
57e125915ca5aa1b3013efe7bf91283aff05ddcd
|
[] |
no_license
|
wipro-sdx/Automation
|
f0ae1512b8d9d491d7bacec94c8906d06d696407
|
a8c46217d0fbe51a71597b5db87cbe98ed19297a
|
refs/heads/master
| 2021-07-08T11:09:05.314435
| 2018-05-02T07:18:54
| 2018-05-02T07:18:54
| 131,812,982
| 0
| 1
| null | 2020-07-23T23:22:33
| 2018-05-02T07:15:28
|
Python
|
UTF-8
|
Python
| false
| false
| 13,456
|
py
|
try:
import kerberos
except ImportError:
import winkerberos as kerberos
import re
import logging
from requests.auth import AuthBase
from requests.models import Response
from requests.compat import urlparse, StringIO
from requests.structures import CaseInsensitiveDict
from requests.cookies import cookiejar_from_dict
from .exceptions import MutualAuthenticationError, KerberosExchangeError
log = logging.getLogger(__name__)
# Different types of mutual authentication:
# with mutual_authentication set to REQUIRED, all responses will be
# authenticated with the exception of errors. Errors will have their contents
# and headers stripped. If a non-error response cannot be authenticated, a
# MutualAuthenticationError exception will be raised.
# with mutual_authentication set to OPTIONAL, mutual authentication will be
# attempted if supported, and if supported and failed, a
# MutualAuthenticationError exception will be raised. Responses which do not
# support mutual authentication will be returned directly to the user.
# with mutual_authentication set to DISABLED, mutual authentication will not be
# attempted, even if supported.
REQUIRED = 1
OPTIONAL = 2
DISABLED = 3
class SanitizedResponse(Response):
"""The :class:`Response <Response>` object, which contains a server's
response to an HTTP request.
    This differs from `requests.models.Response` in that its headers and
content have been sanitized. This is only used for HTTP Error messages
which do not support mutual authentication when mutual authentication is
required."""
def __init__(self, response):
super(SanitizedResponse, self).__init__()
self.status_code = response.status_code
self.encoding = response.encoding
self.raw = response.raw
self.reason = response.reason
self.url = response.url
self.request = response.request
self.connection = response.connection
self._content_consumed = True
self._content = ""
self.cookies = cookiejar_from_dict({})
self.headers = CaseInsensitiveDict()
self.headers['content-length'] = '0'
for header in ('date', 'server'):
if header in response.headers:
self.headers[header] = response.headers[header]
def _negotiate_value(response):
"""Extracts the gssapi authentication token from the appropriate header"""
if hasattr(_negotiate_value, 'regex'):
regex = _negotiate_value.regex
else:
# There's no need to re-compile this EVERY time it is called. Compile
# it once and you won't have the performance hit of the compilation.
        regex = re.compile(r'(?:.*,)*\s*Negotiate\s*([^,]*),?', re.I)
_negotiate_value.regex = regex
authreq = response.headers.get('www-authenticate', None)
if authreq:
match_obj = regex.search(authreq)
if match_obj:
return match_obj.group(1)
return None
class HTTPKerberosAuth(AuthBase):
"""Attaches HTTP GSSAPI/Kerberos Authentication to the given Request
object."""
def __init__(
self, mutual_authentication=REQUIRED,
service="HTTP", delegate=False, force_preemptive=False,
principal=None, hostname_override=None, sanitize_mutual_error_response=True):
self.context = {}
self.mutual_authentication = mutual_authentication
self.delegate = delegate
self.pos = None
self.service = service
self.force_preemptive = force_preemptive
self.principal = principal
self.hostname_override = hostname_override
self.sanitize_mutual_error_response = sanitize_mutual_error_response
def generate_request_header(self, response, host, is_preemptive=False):
"""
Generates the GSSAPI authentication token with kerberos.
If any GSSAPI step fails, raise KerberosExchangeError
with failure detail.
"""
# Flags used by kerberos module.
gssflags = kerberos.GSS_C_MUTUAL_FLAG | kerberos.GSS_C_SEQUENCE_FLAG
if self.delegate:
gssflags |= kerberos.GSS_C_DELEG_FLAG
try:
kerb_stage = "authGSSClientInit()"
# contexts still need to be stored by host, but hostname_override
# allows use of an arbitrary hostname for the kerberos exchange
# (eg, in cases of aliased hosts, internal vs external, CNAMEs
# w/ name-based HTTP hosting)
kerb_host = self.hostname_override if self.hostname_override is not None else host
kerb_spn = "{0}@{1}".format(self.service, kerb_host)
result, self.context[host] = kerberos.authGSSClientInit(kerb_spn,
gssflags=gssflags, principal=self.principal)
if result < 1:
raise EnvironmentError(result, kerb_stage)
# if we have a previous response from the server, use it to continue
# the auth process, otherwise use an empty value
negotiate_resp_value = '' if is_preemptive else _negotiate_value(response)
kerb_stage = "authGSSClientStep()"
result = kerberos.authGSSClientStep(self.context[host],
negotiate_resp_value)
if result < 0:
raise EnvironmentError(result, kerb_stage)
kerb_stage = "authGSSClientResponse()"
gss_response = kerberos.authGSSClientResponse(self.context[host])
return "Negotiate {0}".format(gss_response)
except kerberos.GSSError as error:
log.exception(
"generate_request_header(): {0} failed:".format(kerb_stage))
log.exception(error)
raise KerberosExchangeError("%s failed: %s" % (kerb_stage, str(error.args)))
except EnvironmentError as error:
# ensure we raised this for translation to KerberosExchangeError
# by comparing errno to result, re-raise if not
if error.errno != result:
raise
message = "{0} failed, result: {1}".format(kerb_stage, result)
log.error("generate_request_header(): {0}".format(message))
raise KerberosExchangeError(message)
def authenticate_user(self, response, **kwargs):
"""Handles user authentication with gssapi/kerberos"""
host = urlparse(response.url).hostname
try:
auth_header = self.generate_request_header(response, host)
except KerberosExchangeError:
# GSS Failure, return existing response
return response
log.debug("authenticate_user(): Authorization header: {0}".format(
auth_header))
response.request.headers['Authorization'] = auth_header
# Consume the content so we can reuse the connection for the next
# request.
response.content
response.raw.release_conn()
_r = response.connection.send(response.request, **kwargs)
_r.history.append(response)
log.debug("authenticate_user(): returning {0}".format(_r))
return _r
def handle_401(self, response, **kwargs):
"""Handles 401's, attempts to use gssapi/kerberos authentication"""
log.debug("handle_401(): Handling: 401")
if _negotiate_value(response) is not None:
_r = self.authenticate_user(response, **kwargs)
log.debug("handle_401(): returning {0}".format(_r))
return _r
else:
log.debug("handle_401(): Kerberos is not supported")
log.debug("handle_401(): returning {0}".format(response))
return response
def handle_other(self, response):
"""Handles all responses with the exception of 401s.
This is necessary so that we can authenticate responses if requested"""
log.debug("handle_other(): Handling: %d" % response.status_code)
if self.mutual_authentication in (REQUIRED, OPTIONAL):
is_http_error = response.status_code >= 400
if _negotiate_value(response) is not None:
log.debug("handle_other(): Authenticating the server")
if not self.authenticate_server(response):
# Mutual authentication failure when mutual auth is wanted,
# raise an exception so the user doesn't use an untrusted
# response.
log.error("handle_other(): Mutual authentication failed")
raise MutualAuthenticationError("Unable to authenticate "
"{0}".format(response))
# Authentication successful
log.debug("handle_other(): returning {0}".format(response))
return response
elif is_http_error or self.mutual_authentication == OPTIONAL:
if not response.ok:
log.error("handle_other(): Mutual authentication unavailable "
"on {0} response".format(response.status_code))
                if (self.mutual_authentication == REQUIRED and
self.sanitize_mutual_error_response):
return SanitizedResponse(response)
else:
return response
else:
# Unable to attempt mutual authentication when mutual auth is
                # required, raise an exception so the user doesn't use an
# untrusted response.
log.error("handle_other(): Mutual authentication failed")
raise MutualAuthenticationError("Unable to authenticate "
"{0}".format(response))
else:
log.debug("handle_other(): returning {0}".format(response))
return response
def authenticate_server(self, response):
"""
Uses GSSAPI to authenticate the server.
Returns True on success, False on failure.
"""
log.debug("authenticate_server(): Authenticate header: {0}".format(
_negotiate_value(response)))
host = urlparse(response.url).hostname
try:
result = kerberos.authGSSClientStep(self.context[host],
_negotiate_value(response))
except kerberos.GSSError:
log.exception("authenticate_server(): authGSSClientStep() failed:")
return False
if result < 1:
log.error("authenticate_server(): authGSSClientStep() failed: "
"{0}".format(result))
return False
log.debug("authenticate_server(): returning {0}".format(response))
return True
def handle_response(self, response, **kwargs):
"""Takes the given response and tries kerberos-auth, as needed."""
num_401s = kwargs.pop('num_401s', 0)
if self.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
response.request.body.seek(self.pos)
if response.status_code == 401 and num_401s < 2:
# 401 Unauthorized. Handle it, and if it still comes back as 401,
# that means authentication failed.
_r = self.handle_401(response, **kwargs)
log.debug("handle_response(): returning %s", _r)
log.debug("handle_response() has seen %d 401 responses", num_401s)
num_401s += 1
return self.handle_response(_r, num_401s=num_401s, **kwargs)
elif response.status_code == 401 and num_401s >= 2:
# Still receiving 401 responses after attempting to handle them.
# Authentication has failed. Return the 401 response.
log.debug("handle_response(): returning 401 %s", response)
return response
else:
_r = self.handle_other(response)
log.debug("handle_response(): returning %s", _r)
return _r
def deregister(self, response):
"""Deregisters the response handler"""
response.request.deregister_hook('response', self.handle_response)
def __call__(self, request):
if self.force_preemptive:
# add Authorization header before we receive a 401
# by the 401 handler
host = urlparse(request.url).hostname
auth_header = self.generate_request_header(None, host, is_preemptive=True)
log.debug("HTTPKerberosAuth: Preemptive Authorization header: {0}".format(auth_header))
request.headers['Authorization'] = auth_header
request.register_hook('response', self.handle_response)
try:
self.pos = request.body.tell()
except AttributeError:
# In the case of HTTPKerberosAuth being reused and the body
# of the previous request was a file-like object, pos has
# the file position of the previous body. Ensure it's set to
# None.
self.pos = None
return request
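# --- Usage sketch (not part of the original module) ---
# A minimal example of wiring this auth class into requests; the URL is an
# assumption, and OPTIONAL is the mutual-authentication constant referenced
# by handle_other() above.
#
# import requests
# r = requests.get("https://kerberized.example.com/",
#                  auth=HTTPKerberosAuth(mutual_authentication=OPTIONAL))
# print(r.status_code)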
|
[
"admin@example.com"
] |
admin@example.com
|
1f75842a3231df0a3c4fe874c0face5e7c80e1ee
|
04b57835df84526301851e155aceb143f3a6298b
|
/Project Manual (Face Recognition)/4-2_encode_faces.py
|
cef6d6d30c7ed41d5a32de366d58926869d9a4e8
|
[] |
no_license
|
wchoi09/OpenDoor_OpenCV
|
c7be844811c5a6b3258e0d02985c4fa2dcde6541
|
0fbbba7f3ee519512f887d818124c3e55756ddc9
|
refs/heads/master
| 2022-11-16T15:16:53.895626
| 2020-06-12T05:50:53
| 2020-06-12T05:50:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,243
|
py
|
# USAGE
# When encoding on laptop, desktop, or GPU (slower, more accurate):
# python encode_faces.py --dataset dataset --encodings encodings.pickle --detection-method cnn
# When encoding on Raspberry Pi (faster, less accurate):
# python3 encode_faces.py --dataset dataset --encodings encodings.pickle --detection-method hog
# import the necessary packages
from imutils import paths
import face_recognition
import argparse
import pickle
import cv2
import os
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--dataset", required=True,
help="path to input directory of faces + images")
ap.add_argument("-e", "--encodings", required=True,
help="path to serialized db of facial encodings")
ap.add_argument("-d", "--detection-method", type=str, default="cnn",
help="face detection model to use: either `hog` or `cnn`")
args = vars(ap.parse_args())
# grab the paths to the input images in our dataset
print("[INFO] quantifying faces...")
imagePaths = list(paths.list_images(args["dataset"]))
# initialize the list of known encodings and known names
knownEncodings = []
knownNames = []
# loop over the image paths
for (i, imagePath) in enumerate(imagePaths):
# extract the person name from the image path
print("[INFO] processing image {}/{}".format(i + 1,
len(imagePaths)))
name = imagePath.split(os.path.sep)[-2]
	# load the input image and convert it from BGR (OpenCV ordering)
	# to dlib ordering (RGB)
image = cv2.imread(imagePath)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# detect the (x, y)-coordinates of the bounding boxes
# corresponding to each face in the input image
boxes = face_recognition.face_locations(rgb,
model=args["detection_method"])
# compute the facial embedding for the face
encodings = face_recognition.face_encodings(rgb, boxes)
# loop over the encodings
for encoding in encodings:
# add each encoding + name to our set of known names and
# encodings
knownEncodings.append(encoding)
knownNames.append(name)
# dump the facial encodings + names to disk
print("[INFO] serializing encodings...")
data = {"encodings": knownEncodings, "names": knownNames}
f = open(args["encodings"], "wb")
f.write(pickle.dumps(data))
f.close()
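# --- Recognition sketch (not part of the original script) ---
# Shows how the pickle written above could be consumed to label a new image;
# the file name "unknown.jpg" and the 0.6 tolerance are assumptions.
#
# data = pickle.loads(open(args["encodings"], "rb").read())
# unknown = face_recognition.load_image_file("unknown.jpg")
# for encoding in face_recognition.face_encodings(unknown):
#     matches = face_recognition.compare_faces(data["encodings"], encoding,
#                                              tolerance=0.6)
#     names = [n for n, m in zip(data["names"], matches) if m]
#     print("[INFO] matched: {}".format(names))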
|
[
"noreply@github.com"
] |
wchoi09.noreply@github.com
|
e8608e0a9783de1e3216386b2302a0cf193969f8
|
e14f562e4b0a2dc29dc6fe9c0d09ff3edd1cbcc4
|
/test.py
|
097f1428319dbf3ba5ecc9ffd567bd99664383b5
|
[] |
no_license
|
jordicolomer/puzzlehunt
|
fea6c95cd0aeb036a8c786b38a5c281a2b24b0f9
|
1d7a8cecaaaa90f076487569fa6aa63af66f2d5f
|
refs/heads/master
| 2021-01-21T14:32:22.041070
| 2017-06-24T13:17:40
| 2017-06-24T13:17:40
| 95,294,939
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
from zoo import *
def test_rule12():
assert rule12([albatross]) == True
assert rule12([]) == True
assert rule12([albatross, flamingo]) == True
assert rule12([albatross, flamingo, swan]) == True
assert rule12([albatross, flamingo, meetkat, swan]) == True
assert rule12([swan, flamingo]) == False
assert rule12([swan, meetkat, flamingo]) == False
assert rule12([albatross, swan, flamingo]) == False
def test_rule3():
assert rule3([swan, meetkat, lion, warthog]) == True
assert rule3([meetkat, lion, warthog]) == True
assert rule3([warthog, lion, meetkat]) == True
assert rule3([warthog, lion, swan, meetkat]) == True
assert rule3([warthog, swan, meetkat]) == False
assert rule3([warthog, swan]) == True
def test_rule11():
assert rule11([tiger, swan]) == True
assert rule11([swan, tiger]) == True
assert rule11([]) == True
assert rule11([meetkat, swan]) == False
assert rule11([swan, tiger, meetkat]) == True
assert rule11([tiger, meetkat, swan]) == False
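# --- Running these tests (sketch, not part of the original file) ---
# With pytest installed, from the repository root:
#   pytest test.py -q
# (assumes zoo.py defines the animal constants and rule functions pulled in
# above via "from zoo import *")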
|
[
"winkelmo@amazon.com"
] |
winkelmo@amazon.com
|
a7c8002113dc68c48eeef10c7a6887d99b9e8576
|
22655075ae724b6d06c8d5ac0306151988ff2124
|
/tool/cleaner.py
|
a90d6d5b4c427ea842306973200e73ee1aa6196f
|
[] |
no_license
|
cogaiwibu/hello-cross-platform
|
d7b71d287a40e0b04fa4a3e37ab774269a329520
|
871166053f72aa5df53c4b3aa5312a5b6f52df34
|
refs/heads/master
| 2023-03-23T20:03:58.889951
| 2021-03-18T08:14:09
| 2021-03-18T08:14:09
| 255,058,089
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
import os
import shutil
from pathlib import Path
BUILD_DIR = ['build', '.cxx', '.gradle', 'CMakeFiles']
class Cleaner:
def __init__(self, source_directory):
self.source_directory = source_directory
def clean_all(self):
for directory in BUILD_DIR:
self.clean(directory)
def clean(self, regex):
directory = list(Path(self.source_directory).rglob(regex))
for sub_path in directory:
self.clean_path(sub_path)
def clean_path(self, path: Path):
if path.is_dir():
shutil.rmtree(str(path))
elif path.exists():
os.remove(str(path))
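# --- Usage sketch (not part of the original tool) ---
# The project path is an assumption; each pattern in BUILD_DIR is matched
# recursively under it via Path.rglob().
#
# if __name__ == '__main__':
#     Cleaner('..').clean_all()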
|
[
"nganht2@vng.com.vn"
] |
nganht2@vng.com.vn
|
f14cf05d27eb2de9541b110f4925efcbfe375244
|
b05646c751f97af09165496febd2a23a0f5e283a
|
/update_pideal.py
|
9abe4faedaa9ac09da1825834279c1c6e24720f3
|
[] |
no_license
|
outofstyle/MOBSA_MRP
|
d96b1a478426f7606cd604caa48d2c412036f888
|
97fe2902f00b144fd3b9fce135e0a61d8ebffa1b
|
refs/heads/master
| 2021-09-12T07:13:02.370033
| 2018-04-15T08:07:38
| 2018-04-15T08:10:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,468
|
py
|
import sys
from algorithm.util import *
"""
Topology list:
--------------------------------------------
| Rand_Topo | SNDlib_Topo | Zoo_Topo |
--------------------------------------------
| Rand1 | germany50 | AttpMpls |
| Rand2 | india35 | BtNorthAmerica |
| Rand3 | ta1 | Chinanet |
| Rand4 | ta2 | Tinet |
| Rand5 |-------------------------------
| Rand6 |
| Rand7 |
| Rand8 |
-------------
Algorithms list:
----------------------------------------------------
| NSGA-II | MOEA/D | SPEA2 | MOPSO | PBIL1 | PBIL2 |
|NSABC | EAG-MOEAD | NSACO | Jaya |
----------------------------------------------------
"""
PATHs = {'Rand': '/Rand_Topo/',
'SNDlib': '/SNDlib_Topo/',
'Zoo': '/Zoo_Topo/'}
if __name__ == '__main__':
# topo_lst = ['Rand1', 'Rand2', 'Rand3', 'Rand4', 'Rand5', 'Rand6', 'Rand7', 'Rand8']
# topo_lst = ['germany50', 'india35', 'ta1', 'ta2']
topo_lst = ['AttMpls', 'BtNorthAmerica', 'Chinanet', 'Tinet']
alst = ['NSABC', 'MOEA-PCGG', 'SPEA2', 'MOEAD', 'EAG-MOEAD', 'MOSFLA']
runtime = 20
# runtime = 10
for topo in topo_lst[2:3]:
for al in alst[:]:
lst = []
for i in range(runtime):
lst.extend(read_json_as_list(topo=topo, algorithm=al, runtime=i+1))
write_list_to_json(topo=topo, algorithm=al, solutions=lst)
update_ideal_pf(topo=topo, algorithms=alst)
|
[
"quanwenming@outlook.com"
] |
quanwenming@outlook.com
|
b046a09c9975ce74e92f97a7eeb85819a14ebd54
|
19888c945454cc535ceabce35d637e8c6e550646
|
/gym_industrial/envs/mis_calibration/environment.py
|
f0b22e890d5dd73b46eccf16e8411f25964a1c74
|
[
"Apache-2.0"
] |
permissive
|
0xangelo/gym-industrial
|
8aaf7daa971246fb4b4950c44a697c8757273595
|
d89fdc4bbfa3e569beb5d5c4690c23d8ecad28ed
|
refs/heads/master
| 2022-11-25T05:23:09.606829
| 2020-05-13T19:11:41
| 2020-05-13T19:11:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,774
|
py
|
"""Standalone mis-calibration subsystem as a Gym environment."""
import gym
from gym.spaces import Box
from gym.utils import seeding
import numpy as np
from .dynamics import MisCalibrationDynamics
class MisCalibrationEnv(gym.Env):
"""Standalone mis-calibration subsystem as a Gym environment.
From the paper:
> The sub-dynamics of mis-calibration are influenced by external driver setpoint p
> and steering shift h. The goal is to reward an agent to oscillate in h in a pre-
> -defined frequency around a specific operation point determined by setpoint p.
> Thereby, the reward topology is inspired by an example from quantum physics,
> namely Goldstone’s ”Mexican hat” potential.
Args:
setpoint (float): setpoint parameter for the dynamics, as described in the paper
safe_zone (float): the radius of the safe zone.
"""
# pylint:disable=abstract-method
action_scale = 20 * np.sin(15 * np.pi / 180) / 0.9
def __init__(self, setpoint=50, safe_zone=None):
super().__init__()
self.observation_space = Box(
low=np.array([0, 0], dtype=np.float32),
high=np.array([100, 100], dtype=np.float32),
)
self.action_space = Box(
low=np.array([-1], dtype=np.float32), high=np.array([1], dtype=np.float32)
)
self._setpoint = setpoint
safe_zone = safe_zone or np.sin(np.pi * 15 / 180) / 2
self._dynamics = MisCalibrationDynamics(safe_zone)
self.state = None
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
setpoint = np.array([self._setpoint])
shift = self.np_random.uniform(low=0, high=100, size=(1,))
# Initial values
# domain: positive
# system response: advantageous
# phi: 0 (center)
hidden_state = np.array([1, 1, 0])
self.state = np.concatenate([setpoint, shift, hidden_state])
return self._get_obs(self.state)
def step(self, action):
assert action in self.action_space
state = self.state
self.state = next_state = self._transition_fn(self.state, action)
reward = self._reward_fn(state, action, next_state).item()
done = self._terminal(next_state)
return self._get_obs(next_state), reward, done, self._get_info()
def _get_info(self):
# pylint:disable=unbalanced-tuple-unpacking
setpoint, shift, domain, system_response, phi = self.state.tolist()
return {
"setpoint": setpoint,
"shift": shift,
"domain": domain,
"system_response": system_response,
"phi": phi,
}
def _transition_fn(self, state, action):
# pylint:disable=unbalanced-tuple-unpacking
setpoint, shift, domain, system_response, phi = np.split(state, 5, axis=-1)
shift = self._apply_action(action, shift)
domain, system_response, phi = self._dynamics.transition(
setpoint, shift, domain, system_response, phi
)
return np.concatenate([setpoint, shift, domain, system_response, phi], axis=-1)
def _apply_action(self, action, shift):
"""Apply Equation (4)."""
return np.clip(shift + action * self.action_scale, 0, 100)
def _reward_fn(self, state, action, next_state):
# pylint:disable=unused-argument
setpoint, shift = next_state[..., 0], next_state[..., 1]
phi = next_state[..., -1]
return -self._dynamics.penalty(setpoint, shift, phi)
@staticmethod
def _terminal(_):
return False
@staticmethod
def _get_obs(state):
return state[..., :2].astype(np.float32)
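# --- Rollout sketch (not part of the original module) ---
# A short random rollout; the episode length is an assumption, since
# _terminal() above never ends an episode on its own.
#
# env = MisCalibrationEnv(setpoint=50)
# obs = env.reset()
# for _ in range(10):
#     obs, reward, done, info = env.step(env.action_space.sample())
#     print(obs, reward, info["shift"])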
|
[
"angelolovatto@gmail.com"
] |
angelolovatto@gmail.com
|
c4c491ed8e7618ac8434622400d945a7985bc72b
|
64308a3bf96fa8b657fc7a975722dcf50afba473
|
/PSGUIAD/3/07.Layout.py
|
9c3f8545a097b7188a0e20e4c92d99b59adfd6c0
|
[
"MIT"
] |
permissive
|
j2doll/Learning-Qt-for-Python
|
c7b6aec5685bc1e922cae2b5ba52c962974474bb
|
eb737578792e4678ab98e100e6715d52315470a3
|
refs/heads/master
| 2020-03-28T01:18:43.574675
| 2018-09-14T06:50:19
| 2018-09-14T06:50:19
| 147,495,363
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,381
|
py
|
# 07.Layout.py
# Import required modules
import sys, time
from PySide2.QtGui import *
from PySide2.QtWidgets import *
# Our main window class
class MainWindow(QWidget):
def __init__(self): # Constructor function
super(MainWindow,self).__init__()
self.initGUI()
def initGUI(self):
self.setWindowTitle("Horizontal Layout")
self.setGeometry(300, 250, 400, 300)
self.SetLayout()
self.show()
def SetLayout(self): # Add Buttons and set the layout
horizontalLayout = QHBoxLayout(self)
hButton1 = QPushButton('Button 1', self)
hButton2 = QPushButton('Button 2', self)
hButton3 = QPushButton('Button 3', self)
hButton4 = QPushButton('Button 4', self)
horizontalLayout.addWidget(hButton1)
horizontalLayout.addWidget(hButton2)
horizontalLayout.addStretch()
horizontalLayout.addWidget(hButton3)
horizontalLayout.addWidget(hButton4)
self.setLayout(horizontalLayout)
if __name__ == '__main__':
# Exception Handling
try:
myApp = QApplication(sys.argv)
mainWindow = MainWindow()
myApp.exec_()
sys.exit(0)
except NameError:
print("Name Error:", sys.exc_info()[1])
except SystemExit:
print("Closing Window...")
except Exception:
print(sys.exc_info()[1])
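# --- Variation sketch (not part of the original lesson) ---
# Swapping QHBoxLayout for QVBoxLayout inside SetLayout() stacks the same
# buttons vertically; addStretch() then inserts vertical instead of
# horizontal spacing.
#
# verticalLayout = QVBoxLayout(self)
# verticalLayout.addWidget(hButton1)
# verticalLayout.addStretch()
# verticalLayout.addWidget(hButton2)
# self.setLayout(verticalLayout)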
|
[
"j2doll@gmail.com"
] |
j2doll@gmail.com
|
641de27a12d13f7a27177c37c506b418498a872d
|
ff8338671354a79c5248c074952ad9efbde86a0c
|
/shop_api/settings.py
|
d5399256420471bdcb5951e8d61141ab9d07b429
|
[] |
no_license
|
suman419/shopping
|
77e0f072b5ad47a1e1e03372e3e1120045e946b8
|
7daa2a411153057d31ae48e52dd6942faf2a1fb3
|
refs/heads/master
| 2022-11-30T06:08:37.761715
| 2020-08-18T14:34:53
| 2020-08-18T14:34:53
| 288,478,225
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,292
|
py
|
"""
Django settings for shop_api project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
TEMPLATE_DIR=os.path.join(BASE_DIR,'templates')
STATIC_DIR=os.path.join(BASE_DIR,'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xqf=@8soj6!bmn22tm_7qkt=_w#%uob06yv#7$%()$p3bge9=6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'shop_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'shop_api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=[STATIC_DIR]
MEDIA_ROOT=os.path.join(BASE_DIR,'media')
MEDIA_URL='/media/'
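# --- Development-serving sketch (not part of the original settings) ---
# MEDIA_URL/MEDIA_ROOT above only define paths; serving uploads during
# development is usually wired up in the project's urls.py (the module name
# shop_api/urls.py is an assumption about this project's layout):
#
# from django.conf import settings
# from django.conf.urls.static import static
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)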
|
[
"shekhar.shs@gmail.com"
] |
shekhar.shs@gmail.com
|
94efa92bb11a525db0265179b423c1ebf52f1242
|
18f631727089d570d6e6cd08169d302510b8eeae
|
/py/贪吃蛇.py
|
724fc84b006cef4f6f7a992e88f42bbfa72ee8c7
|
[] |
no_license
|
xXxrAinZyian/homepage
|
8b3e5cc00767b194560b44013d03d235def8fbd0
|
fae51850d554d7344be211a5a3d4498ca744accb
|
refs/heads/master
| 2022-11-24T14:54:15.208737
| 2020-07-09T21:45:45
| 2020-07-09T21:45:45
| 278,477,472
| 2
| 0
| null | 2020-07-09T21:45:16
| 2020-07-09T21:45:15
| null |
UTF-8
|
Python
| false
| false
| 4,594
|
py
|
import pygame
import sys
import random
# Global definitions
SCREEN_X = 600
SCREEN_Y = 600
# Snake class
# Grid unit is 25 pixels
class Snake(object):
    # Initialize the needed attributes [moves right by default / body of 5 blocks]
def __init__(self):
self.dirction = pygame.K_RIGHT
self.body = []
for x in range(5):
self.addnode()
    # Always add a new block at the front (head) of the snake
def addnode(self):
left,top = (0,0)
if self.body:
left,top = (self.body[0].left,self.body[0].top)
node = pygame.Rect(left,top,25,25)
if self.dirction == pygame.K_LEFT:
node.left -= 25
elif self.dirction == pygame.K_RIGHT:
node.left += 25
elif self.dirction == pygame.K_UP:
node.top -= 25
elif self.dirction == pygame.K_DOWN:
node.top += 25
self.body.insert(0,node)
    # Remove the last block
def delnode(self):
self.body.pop()
    # Death check
def isdead(self):
        # Hit a wall
if self.body[0].x not in range(SCREEN_X):
return True
if self.body[0].y not in range(SCREEN_Y):
return True
        # Hit itself
if self.body[0] in self.body[1:]:
return True
return False
    # Move!
def move(self):
self.addnode()
self.delnode()
    # Change direction; left/right and up/down cannot be directly reversed
def changedirection(self,curkey):
LR = [pygame.K_LEFT,pygame.K_RIGHT]
UD = [pygame.K_UP,pygame.K_DOWN]
if curkey in LR+UD:
if (curkey in LR) and (self.dirction in LR):
return
if (curkey in UD) and (self.dirction in UD):
return
self.dirction = curkey
# Food class
# Methods: place / remove
# Grid unit is 25 pixels
class Food:
def __init__(self):
self.rect = pygame.Rect(-25,0,25,25)
def remove(self):
self.rect.x=-25
def set(self):
if self.rect.x == -25:
allpos = []
            # Keep away from the walls: between 25 and SCREEN_X-25
for pos in range(25,SCREEN_X-25,25):
allpos.append(pos)
self.rect.left = random.choice(allpos)
self.rect.top = random.choice(allpos)
print(self.rect)
def show_text(screen, pos, text, color, font_bold = False, font_size = 60, font_italic = False):
    # Get a system font and set the text size
cur_font = pygame.font.SysFont("宋体", font_size)
    # Set the bold attribute
cur_font.set_bold(font_bold)
    # Set the italic attribute
cur_font.set_italic(font_italic)
    # Render the text content
text_fmt = cur_font.render(text, 1, color)
    # Draw the text
screen.blit(text_fmt, pos)
def main():
pygame.init()
screen_size = (SCREEN_X,SCREEN_Y)
screen = pygame.display.set_mode(screen_size)
pygame.display.set_caption('Snake')
clock = pygame.time.Clock()
scores = 0
isdead = False
    # Snake / food
snake = Snake()
food = Food()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYDOWN:
snake.changedirection(event.key)
                # Press space to restart after dying
if event.key == pygame.K_SPACE and isdead:
return main()
screen.fill((255,255,255))
        # Draw the snake body / +1 point per step
if not isdead:
scores+=1
snake.move()
for rect in snake.body:
pygame.draw.rect(screen,(20,220,39),rect,0)
        # Show the death message
isdead = snake.isdead()
if isdead:
show_text(screen,(100,200),'YOU DEAD!',(227,29,18),False,100)
show_text(screen,(150,260),'press space to try again...',(0,0,22),False,30)
        # Food handling / +50 points when eaten
        # When the food rect overlaps the snake head, eat it -> the snake gains a node
if food.rect == snake.body[0]:
scores+=50
food.remove()
snake.addnode()
        # Place new food
food.set()
pygame.draw.rect(screen,(136,0,21),food.rect,0)
        # Show the score text
show_text(screen,(50,500),'Scores: '+str(scores),(223,223,223))
pygame.display.update()
clock.tick(10)
if __name__ == '__main__':
main()
|
[
""
] | |
ca9c60d7b6abbe04a2885666d3c37d52e4b28429
|
d2cefe98bc2fb4ad6109890addded90b9b114173
|
/autodoc.py
|
f94690625f9710a21263e5a86d6c2c3be7f586ec
|
[
"MIT"
] |
permissive
|
GabeHart17/javadoc_tools
|
158ff53aa0a9252c36fa189bbcdf246de7f027c1
|
8916142fd699591c52a6a7ca7c0f5bde891cb2b9
|
refs/heads/master
| 2020-09-29T01:01:43.653403
| 2019-12-09T15:51:00
| 2019-12-09T15:51:00
| 226,908,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,030
|
py
|
"""
WARNING: use on text other than syntactically correct java may result in
undefined behavior.
"""
import sys
import os
from enum import Enum
class Scopes(Enum):
GLOBAL = 0
CLASS = 1
FUNCTION = 2
STATIC = 3
def find_functions(text):
functions = [] # (start_index, end_index)
curly_stack = 0
last_transition = 0 # semicolon or scope change
scope = Scopes.GLOBAL
in_string = False
escaped = False
in_line_comment = False
in_block_comment = False
current_function = []
for i in range(len(text)):
c = text[i]
prev = text[i - 1] if i > 0 else ''
if in_line_comment:
if c == '\n':
in_line_comment = False
elif in_block_comment:
if c == '/' and prev == '*':
in_block_comment = False
elif in_string:
if c == '\"' and not escaped:
in_string = False
elif c == '\\' and not escaped:
escaped = True
else:
escaped = False
elif prev == '/':
if c == '/':
in_line_comment = True
elif c == '*':
in_block_comment = True
elif scope == Scopes.GLOBAL:
if c == '{':
scope = Scopes.CLASS
elif scope == Scopes.STATIC:
if c == '{':
curly_stack = max(0, curly_stack)
curly_stack += 1
elif c == '}':
curly_stack -= 1
if curly_stack == 0:
scope = Scopes.CLASS
elif scope == Scopes.CLASS:
s = text[i:].split(maxsplit=1)
if c == '}':
scope = Scopes.GLOBAL
elif s[0] == 'static' and s[1].startswith('{'):
scope = Scopes.STATIC
curly_stack = -1
elif c == '{':
scope = Scopes.FUNCTION
current_function.append(last_transition + 1)
curly_stack += 1
elif scope == Scopes.FUNCTION:
if c == '{':
curly_stack += 1
elif c == '}':
curly_stack -= 1
if curly_stack == 0:
current_function.append(i + 1)
functions.append(tuple(current_function))
current_function = []
scope = Scopes.CLASS
if not (in_string or in_block_comment or in_line_comment) and c in '{};\n':
last_transition = i
return functions
def doc_function(fn):
raw_params = fn.split('(', maxsplit=1)[1].split(')', maxsplit=1)[0].split(',')
params = []
if '' not in raw_params:
params = [i.split()[-1].strip() for i in raw_params]
throws = []
returns = []
in_string = False
escaped = False
in_line_comment = False
in_block_comment = False
for i in range(len(fn)):
c = fn[i]
prev = fn[i - 1] if i > 0 else ''
if in_line_comment:
if c == '\n':
in_line_comment = False
elif in_block_comment:
if c == '/' and prev == '*':
in_block_comment = False
elif in_string:
if c == '\"' and not escaped:
in_string = False
elif c == '\\' and not escaped:
escaped = True
else:
escaped = False
elif prev == '/':
if c == '/':
in_line_comment = True
elif c == '*':
in_block_comment = True
elif fn[i:].startswith('return'):
ret_line = fn[i:].split(';', maxsplit=1)[0]
returns.append(ret_line.split(maxsplit=1)[1].strip())
elif fn[i:].startswith('throw'):
thr_line = fn[i:].split(';', maxsplit=1)[0].split(maxsplit=1)[1].strip()
if thr_line.split(maxsplit=1)[0].strip() == 'new':
thr_line = thr_line.split(maxsplit=1)[1].strip()
throws.append(thr_line.split('(', maxsplit=1)[0].strip())
doc = '/**\n*'
for i in params:
doc += f'\n* @param {i}'
for i in throws:
doc += f'\n* @throws {i}'
for i in returns:
doc += f'\n* @return {i}'
doc += '\n*/'
return doc
def main():
if len(sys.argv) < 2:
print('specify file as first arg')
sys.exit()
with open(sys.argv[1]) as in_file:
text = in_file.read()
fns = find_functions(text)
fns.reverse()
for f in fns:
d = doc_function(text[f[0]: f[1]])
text = text[:f[0]] + f'\n{d}\n' + text[f[0]:]
out_name = sys.argv[1]
try:
os.mkdir(os.path.join(os.path.dirname(sys.argv[1]), 'autodoc'))
except FileExistsError:
print('using existing autodoc folder')
out_name = os.path.join(os.path.dirname(sys.argv[1]), 'autodoc', os.path.basename(sys.argv[1]))
    out_file = open(out_name, 'w')
out_file.write(text)
out_file.close()
print('done')
if __name__ == '__main__':
main()
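# --- Example (illustrative, not part of the original tool) ---
# Given a Java method such as:
#     String greet(String who) { return "hi " + who; }
# doc_function() emits, roughly:
#     /**
#     *
#     * @param who
#     * @return "hi " + who
#     */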
|
[
"flyingmessier@gmail.com"
] |
flyingmessier@gmail.com
|
0c0e031cb5e48ab4f33a7b4e5217e5b550c80076
|
602093f6e86e84486c3be5da96ab06529cddc5d9
|
/51-100/080Remove Duplicates from Sorted Array II/Remove Duplicates from Sorted Array II.py
|
3402a596ce552ed540dace681ca418313d9be470
|
[] |
no_license
|
binzhikuwen/leetcode
|
8ac325787bd629b6e18d862f1d0cd17b57c3ca43
|
ca3422ca8ccaf7e40044dcfaeebe903c2a630baa
|
refs/heads/master
| 2021-01-17T09:32:02.774590
| 2016-03-21T10:19:40
| 2016-03-21T10:19:40
| 40,165,623
| 0
| 1
| null | 2016-03-21T10:19:41
| 2015-08-04T05:43:52
|
C++
|
UTF-8
|
Python
| false
| false
| 813
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Source : https://oj.leetcode.com/problems/remove-duplicates-from-sorted-array-ii/
# Author : bin bin
# Date : 2016-01-26
#
# Follow up for "Remove Duplicates":
# What if duplicates are allowed at most twice?
#
# For example,
# Given sorted array A = [1,1,1,2,2,3],
#
# Your function should return length = 5, and A is now [1,1,2,2,3].
#
#
class Solution:
def removeDuplicates(self, nums):
        if not nums:
return 0
tmp = nums[0]
res = 1
dup = 0
for i in range(1,len(nums)):
if tmp == nums[i]:
dup += 1
if dup <= 1:
nums[res] = nums[i]
res += 1
else:
nums[res] = nums[i]
tmp = nums[i]
dup = 0
res += 1
return res
A = [1,1,1,2,2,3]
s = Solution()
print s.removeDuplicates(A)
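# --- Alternative sketch (not part of the original file) ---
# The same problem in the common "compare against nums[res - 2]" form, which
# generalizes to "at most k duplicates" by changing both 2s:
#
# def removeDuplicates2(nums):
#     res = 0
#     for n in nums:
#         if res < 2 or n != nums[res - 2]:
#             nums[res] = n
#             res += 1
#     return res
#
# print removeDuplicates2([1, 1, 1, 2, 2, 3])  # 5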
|
[
"602221794@qq.com"
] |
602221794@qq.com
|
9eee11420e8ec0c8b816c403ea9fe3bf54758963
|
2b22a6fbfbe2575077a369205dba4f7af3cd7d99
|
/pythonweb/rasiberryPiWebManager/middlewares.py
|
e37d7d784724cf41fe2d9de4b8c92ca3010ed830
|
[
"Apache-2.0"
] |
permissive
|
onwebbe/rasiberryPiWebManager
|
dceef67995d94786a3bf01f10b6376f75b321055
|
14ff9f14f3f873457666fa1669fae715148538c9
|
refs/heads/master
| 2022-12-22T05:28:20.800326
| 2019-09-21T15:04:44
| 2019-09-21T15:04:44
| 207,604,705
| 0
| 0
|
Apache-2.0
| 2022-12-10T15:49:54
| 2019-09-10T16:07:41
|
Vue
|
UTF-8
|
Python
| false
| false
| 217
|
py
|
from django.utils.deprecation import MiddlewareMixin
class CrossDomain(MiddlewareMixin):
def process_response(self, request, response):
        response['Access-Control-Allow-Origin'] = '*'
return response
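# --- Activation sketch (not part of the original file) ---
# The class only takes effect once it is listed in settings.MIDDLEWARE; the
# dotted path below assumes this file lives at
# rasiberryPiWebManager/middlewares.py on the Python path:
#
# MIDDLEWARE = [
#     # ... Django's default middleware ...
#     'rasiberryPiWebManager.middlewares.CrossDomain',
# ]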
|
[
"tai.wu@sap.com"
] |
tai.wu@sap.com
|
644712612d565cc5c61874da7590ea925ce60508
|
cdb29a347d67eb80f3deb09b685ea1e82ae47e7d
|
/progs/bottomUpTraversalBT.py
|
38581d957e4b49c8360d03e0756d9c64c8e45c7a
|
[] |
no_license
|
ishankkm/pythonProgs
|
f41e82c86591f4078c4c1317ecb4829087961c76
|
be98ba8e50cc7844d519a5ae5b6e4d9901e175ca
|
refs/heads/master
| 2021-01-24T16:48:50.161323
| 2018-09-08T23:30:19
| 2018-09-08T23:30:19
| 123,213,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,078
|
py
|
'''
Created on Jan 24, 2018
@author: ishank
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def levelOrderBottom(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
        if not root:
            return []
        nodeStack = [[root]]
        result = [[root.val]]
while len(nodeStack) > 0:
nodes = nodeStack.pop()
tempRes = []
tempNS = []
for node in nodes:
if node.left != None:
tempNS.append(node.left)
tempRes.append(node.left.val)
if node.right != None:
tempNS.append(node.right)
tempRes.append(node.right.val)
if len(tempNS) > 0:
result.append(tempRes)
nodeStack.append(tempNS)
return result[::-1]
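# --- Usage sketch (not part of the original file) ---
# Builds the tree [3, 9, 20, null, null, 15, 7] by hand, using a TreeNode
# matching the commented definition above, and prints the bottom-up levels.
#
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None
#
# root = TreeNode(3)
# root.left = TreeNode(9)
# root.right = TreeNode(20)
# root.right.left = TreeNode(15)
# root.right.right = TreeNode(7)
# print(Solution().levelOrderBottom(root))  # [[15, 7], [9, 20], [3]]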
|
[
"imishra@usc.edu"
] |
imishra@usc.edu
|
2bc6787c3b09b46b564b247b648c7813eb210f37
|
86dd1f9b1133fa5efb1e1134747e8df2184dc530
|
/pacman.py
|
b2c09cc858212c245c08f3125d3caf063d322b3d
|
[] |
no_license
|
tarun6285/Pacman-Search-Algorithms
|
8f34f5332f114cee8c3c7ef2a75353dcc8d82166
|
ebd834ddcf2bf9c33395f4686f01f67fdb6ceef6
|
refs/heads/master
| 2020-12-13T22:54:19.429695
| 2020-01-17T13:53:56
| 2020-01-17T13:53:56
| 234,555,349
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,190
|
py
|
# pacman.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
Pacman.py holds the logic for the classic pacman game along with the main
code to run a game. This file is divided into three sections:
(i) Your interface to the pacman world:
Pacman is a complex environment. You probably don't want to
read through all of the code we wrote to make the game runs
correctly. This section contains the parts of the code
that you will need to understand in order to complete the
project. There is also some code in game.py that you should
understand.
(ii) The hidden secrets of pacman:
This section contains all of the logic code that the pacman
environment uses to decide who can move where, who dies when
things collide, etc. You shouldn't need to read this section
of code, but you can if you want.
(iii) Framework to start a game:
The final section contains the code for reading the command
you use to set up the game, then starting up a new game, along with
linking in all the external parts (agent functions, graphics).
Check this section out to see all the options available to you.
To play your first game, type 'python pacman.py' from the command line.
The keys are 'a', 's', 'd', and 'w' to move (or arrow keys). Have fun!
"""
from game import GameStateData
from game import Game
from game import Directions
from game import Actions
from util import nearestPoint
from util import manhattanDistance
import util, layout
import sys, types, time, random, os
###################################################
# YOUR INTERFACE TO THE PACMAN WORLD: A GameState #
###################################################
class GameState:
"""
A GameState specifies the full game state, including the food, capsules,
agent configurations and score changes.
GameStates are used by the Game object to capture the actual state of the game and
can be used by agents to reason about the game.
Much of the information in a GameState is stored in a GameStateData object. We
strongly suggest that you access that data via the accessor methods below rather
than referring to the GameStateData object directly.
Note that in classic Pacman, Pacman is always agent 0.
"""
####################################################
# Accessor methods: use these to access state data #
####################################################
# static variable keeps track of which states have had getLegalActions called
explored = set()
def getAndResetExplored():
tmp = GameState.explored.copy()
GameState.explored = set()
return tmp
getAndResetExplored = staticmethod(getAndResetExplored)
def getLegalActions(self, agentIndex=0):
"""
Returns the legal actions for the agent specified.
"""
# GameState.explored.add(self)
if self.isWin() or self.isLose(): return []
if agentIndex == 0: # Pacman is moving
return PacmanRules.getLegalActions(self)
else:
return GhostRules.getLegalActions(self, agentIndex)
def generateSuccessor(self, agentIndex, action):
"""
Returns the successor state after the specified agent takes the action.
"""
# Check that successors exist
if self.isWin() or self.isLose(): raise Exception('Can\'t generate a successor of a terminal state.')
# Copy current state
state = GameState(self)
# Let agent's logic deal with its action's effects on the board
if agentIndex == 0: # Pacman is moving
state.data._eaten = [False for i in range(state.getNumAgents())]
PacmanRules.applyAction(state, action)
else: # A ghost is moving
GhostRules.applyAction(state, action, agentIndex)
# Time passes
if agentIndex == 0:
state.data.scoreChange += -TIME_PENALTY # Penalty for waiting around
else:
GhostRules.decrementTimer(state.data.agentStates[agentIndex])
# Resolve multi-agent effects
GhostRules.checkDeath(state, agentIndex)
# Book keeping
state.data._agentMoved = agentIndex
state.data.score += state.data.scoreChange
GameState.explored.add(self)
GameState.explored.add(state)
return state
def getLegalPacmanActions(self):
return self.getLegalActions(0)
def generatePacmanSuccessor(self, action):
"""
Generates the successor state after the specified pacman move
"""
return self.generateSuccessor(0, action)
def getPacmanState(self):
"""
Returns an AgentState object for pacman (in game.py)
state.pos gives the current position
state.direction gives the travel vector
"""
return self.data.agentStates[0].copy()
def getPacmanPosition(self):
return self.data.agentStates[0].getPosition()
def getGhostStates(self):
return self.data.agentStates[1:]
def getGhostState(self, agentIndex):
if agentIndex == 0 or agentIndex >= self.getNumAgents():
raise Exception("Invalid index passed to getGhostState")
return self.data.agentStates[agentIndex]
def getGhostPosition(self, agentIndex):
if agentIndex == 0:
raise Exception("Pacman's index passed to getGhostPosition")
return self.data.agentStates[agentIndex].getPosition()
def getGhostPositions(self):
return [s.getPosition() for s in self.getGhostStates()]
def getNumAgents(self):
return len(self.data.agentStates)
def getScore(self):
return float(self.data.score)
def getCapsules(self):
"""
Returns a list of positions (x,y) of the remaining capsules.
"""
return self.data.capsules
def getNumFood(self):
return self.data.food.count()
def getFood(self):
"""
Returns a Grid of boolean food indicator variables.
Grids can be accessed via list notation, so to check
if there is food at (x,y), just call
currentFood = state.getFood()
if currentFood[x][y] == True: ...
"""
return self.data.food
def getWalls(self):
"""
Returns a Grid of boolean wall indicator variables.
Grids can be accessed via list notation, so to check
if there is a wall at (x,y), just call
walls = state.getWalls()
if walls[x][y] == True: ...
"""
return self.data.layout.walls
def hasFood(self, x, y):
return self.data.food[x][y]
def hasWall(self, x, y):
return self.data.layout.walls[x][y]
def isLose(self):
return self.data._lose
def isWin(self):
return self.data._win
#############################################
# Helper methods: #
# You shouldn't need to call these directly #
#############################################
def __init__(self, prevState=None):
"""
Generates a new state by copying information from its predecessor.
"""
if prevState != None: # Initial state
self.data = GameStateData(prevState.data)
else:
self.data = GameStateData()
def deepCopy(self):
state = GameState(self)
state.data = self.data.deepCopy()
return state
def __eq__(self, other):
"""
Allows two states to be compared.
"""
return hasattr(other, 'data') and self.data == other.data
def __hash__(self):
"""
Allows states to be keys of dictionaries.
"""
return hash(self.data)
def __str__(self):
return str(self.data)
def initialize(self, layout, numGhostAgents=1000):
"""
Creates an initial game state from a layout array (see layout.py).
"""
self.data.initialize(layout, numGhostAgents)
############################################################################
# THE HIDDEN SECRETS OF PACMAN #
# #
# You shouldn't need to look through the code in this section of the file. #
############################################################################
SCARED_TIME = 40 # Moves ghosts are scared
COLLISION_TOLERANCE = 0.7 # How close ghosts must be to Pacman to kill
TIME_PENALTY = 1 # Number of points lost each round
class ClassicGameRules:
"""
These game rules manage the control flow of a game, deciding when
and how the game starts and ends.
"""
def __init__(self, timeout=30):
self.timeout = timeout
def newGame(self, layout, pacmanAgent, ghostAgents, display, quiet=False, catchExceptions=False):
agents = [pacmanAgent] + ghostAgents[:layout.getNumGhosts()]
initState = GameState()
initState.initialize(layout, len(ghostAgents))
game = Game(agents, display, self, catchExceptions=catchExceptions)
game.state = initState
self.initialState = initState.deepCopy()
self.quiet = quiet
return game
def process(self, state, game):
"""
Checks to see whether it is time to end the game.
"""
if state.isWin(): self.win(state, game)
if state.isLose(): self.lose(state, game)
def win(self, state, game):
if not self.quiet: print "Pacman emerges victorious! Score: %d" % state.data.score
game.gameOver = True
def lose(self, state, game):
if not self.quiet: print "Pacman died! Score: %d" % state.data.score
game.gameOver = True
def getProgress(self, game):
return float(game.state.getNumFood()) / self.initialState.getNumFood()
def agentCrash(self, game, agentIndex):
if agentIndex == 0:
print "Pacman crashed"
else:
print "A ghost crashed"
def getMaxTotalTime(self, agentIndex):
return self.timeout
def getMaxStartupTime(self, agentIndex):
return self.timeout
def getMoveWarningTime(self, agentIndex):
return self.timeout
def getMoveTimeout(self, agentIndex):
return self.timeout
def getMaxTimeWarnings(self, agentIndex):
return 0
class PacmanRules:
"""
These functions govern how pacman interacts with his environment under
the classic game rules.
"""
PACMAN_SPEED = 1
def getLegalActions(state):
"""
Returns a list of possible actions.
"""
return Actions.getPossibleActions(state.getPacmanState().configuration, state.data.layout.walls)
getLegalActions = staticmethod(getLegalActions)
def applyAction(state, action):
"""
Edits the state to reflect the results of the action.
"""
legal = PacmanRules.getLegalActions(state)
if action not in legal:
raise Exception("Illegal action " + str(action))
pacmanState = state.data.agentStates[0]
# Update Configuration
vector = Actions.directionToVector(action, PacmanRules.PACMAN_SPEED)
pacmanState.configuration = pacmanState.configuration.generateSuccessor(vector)
# Eat
next = pacmanState.configuration.getPosition()
nearest = nearestPoint(next)
if manhattanDistance(nearest, next) <= 0.5:
# Remove food
PacmanRules.consume(nearest, state)
applyAction = staticmethod(applyAction)
def consume(position, state):
x, y = position
# Eat food
if state.data.food[x][y]:
state.data.scoreChange += 10
state.data.food = state.data.food.copy()
state.data.food[x][y] = False
state.data._foodEaten = position
# TODO: cache numFood?
numFood = state.getNumFood()
if numFood == 0 and not state.data._lose:
state.data.scoreChange += 500
state.data._win = True
# Eat capsule
if (position in state.getCapsules()):
state.data.capsules.remove(position)
state.data._capsuleEaten = position
# Reset all ghosts' scared timers
for index in range(1, len(state.data.agentStates)):
state.data.agentStates[index].scaredTimer = SCARED_TIME
consume = staticmethod(consume)
class GhostRules:
"""
These functions dictate how ghosts interact with their environment.
"""
GHOST_SPEED = 1.0
def getLegalActions(state, ghostIndex):
"""
Ghosts cannot stop, and cannot turn around unless they
reach a dead end, but can turn 90 degrees at intersections.
"""
conf = state.getGhostState(ghostIndex).configuration
possibleActions = Actions.getPossibleActions(conf, state.data.layout.walls)
reverse = Actions.reverseDirection(conf.direction)
if Directions.STOP in possibleActions:
possibleActions.remove(Directions.STOP)
if reverse in possibleActions and len(possibleActions) > 1:
possibleActions.remove(reverse)
return possibleActions
getLegalActions = staticmethod(getLegalActions)
def applyAction(state, action, ghostIndex):
legal = GhostRules.getLegalActions(state, ghostIndex)
if action not in legal:
raise Exception("Illegal ghost action " + str(action))
ghostState = state.data.agentStates[ghostIndex]
speed = GhostRules.GHOST_SPEED
if ghostState.scaredTimer > 0: speed /= 2.0
vector = Actions.directionToVector(action, speed)
ghostState.configuration = ghostState.configuration.generateSuccessor(vector)
applyAction = staticmethod(applyAction)
def decrementTimer(ghostState):
timer = ghostState.scaredTimer
if timer == 1:
ghostState.configuration.pos = nearestPoint(ghostState.configuration.pos)
ghostState.scaredTimer = max(0, timer - 1)
decrementTimer = staticmethod(decrementTimer)
def checkDeath(state, agentIndex):
pacmanPosition = state.getPacmanPosition()
if agentIndex == 0: # Pacman just moved; Anyone can kill him
for index in range(1, len(state.data.agentStates)):
ghostState = state.data.agentStates[index]
ghostPosition = ghostState.configuration.getPosition()
if GhostRules.canKill(pacmanPosition, ghostPosition):
GhostRules.collide(state, ghostState, index)
else:
ghostState = state.data.agentStates[agentIndex]
ghostPosition = ghostState.configuration.getPosition()
if GhostRules.canKill(pacmanPosition, ghostPosition):
GhostRules.collide(state, ghostState, agentIndex)
checkDeath = staticmethod(checkDeath)
def collide(state, ghostState, agentIndex):
if ghostState.scaredTimer > 0:
state.data.scoreChange += 200
GhostRules.placeGhost(state, ghostState)
ghostState.scaredTimer = 0
# Added for first-person
state.data._eaten[agentIndex] = True
else:
if not state.data._win:
state.data.scoreChange -= 500
state.data._lose = True
collide = staticmethod(collide)
def canKill(pacmanPosition, ghostPosition):
return manhattanDistance(ghostPosition, pacmanPosition) <= COLLISION_TOLERANCE
canKill = staticmethod(canKill)
def placeGhost(state, ghostState):
ghostState.configuration = ghostState.start
placeGhost = staticmethod(placeGhost)
#############################
# FRAMEWORK TO START A GAME #
#############################
def default(str):
return str + ' [Default: %default]'
def parseAgentArgs(str):
if str == None: return {}
pieces = str.split(',')
opts = {}
for p in pieces:
if '=' in p:
key, val = p.split('=')
else:
key, val = p, 1
opts[key] = val
return opts
def readCommand(argv):
"""
Processes the command used to run pacman from the command line.
"""
from optparse import OptionParser
usageStr = """
USAGE: python pacman.py <options>
EXAMPLES: (1) python pacman.py
- starts an interactive game
(2) python pacman.py --layout smallClassic --zoom 2
OR python pacman.py -l smallClassic -z 2
- starts an interactive game on a smaller board, zoomed in
"""
parser = OptionParser(usageStr)
parser.add_option('-n', '--numGames', dest='numGames', type='int',
help=default('the number of GAMES to play'), metavar='GAMES', default=1)
parser.add_option('-l', '--layout', dest='layout',
help=default('the LAYOUT_FILE from which to load the map layout'),
metavar='LAYOUT_FILE', default='mediumClassic')
parser.add_option('-p', '--pacman', dest='pacman',
help=default('the agent TYPE in the pacmanAgents module to use'),
metavar='TYPE', default='KeyboardAgent')
parser.add_option('-t', '--textGraphics', action='store_true', dest='textGraphics',
help='Display output as text only', default=False)
parser.add_option('-q', '--quietTextGraphics', action='store_true', dest='quietGraphics',
help='Generate minimal output and no graphics', default=False)
parser.add_option('-g', '--ghosts', dest='ghost',
help=default('the ghost agent TYPE in the ghostAgents module to use'),
metavar='TYPE', default='RandomGhost')
parser.add_option('-k', '--numghosts', type='int', dest='numGhosts',
help=default('The maximum number of ghosts to use'), default=4)
parser.add_option('-z', '--zoom', type='float', dest='zoom',
help=default('Zoom the size of the graphics window'), default=1.0)
parser.add_option('-f', '--fixRandomSeed', action='store_true', dest='fixRandomSeed',
help='Fixes the random seed to always play the same game', default=False)
parser.add_option('-r', '--recordActions', action='store_true', dest='record',
help='Writes game histories to a file (named by the time they were played)', default=False)
parser.add_option('--replay', dest='gameToReplay',
help='A recorded game file (pickle) to replay', default=None)
parser.add_option('-a', '--agentArgs', dest='agentArgs',
help='Comma separated values sent to agent. e.g. "opt1=val1,opt2,opt3=val3"')
parser.add_option('-x', '--numTraining', dest='numTraining', type='int',
help=default('How many episodes are training (suppresses output)'), default=0)
parser.add_option('--frameTime', dest='frameTime', type='float',
help=default('Time to delay between frames; <0 means keyboard'), default=0.1)
parser.add_option('-c', '--catchExceptions', action='store_true', dest='catchExceptions',
help='Turns on exception handling and timeouts during games', default=False)
parser.add_option('--timeout', dest='timeout', type='int',
help=default('Maximum length of time an agent can spend computing in a single game'), default=30)
options, otherjunk = parser.parse_args(argv)
if len(otherjunk) != 0:
raise Exception('Command line input not understood: ' + str(otherjunk))
args = dict()
# Fix the random seed
if options.fixRandomSeed: random.seed('cs188')
# Choose a layout
args['layout'] = layout.getLayout(options.layout)
if args['layout'] == None: raise Exception("The layout " + options.layout + " cannot be found")
# Choose a Pacman agent
noKeyboard = options.gameToReplay == None and (options.textGraphics or options.quietGraphics)
pacmanType = loadAgent(options.pacman, noKeyboard)
agentOpts = parseAgentArgs(options.agentArgs)
if options.numTraining > 0:
args['numTraining'] = options.numTraining
if 'numTraining' not in agentOpts: agentOpts['numTraining'] = options.numTraining
pacman = pacmanType(**agentOpts) # Instantiate Pacman with agentArgs
args['pacman'] = pacman
# Don't display training games
if 'numTrain' in agentOpts:
options.numQuiet = int(agentOpts['numTrain'])
options.numIgnore = int(agentOpts['numTrain'])
# Choose a ghost agent
ghostType = loadAgent(options.ghost, noKeyboard)
args['ghosts'] = [ghostType(i + 1) for i in range(options.numGhosts)]
# Choose a display format
if options.quietGraphics:
import textDisplay
args['display'] = textDisplay.NullGraphics()
elif options.textGraphics:
import textDisplay
textDisplay.SLEEP_TIME = options.frameTime
args['display'] = textDisplay.PacmanGraphics()
else:
import graphicsDisplay
args['display'] = graphicsDisplay.PacmanGraphics(options.zoom, frameTime=options.frameTime)
args['numGames'] = options.numGames
args['record'] = options.record
args['catchExceptions'] = options.catchExceptions
args['timeout'] = options.timeout
# Special case: recorded games don't use the runGames method or args structure
if options.gameToReplay != None:
print 'Replaying recorded game %s.' % options.gameToReplay
import cPickle
f = open(options.gameToReplay)
try:
recorded = cPickle.load(f)
finally:
f.close()
recorded['display'] = args['display']
replayGame(**recorded)
sys.exit(0)
return args
def loadAgent(pacman, nographics):
# Looks through all pythonPath Directories for the right module,
pythonPathStr = os.path.expandvars("$PYTHONPATH")
if pythonPathStr.find(';') == -1:
pythonPathDirs = pythonPathStr.split(':')
else:
pythonPathDirs = pythonPathStr.split(';')
pythonPathDirs.append('.')
for moduleDir in pythonPathDirs:
if not os.path.isdir(moduleDir): continue
moduleNames = [f for f in os.listdir(moduleDir) if f.endswith('gents.py')]
for modulename in moduleNames:
try:
module = __import__(modulename[:-3])
except ImportError:
continue
if pacman in dir(module):
if nographics and modulename == 'keyboardAgents.py':
raise Exception('Using the keyboard requires graphics (not text display)')
return getattr(module, pacman)
raise Exception('The agent ' + pacman + ' is not specified in any *Agents.py.')
def replayGame(layout, actions, display):
import pacmanAgents, ghostAgents
rules = ClassicGameRules()
agents = [pacmanAgents.GreedyAgent()] + [ghostAgents.RandomGhost(i + 1) for i in range(layout.getNumGhosts())]
game = rules.newGame(layout, agents[0], agents[1:], display)
state = game.state
display.initialize(state.data)
for action in actions:
# Execute the action
state = state.generateSuccessor(*action)
# Change the display
display.update(state.data)
# Allow for game specific conditions (winning, losing, etc.)
rules.process(state, game)
display.finish()
def runGames(layout, pacman, ghosts, display, numGames, record, numTraining=0, catchExceptions=False, timeout=30):
import __main__
__main__.__dict__['_display'] = display
rules = ClassicGameRules(timeout)
games = []
for i in range(numGames):
beQuiet = i < numTraining
if beQuiet:
# Suppress output and graphics
import textDisplay
gameDisplay = textDisplay.NullGraphics()
rules.quiet = True
else:
gameDisplay = display
rules.quiet = False
game = rules.newGame(layout, pacman, ghosts, gameDisplay, beQuiet, catchExceptions)
game.run()
if not beQuiet: games.append(game)
if record:
import time, cPickle
fname = ('recorded-game-%d' % (i + 1)) + '-'.join([str(t) for t in time.localtime()[1:6]])
f = file(fname, 'w')
components = {'layout': layout, 'actions': game.moveHistory}
cPickle.dump(components, f)
f.close()
if (numGames - numTraining) > 0:
scores = [game.state.getScore() for game in games]
wins = [game.state.isWin() for game in games]
winRate = wins.count(True) / float(len(wins))
print 'Average Score:', sum(scores) / float(len(scores))
print 'Scores: ', ', '.join([str(score) for score in scores])
print 'Win Rate: %d/%d (%.2f)' % (wins.count(True), len(wins), winRate)
print 'Record: ', ', '.join([['Loss', 'Win'][int(w)] for w in wins])
return games
if __name__ == '__main__':
"""
The main function called when pacman.py is run
from the command line:
> python pacman.py
See the usage string for more details.
> python pacman.py --help
"""
args = readCommand(sys.argv[1:]) # Get game components based on input
runGames(**args)
# import cProfile
# cProfile.run("runGames( **args )")
pass
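    # --- Programmatic-run sketch (not part of the original file) ---
    # runGames() can also be driven without the CLI; GreedyAgent, RandomGhost
    # and NullGraphics are the same classes the replay/quiet paths above use:
    #
    # import pacmanAgents, ghostAgents, textDisplay
    # runGames(layout=layout.getLayout('mediumClassic'),
    #          pacman=pacmanAgents.GreedyAgent(),
    #          ghosts=[ghostAgents.RandomGhost(1)],
    #          display=textDisplay.NullGraphics(),
    #          numGames=1, record=False)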
|
[
"noreply@github.com"
] |
tarun6285.noreply@github.com
|
e0380b4d0e1ad946e520a3d1d6a6d3e2d171f5d4
|
e7b7cc34f77c71e61aa0fa05bcc62f54fc2fc0e1
|
/Matrix/test_q048_rotate_image.py
|
d8eafa6c6400fcc9b88cb7ec50647bd9813d252c
|
[] |
no_license
|
sevenhe716/LeetCode
|
41d2ef18f5cb317858c9b69d00bcccb743cbdf48
|
4a1747b6497305f3821612d9c358a6795b1690da
|
refs/heads/master
| 2020-03-16T16:12:27.461172
| 2019-04-22T13:27:54
| 2019-04-22T13:27:54
| 130,221,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 893
|
py
|
import unittest
from Matrix.q048_rotate_image import Solution
class TestRotateImage(unittest.TestCase):
"""Test q048_rotate_image.py"""
def test_rotate_image(self):
s = Solution()
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
s.rotate(matrix)
self.assertEqual([
[7, 4, 1],
[8, 5, 2],
[9, 6, 3]
], matrix)
matrix = [
[5, 1, 9, 11],
[2, 4, 8, 10],
[13, 3, 6, 7],
[15, 14, 12, 16]
]
s.rotate(matrix)
self.assertEqual([
[15, 13, 2, 5],
[14, 3, 4, 1],
[12, 6, 8, 9],
[16, 7, 10, 11]
], matrix)
matrix = []
s.rotate(matrix)
self.assertEqual([], matrix)
if __name__ == '__main__':
unittest.main()
|
[
"429134862@qq.com"
] |
429134862@qq.com
|
dad4974a14774cac8161e28685a6a1db9e4d89e7
|
fe1353329f5eec4498360523d72cbaa2656b802e
|
/StereoSfMLearner_master/data/kitti/kitti_raw_loader.py
|
a03ab5329f6ff4c1f14c71490458020392c63bd8
|
[
"MIT"
] |
permissive
|
MaxHuerlimann/3D-Vision
|
c6937038a07cf196ffb0a664af9d7cdfdb3f8dd4
|
e7384d0f11c6f015fc12f10061d808ff313096a0
|
refs/heads/master
| 2021-04-06T01:40:44.201698
| 2018-06-18T21:10:57
| 2018-06-18T21:10:57
| 124,376,620
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,343
|
py
|
from __future__ import division
import numpy as np
from glob import glob
import os
import scipy.misc
class kitti_raw_loader(object):
def __init__(self,
dataset_dir,
split,
img_height=256,
img_width=256,
seq_length=5):
dir_path = os.path.dirname(os.path.realpath(__file__))
static_frames_file = dir_path + '\\static_frames.txt'
test_scene_file = dir_path + '\\test_scenes_' + split + '.txt'
with open(test_scene_file, 'r') as f:
test_scenes = f.readlines()
self.test_scenes = [t[:-1] for t in test_scenes]
self.dataset_dir = dataset_dir
self.img_height = img_height
self.img_width = img_width
self.seq_length = seq_length
self.cam_ids = ['02', '03']
self.date_list = ['2011_09_26', '2011_09_28', '2011_09_29',
'2011_09_30', '2011_10_03']
self.collect_static_frames(static_frames_file)
self.collect_train_frames()
def collect_static_frames(self, static_frames_file):
with open(static_frames_file, 'r') as f:
frames = f.readlines()
self.static_frames = []
for fr in frames:
if fr == '\n':
continue
date, drive, frame_id = fr.split(' ')
            curr_fid = '%.10d' % (int(frame_id[:-1]))
for cid in self.cam_ids:
self.static_frames.append(drive + ' ' + cid + ' ' + curr_fid)
def collect_train_frames(self):
all_frames = []
for date in self.date_list:
drive_set = os.listdir(self.dataset_dir + date + '\\')
for dr in drive_set:
drive_dir = os.path.join(self.dataset_dir, date, dr)
if os.path.isdir(drive_dir):
if dr[:-5] in self.test_scenes:
continue
for cam in self.cam_ids:
img_dir = os.path.join(drive_dir, 'image_' + cam, 'data')
N = len(glob(img_dir + '\\*.png'))
for n in range(N):
frame_id = '%.10d' % n
all_frames.append(dr + ' ' + cam + ' ' + frame_id)
for s in self.static_frames:
try:
all_frames.remove(s)
# print('removed static frame from training: %s' % s)
            except ValueError:
pass
self.train_frames = all_frames
self.num_train = len(self.train_frames)
def is_valid_sample(self, frames, tgt_idx):
N = len(frames)
tgt_drive, cid, _ = frames[tgt_idx].split(' ')
half_offset = int((self.seq_length - 1)/2)
min_src_idx = tgt_idx - half_offset
max_src_idx = tgt_idx + half_offset
if min_src_idx < 0 or max_src_idx >= N:
return False
min_src_drive, min_src_cid, _ = frames[min_src_idx].split(' ')
max_src_drive, max_src_cid, _ = frames[max_src_idx].split(' ')
if tgt_drive == min_src_drive and tgt_drive == max_src_drive and cid == min_src_cid and cid == max_src_cid:
return True
return False
def get_train_example_with_idx(self, tgt_idx):
if not self.is_valid_sample(self.train_frames, tgt_idx):
return False
example = self.load_example(self.train_frames, tgt_idx)
return example
def load_image_sequence(self, frames, tgt_idx, seq_length):
half_offset = int((seq_length - 1)/2)
image_seq = []
for o in range(-half_offset, half_offset + 1):
curr_idx = tgt_idx + o
curr_drive, curr_cid, curr_frame_id = frames[curr_idx].split(' ')
curr_img = self.load_image_raw(curr_drive, curr_cid, curr_frame_id)
if o == 0:
zoom_y = self.img_height/curr_img.shape[0]
zoom_x = self.img_width/curr_img.shape[1]
curr_img = scipy.misc.imresize(curr_img, (self.img_height, self.img_width))
image_seq.append(curr_img)
return image_seq, zoom_x, zoom_y
def load_example(self, frames, tgt_idx):
image_seq, zoom_x, zoom_y = self.load_image_sequence(frames, tgt_idx, self.seq_length)
tgt_drive, tgt_cid, tgt_frame_id = frames[tgt_idx].split(' ')
intrinsics = self.load_intrinsics_raw(tgt_drive, tgt_cid, tgt_frame_id)
intrinsics = self.scale_intrinsics(intrinsics, zoom_x, zoom_y)
example = {}
example['intrinsics'] = intrinsics
example['image_seq'] = image_seq
example['folder_name'] = tgt_drive + '_' + tgt_cid + '\\'
example['file_name'] = tgt_frame_id
return example
def load_image_raw(self, drive, cid, frame_id):
date = drive[:10]
img_file = os.path.join(self.dataset_dir, date, drive, 'image_' + cid, 'data', frame_id + '.png')
img = scipy.misc.imread(img_file)
return img
def load_intrinsics_raw(self, drive, cid, frame_id):
date = drive[:10]
calib_file = os.path.join(self.dataset_dir, date, 'calib_cam_to_cam.txt')
filedata = self.read_raw_calib_file(calib_file)
P_rect = np.reshape(filedata['P_rect_' + cid], (3, 4))
intrinsics = P_rect[:3, :3]
return intrinsics
def read_raw_calib_file(self,filepath):
# From https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py
"""Read in a calibration file and parse into a dictionary."""
data = {}
with open(filepath, 'r') as f:
for line in f.readlines():
key, value = line.split(':', 1)
# The only non-float values in these files are dates, which
# we don't care about anyway
try:
data[key] = np.array([float(x) for x in value.split()])
except ValueError:
pass
return data
def scale_intrinsics(self, mat, sx, sy):
out = np.copy(mat)
out[0,0] *= sx
out[0,2] *= sx
out[1,1] *= sy
out[1,2] *= sy
return out
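# --- Usage sketch (not part of the original loader) ---
# Instantiation with an "eigen"-style split; the dataset path, split name and
# image sizes are assumptions about how the surrounding pipeline calls this
# class (note collect_train_frames() expects dataset_dir to end with '\\').
#
# loader = kitti_raw_loader('D:\\kitti_raw\\', 'eigen',
#                           img_height=128, img_width=416, seq_length=3)
# example = loader.get_train_example_with_idx(100)
# if example:
#     print(example['folder_name'], example['file_name'])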
|
[
"maxh@ethz.ch"
] |
maxh@ethz.ch
|
af26bd38f15108886847727bd1618a0f005603df
|
e0a24108a6fd8be8c820927804f1f72d881d4351
|
/dataset/auto_download_coco.py
|
ed209c2e119fa7672bcc9246fe0a120f2f93152a
|
[
"MIT"
] |
permissive
|
1105042987/Dominant-Patterns
|
56e40d55d73fb4e95b16a77444bb022f4862ee8e
|
713b535e80aff0f04e20d1ef56d005e183a5d8a5
|
refs/heads/main
| 2023-05-10T11:25:10.577795
| 2021-06-12T13:34:23
| 2021-06-12T13:34:23
| 376,297,790
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,081
|
py
|
import os
import urllib.request
import shutil
import zipfile
def copyfileobj(fsrc, fdst, length=16*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
cnt = 0
last = 0
while 1:
buf = fsrc.read(length)
if not buf:
print('\nDownload Finish')
break
cnt+=len(buf)/1024/1024
if cnt-last>1:
last = cnt
print('\r Download {:.04f} MB'.format(cnt),end='')
fdst.write(buf)
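# Illustrative example (file names are assumptions): stream one local file
# into another with progress output.
#
#   with open("src.bin", "rb") as fsrc, open("dst.bin", "wb") as fdst:
#       copyfileobj(fsrc, fdst)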
def auto_download(dataDir, dataType, dataYear):
"""Download the COCO dataset/annotations if requested.
dataDir: The root directory of the COCO dataset.
dataType: What to load (train, val, minival, valminusminival)
dataYear: What dataset year to load (2014, 2017) as a string, not an integer
Note:
For 2014, use "train", "val", "minival", or "valminusminival"
For 2017, only "train" and "val" annotations are available
"""
# Setup paths and file names
if dataType == "minival" or dataType == "valminusminival":
imgDir = "{}/{}{}".format(dataDir, "val", dataYear)
imgZipFile = "{}/{}{}.zip".format(dataDir, "val", dataYear)
imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format("val", dataYear)
else:
imgDir = "{}/{}{}".format(dataDir, dataType, dataYear)
imgZipFile = "{}/{}{}.zip".format(dataDir, dataType, dataYear)
imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format(dataType, dataYear)
# print("Image paths:"); print(imgDir); print(imgZipFile); print(imgURL)
# Create main folder if it doesn't exist yet
if not os.path.exists(dataDir):
os.makedirs(dataDir)
# Download images if not available locally
if not os.path.exists(imgDir):
os.makedirs(imgDir)
print("Downloading images to " + imgZipFile + " ...")
if not os.path.exists(imgZipFile):
with urllib.request.urlopen(imgURL) as resp, open(imgZipFile, 'wb') as out:
copyfileobj(resp, out)
print("... done downloading.")
print("Unzipping " + imgZipFile)
with zipfile.ZipFile(imgZipFile, "r") as zip_ref:
zip_ref.extractall(dataDir)
print("... done unzipping")
print("Will use images in " + imgDir)
# Setup annotations data paths
annDir = "{}/annotations".format(dataDir)
if dataType == "minival":
annZipFile = "{}/instances_minival2014.json.zip".format(dataDir)
annFile = "{}/instances_minival2014.json".format(annDir)
annURL = "https://dl.dropboxusercontent.com/s/o43o90bna78omob/instances_minival2014.json.zip?dl=0"
unZipDir = annDir
elif dataType == "valminusminival":
annZipFile = "{}/instances_valminusminival2014.json.zip".format(dataDir)
annFile = "{}/instances_valminusminival2014.json".format(annDir)
annURL = "https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/instances_valminusminival2014.json.zip?dl=0"
unZipDir = annDir
else:
annZipFile = "{}/annotations_trainval{}.zip".format(dataDir, dataYear)
annFile = "{}/instances_{}{}.json".format(annDir, dataType, dataYear)
annURL = "http://images.cocodataset.org/annotations/annotations_trainval{}.zip".format(dataYear)
unZipDir = dataDir
# print("Annotations paths:"); print(annDir); print(annFile); print(annZipFile); print(annURL)
# Download annotations if not available locally
if not os.path.exists(annDir):
os.makedirs(annDir)
if not os.path.exists(annFile):
if not os.path.exists(annZipFile):
    print("Downloading zipped annotations to " + annZipFile + " ...")
    with urllib.request.urlopen(annURL) as resp, open(annZipFile, 'wb') as out:
        copyfileobj(resp, out)
    print("... done downloading.")
print("Unzipping " + annZipFile)
with zipfile.ZipFile(annZipFile, "r") as zip_ref:
zip_ref.extractall(unZipDir)
print("... done unzipping")
|
[
"1105042987@qq.com"
] |
1105042987@qq.com
|
67fe4d21c9604c2abd859971448d27f99ffb17b9
|
90080b87ce2964f6016c1147a39c53c151ef91e6
|
/joystick.py
|
787055068eec75447c1f204efd95a595860cd6ec
|
[] |
no_license
|
nonodamiens/pygame
|
733f86b015d80f5c269c495726cc6a049d7b90a2
|
5b5cb190404920d173305ded12febf8c8475319b
|
refs/heads/master
| 2022-05-17T02:34:10.780716
| 2020-04-23T20:46:29
| 2020-04-23T20:46:29
| 257,037,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,517
|
py
|
"""
Ce fichier python sert à tester la gestion du joystick avec pygame
Pour les boutons, les types d'évenements sont JOYBUTTONDOWN et JOYBUTTONUP
event.button renvoit le numéro du button de l'évenement
Pour les directions, le type d'évenement est JOYAXISMOTION
event.axis renvoit l'axe (0 pour l'axe horizontal / 1 axe vertical)
event.value renvoit la valeur (intensité) entre -1 (gauche/haut) et 1 (droite/bas)
"""
import pygame
from pygame.locals import *
# Initialize pygame
pygame.init()
# Count how many joysticks are plugged in
nb_joystick = pygame.joystick.get_count()
print("There are {} joystick(s) plugged in".format(nb_joystick))
# If there is at least one, create it with its port number (starting at 0) and initialize it
if nb_joystick > 0:
mon_joystick = pygame.joystick.Joystick(0)
mon_joystick.init()
# Now count the inputs that are available
print("axes: ", mon_joystick.get_numaxes())
print("buttons: ", mon_joystick.get_numbuttons())
print("trackballs: ", mon_joystick.get_numballs())
print("hats: ", mon_joystick.get_numhats())
# Grab the button numbers so that we can test them
if mon_joystick.get_numbuttons() > 1:
    # Main event loop
continuer = True
while continuer:
for event in pygame.event.get():
if event.type == QUIT:
continuer = False
if event.type == JOYBUTTONDOWN:
print(event.button)
|
[
"arnaud.mercier80@gmail.com"
] |
arnaud.mercier80@gmail.com
|
4f7eda57e1a4435c38b468c2b0fddfb150a414a6
|
63b6616962c5e4abf3db50efda40e75dd06c5137
|
/game_api/migrations/0019_auto_20200806_1503.py
|
864d8a2616ae991041929824cf0956a3c4d1210b
|
[] |
no_license
|
tabiouch03/Gaming-API
|
753ab7743d42f7b872cdd785f089050a3e2ebeea
|
a6ad14e5e0051d2a09cae3c23ad86bbabd49a315
|
refs/heads/master
| 2022-12-03T09:14:49.019219
| 2020-08-12T21:24:13
| 2020-08-12T21:24:13
| 284,426,850
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
# Generated by Django 3.0.8 on 2020-08-06 13:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('game_api', '0018_auto_20200806_1503'),
]
operations = [
migrations.AlterField(
model_name='game',
name='plateform',
field=models.ManyToManyField(blank=True, to='game_api.Plateform'),
),
]
|
[
"guerreiro.fabio@outlook.fr"
] |
guerreiro.fabio@outlook.fr
|
b25e1c7d659e00d15dc0f0fbc8c9e812dbee18bc
|
cdb0e833a4c5a60f9602a183ec04a8f251b2b75a
|
/shapes.py
|
cb6469b78d0d97bada3fc1d03321ccdea5fdb3f0
|
[] |
no_license
|
waithiageni/python_Example5
|
11bdc059fc7945bceb4edd8f5641a7efccff4b98
|
43de704c0427feb0719f05fe9c99b68e690cf998
|
refs/heads/master
| 2020-05-18T08:29:44.386629
| 2019-05-01T15:28:47
| 2019-05-01T15:28:47
| 184,297,050
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,145
|
py
|
class Circle:
def __init__(self,radius):
self.radius=radius
def Area_of_a_circle(self):
radius=self.radius
A=(3.142)*(radius*radius)
return(A)
def circumference_of_a_circle(self):
radius=self.radius
C=2*3.142*(radius)
return(C)
class Square:
def __init__(self,side):
self.side=side
def Area_of_a_square(self):
side=self.side
Area_of_a_square=side*side
return(Area_of_a_square)
def Perimeter_of_a_square(self):
side=self.side
Perimeter_of_a_square=4*side
return(Perimeter_of_a_square)
class Rectangle:
def __init__(self,w,l):
self.w=w
self.l=l
def Area_of_a_rectangle(self):
w=self.w
l=self.l
Area_of_a_rectangle=w*l
return(Area_of_a_rectangle)
def Perimeter_of_a_rectangle(self):
w=self.w
l=self.l
Perimeter_of_a_rectangle=(2*w)+(2*l)
return(Perimeter_of_a_rectangle)
class Sphere:
def __init__(self,radius):
self.radius=radius
def Sphere_SA(self):
radius=self.radius
Sphere_SA=4*3.142*radius*radius
return(Sphere_SA)
def Sphere_Volume(self):
radius=self.radius
Sphere_Volume=(4/3)*3.142*(radius*radius*radius)
return(Sphere_Volume)
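# A minimal usage sketch (the argument values are illustrative):
if __name__ == "__main__":
    print(Circle(2).Area_of_a_circle())                # 12.568
    print(Rectangle(3, 4).Perimeter_of_a_rectangle())  # 14
    print(Sphere(1).Sphere_SA())                       # 12.568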
|
[
"mwangiwaithiageni33@gmail.com"
] |
mwangiwaithiageni33@gmail.com
|
0eb2ca34b4a274b9f31db05a4538e883bc0d7089
|
cea8c9b502375a0e2bab990044973146cf8f35ba
|
/testCase/BaseCase/testBaseCase.py
|
da1c4dd24ee327fe01cfb5ef131c71d1435f645b
|
[] |
no_license
|
D6666666/autotest-api
|
b1e4d977df80a44142724018f7130155b86c52cb
|
0457e3b78ce1293a86d9610c1b13a58bce2dad22
|
refs/heads/master
| 2020-06-28T06:59:59.346780
| 2019-08-02T05:13:34
| 2019-08-02T05:13:34
| 200,169,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,876
|
py
|
# @Author : 程前
import time
import unittest,json
from ddt import data,ddt
import readConfig
from common.log import MyLog
from common import public
from common import configHttp
from dataDriver.FormatTrans import FormatTrans
localReadConfig = readConfig.ReadConfig()
class BaseCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.log = MyLog.get_log()
cls.logger = cls.log.get_logger()
if not hasattr(cls, 'cfg_func'):
raise NoConfigerSetError
_cfg_func = getattr(cls, 'cfg_func')
# configure
cls.timeout = cls.cfg(_cfg_func, "timeout")
cls.cfg_url = cls.cfg(_cfg_func, "url")
cls.cfg_params_default = cls.cfg(_cfg_func, "params_default")
cls.token = public.login_token(cls.cfg_url, cls.cfg_params_default,
cls.cfg(_cfg_func, "params_login"))
def setUp(self):
pass
def tearDown(self):
if self.info is None:
self.log.build_case_line(self.case_name, '', '')
else:
self.log.build_case_line(self.case_name, str(self.info['code']), str(self.info['message']) if 'message' in self.info.keys() else '')
def description(self):
"""
test report description
:return:
"""
return self.case_name
@staticmethod
def cfg(func, *args):
"""
reflect to get cfg
:param func:
:return:
"""
rc = localReadConfig
return getattr(rc, func)(*args)
def baseCase(self, data):
# data
data['token'] = self.token
ft = FormatTrans(data)
self.info = None
self.path = ft.path
self.parame = ft.parame
self.body = ft.body
self.mode = ft.mode
self.case_name = ft.casename
self.expect = ft.expect
self.skip = ft.skip
if self.skip is True:
return self.skipTest('{} died with SKIP, cos skip_tag is TRUE'.format(self.case_name))
# url
self.url = '%s%s%s%s' % (
self.cfg_url, self.path, '?' + self.cfg_params_default if self.cfg_params_default != 'NA' else '',
'&' + '&'.join(['%s=%s' % (_k, _v) for _k, _v in self.parame.items()]))
self.headers = {"Content-Type":"application/json",'token':self.token}
# log to record requests data
self.logger.debug(
'{} requests data. url: {}, body: {}, mode: {}'.format(self.case_name, self.url, self.body, self.mode))
self.req = configHttp.ConfigHttp()
if self.mode == 'GET':
self.return_json = self.req.get(self.url, self.timeout,headers=self.headers)
elif self.mode == 'POST':
self.return_json = self.req.post(self.url, self.body,self.timeout,headers=self.headers)
self.logger.info(self.url)
else:
print('invalid mode!')
return
if self.return_json is None:
    self.logger.info('{} died with False, response: None. Pls check http server'.format(self.case_name))
    return
# log to record response data
self.logger.debug('{} response data, rcode: {}, rtext: {}'.format(self.case_name, self.return_json.status_code,
self.return_json.text))
self.info = self.return_json.json()
self.logger.info('{} died with {}, response code: {}, except code {}'.format(self.case_name, self.info[
'code'] == self.expect, self.info['code'], self.expect))
self.assertEqual(self.info['code'], self.expect)
time.sleep(0.5)
class NoConfigerSetError(Exception):
def __init__(self):
super().__init__(self)
self.errorinfo = 'not set configer function!'
def __str__(self):
return self.errorinfo
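# A minimal subclass sketch (the config section name and the test data fields
# are assumptions for illustration, not part of this project):
#
#   @ddt
#   class DemoCase(BaseCase):
#       cfg_func = "get_demo_config"  # looked up by reflection in BaseCase.cfg
#
#       @data({"casename": "ping", "path": "/ping", "mode": "GET", "expect": 0})
#       def test_ping(self, case_data):
#           self.baseCase(case_data)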
if __name__ == "__main__":
unittest.main()
|
[
"chengqian@shouqiev.com"
] |
chengqian@shouqiev.com
|
4f499e84ef025f7392da0dfe8b469d8b2e293bde
|
275cac9a89e7f3412414de86fcbf50b5a782fe92
|
/realpro/settings.py
|
4a428a6c0e656cb22f6b67d4e925468984aca355
|
[] |
no_license
|
josephbudiarto/Chat-iShout
|
076564fe75eeb0f9ed26cc38affab684236fcbb1
|
0eec38b6e2a23122856169194260cefe6f5d2d05
|
refs/heads/master
| 2021-03-16T05:12:16.913381
| 2017-10-27T09:03:10
| 2017-10-27T09:03:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,202
|
py
|
"""
Django settings for realpro project.
Generated by 'django-admin startproject' using Django 1.11.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@kye4pc8tr4dhvd4%#t=_laupc2k+_=8c7pi$ndquwpuzg)tq3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'drealtime',
'example',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'drealtime.middleware.iShoutCookieMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'realpro.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'realpro.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"budiartojoseph@gmail.com"
] |
budiartojoseph@gmail.com
|
e14898f8dd40cad3308234ff8f4baebb370a070f
|
96e07f8c5f171265324c6f9ae70e1a90c13526a1
|
/calculator.py
|
da9628a0e609c5e7b247b04312e3cc92481ea049
|
[] |
no_license
|
samh-gith/pythonCalculator
|
bb6663de64c14d45cd20109fde5d99f9dee2d982
|
16bc9e724f1003e42d743f08b9b40ab3d66b4ff5
|
refs/heads/main
| 2023-06-24T20:28:46.182412
| 2021-07-29T15:40:50
| 2021-07-29T15:40:50
| 390,720,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
def add(a,b):
result = a + b
print("a + b = ", result)
def sub(a,b):
result = a - b
print("a - b = ",result)
def mul(a,b):
result = a * b
print("your result: ",result)
def div(a,b):
    if b == 0:
        print("Cannot divide by zero")
        return
    result = a / b
    print("a / b = ",result)
a = int(input("Enter your first number: "))
b = int(input("Enter your second number: "))
op = input("Enter your operator: ")
if op == "+":
add(a, b)
elif op == "-":
sub(a, b)
elif op == "*":
mul(a, b)
elif op == "/":
div(a, b)
else:
print("Invalid Operator")
|
[
"sayamha@protonmail.com"
] |
sayamha@protonmail.com
|
d63f0893519c71f152b806df986ad7977c378992
|
213bab28027bd7b5f382c7bd8738748d5c092baa
|
/blog/migrations/0001_initial.py
|
1eecdccfeff703a348e8d71804cb3af1b5239d60
|
[] |
no_license
|
debora2001/meuBlog
|
91e5db1ca748841e2d60fc6de1fb47ef100d81d0
|
43fc0832c095871a77cdd320837352671064777e
|
refs/heads/master
| 2020-05-25T09:12:58.784651
| 2019-05-22T23:34:13
| 2019-05-22T23:34:13
| 187,729,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 987
|
py
|
# Generated by Django 2.0.13 on 2019-05-14 00:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"deborasilvabasilio03@gmail.com"
] |
deborasilvabasilio03@gmail.com
|
f7c5eb6b2779678645dbe907be346931306e1110
|
783244556a7705d99662e0b88872e3b63e3f6301
|
/denzo/views.py
|
0721b87bc8754714091c168de0c2ebdcf4906cd8
|
[] |
no_license
|
KobiBeef/eastave_src
|
bf8f2ce9c99697653d36ca7f0256473cc25ac282
|
dfba594f3250a88d479ccd9f40fefc907a269857
|
refs/heads/master
| 2021-01-10T16:49:14.933424
| 2016-03-08T13:30:48
| 2016-03-08T13:30:48
| 51,752,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,960
|
py
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.views import generic
from . import models
# Create your views here.
class TestView(generic.TemplateView):
# class TestView(generic.ListView):
template_name = 'denzo/index.html'
def get_context_data(self, **kwargs):
context = super(TestView, self).get_context_data(**kwargs)
context['patient_list'] = models.PatientInfo2.objects.all()
context['cardio_pt'] = models.PatientInfo2.objects.filter(category="Cardiology")
context['endo_pt'] = models.PatientInfo2.objects.filter(category="Endocrinology")
context['gastro_pt'] = models.PatientInfo2.objects.filter(category="Gastroenterology")
context['nephro_pt'] = models.PatientInfo2.objects.filter(category="Nephrology")
context['neuro_pt'] = models.PatientInfo2.objects.filter(category="Neurology")
# context['cardio_count'] = models.PatientInfo2.objects.get(category="Cardiology")
return context
# def index(request):
# patient_list = models.PatientInfo2.objects.all()
# category_list = models.PatientInfo2.objects.filter(category=self.category)
# context ={
# 'patient_list': patient_list,
# 'category_list': category_list,
# }
# return render(request, 'denzo/index.html', context)
# context_dict = {'boldmessage': "I am bold font from the context"}
# return render(request, 'denzo/index.html', context_dict)
# class Test(generic.TemplateView):
# template_name = 'denzo/index.html'
# def get_context_data(self, **kwargs):
# context = super(Test, self).get_context_data(**kwargs)
# context['init_test'] = models.
# class OverviewListView(generic.TemplateView):
# template_name = 'denzo/index.html'
# def get_context_data(self, **kwargs):
# context = super(OverviewListView, self).get_context_data(**kwargs)
# context['patient_list'] = models.PatientInfo.objects.all()
# context['physician_list'] = models.PhysicianInfo.objects.all()
# return context
|
[
"ezekielbacungan@gmail.com"
] |
ezekielbacungan@gmail.com
|
1dd4e1daead0188ecac08ab5768d4f6825208bbf
|
067fc8e0d93c8e5953a213be0048380f973d3234
|
/src/schoology-extractor/edfi_schoology_extractor/usage_analytics_facade.py
|
a79cfb2d7927c86b6042266cafda10f56b024982
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
markramonDL/LMS-Toolkit
|
0fff343afc8b5775765ccbbf8529a4e8acab5b11
|
d7097f9e063f39a45c8a08ec7316d2a1c4034e50
|
refs/heads/main
| 2023-06-19T08:36:54.384666
| 2021-07-14T21:52:09
| 2021-07-14T21:52:09
| 386,748,130
| 0
| 0
|
Apache-2.0
| 2021-07-16T19:49:16
| 2021-07-16T19:49:15
| null |
UTF-8
|
Python
| false
| false
| 2,223
|
py
|
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
import logging
import os
import pandas as pd
import sqlalchemy
from .helpers import csv_reader
from .helpers import sync
from .mapping import usage_analytics as usageMap
logger = logging.getLogger(__name__)
def get_system_activities(
usage_input_dir: str, db_engine: sqlalchemy.engine.base.Engine
) -> pd.DataFrame:
"""
Processes the .csv or .gz files from the input directory.
Parameters
----------
usage_input_dir: str
    Directory where the System Activities reports are stored
db_engine: sqlalchemy.engine.base.Engine
    Engine for the sync database used to track already-processed files
Returns
-------
pd.DataFrame
Data reshaped into the Ed-Fi LMS Unified Data Model(UDM)
"""
output = pd.DataFrame()
logger.debug(
f"Processing usage analytics files: loading files from {usage_input_dir}"
)
for file in os.scandir(usage_input_dir):
# It is not expected to have anything different from .gz or .csv
# in case there's something different, this method will throw an
# exception
mapped_row = pd.DataFrame()
if not sync.usage_file_is_processed(file.name, db_engine):
logger.info(f"Processing usage analytics file: {file.name}")
row_data = csv_reader.load_data_frame(file.path)
mapped_row = usageMap.map_to_udm(row_data)
sync.insert_usage_file_name(file.name, db_engine)
else:
logger.debug(
f"Ignoring usage analytics file because it has already been processed: {file.name}"
)
if not output.empty:
    output = output.append(mapped_row)
else:
    output = mapped_row
if output.empty:
logger.info("No new usage analytics files were found")
return output
# If reports have overlapping dates then we'll have duplicates
logger.debug(
"Processing usage_analytics files: Removing duplicated system activities from DataFrame"
)
output.drop_duplicates(inplace=True)
return output
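# A minimal usage sketch (the directory and connection string are assumptions,
# not part of this module):
#
#   engine = sqlalchemy.create_engine("sqlite:///schoology-sync.db")
#   system_activities = get_system_activities("./usage-reports", engine)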
|
[
"noreply@github.com"
] |
markramonDL.noreply@github.com
|
f37609df1a42df1f361372a57b655841feb72db1
|
f4722f955e82b0dcee8af8f66690e85de9acb0d7
|
/app/settings.py
|
630c5011d120ef9f4b571736a1ca5e2a67feb7fc
|
[] |
no_license
|
EmotiCam/mylifecut_backend
|
afd2592bef3ab95e0936a69e7e6bfbfa5f61d1fd
|
6067ea512e532de6657c70e5d000d6c9721b4ed5
|
refs/heads/master
| 2022-12-13T10:05:16.087710
| 2019-12-11T16:42:11
| 2019-12-11T16:42:11
| 219,949,635
| 0
| 0
| null | 2021-06-10T22:13:33
| 2019-11-06T08:38:52
|
Python
|
UTF-8
|
Python
| false
| false
| 3,620
|
py
|
import os
import pymysql
# Mysql install
pymysql.install_as_MySQLdb()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# DEV mode or PRODUCTION mode
IS_DEV_MODE = os.environ.get("DJANGO_ENV") == "development"
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "xa!0psktato=%6x*2fjt4)+x(ryyk-i$q14o#)8qktk4g=#tz0"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = IS_DEV_MODE
ALLOWED_HOSTS = ["127.0.0.1", "fzfv1bjqai.execute-api.ap-northeast-2.amazonaws.com"]
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"corsheaders",
"allauth",
"allauth.account",
"rest_auth.registration",
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"core",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "app.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "app.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.mysql",
"NAME": os.environ.get("DB_NAME"),
"USER": os.environ.get("DB_USER"),
"PASSWORD": os.environ.get("DB_PASSWORD"),
"HOST": os.environ.get("DB_HOST"),
"PORT": os.environ.get("DB_PORT"),
"OPTIONS": {"sql_mode": "traditional"},
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": [
"rest_framework.authentication.TokenAuthentication"
]
}
ACCOUNT_EMAIL_VERIFICATION = "none"
CORS_ORIGIN_ALLOW_ALL = True
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "Asia/Seoul"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = "/static/"
SITE_ID = 1
|
[
"nobel6018@gmail.com"
] |
nobel6018@gmail.com
|
bd048cac33fa568ed2be92dcddd393d2a3eaab83
|
a1e612352487847fe30a379dee780713ba0cfc9a
|
/fonction_basiques.py
|
e71466c70cb87cb3095bdd83446396753e5b6fa5
|
[] |
no_license
|
SoniaB78/supportSG
|
807ad245068cab02e64fec245c0b81af10e05128
|
e39da5ae238d77fe6b2c6f832c1e0c15a1eaf7f0
|
refs/heads/master
| 2021-01-03T07:31:27.399471
| 2020-02-12T10:14:40
| 2020-02-12T10:14:40
| 239,982,239
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,085
|
py
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Administrateur
#
# Created: 03/12/2019
# Copyright: (c) Administrateur 2019
# Licence: <your licence>
#-------------------------------------------------------------------------------
def main():
pass
if __name__ == '__main__':
main()
from math import *
# alternative: import math
x = 2.718
print("\n", type(x), "x=", x)
print("round(x,2) =", round(x,2))
# result = 2.72 (rounded to 2 decimal places)
print("round(x**3.1) =",round(x**3.1))
# result = 22 (x**3.1 is about 22.2; exponentiation, not a square root)
print("abs(1-x) =", abs(1-x))
# result = 1.718 (absolute value)
print("floor(x) =", floor(x))
# result = 2 (floor rounds down)
print("floor(1-x) =", floor(1-x))
# result = -2
print("floor(abs(1-x)) = ", floor(abs(1-x)), "\n")
# result = 1
print("round(exp(2), 3) = ", round(exp(2), 3))
# exponential: e**2 = 7.389
print("round(log(2), 2) = ", round(log(2), 2))
# natural logarithm: 0.69
print("round(sqrt(2), 3) = ",round(sqrt(2), 3), "\n")
# square root: 1.414
print(chr(75))
# character number 75 in ASCII => K
print(chr(109))
# the lowercase letter m, ASCII 109
|
[
"bougamha.sonia@gmail.com"
] |
bougamha.sonia@gmail.com
|
0ebcc8149ce068b5084956bbde4794c6ce953011
|
2cab2fddfc9d692a18f65507d6f810a2cac116c5
|
/genkeymap.py
|
fd2fe853a5119614ec1a28c80a76c416060e82a9
|
[
"MIT"
] |
permissive
|
BarnabyShearer/phone_chord
|
a469e6e532f1e727b2b021c98b1b064bef5ffd25
|
809a2b20739a0391f59bd12b6583bb60d5ac9f7c
|
refs/heads/master
| 2021-05-08T13:27:21.548466
| 2018-02-05T16:23:15
| 2018-02-05T16:23:15
| 120,014,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,170
|
py
|
#! /usr/bin/env python3
from brs001 import MOD, KEYS
def toint(keys):
key = 0
for i in range(1, 9):
if i in keys:
key |= 1 << (i-1)
return key
def fromint(key):
keys = set()
for i in range(8):
if key & (1 << i):
keys.add(i + 1)
return keys
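# Round-trip example: toint({1, 3}) == 0b101 == 5, and fromint(5) == {1, 3}.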
def totable(name, keys):
buf = ""
keys = {toint(v): k for k, v in keys.items()}
keys[0] = '0'
buf += "uint8_t %s[] = {\n" % name
for x in range(32):
buf += " "
for y in range(8):
buf += keys.get(8*x + y, '0')
if not y == 7:
buf += ", "
buf += ",\n"
buf += "};\n"
return buf
# Check dupes
seen = set()
for k, v in MOD.items() | KEYS.items():
if toint(v) in seen:
print("Dupe Key:", k, v)
seen.add(toint(v))
# Check unused 1, 2, 3 chords
good = set()
for x in range(1, 9):
good.add(toint({x}))
for y in range(1, 9):
good.add(toint({x, y}))
for z in range(1,9):
good.add(toint({x, y, z}))
#for x in good - seen:
# print(fromint(x))
#print table
print(totable("modifiers", MOD))
print(totable("hidcode", KEYS))
|
[
"b@zi.is"
] |
b@zi.is
|
da10a8a155495c940c7a33b391b32775f9615b22
|
7a65d5c65b4dfda116a95520dc2b992e0c70730a
|
/tests/image_tests/renderpasses/test_HalfRes.py
|
b0a9347978dc17fee4541df506453a23f52ec97f
|
[
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
WeakKnight/Falcor
|
bd40129d40141ed1360960e3567188a587193989
|
a824b847ba9a6861d011868c0a4f4f0b378b821f
|
refs/heads/master
| 2023-03-19T04:41:56.538202
| 2022-09-05T09:45:28
| 2022-09-05T09:45:28
| 297,922,220
| 0
| 0
|
BSD-3-Clause
| 2020-09-23T09:39:35
| 2020-09-23T09:39:35
| null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
import sys
sys.path.append('..')
from helpers import render_frames
from graphs.HalfRes import HalfRes as g
from falcor import *
m.addGraph(g)
m.loadScene('Arcade/Arcade.pyscene')
# default
render_frames(m, 'default', frames=[1,16,64])
exit()
|
[
"skallweit@nvidia.com"
] |
skallweit@nvidia.com
|
03a9b7bf007a16d97712759b3bec49976e2ef934
|
be1b4832db24a21bdec1cbc9026cb281db86a1f8
|
/classInfoSystem/classroom/migrations/0024_classroom_departman_ismi.py
|
29c0e19e9f06ada9b3a2ef72b24a7b29651a6392
|
[] |
no_license
|
bugrademiroglu/Class-Info-System-v2
|
b33eeb35b221e222524773c9c9069d2d6d758038
|
d6b5f61cc88294ad0edf50b6f68e8a7196c1af76
|
refs/heads/master
| 2022-07-19T18:55:48.183313
| 2020-05-23T22:39:50
| 2020-05-23T22:39:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 507
|
py
|
# Generated by Django 3.0.6 on 2020-05-19 23:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('department', '0006_auto_20200419_2156'),
('classroom', '0023_remove_classroom_dept_name'),
]
operations = [
migrations.AddField(
model_name='classroom',
name='departman_ismi',
field=models.ManyToManyField(to='department.Department', verbose_name='department name'),
),
]
|
[
"bugra.demiroglu@hotmail.com"
] |
bugra.demiroglu@hotmail.com
|
c3c5220ba71013b5de3514ed9a3f3559961b5089
|
38e26b647740851733829aab40f8de2cd819e4a5
|
/Deep learning/course 2/Regularization/reg_utils.py
|
1ab495cf96f9d2e5a1a758c7ea6aebb962e411eb
|
[] |
no_license
|
kingkong135/AI-self-learning
|
30599d80e8b14e84ecf5eaa70367adaa52c6629e
|
06d6c09185b450602be937981b03a408e70630fa
|
refs/heads/master
| 2020-06-27T13:40:36.823067
| 2020-02-22T06:49:50
| 2020-02-22T06:49:50
| 199,967,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,616
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import h5py
import sklearn
import sklearn.datasets
import sklearn.linear_model
import scipy.io
def sigmoid(x):
"""
Compute the sigmoid of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- sigmoid(x)
"""
s = 1 / (1 + np.exp(-x))
return s
def relu(x):
"""
Compute the relu of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- relu(x)
"""
s = np.maximum(0, x)
return s
def load_planar_dataset(seed):
np.random.seed(seed)
m = 400 # number of examples
N = int(m / 2) # number of points per class
D = 2 # dimensionality
X = np.zeros((m, D)) # data matrix where each row is a single example
Y = np.zeros((m, 1), dtype='uint8') # labels vector (0 for red, 1 for blue)
a = 4 # maximum ray of the flower
for j in range(2):
ix = range(N * j, N * (j + 1))
t = np.linspace(j * 3.12, (j + 1) * 3.12, N) + np.random.randn(N) * 0.2 # theta
r = a * np.sin(4 * t) + np.random.randn(N) * 0.2 # radius
X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
Y[ix] = j
X = X.T
Y = Y.T
return X, Y
def initialize_parameters(layer_dims):
"""
Arguments:
layer_dims -- python array (list) containing the dimensions of each layer in our network
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
b1 -- bias vector of shape (layer_dims[l], 1)
Wl -- weight matrix of shape (layer_dims[l-1], layer_dims[l])
bl -- bias vector of shape (1, layer_dims[l])
Tips:
- For example: the layer_dims for the "Planar Data classification model" would have been [2,2,1].
This means W1's shape was (2,2), b1 was (1,2), W2 was (2,1) and b2 was (1,1). Now you have to generalize it!
- In the for loop, use parameters['W' + str(l)] to access Wl, where l is the iterative integer.
"""
np.random.seed(3)
parameters = {}
L = len(layer_dims) # number of layers in the network
for l in range(1, L):
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) / np.sqrt(layer_dims[l - 1])
parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
assert (parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l - 1]))
assert (parameters['b' + str(l)].shape == (layer_dims[l], 1))
return parameters
def forward_propagation(X, parameters):
"""
Implements the forward propagation (and computes the loss) presented in Figure 2.
Arguments:
X -- input dataset, of shape (input size, number of examples)
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
W1 -- weight matrix of shape ()
b1 -- bias vector of shape ()
W2 -- weight matrix of shape ()
b2 -- bias vector of shape ()
W3 -- weight matrix of shape ()
b3 -- bias vector of shape ()
Returns:
A3 -- the sigmoid output of the last layer
cache -- tuple of intermediate values, reused by backward_propagation
"""
# retrieve parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
W3 = parameters["W3"]
b3 = parameters["b3"]
# LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
Z1 = np.dot(W1, X) + b1
A1 = relu(Z1)
Z2 = np.dot(W2, A1) + b2
A2 = relu(Z2)
Z3 = np.dot(W3, A2) + b3
A3 = sigmoid(Z3)
cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
return A3, cache
def backward_propagation(X, Y, cache):
"""
Implement the backward propagation presented in figure 2.
Arguments:
X -- input dataset, of shape (input size, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat)
cache -- cache output from forward_propagation()
Returns:
gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables
"""
m = X.shape[1]
(Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
dZ3 = A3 - Y
dW3 = 1. / m * np.dot(dZ3, A2.T)
db3 = 1. / m * np.sum(dZ3, axis=1, keepdims=True)
dA2 = np.dot(W3.T, dZ3)
dZ2 = np.multiply(dA2, np.int64(A2 > 0))
dW2 = 1. / m * np.dot(dZ2, A1.T)
db2 = 1. / m * np.sum(dZ2, axis=1, keepdims=True)
dA1 = np.dot(W2.T, dZ2)
dZ1 = np.multiply(dA1, np.int64(A1 > 0))
dW1 = 1. / m * np.dot(dZ1, X.T)
db1 = 1. / m * np.sum(dZ1, axis=1, keepdims=True)
gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
"dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
"dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}
return gradients
def update_parameters(parameters, grads, learning_rate):
"""
Update parameters using gradient descent
Arguments:
parameters -- python dictionary containing your parameters:
parameters['W' + str(i)] = Wi
parameters['b' + str(i)] = bi
grads -- python dictionary containing your gradients for each parameters:
grads['dW' + str(i)] = dWi
grads['db' + str(i)] = dbi
learning_rate -- the learning rate, scalar.
Returns:
parameters -- python dictionary containing your updated parameters
"""
n = len(parameters) // 2 # number of layers in the neural networks
# Update rule for each parameter
for k in range(n):
parameters["W" + str(k + 1)] = parameters["W" + str(k + 1)] - learning_rate * grads["dW" + str(k + 1)]
parameters["b" + str(k + 1)] = parameters["b" + str(k + 1)] - learning_rate * grads["db" + str(k + 1)]
return parameters
def predict(X, y, parameters):
"""
This function is used to predict the results of a n-layer neural network.
Arguments:
X -- data set of examples you would like to label
parameters -- parameters of the trained model
Returns:
p -- predictions for the given dataset X
"""
m = X.shape[1]
p = np.zeros((1, m), dtype=int)
# Forward propagation
a3, caches = forward_propagation(X, parameters)
# convert probas to 0/1 predictions
for i in range(0, a3.shape[1]):
if a3[0, i] > 0.5:
p[0, i] = 1
else:
p[0, i] = 0
# print results
# print ("predictions: " + str(p[0,:]))
# print ("true labels: " + str(y[0,:]))
print("Accuracy: " + str(np.mean((p[0, :] == y[0, :]))))
return p
def compute_cost(a3, Y):
"""
Implement the cost function
Arguments:
a3 -- post-activation, output of forward propagation
Y -- "true" labels vector, same shape as a3
Returns:
cost - value of the cost function
"""
m = Y.shape[1]
logprobs = np.multiply(-np.log(a3), Y) + np.multiply(-np.log(1 - a3), 1 - Y)
cost = 1. / m * np.nansum(logprobs)
return cost
def load_dataset():
train_dataset = h5py.File('datasets/train_catvnoncat.h5', "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
test_dataset = h5py.File('datasets/test_catvnoncat.h5', "r")
test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
classes = np.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
train_set_x_orig = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_orig = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
train_set_x = train_set_x_orig / 255
test_set_x = test_set_x_orig / 255
return train_set_x, train_set_y, test_set_x, test_set_y, classes
def predict_dec(parameters, X):
"""
Used for plotting decision boundary.
Arguments:
parameters -- python dictionary containing your parameters
X -- input data of size (m, K)
Returns
predictions -- vector of predictions of our model (red: 0 / blue: 1)
"""
# Predict using forward propagation and a classification threshold of 0.5
a3, cache = forward_propagation(X, parameters)
predictions = (a3 > 0.5)
return predictions
def load_planar_dataset(randomness, seed):
np.random.seed(seed)
m = 50
N = int(m / 2) # number of points per class
D = 2 # dimensionality
X = np.zeros((m, D)) # data matrix where each row is a single example
Y = np.zeros((m, 1), dtype='uint8') # labels vector (0 for red, 1 for blue)
a = 2 # maximum ray of the flower
for j in range(2):
ix = range(N * j, N * (j + 1))
if j == 0:
t = np.linspace(j, 4 * 3.1415 * (j + 1), N) # + np.random.randn(N)*randomness # theta
r = 0.3 * np.square(t) + np.random.randn(N) * randomness # radius
if j == 1:
t = np.linspace(j, 2 * 3.1415 * (j + 1), N) # + np.random.randn(N)*randomness # theta
r = 0.2 * np.square(t) + np.random.randn(N) * randomness # radius
X[ix] = np.c_[r * np.cos(t), r * np.sin(t)]
Y[ix] = j
X = X.T
Y = Y.T
return X, Y
def plot_decision_boundary(model, X, y):
# Set min and max values and give it some padding
x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole grid
Z = model(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.ylabel('x2')
plt.xlabel('x1')
plt.scatter(X[0, :], X[1, :], c=y.ravel().tolist(), cmap=plt.cm.Spectral)
plt.show()
def load_2D_dataset():
data = scipy.io.loadmat('datasets/data.mat')
train_X = data['X'].T
train_Y = data['y'].T
test_X = data['Xval'].T
test_Y = data['yval'].T
return train_X, train_Y, test_X, test_Y
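# A minimal smoke-test sketch (layer sizes and batch size are illustrative):
if __name__ == "__main__":
    demo_parameters = initialize_parameters([2, 3, 3, 1])
    X_demo = np.random.randn(2, 5)
    A3_demo, _ = forward_propagation(X_demo, demo_parameters)
    print(A3_demo.shape)  # (1, 5)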
|
[
"nguyentiendat1531999@gmail.com"
] |
nguyentiendat1531999@gmail.com
|
b895d81e156a405d5afdd4d89a20d08acd8105e2
|
c6cf8d1c0c09a2205c7328c143e5a6000835af32
|
/obplatform/apps/common/templatetags/utiltags.py
|
e5426a961edc0c3546755e692818b7b0ff48f161
|
[] |
no_license
|
ZoomQuiet/openbookplatform
|
9bb82972a92bc60aea616a89d8fbc46d1b9b741d
|
6d8700598138a240ddd8c9ffdf14d413518df3e2
|
refs/heads/master
| 2021-01-19T03:23:16.424255
| 2009-08-01T11:15:08
| 2009-08-01T11:15:08
| 34,236,466
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,355
|
py
|
from django import template
from django.utils.translation import gettext_lazy as _
import re
register = template.Library()
class ExprNode(template.Node):
def __init__(self, expr_string, var_name):
self.expr_string = expr_string
self.var_name = var_name
def render(self, context):
try:
clist = list(context)
clist.reverse()
d = {}
d['_'] = _
for c in clist:
d.update(c)
if self.var_name:
context[self.var_name] = eval(self.expr_string, d)
return ''
else:
return str(eval(self.expr_string, d))
except:
raise
class CatchNode(template.Node):
def __init__(self, nodelist, var_name):
self.nodelist = nodelist
self.var_name = var_name
def render(self, context):
output = self.nodelist.render(context)
context[self.var_name] = output
return ''
r_expr = re.compile(r'(.*?)\s+as\s+(\w+)', re.DOTALL)
def do_expr(parser, token):
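"""
Evaluate a Python expression against the current context, optionally
binding the result to a variable.
Example::
    {% expr "-".join(["a", "b"]) as joined %}
"""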
try:
tag_name, arg = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError, "%r tag requires arguments" % token.contents[0]
m = r_expr.search(arg)
if m:
expr_string, var_name = m.groups()
else:
if not arg:
raise template.TemplateSyntaxError, "%r tag at least require one argument" % tag_name
expr_string, var_name = arg, None
return ExprNode(expr_string, var_name)
do_expr = register.tag('expr', do_expr)
def do_catch(parser, token):
"""
Catch the content and save it to var_name
Example::
{% catch as var_name %} ... {% endcatch %}
"""
try:
tag_name, arg = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError, "%r tag requires arguments" % token.contents[0]
m = re.search(r'as\s+(\w+)', arg)
if not m:
raise template.TemplateSyntaxError, '%r tag should define as "%r as var_name"' % (tag_name, tag_name)
var_name = m.groups()[0]
nodelist = parser.parse(('endcatch',))
parser.delete_first_token()
return CatchNode(nodelist, var_name)
do_catch = register.tag('catch', do_catch)
from django.template.loader import get_template
from django.conf import settings
import tokenize
import StringIO
class CallNode(template.Node):
def __init__(self, template_name, *args, **kwargs):
self.template_name = template_name
self.args = args
self.kwargs = kwargs
def render(self, context):
try:
template_name = self.template_name.resolve(context)
t = get_template(template_name)
d = {}
args = d['args'] = []
kwargs = d['kwargs'] = {}
for i in self.args:
args.append(i.resolve(context))
for key, value in self.kwargs.items():
kwargs[key] = d[key] = value.resolve(context)
context.update(d)
result = t.render(context)
context.pop()
return result
except:
if settings.TEMPLATE_DEBUG:
raise
return ''
def do_call(parser, token):
"""
Loads a template and renders it with the current context.
Example::
{% call "foo/some_include" %}
{% call "foo/some_include" with arg1 arg2 ... argn %}
"""
bits = token.contents.split()
if 'with' in bits: #has 'with' key
pos = bits.index('with')
argslist = bits[pos+1:]
bits = bits[:pos]
else:
argslist = []
if len(bits) != 2:
raise template.TemplateSyntaxError, "%r tag takes one argument: the name of the template to be included" % bits[0]
path = parser.compile_filter(bits[1])
if argslist:
args = []
kwargs = {}
for i in argslist:
if '=' in i:
a, b = i.split('=', 1)
a = str(a).strip()
b = b.strip()
buf = StringIO.StringIO(a)
keys = list(tokenize.generate_tokens(buf.readline))
if keys[0][0] == tokenize.NAME:
kwargs[a] = parser.compile_filter(b)
else:
raise template.TemplateSyntaxError, "Argument syntax wrong: should be key=value"
else:
args.append(parser.compile_filter(i))
return CallNode(path, *args, **kwargs)
register.tag('call', do_call)
class PyIfNode(template.Node):
def __init__(self, nodeslist):
self.nodeslist = nodeslist
def __repr__(self):
return "<PyIf node>"
def render(self, context):
for e, nodes in self.nodeslist:
clist = list(context)
clist.reverse()
d = {}
d['_'] = _
for c in clist:
d.update(c)
v = eval(e, d)
if v:
return nodes.render(context)
return ''
def do_pyif(parser, token):
nodeslist = []
while 1:
v = token.contents.split(None, 1)
if v[0] == 'endif':
break
if v[0] in ('pyif', 'elif'):
if len(v) < 2:
raise template.TemplateSyntaxError, "'pyif' statement requires at least one argument"
if len(v) == 2:
tagname, arg = v
else:
tagname, arg = v[0], 'True'
nodes = parser.parse(('else', 'endif', 'elif'))
nodeslist.append((arg, nodes))
token = parser.next_token()
# parser.delete_first_token()
return PyIfNode(nodeslist)
do_pyif = register.tag("pyif", do_pyif)
# pycall
# you can use it to invoke a method of a module
r_identifers = re.compile(r'[\w.]+')
class PyCallNode(template.Node):
def __init__(self, expr_string, var_name):
self.expr_string = expr_string
self.var_name = var_name
def __repr__(self):
return "<PyCall node>"
def render(self, context):
clist = list(context)
clist.reverse()
d = {}
d['_'] = _
d['context'] = context
for c in clist:
d.update(c)
m = r_identifers.match(self.expr_string)
if m:
module, func = m.group().rsplit('.', 1)
funcstring = self.expr_string[len(module) + 1:]
mod = __import__(module, {}, {}, [''])
d[func] = getattr(mod, func)
else:
raise template.TemplateSyntaxError, "The arguments of %r tag should be module.function(...)" % 'pycall'
if self.var_name:
context[self.var_name] = eval(funcstring, d)
return ''
else:
return str(eval(funcstring, d))
def do_pycall(parser, token):
try:
tag_name, arg = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError, "%r tag requires arguments" % token.contents[0]
m = re.search(r'(.*?)\s+as\s+(\w+)', arg)
if m:
expr_string, var_name = m.groups()
else:
if not arg:
raise template.TemplateSyntaxError, "The arguments of %r tag should be module.function(...)" % tag_name
expr_string, var_name = arg, None
return PyCallNode(expr_string, var_name)
do_pycall = register.tag("pycall", do_pycall)
|
[
"Zoom.Quiet@e9cb34ec-b428-0410-8744-11689a347de2"
] |
Zoom.Quiet@e9cb34ec-b428-0410-8744-11689a347de2
|
41d937507270b661d01344d62bdb1da0919636f2
|
688871f41d49613adfdb590d2c0518be14de0d2d
|
/songFiles/madonna_song_parse_by_quotes.py
|
acfb03bbb231b89d4bbf5aa3c4f13a2f867ed549
|
[] |
no_license
|
richardx14/madz_serverless
|
407af920022ca2b6d85de64e4aa773e923014f6e
|
160725d27be6acd54d0346317f65d251dcd3d666
|
refs/heads/master
| 2020-04-14T13:31:42.065854
| 2019-03-04T08:33:26
| 2019-03-04T08:33:26
| 163,871,293
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
songCounter = 0
songArray = []
with open('madonna_all_songs_wikipedia.txt') as file_object:
# contents = file_object.read()
for line in file_object:
print(line)
if line[0] == "\"":
songCounter = songCounter + 1
songString = line.split("\"")[1]
songArray.append(songString)
outputFile = 'parsedSongList.txt'
with open(outputFile,'w') as file_object:
for song in songArray:
file_object.write(str("\"" + song + "\","))
file_object.write("\n")
|
[
"richard@dicecentre.org"
] |
richard@dicecentre.org
|
10781add07967e561d3762d25085f35e80646879
|
2aee288116fccffd9a2353bea1037e4a3ed573c0
|
/zhang_archery.py
|
3473775b7b17d287789b4f80fdad7a60a6d31d54
|
[] |
no_license
|
mtz99/SMC-CS21
|
2e11ffc2af0787866436deccc393c79fa6826772
|
5767b0e4c8b0a827dac3ce3d8ee52b84aaa75721
|
refs/heads/master
| 2020-04-06T11:34:22.514773
| 2018-11-22T22:04:25
| 2018-11-22T22:04:25
| 157,422,418
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,789
|
py
|
#Matthew Zhang
#CS 21, Fall 2018
#archery.py
#Received help from: Jennifer Courter
#NOTICE: This program requires the graphics.py library (by John Zelle) to be in the
#same directory as this file in order for this program to work. You can find this file
#in my SMC-CS21 repository.
'''This program is a simple target game which functions as a real life dartboard.
Each ring awards the player a different amount of points with the yellow awarding
9, red awards 7, blue awards 5, black awards 3, white awards 1, and anything
outside those rings awards the player no points. To quit this game, the player
must press "q".'''
#Imports necessary graphics and math files for the program to use.
from graphics import *
from math import *
#This function creates the window which displays the target.
def create_target_window():
#Creates the window, sets its background color and the central coord point.
mywin = GraphWin("target", 500, 600)
mywin.setBackground("gray")
p = Point(250, 250)
#Draws all the circles for the target.
c1 = Circle(p, 250)
c1.setFill("white")
c1.draw(mywin)
c2 = Circle(p, 200)
c2.setFill("black")
c2.draw(mywin)
c3 = Circle(p, 150)
c3.setFill("blue")
c3.draw(mywin)
c4 = Circle(p, 100)
c4.setFill("red")
c4.draw(mywin)
c5 = Circle(p, 50)
c5.setFill("yellow")
c5.draw(mywin)
#Returns the "mywin" variable for use in the main function. (It is renamed
#"win" in the main function).
return mywin
'''This function takes in the parameter which represents the location of
where the player clicked on the target, and awards the player a certain amt. of
points corresponding to the location of the target that was hit.'''
def get_score(p):
#Calculates the distance between the center point and where you clicked on
#the target.
X = p.getX()
Y = p.getY()
distance = abs(sqrt((250-X)**2 + (250-Y)**2))
#Initializes the variable for the score.
arrow_score = 0
#Awards the player the appropriate amt of points for where the dart hits
#the board.
if (distance <= 50):
arrow_score += 9
elif (distance <= 100):
arrow_score += 7
elif (distance <= 150):
arrow_score += 5
elif (distance <= 200):
arrow_score += 3
elif (distance <= 250):
arrow_score += 1
elif (distance <= 300):
arrow_score += 0
#Returns the awarded points to the main function.
return arrow_score
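# Worked example: a click at (280, 290) lies sqrt(30**2 + 40**2) = 50 px from
# the center (250, 250), which lands on the yellow ring and scores 9 points.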
def main():
#Creates a graphics window object named win which displays the archery target.
win = create_target_window()
#Creates the initial variable for the player points.
user_p = 0
#Creates the score display in the target window.
score = Text(Point(250, 550), 'Current score: 0')
score.draw(win)
#This loop is necessary in order to allow the program to work without
#interruptions.
while True:
#Checks if you've pressed 'q' to quit the game, then quits the game.
keystroke = win.checkKey()
if keystroke == "q":
break
#Checks if you've clicked inside the target window.
p = win.checkMouse()
if p:
'''Calls the function which calculates the player's score then
increments the user's score based on the amount of points
awarded determined by the algorithm in the get_score() function.'''
user_p += get_score(p)
#Draws the point where you clicked on the target.
p.draw(win)
#Updates the text in target window to show current score.
score.setText("Current score: " + str(user_p))
#Closes the target window (when the while loop has been broken).
win.close()
|
[
"noreply@github.com"
] |
mtz99.noreply@github.com
|
c8b1a7c535d268967bc462278ef3b30a730b9d78
|
503dfa12f03fea8b91f5fda466e4ac908ced90c7
|
/usbparse.py
|
0d35741f80615c2e72f851be7a8fb049b9fc8729
|
[] |
no_license
|
zpon/wiresharkusbkeyparser
|
f27b691983b30e6adc20166a53b4469066666a1c
|
6fa78e7e50a678e3e6c962a4696317ea8165beb8
|
refs/heads/master
| 2021-08-31T20:28:53.405242
| 2017-12-22T19:10:00
| 2017-12-22T19:10:00
| 115,143,260
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,167
|
py
|
import json
# Extract key usb key sequence presses from json file extracted from Wireshark
map = {0x29: 'Esc',
0x1E: "1",
0x1F: "2",
0x20: "3",
0x21: "4",
0x22: "5",
0x23: "6",
0x24: "7",
0x25: "8",
0x26: "9",
0x27: "0",
0x2D: "_ / -",
0x2E: "+ / =",
0x2A: "Back space",
0x2B: "Tab",
0x14: "Q",
0x1A: "W",
0x08: "E",
0x15: "R",
0x17: "T",
0x1C: "Y",
0x18: "U",
0x0C: "I",
0x12: "O",
0x13: "P",
0x2F: "{",
0x30: "}",
0x28: "Enter",
0x58: "Enter KP",
0xE0: "Ctrl L",
0xE4: "Ctrl R",
0x04: "A",
0x16: "S",
0x07: "D",
0x09: "F",
0x0A: "G",
0x0B: "H",
0x0D: "J",
0x0E: "K",
0x0F: "L",
0x33: ":",
0x34: "\"",
0x35: "~",
0xE1: "Shift L",
0x31: "|",
0x53: "(INT 2)",
0x1D: "Z",
0x1B: "X",
0x06: "C",
0x19: "V",
0x05: "B",
0x11: "N",
0x10: "M",
0x36: "<",
0x37: ">",
0x38: "?",
0x54: "/",
0xE5: "Shift R",
0x2C: " "
}
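# Example: a usb.capdata string of "00:00:04:00:00:00:00:00" yields key byte
# 0x04, which the table above maps to "A".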
# From http://www.quadibloc.com/comp/scan.htm
# Scan Code Key Scan Code Key Scan Code Key
# Set Set Set USB Set Set Set USB Set Set Set USB
# 1 2 3 1 2 3 1 2 3
#
# 01 76 08 29 Esc 37 7C * PrtSc E0 5E E0 37 Power
# 02 16 16 1E ! 1 37+ 7C+ 7E 55 * KP E0 5F E0 3F Sleep
# 03 1E 1E 1F @ 2 37/54+ 7C/84 57 46 PrtSc E0 63 E0 5E Wake
# 04 26 26 20 # 3 38 11 19 E2 Alt L E0 20 E0 23 7F Mute
# 05 25 25 21 $ 4 E0 38 E0 11 39 E6 Alt R E0 30 E0 33 80 Volume Up
# 06 2E 2E 22 % 5 39 29 29 2C Space E0 2E E0 21 81 Volume Down
# 07 36 36 23 ^ 6 3A 58 14 39 Caps Lock E0 17 E0 43 7B Cut
# 08 3D 3D 24 & 7 3B 05 07 3A F1 E0 18 E0 44 7C Copy
# 09 3E 3E 25 * 8 3C 06 0F 3B F2 E0 0A E0 46 7D Paste
# 0A 46 46 26 ( 9 3D 04 17 3C F3 E0 3B E0 05 75 Help
# 0B 45 45 27 ) 0 3E 0C 1F 3D F4 E0 08 E0 3D 7A Undo
# 0C 4E 4E 2D _ - 3F 03 27 3E F5 E0 07 E0 36 Redo
# 0D 55 55 2E + = 40 0B 2F 3F F6 E0 22 E0 34 Play
# 0E 66 66 2A Back Space 41 83 37 40 F7 E0 24 E0 3B Stop
# 0F 0D 0D 2B Tab 42 0A 3F 41 F8 E0 10 E0 15 Skip Back
# 10 15 15 14 Q 43 01 47 42 F9 E0 19 E0 4D Skip Fwd
# 11 1D 1D 1A W 44 09 4F 43 F10 E0 2C E0 1A Eject
# 12 24 24 08 E 45+ 77+ 76 53 Num Lock E0 1E E0 1C Mail
# 13 2D 2D 15 R 45/46+ 77/7E+ 62 48 Pause/Bk E0 32 E0 3A Web
# 14 2C 2C 17 T 46 7E ScrLk/Bk E0 3C E0 06 Music
# 15 35 35 1C Y 46+ 7E+ 5F 47 Scroll Lock E0 64 E0 08 Pictures
# 16 3C 3C 18 U 47 6C 6C 5F 7 Home KP E0 6D E0 50 Video
# 17 43 43 0C I E0 47* E0 6C* 6E 4A Home CP
# 18 44 44 12 O 48 75 75 60 8 Up KP 5B 1F 08 68 F13
# 19 4D 4D 13 P E0 48* E0 75* 63 52 Up CP 5C 27 10 69 F14
# 1A 54 54 2F { [ 49 7D 7D 61 9 PgUp KP 5D 2F 18 6A F15
# 1B 5B 5B 30 } ] E0 49* E0 7D* 6F 4B PgUp CP 63 5E 2C 6B F16
# 1C 5A 5A 28 Enter 4A 7B 84 56 - KP 64 08 2B 6C F17
# E0 1C E0 5A 79 58 Enter KP 4B 6B 6B 5C 4 Left KP 65 10 30 6D F18
# 1D 14 11 E0 Ctrl L E0 4B* E0 6B* 61 50 Left CP 66 18 38 6E F19
# E0 1D E0 14 58 E4 Ctrl R 4C 73 73 97 5 KP 67 20 40 6F F20
# 1E 1C 1C 04 A 4D 74 74 5E 6 Right KP 68 28 48 70 F21
# 1F 1B 1B 16 S E0 4D* E0 74* 6A 4F Right CP 69 30 50 71 F22
# 20 23 23 07 D 4E 79 7C 57 + KP 6A 38 57 72 F23
# 21 2B 2B 09 F 4F 69 69 59 1 End KP 6B 40 5F 73 F24
# 22 34 34 0A G E0 4F* E0 69* 65 4D End CP 75 Help
# 23 33 33 0B H 50 72 72 5A 2 Down KP [71] 19 05 9A Attn SysRq
# 24 3B 3B 0D J E0 50* E0 72* 60 51 Down CP 76 5F 06 9C Clear
# 25 42 42 0E K 51 7A 7A 5B 3 PgDn KP 76 Stop
# 26 4B 4B 0F L E0 51* E0 7A* 6D 4E PgDn CP 77 Again
# 27 4C 4C 33 : ; 52 70 70 62 0 Ins KP 72 39 04 A3 CrSel Properties
# 28 52 52 34 " ' E0 52* E0 70* 67 49 Ins CP 0C Pause ErInp
# 29 0E 0E 35 ~ ` 53 71 71 63 . Del KP 78 Undo
# 2A 12 12 E1 Shift L E0 53* E0 71* 64 4C Del CP 74 53 03 A4 ExSel SetUp
# 2B 5D 5C 31 | \ 54 84 SysRq 6D 50 0E ErEOF Recrd
# 2B 5D 53 53 (INT 2) 56 61 13 64 (INT 1)
# 2C 1A 1A 1D Z 57 78 56 44 F11 80 Copy
# 2D 22 22 1B X 58 07 5E 45 F12 83 Print Ident
# 2E 21 21 06 C E0 5B E0 1F 8B E3 Win L 6F 6F 0A Copy Test
# 2F 2A 2A 19 V E0 5C E0 27 8C E7 Win R
# 30 32 32 05 B E0 5D E0 2F 8D 65 WinMenu 81 Paste
# 31 31 31 11 N 70 13 87 88 katakana 75 5C 01 Enl Help
# 32 3A 3A 10 M 73 51 51 87 (INT 3) 6C 48 09 Ctrl
# 33 41 41 36 < , 77 62 8C furigana 82 Find
# 34 49 49 37 > . 79 64 86 8A kanji 79 Cut
# 35 4A 4A 38 ? / 7B 67 85 8B hiragana
# 35+ 4A+ 77 54 / KP 7D 6A 5D 89 (INT 4) E0 4C E0 73 62 Rule
# 36 59 59 E5 Shift R [7E] 6D 7B (INT 5)
def main():
    print("Hi " + map[0x22])
    # packet dissections exported as JSON (see the note at the end of the file)
    jsonObj = json.loads(open("/tmp/foo.json", "r").read())
    print("Size: " + str(len(jsonObj)))
    prevValue = -1
    outputStr = ""
    for obj in jsonObj:
        # print(">> " + str(obj.keys()))
        if 'usb.capdata' in obj['_source']['layers']:
            data = obj['_source']['layers']['usb.capdata']
            # print(">>> " + data)
            # byte 2 of the 8-byte HID report holds the first pressed keycode
            hexStr = "0x" + data.split(":")[2]
            keycode = int(hexStr, 16)
            # emit each key once per press; keycode 0x00 means "no key pressed"
            if prevValue != keycode and keycode != 0:
                print("= " + hexStr + " " + str(keycode) + " \t" + (map[keycode] if keycode in map else "??"))
                outputStr += (map[keycode] if keycode in map else "??")
            # remembering 0x00 re-arms detection of a repeated press of the same key
            prevValue = keycode
        else:
            prevValue = -1
    print("Output:")
    print(outputStr)
if __name__ == '__main__':
    main()
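# Note on inputs (assumptions, not stated in this file): /tmp/foo.json is
# expected to be a Wireshark/tshark JSON export of a USB HID capture, e.g.
#   tshark -r keyboard.pcap -Y usb.capdata -T json > /tmp/foo.json
# Each packet's usb.capdata field carries the 8-byte boot-protocol report
# "modifiers:reserved:key1:key2:key3:key4:key5:key6", which is why byte
# index 2 is decoded in main() above.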
|
[
"zpon.dk@gmail.com"
] |
zpon.dk@gmail.com
|
13601ff9ed0074b7699add94bfa94c0fe19bcc6f
|
55173732ce1f2537a4fd8a6137b2a813f594b250
|
/azure-mgmt-network/azure/mgmt/network/operations/public_ip_addresses_operations.py
|
201db931d63a076b1711d33da7767604e20a72d1
|
[
"Apache-2.0"
] |
permissive
|
dipple/azure-sdk-for-python
|
ea6e93b84bfa8f2c3e642aecdeab9329658bd27d
|
9d746cb673c39bee8bd3010738c37f26ba6603a4
|
refs/heads/master
| 2020-02-26T15:32:39.178116
| 2016-03-01T19:25:05
| 2016-03-01T19:25:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,253
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class PublicIPAddressesOperations(object):
"""PublicIPAddressesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def delete(
self, resource_group_name, public_ip_address_name, custom_headers={}, raw=False, **operation_config):
"""
The delete publicIpAddress operation deletes the specified
publicIpAddress.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None
:rtype: msrest.pipeline.ClientRawResponse if raw=True
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers={}):
request = self._client.get(status_link)
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [204, 202, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, public_ip_address_name, expand=None, custom_headers={}, raw=False, **operation_config):
"""
        The Get publicIpAddress operation retrieves information about the
        specified publicIpAddress
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param expand: expand references resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: PublicIPAddress
:rtype: msrest.pipeline.ClientRawResponse if raw=True
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PublicIPAddress', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, public_ip_address_name, parameters, custom_headers={}, raw=False, **operation_config):
"""
The Put PublicIPAddress operation creates/updates a stable/dynamic
PublicIP address
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_address_name: The name of the publicIpAddress.
:type public_ip_address_name: str
:param parameters: Parameters supplied to the create/update
PublicIPAddress operation
:type parameters: PublicIPAddress
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: PublicIPAddress
:rtype: msrest.pipeline.ClientRawResponse if raw=True
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'PublicIPAddress')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers={}):
request = self._client.get(status_link)
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [201, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('PublicIPAddress', response)
if response.status_code == 200:
deserialized = self._deserialize('PublicIPAddress', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list_all(
self, custom_headers={}, raw=False, **operation_config):
"""
        The List publicIpAddress operation retrieves all the publicIpAddresses
        in a subscription.
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: PublicIPAddressPaged
:rtype: msrest.pipeline.ClientRawResponse if raw=True
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PublicIPAddressPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PublicIPAddressPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list(
self, resource_group_name, custom_headers={}, raw=False, **operation_config):
"""
        The List publicIpAddress operation retrieves all the publicIpAddresses
        in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: PublicIPAddressPaged
:rtype: msrest.pipeline.ClientRawResponse if raw=True
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PublicIPAddressPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PublicIPAddressPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
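# Hypothetical usage sketch (not part of the generated client): these
# operations are normally reached through a NetworkManagementClient
# attribute rather than constructed directly, roughly:
#   from azure.mgmt.network import NetworkManagementClient
#   client = NetworkManagementClient(credentials, subscription_id)
#   ip = client.public_ip_addresses.get('my_group', 'my_ip')
#   poller = client.public_ip_addresses.delete('my_group', 'my_ip')
#   poller.wait()  # delete() returns an AzureOperationPoller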
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
e318e51ef7f358fc3b23853c9ebb8a8f74f2da9a
|
56fd3c73d9c3c820f498fb00eecc6174f5b09bf6
|
/droplet/server/benchmarks/centr_avg.py
|
1d213517a50b7de939ad3b8a6ebf981212b69106
|
[
"Apache-2.0"
] |
permissive
|
jegonzal/droplet
|
97cf4479c21e29db2ac4f22822772f0d25be6a22
|
aff149dfed53f9ae89356927f1ea9cab391582d6
|
refs/heads/master
| 2023-06-01T04:50:58.940546
| 2019-12-06T19:29:28
| 2019-12-06T19:29:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,948
|
py
|
# Copyright 2019 U.C. Berkeley RISE Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
import uuid
import numpy as np
def run(droplet_client, num_requests, sckt):
''' DEFINE AND REGISTER FUNCTIONS '''
def follower(droplet, exec_id, my_id):
import random
val = random.randint(0, 100)
key = '%s-%d' % (exec_id, my_id)
droplet.put(key, val)
return key, my_id, val
def leader(droplet, exec_id, num_execs):
values = []
for i in range(num_execs):
key = '%s-%d' % (exec_id, i)
result = droplet.get(key)
while result is None:
result = droplet.get(key)
values.append(result)
import numpy as np
return np.mean(values)
cloud_follow = droplet_client.register(follower, 'follower')
cloud_lead = droplet_client.register(leader, 'leader')
if cloud_follow and cloud_lead:
print('Successfully registered follower and leader functions.')
else:
sys.exit(1)
''' TEST REGISTERED FUNCTIONS '''
n = 5
latencies = []
for _ in range(num_requests):
time.sleep(2)
start = time.time()
uid = str(uuid.uuid4())
for i in range(n):
res = cloud_follow(uid, i)
result = cloud_lead(uid, n)
end = time.time()
latencies.append(end - start)
return latencies, [], [], 0
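# Rough driver sketch (names assumed, not from this file): the harness passes
# a registered droplet client plus a zmq socket; note that `sckt` is accepted
# but unused inside run().
#   latencies, _, _, _ = run(droplet_client, num_requests=10, sckt=None)
#   print(sum(latencies) / len(latencies))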
|
[
"cgwu0530@gmail.com"
] |
cgwu0530@gmail.com
|
f0090a94f38698ea763536d63249a7d5839a3e59
|
72e3f45cbcbcabcc812cbef5cce8ec678c0259ca
|
/07_classes_objects_methods/07_04_inheritance.py
|
4be4e8bd02dcf6c3f8d076f6daa20f63551db87a
|
[] |
no_license
|
Dansultan/python_fundamentals-master
|
7c5d0b035305ebe48047738240316bac1ad6bf17
|
b2136197c1a0e6cc1178835f03d67949a9e1a801
|
refs/heads/master
| 2023-04-24T15:47:42.589840
| 2021-05-13T18:50:59
| 2021-05-13T18:50:59
| 348,490,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 892
|
py
|
'''
CLASSES AND INHERITANCE
=======================
1) Define an empty Movie class.
2) Add a dunder init method that takes two arguments "year" and "title"
3) Create a sub-class called "RomCom" that inherits from the Movie class
4) Create another sub-class of the Movie class called "ActionMovie"
that overwrites the dunder init method of Movie and adds another
instance variable called "pg" that is set by default to the number 13.
5) EXTRA: If you finish early, use the time to practice flushing out these
classes and white-boarding code. What attributes could a Movie class
contain? What methods? What tricks can you use through inheritance?
Any class attributes you could add?
'''
class Movie:
def __init__(self,year,title):
self.year = year
self.title = title
class RomCom(Movie):
    pass
class ActionMovie(Movie):
    def __init__(self, year, title, pg=13):
        super().__init__(year, title)
        self.pg = pg
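if __name__ == '__main__':
    # quick sanity check of the classes above (titles are illustrative)
    movie = Movie(1994, 'Forrest Gump')
    action = ActionMovie(1988, 'Die Hard')  # pg defaults to 13
    print(movie.year, movie.title)
    print(action.year, action.title, action.pg)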
|
[
"dansultan@MacBook-Pro-de-Dan.local"
] |
dansultan@MacBook-Pro-de-Dan.local
|
46de838fef043d21eeca44c08bd4d05ed8ee7015
|
daa1c5798e14292ca20eabd7fb8a2e9c9e762557
|
/exams_app_2/proc.py
|
d5c6759ff3e3f2341916c42f0b08f0249a27cdad
|
[] |
no_license
|
siliconcortex/curiousweb
|
9f1811b2c605f3484c40cc0fc465e74047e9a6ec
|
def40532bbedee8a6484e29edb4861bf3072101b
|
refs/heads/master
| 2023-05-07T12:55:31.696105
| 2020-08-18T07:30:50
| 2020-08-18T07:30:50
| 258,177,983
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,377
|
py
|
from django.shortcuts import render
from django.urls import reverse_lazy, reverse
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.views.generic import (View,TemplateView,
ListView,DetailView,
DeleteView, CreateView,
UpdateView)
from . import models
from . import forms
from datetime import datetime
from communications.standard_email import send_email
from .image_helpers import Thumbnail
def compute_stats(request, exampk):
    # superuser-only; the view is expected to pass its request object through
    if request.user.is_superuser:
exam = models.Exam.objects.get(pk = exampk)
percent_as = []
percent_bs = []
percent_cs = []
percent_ds = []
percent_corrects = []
item_labels = []
for i in range(1,101):
count_a = 0
count_b = 0
count_c = 0
count_d = 0
count_correct = 0
for sheet in exam.answer_sheets.all():
for item in sheet.items.filter(item_number = i):
if item.answer == 'a':
count_a = count_a + 1
elif item.answer == 'b':
count_b = count_b + 1
elif item.answer == 'c':
count_c = count_c + 1
elif item.answer == 'd':
count_d = count_d + 1
else:
pass
answer = exam.answer_key.items.filter(item_number = i)[0].answer
if item.answer == answer:
count_correct = count_correct + 1
total = count_a + count_b + count_c + count_d
percent_as.append(int(count_a * 100/ total))
percent_bs.append(int(count_b * 100/ total))
percent_cs.append(int(count_c * 100/ total))
percent_ds.append(int(count_d * 100/ total))
percent_corrects.append(int(count_correct * 100/ total))
i = 1
for item in exam.answer_key.items.all():
item_labels.append(str(i) + ', '+ str(item.answer))
i = i + 1
return {'percent_as': percent_as, 'percent_bs': percent_bs, 'percent_cs': percent_cs, 'percent_ds': percent_ds, 'percent_corrects': percent_corrects, 'item_labels': item_labels}
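# Hypothetical call site (assumed, not in this file): a stats view would
# forward its request so the superuser check can run, e.g.
#   context = compute_stats(request, exam.pk)
#   return render(request, 'exams/stats.html', context)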
|
[
"lesliecaminade@gmail.com"
] |
lesliecaminade@gmail.com
|
71265a5e90e44d029f9280247964038d7fae1212
|
c8ccd9a88d2b8ffabcad451325eaca62641fb69a
|
/tests/demo/test_demo_attachments.py
|
367eb390ecfa3814166c90ccefaf6984f291bc33
|
[
"Apache-2.0"
] |
permissive
|
DenisRybas/didcomm-python
|
262fd9c1cccd796b982cf44a66e140928c06111f
|
d7e83428f086d1080aaa8a9643495ca442ef9722
|
refs/heads/main
| 2023-07-13T03:33:41.676188
| 2021-08-13T07:59:13
| 2021-08-13T07:59:13
| 398,283,959
| 0
| 0
|
Apache-2.0
| 2021-08-20T13:23:30
| 2021-08-20T13:23:30
| null |
UTF-8
|
Python
| false
| false
| 1,596
|
py
|
import pytest as pytest
from didcomm.common.resolvers import ResolversConfig
from didcomm.message import Attachment, Message, AttachmentDataJson
from didcomm.pack_encrypted import pack_encrypted
from didcomm.unpack import unpack
from tests.common.example_resolvers import ExampleSecretsResolver, ExampleDIDResolver
ALICE_DID = "did:example:alice"
BOB_DID = "did:example:bob"
resolvers_config = ResolversConfig(
secrets_resolver=ExampleSecretsResolver(),
did_resolver=ExampleDIDResolver()
)
@pytest.mark.asyncio
async def test_demo_attachments():
# ALICE
attachment = Attachment(id="123",
data=AttachmentDataJson(
json={"foo": "bar"}
),
description="foo attachment",
mime_type="application/json")
message = Message(body={"aaa": 1, "bbb": 2},
id="1234567890", type="my-protocol/1.0",
frm=ALICE_DID, to=[BOB_DID],
created_time=1516269022, expires_time=1516385931,
attachments=[attachment])
pack_result = await pack_encrypted(message=message, frm=ALICE_DID, to=BOB_DID,
resolvers_config=resolvers_config)
packed_msg = pack_result.packed_msg
print(f"Sending ${packed_msg} to ${pack_result.service_metadata.service_endpoint}")
# BOB
unpack_result = await unpack(packed_msg,
resolvers_config=resolvers_config)
print(f"Got ${unpack_result.message}")
|
[
"noreply@github.com"
] |
DenisRybas.noreply@github.com
|
6f406af0ebe5605030000fd6e116b32ce2fc6ace
|
767318c4ddf2713a8a035aa3bf68cd8260409aa0
|
/user/views.py
|
133ddf546ee61d6d46fd72f919a732c56b5965ac
|
[] |
no_license
|
sag-coder/travelbooking
|
704573b145ca04587bbaf2415f4bbdb6ad50b26f
|
dfc482ca01d1be324aba900075b2a64dc2fd1d88
|
refs/heads/master
| 2023-06-11T23:22:44.114545
| 2021-07-10T23:47:37
| 2021-07-10T23:47:37
| 384,562,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 952
|
py
|
from django.shortcuts import redirect, render
from django.contrib.auth.models import User
# Create your views here.
def registration(request):
if request.method == 'POST':
u_name = request.POST['name']
u_email = request.POST['Email']
u_password = request.POST['password']
        user = User.objects.create_user(username=u_name, email=u_email, password=u_password)
        user.save()
        return redirect('/')
else:
return render(request, 'user_login.html')
##other process
# def add_user(request):
# user_name = request.POST['name']
# user_email = request.POST['Email']
# user_pass = request.POST['password']
# print(user_name)
# print(user_email)
# print(user_pass)
# user = User.objects.create_user(username=user_name, password = user_pass , email = user_email)
# user.save();
# return redirect('/')
|
[
"sagar@sagars-MacBook-Pro.local"
] |
sagar@sagars-MacBook-Pro.local
|
2e3b4717d69eea84a3a94a60133f762eca2a5b35
|
518a0639f9124f90acbd2f6a2d7b326bed94b74f
|
/yuyitos/proveedor/views.py
|
40658e82a342b3b539f0340e06bcba8e3916ded9
|
[] |
no_license
|
QuesoCaliente/Yuyitos
|
02bef4eaf796997446cd011de7715e1487755466
|
050c521bca8d3b518203bdd971397520b9b7273f
|
refs/heads/master
| 2023-01-14T03:20:19.696898
| 2020-11-14T21:23:37
| 2020-11-14T21:23:37
| 311,866,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,497
|
py
|
from django.shortcuts import render, redirect
from .forms import proveedorForm
from .models import Proveedor
# Create your views here.
def ProveedorView(request):
    if request.user.is_superuser and request.method == 'GET':
        proveedores = Proveedor.objects.all()
        return render(request, 'proveedores/lista_proveedores.html', {'proveedores': proveedores})
    else:
        return render(request, 'base/404.html', {})
def ProveedorCreateView(request):
if request.method == 'POST':
form = proveedorForm(request.POST)
if form.is_valid():
form.save()
return redirect('listarProveedores')
else:
form = proveedorForm()
return render(request, 'proveedores/crear.html', {'form': form})
def ProveedorEditView(request, proveedor_id):
proveedor = Proveedor.objects.get(id = proveedor_id)
if request.method == 'GET':
form = proveedorForm(instance=proveedor)
else:
form = proveedorForm(request.POST, instance=proveedor)
if form.is_valid():
form.save()
return redirect('listarProveedores')
return render(request, 'proveedores/crear.html', {'form': form} )
def ProveedorDeleteView(request, proveedor_id):
proveedor = Proveedor.objects.get(id = proveedor_id)
if request.method == 'POST':
proveedor.delete()
return redirect('listarProveedores')
return render(request, 'proveedores/eliminar.html', {'proveedor': proveedor})
|
[
"brianc.contacto@gmail.com"
] |
brianc.contacto@gmail.com
|
29a9842ac9f42c23747bdc22f5baf796550a87b4
|
4fa05e478a93e335937dcc0c818c24f88894756b
|
/mytest/mytest/settings.py
|
83096acd9bcc316683f184ecf85a115c9e578e35
|
[] |
no_license
|
Power098/scrapy
|
57bd60f6e02e14c41d077d4491c637beb57fa477
|
4ee791fb931ad2c15a28727ad841955f81572898
|
refs/heads/master
| 2023-02-08T11:01:39.772447
| 2019-01-19T04:54:00
| 2019-01-19T04:54:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,068
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for mytest project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'mytest'
SPIDER_MODULES = ['mytest.spiders']
NEWSPIDER_MODULE = 'mytest.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'mytest (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'mytest.middlewares.MytestSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'mytest.middlewares.MytestDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'mytest.pipelines.MytestPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"731102171@qq.com"
] |
731102171@qq.com
|
3039a726a53e46d69960dc5a985c38af2ab28ff0
|
31f8ba92d7fb4bb85bffa4e9680bd62562b40443
|
/harriet_ex03.py
|
38a3fde0541ad8cd77663c104cabcd911a45958f
|
[] |
no_license
|
hpellis/learnpythonthehardway
|
234dcf1632d3bde047d6f8bb1383f7a67777d334
|
61f49deb58e2f636132fea6b97d682c68efce3a3
|
refs/heads/master
| 2020-04-09T08:44:39.267484
| 2018-12-09T13:52:13
| 2018-12-09T13:52:13
| 160,206,537
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
print ("I will now count my chickens:")
print("Hens", 25+30/6)
print ("Roosters", 100-25*3%4)
print("Now I will count the eggs:")
print(3+2+1-5+4%2-1/4+6)
print("Is it true that 3+2 < 2-7?")
print (3 + 2 < 5 - 7)
print("What is 3+2?", 3+2)
print("What is 5-7?", 5-7)
print ("Oh, that's why it's False.")
|
[
"harriet.p.ellis@gmail.com"
] |
harriet.p.ellis@gmail.com
|
990f465d9b3d5cbc973d46bf7e267342d1e0da70
|
4b660991e5c9c93c83dccccdd3ea91531201e8a3
|
/DSA/stack/stock_span.py
|
68bfcc16d33dc9d68ee0b7ef5dc9ea8b56b4958d
|
[
"MIT"
] |
permissive
|
RohanMiraje/DSAwithPython
|
2a1515fa5f9e5cc76b08a3e6f0ce34e451fb6f4b
|
ea4884afcac9d6cc2817a93e918c829dd10cef5d
|
refs/heads/master
| 2022-09-24T08:57:04.695470
| 2021-10-21T01:06:06
| 2021-10-21T01:06:06
| 238,381,770
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 891
|
py
|
def naive_stock_span(array):
i = 0
j = 0
n = len(array)
while i < n:
j = i - 1
span = 1
while j >= 0:
if array[j] <= array[i]:
span += 1
else:
break
j -= 1
print(span, end=" ")
i += 1
def stock_span(array):
    """
    Stack-based O(n) stock span: for each day i, the span is the number of
    consecutive days ending at i with price <= array[i]. The stack keeps
    indices of the most recent prices that are greater than everything
    seen after them.
    :param array: list of daily prices
    :return: None (prints the spans separated by spaces)
    """
    n = len(array)
    stack = list()
    stack.append(0)
    print(1, end=" ")  # the first day's span is always 1
    i = 1
while i < n:
while len(stack) != 0 and array[stack[-1]] <= array[i]:
stack.pop()
span = i + 1 if len(stack) == 0 else i - stack[-1]
stack.append(i)
print(span, end=" ")
i += 1
if __name__ == '__main__':
    # from practise.arrays.template import get_random_array
    # arr = get_random_array(10, 1, 31)
    arr = [15, 13, 12, 17, 14, 16, 8, 6, 4, 10, 30]
    stock_span(arr)
    print()  # separate the two outputs
    naive_stock_span(arr)
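    # expected spans for arr (both implementations should agree):
    # 1 1 1 4 1 2 1 1 1 4 11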
|
[
"rohanmiraje19@gmail.com"
] |
rohanmiraje19@gmail.com
|
11d46a008f45fa1dc37f75dcd0ef7713244499e9
|
5aad5ca0071ca8fbf7572b3f439168c6c6dbe118
|
/proto_longlong.py
|
17b26c1fb78a6e71f7bc1966b1722ec231c3f374
|
[] |
no_license
|
kleskjr/assembly_sequences
|
72d024d1a59a7fd670db1d8d9735ad50fd6266c8
|
216a78eee7abffc460e8bda989d6f4a59e60c8e5
|
refs/heads/master
| 2021-01-20T12:55:19.400324
| 2017-05-05T23:59:11
| 2017-05-05T23:59:11
| 90,425,279
| 1
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 67,464
|
py
|
#import matplotlib
#matplotlib.use('Agg')
import numpy as np
import brian as bb
from brian import ms, second, Hz, mV, pA, nS, pF
#from np.random import rand,binomial
from time import time, asctime
import warnings
import nekvo
import sys
### some custom modules
import plotter
import calc_spikes
### some brian optimizations
# import brian_no_units
#bb.globalprefs.set_global_preferences(useweave=True)
#bb.globalprefs.set_global_preferences(usecodegen=True,
# usenewpropagate=True, usestdp=True)
g_l = 10.*nS
C_m = 200*pF
v_r = -60.*mV
v_e = 0.*mV
v_i = -80.*mV
tau_m_exc = 20.*ms
tau_m_inh = 20.*ms
tau_inh = 10*ms
tau_fast_inh = 10*ms
tau_exc = 5.*ms
tau_stdp = 20.*ms
alpha = .2
g_min = 0*nS
g_max = 50*nS
eqs_exc = '''dv/dt = (g_l*(v_r-v)+Ie+Ii+I)/(C_m) : volt
dge/dt = -ge/(tau_exc) : siemens
dgi/dt = -gi/(tau_inh) : siemens
Ie = ge*(v_e-v) : amp
Ii = gi*(v_i-v) : amp
I : amp '''
eqs_inh = '''dv/dt = (g_l*(v_r-v)+Ie+Ii+I)/(C_m) : volt
dge/dt = -ge/(tau_exc) : siemens
dgi/dt = -gi/(tau_inh) : siemens
Ie = ge*(v_e-v) : amp
Ii = gi*(v_i-v) : amp
I : amp '''
eq_stdp = '''dx_post/dt = -x_post/tau_stdp : 1 (event-driven)
dx_pre/dt = -x_pre/tau_stdp : 1 (event-driven)
w: siemens '''
eq_pre = '''gi+=w
w=clip(w+eta.eta*(x_post-alpha)*g_ei,g_min,g_max)
x_pre+=1'''
eq_post = '''w=clip(w+eta.eta*x_pre*g_ei,g_min,g_max)
x_post+=1'''
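# Reading of the plasticity rule above (my interpretation, not stated in this
# file): the pre/post traces implement an inhibitory STDP of the
# Vogels-Sprekeler type. Near-coincident pre/post spikes potentiate the I->E
# weight, while every presynaptic spike alone depresses it by eta*alpha*g_ei,
# pushing excitatory cells toward a low target rate
# (alpha/(2*tau_stdp) = 5 Hz for alpha=0.2, tau_stdp=20 ms under this reading).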
def if_else(condition, a, b):
if condition: return a
else: return b
class Pointless(object):
    '''a hack that allows changing the learning rate eta at runtime'''
pass
eta = Pointless()
eta.v = .001
eta.eta = 1.*eta.v
# defines an extra clock according to which some extra input currents
# can be injected;
# one can play with changing conductances etc...
"""
syn_input_freq=1.*Hz # frequency of current input oscillation
myclock = bb.Clock(dt=10*ms) # create an extra clock
@bb.network_operation(myclock)
def inject():
'''
Injects currents into neuronal populations...off by default
'''
if myclock.t>25000*ms:
nn.Pe.I= nn.ext_input+\
nn.Isine*(1.+0*np.sin(2*np.pi*myclock.t*syn_input_freq))
nn.Pi.I= nn.ext_input+\
nn.Isini*(1.+0*np.sin(2*np.pi*myclock.t*syn_input_freq))
"""
class Nets():
def __init__(self, Ne=10000, Ni=2500, cp_ee=.02, cp_ie=.02, cp_ei=.02,
cp_ii=.02, pr=.05, pf=.05, g_ee=0.19*nS, g_ie=0.2*nS, g_ei=1.0*nS,
g_ii=1.0*nS, n_ass=10, s_ass=500, n_chains=0, cf_ffn=1., cf_rec=1.,
type_ext_input='curr', ext_input=200*pA, synapses_per_nrn=250,
inject_some_extra_i=False, g_ff_coef=1,
symmetric_sequence=False, p_rev=0, extra_recorded_nrns=False,
limit_syn_numbers=False, continuous_ass=False,
use_random_conn_ff=False, modified_contin=False):
'''
Ne: number of excitatory neurons
        r_ie: ratio Ni/Ne
        cp_yx: connection probability from x to y
        if type_ext_input=='pois': ext_input={'N_p':10000, 'f_p':25,
                'coef_ep':1., 'sp':.02}
        !!!
        due to current limitations (g_ee is set once for all synapses,
        not per synapse), g_ff_coef can take only integer values; a
        strong synapse is emulated by stacking several normal ones!
'''
########################################################################
# define a bunch of consts
self.timestep = .1*ms # simulation time step
self.D = 2*ms # AP delay
self.m_ts = 1.*ms # monitors time step
if Ne>0:
self.r_ie = (Ni+.0)/Ne # ratio Ni/Ne
else:
self.r_ie=.0
self.Ne = Ne
self.Ni = Ni
self.N = self.Ne+self.Ni
# set some random connectivity for all E,I neurons
self.cp_ee = cp_ee
self.cp_ie = cp_ie
self.cp_ei = cp_ei
self.cp_ii = cp_ii
# conductances
self.g_ee = g_ee
self.g_ie = g_ie
self.g_ei = g_ei
self.g_ii = g_ii
self.g_max = g_max
self.g_ff_coef = int(g_ff_coef)
self.g_l = g_l
self.use_random_conn_ff = use_random_conn_ff
self.type_ext_input=type_ext_input
self.ext_input=ext_input
self.limit_syn_numbers = limit_syn_numbers
self.n_chains = n_chains
self.n_ass = n_ass # number of assemblies in the ffn/minimum 2
self.s_ass = s_ass # neurons in an assembly
self.s_assinh = int(self.s_ass*self.r_ie)
self.cf_ffn = cf_ffn # strength of ffn synaptic connections
self.cf_rec = cf_rec # strength of rec synaptic connections
# recurrent connection probabilities into a group
self.pr_ee = pr # e to e
self.pr_ie = pr # e to i
self.pr_ei = pr
self.pr_ii = pr
# FF connection probabilities
self.pf_ee = pf
self.pf_ie = 0#pf
self.pf_ei = 0#pf
self.pf_ii = 0#pf
# FB maybe?
self.symmetric_sequence= symmetric_sequence
self.continuous_ass = continuous_ass
self.synapses_per_nrn = synapses_per_nrn
self.modified_contin = modified_contin
self.sh_e = 0
self.sh_i = 0
########################################################################
# neurons and groups to measure from
self.nrn_meas_e =[]
self.nrn_meas_i = []
# neuron groups for spike time measure (for cv and ff)
if True:
self.nrngrp_meas = [0, 5, self.n_ass-1]
self.n_spikeM_gr = min(50, int(self.s_ass))
# temporal recording from ps neurons
self.nrn_meas_e.append(0*self.s_ass)
self.nrn_meas_e.append(1*self.s_ass)
self.nrn_meas_e.append(2*self.s_ass)
self.nrn_meas_e.append(3*self.s_ass)
self.nrn_meas_e.append((self.n_ass-1)*self.s_ass-1)
self.nrn_meas_e.append((self.n_ass-1)*self.s_ass+1)
self.nrn_meas_e.append((self.n_ass)*self.s_ass-1)
# put a few neurons to measure for F2 plots
for i in range(50):
self.nrn_meas_e.append((self.n_ass)*self.s_ass-50-i)
self.nrn_meas_i.append(1*self.s_assinh-1)
self.nrn_meas_e.append(self.Ne-1)
self.nrn_meas_i.append(self.Ni-1)
if extra_recorded_nrns:
# record extra all nrns in second, last assembly and random nrns
for i in range(self.s_ass):
self.nrn_meas_e.append(1*self.s_ass+i)
for i in range(self.s_ass):
self.nrn_meas_e.append((self.n_ass-1)*self.s_ass+i)
for i in range(self.s_ass):
self.nrn_meas_e.append(self.n_ass*self.s_ass+i)
self.p_ass = []
self.p_assinh = []
self.p_ass_index = []
self.p_assinh_index = []
self.dummy_ass_index = [] # index of non-PS neurons, size is s_ass
# then function to apply them (later)
self.dummy_group=[]
self.C_ed = []
self.inject_some_extra_i = inject_some_extra_i
self.p_rev = p_rev
# define variables..needed??
self.weights = []
self.create_net()
print 'inited ', asctime()
def create_net(self):
''' create a network with and connect it'''
self.network = bb.Network()
self.network.clock = bb.Clock(dt=self.timestep)
# create a couple of groups
self.Pe = bb.NeuronGroup(self.Ne, eqs_exc, threshold=-50*mV,
reset=-60*mV, refractory=2.*ms)
self.Pi = bb.NeuronGroup(self.Ni, eqs_inh, threshold=-50*mV,
reset=-60*mV, refractory=2.*ms)
self.Pe.v = (-65 + 15*np.random.rand(self.Ne))*mV
self.Pi.v = (-65 + 15*np.random.rand(self.Ni))*mV
self.network.add(self.Pe, self.Pi)
if self.inject_some_extra_i:
self.network.add(inject)
if self.type_ext_input=='curr':
self.set_in_curr([self.Pe,self.Pi])
elif self.type_ext_input=='pois':
# apparently now works only with curr
self.set_in_curr([self.Pe,self.Pi])
else:
print 'no input, sure about it?'
self.C_ee=bb.Synapses(self.Pe,self.Pe,model='w:siemens',pre='ge+=w')
self.C_ie=bb.Synapses(self.Pe,self.Pi,model='w:siemens',pre='ge+=w')
self.C_ii=bb.Synapses(self.Pi,self.Pi,model='w:siemens',pre='gi+=w')
stdp_on = True
if stdp_on:
namespace={'exp':np.exp,'clip':np.clip,'g_ei':self.g_ei}
self.C_ei = bb.Synapses(self.Pi,self.Pe,
model= eq_stdp, pre=eq_pre, post=eq_post,
code_namespace=namespace)
else:
self.C_ei = bb.Synapses(self.Pi, self.Pe,
model='w:siemens', pre='gi+=w')
    def generate_ps_assemblies(self, ass_randomness='gen_no_overlap'):
        '''
        generates assemblies of random neurons;
        a neuron can lie in several groups, but only once in the same group
        ass_randomness : how to pick the neurons
                gen_ordered : ordered assemblies
                gen_no_overlap : random assemblies, no overlap
                gen_ass_overlap : random assemblies with overlap
                gen_random : totally random choice of neurons
        '''
def gen_ordered():
'''
Generate n assemblies where neurons are ordered
sh_e, sh_i : shift of e/i neurons (by default order starts at 0)
'''
if self.n_chains:
self.sh_e += sa_e*self.n_ass
self.sh_i += sa_i*self.n_ass
nrn_e = np.arange(self.sh_e, self.Ne)
nrn_i = np.arange(self.sh_i, self.Ni)
p_ind_e= [nrn_e[n*sa_e:(n+1)*sa_e] for n in range(self.n_ass)]
p_ind_i= [nrn_i[n*sa_i:(n+1)*sa_i] for n in range(self.n_ass)]
print 'An ordered sequence is created'
return p_ind_e, p_ind_i
def gen_no_overlap():
'''
Generate n assemblies with random neurons
no repetition of a neuron is allowed
'''
nrn_perm_e = np.random.permutation(self.Ne)
nrn_perm_i = np.random.permutation(self.Ni)
p_ind_e= [nrn_perm_e[n*sa_e:(n+1)*sa_e] for n in range(self.n_ass)]
p_ind_i= [nrn_perm_i[n*sa_i:(n+1)*sa_i] for n in range(self.n_ass)]
print 'A random sequence without overlaps is created'
return p_ind_e, p_ind_i
        def gen_ass_overlap():
            '''
            Generate n assemblies with random neurons;
            repetition of a neuron across different groups is allowed
            '''
# permuate and pick the first s_ass elements..
p_ind_e = [np.random.permutation(self.Ne)[:sa_e]
for n in range(self.n_ass)]
p_ind_i = [np.random.permutation(self.Ni)[:sa_i]
for n in range(self.n_ass)]
print 'A random sequence without repetition in a group is created'
return p_ind_e, p_ind_i
        def gen_random():
            '''
            Generate n assemblies with random neurons; repetitions
            within a group are allowed
            '''
p_ind_e = np.random.randint(self.Ne,size=(self.n_ass,sa_e))
p_ind_i = np.random.randint(self.Ni,size=(self.n_ass,sa_i))
print 'A sequence with completely random neurons is created'
return p_ind_e, p_ind_i
def gen_dummy():
dum = []
indexes_flatten = np.array(p_ind_e).flatten()
# not to generate a random number for each neurons
permutated_numbers = np.random.permutation(self.Ne)
dum_size= 0
for nrn in permutated_numbers:
if nrn not in indexes_flatten:
dum.append(nrn)
dum_size+=1
if dum_size>=self.s_ass:
break
return dum
sa_e, sa_i = self.s_ass, self.s_assinh # to use shorter names
p_ind_e, p_ind_i = eval(ass_randomness)()
self.p_ass_index.append(p_ind_e)
self.p_assinh_index.append(p_ind_i)
self.dummy_ass_index.append(gen_dummy())
self.n_chains += 1
def set_net_connectivity(self):
'''sets connections in the network'''
def create_random_matrix(pre_nrns, post_nrns, p, pre_is_post=True):
'''
creates random connections between 2 populations of size
pre_nrns and post_nrns (population sizes)
might be slow but allows us to edit the connectivity matrix
before throwing it into the ruthless synapse class
ith element consists of the postsynaptic connection of ith nrn
pre_is_post : flag that prevents a neuron to connect to itself
if set to True
'''
conn_mat = []
for i in range(pre_nrns):
conn_nrn = list(np.arange(post_nrns)\
[np.random.random(post_nrns)<p])
if i in conn_nrn and pre_is_post: # no autosynapses
conn_nrn.remove(i)
conn_mat.append(conn_nrn)
return conn_mat
def make_connections_discrete():
for n_ch in range(self.n_chains): # iterate over sequences
p_index = self.p_ass_index[n_ch]
p_indexinh = self.p_assinh_index[n_ch]
# iterate over the assemblies in the PS
for n_gr in range(len(p_indexinh)):
# iterate over E neurons in a group
for p1 in p_index[n_gr]:
# E to E recurrent
p1_post = list(p_index[n_gr][
np.random.random(len(p_index[n_gr]))<self.pr_ee])
if p1 in p1_post: # no autosynapse
p1_post.remove(p1)
if remove_old_conn_flag_ee:
cee[p1] = cee[p1][len(p1_post):]
if p1<5:
print n_gr, p1, len(p1_post)
cee[p1].extend(p1_post)
# E to E feedforward
if n_gr<self.n_ass-1: # in case it's the last group
###################################################
# flag for using the random connections for ff
# instead of embedding new ff synapses, strengthen
# the background connections proportionally
use_random_conn_ff = False
if use_random_conn_ff:
p1_post = np.intersect1d(cee[p1],
p_index[n_gr+1])
for i in range(int(self.pf_ee/self.cp_ee)):
cee[p1].extend(p1_post)
#check for postsynaptic partners of p1 in cee
# do the same synapses pff/r_rand times?
pass
else:
for i in range(self.g_ff_coef):
p1_post = list(p_index[n_gr+1]
[np.random.random(len(p_index[n_gr+1]))
<self.pf_ee])
if p1 in p1_post: # no autosynapse
p1_post.remove(p1)
if remove_old_conn_flag_ee:
cee[p1] = cee[p1][len(p1_post):]
if p1<5:
print n_gr, p1, len(p1_post)
cee[p1].extend(p1_post)
# E to E reverse
if self.symmetric_sequence:
if n_gr: # in case it's first group
p1_post = list(p_index[n_gr-1][
np.random.random(len(p_index[n_gr-1])) < \
self.p_rev])
if p1 in p1_post: # no autosynapse
p1_post.remove(p1)
if remove_old_conn_flag_ee:
cee[p1] = cee[p1][len(p1_post):]
cee[p1].extend(p1_post)
# E to I recurrent
p1_post = list(p_indexinh[n_gr][
np.random.random(len(p_indexinh[n_gr]))<self.pr_ie])
if remove_old_conn_flag:
cie[p1] = cie[p1][len(p1_post):]
cie[p1].extend(p1_post)
#pr_ii = self.pr_ii/3
for i1 in p_indexinh[n_gr]:
# I to I recurrent
i1_post = list(p_indexinh[n_gr][
np.random.random(len(p_indexinh[n_gr]))<self.pr_ii])
#np.random.random(len(p_indexinh[n_gr]))<pr_ii])
if i1 in i1_post: # no autosynapse
i1_post.remove(i1)
if remove_old_conn_flag:
cii[i1] = cii[i1][len(i1_post):]
cii[i1].extend(i1_post)
# I to E recurrent
i1_post = list(p_index[n_gr][
np.random.random(len(p_index[n_gr]))<self.pr_ei])
if remove_old_conn_flag:
cei[i1] = cei[i1][len(i1_post):]
cei[i1].extend(i1_post)
            return cee, cie, cei, cii
def make_connections_continuous():
#def find_post(p_ind, i, hw, pr):
def find_post(p_ind, i, ran_be, ran_af, pr):
'''
hw stands for half width (M/2) normally 250 neurons
range variables specify the range of connectivity from
neuron i,i.e., to how many neurons will neuron i project
ran_be: range before neuron
ran_af: range after
'''
# rns from first group will have higher rc connection to
# the following half group
if i < ran_be:
#pr_n = 2.*hw/(hw+i)*pr
                    pr_n = (ran_be+ran_af+0.)/(ran_af+i)*pr  # force float division under Python 2
p1_post = p_ind[0:i+ran_af][\
np.random.random(i+ran_af)<pr_n]
# last neurons also need some special care to connect
elif i > len(p_ind) - ran_af:
#pr_n = 2.*hw/(hw+len(p_ind)-i-1)*pr
pr_n = pr*(ran_be+ran_af)/(ran_af+len(p_ind)-i-1)
p1_post = p_ind[i-ran_be:][\
np.random.random(len(p_ind)-i+ran_be)<pr_n]
print 'aa', len(p_ind), i, ran_be, ran_af, pr_n
print len(p_ind[i-ran_be:]), len(p_ind)-i+ran_be
# most neurons are happy
else:
pr_n = pr
p1_post = p_ind[i-ran_be:i+ran_af][
np.random.random(ran_be+ran_af)<pr_n]
return p1_post
for n_ch in range(self.n_chains): # iterate over sequences
p_index = np.array(self.p_ass_index[n_ch]).flatten()
p_indexinh = np.array(self.p_assinh_index[n_ch]).flatten()
ran_be = 1*self.s_ass/2 # here positive means before..to fix!
ran_af = 1*self.s_ass/2
ran_be_i = self.s_assinh/2+1
ran_af_i = self.s_assinh/2+1
if self.modified_contin:
ran_ff_start = 1*self.s_ass/2
ran_ff_end = 3*self.s_ass/2
# iterate over the assemblies in the PS
for i, p1 in enumerate(p_index):
# E-to-E recurrent
p1_post = find_post(p_index, i, ran_be, ran_af, self.pr_ee)
#if p1 in p1_post: # no autosynapse
#p1_post = list(p1_post).remove(p1)
cee[p1].extend(p1_post)
# E-to-I recurrent
p1_post = find_post(p_indexinh, i/4, ran_be_i, ran_af_i,
self.pr_ie)
cie[p1].extend(p1_post)
# E-to-E feedforward
if i < len(p_index)-ran_ff_end:
p1_post = p_index[i+ran_ff_start:i+ran_ff_end][
np.random.random(ran_ff_end-ran_ff_start)
< self.pf_ee]
# here not to miss connections to the last group
else:
p1_post = p_index[i:len(p_index)][
np.random.random(len(p_index)-i)<self.pf_ee]
cee[p1].extend(p1_post)
for i, i1 in enumerate(p_indexinh):
# I-to-E recurrent
i1_post = find_post(p_index, 4*i,
ran_be, ran_af, self.pr_ei)
cei[i1].extend(i1_post)
# I-to-I recurrent
i1_post = find_post(p_indexinh, i, ran_be_i, ran_af_i,
self.pr_ii)
#if i1 in i1_post: # no autosynapse
#i1_post = list(i1_post).remove(i1)
cii[i1].extend(i1_post)
def apply_connection_matrix(S, conn_mat, f_ee=False):
'''
creates the synapses by applying conn_mat connectivity matrix
to the synaptic class S
basically does the following but fast!
for i, conn_nrn in enumerate(conn_mat):
for j in conn_nrn:
S[i,j]=True
f_ee is a flag indicating e-e connections
'''
presynaptic, postsynaptic = [], []
synapses_pre = {}
nsynapses = 0
for i in range(len(conn_mat)):
conn_nrn = conn_mat[i]
k1 = len(conn_nrn)
# too connected? get rid of older synapses
if self.limit_syn_numbers and f_ee and (k1>self.synapses_per_nrn):
#conn_nrn = conn_nrn[self.synapses_per_nrn:] # simply cut!
x = max(self.synapses_per_nrn, k1-self.synapses_per_nrn)
conn_nrn = conn_nrn[-x:] # simply cut!
'''
# some exponential forgeting of old synapses
tau = (k1-self.synapses_per_nrn)/2.
conn_nrn = np.array(conn_nrn)[\
np.exp(-np.arange(k1)/tau)<np.random.random(k1)]
'''
k = len(conn_nrn) # new number of postsynaptic connections
# just print to keep an eye on what's going on
#if i<20:
#print '# synpapses before and after ', k1,k
if k:
synapses_pre[i] = nsynapses + np.arange(k)
presynaptic.append(i*np.ones(k, dtype=int))
postsynaptic.append(conn_nrn)
nsynapses += k
presynaptic = np.hstack(presynaptic)
postsynaptic = np.hstack(postsynaptic)
S.create_synapses(presynaptic, postsynaptic, synapses_pre)
# creates randomly connected matrices
cee = create_random_matrix(self.Ne, self.Ne, self.cp_ee, True)
cie = create_random_matrix(self.Ne, self.Ni, self.cp_ie, False)
cei = create_random_matrix(self.Ni, self.Ne, self.cp_ei, False)
cii = create_random_matrix(self.Ni, self.Ni, self.cp_ii, True)
# seems that these 2 flags are outdated and unusable; can't bother to
# remove them now
remove_old_conn_flag_ee = False
remove_old_conn_flag = False
########################################################################
### now imprint PS
########################################################################
if self.continuous_ass:
make_connections_continuous()
else:
make_connections_discrete()
apply_connection_matrix(self.C_ee, cee, True)
apply_connection_matrix(self.C_ie, cie)
apply_connection_matrix(self.C_ei, cei)
apply_connection_matrix(self.C_ii, cii)
self.C_ee.w = self.g_ee
self.C_ie.w = self.g_ie
self.C_ei.w = self.g_ei
self.C_ii.w = self.g_ii
self.C_ee.delay = self.D
self.C_ie.delay = self.D
self.C_ei.delay = self.D
self.C_ii.delay = self.D
self.network.add(self.C_ee)
self.network.add(self.C_ie)
self.network.add(self.C_ei)
self.network.add(self.C_ii)
self.weights.append(self.C_ei.w.data.copy()) #save weights
print 'connections imprinted! ', asctime()
def boost_pff(self, pf_ee_new):
'''
        creates a new connectivity matrix and applies it, adding
        feedforward connections after some simulation time has passed
'''
def get_disc_conn():
conn_mat = [[] for i in range(self.Ne)]
# E to E feedforward
for ch in range(self.n_chains):
p_index = self.p_ass_index[ch]
for gr in range(self.n_ass-1):
for p1 in p_index[gr]:
p1_post = list(p_index[gr+1]
[np.random.random(len(p_index[gr+1])) \
< self.pf_ee_new])
conn_mat[p1].extend(p1_post)
return conn_mat
def get_cont_conn():
conn_mat = [[] for i in range(self.Ne)]
if self.modified_contin:
ran_ff_start = 1*self.s_ass/2
ran_ff_end = 3*self.s_ass/2
for ch in range(self.n_chains):
p_index = np.array(self.p_ass_index[ch]).flatten()
for i, p1 in enumerate(p_index):
# E-to-E feedforward
if self.modified_contin:
if i < len(p_index)-ran_ff_end:
p1_post = p_index[i+ran_ff_start:i+ran_ff_end][
np.random.random(ran_ff_end-ran_ff_start)
<self.pf_ee_new]
# here not to miss connections to the last group
elif i < len(p_index)-ran_ff_start:
p1_post = p_index[i+ran_ff_start:len(p_index)][
np.random.random(len(p_index)-i-ran_ff_start)
<self.pf_ee_new]
else:
p1_post=[]
else:
if i < len(p_index)-self.s_ass:
p1_post = p_index[i:i+self.s_ass][
np.random.random(self.s_ass)<self.pf_ee_new]
# here not to miss connections to the last group
else:
p1_post = p_index[i:len(p_index)][
np.random.random(len(p_index)-i)<self.pf_ee_new]
conn_mat[p1].extend(p1_post)
return conn_mat
def get_rand_boost():
ex_pre = np.array(self.C_ee.presynaptic)
ex_post = np.array(self.C_ee.postsynaptic)
conn_mat = [[] for i in range(self.Ne)]
for ch in range(self.n_chains):
p_index = self.p_ass_index[ch]
for gr in range(self.n_ass-1):
for p1 in p_index[gr]:
p1_ex_post = ex_post[ex_pre==p1]
p1_post = np.intersect1d(
self.p_ass_index[0][gr+1], p1_ex_post)
for i in range(int(self.pf_ee_new/self.cp_ee)):
conn_mat[p1].extend(p1_post)
if not gr and not p1:
print p1, p1_post
print
#1/0
return conn_mat
self.pf_ee_new = pf_ee_new
self.C_ee_ff = bb.Synapses(self.Pe, self.Pe,
model='w:siemens', pre='ge+=w')
if self.continuous_ass:
conn_mat = get_cont_conn()
else:
if self.use_random_conn_ff:
conn_mat = get_rand_boost()
else:
conn_mat = get_disc_conn()
presynaptic, postsynaptic = [], []
synapses_pre = {}
nsynapses = 0
for i in range(len(conn_mat)):
conn_nrn = conn_mat[i]
k = len(conn_nrn) # new number of postsynaptic connections
if k:
synapses_pre[i] = nsynapses + np.arange(k)
presynaptic.append(i*np.ones(k, dtype=int))
postsynaptic.append(conn_nrn)
nsynapses += k
presynaptic = np.hstack(presynaptic)
postsynaptic = np.hstack(postsynaptic)
self.C_ee_ff.create_synapses(presynaptic, postsynaptic, synapses_pre)
self.C_ee_ff.w = self.g_ee
self.C_ee_ff.delay = self.D
self.network.add(self.C_ee_ff)
print 'pff boosted!'
def balance(self, bal_time=2*second, eta_c=1.):
"""
balancing function: runs the network for bal_time and:
1) sets the learning rate to eta
2) !!! switches off the spike recorder (ap_record = False)
"""
t0 = time()
eta.eta = eta.v*eta_c
self.network.run(bal_time)
# save weights after each balance
self.weights.append(self.C_ei.w.data.copy())
eta.eta = 0.0
t1 = time()
print 'balanced: ', t1-t0
def run_sim(self, run_time= 1*second):
""" runs the network for run_time with I plasticity turned off"""
t0 = time()
eta.eta = 0.0
self.network.run(run_time)
t1 = time()
print 'run: ', t1-t0
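    # Typical protocol (inferred from run_full_sim/dummy below; an assumption):
    # alternate balance() phases with decreasing learning rates, e.g. eta_c in
    # [5, 1, .1, .01] over the durations in sim_times['balance_dur'], then call
    # run_sim() so that test stimulation runs with the I->E plasticity frozen.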
def set_in_curr(self, target, ext_input=None):
""" ce,ci currents injected in E/I populations"""
        if ext_input is None:
ext_input = self.ext_input
for t in target:
t.I = ext_input
def set_in_poisson(self, target):
"""
Set poissonian input to a group of neurons
        target: list of target groups
        N_p: number of Poisson inputs
        f_p: firing rate of each Poisson input
        sp: sparseness of connections
        coef_ep: factor of ep conductance to g_exc
"""
## somehow PoissonInput is way slower! also leads to diff behaviour
#for gr in target:
#inp_poisson = bb.PoissonInput(gr,N=100,rate=f_p,
#weight=2.*self.g_ee,state='ge')
#self.network.add(inp_poisson)
N_p=self.ext_input['N_p']
f_p=self.ext_input['f_p']
sp=self.ext_input['sp']
coef_ep=self.ext_input['coef_ep']
self.P_poisson= bb.PoissonGroup(N_p,f_p,self.network.clock)
self.network.add(self.P_poisson)
for gr in target:
#Cep = bb.Connection(self.P_poisson, gr,'ge',
# weight=coef_ep*self.g_ee, sparseness=sp)
Cep= bb.Synapses(self.P_poisson,gr,model='w:siemens',pre='ge+=w')
Cep.connect_random(self.P_poisson,gr,sparseness=sp)
Cep.w=coef_ep*self.g_ee
self.network.add(Cep)
def set_syn_input(self, target, time):
'''adding sync inputs at some time points'''
ext_in = bb.SpikeGeneratorGroup(1,[(0, time)],self.network.clock)
C_syne= bb.Synapses(ext_in,target,model='w:siemens',pre='ge+=w')
C_syne.connect_random(ext_in,target,sparseness=1.)
C_syne.w=30.*self.g_ee
self.network.add(ext_in, C_syne)
def set_syn_input_ran(self, target, time):
'''adding sync inputs at some time points'''
ext_in = bb.SpikeGeneratorGroup(1,[(0, time)],self.network.clock)
C_syne= bb.Synapses(ext_in,self.Pe,model='w:siemens',pre='ge+=w')
for n in target:
C_syne.connect_random(ext_in,self.Pe[n],sparseness=1.)
C_syne.w=30.*self.g_ee
self.network.add(ext_in, C_syne)
def set_noisy_input(self, target, time, sigma=0., mcoef=30):
'''adding sync inputs at some time points with
normal jitter distribution sigma
mcoef is the strength of stimulation
'''
#print time, sigma
t0 = time - 6.*sigma # mean delay is set to 6*sigma
ext_in = bb.SpikeGeneratorGroup(1, [(0, t0)], self.network.clock)
C_syne = bb.Synapses(ext_in, self.Pe, model='w:siemens', pre='ge+=w')
for n in target:
C_syne.connect_random(ext_in, self.Pe[n], sparseness=1.)
C_syne.w = mcoef * self.g_ee
#C_syne.delay=np.random.uniform(0,sigma,len(target))
if sigma > 0.:
C_syne.delay = np.random.normal(6.*sigma, sigma, len(target))
else:
C_syne.delay = np.zeros(len(target))
self.network.add(ext_in, C_syne)
    def attach_dummy_group(self, pf=.06):
        '''attach an external group that projects onto the first assembly
        of the first sequence with connection probability pf'''
        self.dummy_group = bb.NeuronGroup(500, eqs_exc, threshold=-50*mV,
                                reset=-60*mV, refractory=2.*ms)
        self.C_ed = bb.Synapses(self.dummy_group, self.Pe,
                                model='w:siemens', pre='ge+=w')
        for p1 in range(len(self.dummy_group)):
            for p2 in self.p_ass_index[0][0]:
                if np.random.random() < pf:
                    self.C_ed[p1, p2] = True
        print 'dummy group attached'
        #nn.C_ed.connect_random(nn.dummy_group,nn.p_ass_index[0][0],sparseness=pf)
        self.C_ed.w = self.g_ee
        self.C_ed.delay = self.D
        self.network.add(self.dummy_group, self.C_ed)
def set_rate_monitor(self):
"""yep"""
self.mon_rate_e = bb.PopulationRateMonitor(self.Pe, bin = self.m_ts)
self.mon_rate_i = bb.PopulationRateMonitor(self.Pi, bin = self.m_ts)
self.network.add(self.mon_rate_e, self.mon_rate_i)
def set_spike_monitor(self):
"""yep"""
self.mon_spike_e = bb.SpikeMonitor(self.Pe)
self.mon_spike_i = bb.SpikeMonitor(self.Pi)
self.network.add(self.mon_spike_e, self.mon_spike_i)
def set_group_spike_monitor(self, ch=0):
"""
!!!
this would not work with random assemblies
to be removed in the future
"""
self.mon_spike_sngl = [] # measure spike times from a few single neurons
for nrn in self.nrn_meas_e:
self.mon_spike_sngl.append(bb.SpikeMonitor(self.Pe[nrn]))
self.network.add(self.mon_spike_sngl)
self.mon_spike_gr = [] # measure spike times from groups (for CV and FF)
for gr in self.nrngrp_meas:
self.mon_spike_gr.append(bb.SpikeMonitor(
self.p_ass[ch][gr][0:self.n_spikeM_gr]))
# also control group of neurons which is not included in the ps
self.mon_spike_gr.append(bb.SpikeMonitor(\
self.Pe[self.n_ass*self.s_ass:(self.n_ass+1)*self.s_ass]
[0:self.n_spikeM_gr]))
self.network.add(self.mon_spike_gr)
        # spike recording is off by default
for sp in self.mon_spike_gr:
sp.record = False
def set_voltage_monitor(self):
"""yep"""
self.mon_volt_e = bb.StateMonitor(self.Pe, 'v', record=self.nrn_meas_e)
self.mon_volt_i = bb.StateMonitor(self.Pi, 'v', record=self.nrn_meas_i)
self.network.add(self.mon_volt_e ,self.mon_volt_i)
def set_conductance_monitor(self):
"""yep"""
self.mon_econd_e= bb.StateMonitor(self.Pe,'ge',record=self.nrn_meas_e)
self.mon_icond_e= bb.StateMonitor(self.Pe,'gi',record=self.nrn_meas_e)
self.mon_econd_i= bb.StateMonitor(self.Pi,'ge',record=self.nrn_meas_i)
self.mon_icond_i= bb.StateMonitor(self.Pi,'gi',record=self.nrn_meas_i)
self.network.add(self.mon_econd_e, self.mon_icond_e,
self.mon_econd_i ,self.mon_icond_i)
def set_current_monitor(self):
"""yep"""
self.mon_ecurr_e= bb.StateMonitor(self.Pe, 'Ie', record=self.nrn_meas_e)
self.mon_icurr_e= bb.StateMonitor(self.Pe, 'Ii', record=self.nrn_meas_e)
self.mon_ecurr_i= bb.StateMonitor(self.Pi, 'Ie', record=self.nrn_meas_i)
self.mon_icurr_i= bb.StateMonitor(self.Pi, 'Ii', record=self.nrn_meas_i)
self.network.add(self.mon_ecurr_e ,self.mon_icurr_e,
self.mon_ecurr_i,self.mon_icurr_i)
def run_full_sim(self, sim_times):
self.generate_ordered_ps()
self.set_ffchain_new()
self.set_rate_monitor()
self.set_group_spike_monitor()
#self.set_voltage_monitor()
#self.set_current_monitor()
stim_times=np.arange(sim_times['start_sim'],sim_times['stop_sim'],1)
for t in stim_times:
self.set_syn_input(self.p_ass[0][0],t*second)
        # stimulation with a cue (only part of the first group is stimulated)
        for que in [80,60,40,20]:
            start_que = sim_times['start_sim'+str(que)]
            stop_que = sim_times['stop_sim'+str(que)]
            que_res = que/100.  # 80, 60, 40 or 20% of the population
for t in range(start_que, stop_que):
n_sim_nrn = int(que_res*self.s_ass)
self.set_syn_input(self.p_ass[0][0][0:n_sim_nrn],t*second)
# set balance times with corresponding learning rates
t0=0
for t,r in zip(sim_times['balance_dur'],sim_times['balance_rate']):
self.balance((t-t0)*second,r)
t0=t
# run the simulations
self.run_sim((sim_times['stop_sim20']-sim_times['start_sim'])*second)
# turn on the group spike monitor
for sp in self.mon_spike_gr:
sp.record = True
# run for spontan activity
self.run_sim((sim_times['stop_spont_recording']-
sim_times['stop_sim20'])*second)
def dummy(self):
sim_times={}
#sim_times['balance_dur']=[10,20,25,35]
sim_times['balance_dur']=[10,15,20,25]
sim_times['balance_rate']=[5,1,.1,.01]
sim_times['start_sim']=16
sim_times['stop_sim']=20
sim_times['start_sim80']=20
sim_times['stop_sim80']=20
sim_times['start_sim60']=20
sim_times['stop_sim60']=20
sim_times['start_sim40']=20
sim_times['stop_sim40']=20
sim_times['start_sim20']=20
sim_times['stop_sim20']=22
sim_times['start_fr_recording']=16
sim_times['stop_fr_recording']=25
sim_times['start_spont_recording']=sim_times['stop_sim20']
sim_times['stop_spont_recording']=25
self.set_rate_monitor()
self.set_spike_monitor()
self.set_voltage_monitor()
self.set_current_monitor()
self.set_conductance_monitor()
self.run_full_sim(sim_times)
def plot_for_raster_curr_volt(self):
num_ps = 1
for n in range(num_ps):
self.generate_ps_assemblies('gen_ass_overlap')
self.set_net_connectivity()
self.set_spike_monitor()
self.set_rate_monitor()
'''
gr = self.p_ass_index[0][0]
self.set_noisy_input(gr,.5*second,sigma=0*ms)
#gr1 = self.p_ass_index[1][0]
#self.set_noisy_input(gr1,.7*second,sigma=0*ms)
self.balance(1.*second,5.)
'''
t0 = 30 # time offset for stimulation in secs
n_stim = 5
for n in range(num_ps):
for i in range(n_stim):
#gr = self.p_ass_index[n][0]
gr_num = int(self.n_ass/5.*i)
print 'stim to ', gr_num
gr = self.p_ass_index[n][gr_num]
t = (t0 + n + i*3)*second
self.set_noisy_input(gr,t,sigma=0*ms)
self.balance(10*second, 5.)
self.balance(10*second, 1.)
self.balance(5*second, .1)
self.balance(5*second, .01)
#self.run_sim((2*num_ps+2)*second)
self.run_sim(16*second)
for n in range(num_ps):
figure = plt.figure(figsize=(12., 8.))
plotter.plot_ps_raster(self, chain_n=n, frac=.01, figure=figure)
#plotter.plot_pop_raster(self,False)
#plt.xlim([19000,22000])
#plt.savefig('xxx'+ str(pr)+'_'+str(pf)+'.png')
#plotter.show()
def test_shifts(self, ie, ii, tr):
self.generate_ps_assemblies('gen_no_overlap')
self.set_net_connectivity()
self.set_spike_monitor()
self.set_rate_monitor()
#ie, ii = 0,3
self.Isine=ie*pA
self.Isini=ii*pA
self.network.add(inject)
gr = self.p_ass_index[0][0]
for i in range(9):
t = (21+i)*second
self.set_noisy_input(gr,t,sigma=0*ms)
'''
self.balance(.1*second,5.)
'''
self.balance(5*second,5.)
self.balance(5*second,1.)
self.balance(5*second,.1)
self.balance(5*second,.01)
self.run_sim(10*second)
pr, pf = self.pr_ee, self.pf_ee
figure = plt.figure(figsize=(12.,8.))
plotter.plot_ps_raster(self,chain_n=0,frac=.1,figure=figure)
#plt.xlim([6800,8300])
'''
plt.title('')
plt.xlabel('')
plt.ylabel('')
plt.xticks([])
plt.yticks([])
'''
        # save the spike data into a file for later reading
spikes_e = [self.mon_spike_e[nrn][self.mon_spike_e[nrn] > 20.]
for gr in self.p_ass_index[0] for nrn in gr]
#fname = '../data/dynamic_switch/pr05pf05aie1ii-1pA_20_25_30sec'
#np.savez(fname,self.p_ass_index, spikes_e)
#prefix = '../data/evoked_replay_extra_currents/'
#fname = str(pr)+'_'+str(pf)+'ie'+str(ie)+'ii'+str(ii)+'_'+str(tr)
#np.savez(prefix+fname,self.p_ass_index, spikes_e)
'''
plt.savefig(prefix+fname+'.png')
plt.savefig(prefix+fname+'.pdf')
'''
def stim_curr(self, ps=0, gr=0, dur_stim=100, dur_relx=400,
curr=10*pA):
'''
stimulate group gr in ps with a continuous current
'''
for nrn in self.p_ass_index[ps][gr]:
self.Pe[nrn].I += curr
self.run_sim(dur_stim*ms)
for nrn in self.p_ass_index[ps][gr]:
self.Pe[nrn].I -= curr
self.run_sim(dur_relx*ms)
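# A hedged usage sketch of the class above (parameter values copied from the
# test_* drivers below, nothing else is assumed): build a network, balance it,
# then drive one assembly group with a constant current via stim_curr().
#
#   nn = Nets(Ne=20000, Ni=5000, cp_ee=.01, cp_ie=.01, cp_ei=0.01, cp_ii=.01,
#             n_ass=10, s_ass=500, pr=.06, pf=.06,
#             g_ee=0.1*nS, g_ie=0.1*nS, g_ei=0.4*nS, g_ii=0.4*nS)
#   nn.generate_ps_assemblies('gen_ordered')
#   nn.set_net_connectivity()
#   nn.balance(5*second, 5.)
#   nn.stim_curr(ps=0, gr=0, dur_stim=100, dur_relx=400, curr=10*pA)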
def test_symm():
from matplotlib import pyplot as plt
nn=Nets(Ne=20000, Ni=5000, cp_ee=.01, cp_ie=.01, cp_ei=0.01, cp_ii=.01,
n_ass=10, s_ass=500, pr=.15, pf=.03, symmetric_sequence=True, p_rev=.03,
g_ee=0.1*nS, g_ie=0.1*nS, g_ei=0.4*nS, g_ii=0.4*nS)
nn.generate_ps_assemblies('gen_no_overlap')
nn.set_net_connectivity()
nn.set_spike_monitor()
nn.set_rate_monitor()
'''
gr = nn.p_ass_index[0][0]
t = 20*second
nn.set_noisy_input(gr,t,sigma=0*ms)
t = 20.5*second
nn.set_noisy_input(gr,t,sigma=0*ms)
gr = nn.p_ass_index[0][9]
t = 21*second
nn.set_noisy_input(gr,t,sigma=0*ms)
t = 21.5*second
nn.set_noisy_input(gr,t,sigma=0*ms)
nn.balance(5*second,5.)
nn.balance(5*second,1.)
nn.balance(5*second,.1)
nn.balance(5*second,.01)
nn.run_sim(2*second)
'''
#gr = nn.p_ass_index[0][0]
#t = 20.5*second
#nn.set_noisy_input(gr,t,sigma=0*ms)
for gr_num in range(nn.n_ass):
gr = nn.p_ass_index[0][gr_num]
t = (20.55+gr_num*.1)*second
nn.set_noisy_input(gr,t,sigma=0*ms)
#gr = nn.p_ass_index[0][9]
#t = 22.5*second
#nn.set_noisy_input(gr,t,sigma=0*ms)
nn.balance(5*second,5.)
nn.balance(5*second,1.)
nn.balance(5*second,.1)
nn.balance(5*second,.01)
#nn.run_sim(4*second)
nn.Pe.I -= .0*pA
for nrn in nn.p_ass_index[0][0]:
nn.Pe[nrn].I += 3*pA
nn.run_sim(.5*second)
for nrn in nn.p_ass_index[0][0]:
nn.Pe[nrn].I -= 3*pA
nn.Pe.I -= 9*pA
nn.run_sim(1.*second)
nn.Pe.I += 9*pA
for nrn in nn.p_ass_index[0][9]:
nn.Pe[nrn].I += 3*pA
nn.run_sim(.5*second)
for nrn in nn.p_ass_index[0][9]:
nn.Pe[nrn].I -= 3*pA
#nn.Pe.I +=.5*pA
#nn.Pe.I +=5*pA
#nn.run_sim(1.*second)
#for nrn in nn.p_ass_index[0][0]:
#nn.Pe[nrn].I += 1*pA
#nn.run_sim(1.*second)
plotter.plot_ps_raster(nn, chain_n=0, frac=.1)
plt.xlim([20000, 22000])
return nn
def test_fr():
from matplotlib import pyplot as plt
pr, pf = 0.06, 0.06
nn = Nets(Ne=20000, Ni=5000, cp_ee=.01, cp_ie=.01, cp_ei=0.01, cp_ii=.01,
n_ass=10, s_ass=500, pr=pr, pf=pf,
g_ee=0.1*nS, g_ie=0.1*nS, g_ei=0.4*nS, g_ii=0.4*nS)
nn.generate_ps_assemblies('gen_no_overlap')
nn.set_net_connectivity()
nn.set_spike_monitor()
nn.set_rate_monitor()
gr = nn.p_ass_index[0][0]
t = 20*second
nn.set_noisy_input(gr,t,sigma=0*ms)
t = 21*second
nn.set_noisy_input(gr,t,sigma=0*ms)
'''
nn.balance(.01*second,5.)
nn.balance(.01*second,1.)
'''
nn.balance(1*second,5.)
#nn.balance(1*second,1.)
#nn.balance(1*second,.1)
#nn.balance(1*second,.01)
#nn.run_sim(1*second)
gr_fr_e = calc_spikes.make_fr_from_spikes(nn,ps=0,w=1,exc_nrns=True)
gr_fr_i = calc_spikes.make_fr_from_spikes(nn,ps=0,w=1,exc_nrns=False)
plt.subplot(211)
for gr in range(nn.n_ass):
plt.plot(calc_spikes.gaus_smooth(gr_fr_e[gr],2))
plt.subplot(212)
for gr in range(nn.n_ass):
plt.plot(calc_spikes.gaus_smooth(gr_fr_i[gr],2))
plt.show()
return nn
def test_noPS():
from matplotlib import pyplot as plt
pr, pf = 0.06, 0.06
nn=Nets(Ne=20000, Ni=5000, cp_ee=.01, cp_ie=.01, cp_ei=0.01, cp_ii=.01,
n_chains=0, n_ass=2, s_ass=500, pr=pr, pf=pf,
g_ee=0.1*nS, g_ie=0.1*nS, g_ei=0.4*nS, g_ii=0.4*nS)
nn.set_net_connectivity()
nn.set_spike_monitor()
nn.set_rate_monitor()
nn.balance(5*second,5.)
nn.balance(5*second,1.)
nn.balance(5*second,.2)
nn.balance(5*second,.05)
nn.run_sim(1*second)
return nn
def test_diff_gff(Ne=20000):
gfc = 1
pr = 0.06
#pf= 0.06/gfc
pf = 0.06
#Ne=20000
Ni = Ne/4
cp = .01
# the default conductances used for Ne=20000
ge0 = 0.1*nS
gi0 = 0.4*nS
#gee= ge0
gee = ge0*(20000./Ne)**.5
gii = gi0*(20000./Ne)**.5
pf = pf*(Ne/20000.)**.5
pr = pr*(Ne/20000.)**.5
# so that gfc*gee=g0 or the ff connection dont scale down
#gfc = 1./(20000./Ne)**.5
#grc = 1./(20000./Ne)**.5 # figure out this guy
continuous_ass=False
nn = Nets(Ne=Ne, Ni=Ni, cp_ee=cp, cp_ie=cp, cp_ei=cp, cp_ii=cp,
n_ass=10, s_ass=500, pr=pr, pf=pf, ext_input=200*pA,
g_ee=gee, g_ie=gee, g_ei=gii, g_ii=gii, g_ff_coef=gfc,
continuous_ass=continuous_ass)
#nn.generate_ps_assemblies('gen_no_overlap')
nn.generate_ps_assemblies('gen_ordered')
nn.set_net_connectivity()
nn.set_spike_monitor()
nn.set_rate_monitor()
nn.set_voltage_monitor()
nn.set_current_monitor()
#return nn
gr = nn.p_ass_index[0][0]
'''
'''
#t = 20*second
#nn.set_noisy_input(gr, t, sigma=0*ms)
t = 21*second
nn.set_noisy_input(gr, t, sigma=0*ms)
#t = 22*second
#nn.set_noisy_input(gr, t, sigma=0*ms)
t = 23*second
nn.set_noisy_input(gr, t, sigma=0*ms)
#t = 24*second
#nn.set_noisy_input(gr, t, sigma=0*ms)
nn.mon_spike_e.record = False
nn.mon_spike_i.record = False
'''
nn.balance(20*second,5.)
nn.balance(20*second,1.)
nn.balance(10*second,.1)
nn.balance(10*second,.01)
'''
#nn.balance(.5*second, 5.)
#return nn
nn.balance(5*second, 5.)
nn.balance(5*second, 1.)
nn.balance(5*second, .1)
nn.balance(5*second, .01)
nn.mon_spike_e.record = True
nn.mon_spike_i.record = True
nn.run_sim(5*second)
'''
t_end = nn.network.clock.t*1000/second
figure = plt.figure(figsize=(16., 12.))
plotter.plot_ps_raster(nn, chain_n=0, frac=1., figure=figure,
dummy_ass=True)
plt.xlim([t_end-2000, t_end])
'''
#fname_pre = 'large_nets/scaleAllg_Ne'
#plt.savefig(fname_pre + str(Ne) + 'pr' + str(pr) + 'pf' + str(pf) +
#figure2 = plt.figure(figsize=(16.,12.))
#plt.plot(nn.mon_rate_e.smooth_rate(5*ms))
#plt.xlim([5000,t_end])
#plt.xlabel('t [ms]')
#plt.ylabel('FR [sp/sec]')
#plt.savefig(fname_pre+ str(Ne) + 'pr'+str(pr)+'pf'+str(pf) + '_fr'+'.png')
#plt.close('all')
#plotter.plot_fr_cv_syn_distr(nn)
return nn
def test_psps():
'''
test PSPs
'''
from matplotlib import pyplot as plt
ge0 = 0.1*nS
gi0 = 0.4*nS
cp = 0
nn=Nets(Ne=10,Ni=2,cp_ee=cp,cp_ie=cp,cp_ei=cp,cp_ii=cp,
n_ass=0,s_ass=1,pr=0,pf=0,ext_input=0*pA,
g_ee=ge0,g_ie=ge0,g_ei=gi0,g_ii=gi0)
nn.C_ee=bb.Synapses(nn.Pe,nn.Pe,model='w:siemens',pre='ge+=w')
nn.C_ee[0,9] = True
nn.C_ee.w=nn.g_ee
nn.C_ee.delay=nn.D
nn.network.add(nn.C_ee)
'''
'''
target = nn.Pe[0]
ext_in = bb.SpikeGeneratorGroup(1,[(0, 300*ms)],nn.network.clock)
C_syne= bb.Synapses(ext_in,target,model='w:siemens',pre='ge+=w')
C_syne.connect_random(ext_in,target,sparseness=1.)
C_syne.w = 130.*nn.g_ee
nn.network.add(ext_in, C_syne)
#nn.nrn_meas_e = nn.Pe
nn.nrn_meas_e = [0,1,9]
nn.mon_volt_e = bb.StateMonitor(nn.Pe, 'v', record=nn.nrn_meas_e)#,timestep=1)
nn.network.add(nn.mon_volt_e)
nn.run_sim(500*ms)
plt.plot(nn.mon_volt_e.times/ms,
nn.mon_volt_e[9]/mV)
plotter.show()
return nn
def test_longseq():
nn=Nets(Ne=20000, Ni=5000, cp_ee=.01, cp_ie=.01, cp_ei=0.01, cp_ii=.01,
n_ass=444, s_ass=150, pr=.19, pf=.19, synapses_per_nrn=200,
ext_input=200*pA, limit_syn_numbers=True,
g_ee=0.1*nS, g_ie=0.1*nS, g_ei=0.4*nS, g_ii=0.4*nS
)
nn.generate_ps_assemblies('gen_ass_overlap')
nn.set_net_connectivity()
nn.set_spike_monitor()
nn.set_rate_monitor()
nn.set_voltage_monitor()
nn.set_current_monitor()
gr = nn.p_ass_index[0][0]
t = 21*second
nn.set_noisy_input(gr, t, sigma=0*ms, mcoef=30)
nn.mon_spike_e.record = False
nn.mon_spike_i.record = False
nn.balance(5*second, 5.)
nn.balance(5*second, 1.)
nn.balance(5*second, .1)
nn.balance(5*second, .01)
nn.mon_spike_e.record = True
nn.mon_spike_i.record = True
nn.run_sim(6*second)
#plotter.plot_ps_raster(nn, frac=1./150)
fname = 'longseq444.npz'
spikes4save = calc_spikes.get_spike_times_ps(nn, frac=1./150)
np.savez_compressed(fname, spikes4save)
return nn
def test_2ASS(Ne=20000, nass=2):
gfc = 1
pr = 0.1
#pf= 0.06/gfc
pf = 0.06
#Ne=20000
Ni = Ne/4
cp = .01
# the default conductances used for Ne=20000
ge0 = 0.1*nS
gi0 = 0.4*nS
#gee= ge0
# so that gfc*gee=g0 or the ff connection dont scale down
#gfc = 1./(20000./Ne)**.5
#grc = 1./(20000./Ne)**.5 # figure out this guy
nn = Nets(Ne=Ne, Ni=Ni, cp_ee=cp, cp_ie=cp, cp_ei=cp, cp_ii=cp,
n_ass=10, s_ass=500, pr=pr, pf=pf, ext_input=200*pA,
g_ee=ge0, g_ie=ge0, g_ei=gi0, g_ii=gi0)
#nn.generate_ps_assemblies('gen_no_overlap')
#nn.generate_ps_assemblies('gen_no_overlap')
nn.generate_ps_assemblies('gen_ordered')
nn.generate_ps_assemblies('gen_ordered')
nn.set_net_connectivity()
nn.set_spike_monitor()
nn.set_rate_monitor()
nn.set_voltage_monitor()
nn.set_current_monitor()
#return nn
gr0 = nn.p_ass_index[0][0]
gr1 = nn.p_ass_index[1][0]
'''
'''
#t = 20*second
#nn.set_noisy_input(gr, t, sigma=0*ms)
t = 21*second
nn.set_noisy_input(gr0, t, sigma=0*ms)
t = 22*second
nn.set_noisy_input(gr1, t, sigma=0*ms)
#t = 23*second
#nn.set_noisy_input(gr, t, sigma=0*ms)
#t = 24*second
#nn.set_noisy_input(gr, t, sigma=0*ms)
nn.mon_spike_e.record = False
nn.mon_spike_i.record = False
'''
nn.balance(20*second,5.)
nn.balance(20*second,1.)
nn.balance(10*second,.1)
nn.balance(10*second,.01)
'''
nn.balance(5*second, 5.)
nn.balance(5*second, 1.)
nn.balance(5*second, .1)
nn.balance(5*second, .01)
nn.mon_spike_e.record = True
nn.mon_spike_i.record = True
nn.run_sim(5*second)
#figure = plt.figure(figsize=(16., 12.))
#plotter.plot_pop_raster(nn)
#plt.xlim([t_end-2000, t_end])
#fname_pre = '2Ass'
#plt.savefig(fname_pre+ str(Ne) + 'pr'+str(pr)+'pf'+str(pf) + '_fr'+'.png')
fname = '2asss.npz'
spikes4save = calc_spikes.get_all_spikes(nn)
np.savez_compressed(fname, np.array(spikes4save))
return nn
def show_ass_frs():
'''
Plots the firing of sequent assemblies
'''
nn=Nets(Ne=20000, Ni=5000, cp_ee=.01, cp_ie=.01, cp_ei=0.01, cp_ii=.01,
n_ass=10, s_ass=500, pr=.06, pf=.06,
ext_input=200*pA,
g_ee=0.1*nS, g_ie=0.1*nS, g_ei=0.4*nS, g_ii=0.4*nS
)
nn.generate_ps_assemblies('gen_ordered')
nn.set_net_connectivity()
nn.set_spike_monitor()
nn.set_rate_monitor()
nn.set_voltage_monitor()
nn.set_current_monitor()
nn.set_conductance_monitor()
gr0 = nn.p_ass_index[0][0]
t = 21*second
nn.set_noisy_input(gr0, t, sigma=0*ms)
t = 21.5*second
nn.set_noisy_input(gr0, t, sigma=0*ms)
t = 22.*second
nn.set_noisy_input(gr0, t, sigma=0*ms)
t = 22.5*second
nn.set_noisy_input(gr0, t, sigma=0*ms)
t = 23.*second
nn.set_noisy_input(gr0, t, sigma=0*ms)
nn.mon_spike_e.record = False
nn.mon_spike_i.record = False
nn.balance(5*second, 5.)
nn.balance(5*second, 1.)
nn.balance(5*second, .1)
nn.balance(5*second, .01)
nn.mon_spike_e.record = True
nn.mon_spike_i.record = True
nn.run_sim(6*second)
plotter.plot_gr_fr2(nn, wbin=.2, ngroups=8)
plt.show()
return nn
def test_tau():
nn=Nets(Ne=20000, Ni=5000, cp_ee=.01, cp_ie=.01, cp_ei=0.01, cp_ii=.01,
n_ass=1, s_ass=500, pr=.00, pf=.00,
ext_input=200*pA,
g_ee=0.1*nS, g_ie=0.1*nS, g_ei=0.4*nS, g_ii=0.4*nS
)
nn.generate_ps_assemblies('gen_ordered')
nn.set_net_connectivity()
nn.set_spike_monitor()
nn.set_rate_monitor()
nn.set_voltage_monitor()
nn.set_current_monitor()
nn.set_conductance_monitor()
'''
gr0 = nn.p_ass_index[0][0]
t = 21*second
nn.set_noisy_input(gr0, t, sigma=0*ms)
t = 21.5*second
nn.set_noisy_input(gr0, t, sigma=0*ms)
t = 22.*second
nn.set_noisy_input(gr0, t, sigma=0*ms)
t = 22.5*second
nn.set_noisy_input(gr0, t, sigma=0*ms)
t = 23.*second
nn.set_noisy_input(gr0, t, sigma=0*ms)
t = 23.5*second
nn.set_noisy_input(gr0, t, sigma=0*ms)
'''
nn.mon_spike_e.record = False
nn.mon_spike_i.record = False
nn.balance(5*second, 5.)
nn.balance(5*second, 1.)
nn.balance(5*second, .1)
nn.balance(4*second, .01)
nn.mon_spike_e.record = True
nn.mon_spike_i.record = True
nn.balance(1*second, .01)
nstim = 20
currs = [10*pA, 20*pA, 40*pA, 80*pA, 150*pA]
dur_stim, dur_relx = 100, 400
dur = dur_stim + dur_relx
for curr in currs:
for i in range(nstim):
nn.stim_curr(curr=curr, dur_stim=dur_stim, dur_relx=dur_relx)
plotter.plot_pop_raster(nn)
nsubs = len(currs)
mfrl = []
wbin = .1
dur_stim, dur_pre = 120, 20
base_fr = 5.
plt.figure()
for i, curr in enumerate(currs):
tl = 20000 + i*nstim*dur + np.arange(nstim)*dur
plt.subplot(nsubs, 1, 1+i)
mfr = plotter.plot_mean_curr_act(nn, tl, dur_stim=dur_stim,
dur_pre=dur_pre, wbin=wbin)
mfrl.append(calc_spikes.gaus_smooth(mfr, w=wbin, sigma=.2))
peak_time = np.argmax(mfrl[-1])*wbin - dur_pre
peak_value = np.max(mfrl[-1])
peak80_time = (mfr > base_fr+(.8*(peak_value-base_fr))).argmax()*wbin - dur_pre
peak20_time = (mfr > base_fr+(.2*(peak_value-base_fr))).argmax()*wbin - dur_pre
time_const = peak80_time - peak20_time
print 'time const is ', time_const
plt.show()
return nn
def test_boost_pf():
Ne=20000
gfc = 1
pr = 0.1
#pf= 0.06/gfc
pf = 0.00
pf_boost = 0.04
Ni = Ne/4
cp = .01
# the default conductances used for Ne=20000
ge0 = 0.1*nS
gi0 = 0.4*nS
#gee= ge0
gee = ge0*(20000./Ne)**.5
gii = gi0*(20000./Ne)**.5
pf = pf*(Ne/20000.)**.5
pr = pr*(Ne/20000.)**.5
nn = Nets(Ne=Ne, Ni=Ni, cp_ee=cp, cp_ie=cp, cp_ei=cp, cp_ii=cp,
n_ass=10, s_ass=500, pr=pr, pf=pf, ext_input=200*pA,
g_ee=gee, g_ie=gee, g_ei=gii, g_ii=gii, g_ff_coef=gfc,
modified_contin=True)
#nn.generate_ps_assemblies('gen_no_overlap')
nn.generate_ps_assemblies('gen_ordered')
nn.set_net_connectivity()
nn.set_spike_monitor()
nn.set_rate_monitor()
nn.set_voltage_monitor()
nn.set_current_monitor()
#return nn
gr = nn.p_ass_index[0][0]
'''
'''
t = 19.5*second
nn.set_noisy_input(gr, t, sigma=0*ms)
t = 20*second
nn.set_noisy_input(gr, t, sigma=0*ms)
t = 22*second
nn.set_noisy_input(gr, t, sigma=0*ms)
t = 24*second
nn.set_noisy_input(gr, t, sigma=0*ms)
t = 26*second
nn.set_noisy_input(gr, t, sigma=0*ms)
t = 28*second
nn.set_noisy_input(gr, t, sigma=0*ms)
for i in range(9):
t = (29+i)*second
nn.set_noisy_input(gr, t, sigma=0*ms)
nn.mon_spike_e.record = False
nn.mon_spike_i.record = False
#nn.boost_pff(0.04)
nn.balance(5*second, 5.)
nn.balance(5*second, 1.)
nn.balance(5*second, .1)
nn.balance(4*second, .01)
nn.mon_spike_e.record = True
nn.mon_spike_i.record = True
nn.balance(1*second, .01)
nn.boost_pff(pf_boost)
nn.balance(2*second, 5.)
nn.balance(2*second, 1.)
nn.balance(2*second, .1)
nn.balance(2*second, .01)
nn.run_sim(4*second)
return nn
def test_boost_pf_cont():
Ne=20000
gfc = 1
pr = 0.08
#pf= 0.06/gfc
pf = 0.00
pf_boost = 0.04
Ni = Ne/4
cp = .01
# the default conductances used for Ne=20000
ge0 = 0.1*nS
gi0 = 0.4*nS
#gee= ge0
gee = ge0*(20000./Ne)**.5
gii = gi0*(20000./Ne)**.5
pf = pf*(Ne/20000.)**.5
pr = pr*(Ne/20000.)**.5
nn = Nets(Ne=Ne, Ni=Ni, cp_ee=cp, cp_ie=cp, cp_ei=cp, cp_ii=cp,
n_ass=10, s_ass=500, pr=pr, pf=pf, ext_input=200*pA,
g_ee=gee, g_ie=gee, g_ei=gii, g_ii=gii, g_ff_coef=gfc,
continuous_ass=True, modified_contin=True)
#nn.generate_ps_assemblies('gen_no_overlap')
nn.generate_ps_assemblies('gen_ordered')
nn.set_net_connectivity()
nn.set_spike_monitor()
nn.set_rate_monitor()
nn.set_voltage_monitor()
nn.set_current_monitor()
#return nn
gr = nn.p_ass_index[0][0]
'''
'''
t = 19.*second
nn.set_noisy_input(gr, t, sigma=0*ms)
t = 19.5*second
#nn.set_noisy_input(gr, t, sigma=0*ms)
#t = 22*second
#nn.set_noisy_input(gr, t, sigma=0*ms)
#t = 24*second
#nn.set_noisy_input(gr, t, sigma=0*ms)
#t = 26*second
#nn.set_noisy_input(gr, t, sigma=0*ms)
#t = 28*second
nn.set_noisy_input(gr, t, sigma=0*ms)
for i in range(9):
t = (28+i)*second
nn.set_noisy_input(gr, t, sigma=0*ms)
nn.mon_spike_e.record = False
nn.mon_spike_i.record = False
nn.balance(5*second, 5.)
nn.balance(5*second, 1.)
nn.balance(5*second, .1)
nn.balance(4*second, .01)
nn.mon_spike_e.record = True
nn.mon_spike_i.record = True
nn.balance(1*second, .01)
nn.boost_pff(pf_boost)
nn.balance(2*second, 5.)
nn.balance(2*second, 1.)
nn.balance(2*second, .1)
nn.balance(2*second, .01)
nn.run_sim(9*second)
frac = .1
fname = 'contASS_pr' + str(pr)+ 'pfboost' + str(pf_boost) + \
'frac' + str(frac) + '.npz'
spikes4save = calc_spikes.get_spike_times_ps(nn,
frac=frac, pick_first=False)
np.savez_compressed(fname, spikes4save)
return nn
def test_slopes():
nn=Nets(Ne=20000, Ni=5000, cp_ee=.01, cp_ie=.01, cp_ei=0.01, cp_ii=.01,
n_ass=1, s_ass=500, pr=.0, pf=.0,
ext_input=200*pA,
g_ee=0.1*nS, g_ie=0.1*nS, g_ei=0.4*nS, g_ii=0.4*nS
)
nn.generate_ps_assemblies('gen_ordered')
nn.set_net_connectivity()
nn.set_spike_monitor()
nn.set_rate_monitor()
nn.set_voltage_monitor()
nn.set_current_monitor()
nn.set_conductance_monitor()
'''
gr0 = nn.p_ass_index[0][0]
t = 21*second
nn.set_noisy_input(gr0, t, sigma=0*ms)
t = 21.5*second
nn.set_noisy_input(gr0, t, sigma=0*ms)
t = 22.*second
nn.set_noisy_input(gr0, t, sigma=0*ms)
t = 22.5*second
nn.set_noisy_input(gr0, t, sigma=0*ms)
t = 23.*second
nn.set_noisy_input(gr0, t, sigma=0*ms)
t = 23.5*second
nn.set_noisy_input(gr0, t, sigma=0*ms)
'''
nn.mon_spike_e.record = False
nn.mon_spike_i.record = False
nn.balance(5*second, 5.)
nn.balance(5*second, 1.)
nn.balance(5*second, .1)
nn.balance(4*second, .01)
nn.mon_spike_e.record = True
nn.mon_spike_i.record = True
nn.balance(1*second, .01)
for nrn in nn.p_ass_index[0][0]:
nn.Pe[nrn].I += 5*pA
nn.run_sim(.5*second)
for nrn in nn.p_ass_index[0][0]:
nn.Pe[nrn].I -= 5*pA
nn.run_sim(.5*second)
for nrn in nn.p_assinh_index[0][0]:
nn.Pi[nrn].I += 5*pA
nn.run_sim(.5*second)
for nrn in nn.p_assinh_index[0][0]:
nn.Pi[nrn].I -= 5*pA
nn.run_sim(.5*second)
fe = calc_spikes.make_fr_from_spikes(nn, 0, 5, True)[0]
fi = calc_spikes.make_fr_from_spikes(nn, 0, 5, False)[0]
plt.subplot(211)
plt.plot(fe)
plt.subplot(212)
plt.plot(fi)
#plt.show()
return nn
def test_contin(Ne=20000):
gfc = 1
pr = 0.06
#pf= 0.06/gfc
pf = 0.06
#Ne=20000
Ni = Ne/4
cp = .01
# the default conductances used for Ne=20000
ge0 = 0.1*nS
gi0 = 0.4*nS
#gee= ge0
gee = ge0
gii = gi0
'''
s_ass = 250
pr = .12
pf = .12
'''
n_ass = 10
s_ass = 500
pr = .06
pf = .06
'''
n_ass = 10
s_ass = 50
pr = .6
pf = .6
'''
continuous_ass = True
nn = Nets(Ne=Ne, Ni=Ni, cp_ee=cp, cp_ie=cp, cp_ei=cp, cp_ii=cp,
n_ass=n_ass, s_ass=s_ass, pr=pr, pf=pf, ext_input=200*pA,
g_ee=gee, g_ie=gee, g_ei=gii, g_ii=gii, g_ff_coef=gfc,
continuous_ass=continuous_ass)
#nn.generate_ps_assemblies('gen_no_overlap')
nn.generate_ps_assemblies('gen_ordered')
nn.set_net_connectivity()
nn.set_spike_monitor()
nn.set_rate_monitor()
nn.set_voltage_monitor()
nn.set_current_monitor()
#return nn
gr = nn.p_ass_index[0][0]
'''
'''
#t = 20*second
#nn.set_noisy_input(gr, t, sigma=0*ms)
t = 20*second
nn.set_noisy_input(gr, t, sigma=0*ms)
t = 21*second
nn.set_noisy_input(gr, t, sigma=0*ms)
t = 22*second
nn.set_noisy_input(gr, t, sigma=0*ms)
t = 23*second
nn.set_noisy_input(gr, t, sigma=0*ms)
t = 24*second
nn.set_noisy_input(gr, t, sigma=0*ms)
nn.mon_spike_e.record = False
nn.mon_spike_i.record = False
'''
nn.balance(20*second,5.)
nn.balance(20*second,1.)
nn.balance(10*second,.1)
nn.balance(10*second,.01)
'''
#nn.balance(.5*second, 5.)
#return nn
nn.balance(5*second, 5.)
nn.balance(5*second, 1.)
nn.balance(5*second, .1)
nn.balance(5*second, .01)
nn.mon_spike_e.record = True
nn.mon_spike_i.record = True
nn.run_sim(5*second)
'''
t_end = nn.network.clock.t*1000/second
figure = plt.figure(figsize=(16., 12.))
plotter.plot_ps_raster(nn, chain_n=0, frac=1., figure=figure,
dummy_ass=True)
plt.xlim([t_end-2000, t_end])
'''
#fname_pre = 'large_nets/scaleAllg_Ne'
#plt.savefig(fname_pre + str(Ne) + 'pr' + str(pr) + 'pf' + str(pf) +
#figure2 = plt.figure(figsize=(16.,12.))
#plt.plot(nn.mon_rate_e.smooth_rate(5*ms))
#plt.xlim([5000,t_end])
#plt.xlabel('t [ms]')
#plt.ylabel('FR [sp/sec]')
#plt.savefig(fname_pre+ str(Ne) + 'pr'+str(pr)+'pf'+str(pf) + '_fr'+'.png')
#plt.close('all')
#plotter.plot_fr_cv_syn_distr(nn)
return nn
if __name__=='__main__':
from matplotlib import pyplot as plt
'''
nn=Nets(Ne=20000, Ni=5000, cp_ee=.01, cp_ie=.01, cp_ei=0.01, cp_ii=.01,
n_ass=444, s_ass=150, pr=.2, pf=.2, synapses_per_nrn=200,
#n_ass=10,s_ass=500,pr=.15,pf=.03,symmetric_sequence=True,p_rev=.03,
#n_ass=10,s_ass=500,pr=.06,pf=.06,
g_ee=0.1*nS, g_ie=0.1*nS, g_ei=0.4*nS, g_ii=0.4*nS)
nn.plot_for_raster_curr_volt()
ie= float(sys.argv[1])
ii= float(sys.argv[2])
tr= float(sys.argv[3])
nn.test_shifts(ie,ii,tr)
'''
'''
ne= int(sys.argv[1])
nn = test_diff_gff(ne)
'''
#nn = test_psps()
#nn = test_symm()
#nn = test_fr()
#nn = test_noPS()
#nn = test_diff_gff()
#nn = test_longseq()
#nn = test_2ASS()
#nn = show_ass_frs()
#nn = test_tau()
#nn = test_boost_pf()
#nn = test_slopes()
nn = test_boost_pf_cont()
#nn = test_boost_pf()
#nn = test_contin()
plotter.plot_pop_raster(nn)
plt.ylim([0, 12*nn.s_ass])
plt.xlim([22950, 23150])
plotter.show()
# check yger2015
# get cei recurrent only
'''
wi=[]
for i,pre in enumerate(nn.C_ei.presynaptic):
if pre<500:
if nn.C_ei.postsynaptic[i] < 500:
wi.append(nn.C_ei.w[i])
'''
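    # Hedged end-to-end sketch (mirrors test_fr above, nothing new assumed):
    #
    #   nn = Nets(Ne=20000, Ni=5000, cp_ee=.01, cp_ie=.01, cp_ei=0.01, cp_ii=.01,
    #             n_ass=10, s_ass=500, pr=.06, pf=.06,
    #             g_ee=0.1*nS, g_ie=0.1*nS, g_ei=0.4*nS, g_ii=0.4*nS)
    #   nn.generate_ps_assemblies('gen_no_overlap')
    #   nn.set_net_connectivity()
    #   nn.set_spike_monitor()
    #   nn.set_rate_monitor()
    #   nn.set_noisy_input(nn.p_ass_index[0][0], 20*second, sigma=0*ms)
    #   nn.balance(5*second, 5.)
    #   nn.run_sim(2*second)
    #   plotter.plot_ps_raster(nn, chain_n=0, frac=.1)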
|
[
"kleskjr@gmail.com"
] |
kleskjr@gmail.com
|
408baf62fd58bba6a2a32daabee9d13f1bdf89d9
|
5905b96fcbd1283fa3bfe601c4ca46cc04a49c86
|
/flask_blog/views/views.py
|
15bb2959e4ae9fc24e237f180a2568e4984606df
|
[] |
no_license
|
UsamiYuta/MyFirstFlask
|
24663cd0d8fa862aeb83369d7cfe10b83f261215
|
793d58b4d588e8678e51c3463eb924cd59bc8fa9
|
refs/heads/master
| 2023-03-25T10:22:35.180610
| 2020-07-11T20:34:39
| 2020-07-11T20:34:39
| 277,211,326
| 0
| 0
| null | 2021-03-20T04:47:26
| 2020-07-05T01:28:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,185
|
py
|
#flask_blog/views/views.py
from flask import request, redirect, url_for, render_template, flash, session
from flask_blog import app
from functools import wraps
from flask import Blueprint
view = Blueprint('view', __name__)
def login_required(view):
@wraps(view)
def inner(*args, **kwargs):
if not session.get('logged_in'):
return redirect(url_for('view.login'))
return view(*args, **kwargs)
return inner
@view.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
            flash('Invalid username')
        elif request.form['password'] != app.config['PASSWORD']:
            flash('Invalid password')
        else:
            session['logged_in'] = True
            flash('Logged in')
return redirect(url_for('entry.show_entries'))
return render_template('login.html')
@view.route('/logout')
def logout():
session.pop('logged_in', None)
    flash('Logged out')
return redirect(url_for('entry.show_entries'))
@view.app_errorhandler(404)
def non_existant_route(error):
return redirect(url_for('view.login'))
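# A hedged sketch (not part of this file) of how the blueprint above is
# presumably registered on the app, e.g. in flask_blog/__init__.py:
#
#   from flask_blog.views.views import view
#   app.register_blueprint(view)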
|
[
"Shoegazer.mac@gmail.com"
] |
Shoegazer.mac@gmail.com
|
a694498244b2f987b91044fe9e8527f37185aeaa
|
56d2b775030257efc45ef3fe8d985c03a4d66819
|
/Cryptography I/ps1/crypt-ps1.py
|
8c74d6ba80fc5cc465252101ca837014c5039da3
|
[] |
no_license
|
JackLJohnson/Coursera
|
ce32df54d2ee4bc4ed12cdc999380aed7ed252a1
|
5a252e16ce16ad9596275f5defbb315ad475581d
|
refs/heads/master
| 2020-05-29T11:24:30.626564
| 2013-02-14T13:28:04
| 2013-02-14T13:28:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,536
|
py
|
# Coursera - Cryptography I Assignment 1
# Takes 11 encrypted messages using the same key as input
# Outputs the plaintext (decrypted) of the last message as its result
def crypt_PS1():
# Function which decrypts the ith byte of the last message and returns the character
def decrypt(ithChar):
# Array to store the number of occurrences of xor'ed msgs resulting in [a-zA-Z]
numOcc = []
for i in xrange(len(asciiTexts)):
numOcc.append(0)
# XOR each message with every other message
for i in xrange(len(asciiTexts)):
for j in xrange(len(asciiTexts)):
if j != i:
# XOR'ed byte for the two messages
xorChar = asciiTexts[i][ithChar] ^ asciiTexts[j][ithChar]
                    # If the XOR'ed byte is [a-zA-Z], one of the plaintext bytes is likely a space character
if (xorChar in range(65,91) or xorChar in range(97, 123)):
numOcc[i] += 1
# Find highest occurrence of space character among all messages for corresponding byte
        # maxOcc[0] is the ith msg, maxOcc[1] is the # of occurrences
maxOcc = [0,0]
for i in xrange(len(numOcc)):
if numOcc[i] > maxOcc[1]:
maxOcc = [i,numOcc[i]]
# XOR the byte with highest probability of being a space character with the message we wish to decode
xorChar = asciiTexts[maxOcc[0]][ithChar] ^ asciiTexts[len(asciiTexts) - 1][ithChar]
# Return the decoded character XOR'ed with a space character to obtain the proper case
return chr(xorChar ^ 32)
# Inputs of cipher texts in .txt
fi = open('ciphertexts.txt', 'r')
cipherTexts = [] # Where we store the inputs
asciiTexts = [] # Where we covert the inputs to ascii bytes (2D Array)
# Store inputs into cipherTexts, and prepare proper size of asciiTexts
for line in fi:
cipherTexts.append(line);
asciiTexts.append([]);
# For each cipher text
for i in xrange(len(cipherTexts)):
# For each byte of the cipher text
for j in range(len(cipherTexts[i]) - 1)[::2]:
# Store the ascii format of the byte
asciiTexts[i].append(int(cipherTexts[i][j] + cipherTexts[i][j+1], 16))
# Print out the messages in ASCII, grouped into bytes
for a in asciiTexts:
print a , '\n'
# Where we store the decrypted message
decryptedMsg = ""
# For each byte of the msg to decrypt, decrypt then store into variable
for i in xrange(len(asciiTexts[len(asciiTexts) - 1])):
decryptedMsg += decrypt(i)
# print out decrypted message
print decryptedMsg
if __name__ == "__main__":
crypt_PS1()
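# Why the [a-zA-Z] test above detects spaces -- a tiny illustration using
# nothing beyond plain ASCII arithmetic:
#
#   >>> chr(ord('a') ^ ord(' '))
#   'A'
#   >>> chr(ord('T') ^ ord(' '))
#   't'
#
# XOR-ing a letter with a space (0x20) just flips its case bit, so when two
# ciphertext bytes XOR to a letter, one of the underlying plaintext bytes is
# very likely a space.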
|
[
"chen.jyr@gmail.com"
] |
chen.jyr@gmail.com
|
be93b92dd6e2d9a58bcb9f8188d0f98692c84a1b
|
59a80194dda97551e484c0d8495a078af240053d
|
/Intersection of Two Arrays II.py
|
d1ac3a2b1a6df29130919c6a67a6acdf0b5b4be0
|
[] |
no_license
|
huiyi999/leetcode_python
|
8c3af97410925109838c6fc7f5fecccd07ffa178
|
3fe8c2298a52a15fadec0693e00445d875c4b6ea
|
refs/heads/main
| 2023-06-18T21:36:22.873645
| 2021-07-14T20:27:50
| 2021-07-14T20:27:50
| 386,067,856
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
from collections import Counter
from typing import List
class Solution:
def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
nums1.sort()
# print(nums1)
nums2.sort()
point1, point2 = 0, 0
res = []
while point1 < len(nums1) and point2 < len(nums2):
if nums1[point1] == nums2[point2]:
res.append(nums1[point1])
point1 += 1
point2 += 1
elif nums1[point1] > nums2[point2]:
point2 += 1
else:
point1 += 1
# print(res)
return res
def intersect2(self, nums1: List[int], nums2: List[int]) -> List[int]:
counts1 = Counter(nums1)
print(counts1)
intersection = []
for num in nums2:
if counts1[num] > 0:
intersection.append(num)
counts1[num] -= 1
return intersection
solution = Solution()
solution.intersect([1, 2, 2, 1], [2, 2])
solution.intersect([4, 9, 5], [9, 4, 9, 8, 4])
solution.intersect2([1, 2, 2, 1], [2, 2])
solution.intersect2([4, 9, 5], [9, 4, 9, 8, 4])
'''
Example 1:
Input: nums1 = [1,2,2,1], nums2 = [2,2]
Output: [2,2]
Example 2:
Input: nums1 = [4,9,5], nums2 = [9,4,9,8,4]
Output: [4,9]
'''
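# Hedged note on the two approaches above: intersect() costs
# O(n log n + m log m) for the sorts plus a linear two-pointer merge, while
# intersect2() runs in O(n + m) time with an O(n) Counter. The result order
# also differs, e.g. (values taken from Example 2 above):
#
#   print(solution.intersect([4, 9, 5], [9, 4, 9, 8, 4]))   # -> [4, 9] (sorted)
#   print(solution.intersect2([4, 9, 5], [9, 4, 9, 8, 4]))  # -> [9, 4] (nums2 order)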
|
[
"chenhuiyi940830@gmail.com"
] |
chenhuiyi940830@gmail.com
|
d97740207b681235aadc14a7e95abf2ffa3591c8
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03723/s535719247.py
|
6c7b08f059627e21dffe82343469805c282a0c89
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,336
|
py
|
import sys
def LI(): return list(map(int, sys.stdin.buffer.readline().split()))
def I(): return int(sys.stdin.buffer.readline())
def LS(): return sys.stdin.buffer.readline().rstrip().decode('utf-8').split()
def S(): return sys.stdin.buffer.readline().rstrip().decode('utf-8')
def IR(n): return [I() for i in range(n)]
def LIR(n): return [LI() for i in range(n)]
def SR(n): return [S() for i in range(n)]
def LSR(n): return [LS() for i in range(n)]
def SRL(n): return [list(S()) for i in range(n)]
def MSRL(n): return [[int(j) for j in list(S())] for i in range(n)]
mod = 10 ** 9 + 7
a, b, c = LI()
# Three people A, B and C exchange the cookies they hold.
# Simultaneously, each splits their cookies in half and hands one half to
# each of the other two; this is repeated.
# They stop as soon as someone holds an odd number of cookies.
# How many exchanges can be performed? It may also continue forever.
def distribute(a,b,c):
x = a/2
y = b/2
z = c/2
a2 = y + z
b2 = x + z
c2 = x + y
if a == a2 and b == b2 and c == c2:
print(-1)
sys.exit()
return a2, b2, c2
count = 0
while (a % 2 == 0) and (b % 2 == 0) and (c % 2 == 0):
a, b, c = distribute(a, b, c)
count += 1
# print(a, b, c)
print(count)
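# Hedged worked example of the loop above (inputs chosen for illustration):
#   (4, 12, 20) -> (16, 12, 8) -> (10, 12, 14) -> (13, 12, 11)
# three exchanges happen before an odd count appears, so 3 is printed;
# an all-equal even triple such as (4, 4, 4) maps onto itself inside
# distribute(), so -1 is printed instead.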
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
600a649688e691955f9144a174da283aa3094f1e
|
0c9dabd0c701e3115c8d5fa260dd1e997b98e67d
|
/practice_for_byte/project/backup_ver2.0.py
|
3200b8abc602c09b89c043380596028dc4d86273
|
[] |
no_license
|
chenstarsQ/My-Python-Study-Note
|
f6b1f18bab8682146532f43c7abeb0db2055ad18
|
cb27d815bf8dc5b651ca6237b9d15de05056eea6
|
refs/heads/master
| 2020-03-22T15:47:58.409540
| 2019-01-18T01:40:22
| 2019-01-18T01:40:22
| 140,279,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,583
|
py
|
import os
import time
# 1. The files and directories to be backed up are
# specified in a list.
# For example, on Windows:
source = ['D:\\pythontest\\backup" "test\\*']
# the Windows cmd shell cannot handle unquoted spaces,
# so note that we have to use double quotes inside the string
# to wrap names that contain spaces.
# 2. The backup files must be stored
# in a main backup directory
# For example, on Windows:
target_dir = r'D:\pythontest\backup'
# 3. The backup will be packed and compressed into a zip file.
# 4. Use the current date as the subdirectory name inside the main backup directory
today = target_dir + os.sep + time.strftime('%Y%m%d')
# use the current time as the zip file's name
now = time.strftime('%H%M%S')
# format of the zip file name
target = today + os.sep + now + '.zip'
# create the target directory if it does not exist yet
if not os.path.exists(target_dir):
    print('we don\'t have this main file')
os.mkdir(target_dir)
else:
print('we already have this file')
# os.mkdir can only create the final folder in the path
# create the subdirectory if it does not exist yet
if not os.path.exists(today):
os.mkdir(today)
print('Successfully created directory', today)
# 5. We use the HaoZip command to pack the files into zip format
HaoZipC_command ='HaoZipC a -tzip {0} {1} -w{2}'.format(target,
' '.join(source),target_dir)
# ' '.join(): joins the source list with a space separator
# run the backup
print('Zip command is:')
print(HaoZipC_command)
print('Running:')
if os.system(HaoZipC_command) == 0:
print('Successful backup to', target)
else:
print('Backup FAILED')
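# A portable alternative (hedged sketch, not what this script uses): the
# standard-library zipfile module avoids the external HaoZipC dependency.
#
#   import zipfile, glob
#   with zipfile.ZipFile(target, 'w', zipfile.ZIP_DEFLATED) as zf:
#       for pattern in source:
#           for path in glob.glob(pattern.replace('"', '')):
#               zf.write(path)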
|
[
"1140330611@qq.com"
] |
1140330611@qq.com
|
6e1da8652bee2bb74fde05769ed4be4bc302ebaf
|
3db9e0af75640bbe4fe12c5a26dcb65d8065102b
|
/libs/neuroneap/cell.py
|
52c820451cb2c76a49c63da3483765bbace615dc
|
[] |
no_license
|
maikia/kink_paper
|
8e8b8f16167c60a74a0094492c6404a92801d35b
|
fffe754dfa0e32a8c86afce7267afdff8534134f
|
refs/heads/master
| 2021-01-13T04:56:17.339238
| 2017-02-08T09:50:39
| 2017-02-08T09:50:39
| 81,143,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,077
|
py
|
#!/usr/bin/env python
#coding=utf-8
import neuron
import numpy as np
h = neuron.h
def integrate(tstop, i_axial=False, neuron_cells = None):
"""Run Neuron simulation and return time and 2d array of
transmembrane currents (n_pts x n_segs). If i_axial is true
return in addition axial currents.
For particular cell in the network, pass the cell as neuron_cell,
if for all the cells in the network leave it None"""
i_membrane_all = []
i_axial_all = []
v_memb_all = []
    if neuron_cells is not None:
for idx_cell in range(len(neuron_cells)):
i_membrane_all.append([])
i_axial_all.append([])
li = []
#for i in xrange(numcells):
# li.append( np.array( tstop/h.dt, neuron_cells[i].all )
while h.t < tstop:
h.fadvance()
        if neuron_cells is None:
v = get_for_all(get_i_membrane)
v_memb = get_v()
i_membrane_all.append(v)
v_memb_all.append(v_memb)
else:
v = []
for idx_cell, next_cell in enumerate(neuron_cells):
v_next = get_bycell(get_i_membrane, next_cell)
i_membrane_all[idx_cell].append(v_next)
if i_axial:
            if neuron_cells is None:
iax = get_for_all(get_i_axial)
i_axial_all.append(iax)
else:
iax = []
for idx_cell, next_cell in enumerate(neuron_cells):
iax_next = get_bycell(get_i_axial, next_cell)
i_axial_all[idx_cell].append(iax_next)
    if neuron_cells is None:
t = np.arange(0, len(i_membrane_all))*h.dt
else:
t = np.arange(0, len(i_membrane_all[0]))*h.dt
if i_axial:
return t, np.array(i_membrane_all), np.array(v_memb_all), np.array(i_axial_all)
else:
return t, np.array(i_membrane_all), np.array(v_memb_all)
def insert_extracellular():
for sec in h.allsec():
sec.insert("extracellular")
def get_v():
v = []
for sec in h.allsec():
for seg in sec:
v.append(seg.v)
return v
def get_bycell(func, neuron_cell):
"""loops through all the segments of given cell
and calculates the given function for each segment"""
variable = []
for sec in neuron_cell.all:
#print sec.name()
variable = func(variable, sec)
return variable
def get_for_all(func):
"""loops through all the segments of all the cells
and calculates the given function for each segment"""
variable = []
for sec in h.allsec():
variable = func(variable, sec)
return variable
def get_i_membrane(v, sec):
i_sec = [seg.i_membrane for seg in sec]
x = [seg.x for seg in sec]
#add currents from point processes at the beginning and end of section
c_factor = 100 #from [i/area]=nA/um2 to [i_membrane]=mA/cm2
area0 = h.area(x[0], sec=sec)
area1 = h.area(x[-1], sec=sec)
i_sec[0] += sum(pp.i for pp in sec(0).point_processes())/area0*c_factor
i_sec[-1] += sum(pp.i for pp in sec(1).point_processes())/area1*c_factor
v += i_sec
return v
def get_i_axial(currents, sec):
"""return axial current density in mA/cm2"""
#for sec in h.allsec():
v0 = sec(0).v
for seg in sec:
v1 = seg.v
l = sec.L/sec.nseg #length in um
r = sec.Ra #resistance in ohm
iax = (v1-v0)/(r*l*1e-4)
currents.append(iax)
v0 = v1
return currents
def get_nsegs():
nsegs = 0
for sec in h.allsec():
nsegs += sec.nseg
return nsegs
def get_coords():
total_segs = get_nsegs()
coords = np.zeros(total_segs,
dtype=[("x", np.float32),
("y", np.float32),
("z", np.float32),
("L", np.float32),
("diam", np.float32)
])
j = 0
for sec in h.allsec():
n3d = int(h.n3d(sec))
x = np.array([h.x3d(i,sec) for i in range(n3d)])
y = np.array([h.y3d(i,sec) for i in range(n3d)])
z = np.array([h.z3d(i,sec) for i in range(n3d)])
nseg = sec.nseg
pt3d_x = np.arange(n3d)
seg_x = np.arange(nseg)+0.5
if len(pt3d_x)<1:
x_coord = y_coord = z_coord =np.ones(nseg)*np.nan
else:
x_coord = np.interp(seg_x, pt3d_x, x)
y_coord = np.interp(seg_x, pt3d_x, y)
z_coord = np.interp(seg_x, pt3d_x, z)
lengths = np.zeros(nseg)
diams = np.zeros(nseg)
lengths = np.ones(nseg)*sec.L*1./nseg
diams = np.ones(nseg)*sec.diam
coords['x'][j:j+nseg]=x_coord
coords['y'][j:j+nseg]=y_coord
coords['z'][j:j+nseg]=z_coord
coords['L'][j:j+nseg]=lengths
coords['diam'][j:j+nseg]=diams
j+=nseg
return coords
def get_seg_coords():
nchars = 40
total_segs = get_nsegs()
coords = np.zeros(total_segs,
dtype=[("x0", np.float32),
("y0", np.float32),
("z0", np.float32),
("x1", np.float32),
("y1", np.float32),
("z1", np.float32),
("L", np.float32),
("diam", np.float32),
("name", "|S%d" % nchars)
])
j = 0
for sec in h.allsec():
nseg = sec.nseg
diams = np.ones(nseg)*sec.diam
lengths = np.ones(nseg)*sec.L*1./nseg
names = np.repeat(sec.name()[:nchars], nseg).astype("|S%d"%nchars)
seg_x = np.arange(nseg+1)*1./nseg
x_coord, y_coord, z_coord = get_locs_coord(sec, seg_x)
coords['x0'][j:j+nseg] = x_coord[:-1]
coords['y0'][j:j+nseg] = y_coord[:-1]
coords['z0'][j:j+nseg] = z_coord[:-1]
coords['x1'][j:j+nseg] = x_coord[1:]
coords['y1'][j:j+nseg] = y_coord[1:]
coords['z1'][j:j+nseg] = z_coord[1:]
coords['diam'][j:j+nseg] = diams
coords['L'][j:j+nseg] = lengths
coords['name'][j:j+nseg] = names
j+=nseg
return coords
def get_locs_coord(sec, loc):
"""get 3d coordinates of section locations"""
n3d = int(h.n3d(sec))
x = np.array([h.x3d(i,sec) for i in range(n3d)])
y = np.array([h.y3d(i,sec) for i in range(n3d)])
z = np.array([h.z3d(i,sec) for i in range(n3d)])
arcl = np.sqrt(np.diff(x)**2+np.diff(y)**2+np.diff(z)**2)
arcl = np.cumsum(np.concatenate(([0], arcl)))
nseg = sec.nseg
#import pdb; pdb.set_trace()
pt3d_x = arcl/arcl[-1]
x_coord = np.interp(loc, pt3d_x, x)
y_coord = np.interp(loc, pt3d_x, y)
z_coord = np.interp(loc, pt3d_x, z)
return x_coord, y_coord, z_coord
def get_pp_coord(point_process):
"""get 3d coordinates of a point process such as synapse:
point_process -- neuron object"""
loc = point_process.get_loc()
sec = h.cas()
coord = get_locs_coord(sec, loc)
h.pop_section()
return coord
def get_point_processes():
"""Returns record array with all point processes and their
coordinates as tuple.
"""
point_processes = []
for sec in h.allsec():
pp_in_sec = []
for seg in sec.allseg():
pp_in_sec += seg.point_processes()
locs = [pp.get_loc() for pp in pp_in_sec]
#remove section from stack to avoid overflow
[h.pop_section() for pp in pp_in_sec]
x, y, z = get_locs_coord(sec, locs)
point_processes += zip(pp_in_sec, x, y, z)
return point_processes
def initialize(dt=0.025):
#insert_extracellular()
h.finitialize()
h.dt = dt
h.fcurrent()
h.frecord_init()
def load_model(hoc_name, dll_name=None):
if dll_name:
h.nrn_load_dll(dll_name)
h.load_file(hoc_name)
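# Hedged usage sketch of the helpers above ('cell.hoc' and 'out.npz' are
# hypothetical file names):
#
#   load_model('cell.hoc')
#   initialize(dt=0.025)
#   t, i_memb, v_memb = integrate(tstop=50)
#   coords = get_seg_coords()
#   np.savez('out.npz', t=t, i_memb=i_memb, v_memb=v_memb)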
|
[
"maja_ka@hotmail.com"
] |
maja_ka@hotmail.com
|
ec40995ecdd28d2d7cdcdcb8a3a996140b25c97b
|
9164601fac2aff95bb702daef82c44321b621891
|
/snake.py
|
15397348ae8861857b95d7ffb9062a73057fb2b2
|
[
"MIT"
] |
permissive
|
sari21-meet/meet2019y1lab7
|
96e66f54a88f52e0d85f9196408e5e95fbee3fd9
|
e8d40521fb85a2c8eae828ec14da954ffe75cec0
|
refs/heads/master
| 2020-06-21T01:15:15.243675
| 2019-07-31T09:30:23
| 2019-07-31T09:30:23
| 197,307,653
| 0
| 0
|
MIT
| 2019-07-17T03:23:40
| 2019-07-17T03:23:39
| null |
UTF-8
|
Python
| false
| false
| 2,785
|
py
|
import turtle
import random #We'll need this later in the lab
turtle.tracer(1,0) #This helps the turtle move more smoothly
SIZE_X=800
SIZE_Y=500
turtle.setup(SIZE_X, SIZE_Y) #Curious? It's the turtle window
#size.
turtle.penup()
SQUARE_SIZE = 20
START_LENGTH = 6
TIME_STEP = 100
#Initialize lists
pos_list = []
stamp_list = []
food_pos = []
food_stamps = []
#Set up positions (x,y) of boxes that make up the snake
snake = turtle.clone()
snake.shape("square")
#Hide the turtle object (it's an arrow - we don't need to see it)
turtle.hideturtle()
def new_stamp():
snake_pos = snake.pos() #Get snake’s position
#Append the position tuple to pos_list
pos_list.append(snake_pos)
#snake.stamp() returns a stamp ID. Save it in some variable
ID1 = snake.stamp()
#append that stamp ID to stamp_list.
stamp_list.append(ID1)
#Draw a snake at the start of the game with a for loop
#for loop should use range() and count up to the number of pieces
#in the snake (i.e. START_LENGTH)
for sth in range(START_LENGTH):
x_pos=snake.xcor()#Get x-position with snake.pos()[0]
y_pos=snake.ycor()
#Add SQUARE_SIZE to x_pos. Where does x_pos point to now?
# You're RIGHT!
x_pos+=SQUARE_SIZE
snake.goto(x_pos,y_pos) #Move snake to new (x,y)
#Now draw the new snake part on the screen (hint, you have a
#function to do this
new_stamp()
def remove_tail():
old_stamp = stamp_list.pop(0) # last piece of tail
snake.clearstamp(old_stamp) # erase last piece of tail
pos_list.pop(0) # remove last piece of tail's position
snake.direction = "Up"
def up():
    snake.direction="Up" #Change direction to up
    move_snake() #Update the snake drawing
    print("You pressed the up key!")
#2. Make functions down(), left(), and right() that change snake.direction
def down():
    snake.direction="Down"
    move_snake()
def left():
    snake.direction="Left"
    move_snake()
def right():
    snake.direction="Right"
    move_snake()
turtle.onkeypress(up, "Up") # Create listener for up key
#3. Do the same for the other arrow keys
turtle.onkeypress(down, "Down")
turtle.onkeypress(left, "Left")
turtle.onkeypress(right, "Right")
turtle.listen()
def move_snake():
my_pos = snake.pos()
x_pos = my_pos[0]
y_pos = my_pos[1]
    #If snake.direction is up, then we want the snake to change
    #its y position by SQUARE_SIZE
    if snake.direction == "Up":
        snake.goto(x_pos, y_pos + SQUARE_SIZE)
        print("You moved up!")
    elif snake.direction=="Down":
        snake.goto(x_pos, y_pos - SQUARE_SIZE)
    #4. Write the conditions for RIGHT and LEFT on your own
    elif snake.direction=="Right":
        snake.goto(x_pos + SQUARE_SIZE, y_pos)
    elif snake.direction=="Left":
        snake.goto(x_pos - SQUARE_SIZE, y_pos)
    #Make the snake stamp a new square on the screen
    #Hint - use a single function to do this
    new_stamp()
    ######## SPECIAL PLACE - Remember it for Part 5
    #remove the last piece of the snake (Hint Functions are FUN!)
    remove_tail()
turtle.mainloop()
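# Hedged sketch of the food feature that the random import hints at; the
# lists food_pos and food_stamps exist above, but place_food() itself is
# hypothetical:
#
#   def place_food():
#       food = turtle.clone()
#       food.shape("circle")
#       x = random.randint(-SIZE_X//2 + SQUARE_SIZE, SIZE_X//2 - SQUARE_SIZE)
#       y = random.randint(-SIZE_Y//2 + SQUARE_SIZE, SIZE_Y//2 - SQUARE_SIZE)
#       food.goto(x, y)
#       food_pos.append(food.pos())
#       food_stamps.append(food.stamp())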
|
[
"sari21@meet.mit.edu"
] |
sari21@meet.mit.edu
|
9f89f81d07d666adf16af5e8fd133b4b5c8fcfab
|
2132772ed2853ba888e08a9cff5d9d9892399fea
|
/L8.py
|
805908f035ceee3e2f5a7bffb019be07dbd63f8d
|
[] |
no_license
|
Oxotka/PythonChallenge
|
60c64b1ccf5b3793fcb15b6e170140fa77f9398b
|
1ec1607364536da9d2882f2e2bfed1fb1604f159
|
refs/heads/master
| 2020-12-30T16:59:02.340707
| 2017-07-28T19:41:19
| 2017-07-28T19:41:19
| 91,045,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
# http://www.pythonchallenge.com/pc/def/integrity.html
#
# un: 'BZh91AY&SYA\xaf\x82\r\x00\x00\x01\x01\x80\x02\xc0\x02\x00 \x00!\x9ah3M\x07<]\xc9\x14\xe1BA\x06\xbe\x084'
# pw: 'BZh91AY&SY\x94$|\x0e\x00\x00\x00\x81\x00\x03$ \x00!\x9ah3M\x13<]\xc9\x14\xe1BBP\x91\xf08'
import bz2
un = 'BZh91AY&SYA\xaf\x82\r\x00\x00\x01\x01\x80\x02\xc0\x02\x00 \x00!\x9ah3M\x07<]\xc9\x14\xe1BA\x06\xbe\x084'
pw = 'BZh91AY&SY\x94$|\x0e\x00\x00\x00\x81\x00\x03$ \x00!\x9ah3M\x13<]\xc9\x14\xe1BBP\x91\xf08'
login = bz2.decompress(un)
password = bz2.decompress(pw)
print (login, password)
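# Note: this is Python 2 code, so un and pw are byte strings. A hedged
# Python 3 equivalent would prefix the literals with b'':
#
#   un = b'BZh91AY&SY...'   # same bytes as above
#   print(bz2.decompress(un), bz2.decompress(pw))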
|
[
"NikitaAripov@MacBook-Pro-Nikita.local"
] |
NikitaAripov@MacBook-Pro-Nikita.local
|
a40c65b34602f58cbe008b1ecf9249e2c1434ae4
|
5aa25c61c48d6beb1f82286723c0abf2343fd175
|
/023/modular.py
|
bf0ebde5efc5c26ae8c31c7fa138144cf0f5323c
|
[] |
no_license
|
zyc-glesi/python_learn1
|
049e2d352c6ea52aa6cf2f03f686b0f8b4318b3c
|
a5ce152d99930b4177548f0859e05cd9d114b88f
|
refs/heads/master
| 2020-04-13T04:24:27.501515
| 2018-12-24T07:47:39
| 2018-12-24T07:47:39
| 162,960,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
# coding:utf-8
# Modules -- calling method 1: plain import
import my_module
celsius = float(raw_input("Enter a temperature in Celsius:"))
#fahrenheit = c_to_f(celsius)  # only valid with "from my_module import c_to_f"
fahrenheit = my_module.c_to_f(celsius)
print "That's ", fahrenheit, " degrees Fahrenheit"
|
[
"1978059479@qq.com"
] |
1978059479@qq.com
|
b851a5b8ccc2cd22a07815d7205f0ce4dcca053e
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2607/60829/292430.py
|
2e4958a0c203ae5543f917b8ac682343e24cd616
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 851
|
py
|
def a(x):
res=[]
for i in range(len(x)):
res.append(int(x[i]))
return res
def judge(x):
res=[]
for i in x:
if not i in res:
res.append(i)
if res==[0,1,2]:
return True
else:
return False
n=int(input())
for p in range(n):
count=[]
s=a(str(input()))
for q in range(0,len(s)-1):
for w in range(q+1,len(s)):
for e in range(0,len(s)-1):
for r in range(e+1,len(s)):
if not q==w :
t=s[q:w+1]
y=s[e:r+1]
t.sort()
y.sort()
if t==y and judge(t) :
count.append(t)
aa=[0]
bb=[0]
for i in range(0,len(aa)):
if aa[i]==s:
s=bb[i]
print(s)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
ad82487eacdf683b2dff2039c0d655bed4d66016
|
dbffbef3d280a355173ea3be3bb320bca1ac1091
|
/ParserSber.py
|
b54e1d3d1d609189bfa361bda023282b4bb246d6
|
[] |
no_license
|
Vladislav-Aksentev/ParserBanks
|
cd07523144f543af3f512893dc2e5819a0ec2ed4
|
a057802fe6625e8d75bdaf57e1594300691e9b7f
|
refs/heads/master
| 2020-12-27T07:46:59.690092
| 2020-02-02T19:06:02
| 2020-02-02T19:06:02
| 237,819,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
import requests
from bs4 import BeautifulSoup
import csv
def get_html(url):
r = requests.get(url)
return r.text
def write_csv(data):
with open('csvs/Sberbank.csv', 'a') as f:
writer = csv.writer(f)
writer.writerow((data['items'], data['requirements']))
def get_data(html):
soup = BeautifulSoup(html, 'lxml')
all_data = soup.find_all('div', class_='kit-row terms-description__row')
for need_data in all_data:
try:
items = need_data.find('div', class_='kit-text kit-text'
'_s kit-text_note terms-description__'
'text').text.strip("\u200b").strip()
except IndexError:
items = ''
try:
requirements = need_data.find('div', class_='kit-text kit-text_s '
'terms-description__'
'text').text.strip()
except IndexError:
requirements = ''
data = {'items': items, 'requirements': requirements}
write_csv(data)
def main():
url = ('https://www.sberbank.ru/ru/person/'
'credits/home/buying_project?tab=usl')
get_data(get_html(url))
if __name__ == '__main__':
main()
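# Hedged note: on Python 3 the csv module is usually fed files opened with
# newline='' and an explicit encoding; writing a header row once also makes
# the output self-describing, e.g.:
#
#   with open('csvs/Sberbank.csv', 'w', newline='', encoding='utf-8') as f:
#       csv.writer(f).writerow(('items', 'requirements'))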
|
[
"ieniki@yandex.ru"
] |
ieniki@yandex.ru
|
5480491a1faab2e525745a17580f633148b44a1a
|
f86142ddebbee80bb7bf018228da5825755388e9
|
/old/player.py
|
350c017f63dcb9c680a79918d9a37994c14c5113
|
[
"Apache-2.0"
] |
permissive
|
K-Fet/FirstArcadeGame
|
db0cb2d02e1960e7b13da3efc8ec588b1f2ebd10
|
7ff319f37c84a985c13f19eb4cc1ed0e24d065b0
|
refs/heads/master
| 2021-08-10T21:54:36.596393
| 2018-10-28T09:09:56
| 2018-10-28T09:09:56
| 141,492,563
| 0
| 0
|
Apache-2.0
| 2018-10-28T09:09:57
| 2018-07-18T21:33:52
|
Python
|
UTF-8
|
Python
| false
| false
| 216
|
py
|
import arcade
from MyGame import *
import math
from data import *
class player(arcade.Sprite):
def __init__(self):
super().__init__("Image/kfet.png",SPRITE_SCALING_PLAYER)
self.center_x=50
self.center_y=50
|
[
"mesjtarbes.direction@aplb.lan"
] |
mesjtarbes.direction@aplb.lan
|
4a30125a018a387720baa5484485dc68a3ac943e
|
3c34110f40eccfaf7bc77c85e39fcf8d6b20f75d
|
/observations/__init__.py
|
256f028630205ebde45547f7417448cf4c57579f
|
[
"Apache-2.0"
] |
permissive
|
zhangxt/observations
|
8776d9fcebdb459b0b10a2efc45545c6dfa6ee5f
|
abce0bfb4149f6494b6042a33493e85adce1a706
|
refs/heads/master
| 2021-01-01T18:35:15.404564
| 2017-07-25T01:06:38
| 2017-07-25T01:06:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 994
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from observations.boston_housing import boston_housing
from observations.celeba import celeba
from observations.cifar10 import cifar10
from observations.cifar100 import cifar100
from observations.enwik8 import enwik8
from observations.enwik8 import enwik8
from observations.iris import iris
from observations.lsun import lsun
from observations.mnist import mnist
from observations.ptb import ptb
from observations.sick import sick
from observations.small32_imagenet import small32_imagenet
from observations.small64_imagenet import small64_imagenet
from observations.snli import snli
from observations.stanford_sentiment_treebank import stanford_sentiment_treebank
from observations.svhn import svhn
from observations.text8 import text8
from observations.util import maybe_download_and_extract
from observations.wikitext2 import wikitext2
__version__ = '0.1.0'
VERSION = __version__
|
[
"dustinviettran@gmail.com"
] |
dustinviettran@gmail.com
|
9c5a3b6964145da1b7d2482285a5d325b214cd0a
|
ddac7346ca9f1c1d61dfd7b3c70dc6cd076a9b49
|
/dftfit/db/query.py
|
111ed1bd948964e63ffb558b31de439c6953d8ca
|
[
"MIT"
] |
permissive
|
gvenus/dftfit
|
f8cf5e9bef5a173ff0aa7202bacbfee0df61bd14
|
a00354f8f0d611bf57c6925f920c749d8628cf98
|
refs/heads/master
| 2023-03-17T18:58:52.287217
| 2019-10-20T04:07:44
| 2019-10-20T04:07:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,229
|
py
|
import pandas as pd
import numpy as np
from ..potential import Potential
def potential_from_evaluation(dbm, evaluation_id):
"""Construct potential from single evaluation id
Parameters
----------
dbm: dftfit.db.table.DatabaseManager
dftfit database access class
evaluation_id: int
integer representing the id of the evaluation
Returns
-------
dftfit.potential.Potential
potential representing the single evaluation
"""
result = dbm.connection.execute('''
SELECT potential.schema, evaluation.parameters, run.initial_parameters, run.indicies, run.bounds
FROM evaluation
JOIN run ON run.id = evaluation.run_id
JOIN potential ON potential.hash = run.potential_hash
WHERE evaluation.id = ?
''', (evaluation_id,)).fetchone()
if result is None:
raise ValueError(f'evaluation_id {evaluation_id} does not exist')
return Potential.from_run_evaluation(
result['schema'],
result['initial_parameters'],
result['indicies'], result['parameters'], result['bounds'])
def list_runs(dbm, parameters=True, stats=True):
"""Create pandas dataframe of runs
Parameters
----------
dbm: dftfit.db.table.DatabaseManager
dftfit database access class
Returns
-------
pandas.DataFrame:
dataframe with fields: id, name, potential_hash, training_hash,
start_time, end_time, features, weights, num_evaluations, min_value
"""
SELECT_RUNS = '''
SELECT id as run_id, name,
potential_hash, training_hash,
start_time, end_time, features, weights
FROM run
'''
run_df = pd.read_sql(SELECT_RUNS, dbm.connection, index_col='run_id')
if stats:
SELECT_RUN_EVAL_AGG_MIN_COUNT = '''
SELECT run_id, count(*) as num_evaluations, min(value) as min_value
FROM evaluation
GROUP BY run_id
'''
run_df = pd.merge(run_df, pd.read_sql(SELECT_RUN_EVAL_AGG_MIN_COUNT, dbm.connection, index_col='run_id'), on='run_id')
# SELECT_RUN_EVAL_AGG_MIN_MEAN = '''
# SELECT run_id, min(value) as last_100_min_value, avg(value) as last_100_mean_value
# FROM evaluation
# GROUP BY run_id
# ORDER BY id DESC LIMIT 100
# '''
# run_df = pd.merge(run_df, pd.read_sql(SELECT_RUN_EVAL_AGG_MIN_MEAN, dbm.connection, index_col='run_id'), on='run_id')
if parameters:
parameter_values = []
for run_id in run_df.index.values:
SELECT_RUN_MIN_VALUE_PARAMETERS = '''
SELECT id, parameters
FROM evaluation
WHERE run_id = {}
ORDER BY id DESC LIMIT 1
'''.format(run_id)
cursor = dbm.connection.execute(SELECT_RUN_MIN_VALUE_PARAMETERS).fetchone()
parameter_values.append({'run_id': run_id, 'final_parameters': cursor['parameters']})
run_df = pd.merge(run_df, pd.DataFrame(parameter_values), on='run_id')
return run_df
def list_run_evaluations(dbm, run_id, min_evaluation=None):
"""Create pandas dataframe of evaluations with run_id
Parameters
----------
dbm: dftfit.db.table.DatabaseManager
dftfit database access class
run_id: int
identifier of run
min_evaluation: int
used a filter to select all evaluations since certain point
Returns
-------
pandas.DataFrame:
dataframe with fields: evaluation_id, potential parameters,
all features with error, and value
"""
run = dbm.connection.execute(
'SELECT features FROM run WHERE id = ?', (run_id,)).fetchone()
if run is None:
raise ValueError('run with run_id {} does not exist'.format(run_id))
features = run['features']
SELECT_EVALUATIONS = '''
SELECT id as evaluation_id, parameters, errors, value
FROM evaluation
WHERE run_id = {}
'''.format(run_id)
df = pd.read_sql(SELECT_EVALUATIONS, dbm.connection, index_col='evaluation_id')
errors = np.array(df['errors'].values.tolist())
for i, feature in enumerate(features):
df[feature] = errors[:, i]
return df.drop('errors', axis=1)
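# Hedged usage sketch of the query helpers above (constructing dbm depends
# on dftfit's DatabaseManager configuration and is only shown schematically;
# run_id=1 is hypothetical):
#
#   runs = list_runs(dbm)                          # one row per run
#   evals = list_run_evaluations(dbm, run_id=1)    # all evaluations of run 1
#   best = evals.nsmallest(5, 'value')             # five lowest-objective rows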
def filter_evaluations(dbm, potential=None, limit=10, condition='best', run_id=None, labels=None, include_potentials=False):
cursor = dbm.connection.execute('SELECT id FROM run')
run_ids = {_['id'] for _ in cursor}
if potential:
RUNS_WITH_POTENTIAL = """
SELECT run_id
FROM run
WHERE run.potential_hash = ?
"""
cursor = dbm.connection.execute(RUNS_WITH_POTENTIAL, (potential.md5hash,))
potential_run_ids = {_['run_id'] for _ in cursor}
run_ids = potential_run_ids & run_ids
if run_id:
run_ids = run_ids & {run_id}
if labels:
arguments = []
for key, value in labels:
arguments.extend([key, value])
RUN_ID_FROM_LABEL_IDS = """
SELECT run_id
FROM run_label
JOIN label ON run_label.label_id = label.id
WHERE {}
GROUP BY run_id
HAVING count = {}
""".format(
' OR '.join(['(label.key = ? AND label.value = ?)']*len(labels)),
len(labels)
)
cursor = dbm.connection.execute(RUN_ID_FROM_LABEL_IDS, arguments)
label_run_ids = {_['run_id'] for _ in cursor}
run_ids = label_run_ids & run_ids
if len(run_ids) == 0:
return pd.DataFrame()
if condition != 'best':
raise ValueError('only know how to sort on condition best right now')
SELECT_EVALUATIONS = '''
SELECT id as evaluation_id, run_id, parameters, errors, value
FROM evaluation
WHERE {}
ORDER BY value LIMIT {}
'''.format(' OR '.join(['run_id = %d' % run_id for run_id in run_ids]), limit)
df = pd.read_sql(SELECT_EVALUATIONS, dbm.connection, index_col='evaluation_id')
if include_potentials:
eval_potentials = []
for eval_id in df.index.values:
eval_potentials.append({'evaluation_id': eval_id, 'potential': potential_from_evaluation(dbm, int(eval_id))})
df = pd.merge(df, pd.DataFrame(eval_potentials), on='evaluation_id')
return df
def copy_database_to_database(src_dbm, dest_dbm, only_unique=False):
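    """Copy runs, evaluations, and labels from src_dbm into dest_dbm.

    Potential and training schemas are deduplicated by hash. With
    only_unique=True, a run already present in the destination (matched
    field by field) is reused and its evaluations are appended to it;
    otherwise a fresh run row is inserted. Evaluations are copied in
    batches of 1000 rows to bound memory use.

    Hypothetical usage (database paths are illustrative):

    >>> copy_database_to_database(DatabaseManager('old.db'),
    ...                           DatabaseManager('merged.db'),
    ...                           only_unique=True)
    """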
SELECT_RUNS = 'SELECT id FROM run'
SELECT_RUN = 'SELECT id, name, potential_hash, training_hash, configuration, start_time, end_time, initial_parameters, indicies, bounds, features, weights FROM run WHERE id = ?'
SELECT_RUN_LABELS = 'SELECT label.key, label.value FROM run_label JOIN label ON run_label.label_id = label.id WHERE run_label.run_id = ?'
SELECT_RUN_POTENTIAL = 'SELECT potential.hash, potential.schema FROM potential JOIN run ON run.potential_hash = potential.hash WHERE run.id = ?'
SELECT_RUN_TRAINING = 'SELECT training.hash, training.schema FROM training JOIN run ON run.training_hash = training.hash WHERE run.id = ?'
SELECT_RUN_EVALUATION_COUNT = 'SELECT count(*) as num_evaluations FROM evaluation WHERE run_id = ?'
SELECT_RUN_EVALUATION = '''
SELECT parameters, errors, value FROM evaluation
WHERE run_id = ? ORDER BY id LIMIT ? OFFSET ?
'''
UNIQUE_RUN_POTENTIAL = 'SELECT hash FROM potential WHERE potential.hash = ?'
UNIQUE_RUN_TRAINING = 'SELECT hash FROM training WHERE training.hash = ?'
UNIQUE_RUN = '''
SELECT run.id FROM run
JOIN potential ON potential.hash = run.potential_hash
JOIN training ON training.hash = run.training_hash
WHERE name = ? AND potential.hash = ? AND training.hash = ? AND configuration = ?
AND start_time = ? AND (end_time = ? OR end_time IS NULL AND ? is NULL)
AND initial_parameters = ? AND indicies = ? AND bounds = ? AND features = ? AND weights = ?
'''
INSERT_RUN_POTENTIAL = 'INSERT INTO potential (hash, schema) VALUES (?, ?)'
INSERT_RUN_TRAINING = 'INSERT INTO training (hash, schema) VALUES (?, ?)'
INSERT_RUN = 'INSERT INTO run (name, potential_hash, training_hash, configuration, start_time, end_time, initial_parameters, indicies, bounds, features, weights) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'
INSERT_RUN_EVALUATION = 'INSERT INTO evaluation (run_id, parameters, errors, value) VALUES (?, ?, ?, ?)'
for run in src_dbm.connection.execute(SELECT_RUNS):
# Potential
query_result = src_dbm.connection.execute(SELECT_RUN_POTENTIAL, (run['id'],)).fetchone()
potential_hash = query_result['hash']
query = dest_dbm.connection.execute(UNIQUE_RUN_POTENTIAL, (potential_hash,)).fetchone()
if not query:
with dest_dbm.connection:
cursor = dest_dbm.connection.execute(INSERT_RUN_POTENTIAL, (query_result['hash'], query_result['schema']))
# Training
query_result = src_dbm.connection.execute(SELECT_RUN_TRAINING, (run['id'],)).fetchone()
training_hash = query_result['hash']
query = dest_dbm.connection.execute(UNIQUE_RUN_TRAINING, (training_hash,)).fetchone()
if not query:
with dest_dbm.connection:
cursor = dest_dbm.connection.execute(INSERT_RUN_TRAINING, (query_result['hash'], query_result['schema']))
# Run
query_result = src_dbm.connection.execute(SELECT_RUN, (run['id'],)).fetchone()
query = dest_dbm.connection.execute(UNIQUE_RUN, (
query_result['name'], potential_hash, training_hash, query_result['configuration'],
query_result['start_time'], query_result['end_time'], query_result['end_time'],
query_result['initial_parameters'], query_result['indicies'], query_result['bounds'], query_result['features'], query_result['weights'])).fetchone()
if query and only_unique:
run_id = query['id']
else:
with dest_dbm.connection:
cursor = dest_dbm.connection.execute(INSERT_RUN, (
query_result['name'], potential_hash, training_hash, query_result['configuration'],
query_result['start_time'], query_result['end_time'],
query_result['initial_parameters'], query_result['indicies'], query_result['bounds'], query_result['features'], query_result['weights']))
run_id = cursor.lastrowid
# Evaluation
num_evaluations = src_dbm.connection.execute(SELECT_RUN_EVALUATION_COUNT, (run['id'],)).fetchone()['num_evaluations']
print(' adding run %d with %d evaluations' % (run['id'], num_evaluations))
evaluation_limit = 1000
for offset in range(0, num_evaluations, evaluation_limit):
cursor = src_dbm.connection.execute(SELECT_RUN_EVALUATION, (run['id'], evaluation_limit, offset))
evaluations = [(run_id, row['parameters'], row['errors'], row['value']) for row in cursor]
with dest_dbm.connection:
dest_dbm.connection.executemany(INSERT_RUN_EVALUATION, evaluations)
# labels
labels = {row['key']: row['value'] for row in src_dbm.connection.execute(SELECT_RUN_LABELS, (run['id'],))}
with dest_dbm.connection:
_write_labels(dest_dbm, run_id, labels)
|
[
"chris.ostrouchov@gmail.com"
] |
chris.ostrouchov@gmail.com
|
a26a9d7148fa2971bac599ec0b5ad65d747475c3
|
c3b90f783156ea91768e950e477b48ae08e3807c
|
/library/modules/learning/language_tools/spellchecker/4english/_tr_spell.py
|
cc213a340ef9593d783fdd29ecb0bddac4068fc2
|
[] |
no_license
|
burakyldrm/calisma
|
b11688f30a46c0f1ad01f4b5348a92fbfd39904d
|
30ef63c753e6c2dbba6715941f8eaf049dbd6f3f
|
refs/heads/master
| 2021-05-03T14:37:32.645469
| 2018-02-15T21:18:01
| 2018-02-15T21:18:01
| 120,460,285
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,143
|
py
|
"""Spelling Corrector in Python 3; see http://norvig.com/spell-correct.html
Copyright (c) 2007-2016 Peter Norvig
MIT license: www.opensource.org/licenses/mit-license.php
"""
'''
edited for tr. 1) trained with a tr dataset; 2) letters in edits1() are tr characters; 3) uppercase/mixed-case handling added.
dicle
'''
################ Spelling Corrector
'''
from collections import Counter
TEXTSPATH = "/home/dicle/Documents/data/tr/tr_gazete_siir/tr_text_compilation.txt"
#def words(text): return re.findall(r'\w+', text.lower())
def words(text): return re.findall(r'\w+', text)
#def words(text): return text.split()
WORDS = Counter(words(open(TEXTSPATH).read()))
'''
import re
import string
from time import time
from language_tools.spellchecker import dump_dict
WORDS = dump_dict.getWORDS()
def P(word, N=sum(WORDS.values())):
"Probability of `word`."
return WORDS[word] / N
def correction(word):
"Most probable spelling correction for word."
return max(candidates(word), key=P)
def candidates(word):
"Generate possible spelling corrections for word."
return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])
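# The cascade above encodes a priority: an exact dictionary hit beats any
# distance-1 candidate, which beats any distance-2 candidate; `[word]` is
# the fallback so unknown words pass through unchanged. A hypothetical
# illustration (actual results depend on the loaded WORDS corpus):
#
#   candidates('kitap')   # known word    -> {'kitap'}
#   candidates('kitapp')  # one edit away -> likely {'kitap'}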
def known(words):
"The subset of `words` that appear in the dictionary of WORDS."
return set(w for w in words if w in WORDS)
def edits1(word):
"All edits that are one edit away from `word`."
if word.isupper() or word.istitle():
letters = 'ABCÇDEFGĞHIİJKLMNOÖPRSŞTUÜVYZ'
else:
letters = 'abcçdefgğhıijklmnoöprsştuüvyz'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
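# Size check: for a word of length n and this 29-letter alphabet, edits1
# builds n deletes, n-1 transposes, 29n replaces and 29(n+1) inserts,
# i.e. 60n + 28 strings before set() deduplication. For example:
#
#   len(edits1('kedi'))  # at most 60*4 + 28 = 268; fewer after dedup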
'''
def edits1(word):
"All edits that are one edit away from `word`."
letters = 'abcçdefgğhıijklmnoöprsştuüvyz'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
'''
def edits2(word):
"All edits that are two edits away from `word`."
return (e2 for e1 in edits1(word) for e2 in edits1(e1))
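# edits2 is deliberately a generator: for a word of length n it can range
# over roughly (60n + 28)**2 candidate strings, so materialising it would
# be wasteful; known() consumes it lazily and keeps only dictionary hits.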
################ Test Code
'''
def unit_tests():
assert correction('speling') == 'spelling' # insert
assert correction('korrectud') == 'corrected' # replace 2
assert correction('bycycle') == 'bicycle' # replace
assert correction('inconvient') == 'inconvenient' # insert 2
assert correction('arrainged') == 'arranged' # delete
assert correction('peotry') =='poetry' # transpose
assert correction('peotryy') =='poetry' # transpose + delete
assert correction('word') == 'word' # known
assert correction('quintessential') == 'quintessential' # unknown
assert words('This is a TEST.') == ['this', 'is', 'a', 'test']
assert Counter(words('This is a test. 123; A TEST this is.')) == (
Counter({'123': 1, 'a': 2, 'is': 2, 'test': 2, 'this': 2}))
assert len(WORDS) == 32198
assert sum(WORDS.values()) == 1115585
assert WORDS.most_common(10) == [
('the', 79808),
('of', 40024),
('and', 38311),
('to', 28765),
('in', 22020),
('a', 21124),
('that', 12512),
('he', 12401),
('was', 11410),
('it', 10681)]
assert WORDS['the'] == 79808
assert P('quintessential') == 0
assert 0.07 < P('the') < 0.08
return 'unit_tests pass'
'''
'''
def spelltest(tests, verbose=False):
"Run correction(wrong) on all (right, wrong) pairs; report results."
import time
start = time.clock()
good, unknown = 0, 0
n = len(tests)
for right, wrong in tests:
w = correction(wrong)
good += (w == right)
if w != right:
unknown += (right not in WORDS)
if verbose:
print('correction({}) => {} ({}); expected {} ({})'
.format(wrong, w, WORDS[w], right, WORDS[right]))
dt = time.clock() - start
print('{:.0%} of {} correct ({:.0%} unknown) at {:.0f} words per second '
.format(good / n, n, unknown / n, n / dt))
def Testset(lines):
"Parse 'right: wrong1 wrong2' lines into [('right', 'wrong1'), ('right', 'wrong2')] pairs."
return [(right, wrong)
for (right, wrongs) in (line.split(':') for line in lines)
for wrong in wrongs.split()]
'''
if __name__ == '__main__':
    sentence = "gostericiye, 'bıletimi' veryorum."
    words = sentence.split()
    s2 = " ".join([correction(w) for w in words])
    print(s2)  # corrected sentence; quality depends on the loaded corpus
|
[
"burak.yldrm@hotmail.com.tr"
] |
burak.yldrm@hotmail.com.tr
|
3f0f9e6f118825a4388d3351f8201d4e06e293b1
|
1d321b9d9d8b094f4be2b3ddfb0c9dccef571100
|
/special_char.py
|
acf70aef01c28a8c9c7d33be80ddb941e8e07066
|
[] |
no_license
|
lkogant1/Python
|
fe78cc10c1c4b5ffa6ff8e2ddd12b052a5ddc137
|
dba157eba3a5b0f8e9a97c59a61b6a925b232b69
|
refs/heads/master
| 2020-03-14T15:01:35.327628
| 2018-05-15T15:24:39
| 2018-05-15T15:24:39
| 131,666,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
#special characters
print("welcome to xyz")
#backslash \
print("welcome \\ to \\ xyz")
#single quote \'
print("welcome \' to \' xyz")
#double quote \"
print("welcome \" to \" xyz")
#bell \a
print("welcome \a to \a xyz")
#backspace \b
print("wel\bcome to x\byz")
#newline \n
print("welcome \n to \n XYZ")
#carriage return \r
print("welcome to \r to \r xyz")
#tab space character \t
print("welcome \t to \t xyz")
#vertical tab
print("welcome \v to \v xyz")
#5 new lines; two ways
print(5*"\n")
print("\n\n\n\n\n")
#print c on each of 5 new lines
print(5*"\nc")
#print b on each of 5 new lines; two ways
print(5*"\nb")
print("\nb\nb\nb\nb\nb")
|
[
"koganl001@gmail.com"
] |
koganl001@gmail.com
|