| blob_id (string, length 40) | language (1 class) | repo_name (string, length 5-133) | path (string, length 2-333) | src_encoding (30 classes) | length_bytes (int64, 18-5.47M) | score (float64, 2.52-5.81) | int_score (int64, 3-5) | detected_licenses (list, length 0-67) | license_type (2 classes) | text (string, length 12-5.47M) | download_success (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|
49479c29ab960d2dd33e72a45d7b32019fd9862c
|
Python
|
valluriavi/Simplecalculator
|
/Simple_calculator.py
|
UTF-8
| 178
| 4.03125
| 4
|
[] |
no_license
|
# Simple Calculator #
print('Welcome to simple calculator')
a = int(input('Enter a number :'))
b = int(input('Enter a number to be multiplied :'))
c = a*b
print('Answer = ', c)
| true
|
5ea8e38151e67acb89e08cd9c557e354d51870b0
|
Python
|
VigneshReddyJulakanti/Harry_python
|
/tasks/design-2.py
|
UTF-8
| 101
| 3.296875
| 3
|
[] |
no_license
|
a=int(input())
print("*"*a)
for i in range(a-2):
print("*{0}*".format(" "*(a-2)))
print("*"*a)
| true
|
5edc61f12ccd46b2d4f75d92051dc2dd109fd605
|
Python
|
scott-p-lane/Advent-of-Code-2019
|
/Day_04/d4main.py
|
UTF-8
| 1,004
| 3.59375
| 4
|
[] |
no_license
|
'''
Created on Jan 4, 2020
@author: slane
'''
matchesCount = 0
failCount = 0
'''
Input range is 234208 - 765869, however the end value in Python range is not inclusive, so I had to bump
it up by one.
'''
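# Matching rules, as implemented below (Advent of Code 2019 day 4, part two):
# digits must never decrease from left to right, and at least one digit must
# appear exactly twice in a row.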
for val in range(234208,765870):
    valray = list(str(val))
    hasDecrease = 0
    hasValidDouble = 0
    repeatCount = 0
    for i in range(1,len(valray)):
        prior = int(valray[i-1])
        curr = int(valray[i])
        if (curr < prior):
            hasDecrease = 1
            break
        if (curr == prior):
            repeatCount += 1
        if (curr != prior):
            if (repeatCount == 1):
                hasValidDouble += 1
            repeatCount = 0
    # Covers the case where the last two digits are repeated.
    if (repeatCount == 1):
        hasValidDouble += 1
    if (hasValidDouble >= 1 and hasDecrease == 0):
        matchesCount += 1
    else:
        failCount += 1
print("Done! Found", matchesCount, "matches and", failCount, "failures.")
| true
|
6185b387244914d04dde6340f4766712db7d3fbd
|
Python
|
tjstoll/guess-word-ai
|
/index.py
|
UTF-8
| 820
| 3.78125
| 4
|
[] |
no_license
|
'''
GuessWordAI in Python...
Author: Taneisha Stoll
'''
import GameLoop
class Index(object):
    """ Entry and exit point """
    def __init__(self, name, nLets, category):
        '''
        name - name of player
        nLets - number of letters to be guessed
        category - word category
        '''
        self.player_name = name
        self.number_of_letters = nLets
        self.category = category
        gameLoop = GameLoop.GameLoop(name, nLets, category)

# =============================================================================
if __name__ == '__main__':
    print("Hi I'm AnnieT1000. Imma guess whatever word you're thinking of.")
    name = input('Your name: ')
    numLets = input('Number of letters: ')
    cat = input('Category: ')
    game = Index(name, int(numLets), cat)
| true
|
e53b694399551ba35a054a896fab177ffa132516
|
Python
|
Ed-Narvaez/CartShop
|
/prin.py
|
UTF-8
| 2,787
| 3.078125
| 3
|
[] |
no_license
|
from producto import Producto
from factura import Factura
from funcionesBD import *
conexion = conectar()
print("Ingrese usuario y contraseña... [UTIICE: usuario: user | contraseña: mipass]")
usuario = input("Ingrese usuario")
miPass = input("Ingrese pass")
data = (usuario, miPass)
c = ejecutar("select * from users where nombre = '%s' and pass = '%s'", data, conexion)
while(c == None):
print("ERROR, ingrese datos nuevamente...")
usuario = input("Ingrese usuario")
miPass = input("Ingrese pass")
data = (usuario, miPass)
c = ejecutar("select * from users where nombre = %s and pass = %s", data, conexion)
print("------ MENU:-------\n 1 - Crear producto\n 2 - Listar productos \n 3-Crear facturas\n 4-Listar facturas\n 5-Salir")
opc = int(input(""))
while opc!=5:
if opc == 1:
print("Ingrese nombre del producto a crear:")
nomProd = input("")
print("Ingrese precio:")
precProd = float(input(""))
miProd = Producto(nomProd, precProd)
miProd.crearProd()
print("Producto creado con éxito")
elif opc == 2:
print("Listado de productos:")
lista = Producto.listarProductos()
for i in range(len(lista)):
print(lista[i])
elif opc == 3:
print("Ingrese fecha de la factura:")
fec = input("")
miFac = Factura(fec)
print("Seleccione por codigo productos de la lista. Toque -1 para dejar de cargar")
lista = Producto.listarProductos()
for i in range(len(lista)):
print(lista[i])
codProd = int(input("Ingrese el codigo del que desea agregar"))
while codProd !=-1:
cantidad = int(input("Ingrese la cantidad que desea cargar de este producto"))
miFac.agregarProd(codProd, cantidad)
for i in range(len(lista)):
print(lista[i])
codProd = int(input("Ingrese el codigo del que desea agregar"))
miFac.grabar()
print("Factura creada con éxito")
elif opc == 4:
print("Lista de facturas. Ingrese el código de la que quiera ver")
lista = Factura.listarFacturas()
for j in range(len(lista)):
print(lista[j])
print("Ingrese el código de una de ellas para ver el detalle; al finalizar, o -1 para salir")
cod = int(input("Ingrese código..."))
while cod!=-1:
lista = Factura.verPorId(cod)
for i in range(len(lista)):
print(lista[i])
cod = int(input("Ingrese código..."))
print("------ MENU:-------\n 1 - Crear producto\n 2 - Listar productos \n 3-Crear facturas\n 4-Listar facturas\n 5-Salir")
opc = int(input(""))
cerrar(conexion)
| true
|
e69238d222d74d5dff1b93d72f2fbc089f3d03a2
|
Python
|
bobmayuze/RPI_Education_Material
|
/CSCI_1100/Week_6/Lecture_10/part_4.py
|
UTF-8
| 322
| 3.4375
| 3
|
[] |
no_license
|
co2_levels = [ 320.03, 322.16, 328.07, 333.91, 341.47, 348.92, 357.29, 363.77, 371.51, 382.47, 392.95 ]
i = 0
p = float(input('Enter the fraction: '))
print(p)
for level in range(len(co2_levels)):
    co2_levels[i] *= (1+p)
    i += 1
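# e.g., p = 0.5 scales every level by 1.5, so co2_levels[0] becomes
# 320.03 * 1.5 = 480.045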
print('First: {:.2f}'.format(co2_levels[0]))
print('Last: {:.2f}'.format(co2_levels[-1]))
| true
|
34d928d0e74df2864b1f14ba6d0ed724fc5d283a
|
Python
|
anthonylife/ReviewBasedRatingPrediction
|
/script/convertTripleRatingFormat.py
|
UTF-8
| 4,116
| 2.6875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python
#encoding=utf8
#Copyright [2014] [Wei Zhang]
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
###################################################################
# Date: 2014/5/9 #
# Convert raw format of data into triple format for LIBMF: #
# Two formats: (1). triple format; (2). sparse matrix. #
###################################################################
import sys, csv, json, argparse
with open("../SETTINGS.json") as fp:
settings = json.loads(fp.read())
def convert(infile, outfile):
''' for libmf '''
wfd = open(outfile, "w")
for line in open(infile):
uid, pid, rating = line.strip("\r\t\n").split(" ")[:3]
wfd.write("%s %s %s\n" % (uid, pid, rating))
wfd.close()
def convert1(infile, outfile):
''' for max-margin matrix factorization '''
max_uid = 0
for line in open(infile):
uid, pid, rating = line.strip("\r\t\n").split(" ")[:3]
if int(uid) > max_uid:
max_uid = int(uid)
output_result = [[] for i in xrange(max_uid+1)]
for line in open(infile):
uid, pid, rating = line.strip("\r\t\n").split(" ")[:3]
uid = int(uid)
pid = int(pid)+1
rating = float(rating)
output_result[uid].append([pid, rating])
wfd = open(outfile, "w")
for mul_entry in output_result:
for entry in mul_entry:
wfd.write("%d:%.1f " % (entry[0], entry[1]))
wfd.write("\n")
wfd.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-d', type=int, action='store',
dest='data_num', help='choose which data set to use')
if len(sys.argv) != 3:
print 'Command e.g.: python evaluation.py -d 0(1)'
sys.exit(1)
para = parser.parse_args()
if para.data_num == 0:
train_infile = settings["ROOT_PATH"] + settings["TRAIN_DATA_FILE1"]
vali_infile = settings["ROOT_PATH"] + settings["VALI_DATA_FILE1"]
test_infile = settings["ROOT_PATH"] + settings["TEST_DATA_FILE1"]
train_outfile = settings["ROOT_PATH"] + settings["TRAIN_TRIPLE_FILE1"]
vali_outfile = settings["ROOT_PATH"] + settings["VALI_TRIPLE_FILE1"]
test_outfile = settings["ROOT_PATH"] + settings["TEST_TRIPLE_FILE1"]
train_outfile1 = settings["ROOT_PATH"] + settings["TRAIN_SPMAT_FILE1"]
vali_outfile1 = settings["ROOT_PATH"] + settings["VALI_SPMAT_FILE1"]
test_outfile1 = settings["ROOT_PATH"] + settings["TEST_SPMAT_FILE1"]
elif para.data_num == 1:
train_infile = settings["ROOT_PATH"] + settings["TRAIN_DATA_FILE2"]
vali_infile = settings["ROOT_PATH"] + settings["VALI_DATA_FILE2"]
test_infile = settings["ROOT_PATH"] + settings["TEST_DATA_FILE2"]
train_outfile = settings["ROOT_PATH"] + settings["TRAIN_TRIPLE_FILE2"]
vali_outfile = settings["ROOT_PATH"] + settings["VALI_TRIPLE_FILE2"]
test_outfile = settings["ROOT_PATH"] + settings["TEST_TRIPLE_FILE2"]
train_outfile1 = settings["ROOT_PATH"] + settings["TRAIN_SPMAT_FILE2"]
vali_outfile1 = settings["ROOT_PATH"] + settings["VALI_SPMAT_FILE2"]
test_outfile1 = settings["ROOT_PATH"] + settings["TEST_SPMAT_FILE2"]
else:
print 'Invalid choice of dataset'
sys.exit(1)
#convert(train_infile, train_outfile)
#convert(vali_infile, vali_outfile)
#convert(test_infile, test_outfile)
convert1(train_infile, train_outfile1)
convert1(vali_infile, vali_outfile1)
convert1(test_infile, test_outfile1)
if __name__ == "__main__":
main()
| true
|
56d66db44c1714de2ecfbb36dc10727353afdb21
|
Python
|
ChunjieShan/Baselines
|
/simpleconv3_pt_dataset/dataset.py
|
UTF-8
| 1,304
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/python3
# -*- coding: utf8 -*-
import torch
import os
from PIL import Image
from torch.utils.data import Dataset
class ImageData(Dataset):
    def __init__(self, data_dir, transform=None):
        self.label_name = {"Cat": 0, "Dog": 1}
        self.data_info = self.get_img_info(data_dir)
        self.transform = transform

    def __getitem__(self, index):
        path_img, label = self.data_info[index]
        img = Image.open(path_img).convert("RGB")
        if self.transform is not None:
            img = self.transform(img)
        return img, label

    def __len__(self):
        return len(self.data_info)

    @staticmethod
    def get_img_info(data_dir):
        # Expects subdirectories named after the labels, i.e.
        # data_dir/Cat/*.jpg and data_dir/Dog/*.jpg
        label_name = {"Cat": 0, "Dog": 1}
        data_info = list()
        for root, dirs, _ in os.walk(data_dir):
            for sub_dir in dirs:
                img_names = os.listdir(os.path.join(root, sub_dir))
                img_names = list(
                    filter(lambda x: x.endswith('.jpg'), img_names))
                for i in range(len(img_names)):
                    img_name = img_names[i]
                    path_img = os.path.join(root, sub_dir, img_name)
                    label = label_name[sub_dir]
                    data_info.append((path_img, int(label)))
        return data_info
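# Hedged usage sketch (torchvision.transforms is an assumption; it is not
# imported by this file):
#   from torchvision import transforms
#   ds = ImageData("data/train", transform=transforms.ToTensor())
#   img, label = ds[0]  # transformed image plus 0 (Cat) or 1 (Dog)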
| true
|
5113d2ae9fe953749bbb3a0d7dea0278b2b8196f
|
Python
|
saranya258/python
|
/56.py
|
UTF-8
| 115
| 3.546875
| 4
|
[] |
no_license
|
g=input()
for i in range(0,len(g)):
    if(g[i].isalpha() and g[i].isdigit()):
        print("No")
    else:
        print("Yes")
| true
|
eae50e9c515b83538678c6344174787cddcb9587
|
Python
|
jumphone/Bioinformatics
|
/scRNAseq/Natalie_20181113/combine.py
|
UTF-8
| 751
| 2.640625
| 3
|
[] |
no_license
|
import sys
f1=open(sys.argv[1])
f2=open(sys.argv[2])
fo=open(sys.argv[3],'w')
GENE={}
header1=f1.readline().rstrip().split('\t')[1:]
header2=f2.readline().rstrip().split('\t')[1:]
newheader='GENE\t'+'\t'.join(header1)+'\t'+'\t'.join(header2)+'\n'
fo.write(newheader)
GENE1={}
for line in f1:
    seq=line.rstrip().split('\t')
    GENE1[seq[0]]=seq[1:]
    GENE[seq[0]]=['0']*(len(header1)+len(header2))
GENE2={}
for line in f2:
    seq=line.rstrip().split('\t')
    GENE2[seq[0]]=seq[1:]
    GENE[seq[0]]=['0']*(len(header1)+len(header2))
for gene in GENE1:
    GENE[gene][0:len(header1)]= GENE1[gene]
for gene in GENE2:
    GENE[gene][len(header1):]= GENE2[gene]
for gene in GENE:
    fo.write(gene+'\t'+'\t'.join(GENE[gene])+'\n')
| true
|
d2951eb49e291dc94ca74616e1741c0f47899d64
|
Python
|
AyeniTrust/python-for-everybody
|
/Using Python to Access Web Data/Week 4/Following Links in HTML Using BeautifulSoup.py
|
UTF-8
| 672
| 3
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 16 21:12:49 2017
@author: atse
"""
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
url = input('Enter URL: ')
num = input('Enter count: ')
pos = input('Enter position: ')
print('Retrieving: ', url)
for times in range(int(num)):
    html = urllib.request.urlopen(url, context=ctx).read()
    soup = BeautifulSoup(html, 'html.parser')
    tags = soup('a')
    print('Retrieving: ', tags[int(pos)-1].get('href', None))
    url = tags[int(pos)-1].get('href', None)
| true
|
ad3cfd758f7a6274bc6055615eddefef655416e8
|
Python
|
cnmodel/cnmodel
|
/cnmodel/data/ionchannels.py
|
UTF-8
| 30,253
| 2.515625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# -*- encoding: utf-8 -*-
from ._db import add_table_data
"""
Ion channel density tables
All of the ion channel densities for the models implemented in cnmodel
are (or should be) stated here, and should not be modified in the
cnmodel code itself.
"""
add_table_data('RM03_channels', row_key='field', col_key='model_type',
species='guineapig', data=u"""
This table describes the ion channel densities (and voltage shifts if necessary)
for different cell types in the original Rothman Manis 2003 model.
Data from Table 1, except for "octopus" cells, which is modified (see note 3)
map to cell: bushy-II bushy-II-I tstellate tstellate-t bushy-I-II octopus
-----------------------------------------------------------------------------------------------------------------------------------
II II-I I-c I-t I-II II-o
nacn_gbar 1000. [1] 1000. [1] 1000. [1] 1000. [1] 1000. [2] 0000. [3]
jsrna_gbar 0000. [1] 0000. [1] 0000. [1] 0000. [1] 0000. [2] 1000. [3]
kht_gbar 150.0 [1] 150.0 [1] 150.0 [1] 80.0 [1] 150.0 [2] 150.0 [3]
klt_gbar 200.0 [1] 35.0 [1] 0.0 [1] 0.0 [1] 20.0 [2] 1000. [3]
ka_gbar 0.0 [1] 0.0 [1] 0.0 [1] 65.0 [1] 0.0 [2] 0.0 [3]
ih_gbar 20.0 [1] 3.5 [1] 0.5 [1] 0.5 [1] 2.0 [2] 30.0 [3]
leak_gbar 2.0 [1] 2.0 [1] 2.0 [1] 2.0 [1] 2.0 [2] 2.0 [3]
leak_erev -65 [1] -65 [1] -65 [1] -65 [1] -65 [2] -65 [3]
na_type nacn [1] nacn [1] nacn [1] nacn [1] nacn [2] jsrna [3]
ih_type ihvcn [1] ihvcn [1] ihvcn [1] ihvcn [1] ihvcn [2] ihvcn [3]
soma_Cap 12.0 [1] 12.0 [1] 12.0 [1] 12.0 [1] 12.0 [2] 25.0 [3]
e_k -84 [1] -84 [1] -84 [1] -84 [2] -84 [2] -84 [2]
e_na 50. [1] 50. [1] 50. [1] 50. [2] 50. [2] 50. [2]
ih_eh -43 [1] -43 [1] -43 [1] -43 [2] -43 [2] -43 [2]
-----------------------------------------------------------------------------------------------------------------------------------
[1] Rothman and Manis, 2003
Age "adult", Temperature=22C
Units are nS.
[2] Rothman and Manis, 2003, model I-II
Some low-voltage K current, based on observations of
a single spike near threshold and regular firing for higher
currents (Xie and Manis, 2017)
[3] Derived from Rothman and Manis, 2003, model II
Large amounts of low-voltage K current, and elevated HCN. Conductances
based on Rothman and Manis, 2003; concept from Cao and Oertel
[4] Designation for elevated LTK and Ih for octopus cells
""")
add_table_data('XM13_channels', row_key='field', col_key='model_type',
species='mouse', data=u"""
This table describes the REFERENCE ion channel densities (and voltage shifts if necessary)
for different cell types based on the Xie and Manis 2013 models for mouse.
The REFERENCE values are applied to "point" models, and to the soma of
compartmental models.
The names of the mechanisms must match a channel mechanism (Neuron .mod files)
and the following _(gbar, vshift, etc) must match an attribute of that channel
that can be accessed.
-----------------------------------------------------------------------------------------------------------------------------------
II II-I I-c I-II I-t
nav11_gbar 1000. [4] 0000. [4] 800. [4] 800. [4] 1000. [4]
nacn_gbar 2300. [1] 1000. [1] 3000. [1] 0000. [2] 0000. [1]
na_gbar 0000. [1] 0000. [1] 3000. [1] 1800. [2] 0000. [1]
kht_gbar 58.0 [1] 58.0 [1] 500.0 [1] 150.0 [2] 500.0 [1]
klt_gbar 80.0 [1] 20.0 [1] 0.0 [1] 14.0 [3] 0.0 [1]
ka_gbar 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [2] 125.0 [1]
ihvcn_gbar 30.0 [1] 30.0 [1] 18.0 [1] 2.0 [2] 18.0 [1]
leak_gbar 2.0 [1] 2.0 [1] 8.0 [1] 2.0 [2] 8.0 [1]
leak_erev -65 [1] -65 [1] -65 [1] -65 [2] -65 [1]
na_type nacn [1] nacn [1] nacn [1] na [3] nav11 [1]
ih_type ihvcn [1] ihvcn [1] ihvcn [1] ihvcn [2] ihvcn [1]
soma_Cap 26.0 [1] 26.0 [1] 25.0 [1] 25.0 [2] 25.0 [1]
nav11_vshift 4.3 [1] 4.3 [1] 4.3 [1] 4.3 [1] 4.3 [1]
e_k -84 [1] -84 [1] -84 [1] -70 [3] -84 [1]
e_na 50. [1] 50. [1] 50. [1] 55. [3] 50. [1]
ih_eh -43 [1] -43 [1] -43 [1] -43 [2] -43 [1]
-----------------------------------------------------------------------------------------------------------------------------------
[1] Uses channels from Rothman and Manis, 2003
Conductances are for Mouse bushy cells
Xie and Manis, 2013
Age "adult", Temperature=34C
Units are nS.
[2] Rothman and Manis, 2003, model I-II
Some low-voltage K current, based on observations of
a single spike near threshold and regular firing for higher
currents (Xie and Manis, 2017)
[3] These values for the I-II (dstellate) are from the original checkpoint test
for cnmodel 12/2017.
[4] nav11 channels were used in original Xie and Manis (2013) ms, but are not
used for mice in the master distribution of cnmodel, which used only the nacn
channels. The channel type can be overridden however.
""")
add_table_data('XM13_channels_compartments', row_key='parameter', col_key='compartment',
species='mouse', model_type='II', data=u"""
This table describes the ion channel densities relative to somatic densities,
e.g., relative to REFERENCE densities in the table XM13_channels.
and voltage shifts, for different compartments of the specified neuron,
Conductances will be calculated from the Model derived from Xie and Manis 2013 for mouse
(data table: mGVC_channels).
------------------------------------------------------------------------------------------------------------------------------------------------------------------
axon unmyelinatedaxon myelinatedaxon initialsegment hillock soma dendrite primarydendrite secondarydendrite
nav11_gbar 3.0 [1] 3.0 [1] 0.0 [1] 5.0 [1] 5.0 [1] 1.0 [1] 0.5 [1] 0.50 [1] 0.25 [1]
kht_gbar 1.0 [1] 2.0 [1] 0.01 [1] 2.0 [1] 2.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.25 [1]
klt_gbar 1.0 [1] 1.0 [1] 0.01 [1] 1.0 [1] 1.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.25 [1]
ihvcn_gbar 0.0 [1] 0.0 [1] 0.0 [1] 0.5 [1] 0.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.5 [1]
leak_gbar 1.0 [1] 0.25 [1] 0.25e-3 [1] 1.0 [1] 1.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.5 [1]
leak_erev -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1]
nav11_vshift 4.3 [1] 4.3 [1] 0.0 [1] 4.3 [1] 4.3 [1] 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [1]
na_type nav11 nav11 nav11 nav11 nav11 nav11 nav11 nav11 nav11
ih_type ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn
-------------------------------------------------------------------------------------------------------------------------------------------------------------------
[1] Scaling is relative to soma scaling. Numbers are estimates based on general distribution from literature on cortical neurons.
""")
# ***** BEGINNING OF XM13_Channels for nacncoop version of model
add_table_data('XM13nacncoop_channels', row_key='field', col_key='model_type',
species='mouse', data=u"""
This table describes the REFERENCE ion channel densities (and voltage shifts if necessary)
for different cell types based on the Xie and Manis 2013 models for mouse, but using
the nacncoop mechanism (cooperative sodium channels)
!!!!!!!!!!!! USAGE OF THIS TABLE SHOULD BE CONSIDERED EXPERIMENTAL !!!!!!!!!!!!!!
The REFERENCE values are applied to "point" models, and to the soma of
compartmental models.
The names of the mechanisms must match a channel mechanism (Neuron .mod files)
and the following _(gbar, vshift, etc) must match an attribute of that channel
that can be accessed.
-----------------------------------------------------------------------------------------------------------------------------------
II II-I I-c I-II I-t
nacncoop_gbar 3000. [4] 1000. [4] 1000. [4] 1000. [4] 1000. [4]
kht_gbar 58.0 [1] 58.0 [1] 500.0 [1] 150.0 [2] 500.0 [1]
klt_gbar 80.0 [1] 20.0 [1] 0.0 [1] 14.0 [3] 0.0 [1]
ka_gbar 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [2] 125.0 [1]
ihvcn_gbar 30.0 [1] 30.0 [1] 18.0 [1] 2.0 [2] 18.0 [1]
leak_gbar 2.0 [1] 2.0 [1] 8.0 [1] 2.0 [2] 8.0 [1]
leak_erev -65 [1] -65 [1] -65 [1] -65 [2] -65 [1]
na_type nacncoop [4] nacncoop [4] nacncoop [4] nacncoop [3] nacncoop [4]
ih_type ihvcn [1] ihvcn [1] ihvcn [1] ihvcn [2] ihvcn [1]
soma_Cap 26.0 [1] 26.0 [1] 25.0 [1] 25.0 [2] 25.0 [1]
nacncoop_vshift 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [1]
e_k -84 [1] -84 [1] -84 [1] -70 [3] -84 [1]
e_na 50. [1] 50. [1] 50. [1] 55. [3] 50. [1]
ih_eh -43 [1] -43 [1] -43 [1] -43 [2] -43 [1]
-----------------------------------------------------------------------------------------------------------------------------------
[1] Uses channels from Xie and Manis, 2013
Age "adult", Temperature=34C
Units are nS.
[2] Rothman and Manis, 2003, model I-II
Some low-voltage K current, based on observations of
a single spike near threshold and regular firing for higher
currents (Xie and Manis, 2017)
[3] These values for the I-II (dstellate) are from the original checkpoint test
for cnmodel 12/2017.
[4] nav11 channels were used in original Xie and Manis (2013) ms,
However, this version uses cooperative na channels for faster activation
""")
add_table_data('XM13nacncooop_channels_compartments', row_key='parameter', col_key='compartment',
species='mouse', model_type='II', data=u"""
!!!!!!!!!!!! USAGE OF THIS TABLE SHOULD BE CONSIDERED EXPERIMENTAL !!!!!!!!!!!!!!
This table describes the ion channel densities relative to somatic densities,
e.g., relative to REFERENCE densities in the table XM13_nacncoop_channels.
and voltage shifts, for different compartments of the specified neuron,
Conductances will be calculated from the Model derived from Xie and Manis 2013 for mouse
------------------------------------------------------------------------------------------------------------------------------------------------------------------
axon unmyelinatedaxon myelinatedaxon initialsegment hillock soma dendrite primarydendrite secondarydendrite
nacncoop_gbar 3.0 [1] 3.0 [1] 0.0 [1] 5.0 [1] 5.0 [1] 1.0 [1] 0.5 [1] 0.50 [1] 0.25 [1]
kht_gbar 1.0 [1] 2.0 [1] 0.01 [1] 2.0 [1] 2.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.25 [1]
klt_gbar 1.0 [1] 1.0 [1] 0.01 [1] 1.0 [1] 1.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.25 [1]
ihvcn_gbar 0.0 [1] 0.0 [1] 0.0 [1] 0.5 [1] 0.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.5 [1]
leak_gbar 1.0 [1] 0.25 [1] 0.25e-3 [1] 1.0 [1] 1.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.5 [1]
leak_erev -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1]
nacncoop_vshift 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [1]
na_type nacncoop nacncoop nacncoop nacncoop nacncoop nacncoop nacncoop nacncoop nacncoop
ih_type ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn
--------------------------------------------------------------------------------------------------------------------------------------------------------------------
[1] Scaling is relative to soma scaling. Numbers are estimates based on general distribution from literature on cortical neurons.
""")
# ***** END OF XM13_Channels for nacncoop version of model
add_table_data('mGBC_channels', row_key='field', col_key='model_type',
species='mouse', data=u"""
This table describes the REFERENCE ion channel densities (and voltage shifts if necessary)
for different cell types based on the Xie and Manis 2013 models for mouse.
The REFERENCE values are applied to "point" models, and to the soma of
compartmental models.
The names of the mechanisms must match a channel mechanism (Neuron .mod files)
and the following _(gbar, vshift, etc) must match an attribute of that channel
that can be accessed.
-----------------------------------------------------------------------------------------------------------------------------------
II II-I I-c I-II I-t
nav11_gbar 1600. [1] 1600. [1] 3000. [1] 1600. [2] 3000. [1]
kht_gbar 58.0 [1] 58.0 [1] 500.0 [1] 150.0 [2] 500.0 [1]
klt_gbar 80.0 [1] 14.0 [1] 0.0 [1] 20.0 [2] 0.0 [1]
ka_gbar 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [2] 125.0 [1]
ihvcn_gbar 30.0 [1] 30.0 [1] 18.0 [1] 2.0 [2] 18.0 [1]
leak_gbar 2.0 [1] 2.0 [1] 8.0 [1] 2.0 [2] 8.0 [1]
leak_erev -65 [1] -65 [1] -65 [1] -65 [2] -65 [1]
na_type nav11 [1] nav11 [1] nav11 [1] nav11 [1] nav11 [1]
ih_type ihvcn [1] ihvcn [1] ihvcn [1] ihvcn [2] ihvcn [1]
soma_Cap 26.0 [1] 26.0 [1] 25.0 [1] 26.0 [2] 25.0 [1]
nav11_vshift 4.3 [1] 4.3 [1] 4.3 [1] 4.3 [1] 4.3 [1]
e_k -84 [1] -84 [1] -84 [1] -84 [2] -84 [1]
e_na 50. [1] 50. [1] 50. [1] 50. [2] 50. [1]
ih_eh -43 [1] -43 [1] -43 [1] -43 [2] -43 [1]
-----------------------------------------------------------------------------------------------------------------------------------
[1] Uses channels from Rothman and Manis, 2003, except for Na channels
Conductances are for Mouse bushy cells
Xie and Manis, 2013
Age "adult", Temperature=34C
Units are nS.
[2] Rothman and Manis, 2003, model I-II
Some low-voltage K current, based on observations of
a single spike near threshold and regular firing for higher
currents (Xie and Manis, 2017)
""")
add_table_data('mGBC_channels_compartments', row_key='parameter', col_key='compartment',
species='mouse', model_type='II', data=u"""
This table describes the ion channel densities relative to somatic densities,
e.g., relative to REFERENCE densities in the table XM13_channels.
and voltage shifts, for different compartments of the specified neuron,
Conductances will be calculated from the Model for Xie and Manis 2013 for mouse
(data table: XM13_channels).
------------------------------------------------------------------------------------------------------------------------------------------------------------------
axon unmyelinatedaxon myelinatedaxon initialsegment hillock soma dendrite primarydendrite secondarydendrite
nav11_gbar 3.0 [1] 3.0 [1] 0.0 [1] 3.0 [1] 2.0 [1] 1.0 [1] 0.25 [1] 0.25 [1] 0.25 [1]
kht_gbar 1.0 [1] 2.0 [1] 0.01 [1] 2.0 [1] 2.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.25 [1]
klt_gbar 1.0 [1] 1.0 [1] 0.01 [1] 1.0 [1] 1.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.25 [1]
ihvcn_gbar 0.0 [1] 0.0 [1] 0.0 [1] 0.5 [1] 0.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.5 [1]
leak_gbar 1.0 [1] 0.25 [1] 0.25e-3 [1] 1.0 [1] 1.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.5 [1]
leak_erev -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1]
nav11_vshift 4.3 [1] 4.3 [1] 0.0 [1] 4.3 [1] 4.3 [1] 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [1]
na_type nav11 nav11 nav11 nav11 nav11 nav11 nav11 nav11 nav11
ih_type ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn
-------------------------------------------------------------------------------------------------------------------------------------------------------------------
[1] Scaling is relative to soma scaling. Numbers are estimates based on general distribution from literature on cortical neurons.
""")
add_table_data('POK_channels', row_key='field', col_key='model_type',
species='rat', data=u"""
This table describes the ion channel densities and voltage shifts for rat DCN pyramidal cells,
from Kanold and Manis, 2001
------------------------------------------------------------------------------------------------------------------------------------------
pyramidal
soma_napyr_gbar 350.0 [1]
soma_kdpyr_gbar 80.0 [1]
soma_kif_gbar 150.0 [1]
soma_kis_gbar 40.0 [1]
soma_ihpyr_gbar 2.8 [1]
soma_leak_gbar 2.8 [1]
soma_leak_erev -62.0 [3]
soma_e_na 50. [1]
soma_e_k -81.5 [1]
soma_e_h -43.0 [1]
soma_natype napyr
soma_Cap 12.0 [1]
------------------------------------------------------------------------------------------------------------------------------------------
[1] Kanold and Manis, 1999, 2001, 2005
Age P11-14, Temperature=32C
Units are nS.
Default cap is 12 pF.
[2] Adjustable q10 added for fitting
soma_ihpyr_adj_q10 1.0 [2] (removed for testing)
[3] Original values (was -62 in tests?)
""")
add_table_data('Ceballos_channels', row_key='field', col_key='model_type',
species='mouse', data=u"""
This table describes the ion channel densities and voltage shifts for mouse DCN pyramidal cells,
from Ceballos et al., 2016
------------------------------------------------------------------------------------------------------------------------------------------
pyramidal
soma_napyr_gbar 1005.0 [1]
soma_nappyr_gbar 1.257
soma_cap_pcabar 0. [3]
soma_kdpyr_gbar 251.3 [1]
soma_kcnq_gbar 0. [3]
soma_kpksk_gbar 0. [3]
soma_kir_gbar 6.283 [3]
soma_kif_gbar 150.0 [1]
soma_kis_gbar 60.0 [1]
soma_ihpyr_gbar 6.79 [1]
soma_leak_gbar 1.885 [1]
soma_leak_erev -51.3 [1]
soma_e_na 50. [1]
soma_e_k -81.5 [1]
soma_e_h -43.0 [1]
soma_natype napyr
soma_Cap 15.2 [1]
------------------------------------------------------------------------------------------------------------------------------------------
[1] Kanold and Manis, 1999, 2001, 2005 and Ceballos, 2016
Age P11-14, Temperature=32C
Units are nS.
Default cap is 15.2 pF, for 22 um dia cell
Conversions calculated from Leao et al., 2012 and Ceballos, et al. 2016
[2] Adjustable q10 added for fitting
soma_ihpyr_adj_q10 1.0 [2] (removed for testing)
[3] for implementing the additional channels from Li et al., and Leao et al. Default remains
original model set to 0; also see Ceballos et al. 2016.
6.283 is for 0.5 mmho/cm2 (quiet cells), vs 12.566 (1.0) for "active cells".
""")
add_table_data('CW_channels', row_key='field', col_key='model_type',
species='mouse', data=u"""
This table describes the ion channel densities and voltage shifts
for a mouse carthweel cell model.
Ad-hoc model, based on a Purkinje cell model (ref [1]).
-----------------------------------------------------------------------------------------------------------------------------------
cartwheel
soma_narsg_gbar 500.0 [1]
soma_bkpkj_gbar 2.0
soma_kpkj_gbar 100. [1]
soma_kpkj2_gbar 50.
soma_kpkjslow_gbar 150 [1]
soma_kpksk_gbar 25.0 [1]
soma_lkpkj_gbar 5.0 [1]
soma_hpkj_gbar 5.0 [1]
soma_e_na 50. [1]
soma_e_k -80.0 [1]
soma_hpkj_eh -43.0 [1]
soma_lkpkj_e -65.0 [1]
soma_e_ca 50.
soma_na_type narsg
soma_pcabar 0.00015 [1]
soma_Dia 18
-----------------------------------------------------------------------------------------------------------------------------------
[1] Channels from Khaliq, Gouwens and Raman, J. Neurosci. 2003
Conductance levels modified.
""")
add_table_data('TV_channels', row_key='field', col_key='model_type',
species='mouse', data=u"""
This table describes the ion channel densities and voltage shifts
for a mouse tuberculoventral cell model.
Ad-hoc model, based on the t-stellate cell model, but adjusted
to match the data from Kuo and Trussell.
-----------------------------------------------------------------------------------------------------------------------------------
TVmouse
soma_nacncoop_gbar 5800.0 [2]
soma_kht_gbar 400.0 [1]
soma_ihvcn_gbar 2.5 [2]
soma_ka_gbar 65.0 [1]
soma_leak_gbar 4.5 [1]
soma_leak_erev -72.0 [1]
soma_e_na 50. [1]
soma_e_k -81.5 [1]
soma_ihvcn_eh -43.0 [1]
soma_na_type nacncoop [2]
soma_Cap 35 [1]
-----------------------------------------------------------------------------------------------------------------------------------
[1] Values obtained from brute force runs and comparison to
FI curve from Kuo, Lu and Trussell, J Neurophysiol. 2012 Aug 15;
108(4): 1186–1198.
[2] Cooperative sodium channel model, based on (see the mechanisms folder)
concepts and implementation similar to Oz et al. J.Comp. Neurosci. 39: 63, 2015,
and Huang et al., PloSOne 7:e37729, 2012.
""")
add_table_data('sgc_mouse_channels', row_key='field', col_key='model_type',
species='mouse', data=u"""
This table describes the ion channel densities (and voltage shifts if necessary)
for SGC cells, based on
-----------------------------------------------------------------------------------------------------------------------------------
sgc-a sgc-bm
sgc_name a bm
soma_na_gbar 350. [2] 350. [2]
soma_kht_gbar 58.0 [1] 58.0 [1]
soma_klt_gbar 80.0 [1] 80.0 [1]
soma_ihap_gbar 3.0 [3] 0.0 [1]
soma_ihap_eh -41.0 [3] -41.0 [3]
soma_ihbm_gbar 0.0 [3] 3.0 [3]
soma_ihbm_eh -41.0 [3] -41.0 [3]
soma_leak_gbar 2.0 [1] 2.0 [1]
soma_leak_erev -65 [1] -65 [1]
soma_na_type jsrna [2] jsrna [2]
soma_Cap 12.0 [1] 12.0 [1]
soma_e_k -84 [1] -84 [1]
soma_e_na 50. [1] 50. [1]
-----------------------------------------------------------------------------------------------------------------------------------
[1] Model is based on the mouse bushy cell model (XM13, above),
but with a fast sodium channel from Rothman et al, 1993. and Ih currents
from Liu et al. 2014
[2] Sodium channel from Rothman, Young and Manis, J Neurophysiol. 1993 Dec;70(6):2562-83.
[3] Ih Currents from Liu, Manis, Davis, J Assoc Res Otolaryngol. 2014 Aug;15(4):585-99.
doi: 10.1007/s10162-014-0446-z. Epub 2014 Feb 21.
Age "P10" (cultured SGC cells), Original data temperature=22C.
Units are nS.
""")
add_table_data('sgc_guineapig_channels', row_key='field', col_key='model_type',
species='guineapig', data=u"""
This table describes the ion channel densities (and voltage shifts if necessary)
for a model SGC cell, which is based on a bushy cell with a different Na channel.
-----------------------------------------------------------------------------------------------------------------------------------
sgc-a sgc-bm
sgc_name a bm
soma_na_gbar 1000. [2] 1000. [2]
soma_kht_gbar 150.0 [1] 150.0 [1]
soma_klt_gbar 200.0 [1] 200.0 [1]
soma_ihap_gbar 3.0 [3] 0.0 [3]
soma_ihap_eh -41.0 [3] -41.0 [3]
soma_ihbm_gbar 0.0 [3] 3.0 [3]
soma_ihbm_eh -41.0 [3] -41.0 [3]
soma_leak_gbar 2.0 [1] 2.0 [1]
soma_leak_erev -65 [1] -65 [1]
soma_na_type jsrna [2] jsrna [2]
soma_Cap 12.0 [1] 12.0 [1]
soma_e_k -84 [1] -84 [1]
soma_e_na 50. [1] 50. [1]
-----------------------------------------------------------------------------------------------------------------------------------
[1] Model is based on the guinea pig bushy cell model (RM03, above),
but with a fast sodium channel from Rothman et al, 1993. and Ih currents
from Liu et al. 2014
[2] Sodium channel from Rothman, Young and Manis, J Neurophysiol. 1993 Dec;70(6):2562-83.
[3] Ih Currents from Liu, Manis, Davis, J Assoc Res Otolaryngol. 2014 Aug;15(4):585-99.
doi: 10.1007/s10162-014-0446-z. Epub 2014 Feb 21.
Age "P10" (cultured SGC cells), Temperature=22C.
Units are nS.
""")
add_table_data('MSO_principal_channels', row_key='field', col_key='model_type',
species='guineapig', data=u"""
This table describes the ion channel densities
for a putative MSO principal neuron based on the original Rothman Manis 2003 model for bushy cells.
-----------------------------------------------------------------------------------------------------------------------------------
MSO-principal
MSO_name Principal
na_gbar 1000. [1]
soma_kht_gbar 150.0 [1]
soma_klt_gbar 200.0 [1]
soma_ka_gbar 0.0 [1]
soma_ih_gbar 20.0 [1]
soma_leak_gbar 2.0 [1]
soma_leak_erev -65 [1]
soma_na_type nacn [1]
soma_ih_type ihvcn [1]
soma_Cap 12.0 [1]
soma_e_k -84 [1]
soma_e_na 50. [1]
soma_ih_eh -43 [1]
-----------------------------------------------------------------------------------------------------------------------------------
[1] This MSO neuron model is based on Rothman and Manis, 2003 bushy cell, type II
Age "adult", Temperature=22C
Units are nS.
""")
| true
|
125e68e4f06b2872d17a03bf0f42533f7cfb731c
|
Python
|
ljmulshine/cs263-final-project
|
/messageDecode.py
|
UTF-8
| 9,198
| 2.96875
| 3
|
[] |
no_license
|
import sys
import os
import bot_config as config
import subprocess
from skimage import io # install skimage
##########################################################
# Decoding
##########################################################
########################################
#
# getNbits(S,N)
#
# Functionality: get N bits from the argument string
#
# arguments: string, number of bits to get from the beginning of the string
# return: the N first bits of string, S
#
########################################
def getNbits(S, N):
    if not (N == 1 or N == 2 or N == 4 or N == 8 or N == 16):
        print("Density must be a member of the set, S = {1, 2, 4, 8, 16}")
        return []
    return S[0:N]
########################################
#
# decode(p,N)
#
# Functionality: decode pixel p at bit density N
#
# arguments: pixel value (p) [0,255], and encode density N {1,2,4,8,16}
# return: zero padded decoded value
#
########################################
def decode(p, N):
    d = str(bin(p % 2**N))[2:]
    d = d.zfill(N)
    return d
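# Worked example: decode(173, 4) -> 173 % 16 == 13 -> bin(13) == '0b1101'
# -> '1101' (already 4 digits, so zfill(4) leaves it unchanged).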
def bin_to_ascii(string):
    length = len(string)
    plaintext = ""
    for i in range(0, length, 8):
        binary = string[i:i+8]
        code = int(binary, 2)
        char = chr(code)
        plaintext += char
    return plaintext
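# Worked example: bin_to_ascii('0100100001101001') splits into '01001000' (72,
# 'H') and '01101001' (105, 'i'), returning 'Hi'.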
########################################
#
# getIdentifier(m,identity)
#
# Functionality: Try to find a magic string at the start of an image, using different
# encoding densities.
#
# arguments: 1D image (im) and binary identifier string (identity)
# return: Bool (good or bad message), encoding density, image with identifier removed
#
########################################
def getIdentifier(im, identity):
    # get length of identifier string
    idLen = len(identity)
    # iterate through pixels in image, decoding at different bit densities
    # until identifier string is found
    for N in [1,2,4,8,16]:
        for i in range(0,int(idLen / N)):
            # identifier string indicator
            match = False
            # check if next set of N bits match with identifier string
            if not(decode(im[i][0],N) == getNbits(identity[N*i:(N*i+N)],N)):
                match = False
                break
            else:
                match = True
        # if last check succeeded, the identifier was found
        if (match):
            return (True, N, im[(i+1):])
    # if no identifier was found, return false
    return (False,0,'')
########################################
#
# getSignature(im,encodeDensity,sigLen)
#
# Functionality: extract signature from image message
#
# arguments: 1D image (im), encode density used to encode the message (encodeDensity),
# and the signature length in bytes (sigLen)
# return: signature (base 2) and image message with signature removed
#
########################################
def getSignature(im, encodeDensity, sigLen):
    # determine how many pixels are needed to encode signature
    sigLenBin = sigLen*8
    sigLenNumPixels = int(sigLenBin / encodeDensity)
    # extract signature from image
    sig = ''
    for i in range(0,sigLenNumPixels):
        sig = sig + decode(im[i][0],encodeDensity)
    return (sig,im[sigLenNumPixels:])
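# For example, with encodeDensity = 2 bits per pixel, a 256-byte signature is
# 2048 bits and occupies 2048 / 2 = 1024 pixels.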
########################################
#
# getPayloadLen(im,encodeDensity,pLenID)
#
# Functionality: extract payload length from image message
#
# arguments: 1D image (im) and encode density used to encode message in image (encodeDensity),
# and the number of bytes used to encode the payload length (pLenID)
# return: payload length (base 10), and the message with payload length pixels removed
#
########################################
def getPayloadLen(im, encodeDensity, pLenID):
    # determine how many pixels are needed to encode payload length
    payloadLenBin = pLenID*8
    payloadLenNumPixels = int(payloadLenBin / encodeDensity)
    # extract payload length (base 2)
    length = ''
    for i in range(0,payloadLenNumPixels):
        length = length + decode(im[i][0],encodeDensity)
    # return payload length in bytes (base 10)
    return (int(length, 2), im[payloadLenNumPixels:])
########################################
#
# getPayload(im,encodeDensity,payloadLength)
#
# Functionality: extract payload from image message
#
# arguments: 1D image (im) and encode density used to encode message in image (encodeDensity),
# and the payload length in bytes, base 10 (payloadLength)
# return: binary representation of payload
#
########################################
def getPayload(im,encodeDensity,payloadLength):
    # determine how many pixels are needed to hold payload
    payloadLenBin = payloadLength*8
    payloadLenNumPixels = int(payloadLenBin / encodeDensity)
    payload = ''
    for i in range(0,payloadLenNumPixels):
        payload = payload + decode(im[i][0],encodeDensity)
    return payload
def bot(imMessage):
    # get image message dimensions
    H = imMessage.shape[0]
    W = imMessage.shape[1]
    numpixels = H*W
    # reshape image with message encoded in pixels
    messageIm = imMessage.reshape(numpixels*4,1)
    # identifier key - ensure that the identifier key size >= (encodeDensity / 8) bytes
    identifier = config.identifier
    binkey = [(bin(ord(identifier[i]))[2:]).zfill(8) for i in range(0,len(identifier))]
    binIdentifier = "".join(binkey)
    # signature specifications - ensure that signature size is >= (encodeDensity / 8) bytes
    sigLen = 256 # update this with actual signature length in bytes
    # number of bytes used to represent the payload length
    pLenID = 4 # update this with actual payload identifier length in bytes
    # check for identifier, return encoding precision
    [validMessage, encodeDensity, message] = getIdentifier(messageIm, binIdentifier)
    #print("Encode Density: ", encodeDensity)
    # get signature
    if (validMessage):
        #print("Valid Message: Getting signature...")
        [signature, message] = getSignature(message, encodeDensity, sigLen)
        #print("WE MUST VERIFY THIS SIGNATURE")
    else:
        print("Invalid Message...")
        sys.exit(1)
    # get payload length
    [payloadLength, message] = getPayloadLen(message, encodeDensity, pLenID)
    #print("Payload Length: ", payloadLength, " bytes")
    # total number of pixels used
    numPixels = int(8 * payloadLength / encodeDensity / 4)
    # get payload
    payload = getPayload(message,encodeDensity,payloadLength)
    #print("Signature: ", signature)
    ascii_sig = bin_to_ascii(signature)
    ascii_message = bin_to_ascii(payload)
    with open(config.tmp_file, "w") as f:
        f.write(payload)
    f.closed
    with open(config.tmp_sig, "w") as f:
        f.write(ascii_sig)
    f.closed
    rsa_verify_cmd = config.verify_cmd.format(config.public_key,
                                              config.tmp_sig, config.tmp_file)
    verify_proc = subprocess.Popen(rsa_verify_cmd, shell=True,
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    verification, err = verify_proc.communicate()
    if err:
        print "error verifying data: {}".format(err)
        sys.exit(1)
    if "Verified OK" not in verification:
        print "failure to verify data with public key"
        sys.exit(1)
    try:
        os.remove(config.tmp_file)
        os.remove(config.tmp_sig)
    except:
        print "error: failed to remove temporary files"
    with open(config.command_file, "w") as f:
        f.write(ascii_message)
    f.closed
    proc = subprocess.Popen(["python", config.command_file],
                            stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    out, err = proc.communicate()
    if out:
        print out
    if err:
        print err
    try:
        os.remove(config.command_file)
    except:
        print "error: failed to remove command file"
    return (payload, numPixels)
def do_analysis():
    ################################
    #
    # ANALYSIS
    #
    ################################
    im = io.imread('transparentImage.png')
    # reshape original and encoded image into 1D array
    H = im.shape[0]
    W = im.shape[1]
    np = H*W
    im1D = im.reshape(np*4,1)
    im1D_encoded = imMessage.reshape(np*4,1)
    meanSquaredError = sum([((int(im1D_encoded[i][0]) - int(im1D[i][0]))**2)**(1/2.0) for i in range(0,np)]) / numPixels
    print "\n******************************************************"
    print "* Statistics:\n*"
    print "* Number of Pixels used to encode data: ", numPixels, "\n*"
    print "* Percentage of image pixels used: %1.3f" % (100.0 * numPixels / float(np)), "% \n*"
    print "* Mean Squared Error of altered Pixels: %1.3f" % meanSquaredError, "\n*"
    print "******************************************************"
if __name__ == "__main__":
try:
imageFile = sys.argv[1]
except:
imageFile = "encodedImage.png"
imMessage = io.imread(imageFile)
[payload, numPixels] = bot(imMessage)
#f = open('test.txt','w')
#f.write(payload)
#do_analysis()
| true
|
14e96c5567f3f6418b29fe464be2f932306b3c8f
|
Python
|
vtranduc/Android-customizable-puzzle-game
|
/menu.py
|
UTF-8
| 8,754
| 2.53125
| 3
|
[] |
no_license
|
import kivy
kivy.require('1.7.2')
from kivy.uix.widget import Widget
from cropImage import CropImage, cropFit, ratioFit, centering_widget
from kivy.core.window import Window
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.storage.jsonstore import JsonStore
from kivy.uix.scrollview import ScrollView
from artInfo import ArtInfo
from kivy.clock import Clock
class Menu(Widget):
    start_button_str='./imageAssets/gmbuttonstart.png'
    score_button_str='./imageAssets/gmbuttonscore.png'
    gallery_button_str='./imageAssets/gmbuttonsgallery.png'
    configure_button_str='./imageAssets/gmbuttonsconfigure.png'
    exit_button_str='./imageAssets/gmbuttonsEXIT.png'
    #Put the strings of the buttons into a list, so we can use a for loop later
    button_paths=[start_button_str,score_button_str,gallery_button_str,
                  configure_button_str,exit_button_str]
    top_edge=0.5 #Manual
    left_edge=0.3 #Manual
    background='./imageAssets/bg.png'
    #Background image's position will be computed to center
    #customize_button_str='./imageAssets/CUSTOMIZE_BUTTON.png'
    #Here, we define the horizontal borders for buttons, from the top
    horizontal_borders=[float(Window.height)*(5./5.),float(Window.height)*(4./5.),
                        float(Window.height)*(3./5.),float(Window.height)*(2./5.),float(Window.height)*(1./5.),0.]
    #The vertical borders are there to limit the size of buttons. BUTTONS ARE ON THE LEFT HAND SIDE OF THESE BORDERS
    vertical_borders=[float(Window.width)*(1./2.),float(Window.width)*(1./2.),float(Window.width)*(1./2.),
                      float(Window.width)*(1./2.),float(Window.width)*(1./2.)]
    loading_points=[0,5,15,20,70,75,80,85,90,95,100]
    loading_bar_interval=0.001
    loading_increment=2.5

    def __init__(self,**kwargs):
        super(Menu,self).__init__(**kwargs)
        bg=cropFit(self.background,Window.width,Window.height,0,0)
        self.add_widget(bg)
        self.loading_bar()
    def create_menu_buttons(self):
        self.button_widget_list=[]
        for index in range(0,len(self.button_paths)):
            button=ratioFit(self.button_paths[index],self.vertical_borders[index],
                            self.horizontal_borders[index]-self.horizontal_borders[index+1])
            centering_widget(button,0,self.horizontal_borders[index+1],self.vertical_borders[index],
                             self.horizontal_borders[index]-self.horizontal_borders[index+1])
            self.button_widget_list.append(button)
            #self.add_widget(button)

    def display_buttons(self):
        for button in self.button_widget_list:
            self.add_widget(button)

    def create_pop_up(self):
        #THESE WILL BE WORKED ON IN THE FUTURE VERSION
        self.unavailable_feature_popUp=self.unvailable_feature_popUp_creator()
        #This is fine
        self.exit_popUp=self.are_you_sure_exit()

    def loading_point(self):
        self.loading_point_index+=1
        return self.loading_points[self.loading_point_index]

    def update_progress(self,next_stat,next_action,dt):
        current_stat=self.pb.value
        if current_stat>=next_stat or next_stat>self.pb.max:
            raise Exception('You have to specify next_stat between current_stat and max possible value')
        Clock.schedule_interval(self.update_progress_accessory,self.loading_bar_interval)
        self.next_stat=next_stat
        self.next_function=next_action

    def update_progress_accessory(self,dt):
        #To be accessed by the update_progress method in the same class only
        self.pb.value+=self.loading_increment
        if self.pb.value<self.next_stat:
            return
        elif self.pb.value>self.next_stat:
            self.pb.value=self.next_stat
        Clock.unschedule(self.update_progress_accessory)
        Clock.schedule_once(self.next_function,0)

    def clear_init_instances(self):
        del self.pb
        del self.loading_point_index
        del self.next_stat
        del self.next_function
        del self.loading_bar

    def loading_bar(self):
        from kivy.uix.progressbar import ProgressBar
        self.loading_bar=Popup()
        self.loading_bar.auto_dismiss=False
        self.loading_bar.title='Initializing'
        self.loading_bar.size_hint=(0.8,0.2)
        self.pb=ProgressBar()
        self.pb.max=self.loading_points[-1]
        self.loading_bar.add_widget(self.pb)
        self.loading_point_index=0
        self.pb.value=self.loading_points[0]

    def unvailable_feature_popUp_creator(self):
        #I WILL MAKE THE FEATURES SIMPLY UNAVAILABLE FOR NOW
        myBox=BoxLayout()
        myBox.orientation='vertical'
        myLabel=Label()
        myLabel.text='This feature will be\navailable in future version'
        button=Button()
        button.text='Dismiss'
        button.size_hint_y=0.3
        myBox.add_widget(myLabel)
        myBox.add_widget(button)
        popUp=Popup()
        popUp.title='Feature not yet available'
        popUp.content=myBox
        popUp.size_hint=(0.8,0.5)
        button.bind(on_release=popUp.dismiss)
        return popUp

    def are_you_sure_exit(self):
        myBox=BoxLayout()
        myBox.orientation='vertical'
        myLabel=Label()
        myLabel.text='Are you sure\nyou wanna exit the game?'
        button=Button()
        button.text='No'
        button.size_hint_y=0.2
        button1=Button()
        button1.text='Yes'
        button1.size_hint_y=0.2
        bar=Widget()
        bar.size_hint_y=0.02
        myBox.add_widget(myLabel)
        myBox.add_widget(button1)
        myBox.add_widget(bar)
        myBox.add_widget(button)
        popUp=Popup()
        popUp.title='Exiting the game'
        popUp.content=myBox
        popUp.size_hint=(0.8,0.8)
        button.bind(on_release=popUp.dismiss)
        button1.bind(on_release=self.exit_application)
        return popUp

    def exit_application(self,instance):
        from kivy.app import App
        App.get_running_app().stop()

    def achievement_update(self):
        if JsonStore('highest_score.json').exists('highest_score0'):
            score=JsonStore('highest_score.json').get('highest_score0')['highest_score']
        else:
            score=0
        achievement='Stage 1: '+str(score)
        if score>=ArtInfo.max_score[0] and ArtInfo.max_score[0]!=None:
            achievement=achievement+' (Max reached)'
        for index in range(1,len(ArtInfo.fanpage_url)):
            what_to_get='highest_score'+str(index)
            if JsonStore('highest_score.json').exists(what_to_get):
                score=JsonStore('highest_score.json').get(what_to_get)['highest_score']
                achievement=achievement+'\nStage '+str(index+1)+': '+str(score)
                if score>=ArtInfo.max_score[index] and ArtInfo.max_score[index]!=None:
                    achievement=achievement+' (Max reached)'
            else:
                #THIS SHOULD BE UNNECESSARY. IT IS HERE FOR SAFETY REASONS
                if not JsonStore('levelCacche.json').exists('level'):
                    JsonStore('levelCacche.json').put('level',level=0)
                myLevel=JsonStore('levelCacche.json').get('level')['level']
                if myLevel<index:
                    for index2 in range(index,len(ArtInfo.fanpage_url)):
                        achievement=achievement+'\nStage '+str(index2+1)+': Locked'
                    return achievement
                else: #THIS SHOULD NEVER BE CALLED IN THE APP
                    achievement=achievement+'\nStage '+str(index+1)+': 0'
        return achievement

    def achievement_popUp(self):
        #MUST BE CALLED AFTER CALLING self.achievement_update() above
        myBox=BoxLayout()
        myBox.orientation='vertical'
        myLabel=Label()
        myLabel.text=self.achievement_update()
        myLabel.size_hint_y=len(ArtInfo.fanpage_url)*0.07
        myScroll=ScrollView()
        myScroll.add_widget(myLabel)
        button=Button()
        button.text='Dismiss'
        button.size_hint_y=0.15
        myBox.add_widget(myScroll)
        myBox.add_widget(button)
        popUp=Popup()
        popUp.title='Highest scores'
        popUp.content=myBox
        popUp.size_hint=(0.9,0.8)
        button.bind(on_release=popUp.dismiss)
        popUp.open()
| true
|
2028d34f1164f7dd35380537427155fdfc8329bb
|
Python
|
Zero-Grav/art-apex
|
/apex/src/lib/adafruit_bno08x/i2c.py
|
UTF-8
| 4,238
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
# SPDX-FileCopyrightText: Copyright (c) 2020 Bryan Siepert for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
Subclass of `adafruit_bno08x.BNO08X` to use I2C
"""
from struct import pack_into
import adafruit_bus_device.i2c_device as i2c_device
from . import BNO08X, DATA_BUFFER_SIZE, const, Packet, PacketError
_BNO08X_DEFAULT_ADDRESS = const(0x4A)
class BNO08X_I2C(BNO08X):
    """Library for the BNO08x IMUs from Hillcrest Laboratories

    :param ~busio.I2C i2c_bus: The I2C bus the BNO08x is connected to.
    """

    def __init__(
        self, i2c_bus, reset=None, address=_BNO08X_DEFAULT_ADDRESS, debug=False
    ):
        self.bus_device_obj = i2c_device.I2CDevice(i2c_bus, address)
        super().__init__(reset, debug)
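    # Wire-format note (as implemented below): every SHTP packet begins with a
    # 4-byte header -- a little-endian uint16 total packet length (header
    # included), a channel number byte, and a sequence number byte.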
    def _send_packet(self, channel, data):
        data_length = len(data)
        write_length = data_length + 4
        pack_into("<H", self._data_buffer, 0, write_length)
        self._data_buffer[2] = channel
        self._data_buffer[3] = self._sequence_number[channel]
        for idx, send_byte in enumerate(data):
            self._data_buffer[4 + idx] = send_byte
        packet = Packet(self._data_buffer)
        self._dbg("Sending packet:")
        self._dbg(packet)
        with self.bus_device_obj as i2c:
            i2c.write(self._data_buffer, end=write_length)
        self._sequence_number[channel] = (self._sequence_number[channel] + 1) % 256
        return self._sequence_number[channel]

    # returns true if available data was read
    # the sensor will always tell us how much there is, so no need to track it ourselves
    def _read_header(self):
        """Reads the first 4 bytes available as a header"""
        with self.bus_device_obj as i2c:
            i2c.readinto(self._data_buffer, end=4)  # this is expecting a header
        packet_header = Packet.header_from_buffer(self._data_buffer)
        self._dbg(packet_header)
        return packet_header

    def _read_packet(self):
        with self.bus_device_obj as i2c:
            i2c.readinto(self._data_buffer, end=4)  # this is expecting a header?
        self._dbg("")
        # print("SHTP READ packet header: ", [hex(x) for x in self._data_buffer[0:4]])
        header = Packet.header_from_buffer(self._data_buffer)
        packet_byte_count = header.packet_byte_count
        channel_number = header.channel_number
        sequence_number = header.sequence_number
        self._sequence_number[channel_number] = sequence_number
        if packet_byte_count == 0:
            self._dbg("SKIPPING NO PACKETS AVAILABLE IN i2c._read_packet")
            raise PacketError("No packet available")
        packet_byte_count -= 4
        self._dbg(
            "channel",
            channel_number,
            "has",
            packet_byte_count,
            "bytes available to read",
        )
        self._read(packet_byte_count)
        new_packet = Packet(self._data_buffer)
        if self._debug:
            print(new_packet)
        self._update_sequence_number(new_packet)
        return new_packet

    # returns true if all requested data was read
    def _read(self, requested_read_length):
        self._dbg("trying to read", requested_read_length, "bytes")
        # +4 for the header
        total_read_length = requested_read_length + 4
        if total_read_length > DATA_BUFFER_SIZE:
            self._data_buffer = bytearray(total_read_length)
            self._dbg(
                "!!!!!!!!!!!! ALLOCATION: increased _data_buffer to bytearray(%d) !!!!!!!!!!!!! "
                % total_read_length
            )
        with self.bus_device_obj as i2c:
            i2c.readinto(self._data_buffer, end=total_read_length)

    @property
    def _data_ready(self):
        header = self._read_header()
        if header.channel_number > 5:
            self._dbg("channel number out of range:", header.channel_number)
        if header.packet_byte_count == 0x7FFF:
            print("Byte count is 0x7FFF/0xFFFF; Error?")
            if header.sequence_number == 0xFF:
                print("Sequence number is 0xFF; Error?")
            ready = False
        else:
            ready = header.data_length > 0
        # self._dbg("\tdata ready", ready)
        return ready
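# Hedged usage sketch (assumes a CircuitPython setup; "board" and "busio" are
# standard CircuitPython modules, not part of this file):
#   import board, busio
#   i2c = busio.I2C(board.SCL, board.SDA)
#   bno = BNO08X_I2C(i2c)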
| true
|
888b406a2aada1ceeeb311bc30123fe4ec4f5112
|
Python
|
moongchi98/MLP
|
/백준/JAN_FEB/1316_그룹단어체커.py
|
UTF-8
| 268
| 3.328125
| 3
|
[] |
no_license
|
T = int(input())
cnt = 0
for _ in range(T):
    stack = []
    word = input()
    for alpa in word:
        if alpa not in stack:
            stack.append(alpa)
        else:
            if stack[-1] != alpa:
                break
    # for-else: runs only when the inner loop was not broken,
    # i.e. the word is a valid group word
    else:
        cnt += 1
print(cnt)
| true
|
d0a53763935f70585804bb18aa637b1675d696b6
|
Python
|
m-barneto/ArcadeBot
|
/Bot/Filler/tile.py
|
UTF-8
| 111
| 2.921875
| 3
|
[] |
no_license
|
class Tile:
    def __init__(self, color: int, team: int):
        self.color = color
        self.team = team
| true
|
dee7acaca8dfc16045615524ca4974da01d2cd7f
|
Python
|
SkittlePox/Direct-Compositional-Parser
|
/LexicalStructures/Syntax.py
|
UTF-8
| 1,733
| 2.984375
| 3
|
[] |
no_license
|
import enum
from functools import reduce
VERBOSE = True
class SyntacticPrimitive(enum.Enum):
    def __str__(self):
        return str(self.value)

    S = "S"
    NP = "NP"
    PP = "PP"
    N = "N"
    CP = "CP"

class SyntacticFeature(enum.Enum):
    def __str__(self):
        return self.value

    A = "A"
    V = "V"
    TO = "TO"
    OF = "OF"
    GEN = "GEN"

class SyntacticSlash(enum.Enum):
    def __str__(self):
        if VERBOSE:
            return self.value
        else:
            return ""

    L = "˻"
    R = "ʳ"

class SyntacticCategory:
    def __init__(self, lhs=None, rhs=None, slash=None, features=None):
        self.lhs = lhs
        self.rhs = rhs  # This is possibly None
        self.features = features
        self.slash = slash
        if slash == None and self.rhs is not None:  # Word order rules
            if self.lhs.lhs == SyntacticPrimitive.S or self.lhs == self.rhs:
                self.slash = SyntacticSlash.L
            else:
                self.slash = SyntacticSlash.R
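    # Illustrative reading of the rule above (inferred from this code, not from
    # separate documentation): a functor category whose result is S, or whose
    # argument equals its result, defaults to taking its argument on the left
    # (SyntacticSlash.L); all other functor categories default to the right.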
    def optional_features(self):
        if self.features is None:
            return ""
        else:
            return "[" + reduce(lambda a, b: f"{a}][{b}", self.features) + "]"

    def possible_primitive(self):
        if self.rhs is None:
            return f"{str(self.lhs)}"
        else:
            return f"({str(self.lhs)}/{str(self.slash)}{str(self.rhs)})"

    def __str__(self):
        return f"{self.possible_primitive()}{self.optional_features()}"

    def __eq__(self, other):
        return isinstance(other, SyntacticCategory) \
            and self.lhs == other.lhs and self.rhs == other.rhs \
            and self.features == other.features and self.slash == other.slash
| true
|
ccb31698744764ccda62e9475c129483a4b3170c
|
Python
|
xuanxuan03021/ml_implementationfrom_strach
|
/coursework1/data/data_explore.py
|
UTF-8
| 3,303
| 3.359375
| 3
|
[] |
no_license
|
import numpy as np
# read dataset
#train_full
x_full=np.loadtxt("train_full.txt", delimiter=',',dtype= str)
x_sub=np.loadtxt("train_sub.txt", delimiter=',',dtype= str)
x_noisy=np.loadtxt("train_noisy.txt", delimiter=',',dtype= str)
# Your function/class method should return:
# 1. a NumPy array of shape (N,K) representing N training instances of K attributes;
# 2. a NumPy array of shape (N, ) containing the class label for each N instance. The class label
# should be a string representing the character, e.g. "A", "E".
#How many samples/instances are there?
print(x_full.shape)
print(x_sub.shape)
print(x_noisy.shape)
#split dataset
y_full=x_full[:,16]
y_sub=x_sub[:,16]
y_noisy=x_noisy[:,16]
x_full=x_full[:,0:16].astype(int)
x_sub=x_sub[:,0:16].astype(int)
x_noisy=x_noisy[:,0:16].astype(int)
# How many unique class labels (characters to be recognised) are there?
# All three sets contain the same six labels: O, C, Q, G, A, E.
y_full_class=set(y_full)
y_sub_class=set(y_sub)
y_noise_class= set(y_noisy)
print(y_full_class)
print(y_sub_class)
print(y_noise_class)
#What is the distribution across the classes (e.g. 40% ‘A’s, 20% ‘C’s)?
print("+++++++++++++train_full class distribution+++++++++++++++")
y_full_A=len(y_full[y_full=='A'])/len(y_full)
y_full_C=len(y_full[y_full=='C'])/len(y_full)
y_full_G=len(y_full[y_full=='G'])/len(y_full)
y_full_E=len(y_full[y_full=='E'])/len(y_full)
y_full_O=len(y_full[y_full=='O'])/len(y_full)
y_full_Q=len(y_full[y_full=='Q'])/len(y_full)
print(y_full_A)
print(y_full_C)
print(y_full_G)
print(y_full_E)
print(y_full_O)
print(y_full_Q)
print("+++++++++++++train_sub class distribution+++++++++++++++")
y_sub_A=len(y_sub[y_sub=='A'])/len(y_sub)
y_sub_C=len(y_sub[y_sub=='C'])/len(y_sub)
y_sub_G=len(y_sub[y_sub=='G'])/len(y_sub)
y_sub_E=len(y_sub[y_sub=='E'])/len(y_sub)
y_sub_O=len(y_sub[y_sub=='O'])/len(y_sub)
y_sub_Q=len(y_sub[y_sub=='Q'])/len(y_sub)
print(y_sub_A)
print(y_sub_C)
print(y_sub_G)
print(y_sub_E)
print(y_sub_O)
print(y_sub_Q)
print("+++++++++++++train_noisy class distribution+++++++++++++++")
y_noisy_A=len(y_noisy[y_noisy=='A'])/len(y_noisy)
y_noisy_C=len(y_noisy[y_noisy=='C'])/len(y_noisy)
y_noisy_G=len(y_noisy[y_noisy=='G'])/len(y_noisy)
y_noisy_E=len(y_noisy[y_noisy=='E'])/len(y_noisy)
y_noisy_O=len(y_noisy[y_noisy=='O'])/len(y_noisy)
y_noisy_Q=len(y_noisy[y_noisy=='Q'])/len(y_noisy)
print(y_noisy_A)
print(y_noisy_C)
print(y_noisy_G)
print(y_noisy_E)
print(y_noisy_O)
print(y_noisy_Q)
# Are the samples balanced across all the classes, or biased towards one or two classes?
# train_full and train_noisy are balanced (each class ~17%), but train_sub is not:
# C appears roughly twice as often as it should, G and Q have small proportions,
# and E has a slightly higher proportion.
# The mean and range of the attributes in train_full and train_sub are almost
# the same, with only slight differences.
attribute_full_range=x_full.max(axis=0)-x_full.min(axis=0)
print("Attributes range in train_full",attribute_full_range)
attribute_sub_range=x_sub.max(axis=0)-x_sub.min(axis=0)
print("Attributes range in train_sub",attribute_sub_range)
attribute_full_mean=x_full.mean(axis=0)
print("Attributes mean in train_full",attribute_full_mean)
attribute_sub_mean=x_sub.mean(axis=0)
print("Attributes mean in train_sub",attribute_sub_mean)
| true
|
7bb3e12ed54f98f86b4e31ddb6e2cb05ba43d95e
|
Python
|
TheAlgorithms/Python
|
/project_euler/problem_030/sol1.py
|
UTF-8
| 1,187
| 4.375
| 4
|
[
"MIT",
"CC-BY-NC-4.0",
"CC-BY-NC-SA-4.0"
] |
permissive
|
""" Problem Statement (Digit Fifth Powers): https://projecteuler.net/problem=30
Surprisingly there are only three numbers that can be written as the sum of fourth
powers of their digits:
1634 = 1^4 + 6^4 + 3^4 + 4^4
8208 = 8^4 + 2^4 + 0^4 + 8^4
9474 = 9^4 + 4^4 + 7^4 + 4^4
As 1 = 1^4 is not a sum it is not included.
The sum of these numbers is 1634 + 8208 + 9474 = 19316.
Find the sum of all the numbers that can be written as the sum of fifth powers of their
digits.
9^5 = 59049
59049 * 7 = 413343 (only a 6-digit number)
So, numbers greater than 999999 are rejected,
and since 59049 * 3 = 177147 already exceeds any 3-digit number,
we take number > 999,
hence a number between 1000 and 1000000.
"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
"""
>>> digits_fifth_powers_sum(1234)
1300
"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))
def solution() -> int:
return sum(
number
for number in range(1000, 1000000)
if number == digits_fifth_powers_sum(number)
)
if __name__ == "__main__":
print(solution())
| true
|
06db442da07f1cd0b54d5f33624bc8784f3e99bd
|
Python
|
JacobIRR/rovers
|
/mars_rovers.py
|
UTF-8
| 11,094
| 3.5
| 4
|
[] |
no_license
|
#! /usr/bin/python
import sys
class CollisionError(Exception):
"Raise this when a non-self_preserve rover collides with another"
pass
class OutOfBoundsError(Exception):
"Raise this when a non-self_preserve rover runs off the edge"
pass
class CrossedOwnPathException(Exception):
"Raise when we cross our own path"
pass
class Rover(object):
"""
One of potentially many rovers on the plateau that can move and turn
"""
def __init__(self, x, y, facing, moves, self_preserve=False):
self.x = x
self.y = y
self.facing = facing
self.moves = moves
self.self_preserve = self_preserve
# Rotation / movement lookup tools
self.clockwise = {'N': 'E', 'E': 'S', 'S': 'W', 'W': 'N'}
self.counter_clockwise = {'N': 'W', 'W': 'S', 'S': 'E', 'E': 'N'}
self.movement_map = {'N': [0, 1], 'E': [1, 0], 'W': [-1, 0], 'S': [0, -1]}
self.past_moves = set()
def __str__(self):
"""
Match the expected output of the specification, e.g. "1 1 N"
"""
return ' '.join([str(self.x), str(self.y), self.facing])
def rotate(self, direction):
"""
Take an incoming Left / Right direction and based on that direction,
assign `facing` attribute based on lookup of corresponding clockwise
or counter_clockwise hashtable. In this hashtable, the key is
the current direction and the value returned is the resulting direction.
"""
if direction == 'L':
self.facing = self.counter_clockwise[self.facing]
if direction == 'R':
self.facing = self.clockwise[self.facing]
# return for use with unit tests
return self.facing
def advance(self, no_go_zones, width, height):
"""
Based on current position and facing, move forward one point on the plateau.
The `movement_map` defines which directions result in which X, Y change.
Depending on self_preserve status, we may raise and exception when
running off the edge or colliding with another rover.
"""
x_change, y_change = self.movement_map[self.facing]
test_position = (self.x + x_change, self.y + y_change)
# Test for collisions
if test_position in no_go_zones:
if self.self_preserve:
print "A rover almost bumped into another rover! Skipping this move..."
return None
else:
raise CollisionError("A rover ran into another rover! MISSION FAILED")
# Test for out of bounds error
elif any([test_position[0] > width, test_position[0] < 0,
test_position[1] > height, test_position[1] < 0]):
if self.self_preserve:
print "A rover almost rolled off the plateau! Skipping this move..."
return None
else:
raise OutOfBoundsError("A rover drove off the edge of the plateau! MISSION FAILED")
# Throw exception if we cross our own path
if test_position in self.past_moves:
raise CrossedOwnPathException
# Advance forward if both tests passed
else:
self.x, self.y = test_position
self.past_moves.add(test_position)
# return for use with unit tests
return self.x, self.y
class PlateauEnvironment(object):
"""
This class acts as a factory to create the plateau and rovers,
and handles returning the result of the rover motions back to the caller
Before initializing the Plateau and Rovers, confirm that args match the spec
"""
def __init__(self):
# Empty init until args are validated
self.rovers = []
self.width = None
self.height = None
def get_rover_params(self, rover_args, self_preserve):
"""
        Parse and validate command line input for rover parameters.
        The X and Y coordinates of the rover are 0-indexed.
        """
        # First, make sure we have a corresponding moves line for every x,y,facing line
try:
assert rover_args and len(rover_args) % 2 == 0
except AssertionError:
lines = '\n'.join(rover_args)
raise ValueError("Not enough lines to create rovers: \n %s " % lines)
# Now loop over args and validate each param
rover_params = []
claimed_ground = set()
for ndx, i in enumerate(rover_args):
if ndx % 2 == 0:
try:
                    # odd rows (even index) hold rover position and facing
x, y, facing = list(i.replace(' ', ''))
x = int(x)
y = int(y)
claimed_ground.add((x, y))
assert facing.upper() in ('N', 'E', 'W', 'S')
assert x >= 0
assert y >= 0
assert x <= self.width
assert y <= self.height
except:
msg = 'Cannot construct rover from : %r \n'
msg += 'Use two positive integers between 0 and plateau size '
msg += 'and a Direction (N, S, E, W).'
raise ValueError(msg % i)
try:
# skip ahead to the next line to get the moves for this rover
moves = list(rover_args[ndx+1].replace(' ', '').upper())
assert set(moves).issubset(set(['L', 'R', 'M']))
except:
raise ValueError('This moves string is not valid : %r' % rover_args[ndx+1])
# and save this tuple to the list of attrs
rover_params.append((x, y, facing, moves, self_preserve))
else:
# skip even row (odd index)
continue
# One last check to make sure that no rovers ended up on top of others:
if len(claimed_ground) < len(rover_params):
raise ValueError("Rovers were placed on top of each other!")
return rover_params
def get_plateau_dims(self, line):
"""
Verify that we can construct the plateau from command line input
The width and height of the plateau must be at least 1 X 1
"""
try:
dims = [int(i) for i in filter(None, line.strip().split(' '))]
if len(dims) == 2:
width, height = dims
assert (width + height) > 1
return width, height
else:
raise Exception
except:
msg = "Bad input for plateau dimensions: %r."
msg += "Use two positive integers"
raise ValueError(msg % line)
def create_rover(self, x, y, facing, moves, self_preserve=False):
"""
Build and return a new Rover
Add this rover to self.rovers
"""
rover = Rover(x, y, facing, moves, self_preserve=self_preserve)
self.rovers.append(rover)
# return for use with unit tests
return rover
def create_plateau(self, width, height):
"""
Here we assign the attributes for this plateau, having validated them
as well as the rover parameters.
"""
self.width, self.height = width, height
# return for use with unit tests
return self
def run_rover_moves(self, rover):
"""
Digest the moves of a given rover
Reposition the rover in its final space
Raise errors if rover.self_preserve is not set
Skip "Illegal" moves if rover.self_preserve is set
"""
# Because this method is called once for each rover, `no_go_zones`
# reflects an accurate snapshot of current rover positions
no_go_zones = [(r.x, r.y) for r in self.rovers
if (r.x, r.y) != (rover.x, rover.y)]
# Execute each move
for step in rover.moves:
if step == 'M':
rover.advance(no_go_zones, self.width, self.height)
else:
# step is either L or R here
rover.rotate(step)
        return str(rover)
def result(self, w, h):
"""
Run all rover moves
Return a new line for each rover's x/y/facing attributes
"""
final_state = '\n'.join([self.run_rover_moves(r) for r in self.rovers])
return final_state
# ##############################################################################
# SETUP FUNCTIONS / COMMAND LINE CLIENT
# ##############################################################################
def main():
"""
Main function for running direct tests from command line.
Ctrl + C exits
"""
# Don't expose code in tracebacks, just show the last line of the Exception
sys.tracebacklimit = 0
# Dispatch to setup prompts and break out with Control-C without error
try:
return set_up_environment()
except KeyboardInterrupt:
print
sys.exit("Exiting...")
def get_bool_answer(question):
"""
Ask a Yes/No Question and coerce to a bool value
"""
bool_map = {'Y': True,
'YES': True,
'N': False,
'NO': False}
var = raw_input(question)
while var.upper() not in bool_map.keys():
var = raw_input("Please type `Y` or `N`: ")
return bool_map[var.upper()]
def set_up_environment():
"""
Quickly dump in all args to create the environment
"""
print
print "Rovers are in danger of running into each other, or off the edge of the plateau..."
self_preserve = get_bool_answer("Will these rovers be \"self-preserving?\" ")
print
print "Now, let's set up the Plateau and Rovers."
print "Please enter the plateau size, and rover positions/movements, for example:"
print """
5 5 # plateau
1 2 N # position of rover 1
LMLMLMLMM # movements of rover 1
3 3 E # position of rover 2
MMRMMRMRRM # movements of rover 2
(Hit ENTER twice when finished)
"""
# Collect input lines for as many rovers as the user would like to create
lines = []
while True:
line = raw_input(">")
if line:
lines.append(line)
else:
break
# start up the factory/environment
plateau = PlateauEnvironment()
# Get plateau dimensions from the first line and create plateau
width, height = plateau.get_plateau_dims(lines[0])
plateau.create_plateau(width, height)
# Get args for rovers from the remaining lines and create rovers
rover_params = plateau.get_rover_params(lines[1:], self_preserve)
for rover in rover_params:
plateau.create_rover(*rover)
# Process all the moves and return the final state
result = plateau.result(width, height)
print result
# In case this gets consumed by an external service/test
return result
if __name__ == '__main__':
main()
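# A minimal non-interactive usage sketch (added for illustration; the original
# script is driven through set_up_environment()). A single rover avoids the
# CrossedOwnPathException this implementation raises when a rover revisits one
# of its own squares, which the classic two-rover example would trigger.
def _demo():
    plateau = PlateauEnvironment()
    plateau.create_plateau(5, 5)
    plateau.create_rover(1, 2, 'N', list('LMLMLMLMM'))
    print(plateau.result(5, 5))  # expected: "1 3 N"

# _demo()  # uncomment to run the example instead of the interactive client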
| true
|
9bb8532cb4885173185bf788e5ecfc69ab1415bf
|
Python
|
saimahithanatakala/python-basics
|
/dates/1.todays date.py
|
UTF-8
| 137
| 2.96875
| 3
|
[] |
no_license
|
from datetime import date
today = date.today()
print("today's date is:", today)
print("date is:", today.day, "-", today.month, "-", today.year)
| true
|
128ec82de5336e4c08ecb1d35869940a59c337b6
|
Python
|
saikb92/rookie
|
/abc.py
|
UTF-8
| 330
| 3.25
| 3
|
[] |
no_license
|
def sum(a, b):  # note: shadows the built-in sum()
    return a + b

def avg(a, b):
    return sum(a, b) / 2

a = float(input("Enter first number:"))
b = float(input("Enter second number:"))
c = input("enter the test")
print("Sum of the given two numbers is: ", sum(a, b))
print("Average of the given numbers is: ", avg(a, b))
print("Input c:", c)
| true
|
38a25942208e36c5c4f496350b9f311bb7157946
|
Python
|
sydney0zq/LeetCode
|
/AAAAU_LeetCodeLTS/300_length_of_lis.py
|
UTF-8
| 837
| 3.21875
| 3
|
[] |
no_license
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 qiang.zhou <qiang.zhou@Macbook>
#
# Distributed under terms of the MIT license.
"""
https://leetcode-cn.com/problems/longest-increasing-subsequence/
"""
class Solution:
def lengthOfLIS(self, nums) -> int:
# init dp array
if len(nums) == 0: return 0
dp = [1] * len(nums)
for i in range(len(nums)):
n = nums[i]
all_larger_dp = []
for j in range(0, i):
#print ("j", j)
if n > nums[j]:
all_larger_dp.append(dp[j])
#print (all_larger_dp)
if len(all_larger_dp) != 0:
dp[i] += max(all_larger_dp)
return max(dp)
aa = [10,9,2,5,3,7,101,18]
#aa = [-2,-1]
print (Solution().lengthOfLIS(aa))
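# An alternative sketch (not part of the original solution): the classic
# O(n log n) patience-sorting variant. tails[k] holds the smallest possible
# tail of an increasing subsequence of length k+1.
import bisect

def length_of_lis_fast(nums):
    tails = []
    for n in nums:
        i = bisect.bisect_left(tails, n)
        if i == len(tails):
            tails.append(n)  # n extends the longest subsequence seen so far
        else:
            tails[i] = n     # n becomes a smaller tail for length i+1
    return len(tails)

print (length_of_lis_fast(aa))  # should match lengthOfLIS above (4 for this input)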
| true
|
0b6ae7deeb2e463392395b60b0f4cbb7461e2eba
|
Python
|
Ariyohalder2007/PythonAssingment2
|
/Q-3.py
|
UTF-8
| 225
| 2.796875
| 3
|
[] |
no_license
|
def file_read(fname):
my_array = []
with open(fname) as f:
for i in f:
my_array.append(i)
print(my_array)
file_read('D:/python assingment2/Q-2.txt')
| true
|
e8c77f39a4242843fb4fc0603ff9dec31a1025d0
|
Python
|
prymnumber/AoC
|
/AoC/day6.py
|
UTF-8
| 1,676
| 2.671875
| 3
|
[] |
no_license
|
import pdb
import sys
from common import *
l_file = '/Users/iposton/GitHub/PyPractice/AoC/'+str(sys.argv[1])
init = map(int,get_file(l_file).split('\t'))
class memory_bank:
    def __init__(self, blocks, id, cycle):
        self.blocks = blocks
        self.id = id
        self.cycle = cycle
        self.MaxBlock = False
        self.pattern = []
def find_and_set_max_bank(banks):
    # clear stale flags first, so redistribute() locates the bank chosen this
    # cycle rather than one flagged on an earlier cycle
    for b in banks:
        b.MaxBlock = False
    max_bank = max(banks, key=lambda b: b.blocks)
    max_bank.MaxBlock = True
    return max_bank
def redistribute(max_bank, banks):
#pdb.set_trace()
banks = banks
max_block = max_bank.blocks
id = max_bank.id
banks[id].blocks = 0
for i in range(max_block):
if id == len(banks)-1:
id = 0
else:
id +=1
banks[id].blocks += 1
pattern = []
idx = None
for i in banks:
pattern.append(i.blocks)
        if i.MaxBlock:
idx = i.id
banks[idx].pattern = pattern
return idx,banks
banks = []
i = 0
new_pattern = []
redist_cycles = 0
size_of_loop = 0
for bank in init:
banks.append(memory_bank(bank,i,0))
i+=1
while True:
#pdb.set_trace()
maxbank = find_and_set_max_bank(banks)
idx,banks = redistribute(maxbank,banks)
new_pattern.append(banks[idx].pattern)
redist_cycles +=1
if new_pattern.count(banks[idx].pattern) >1:
loop_start = new_pattern.index(banks[idx].pattern)
#print('loop_start:',loop_start)
size_of_loop = abs(redist_cycles-loop_start)-1
break
print('END Redist_cycle:',redist_cycles)
print('Size of loop:',size_of_loop)
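# An alternative sketch (not part of the original): the same puzzle with a
# seen-state dictionary. The first return value is part 1 (cycles until any
# configuration repeats); the second is part 2 (the loop length).
def redistribution_cycles(blocks):
    blocks = list(blocks)
    seen = {}
    cycles = 0
    while tuple(blocks) not in seen:
        seen[tuple(blocks)] = cycles
        i = blocks.index(max(blocks))  # lowest-indexed largest bank wins ties
        amount, blocks[i] = blocks[i], 0
        for step in range(1, amount + 1):
            blocks[(i + step) % len(blocks)] += 1
        cycles += 1
    return cycles, cycles - seen[tuple(blocks)]

# part1, part2 = redistribution_cycles(init)  # cross-check against the loop above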
| true
|
b047f196198f5af2735e5fc9def83aa13e70423e
|
Python
|
woodybury/raspi_flask
|
/main.py
|
UTF-8
| 1,732
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
from flask import Flask, render_template, redirect
from flask_basicauth import BasicAuth
import datetime
import env
# gate code imports
import time
import RPi.GPIO as GPIO
# set GPIO mode
GPIO.setmode(GPIO.BCM)
def openGate():
GPIO.setup(17,GPIO.OUT)
GPIO.output(17,GPIO.LOW)
time.sleep(1.45)
GPIO.output(17,GPIO.HIGH)
def closeGate():
GPIO.setup(17,GPIO.OUT)
GPIO.output(17,GPIO.LOW)
GPIO.setup(27,GPIO.OUT)
GPIO.output(27,GPIO.LOW)
time.sleep(1.45)
GPIO.output(17,GPIO.HIGH)
GPIO.output(27,GPIO.HIGH)
app = Flask(__name__)
app.config['BASIC_AUTH_USERNAME'] = env.config["auth"]["name"]
app.config['BASIC_AUTH_PASSWORD'] = env.config["auth"]["password"]
basic_auth = BasicAuth(app)
gate = 0
message = "The coop is shut!"
@app.route("/")
@basic_auth.required
def hello():
now = datetime.datetime.now()
timeString = now.strftime("%Y-%m-%d %H:%M")
templateData = {
'title' : 'HELLO BILL!',
'time': timeString,
'gate' : gate,
'message' : message
}
return render_template('main.html', **templateData)
@app.route("/gate/<int:gate_action>")
@basic_auth.required
def action(gate_action):
global gate, message
if gate_action == 1:
print (action, "open the gate")
# Gate open code here
openGate()
message = "The coop is open!"
gate = 1
elif gate_action == 0:
print (action, "close the gate")
# Gate close code here
closeGate()
message = "The coop is shut!"
gate = 0
else:
print ("hacker alert!")
message = "You can't do that!"
return redirect('/')
if __name__ == "__main__":
app.run(host=env.config["ip"], port=3000, debug=True)
GPIO.cleanup()
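# Usage sketch (added for illustration; <user>/<pass> are the .env values
# loaded above, and <ip> is env.config["ip"]):
#   curl -u "<user>:<pass>" http://<ip>:3000/gate/1   # open the coop
#   curl -u "<user>:<pass>" http://<ip>:3000/gate/0   # close the coop
#   curl -u "<user>:<pass>" http://<ip>:3000/gate/9   # anything else -> "You can't do that!"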
| true
|
de5c0a911849514d8023567a8e15d834fd3ee69e
|
Python
|
dianezhou96/egocentricgaze
|
/video_to_data.py
|
UTF-8
| 9,710
| 2.875
| 3
|
[] |
no_license
|
import cv2
import mmcv
import numpy as np
import pandas as pd
import random
import torch
from torch.utils.data import DataLoader, Dataset, IterableDataset
from torchvision import transforms
# multiple videos
class GazeFrameDataset(IterableDataset):
"""
    A dataset for iterating through the frames of videos with gaze data
data_path: folder where the videos are located
videos_list: list of names of videos to include
transform: transform to apply to data
"""
def __init__(self, data_path, videos_list, transform=None, shuffle=False):
self.data_path = data_path
self.transform = transform
self.video_readers = [self.get_video_reader(video_name) for video_name in videos_list]
self.gaze_dfs = [self.get_gaze_positions(video_name) for video_name in videos_list]
self.video_gaze_tuples = []
for i, gaze_df in enumerate(self.gaze_dfs):
self.video_gaze_tuples.extend([(i, j) for j in range(len(gaze_df))])
if shuffle:
random.shuffle(self.video_gaze_tuples)
self.video_gaze_idx = 0
def __iter__(self):
return self
def __next__(self):
# Find next frame
# Sometimes last frames are empty so need to set new video
found_frame = False
while not found_frame:
if self.video_gaze_idx == len(self.video_gaze_tuples):
raise StopIteration
video_idx, gaze_idx = self.video_gaze_tuples[self.video_gaze_idx]
video = self.video_readers[video_idx]
gaze_df = self.gaze_dfs[video_idx]
frame_idx = gaze_df.loc[gaze_idx, 'world_index']
if frame_idx < len(video):
frame = video[frame_idx]
else:
frame = None
self.video_gaze_idx += 1
if frame is not None:
found_frame = True
# Extract gaze info for frame
frame = frame.transpose(1, 0, 2) # make shape H x W
gaze_position = (gaze_df.loc[gaze_idx, 'norm_pos_x'],
gaze_df.loc[gaze_idx, 'norm_pos_y'])
sample = (frame, gaze_position)
if self.transform:
sample = self.transform(sample)
return sample
def get_video_reader(self, video_name):
path = self.data_path + video_name + '/world_resized.mp4'
video = mmcv.VideoReader(path)
return video
def get_gaze_positions(self, video_name):
path = self.data_path + video_name + '/gaze_positions.csv'
df = pd.read_csv(path)
# Keep only one gaze position per frame
df_unique = df.groupby('world_index', group_keys=False).apply(lambda df: df.sample(1))
# Keep only relevant columns
df_gaze = df_unique[['world_index', 'norm_pos_x', 'norm_pos_y']].reset_index(drop=True)
return df_gaze
class SetSize(object):
"""
Transform object to set frame to desired size and create saliency map from gaze location
"""
def __init__(self, frame_size, map_size, gaussian_blur_size, class_size=None):
self.frame_size = frame_size
self.map_size = map_size
self.gaussian_blur_size = gaussian_blur_size
self.class_size = class_size
def __call__(self, sample):
frame, gaze_position = sample
# Resize frame
# resized_frame = cv2.resize(frame, dsize=self.frame_size)
resized_frame = frame
# Create target
gaze_norm_x, gaze_norm_y = gaze_position
get_abs_pos = lambda x, upper: int(max(0, min(x * upper, upper-1)))
# Saliency map with gaussian blur
if self.gaussian_blur_size:
height, width = self.map_size
gaze_y = get_abs_pos(gaze_norm_y, height)
gaze_x = get_abs_pos(gaze_norm_x, width)
target = np.zeros((height, width))
target[gaze_y, gaze_x] = 1
target = cv2.GaussianBlur(target, self.gaussian_blur_size, 0)
# Class label
elif self.class_size:
height, width = self.class_size
gaze_y = get_abs_pos(gaze_norm_y, height)
gaze_x = get_abs_pos(gaze_norm_x, width)
target = gaze_y * width + gaze_x
# Normalized coordinates
else:
target = gaze_position
return resized_frame, target
class ToTensor(object):
"""
Transform object to set frame and saliency map to tensor type
"""
def __call__(self, sample):
frame, target = sample
# numpy image: H x W x C
# torch image: C x H x W
frame = frame.transpose((2, 0, 1)) / 255 # 0 to 1 instead of 0 to 255
target = np.expand_dims(target, 0)
return (torch.from_numpy(frame).float(), torch.from_numpy(target).float())
class SetSizeShiftedGrids(object):
"""
Transform object to set frame to desired size and
create target classes from gaze location for shifted grids method
"""
def __init__(self, frame_size, N, class_size=None):
self.frame_size = frame_size
self.N = N
self.class_size = class_size
if self.N:
shift = 1 / (2 * N)
self.shifted_grids = [
(0, 0),
(-shift, 0),
(shift, 0),
(0, -shift),
(0, shift)
]
def __call__(self, sample):
frame, gaze_position = sample
# Resize frame
# resized_frame = cv2.resize(frame, dsize=self.frame_size)
resized_frame = frame
# Create target saliency map with shifted grids
gaze_norm_x, gaze_norm_y = gaze_position
if not self.N: # Only need the normalized positions
return resized_frame, [gaze_norm_y, gaze_norm_x]
targets = []
get_abs_pos = lambda x, upper: int(max(0, min(x * upper, upper-1)))
for i in range(len(self.shifted_grids)):
x_shift, y_shift = self.shifted_grids[i]
gaze_norm_x_shifted = gaze_norm_x + x_shift
gaze_norm_y_shifted = gaze_norm_y + y_shift
if not self.class_size:
gaze_y = get_abs_pos(gaze_norm_y_shifted, self.N)
gaze_x = get_abs_pos(gaze_norm_x_shifted, self.N)
target = gaze_y * self.N + gaze_x
else:
height, width = self.class_size
gaze_y = get_abs_pos(gaze_norm_y_shifted, height)
gaze_x = get_abs_pos(gaze_norm_x_shifted, width)
target = gaze_y * width + gaze_x
targets.append(target)
return resized_frame, targets
class ToTensorShiftedGrids(object):
"""
Transform object to set frame to tensor type
"""
def __call__(self, sample):
frame, targets = sample
# numpy image: H x W x C
# torch image: C x H x W
frame = frame.transpose((2, 0, 1)) / 255 # 0 to 1 instead of 0 to 255
frame = torch.from_numpy(frame).float()
return (frame, targets)
def make_transform(gaussian_blur_size=(3,3), class_size=None):
size_transform = SetSize((227,227), (13,13), gaussian_blur_size, class_size)
tensor_transform = ToTensor()
transform = transforms.Compose([size_transform, tensor_transform])
return transform
def make_transform_shifted_grids(N=5, class_size=None):
size_transform = SetSizeShiftedGrids((227,227), N, class_size)
tensor_transform = ToTensorShiftedGrids()
transform = transforms.Compose([size_transform, tensor_transform])
return transform
def get_videos_list_from_file(filename):
with open(filename, 'r') as f:
videos_list = f.read().split("\n")
if len(videos_list[-1]) == 0: # in case last line is empty
videos_list.pop()
return videos_list
if __name__ == '__main__':
# print("Dummy test...")
# frames = get_frames('world')
# print("Number of frames:", len(frames))
# gaze_positions = get_gaze_positions('gaze_positions')
# print(gaze_positions)
# size_transform = SetSize((227,227), (13,13), (3,3))
# tensor_transform = ToTensor()
# transform = transforms.Compose([size_transform, tensor_transform])
# dataset = GazeFrameDataset("./data/", "2020-03-15_19-27-56-f2472745", transform=transform)
# print(dataset[0][0].shape)
# print(dataset[0][1].shape)
# torch.utils.data.DataLoader(dataset)
# data_path = "./data/"
# videos_list = ["2020-03-15_19-27-56-f2472745", "2020-06-22_11-14-22-319eaf00",
# "2020-06-25_17-25-16_alexl_everyday-tyingshoelaces-189703d3"]
# dataset = GazeFrameDataset(data_path, videos_list, transform=transform)
# for i, sample in enumerate(dataset):
# if i % 100 == 99:
# print(i+1)
# torch.utils.data.DataLoader(dataset)
print("Shifted grids test...")
size_transform = SetSizeShiftedGrids((227,227), 5)
tensor_transform = ToTensorShiftedGrids()
transform = transforms.Compose([size_transform, tensor_transform])
# dataset = GazeFrameDataset("./data/", "2020-03-15_19-27-56-f2472745", transform=transform)
# print(dataset[0][0].shape)
# print(dataset[0][1].shape)
# torch.utils.data.DataLoader(dataset)
data_path = "./data/"
videos_list = ["2020-03-15_19-27-56-f2472745", "2020-06-22_11-14-22-319eaf00",
"2020-06-25_17-25-16-189703d3"]
dataset = GazeFrameDataset(data_path, videos_list, transform=transform, shuffle=True)
# for i, sample in enumerate(dataset):
# if i % 100 == 99:
# print(i+1)
dataloader = torch.utils.data.DataLoader(dataset)
for i, data in enumerate(dataloader, 0):
if i % 100 == 99:
print(i+1)
print("Done!")
| true
|
400adfffde85b29d60daaa695b22cf2153d898e9
|
Python
|
YuenFuiLau/Hand-Tracking-Project
|
/ConvNet/test_utils.py
|
UTF-8
| 3,942
| 3.3125
| 3
|
[] |
no_license
|
import numpy as np
def batchnorm_forward(x, gamma, beta, bn_param):
"""
Forward pass for batch normalization.
During training the sample mean and (uncorrected) sample variance are
computed from minibatch statistics and used to normalize the incoming data.
During training we also keep an exponentially decaying running mean of the
mean and variance of each feature, and these averages are used to normalize
data at test-time.
At each timestep we update the running averages for mean and variance using
an exponential decay based on the momentum parameter:
running_mean = momentum * running_mean + (1 - momentum) * sample_mean
running_var = momentum * running_var + (1 - momentum) * sample_var
Input:
- x: Data of shape (N, D)
- gamma: Scale parameter of shape (D,)
    - beta: Shift parameter of shape (D,)
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance.
- running_mean: Array of shape (D,) giving running mean of features
- running_var Array of shape (D,) giving running variance of features
Returns a tuple of:
- out: of shape (N, D)
- cache: A tuple of values needed in the backward pass
"""
mode = bn_param['mode']
eps = bn_param.get('eps', 1e-5)
momentum = bn_param.get('momentum', 0.9)
N, D = x.shape
running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))
running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))
if mode == 'train':
sample_mean = x.mean(axis=0)
sample_var = x.var(axis=0)
running_mean = momentum * running_mean + (1 - momentum) * sample_mean
running_var = momentum * running_var + (1 - momentum) * sample_var
std = np.sqrt(sample_var + eps)
x_centered = x - sample_mean
x_norm = x_centered / std
out = gamma * x_norm + beta
cache = (x_norm, x_centered, std, gamma)
elif mode == 'test':
x_norm = (x - running_mean) / np.sqrt(running_var + eps)
out = gamma * x_norm + beta
cache = ()
else:
raise ValueError('Invalid forward batchnorm mode "%s"' % mode)
# Store the updated running means back into bn_param
bn_param['running_mean'] = running_mean
bn_param['running_var'] = running_var
return out, cache
def batchnorm_backward(dout, cache):
"""
Backward pass for batch normalization.
Use a computation graph for batch normalization and propagate gradients
backward through intermediate nodes.
Inputs:
- dout: Upstream derivatives, of shape (N, D)
- cache: Variable of intermediates from batchnorm_forward.
Returns a tuple of:
- dx: Gradient with respect to inputs x, of shape (N, D)
- dgamma: Gradient with respect to scale parameter gamma, of shape (D,)
- dbeta: Gradient with respect to shift parameter beta, of shape (D,)
"""
N = dout.shape[0]
x_norm, x_centered, std, gamma = cache
dgamma = (dout * x_norm).sum(axis=0)
dbeta = dout.sum(axis=0)
dx_norm = dout * gamma
dx_centered = dx_norm / std
dmean = -(dx_centered.sum(axis=0) + 2/N * x_centered.sum(axis=0))
dstd = (dx_norm * x_centered * -std**(-2)).sum(axis=0)
dvar = dstd / 2 / std
dx = dx_centered + (dmean + dvar * 2 * x_centered) / N
#print(f"dvar:{dvar} , dmean:{dmean}")
#print(f"dx_norm:{dx_norm}")
return dx, dgamma, dbeta
def fc_forward(X, W, b):
out = np.dot(W,X) + b
cache = (W, X)
return out, cache
def fc_backward(dout, cache):
W, h = cache
dW = np.dot(dout,h.T)
db = np.sum(dout, axis=1)
dX = np.dot(W.T,dout)
return dX, dW, db
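# A sanity-check sketch (not in the original file): compare batchnorm_backward
# against a centered numeric gradient of the forward pass. Shapes and seed are
# arbitrary choices for the check.
def _numeric_grad(f, x, dout, h=1e-5):
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        old = x[idx]
        x[idx] = old + h
        pos = f().copy()
        x[idx] = old - h
        neg = f().copy()
        x[idx] = old
        grad[idx] = np.sum((pos - neg) * dout) / (2 * h)
        it.iternext()
    return grad

if __name__ == '__main__':
    np.random.seed(0)
    x = np.random.randn(4, 5)
    gamma, beta = np.random.randn(5), np.random.randn(5)
    out, cache = batchnorm_forward(x, gamma, beta, {'mode': 'train'})
    dout = np.random.randn(*out.shape)
    dx, dgamma, dbeta = batchnorm_backward(dout, cache)
    f = lambda: batchnorm_forward(x, gamma, beta, {'mode': 'train'})[0]
    print(np.max(np.abs(dx - _numeric_grad(f, x, dout))))  # expect a tiny value, ~1e-9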
| true
|
c67ae7caba441ac342857dc1e199494fda55f804
|
Python
|
SyGoing/ClassificationWork
|
/networks/mynet_new.py
|
UTF-8
| 1,372
| 2.546875
| 3
|
[] |
no_license
|
import torch.nn as nn
import torch
class Net(nn.Module):
def __init__(self):
super(Net,self).__init__()
self.net_Conv=nn.Sequential(
nn.Conv2d(3, 16, 3, stride=1),
nn.ReLU(),
nn.MaxPool2d(3, stride=2),
nn.Conv2d(16, 32, 3, stride=1),
nn.ReLU(),
nn.MaxPool2d(3, stride=2),
nn.Conv2d(32, 64, 3, stride=1),
nn.ReLU(),
nn.MaxPool2d(2, stride=2),
nn.Conv2d(64, 128, 3, stride=2),
nn.ReLU(),
)
self.net_Linear=nn.Sequential(
nn.Linear(128 * 6 * 6,128),
nn.Dropout(0.5),
nn.Linear(128, 2)
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
def forward(self,inputs):
x=self.net_Conv(inputs)
x=x.view(-1,self.num_flat_features(x))
x=self.net_Linear(x)
return x
def num_flat_features(self,x):
size=x.size()[1:]
num_features=1
for s in size:
num_features*=s
return num_features
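# A shape sanity check (added for illustration): the hard-coded
# nn.Linear(128 * 6 * 6, 128) implies a specific input resolution; a
# 3 x 128 x 128 input works out to a 6x6 feature map through this stack.
if __name__ == "__main__":
    net = Net()
    dummy = torch.randn(1, 3, 128, 128)  # assumed input size
    print(net(dummy).shape)              # expected: torch.Size([1, 2])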
| true
|
9dda60b227b6204235e6aeb93e715e3d525444c3
|
Python
|
TAUTIC/PartGrabber
|
/partgrabber.py
|
UTF-8
| 2,803
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2014 Jayson Tautic
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from bs4 import BeautifulSoup
from lxml import html
from pprint import pprint
import requests
import sys
def getDetail(part):
print part[:7]
def getHyperlinks(html):
return [(a.text, a['href']) for a in html.findAll("a")]
def getDetails(barcode):
quantity = barcode[7:16]
page = requests.get('http://www.digikey.com/product-detail/en/0/0/' + barcode[:7])
part = dict() # Holds details related to the part
pricing = dict() # Holds pricing for the part
soup = BeautifulSoup(page.text) #open("test.html")
# Parse pricing data from html
table = soup.find("table",{"id":"pricing"})
for row in table.findAll("tr"):
tds = row.findAll(text=True)
if len(tds) > 1:
if tds[0] != "Price Break": pricing[tds[0]] = tds[1] #print tds[0], tds[1]
# Parse product details from html
part['ProductNumber'] = soup.find("meta",{"name":"WT.pn_sku"})['content']
part['MfgProductNumber'] = soup.find("meta",{"itemprop":"name"})['content']
# Grab the dynamic details table data
table = soup.find("table",{"class":"product-additional-info"})
for row in table.table.findAll("tr"):
part[row.th(text=True)[0]] = getHyperlinks(row.td) or row.td(text=True)
print '------------------------------------------------'
print 'Pricing data:'
pprint(pricing)
print '------------------------------------------------'
print 'Part data:'
pprint(part)
print '------------------------------------------------'
print 'Qty in bag: ' , quantity
while True:
code = raw_input('Please scan barcode, or press q to quit.\r\nPartGrabber> ')
if code == 'q': sys.exit()
elif len(code) > 7: getDetails(code)
else: getDetails("0607274000000010146541") # Used for testing..
| true
|
477b971725a7962f0d6741c6534ba139a9969061
|
Python
|
Bohyunnn/PythonBasic
|
/week2/03_for_while.py
|
UTF-8
| 1,017
| 4.0625
| 4
|
[] |
no_license
|
"""
"""
"""
반복문
for : 정해진 횟수 동안
while : ~ 조건이 유지되는 동안
"""
# 1에서 부터 10까지 출력을 하고 싶다.
number = 1
# 중복되는 부분을 반복문 안에서 처리하고
# 바뀌어야 하는 부분이 무엇인지 고민한다.
while number <= 10 :
print(number)
number = number + 1
"""
while [조건문] :
[실헹구문]
while True :
# 데이터를 입력 받는데 언제 끝낼지 모를 때
# 프로그램 전체를 반복할 떄
# 데이터 입력이 올바른 경우만 종료하고 싶을 때
"""
while True:
weight = input("체중을 입력해주세요 : ")
try:
weight = float(weight)
if weight > 0:
break
except:
print("입력값이 올바르지 않습니다.")
while True:
height = input("체중을 입력해주세요 : ")
try:
height = float(height)
if height > 0:
break
except:
print("입력값이 올바르지 않습니다.")
| true
|
d267a5b9c995b78713db136bb4e362a1629fa41d
|
Python
|
seyon123/slack-bot
|
/bot.py
|
UTF-8
| 3,772
| 2.859375
| 3
|
[] |
no_license
|
# For Slack
import slack
from slackeventsapi import SlackEventAdapter
# Store Keys
from dotenv import load_dotenv
import os
from pathlib import Path
from datetime import datetime
from geotext import GeoText
import random
import requests
import json
# To handle requests
from flask import Flask
# Load the .env file and read values
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
SIGNING_SECRET = os.environ['SIGNING_SECRET']
SLACK_TOKEN = os.environ['SLACK_TOKEN']
WEATHER_SECRET=os.environ['WEATHER_SECRET']
#Create the flask application
app = Flask(__name__)
#Configure the flask app to connect with Slack
slack_event_adapter = SlackEventAdapter(SIGNING_SECRET,'/slack/events', app)
client = slack.WebClient(token=SLACK_TOKEN)
#
THIS_BOT_ID = client.api_call("auth.test")['user_id']
#Generic function to send a message
def message(msg, channel_id):
client.chat_postMessage(channel=channel_id, text=str(msg))
#Send a welcome message on start
now = datetime.now().strftime("%d-%b-%Y (%H:%M:%S.%f)")
message("["+ now + "] Bot started on this channel.", "ram-bot")
# What to do when a message is received
# handling Message Events
@slack_event_adapter.on('message')
def handle_message(payload):  # renamed so it no longer shadows the message() helper above
event = payload.get('event',{})
channel_id = event.get('channel')
user_id = event.get('user')
#Get the user's username
user_info = client.users_info(user=user_id)
username = user_info["user"]["real_name"]
#Make sure we aren't talking to a bot
if "bot" not in username.lower():
msg = event.get('text')
places = GeoText(msg.title())
print(places.cities)
if THIS_BOT_ID != user_id:
msg_strip = msg.strip()
print(msg_strip)
            # Respond when the user asks a question
            if msg_strip and msg_strip[-1] == "?":
messages = ["wow!", "that's actually crazy.", "you sound like a Toronto manz.", "hmm.", "you are amazing.", "yo fam.", "OH MY GOSH!", "I know I am awesome... but.." ]
client.chat_postMessage(channel=channel_id, text="<@" + user_id + "> "+random.choice(messages)+" You asked me:")
client.chat_postMessage(channel=channel_id, text=msg)
# Response to weather
elif len(places.cities) > 0:
try:
city = places.cities[0]
weather_api = "http://api.openweathermap.org/data/2.5/weather?q=" + city + "&units=metric&appid=" + WEATHER_SECRET
print(weather_api)
request = requests.get(weather_api)
response = json.loads(request.text)
message=">*Current Weather for: " + response["name"] + "* :flag-" + str((response["sys"])["country"]).lower() +":"\
"\n>\n>*Weather*"\
"\n>Current Forecast: *" + (response["weather"][0])["description"] + "*"\
"\n>\n>*Temperature* :thermometer:"\
"\n>Current: *" + str((response["main"])["temp"]) + "°C*"\
"\n>Feels Like: *" + str((response["main"])["feels_like"]) + "°C*"\
"\n>Max: *" + str((response["main"])["temp_max"]) + "°C*"\
"\n>Min: *" + str((response["main"])["temp_min"]) + "°C*"\
"\n>Humidity: *" + str((response["main"])["humidity"]) + "%*"\
"\n>\n>*Cloud Cover* :cloud:"\
"\n>Cloudiness: *" + str((response["clouds"])["all"]) + "%*"
client.chat_postMessage(channel=channel_id, text=message)
except Exception as err:
print(f'An Error Occurred: {err}')
#Start the flask webserver
if __name__ == "__main__":
app.run(debug=True)
| true
|
0c4610d1216d152b8d94a757896aa1e8c0e3c63f
|
Python
|
beduffy/rl-autoencoder-experiments
|
/save_images_from_env.py
|
UTF-8
| 839
| 2.515625
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
import gym

# display an observation with matplotlib
def show_image(obs):
imgplot = plt.imshow(obs)
plt.show()
number_of_saved_images = 0
# observation (210, 160, 3)
open_ai_env_name = 'SpaceInvaders-v0'
env = gym.make(open_ai_env_name)
for i_episode in range(10):
observation = env.reset()
for t in range(10000):
env.render()
if t % 30 == 0:
action = env.action_space.sample()
fp = 'saved_images/saved_image_{}.png'.format(number_of_saved_images)
np.save(fp, observation)
print('Saving image:', fp)
number_of_saved_images += 1
observation, reward, done, info = env.step(action)
if done:
print("Episode finished after {} timesteps".format(t+1))
break
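# Note: np.save appends ".npy" when the filename lacks it, so the files written
# above are actually e.g. saved_images/saved_image_0.png.npy. A loading sketch:
# obs = np.load('saved_images/saved_image_0.png.npy')
# show_image(obs)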
| true
|
545df0d7977e86cdef3e748b3aadc8df646242c2
|
Python
|
gpioblink/alg1
|
/le06/B.py
|
UTF-8
| 426
| 3.109375
| 3
|
[] |
no_license
|
# WHATIS: partition -- reorders the array in place around its rightmost element (the pivot)
def partition(A, p, r):
x = A[r]
i = p-1
for j in range(p,r):
if A[j] <= x:
i = i+1
A[i], A[j] = A[j], A[i]
A[i+1],A[r] = A[r], A[i+1]
return i+1
n = int(input())
A = list(map(int, input().split()))
mid = partition(A,0,n-1)
print(' '.join(map(str,A[:mid])),'['+str(A[mid])+']',' '.join(map(str,A[mid+1:])))
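# A sketch (beyond this exercise, which prints a single partition step):
# the full quicksort built on the same partition routine.
def quicksort(A, p, r):
    if p < r:
        q = partition(A, p, r)
        quicksort(A, p, q - 1)
        quicksort(A, q + 1, r)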
| true
|
0dbaa2c72e1832db35958b614cc313b46bb8549e
|
Python
|
Heavysss/Hevyks
|
/PyCharm/Module_Os.py
|
UTF-8
| 184
| 2.859375
| 3
|
[] |
no_license
|
# Exploring the os module
import os
print(os.getcwd())
os.chdir(r"C:\Users\Yummer\Documents")
print(os.getcwd())
print(os.path.basename(r"C:"))
k = 'piP'
k = k.lower()  # str.lower() returns a new string, so reassign it
print(k)
| true
|
f920c1434c2373311bdec5485ff66e8501c82a4e
|
Python
|
jiang2533001/Cracking-the-Coding-Interview
|
/Chapter 2/stack.py
|
UTF-8
| 831
| 4
| 4
|
[] |
no_license
|
class Stack(object):
def __init__(self):
self.head = None
self.list = []
self.size = 0
    def is_empty(self):
        return self.size == 0
def push(self, val):
self.list.append(val)
self.head = self.size
self.size += 1
def pop(self):
if not self.is_empty():
val = self.list[self.head]
del self.list[self.head]
self.size -= 1
self.head = self.size - 1
return val
def front(self):
if not self.is_empty():
return self.list[self.head]
def print_stack(self):
print '[',
if not self.is_empty():
for i in range(self.size-1, -1, -1):
print self.list[i]
print ']'
| true
|
5efb52843dc1038a0aef23a68f703eaf98ef2151
|
Python
|
ekqls3659/Algorithm_Study-BackJoon
|
/2439.py
|
UTF-8
| 258
| 4
| 4
|
[] |
no_license
|
# Print 1 star on the first line, 2 stars on the second, ..., N stars on the Nth line,
# right-aligned this time (see the sample output).
a=int(input())
for i in range(1,a+1):
print(" "*(a-i) + "*"*i)
| true
|
b0974040778bf956e466edee02981bb291a6ea7a
|
Python
|
way2joy/air_analysis_v3
|
/cnn_3d.py
|
UTF-8
| 7,039
| 2.625
| 3
|
[] |
no_license
|
from read_data import read_data
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import os
def batch_creator(X_set, y_set, batch_size, dataset_length):
"""Create batch with random samples and return appropriate format"""
    batch_mask = rng.choice(dataset_length - 1, batch_size)
    batch_x = X_set[batch_mask]
    #batch_x = batch_x[..., np.newaxis]
    batch_y = y_set[batch_mask]
    batch_y = batch_y.reshape(-1, batch_y.shape[1] * batch_y.shape[2])
    return batch_x, batch_y
# random number
seed = 128
rng = np.random.RandomState(seed)
# argument flags
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_boolean('train', True, 'True: training and testing. False: testing only')
flags.DEFINE_integer('epoch', 0, 'Number of epoch to restore')
# read train data
stations = 25
T_X = 48 # number of hours to be the historical hours
T_y = 6 # number of hours to be the forecasting hours
dataset = read_data(T_X, T_y)
dataset.read_file()
dataset.split_data()
X, y = dataset.X, dataset.y
X = dataset.convert2D_to_3D()
print(X.shape)
# train set = year 2015 + 2016 => 731 days
# val set = year 2017 => 365 days
train_size = 731*24/6
X_train, X_val = X[:train_size], X[train_size:]
y_train, y_val = y[:train_size], y[train_size:]
print(X_train.shape)
print(X_val.shape)
print('Reading training data done !')
### define the layers
image_height = stations
image_width = T_X/3
output_T = T_y
output_size = stations*output_T
filters = [128, 64, 64]
kernels = [5, 3, 1]
fc_size = output_size
model_path = "model/cnn_new_epoch{}.ckpt"
# define placeholders
x = tf.placeholder(tf.float32, [None, image_height, image_width, 3])
y = tf.placeholder(tf.float32, [None, output_size])
# parameters value
epochs = 100
batch_size = 128
dropout_rate = 0.5
learning_rate = 0.001
### weight initialization
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
### define model
# convolution-pooling layer define
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
# convolution-pooling layer #1
W_conv1 = weight_variable([kernels[0], kernels[0], 3, filters[0]])
b_conv1 = bias_variable([filters[0]])
conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1)
pool1 = max_pool_2x2(conv1)
# convolution-pooling layer #2
W_conv2 = weight_variable([kernels[1], kernels[1], filters[0], filters[1]])
b_conv2 = bias_variable([filters[1]])
conv2 = tf.nn.relu(conv2d(pool1, W_conv2) + b_conv2)
pool2 = max_pool_2x2(conv2)
# convolution-pooling layer #3
W_conv3 = weight_variable([kernels[2], kernels[2], filters[1], filters[2]])
b_conv3 = bias_variable([filters[2]])
conv3 = tf.nn.relu(conv2d(pool2, W_conv3) + b_conv3)
pool3 = max_pool_2x2(conv3)
# fully connected
flatten = tf.contrib.layers.flatten(pool3)
flatten_dim = flatten.get_shape()[1].value
W_fc = tf.get_variable(name='W_fc', shape=[flatten_dim, fc_size],
initializer=tf.contrib.layers.xavier_initializer())
b_fc = tf.Variable(tf.zeros(fc_size))
fc = tf.nn.relu(tf.add(tf.matmul(flatten, W_fc), b_fc))
# drop out layer
dropout = tf.layers.dropout(
inputs=fc, rate=dropout_rate, training=True)
# output layer
W_output = tf.get_variable(name='W_output', shape=[fc_size, output_size],
initializer=tf.contrib.layers.xavier_initializer())
b_output = tf.Variable(tf.zeros(output_size))
output = tf.add(tf.matmul(dropout, W_output), b_output)
### loss function - absolute_difference
loss = tf.reduce_mean(tf.abs(tf.subtract(y, output)))
beta = 0.01
regularizer = tf.nn.l2_loss(W_conv1) + tf.nn.l2_loss(W_fc) + tf.nn.l2_loss(W_output)
loss = tf.reduce_mean(loss + beta * regularizer)
tf.summary.scalar('cnn_new_loss', loss)
## optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
# initialize all variables
init = tf.global_variables_initializer()
# 'Saver' op to save and restore all the variables
saver = tf.train.Saver()
# Merge all the summaries and write them out
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter('log/new_train', flush_secs=10)
val_writer = tf.summary.FileWriter('log/new_val', flush_secs=10)
# Training process
train = FLAGS.train
if train:
print('\nTraining start ...')
with tf.Session() as sess:
sess.run(init)
# Restore model weights from previously saved model
epoch = FLAGS.epoch
if epoch > 0:
saver.restore(sess, model_path.format(epoch))
print('Checkpoint {} restored!'.format(epoch))
### for each epoch, do:
### for each batch, do:
### create pre-processed batch
### run optimizer by feeding batch
### find cost and reiterate to minimize
for epoch in range(epochs):
total_batch = int(X_train.shape[0]/batch_size)
for i in range(total_batch):
batch_x, batch_y = batch_creator(X_train, y_train, batch_size, X_train.shape[0])
_, train_loss, summary = sess.run([optimizer, loss, merged], feed_dict = {x: batch_x, y: batch_y})
#print('Epoch:{}'.format(epoch+1) + '. Cost = {:.5f}'.format(train_loss))
train_writer.add_summary(summary, epoch)
# compute error on validate set
batch_x, batch_y = batch_creator(X_val, y_val, X_val.shape[0], X_val.shape[0])
[validate_loss, summary] = sess.run([loss, merged], feed_dict={x: batch_x, y: batch_y})
#print("Epoch:{}. Validate error: {:.2f}".format(epoch+1, validate_loss))
val_writer.add_summary(summary, epoch)
# Save model weights to disk
if (epoch+1) % 100 == 0:
save_path = saver.save(sess, model_path.format(epoch+1))
print("Model saved in file: %s" % save_path)
print("\nTraining complete!")
# Running testing session
print("---------------------------")
print("Starting testing session...")
with tf.Session() as sess:
# Initialize variables
sess.run(init)
# Restore model weights from previously saved model
epoch = FLAGS.epoch
if epoch == 0:
epoch = epochs
saver.restore(sess, model_path.format(epoch))
print('Checkpoint {} restored!'.format(epoch))
batch_x, batch_y = batch_creator(X_val, y_val, X_val.shape[0], X_val.shape[0])
test_loss, pred_out = sess.run([loss, output], feed_dict={x: batch_x, y: batch_y})
print("Test error: {:.2f}".format(test_loss))
# Visualizing as image
fig = plt.figure()
ax = fig.add_subplot(121)
ax.set_title('Prediction')
X_i = pred_out[0].reshape(stations, output_T)
#print(X_i)
ax.imshow(X_i, interpolation='bilinear')
ax = fig.add_subplot(122)
ax.set_title('Actual')
X_i1 = batch_y[0].reshape(image_height, output_T)
#print(X_i1)
ax.imshow(X_i1, interpolation='bilinear')
plt.show()
| true
|
1e76b1754a46749e1e43f75ccf50b56fc992e6fb
|
Python
|
akshitgupta29/Competitive_Programming
|
/LeetCode & GFG & IB/P7 - Counting Elements.py
|
UTF-8
| 512
| 3.859375
| 4
|
[] |
no_license
|
'''
Given an integer array arr, count the elements x such that x + 1 is also in arr.
If there are duplicates in arr, count them separately.
Example 1:
Input: arr = [1,2,3]
Output: 2
Explanation: 1 and 2 are counted cause 2 and 3 are in arr.
'''
from typing import List
def countElements(arr: List[int]) -> int:
count = 0
set1 = set(arr)
for item in arr:
if (item + 1) in set1:
count +=1
return count
if __name__ == "__main__":
arr = [1,1,2,2]
print (countElements(arr))
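# An equivalent one-pass sketch (not part of the original solution):
def count_elements_compact(arr: List[int]) -> int:
    s = set(arr)
    return sum(1 for x in arr if x + 1 in s)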
| true
|
24680331acbbfaf0015ce5d50d2fe151687e5da1
|
Python
|
lizhenQAZ/code_manage
|
/Python2.7/Flask/E5_SQLARCHEMY数据库与迁移.py
|
UTF-8
| 2,581
| 3.125
| 3
|
[] |
no_license
|
# coding=utf-8
"""
功能:
1.SQLALCHEMY使用:
设置SQLALCHEMY_DATABASE_URI与SQLALCHEMY_TRACK_MODIFICATIONS
2.模型类定义:
设置表名、反向引用、外键、主键与唯一
3.数据库迁移:
# 1.实例化管理器对象
manager = Manager(app)
# 2.使用迁移扩展
Migrate(app, db)
# 3.使用迁移命令
manager.add_command('db', MigrateCommand)
# 4.执行迁移
manager.run()
"""
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# Import the command-line extension
from flask_script import Manager
# Import the migration extension package
from flask_migrate import Migrate, MigrateCommand

app = Flask(__name__)

# Migration step 1: instantiate the manager object
manager = Manager(app)

app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://guest:guest@localhost/user'
# Track modifications dynamically
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True

# Construct the database instance
db = SQLAlchemy(app)

# Migration step 2: wire up the migration extension
Migrate(app, db)

# Migration step 3: register the migration command
manager.add_command('db', MigrateCommand)
# Custom model classes
class Role(db.Model):
    # If omitted, the table name defaults to the class name
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(32), unique=True)
    # Back reference, defined on the "one" side of the relationship
    us = db.relationship("User", backref="role")
def __repr__(self):
return 'role:%s' % self.name
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(32))
email = db.Column(db.String(32), unique=True)
pswd = db.Column(db.String(32))
    # Foreign key pointing at roles.id
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
def __repr__(self):
return 'user:%s' % self.name
if __name__ == '__main__':
db.drop_all()
db.create_all()
    # Add data
    ro1 = Role(name='admin')
    ro2 = Role(name='user')
    # Stage data on the session object: add_all([]) adds several rows at once,
    # add() adds a single row
    db.session.add_all([ro1, ro2])
    # Commit the data to the database
    db.session.commit()
us1 = User(name='wang', email='wang@163.com', pswd='123456', role_id=ro1.id)
us2 = User(name='zhang', email='zhang@189.com', pswd='201512', role_id=ro2.id)
us3 = User(name='chen', email='chen@126.com', pswd='987654', role_id=ro2.id)
us4 = User(name='zhou', email='zhou@163.com', pswd='456789', role_id=ro1.id)
db.session.add_all([us1, us2, us3, us4])
db.session.commit()
# app.run(debug=True)
manager.run()
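# Typical Flask-Script + Flask-Migrate workflow (run from a shell; the file
# name below is this script's):
#   python E5_SQLARCHEMY数据库与迁移.py db init      # create the migrations/ folder once
#   python E5_SQLARCHEMY数据库与迁移.py db migrate   # autogenerate a revision from the models
#   python E5_SQLARCHEMY数据库与迁移.py db upgrade   # apply it to the database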
| true
|
d738a57e22ad502e6e8c1d8c7ecfd03e4a6d2368
|
Python
|
colinwke/tcc_cloth_matching
|
/tcc_taobao_clothes_matching/preprocess/offline_set_generator.py
|
UTF-8
| 2,289
| 3.328125
| 3
|
[] |
no_license
|
"""
1. 测试商品集随时间购买次数
2. 匹配商品集随时间购买次数
通过1,2的图像可得,测试商品集是从匹配商品集中抽样出来的
因此可直接从匹配商品集合中抽样出线下训练和测试商品集
生成训练集和验证集
训练集5500
验证集2541
"""
import pandas as pd
import matplotlib.pyplot as plt
from core.config import *
def plot_itemset_bought_count2(itemset, history, multi_factor=1):
history = history[history['item_id'].isin(itemset)]
plot_itemset_bought_count(history, multi_factor)
def plot_itemset_bought_count(history, multi_factor=1):
bought_count = history['date'].value_counts()
    if multi_factor != 1:
bought_count *= multi_factor
bought_count = bought_count.sort_index()
bought_count.plot()
def simple_sample(match_set, frac=0.08):
match_set = match_set.sample(frac=frac)
print(len(match_set))
return match_set
def sample_set(match_set):
"""
抽样方法1:
先抽取6000个商品
再抽取5000个作为训练商品,1000个作为测试商品
"""
match_set = simple_sample(match_set, 0.132) # frac=0.132 -> 8041个样本
# 前面5000做训练集
# 后面1500做测试集
gap = 5500
train_set = match_set[:gap]
test_set = match_set[gap:]
train_set1 = pd.DataFrame(train_set)
train_set1.columns = ['item_id']
test_set1 = pd.DataFrame(test_set)
test_set1.columns = ['item_id']
train_set1.to_csv(file_offline_train_item, index=False)
test_set1.to_csv(file_offline_test_item, index=False)
return train_set, test_set
def main():
history = pd.read_csv(file_user_bought_history)
history['date'] = history['date'].map(str)
match_set = pd.read_csv(file_dim_fashion_matchsets)
match_set = pd.concat([match_set['item_id_x'], match_set['item_id_y']], ignore_index=True)
match_set = match_set.drop_duplicates()
test_set = pd.read_csv(file_test_items)
test_set = test_set['item_id']
train_set, test_set2 = sample_set(match_set)
plot_itemset_bought_count2(test_set, history)
plot_itemset_bought_count2(train_set, history)
plot_itemset_bought_count2(test_set2, history, multi_factor=2.5)
plt.show()
if __name__ == '__main__':
main()
| true
|
1053e61af63fd3fae0b1d0d42228be09f14dd96c
|
Python
|
jmcb/murderrl
|
/builder/builder.py
|
UTF-8
| 28,802
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
"""
Attempt to create a "manor" akin to::
###############################################
#.........#......#........#...........#.......#
#.........#......#........#...........#.......#
#.........#......#........#...........#.......#
#.........#......#........#...........#.......#
#########+####+######+###########+#####.......#
#.......+......+......................+.......#
#.......######+######+#.......#######+#########
#.......#......#......#<<#....#.......#.......#
#.......#......#......#<<#....#.......#.......#
#.......#......#......####....+.......#.......#
#.......#......#......#..+....#.......#.......#
##########################....#################
##++##
"""
import random, copy, room
from library import shape, collection
from library.coord import *
from library.random_util import *
from library.feature import *
# Specific build styles:
BASE_SHAPE = "single-corridor"
L_LAYOUT = "L-corridors"
Z_LAYOUT = "Z-corridors"
N_LAYOUT = "N-corridors"
H_LAYOUT = "H-corridors"
O_LAYOUT = "O-corridors"
U_LAYOUT = "U-corridors"
class BuilderCollection (collection.ShapeCollection):
corridors = None
rooms = None
legs = None
main_corridor = None
def __init__ (self, c=[]):
if c != [] and isinstance(c, BuilderCollection):
self.legs = c.legs
collection.ShapeCollection.__init__(self, c)
self.rebuild()
def copy (self):
my_copy = BuilderCollection(copy.copy(self._shapes))
my_copy.legs = copy.deepcopy(self.legs)
return my_copy
def rebuild (self):
self.corridors = []
self.rooms = []
if not self.legs:
self.legs = []
for index, sh in enumerate(self):
if isinstance(sh.shape, MainCorridor):
self.main_corridor = index
if isinstance(sh.shape, Corridor):
self.corridors.append(index)
else:
self.rooms.append(index)
def corridor (self, index):
assert index in self.corridors
return self[index]
def get_corridors (self):
return self.corridors
def get_room (self, index):
assert index in self.rooms
return self[index]
def get_rooms (self):
if not self.rooms:
return None
return self.rooms
def mark_leg (self, leg):
self.legs.append(leg)
def count_legs (self):
return len(self.legs)
def leg_at (self, side, placement):
return (side, placement) in self.legs
def get_leg (self, side, placement):
for leg in self.legs:
if leg == (side, placement):
return leg
return None
def _rebuild_wrap (function):
def wrapper (self, *args, **kwargs):
function(self, *args, **kwargs)
self.rebuild()
wrapper.__name__ = function.__name__
wrapper.__doc__ = function.__doc__ + "\n\nCalling this function automatically rebuilds the BuilderCollection index."
return wrapper
__setitem__ = _rebuild_wrap(collection.ShapeCollection.__setitem__)
append = _rebuild_wrap(collection.ShapeCollection.append)
extend = _rebuild_wrap(collection.ShapeCollection.extend)
insert = _rebuild_wrap(collection.ShapeCollection.insert)
pop = _rebuild_wrap(collection.ShapeCollection.pop)
prioritise = _rebuild_wrap(collection.ShapeCollection.prioritise)
reverse = _rebuild_wrap(collection.ShapeCollection.reverse)
reversed = _rebuild_wrap(collection.ShapeCollection.reversed)
sort = _rebuild_wrap(collection.ShapeCollection.sort)
class Corridor (shape.Shape):
pass
class MainCorridor (Corridor):
pass
def join_row_rooms (row, left_corr=False, right_corr=False, check_offset=False):
"""
Given a list of rooms, joins them together as a ShapeCollection.
:``row``: A list of Room objects that should be placed in a row. *Required*.
:``left_corr``: If true, leaves a gap between the first and second rooms
to make space for a corridor. *Default False*.
:``right_corr``: If true, leaves a gap between the last and second-last rooms
to make space for a corridor. *Default False*.
:``check_offset``: If true, compares the room heights to see if they
need to be offset from the top. *Default False*.
"""
assert(len(row) > 2)
first_room = row[0].as_shape()
second_room = row[1].as_shape()
    # If the first two rooms have the same height, offset both of them together.
offset_both = False
if check_offset and first_room.height() == second_room.height():
offset_both = True
# Join the first two rooms.
top_offset = 0
if check_offset:
top_offset = 2
overlap = 1
if left_corr:
overlap = -1
row_collection = shape.adjoin(first_room, second_room, top_offset=top_offset, overlap=overlap, collect=True, offset_both=offset_both)
# Join the middle rooms.
for curr in row[2:-1]:
room_shape = curr.as_shape()
to = top_offset
if check_offset and (room_shape.height() == first_room.height() and not offset_both or room_shape.height() > first_room.height()):
to = 0
row_collection = shape.adjoin(row_collection, room_shape, top_offset=to, overlap=1, collect=True, offset_both=offset_both)
# Join the last room.
last_room = row[-1].as_shape()
if check_offset and (last_room.height() == first_room.height() and not offset_both or last_room.height() > first_room.height()):
top_offset = 0
overlap = 1
if right_corr:
overlap = -1
row_collection = shape.adjoin(row_collection, last_room, top_offset=top_offset, overlap=overlap, collect=True)
return row_collection
ROOM_WIDTH_LIST = [7, 8, 9, 10, 11, 12]
def random_room_height ():
"""
Returns a random value for the height of a room.
"""
height = 7
if coinflip():
height += 1
elif one_chance_in(3):
height -= 1
return height
def base_builder (min_rooms=0, top_left=None, top_right=None, bottom_left=None, bottom_right=None, tl_corr=False, tr_corr=False, bl_corr=False, br_corr=False,top_height=None, bottom_height=None):
"""
    Attempts to build a basic rectangular manor, returned as a
    BuilderCollection of rooms and corridors.
    :``min_rooms``: The minimum number of rooms. *Default 0*.
:``top_left``: The width of the top left room. Random, if none. *Default None*.
:``top_right``: The width of the top right room. Random, if none. *Default None*.
:``bottom_left``: The width of the bottom left room. Random, if none. *Default None*.
:``bottom_right``: The width of the bottom right room. Random, if none. *Default None*.
:``tl_corr``: If true, leaves a gap for a corridor between the top-left two rooms. *Default False*.
:``tr_corr``: If true, leaves a gap for a corridor between the top-right two rooms. *Default False*.
:``bl_corr``: If true, leaves a gap for a corridor between the bottom-left two rooms. *Default False*.
:``br_corr``: If true, leaves a gap for a corridor between the bottom-right two rooms. *Default False*.
:``top_height``: The height of the top row rooms. Random, if none. *Default None*.
:``bottom_height``: The height of the bottom row rooms. Random, if none. *Default None*.
"""
if top_left == None:
top_left = random.choice(ROOM_WIDTH_LIST)
if top_right == None:
top_right = random.choice(ROOM_WIDTH_LIST)
if bottom_left == None:
bottom_left = random.choice(ROOM_WIDTH_LIST)
if bottom_right == None:
bottom_right = random.choice(ROOM_WIDTH_LIST)
# tl_corr = True
# tr_corr = True
# bl_corr = True
# br_corr = True
print "tl: %s, tr: %s, bl: %s, br: %s" % (top_left, top_right, bottom_left, bottom_right)
print "tl: %s, tr: %s, bl: %s, br: %s" % (tl_corr, tr_corr, bl_corr, br_corr)
# Top row of rooms
row1 = []
# Corridor, then bottom row of rooms
row2 = []
max_length = 6*12 # currently unused
# manor_width = random.randint(max_length/2, max_length)
# Decide the row heights.
if top_height == None:
top_height = random_room_height()
if bottom_height == None:
bottom_height = random_room_height()
print "top_height: %s, bottom_height: %s" % (top_height, bottom_height)
# first rooms on either row
height1 = top_height
height2 = bottom_height
check_overlap = False
if top_left < bottom_left or top_left == bottom_left and coinflip():
height1 += 2
else:
height2 += 2
check_overlap = True
first = room.Room(width=top_left, height=height1)
row1.append(first)
first = room.Room(width=bottom_left, height=height2)
row2.append(first)
# print "first rooms: height1=%s, height2=%s" % (height1, height2)
length1 = top_left + top_right - 2
if tl_corr:
length1 += 2
if tr_corr:
length1 += 2
length2 = bottom_left + bottom_right - 2
if bl_corr:
length2 += 2
if br_corr:
length2 += 2
print "Row 1:"
print "room 1: w=%s, length1: %s" % (top_left, length1)
while len(row1) <= 5:
# If we have four rooms, one in three chance of not adding any more
# rooms.
if len(row1) > 3 and one_chance_in(3):
break
new_room = room.Room(width=random.choice(ROOM_WIDTH_LIST), height=top_height)
row1.append(new_room)
length1 += new_room.width - 1
print "room %s: w=%s, length1: %s" % (len(row1), new_room.width, length1)
print "room %s: w=%s" % (len(row1)+1, top_right)
manor_width = length1
print "\nRow 2:"
print "room 1: w=%s, length2: %s" % (bottom_left, length2)
while length2 < manor_width:
dist_left = manor_width - length2 + 1
if dist_left < 14:
new_width = dist_left
else:
new_width = random.choice(ROOM_WIDTH_LIST)
next_width = dist_left - new_width
if next_width < 7:
new_width = random.choice((6,7,8))
new_room = room.Room(width=new_width, height=bottom_height)
row2.append(new_room)
length2 += new_width - 1
print "room %s: w=%s, length2: %s" % (len(row2), new_width, length2)
print "room %s: w=%s" % (len(row2)+1, bottom_right)
# last rooms on either row
height1 = top_height
height2 = bottom_height
if top_right < bottom_right or top_right == bottom_right and coinflip():
height1 += 2
check_overlap = False
else:
height2 += 2
# check_overlap = True
# print "last rooms: height1=%s, height2=%s" % (height1, height2)
last = room.Room(width=top_right, height=height1)
row1.append(last)
last = room.Room(width=bottom_right, height=height2)
row2.append(last)
print "\nrow1: %s rooms, row2: %s rooms, manor width: %s" % (len(row1), len(row2), manor_width)
# Try to get the minimum number of rooms.
if len(row1) + len(row2) < min_rooms:
return base_builder(min_rooms - 1)
# Now, start drawing it! YAY!
# First row
row1_collection = join_row_rooms(row1, tl_corr, tr_corr)
# second row
row2_collection = join_row_rooms(row2, bl_corr, br_corr, True)
# Finally, make a corridor!
overlap = 3
if check_overlap:
overlap = 1
my_collection = shape.underneath(row1_collection, row2_collection, overlap=overlap, collect=True)
m = BuilderCollection(my_collection)
noncorr_left = min(top_left, bottom_left)
noncorr_right = min(top_right, bottom_right)
corridor_length = my_collection.width() - noncorr_left - noncorr_right
# print "noncorr_left: %s, noncorr_right: %s, corridor_length: %s" % (noncorr_left, noncorr_right, corridor_length)
corridor = MainCorridor(shape.Row(width=corridor_length, fill="."))
m.append(collection.ShapeCoord(corridor, coord.Coord(noncorr_left, top_height)))
return m
class Placement (object):
def __init__ (self, side1, side2, this_side):
self.sides = [side1, side2]
self.this_side = this_side
def opposite (self):
return self.sides[self.this_side-1]
def __hash__ (self):
return hash(str(self))
def __str__ (self):
return self.sides[self.this_side]
def __repr__ (self):
return "<Placement %s>" % self
def __cmp__ (self, other):
return cmp(str(self), str(other))
SIDE_LEFT = Placement("left", "right", 0)
SIDE_RIGHT = Placement("left", "right", 1)
PLACE_TOP = Placement("top", "bottom", 0)
PLACE_BOTTOM = Placement("top", "bottom", 1)
class Leg (object):
"""
The representation of a manor leg (or "wing") that is attached to the
base manor.
"""
def __init__ (self, h_placement, v_placement, width=None, height=None, leg=None):
assert not (leg is None and width is None and height is None)
if leg is not None:
width, height = leg.size()
self.placement = (h_placement, v_placement)
self.width = width
self.height = height
def __repr__ (self):
return "<Leg h:%s w:%s %s>" % (self.height, self.width, self.placement)
def __cmp__ (self, other):
if isinstance(other, Leg):
return cmp(self.placement, other.placement)
elif isinstance(other, tuple):
return cmp(self.placement, other)
def attach_leg (base, leg, side=SIDE_LEFT, placement=PLACE_TOP, corr_offset = None, x_offset = None):
"""
Take a result of base_builder() and attach a leg.
:``base``: The base shape collection.
:``leg``: The leg shape collection.
:``side``: Which side the leg should be placed on. *Default ``SIDE_LEFT``*.
:``placement``: Whether the leg should be placed above or below. *Default ``PLACE_TOP``*.
:``corr_offset``: A number by which to vertically offset the corridor placement.
If none, uses the default room height. *Default None*.
:``x_offset``: A number by which to horizontally offset the corridor placement.
*Default None*.
"""
assert not base.leg_at(side, placement)
old_leg = leg.copy()
no_vert_offset = False
vert_offset = 0
if base.leg_at(side.opposite(), placement):
l = base.get_leg(side.opposite(), placement)
vert_offset = base.height() - l.height
no_vert_offset = True
else:
vert_offset = base.height() - 1
# Find the corridor
corridor, start = base.corridor(base.main_corridor)
assert corridor is not None
# Find the corridor's end point
stop = coord.Coord(start)
stop.x = corridor.width()
if side == SIDE_RIGHT:
offs = leg[0].width() - start.x
leg.offset(coord.Coord(stop.x-offs-1, 0))
if x_offset == None:
x_offset = stop.x + start.x
elif side == SIDE_LEFT and x_offset == None:
x_offset = start.x
print "vert_offset: %s, x_offset: %s, no_vert_offset: %s" % (vert_offset, x_offset, no_vert_offset)
if corr_offset == None:
corr_offset = room.Room().height
ncorr_height = leg.height() + corr_offset - 1
new_corridor = Corridor(shape.Column(height=ncorr_height, fill="."))
corridor_offset = None
if placement == PLACE_BOTTOM:
if no_vert_offset:
base.place_on(leg, offset=coord.Coord(0, vert_offset))
else:
left_offset = 0
if side == SIDE_RIGHT:
left_offset = base.width()-leg.width()
base = shape.underneath(base, leg, left_offset=left_offset, overlap=1, collect=True)
new_corridor[coord.Coord(0, new_corridor.height()-1)] = "#"
corridor_offset = coord.Coord(x_offset, vert_offset - corr_offset + 1)
base.append(new_corridor, corridor_offset)
elif placement == PLACE_TOP:
if no_vert_offset:
base.place_on(leg)
else:
left_offset = 0
if side == SIDE_RIGHT:
left_offset = leg.width()-base.width()
# print "leg width (%s) - base width (%s) = left_offset (%s)" % (leg.width(), base.width(), left_offset)
base = shape.underneath(leg, base, left_offset=left_offset, overlap=1, collect=True)
new_corridor[POS_ORIGIN] = "#"
corridor_offset = coord.Coord(x_offset, 0)
base.append(new_corridor, corridor_offset)
if placement == PLACE_TOP:
start = coord.Coord(corridor_offset.x - 1, leg.height() - 1)
elif placement == PLACE_BOTTOM:
start = coord.Coord(corridor_offset.x - 1, vert_offset - corr_offset + 1)
base = BuilderCollection(base)
base.mark_leg(Leg(side, placement, leg=old_leg))
return base
def build_leg (rooms_tall=2, rooms_wide=2, width_left=12, width_right=12, make_corridor=True, do_cleanup=True):
"""
Create and return a "leg" to be used with add_leg.
:``rooms_tall``: How many rooms tall to make the leg. *Default 2*.
:``rooms_wide``: How many rooms wide to make the leg. *Max 2. Default 2*.
:``width_left``: The width of the leftmost rooms. *Default 12*.
:``width_right``: The width of the rightmost rooms. *Default 12*.
:``make_corridor``: Include a corridor when building. *Default True*.
:``do_cleanup``: Perform corridor, etc, clean-up when built. *Default True*.
"""
assert rooms_wide >= 1 and rooms_wide <= 2
assert rooms_tall >= 1
leg_rooms = collection.ShapeCollection()
if width_left == None:
width_left = random.choice(ROOM_WIDTH_LIST)
if width_right == None:
width_right = random.choice(ROOM_WIDTH_LIST)
heights = []
for r in xrange(rooms_tall):
heights.append(7)
for column in xrange(rooms_wide):
this_col = collection.ShapeCollection()
width = width_left
if column > 0:
width = width_right
height_list = heights[:]
if len(heights) > 1 and one_chance_in(5):
indices = range(len(height_list))
small = random.choice(indices)
indices.remove(small)
large = random.choice(indices)
height_list[small] -= 1
height_list[large] += 2
else:
large = random.choice(xrange(len(height_list)))
height_list[large] += 1
for row in xrange(rooms_tall):
new_room = room.Room(width=width,height=height_list[row]).as_shape()
# print "new_room height: %s, this_col height: %s" % (new_room.height(), this_col.height())
this_col = shape.underneath(new_room, this_col, offset_second=False, overlap=1, collect=True)
# print "leg_rooms width: %s, this_col width: %s" % (leg_rooms.width(), this_col.width())
leg_rooms = shape.adjoin(leg_rooms, this_col, overlap=-1, collect=True)
return leg_rooms
def build_L (base=None, min_rooms=0, rooms=2, rooms_wide=2):
"""
Modifies the results of base_builder() to result in an L shape in any
orientation.
:``base``: The base shape collection. If None, a new base will be built from
base_builder. *Default None*.
:``rooms``: How many rooms to build along the sides of the new axis. *Default 2*.
:``rooms_wide``: How many rooms wide to make the leg. *Max 2. Default 2*.
"""
side = random.choice([SIDE_LEFT, SIDE_RIGHT])
placement = random.choice([PLACE_TOP, PLACE_BOTTOM])
tlc = (side == SIDE_LEFT and placement == PLACE_TOP)
trc = (side == SIDE_RIGHT and placement == PLACE_TOP)
blc = (side == SIDE_LEFT and placement == PLACE_BOTTOM)
brc = (side == SIDE_RIGHT and placement == PLACE_BOTTOM)
if tlc or blc: # left side
tlw = random.choice(ROOM_WIDTH_LIST)
blw = random.choice(ROOM_WIDTH_LIST)
trw = None
brw = None
if tlc:
if blw < tlw:
blw = tlw
left = tlw
else:
if tlw < blw:
tlw = blw
left = blw
right = None
else: # right side
tlw = None
blw = None
trw = random.choice(ROOM_WIDTH_LIST)
brw = random.choice(ROOM_WIDTH_LIST)
if trc:
if brw < trw:
brw = trw
right = trw
else:
if trw < brw:
trw = brw
right = brw
left = None
tht = None
bht = None
corr_offset = random_room_height()
if placement == PLACE_TOP:
tht = corr_offset
else:
bht = corr_offset
if base is None:
base = base_builder(min_rooms=min_rooms-4, top_left=tlw, top_right=trw, bottom_left=blw, bottom_right=brw, tl_corr=tlc, tr_corr=trc, bl_corr=blc, br_corr=brc, top_height=tht, bottom_height=bht)
# Draw the new rooms.
new_rooms = build_leg(rooms, rooms_wide, width_left=left, width_right=right)
offset = None
if side == SIDE_RIGHT:
offset = base.width() - right - 1
base = attach_leg(base, new_rooms, side=side, placement=placement, corr_offset=corr_offset, x_offset=offset)
return base
def build_Z (base=None, min_rooms=0):
"""
    Modifies the results of base_builder() to result in a Z shape in any
    orientation. Not implemented.
Currently just returns the base builder results.
:``base``: The base shape collection. If None, a new base will be built from
base_builder. *Default None*.
"""
if base is None:
base = base_builder(min_rooms=min_rooms)
return base
def build_N (base=None, min_rooms=0):
"""
    Modifies the results of base_builder() to result in an N shape in any
    orientation. Not implemented.
Currently just returns the base builder results.
:``base``: The base shape collection. If None, a new base will be built from
base_builder. *Default None*.
"""
if base is None:
base = base_builder(min_rooms=min_rooms)
return base
def build_O (base=None, min_rooms=0):
"""
    Modifies the results of base_builder() to result in an O shape in any
    orientation. Not implemented.
Currently just returns the base builder results.
:``base``: The base shape collection. If None, a new base will be built from
base_builder. *Default None*.
"""
if base is None:
base = base_builder(min_rooms=min_rooms)
return base
def build_H (base=None, min_rooms=0):
"""
Modifies the results of base_builder() to result in an H-shaped layout.
:``base``: The base shape collection. If None, a new base will be built from
base_builder. *Default None*.
"""
outer = random.choice(ROOM_WIDTH_LIST) # outer leg
inner = random.choice(ROOM_WIDTH_LIST) # inner leg
tht = random_room_height()
bht = random_room_height()
if base is None:
base = base_builder(min_rooms=min_rooms-16, top_left=outer, top_right=outer, bottom_left=outer, bottom_right=outer,
tl_corr=True, tr_corr=True, bl_corr=True, br_corr=True, top_height=tht, bottom_height=bht)
base = build_U(base, min_rooms=min_rooms, placement=PLACE_TOP, outer=outer, inner=inner, room_height=tht)
base = build_U(base, min_rooms=min_rooms, placement=PLACE_BOTTOM, outer=outer, inner=inner, room_height=bht)
return base
def build_U (base=None, min_rooms=0, rooms=2, rooms_wide=2, placement=None, outer=None, inner=None, room_height=None):
"""
Modifies the results of base_builder() to result in an U-shaped layout.
:``base``: The base shape collection. If None, a new base will be built from
base_builder. *Default None*.
:``rooms``: How many rooms to build along the sides of the new axis. *Default 2*.
:``rooms_wide``: How many rooms wide to make the leg. *Max 2. Default 2*.
:``placement``: The vertical orientation of the manor legs. Random, if none. *Default None*.
:``inner``: The width of the inner manor legs' rooms. Random, if none. *Default None*.
:``outer``: The width of the outer manor legs' rooms. Random, if none. *Default None*.
:``room_height``: The height of the base manor rooms on the side facing the legs.
Random, if none. *Default None*.
"""
if placement is None:
placement = random.choice([PLACE_TOP, PLACE_BOTTOM])
if outer == None:
outer = random.choice(ROOM_WIDTH_LIST) # outer leg
if inner == None:
inner = random.choice(ROOM_WIDTH_LIST) # inner leg
tht = None
bht = None
if room_height == None:
room_height = random_room_height()
if placement == PLACE_TOP:
tht = room_height
else:
bht = room_height
if base is None:
tlc = (placement == PLACE_TOP)
trc = tlc
blc = not tlc
brc = blc
noleg = random.choice(ROOM_WIDTH_LIST) # opposite side
if noleg < outer:
noleg = outer
if tlc: # top
tlw = outer
trw = outer
blw = noleg
brw = noleg
else: # bottom
tlw = noleg
trw = noleg
blw = outer
brw = outer
base = base_builder(min_rooms=min_rooms-8, top_left=tlw, top_right=trw, bottom_left=blw, bottom_right=brw, tl_corr=tlc, tr_corr=trc, bl_corr=blc, br_corr=brc, top_height=tht, bottom_height=bht)
leg_width = outer + inner + 1
distance = base.width() - 2 * leg_width
print "base width=%s, outer=%s, inner=%s, leg width=%s, distance=%s" % (base.width(), outer, inner, leg_width, base.width() - 2*leg_width)
if distance < 5 and distance != -1:
if distance % 2 == 0 or base.width() % 2 == 0:
if distance < 0:
inner -= 2 + (-distance)
inner -= 2
else:
inner = base.width()/2 - outer
leg_width = outer + inner + 1
distance = base.width() - 2 * leg_width
print "base width=%s, outer=%s, inner=%s, leg width=%s, distance=%s" % (base.width(), outer, inner, leg_width, base.width() - 2*leg_width)
new_rooms_L = build_leg(rooms, rooms_wide, width_left=outer, width_right=inner)
new_rooms_R = build_leg(rooms, rooms_wide, width_left=inner, width_right=outer)
base = attach_leg(base, new_rooms_L, side=SIDE_LEFT, placement=placement, corr_offset=room_height)
base = attach_leg(base, new_rooms_R, side=SIDE_RIGHT, placement=placement, corr_offset=room_height, x_offset=base.width() - outer - 1)
return base
def builder_by_type (type = None, min_rooms=0):
"""
Creates and returns a manor of a given layout type.
:``type``: The layout type in a character representation. *Default None*.
``B``: base manor.
``L``: L-shaped layout.
    ``U``: U-shaped layout.
    ``H``: H-shaped layout.
``None``: random layout.
"""
if type == None:
return build_random(min_rooms=min_rooms)
if type == 'B':
return base_builder(min_rooms=min_rooms)
if type == 'L':
return build_L(min_rooms=min_rooms)
if type == 'U':
return build_U(min_rooms=min_rooms)
if type == 'H':
return build_H(min_rooms=min_rooms)
# The other types don't exist yet and fall back on the base_builder.
if type == 'O':
return build_O(min_rooms=min_rooms)
if type == 'N':
return build_N(min_rooms=min_rooms)
if type == 'Z':
return build_Z(min_rooms=min_rooms)
else:
return base_builder(min_rooms=min_rooms)
def build_random (base=None, min_rooms=0):
"""
Creates and returns a manor of a random layout type.
:``base``: The base shape collection. If None, a new base will be built from
base_builder. *Default None*.
"""
l_list = [Z_LAYOUT, N_LAYOUT, O_LAYOUT, L_LAYOUT, U_LAYOUT, H_LAYOUT]
layout = random.choice(l_list)
if min_rooms > 25:
layout = H_LAYOUT
elif min_rooms > 20:
layout = random.choice(l_list[-2:])
elif min_rooms > 15:
layout = random.choice(l_list[-3:])
if layout == L_LAYOUT:
return build_L(base, min_rooms=min_rooms)
elif layout == Z_LAYOUT:
return build_Z(base, min_rooms=min_rooms)
elif layout == N_LAYOUT:
return build_N(base, min_rooms=min_rooms)
elif layout == H_LAYOUT:
return build_H(base, min_rooms=min_rooms)
elif layout == O_LAYOUT:
return build_O(base, min_rooms=min_rooms)
elif layout == U_LAYOUT:
return build_U(base, min_rooms=min_rooms)
else:
return base_builder(min_rooms=min_rooms)
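# Hedged usage sketch of the builders above (layout letters as documented in
# builder_by_type; width() is the collection canvas width used elsewhere in
# this module):
# >>> manor = builder_by_type('H', min_rooms=25)   # H-shaped manor
# >>> manor = build_random(min_rooms=10)           # random layout
# >>> manor.width()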
| true
|
e89575bd46357a5182cfe3e472b84f21468de76e
|
Python
|
jamiezeminzhang/Leetcode_Python
|
/dynamic programming/010_OO_regular_expression_matching.py
|
UTF-8
| 4,757
| 4.09375
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 22 10:00:45 2015
LeetCdoe #10 Regular Expression Matching
Implement regular expression matching with support for '.' and '*'.
'.' Matches any single character.
'*' Matches zero or more of the preceding element.
The matching should cover the entire input string (not partial).
The function prototype should be:
bool isMatch(const char *s, const char *p)
Some examples:
isMatch("aa","a") → false
isMatch("aa","aa") → true
isMatch("aaa","aa") → false
isMatch("aa", "a*") → true
isMatch("aa", ".*") → true
isMatch("ab", ".*") → true
isMatch("aab", "c*a*b") → true
@author: zzhang
This one is the algorithm provided by the editor. Java and C++ work with this
algorithm, but Python does not, so we use a dynamic-programming method for Python.
First, the recursive solution:
If P[j+1] != '*': when S[i] == P[j], match the next position (i+1, j+1); when S[i] != P[j], the match fails.
If P[j+1] == '*': when S[i] == P[j], match (i+1, j+2) or (i, j+2); when S[i] != P[j], match (i, j+2).
The match succeeds when S[i] == '\0' && P[j] == '\0'.
class Solution:
# @param {string} s
# @param {string} p
# @return {boolean}
def isMatch(self, s, p):
if not p:
return not s
if not s:
return False
return self.is_match_aux(s,p,0,0)
def is_match_aux(self,s,p,si,pi):
if pi == len(p):
return si == len(s)
# Next char is not *
# pi may be the last char
if pi<len(p)-1 and p[pi+1] != '*' or pi == len(p) -1:
assert p[pi] != '*'
# si must be in bound
is_cur_matched = si<len(s) and (p[pi] == s[si] or p[pi] == '.')
is_next_matched = self.is_match_aux(s,p,si+1,pi+1)
return is_cur_matched and is_next_matched
# Next char is *
while si<len(s) and pi< len(p) and (p[pi]==s[si] or p[pi]=='.'):
if self.is_match_aux(s,p,si,pi+2):
return True
si += 1
return self.is_match_aux(s,p,si,pi+2)
==============OR====================
class Solution:
# @return a boolean
def isMatch(self, s, p):
if len(p)==0: return len(s)==0
if len(p)==1 or p[1]!='*':
if len(s)==0 or (s[0]!=p[0] and p[0]!='.'):
return False
return self.isMatch(s[1:],p[1:])
else:
i=-1; length=len(s)
while i<length and (i<0 or p[0]=='.' or p[0]==s[i]):
if self.isMatch(s[i+1:],p[2:]): return True
i+=1
return False
=================DP===========================
http://www.makuiyu.cn/2015/01/LeetCode_10.%20Regular%20Expression%20Matching/
The idea is to use a boolean 2-D array dp[m+1][n+1] (m and n are the lengths of s and p)
to record whether s and p match: dp[i+1][j+1] means the first i characters of s
match the first j characters of p.
If p[j] is not '*', then dp[i + 1][j + 1] = dp[i][j] && s[i] == p[j].
If p[j] is '*', then dp[i + 1][j + 1] is true if and only if one of three cases holds:
    the character before '*' occurs 0 times: look back 2 positions in p, i.e. dp[i + 1][j - 1];
    the character before '*' occurs exactly 1 time: look back 1 position in p, i.e. dp[i + 1][j];
    the character before '*' occurs 2 or more times: look back 1 position in s, i.e. dp[i][j + 1],
    and the current character of s (s[i]) must match the character before '*' (p[j - 1]).
    (Think this through! Example: s='aaa', p='a*', and consider the last character.)
Note that '.' matches any single character.
Note: the dynamic-programming idea is referenced from the link above, which also
reduces the space complexity to O(n) if you are interested.
Time complexity: O(mn)
Space complexity: O(mn)
"""
class Solution:
# @return a boolean
def isMatch(self, s, p):
dp=[[False for i in range(len(p)+1)] for j in range(len(s)+1)]
dp[0][0]=True
for i in range(1,len(p)+1):
if p[i-1]=='*':
if i>=2:
                    dp[0][i]=dp[0][i-2] or dp[0][i-1]  # 'x*' can match the empty string: the char at i-1 occurs zero times
for i in range(1,len(s)+1):
for j in range(1,len(p)+1):
if p[j-1]=='.':
dp[i][j]=dp[i-1][j-1]
elif p[j-1]=='*':
                    # the preceding character occurs once, zero times, or more than once
                    # the more-than-once case needs careful thought
dp[i][j]=dp[i][j-1] or dp[i][j-2] or (dp[i-1][j] and (s[i-1]==p[j-2] or p[j-2]=='.'))
else:
dp[i][j]=dp[i-1][j-1] and s[i-1]==p[j-1]
return dp[len(s)][len(p)]
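# Hedged self-test (not part of the original solution); cases taken from the
# problem statement in the docstring above.
if __name__ == '__main__':
    sol = Solution()
    assert sol.isMatch("aa", "a") == False
    assert sol.isMatch("aa", "a*") == True      # '*' repeats 'a' once more
    assert sol.isMatch("aab", "c*a*b") == True  # 'c' zero times, 'a' twice
    print("all regex-matching cases passed")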
| true
|
72e7857b20d02c01a89918c6011322beca5fac03
|
Python
|
3n73rp455/cli
|
/main.py
|
UTF-8
| 1,324
| 2.640625
| 3
|
[] |
no_license
|
import argparse
import os
import requests
import sys
from modules import auth, get
class CLI(object):
def __init__(self, user, password, endpoint):
self.token = auth.login(user, password)
self.endpoint = endpoint
def create(self):
return
def get(self, pk):
pw = get.password(token=self.token, pk=pk)
return pw
def update(self):
return
def delete(self):
return
def exit(self):
        sys.exit()
def main():
parser = argparse.ArgumentParser(description='Access enterPass API via Command Line')
parser.add_argument('--endpoint', required=True, type=str, help='What API endpoint to access')
parser.add_argument('--method', required=True, type=str, help='What method to call')
parser.add_argument('--pk', type=int, help='Primary Key for Object')
parser.add_argument('--username', type=str, help='Username')
parser.add_argument('--password', type=str, help='Password')
args = parser.parse_args()
endpoint = args.endpoint
method = args.method
pk = args.pk
user = args.username
password = args.password
pw = getattr(CLI(user=user,
password=password,
endpoint=endpoint), method)(pk=pk)
print(pw)
if __name__ == "__main__":
main()
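# Hedged CLI sketch -- the endpoint and method names below are illustrative,
# not confirmed parts of the enterPass API:
#   python main.py --endpoint passwords --method get --pk 1 \
#       --username alice --password s3cret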
| true
|
7491c266f7c131f690dbb89a27842bd15de37bb2
|
Python
|
GBaileyMcEwan/python
|
/src/comparePrices/comparePrices.py
|
UTF-8
| 2,700
| 2.90625
| 3
|
[] |
no_license
|
#!/usr/local/bin/python3
from bs4 import BeautifulSoup
from termcolor import colored
import requests
import re
import json
#import time
#grab user search string
product = input("What product would you like to search for? ")
#product = "tomato sauce"
print(f"Product was: {product}")
#time.sleep(10)
#woolworths needs specific headers - defining them below
woolworthsHeaders = {
'Accept': 'application/json, text/plain, */*',
'Referer': 'https://www.woolworths.co.za/cat?Ntt=tomato%20sauce&Dy=1'
}
#import the data
pnp = requests.get(f"https://www.pnp.co.za/pnpstorefront/pnp/en/search/?text={product}")
woolies = requests.get(f"https://www.woolworths.co.za/server/searchCategory?pageURL=%2Fcat&Ntt={product}&Dy=1", headers=woolworthsHeaders)
#print the HTTP response
#print(f"{woolies.text}")
#Woolworths section
myWoolworthsJSON = json.loads(woolies.text)
myCount=0
myProductData = []
with open(f'./wooliesPrices.json', 'w+') as f:
myProductData = []
for x in myWoolworthsJSON['contents']:
#print(f"{x}")
for y in x['mainContent']:
for z in y['contents']:
for a in z['records']:
myDict = a['attributes']
myDict2 = a['startingPrice']
if "p_displayName" in myDict.keys():
print(f"{myDict['p_displayName']} - Price: {myDict2['p_pl10']}")
content = {
'product': myDict['p_displayName'],
'price': myDict2['p_pl10']
}
myProductData.append(content)
myCount+=1
json.dump(myProductData, f)
#PicknPay section
#load the data into BS4
soup = BeautifulSoup(pnp.text, 'html.parser')
soup2 = BeautifulSoup(woolies.text, 'html.parser')
#print(soup2)
#exit()
# get each product name by looking for divs with class 'item-name'
data = []
for div in soup.find_all('div', { 'class': 'item-name'}):
data.append(div.text)
# get each product's price by looking for divs with class 'currentPrice'
data2 = []
for div in soup.find_all('div', { 'class': 'currentPrice' }):
#remove all data with <div> or </div> or </span> or with spaces
div = re.sub(r'(^<div.*>|</div.*>|</span>|\s)', '', str(div))
#replace <span> with a . (pnps website has a span element separating rands and cents!)
div = re.sub(r'<span>', '.', str(div))
data2.append(div)
myCount=0
with open(f'./pnpPrices.json', 'w+') as f:
myProductData = []
#myProductData['picknpay'] = []
for product in data:
#print(colored(f"Product: {product}", 'red'))
#print(colored(f"Price: {data2[myCount]}", 'green'))
content = {
'product': product,
'price': data2[myCount]
}
print(content)
#myData = str(content).","
#print(myData)
myProductData.append(content)
myCount+=1
#json.dump(content, f)
json.dump(myProductData, f)
| true
|
5b37c743696ad032c6beb3bf653f28d0fd88dfd1
|
Python
|
pritesh-ugrankar/edxpy
|
/varb.py
|
UTF-8
| 224
| 3.640625
| 4
|
[] |
no_license
|
varA = 10
varB = 'moretext'
if type(varA) == str or type(varB) == str:
    print("type varA: strings involved")
elif varA > varB:
    print("bigger")
elif varA == varB:
    print("equal")
elif varA < varB:
    print("smaller")
| true
|
a0d22f91973e9167344ca3c53453d8f35758fc85
|
Python
|
chenchienlin/Algorithmic-Toolbox
|
/interesting_topics/puzzle_solver.py
|
UTF-8
| 1,367
| 2.953125
| 3
|
[] |
no_license
|
from collections import deque
from interesting_topics.puzzle_solver_util import *
import logging
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger()
def BFSSolver(initial, goal):
BLANK = 16
Q = deque()
Q.append(initial)
prev = dict()
prev[list_to_str(initial)] = None
state = None
while True:
state = Q.popleft()
if state == goal:
# return prev
break
successors = compute_successors(state, initial=initial)
for suc in successors:
if list_to_str(suc) not in prev:
prev[list_to_str(suc)] = state
Q.append(suc)
curr = state
record = []
while curr:
record.append(curr)
curr = prev[list_to_str(curr)]
moves = []
curr = record.pop()
while curr != goal:
print_puzzle(curr)
        nxt = record.pop()
        motion = find_swap(curr, nxt)
        moves.append(motion)
        LOGGER.debug(f'Swap {curr[motion[0]]} {curr[motion[1]]}\n')
        curr = nxt
print_puzzle(curr)
return curr, moves
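# Note on the search above: `prev` maps each visited state (stringified) to
# its predecessor, so walking prev[...] back from the goal reconstructs the
# BFS path, and find_swap recovers the tile swap between consecutive states.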
if __name__ == '__main__':
max_degree = 5
initial = generate_puzzle(max_degree)
goal = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
_, moves = BFSSolver(initial, goal)
from interesting_topics.puzzle_solver_pygame import main
main(initial, moves)
| true
|
45b75100835cf569c58e9108f614b92a7030043d
|
Python
|
Darkkyelfo/Replicacao-pixel-clustering
|
/Replicacao-pixel-clustering/execucoes.py
|
UTF-8
| 1,588
| 2.515625
| 3
|
[] |
no_license
|
'''
Created on 30 Dec 2017
@author: raul1
'''
from classificadores import classicarKNN
from imagemparabase import imgsParaBase
from dividirbase import Holdout
from pixelcluster import IntensityPatches
from numba import jit
def executarIntensity(base,qtCla=15,hold=10,knn=1):
#Bases
if(base=="georgia"):
baseAtual = imgsParaBase("Bases/%s"%base,qtClasses=qtCla,dirClasse = "s",tipoArq = "jpg")
else:
baseAtual = imgsParaBase("Bases/%s"%base,qtClasses=qtCla)
rodarInt(base,baseAtual,hold,knn)
def gerarBases(baseAtual,hold):
bases = []
for k in range(hold):
bTesteOri, bTreinoOri = Holdout.dividirImg(baseAtual)
bases.append([bTesteOri,bTreinoOri])
return bases
def criarCSV(nome,valores):
caminhoArq = "Resultados/%s"%nome+".csv"
arqSave = open(caminhoArq,"a")
arqSave.write(valores)
arqSave.close()
@jit
def rodarInt(nomeBase,baseAtual,hold,k):
basesHold = gerarBases(baseAtual,hold+1)
bTreinoOri = basesHold[0][0]
for j in range(1,12):
pCluster = IntensityPatches()
pCluster.fit(bTreinoOri, 2**j)
erro = 0
for h in basesHold[1:]:
bTeste = h[0]
bTreino = h[1]
bTreino = pCluster.run(bTreino)
bTeste = pCluster.run(bTeste)
erro = classicarKNN(bTreino.atributos, bTreino.classes, bTeste.atributos, bTeste.classes,k) + erro
criarCSV("IntensityPatches_%s" % nomeBase, "%s,%s\n" % (2**j,1 - (erro / hold)))
print("IntensityPatches_%s" % nomeBase, "%s,%s\n" % (2**j,1 - (erro / hold)))
| true
|
0bf1bbabd140742158d59ef01aacc79281ae5baa
|
Python
|
pit-ray/Anime-Semantic-Segmentation-GAN
|
/datasets.py
|
UTF-8
| 2,268
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
# coding: utf-8
import os
from glob import glob
import joblib
import numpy as np
from chainer.datasets.tuple_dataset import TupleDataset
from PIL import Image
from functions import label2onehot
def gamma_correction(img, gamma=2.5):
return img ** (1 / gamma)
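# Hedged worked example: with the default gamma=2.5, a normalised pixel of
# 0.25 maps to 0.25 ** (1 / 2.5) ~= 0.574, i.e. dark regions are brightened.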
def get_dataset(opt):
files = glob(opt.dataset_dir + '/*.png')
os.makedirs('dump', exist_ok=True)
dump_file = 'dump/datasets_with_label.joblib'
if os.path.exists(dump_file):
with open(dump_file, 'rb') as f:
x, t = joblib.load(f)
return TupleDataset(x, t)
x, t = [], []
for filename in files:
if not os.path.exists(filename):
continue
img_array = np.array(Image.open(filename), dtype='float16')
img_array = img_array.transpose((2, 0, 1)) / 255
x_array = img_array[:3, :, :256]
t_array = img_array[:3, :, 256:]
# convert to onehot
t_array = label2onehot(t_array, threshold=0.4, dtype='float16')
x.append(x_array)
t.append(t_array)
# Data-Augmentation
if opt.augment_data:
# mirroring
x.append(x_array[:, :, ::-1])
t.append(t_array[:, :, ::-1])
# gamma-correction
x.append(gamma_correction(x_array, gamma=2.5))
t.append(t_array)
# mirroring and gamma correction
x.append(gamma_correction(x_array[:, :, ::-1], gamma=2.5))
t.append(t_array[:, :, ::-1])
with open(dump_file, 'wb') as f:
joblib.dump((x, t), f, compress=3)
return TupleDataset(x, t)
def get_unlabel_dataset(opt):
files = glob(opt.unlabel_dataset_dir + '/*.png')
os.makedirs('dump', exist_ok=True)
dump_file = 'dump/datasets_without_label.joblib'
if os.path.exists(dump_file):
with open(dump_file, 'rb') as f:
x = joblib.load(f)
return x
x = []
for filename in files:
if not os.path.exists(filename):
continue
x_array = np.array(Image.open(filename), dtype='float16')
x_array = x_array.transpose((2, 0, 1)) / 255
x_array = x_array[:3]
x.append(x_array)
with open(dump_file, 'wb') as f:
joblib.dump(x, f, compress=3)
return x
| true
|
2a9ac33e7fee4825cded5e2665a56f09b132a138
|
Python
|
harrybiddle/ynab
|
/ynab/bank.py
|
UTF-8
| 1,509
| 3.40625
| 3
|
[] |
no_license
|
import uuid
class ObjectWithSecrets:
def __init__(self, secrets, *args, **kwargs):
self._secrets = secrets or dict()
@classmethod
def from_config(cls, config, keyring):
secrets_keys = config.pop("secrets_keys", {})
secrets = keyring.get_secrets(secrets_keys)
return cls(config, secrets)
def validate_secrets(self, *expected):
""" Raises an AssertionError if the list of secret names (given as
function arguments) doesn't match exactly the secrets we have """
given = self._secrets.keys()
error_msg = (
"Given secrets differ from expected. Received\n {}\nbut " "expected\n {}"
).format(str(given), str(expected))
assert set(expected) == set(given), error_msg
def secret(self, name):
""" Return the value of the given secret, e.g.
>> self.secret('password')
'pass123'
Raises:
KeyError if the secret doesn't exist
"""
return self._secrets[name]
class Bank(ObjectWithSecrets):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._uuid = str(uuid.uuid4())
def __hash__(self):
return hash(self.uuid())
def __eq__(self, other):
return self.uuid() == other.uuid()
def __ne__(self, other):
return not (self == other)
def uuid(self):
""" Returns the unique UUID for this object, as a string
"""
return self._uuid
| true
|
805c06e084250af2a071cf0f55f97abffb95bfe3
|
Python
|
defoe-code/defoe
|
/defoe/es/queries/geoparser_pages.py
|
UTF-8
| 3,952
| 2.75
| 3
|
[
"MIT",
"CC0-1.0",
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
"""
It uses ES stored data.
Identify the locations per page and geo-resolve them.
It uses the Original Edinburgh geoparser pipeline for identifying all the posible locations within a page and georesolve them.
"""
from operator import add
from defoe import query_utils
from defoe.hdfs.query_utils import blank_as_null
from pyspark.sql import SQLContext
from pyspark.sql.functions import col, when
import yaml, os
def do_query(df, config_file=None, logger=None, context=None):
"""
It ingests NLS pages, applies the original geoparser for identifying the possible locations of each page.
And also for getting the latituted and longitude of each location.
Before applying the geoparser, two clean steps are applied - long-S and hyphen words.
A config_file should be indicated to specify the gazetteer to use,
the defoe_path, the bounding box (optional), as well as the operating system.
Example:
- 1842:
- archive: /home/rosa_filgueira_vicente/datasets/sg_simple_sample/97437554
- edition: 1842, Volume 1
- georesolution_page:
- Annan-rb17:
- in-cc: ''
- lat: '54.98656134974328'
- long: '-3.259540348679'
- pop: ''
- snippet: is 8 miles north-west of Annan , and commands a fine
- type: ppl
- Annan-rb18:
- in-cc: ''
- lat: '54.98656134974328'
- long: '-3.259540348679'
- pop: ''
- snippet: valley is washed by the Annan , and lies open from
- type: ppl
....
- lang_model: geoparser_original
- page_filename: alto/97440572.34.xml
- text_unit id: Page252
- title: topographical, statistical, and historical gazetteer of Scotland
    :param df: DataFrame of ES-stored pages
    :type df: pyspark.sql.DataFrame
:param config_file: query configuration file
:type config_file: str or unicode
:param logger: logger (unused)
:type logger: py4j.java_gateway.JavaObject
    :return: list of (year, list of matching page metadata dicts) tuples
    :rtype: list
"""
with open(config_file, "r") as f:
        config = yaml.safe_load(f)
gazetteer = config["gazetteer"]
if "bounding_box" in config:
bounding_box = " -lb " + config["bounding_box"] + " 2"
else:
bounding_box = ""
if "os_type" in config:
if config["os_type"] == "linux":
os_type = "sys-i386-64"
else:
os_type= "sys-i386-snow-leopard"
else:
os_type = "sys-i386-64"
if "defoe_path" in config :
defoe_path = config["defoe_path"]
else:
defoe_path = "./"
fdf = df.withColumn("source_text_clean", blank_as_null("source_text_clean"))
newdf=fdf.filter(fdf.source_text_clean.isNotNull()).filter(fdf["model"]=="nls").filter(df["year"]=="1828").select(fdf.year, fdf.title, fdf.edition, fdf.archive_filename, fdf.source_text_filename, fdf.text_unit_id, fdf.source_text_clean)
pages=newdf.rdd.map(tuple)
geo_xml_pages = pages.flatMap(
lambda clean_page: [(clean_page[0], clean_page[1], clean_page[2],\
clean_page[3], clean_page[4], clean_page[5], query_utils.geoparser_cmd(clean_page[6], defoe_path, os_type, gazetteer, bounding_box))])
matching_pages = geo_xml_pages.map(
lambda geo_page:
(geo_page[0],
{"title": geo_page[1],
"edition": geo_page[2],
"archive": geo_page[3],
"page_filename": geo_page[4],
"text_unit id": geo_page[5],
"lang_model": "geoparser_original",
"georesolution_page": query_utils.geoparser_coord_xml(geo_page[6])}))
result = matching_pages \
.groupByKey() \
.map(lambda date_context:
(date_context[0], list(date_context[1]))) \
.collect()
return result
| true
|
3f9fce8c94dad4542b654b276dcd554323b3c444
|
Python
|
OverHall27/Gasyori100knock
|
/Question_61_70/myanswers/myans66.py
|
UTF-8
| 1,528
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
import cv2
import numpy as np
def BGRtoGRAY(img):
gray = 0.2126 * img[..., 2] + 0.7152 * img[..., 1] + 0.0722 * img[..., 0]
return gray
def HOG(gray):
def GetGradXY(gray):
Ver, Hor = gray.shape
gray = np.pad(gray, (1, 1), 'edge')
gx = gray[1:Ver+1, 2:] - gray[1:Ver+1, :Hor]
#gy = gray[:Ver, 1:Hor+1] - gray[2:, 1:Hor+1]
gy = gray[2:, 1:Hor+1] - gray[:Ver, 1:Hor+1]
# keep from zero-dividing
gx[gx == 0] = 1e-6
return gx, gy
def GetMagAngle(gx, gy):
mag = np.sqrt(gx ** 2 + gy ** 2)
ang = np.arctan(gy / gx)
        # arctan returns values in (-pi/2, pi/2), so rotate negative angles by pi
ang[ang < 0] += np.pi
return mag, ang
def QuantizationAngle(ang):
# angはradianなので,[0, 180]のindex化を調節
quantized_ang = np.zeros_like(ang, dtype=np.int)
for i in range(9):
low = (np.pi * i) / 9
high = (np.pi * (i + 1)) / 9
quantized_ang[np.where((ang >= low) & (ang < high))] = i + 1
return quantized_ang
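    # Hedged example: an angle of 0.5 rad (~28.6 degrees) lies in
    # [pi/9, 2*pi/9), so it is assigned to bin 2 of the 9 x 20-degree bins.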
gx, gy = GetGradXY(gray)
mag, ang = GetMagAngle(gx, gy)
ang = QuantizationAngle(ang)
return mag, ang
img = cv2.imread("../imori.jpg").astype(np.float)
gray = BGRtoGRAY(img)
mag, ang = HOG(gray)
mag = mag.astype(np.uint8)
ang = ang.astype(np.uint8)
cv2.imwrite("myans_66mag.jpg", mag)
cv2.imwrite("myans_66ang.jpg", ang)
cv2.imshow("mag", mag)
cv2.imshow("ang", ang)
cv2.waitKey(0)
cv2.destroyAllWindows()
| true
|
2c9d360753f76eec3f61d53de3f487da0305e8fb
|
Python
|
varanasisrikar/Programs
|
/Python/Random Data Plot.py
|
UTF-8
| 193
| 2.578125
| 3
|
[] |
no_license
|
import numpy as np
import pylab as pl

# 1000 samples from a normal distribution with mean 5.8 and std 5.4
Data = np.random.normal(5.8, 5.4, 1000)
print(Data)

pl.plot(Data, "ro")  # sample values plotted as red dots against their index
pl.show()

pl.plot(Data)        # the same samples as a line plot
pl.show()

pl.hist(Data)        # histogram of the sample distribution
pl.show()
| true
|
bb58dafcd18adc3207a90239b44294563961da29
|
Python
|
yeqown/playground
|
/pythonic/analyze_image_exif.py
|
UTF-8
| 2,329
| 3.328125
| 3
|
[] |
no_license
|
"""
This is a snippet of code that will analyze the EXIF data of an image.
It will print out the EXIF data in a human readable format.
@File: analyze_image_exif.py
@Author: yeqown
"""
import exifread
from geopy.geocoders import Nominatim
from typing import Dict, Any
geoconverter = Nominatim(user_agent="analyze_image_exif.py")
class ImageExif(object):
"""
define a class to store the exif data of an image
"""
filename = None
datetime = None
    latitude = None    # latitude, in decimal degrees
    longitude = None   # longitude, in decimal degrees
def __init__(self, filename:str, tags:Dict[str, Any]):
if tags is None:
return
self.filename = filename
# print(tags)
if "Image DateTime" in tags:
self.datetime = tags["Image DateTime"]
if "GPS GPSLatitude" in tags:
self.latitude = ImageExif.format_gps(tags["GPS GPSLatitude"])
if "GPS GPSLongitude" in tags:
self.longitude = ImageExif.format_gps(tags["GPS GPSLongitude"])
def __str__(self):
location = "Unknown"
if self.latitude is not None and self.longitude is not None:
query = "{},{}".format(self.latitude, self.longitude)
location = geoconverter.reverse(query=query)
return " datetime: %s\n latitude: %s\nlongitude: %s\n location: %s" % (self.datetime, self.latitude, self.longitude, location)
@staticmethod
def format_gps(data):
list_tmp=str(data).replace('[', '').replace(']', '').split(',')
list=[ele.strip() for ele in list_tmp]
        data_sec = int(list[-1].split('/')[0]) / (int(list[-1].split('/')[1]) * 3600)  # seconds component
data_minute = int(list[1])/60
data_degree = int(list[0])
result=data_degree + data_minute + data_sec
return result
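        # Hedged conversion example: an EXIF GPS value like
        # "[54, 59, 1234/100]" yields 54 + 59/60 + (1234/100)/3600
        # ~= 54.9868 decimal degrees.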
def analyze(imageFile: str) -> ImageExif:
with open(imageFile, "rb") as f:
tags:Dict[str, Any] = exifread.process_file(f)
if len(tags) == 0:
return None
return ImageExif(imageFile, tags)
# analyze ended
def main():
file = "/Users/yeqown/Downloads/IMG_1779.png"
exif = analyze(file)
print("image(%s) analyzing finished" % file)
if exif is None:
print("no exif data found")
return
print(exif)
if __name__ == "__main__":
main()
| true
|
f42a73112e38a0bdc6ed0d7cc374c4aca0e74ab2
|
Python
|
fchamicapereira/projecteuler
|
/17.py
|
UTF-8
| 1,721
| 3.609375
| 4
|
[] |
no_license
|
numbers1 = ['','one','two','three','four','five','six','seven','eight','nine','ten','eleven','twelve','thirteen','fourteen','fifteen','sixteen','seventeen','eighteen','nineteen']
numbers2 = ['','','twenty','thirty','forty','fifty','sixty','seventy','eighty','ninety']
numbers3 = ['','hundred','thousand','million','billion']
def orderOne(x):
return numbers1[x]
def orderTwo(x):
if x < 20:
return numbers1[x]
secondDigit = x % 10
firstDigit = (x - secondDigit) / 10
if x % 10 == 0:
return numbers2[firstDigit]
return numbers2[firstDigit] + "-" + numbers1[secondDigit]
def orderThree(x):
digit = int(x/100)
otherDigits = x - digit*100
if otherDigits == 0:
return numbers1[digit] + " " + numbers3[1]
return numbers1[digit] + " " + numbers3[1] + " and " + orderTwo(otherDigits)
def orderFourToSix(x):
firstThreeDigits = int(x / 1000)
otherDigits = x - firstThreeDigits * 1000
if otherDigits == 0:
return text( firstThreeDigits ) + " " + numbers3[2]
if int(otherDigits / 100) != 0:
return text( firstThreeDigits ) + " " + numbers3[2] + ", " + text (otherDigits)
else:
return text( firstThreeDigits ) + " " + numbers3[2] + " and " + text (otherDigits)
def text(x):
nDigits = len(str(x))
if nDigits == 1:
return orderOne(x)
if nDigits == 2:
return orderTwo(x)
if nDigits == 3:
return orderThree(x)
if nDigits > 3 and nDigits < 7:
return orderFourToSix(x)
def count_letters(word):
BAD_LETTERS = " ,-"
return len([letter for letter in word if letter not in BAD_LETTERS])
sum = 0
for x in range(1,1001):
sum += count_letters(text(x))
print sum
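# Worked example from the problem statement: 342 -> "three hundred and
# forty-two" contains 23 letters once spaces and hyphens are excluded, i.e.
# count_letters(text(342)) == 23; likewise 115 -> "one hundred and fifteen"
# gives 20 letters.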
| true
|
eef0e8f5ba33e8e89f2d5ac2cfca80620c0ee726
|
Python
|
Taeheon-Lee/Programmers
|
/level1/gcd_and_lcm.py
|
UTF-8
| 730
| 4.03125
| 4
|
[] |
no_license
|
"최대공약수와 최소공배수"
# 문제 링크 "https://programmers.co.kr/learn/courses/30/lessons/12940"
def solution(n, m):
n1, n2 = max(n, m), min(n, m) # 두 수를 비교하여 큰 값, 작은 값을 변수로 대입
i = 1
while i > 0: # 유클리드 호제법 이용 (Euclidean algorithm)
i = n1 % n2 # 큰 수에서 작은 수를 나눈 뒤, 다시 작은 수를 나머지로 나누는 작업을 반복하여 나머지가 0이 될 때,
n1, n2 = n2, i # 그 수를 나누었던 수가 최대 공약수
return [n1, int(n*m/n1)] # 최소 공배수는 두 수의 곱을 두 수의 최대공약수로 나눈 수가 최소 공배수
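# Hedged usage example (not part of the original submission):
# solution(12, 8) traces 12 % 8 = 4, then 8 % 4 = 0, so the GCD is 4 and
# the LCM is 12 * 8 / 4 = 24.
if __name__ == "__main__":
    assert solution(12, 8) == [4, 24]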
| true
|
d67c7aef77e0899b5c7867606a0ac2b8f475b3bc
|
Python
|
toe4626/picryptoclock
|
/clock.py
|
UTF-8
| 3,089
| 3.09375
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import os, time, pygame
# Initialize PyGame...
os.putenv('SDL_VIDEODRIVER', 'fbcon') # works for 320x240 Adafruit PiTFT
os.putenv('SDL_FBDEV', '/dev/fb1') # which is treated as a framebuffer
pygame.init()
pygame.mouse.set_visible(False)
screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
pygame.font.init()
# Miscellaneous setup for clock...
font_hhmm = pygame.font.Font(None, 180)
font_ss = pygame.font.Font(None, 100)
font_ss2 = pygame.font.Font(None, 58)
black = (0, 0, 0) # RGB tuples
green = (0, 255, 0)
white = (255, 255, 255)
last_second_shown = 0 # seconds past the epoch (midnight 1970-01-01 UTC)
# Loop to display updated clock each second...
now = time.time()
last_minute = int(now)
#READ INFO FROM FILE CREATED BY CRON JOB
file = open("/home/pi/picryptoclock/btcprice.txt","r")
linedata = file.readline()
linedata = linedata.split("|")
price = linedata[0]
mktcap = linedata[1]
file.close()
ss = "BTC: $" + str(price)
mc = "Total Market Cap: $" + str(mktcap) + "b"
while True:
now = time.time()
current_second = int(now)
if current_second != last_second_shown: # if a new/different second...
last_second_shown = current_second
screen.fill(black) # erase everything
if (current_second >= last_minute + 60):
last_minute = current_second
file = open("/home/pi/picryptoclock/btcprice.txt","r")
linedata = file.readline()
linedata = linedata.split("|")
price = linedata[0]
mktcap = linedata[1]
file.close()
ss = "BTC: $" + str(price)
mc = "Total Market Cap: $" + str(mktcap) + "b"
#IF YOU DON'T WANT YOUR SCREEN FLIPPED THEN YOU WILL NEED TO
#REMOVE pygame.transform.rotate(ss_surface, 180) AND JUST PASS IN ss_surface INSTEAD
#YOU WILL ALSO NEED TO PLAY AROUND WITH xy and xy2
ss_surface = font_ss.render(ss, True, white, black)
#xy2 = xy[0] + hhmm_surface.get_width(), xy[1] + 50 # follows HH:MM
xy3 = (40, 100) # where to draw
screen.blit(pygame.transform.rotate(ss_surface, 180), xy3)
ss_surface = font_ss2.render(mc, True, white, black)
#xy2 = xy[0] + hhmm_surface.get_width(), xy[1] + 50 # follows HH:MM
xy2 = (10, 30) # where to draw
screen.blit(pygame.transform.rotate(ss_surface, 180), xy2)
tm = time.localtime(now)
        # draw HH:MM:SS on screen
hhmm = '{:02}:{:02}:{:02}'.format(tm.tm_hour, tm.tm_min, tm.tm_sec)
hhmm_surface = font_hhmm.render(hhmm, True, white, black)
xy = (5, 190) # where to draw
screen.blit(pygame.transform.rotate(hhmm_surface, 180), xy)
#screen.blit(hhmm_surface, xy)
# draw :SS on screen (following HH:MM, slightly smaller)
#screen.blit(ss_surface, xy2)
pygame.display.update() # and have PyGame display the updated screen
pygame.time.wait(200) # Wait 1/5th of a second before checking again
| true
|
2e9bb0a22844ccd2f38ca4698d6494e3f1933752
|
Python
|
AdamZhouSE/pythonHomework
|
/Code/CodeRecords/2900/60624/316453.py
|
UTF-8
| 164
| 3.109375
| 3
|
[] |
no_license
|
def func13():
s = input().strip()
ans = len(s)
for i in s:
if i==" " or i=="\n":
ans -= 1
print(ans,end="")
return
func13()
| true
|
9ce494c9e4be8da9187cf7a35d8ca60c289f52ff
|
Python
|
Shreeyak/cleargrasp
|
/z-ignore-scripts-helper/move_images_syn_dataset.py
|
UTF-8
| 5,001
| 2.6875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python3
import argparse
import concurrent.futures
import fnmatch
import glob
import itertools
import json
import multiprocessing as mp
import os
import shutil
import time
from pathlib import Path
import sys
from termcolor import colored
# The various subfolders into which the synthetic data is to be organized into.
# These folders will be created and the files with given postfixes will be moved into them.
SUBFOLDER_MAP_SYNTHETIC = {
'rgb-files': {
'postfix': '-rgb.jpg',
'folder-name': 'rgb-imgs'
},
'depth-files': {
'postfix': '-depth.exr',
'folder-name': 'depth-imgs'
},
'json-files': {
'postfix': '-masks.json',
'folder-name': 'json-files'
},
'world-normals': {
'postfix': '-normals.exr',
'folder-name': 'world-normals'
},
'variant-masks': {
'postfix': '-variantMasks.exr',
'folder-name': 'variant-masks'
},
'component-masks': {
'postfix': '-componentMasks.exr',
'folder-name': 'component-masks'
},
'camera-normals': {
'postfix': '-cameraNormals.exr',
'folder-name': 'camera-normals'
},
'camera-normals-rgb': {
'postfix': '-cameraNormals.png',
'folder-name': 'camera-normals/rgb-visualizations'
},
'outlines': {
'postfix': '-outlineSegmentation.png',
'folder-name': 'outlines'
},
'outlines-rgb': {
'postfix': '-outlineSegmentationRgb.png',
'folder-name': 'outlines/rgb-visualizations'
},
'depth-files-rectified': {
'postfix': '-depth-rectified.exr',
'folder-name': 'depth-imgs-rectified'
},
'segmentation-masks': {
'postfix': '-segmentation-mask.png',
'folder-name': 'segmentation-masks'
}
}
def move_to_subfolders(dest_dir, source_dir, index, dest_index=None):
'''Move each file type to it's own subfolder.
It will create a folder for each file type. The file type is determined from it's postfix.
The file types and their corresponding directory are defined in the SUBFOLDER_MAP dict
Args:
dest_dir (str): Path to new dataset.
source_dir (str): Path to old dataset from which to move.
index (int): The prefix num which is to be moved.
Returns:
        int: 1 if at least one file with the given prefix was moved, else 0
'''
count_moved = 0
for filetype in SUBFOLDER_MAP_SYNTHETIC:
file_postfix = SUBFOLDER_MAP_SYNTHETIC[filetype]['postfix']
subfolder = SUBFOLDER_MAP_SYNTHETIC[filetype]['folder-name']
filename = os.path.join(source_dir, subfolder, '{:09d}'.format(index) + file_postfix)
if os.path.isfile(filename):
if dest_index is None:
dest_index = index
shutil.move(filename, os.path.join(dest_dir, subfolder, '{:09d}'.format(dest_index)+file_postfix))
count_moved += 1
print("\tMoved {} to {}".format(index, dest_index))
if count_moved > 1:
count_moved = 1
return count_moved
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Rearrange non-contiguous numbered images in a dataset, move to separate folders and process.')
parser.add_argument('-s', required=True, help='Path to source dataset', metavar='path/to/dataset')
parser.add_argument('-d', required=True, help='Path to dest dataset', metavar='path/to/dataset')
parser.add_argument('-n', default=0, type=int, help='Numbering in dest will strt from this')
args = parser.parse_args()
if not os.path.isdir(args.s):
print(colored('ERROR: Did not find {}. Please pass correct path to dataset'.format(args.s), 'red'))
exit()
if not os.path.isdir(args.d):
print(colored('ERROR: Did not find {}. Please pass correct path to dataset'.format(args.d), 'red'))
exit()
source_dir = args.s
dest_dir = args.d
for filetype in SUBFOLDER_MAP_SYNTHETIC:
subfolder_path = os.path.join(dest_dir, SUBFOLDER_MAP_SYNTHETIC[filetype]['folder-name'])
if not os.path.isdir(subfolder_path):
os.makedirs(subfolder_path)
print("\tCreated dir:", subfolder_path)
while True:
index = input("Enter range or prefix of files to move: ")
if index == 'q':
print('Quitting.')
exit()
elif index == 'c':
start_num = int(input("Enter starting prefix: "))
end_num = int(input("Enter end prefix: "))
for ii in range(start_num, end_num):
move_to_subfolders(dest_dir, source_dir, ii)
elif index == 'a':
start_num = int(input("Enter starting prefix: "))
end_num = int(input("Enter end prefix: "))
count_moved = 0
for ii in range(start_num, end_num):
count_moved += move_to_subfolders(dest_dir, source_dir, ii, ii + args.n)
else:
index = int(index)
move_to_subfolders(dest_dir, source_dir, index)
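# Hedged example of the per-filetype move performed above: for prefix 12 and
# an offset of n, SUBFOLDER_MAP_SYNTHETIC sends
#   <source>/rgb-imgs/000000012-rgb.jpg -> <dest>/rgb-imgs/<000000012+n>-rgb.jpg
# and likewise for every other postfix/folder pair in the map.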
| true
|
fb659966a7e17500bcdf3d5ae1410221416b7cb2
|
Python
|
SaahilClaypool/CS534_AI
|
/1_Assignment/1_part/HeavyHill.py
|
UTF-8
| 7,053
| 3.28125
| 3
|
[] |
no_license
|
import random
import heapq
from queue import PriorityQueue
import time
from typing import Sequence, Mapping
from copy import deepcopy
import argparse
class Board:
"""
    Board state for the heavy queens puzzle: board[c] holds the row of the queen in column c.
"""
def __init__(self, board: [], prev_cost: int, added_cost = 0, rand=False):
self.size = len(board)
self.prev_cost = prev_cost
self.added_cost = added_cost
if(rand):
self.board = Board.generate(self.size)
else:
self.board = list(board)
def __str__(self):
st = "[" + ", ".join(map(str, self.board)) + "]"
st += "\n"
st += "moved peice cost: " + str(self.prev_cost)
st += "\n"
st += "total cost: " + str(self.cost())
st += "\n"
st += "number of attacking queens: " + str(self.attacking_pairs())
st += "\n"
return st
def __repr__(self):
return self.__str__()
def __eq__(self, value):
return self.board == value.board
def __hash__(self):
tens = 1
s = 0
for i in self.board:
            s += i * tens
tens *= 10
return s
def __ne__(self, other):
return not(self == other)
def __lt__(self, other):
return self.cost() < other.cost()
@staticmethod
def generate(size: int):
"""
return random array
"""
ar = []
for i in range(size):
ar.append(random.randint(0, size - 1))
return ar
def attacking_pairs(self):
cnt = 0
for c1 in range(len(self.board)):
# calc horizontal
for c2 in range(len(self.board)):
if (c1 == c2):
continue
elif (self.board[c1] == self.board[c2]):
cnt += 1
elif (abs(self.board[c1] - self.board[c2]) == abs(c1 - c2)) : #if difference in column = difference in rows then on diag
cnt += 1
if (cnt == 0):
return 0
else:
return cnt / 2
def calculate_heuristic(self):
cnt = self.attacking_pairs()
if (cnt == 0):
return 0
else:
return 10 + cnt
def cost(self) -> int:
return self.prev_cost + self.calculate_heuristic()
# return self.calculate_heuristic()
def calc_next(self) -> Sequence['Board']:
moves = []
for c in range(self.size):
for r in range(self.size):
if (r == self.board[c]): # didn't move
continue
new_board_ar : Sequence[int] = deepcopy(self.board)
new_board_ar[c] = r
added_cost = (self.board[c] - r) ** 2 + 10
moves.append(Board(new_board_ar, added_cost + self.prev_cost, added_cost))
return moves
def a_star(self):
explored = [] # already explored
todo : PriorityQueue = PriorityQueue()
todo.put(self)
camefrom : Mapping[Board, Board] = {} # best previous for given state
prevcost : Mapping[Board, int] = {} # cost to get to given state
hcost: Mapping[Board, int] = {} # cost to get to given state
prevcost[self] = 0 # start with zero
hcost[self] = self.calculate_heuristic()
while (todo):
# cur = heapq.heappop(todo)
cur = todo.get()
if (cur.calculate_heuristic() == 0):
return cur, camefrom, len(explored)
explored.append(cur)
neighbors = cur.calc_next()
for n in neighbors:
if (n in explored):
continue
if (n not in todo.queue):
todo.put(n)
cost_there = prevcost[cur] + n.added_cost
if (n in prevcost.keys() and \
cost_there >= prevcost[n]):
continue
camefrom[n] = cur
prevcost[n] = cost_there
hcost[n] = prevcost[n] + n.calculate_heuristic()
def climb(self):
start_time = time.time()
best = self
best_score = self.calculate_heuristic()
best_chain = [self]
cur_best = best
cur_best_score = best.calculate_heuristic()
cur_chain = [cur_best]
checked = 0
while(time.time() - start_time < 10 and\
not cur_best.calculate_heuristic() == 0):
checked += 1
prev_best = cur_best
            next_moves = cur_best.calc_next()
random.shuffle(next_moves)
next_moves.append(prev_best)
for m in next_moves:
if(m.calculate_heuristic() < cur_best_score or\
m.calculate_heuristic() == cur_best_score and m.prev_cost < cur_best.prev_cost):
cur_best_score = m.calculate_heuristic()
cur_best = m
cur_chain.append(cur_best)
if (m.calculate_heuristic() < best_score or\
m.calculate_heuristic() == best_score and m.prev_cost < best.prev_cost):
best_score = m.calculate_heuristic()
best = m
best_chain = cur_chain
if (cur_best == prev_best):
cur_best = Board(self.board, 0, 0, True)
cur_best_score = cur_best.calculate_heuristic()
cur_chain = []
return best, best_chain, time.time() - start_time, checked
def climb(b: Board):
print(b)
best, chain, t, checked = b.climb()
print("Best Board: ")
print(best)
print("Chain: ")
step = 0
for i in chain:
step += 1
print(step, "-----------------\n")
print(i)
print("Completed in : ", t, "seconds")
print("Checked {} nodes".format(checked))
print("Effective branching factor: " + str(checked ** (1 / step)))
def astar(b):
print(b)
s = time.time()
best, camefrom, expanded = b.a_star()
print("Steps in reverse:")
print("-------------\n")
cur = best
steps = []
while(cur in camefrom.keys()):
steps.append(cur)
cur = camefrom[cur]
i = 0
steps.append(b)
steps.reverse()
for step in steps:
print("step {}: {}".format(i, step))
i += 1
print("Finished in {}".format(time.time() - s))
print("Expanded {} nodes".format(expanded))
print("Effective branching factor: " + str(expanded ** (1 / len(steps))))
def main():
parser = argparse.ArgumentParser(description='Simulates the Heavy Queens Problem.')
parser.add_argument('N', help='The number of Queens', type=int)
    parser.add_argument('algorithm', help='Which algorithm to use: 1 - A*, 0 - Hill Climb', type=int)
args = parser.parse_args()
b = Board([i for i in range(args.N)], 0, 0, True)
if args.algorithm == 1:
astar(b)
elif args.algorithm == 0:
climb(b)
else:
print('Error: algorithm argument must be a 1 or a 0!')
if __name__ == "__main__":
main()
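# Hedged CLI examples matching the argparse setup above:
#   python HeavyHill.py 8 1   # 8 heavy queens, A* search
#   python HeavyHill.py 8 0   # 8 heavy queens, hill climbing with restarts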
| true
|
5b88fa997fdca34aa3bbd944ae8a7a021aef9d26
|
Python
|
entoad/location-entropy
|
/test_utils.py
|
UTF-8
| 1,040
| 2.96875
| 3
|
[] |
no_license
|
import math
import unittest
from pyspark.sql import SparkSession
from utils import calc_location_entropy
class TestUtils(unittest.TestCase):
def test_location_entropy(self):
spark = SparkSession.builder.master("local[2]").appName("pyspark-sql-test").getOrCreate()
lst = [('test-loc', 1)]
df = spark.createDataFrame(lst, ['location', 'num_visits'])
        self.assertEqual(calc_location_entropy(df), 0)
list2 = [('loc1', 1), ('loc2', 1)]
df2 = spark.createDataFrame(list2, ['location', 'num_visits'])
self.assertTrue(self.are_floats_equal(calc_location_entropy(df2), - 2 * 0.5 * math.log(0.5, 2)))
list3 = [('loc1', 2), ('loc2', 2), ('loc3', 2)]
one_third = 1.0 / 3
df3 = spark.createDataFrame(list3, ['location', 'num_visits'])
self.assertTrue(self.are_floats_equal(calc_location_entropy(df3), - 3 * one_third * math.log(one_third, 2)))
@staticmethod
def are_floats_equal(f1, f2, threshold=.0001):
return abs(f1 - f2) <= threshold
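# Standard unittest entry point (not in the original file) so the suite can be
# run directly with `python test_utils.py`:
if __name__ == '__main__':
    unittest.main()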
| true
|
d56ea7d1000e36720a395fcb81dd038cd531de0b
|
Python
|
jtlai0921/XB1828-
|
/XB1828_Python零基礎最強入門之路-王者歸來_範例檔案New/ch11/ch11_30_1.py
|
UTF-8
| 197
| 2.6875
| 3
|
[] |
no_license
|
# ch11_30_1.py
def printmsg():
    print("Print the global variable: ", msg)
    msg = "Java"        # attempting to modify the global variable causes an error
    print("After the change: ", msg)
msg = "Python"
printmsg()
| true
|
bb34451ef6b1da8fb940e27ee4cdd50c254faabb
|
Python
|
mackoo13/pmp
|
/pmp/rules/bloc.py
|
UTF-8
| 668
| 2.578125
| 3
|
[] |
no_license
|
from .weakly_separable import WeaklySeparable
class Bloc(WeaklySeparable):
"""Bloc vote scoring rule."""
def __str__(self):
return "Bloc"
def initialise_weights(self, k, _profile):
self.weights = [1] * k
def find_committee(self, k, profile, random_winning_committee=False):
self.initialise_weights(k, profile)
committee = WeaklySeparable.find_committee(self, k, profile, random_winning_committee)
return committee
def compute_score(self, candidate, k, profile):
self.initialise_weights(k, profile)
score = WeaklySeparable.compute_score(self, candidate, k, profile)
return score
| true
|
647dd92c2c51cf82d1fa728f09427f81bdc1bd4b
|
Python
|
lurenxiao1998/CTFOJ
|
/[De1CTF 2019]SSRF Me/test.py
|
UTF-8
| 258
| 2.578125
| 3
|
[] |
no_license
|
import socket
import urllib.request
def scan(param):
socket.setdefaulttimeout(10)
# try:
    return urllib.request.urlopen(param).read()[:100]
# except:
# return "Connection Timeout"
if __name__ == "__main__":
print(scan("a.py"))
| true
|
a37d3b54d78c8c3f33af16044ffcaa66bde22ca3
|
Python
|
ssoomin1/CosPro_Test
|
/모의고사3_2.py
|
UTF-8
| 371
| 3
| 3
|
[] |
no_license
|
#신수민
def func_a(arr):
total=0
for i in arr:
total+=i
return total
def solution(total,arr):
result=[]
req_total=func_a(arr)
for i in arr:
if req_total>total:
result.append(total//len(arr))
else:
result.append(i)
return result
total=100
arr=[20,20,30,40]
ret=solution(total,arr)
print(ret)
| true
|
c9bf188095f2a110441922c2b73cd330ce898c39
|
Python
|
ToJohnTo/OTUS_Python_QA_Engineer
|
/PythonQAOtus_Lesson28/Test_REST_API_1/test_1_API_1.py
|
UTF-8
| 433
| 2.765625
| 3
|
[] |
no_license
|
import pytest
import requests
url = "https://dog.ceo/api/"
proxy = {"https": "localhost:8080"}
@pytest.mark.parametrize("number", [2, 3, 4, 5])
def test_count_image(number):
""" Проверка получения фотографий рандомных пород. """
response = requests.get(url + "breeds/image/random/{}".format(number), proxies=proxy, verify=False)
assert len(response.json()["message"]) == number
| true
|
6f40e38df27d95a0b766e93adc844e3b7d9fae22
|
Python
|
pshrest21/Histogram-Equalization
|
/project2.py
|
UTF-8
| 7,033
| 3.203125
| 3
|
[] |
no_license
|
import cv2
from collections import Counter
import matplotlib.pyplot as mplot
import numpy as np
from PIL import Image
#Read the images using openCV
fish = cv2.imread('fish.pgm', 0)
jet = cv2.imread('jet.pgm', 0)
def oneDList(img_array):
    # flatten the 2D image array into a 1D list
new_img_array = [item for sublist in img_array for item in sublist]
return new_img_array
def img_dict(img_array):
new_img_array = oneDList(img_array)
#make a dictionary where the key is the grayscale and value is it's count
my_dict = dict(Counter(new_img_array))
#sort the dictionary in ascending order and convert it to list of tuples of key and value pairs
my_list = sorted(my_dict.items())
#set the keys to x and values to y
x,y = zip(*my_list)
x = np.array(x)
y = np.array(y)
my_new_dict = dict()
#have key and value pairs for all grayscales (0-255)
for i in range(0, 256):
if(i in x):
my_new_dict[i] = my_dict[i]
else:
my_new_dict[i] = 0
return my_new_dict
#returns a numpy array of cumulative density function's y-axis's values
def cdf(my_list, y):
cdf_list = []
sum = y[0]
cdf_list.append(sum)
for i in range(1, len(y)):
sum = sum + y[i]
cdf_list.append(sum)
cdf_list = np.array(cdf_list)
return cdf_list
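# Illustrative example (not in the original): cdf ignores its first argument
# and returns the running sum of y, e.g.
#   cdf(None, np.array([1, 2, 3]))  ->  array([1, 3, 6])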
#function that takes 2D list of image, upper bound and lower bound, and returns
#a 2D list whose each element is linearly stretched
def contrastEnhance(img_array, l2, l1):
lower = l1 * 255
upper = l2 * 255
slope = 255 / (upper - lower)
b = -1 * slope * lower
    for i in range(0, len(img_array)):          # iterate over every row (the original skipped the last one)
        for j in range(0, len(img_array[i])):   # and every column
if(img_array[i][j] < lower):
img_array[i][j] = 0
elif(img_array[i][j] > upper):
img_array[i][j] = 255
else:
img_array[i][j] = slope * img_array[i][j] + b
img_array = np.clip(img_array,0,255)
return img_array
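# Worked example of the stretch parameters (illustrative): with l1 = 0.1 and
# l2 = 0.9, lower = 25.5 and upper = 229.5, so slope = 255 / 204 = 1.25 and
# b = -1.25 * 25.5 = -31.875; pixels below 25.5 clip to 0, above 229.5 to 255.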
#level slicing
def levelSlice(img_array, l):
for i in range(0, len(img_array)):
for j in range(0, len(img_array[i])):
if img_array[i][j] >= l and img_array[i][j] <= l+10:
img_array[i][j] = 255
else:
img_array[i][j] = 0
return img_array
#Histogram Equalization
def histEqualize(img_array, cdf_list):
final_dict = dict()
cdf_list = np.rint(cdf_list)
#print(cdf_list)
for i in range(0, len(cdf_list)):
final_dict[i] = int(cdf_list[i])
#print(final_dict)
my_list = np.zeros([512, 512])
#print(img_array)
for i in range(0, len(img_array)):
for j in range(0, len(img_array[i])):
my_list[i][j] = final_dict[img_array[i][j]]
#print(my_list)
return my_list
def displayPmf(x, y, xLabel, yLabel, title):
mplot.bar(x, y)
mplot.xlabel(xLabel)
mplot.ylabel(yLabel)
mplot.title(title)
mplot.show()
def displayCdf(x, y, xLabel, yLabel, title):
mplot.plot(x, y)
mplot.xlabel(xLabel)
mplot.ylabel(yLabel)
mplot.title(title)
mplot.show()
def getImageInfo(img_array):
#getting information from original image
dict_img = img_dict(img_array)
my_list = sorted(dict_img.items())
#set the keys to x and values to y
x,y = zip(*my_list)
x = np.array(x)
y = np.array(y) / 262144
cdf_fish = cdf(x, y)
return x,y,cdf_fish
#----Display original image, it's pmf and cdf---
#for fish.pgm
fish_image = Image.fromarray(fish)
fish_image.convert('L').save('fish.png', optimize = True)
x, y, main_cdf_fish = getImageInfo(fish)
#main_cdf_fish = main_cdf_fish / 262144
displayPmf(x, y, "Gray Level (0-255)", "Total Number of Pixels (0-1)", "Pmf of 'fish.png'")
displayCdf(x, main_cdf_fish, "Gray Level (0-255)", "Cumulative Distribution of Gray Levels", "Cdf of 'fish.png'")
#for jet.pgm
jet_image = Image.fromarray(jet)
jet_image.convert('L').save('jet.png', optimize = True)
x,y,main_cdf_jet = getImageInfo(jet)
#main_cdf_jet = main_cdf_jet / 262144
displayPmf(x, y, "Gray Level (0-255)", "Total Number of Pixels (0-1)", "Pmf of 'jet.png'")
displayCdf(x, main_cdf_jet, "Gray Level (0-255)", "Total Number of Pixels (0-1)", "Cdf of 'jet.png'")
#-------------Contrast Stretching--------------
#for fish
fish_array = contrastEnhance(fish, 0.9, 0.1)
img_fish = Image.fromarray(fish_array)
img_fish.convert('L').save('fish_stretched.png', optimize = True)
#problem here too
x,y,cdf_fish = getImageInfo(fish_array)
displayPmf(x, y, "Gray Level (0-255)", "Total Number of Pixels (0-1)", "Pmf of Contrast Stretched Fish")
#for jet
jet_array = contrastEnhance(jet, 0.9, 0.1)
img_jet = Image.fromarray(jet_array)
img_jet.convert('L').save('jet_stretched.png', optimize = True)
x,y,cdf_jet = getImageInfo(jet_array)
displayPmf(x, y, "Gray Level (0-255)", "Total Number of Pixels (0-1)", "Pmf of Contrast Stretched Jet")
#-------------Level Slicing--------------------
fish = cv2.imread('fish.pgm', 0)
jet = cv2.imread('jet.pgm', 0)
#for fish
img_array_fish = levelSlice(fish, 200)
img_fish = Image.fromarray(img_array_fish)
img_fish.convert('L').save('fish_sliced.png', optimize = True)
x,y,cdf_fish = getImageInfo(img_array_fish)
displayPmf(x, y, "Gray Level (0-255)", "Total Number of Pixels (0-1)", "Pmf of Sliced Image Fish")
#for jet
img_array_jet = levelSlice(jet, 200)
img_jet = Image.fromarray(img_array_jet)
img_jet.convert('L').save('jet_sliced.png', optimize = True)
x,y,cdf_jet= getImageInfo(img_array_jet)
displayPmf(x, y, "Gray Level (0-255)", "Total Number of Pixels (0-1)", "Pmf of Sliced Image Jet")
#-------------Histogram Equalization For Fish-----------
#re-read the images because level slicing overwrote the arrays with 0/255 values
fish = cv2.imread('fish.pgm', 0)
jet = cv2.imread('jet.pgm', 0)
new_fish = histEqualize(fish, main_cdf_fish * 255)
new_x, new_y, new_cdf_fish = getImageInfo(new_fish)
new_img_fish = Image.fromarray(new_fish)
new_img_fish.convert('L').save('fish_equalized.png', optimize = True)
displayPmf(new_x, new_y, "Gray Level (0-255)", "Total Number of Pixels (0-1)", "Histogram of Equalized Image 'fish.png'")
displayCdf(new_x, new_cdf_fish, "Gray Level (0-255)", "Total Number of Pixels (0-1)", "Cdf of Equalized Image 'fish.png'")
#-------------Histogram Equalization For Jet-----------
new_jet = histEqualize(jet, main_cdf_jet * 255)
new_x, new_y, new_cdf_jet = getImageInfo(new_jet)
new_img_jet = Image.fromarray(new_jet)
new_img_jet.convert('L').save('jet_equalized.png', optimize = True)
displayPmf(new_x, new_y, "Gray Level (0-255)", "Total Number of Pixels (0-1)", "Histogram of Equalized Image 'jet.png'")
displayCdf(new_x, new_cdf_jet, "Gray Level (0-255)", "Total Number of Pixels (0-1)", "Cdf of Equalized Image 'jet.png'")
| true
|
ea3f78c45c550a49ebc9dd67cac27fe68396dd87
|
Python
|
LennyPhoenix/Jank-Engine
|
/jank/load_animation_sheet.py
|
UTF-8
| 845
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
import jank
def load_animation_sheet(image: jank.pyglet.image.AbstractImage, data: dict):
max_length = max(a["length"] for a in data["animations"])
sprite_sheet = jank.pyglet.image.ImageGrid(
image,
len(data["animations"]),
max_length
)
animations = {}
for a in range(len(data["animations"])):
animation_data = data["animations"][a]
frames = []
frame_length = data["animations"][a]["frame_length"]
for i in range(animation_data["length"]):
frames.append(
sprite_sheet[(a, i)]
)
animations[
data["animations"][a]["alias"]
] = jank.pyglet.image.Animation.from_image_sequence(
frames,
frame_length,
loop=data["animations"][a]["loop"]
)
return animations
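# Illustrative shape of the expected `data` dict (inferred from the lookups
# above; not documented in the original):
#   {
#       "animations": [
#           {"alias": "walk", "length": 4, "frame_length": 0.1, "loop": True},
#           {"alias": "idle", "length": 2, "frame_length": 0.5, "loop": True},
#       ]
#   }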
| true
|
7be59a6ec97d4ce920b307663ecf9d2ae63636ff
|
Python
|
KermaniMojtaba/Social_distancing_demo_COVID_19
|
/ImpactOfSocialDistancing2.py
|
UTF-8
| 1,625
| 3.0625
| 3
|
[] |
no_license
|
import numpy as np
import pandas as pd
# Simulation parameters (assumed example values; the original snippet uses
# these names without defining them):
POPULATION = 1000
INITIALLY_AFFECTED = 4
DAYS = 30
DAYS_TO_RECOVER = 10
SPREAD_FACTOR = 1.25
city = pd.DataFrame(data={'id': np.arange(POPULATION), 'infected': False, 'recovery_day': None, 'recovered': False})
city = city.set_index('id')
firstCases = city.sample(INITIALLY_AFFECTED, replace=False)
city.loc[firstCases.index, 'infected'] = True
city.loc[firstCases.index, 'recovery_day'] = DAYS_TO_RECOVER
stat_active_cases = [INITIALLY_AFFECTED]
stat_recovered = [0]
for today in range(1, DAYS):
# Mark people who have recovered today
city.loc[city['recovery_day'] == today, 'recovered'] = True
city.loc[city['recovery_day'] == today, 'infected'] = False
    # Calculate the number of people who are infected today
spreadingPeople = city[ (city['infected'] == True)]
totalCasesToday = round(len(spreadingPeople) * SPREAD_FACTOR)
casesToday = city.sample(totalCasesToday, replace=True)
# Ignore people who were already infected in casesToday
casesToday = casesToday[ (casesToday['infected'] == False) & (casesToday['recovered'] == False) ]
# Mark the new cases as infected, and their recovery day
city.loc[casesToday.index, 'infected'] = True
city.loc[casesToday.index, 'recovery_day'] = today + DAYS_TO_RECOVER
stat_active_cases.append(len(city[city['infected'] == True]))
# stat_recovered.append(len(city[city['recovered'] == True]))
# Try and reduce the SPREAD_FACTOR to simulate the effects of different levels of social distancing
# if today >= 5:
# SPREAD_FACTOR = 1
# if today >= 10:
# SPREAD_FACTOR = 0.1
import matplotlib.pyplot as plt
plt.bar(x=np.arange(DAYS), height=stat_active_cases, color="red")
plt.show()
| true
|
fa338a6e91041744ae9316bbb9158c497c9f1be7
|
Python
|
FrancescAlted/IC
|
/invisible_cities/reco/peak_functions.py
|
UTF-8
| 12,035
| 2.546875
| 3
|
[] |
no_license
|
"""Functions to find peaks, S12 selection etc.
JJGC and GML December 2016
"""
import numpy as np
from scipy import signal
from .. core import system_of_units as units
from .. sierpe import blr
from . import peak_functions_c as cpf
from . params import CalibratedSum
from . params import PMaps
def calibrated_pmt_sum(CWF, adc_to_pes, pmt_active = [], n_MAU=200, thr_MAU=5):
"""Compute the ZS calibrated sum of the PMTs
after correcting the baseline with a MAU to suppress low frequency noise.
input:
CWF : Corrected waveform (passed by BLR)
adc_to_pes : a vector with calibration constants
n_MAU : length of the MAU window
    thr_MAU    : threshold above MAU to select sample
NB: This function is used mainly for testing purposes. It is
programmed "c-style", which is not necesarily optimal in python,
but follows the same logic that the corresponding cython function
(in peak_functions_c), which runs faster and should be used
instead of this one for nornal calculations.
"""
NPMT = CWF.shape[0]
NWF = CWF.shape[1]
MAU = np.array(np.ones(n_MAU), dtype=np.double) * (1 / n_MAU)
pmt_thr = np.zeros((NPMT, NWF), dtype=np.double)
csum = np.zeros( NWF, dtype=np.double)
csum_mau = np.zeros( NWF, dtype=np.double)
MAU_pmt = np.zeros( NWF, dtype=np.double)
MAUL = []
PMT = list(range(NPMT))
if len(pmt_active) > 0:
PMT = pmt_active
for j in PMT:
# MAU for each of the PMTs, following the waveform
MAU_pmt = signal.lfilter(MAU, 1, CWF[j,:])
MAUL.append(MAU_pmt)
csum += CWF[j] * 1 / adc_to_pes[j]
for k in range(NWF):
if CWF[j,k] >= MAU_pmt[k] + thr_MAU: # >= not >. Found testing
pmt_thr[j,k] = CWF[j,k]
csum_mau += pmt_thr[j] * 1 / adc_to_pes[j]
return csum, csum_mau, np.array(MAUL)
def wfzs(wf, threshold=0):
"""Takes a waveform wf and return the values of the wf above
threshold: if the input waveform is of the form [e1,e2,...en],
where ei is the energy of sample i, then then the algorithm
returns a vector [e1,e2...ek], where k <=n and ei > threshold and
a vector of indexes [i1,i2...ik] which label the position of the
zswf of [e1,e2...ek]
For example if the input waveform is:
[1,2,3,5,7,8,9,9,10,9,8,5,7,5,6,4,1] and the trhesold is 5
then the algoritm returns
a vector of amplitudes [7,8,9,9,10,9,8,7,6] and a vector of indexes
[4,5,6,7,8,9,10,12,14]
    NB: This function is used mainly for testing purposes. It is
    programmed "c-style", which is not necessarily optimal in python,
    but follows the same logic as the corresponding cython function
    (in peak_functions_c), which runs faster and should be used
    instead of this one for normal calculations.
"""
len_wf = wf.shape[0]
wfzs_e = np.zeros(len_wf, dtype=np.double)
wfzs_i = np.zeros(len_wf, dtype=np.int32)
j=0
for i in range(len_wf):
if wf[i] > threshold:
wfzs_e[j] = wf[i]
wfzs_i[j] = i
j += 1
wfzs_ene = np.zeros(j, dtype=np.double)
wfzs_indx = np.zeros(j, dtype=np.int32)
for i in range(j):
wfzs_ene [i] = wfzs_e[i]
wfzs_indx[i] = wfzs_i[i]
return wfzs_ene, wfzs_indx
def time_from_index(indx):
"""Return the times (in ns) corresponding to the indexes in indx
    NB: This function is used mainly for testing purposes. It is
    programmed "c-style", which is not necessarily optimal in python,
    but follows the same logic as the corresponding cython function
    (in peak_functions_c), which runs faster and should be used
    instead of this one for normal calculations.
"""
len_indx = indx.shape[0]
tzs = np.zeros(len_indx, dtype=np.double)
step = 25 #ns
for i in range(len_indx):
tzs[i] = step * float(indx[i])
return tzs
def rebin_waveform(t, e, stride=40):
"""
Rebin a waveform according to stride
The input waveform is a vector such that the index expresses time bin and the
contents expresses energy (e.g, in pes)
The function returns the rebinned T and E vectors
    NB: This function is used mainly for testing purposes. It is
    programmed "c-style", which is not necessarily optimal
    in python, but follows the same logic as the corresponding cython
    function (in peak_functions_c), which runs faster and should be used
    instead of this one for normal calculations.
"""
assert len(t) == len(e)
n = len(t) // stride
r = len(t) % stride
lenb = n
if r > 0:
lenb = n+1
T = np.zeros(lenb, dtype=np.double)
E = np.zeros(lenb, dtype=np.double)
j = 0
for i in range(n):
esum = 0
tmean = 0
for k in range(j, j + stride):
esum += e[k]
tmean += t[k]
tmean /= stride
E[i] = esum
T[i] = tmean
j += stride
if r > 0:
esum = 0
tmean = 0
for k in range(j, len(t)):
esum += e[k]
tmean += t[k]
tmean /= (len(t) - j)
E[n] = esum
T[n] = tmean
return T, E
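# Illustrative example (not in the original): with stride=2,
#   rebin_waveform([0, 1, 2, 3], [10, 20, 30, 40], stride=2)
# returns T = [0.5, 2.5] (mean times) and E = [30., 70.] (summed energies).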
def find_S12(wfzs, index,
tmin = 0, tmax = 1e+6,
lmin = 8, lmax = 1000000,
stride=4, rebin=False, rebin_stride=40):
"""
Find S1/S2 peaks.
input:
    wfzs: a vector containing the zero-suppressed wf
indx: a vector of indexes
returns a dictionary
do not interrupt the peak if next sample comes within stride
accept the peak only if within [lmin, lmax)
accept the peak only if within [tmin, tmax)
returns a dictionary of S12
NB: This function is a wrapper around the cython function. It returns
a dictionary of namedtuples (Waveform(t = [t], E = [E])), where
[t] and [E] are np arrays.
"""
from collections import namedtuple
Waveform = namedtuple('Waveform', 't E')
S12 = cpf.find_S12(wfzs, index,
tmin, tmax,
lmin, lmax,
stride,
rebin, rebin_stride)
return {i: Waveform(t, E) for i, (t,E) in S12.items()}
def find_S12_py(wfzs, index,
tmin = 0, tmax = 1e+6,
lmin = 8, lmax = 1000000,
stride=4, rebin=False, rebin_stride=40):
"""
Find S1/S2 peaks.
input:
    wfzs: a vector containing the zero-suppressed wf
indx: a vector of indexes
returns a dictionary
do not interrupt the peak if next sample comes within stride
accept the peak only if within [lmin, lmax)
accept the peak only if within [tmin, tmax)
returns a dictionary of S12
    NB: This function is used mainly for testing purposes. It is programmed
    "c-style", which is not necessarily optimal
    in python, but follows the same logic as the corresponding cython
    function (in peak_functions_c), which runs faster and should be used
    instead of this one for normal calculations.
"""
P = wfzs
T = time_from_index(index)
assert len(wfzs) == len(index)
S12 = {}
S12L = {}
s12 = []
S12[0] = s12
S12[0].append([T[0], P[0]])
j = 0
for i in range(1, len(wfzs)) :
if T[i] > tmax:
break
if T[i] < tmin:
continue
if index[i] - stride > index[i-1]: #new s12
j += 1
s12 = []
S12[j] = s12
S12[j].append([T[i], P[i]])
# re-arrange and rebin
j = 0
for i in S12:
ls = len(S12[i])
if not (lmin <= ls < lmax):
continue
t = np.zeros(ls, dtype=np.double)
e = np.zeros(ls, dtype=np.double)
for k in range(ls):
t[k] = S12[i][k][0]
e[k] = S12[i][k][1]
        if rebin:
TR, ER = rebin_waveform(t, e, stride = rebin_stride)
S12L[j] = [TR, ER]
else:
S12L[j] = [t, e]
j += 1
return S12L
def sipm_s2_dict(SIPM, S2d, thr=5 * units.pes):
"""Given a vector with SIPMs (energies above threshold), and a
dictionary of S2s, S2d, returns a dictionary of SiPMs-S2. Each
    index of the dictionary corresponds to one S2 and is a list of np
arrays. Each element of the list is the S2 window in the SiPM (if
not zero)
"""
return {i: sipm_s2(SIPM, S2, thr=thr) for i, S2 in S2d.items()}
def index_from_s2(S2):
"""Return the indexes defining the vector."""
t0 = int(S2[0][0] // units.mus)
return t0, t0 + len(S2[0])
def sipm_s2(dSIPM, S2, thr=5*units.pes):
"""Given a vector with SIPMs (energies above threshold), return a dict
of np arrays, where the key is the sipm with signal.
"""
#import pdb; pdb.set_trace()
i0, i1 = index_from_s2(S2)
SIPML = {}
for ID, sipm in dSIPM.values():
slices = sipm[i0:i1]
psum = np.sum(slices)
if psum > thr:
SIPML[ID] = slices.astype(np.double)
return SIPML
def compute_csum_and_pmaps(pmtrwf, sipmrwf, s1par, s2par, thresholds,
event, calib_vectors, deconv_params):
"""Compute calibrated sum and PMAPS.
:param pmtrwf: PMTs RWF
:param sipmrwf: SiPMs RWF
:param s1par: parameters for S1 search (S12Params namedtuple)
:param s2par: parameters for S2 search (S12Params namedtuple)
:param thresholds: thresholds for searches (ThresholdParams namedtuple)
('ThresholdParams',
'thr_s1 thr_s2 thr_MAU thr_sipm thr_SIPM')
:param pmt_active: a list specifying the active (not dead) pmts
in the event. An empty list implies all active.
:param n_baseline: number of samples taken to compute baseline
:param thr_trigger: threshold to start the BLR process
:param event: event number
:returns: a nametuple of calibrated sum and a namedtuple of PMAPS
"""
s1_params = s1par
s2_params = s2par
thr = thresholds
adc_to_pes = calib_vectors.adc_to_pes
coeff_c = calib_vectors.coeff_c
coeff_blr = calib_vectors.coeff_blr
adc_to_pes_sipm = calib_vectors.adc_to_pes_sipm
pmt_active = calib_vectors.pmt_active
# deconv
CWF = blr.deconv_pmt(pmtrwf[event], coeff_c, coeff_blr,
pmt_active = pmt_active,
n_baseline = deconv_params.n_baseline,
thr_trigger = deconv_params.thr_trigger)
# calibrated sum
csum, csum_mau = cpf.calibrated_pmt_sum(CWF,
adc_to_pes,
pmt_active = pmt_active,
n_MAU = 100,
thr_MAU = thr.thr_MAU)
# zs sum
s2_ene, s2_indx = cpf.wfzs(csum, threshold=thr.thr_s2)
s1_ene, s1_indx = cpf.wfzs(csum_mau, threshold=thr.thr_s1)
# S1 and S2
S1 = cpf.find_S12(s1_ene, s1_indx, **s1_params._asdict())
S2 = cpf.find_S12(s2_ene, s2_indx, **s2_params._asdict())
#S2Si
sipm = cpf.signal_sipm(sipmrwf[event], adc_to_pes_sipm,
thr=thr.thr_sipm, n_MAU=100)
SIPM = cpf.select_sipm(sipm)
S2Si = sipm_s2_dict(SIPM, S2, thr=thr.thr_SIPM)
return (CalibratedSum(csum=csum, csum_mau=csum_mau),
PMaps(S1=S1, S2=S2, S2Si=S2Si))
def select_peaks(peaks,
Emin, Emax,
Lmin, Lmax,
Hmin, Hmax,
Ethr = -1):
is_valid = lambda E: (Lmin <= np.size(E) < Lmax and
Hmin <= np.max (E) < Hmax and
Emin <= np.sum (E) < Emax)
return {peak_no: (t, E) for peak_no, (t, E) in peaks.items() if is_valid(E[E > Ethr])}
def select_Si(peaks,
Nmin, Nmax):
is_valid = lambda sipms: Nmin <= len(sipms) < Nmax
return {peak_no: sipms for peak_no, sipms in peaks.items() if is_valid(sipms)}
| true
|
ebb96ff1b6957656bc946a57a69b451562dfa348
|
Python
|
icaro67621/clases
|
/eliminar_duplicados.py
|
UTF-8
| 1,104
| 3.390625
| 3
|
[] |
no_license
|
import pandas as pd
import numpy as np
import random as rm
lista_valores = [[1,2],[1,2],[5,6],[5,8]]
lista_indices = list('list')
lista_columna = ['valor1','valor2']
print(lista_valores)
print(lista_indices)
print(lista_columna)
dataframe1 = pd.DataFrame(lista_valores,index=lista_indices,columns=lista_columna)
print(dataframe1)
print(dataframe1.drop_duplicates())
dataframe2 = dataframe1.drop_duplicates()
print(dataframe2.drop_duplicates(['valor1']))
print(dataframe2.drop_duplicates(['valor1'],keep='last'))
#############################
### replace data in a Series
serie = pd.Series([1,2,3,4,5,6],list('camilo'))
print(serie)
serie=serie.replace(1,9)
print(serie)
serie=serie.replace({2:8,3:7,4:6,5:25,6:4})
print(serie)
#########################
### rename indices in a DataFrame
print(dataframe1)
nuevos_indices = dataframe1.index.map(str.upper)
dataframe1.index = nuevos_indices
print(dataframe1)
dataframe1 = dataframe1.rename (index=str.lower)
print(dataframe1)
nuevos_indices2 ={'l':'c','i':'a','s':'m','t':'i'}
dataframe1 = dataframe1.rename (index=nuevos_indices2)
print(dataframe1)
| true
|
6cef2bc24af0bd0804da1b53693d6db6b05d5a08
|
Python
|
jreniel/BayDeltaSCHISM
|
/pyschism/station.py
|
UTF-8
| 8,861
| 3.0625
| 3
|
[
"Apache-2.0"
] |
permissive
|
from builtins import open, str
import pandas as pd
import argparse
station_variables = ["elev", "air pressure", "wind_x", "wind_y",
"temp", "salt", "u", "v", "w"]
def read_station_in(fpath):
"""Read a SCHISM station.in file into a pandas DataFrame
.. note::
This only reads the tabular part, and assumes the BayDelta SCHISM format with columns:
index x y z ! id subloc "Name"
Note that there is no header and the delimiter is a space. Also note that the text beginning with !
is extra BayDeltaSCHISM extra metadata, not required for vanilla SCHISM
Parameters
----------
fpath : fname
Path to input station.in style file
Returns
-------
Result : DataFrame
DataFrame with hierarchical index (id,subloc) and columns x,y,z,name
"""
with open(fpath,'r') as f:
request = f.readline()
n_entry = f.readline()
stations = pd.read_csv(f,sep = "\s+",header=None,
names=["index","x","y","z","excl","id","subloc","name"],
usecols=["x","y","z","id","subloc","name"],
index_col=["id","subloc"],na_values="-",keep_default_na=True)
return stations
def write_station_in(fpath,station_in,request=None):
"""Write a SCHISM station.in file given a pandas DataFrame of metadata
Parameters
----------
fpath : fname
Path to output station.in file
station_in : DataFrame
DataFrame that has station id, x, y, z, name and subloc labels (id is the station id, index will be autogenerated)
request : 'all' or list(str)
List of variables to put in output request from the choices 'elev', 'air pressure', 'wind_x', 'wind_y', 'temp', 'salt', 'u', 'v', 'w'
or 'all' to include them all
"""
request_int = [0]*len(station_variables)
if request == "all": request = ["all"]
request_str = station_variables if request[0] == "all" else request
request_int = [(1 if var in request_str else 0) for var in station_variables]
dfmerged =station_in.reset_index()
dfmerged.index += 1
dfmerged["excl"] = "!"
nitem = len(dfmerged)
# First two lines are a space delimited 1 or 0 for each request then the
# total number of station requests
buffer = " ".join([str(x) for x in request_int]) + "\n{}\n".format(nitem)
# Then the specific requests, here written to a string buffer
buffer2 = dfmerged.to_csv(None,columns=["x","y","z","excl","id","subloc","name"],index_label="id",
sep=' ',float_format="%.2f",header=False)
with open(fpath,"w",newline='') as f:
f.write(buffer)
f.write(u(buffer2))
#f.write(u(buffer))
#f.write(u(buffer2))
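# Illustrative output for request=["elev"] and one station (all values made up):
#   1 0 0 0 0 0 0 0 0
#   1
#   1 615000.00 4225000.00 -0.50 ! mrz default Martinez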
def read_station_depth(fpath):
"""Read a BayDeltaSCHISM station_depths.csv file into a pandas DataFrame
The BayDelta SCHISM format has a header and uses "," as the delimiter and has these columns:
    id,subloc,depth
    The id is the station id, which is the key that joins this file to the station database. 'subloc' is a label that describes
    the sublocation or depth, and depth is the instrument depth below the surface (negated by this reader to give the elevation z)
    Example might be:
    id,subloc,depth
    12345,top,0.5
Other columns are allowed, but this will commonly merged with the station database file so we avoid column names like 'name' that might collide
Parameters
----------
fpath : fname
Path to input station.in style file
Returns
-------
Result : DataFrame
DataFrame with hierarchical index (id,subloc) and data column z
"""
df = pd.read_csv(fpath,sep=",",header=0,index_col=["id","subloc"])
df["z"] = -df.depth
return df[["z"]]
def read_station_dbase(fpath):
"""Read a BayDeltaSCHISM station data base csv file into a pandas DataFrame
The BayDelta SCHISM format is open, but expects these columns:
index x y z ! id subloc "Name"
Parameters
----------
fpath : fname
Path to input station.in style file
Returns
-------
Result : DataFrame
DataFrame with hierarchical index (id,subloc) and columns x,y,z,name
"""
return pd.read_csv(fpath,sep=",",header=0,index_col="id")
def merge_station_depth(station_dbase,station_depth,default_z):
"""Merge BayDeltaSCHISM station database with depth file, producing the union of all stations and depths including a default entry for stations with no depth entry
Parameters
----------
station_dbase : DataFrame
This should be the input that has only the station id as an index and includes other metadata like x,y,
station_depth : DataFrame
This should have (id,subloc) as an index
Returns
-------
Result : DataFrame
DataFrame that links the information.
"""
merged = station_dbase.reset_index().merge(station_depth.reset_index(),
left_on="id",right_on="id",
how='left')
merged.fillna({"subloc":"default","z": default_z},inplace=True)
merged.set_index(["id","subloc"],inplace=True)
return merged
def read_obs_links(fpath):
"""Read an obs_links csv file which has comma as delimiter and (id,subloc) as index """
return pd.read_csv(fpath,sep=",",header=0,index_col=["id","subloc"])
def read_station_out(fpath_base,stationinfo,var=None,start=None):
if var is None:
fname = fpath_base
else:
try:
fileno = station_variables.index(var)
except ValueError:
raise ValueError("Variable name {} not on list: {}.format(var,station_variables")
fname = "{}_{:d}".format(fileno)
data = pandas.read_csv(fpath,var,sep="\s+",index_col=0,
header=None,names = stationinfo.index,dtype='d')
if start is not None:
data = elapsed_to_date(data)
return data
def example():
print(read_station_in("example_station.in"))
stations_utm = read_station_dbase("stations_utm.csv")
print(stations_utm)
sdepth = read_station_depth("station_depth.csv")
stations_in = merge_station_depth(stations_utm,sdepth,default_z=-0.5)
#stations_in = pd.merge(stations_utm,sdepth,how='inner',left_index=True,right_index=True)
#print(stations_in)
station_request = ["salt","elev"]
write_station_in("station.in",stations_in,request=station_request)
#stations_in = read_station_in("station.in")
obs_links = read_obs_links("obs_links.csv")
merged = stations_in.merge(obs_links,left_index=True,right_index=True,how="left")
if True:
print("**")
print(obs_links)
print("**")
print(stations_in)
print("**")
print(stations_utm)
print("**")
print(merged)
def convert_db_station_in(outfile="station.in",stationdb="stations_utm.csv",depthdb="station_depth.csv",station_request="all",default=-0.5):
stations_utm = read_station_dbase(stationdb)
sdepth = read_station_depth(depthdb)
stations_in = merge_station_depth(stations_utm,sdepth,default_z=-0.5)
write_station_in(outfile,stations_in,request=station_request)
def create_arg_parser():
""" Create an argument parser
"""
parser = argparse.ArgumentParser(description="Create station.in file from station database (stations_utm.csv) and station depth listing station_depth.csv")
parser.add_argument('--station_db', default = "stations_utm.csv",
help="station database, often stations_utm.csv")
parser.add_argument('--depth_db', default = "station_depth.csv",
help="depth listings for stations (otherwise default depth)")
parser.add_argument('--request', default='all',nargs="+",help="requested variables or 'all' for all of them. Possibilities are: {}".format(",".join(station_variables)))
parser.add_argument('--default_zcor',default='-0.5',
help="depth used when there is no listing for station id")
parser.add_argument('--out', default = "station.in",
help="station.in formatted file")
return parser
def main():
""" A main function to convert polygon files
"""
parser = create_arg_parser()
args = parser.parse_args()
stationdb = args.station_db
depthdb = args.depth_db
default = args.default_zcor
request = args.request
outfile = args.out
print(request)
convert_db_station_in(outfile,stationdb,depthdb,request,default)
if __name__ == '__main__':
#example()
main()
| true
|
821c4c9ce70125a3cf2ae491f4cbad68bd5dbb5b
|
Python
|
st4lk/LSP
|
/plugin/core/events.py
|
UTF-8
| 923
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
try:
from typing import Any, List, Dict, Tuple, Callable, Optional
assert Any and List and Dict and Tuple and Callable and Optional
except ImportError:
pass
class Events:
def __init__(self):
        self._listener_dict = dict()  # type: Dict[str, List[Callable[..., None]]]
def subscribe(self, key, listener):
if key in self._listener_dict:
self._listener_dict[key].append(listener)
else:
self._listener_dict[key] = [listener]
        return lambda: self.unsubscribe(key, listener)
def unsubscribe(self, key, listener):
if key in self._listener_dict:
self._listener_dict[key].remove(listener)
def publish(self, key, *args):
if key in self._listener_dict:
for listener in self._listener_dict[key]:
listener(*args)
def reset(self):
self._listener_dict = dict()
global_events = Events()
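# Example usage (illustrative, not part of the original module):
#   unsubscribe = global_events.subscribe("saved", print)
#   global_events.publish("saved", "/tmp/file.txt")  # calls print("/tmp/file.txt")
#   unsubscribe()  # subscribe() returns a callable that removes the listener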
| true
|
e4f6bce74b8faa9562e63b8a0b842a39ccb8537a
|
Python
|
thinksource/pysp
|
/pysp/spiders/bbc_spider.py
|
UTF-8
| 1,087
| 2.671875
| 3
|
[] |
no_license
|
import scrapy
class BbcSpider(scrapy.Spider):
name = "bbc"
main_url = 'http://www.bbc.com'
urls=[]
link_parts=["a.top-list-item__link", "a.media__link"]
title_parts=[""]
def start_requests(self):
        yield scrapy.Request(url=self.main_url, callback=self.parse)
def main_parse(self, response):
        for part in self.link_parts:
            for a in response.css(part):
                href = response.urljoin(a.css("::attr(href)").extract_first())
                title = (a.css("::text").extract_first() or "").strip()
yield scrapy.Request(url=href, callback=self.detail_parse)
def detail_parse(self, response):
title=response.css("h1::text").extract_first()
author=response.css("a")
def parse(self, response):
        print(response.url)
        print("===============")
# page = response.url.split("/")[-2]
# print page
filename = 'main.html'
with open(filename, 'wb') as f:
f.write(response.body)
self.log('Saved file %s' % filename)
| true
|
b074d9ff508c8b3a8a8c55f4b87b3c1d1d0c0623
|
Python
|
michaelamican/python_starter_projects
|
/Python_OOP/SLists.py
|
UTF-8
| 997
| 3.546875
| 4
|
[] |
no_license
|
class Node:
def __init__(self,value):
self.value = value
self.next = None
class SList:
def __init__(self,value):
node = Node(value)
self.head = node
def addNode(self,value):
node = Node(value)
runner = self.head
while(runner.next != None):
runner = runner.next
runner.next = node
    def removeNode(self,value):
        runner = self.head
        if(self.head.value == value):
            self.head = runner.next
            return True
        while(runner.next != None and runner.next.value != value):
            runner = runner.next
        if(runner.next != None and runner.next.value == value):
            runner.next = runner.next.next
            return True
        return False
def printAllValues(self, msg=""):
runner = self.head
print("\n\head points to ", id(self.head))
print("Printing the values in the list ---", msg, "---")
while(runner.next != None):
print(id(runner), runner.value, id(runner.next))
runner = runner.next
print(id(runner), runner.value, id(runner.next))
print("\n\n\n\n========START OF THE PROGRAM========\n\n\n\n")
list.printAllValues("Attempt 1")
| true
|
d0056b17017a5e923020f42e3178e99f23fdc8b7
|
Python
|
lightspeed1001/tgra_3d_lab
|
/Matrices.py
|
UTF-8
| 10,771
| 2.890625
| 3
|
[] |
no_license
|
from math import * # trigonometry
from Base3DObjects import *
class ModelMatrix:
def __init__(self):
self.matrix = [1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1]
self.stack = []
self.stack_count = 0
self.stack_capacity = 0
def load_identity(self):
self.matrix = [1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1]
def copy_matrix(self):
new_matrix = [0] * 16
for i in range(16):
new_matrix[i] = self.matrix[i]
return new_matrix
def add_transformation(self, matrix2):
counter = 0
new_matrix = [0] * 16
for row in range(4):
for col in range(4):
for i in range(4):
new_matrix[counter] += self.matrix[row * 4 + i] * matrix2[col + 4 * i]
counter += 1
self.matrix = new_matrix
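    # Note (added): matrices are stored as flat 16-element row-major lists, so
    # entry (row, col) of the product computed above is
    #   sum(self.matrix[row*4 + i] * matrix2[i*4 + col] for i in range(4)),
    # i.e. the result is self * matrix2 with self on the left.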
def add_movement(self, position: Point):
other_matrix = [1, 0, 0, position.x,
0, 1, 0, position.y,
0, 0, 1, position.z,
0, 0, 0, 1]
self.add_transformation(other_matrix)
def add_scale(self, scalar):
other_matrix = [scalar, 0, 0, 0,
0, scalar, 0, 0,
0, 0, scalar, 0,
0, 0, 0, 1]
self.add_transformation(other_matrix)
def add_x_scale(self, scalar):
other_matrix = [scalar, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1]
self.add_transformation(other_matrix)
def add_y_scale(self, scalar):
other_matrix = [1, 0, 0, 0,
0, scalar, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1]
self.add_transformation(other_matrix)
def add_z_scale(self, scalar):
other_matrix = [1, 0, 0, 0,
0, 1, 0, 0,
0, 0, scalar, 0,
0, 0, 0, 1]
self.add_transformation(other_matrix)
def add_x_rotation(self, angle):
other_matrix = [1, 0, 0, 0,
0, cos(angle), -sin(angle), 0,
0, sin(angle), cos(angle), 0,
0, 0, 0, 1]
self.add_transformation(other_matrix)
def add_y_rotation(self, angle):
other_matrix = [cos(angle), 0, sin(angle), 0,
0, 1, 0, 0,
-sin(angle), 0, cos(angle), 0,
0, 0, 0, 1]
self.add_transformation(other_matrix)
def add_z_rotation(self, angle):
other_matrix = [cos(angle), -sin(angle), 0, 0,
sin(angle), cos(angle), 0, 0,
0, 0, 1, 0,
0, 0, 0, 1]
self.add_transformation(other_matrix)
def add_nothing(self):
other_matrix = [1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1]
self.add_transformation(other_matrix)
# YOU CAN TRY TO MAKE PUSH AND POP (AND COPY) LESS DEPENDANT ON GARBAGE COLLECTION
# THAT CAN FIX SMOOTHNESS ISSUES ON SOME COMPUTERS
def push_matrix(self):
self.stack.append(self.copy_matrix())
def pop_matrix(self):
self.matrix = self.stack.pop()
# This operation mainly for debugging
def __str__(self):
ret_str = ""
counter = 0
for _ in range(4):
ret_str += "["
for _ in range(4):
ret_str += " " + str(self.matrix[counter]) + " "
counter += 1
ret_str += "]\n"
return ret_str
# The ViewMatrix class holds the camera's coordinate frame and
# set's up a transformation concerning the camera's position
# and orientation
class ViewMatrix:
def __init__(self):
self.eye = Point(0, 0, 0)
self.u = Vector(1, 0, 0) # left
self.v = Vector(0, 1, 0) # up
self.n = Vector(0, 0, 1) # back
def look(self, eye, center, up):
self.eye = eye
self.n = (eye - center)
self.n.normalize()
self.u = up.cross(self.n)
self.u.normalize()
self.v = self.n.cross(self.u)
def get_matrix(self):
minusEye = Vector(-self.eye.x, -self.eye.y, -self.eye.z)
return [self.u.x, self.u.y, self.u.z, minusEye.dot(self.u),
self.v.x, self.v.y, self.v.z, minusEye.dot(self.v),
self.n.x, self.n.y, self.n.z, minusEye.dot(self.n),
0, 0, 0, 1]
def slide(self, del_u, del_v, del_n):
self.eye += self.u * del_u + self.v * del_v + self.n * del_n
# self.eye.x += del_u * self.u.x + del_v * self.v.x + del_n * self.n.x
# self.eye.y += del_u * self.u.y + del_v * self.v.y + del_n * self.n.y
# self.eye.z += del_u * self.u.z + del_v * self.v.z + del_n * self.n.z
def roll(self, angle):
# Rotate around n
ang_cos = cos(angle * pi / 180.0)
ang_sin = sin(angle * pi / 180.0)
t = Vector(self.u.x, self.u.y, self.u.z)
# self.n = ang_cos * t + ang_sin * self.v
# self.v = -ang_sin * t + ang_cos * self.v
self.u = Vector(ang_cos * t.x + ang_sin * self.v.x,
ang_cos * t.y + ang_sin * self.v.y,
ang_cos * t.z + ang_sin * self.v.z)
self.v = Vector(-ang_sin * t.x + ang_cos * self.v.x,
-ang_sin * t.y + ang_cos * self.v.y,
-ang_sin * t.z + ang_cos * self.v.z)
def yaw(self, angle):
# Rotate around v
ang_cos = cos(angle * pi / 180.0)
ang_sin = sin(angle * pi / 180.0)
t = Vector(self.u.x, self.u.y, self.u.z)
self.u = Vector(ang_cos * t.x + ang_sin * self.n.x,
ang_cos * t.y + ang_sin * self.n.y,
ang_cos * t.z + ang_sin * self.n.z)
self.n = Vector(-ang_sin * t.x + ang_cos * self.n.x,
-ang_sin * t.y + ang_cos * self.n.y,
-ang_sin * t.z + ang_cos * self.n.z)
def pitch(self, angle):
# Rotate around u
ang_cos = cos(angle * pi / 180.0)
ang_sin = sin(angle * pi / 180.0)
t = Vector(self.n.x, self.n.y, self.n.z)
self.n = Vector(ang_cos * t.x + ang_sin * self.v.x,
ang_cos * t.y + ang_sin * self.v.y,
ang_cos * t.z + ang_sin * self.v.z)
self.v = Vector(-ang_sin * t.x + ang_cos * self.v.x,
-ang_sin * t.y + ang_cos * self.v.y,
-ang_sin * t.z + ang_cos * self.v.z)
class FPSViewMatrix(ViewMatrix):
def slide(self, del_u, del_v, del_n):
self.eye.x += del_u * self.u.x + del_v * self.v.x + del_n * self.n.x
# self.eye.y += del_u * self.u.y + del_v * self.v.y + del_n * self.n.y
self.eye.z += del_u * self.u.z + del_v * self.v.z + del_n * self.n.z
def roll(self, angle):
pass
# You generally can't roll the camera in an fps
# Maybe implement a sort of lean mechanic?
def yaw(self, angle):
# Rotate around v
ang_cos = cos(angle * pi / 180.0)
ang_sin = sin(angle * pi / 180.0)
self.u = Vector( ang_cos * self.u.x + ang_sin * self.u.z,
self.u.y,
-ang_sin * self.u.x + ang_cos * self.u.z)
self.v = Vector( ang_cos * self.v.x + ang_sin * self.v.z,
self.v.y,
-ang_sin * self.v.x + ang_cos * self.v.z)
self.n = Vector( ang_cos * self.n.x + ang_sin * self.n.z,
self.n.y,
-ang_sin * self.n.x + ang_cos * self.n.z)
def pitch(self, angle):
# TODO Clamp to some max/min
ang_cos = cos(angle * pi / 180.0)
ang_sin = sin(angle * pi / 180.0)
t = Vector(self.n.x, self.n.y, self.n.z)
self.n = Vector(ang_cos * t.x + ang_sin * self.v.x,
ang_cos * t.y + ang_sin * self.v.y,
ang_cos * t.z + ang_sin * self.v.z)
self.v = Vector(-ang_sin * t.x + ang_cos * self.v.x,
-ang_sin * t.y + ang_cos * self.v.y,
-ang_sin * t.z + ang_cos * self.v.z)
# The ProjectionMatrix class builds transformations concerning
# the camera's "lens"
class ProjectionMatrix:
def __init__(self, left=-1, right=1, bottom=-1, top=1, near=-1, far=1, ortho=True):
self.left = left
self.right = right
self.bottom = bottom
self.top = top
self.near = near
self.far = far
self.is_orthographic = ortho
def set_perspective(self, fov_y, aspect_ratio, near, far):
self.near = near
self.far = far
self.top = near * tan(fov_y / 2)
self.bottom = -self.top
self.right = self.top * aspect_ratio
self.left = -self.right
self.is_orthographic = False
def set_orthographic(self, left, right, bottom, top, near, far):
self.left = left
self.right = right
self.bottom = bottom
self.top = top
self.near = near
self.far = far
self.is_orthographic = True
def get_matrix(self):
if self.is_orthographic:
A = 2 / (self.right - self.left)
B = -(self.right + self.left) / (self.right - self.left)
C = 2 / (self.top - self.bottom)
D = -(self.top + self.bottom) / (self.top - self.bottom)
E = 2 / (self.near - self.far)
F = (self.near + self.far) / (self.near - self.far)
return [A, 0, 0, B,
0, C, 0, D,
0, 0, E, F,
0, 0, 0, 1]
else:
A = (2 * self.near) / (self.right - self.left)
B = (self.right + self.left) / (self.right - self.left)
C = (2 * self.near) / (self.top - self.bottom)
D = (self.top + self.bottom) / (self.top - self.bottom)
E = -(self.far + self.near) / (self.far - self.near)
F = -(2 * self.far * self.near) / (self.far - self.near)
return [A, 0, B, 0,
0, C, D, 0,
0, 0, E, F,
0, 0, -1, 0]
| true
|
b13001d17b815e4c9e6822a4dab86fd63d830e8e
|
Python
|
oshsage/Python_Pandas
|
/py4e/CodeUp/1010_input_output_01.py
|
UTF-8
| 353
| 4.15625
| 4
|
[] |
no_license
|
# Declare an integer (int) variable, store an integer value in it,
# and then print the value stored in the variable as-is.
a = int(input())
print(a)
# New concept: input()
# The input() function is used to read a value from the user. If you write a
# message in quotes inside the parentheses, that prompt text is shown as well.
| true
|
5af21ba3a136a3292f4b057f66820531e56b292a
|
Python
|
vpalmerini/unicamp-api
|
/subjects/tests/test_models.py
|
UTF-8
| 1,249
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
from django.test import TestCase
from subjects.models import Subject, Semester, PreReq, Continence, Equivalence
from subjects.tests.factories import SemesterFactory, SubjectFactory, PreReqFactory, ContinenceFactory, EquivalenceFactory
class BaseModelTest(TestCase):
def setUp(self):
semester = SemesterFactory()
subject = SubjectFactory()
semester.subjects.add(subject)
pre_req = PreReqFactory()
pre_req.subjects.add(subject)
continence = ContinenceFactory()
continence.subjects.add(subject)
equivalence = EquivalenceFactory()
equivalence.subjects.add(subject)
class SubjectModelTest(BaseModelTest):
def setUp(self):
BaseModelTest.setUp(self)
def test_subject_creation(self):
subject = SubjectFactory()
pre_req = PreReqFactory()
self.assertTrue(isinstance(subject, Subject))
self.assertEqual(str(subject), subject.initials)
self.assertEqual(subject.name,
"Algoritmos e Programação de Computadores")
self.assertEqual(subject.workload, 6)
self.assertEqual(subject.institute.initials, "IC")
self.assertEqual(pre_req.subjects.get(initials="MC102").workload, 6)
| true
|
ffb3ee86fb7455d3cce096c8214f9a53036fb879
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03041/s481457150.py
|
UTF-8
| 175
| 3.15625
| 3
|
[] |
no_license
|
N, K = map(int, input().split())
S = str(input())
if S[K-1] == "A":
print(S[:K-1]+"a"+S[K:])
elif S[K-1] == "B":
print(S[:K-1]+"b"+S[K:])
else:
print(S[:K-1]+"c"+S[K:])
| true
|
11a6b56b37bda0ac16e2415f5e4a3144ef35895c
|
Python
|
microease/Old-boy-Python-knight-project-1
|
/16-30/day30/5 demo2/server.py
|
UTF-8
| 822
| 2.84375
| 3
|
[] |
no_license
|
import socket
sk = socket.socket()                                    # create the socket ("buy a phone")
# sk.bind(('192.168.16.11',9000))                       # bind an address ("put a SIM card in the new phone")
sk.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)  # allow address reuse; this must be set before bind
sk.bind(('127.0.0.1',9000))                             # bind an address ("put a SIM card in the new phone")
sk.listen()                                             # start listening ("power on")
while True:
    try:
        conn,addr = sk.accept()                         # wait for a call
        while True:
            msg = input('>>>')
            conn.send(msg.encode('utf-8'))              # speak
            if msg == 'q': break
            msg = conn.recv(1024)                       # listen to the other side
            if msg == b'q':break
            print(msg.decode('utf-8'))
        conn.close()                                    # hang up
    except UnicodeDecodeError:
        pass
sk.close()
# A TCP socket server written this way cannot accept
# connections from multiple clients at the same time.
| true
|
239c4ea6f4c9590c34b2738297557f15689926cf
|
Python
|
linusreM/Gelo
|
/embedded/python/stepper.py
|
UTF-8
| 3,459
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
from time import sleep
import RPi.GPIO as GPIO
M2 = 18
M1 = 15
M0 = 14
DIR1 = 20 #Direction GPIO Pin
DIR2 = 16
STEP = 21 # Step GPIO Pin
SLEEP = 27
CW = 1 # Clockwise Rotation
CCW = 0 # Counterclockwise Rotation
FW = 1
BW = 0
def motor_setup():
GPIO.setmode(GPIO.BCM)
GPIO.setup(DIR1, GPIO.OUT)
GPIO.setup(DIR2, GPIO.OUT)
GPIO.setup(STEP, GPIO.OUT)
GPIO.output(DIR1, CCW)
GPIO.output(DIR2, CCW)
GPIO.setup(M2, GPIO.OUT)
GPIO.setup(M2, GPIO.OUT)
GPIO.setup(M1, GPIO.OUT)
GPIO.setup(M0, GPIO.OUT)
GPIO.setup(SLEEP, GPIO.OUT)
GPIO.output(SLEEP, 0)
GPIO.output(M2,0)
GPIO.output(M1,0)
GPIO.output(M0,0)
VELMAX=1.0/1664
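# Note (added): VELMAX is the minimum half-pulse delay, 1/1664 s ~= 0.6 ms.
# Each step drives STEP high then low with this delay, so the fastest stepping
# rate is 1664 / 2 = 832 steps per second.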
def motor_turn(rotation_direction, rotation_degree, max_velocity, tilt_ramp=10.0):
stepspermm = 2048.0/(75.61*np.pi)
stepperdegree = ((215.0*np.pi)/360)*stepspermm
    STEPCOUNTf = stepperdegree*rotation_degree # total steps for the requested rotation
STEPCOUNT = int(STEPCOUNTf) #Whole steps
STARTDELAY =100 #Denominator of delay fraction
NBRSTEP =0 #How many steps has happened
RAMP = STEPCOUNT/2
print(STEPCOUNT)
GPIO.output(SLEEP, 1)
GPIO.output(DIR1, rotation_direction)
GPIO.output(DIR2, rotation_direction)
for x in range(STEPCOUNT):
if NBRSTEP < RAMP: #Positive acceleration
STARTDELAY +=1*tilt_ramp
delay = 1.0/STARTDELAY
if NBRSTEP > RAMP: #Negative acceleration
STARTDELAY -=1*tilt_ramp
delay = 1.0/STARTDELAY
        if delay<max_velocity: #Continuous speed
delay = max_velocity
GPIO.output(STEP, GPIO.HIGH)
sleep(delay)
GPIO.output(STEP, GPIO.LOW)
sleep(delay)
NBRSTEP+=1
GPIO.output(SLEEP, 0)
def motor_move(movement_direction,movement_distance, max_velocity, tilt_ramp=10.0):
global stepno
stepspermm = 2048.0/(75.61*np.pi)
    STEPCOUNTf = stepspermm*movement_distance # total steps for the requested distance
STEPCOUNT = int(STEPCOUNTf) #Whole steps
STARTDELAY =100 #Denominator of delay fraction
    NBRSTEP =0 #How many steps have happened
RAMP = STEPCOUNT/2
print(STEPCOUNT)
GPIO.output(SLEEP, 1)
if movement_direction:
GPIO.output(DIR1, CW)
GPIO.output(DIR2, CCW)
else:
GPIO.output(DIR1, CCW)
GPIO.output(DIR2, CW)
for x in range(STEPCOUNT):
if NBRSTEP < RAMP: #Positive acceleration
STARTDELAY +=1*tilt_ramp
delay = 1.0/STARTDELAY
if NBRSTEP > RAMP: #Negative acceleration
STARTDELAY -=1*tilt_ramp
delay = 1.0/STARTDELAY
        if delay<max_velocity: #Continuous speed
delay = max_velocity
GPIO.output(STEP, GPIO.HIGH)
sleep(delay)
GPIO.output(STEP, GPIO.LOW)
sleep(delay)
NBRSTEP+=1
stepno=NBRSTEP
print(stepno)
GPIO.output(SLEEP, 0)
#motor_turn(CW,180,VELMAX) #Turn clockwise, 180 degrees with lowest delay of VELMAX)
#motor_move(FW,100.0, VELMAX)
#sleep(5)
#motor_move(BW,100.0, VELMAX)
#motor_turn(CCW,180,VELMAX)
#sleep(1)
#GPIO.cleanup()
| true
|
060d30a8c2a97c0de99cf63942ce062a3a979a2a
|
Python
|
sandeepks23/pythondjango
|
/class/calculator.py
|
UTF-8
| 444
| 3.9375
| 4
|
[] |
no_license
|
class Calculator:
def __init__(self,num1,num2):
self.num1=num1
self.num2=num2
def add(self):
sum=self.num1+self.num2
print(sum)
def sub(self):
diff=self.num1-self.num2
print(diff)
def mul(self):
prod=self.num1*self.num2
print(prod)
def div(self):
res=self.num1/self.num2
print(res)
obj=Calculator(8,2)
obj.add()
obj.sub()
obj.mul()
obj.div()
| true
|
6d92cc29126527e628f5a570d6f9421d3df3be9b
|
Python
|
forkcodeaiyc/skulpt_parser
|
/run-tests/t492.py
|
UTF-8
| 465
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
class B:
def __init__(self, x=0, y=0, z=0):
self.x = x
self.y = y
self.z = z
def __str__(self):
return str((self.x, self.y, self.z))
print("\nClass with defaults")
print(B())
print(B(1, 2, 3))
print(B(1), B(2), B(3))
print(B(x=1), B(y=2), B(z=3))
print(B(x=1, z=3), B(z=3, x=1))
print(B(x=1, y=2), B(y=2, x=1))
print(B(z=3, y=2), B(y=2, z=3))
print(B(z=3, x=1, y=2), B(z=3, y=2, x=1), B(y=2, z=3, x=1), B(y=2, x=1, z=3))
| true
|
79a58a50de1e0f2b2382f4767ab4db50af6f2c38
|
Python
|
alshamiri5/makerfaire-booth
|
/2016/1-o/mini-magneto/controller.py
|
UTF-8
| 1,529
| 2.8125
| 3
|
[
"Apache-2.0"
] |
permissive
|
import time
import gevent
import gevent.monkey
gevent.monkey.patch_all()
import socket
import serial
def derivative(value, prev):
deriv = value - prev
if (abs(deriv) < 180):
        print(prev, value, deriv)
else:
deriv = value - (prev-360)
if (abs(deriv) < 180):
            print(prev, value, deriv)
else:
deriv = (360-value) - prev
return deriv
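# Illustrative example (not in the original): derivative(10, 350) first gets
# 10 - 350 = -340, rejects it (|.| >= 180), then 10 - (350 - 360) = 20, a
# small move through the 0/360 wraparound.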
s = serial.Serial("/dev/ttyACM0", 9600)
s.timeout = 1
ready = True
def read_from_port():
global ready
print("Starting to read")
while True:
reading = s.readline()
if "woot." == reading:
print("ready!")
ready = True
#print(reading)
def client():
prev = None
client_socket = gevent.socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(("127.0.0.1", 6000))
t0 = time.time()
num_zeroes = 0
zero_threshold = 20
while True:
data = client_socket.recv(1024)
try:
value = int(data)
except:
print "failed to parse", data
else:
if prev:
d = derivative(value, prev)
if ready:
if abs(d) > 5:
                        print(prev, value, d)
s.write("%d\r\n" % d)
s.flush()
num_zeroes = 0
else:
num_zeroes += 1
if num_zeroes >= zero_threshold:
print "ZERO"
s.write("%d\r\n" % 0)
s.flush()
prev = value
client = gevent.spawn(client)
readthread = gevent.spawn(read_from_port)
gevent.joinall([client, readthread])
| true
|
057cf30becd832a974dde4fd0eaaa491ae542453
|
Python
|
B2BDA/Streamlit_Web_App_Basics
|
/Bank_App/dummy_bank_frontend.py
|
UTF-8
| 3,216
| 2.53125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 1 15:33:29 2020
@author: bishw
"""
import streamlit as st
import pandas as pd
from datetime import timedelta
import os
from smtplib import SMTP
from email.mime.text import MIMEText
from pretty_html_table import build_table
from email.mime.multipart import MIMEMultipart
import dummy_bank_credentials
cred = dummy_bank_credentials.credentials
st.title("SMART NOTIFICATIONS")
st.subheader("Enter Details")
def write_to_db(Primary_Name,Secondary_Name,Bank_Name,Matured_Amount,Maturity_Date):
if os.path.isfile('Bank_Maturity.xlsx') == False:
df = pd.DataFrame([[Primary_Name,Secondary_Name,Bank_Name,Matured_Amount,Maturity_Date]],
columns=['Primary_Name','Secondary_Name','Bank_Name','Matured_Amount','Maturity_Date'])
        df['Email_Trigger_Date'] = Maturity_Date - timedelta(days=7)
df.to_excel('Bank_Maturity.xlsx', index=False)
else:
df = pd.read_excel('Bank_Maturity.xlsx')
df = df.append(pd.DataFrame({'Primary_Name':Primary_Name,'Secondary_Name':Secondary_Name,'Bank_Name':Bank_Name,'Matured_Amount':Matured_Amount,'Maturity_Date':Maturity_Date}, index = [0]), ignore_index=False)
        df['Email_Trigger_Date'] = Maturity_Date - timedelta(days=7)
df.to_excel('Bank_Maturity.xlsx',index=False)
st.subheader("New Entry")
st.table(df.iloc[-1:,:])
def send_email(recipients):
df = pd.read_excel('Bank_Maturity.xlsx')
body = build_table(df, 'orange_light')
SENDER = cred.get('SENDER')
PASSWORD = cred.get('PASSWORD')
    for r in recipients:  # use the argument instead of the hard-coded credential list
message = MIMEMultipart()
message['Subject'] = 'SMART NOTIFICATION - Maturity Amount Alert!!'
message['From'] = SENDER
message['To'] = r
body_content = body
message.attach(MIMEText(body_content, "html"))
msg_body = message.as_string()
server = SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(SENDER, PASSWORD)
server.sendmail(message['From'], message['To'], msg_body)
server.quit()
print("Email sent!")
def details_input():
Primary_Name = st.text_input("Enter Primary Account Holder Name")
    Secondary_Name = st.text_input("Enter Secondary Account Holder Name")
Bank_Name = st.text_input("Enter Bank Name")
Matured_Amount = st.text_input("Enter Maturity Amount")
Maturity_Date = st.date_input("Enter Maturity Date")
if st.button("Submit"):
write_to_db(Primary_Name,Secondary_Name,Bank_Name,Matured_Amount,Maturity_Date)
st.sidebar.title("Details")
add_selectbox = st.sidebar.selectbox('Would you like to see all the data?',('No', 'Yes'))
if add_selectbox == 'Yes':
try:
df = pd.read_excel("Bank_Maturity.xlsx")
st.write("DETAILS")
st.table(df)
except Exception:
st.write("File Not Found")
add_selectbox = st.sidebar.selectbox('Would you like to email this data?',('No', 'Yes'))
if add_selectbox == 'Yes':
send_email(["rinabiswas1972@gmail.com","bishwarup1429@gmail.com"])
if __name__=="__main__":
details_input()
| true
|
387b61f45d79a7eb05cea613a2710549d172cf6f
|
Python
|
robsiegwart/simpleFEA
|
/simpleFEA/preprocessing.py
|
UTF-8
| 2,229
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
'''
Preprocessing classes and functions.
'''
from simpleFEA.loads import Force, Displacement
def N_dist(n1,n2):
'''Calculate the scalar distance between two nodes'''
return ((n2.x - n1.x)**2 + (n2.y - n1.y)**2 + (n2.z - n1.z)**2)**0.5
class Node:
'''
Node class.
:param num x,y,z: scalar location components
:param num num: node number, defaults to *max defined node number + 1*
'''
nodes = []
def __init__(self, x=0, y=0, z=0, num=None):
self.x = x
self.y = y
self.z = z
self.num = num if num else self.max_n + 1
Node.nodes.append(self)
# Initialize property containers
self.solution = {}
"""Store solution quantities here"""
self.loads = []
"""All loads applied to this node"""
self.forces = []
self.disp = []
self.DOF = set()
'''The DOFs for this node (none defined until attached to an element)'''
self.indices = dict()
'''The indices of this node's DOF in the global matrix'''
self.elements = set()
'''The parent elements this node is attached to'''
@property
def nDOF(self):
return len(self.DOF)
@property
def max_n(self):
return max([n.num for n in Node.nodes] + [0])
def F(self, x=None, y=None, z=None):
'''Apply a force to the node'''
f = Force(self,x,y,z)
return f
def D(self, x=None, y=None, z=None):
'''Apply a displacement to the node'''
d = Displacement(self,x,y,z)
return d
def __repr__(self):
return f'Node {self.num} ({round(self.x,3)},{round(self.y,3)},{round(self.z,3)})'
@property
def ux(self):
'''The ux displacement solution quantity in the global coordinate system'''
return self.solution[1]
@property
def uy(self):
'''The uy displacement solution quantity in the global coordinate system'''
return self.solution[2]
@property
def uz(self):
'''The uz displacement solution quantity in the global coordinate system'''
try:
return self.solution[3]
except KeyError:
return 0
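# Example usage (illustrative, not part of the original module):
#   n1 = Node(0, 0, 0)
#   n2 = Node(3, 4, 0)
#   N_dist(n1, n2)  # -> 5.0 (a 3-4-5 right triangle)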
| true
|
0a2fd64a99a72b21aa3b869245ac261d306c7215
|
Python
|
idlelearner/interview_qns
|
/coding_practice/algos/search/count_occurence.py
|
UTF-8
| 263
| 3.71875
| 4
|
[] |
no_license
|
# count occurrences of an element in a list
def count_occurrences(lst, val):
return len([x for x in lst if x == val and type(x) == type(val)])
if __name__=='__main__':
    print(count_occurrences([3,5,1,2,6,5,3],5))
    print(count_occurrences([3,5,1,2,6,5,3],0))
| true
|
4dffcbdcf5ca0146f78bdcd54481ab8786cbf31c
|
Python
|
phildavis17/Advent_Of_Code
|
/2020/AoC_2020_4_test.py
|
UTF-8
| 1,920
| 2.796875
| 3
|
[] |
no_license
|
from AoC_2020_4 import * # Wildcard seemed appropriate here. Is this good practice?
BAD_PASSPORTS = """eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007"""
GOOD_PASSPORTS = """pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719"""
def test_validate_byr():
assert validate_byr("2002") == True
assert validate_byr("2003") == False
def test_validate_iyr():
assert validate_iyr("2010") == True
assert validate_iyr("2009") == False
def test_validate_eyr():
assert validate_eyr("2030") == True
assert validate_eyr("2020") == True
assert validate_eyr("2025") == True
assert validate_eyr("2031") == False
def test_validate_hgt():
assert validate_hgt("59in") == True
assert validate_hgt("76in") == True
assert validate_hgt("190cm") == True
assert validate_hgt("190in") == False
assert validate_hgt("190") == False
assert validate_hgt("19") == False
def test_validate_hcl():
assert validate_hcl("#123abc") == True
assert validate_hcl("#123abz") == False
assert validate_hcl("123abc") == False
def test_validate_ecl():
assert validate_ecl("brn") == True
assert validate_ecl("wat") == False
def test_validate_pid():
assert validate_pid("000000001") == True
assert validate_pid("0123456789") == False
def test_count_valid_passports():
assert count_valid_passports(BAD_PASSPORTS) == 0
assert count_valid_passports(GOOD_PASSPORTS) == 4
| true
|
fa70f52dccede30d8d0aff7215e2ade57e3d5e57
|
Python
|
AlanFermat/leetcode
|
/linkedList/445 addTwoNumII.py
|
UTF-8
| 876
| 3.296875
| 3
|
[] |
no_license
|
from ListNode import *
def add(x, y):
start = ListNode(-1)
res= start
values = [0]
x1, x2= x, y
m, n = 0, 0
while x1:
x1 = x1.next
m += 1
while x2:
x2 = x2.next
n += 1
    # make sure x is the longer list
if n > m:
x, y, m ,n = y, x, n, m
for i in range(m-n):
values.append(x.val)
x = x.next
for _ in range(n):
values.append(x.val + y.val)
x, y = x.next, y.next
for idx in range(len(values)-1, 0, -1):
        # propagate the carry toward the more significant digit (integer division)
        values[idx - 1] = values[idx - 1] + values[idx] // 10
        values[idx] = values[idx] % 10
for k in range(len(values)):
start.next= ListNode(values[k])
start = start.next
if res.next.val == 0:
return res.next.next
return res.next
l1 = ListNode(2)
l1.next = ListNode(4)
l1.next.next = ListNode(4)
# l1.next.next.next = ListNode(3)
l2 = ListNode(2)
l2.next = ListNode(4)
l2.next.next = ListNode(4)
print(l1 == l2)
show(add(l1, l2))
| true
|
714e024b2b39f028e011dfb58dd19223bd543210
|
Python
|
eroncastro/learning_algorithms
|
/linked_list.py
|
UTF-8
| 2,123
| 3.953125
| 4
|
[] |
no_license
|
class Element(object):
def __init__(self, value):
self.value = value
self.next = None
class LinkedList(object):
def __init__(self, head=None):
self.head = head
def append(self, new_element):
current = self.head
if self.head:
while current.next:
current = current.next
current.next = new_element
else:
self.head = new_element
def get_position(self, position):
"""Get an element from a particular position.
Assume the first position is "1".
Return "None" if position is not in the list."""
if self.head is None:
return
elif position == 1:
return self.head
index = 1
current = self.head
while current.next:
index += 1
current = current.next
if index == position:
return current
def insert(self, new_element, position):
"""Insert a new node at the given position.
Assume the first position is "1".
Inserting at position 3 means between
the 2nd and 3rd elements."""
if position == 1:
if self.head:
new_element.next = self.head
self.head = new_element
return
index = 1
prev_elem = self.head
cur_elem = prev_elem.next
while cur_elem:
index += 1
if index == position:
new_element.next = cur_elem
prev_elem.next = new_element
return
prev_elem = cur_elem
cur_elem = cur_elem.next
def delete(self, value):
"""Delete the first node with a given value."""
if self.head and self.head.value == value:
self.head = self.head.next
return
prev_elem = self.head
cur_elem = prev_elem.next
while cur_elem:
if cur_elem.value == value:
prev_elem.next = cur_elem.next
return
prev_elem = cur_elem
cur_elem = cur_elem.next
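
# --- Hedged usage sketch (not part of the original file) ---
# Exercises the 1-indexed API defined above.
if __name__ == '__main__':
    ll = LinkedList(Element(1))
    ll.append(Element(3))
    ll.insert(Element(2), 2)          # list is now 1 -> 2 -> 3
    print(ll.get_position(2).value)   # 2
    ll.delete(1)
    print(ll.get_position(1).value)   # 2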
| true
|
5ece63ba5364148d0e3c94db7fba6da9aa9b91f7
|
Python
|
podhmo/pyramid-experiment
|
/point/junks/convertor/schema/wtforms.py
|
UTF-8
| 1,360
| 2.546875
| 3
|
[] |
no_license
|
from . import SchemaValidationException
class _ListDict(dict):
""" dummy multidict
"""
def getlist(self, k):
return [self[k]]
class SchemaMapping(object):
def __init__(self, schema):
self.schema = schema
def __call__(self, *args, **kwargs):
return self.schema(*args, **kwargs)
def _validate_iff_need(self, schema, validatep, request):
if not validatep:
return schema
elif schema.validate():
return schema
else:
if request:
# if hasattr(request, "_schema"):
# raise Exception("conflict! request._schema")
request._schema = schema
raise SchemaValidationException(schema, message=str(schema.errors))
def from_request(self, request, validate=False, method="POST"):
data = getattr(request, method)
return self.from_postdata(data, validate=validate, request=request)
def from_postdata(self, postdata, validate=False, request=None):
if hasattr(postdata, "getlist"):
form = self.schema(postdata)
else:
form = self.schema(_ListDict(postdata))
return self._validate_iff_need(form, validate, request)
def from_dict(self, D):
return self.schema(**D)
def as_dict(self, schema):
return schema.data
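
# --- Hedged usage sketch (not part of the original file) ---
# This module uses a relative import, so it cannot run standalone; the
# sketch below is illustrative only. PointForm and its field are invented
# names, and the third-party wtforms package is assumed to be installed.
#
#     from wtforms import Form, StringField
#
#     class PointForm(Form):
#         name = StringField('name')
#
#     mapping = SchemaMapping(PointForm)
#     form = mapping.from_postdata({'name': 'p0'})  # plain dict is wrapped in _ListDict
#     print(mapping.as_dict(form))                  # {'name': 'p0'}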
| true
|
018434f2da3b7b850a89e86d9df1af7df3f9f5ae
|
Python
|
muthurajendran/modeinc-logsearch
|
/backend/app.py
|
UTF-8
| 3,850
| 2.65625
| 3
|
[] |
no_license
|
from flask import Flask, request
from flask_restful import Resource, Api, reqparse
from json import dumps
import os
import json
import operator
from pymongo import MongoClient
import pymongo
from flask_pymongo import PyMongo
from collections import defaultdict
import datetime
from flask_cors import CORS, cross_origin
app = Flask(__name__)
CORS(app)
api = Api(app)
# Connect to mongodb database
# TODO - in the future, move credentials to env vars for security
app.config['MONGO_DBNAME'] = 'modeinc'
app.config['MONGO_URI'] = '<uri goes here>'
mongo = PyMongo(app)
def get_db():
''' Mongodb connector client '''
db = MongoClient('<uri goes here>')
return db.modeinc
def buildIndexForQuerying(db, ignore=False):
''' build data and jaccard similarity '''
try:
        print("Building timeseries data...")
if ignore:
return
db.logs.drop()
input_path = './data'
for filename in os.listdir(input_path):
if not filename.startswith("."):
with open(input_path + "/" + filename, 'r') as fc:
path = input_path + "/" + filename
first = json.loads(fc.readline())
last = json.loads(fc.readlines()[-1])
first_time = datetime.datetime.strptime(first['ts'], "%Y-%m-%dT%H:%M:%SZ")
last_time = datetime.datetime.strptime(last['ts'], "%Y-%m-%dT%H:%M:%SZ")
db.logs.insert({'start': first_time, 'end': last_time, 'filepath': path})
        print("Done building the data")
    except Exception as e:
        print("Error building recommendations", e)
class Search(Resource):
''' API class for recommendation '''
def checkLastLine(self, start, end_line):
pass
def get(self):
db = get_db()
parser = reqparse.RequestParser()
parser.add_argument('page', type=int, default=0)
parser.add_argument('limit', type=int, default=10)
parser.add_argument('start', type=str)
page = parser.parse_args()['page']
limit = parser.parse_args()['limit']
start = parser.parse_args()['start']
if not start:
return {'success': False, 'message':'Start date needed'}
# Date given - now search for first file
search_start = datetime.datetime.strptime(start, "%Y-%m-%dT%H:%M:%SZ")
start_obj = db.logs.find_one({
"start": {"$lte": search_start},
"end": {"$gte": search_start}
})
if not start_obj:
return {'success': False, 'message':'No data available'}
# Find all the files
# new start to get series of files
new_start = start_obj['start']
results = db.logs.find({
"start": {"$gte": new_start}
})
# Iterate through all data for pages
page_limit = page * limit
data = []
for doc in results.limit(limit).sort('start', pymongo.ASCENDING):
with open(doc['filepath'], 'r') as f:
lines = f.readlines()
for line in lines:
obj_line = json.loads(line)
obj_line_time = datetime.datetime.strptime(obj_line['ts'], "%Y-%m-%dT%H:%M:%SZ")
if search_start <= obj_line_time:
if page_limit > 0:
page_limit -= 1
else:
data.append(obj_line)
if len(data) == limit:
return {'success': True, 'data': data, 'count': len(data)}
return {'success': True, 'data': data, 'count': len(data)}
api.add_resource(Search, '/search')
if __name__ == '__main__':
buildIndexForQuerying(get_db(), True)
    # print("*" * 50)
    print("Query Example: http://localhost:5000/search")
app.run()
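
# --- Hedged usage sketch (not part of the original file) ---
# Query parameters match the reqparse arguments above; the timestamp value
# is illustrative only:
#     curl 'http://localhost:5000/search?start=2019-01-01T00:00:00Z&page=0&limit=10'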
| true
|
00975f114a2c4865075d23f8d15a1ecc72505aaf
|
Python
|
dhairya0904/Deep-Learning
|
/LanguageTranslator/translate.py
|
UTF-8
| 2,628
| 2.78125
| 3
|
[] |
no_license
|
import numpy as np
import pandas as pd
import nltk
seed = 7
np.random.seed(seed)
from sklearn.preprocessing import LabelEncoder
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import LSTM
import pickle
import sys
from createModel import CreateModel
def decode_sequence(input_seq,encoder_model,decoder_model,obj):
# Encode the input as state vectors.
states_value = encoder_model.predict(input_seq)
# Generate empty target sequence of length 1.
target_seq = np.zeros((1, 1, obj.num_decoder_tokens))
# Populate the first character of target sequence with the start character.
target_seq[0, 0, 1] = 1
# Sampling loop for a batch of sequences
# (to simplify, here we assume a batch of size 1).
stop_condition = False
decoded_sentence = ''
while not stop_condition:
output_tokens, h, c = decoder_model.predict(
[target_seq] + states_value)
# Sample a token
sampled_token_index = np.argmax(output_tokens[0, -1, :])
        # inverse_transform expects an array-like; take the single mapped character
        sampled_char = obj.target_label_encoder.inverse_transform([sampled_token_index])[0]
decoded_sentence += sampled_char
# Exit condition: either hit max length
# or find stop character.
if (sampled_char == '\n' or
len(decoded_sentence) > obj.max_decoder_seq_length):
stop_condition = True
# Update the target sequence (of length 1).
target_seq = np.zeros((1, 1, obj.num_decoder_tokens))
target_seq[0, 0, sampled_token_index] = 1.
# Update states
states_value = [h, c]
return decoded_sentence
if __name__ == "__main__":
sentence = sys.argv[1:]
    print(sentence)
f = open('data_features.pkl', 'rb')
data_features = pickle.load(f)
lang_translate = CreateModel(data_features=data_features)
model,encoder_model, decoder_model = lang_translate.getModel()
model.load_weights('model.h5')
encoded_sentence = lang_translate.input_label_encoder.transform(sentence)
encoded_sentence = pad_sequences([encoded_sentence], padding='post', maxlen = lang_translate.max_encoder_seq_length)
encoded_sentence = to_categorical(encoded_sentence, num_classes=lang_translate.num_encoder_tokens)
encoded_sentence = encoded_sentence.reshape((1,encoded_sentence.shape[0],encoded_sentence.shape[1]))
    print(decode_sequence(encoded_sentence, encoder_model, decoder_model, lang_translate))
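
# --- Hedged usage sketch (not part of the original file) ---
# Expects data_features.pkl and model.h5 next to this script; the input
# tokens below are illustrative only:
#     python translate.py h e l l o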
| true
|
6aa6a921a6f7e0f3d104655f3384e450226c6930
|
Python
|
Lucces/leetcode
|
/reorderList_143.py
|
UTF-8
| 847
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# coding=utf-8
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def reorderList(self, head):
"""
:type head: ListNode
:rtype: void Do not return anything, modify head in-place instead.
"""
node = head
d = dict()
length = 0
while node != None:
length += 1
d[length] = node
node = node.next
left = 1
right = length
while right - left > 1:
left_node = d[left]
right_node = d[right]
right_node.next = left_node.next
left_node.next = right_node
d[right - 1].next = None
left += 1
right -= 1
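
# --- Hedged usage sketch (not part of the original file) ---
# Defines the ListNode from the commented-out header above to demo the
# in-place reorder of 1->2->3->4 into 1->4->2->3.
if __name__ == '__main__':
    class ListNode(object):
        def __init__(self, x):
            self.val = x
            self.next = None

    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(3)
    head.next.next.next = ListNode(4)
    Solution().reorderList(head)
    node, order = head, []
    while node:
        order.append(node.val)
        node = node.next
    print(order)  # [1, 4, 2, 3]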
| true
|
f9b24ea61bcee36a1edfcb3c865fd4a17ca67081
|
Python
|
DylanMsK/TIL
|
/Algorithm/SW Expert Academy/7087. 문제 제목 붙이기.py
|
UTF-8
| 506
| 2.640625
| 3
|
[] |
no_license
|
# url = 'https://swexpertacademy.com/main/code/problem/problemDetail.do?contestProbId=AWkIdD46A5EDFAXC&categoryId=AWkIdD46A5EDFAXC&categoryType=CODE'
for _ in range(int(input())):
N = int(input())
lst = []
for i in range(N):
lst.append(input())
alp = {chr(i): 0 for i in range(ord('A'), ord('Z')+1)}
for s in lst:
alp[s[0]] += 1
cnt = 0
for i in alp:
if alp[i]:
cnt += 1
else:
break
print(f'#{_+1} {cnt}')
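
# --- Hedged sample run (not part of the original file) ---
# Titles 'ABC' and 'BCD' cover initials A and B, so the count of usable
# consecutive initials starting from 'A' is 2.
# stdin:               stdout:
#     1
#     2
#     ABC              #1 2
#     BCD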
| true
|
fab93ded81ea59d887d301d46cf2923919969c09
|
Python
|
yangyang198599/CQMB_Project
|
/cqen/telnet_task.py
|
UTF-8
| 558
| 2.828125
| 3
|
[] |
no_license
|
import getpass
import telnetlib
def telnet_task():
try:
HOST = "localhost"
user = input("Enter your remote account: ")
password = getpass.getpass()
tn = telnetlib.Telnet(HOST)
tn.read_until(b"login: ")
tn.write(user.encode('ascii') + b"\n")
if password:
tn.read_until(b"Password: ")
tn.write(password.encode('ascii') + b"\n")
tn.write(b"ls\n")
tn.write(b"exit\n")
print(tn.read_all().decode('utf-8'))
except Exception as error:
print(error)
| true
|
6e8edb724c8b1ea5c4c767dc59416394baa68b70
|
Python
|
JiahuaLink/PokemonDemo
|
/battleProcess.py
|
UTF-8
| 1,685
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : battleProcess.py
@Time : 2019/11/28 22:53:50
@Author : Jawa
@Version : 1.0
@Contact : 840132699@qq.com
@Desc    :   Battle process implementation
'''
# here put the import lib
import time
import threading
from battleControls import PlayerControls, EnemyControls
class BattleProcess():
    # Two locks alternate turns: each side acts while holding its own lock,
    # then releases the other side's lock, giving a strict player/enemy
    # round ping-pong. Note that rounds recurse rather than loop, so very
    # long battles could hit Python's recursion limit.
    lockPlayer = threading.Lock()
    lockEnemy = threading.Lock()
def player_rounds(self, data):
player_info = data["player"]["base_info"]
player = data["player"]["battle_info"]
player_hp = player["battle_statistic"]["hp"]
self.lockPlayer.acquire()
PlayerControls().playcontrols(data)
if player_hp == 0:
print("%s倒下" % player_info["name"])
return
self.lockEnemy.release()
time.sleep(0.1)
self.player_rounds(data)
def enemy_rounds(self, data):
enemy_info = data["enemy"]["base_info"]
enemy_battle_info = data["enemy"]["battle_info"]
enemy_hp = enemy_battle_info["battle_statistic"]["hp"]
self.lockEnemy.acquire()
print("%s发起了攻击" % enemy_info["name"])
EnemyControls().enemycontrols(data)
if enemy_hp == 0:
print("%s倒下" % enemy_info["name"])
return
self.lockPlayer.release()
time.sleep(0.1)
self.enemy_rounds(data)
def start(self, data):
self.lockEnemy.acquire()
t1 = threading.Thread(target=self.player_rounds, args=(data, ))
t2 = threading.Thread(target=self.enemy_rounds, args=(data, ))
t1.start()
t2.start()
t1.join()
t2.join()
| true
|
30415ae41f2c652d5f21a17ab0809df136ee897a
|
Python
|
CodeForContribute/Algos-DataStructures
|
/stackCodes/check_parenthesis.py
|
UTF-8
| 806
| 3.65625
| 4
|
[] |
no_license
|
def check_parenthesis(exp):
stack = list()
for i in range(len(exp)):
if exp[i] == '(' or exp[i] == '[' or exp[i] == '{':
stack.append(exp[i])
continue
if len(stack) == 0:
return False
if exp[i] == ')':
x = stack.pop()
if x == '[' or x == '{':
return False
elif exp[i] == ']':
x = stack.pop()
if x == ')' or x == '}':
return False
elif exp[i] == '}':
x = stack.pop()
if x == ')' or x == ']':
return False
    # balanced only if every opened bracket was matched and closed
    return len(stack) == 0
if __name__ == '__main__':
exp = "{[()]}"
n = len(exp)
print(check_parenthesis(exp))
| true
|
8ec48e87c69aaedd1dbd48b4ccc66964adb3e9f9
|
Python
|
SietsmaRJ/dsls_master_thesis
|
/side_scripts/train_xgb_models.py
|
UTF-8
| 5,301
| 2.9375
| 3
|
[] |
no_license
|
import argparse
import pickle
import xgboost as xgb
import json
from sklearn.model_selection import train_test_split
from impute_preprocess import impute, preprocess, cadd_vars
import gzip
import pandas as pd
import numpy as np
class ArgumentSupporter:
"""
Class to handle the given command line input.
Type python3 PreComputeCapice.py --help for more details.
"""
def __init__(self):
parser = self._create_argument_parser()
self.arguments = parser.parse_args()
@staticmethod
def _create_argument_parser():
parser = argparse.ArgumentParser(
prog="train_xgb_models.py",
description="Python script to convert RandomSearchCV optimal"
" parameters to a pickled XGBClassifier model.")
required = parser.add_argument_group("Required arguments")
required.add_argument('-i',
'--input',
nargs=1,
type=str,
required=True,
help='The json of parameters.')
required.add_argument('-o',
'--output',
nargs=1,
type=str,
required=True,
help='The location of the'
' XGBClassifier pickled output.')
required.add_argument('-f',
'--file',
nargs=1,
type=str,
required=True,
help='The location of the training database.')
return parser
def get_argument(self, argument_key):
"""
Method to get a command line argument.
:param argument_key: Command line argument.
:return: List or string.
"""
if self.arguments is not None and argument_key in self.arguments:
value = getattr(self.arguments, argument_key)
else:
value = None
return value
class TrainModel:
def __init__(self, params, output_loc, training_file):
self.params = params
self.output_loc = output_loc
self.train = None
self.eval_set = None
self.processed_feats = []
self.prepare_input_data(training_file)
def train_model(self):
model = xgb.XGBClassifier(verbosity=1,
objective='binary:logistic',
booster='gbtree', n_jobs=8,
min_child_weight=1, max_delta_step=0,
subsample=1, colsample_bytree=1,
colsample_bylevel=1, colsample_bynode=1,
reg_alpha=0, reg_lambda=1,
scale_pos_weight=1, base_score=0.5,
random_state=0,
max_depth=self.params['max_depth'],
learning_rate=self.params['learning_rate'],
n_estimators=self.params['n_estimators'])
model.fit(self.train[self.processed_feats],
self.train['binarized_label'],
early_stopping_rounds=15,
eval_metric='auc',
eval_set=self.eval_set,
verbose=True,
sample_weight=self.train['sample_weight'])
pickle.dump(model, open(self.output_loc, 'wb'))
def prepare_input_data(self, training_file):
        # Detect an optional '##' comment header line so pandas can skip it.
        skip_rows = 0
for line in gzip.open(training_file):
if line.decode().startswith("##"):
skip_rows = 1
break
data = pd.read_csv(training_file, compression='gzip',
skiprows=skip_rows, sep='\t', low_memory=False)
train, test = train_test_split(data, test_size=0.2, random_state=4)
self.train = preprocess(impute(train), isTrain=True)
for col in self.train:
for feat in cadd_vars:
if col == feat or col.startswith(feat):
if col not in self.processed_feats:
self.processed_feats.append(col)
test_preprocessed = preprocess(impute(test), isTrain=False,
model_features=self.processed_feats)
self.eval_set = [(test_preprocessed[self.processed_feats],
test_preprocessed['binarized_label'], 'test')]
def main():
arguments = ArgumentSupporter()
input_json = arguments.get_argument('input')
if isinstance(input_json, list):
input_json = str(input_json[0])
output_loc = arguments.get_argument('output')
if isinstance(output_loc, list):
output_loc = str(output_loc[0])
training_file = arguments.get_argument('file')
if isinstance(training_file, list):
training_file = str(training_file[0])
with open(input_json) as input_params:
loaded_input_params = json.load(input_params)
train_model = TrainModel(loaded_input_params, output_loc, training_file)
train_model.train_model()
if __name__ == '__main__':
main()
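
# --- Hedged usage sketch (not part of the original file) ---
# Illustrative invocation; the file names below are placeholders:
#     python3 train_xgb_models.py -i best_params.json -o model.pickle -f train.tsv.gz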
| true
|