blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ea805ac9d3700f4fed3e9bc78c5b92a310a69428 | Python | neuralfilter/hackerrank | /Hacker_Rank/Python/itertoolsproduct.py | UTF-8 | 139 | 2.59375 | 3 | [] | no_license | from itertools import product
k = map(int, raw_input().split())
l = map(int, raw_input().split())
print(" ".join(map(str, product(k,l))))
| true |
00a2968587f2f0f1d740cc5e237439b588af0319 | Python | pauleveritt/wired_components | /tests/samples/simple/test_views.py | UTF-8 | 756 | 2.96875 | 3 | [
"MIT"
] | permissive | import pytest
from bs4 import BeautifulSoup
@pytest.fixture
def app(registry) -> None:
from wired_components.samples.simple import wired_setup
wired_setup(registry)
@pytest.mark.parametrize(
'path, heading',
[
('/', 'Root: My Site'),
('/f1', 'Folder: The Folder At The Root'),
('/d1', 'Document: A Doc At The Root'),
]
)
def test_pages(registry, app, path, heading):
from wired_components.samples.simple import render_path
result = render_path(registry, path)
# Parse it and do some tests
soup: BeautifulSoup = BeautifulSoup(result, 'html.parser')
nav = soup.find('nav').string.strip()
assert nav == 'BC: label is BC'
h1 = soup.find('h1').string.strip()
assert h1 == heading
| true |
7f217fc7786c0099255dae847b8f70fd2b87baf2 | Python | fran-bravo/socketpy | /socketpy/templates.py | UTF-8 | 15,724 | 3.359375 | 3 | [
"MIT"
] | permissive | MODEL = """#ifndef MODELOS_H_
#define MODELOS_H_
typedef struct {
int length;
char *data;
} t_stream;
// Header de stream
typedef struct {
uint8_t tipoEstructura;
uint16_t length;
} __attribute__ ((__packed__)) t_header;
// Modelos
#endif"""
PACKC = """#include "paquetes.h"
// Paquetizacion
t_stream * paquetizar(int tipoEstructura, void * estructuraOrigen){
t_stream * buffer;
switch(tipoEstructura){
} //Fin del switch
return buffer;
}
// Despaquetizacion
void * despaquetizar(uint8_t tipoEstructura, char * dataPaquete, uint16_t length){
void * buffer;
switch(tipoEstructura){
} //Fin del switch
return buffer;
}
// Headers
char * crearDataConHeader(uint8_t tipoEstructura, int length){
char * data = malloc(length);
uint16_t lengthDatos = length - sizeof(t_header);
t_header header = crearHeader(tipoEstructura, lengthDatos); //creo el header
int tamanoTotal = 0, tamanoDato = 0;
memcpy(data, &header.tipoEstructura, tamanoDato = sizeof(uint8_t)); //copio el tipoEstructura del header a data
tamanoTotal = tamanoDato;
memcpy(data + tamanoTotal, &header.length, tamanoDato = sizeof(uint16_t)); //copio el length del header a data
return data;
}
t_header crearHeader(uint8_t tipoEstructura, uint16_t lengthDatos){
t_header header;
header.tipoEstructura = tipoEstructura;
header.length = lengthDatos;
return header;
}
t_header despaquetizarHeader(char * header){
t_header estructuraHeader;
int tamanoTotal = 0, tamanoDato = 0;
memcpy(&estructuraHeader.tipoEstructura, header + tamanoTotal, tamanoDato = sizeof(uint8_t));
tamanoTotal = tamanoDato;
memcpy(&estructuraHeader.length, header + tamanoTotal, tamanoDato = sizeof(uint16_t));
return estructuraHeader;
}
// Auxiliar
// Auxiliar
// End"""
PACKH = """#ifndef PAQUETES_H_
#define PAQUETES_H_
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include "string.h"
#include "modelos.h"
// Paquetizacion
t_stream * paquetizar(int tipoEstructura, void * estructuraOrigen);
// Despaquetizacion
void * despaquetizar(uint8_t tipoEstructura, char * dataPaquete, uint16_t length);
// Headers
char * crearDataConHeader(uint8_t tipoEstructura, int length);
t_header crearHeader(uint8_t tipoEstructura, uint16_t lengthDatos);
t_header despaquetizarHeader(char * header);
// Auxiliar
#endif"""
SOCKC = """#include "socket.h"
/*
* Nombre: socket_crearCliente/0
* Argumentos:
* - NINGUNO
*
* Devuelve:
* int (Descriptor al socket creado), en caso de error, devuelve -1.
*
*
* Funcion: Crea el socket para un cliente.
*/
int socket_crearCliente(void){
int sockfd;
if((sockfd = socket(AF_INET,SOCK_STREAM,0)) == -1){
perror("Error al crear socket");//Crear log para este error.
return -1;
}
return sockfd;
}
/*Nombre: socket_conectarCliente/3
* Argumentos:
* - sockfd (int), (descriptor del socket cliente).
* - serverIp (char*),(IP del server a conectar)
* - serverPort (int), (puerto del server a conectar)
*
* Devuelve:
* int (Descriptor al socket que se va a conectar, devuelve -1 si hay error).
*
* Funcion: Conectarme a un server con su IP y puerto.
*
*/
int socket_conectarCliente(int sockfd,char *serverIp, int serverPort){
struct sockaddr_in socketInfo;
//INICIALIZACION DE SOCKETINFO
socketInfo.sin_family = AF_INET;
socketInfo.sin_port = htons(serverPort); //host to network short
socketInfo.sin_addr.s_addr = inet_addr(serverIp);
memset(&(socketInfo.sin_zero),'\\0',8); // PONGO A 0 EL RESTO DE LA ESTRUCTURA
// ME CONECTO CON LA DIRECCIÓN DE SOCKETINFO
//SIEMPRE VERIFICANDO QUE NO DEN -1 LAS FUNCIONES O 0 EN CASO DE RECV() -- SOLO PARA SERVER IGUAL :)
if(connect(sockfd , (struct sockaddr *)&socketInfo , sizeof(socketInfo)) == -1){
perror("Falló la conexión"); // Cambiar esto por un log.
return -1;
}
return sockfd;
}
/*Nombre: socket_crearYConectarCliente/2
* Argumentos:
* - serverIp (char*),(IP del server a conectar)
* - serverPort (int), (puerto del server a conectar)
*
* Devuelve:
* int (Descriptor al socket que se va a conectar).
*
* Funcion: Crear y conectar un nuevo cliente a un server con su IP y puerto.
*
*/
int socket_crearYConectarCliente(char *serverIp, int serverPort){
int sockfd;
sockfd = socket_crearCliente();
if (sockfd < 0)
return -1;
sockfd = socket_conectarCliente( sockfd,(char*)serverIp, serverPort);
return sockfd;
}
/*Nombre: socket_crearServidor/2
* Argumentos:
* - serverIp (char*),(IP del server)
* - serverPort (int), (puerto del server)
*
* Devuelve:
* int (Descriptor al socket del server).
*
* Funcion: Crear un nuevo servidor.
*
*/
int socket_crearServidor(char *ip, int port){
int socketEscucha;
struct sockaddr_in miSocket;//ESTE ES EL SOCKET CON LA DRECCION IP
if((socketEscucha = socket(AF_INET,SOCK_STREAM,0)) == -1){
perror("Error al crear socket");
return -1;
}
miSocket.sin_family = AF_INET;
miSocket.sin_port = htons(port);
miSocket.sin_addr.s_addr = inet_addr(ip);
memset(&(miSocket.sin_zero),'\\0',8); //NI LE PRESTEN ATENCION A ESTO
int yes = 1;
if (setsockopt(socketEscucha, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(int)) == -1) {
perror("setsockopt");
exit(1);
}
if(bind(socketEscucha,(struct sockaddr*)&miSocket, sizeof(miSocket)) == -1){
perror ("Error al bindear el socket escucha");
return -1;
}
if (listen(socketEscucha, MAX_CONNECTION_SERVER) == -1){
perror("Error en la puesta de escucha");
return -1;
}
return socketEscucha;
}
/*Nombre: socket_crearServidor/2
* Argumentos:
* - serverIp (char*),(IP del server)
* - serverPort (int), (puerto del server)
*
* Devuelve:
* int (Descriptor al socket del server).
*
* Funcion: Crear un nuevo servidor.
*
*/
int socket_crearServidorIpLocal(int port){
int socketEscucha;
struct sockaddr_in miSocket;//ESTE ES EL SOCKET CON LA DRECCION IP
if((socketEscucha = socket(AF_INET,SOCK_STREAM,0)) == -1){
perror("Error al crear socket");
return -1;
}
miSocket.sin_family = AF_INET;
miSocket.sin_port = htons(port);
miSocket.sin_addr.s_addr = INADDR_ANY;
memset(&(miSocket.sin_zero),'\\0',8); //NI LE PRESTEN ATENCION A ESTO
int yes = 1;
if (setsockopt(socketEscucha, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(int)) == -1) {
perror("setsockopt");
exit(1);
}
if(bind(socketEscucha,(struct sockaddr*)&miSocket, sizeof(miSocket)) == -1){
perror ("Error al bindear el socket escucha");
return -1;
}
if (listen(socketEscucha, MAX_CONNECTION_SERVER) == -1){
perror("Error en la puesta de escucha");
return -1;
}
return socketEscucha;
}
int socket_crearServidorPuertoRandom(char *ip, int * port){
int socketEscucha;
struct sockaddr_in miSocket;//ESTE ES EL SOCKET CON LA DRECCION IP
if((socketEscucha = socket(AF_INET,SOCK_STREAM,0)) == -1){
perror("Error al crear socket");
return -1;
}
miSocket.sin_family = AF_INET;
miSocket.sin_port = htons(0);
miSocket.sin_addr.s_addr = inet_addr(ip);
memset(&(miSocket.sin_zero),'\\0',8); //NI LE PRESTEN ATENCION A ESTO
int yes = 1;
if (setsockopt(socketEscucha, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(int)) == -1) {
perror("setsockopt");
exit(1);
}
if(bind(socketEscucha,(struct sockaddr*)&miSocket, sizeof(miSocket)) == -1){
perror ("Error al bindear el socket escucha");
return -1;
}
if (listen(socketEscucha, MAX_CONNECTION_SERVER) == -1){
perror("Error en la puesta de escucha");
return -1;
}
struct sockaddr_in sin;
socklen_t len = sizeof(sin);
if (getsockname(socketEscucha, (struct sockaddr *)&sin, &len) == -1){
perror("getsockname");
return -1;
}
*port = ntohs(sin.sin_port);
return socketEscucha;
}
/*Nombre: socket_aceptarCliente/1
* Argumentos:
* - socketEscucha (int),(descriptor del socket del server para escuchar conexiones)
*
* Devuelve:
* int (Descriptor al socket de la nueva conexión).
*
* Funcion: Aceptar un cliente que está siendo previamente escuchado.
*
*/
int socket_aceptarCliente(int socketEscucha){
int socketNuevaConexion;
unsigned int size_sockAddrIn;
struct sockaddr_in suSocket;
size_sockAddrIn = sizeof(struct sockaddr_in);
socketNuevaConexion = accept(socketEscucha, (struct sockaddr *)&suSocket, &size_sockAddrIn);
if(socketNuevaConexion < 0) {
perror("Error al aceptar conexion entrante");
return -1;
}
return socketNuevaConexion;
}
/*
* Nombre: socket_enviar/3
* Argumentos:
* - socketReceptor
* - tipo: (unsigned char) tipo de socket
* - estructura (void *) (lo que quiero enviar)
* - tipoEstructura (int que define qué estructura quiero enviar)
*
* Devuelve:
* int (1->si se envio o false->error al envio).
* --> convierte la estructura a un buffer transferible, y lo envia.
*
* Funcion: paquetiza y envia la estructura, convierte la estructura a un buffer transferible y la envia
*/
int socket_enviar(int socketReceptor, t_tipoEstructura tipoEstructura, void* estructura){
int cantBytesEnviados;
t_stream * paquete = paquetizar(tipoEstructura, estructura);
cantBytesEnviados = send(socketReceptor, paquete->data, paquete->length, 0);
free(paquete->data);
free(paquete);
if( cantBytesEnviados == -1){
perror("Server no encontrado");
return 0;
}
else {
return 1;
}
}
/*
* Nombre: socket_recibir/3
* SINTAXIS CORRECTA: socket_recibir(soquetEmisor, &tipoRecibido, &PunteroAEstructuraRecibida)
* NOTA: El segudno y tercer parametro son por referencia. Los modifica en la funcion.
* Admite que se mande NULL en cualquiera de los dos, si no interesa uno de los datos.
* Argumentos:
* - socketEmisor
* - tipoEstructura: (t_tipoEstructura *) puntero a la variable tipo del paquete
* - estructura (void **) puntero a una variable tipo void*
*
* Devuelve:
* int (1-> se recibio ok, 0-> si hubo problemas).
*
* Funcion: recibir y despaquetizar, convierte el paquete recibido a la estructura que corresponda.
*/
int socket_recibir(int socketEmisor, t_tipoEstructura * tipoEstructura, void** estructura){
int cantBytesRecibidos;
t_header header;
char* buffer;
char* bufferHeader;
bufferHeader = malloc(sizeof(t_header));
cantBytesRecibidos = recv(socketEmisor, bufferHeader, sizeof(t_header), MSG_WAITALL); //Recivo por partes, primero el header.
if(cantBytesRecibidos == -1){
free(bufferHeader);
perror("Error al recibir datos");
return 0;
}
header = despaquetizarHeader(bufferHeader);
free(bufferHeader);
if (tipoEstructura != NULL) {
*tipoEstructura = header.tipoEstructura;
}
if(header.length == 0){ // Que pasa si recivo mensaje con length 0? retorno 1 y *estructura NULL.
if (estructura != NULL) {
*estructura = NULL;
}
return 1;
}
buffer = malloc(header.length);
cantBytesRecibidos = recv(socketEmisor, buffer, header.length, MSG_WAITALL); //Recivo el resto del mensaje con el tamaño justo de buffer.
if(cantBytesRecibidos == -1){
free(buffer);
perror("Error al recibir datos");
return 0;
}
if(estructura != NULL) {
*estructura = despaquetizar(header.tipoEstructura, buffer, header.length);
}
free(buffer);
if (cantBytesRecibidos == 0){
*tipoEstructura = 0;
}
return 1;
}
char* socket_ip(char* direccionCompleta){
char * dir = string_duplicate(direccionCompleta);
string_append(&dir,"basura"); // Le agrego al final cualquier cosa, cuestion de que si me mandan "127.0.0.1:", pueda dividirlo correctamente...
char * resultado = ( string_split(dir, ":") )[0]; //Divido en el ":", en un array de char* y digo que me de la primera parte.
free(dir);
return resultado;
}
int socket_puerto(char* direccionCompleta){
char * dir = string_duplicate("basura");
string_append(&dir,direccionCompleta); // Le agrego al principio cualquier cosa, cuestion de que si me mandan ":2532", pueda dividirlo correctamente...
int resultado = atoi(( string_split(dir, ":") )[1]); //Divido en el ":", en un array de char* y digo que me de la segunda parte en forma de int.
free(dir);
return resultado;
}
char* socket_unirDireccion(char* ip, int puerto){
return string_from_format("%s:%d", ip, puerto);
}
/*
* Nombre: socket_cerrarConexion/3
*
* Argumentos:
* - socket
*
* Devuelve:
* int (-1-> si se cerro ok, 0-> si hubo problemas).
*
* Funcion: recibir y despaquetizar, convierte el paquete recibido a la estructura que corresponda.
*/
int socket_cerrarConexion(int socket){
return close(socket);
}
char **string_split(char *text, char *separator) {
int _is_last_token(char* next, int _) {
return next[0] != '\\0';
}
return _string_split(text, separator, _is_last_token);
}
char** _string_split(char* text, char* separator, int(*condition)(char*, int)) {
char **substrings = NULL;
int size = 0;
char *text_to_iterate = string_duplicate(text);
char *next = text_to_iterate;
char *str = text_to_iterate;
while(condition(next, size)) {
char* token = strtok_r(str, separator, &next);
if(token == NULL) {
break;
}
str = NULL;
size++;
substrings = realloc(substrings, sizeof(char*) * size);
substrings[size - 1] = string_duplicate(token);
};
if (next[0] != '\\0') {
size++;
substrings = realloc(substrings, sizeof(char*) * size);
substrings[size - 1] = string_duplicate(next);
}
size++;
substrings = realloc(substrings, sizeof(char*) * size);
substrings[size - 1] = NULL;
free(text_to_iterate);
return substrings;
}
void string_append(char** original, char* string_to_add) {
*original = realloc(*original, strlen(*original) + strlen(string_to_add) + 1);
strcat(*original, string_to_add);
}
char* string_from_format(const char* format, ...) {
char* nuevo;
va_list arguments;
va_start(arguments, format);
nuevo = string_from_vformat(format, arguments);
va_end(arguments);
return nuevo;
}
char* string_duplicate(char* original) {
return strdup(original);
}"""
SOCKH = """#ifndef SOCKET_H_
#define SOCKET_H_
#include <stdlib.h>
#include <netinet/in.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <sys/epoll.h>
#include <errno.h>
#include <sys/ioctl.h>
#include "paquetes.h"
#define MAX_EVENTS_EPOLL 60
#define MAX_CONNECTION_SERVER 60 //VAMOS A ATENDER DE A 10 CONEXIONES COMO MAXIMO A LA VEZ
// Estructura para paquetizar datos a enviar/recibir
typedef struct {
uint8_t tipo;
uint16_t length;
} t_socketHeader;
typedef uint8_t t_tipoEstructura;
//FUNCIONES PARA EL CLIENTE
int socket_crearCliente(void);
int socket_conectarCliente(int sockfd,char *serverIp, int serverPort);
int socket_crearYConectarCliente(char *serverIp, int serverPort);
//FUNCIONES PARA EL SERVIDOR
int socket_crearServidor(char *ip, int port);
int socket_crearServidorPuertoRandom(char *ip, int * port);
int socket_aceptarCliente(int socketEscucha);
int socket_crearServidorIpLocal(int port);
//FUNCIONES COMUNES
int socket_enviar(int socketReceptor, t_tipoEstructura tipoEstructura, void* estructura);
int socket_recibir(int socketEmisor, t_tipoEstructura * tipoEstructura, void** estructura);
int socket_cerrarConexion(int socket);
//FUNCIONES DE MANIPULACION DE DIRECCIONES IP y PUERTO
char* socket_ip(char* direccionCompleta);
int socket_puerto(char* direccionCompleta);
char* socket_unirDireccion(char* ip, int puerto);
//FUNCIONES DE STRINGS
char** string_split(char * text, char * separator);
char** _string_split(char* text, char* separator, int(*condition)(char*, int));
void string_append(char ** original, char * string_to_add);
char* string_from_format(const char* format, ...);
char* string_duplicate(char* original);
#endif /* SOCKET_H_ */""" | true |
78d0284b1b7941663ac179a0859aadfd49f69bdb | Python | Cjbeleta/CSC-184 | /server.py | UTF-8 | 1,014 | 2.796875 | 3 | [] | no_license | from flask import Flask, request, jsonify
app = Flask(__name__)
anime = [{'title': 'Ao No Exorcist'},
{'title': 'Boku No Hero Academia'},
{'title': 'Code Geass'},
{'title': 'Danganronpa'},
{'title': 'Fairy Tail'},
{'title': 'HunterXHunter'}]
@app.route('/anime', methods = ['GET'])
def showall():
return jsonify({'anime': anime})
@app.route('/anime', methods = ['POST'])
def addanime():
ani = {'title': request.json['title']}
anime.append(ani)
return jsonify({'anime': anime})
@app.route('/anime/<string:title>', methods = ['PUT'])
def editanime(title):
select = [ani for ani in anime if ani['title'] == title]
select[0]['title'] = request.json['title']
return jsonify({'anime': select[0]})
@app.route('/anime/<string:title>', methods = ['DELETE'])
def deleteanime(title):
select = [ani for ani in anime if ani['title'] == title]
anime.remove(select[0])
return jsonify({'anime': anime})
if __name__ == '__main__':
app.run(debug=True, port=5000) | true |
8ab9264b6c556d50afc215dd2449d951c67b6952 | Python | BrianPavillarCIS/2348HW | /HW3/11.18/main.py | UTF-8 | 336 | 3.734375 | 4 | [] | no_license | # Name: Brian Pavillar
# ID: 1863509
input_vals = input()
numbers = input_vals.split()
list_vals = []
for number in numbers:
list_vals.append(int(number))
new_list = []
for number in list_vals:
if number >= 0:
new_list.append(int(number))
new_list.sort()
for i in new_list:
print(i, end = " ")
| true |
24f7f84e5d55f08423c4fea2c53d3eea38ac9ed9 | Python | jeremander/AttrVN | /nominate.py | UTF-8 | 9,874 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | """After obtaining all the desired embeddings, stacks them and applies supervised learning to nominate nodes whose nomination_attr_type value is unknown. Optionally uses leave-one-out cross-validation to nominate the known nodes as well.
Usage: python3 nominate.py [path]
The directory [path] must include a file params.py containing all necessary parameters."""
import sys
import embed
import imp
import itertools
from sklearn.preprocessing import Imputer, StandardScaler
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from kde import TwoClassKDE
from attr_vn import *
def main():
path = sys.argv[1].strip('/')
pm = imp.load_source('params', path + '/params.py')
attr_filename = path + '/' + pm.attr_filename
if (pm.rng_seed is not None):
np.random.seed(pm.rng_seed)
# partition attribute types into text/discrete (str dtype) or numeric
text_attr_types, num_attr_types = [], []
for (attr_type, dtype) in pm.predictor_attr_types.items():
if (attr_type != pm.nomination_attr_type):
if (dtype is str):
text_attr_types.append(attr_type)
else:
num_attr_types.append(attr_type)
attr_types = text_attr_types + num_attr_types # all predictor attribute types
# get data frame of numeric features
if pm.verbose:
print("Gathering numeric features...")
start_time = time.time()
num_df = pd.read_csv(attr_filename, sep = ';')
num_df = num_df[np.vectorize(lambda t : t in set(num_attr_types))(num_df['attributeType'])]
num_df = num_df.pivot(index = 'node', columns = 'attributeType', values = 'attributeVal')
num_df = num_df.convert_objects(convert_numeric = True)
if pm.verbose:
print(time_format(time.time() - start_time))
# stack feature vectors, projecting to sphere if desired
if pm.verbose:
print("\nStacking feature vectors...")
start_time = time.time()
mats = []
# get embedding features
(context_features, text_attr_features_by_type) = embed.main()
embedding_mats = []
if pm.use_context:
if pm.sphere_context:
normalize_mat_rows(context_features)
embedding_mats.append(context_features)
for attr_type in text_attr_types:
if pm.sphere_content:
normalize_mat_rows(text_attr_features_by_type[attr_type])
embedding_mats.append(text_attr_features_by_type[attr_type])
if (len(text_attr_types) > 0):
mats += embedding_mats
if (len(num_attr_types) > 0):
# impute missing numeric data (using naive mean or median of the known values)
imputer = Imputer(strategy = pm.imputation)
mats.append(imputer.fit_transform(num_df))
mat = np.hstack(mats)
if pm.verbose:
print(time_format(time.time() - start_time))
# standard-scale the columns
mat = StandardScaler().fit_transform(mat)
# perform PCA on features, if desired
if pm.use_pca:
ncomps = mat.shape[1] if (pm.max_eig_pca is None) else min(pm.max_eig_pca, mat.shape[1])
pca = PCA(n_components = ncomps, whiten = pm.whiten)
if pm.verbose:
print("\nPerforming PCA on feature matrix...")
mat = timeit(pca.fit_transform)(mat)
sq_sing_vals = pca.explained_variance_
if (pm.which_elbow > 0):
elbows = get_elbows(sq_sing_vals, n = pm.which_elbow, thresh = 0.0)
k = elbows[min(len(elbows), pm.which_elbow) - 1]
else:
k = len(sq_sing_vals)
mat = mat[:, :k]
# identify seeds
n = mat.shape[0]
if pm.verbose:
print("\nCreating AttributeAnalyzer...")
a = timeit(AttributeAnalyzer, pm.verbose)(attr_filename, n, text_attr_types + [pm.nomination_attr_type])
ind = a.get_attribute_indicator(pm.nomination_attr_val, pm.nomination_attr_type)
true_seeds, false_seeds = ind[ind == 1].index, ind[ind == 0].index
num_true_seeds, num_false_seeds = len(true_seeds), len(false_seeds)
training = list(ind[ind >= 0].index)
assert ((num_true_seeds > 1) and (num_false_seeds > 1)) # can't handle this otherwise, yet
if pm.verbose:
print("\n%d total seeds (%d positive, %d negative)" % (num_true_seeds + num_false_seeds, num_true_seeds, num_false_seeds))
# construct classifier
if (pm.classifier == 'logreg'):
clf = LogisticRegression()
elif (pm.classifier == 'naive_bayes'):
clf = GaussianNB()
elif (pm.classifier == 'randfor'):
clf = RandomForestClassifier(n_estimators = pm.num_trees, n_jobs = pm.n_jobs)
elif (pm.classifier == 'boost'):
clf = AdaBoostClassifier(n_estimators = pm.num_trees)
elif (pm.classifier == 'kde'):
clf = TwoClassKDE()
train_in = mat[training]
train_out = ind[training]
if pm.verbose:
print("\nCross-validating to optimize KDE bandwidth...")
timeit(clf.fit_with_optimal_bandwidth)(train_in, train_out, gridsize = pm.kde_cv_gridsize, dynamic_range = pm.kde_cv_dynamic_range, cv = pm.kde_cv_folds, verbose = int(pm.verbose), n_jobs = pm.n_jobs)
else:
raise ValueError("Invalid classifier '%s'." % pm.classifier)
# cross-validate
if (pm.cv_max > 0):
true_seeds_for_cv = list(true_seeds[np.random.permutation(range(num_true_seeds))])
false_seeds_for_cv = list(false_seeds[np.random.permutation(range(num_false_seeds))])
# include equal proportion of positive & negative examples in cross-validation, if possible
cv_seeds = list(itertools.islice(filter(None, sum(itertools.zip_longest(true_seeds_for_cv, false_seeds_for_cv), ())), pm.cv_max))
num_cv_seeds = len(cv_seeds)
start_time = time.time()
cv_probs = np.zeros(num_cv_seeds, dtype = float)
num_true = ind[cv_seeds].sum()
guess_rate = num_true / num_cv_seeds
training_set = set(training)
if pm.verbose:
print("\nCross-validating %d seeds (%d positive, %d negative) with %s = %s..." % (num_cv_seeds, num_true, num_cv_seeds - num_true, pm.nomination_attr_type, pm.nomination_attr_val))
for (i, seed) in enumerate(cv_seeds):
training_set.remove(seed) # remove sample
cv_train = list(training_set)
train_in = mat[cv_train]
train_out = ind[cv_train]
clf.fit(train_in, train_out)
cv_in = mat[[seed]]
cv_probs[i] = clf.predict_proba(cv_in)[0, 1]
training_set.add(seed) # add back sample
cv_df = pd.DataFrame(columns = ['node', 'prob'] + [pm.nomination_attr_type] + attr_types)
cv_df['node'] = cv_seeds
cv_df['prob'] = cv_probs
for attr_type in [pm.nomination_attr_type] + text_attr_types:
attrs_by_node = a.attrs_by_node_by_type[attr_type]
cv_df[attr_type] = [str(attrs_by_node[node]) if (len(attrs_by_node[node]) > 0) else '{}' for node in cv_seeds]
for attr_type in num_attr_types:
vals = num_df[attr_type]
cv_df[attr_type] = ['' if np.isnan(vals[node]) else str(vals[node]) for node in cv_seeds]
cv_df = cv_df.sort_values(by = 'prob', ascending = False)
cumulative_prec = np.cumsum(np.asarray(ind[cv_df['node']])) / np.arange(1.0, num_cv_seeds + 1.0)
AP = np.mean(cumulative_prec) # average precision
if pm.verbose:
print(time_format(time.time() - start_time))
print("\nguess rate = %5f" % guess_rate)
print("average precision = %5f" % AP)
print("cumulative precisions:")
print(cumulative_prec)
if pm.save_info:
cv_df.to_csv(path + '/%s_%s_cv_nomination.txt' % (pm.nomination_attr_type, pm.nomination_attr_val), index = False, sep = '\t')
plt.figure()
plt.plot(cumulative_prec, color = 'blue', linewidth = 2)
plt.axhline(y = guess_rate, color = 'black', linewidth = 2, linestyle = 'dashed')
plt.axvline(x = num_true, color = 'black', linewidth = 2, linestyle = 'dashed')
plt.xlabel('rank')
plt.ylabel('prec')
plt.ylim((0, min(1.0, 1.1 * cumulative_prec.max())))
plt.title('Cumulative precision of cross-validated seeds\nAP = %5f' % AP, fontweight = 'bold')
plt.savefig(path + '/%s_%s_cv_prec.png' % (pm.nomination_attr_type, pm.nomination_attr_val))
# nominate the unknown nodes
start_time = time.time()
if pm.verbose:
print("\nNominating unknown nodes...")
train_in = mat[training]
train_out = ind[training]
clf.fit(train_in, train_out)
test = list(ind[~(ind >= 0)].index) # complement of seed set
test_in = mat[test]
test_probs = clf.predict_proba(test_in)[:, 1]
if pm.verbose:
print(time_format(time.time() - start_time))
nom_df = pd.DataFrame(columns = ['node', 'prob'] + attr_types)
nom_df['node'] = test
nom_df['prob'] = test_probs
for attr_type in text_attr_types:
attrs_by_node = a.attrs_by_node_by_type[attr_type]
nom_df[attr_type] = [str(attrs_by_node[node]) if (len(attrs_by_node[node]) > 0) else '{}' for node in test]
for attr_type in num_attr_types:
vals = num_df[attr_type]
nom_df[attr_type] = ['' if np.isnan(vals[node]) else str(vals[node]) for node in test]
nom_df = nom_df.sort_values(by = 'prob', ascending = False)
nom_df[:pm.nominate_max].to_csv(path + '/%s_%s_nomination.out' % (pm.nomination_attr_type, pm.nomination_attr_val), index = False, sep = '\t')
if pm.verbose:
print("\nSaved results to %s/%s_%s_nomination.out" % (path, pm.nomination_attr_type, pm.nomination_attr_val))
if __name__ == "__main__":
main()
| true |
46e720d29d1df25fe74433a6bae45e7064d36ab6 | Python | 873490374/startracker | /program/tracker/main_program.py | UTF-8 | 2,083 | 2.875 | 3 | [] | no_license | import numpy as np
from program.tracker.image_processor import ImageProcessor
from program.tracker.attitude_finder import AttitudeFinder
from program.tracker.star_identifier import StarIdentifier
from program.tracker.tracker import Tracker
class StarTracker:
def __init__(
self,
image_processor: ImageProcessor,
star_identifier: StarIdentifier,
attitude_finder: AttitudeFinder,
tracker: Tracker,
tracking_mode_enabled: bool):
self.image_processor = image_processor
self.star_identifier = star_identifier
self.attitude_finder = attitude_finder
self.tracker = tracker
self.tracking_mode_enabled = tracking_mode_enabled
def run(self):
if self.tracking_mode_enabled:
# tracking mode
identified_stars = []
while True:
image_stars = self.get_image_stars()
if (not identified_stars or len((set(
[int(star[1]) for star in identified_stars]))) < 3):
identified_stars = self.identify_stars(image_stars)
else:
identified_stars = self.tracker.track(
image_stars, identified_stars)
attitude = self.find_attitude(identified_stars)
yield identified_stars, attitude
else:
while True:
# LIS mode
image_stars = self.get_image_stars()
identified_stars = self.identify_stars(image_stars)
attitude = self.find_attitude(identified_stars)
yield identified_stars, attitude
def get_image_stars(self) -> np.ndarray:
return self.image_processor.get_image_star_vectors()
def identify_stars(self, image_stars: np.ndarray):
return self.star_identifier.identify_stars(image_stars)
def find_attitude(self, identified_stars: np.ndarray):
attitude = self.attitude_finder.find_attitude(
identified_stars)
return attitude
| true |
940b1cfc804762d6fd008dc7be473960539a2581 | Python | cobymotion/PythonCourse | /001_Introduccion/EjerciciosClases/008_ciclos.py | UTF-8 | 596 | 4 | 4 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 29 12:56:37 2019
Manejo de ciclos
@author: Luis Cobian
"""
contador = 0
while contador<=10:
if contador%3==0 and contador!=0:
contador+=1
continue
elif contador==7:
break
print (contador)
contador+=1
else:
print ("Fin del ciclo normal")
print("Ejemplo de FOR")
lista = [1,2,3,5,6,8,7]
for elemento in lista:
print(elemento)
print("-------------------------")
for i in range(0,10,2):
print(i)
for indice,valor in enumerate(lista):
print("{} El valor es {}".format(indice,valor))
| true |
7e64916e19b3169ea942e9f5376ee9f1b739bcf8 | Python | sodiqafolayan/asorock_100days_pythoncode | /day_47_force.py | UTF-8 | 3,102 | 4.625 | 5 | [] | no_license | # In this problem, you're going to use that class to calculate
# the net force from a list of forces.
#
# Write a function called find_net_force. find_net_force should
# have one parameter: a list of instances of Force. The
# function should return new instance of Force with the total
# net magnitude and net angle as the values for its magnitude
# and angle attributes.
#
# As a reminder:
#
# - To find the magnitude of the net force, sum all the
# horizontal components and sum all the vertical components.
# The net force is the square root of the sum of the squares
# of the horizontal forces and the vertical foces (i.e.
# (total_horizontal ** 2 + total_vertical ** 2) ** 0.5)
# - To find the angle of the net force, call atan2 with two
# arguments: the total vertical and total horizontal
# forces (in that order).
# - Remember to round both the magnitude and direction to one
# decimal place. This can be done using round(magnitude, 1)
# and round(angle, 1).
# - The Force class has three methods: get_horizontal returns
# a single force's horizontal component. get_vertical
# returns a single force's vertical component. get_angle
# returns a single force's angle in degrees (or in radians
# if you call get_angle(use_degrees = False).
#
# HINT: Don't overcomplicate this. The Force class does a lot
# of your work for you. Use it! You should not need any trig
# functions except atan2, degrees, and radians.
from math import sin, cos, atan2, radians, degrees, sqrt
if __name__ == '__main__':
# Add your code here!
class Force:
def __init__(self, magnitude, angle):
self.angle = radians(angle)
self.magnitude = magnitude
def get_horizontal(self):
horizontal = self.magnitude * cos(self.angle)
return round(horizontal, 2)
def get_vertical(self):
vertical = self.magnitude * sin(self.angle)
return vertical
def get_angle(self, use_degrees=True):
if use_degrees:
return degrees(self.angle)
return self.angle
def find_net_force(list_of_force_instances):
total_horizontal = 0
total_vertical = 0
for i in list_of_force_instances:
total_horizontal += i.get_horizontal()
total_vertical += i.get_vertical()
net_magnitude = round((((total_horizontal ** 2) + (total_vertical ** 2)) ** 0.5), 1)
net_angle = round(degrees(atan2(total_vertical, total_horizontal)), 1)
new_force_Instance = Force(net_magnitude, net_angle)
return new_force_Instance
# Below are some lines of code that will test your object.
# You can change these lines to test your code in different
# ways.
#
# If your code works correctly, this will originally run
# error-free and print:
# 103.1
# -14.0
force_1 = Force(50, 90)
force_2 = Force(75, -90)
force_3 = Force(100, 0)
forces = [force_1, force_2, force_3]
net_force = find_net_force(forces)
print(net_force.magnitude)
print(net_force.get_angle()) | true |
b8a197a9065f6f8e8fb0ad9ad7d79d70fb9bafe3 | Python | sauerBT/PersonalRepo | /BrianPersonalCodingRepo/005_PracticeScripts/Python_Scripts/03_Web_Scraping/webScraping_yahooToCSV.py | UTF-8 | 232 | 2.671875 | 3 | [] | no_license | import yfinance as yf
import pandas as pd
import re
msft = yf.Ticker("MSFT")
print(msft)
yahooMSFTHistorical = msft.history(period="max")
OpenType = type(yahooMSFTHistorical["Open"])
Open = yahooMSFTHistorical["Open"].toList() | true |
815a48cacad5c1fe8e086892721fee174dbc2de8 | Python | Escaity/Library | /python/atcoder/ABC/C/解説AC/c169a.py | UTF-8 | 158 | 3.328125 | 3 | [] | no_license | from decimal import Decimal
# decimal型は文字列で渡す
a, b = [Decimal(i) for i in input().split()]
# decimal型→int型に変換
print(int(a * b))
| true |
14ae9c05237aa140029fca163985c578f4d1d673 | Python | tomsoutherland/vbm.py | /FileLock.py | UTF-8 | 1,315 | 3 | 3 | [] | no_license | import os
class FileLockException(Exception):
pass
class FileLock(object):
def __init__(self, file_name, timeout=10, delay=.05):
""" Prepare the file locker. Specify the file to lock and optionally
the maximum timeout and the delay between each attempt to lock.
"""
if timeout is not None and delay is None:
raise ValueError("If timeout is not None, then delay must not be None.")
self.is_locked = False
self.lockfile = os.path.join(os.getcwd(), "%s.lock" % file_name)
self.file_name = file_name
def acquire(self):
try:
self.fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
self.is_locked = True # moved to ensure tag only when locked
except OSError as e:
print("Unable to acquire lock: ", self.lockfile, "\nOS Error: ", e.strerror)
exit(1)
def release(self):
if self.is_locked:
os.close(self.fd)
os.unlink(self.lockfile)
self.is_locked = False
def __enter__(self):
if not self.is_locked:
self.acquire()
return self
def __exit__(self, type, value, traceback):
if self.is_locked:
self.release()
def __del__(self):
self.release() | true |
3b33c30e6992b323e17eb1dd4b8b1237419fad64 | Python | avneetsingh/ecommerce-recommender | /Products.py | UTF-8 | 10,998 | 2.53125 | 3 | [] | no_license | from Tkinter import*
import requests
import sys
from bs4 import BeautifulSoup
import Tkinter as tk
import urllib
from PIL import ImageTk, Image
import os
import json
import io
import base64
import turtle
import image_file
import create_table
import product_reviews
import time
from glob import glob
from cStringIO import StringIO
try:
# Python2
import Tkinter as tk
from urllib2 import urlopen
except ImportError:
# Python3
import tkinter as tk
from urllib.request import urlopen
def onFrameConfigure(canvas):
'''Reset the scroll region to encompass the inner frame'''
canvas.configure(scrollregion=canvas.bbox("all"))
def features1(par):
create_table.main(par)
def reviewsfeatures1(par):
product_reviews.main(par)
def viewImage(par):
#image_file.main(par)
url=par
u = urlopen(url)
raw_data = u.read()
u.close()
image_file = Image.open(StringIO(raw_data))
photo_image = ImageTk.PhotoImage(image_file)
label = tk.Label(image=photo_image)
label.pack()
root.mainloop()
class Application(Frame):
def __init__(self,master):
Frame.__init__(self,master)
tk.Frame.__init__(self, master)
self.canvas = tk.Canvas(master, borderwidth=0, background="#ffffff")
self.frame = tk.Frame(self.canvas, background="#ffffff")
self.vsb = tk.Scrollbar(master, orient="vertical", command=self.canvas.yview)
self.canvas.configure(yscrollcommand=self.vsb.set)
self.cur=0
self.image = PhotoImage()
self.images = glob("*.gif")
self.vsb.pack(side="right", fill="y")
self.canvas.pack(side="left", fill="both", expand=True)
self.canvas.create_window((4,4), window=self.frame, anchor="nw",
tags="self.frame")
self.frame.bind("<Configure>", self.onFrameConfigure)
self.create_widgets()
def create_widgets(self):
self.instruction = Label(self,text="Search Product").grid(row=0,column=0,columnspan=1)
self.text = Entry(self)
self.text.grid(row=0,column=1)
self.submit_button = Button(self,text="Search", command=self.update_text).grid(row=0,column=3)
Label(self,text="Choose Sites").grid(row=3,column=0,sticky=W)
#instructions
#Label(self,text="Select all that apply:").grid(row=4,column=0,sticky=W)
#Flipkart check button
self.flipkart = BooleanVar()
Checkbutton(self,text = "Flipkart",variable = self.flipkart,command = self.update_text).grid(row = 2,column =2)
#Amazon check button
self.amazon = BooleanVar()
#Checkbutton(self,text = "Amazon",variable = self.amazon,command = self.update_text).grid(row = 2,column = 3)
#Snapdeal check button
self.snapdeal = BooleanVar()
#Checkbutton(self,text = "Snapdeal",variable=self.snapdeal,command=self.update_text).grid(row = 2,column =4)
self.result = Text(self,width = 40,height = 5,wrap = WORD)
#self.result.grid(row = 8,column = 0,columnspan = 3)
def onFrameConfigure(self, event):
'''Reset the scroll region to encompass the inner frame'''
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def show_next(self):
self.cur = (self.cur + 1) % 150
self.image.configure(file=self.images[self.cur])
def update_text(self):
likes=""
if self.flipkart.get():
rr=7
for i in range(150):
tk.Label(self.frame,text="",borderwidth=0, relief="solid",font=("Helvetica", 10)).grid(row=rr,column=4,sticky="nsew",padx=1, pady=5)
tk.Label(self.frame,text="",borderwidth=0, relief="solid",font=("Helvetica", 10)).grid(row=rr,column=6,sticky="nsew",padx=1, pady=5)
tk.Label(self.frame,text="",borderwidth=0, relief="solid",font=("Helvetica", 10)).grid(row=rr,column=8,sticky="nsew",padx=1, pady=5)
rr=rr+1
tk.Label(self.frame,text="",borderwidth=0, relief="solid",font=("Helvetica", 10)).grid(row=rr,column=4,sticky="nsew",padx=1, pady=5)
tk.Label(self.frame,text="",borderwidth=0, relief="solid",font=("Helvetica", 10)).grid(row=rr,column=6,sticky="nsew",padx=1, pady=5)
url ="http://www.flipkart.com/search?q="
url2=url+self.text.get()+"&as=off&as-show=on&otracker=end";
#print(url2)
#r= requests.get(url2,proxies=proxies);
r=requests.get(url2)
soup =BeautifulSoup(r.content, 'html.parser');
links =soup.find_all("a");
r=7
for i in range(10):
tk.Label(self.frame,text=" ",borderwidth=0, relief="solid",font=("Helvetica", 10)).grid(row=r,column=4,sticky="nsew",padx=1, pady=5)
r=r+1
lst=[]
b_id=0
b_id2=0
g_data =soup.find_all("div",{"class":"gu3"})
if not g_data:
g_data= soup.find_all("div",{"class":"unit"})
self.button3=[]
self.button4=[]
self.button5=[]
self.button6=[]
b_id3=0
b_id4=0
#print g_data
for col in g_data:
for sets in col.find_all("div",{"class":"pu-title"}):
#print sets.get_text('|',strip=True)
tk.Label(self.frame,text=sets.get_text(' ',strip=True),borderwidth=0, relief="solid",font=("Helvetica", 10)).grid(row=r,column=4,sticky="nsew",padx=1, pady=5)
#r=r+1
for sets in col.find_all("div",{"class":"pu-price"}):
#print sets.get_text('|',strip=True)
tk.Label(self.frame,text=sets.get_text(' ',strip=True),borderwidth=0, relief="solid",font=("Helvetica", 10)).grid(row=r,column=6,sticky="nsew",padx=1, pady=5)
#r=r+1
for sets in col.find_all("div",{"class":"pu-rating"}):
#print sets.get_text('|',strip=True)
tk.Label(self.frame,text=sets.get_text(' ',strip=True),borderwidth=0, relief="solid",font=("Helvetica", 10)).grid(row=r,column=8,sticky="nsew",padx=1, pady=5)
flag=0
for sets in col.find_all("div",{"class":"pu-visual-section"}):
#print sets
p_img=sets.find_all("img")
#print p_img
for pimg in p_img:
var=pimg.get("data-src")
#print var
#url="http://img6a.flixcart.com/image/sofa-sectional/w/a/c/6000020310001-semi-aniline-leather-hometown-brown-brown-400x400-imae94v2g7gdcdsk.jpeg"
#print var
url=var
#url="http://img6a.flixcart.com/image/sofa-sectional/w/a/c/6000020310001-semi-aniline-leather-hometown-brown-brown-400x400-imae94v2g7gdcdsk.jpeg"
#u = requests.get(url,proxies=proxies)
u = requests.get(url)
raw_data = u.content
u.close()
image_file = Image.open(StringIO(raw_data))
self.photo_image = ImageTk.PhotoImage(image_file)
#self.button4.append(tk.Button(self.frame,text="Zoom Image",image=self.photo_image))
#self.button4[b_id2].grid(row=r,column=16,sticky="nsew")
#r=r+1
#self.grid()
self.button6.append(tk.Button(self.frame,text="",image=self.photo_image,height=200, width=200,compound=CENTER))
self.button6[b_id4].grid(row=r,column=10,sticky="nsew",pady=5, padx=5)
rowFrame =Frame(self)
curLbl = Label(rowFrame,image=self.photo_image)
curLbl.grid(row=r,column=15,sticky='news');
curLbl.bind(self.button6[b_id4],lambda e, image=self.photo_image:self.popImage(self.photo_image))
#self.button6.append(Button(self.frame,text="view Image", command=lambda var=var:viewImage(var)))
#self.button6[b_id4].grid(row=r,column=18,sticky="nsew")
b_id4=b_id4+1
#self.show_next()
#self.button4[b_id2].pack()
#self.image = Tkinter.PhotoImage(file=filename)
b_id2=b_id2+1
#b.pack(side="right")
#label = tk.Label(image=photo_image)
#label.grid(row=r,column=9)
#r=r+2
#label.pack()
r=r+1
#print sets
if(flag==0):
for sets in col.find_all("a",{"class":"pu-image"}):
#print sets.get_text('|',strip=True)
val="http://www.flipkart.com"+sets.get("href")
tk.Label(self.frame,text=" ",borderwidth=0, relief="solid",font=("Helvetica", 10)).grid(row=r,column=4,sticky="nsew",padx=1, pady=5)
r=r+1
#print val
lst.append(val)
#tk.Label(self.frame,text="http://www.flipkart.com"+sets.get("href"),borderwidth=0, relief="solid",font=("Helvetica", 10)).grid(row=r,column=4,sticky="nsew",padx=1, pady=5)
self.button3.append(Button(self.frame,text="Check this Product", command=lambda val=val:features1(val)))
self.button3[b_id].grid(row=r,column=4,sticky="nsew")
b_id=b_id+1
#self.button1["command"]=self.features
#tk.button1.grid(row=r,column=4,sticky="nsew")
#r=r+1
self.button5.append(Button(self.frame,text="Check out reviews", command=lambda val=val:product_reviews.main(val)))
self.button5[b_id3].grid(row=r,column=6,sticky="nsew")
b_id3=b_id3+1
#print "<a href= http://www.flipkart.com%s>%s</a>"%(sets.get("href"),sets.text)
r=r+1
for i in range(3):
tk.Label(self.frame,text=" ",borderwidth=0, relief="solid",font=("Helvetica", 10)).grid(row=r,column=4,sticky="nsew",padx=1, pady=5)
r=r+1
#r=r+100
if self.amazon.get():
likes += "You like amazon"
if self.snapdeal.get():
likes +="You like Snapdeal"
self.result.delete(0.0,END)
self.result.insert(0.0,likes)
root=Tk()
root.title("Products")
Application(root).pack(side="top", fill="both", expand=True)
root.mainloop()
| true |
d23605d1b17c3635f0d00a227536595a4964c087 | Python | szabgab/slides | /python/examples/sqlite/sql_insert.py | UTF-8 | 971 | 3.40625 | 3 | [] | no_license | import sqlite3
sql = 'INSERT INTO companies (name, employees, established) VALUES (?, ?, ?)'
def insert_one(conn, crs):
company_name = 'Hostlocal'
employee_count = 1
year_of_establishment = 2000
try:
crs.execute(sql, (company_name, employee_count, year_of_establishment))
except sqlite3.IntegrityError as err:
print('sqlite error: ', err.args[0]) # column name is not unique
conn.commit()
def insert_many(conn, crs):
companies = [
('Google', 150_028, 1998),
('Facebook', 68_177, 2003),
('Apple', 154_000, 1977),
('Microsoft', 181_000, 1975),
]
try:
crs.executemany(sql, companies)
except sqlite3.IntegrityError as err:
print(f'sqlite error: {err.args[0]}')
conn.commit()
def main():
conn = sqlite3.connect("companies.db")
crs = conn.cursor()
insert_one(conn, crs)
insert_many(conn, crs)
conn.close()
print('done')
main()
| true |
6caa7f4769cd1fb01bf5239b605cd38c54466f01 | Python | vyankateshgithubber/speech-analyer | /Complete_Project/Payment/togther.py | UTF-8 | 9,408 | 3.03125 | 3 | [] | no_license |
import pandas as pd
import numpy as np
import nltk
nltk.download('stopwords')
train_df = pd.read_csv("/content/drive/MyDrive/PROJECT/Audio_text/Datasets/train.txt",sep=';')
train_df.columns = ["Sentance","Emotion"]
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
ps = PorterStemmer()
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
def tokenize(tweets):
stop_words = stopwords.words("english")
tokenized_tweets = []
for tweet in tweets:
# split all words in the tweet
words = tweet.split(" ")
tokenized_string = ""
for word in words:
# remove @handles -> useless -> no information
if word[0] != '@' and word not in stop_words:
# if a hashtag, remove # -> adds no new information
if word[0] == "#":
word = word[1:]
#word = ps.stem(word) # stemming line
tokenized_string += word + " "
tokenized_tweets.append(tokenized_string)
return tokenized_tweets
def encod_tweets(tweets):
tokenizer = Tokenizer(filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', split=" ", lower=True)
tokenizer.fit_on_texts(tweets)
return tokenizer, tokenizer.texts_to_sequences(tweets)
tokenized_tweets = tokenize(train_df['Sentance'])
tokenizer, encoded_tweets = encod_tweets(tokenized_tweets)
import random
def tok(data,emotion):
index = data.index
index = random.choice(index[data['Emotion']==emotion])
s = data['Sentance'][index]
to = tokenizer.texts_to_sequences([s])
to = pad_sequences(to, maxlen= 20, padding='post')
to = np.array(to,dtype="float32")
data.drop(index=index)
return to,data
import librosa
import soundfile
import os, glob, pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
def extract_feature(file_name, mfcc, chroma, mel):
with soundfile.SoundFile(file_name) as sound_file:
X = sound_file.read(dtype="float32")#32
sample_rate=sound_file.samplerate
#print("MFCC")
#if chroma:
# stft=np.abs(librosa.stft(X))
result=np.array([])
if mfcc:
mfccs=np.mean(librosa.feature.mfcc(y=X, sr=16000, n_mfcc=40).T, axis=0)
# print("MFCC")
result=np.hstack((result, mfccs))
#if chroma:
# chroma=np.mean(librosa.feature.chroma_stft(S=stft, sr=16000).T,axis=0)
# result=np.hstack((result, chroma))
#if mel:
# mel=np.mean(librosa.feature.melspectrogram(X, sr=16000).T,axis=0)
# result=np.hstack((result, mel))
return result
emotions={
'01':'neutral',
'02':'calm',
'03':'happy',
'04':'sad',
'05':'angry',
'06':'fearful',
'07':'disgust',
'08':'surprised'
}
# 'disgust':'anger','calm':'love','angry':'anger','neutral':'love'
#DataFlair - Emotions to observe
observed_emotions=['happy','disgust','calm','angry','neutral','surprised','fearful','sad']
#DataFlair - Load the data and extract features for each sound file
def load_data(train_df,test_size):
x,y=[],[]
x = np.empty((1,60))
count_sad=0
count_happy=0
map = {'happy':'joy','surprised':'surprise','disgust':' ','calm':'surprise','angry':'anger','neutral':' ','fearful':'fear','sad':'sadness'}
for file in glob.glob("/content/drive/MyDrive/PROJECT/Audio_text/Datasets/ravdess/RAVDESS/Actor_*/*.wav"):
file_name=os.path.basename(file)
emotion=emotions[file_name.split("-")[2]]
if emotion not in observed_emotions:
continue
feeling = map[emotion]
feature=extract_feature(file, mfcc=True, chroma=False, mel=False)
feature = feature.astype('float32')
if(feeling!=' '):
tokenized, train_df = tok(train_df,feeling)
else:
tokenized = np.zeros((1,20))
tokenized = np.reshape(tokenized,(1,-1))
feature = np.reshape(feature,(1,-1))
# print(tokenized,feature)
out = np.hstack((feature,tokenized))
if emotion=='happy':
count_happy+=1
if emotion=='sad':
count_sad+=1
if count_sad<=count_happy and emotion=='sad':
x = np.vstack([x, out])
y.append(observed_emotions.index(emotion))
elif emotion !='sad':
x = np.vstack([x, out])
y.append(observed_emotions.index(emotion))
count_sad=0
count_happy=0
for file in glob.glob("/content/drive/MyDrive/PROJECT/Audio_text/Datasets/TESS/*.wav"):
file_name=os.path.basename(file)
emotion=file_name.split("_")[2][:-4]
if emotion not in observed_emotions:
continue
feeling = map[emotion]
feature=extract_feature(file, mfcc=True, chroma=False, mel=False)
feature = feature.astype('float32')
if(feeling!=' '):
tokenized, train_df = tok(train_df,feeling)
else:
tokenized = np.zeros((1,20))
tokenized = np.reshape(tokenized,(1,-1))
feature = np.reshape(feature,(1,-1))
# print(tokenized,feature)
out = np.hstack((feature,tokenized))
if emotion=='happy':
count_happy+=1
if emotion=='sad':
count_sad+=1
if count_sad<=count_happy and emotion=='sad':
x = np.vstack([x, out])
y.append(observed_emotions.index(emotion))
elif emotion !='sad':
x = np.vstack([x, out])
y.append(observed_emotions.index(emotion))
x = np.delete(x,0,0)
return train_test_split(x, np.array(y), test_size=test_size, random_state=9)
#DataFlair - Split the dataset
x_train,x_test,y_train,y_test=load_data(train_df,test_size=0.20)
x_train.shape,y_train.shape
x_traincnn=np.expand_dims(x_train,axis=2)
x_testcnn=np.expand_dims(x_test,axis=2)
x_traincnn.shape, x_testcnn.shape
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
from matplotlib.pyplot import specgram
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense , Embedding
from keras.layers import LSTM
from keras.utils import to_categorical
from keras.layers import Input,Flatten,Dropout,Activation
from keras.layers import Conv1D,MaxPooling1D,AveragePooling1D
from keras.models import Model
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import confusion_matrix,classification_report
model=Sequential()
model.add(Conv1D(256,5,padding='same',input_shape=(60,1)))
model.add(Activation('relu'))
model.add(Dropout(0.1))#0.1/0.2
model.add(Conv1D(128,5,padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling1D(pool_size=(8)))
model.add(Conv1D(128,5,padding='same'))
model.add(Activation('relu'))
model.add(Conv1D(128,5,padding='same'))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(10))
model.add(Activation('softmax'))
opt=keras.optimizers.RMSprop(lr=0.00001,decay=1e-6)
#model.summary()
model.compile(loss='sparse_categorical_crossentropy',optimizer=opt,metrics=['accuracy'])
cnnhistory=model.fit(x_traincnn,y_train,batch_size=16,epochs=250,validation_data=(x_testcnn,y_test))
#model = keras.models.load_model('/content/drive/MyDrive/multimodel')
#https://heartbeat.fritz.ai/working-with-audio-signals-in-python-6c2bd63b2daf
#score = model.evaluate(x_testcnn, y_test, verbose=0)
#print(f'Test loss: {score[0]} / Test accuracy: {score[1]}')
#y_pred = model.predict(x_testcnn)
# summarize history for accuracy
plt.plot(cnnhistory.history['accuracy'])
plt.plot(cnnhistory.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(cnnhistory.history['loss'])
plt.plot(cnnhistory.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
#model.save('/content/drive/MyDrive/my_model')
def next_block(filepath,text):
feature=extract_feature(filepath, mfcc=True, chroma=False, mel=False)
feature=np.reshape(feature,(1,-1))
#text = tokenize([text])
to = tokenizer.texts_to_sequences([text])
to = pad_sequences(to, maxlen= 20 , padding='post')
to = np.array(to)
to = np.reshape(to,(1,-1))
feature1 = np.hstack((feature,to))
feature1 = np.reshape(feature1,(1,60,1))
a = model.predict(feature1)
classes = np.argmax(a, axis = 1)
print("Emotion : " ,classes," ",observed_emotions[classes[0]])
return observed_emotions[classes[0]]
import time
import pyaudio
import speech_recognition as sr
"""def callback(recognizer , audio):
try:
input_string=recognizer.recognize_google(audio,language="en-SG")
#first run the next_block cell and then run this one
print(input_string)
print("Calling")
next_block(filepath=filepath,text=input_string)
except:
print("Opps didn't catch")
r=sr.Recognizer()
m=sr.AudioFile(filepath)
with m as source:
r.dynamic_energy_threshold=True
r.adjust_for_ambient_noise(source,duration=1)
time.sleep(0.5)
stop_listening=r.listen_in_background(m,callback)
for _ in range(8):time.sleep(0.1)
stop_listening()
for i in range(5):time.sleep(0.1)
"""
| true |
65a82c6f2b384087c4b198c26bb5665226a89c7f | Python | ItamarHavenstein-zz/Python | /Exercicios11a20/ex014.py | UTF-8 | 187 | 3.984375 | 4 | [
"MIT"
] | permissive | temperatura = float(input('Informe a temperatura em °C:'))
farenheit = (temperatura * 9 / 5) + 32
print('A temperatura de {}°C corresponde a {:.2f}°F'.format(temperatura, farenheit))
| true |
03aff826e6f2c7ca3c21637d082e40641f0aff84 | Python | iandersen/drone-car | /ClientSide_Python/irTest.py | UTF-8 | 204 | 2.9375 | 3 | [] | no_license | import pigpio
import time
pi = pigpio.pi()
for i in range(2,27):
pi.set_mode(i, pigpio.OUTPUT)
while 1:
for i in range(2, 27):
if pi.read(i) > 0:
print pi.read(i)
time.sleep(1)
| true |
e2c335767d651849f326e3ccc04972a59e4518fd | Python | Alasdair-Roddick/Image-Pixel-Encription | /Decript.py | UTF-8 | 389 | 2.984375 | 3 | [] | no_license | from PIL import Image
import itertools
x = input("Plaese enter file directory (including extenstion): ")
img = Image.open(x)
pixel = img.load()
code = []
finalcode = []
for x in range(img.size[0]):
print(pixel[x,0])
code.append(pixel[x,0])
for a_tuple in code:
finalcode.append(a_tuple[0])
x = ''.join(chr(i) for i in finalcode)
print(x)
input() | true |
8ce8db81c319da4f219a0a674322a8274e059700 | Python | perestoandr/hackerrank-challenges | /morgan-and-string.py | UTF-8 | 1,565 | 3.25 | 3 | [] | no_license | __author__ = 'Andrey Perestoronin'
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
s1 = min(m)
s2 = max(m)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
def solve(f_str, s_str):
# """
# >>> solve('abcd', 'bdxyz')
# 'abbcddxyz'
# >>> solve('JACK', 'DANIEL')
# 'DAJACKNIEL'
# """
to_first = True
f_str, s_str = list(f_str[::-1]), list(s_str[::-1])
result = list()
while f_str and s_str:
x, y = f_str.pop(), s_str.pop()
if x < y:
result.append(x)
s_str.append(y)
elif y < x:
result.append(y)
f_str.append(x)
else:
ls = list()
ls.append(''.join(f_str[::-1]))
ls.append(''.join(s_str[::-1]))
ls = commonprefix(ls)
if ls and len(ls) != len(s_str) and len(ls) != len(f_str):
to_first = f_str[::-1][len(ls)] < s_str[::-1][len(ls)]
elif (not ls) and (len(f_str) or len(s_str)):
if to_first:
result.append(x)
s_str.append(y)
else:
result.append(y)
f_str.append(x)
if f_str:
result = result + f_str[::-1]
if s_str:
result = result + s_str[::-1]
return ''.join(result)
if __name__ == '__main__':
for _ in xrange(input()):
print solve(raw_input().strip(), raw_input().strip())
import doctest
doctest.testmod()
| true |
41e647a789e3159ae579e2054ce1889c0e24084b | Python | gabriellaec/desoft-analise-exercicios | /backup/user_086/ch48_2019_09_11_13_16_23_258190.py | UTF-8 | 165 | 3.421875 | 3 | [] | no_license | strmes=input('qual o nome do mês escolhido? ')
contador=0
while contador<len(listastr):
if strmes==listastr[contador]:
print(contador+1)
contador+=1 | true |
08e2e9cd7a9984f154df3ac687c52dff06072fc5 | Python | DongjiY/Kattis | /src/temperature.py | UTF-8 | 199 | 3.328125 | 3 | [] | no_license | x, y = [int(i) for i in input().split()]
if y == 1:
if x == 0:
print("ALL GOOD")
elif x != 0:
print("IMPOSSIBLE")
else:
intersection = (-1*x)/(y-1)
print(intersection) | true |
476779304e28e90f8cf876c8b750e341d0c3e528 | Python | ZoranPandovski/design-patterns | /Behavioral/ChainOfResponsability/Python/ChainOfResponsibility.py | UTF-8 | 1,281 | 3.125 | 3 | [
"CC0-1.0"
] | permissive | class Handler:
def __init__(self):
self.next = None
#execute chain
def execu(self, obj):
if self.next is None:
return self.handle(obj)
else:
return self.next.execu(self.handle(obj))
#chain handler
def handle(self, obj):
raise NotImplementedError("You MUST implement this method!")
#adds handler at the last element of the chain
def addHandler(self, handler):
nxt = self
while nxt.next is not None:
nxt = nxt.next
nxt.next = handler
return handler
class SplitDot(Handler):
def handle(self, obj):
return obj.split('.')
class Reverse(Handler):
def handle(self, obj):
return obj[::-1]
class JoinWithDot(Handler):
def handle(self, obj):
return ".".join(obj)
class AppendInAddr(Handler):
def handle(self, obj):
return obj+".in-addr.arpa"
if __name__=="__main__":
addr = "192.168.1.3"
split = SplitDot()
reverse = Reverse()
join = JoinWithDot()
app = AppendInAddr()
# set the chain of responsibility
split.addHandler(reverse)
reverse.addHandler(join)
join.addHandler(app)
print("Result1: " + split.execu(addr))
# alternative
base = SplitDot()
# set the chain of responsibility
base.addHandler(Reverse())
base.addHandler(JoinWithDot())
base.addHandler(AppendInAddr())
print("Result2: " + split.execu(addr))
| true |
f9876d25c7e5c881cb3a8732dfeb2fefd3a714f1 | Python | hinklej/CSCI-400-Cloud-Computing | /Project4/wordFreq.py | UTF-8 | 730 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env python
import mincemeat
# Don't forget to start a client!
# ./mincemeat.py -l -p changeme
file = open('mobydick.txt','r')
data = list(file)
file.close()
# The data source can be any dictionary-like object
datasource = dict(enumerate(data))
def mapfn(k, v):
for word in v.split():
word = word.strip()
if len(word) >= 1:
yield word, 1
def reducefn(k, vs):
result = sum(vs)
return result
s = mincemeat.Server()
s.datasource = datasource
s.mapfn = mapfn
s.reducefn = reducefn
results = s.run_server(password="changeme")
resultlist = []
for k in results.keys():
resultlist.append((k,results[k]))
resultlist = sorted(resultlist, key=lambda a: a[1])
print resultlist[-5:]
| true |
3fa8252939de3b6a62a445edc9dac8f7a756feea | Python | ryantupo/twitch-bot | /pagan.py | UTF-8 | 5,391 | 3.515625 | 4 | [] | no_license |
#boardT = ['_0_','_1_','_2_',
# '_3_','_4_','_5_',
# _6_','_7_','_8_'
# 012 , 345, 678 - landscape wins
# 036 , 147 , 258 - portrait wins
# 048 ,642 - diagonal wins
def win_clause(boardT):
#---------------------------------------------------------------------------
#landscape wins
#top wins
if((boardT[0] == "X") and (boardT[1] == "X") and (boardT[2] == "X")):
print("X won")
state = True
return state
elif((boardT[0] == "O") and (boardT[1] == "O") and (boardT[2] == "O")):
print("O won")
state = True
return state
#middle wins
elif((boardT[3] == "X") and (boardT[4] == "X") and (boardT[5] == "X")):
print("X won")
state = True
return state
elif((boardT[3] == "O") and (boardT[4] == "O") and (boardT[5] == "O")):
print("O won")
state = True
return state
#bottom wins
elif((boardT[6] == "X") and (boardT[7] == "X") and (boardT[8] == "X")):
print("X won")
state = True
return state
elif((boardT[6] == "O") and (boardT[7] == "O") and (boardT[8] == "O")):
print("O won")
state = True
return state
#---------------------------------------------------------------------------
#portrait wins
#left wins
elif((boardT[3] == "X") and (boardT[4] == "X") and (boardT[5] == "X")):
print("X won")
state = True
return state
elif((boardT[3] == "O") and (boardT[4] == "O") and (boardT[5] == "O")):
print("O won")
state = True
return state
#middle wins
elif((boardT[1] == "X") and (boardT[4] == "X") and (boardT[7] == "X")):
print("X won")
state = True
return state
elif((boardT[1] == "O") and (boardT[4] == "O") and (boardT[7] == "O")):
print("O won")
state = True
return state
#right wins
elif((boardT[2] == "X") and (boardT[5] == "X") and (boardT[8] == "X")):
print("X won")
state = True
return state
elif((boardT[2] == "O") and (boardT[5] == "O") and (boardT[8] == "O")):
print("O won")
state = True
return state
#---------------------------------------------------------------------------
#diagonal wins
#left top bottom down
elif((boardT[0] == "X") and (boardT[4] == "X") and (boardT[8] == "X")):
print("X won")
state = True
return state
elif((boardT[0] == "O") and (boardT[4] == "O") and (boardT[8] == "O")):
print("O won")
state = True
return state
#bottom left top right
elif((boardT[6] == "X") and (boardT[4] == "X") and (boardT[2] == "X")):
print("X won")
state = True
return state
elif((boardT[6] == "O") and (boardT[4] == "O") and (boardT[2] == "O")):
print("O won")
state = True
return state
#--------------------------------------------------------------------------
else:
print("no one won")
print((boardT[0] == "X") , " " , (boardT[1] == "X") , " " , (boardT[2] == "X"))
return False
# 3 lists
boardT = ['_','_','_',
'_','_','_',
'_','_','_']
#boardT = ['_0_','_1_','_2_',
# '_3_','_4_','_5_',
# _6_','_7_','_8_'
def board_print():
print("|"+boardT[0]+"|"+boardT[1]+"|"+boardT[2]+"|")
print("|"+boardT[3]+"|"+boardT[4]+"|"+boardT[5]+"|")
print("|"+boardT[6]+"|"+boardT[7]+"|"+boardT[8]+"|")
win =(False)
def choose_X(x_entry):
while(win == False):
board_print()
while True:
try:
x_entry = int(input("which space you want? (first)" ))
if((x_entry >= 0) and (x_entry<= 8) and (boardT[x_entry] !="X") and (boardT[x_entry] !="O")):
boardT[x_entry]=("X")
win_clause(boardT)
break
elif(boardT[x_entry] == "X" or boardT[x_entry] == "O"):
print("space already held try again")
x_entry = int(input("which space you want?(second)" ))
continue
except:
print("invalid entry try again")
continue
board_print()
while True:
try:
O_entry = int(input("which space you want? (first)" ))
if((O_entry >= 0) and (O_entry<= 8) and (boardT[O_entry] !="X") and (boardT[O_entry] !="O")):
boardT[O_entry]=("O")
win_clause(boardT)
break
elif(boardT[O_entry] == "X" or boardT[O_entry] == "O"):
print("space already held try again")
O_entry = int(input("which space you want?(second)" ))
continue
except:
print("invalid entry try again")
continue
board_print()
print()
print()
| true |
c89ef12fa6c269c773f816a0a3cd06a88121c09e | Python | Namenaro/iter12-mnist | /show_motif.py | UTF-8 | 3,631 | 2.703125 | 3 | [] | no_license | import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from motif import *
from logger import *
from sensors import *
from create_motif import *
from init_motif import *
from data import *
def visualise_on_pic(motif, pic, desired_num_of_full_sprouts, logger):
# росток это последовательность записей вида "нода, фактическая координата гипотезы"
dict_coords_sprouts = motif.get_sprouts_for_all_pic(pic, desired_num_of_full_sprouts)
# рисуем все точки, из которых удалось вырастатить ростки успешно
fig, ax = plt.subplots()
ax.imshow(pic, cmap='gray_r')
for coord_pair in dict_coords_sprouts.keys():
x = coord_pair[0]
y= coord_pair[1]
ax.scatter(x, y, s=100, c='red', marker='o', alpha=0.4)
logger.add_fig(fig)
# рисуем все ростки, из всех точек
fig, ax = plt.subplots()
for key in dict_coords_sprouts.keys():
sprouts_from_point = dict_coords_sprouts[key]
for sprout in sprouts_from_point:
plot_sprout_with_radiuses(sprout, pic, ax)
logger.add_fig(fig)
def plot_sprout(sprout, pic, ax):
ax.imshow(pic, cmap='gray_r')
X=[]
Y=[]
for triple in sprout:
x= triple[1]
y= triple[2]
X.append(x)
Y.append(y)
ax.plot(X,Y, 'o-')
def plot_sprout_with_radiuses(sprout, pic, ax):
ax.imshow(pic, cmap='gray_r')
X=[]
Y=[]
for triple in sprout:
x= triple[1]
y= triple[2]
X.append(x)
Y.append(y)
u_radius = triple[0].experiment.u_radius
UX, UY = get_coords_less_or_eq_raduis(x, y, u_radius)
plt.scatter(UX, UY, s=100, c='blue', marker='o', alpha=0.4)
# sensor_field_radius = triple[0].experiment.sensor_field_radius
# sensX, sensY = get_coords_less_or_eq_raduis(x, y, sensor_field_radius)
# plt.scatter(sensX, sensY, s=100, c="#308040", marker='*', alpha=0.8)
ax.plot(X,Y, 'o-')
def ONE_PIC_EXP():
logger = HtmlLogger("EX2")
motif,_ = motif_from_json("motif3.json")
# motif = init_motif_handly()
pic = etalons_of3()[0]
desired_num_of_full_sprouts = 2
visualise_on_pic(motif, pic, desired_num_of_full_sprouts, logger)
logger.close()
def MANY_PIC_EXP():
logger = HtmlLogger("EX3")
motif,_ = motif_from_json("simplest.motif")
desired_num_of_full_sprouts = 3
pics = etalons_of3()
for pic in pics:
dict_coords_sprouts = motif.get_sprouts_for_all_pic(pic, desired_num_of_full_sprouts)
# рисуем все ростки, из всех точек
fig, ax = plt.subplots()
for key in dict_coords_sprouts.keys():
sprouts_from_point = dict_coords_sprouts[key]
for sprout in sprouts_from_point:
plot_sprout_with_radiuses(sprout, pic, ax)
logger.add_fig(fig)
logger.close()
def visualise_motif_on_many_pics(motif, logger):
desired_num_of_full_sprouts = 1
pics = etalons_of3()
for pic in pics:
dict_coords_sprouts = motif.get_sprouts_for_all_pic(pic, desired_num_of_full_sprouts)
# рисуем все ростки, из всех точек
fig, ax = plt.subplots()
for key in dict_coords_sprouts.keys():
sprouts_from_point = dict_coords_sprouts[key]
for sprout in sprouts_from_point:
plot_sprout_with_radiuses(sprout, pic, ax)
logger.add_fig(fig)
if __name__ == "__main__":
MANY_PIC_EXP()
| true |
9b0aa681a0446c64e3656d94917840e35033b0ce | Python | pjputzel/dyn_surv_global_with_shifts | /src/loss/WeibullLogProbCalculatorDeltaIJ.py | UTF-8 | 2,099 | 2.546875 | 3 | [] | no_license | from loss.DeltaIJBaseLogProbCalculator import DeltaIJBaseLogProbCalculator
import torch
import torch.nn as nn
class WeibullLogProbCalculatorDeltaIJ(DeltaIJBaseLogProbCalculator):
def compute_shifted_times(self, deltas, batch, eps=1e-20):
#deltas.register_hook(print_grad)
# deltas.register_hook(clamp_grad)
shifted_event_times = batch.event_times.unsqueeze(1) + deltas.squeeze(2)
shifted_cov_times = batch.cov_times + deltas.squeeze(2)
# prevent numerical issues with gradients
shifted_event_times = shifted_event_times + eps
shifted_cov_times = shifted_cov_times + eps
#deltas.register_hook(print_grad)
#print(deltas, 'deltas')
#print(torch.max(batch.cov_times, dim=1)[0])
#print(shifted_cov_times, 'shifted cov time')
return shifted_event_times, shifted_cov_times
def compute_logpdf(self, shifted_event_times, global_theta):
# global_theta.register_hook(print_grad)
# print(global_theta, 'global theta')
# global_theta.register_hook(clamp_grad)
scale = global_theta[0]
shape = global_theta[1]
logpdf = \
torch.log(shape) - torch.log(scale) + \
(shape - 1) * (torch.log(shifted_event_times) - torch.log(scale)) - \
(shifted_event_times/scale)**(shape)
return logpdf
def compute_logsurv(self, shifted_event_times, global_theta):
#global_theta.register_hook(clamp_grad)
scale = global_theta[0]
shape = global_theta[1]
return -(shifted_event_times/scale)**shape
def compute_lognormalization(self, shifted_cov_times, global_theta):
scale = global_theta[0]
shape = global_theta[1]
logsurv = -(shifted_cov_times/scale)**(shape)
return logsurv
def print_grad(grad):
print(grad, torch.sum(torch.isnan(grad)))
def clamp_grad(grad, thresh=5.):
grad[grad > float(thresh)] = thresh
# for beta which goes to positive infinity
grad[torch.isnan(grad)] = -thresh
grad[grad < float(-thresh)] = -thresh
| true |
8e137cdce8f38dcf750236f3d0134ed155b8080c | Python | bsimps01/Herd-Immunity-Simulation | /virus.py | UTF-8 | 1,028 | 3.390625 | 3 | [] | no_license | class Virus(object):
'''Properties and attributes of the virus used in Simulation.'''
def __init__(self, name, repro_rate, mortality_rate):
self.name = name
self.repro_rate = repro_rate
self.mortality_rate = mortality_rate
def test_virus_instantiation():
#TODO: Create your own test that models the virus you are working with
'''Check to make sure that the virus instantiator is working.'''
virus = Virus("HIV", 0.8, 0.3)
assert virus.name == "HIV"
assert virus.repro_rate == 0.8
assert virus.mortality_rate == 0.3
def virus_sars(self):
virus = Virus("Sars", 0.75, 0.6)
virus.name == "Sars"
virus.repro_rate == 0.75
virus.mortality_rate == 0.6
def virus_flu(self):
virus = Virus("Flu", 0.6, 0.5)
virus.name == "Flu"
virus.repro_rate == 0.6
virus.mortality_rate == 0.5
def virus_whooping_cough(self):
virus = Virus("Whooping Cough", 0.7, 0.25)
virus.name == "Whooping Cough"
virus.repro_rate == 0.7
virus.mortality_rate == 0.25 | true |
6db2e8b30d50d13fbfd1b6ec32653238bba7596f | Python | Desklop/ipapy | /ipapy/tests/test_kirshenbaummapper.py | UTF-8 | 5,254 | 2.734375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# coding=utf-8
import unittest
from ipapy.kirshenbaummapper import KirshenbaumMapper
from ipapy.ipastring import IPAString
class TestKirshenbaumMapper(unittest.TestCase):
def test_can_map_ipa_string(self):
mapper = KirshenbaumMapper()
values = [
(u"", True),
(u"foo", True),
(u"\u0070\u032A", True),
(u"\u025F", True),
(u"\u0294", True),
(u"foo\u025F\u0294", True),
(u"fo\u02C8o\u025F\u0294", True),
(u"foo bar", True),
(u"\u0261\u0067", True),
(u"ma\u0272ana", True),
(u"\u02A3", True),
(u"\u02A7", True),
(u"\u1DC6", False), # valid IPA char, unmapped in Kirshenbaum
(u"foo\u1DC6bar", False), # valid IPA char, unmapped in Kirshenbaum
]
for v, e in values:
self.assertEqual(mapper.can_map_ipa_string(IPAString(unicode_string=v)), e)
def test_map_unicode_string(self):
mapper = KirshenbaumMapper()
values = [
(None, None),
(u"", u""),
(u"foo", u"foo"),
(u"\u0070\u032A", u"p["),
(u"\u025F", u"J"),
(u"\u0294", u"?"),
(u"foo\u025F\u0294", u"fooJ?"),
(u"fo\u02C8o\u025F\u0294", u"fo'oJ?"),
(u"foo bar", u"foo#bar<trl>"),
(u"\u0261\u0067", u"gg"),
(u"ma\u0272ana", u"man^ana"),
(u"\u02A3", u"dz"),
(u"\u02A7", u"tS"),
]
for v, e in values:
self.assertEqual(mapper.map_unicode_string(v), e)
def test_map_ipa_string(self):
mapper = KirshenbaumMapper()
values = [
(u"", u""),
(u"foo", u"foo"),
(u"\u0070\u032A", u"p["),
(u"\u025F", u"J"),
(u"\u0294", u"?"),
(u"foo\u025F\u0294", u"fooJ?"),
(u"fo\u02C8o\u025F\u0294", u"fo'oJ?"),
(u"foo bar", u"foo#bar<trl>"),
(u"\u0261\u0067", u"gg"),
(u"ma\u0272ana", u"man^ana"),
(u"\u02A3", u"dz"),
(u"\u02A7", u"tS"),
]
for v, e in values:
self.assertEqual(mapper.map_ipa_string(IPAString(unicode_string=v)), e)
def test_map_unicode_string_ignore(self):
mapper = KirshenbaumMapper()
values = [
(None, None),
(u"", u""),
(u"foo", u"foo"),
(u"\u0070\u032A", u"p["),
(u"\u025F", u"J"),
(u"\u0294", u"?"),
(u"foo\u025F\u0294", u"fooJ?"),
(u"fo\u02C8o\u025F\u0294", u"fo'oJ?"),
(u"foo bar", u"foo#bar<trl>"),
(u"\u0261\u0067", u"gg"),
(u"ma\u0272ana", u"man^ana"),
(u"\u02A3", u"dz"),
(u"\u02A7", u"tS"),
(u"L", u""),
(u"foo", u"foo"),
(u"\u0070\u032AL", u"p["),
(u"L\u025FM", u"J"),
(u"L\u0294M", u"?"),
(u"fLoo\u025F\u0294M", u"fooJ?"),
(u"fo\u02C8oL\u025F\u0294M", u"fo'oJ?"),
(u"fooL MbarN", u"foo#bar<trl>"),
(u"\u0261L\u0067", u"gg"),
(u"mLa\u0272Mana", u"man^ana"),
(u"L\u02A3", u"dz"),
(u"\u02A7M", u"tS"),
]
for v, e in values:
self.assertEqual(mapper.map_unicode_string(v, ignore=True), e)
def test_map_ipa_string_ignore(self):
mapper = KirshenbaumMapper()
values = [
(u"", u""),
(u"foo", u"foo"),
(u"\u0070\u032A", u"p["),
(u"\u025F", u"J"),
(u"\u0294", u"?"),
(u"foo\u025F\u0294", u"fooJ?"),
(u"fo\u02C8o\u025F\u0294", u"fo'oJ?"),
(u"foo bar", u"foo#bar<trl>"),
(u"\u0261\u0067", u"gg"),
(u"ma\u0272ana", u"man^ana"),
(u"\u02A3", u"dz"),
(u"\u02A7", u"tS"),
(u"L", u""),
(u"foo", u"foo"),
(u"\u0070\u032AL", u"p["),
(u"L\u025FM", u"J"),
(u"L\u0294M", u"?"),
(u"fLoo\u025F\u0294M", u"fooJ?"),
(u"fo\u02C8oL\u025F\u0294M", u"fo'oJ?"),
(u"fooL MbarN", u"foo#bar<trl>"),
(u"\u0261L\u0067", u"gg"),
(u"mLa\u0272Mana", u"man^ana"),
(u"L\u02A3", u"dz"),
(u"\u02A7M", u"tS"),
]
for v, e in values:
self.assertEqual(mapper.map_ipa_string(IPAString(unicode_string=v, ignore=True), ignore=True), e)
def test_map_unicode_string_single(self):
mapper = KirshenbaumMapper()
values = [
(None, None),
(u"", u""),
(u"foo", u"foo"),
(u"\u0070\u032A", u"p["),
(u"\u025F", u"J"),
(u"\u0294", u"?"),
(u"foo\u025F\u0294", u"fooJ?"),
(u"fo\u02C8o\u025F\u0294", u"fo'oJ?"),
(u"foo bar", u"foo#bar<trl>"),
(u"\u0261\u0067", u"gg"),
(u"ma\u0272ana", u"man^ana"),
(u"\u02A3", u"dz"),
(u"\u02A7", u"tS"),
]
for v, e in values:
self.assertEqual(mapper.map_unicode_string(v, single_char_parsing=True), e)
| true |
91715956eb35dfe2f6d612d5537330e0e8a4144a | Python | EricLee523/VehicleSteeringDirection | /test.py | UTF-8 | 2,630 | 3.015625 | 3 | [] | no_license | import pickle
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
file=open('trajectories.pickle','rb')
a_dict1=pickle.load(file)
df = pd.DataFrame(a_dict1)
sli=df.loc[ :,['pos_x','pos_y']]
plt.figure('trajectories')
for i in range(100):
posx=sli.iloc[i,0]
posy=sli.iloc[i,1]
# first point
preX1 = sli.iloc[i, 0][0]
preY1 = sli.iloc[i, 1][0]
# second point
preX2 = sli.iloc[i, 0][2]
preY2 = sli.iloc[i, 1][2]
size = len(sli.iloc[i, 0])
# thrid point
postX1 = sli.iloc[i, 0][size - 3]
postY1 = sli.iloc[i, 1][size - 3]
# last point
postX2 = sli.iloc[i, 0][-1]
postY2 = sli.iloc[i, 1][-1]
#转换成从0到360连续的角度
preAngle = math.atan2(preY2-preY1, preX2-preX1)
preAngle = 180 - math.degrees(preAngle)
postAngle = math.atan2(postY2-postY1, postX2-postX1)
postAngle = 180 - math.degrees(postAngle)
#直行
if (math.fabs(preAngle-postAngle) < 45 or math.fabs(360 - math.fabs(preAngle-postAngle))<45):
print("直行")
plt.xlim(-30, 35)
plt.ylim(-25, 35)
plt.xlabel('position x')
plt.ylabel('position y')
new_ticks1 = np.linspace(-30, 35, 14)
plt.xticks(new_ticks1)
new_ticks2 = np.linspace(-25, 35, 13)
plt.yticks(new_ticks2)
plt.plot(posx, posy, 'b-')
#右转
if (45 < postAngle - preAngle < 162 or 45 < 360 - preAngle + postAngle < 162):
print("右转")
plt.xlim(-30,35)
plt.ylim(-25,35)
plt.xlabel('position x')
plt.ylabel('position y')
new_ticks1=np.linspace(-30,35,14)
plt.xticks(new_ticks1)
new_ticks2=np.linspace(-25,35,13)
plt.yticks(new_ticks2)
plt.plot(posx,posy,'r-')
#左转
if (45 < preAngle - postAngle < 162 or 45 < 360 - postAngle + preAngle < 162):
print("左转")
plt.xlim(-30, 35)
plt.ylim(-25, 35)
plt.xlabel('position x')
plt.ylabel('position y')
new_ticks1 = np.linspace(-30, 35, 14)
plt.xticks(new_ticks1)
new_ticks2 = np.linspace(-25, 35, 13)
plt.yticks(new_ticks2)
plt.plot(posx, posy, 'g-')
# 掉头
if (math.fabs(math.fabs(preAngle - postAngle) - 180) < 18 ):
print("左转")
plt.xlim(-30, 35)
plt.ylim(-25, 35)
plt.xlabel('position x')
plt.ylabel('position y')
new_ticks1 = np.linspace(-30, 35, 14)
plt.xticks(new_ticks1)
new_ticks2 = np.linspace(-25, 35, 13)
plt.yticks(new_ticks2)
plt.plot(posx, posy, 'k-')
plt.show()
| true |
14190abadc81cced1376ea7a16eb00b7ea9f2920 | Python | green-fox-academy/FarkasLaszlo | /week-04/day-03/Counter.py | UTF-8 | 156 | 3.3125 | 3 | [] | no_license | def counter(string):
lista = []
for i in range(len(string)):
lista.append(string[i:i + 1])
return {x: lista.count(x) for x in lista}
| true |
10c73b61b2d06d2f3d53916a478712efec025731 | Python | dixonaws/kafka_etl | /generate_records.py | UTF-8 | 1,705 | 3.15625 | 3 | [] | no_license | from record import Record
import csv
import sys
import argparse
import os
def main():
parser=argparse.ArgumentParser(description="Specify the number of records to generate and the filename to write, e.g. generate_records.py 1000 my_csv_file.csv")
parser.add_argument("Records", help="The number of records to write (integer)")
parser.add_argument("Filename", help="The name of the file to write; if this file exists, then it will be overwritten.")
args=parser.parse_args()
int_records_to_put = int(args.Records)
str_filename=str(args.Filename)
list_records=[]
file_employ_data=open(str_filename, "w")
sys.stdout.write("Generating " + str(int_records_to_put) + " records... ")
for dict_record in Record().generate(int_records_to_put): # generate int_users_to_put users
# remove the newline from the address field
str_address=dict_record["address"]
str_corrected_address=str_address.replace("\n", ", ")
dict_record["address"]=str_corrected_address
print(dict_record)
list_records.append(dict_record)
print("done, list_records contains " + str(len(list_records)) + " records." )
sys.stdout.write("Writing " + str_filename + " in CSV format... ")
csvwriter=csv.writer(file_employ_data)
# write each record in the list
int_counter=0
for record in list_records:
# if we are writing the first record, write the header row
if(int_counter==0):
csvwriter.writerow(record.keys())
csvwriter.writerow(record.values())
int_counter=int_counter+1
print("done, " + str(get_file_size_bytes(str_filename)/1024) + " kB written")
file_employ_data.close()
def get_file_size_bytes(str_a_filename):
statinfo=os.stat(str_a_filename)
return(int(statinfo.st_size))
main()
| true |
1c9d751d83b8249152cf4ace0cd6a62e4cb65d82 | Python | VLD62/PythonFundamentals | /00.EXAMS/11August2018/Problem 1 – Gladiator Expenses.py | UTF-8 | 697 | 3.8125 | 4 | [] | no_license | if __name__ == "__main__":
lost_fights_count = int(input())
helmet_price = float(input())
sword_price = float(input())
shield_price = float(input())
armor_price = float(input())
shield_broken = 0
total_expenses = 0
for n in range(1, lost_fights_count+1):
if n % 2 == 0:
total_expenses += helmet_price
if n % 3 == 0:
total_expenses += sword_price
if n % 2 == 0 and n % 3 == 0:
total_expenses += shield_price
shield_broken += 1
if shield_broken == 2:
total_expenses += armor_price
shield_broken = 0
print(f'Gladiator expenses: {total_expenses:.2f} aureus') | true |
023d3ca764cf1b41f9c766c05c0c6d84ae72a7b6 | Python | Guilherme-Schwann/Listas-de-Exercicios-UFV-CCF-110 | /Lista05/ex002.py | UTF-8 | 409 | 3.59375 | 4 | [
"MIT"
] | permissive | saldo = float(input('Insira seu saldo médio do último ano: '))
if 0 < saldo <= 500:
print(f'Saldo médio: {saldo:.2f}\nCrédito: Nenhum crédito.')
exit()
elif 501 < saldo <= 1000:
cred = (30 / 100) * saldo
elif 1001 < saldo <= 3000:
cred = (40 / 100) * saldo
elif 3001 <= saldo:
cred = (50 / 100) * saldo
cred += cred * (2/100)
print(f'Saldo médio: {saldo:.2f}\nCrédito: {cred:.2f}')
| true |
c0315cdbdca6a370875e00cb7e18af72fd04eb4b | Python | mkp-in/codefights-py | /intro/level2/shape_area.py | UTF-8 | 76 | 3.234375 | 3 | [] | no_license | def shapeArea(n):
return (n ** 2) + ((n-1) ** 2)
print(shapeArea(3))
| true |
5659d077ec7d362421ebc156cbe9054ab3a64b7d | Python | dinysen/python-learning | /(5)函数式编程/5_偏函数.py | UTF-8 | 705 | 3.5 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding : utf-8 -*-
#偏函数
import functools
int2 = functools.partial(int,base=2);
int_test = int2("01000000");
print(str(int_test));
#functools.partial的作用就是,把一个函数的某些参数给固定住(也就是设置默认值),返回一个新的函数
#已经固定住的参数,也可以在函数调用时传入其他值
int_test_2 = int2("01000000",base=10);
print(str(int_test_2));
#创建偏函数,其实就是接收 函数对象、*args、**kw 三个参数
int2 = functools.partial(int,base=2);#遇到这种就是关键字参数固定
=> int(XX,{base:2});
int3 = functools.partial(int,10);#遇到这种就是移到可变参数第一位
=> int(10,XX); | true |
fa9e53a978b830475a0f867677d65e1a29c4a097 | Python | bql20000/GMM_Ace_Challenge | /main.py | UTF-8 | 7,114 | 2.71875 | 3 | [] | no_license | import numpy as np
from scipy.io import loadmat
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import Visualization
import Optimize_nCluster
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from sklearn.datasets import make_spd_matrix
# todo part1: load data
data = loadmat('cardio.mat')
X = np.array(data['X'])
y = data['y']
X_train = X[np.where(y == 0)[0]]
X_test = X[np.where(y == 1)[0]]
# Dimensionarity reduction: maintain 95% of original information. The result data shape is (1655, 13).
# Reason: To avoid overflow calculation
pca = PCA(0.95, whiten=True, random_state=0)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
print(X_test.shape)
# todo part2: Building my GMM
# part2.1: init parameters and variances: nData, nDimension, nClusters, means (mu), covariances (Sigma), priors (phi), logLikelihood
nData = X_train.shape[0]
nDimension = X_train.shape[1]
# Applying BIC to find optimal nCluster: 7
# nCluster = Optimize_nCluster.optimal_number_of_components(X_train)
nCluster = 7
# Applying K-means to initialize parameters
kmeans = KMeans(n_clusters=nCluster, random_state=0).fit(X_train)
means = kmeans.cluster_centers_ # init mu
priors = np.zeros(nCluster)
covariances = np.zeros((nCluster, nDimension, nDimension)) # using "full" covariance_type
for k in range(nCluster):
Xk = X_train[np.where(kmeans.labels_ == k)[0]]
priors[k] = float(Xk.shape[0]) / nData
if np.size(Xk):
covariances[k] = np.cov(Xk.T) #Initialzie covariance matrices via points in each KMeans-cluster
else:
covariances[k] = np.cov(X_train.T)
# part2.2: Expectation-Maximization
def calculate_probability_density(X, means, covariances):
probability_density = np.zeros((nData, nCluster))
for i in range(X.shape[0]):
for k in range(nCluster):
vector_2d = np.reshape((X[i] - means[k]), (nDimension, 1))
a = np.exp(-0.5 * np.dot(np.dot(vector_2d.T, np.linalg.inv(covariances[k])), vector_2d)[0][0])
b = np.sqrt(np.power(2 * np.pi, nDimension) * np.linalg.det(covariances[k]))
#if (i == 0 and k == 0) : print(np.linalg.det(covariances[0]))
#print(i, np.power(2 * np.pi, nDimension) * np.linalg.det(covariances[k]))
#print(i, np.linalg.det(covariances[k]))
probability_density[i][k] = a / b
return probability_density
def calculate_probability_matrix(X, probability_density, priors):
probability_matrix = np.zeros((nData, nCluster))
for i in range(X.shape[0]):
px = 0
for k in range(nCluster):
px += priors[k] * probability_density[i][k]
for k in range(nCluster):
if (px == 0):
probability_matrix[i][k] = 0;
else:
probability_matrix[i][k] = priors[k] * probability_density[i][k] / px
return probability_matrix
def calculate_log_likelihood(probability_density, priors):
log_likelihood = 0
for i in range(nData):
px = 0
for k in range(nCluster):
px += priors[k] * probability_density[i][k]
if (px != 0): log_likelihood += np.log(px)
return log_likelihood
def calculate_means(X, probability_matrix):
means_new = np.zeros_like(means)
for k in range(nCluster):
Nk = 0
for i in range(nData):
Nk += probability_matrix[i][k]
for i in range(nData):
means_new[k] += probability_matrix[i][k] * X[i] / Nk
return means_new
def calculate_covariances(X, probability_matrix, means):
covariances_new = np.zeros_like(covariances)
for k in range(nCluster):
Nk = 0;
for i in range(nData):
Nk += probability_matrix[i][k]
for i in range(nData):
vector_2d = np.reshape((X[i] - means[k]), (nDimension, 1))
covariances_new[k] += probability_matrix[i][k] * np.dot(vector_2d, vector_2d.T)
#print(Nk, np.linalg.det(covariances_new[k]))
covariances_new[k] /= Nk
#print(probability_density[0][0])
return covariances_new
def calculate_priors(X, probability_matrix):
priors_new = np.zeros_like(priors)
for k in range(nCluster):
Nk = 0;
for i in range(nData):
Nk += probability_matrix[i][k]
priors_new[k] = Nk / nData
#print(probability_matrix[1123][0]); print("dm")
return priors_new
convergenceCriterion = 0.1
preLogLikelihood = 0
curLogLikelihood = 0
count = 0
while (True):
count += 1
print(count)
# E step
probability_density = calculate_probability_density(X_train, means, covariances)
probability_matrix = calculate_probability_matrix(X_train, probability_density, priors)
# M step
means = calculate_means(X_train, probability_matrix)
covariances = calculate_covariances(X_train, probability_matrix, means)
priors = calculate_priors(X_train, probability_matrix)
# Convergence evaluation
preLogLikelihood = curLogLikelihood
curLogLikelihood = calculate_log_likelihood(probability_density, priors)
print(curLogLikelihood / nData)
if curLogLikelihood - preLogLikelihood < convergenceCriterion and count > 30: break
#todo part3: Compare with sklearn
from sklearn.mixture import GaussianMixture
gmm = GaussianMixture(n_components=nCluster, covariance_type='full', random_state=0)
gmm.fit(X_train)
print("Score found by my code: ", curLogLikelihood / nData)
print("Score found by sklearn: ", gmm.score(X_train))
#todo part3+ visualize samples with color in 3D
clt = [np.argmax(probability_matrix[i]) for i in range(nData)] #the i sample belongs to cluster clt[i]
color_list = ['b', 'r', 'g', 'y', 'k', 'm', 'c'] #color for 7 clusters
X_visualize = X[np.where(y == 0)[0]]
pca2 = PCA(3, whiten=True, random_state=0)
X_visualize = pca2.fit_transform(X_visualize)
x = X_visualize[:, 0]
y = X_visualize[:, 1]
z = X_visualize[:, 2]
ax = plt.axes(projection='3d')
#plotting all normal samples with different colors
#for i in range(nData):
# ax.scatter3D(x[i], y[i], z[i], color=color_list[clt[i]])
#plt.show()
# todo part4: Anomaly detection
# calculate threshold = min probability among normal samples
prob = np.zeros((nData))
for i in range(nData):
for k in range(nCluster):
prob[i] += priors[k] * probability_density[i][k]
threshold = min(prob)
# calculate probability of suspect samples
probability_density_test = calculate_probability_density(X_test, means, covariances)
nTest = X_test.shape[0]
predict = np.zeros(nTest)
anomaly = np.zeros(nTest, dtype='bool')
anomaly_counter = 0
for i in range(nTest):
for k in range(nCluster):
predict[i] += priors[k] * probability_density_test[i][k]
if (predict[i] < threshold):
anomaly[i] = True
anomaly_counter += 1
#plotting to visualize
for i in range(nData):
ax.scatter3D(x[i], y[i], z[i], color='g') #x,y,z from part3+
for i in range(nTest):
if (anomaly[i]):
ax.scatter3D(X_test[i][0], X_test[i][1], X_test[i][2], color='k')
print("Number of anomaly detected: ", anomaly_counter)
plt.show() | true |
5753a59fbf980165774551fdeef1856279361d27 | Python | gaustin/Walk-Score-Coding-Challenge | /digraphing/digraph.py | UTF-8 | 1,680 | 3.5625 | 4 | [] | no_license | class Digraph:
def __init__(self):
self.graph = dict()
def items(self):
return self.graph.items()
def addArc(self, fromNode, toNode):
if not self.graph.has_key(fromNode):
self.graph[fromNode] = set()
if not self.graph.has_key(toNode):
self.graph[toNode] = set()
self.graph[fromNode].add(toNode)
def removeArc(self, fromNode, toNode):
if not self.graph.has_key(fromNode) or not self.graph.has_key(toNode):
raise NodeError("A given node does not exist in the graph.")
# Delete the arc
self.graph[fromNode].discard(toNode)
def removeNode(self, node, cascade=False):
if self.graph.has_key(node):
del self.graph[node]
def inArcs(self, targetNode):
# Return all of the arcs pointing at this node
inArcs = set()
for node, outArcs in self.graph.items():
if targetNode in outArcs:
inArcs.add(node)
return inArcs
def outArcs(self, targetNode):
if self.graph.has_key(targetNode):
return self.graph[targetNode]
else:
return set()
def has_arc(self, fromNode, toNode):
if not self.graph.has_key(fromNode):
return False
return toNode in self.graph[fromNode]
def adjacent(self, aNode, bNode):
if bNode in self.graph[aNode] or aNode in self.graph[bNode]:
return True
else:
return False
def nodeCount(self):
return len(self.graph.keys())
class NodeError(Exception):
def __init__(self, message):
self.message = message
| true |
474c6e1eff4d7be5bea0590fe70b614bccfe1232 | Python | igortereshchenko/amis_python | /km72/Burova_Anastasiya/3/task1.py | UTF-8 | 124 | 3.359375 | 3 | [] | no_license | num1=float(input("number1= "))
num2=float(input("number2= "))
num3=float(input("number3= "))
a=num1+num2+num3
print(a)
| true |
f2484ab2c165cedfad30a521b73e0190fbc317c0 | Python | dairof7/holbertonschool-higher_level_programming | /0x0C-python-almost_a_circle/tests/test_models/test_base.py | UTF-8 | 616 | 2.796875 | 3 | [] | no_license | #!/usr/bin/python3
"""
Unittest for Base class
"""
import unittest
import os
import json
from models.base import Base
from models.rectangle import Rectangle
from models.square import Square
class Test_Base(unittest.TestCase):
"""Base tests"""
def setUp(self):
"""setup of unittest"""
Base._Base__nb_objects = 0
def test_00(self):
"""Test - id int"""
b0 = Base(8)
b1 = Base(-7)
b2 = Base()
b3 = Base()
self.assertEqual(b0.id, 8)
self.assertEqual(b1.id, -7)
self.assertEqual(b2.id, 1)
self.assertEqual(b3.id, 2)
| true |
b96b9f0978e7c842ec74ba8714ec9c4aae770ec5 | Python | yining0713/CS221-Language-Classification | /scripts/sklearn_test.py | UTF-8 | 1,560 | 2.78125 | 3 | [] | no_license | #from sklearn.datasets import fetch_mldata
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import pandas as pd
import numpy as np
from Log import Log
import os
import sys
topdir = os.path.split(os.path.split(os.path.realpath(sys.argv[0]))[0])[0]
logfile = os.path.join(topdir, 'log/logistic_training.log')
logging = Log(logfile)
#Training
training_features = os.path.join(topdir, 'log/timeseries_training_value.csv')
training_labels = os.path.join(topdir, 'log/timeseries_training_target.csv')
# Training
df_numpy = np.genfromtxt(training_features, delimiter=",")
y_numpy = np.genfromtxt(training_labels, delimiter=",")
train_feature, test_feature, train_label, test_label = train_test_split(
df_numpy, y_numpy, test_size=1/10.0, random_state=122)
scaler = StandardScaler()
# Fit on training set only.
scaler.fit(train_feature)
# Apply transform to both the training set and the test set.
train_feature = scaler.transform(train_feature)
test_feature = scaler.transform(test_feature)
model = LogisticRegression(solver = 'lbfgs', max_iter=200)
model.fit(train_feature, train_label)
# use the model to make predictions with the test data
y_pred = model.predict(test_feature)
# how did our model perform?
count_misclassified = (test_label != y_pred).sum()
print('Misclassified samples: {}'.format(count_misclassified))
accuracy = metrics.accuracy_score(test_label, y_pred)
print('Accuracy: {:.2f}'.format(accuracy)) | true |
f80de75727869048e5e0411a9fceae7384e16e9c | Python | Abarna13/Py_script | /Dictionary/Create.py | UTF-8 | 64 | 2.828125 | 3 | [] | no_license | a = {1:"one",2:"two"}
print("To create a dictionary")
print(a) | true |
0abc326579ddcc21de5a64cf0b08b61a5222f045 | Python | ZAKERR/-16-python- | /软件第四次作业/软件162/2016021185吴明洋/asdsaad.py | UTF-8 | 321 | 3.953125 | 4 | [] | no_license | i = int(input("请选择运算类型:(1代表加法 2代表减法 3代表乘法 4代表除法)"))
a1 = eval(input("请输入第一个数:"))
a2 = eval(input("请输入第二个数:"))
if i == 1:
a1 = a1 + a2
elif i == 2:
a1 = a1 - a2
elif i == 3:
a1 = a1 * a2
else:
a1 = a1 / a2
print(a1)
| true |
38b4310cc40bccf1f6f62a764c6fa2c3a2dbd2ed | Python | jaykaneriya6143/python | /day5/overloading.py | UTF-8 | 247 | 3.34375 | 3 | [] | no_license | class Myclass():
def sum(self, n1,n2):
ans = n1 + n2
print("Ans is :",ans)
def sum(self, n1,n2,n3):
ans = n1 + n2 + n3
print("Ans is :",ans)
j = Myclass()
#j.sum(40,50)
j.sum(20,30,50)
| true |
ae0db61cb7fde8c073f28103bbb0e266550d7e82 | Python | DoaaY98/Mancala-Game-AI-Project | /play_now.py | UTF-8 | 414 | 3.015625 | 3 | [] | no_license | import keyboard
class PlayNow():
def __init__(self, play_now=0):
self.play_now = play_now
def keyPressed(self):
while True:
if keyboard.is_pressed('p'): # if key 'q' is pressed
self.play_now = 1
break # finishing the loop
def set_playNow(self):
self.play_now = 0
def check_playNow(self):
return self.play_now | true |
99b9e015fd69a261d933bfd93520e56e29747edd | Python | clarisli/RL-Easy21 | /mc_prediction.py | UTF-8 | 1,844 | 2.859375 | 3 | [] | no_license | from rl_agent import *
from utils import *
import sys
class MCPrediction(RLAgent):
def __init__(self, environment, N0=100, discount_factor=1, _lambda=0.1):
super().__init__(environment, N0, discount_factor, _lambda)
self.returns_sum = self._init_tenor()
self.returns_count = self._init_tenor()
self.V = self._init_tenor()
def _init_tenor(self):
return np.zeros((self.env.max_dealer_sum + 1, self.env.max_player_sum + 1))
def train(self, num_episodes=1000):
for e in range(num_episodes):
episode = self._generate_episode()
for i, (s, _, _) in enumerate(episode):
first_occurence_idx = next(i for i,x in enumerate(episode) if x[0] == s)
if i == first_occurence_idx:
self.returns_count[s.dealer_sum][s.player_sum] += 1
G = sum([x[2].value*(self.discount_factor**i) for i,x in enumerate(episode[first_occurence_idx:])])
self.returns_sum[s.dealer_sum][s.player_sum] += G
self.V[s.dealer_sum][s.player_sum] = self.returns_sum[s.dealer_sum][s.player_sum] / self.returns_count[s.dealer_sum][s.player_sum]
if e % 1000 == 0:
print("\rEpisode {}/{}.".format(e, num_episodes), end="")
return self.V
def _generate_episode(self):
episode = []
state = self.env.init_state()
while not state.is_terminal:
action = self._policy(state)
next_state, reward = self.env.step(state, Action(action))
episode.append((state, action, reward))
state = next_state
return episode
def _policy(self, state):
if state.player_sum >= 17:
return Action.STICK.value
else:
return Action.HIT.value
# easy21 = Environment()
# mc = MCPrediction(easy21)
# V = mc.train(500000)
# plot_value_function(V, title="500,000 Episodes, MC Prediction")
# easy21 = Environment()
# mc = MCPrediction(easy21)
# V = mc.train(10000)
# plot_value_function(V, title="10,000 Episodes, MC Prediction")
| true |
d43e84094a57566a8632db78c7ba6b4f0e0017d2 | Python | pmaslankowski/rushing-turtles-backend | /tests/test_player.py | UTF-8 | 3,119 | 3.125 | 3 | [] | no_license | import pytest
from rushing_turtles.model.person import Person
from rushing_turtles.model.player import Player
from rushing_turtles.model.turtle import Turtle
from rushing_turtles.model.card import Card
def test_has_card_should_return_false_when_user_has_no_cards(person, turtle):
cards = []
player = Player(person, turtle, cards)
actual = player.has_card(Card(0, 'RED', 'PLUS'))
assert not actual
def test_has_card_should_return_false_when_user_has_other_card(person, turtle):
cards = [Card(1, 'RED', 'MINUS')]
player = Player(person, turtle, cards)
actual = player.has_card(Card(0, 'RED', 'PLUS'))
assert not actual
def test_has_card_should_return_true_when_user_has_given_card(person, turtle):
card = Card(0, 'RED', 'PLUS')
player = Player(person, turtle, [card])
actual = player.has_card(card)
assert actual
def test_has_card_returns_true_when_has_card_in_two_cards(person, turtle):
cards = [Card(0, 'RED', 'PLUS'), Card(1, 'RED', 'MINUS')]
player = Player(person, turtle, cards)
actual = player.has_card(cards[0])
assert actual
def test_add_card_should_add_card_for_player_with_no_cards(person, turtle):
card = Card(0, 'RED', 'PLUS')
player = Player(person, turtle, [])
player.add_card(card)
assert player.has_card(card)
def test_add_card_should_add_card_for_player_with_one_card(person, turtle):
player = Player(person, turtle, [Card(0, 'RED', 'PLUS')])
card = Card(1, 'RED', 'MINUS')
player.add_card(card)
assert player.has_card(card)
def test_new_card_should_be_the_first_of_player_cards(person, turtle):
player = Player(person, turtle, [Card(0, 'RED', 'PLUS')])
card = Card(1, 'RED', 'MINUS')
player.add_card(card)
assert player.cards[0] == card
def test_new_card_should_add_card_for_player_with_two_cards(person, turtle):
cards = [Card(0, 'RED', 'PLUS'), Card(1, 'GREEN', 'MINUS')]
player = Player(person, turtle, cards)
card = Card(2, 'RAINBOW', 'ARROW')
player.add_card(card)
assert player.cards[0] == card
def test_remove_card_should_raise_when_player_has_no_cards(person, turtle):
player = Player(person, turtle, [])
with pytest.raises(ValueError):
player.remove_card(Card(0, 'RED', 'MINUS'))
def test_remove_card_should_raise_when_player_has_other_card(person, turtle):
player = Player(person, turtle, [Card(1, 'RED', 'MINUS')])
with pytest.raises(ValueError):
player.remove_card(Card(0, 'RED', 'PLUS'))
def test_player_should_have_no_cards_after_removing_only_card(person, turtle):
card = Card(0, 'RED', 'PLUS')
player = Player(person, turtle, [card])
player.remove_card(card)
assert not player.cards
def test_player_shouldnt_have_card_after_its_removal(person, turtle):
card = Card(0, 'RED', 'PLUS')
cards = [card, Card(1, 'RED', 'MINUS')]
player = Player(person, turtle, cards)
player.remove_card(card)
assert not player.has_card(card)
@pytest.fixture
def person():
return Person(0, 'Piotr')
@pytest.fixture
def turtle():
return Turtle('RED')
| true |
b5d0daeb66c89b151e9f701a5ab73c75e38c31a8 | Python | PeterPirog/PRT_calculations | /calculate_abc7.py | UTF-8 | 1,382 | 3.046875 | 3 | [
"Apache-2.0"
] | permissive |
#calculation a,b,c coefficient in range 7 from 0.01 C to 660.323 C
# with no uncertainties,
import ITS90
import numpy as np
# STEP 1 - DATA FROM CERTIFICATE
# W(t) values and temperatures from the calibration certificate.
Wt = np.array([3.37543469, 2.56855103, 1.89259628])
t = np.array([660.323, 419.527, 231.928])
Wrt=np.array(list(map(ITS90.calculate_Wr,t)))
deltaW=Wt-Wrt
print('Wr(t)=',Wrt)
print('W(t)=',Wt)
print('W(t)-Wr(t)=',deltaW)
A=ITS90.create_Wt1_array(Wt)
print('A=',A)
B=np.reshape(deltaW,(-1,1))
print('B=',B)
#STEP 2 - CALCULATING a,b,c ITS-90 coefficients
# Least-squares solution of A @ coeff = B via the Moore-Penrose pseudo-inverse.
pinvA=np.linalg.pinv(A)
coeff=np.matmul(pinvA,B)
print('coeff=',coeff)
a=coeff[0][0]
b=coeff[1][0]
c=coeff[2][0]
print('\nCalculated coefficients:\n')
print('a coeff=',a)
print('b coeff=',b)
print('c coeff=',c)
#### STEP 3- TEMPERATURE CALCULATION FROM RESISTANCE MEASURED
#resistancec from measurement
R0=100 #Triple point of water resistance
Ri=[102,189,300] # measured resistances
Ri=np.array(Ri)
# NOTE(review): Wt is reused here, overwriting the certificate data above.
Wt=Ri/R0 # calculating reference function for measured resistances
print('Calculated W(t)=',Wt)
Wt1_array=ITS90.create_Wt1_array(Wt)
Wt=np.reshape(Wt,(-1,1))
print('Wt1_array=',Wt1_array)
print('coeff=',coeff)
# Deviation function W(t) - Wr(t) evaluated with the fitted coefficients.
delta_Wt_Wrt=np.matmul(Wt1_array,coeff)
print('W(r)-Wr(t)=',delta_Wt_Wrt)
Wr=Wt-delta_Wt_Wrt
print('Wr(t)=',Wr)
#Convertt Wr(t) to temperature
t_calculated=np.array(list(map(ITS90.calculate_temp_from_Wr,Wr)))
print('t calculated=',t_calculated)
f15e6c41db751eb95b0db3928ab34fcd7b96124f | Python | colin0000007/seq2seq | /Tasks_nmt/nmtWithBasicSeq2Seq.py | UTF-8 | 3,108 | 2.578125 | 3 | [] | no_license | #encoding=UTF-8
import tensorflow as tf
from seq2seqV2.BasicSeq2seqModel import BasicSeq2SeqModel
#使用beam search必须添加这个导入
from tensorflow.contrib.seq2seq.python.ops import beam_search_ops
from utils.dataPreprocessing import source_seq_list_2_ids
from utils.dataPreprocessing import load_data
import pickle
import numpy as np
'''
神经机器翻译
'''
#模型训练
def train():
    """Train the NMT model: load the parallel corpus, persist its metadata,
    then fit a BasicSeq2SeqModel with the hyperparameters below."""
    source_data_path = "D:\\nlp语料\\机器翻译语料\\chinese.raw.sample.seg.ch_max_len=50.lines=200000.txt"
    target_data_path = "D:\\nlp语料\\机器翻译语料\\english.raw.sample.seg.ch_max_len=50.lines=200000.txt"
    src_encoding = "utf-8"
    tgt_encoding = "gbk"
    source_split_char = " "
    target_split_char = " "
    smwf = 2 # source minimum word frequency
    tmwf = 2 # target minimum word frequency
    batch_size = 50
    epochs = 40
    dataInfoObj, gen = load_data(source_data_path,target_data_path,source_split_char,target_split_char,source_minimum_word_frequency=smwf,target_minimum_word_frequency=tmwf,batch_size=batch_size,epochs=epochs,source_encoding=src_encoding,target_encoding=tgt_encoding)
    # Save dataset metadata so prediction can reuse the same vocabularies.
    f = open("../modelFile/nmt/model2/model.dataInfoObj","wb")
    pickle.dump(dataInfoObj,f)
    f.close()
    # Hyperparameters.
    src_embedding_size = 256
    tgt_embedding_size = 256
    '''
    Whether the encoder is bidirectional.
    Note: with a bidirectional encoder, the encoder RNN num_units becomes half
    of the decoder's so that encoder_states and the decoder input shapes match.
    '''
    is_encoder_bidirectional = True
    rnn_layer_size = 6
    rnn_num_units = 512
    cell_type = "LSTM"
    lr = 0.5
    decoding_method = "beamSearch"
    # Train.  NOTE(review): `end_toekn_id` is the (misspelled) keyword the
    # project API actually expects — do not "fix" it here alone.
    model = BasicSeq2SeqModel(src_vocab_size=dataInfoObj.source_vocab_size,tgt_time_step=dataInfoObj.target_max_len,tgt_vocab_size=dataInfoObj.target_vocab_size,start_token_id=dataInfoObj.target_token_2_id['<s>'],end_toekn_id=dataInfoObj.target_token_2_id['</s>'])
    model.train("../modelFile/nmt/model2/model.ckpt", gen, src_embedding_size, tgt_embedding_size, is_encoder_bidirectional,rnn_layer_size, rnn_num_units, cell_type, lr,decoding_method=decoding_method,beam_width=10)
#模型测试
def test():
    """Load the trained model and print, for each sentence in ./test.txt,
    the source sentence followed by the predicted token sequence."""
    dataInfoObj = pickle.load(open("../modelFile/nmt/model.dataInfoObj","rb"))
    model_path = "../modelFile/nmt/model.ckpt"
    model = BasicSeq2SeqModel(model_path=model_path)
    # Predict.  NOTE(review): `input` shadows the builtin here.
    input = load_test_data()
    source_batch,seq_len = source_seq_list_2_ids(dataInfoObj,input)
    answer_logits = model.predict(source_batch, seq_len)
    print("answer_logits:",answer_logits.shape)
    end_token_id = dataInfoObj.target_token_2_id['</s>']
    # Map predicted ids back to tokens, dropping end-of-sequence markers.
    answer = [[dataInfoObj.target_token_list[index] for index in seq if index != end_token_id] for seq in answer_logits]
    for i in range(len(input)):
        print("".join(input[i])," "," ".join(answer[i]))
def load_test_data():
    """Read whitespace-tokenised test sentences from ./test.txt.

    Returns a list of token lists, one per line.  Uses a context manager so
    the file handle is released even if reading raises (the original left
    the handle open on any exception between open() and close()).
    """
    with open("./test.txt", "r", encoding="utf-8") as f:
        return [line.strip().split(" ") for line in f]
# Script entry point: training is commented out, so running the module
# performs prediction only.
if __name__=="__main__":
    #train()
    test()
| true |
40919f1a09fe6af143c9bcc9a01eb186a8b59333 | Python | wcmaclean/home-repo | /Python_Ruby/analyze_firefox_cache/domains.py | UTF-8 | 1,236 | 2.96875 | 3 | [] | no_license | # domains.py
#
# Will MacLean
# CSPP51060
import subprocess
import sys
import re
# grab coche directory from command-line input
if (len(sys.argv) == 2):
cache_dir = sys.argv[1]
else:
print "Usage: python domains.py <cache filepath>"
sys.exit(0)
# read all filenames in directory
find_files = subprocess.Popen(['ls'], stdout = subprocess.PIPE, cwd = cache_dir)
data = find_files.stdout.read()
file_names = data.split()
# pull out all domain names
re_cache = re.compile('_CACHE_')
re_clean = re.compile('HTTP:(.*).com{1}')
domains_data = []
for file in file_names:
if (re_cache.search(file)):
file_path = cache_dir + file
file_open = open(file_path, "r")
for line in file_open:
if (re_clean.search(line)):
domain = re_clean.search(line)
fields = domain.group().split('/')
domain = fields[2].lstrip('www.')
domains_data.append(domain)
# sort alpha
domains_data.sort()
# sort unique
def unique(s):
    """Return the items of s in first-seen order with duplicates removed.

    Uses a set for membership tests, turning the original O(n^2)
    list-scan into O(n) (items must be hashable, which holds for the
    domain-name strings used here).
    """
    seen = set()
    u = []
    for x in s:
        if x not in seen:
            seen.add(x)
            u.append(x)
    return u
domains_data = unique(domains_data)
# write to output
domains = ''
for value in domains_data:
    # NOTE(review): domains are concatenated with no separator, so the
    # output file runs all names together, while the console print below
    # emits one per line — confirm that is intended.
    domains = domains + value
    print value
# write to file
# save to a file
file_out = open('domains_data', 'w')
file_out.write(domains)
file_out.close()
| true |
c967613d7c4d41d2db5af4c964903d87c5660f3e | Python | RoopeKeto/Predicting-Company-In-News | /NER_on_goodnews.py | UTF-8 | 4,041 | 3.5625 | 4 | [] | no_license | from flair.data import Sentence
from flair.models import SequenceTagger
import pandas as pd
import sys
import itertools
import operator
import os
import csv
# # # Predicting what organization/company is talked about in a news article # # #
# Loading previously scraped good news finland data. (Scraped every news article from the site)
# (see scraper in folder "news_scraper/news_scraper/spiders/news_spider.py")
news_data = pd.read_csv("./news_scraper/goodnews.csv")
# opening with panda for easy manipulation and exploration
# Setting up Named entity recognition model
# loading the NER model, and using it in tagger
# NOTE(review): SequenceTagger.load('ner') downloads the model on first use,
# so this line needs network access the first time it runs.
tagger = SequenceTagger.load('ner')
# creating function for finding the organizations from given sentence
# creating function for finding the organizations from given sentence
def sentence_to_org(sentence):
    """Run the flair NER tagger over `sentence` and return the texts of all
    entities tagged ORG.

    On any tagging failure the exception is only printed and the function
    implicitly returns None; the callers (most_common /
    does_header_contain) tolerate a None argument.
    """
    try:
        sentence_tokenized = Sentence(sentence)
        tagger.predict(sentence_tokenized)
        sentence_dict = sentence_tokenized.to_dict(tag_type='ner')
        org_names = []
        for entity in sentence_dict['entities']:
            if entity['type'] == 'ORG':
                org_names.append(entity['text'])
        return org_names
    except Exception as e:
        print(e)
# helper function for finding the most common organization in the sentence
def most_common(L):
    """Return the most frequent item of L.

    Ties are broken in favour of the item whose first occurrence comes
    earliest in L (same contract as the original sort/groupby version, but
    in a single O(n) pass).  Returns None for an empty or None input.
    Items must be hashable (they are organisation-name strings here).
    """
    if not L:
        return None
    counts = {}
    first_seen = {}
    for idx, item in enumerate(L):
        counts[item] = counts.get(item, 0) + 1
        first_seen.setdefault(item, idx)
    # Highest count wins; on a tie the smaller first-seen index wins.
    return max(counts, key=lambda item: (counts[item], -first_seen[item]))
# helper function for checking if the header contains some of the companies found.
def does_header_contain(header, companies):
    """Return the first entry of `companies` that occurs as a substring of
    `header`; None when the list is empty/None or nothing matches."""
    if not companies:
        return None
    return next((name for name in companies if name in header), None)
# file for predictions
filename = "company_predictons.csv"
# Create the output CSV with a header row on first run only; subsequent
# runs append rows to the existing file.  NOTE(review): "predictons" is a
# typo in the file name, but renaming would orphan previously written data.
if not os.path.isfile(filename):
    header_row = ['link to article', 'header', 'found company tags', 'prediction']
    with open(filename, 'w') as csvFile:
        writer = csv.writer(csvFile)
        writer.writerow(header_row)
# testing out the "Company predictor function" on the scraped data.
# looping over the data and saving to new file: "company_predictons.csv"
# Decision rule: a company found in the headline wins; otherwise the most
# frequently tagged company in the article body is used.
for index, row in news_data.iterrows():
    # check first 20
    #if index == 10:
    # break
    print("Link to article: ", row['link'])
    print("Article header: ", row['header'])
    # getting all the company tags from sentence:
    companies = sentence_to_org(row['content'])
    print("all the found company tags: ", companies)
    # getting the most common company tag
    most_common_company = most_common(companies)
    # checking what company the header contains
    company_in_header = does_header_contain(row['header'], companies)
    # if company in header matches the most common company, predicting that article is about that company
    if most_common_company == company_in_header:
        predicted_company = most_common_company
    elif company_in_header == None:
        predicted_company = most_common_company
    elif company_in_header:
        predicted_company = company_in_header
    else:
        predicted_company = None
    if predicted_company != None:
        print("Article is predicted to be about following company: ", predicted_company)
    else:
        print("Article was not about any particular company.")
    print("\n")
    # saving to csv file the predictions
    # NOTE(review): `row` is rebound here, shadowing the iterrows row; the
    # file is reopened per article, which flushes progress but is slow.
    row = [row['link'], row['header'], companies, predicted_company]
    with open(filename, 'a') as csvFile:
        writer = csv.writer(csvFile)
        writer.writerow(row)
| true |
75587a5843e7166a6a76b5be115f6cd0a80d285b | Python | rorik302/sp-test | /bonus_cards/views.py | UTF-8 | 2,658 | 2.5625 | 3 | [] | no_license | import json
from django.core.exceptions import FieldError, ValidationError
from django.core.serializers import serialize
from django.db import IntegrityError
from django.http import HttpResponse
from django.utils.dateparse import parse_date
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from .models import BonusCard
def list_view(request):
    '''
    If the request carries query parameters, a search is performed.
    Without query parameters, the full list of cards is returned.
    '''
    if len(request.GET):
        queryset = filter_by_query_params(request.GET)
    else:
        queryset = BonusCard.objects.all()
    data = serialize('json', queryset)
    return HttpResponse(data, content_type='application/json')
@csrf_exempt
@require_http_methods(["POST"])
def create(request):
    '''Add a new card.  Returns 201 with the serialized card, or 400 on
    validation failure / database integrity error.'''
    data = parse_data(json.loads(request.body.decode('utf-8')))
    card = BonusCard(**data)
    if is_valid(card):
        try:
            card.save()
            return HttpResponse(serialize('json', [card]), content_type='application/json', status=201)
        except IntegrityError as e:
            # `e` is intentionally discarded — the client only gets a 400.
            return HttpResponse(status=400)
    return HttpResponse(status=400)
@csrf_exempt
@require_http_methods(["DELETE"])
def delete(request, pk):
    '''Delete a card.  404 when the primary key does not exist.'''
    try:
        card = BonusCard.objects.get(pk=pk)
    except BonusCard.DoesNotExist:
        return HttpResponse(status=404)
    card.delete()
    return HttpResponse(status=200)
def filter_by_query_params(query_params):
    '''
    Filter cards by the request's query parameters.
    Each parameter becomes a case-insensitive exact-match filter; an
    unknown field name (FieldError) yields an empty list.
    '''
    filter_by = {}
    for field, value in query_params.items():
        filter_by.update({'{}__iexact'.format(field): value})
    try:
        return BonusCard.objects.filter(**filter_by)
    except FieldError:
        return []
def parse_data(data):
    '''Convert incoming payload fields to model-friendly types.'''
    # Replace the ISO 'expired_at' string with a datetime.date
    # (parse_date returns None for an invalid ISO date).
    data['expired_at'] = parse_date(data['expired_at'])
    return data
def is_valid(card):
    '''Validate the card: model field checks plus serial+number uniqueness.'''
    try:
        card.clean_fields()
    except ValidationError:
        return False
    # Reject duplicates of the same serial/number pair.
    return not BonusCard.objects.filter(serial=card.serial, number=card.number).exists()
| true |
dcf360cef450886c6702c6a7e7fb7c9b8a689984 | Python | ommprasad99/Python-program2 | /Fibonacci.py | UTF-8 | 134 | 3.21875 | 3 | [] | no_license | import time
# Print the ever-growing Fibonacci sequence forever, extending it by one
# term every two seconds.
fib = [0, 1]
while True:
    fib.append(fib[-1] + fib[-2])
    print(fib)
    time.sleep(2)
48c94012e57269ba599f49b491c580232baa7f6d | Python | Romualdi-Lab/brewerix-cli | /workflow/guess_loi/merge_vcfs.py | UTF-8 | 1,074 | 2.75 | 3 | [] | no_license | from argparse import ArgumentParser
from os.path import join
from subprocess import check_call
from tempfile import TemporaryDirectory
def merge_vcfs():
    """CLI entry point: parse arguments and merge the given VCFs into one
    output file via run_merge_vcfs."""
    parser = ArgumentParser(description="""
    Merge VCFs into a single VCF
    """)
    parser.add_argument('output', help="output file name")
    parser.add_argument('vcfs', nargs='+', help="vcf files")
    args = parser.parse_args()
    run_merge_vcfs(args.vcfs, args.output)
def run_merge_vcfs(files, output):
    """bgzip + tabix-index each input VCF inside a temp dir, then run
    `bcftools merge -m none`, writing an uncompressed VCF to *output*.

    Requires `bgzip`, `tabix` and `bcftools` on PATH; check_call raises on
    any non-zero exit.  NOTE(review): join(wdir, file + ".gz") keeps any
    directory components of `file`, so inputs in subdirectories would map
    to non-existent paths inside the temp dir — confirm callers pass plain
    file names.
    """
    with TemporaryDirectory() as wdir:
        gzipped_files = []
        for file in files:
            gfile = join(wdir, file + ".gz")
            gzipped_files.append(gfile)
            with open(gfile, "wb") as dest:
                check_call(["bgzip", "-c", file], stdout=dest)
            check_call(["tabix", "-p", "vcf", gfile])
        cmd = [
            'bcftools', "merge",
            "-m", "none",
            "-O", "v",
            "-o", output,
        ] + gzipped_files
        print(' '.join(cmd))
        check_call(cmd)
| true |
e101cc0c9c7923f46decf665d65d0a99e9661b29 | Python | sammous/spacy-lefff | /tests/test_leffflemmatizer.py | UTF-8 | 1,299 | 2.875 | 3 | [
"MIT"
] | permissive | # coding: utf-8
import pytest
import spacy
from spacy_lefff import LefffLemmatizer
"""
Test suite coming from spacy.
Link: https://github.com/explosion/spaCy
/blob/master/spacy/tests/lang/fr/test_lemmatization.py
"""
def test_lemmatizer_verb(nlp):
    """The conjugated verb in "J'ai ..." lemmatises to "avoir"."""
    tokens = nlp("J'ai une maison à Paris.")
    assert tokens[1]._.lefff_lemma == "avoir"
def test_lemmatizer_noun_verb(nlp):
    """The plural noun "abaissements" lemmatises to its singular form."""
    tokens = nlp("Les abaissements de température sont gênants.")
    assert tokens[1]._.lefff_lemma == "abaissement"
def test_lemmatizer_noun(nlp):
    """"Françaises" lemmatises to the lower-cased masculine "français"."""
    tokens = nlp("il y a des Françaises.")
    assert tokens[4]._.lefff_lemma == "français"
def test_lemmatizer_noun_2(nlp):
    """Both nouns in the sentence get their singular lemmas."""
    tokens = nlp("Les abaissements de température sont gênants.")
    assert tokens[1]._.lefff_lemma == "abaissement"
    assert tokens[3]._.lefff_lemma == "température"
def test_punctuations(nlp):
    """Punctuation tokens lemmatise to themselves."""
    tokens = nlp(". ?")
    assert tokens[0]._.lefff_lemma == "."
    assert tokens[1]._.lefff_lemma == "?"
@pytest.mark.exception
def test_lemmatizer_exception():
    """An unknown word/POS pair must yield None rather than raise."""
    french_lemmatizer = LefffLemmatizer()
    assert french_lemmatizer.lemmatize("unknow34", "unknown") is None
def test_lemmatizer_default():
    """With default=True, "Apple"/NOUN yields the lower-cased "apple"."""
    french_lemmatizer = LefffLemmatizer(default=True)
    assert french_lemmatizer.lemmatize("Apple", "NOUN") == "apple"
| true |
4ce195b9fdf2f9db935c413e578e195cf21ee5c4 | Python | tomas-cortez/Projects | /simple_python/IA_PRO1_TP7.py | UTF-8 | 2,703 | 3.84375 | 4 | [] | no_license | def MostrarSueldos():
archivo = open("IEFI.csv","r")
datos = archivo.readlines()
print ("Apellido".ljust(11),"Nombre".ljust(11),"S.Bas".ljust(7),"S.1°Aum".ljust(9),"S.2°Aum","\n")
for x in datos:
x = x.replace("\n","")
x = x.split(";")
a1 = ( (int(x[5]))+( (int(x[5])*(15/100) )) )
a2 = ( (int(a1))+( (int(a1)*(12/100) )) )
print (f"{x[1].ljust(9)} {x[2].ljust(9)} {x[5]} {a1} {a2}")
archivo.close()
def ExportarSueldos():
    """Export one 'Apellido, Nombre;basic;bonus;total' row per employee to
    IEFI2.csv, where the bonus is column-4 * 1.7% of the basic salary
    (column 5), rounded to 1 decimal.

    NOTE(review): column 4 presumably holds years of service — confirm the
    CSV column layout before changing the formula.
    """
    archivo = open("IEFI.csv","r")
    datos = archivo.readlines()
    archivo2 = open("IEFI2.csv","w")
    for x in datos:
        x = x.replace("\n","")
        x = x.split(";")
        aa = round( (int(x[4]))*((int(x[5])*(1.7/100)) ),1)
        t = (int(x[5])+aa)
        archivo2.write(x[1]+", "+x[2]+";"+str(x[5])+";"+str(aa)+";"+str(t)+"\n")
    archivo.close()
    archivo2.close()
def CalcularPromedio(promedio):
    """Compute, print and return the average basic salary (column 5, 0-based)
    of IEFI.csv.

    The `promedio` parameter is kept for call-site compatibility; its
    incoming value is ignored and overwritten with the computed average.
    Fix: the original's `archivo.close()` sat AFTER `return` and was
    unreachable, leaking the file handle — `with` now guarantees closing.
    """
    total = 0
    count = 0
    with open("IEFI.csv", "r") as archivo:
        for linea in archivo:
            campos = linea.replace("\n", "").split(";")
            total += int(campos[5])
            count += 1
    promedio = total / count
    print(f"El promedio es: {promedio}")
    return promedio
def ConsultarUnAumentoDeSueldo(legajo):
    """Print name, basic salary and salary after a 15% raise for the
    employee whose id (column 0) equals `legajo`, else print that the id
    does not exist.

    Bug fixed: the original's one-line `if ... return / else: return`
    inside the loop returned "El legajo es inexistente" as soon as the
    FIRST row did not match, so only the first employee could ever be
    found.  The "not found" message now only fires after scanning every
    row.  The original `archivo.close()` after `return` was also
    unreachable; `with` closes the file deterministically.
    """
    with open("IEFI.csv", "r") as archivo:
        for linea in archivo:
            x = linea.replace("\n", "").split(";")
            if legajo == int(x[0]):
                an = (x[1] + ", " + x[2])
                sb = x[5]
                # basic salary plus a 15% raise
                sa = ( (int(x[5])) + ( (int(x[5]) * (15 / 100)) ) )
                return print(f"{an} {sb} {sa}")
    return print("El legajo es inexistente\n")
print ("En este programa puedes hacer las siguientes operaciones:\n",
"1 – Listar los sueldos. \n",
"2 – Exportar los sueldos.\n",
"3 – Calcular el promedio de los sueldos básicos.\n",
"4 – Consultar el aumento de sueldo de un empleado.")
a = int(input("Ingresa el número que corresponde a tu elección: "))
while a < 5:
if a == 1:
MostrarSueldos()
elif a == 2:
ExportarSueldos()
print ("Archivo generado exitosamente!")
elif a == 3:
CalcularPromedio(a)
elif a == 4:
a = int(input ("Legajo: "))
ConsultarUnAumentoDeSueldo(a)
print ("En este programa puedes hacer las siguientes operaciones:\n",
"1 – Listar los sueldos. \n",
"2 – Exportar los sueldos.\n",
"3 – Calcular el promedio de los sueldos básicos.\n",
"4 – Consultar el aumento de sueldo de un empleado.")
a = int(input("Ingresa el número que corresponde a tu elección: "))
else: (print ("Esa opción no está disponible"))
print("Fin del programa.") | true |
ffb33670aa171d0e62ebb40cd4c0fe28c0605106 | Python | lobZter/HW_in_NCTU | /Machine_Learning/HW2/HW2_2.py | UTF-8 | 724 | 3.40625 | 3 | [] | no_license |
file_path = raw_input("File path: ")
a = input("intial a: ")
b = input("intial b: ")
with open(file_path.strip()) as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
for line in lines:
N, m, p = observe(line)
print 'MLE: {} | Prior a: {}, b: {} |'.format(p, a, b),
# a = m + a
# b = N - m + b
a += m
b += (N - m)
print 'Posterior a: {}, b: {}'.format(a, b)
def observe(input):
head_count = 0
tail_count = 0
total_count = len(input)
while input != "":
if input[0] == "1":
head_count += 1
if input[0] == "0":
tail_count += 1
input = input[1:]
return total_count, head_count, float(head_count)/total_count
| true |
d7f97a8805855816aa947807b1def1e6205a173f | Python | LemonChocolate/Web_Scrolling_img | /py_format/Pintersest.py | UTF-8 | 4,623 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# ### Pinterest
# In[1]:
### Pinterest scrolling
"""Before execute code below, check out your kernel or jupyter notebook kernel environment
If you have problem, just copy this code and paste to yout jupyter notebook (recommended)
Also, before execute this page, execute this first >> "Get Chrome driver & dir setting.ipynb"
You must login with Pinterest Account ---> You Can't login with google account.
I try to login with google, i don't have solution with new browser pop up issue.
And browser must be pop up on the screen : if the browser is in a state of minimization, results may go bad
(It does not matter covering the pinterest page with other page like jupyternotebook >> you can do other works)
If you have trouble with lxml, selenium, bs4, try to isntall module in anaconda prompt
>>> execute anconda prompt, try to [conda install lxml], [conda install selenium], [conda install bs4]
warning : If you try this code with high frequency, Search engine may ban your ip temporarily (for 5~10 minutes)
Refer to : Scroll_cnt=5 >>> about 307 imgs(depending on the searching word)"""
## Install module required
# !pip install lxml
# !pip install selenium
# !pip install bs4
## Import modules
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup
import urllib.request
from time import sleep
import time as time
import getpass
##### Path ######################################################################
Chromedriver_PATH = 'c:\\chrome_webdriver\\chromedriver.exe' # Chromedriver PATH
save_path = 'D:\\images\\pinterest\\' #save_path
#################################################################################
## get userdata & parameters
username = input("Input ID : ") # User ID
password = getpass.getpass("Input PWD : ") # User PWD #getpass : hidden option
Search_Tag = input("Input Search_Tag : ") # Search_Tag
scroll_cnt = int(input("Input scroll_cnt : ")) #scroll count
scrolltime = float(input("Input scroll_sleep_second >>> range(5~10) : ")) #Sleep time
## Get driver & open
# NOTE(review): find_element_by_* is the Selenium 3 API, and the CSS class
# strings below are scraped from Pinterest's markup — both will break on
# upgrades / site redesigns.
driver = webdriver.Chrome(Chromedriver_PATH) # Chromedriver PATH
driver.get("https://www.pinterest.co.kr/")
driver.maximize_window()
sleep(1)
## click login button
driver.find_element_by_css_selector(".RCK.Hsu.USg.adn.CCY.czT.Vxj.aZc.Zr3.hA-.Il7.Jrn.hNT.BG7.gn8.L4E.kVc").click()
# ## login with google : Preparing...
# driver.find_element_by_css_selector(".S9gUrf-YoZ4jf").click()
### Login with Pinterest account
# insert logindata in "login div"
element_id = driver.find_element_by_name("id")
element_id.send_keys(username)
element_password = driver.find_element_by_name("password")
element_password.send_keys(password)
driver.implicitly_wait(1)
## click login button : by Pinterest account
driver.find_element_by_css_selector('.red.SignupButton.active').click()
## input Search_Tag & push 'Enter'
time.sleep(10) #recommended not to change times
driver.page_source #get source
search = driver.find_element_by_name("searchBoxInput")
search.send_keys(Search_Tag)
time.sleep(5) #recommended not to change times
search.send_keys(Keys.ENTER)
time.sleep(5) #recommended not to change times
############## Functions ################################################################################
def fetch_list_url(): #parsing src url
    """Collect image URLs from the current page.

    Reads the module-level `soup` and appends each matching <img> tag's
    "src" (falling back to "srcset") to the module-level `params` list,
    which is also returned.  NOTE(review): the class string is a scraped
    Pinterest CSS class and breaks whenever the site markup changes.
    """
    imgList = soup.find_all("img", class_="hCL kVc L4E MIw")
    for im in imgList:
        try :
            params.append(im["src"])
        except KeyError:
            params.append(im["srcset"])
    return params
def fetch_detail_url(): #save src to local #changing save_path : Go to the top of this page (Path)
    """Download every URL in the module-level `params` list to `save_path`
    as <Search_Tag>_<n>_pinterest.jpg, with n starting at 1."""
    for idx,p in enumerate(params,1): #enumerate idx option 1 : get start index from 1 (default=0)
        urllib.request.urlretrieve(p, save_path + Search_Tag + '_' + str(idx) + "_pinterest" + ".jpg")
###########################################################################################################
## Scrolling & Parsing
params=[]
# Each pass re-parses the whole page, so `params` accumulates duplicates
# across scrolls; they are removed below before downloading.
for i in range(scroll_cnt):
    html = driver.page_source #get source
    soup = BeautifulSoup(html, "lxml")
    params = fetch_list_url() #save the img_url to params
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);") #scroll
    time.sleep(scrolltime)
## Save imgs
print('')
print("Overlaped srcs : ", len(params))
params=list(dict.fromkeys(params)) #delete overlap #index URL >> https://m31phy.tistory.com/130
fetch_detail_url() #save img
print("Non_Overlap srcs : ", len(params))
driver.close() #close browser
| true |
b20d1594f6f7588835ce5d1a0a40f50223ae366f | Python | juechengxi/tf_uniform_loss | /tf_uniform_loss.py | UTF-8 | 1,566 | 2.5625 | 3 | [] | no_license | import os
import sys
import numpy as np
import tensorflow as tf
def uniform_loss(features, labels, num_class):
    """Center-based "uniform" regulariser (TensorFlow 1.x graph mode).

    Maintains a non-trainable [num_class, dim] table of per-class feature
    centers, applies a batch update to it, and returns the mean of
    1 / (pairwise center distance + 1) over pairs of samples with
    DIFFERENT labels — pushing class centers apart.

    Args:
        features: float32 tensor [batch, dim]; batch size must be static.
        labels: int-castable tensor [batch] of class ids in [0, num_class).
        num_class: number of rows in the center table.
    Returns:
        Scalar float32 loss tensor.
    """
    # BUG FIX: the original referenced the undefined name `class_num` in
    # the variable shape; the parameter is spelled `num_class`.
    centers = tf.get_variable(name='features_centers', shape=[num_class, features.get_shape().as_list()[-1]],
                              dtype=tf.float32,
                              initializer=tf.constant_initializer(0),
                              trainable=False)
    batch_size = features.get_shape().as_list()[0]
    labels = tf.cast(labels, tf.int32)
    selected_centers = tf.gather(centers, labels)
    diff = selected_centers - features
    labels_rh = tf.reshape(labels, [-1, 1])
    adjacency = tf.equal(labels_rh, tf.transpose(labels_rh))
    adjacency_not = tf.cast(tf.logical_not(adjacency), tf.float32)
    adjacency = tf.cast(adjacency, tf.float32)
    # Average the center-feature differences over the number of same-label
    # samples before applying them as a center update.
    denom = tf.reduce_sum(adjacency, axis=1, keepdims=True)
    diff /= denom
    centers = tf.scatter_sub(centers, labels, diff)
    with tf.control_dependencies([centers]):  # update centers first
        # Squared pairwise distances via ||u||^2 + ||v||^2 - 2 u.v.
        a = tf.reduce_sum(tf.square(selected_centers), axis=1, keepdims=True)
        b = tf.reduce_sum(tf.square(tf.transpose(selected_centers)), axis=0, keepdims=True)
        ab = tf.matmul(selected_centers, selected_centers, transpose_b=True)
        pd_mat = tf.add(a, b) - 2.0 * ab
        # Guard the sqrt against tiny negative values from rounding.
        error_mask = tf.less_equal(pd_mat, 0.0)
        pd_mat = tf.sqrt(pd_mat + tf.to_float(error_mask) * 1e-16) + 1.0
        pd_mat = tf.multiply(1.0 / pd_mat, adjacency_not)
        uniform_loss = tf.reduce_sum(pd_mat) / (batch_size * (batch_size - 1.0))
    return uniform_loss
| true |
7bedec1174bc9534ef186d28b4d6e18c2f1b2ac4 | Python | HuangPayoung/angr_learning | /angr_ctf/03_angr_symbolic_registers/03_exp.py | UTF-8 | 1,607 | 2.84375 | 3 | [] | no_license | import angr
'''
def main():
project = angr.Project('./03_angr_symbolic_registers')
initial_state = project.factory.entry_state()
simulation = project.factory.simgr(initial_state)
def is_good(state):
return b'Good Job.\n' in state.posix.dumps(1)
def is_bad(state):
return b'Try again.\n' in state.posix.dumps(1)
simulation.explore(find = is_good, avoid = is_bad)
for solution_state in simulation.found:
solution = solution_state.posix.dumps(0)
print('Success: ' + solution.decode())
'''
def main():
project = angr.Project('./03_angr_symbolic_registers')
start_addr = 0x08048980
start_state = project.factory.blank_state(addr = start_addr)
simulation = project.factory.simgr(start_state)
password1 = start_state.solver.BVS("password1", 32)
password2 = start_state.solver.BVS("password2", 32)
password3 = start_state.solver.BVS("password3", 32)
start_state.regs.eax = password1
start_state.regs.ebx = password2
start_state.regs.edx = password3
def is_good(state):
return b'Good Job.\n' in state.posix.dumps(1)
def is_bad(state):
return b'Try again.\n' in state.posix.dumps(1)
simulation.explore(find = is_good, avoid = is_bad)
if simulation.found:
solution_state = simulation.found[0]
input1 = solution_state.solver.eval(password1)
input2 = solution_state.solver.eval(password2)
input3 = solution_state.solver.eval(password3)
print('Success: {:x} {:x} {:x}'.format(input1, input2, input3))
if __name__ == '__main__':
main()
| true |
98f191c788688b4832114b8be9eef8154fa860b2 | Python | GreaterGoodCorp/SuperHelper | /src/SuperHelper/Modules/Stenographer/__main__.py | UTF-8 | 16,325 | 2.75 | 3 | [
"MIT"
] | permissive | import bz2
import functools
import io
import logging
import re
import sys
import click
import cryptography.fernet
from PIL import Image
from SuperHelper.Core.Config import Config, pass_config
from SuperHelper.Core.Utils import BitOps, Cryptographer
# Module identity and config-injection helpers shared by the functions below.
MODULE_NAME: str = "Stenographer"
# Partials that inject this module's config section, with/without the lock.
pass_config_no_lock = functools.partial(pass_config, module_name=MODULE_NAME, lock=False)
pass_config_with_lock = functools.partial(pass_config, module_name=MODULE_NAME, lock=True)
# NOTE(review): reassigning __name__ changes the logger name below and this
# module's runtime identity — confirm this is intentional project style.
__name__ = f"SuperHelper.Builtins.{MODULE_NAME}"
logger = logging.getLogger(__name__)
class Header:
    """Provides for the preparation of the creation of steganography.

    Serialised form: ``<data_length>?<flag>?<salt>`` padded with '-' up to
    ``header_length`` bytes, where flag packs compression (bits 6-2) and
    density (bits 1-0).
    """
    # Padding character, used when header is too short
    # after writing all the required metadata
    padding_character: str = "-"
    # Separator is used to make regex easier
    separator: str = "?"
    # Various types of length for the header
    maximum_data_length: int = 8
    maximum_flag_length: int = 3
    salt_length: int = 24
    separator_length: int = 2
    header_length: int = maximum_data_length + maximum_flag_length + salt_length + separator_length
    # Regex pattern of the header
    # data_length?flag?salt
    pattern: str = r"(\d{1,8})\?(\d{1,3})\?"
    # hash_pattern matches a base64-encoded string (the salt).
    hash_pattern: str = r"((?:[A-Za-z0-9+/]{4})+(?:[A-Za-z0-9+/]{2}==" + \
                        r"|[A-Za-z0-9+/]{3}=)?)"
    padding_pattern: str = r"-*"
    # `pattern` is deliberately rebound: first the partial source string
    # above, then the full compiled regex used everywhere else.
    pattern: re.Pattern = re.compile(f"^{pattern + hash_pattern + padding_pattern}$")

    def __str__(self) -> str:
        """Returns the header."""
        return self.header

    def __repr__(self) -> str:
        """Same as __str__, returns the header."""
        return str(self)

    def __init__(self, data_length: int, compression: int, density: int,
                 salt: str) -> None:
        # Serialised header string, filled in by generate() below.
        self.header: str = str()
        self.data_length: int = data_length
        self.compression: int = compression
        self.density: int = density
        self.salt: str = salt
        self.generate()

    def generate(self) -> None:
        """
        Generates a header created from input_file given during
        Header initialisation.
        There is no need to call this method, unless any metadata has been
        modified after initialisation.
        """
        # Create a flag from compression level and density level.
        # Bit 6 - 2: Compression level (0 (no compression) - 9)
        # Bit 1 - 0: Density level (1 - 3)
        flag = (self.compression << 2) + self.density
        result_header = Header.separator.join(
            (str(self.data_length), str(flag), self.salt))
        # Pad to the fixed length so the on-image header size is constant.
        result_header += Header.padding_character * (Header.header_length - len(result_header))
        assert Header.pattern.match(result_header)
        # Assign as a class attribute
        self.header = result_header
def validate_header(b: bytes) -> bool:
    """Return True when *b* decodes as UTF-8 and matches Header.pattern."""
    try:
        text = b.decode("utf-8")
    except UnicodeDecodeError:
        return False
    return Header.pattern.match(text) is not None
def fix(data: bytes, is_encrypt: bool = True) -> bytes:
    """Append the two-byte b"++" suffix on the encrypt path, or strip it on
    the decrypt path — the two calls are exact inverses."""
    return data + b"++" if is_encrypt else data[:-2]
@pass_config_no_lock()
def build_header(config: dict[str, ...], data_length: int, salt: str, compression: int, density: int) -> Header:
    """Construct a Header, falling back to the configured defaults when
    `compression`/`density` fall outside their allowed sets."""
    compression = config["default_compression"] if compression not in config["available_compression"] else compression
    density = config["default_density"] if density not in config["available_density"] else density
    return Header(data_length, compression, density, salt)
def parse_header(b: bytes) -> Header:
    """Parse serialised header bytes back into a Header object.

    Raises ValueError when `b` does not decode / match Header.pattern.
    """
    if not validate_header(b):
        raise ValueError("Invalid header!")
    header_match = Header.pattern.match(str(b, "utf-8"))
    hdr_data_length = int(header_match[1])
    hdr_flag = int(header_match[2])
    hdr_salt = header_match[3]
    # Flag layout (see Header.generate): low 2 bits = density, rest = compression.
    hdr_density = hdr_flag & 0b11
    hdr_compression = (hdr_flag - hdr_density) >> 2
    # Build and return a Header object
    return build_header(
        data_length=hdr_data_length,
        compression=hdr_compression,
        density=hdr_density,
        salt=hdr_salt
    )
@pass_config()
def patch_config(config: Config) -> None:
    """Register this module's default configuration with the core Config."""
    cfg = {
        "available_compression": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        "available_density": [1, 2, 3],
        "default_compression": 9,
        "default_density": 1,
        # NOTE(review): a hard-coded default auth key offers no real
        # secrecy; confirm users are expected to supply their own key.
        "default_auth_key": "bGs21Gt@31",
        "flag_show_image_on_completion": False,
        "flag_file_open_mode": "rb",
    }
    config.apply_module_patch(MODULE_NAME, cfg)
@pass_config_no_lock()
def write_steganography(input_file: io.IOBase, image_file: Image.Image, output_file: io.IOBase, auth_key: str,
                        compression: int, density: int, show_image_on_completion: bool,
                        config: dict[str, ...] = None) -> int:
    """Embed `input_file`'s contents (optionally bzip2-compressed, then
    Fernet-encrypted, prefixed with a serialised Header) into the low
    `density` bits of each RGB channel of `image_file`, saving the result
    as PNG to `output_file`.

    Returns 0 on success, 1 on any failure (details are logged).
    All file-like arguments are closed on the success path.
    """
    # Fill unset/invalid arguments from the module configuration.
    auth_key = config["default_auth_key"] if auth_key is None else auth_key
    compression = config["default_compression"] if compression not in config["available_compression"] else compression
    density = config["default_density"] if density not in config["available_density"] else density
    show_image_on_completion = config["flag_show_image_on_completion"] \
        if show_image_on_completion is None else show_image_on_completion
    input_file.seek(0)
    data = input_file.read()
    if data is None:
        logger.error("Input file is not readable!")
        return 1
    if len(data) == 0:
        logger.error("Input file is empty or exhausted!")
        return 1
    if compression > 0:
        # Compress using the builtin bzip2 library
        data = bz2.compress(data, compresslevel=compression)
    crypto = Cryptographer.make_encrypter(Cryptographer.encode_salt(Cryptographer.make_salt()), auth_key)
    data = crypto.encrypt(data)
    data = fix(data)
    # Craft the finished input_file
    header = build_header(
        data_length=len(data),
        compression=compression,
        density=density,
        salt=crypto.get_salt_string(),
    )
    # 2. Serialise header and prepend input_file with header
    data = bytes(header.header, "utf-8") + data
    try:
        pix = image_file.load()
    # NOTE(review): `Exception or BaseException` evaluates to just
    # `Exception`; the second operand is dead code.
    except Exception or BaseException:
        logger.exception("Cannot load image_file file!")
        return 1
    x_dim, y_dim = image_file.size
    # Make sure there are enough space to store all bits
    no_of_pixel = x_dim * y_dim
    no_of_rgb = no_of_pixel * 3
    no_of_storable_bit = no_of_rgb * density
    no_of_stored_bit = len(data) * 8
    if no_of_storable_bit < no_of_stored_bit:
        # If there are not enough, raise error
        logger.error("Data is too big to be stored!")
        return 1
    x, y, count, bit_loc = 0, 0, 0, density - 1
    current_pix = list(pix[0, 0])
    # Firstly, iterate through all the bytes to be written
    for byte in data:
        # Secondly, iterate through all the bits of the given byte
        for i in range(8):
            # Thirdly, check if the bit is set
            # If bit is set
            if BitOps.is_bit_set(byte, i):
                # Check if the bit at the current location in the image_file is set
                # If unset then set it, otherwise unchanged
                current_pix[count] = BitOps.set_bit(current_pix[count], bit_loc)
            # If bit is unset
            else:
                # Check if the bit at the current location in the image_file is set
                # If set then unset it, otherwise unchanged
                current_pix[count] = BitOps.unset_bit(current_pix[count], bit_loc)
            # Move to the next bit
            # by decrementing index
            bit_loc -= 1
            # If reached the final bit
            if bit_loc == -1:
                # Move to the next integer
                # by incrementing the count
                count += 1
                # Reset density
                bit_loc = density - 1
                # If reached the last RGB
                if count == 3:
                    # Save pixel
                    # NOTE(review): a trailing PARTIAL pixel (count < 3 when
                    # data runs out) is never written back via pix[x, y];
                    # confirm the payload length always fills whole pixels.
                    pix[x, y] = tuple(current_pix)
                    # Reset count
                    count = 0
                    y += 1
                    # If the entire row of pixel is written
                    if y == y_dim:
                        # Move on to the next row and reset
                        y = 0
                        x += 1
                    # Request new pixel to be written
                    current_pix = list(pix[x, y])
    try:
        image_file.save(output_file, "png")
    except OSError:
        logger.exception("Cannot save image_file to output_file file!")
        return 1
    if show_image_on_completion:
        image_file.show("Demo")
    input_file.close()
    image_file.close()
    output_file.close()
    return 0
@pass_config_no_lock()
def extract_header(image: Image.Image, config: dict[str, ...] = None) -> Header:
    """Read back the steganography header embedded in *image*.

    The density used at write time is unknown, so every density listed in
    ``config["available_density"]`` is tried in turn until ``parse_header``
    accepts the decoded bytes.

    NOTE(review): ``config`` defaults to None but is indexed unconditionally;
    presumably ``pass_config_no_lock`` always injects it -- confirm.
    """
    pix = image.load()
    y_dim = image.size[1]
    # (x, y) = pixel coordinates, count = RGB channel index within the pixel.
    x, y, count = 0, 0, 0
    result_data = b""
    density = 1
    # Firstly, the header is retrieved by reading for its known length.
    # Since the density is unknown, check all density one by one.
    while density in config["available_density"]:
        bit_loc = density - 1
        while len(result_data) < Header.header_length:
            byte = 0
            # Read every single bit
            # Iterate through every single bit of the byte
            for i in range(8):
                # If bit is set, set the corresponding bit of 'byte'
                if BitOps.is_bit_set(pix[x, y][count], bit_loc):
                    byte = BitOps.set_bit(byte, i)
                # Move to the next bit by decrement bit index
                bit_loc -= 1
                # If all readable bits of the colour integer are consumed
                if bit_loc == -1:
                    # Move to the next RGB and reset the bit index
                    count += 1
                    bit_loc = density - 1
                    # If the entire pixel is read
                    if count == 3:
                        # Move to the next pixel in the row and reset the count
                        count = 0
                        y += 1
                        # If the entire row of pixels is read
                        if y == y_dim:
                            # Move to the next row and reset row index
                            y = 0
                            x += 1
            # Convert the single byte (integer) to bytes
            # By design, the resulting input_file is strictly stored in 1 byte
            # and endianness does not matter since it is only 1 byte
            result_data += byte.to_bytes(1, "big")
        # If header is invalid
        # e.g wrong density
        try:
            # Invalid header has undecodable byte
            return parse_header(result_data)
        except ValueError:
            # Hence, switch to the next possible density
            # Reset all values to original
            density += 1
            result_data = b""
            x, y, count = 0, 0, 0
def extract_steganography(input_file: io.IOBase, output_file: io.IOBase, auth_key: str) -> int:
    """Recover the hidden payload from a steganography image.

    Reads header + payload bits at the density recorded in the header,
    decrypts with *auth_key*, optionally decompresses, and writes the
    result to *output_file*.  Returns 0 on success, 1 on any failure.
    """
    try:
        image = Image.open(input_file)
    except Image.UnidentifiedImageError:
        logger.exception(f"Not an image file!")
        return 1
    # Header carries salt, compression flag, density and payload length.
    header = extract_header(image)
    pix = image.load()
    y_dim = image.size[1]
    data_length = Header.header_length + header.data_length
    # (x, y) = pixel coordinates, count = RGB channel index within the pixel.
    x, y, count = 0, 0, 0
    result_data = b""
    bit_loc = header.density - 1
    # Attempt to read input_file
    while len(result_data) < data_length:
        byte = 0
        # Read every single bit
        # Iterate through every single bit of the byte
        for i in range(8):
            # If bit is set, set the corresponding bit of 'byte'
            if BitOps.is_bit_set(pix[x, y][count], bit_loc):
                byte = BitOps.set_bit(byte, i)
            # Move to the next bit by decrement bit index
            bit_loc -= 1
            # If all readable bits of the colour integer are consumed
            if bit_loc == -1:
                # Move to the next RGB and reset the bit index
                count += 1
                bit_loc = header.density - 1
                # If the entire pixel is read
                if count == 3:
                    # Move to the next pixel in the row and reset the count
                    count = 0
                    y += 1
                    # If the entire row of pixels is read
                    if y == y_dim:
                        # Move to the next row and reset row index
                        y = 0
                        x += 1
        # Convert the single byte (integer) to bytes
        # By design, the resulting input_file is strictly stored in 1 byte
        # and endianness does not matter since it is only 1 byte
        result_data += byte.to_bytes(1, "big")
    # Strip header by slicing its known length
    result_data = result_data[Header.header_length:]
    result_data = fix(result_data, False)
    # Decrypt input_file
    crypto = Cryptographer.make_decrypter(header.salt, auth_key)
    try:
        # 5. Store decrypted input_file
        result_data = crypto.decrypt(result_data)
    except cryptography.fernet.InvalidToken:
        logger.exception("Invalid authentication key!")
        return 1
    # If compressed (as indicated by the header), decompress it
    if header.compression > 0:
        result_data = bz2.decompress(result_data)
    # Write input_file to output_file file objects
    # Iterate through all file objects
    try:
        output_file.write(result_data)
        output_file.close()
    except IOError:
        logger.exception("Data cannot be writen")
        return 1
    return 0
@click.group("steg")
def main() -> None:
"""Applies steganography on images."""
patch_config()
@main.command("create", help="Creates steganography")
@click.option("-i", "--image_file", help="Path to custom image_file file", type=click.File("rb"), required=True)
@click.option("-k", "--key", help="The authentication key", type=str)
@click.option("-c", "--compress", help="Compression level of the steganography", type=int, default=-1)
@click.option("-d", "--density", help="Density of the steganography (from 1 to 3)", type=int, default=-1)
@click.option("-o", "--output_file", help="Path to output file", type=click.File("wb"), required=True)
@click.option("--show-image", help="Whether to show image_file on creation", type=bool, default=False)
@click.argument("input_file", type=click.File("rb"), required=True)
@pass_config_no_lock()
def create(image_file: io.IOBase, key: str, compress: int, density: int, output_file: io.IOBase, show_image: bool,
input_file: io.IOBase, config: dict[str, ...]) -> None:
density = config["default_density"] if density == -1 else density
if density not in config["available_density"]:
raise click.exceptions.BadOptionUsage(
"density", "Density must be from 1 to 3!")
compress = config["default_compression"] if compress == -1 else compress
if compress not in config["available_compression"]:
raise click.exceptions.BadOptionUsage(
"density", "Density must be from 0 (no compress) to 9!")
key = config["default_auth_key"] if key is None else key
try:
image = Image.open(image_file)
except Image.UnidentifiedImageError:
logger.exception("Not an image file!")
sys.exit(1)
# Perform operation
sys.exit(write_steganography(input_file, image, output_file, key, compress, density, show_image))
@main.command("extract", help="Extracts steganography")
@click.option("-k", "--key", help="The authentication key", type=str)
@click.option("-o", "--output_file", help="Path to output file", type=click.File("wb"), required=True)
@click.argument("steganography", required=True, type=click.File("rb"))
@pass_config_no_lock()
def extract(key: str, output_file: io.IOBase, steganography: io.IOBase, config: dict[str, ...]) -> None:
key = config["default_auth_key"] if key is None else key
try:
Image.open(steganography)
except Image.UnidentifiedImageError:
logger.exception("Not an image file!")
sys.exit(1)
sys.exit(extract_steganography(steganography, output_file, key))
| true |
c316331bb475edbfa99fcd1afbec4f7ebabd8548 | Python | Rasoolaghd/House_Pricing | /pipeline_all_fields.py | UTF-8 | 428 | 2.953125 | 3 | [] | no_license |
from sklearn.base import BaseEstimator, TransformerMixin


# a transformer that gets the desired attributes and converting Dataframe to numpy array.
class DataFrameSelector(BaseEstimator, TransformerMixin):
    """Pipeline step that selects columns from a DataFrame and returns
    their values as a NumPy array (for downstream sklearn estimators)."""

    def __init__(self, attribute_names):
        # Column labels to extract in transform().
        self.attribute_names = attribute_names

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        # DataFrame -> ndarray containing only the selected columns.
        return X[self.attribute_names].values
| true |
6b3329029769b55219afe00e0ba99575e1103dc7 | Python | DeltaForce14/Python_Training | /Functions_Basics.py | UTF-8 | 1,462 | 4.25 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 12 13:42:28 2018

@author: ditapeskova
"""

# ----- Functions -------------------------------------------------------
# Basic call shape: output = function_name(input)

expenses = [3.21, 67.76, 87.64, 54, 6.1, 78.4]

max(expenses)       # largest value in a list
round(1.34)         # round to the nearest integer
round(1.34, 1)      # round to one decimal place
len(expenses)       # how many elements a list holds
complex(2)          # build a complex number

# help() shows a function's signature; arguments in [] are optional
help(round)

sorted(expenses, reverse=False)   # ascending copy of the list
sorted(expenses, reverse=True)    # descending copy of the list

# ----- Methods: functions that belong to a specific object -------------

# List methods
cont = ["jane", 124, "peter", 157, "ben", 48, "frank", 203, "mandy", 95]
cont.index("ben")     # position of the first matching element
cont.count(124)       # how often a value occurs
cont.append("kate")   # grow the list in place, one element...
cont.append(143)      # ...at a time
expenses.reverse()    # flip the element order in place
expenses.remove(6.1)  # delete the first element equal to the input

# String methods
cat = "pepper"
cat.capitalize()      # first letter upper-cased
cat.replace("er", "s")  # substring substitution (pepper -> pepps)
cat.upper()           # every letter upper-cased
# All available string methods
help(str)
| true |
5c8b44daea83ee8c9e220a72284f1e7c8824b658 | Python | tekktonic/programming | /ops.py | UTF-8 | 582 | 2.53125 | 3 | [
"ISC"
] | permissive | import hw
# Opcode handlers for the toy machine.  Each handler mutates the machine
# state: pc (program counter), mem (memory), regx/regy/regz (registers).
# NOTE(review): these names are referenced bare but never defined in this
# module; presumably they live in `hw` -- confirm and qualify (hw.pc, ...)
# or initialise them at module level.

def none():
    """No-operation."""
    pass

def ldx():
    """Load the next word into register X."""
    global pc, regx
    pc += 1
    regx = mem[pc]

def ldy():
    """Load the next word into register Y."""
    global pc, regy
    pc += 1
    regy = mem[pc]

def ldz():
    """Load the next word into register Z."""
    global pc, regz
    pc += 1
    regz = mem[pc]

def rtx():
    """Store register X at the address given by the next word."""
    global pc
    pc += 1
    mem[mem[pc]] = regx  # FIX: previously stored the function object `rtx`

def rty():
    """Store register Y at the address given by the next word."""
    global pc
    pc += 1
    mem[mem[pc]] = regy  # FIX: previously stored the function object `rty`

def rtz():
    """Store register Z at the address given by the next word."""
    global pc
    pc += 1
    mem[mem[pc]] = regz

def add():
    """Z = X + Y."""
    global regz
    regz = regx + regy

def sub():
    """Z = X - Y."""
    global regz
    regz = regx - regy

def mul():
    """Z = X * Y."""
    global regz
    regz = regx * regy

def div():
    """Z = X / Y."""
    global regz
    regz = regx / regy

def mov():
    """Store register X at the address held in register Y."""
    mem[regy] = regx

def jmp():
    """Jump: set pc to the next word."""
    global pc
    pc = mem[pc + 1]

def die():
    """Halt the machine."""
    exit(0)

def prt():
    """Print the value at the address given by the next word."""
    global pc
    pc += 1
    print(mem[mem[pc]])
| true |
1c20baf0fdd8108570900e2e4f2949005f0539dd | Python | twinkle-zp/tensorflow-basis | /tensorflow框架/tf3_2.py | UTF-8 | 231 | 2.875 | 3 | [] | no_license | import tensorflow as tf
x=tf.constant([[1.0,2.0]]) # 1x2 tensor (one row, two columns)
w=tf.constant([[3.0],[4.0]]) # 2x1 tensor (two rows, one column)
y=tf.matmul(x,w) # matrix multiplication
print y  # prints the symbolic tensor, not its value
with tf.Session() as sess: # run the graph to evaluate y
    print sess.run(y)
| true |
6298786b32be8d79c96ae5b61fb255d46997556e | Python | ytree-project/ytree | /doc/source/examples/halo_significance.py | UTF-8 | 1,737 | 2.875 | 3 | [
"BSD-3-Clause"
] | permissive | """
Calculate a halo's significance, defined as the time
integrated mass of all a halo's progenitors.
NOTE: this script includes extra code to make it run within
the test suite. To run conventionally, remove the following
lines and return the code block in the middle to the proper
tabbing (i.e., 4 spaces to the left).
from mpi4py import MPI
comm = MPI.Comm.Get_parent()
try:
except BaseException:
pass
comm.Disconnect()
"""
import yt
yt.enable_parallelism()
import ytree
def calc_significance(node):
    """Recursively compute a halo's significance and store it on the node.

    Significance = mass * dt (dt being the time to the descendent halo,
    zero for the tree root) plus the significance of every ancestor.
    """
    desc = node.descendent
    if desc is None:
        # Root halo: zero interval, multiplied to keep the units of "time".
        dt = 0. * node["time"]
    else:
        dt = desc["time"] - node["time"]
    total = node["mass"] * dt
    # Accumulate contributions from every progenitor, depth-first.
    for anc in (node.ancestors or []):
        total = total + calc_significance(anc)
    node["significance"] = total
    return total
if __name__ == "__main__":
# Remove the next three and final three lines to run conventionally.
from mpi4py import MPI
comm = MPI.Comm.Get_parent()
try:
a = ytree.load("tiny_ctrees/locations.dat")
a.add_analysis_field("significance", "Msun*Myr")
ap = ytree.AnalysisPipeline()
ap.add_operation(calc_significance)
trees = list(a[:])
for tree in ytree.parallel_trees(trees, filename="halo_significance"):
yt.mylog.info(f"Processing {tree}.")
ap.process_target(tree)
if yt.is_root():
a2 = ytree.load("halo_significance/halo_significance.h5")
a2.set_selector("max_field_value", "significance")
prog = list(a2[0]["prog"])
print (prog)
# Remove the next three lines to run conventionally.
except BaseException:
pass
comm.Disconnect()
| true |
8faef9f572bc298aeb04990772125ef98ea2dae8 | Python | jzkelter/SAFTE | /compare_safte_methods.py | UTF-8 | 4,555 | 3.28125 | 3 | [] | no_license | import matplotlib.pyplot as plt
from safte_help import SAFTE_1, SAFTE_mult, repeat_safte_1
import basic_math as bm
from operator import add
#global variable
# sleep_times = [[1.28,8.98],[23.4,5.33], [23.4,5.33], [23.4,5.33]]
sleep_times2 = ([[0,8]] * 8)
# sleep_times = [[1.28,8.98],[23.4,5.33],[23.71,6.01],[22.15,5.25],[21.95,6.86]]
#methods for getting "typical" SAFTE output for a person
def many_days_of_average_sleep(sleep_times):
"""Ouput the average mental effectiveness using SAFTE of a person given past sleep times.
This function averages all the sleeps and then runs SAFTE long enough to reach an equilibrium.
Then it outputs the last day."""
ave_IB_times = bm.mean_sleep_times(sleep_times)
ave_sleep_times = ave_sleep_from_ave_TIB(ave_IB_times)
#run SAFTE for many days
sleep_times = [ave_sleep_times] * 10
hours_in_days, E_array_full = SAFTE_mult(sleep_times)
#get the last day of the SAFTE outputs
hours_in_1_day = trim_to_first_x_days(hours_in_days, 1)
E_array_1_day = trim_to_last_x_days(E_array_full,1)
mean_sleep = ave_sleep_times[1] - ave_sleep_times[0]
print "ave sleep: ", mean_sleep ,"\t ME_max: ", max(E_array_1_day)
return [hours_in_1_day, E_array_1_day]
def average_of_many_days(sleep_times):
"""Ouput the average mental effectiveness using SAFTE of a person given past sleep times.
This function runs safte_mult(i.e. peaks are picked once based on average) for all the days, and then averages them."""
n_days = len(sleep_times)
n_starting_ave_days = 3
ave_sleep_times = bm.mean_sleep_times(sleep_times) # get average sleep times
sleep_times = [ave_sleep_times] * n_starting_ave_days + sleep_times # we will run a few days of average sleep first, then the actual sleep to acount for unknown past
hours_in_days, E_array_full = SAFTE_mult(sleep_times) # get safte output
hours_in_1_day = trim_to_first_x_days(hours_in_days, 1) # get just one day of hours in day array
E_array_trimmed = trim_to_last_x_days(E_array_full, n_days)
E_array_averaged = average_sub_lists(E_array_trimmed, n_days)
return [hours_in_1_day, E_array_averaged]
def ave_repeat_safte_1(sleep_times):
    """Typical SAFTE output for a person: run repeat_safte_1 (circadian
    peaks re-picked every day) over all days, then average the days."""
    day_count = len(sleep_times)
    hours, effectiveness = repeat_safte_1(sleep_times)
    one_day_hours = trim_to_first_x_days(hours, 1)
    return [one_day_hours, average_sub_lists(effectiveness, day_count)]
#*******end of "typical" SAFTE methods
#*******helper funcions************
def trim_to_last_x_days(array_by_minute, num_days):
    """Return the final num_days worth of minute-resolution samples."""
    window = 60 * 24 * num_days
    return array_by_minute[-window:]
def trim_to_first_x_days(array_by_minute, num_days):
    """Return the first num_days worth of minute-resolution samples."""
    return array_by_minute[:60 * 24 * num_days]
def split_list(list1, num_new_lists):
    """Partition list1 into num_new_lists consecutive equal-sized chunks.

    Any remainder elements (when len(list1) is not divisible by
    num_new_lists) are dropped, matching the original behaviour.
    """
    # FIX: was `/` (true division on Python 3), which produced a float
    # chunk size and made the slices below raise TypeError.
    size = len(list1) // num_new_lists
    return [list1[k * size:(k + 1) * size] for k in range(num_new_lists)]
def average_sub_lists(list1, n_sub_lists):
    """Element-wise mean of the n equal consecutive chunks of list1."""
    chunks = split_list(list1, n_sub_lists)
    return [sum(column) / n_sub_lists for column in zip(*chunks)]
def plot_arrays(arrays):
    """Plot each [x_series, y_series] pair on one figure and show it."""
    for a in arrays:
        plt.plot(a[0],a[1])
    plt.show()
def sum_lists(lists):
    """Element-wise sum over a list of equal-length number lists."""
    return list(map(sum, zip(*lists)))
def ave_sleep_from_ave_TIB(IB_times):
    """Convert an average in-bed window into an estimated asleep window.

    Uses the regression TST = 0.84 * TIB + 0.395 to estimate total sleep
    time from time in bed, anchored at the wake time.
    """
    bed_time, wake_time = IB_times[0], IB_times[1]
    total_sleep = 0.84 * (wake_time - bed_time) + 0.395
    return [wake_time - total_sleep, wake_time]
#************end helper functions************
def max_ME_different_sleep():
    """Compare one-day SAFTE effectiveness curves for a week of 9h down
    to 3h of nightly time in bed, and plot them all together."""
    arrays = []
    for w in range(9,2,-1):
        # Seven identical nights in bed from hour 0 to hour w.
        sleep_times = [[0, w]] * 7
        arrays.append(many_days_of_average_sleep(sleep_times))
    plot_arrays(arrays)
max_ME_different_sleep()
# arrays = [many_days_of_average_sleep(sleep_times2)]
# plot_arrays(arrays)
| true |
ef5eb1de79201ee35d9440f4e71e4238355924c7 | Python | luisesanmartin/crypto-ML-predictor | /scripts/utils/data_fetching_utils.py | UTF-8 | 3,024 | 2.96875 | 3 | [] | no_license | from datetime import datetime, timedelta
import requests
import json
import pandas as pd
URL = 'https://rest.coinapi.io/'
FMT = '%Y-%m-%dT%H:%M:%S'
def get_api_key(text='../../data/key.txt'):
    """Return the CoinAPI key stored in the given text file."""
    with open(text) as key_file:
        return key_file.read()
def time_in_datetime(time):
    '''
    Transforms a time string to datetime
    '''
    # Parses the module-wide format FMT ('%Y-%m-%dT%H:%M:%S').
    return datetime.strptime(time, FMT)
def time_in_string(time):
    '''
    Transforms a datetime to an ISO-8601 string without fractional seconds,
    e.g. '2020-01-02T03:04:05'.
    '''
    # isoformat() appends '.ffffff' only when microsecond != 0; dropping
    # everything after the first '.' covers both cases.  (The original
    # combined a redundant [:-7] slice with the same split.)
    return time.isoformat().split('.')[0]
def time_bounds(gap=6):
    '''
    Return {'upper bound', 'lower bound'} time strings spanning the last
    `gap` hours up to now.

    gap is measured in hours
    '''
    now = datetime.now()
    before = now - timedelta(hours=gap)
    upper_bound = time_in_string(now)
    lower_bound = time_in_string(before)
    rv = {
        'upper bound': upper_bound,
        'lower bound': lower_bound
    }
    return rv
def get_data_time_delta(crypto='BTC', period='10MIN', time_delta=6):
    '''
    Returns data in the frequency of period for a range specified in time_delta,
    until now. time_delta is measured in hours
    '''
    # Derive [now - time_delta, now] and delegate to get_data.
    times = time_bounds(gap=time_delta)
    data = get_data(crypto, period, times['lower bound'], times['upper bound'])
    return data
def get_data_max_possible(crypto='BTC', period=10, end=None):
    '''
    get the max observations allowed by the API in a single call (100k), given
    the end time
    period is the frequency of the data, must be in minutes
    end defaults to "now" at call time
    '''
    # FIX: the default was `end=time_in_string(datetime.now())`, which is
    # evaluated ONCE at import -- every later default call reused the same
    # frozen timestamp.  Resolve "now" per call instead.
    if end is None:
        end = time_in_string(datetime.now())
    # Stay just under the API's 100k-rows-per-call limit.
    minutes_delta = period * 99990
    end_dt = time_in_datetime(end)
    start_dt = end_dt - timedelta(minutes=minutes_delta)
    print(start_dt.isoformat())
    start = time_in_string(start_dt)
    frequency = str(period) + 'MIN'
    print('Start obs:', start)
    print('Final obs:', end)
    print('\nRetrieving data...')
    data = get_data(crypto, frequency, start, end)
    print('\nData retrieved')
    print('Observations:', len(data))
    return data
def get_data(crypto, period, start, end):
    '''
    Retrieves data for a crypto, with a frequency of 'period', and
    specified start and end times
    '''
    # OHLCV history endpoint; limit=100000 is the API's per-call maximum.
    request_url = URL + 'v1/ohlcv/' + crypto + '/USD/history?period_id=' + \
                  period + '&time_start=' + start + \
                  '&time_end=' + end + '&limit=100000'
    # API key is read from disk on every call.
    headers = {"X-CoinAPI-Key": get_api_key()}
    response = requests.get(request_url, headers=headers)
    data = json.loads(response.text)
    return data
def calculate_observations(start, end, frequency):
    '''
    Calculates the number of observations between start and end (inclusive),
    given the frequency.

    frequency may be an int (minutes) or a string such as '10MIN'.
    '''
    start_dt = time_in_datetime(start)
    end_dt = time_in_datetime(end)
    if isinstance(frequency, str):
        # Accept '10MIN'-style period ids as well as bare numeric strings.
        # FIX: previously a non-'MIN' string was used as-is (TypeError on
        # division) and a non-string frequency left freq_minutes unassigned
        # (NameError).
        freq_minutes = int(frequency.split('M')[0]) if 'MIN' in frequency else int(frequency)
    else:
        freq_minutes = frequency
    minutes = (end_dt - start_dt).total_seconds() / 60
    return int(minutes / freq_minutes) + 1
| true |
c0a3f333b59c6936cc6fd4fa553e810a2b640267 | Python | hong-hanh-dang/tkt4140 | /src/src-ch5/wave.py | UTF-8 | 1,385 | 2.9375 | 3 | [] | no_license | from numpy import*
from Tkinter import *
import time
# Finite-difference simulation of the 1-D wave equation, animated in Tkinter.
dt = 0.0001
dx = 0.1
v = 100 #Courant condition says dx/dt >= v
gamma = 0  # damping coefficient (0 = undamped)
c1 = (v**2) * (dt**2) / (dx**2)
c2 = gamma*dt / dx**2

nodes = 30
source_node = nodes/3 #point of application of initial pulse
x = arange(0, nodes, 1)
# Three time levels of the discretised solution.
u_new = zeros(nodes)
u = zeros(nodes)
u_old = zeros(nodes)
##--Tkinter stuff--
t_start = time.time()#for referencing
height = 300.0
width = 400.0
center = height//2
x_factor = width/(nodes-1)
y_factor = 200
##------------------
# Initial pulse: same displacement at t=0 and t=-dt (zero initial velocity).
u[source_node] = -10
u_old[source_node] = -10

def paint(canvas, parent):
    """Advance the wave one time step and redraw the curve.

    NOTE(review): the callers pass (root, c) / (parent, canvas) -- the
    reverse of this signature -- and the body draws via the global `c`
    anyway; it works only because every Tk widget has after_idle. Confirm
    and untangle the argument order.
    """
    # Leap-frog update of the interior nodes (endpoints stay fixed at 0).
    for i in range (1, nodes-1):
        U1 = u[i+1] - 2*u[i] + u[i-1]
        U0 = u_old[i+1] - 2*u_old[i] + u_old[i-1]
        u_new[i] = c1*U1 + c2*(U1-U0) + 2.0*u[i] - u_old[i]
    # Shift the time levels.
    for i in range (0, nodes):
        u_old[i] = u[i]
        u[i] = u_new[i]
    # Flatten (x, y) screen coordinates for Canvas.coords.
    xy=[]
    for i in range(0, nodes):
        xy.append((int)(i*x_factor))
        xy.append((int)(u_new[i]*y_factor)+center)
    #time.sleep(0.001)
    c.coords("curve", *xy)
    # Re-schedule itself as soon as the event loop is idle.
    parent.after_idle(paint,parent,canvas)
##------------------

#--Tkinter stuff:--
root = Tk()
root.title("Animated wave equation - initial pulse at L/3")
root.bind('q','exit')
c = Canvas(width=width, height=height, bg='white')
c.pack()
# Placeholder polyline; paint() overwrites its coordinates each frame.
c.create_line(tag = "curve", *zeros(2*width), fill='blue')
root.after(100,paint,root,c)
root.mainloop()
9b1a594d86d405083a5d7c9c7ba7fd229b8c2cf9 | Python | jrobchin/this-game-does-not-exist | /scripts/utils.py | UTF-8 | 4,138 | 2.6875 | 3 | [
"MIT"
] | permissive | import random
random.seed(42)
from typing import Tuple, List, Optional
from enum import Enum
from dataclasses import dataclass
import logging
import pandas as pd
from transformers import GPT2LMHeadModel, GPT2TokenizerFast
import torch
from torch import nn
import numpy as np
from PIL import Image
from scripts.models import HeaderGenerator, ScreenshotGenerator
torch_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@dataclass
class Game:
    """Generated (fake) Steam-style game listing."""
    name: str
    developer: str
    publisher: str
    description: str
    genres: List[str]
    platforms: List[str]
    categories: List[str]
    header_img: Optional[str] # ID of the image
    screenshot_img: Optional[str] # ID of the image
class TOKENS(Enum):
    """Special separator tokens used in the GPT-2 training/generation text."""
    NAME = "<|name|>"
    DEVELOPER = "<|developer|>"
    PUBLISHER = "<|publisher|>"
    DESCRIPTION = "<|description|>"
    GENRES = "<|genres|>"
    GAME = "<|game|>"
    ENDOFTEXT = "<|endoftext|>"
def load_tokenizer_and_model(model_path: str, device = torch_device, eval=True) -> Tuple[GPT2TokenizerFast, GPT2LMHeadModel]:
    """Load a GPT-2 tokenizer + LM head model from *model_path* onto *device*.

    NOTE(review): the `eval` parameter shadows the builtin; renaming would
    break keyword callers, so it is only flagged here.
    """
    tokenizer = GPT2TokenizerFast.from_pretrained(model_path)
    # Pad with EOS so generation does not require a dedicated pad token.
    model = GPT2LMHeadModel.from_pretrained(model_path, pad_token_id=tokenizer.eos_token_id)
    model.to(device)
    if eval:
        model.eval()
    return tokenizer, model
def generate_text(tokenizer: GPT2TokenizerFast, model: GPT2LMHeadModel, start_token: str, **gen_kwargs) -> str:
    """Generate a single output of text. A different function would be needed to batch generation."""
    prompt_encoded = tokenizer.encode(start_token, return_tensors="pt").to(model.device)
    # Nucleus/top-k sampling with repeated-5-gram suppression; extra
    # generation options (e.g. max_length) come in via gen_kwargs.
    output = model.generate(
        prompt_encoded,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        no_repeat_ngram_size=5,
        **gen_kwargs
    )
    output_decoded = tokenizer.decode(output[0])
    return output_decoded
def generate_game_text(tokenizer: GPT2TokenizerFast, model: GPT2LMHeadModel, dataset: pd.DataFrame) -> Game:
    """Generate a Game record: text fields from GPT-2, list fields sampled
    from *dataset*.

    Raises ValueError when the generated text lacks the separator tokens
    needed for parsing.
    """
    game_text: str = generate_text(tokenizer, model, TOKENS.GAME.value, max_length=1000)
    logging.info(game_text)
    if not any(token.value in game_text for token in [TOKENS.GAME, TOKENS.DEVELOPER, TOKENS.PUBLISHER, TOKENS.DESCRIPTION, TOKENS.ENDOFTEXT]):
        raise ValueError(f"Tokens missing from game text: {game_text}")

    # Parse game text fields (yeah I know I could have used regex)
    name: str = game_text.split(TOKENS.GAME.value)[-1].split(TOKENS.DEVELOPER.value)[0]
    developer: str = game_text.split(TOKENS.DEVELOPER.value)[-1].split(TOKENS.PUBLISHER.value)[0]
    publisher: str = game_text.split(TOKENS.PUBLISHER.value)[-1].split(TOKENS.DESCRIPTION.value)[0]
    description: str = game_text.split(TOKENS.DESCRIPTION.value)[-1].split(TOKENS.ENDOFTEXT.value)[0]

    # Temporarily sample random fields from dataset
    # (genres/platforms/categories are ';'-separated strings in the CSV).
    sample: pd.Series = dataset.sample(1).iloc[0]
    genres: List[str] = sample.genres.split(";")
    platforms: List[str] = sample.platforms.split(";")
    categories: List[str] = sample.categories.split(";")

    return Game(
        name=name,
        developer=developer,
        publisher=publisher,
        description=description,
        genres=genres,
        platforms=platforms,
        categories=categories,
        header_img=None,
        screenshot_img=None
    )
def arr_to_image(data: np.ndarray) -> Image.Image:
    """Min-max scale *data* to 0-255 and wrap it as a PIL image.

    FIX: the previous formula, 255/max * (data - min), exceeded 255 whenever
    min < 0 (typical for tanh GAN outputs), wrapping around on the uint8
    cast, and divided by zero for constant arrays.
    """
    lo = data.min()
    hi = data.max()
    if hi == lo:
        # Constant input: map everything to black instead of dividing by 0.
        return Image.fromarray(np.zeros(data.shape, dtype=np.uint8))
    scaled = 255.0 * (data - lo) / (hi - lo)
    return Image.fromarray(scaled.astype(np.uint8))
def generate_header(gen_model: HeaderGenerator, device = torch_device) -> Image:
    """Sample one header image from the header GAN generator."""
    noise = torch.randn(1, HeaderGenerator.LATENT_VECTOR_SIZE, 1, 1, device=device)
    generated_header = gen_model(noise).detach().cpu().numpy()[0]
    # CHW -> HWC for image conversion.
    generated_header = np.transpose(generated_header, (1, 2, 0))
    return arr_to_image(generated_header)
def generate_screenshot(gen_model: ScreenshotGenerator, device = torch_device) -> Image:
    """Sample one screenshot image from the screenshot GAN generator.

    FIX: the local result previously shadowed the function's own name, and
    the return annotation said None even though an image is returned.
    """
    noise = torch.randn(1, ScreenshotGenerator.LATENT_VECTOR_SIZE, 1, 1, device=device)
    screenshot = gen_model(noise).detach().cpu().numpy()[0]
    # CHW -> HWC for image conversion.
    screenshot = np.transpose(screenshot, (1, 2, 0))
    return arr_to_image(screenshot)
| true |
29317edc880d50284cb533080ce04c56ae434a54 | Python | Brovarets/ICS-11-Brovarets | /process_data.py | UTF-8 | 3,044 | 3.078125 | 3 | [] | no_license | from data_service import get_dovidnik, get_prices
zmina = {
'name market' : "", # найменування ринку
'name product' : "", # найменування товара
'Unit' : "", # одиниця виміру
'price level in 2007' : 0, # зміна рівня цін 2007
'price level in 2008 in UAH' : 0, # зміна рівня цін 2008 у грн
'price level in 2008 in % to 2007' : 0, # зміна рівня цін 2008 у % до 2007
'price level in 2011 in UAH' : 0, # зміна рівня цін 2011 у грн
'price level in 2011 in % to 2008' : 0, # зміна рівня цін 2011 у % до 2008
'price level of 2017 in UAH' : 0, # зміна рівня цін 2017 у грн
'price level of 2017 in % to 2011' : 0 # зміна рівня цін 2017 у % до 2011
}
def create_zmina():
    """Build a list of zmina records (copies of the module-level template),
    one per price row, resolving names via the dovidnik reference table.
    """
    dovidniks = get_dovidnik()
    price = get_prices()

    def get_dovidnik_name(dovidnik_code):
        # Human-readable name (column 1) for a dovidnik code.
        for dovidnik in dovidniks:
            if dovidnik[0] == dovidnik_code:
                return dovidnik[1]
        return "*** назва не знайдена"

    def get_dovidnik_discount(dovidnik_code):
        # Numeric value (column 2) for a dovidnik code.
        for dovidnik in dovidniks:
            if dovidnik[0] == dovidnik_code:
                return dovidnik[2]
        return "*** назва не знайдена"

    zmina_list = []
    for prices in price:
        zmina_copy = zmina.copy()
        # FIX: originally `get_dovidnik_name market(prices[0])` -- invalid syntax.
        zmina_copy['name market'] = get_dovidnik_name(prices[0])
        zmina_copy['name product'] = prices[3].rstrip()
        zmina_copy['Unit'] = prices[1]
        # FIX: originally `get_dovidnik_price level in 2007(prices[0])` --
        # invalid syntax.  get_dovidnik_discount is the only other lookup
        # helper, so presumably it supplies the 2007 base value -- TODO confirm.
        zmina_copy['price level in 2007'] = float(get_dovidnik_discount(prices[0]))
        # NOTE(review): every field below starts at 0 in the template, so all
        # these products evaluate to 0 -- the source values for 2008/2011/2017
        # appear to be missing.  Left as-is pending the real data source.
        zmina_copy['price level in 2008 in UAH'] = int(zmina_copy['price level in 2008 in UAH']) * int(zmina_copy['price level in 2007'] * 10) / 10
        zmina_copy['price level in 2008 in % to 2007'] = int(zmina_copy['price level in 2008 in % to 2007']) * int(zmina_copy['price level in 2007'] * 10) / 10
        zmina_copy['price level in 2011 in UAH'] = int(zmina_copy['price level in 2011 in UAH']) * int(zmina_copy['price level in 2007'] * 10) / 10
        zmina_copy['price level in 2011 in % to 2008'] = int(zmina_copy['price level in 2011 in % to 2008']) * int(zmina_copy['price level in 2007'] * 10) / 10
        zmina_copy['price level of 2017 in UAH'] = int(zmina_copy['price level of 2017 in UAH']) * int(zmina_copy['price level in 2007'] * 10) / 10
        zmina_copy['price level of 2017 in % to 2011'] = int(zmina_copy['price level of 2017 in % to 2011']) * int(zmina_copy['price level in 2007'] * 10) / 10
        zmina_list.append(zmina_copy)
    return zmina_list
#result = create_zmina()
#for line in result:
# print(line) | true |
4579f5c21e95864766152be034191da1a1061a4e | Python | sumithkumarEsap/Python | /Source/ICP5/TShirt.py | UTF-8 | 3,063 | 3.453125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import random
#creatig a cluster at the start like random one
def create_cluster(X, centroid_pts):
    """Assign every sample in X to its nearest centroid.

    Returns a dict mapping centroid index -> list of member samples.
    """
    cluster = {}
    for x in X:
        # Index of the centroid with the smallest Euclidean distance to x
        # (ties resolve to the lowest index, as min() keeps the first).
        nearest = min(
            ((idx, np.linalg.norm(x - c)) for idx, c in enumerate(centroid_pts)),
            key=lambda pair: pair[1],
        )[0]
        # setdefault replaces the original bare try/except, which silently
        # swallowed *any* exception rather than just the expected KeyError.
        cluster.setdefault(nearest, []).append(x)
    return cluster
#calculate new cluster value based on the centroid values found
def calculate_new_center(cluster):
keys =sorted(cluster.keys())
newmu = np.array([(np.mean(cluster[k],axis = 0))for k in keys])
return newmu
#comparing new and old centroids and if same then we can keep the centroid as the one
def matched(new_centroids, old_centroids):
return (set([tuple(a)for a in new_centroids]) == set([tuple(a)for a in old_centroids]))
#runnig the k-mean algorithm on the data
def Apply_Kmeans(X, K, N):
# selecting random centroids from dataset and by number of clusters.
old_centroids = np.random.randint(N, size = K)
old_centroid_pts = np.array([X[i]for i in old_centroids])
print("old :",old_centroids)
print(old_centroid_pts)
cluster_info = create_cluster(X, old_centroid_pts)
print("Initial cluster information:")
print(cluster_info)
new_centroid_pts=calculate_new_center(cluster_info)
print("new :", new_centroid_pts)
itr = 0
print("Graph after selecting initial clusters with initial centroids:")
plot_cluster(old_centroid_pts,cluster_info,itr)
#comparing the centroids and calculating for new centroid if not matched else keeping the centroid
while not matched(new_centroid_pts, old_centroid_pts):
itr = itr + 1
old_centroid_pts = new_centroid_pts
cluster_info = create_cluster(X,new_centroid_pts)
plot_cluster(new_centroid_pts, cluster_info,itr)
new_centroid_pts = calculate_new_center(cluster_info)
print("Results after final iteration:")
plot_cluster(new_centroid_pts, cluster_info, itr)#plotting the cluster
return
#plotting the cluster
def plot_cluster(mu,cluster, itr):
color = 10 * ['r.','g.','k.','c.','b.','m.']
print('Iteration number : ',itr)
for l in cluster.keys():
for m in range(len(cluster[l])):
plt.plot(cluster[l][m][0], cluster[l][m][1], color[l], markersize=10)
plt.scatter(mu[:,0],mu[:,1],marker = 'x', s = 150, linewidths = 5, zorder = 10)
plt.show()
#plotting the graph
def init_graph(N, p1, p2):
X = np.array([(random.choice(p1),random.choice(p2))for i in range(N)])
return X
#intitialising the values for the cluster
def Simulate_Clusters():
K = int(input('Enter the number of Clusters.......'))
print('Running...\n')
p1 = np.array([5.5,6,5.4,6.2,5,6.8,7,4,5,5.6,5.5,6.3])
p2 = np.array([55,56,65,50,56,49,55,60,78,66,56,61])
X = init_graph(len(p1), p2, p1)
plt.scatter(X[:, 0], X[:, 1])
plt.show()
Apply_Kmeans(X, K, len(X))#calling the function
if __name__ == '__main__':
Simulate_Clusters() | true |
38cc85eda8c053ebc45f535dc03cbbf4cfefbad3 | Python | kanade788/myCode | /Triangle.py | UTF-8 | 676 | 4.15625 | 4 | [] | no_license | #Perform operator overloading for triangle in a way that add function adds
#length of three sides and greater operator returns the side with highest length
class Triangle:
def __init__(self,a=0,b=0,c=0):
self.a=a
self.b=b
self.c=c
def __add__(self,other):
self.l=self.a+self.b+self.c
print(self.l)
def __gt__(self,other):
if self.a>self.b and self.a>self.c:
print("a has the highest length which is ",self.a)
elif self.b>self.a and self.b>self.c:
print("b has the highest length which is ",self.b)
else:
print("c has the highest length which is ",self.c)
| true |
c94b89b98f67d06a29d05f97d4d6e98186c38beb | Python | zoezirlin/E-Commerce-Shopper-Intentions | /Python code 6.9.2020 | UTF-8 | 5,433 | 3.421875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 9 18:55:53 2020
@author: zoezirlin
"""
# Importing relevant libraries
import pandas as pd #Data manipulation and analysis
import numpy as np #Data manipulation and analysis
import seaborn as sns #Data visualization
from matplotlib import pyplot as plt #Data visualization
import statsmodels.api as sm
from sklearn.linear_model import LogisticRegression #Logistic Regression
from sklearn.metrics import classification_report, confusion_matrix #Testing the logistic regression
from scipy.stats import pointbiserialr #importing point biserial correlation abilities
### Importing and learning the basics of the dataset ###
## Importing the dataset from desktop
intention_df = pd.read_csv("/Users/zoezirlin/Desktop/online_shoppers_intention.csv")
## Printing the first ten datalines in dataframe "intention"
intention_df[:10]
### Graphing and Data Analysis ###
## Learning that 84% of instances/observations did NOT result in purchase
intention_df["Revenue"].value_counts(normalize=True).plot(kind= 'bar', color = 'Orange')
plt.title('Revenue Disparity')
intention_df["Revenue"].value_counts(normalize=True)
## Learning that the bounce rates do not follow a normal distribution, grouped at rates of 0 to 0.05
intention_df["BounceRates"].plot.hist(grid=True, bins=10, rwidth=2, color= 'Orange')
plt.xlabel('Bouce Rate')
plt.title('Bouce Rates Histogram')
## Learning that the exit rates do not follow a normal distribution...
# but moreso than bouce rates, grouped at rates of 0 to 0.075
intention_df["ExitRates"].plot.hist(grid=True, bins=10, rwidth=2, color= 'Orange')
plt.xlabel('Exit Rate')
plt.title('Exit Rates Histogram')
## Creating bar plots for categorical variables
cols = ['TrafficType','Region','VisitorType','Month']
n_rows = 2
n_cols = 2
figs, axs = plt.subplots(n_rows, n_cols, figsize = (n_cols * 5, n_rows * 5)) #the 5 denotes size of the graph
for r in range(0, n_rows):
for c in range(0, n_cols):
i = r * n_cols + c
ax = axs[r][c]
sns.countplot(intention_df[cols[i]], hue=intention_df['Revenue'], ax=ax)
ax.set_title(cols[i])
ax.legend(title='Revenue', loc = 'upper right')
plt.tight_layout()
## Looking at revenue by month
sns.barplot(x = 'Revenue', y = 'Month', data = intention_df )
intention_df.pivot_table('Revenue','Month')
#November and October have the highest revenue probabilities
#February has the lowest revenue probabilities
## Looking at revenue by month and weekend
intention_df.pivot_table('Revenue',
index = 'Weekend',
columns = 'Month').plot()
intention_df.pivot_table('Revenue',
index = 'Weekend',
columns = 'Month')
# Weekends in November have the highest revenue ratios
# Weekends in February have the lowest revenue ratios
# Weekdays in November have the highest revenue ratios
# Weekdays in February have the lowest revenue ratios
## Looking at purchase rates based on visitor type
# Learning that New Visitors have a higher rate of purchase than Return Visitors
intention_df["VisitorType"].value_counts().plot(kind='bar', color='Pink')
intention_df.pivot_table('Revenue','VisitorType')
## Looking at chances of purchase by region and weekday/weekend
intention_df.pivot_table('Revenue',
index = 'Weekend' ,
columns = 'Region')
# Learning that on a weekend in region 2, there is a 22% chance of purchase
# Learning that on a weekend in region 9, there is a 20% chance of purchase
# Learning that on a weekday in region 8, there is a 12% chance of purchase
## Looking at bouce rates by region
ax = sns.boxplot(x='Region', y='BounceRates', data=intention_df)
x = intention_df['Region']
y = intention_df['BounceRates']
## Looking at product related duratin by revenue
ax = sns.boxplot(x='Revenue', y='ProductRelated_Duration', data=intention_df)
x = intention_df['Revenue']
y = intention_df['ProductRelated_Duration']
### Variable Logistic Regression Elimination/Model Creation ###
## Correlation Matrix
intention_df.corr(method='pearson')
## Correlation Matrix Graphic
corr = intention_df.corr()
heatmap = sns.heatmap(
corr,
square=True
)
# Logistic Regression
# Model: 'Revenue' = 'ExitRates',
# 'Page Values',
# 'ProductRelated_Duration',
# 'Month' ,
# 'Region'
#Dropping NA observations
intention_df = intention_df.dropna()
intention_df.head()
#Assigning the True/False options of Revenue to 0/1
#df = pd.get_dummies(df, columns=['type'])
#intention_df = pd.get_dummies(intention_df['Revenue'])
#Labeling each column's identification number for ILOC program
#col_mapping = [f"{c[0]}:{c[1]}" for c in enumerate(intention_df.columns)]
#print(col_mapping)
#logit_grouping = intention_df.iloc[:, [6,8,10,12,15,17]]
#print(logit_grouping)
#col_mapping_2 = [f"{c[0]}:{c[1]}" for c in enumerate(logit_grouping.columns)]
#print(col_mapping_2)
#train_cols = logit_grouping.columns[:4]
# Index([gre, gpa, prestige_2, prestige_3, prestige_4], dtype=object)
#logit = sm.Logit(logit_grouping['Revenue'], logit_grouping[train_cols])
# fit the model
#result = logit.fit()
#train_cols = logit_grouping.columns[:]
| true |
0656d5af993ac181762d5ad7f05a401abf04d3f8 | Python | microsoft/qlib | /qlib/model/interpret/base.py | UTF-8 | 1,133 | 3.078125 | 3 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""
Interfaces to interpret models
"""
import pandas as pd
from abc import abstractmethod
class FeatureInt:
    """Feature (Int)erpreter: interface for models that can report per-feature importance."""

    @abstractmethod
    def get_feature_importance(self) -> pd.Series:
        """get feature importance

        Returns
        -------
        pd.Series
            The index is the feature name.
            The greater the value, the higher importance.
        """
class LightGBMFInt(FeatureInt):
    """LightGBM (F)eature (Int)erpreter"""

    def __init__(self):
        # Trained booster; NOTE(review): presumably assigned by a subclass/trainer -- confirm.
        self.model = None

    def get_feature_importance(self, *args, **kwargs) -> pd.Series:
        """get feature importance

        Notes
        -----
        parameters reference:
        https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.Booster.html?highlight=feature_importance#lightgbm.Booster.feature_importance
        """
        importances = self.model.feature_importance(*args, **kwargs)  # pylint: disable=E1101
        names = self.model.feature_name()  # pylint: disable=E1101
        # Highest-importance features first.
        return pd.Series(importances, index=names).sort_values(ascending=False)
| true |
cb121c1ba497056825f981fae07b4d4199b21a8a | Python | MonaBeikirch/neurobotics | /neuro_deep_planner/src/ros_handler.py | UTF-8 | 2,449 | 2.859375 | 3 | [] | no_license | #!/usr/bin/env python
import rospy
import numpy as np
from neuro_local_planner_wrapper.msg import Transition
from geometry_msgs.msg import Twist, Vector3
class ROSHandler:
    """Bridge between ROS topics and the learning code.

    Subscribes to the local planner's ``Transition`` stream (state + reward) and
    publishes ``Twist`` velocity commands chosen by the agent.
    """

    def __init__(self):
        # Initially assumed Input size, since init is false these values will be updated with the first received msg
        self.__init = False
        self.depth = 4
        self.height = 84
        self.width = 84
        self.state = np.zeros((self.width, self.height, self.depth), dtype='float')
        self.reward = 0.0
        self.is_episode_finished = False
        self.__sub = rospy.Subscriber("/move_base/NeuroLocalPlannerWrapper/transition", Transition,
                                      self.input_callback)
        self.__pub = rospy.Publisher("neuro_deep_planner/action", Twist, queue_size=10)
        self.__new_msg_flag = False

    def input_callback(self, transition_msg):
        """Cache state/reward/done from an incoming Transition message.

        NOTE(review): assumes `state_representation` is a flat array of
        depth*height*width cells scaled 0..100 -- confirm against the publisher.
        """
        # If msg is received for the first time adjust parameters
        if not self.__init:
            self.depth = transition_msg.depth
            self.width = transition_msg.width
            self.height = transition_msg.height
            self.state = np.zeros((self.depth, self.width, self.height), dtype='float')
            self.__init = True
        # Lets update the new reward
        self.reward = transition_msg.reward
        # Check if episode is done or not
        self.is_episode_finished = transition_msg.is_episode_finished
        # Lets update the new costmap its possible that we need to switch some axes here...
        if not self.is_episode_finished:
            # (depth, height, width) -> swapaxes -> (depth, width, height) -> rollaxis -> (width, height, depth)
            temp_state = np.asarray(transition_msg.state_representation).reshape(self.depth, self.height, self.width).\
                swapaxes(1, 2)
            self.state = np.rollaxis(temp_state, 0, 3)
            # Normalize!
            self.state = self.state.astype(float)
            self.state = np.divide(self.state, 100.0)
        # We have received a new msg
        self.__new_msg_flag = True

    def publish_action(self, action):
        """Publish `action` = (linear_x, angular_z) as a Twist command."""
        # Generate msg output
        vel_cmd = Twist(Vector3(action[0], 0, 0), Vector3(0, 0, action[1]))
        # Send the action back
        self.__pub.publish(vel_cmd)

    def new_msg(self):
        """Return True exactly once per received message (the flag is consumed)."""
        # Return true if new msg arrived only once for every new msg
        output = False
        if self.__new_msg_flag:
            output = True
            self.__new_msg_flag = False
        return output
| true |
61c694b92e777ddab5bb30cbd89846a63fbbc0e1 | Python | WhaleHippo/2018-1 | /dataStructureStudy/heap.py | UTF-8 | 2,997 | 3.28125 | 3 | [] | no_license | class node :
def __init__(self, data, parent = None, left = None, right = None):
if isinstance(data, node):
self.data = data.data;
self.parent = data.parent;
self.left = data.left;
self.right = data.right;
else :
self.data = data;
self.parent = parent;
self.left = left;
self.right = right;
'''
def __str__(self):
return str(self.data);
'''
class maxHeap:
    """Max-heap backed by an explicit binary tree of `node` objects.

    Nodes are addressed by their level-order index `n` (root = 0, children of
    `n` at 2*n+1 / 2*n+2) and located by walking down from the root.

    Fixes over the previous revision:
    - delNode no longer rebinds its cursor to the int 0 and then dereferences
      `.left` on it (crash), and no longer dereferences absent (None) children;
    - `size` is now decremented when the last remaining element is removed;
    - the magic sentinel parent `node(999999999)` is replaced by an index guard,
      so arbitrarily large values can be stored;
    - integer division uses `//` and printing uses `print(...)`, which behave
      identically under Python 2 but also work under Python 3.
    """

    def __init__(self):
        self.size = 0
        self.topNode = None

    def insert(self, value):
        """Append `value` at the next free level-order slot, sift it up. Returns self."""
        if self.size == 0:
            self.topNode = node(value)
        else:
            # parent of slot `self.size`: left child when the slot index is odd
            parent = self.getNindex((self.size - 1) // 2)
            child = node(value, parent)
            if self.size % 2 == 0:
                parent.right = child
            else:
                parent.left = child
        # sift-up: swap payloads with the parent while the heap property is violated
        n = self.size
        while n > 0:
            parent_index = (n - 1) // 2
            if self.getNindex(n).data > self.getNindex(parent_index).data:
                self.swapNode(n, parent_index)
                n = parent_index
            else:
                break
        self.size = self.size + 1
        return self

    def getNindex(self, n):
        """Return the node at level-order index `n`, recursing via its parent slot."""
        if n == 0:
            return self.topNode
        parentNode = self.getNindex((n - 1) // 2)
        if n % 2 == 1:
            return parentNode.left
        return parentNode.right

    def printAll(self, currentNode=0):
        """Print every stored value in level order. Returns self."""
        for i in range(self.size):
            print(self.getNindex(i).data)
        return self

    def swapNode(self, a, b):
        """Exchange the payloads of the nodes at indices `a` and `b` (links untouched)."""
        nodeA = self.getNindex(a)
        nodeB = self.getNindex(b)
        nodeA.data, nodeB.data = nodeB.data, nodeA.data

    def delNode(self):
        """Remove and return the maximum value, or "heap is empty!" when empty."""
        if self.size == 0:
            return "heap is empty!"
        delData = self.topNode.data
        if self.size == 1:
            self.topNode = None
        else:
            # move the last element's payload to the root, then detach the last node
            last = self.size - 1
            self.swapNode(0, last)
            parentNode = self.getNindex(last).parent
            if last % 2 == 1:
                parentNode.left = None
            else:
                parentNode.right = None
            # sift-down over the remaining `last` elements (indices 0 .. last-1)
            idx = 0
            while True:
                largest = idx
                for child in (2 * idx + 1, 2 * idx + 2):
                    if child < last and self.getNindex(child).data > self.getNindex(largest).data:
                        largest = child
                if largest == idx:
                    break
                self.swapNode(idx, largest)
                idx = largest
        self.size = self.size - 1
        return delData
# Smoke test: interleaved inserts of 1 and 2 followed by eight delNode calls.
# NOTE(review): with the current delNode implementation this crashes -- the
# sift-down cursor is rebound to the int 0 and then `.left` is dereferenced
# on it (see maxHeap.delNode).
mh = maxHeap();
mh.insert(1).insert(2).insert(1).insert(2).insert(1).insert(2).insert(1).insert(2).insert(1).insert(2)
mh.delNode()
mh.delNode()
mh.delNode()
mh.delNode()
mh.delNode()
mh.delNode()
mh.delNode()
mh.delNode()
| true |
a2d12810d70bb471cebe13288c4da74ff8d688a5 | Python | UWPCE-PythonCert-ClassRepos/SP_Online_Course2_2018 | /students/ShinTran/lesson06/water-regulation/waterregulation/test.py | UTF-8 | 1,749 | 2.890625 | 3 | [] | no_license | """
Unit tests for the water-regulation module
"""
import unittest
from unittest.mock import MagicMock
from pump import Pump
from sensor import Sensor
from .controller import Controller
from .decider import Decider
# Integer pump states keyed by action name; passed to Decider.decide and
# asserted against in the controller tests below.
ACTIONS = {
    'PUMP_IN': 1,
    'PUMP_OFF': 0,
    'PUMP_OUT': -1
}
class DeciderTests(unittest.TestCase):
"""
Unit tests for the Decider class
"""
def test_decider(self):
"""Tests for each of the behaviors defined for Decider.decide"""
decider = Decider(100, 0.05)
self.assertEqual(decider.decide(85, 0, ACTIONS), 1)
self.assertEqual(decider.decide(107, 0, ACTIONS), -1)
self.assertEqual(decider.decide(96, 0, ACTIONS), 0)
self.assertEqual(decider.decide(101, 1, ACTIONS), 0)
self.assertEqual(decider.decide(75, 1, ACTIONS), 1)
self.assertEqual(decider.decide(80, -1, ACTIONS), 0)
self.assertEqual(decider.decide(103, -1, ACTIONS), -1)
class ControllerTests(unittest.TestCase):
    """
    Unit tests for the Controller class
    """

    def test_controller(self):
        """Tests for each of the behaviors defined for Controller.tick"""
        # Real collaborators, with their network-facing methods replaced by
        # mocks so tick() runs without a live server.
        sensor = Sensor('127.0.0.1', 8000)
        pump = Pump('127.0.0.1', 8000)
        decider = Decider(100, 0.05)
        controller = Controller(sensor, pump, decider)
        sensor.measure = MagicMock(return_value=95)
        pump.get_state = MagicMock(return_value=pump.PUMP_IN)
        decider.decide = MagicMock(return_value=pump.PUMP_IN)
        pump.set_state = MagicMock(return_value=True)
        controller.tick()
        # One tick should measure, read the pump state, and feed both to the decider.
        sensor.measure.assert_called_with()
        pump.get_state.assert_called_with()
        decider.decide.assert_called_with(95, pump.PUMP_IN, ACTIONS)
        # NOTE(review): pump.set_state is mocked but never asserted -- consider
        # also asserting it was called with the decided action.
b79ca57bcd34ad4e51fc7901d25361ac8808226c | Python | databill86/HyperFoods | /venv/lib/python3.6/site-packages/gensim/matutils.py | UTF-8 | 54,711 | 2.734375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Math helper functions."""
from __future__ import with_statement
from itertools import chain
import logging
import math
from gensim import utils
from gensim.utils import deprecated
import numpy as np
import scipy.sparse
from scipy.stats import entropy
import scipy.linalg
from scipy.linalg.lapack import get_lapack_funcs
from scipy.linalg.special_matrices import triu
from scipy.special import psi # gamma function utils
from six import iteritems, itervalues, string_types
from six.moves import zip, range
logger = logging.getLogger(__name__)
def blas(name, ndarray):
    """Look up the BLAS routine `name` specialized for `ndarray`'s dtype.

    Parameters
    ----------
    name : str
        BLAS function name without the type prefix (e.g. 'nrm2').
    ndarray : numpy.ndarray
        Sample array used to pick the optimal type-prefixed routine.

    Returns
    -------
    object
        The matching BLAS function.
    """
    (func,) = scipy.linalg.get_blas_funcs((name,), (ndarray,))
    return func
def argsort(x, topn=None, reverse=False):
    """Indices of the `topn` smallest (or, with `reverse`, greatest) elements of `x`.

    Parameters
    ----------
    x : array_like
        Values to rank.
    topn : int, optional
        How many indices to return; defaults to all of them.
    reverse : bool, optional
        Rank by greatest value (descending) instead of smallest (ascending).

    Returns
    -------
    numpy.ndarray
        Up to `topn` indices, ordered as requested.
    """
    values = np.asarray(x)  # unify code path for non-array inputs (list, tuple, ...)
    if topn is None:
        topn = values.size
    if topn <= 0:
        return []
    if reverse:
        # negate so that selecting the "smallest" picks the greatest originals
        values = -values
    # full sort when we need everything anyway, or when argpartition is unavailable
    if topn >= values.size or not hasattr(np, 'argpartition'):
        return np.argsort(values)[:topn]
    # np >= 1.8: cheap partial selection, then order just the selected prefix
    chosen = np.argpartition(values, topn)[:topn]
    return chosen.take(np.argsort(values.take(chosen)))
def corpus2csc(corpus, num_terms=None, dtype=np.float64, num_docs=None, num_nnz=None, printprogress=0):
    """Convert a streamed corpus in bag-of-words format into a sparse matrix `scipy.sparse.csc_matrix`,
    with documents as columns.

    Notes
    -----
    If the number of terms, documents and non-zero elements is known, you can pass
    them here as parameters and a (much) more memory efficient code path will be taken.

    Parameters
    ----------
    corpus : iterable of iterable of (int, number)
        Input corpus in BoW format
    num_terms : int, optional
        Number of terms in `corpus`. If provided, the `corpus.num_terms` attribute (if any) will be ignored.
    dtype : data-type, optional
        Data type of output CSC matrix.
    num_docs : int, optional
        Number of documents in `corpus`. If provided, the `corpus.num_docs` attribute (in any) will be ignored.
    num_nnz : int, optional
        Number of non-zero elements in `corpus`. If provided, the `corpus.num_nnz` attribute (if any) will be ignored.
    printprogress : int, optional
        Log a progress message at INFO level once every `printprogress` documents. 0 to turn off progress logging.

    Returns
    -------
    scipy.sparse.csc_matrix
        `corpus` converted into a sparse CSC matrix.

    See Also
    --------
    :class:`~gensim.matutils.Sparse2Corpus`
        Convert sparse format to Gensim corpus format.

    """
    try:
        # if the input corpus has the `num_nnz`, `num_docs` and `num_terms` attributes
        # (as is the case with MmCorpus for example), we can use a more efficient code path
        if num_terms is None:
            num_terms = corpus.num_terms
        if num_docs is None:
            num_docs = corpus.num_docs
        if num_nnz is None:
            num_nnz = corpus.num_nnz
    except AttributeError:
        pass  # not a MmCorpus...
    if printprogress:
        logger.info("creating sparse matrix from corpus")
    if num_terms is not None and num_docs is not None and num_nnz is not None:
        # faster and much more memory-friendly version of creating the sparse csc
        # CSC layout: indptr[d]..indptr[d+1] delimits document d's slice of indices/data
        posnow, indptr = 0, [0]
        indices = np.empty((num_nnz,), dtype=np.int32)  # HACK assume feature ids fit in 32bit integer
        data = np.empty((num_nnz,), dtype=dtype)
        for docno, doc in enumerate(corpus):
            if printprogress and docno % printprogress == 0:
                logger.info("PROGRESS: at document #%i/%i", docno, num_docs)
            posnext = posnow + len(doc)
            # zip(*doc) transforms doc to (token_indices, token_counts)
            indices[posnow: posnext], data[posnow: posnext] = zip(*doc) if doc else ([], [])
            indptr.append(posnext)
            posnow = posnext
        assert posnow == num_nnz, "mismatch between supplied and computed number of non-zeros"
        result = scipy.sparse.csc_matrix((data, indices, indptr), shape=(num_terms, num_docs), dtype=dtype)
    else:
        # slower version; determine the sparse matrix parameters during iteration
        num_nnz, data, indices, indptr = 0, [], [], [0]
        for docno, doc in enumerate(corpus):
            if printprogress and docno % printprogress == 0:
                logger.info("PROGRESS: at document #%i", docno)
            # zip(*doc) transforms doc to (token_indices, token_counts)
            doc_indices, doc_data = zip(*doc) if doc else ([], [])
            indices.extend(doc_indices)
            data.extend(doc_data)
            num_nnz += len(doc)
            indptr.append(num_nnz)
        if num_terms is None:
            # infer vocabulary size from the largest feature id actually seen
            num_terms = max(indices) + 1 if indices else 0
        num_docs = len(indptr) - 1
        # now num_docs, num_terms and num_nnz contain the correct values
        data = np.asarray(data, dtype=dtype)
        indices = np.asarray(indices)
        result = scipy.sparse.csc_matrix((data, indices, indptr), shape=(num_terms, num_docs), dtype=dtype)
    return result
def pad(mat, padrow, padcol):
    """Grow `mat` by `padrow` extra rows and `padcol` extra columns of zeros.

    Parameters
    ----------
    mat : numpy.ndarray
        Input 2D matrix.
    padrow : int
        Number of rows to append (negative values are treated as 0).
    padcol : int
        Number of columns to append (negative values are treated as 0).

    Returns
    -------
    numpy.matrixlib.defmatrix.matrix
        The zero-padded matrix.
    """
    padrow = max(padrow, 0)
    padcol = max(padcol, 0)
    rows, cols = mat.shape
    right_pad = np.matrix(np.zeros((rows, padcol)))
    bottom_pad = np.matrix(np.zeros((padrow, cols + padcol)))
    return np.bmat([
        [mat, right_pad],
        [bottom_pad],
    ])
def zeros_aligned(shape, dtype, order='C', align=128):
    """Allocate a zeroed array whose data pointer is aligned to `align` bytes.

    Parameters
    ----------
    shape : int or (int, int)
        Shape of array.
    dtype : data-type
        Data type of array.
    order : {'C', 'F'}, optional
        Row-major ('C') or column-major ('F') memory layout.
    align : int, optional
        Alignment boundary in bytes.

    Returns
    -------
    numpy.ndarray
        Aligned, zero-filled array.
    """
    itemsize = np.dtype(dtype).itemsize
    nbytes = np.prod(shape, dtype=np.int64) * itemsize
    # over-allocate raw bytes, then slice off the misaligned prefix
    scratch = np.zeros(nbytes + align, dtype=np.uint8)  # problematic on win64 ("maximum allowed dimension exceeded")
    offset = -scratch.ctypes.data % align
    window = scratch[offset: offset + nbytes]
    return window.view(dtype).reshape(shape, order=order)
def ismatrix(m):
    """Is `m` a 2D `numpy.ndarray` or any `scipy.sparse` matrix?

    Parameters
    ----------
    m : object
        Object to check.

    Returns
    -------
    bool
        True for a 2D dense array or a sparse matrix, False otherwise.
    """
    if scipy.sparse.issparse(m):
        return True
    return isinstance(m, np.ndarray) and m.ndim == 2
def any2sparse(vec, eps=1e-9):
    """Convert a dense array, scipy.sparse vector, or BoW iterable into Gensim BoW format.

    Parameters
    ----------
    vec : {numpy.ndarray, scipy.sparse, iterable of (int, number)}
        Input vector.
    eps : float, optional
        Entries with absolute weight <= `eps` are dropped.

    Returns
    -------
    list of (int, float)
        Vector in BoW format.
    """
    if isinstance(vec, np.ndarray):
        return dense2vec(vec, eps)
    if scipy.sparse.issparse(vec):
        return scipy2sparse(vec, eps)
    # already (id, weight) pairs: just coerce types and drop near-zeros
    return [(int(term_id), float(weight)) for term_id, weight in vec if np.abs(weight) > eps]
def scipy2scipy_clipped(matrix, topn, eps=1e-9):
    """Get the 'topn' elements of the greatest magnitude (absolute value) from a `scipy.sparse` vector or matrix.

    Parameters
    ----------
    matrix : `scipy.sparse`
        Input vector or matrix (1D or 2D sparse array).
    topn : int
        Number of greatest elements, in absolute value, to return.
    eps : float
        Ignored.

    Returns
    -------
    `scipy.sparse.csr.csr_matrix`
        Clipped matrix.

    """
    if not scipy.sparse.issparse(matrix):
        raise ValueError("'%s' is not a scipy sparse vector." % matrix)
    # NOTE: `eps` is accepted for API symmetry but unused here (see docstring).
    if topn <= 0:
        return scipy.sparse.csr_matrix([])
    # Return clipped sparse vector if input is a sparse vector.
    if matrix.shape[0] == 1:
        # use np.argpartition/argsort and only form tuples that are actually returned.
        biggest = argsort(abs(matrix.data), topn, reverse=True)
        indices, data = matrix.indices.take(biggest), matrix.data.take(biggest)
        # single-row CSR: indptr [0, nnz] spans the whole data array
        return scipy.sparse.csr_matrix((data, indices, [0, len(indices)]))
    # Return clipped sparse matrix if input is a matrix, processing row by row.
    else:
        # accumulate per-row (data, indices) slices, then assemble one CSR at the end
        matrix_indices = []
        matrix_data = []
        matrix_indptr = [0]
        # calling abs() on entire matrix once is faster than calling abs() iteratively for each row
        matrix_abs = abs(matrix)
        for i in range(matrix.shape[0]):
            v = matrix.getrow(i)
            v_abs = matrix_abs.getrow(i)
            # Sort and clip each row vector first.
            biggest = argsort(v_abs.data, topn, reverse=True)
            indices, data = v.indices.take(biggest), v.data.take(biggest)
            # Store the topn indices and values of each row vector.
            matrix_data.append(data)
            matrix_indices.append(indices)
            matrix_indptr.append(matrix_indptr[-1] + min(len(indices), topn))
        matrix_indices = np.concatenate(matrix_indices).ravel()
        matrix_data = np.concatenate(matrix_data).ravel()
        # Instantiate and return a sparse csr_matrix which preserves the order of indices/data.
        return scipy.sparse.csr.csr_matrix(
            (matrix_data, matrix_indices, matrix_indptr),
            shape=(matrix.shape[0], np.max(matrix_indices) + 1)
        )
def scipy2sparse(vec, eps=1e-9):
    """Convert a single-row scipy.sparse vector into the Gensim bag-of-words format.

    Parameters
    ----------
    vec : `scipy.sparse`
        Sparse vector (must have exactly one row).
    eps : float, optional
        Entries with absolute weight <= `eps` are dropped.

    Returns
    -------
    list of (int, float)
        Vector in Gensim bag-of-words format.
    """
    vec = vec.tocsr()
    assert vec.shape[0] == 1
    result = []
    for pos, val in zip(vec.indices, vec.data):
        if np.abs(val) > eps:
            result.append((int(pos), float(val)))
    return result
class Scipy2Corpus(object):
    """Wrap a sequence of dense/sparse vectors as a streamed Gensim corpus.

    See Also
    --------
    :func:`~gensim.matutils.corpus2csc`
        Convert corpus in Gensim format to `scipy.sparse.csc` matrix.
    """

    def __init__(self, vecs):
        """
        Parameters
        ----------
        vecs : iterable of {`numpy.ndarray`, `scipy.sparse`}
            Input vectors.
        """
        self.vecs = vecs

    def __iter__(self):
        for vec in self.vecs:
            # dense arrays and sparse vectors need different BoW converters
            converter = full2sparse if isinstance(vec, np.ndarray) else scipy2sparse
            yield converter(vec)

    def __len__(self):
        return len(self.vecs)
def sparse2full(doc, length):
    """Convert a BoW document into a dense float32 numpy vector of size `length`.

    Parameters
    ----------
    doc : list of (int, number)
        Document in BoW format.
    length : int
        Vector dimensionality; must be supplied explicitly (typically the
        vocabulary size or topic count used to create `doc`).

    Returns
    -------
    numpy.ndarray
        Dense numpy vector for `doc`.

    See Also
    --------
    :func:`~gensim.matutils.full2sparse`
        Convert dense array to gensim bag-of-words format.
    """
    dense = np.zeros(length, dtype=np.float32)
    # coerce ids to int (numpy 1.12+ refuses float indices) and weights to float
    weights = {int(term_id): float(weight) for term_id, weight in doc}
    dense[list(weights)] = list(weights.values())
    return dense
def full2sparse(vec, eps=1e-9):
    """Convert a dense array into the Gensim bag-of-words format.

    Parameters
    ----------
    vec : numpy.ndarray
        Dense input vector.
    eps : float
        Entries with `abs(weight) <= eps` are treated as zero and omitted.

    Returns
    -------
    list of (int, float)
        Sparse BoW representation of `vec`.

    See Also
    --------
    :func:`~gensim.matutils.sparse2full`
        Convert a document in Gensim bag-of-words format into a dense numpy array.
    """
    dense = np.asarray(vec, dtype=float)
    kept = np.nonzero(np.abs(dense) > eps)[0]
    return list(zip(kept, dense.take(kept)))
dense2vec = full2sparse
def full2sparse_clipped(vec, topn, eps=1e-9):
    """Like :func:`~gensim.matutils.full2sparse`, but keep only the `topn` largest-magnitude entries.

    More efficient than sorting the whole vector when `len(vec) >> topn`.

    Parameters
    ----------
    vec : numpy.ndarray
        Input dense vector.
    topn : int
        Number of greatest (abs) elements to keep.
    eps : float
        Entries with `abs(weight) <= eps` are never returned.

    Returns
    -------
    list of (int, float)
        Clipped vector in BoW format.

    See Also
    --------
    :func:`~gensim.matutils.full2sparse`
        Convert dense array to gensim bag-of-words format.
    """
    # np.argpartition/argsort (inside `argsort`) lets us form only the tuples
    # actually returned -- ~40x faster than sorting all 2-tuples.
    if topn <= 0:
        return []
    dense = np.asarray(vec, dtype=float)
    magnitudes = np.abs(dense)
    nonzero = np.nonzero(magnitudes > eps)[0]
    biggest = nonzero.take(argsort(magnitudes.take(nonzero), topn, reverse=True))
    return list(zip(biggest, dense.take(biggest)))
def corpus2dense(corpus, num_terms, num_docs=None, dtype=np.float32):
    """Convert a BoW corpus into a dense 2D numpy array with documents as columns.

    Parameters
    ----------
    corpus : iterable of iterable of (int, number)
        Input corpus in the Gensim bag-of-words format.
    num_terms : int
        Number of terms in the dictionary (x-axis of the result).
    num_docs : int, optional
        Number of documents; when given, a more memory-efficient preallocating
        path is used (y-axis of the result).
    dtype : data-type, optional
        Data type of the output matrix.

    Returns
    -------
    numpy.ndarray
        Dense 2D array that presents `corpus`.

    See Also
    --------
    :class:`~gensim.matutils.Dense2Corpus`
        Convert dense matrix to Gensim corpus format.
    """
    if num_docs is None:
        # NumPy >= 1.16 requires a materialized sequence (not a generator) here
        columns = [sparse2full(doc, num_terms) for doc in corpus]
        result = np.column_stack(columns)
    else:
        # known document count: fill a preallocated buffer column by column
        result = np.empty((num_terms, num_docs), dtype=dtype)
        docno = -1
        for docno, doc in enumerate(corpus):
            result[:, docno] = sparse2full(doc, num_terms)
        assert docno + 1 == num_docs
    return result.astype(dtype)
class Dense2Corpus(object):
    """Expose a dense numpy array as a streamed Gensim corpus in BoW format.

    Notes
    -----
    No data copy is made (changes to the underlying matrix imply changes in the streamed corpus).

    See Also
    --------
    :func:`~gensim.matutils.corpus2dense`
        Convert Gensim corpus to dense matrix.
    :class:`~gensim.matutils.Sparse2Corpus`
        Convert sparse matrix to Gensim corpus format.
    """

    def __init__(self, dense, documents_columns=True):
        """
        Parameters
        ----------
        dense : numpy.ndarray
            Corpus in dense format.
        documents_columns : bool, optional
            Documents in `dense` represented as columns, as opposed to rows?
        """
        # normalize so that iterating `self.dense` yields one document per row
        self.dense = dense.T if documents_columns else dense

    def __iter__(self):
        """Yield each document as a sparse BoW list of (int, float)."""
        for row in self.dense:
            yield full2sparse(row.flat)

    def __len__(self):
        return len(self.dense)
class Sparse2Corpus(object):
    """Expose a scipy.sparse matrix as a streaming Gensim corpus.

    See Also
    --------
    :func:`~gensim.matutils.corpus2csc`
        Convert gensim corpus format to `scipy.sparse.csc` matrix
    :class:`~gensim.matutils.Dense2Corpus`
        Convert dense matrix to gensim corpus.
    """

    def __init__(self, sparse, documents_columns=True):
        """
        Parameters
        ----------
        sparse : `scipy.sparse`
            Corpus in scipy sparse format.
        documents_columns : bool, optional
            Documents stored as columns?
        """
        if documents_columns:
            self.sparse = sparse.tocsc()
        else:
            # make sure shape[1] = number of docs (needed in len())
            self.sparse = sparse.tocsr().T

    def __iter__(self):
        """Yield each document as a list of (int, float) pairs."""
        indptr = self.sparse.indptr
        for start, stop in zip(indptr, indptr[1:]):
            yield list(zip(self.sparse.indices[start:stop], self.sparse.data[start:stop]))

    def __len__(self):
        return self.sparse.shape[1]

    def __getitem__(self, document_index):
        """Retrieve the document at `document_index` in BoW format.

        Parameters
        ----------
        document_index : int
            Index of the document.

        Returns
        -------
        list of (int, number)
            Document in BoW format.
        """
        start = self.sparse.indptr[document_index]
        stop = self.sparse.indptr[document_index + 1]
        return list(zip(self.sparse.indices[start:stop], self.sparse.data[start:stop]))
def veclen(vec):
    """Euclidean (L2) length of a sparse BoW vector.

    Parameters
    ----------
    vec : list of (int, number)
        Input vector in sparse bag-of-words format.

    Returns
    -------
    float
        Length of `vec`; 0.0 for an empty vector.
    """
    if not vec:
        return 0.0
    length = math.sqrt(sum(weight * weight for _, weight in vec))
    assert length > 0.0, "sparse documents must not contain any explicit zero entries"
    return length
def ret_normalized_vec(vec, length):
    """Divide every weight of a BoW vector by `length` (L2 unit-normalization helper).

    Parameters
    ----------
    vec : list of (int, number)
        Input vector in BoW format.
    length : float
        Precomputed length of the vector.

    Returns
    -------
    list of (int, number)
        Normalized vector in BoW format.
    """
    if length == 1.0:
        # already unit length: just materialize a fresh list
        return list(vec)
    return [(term_id, weight / length) for term_id, weight in vec]
def ret_log_normalize_vec(vec, axis=1):
    """Log-normalize `vec` so that `exp(vec)` sums to 1 along `axis` (log-softmax style).

    Returns the normalized array together with the (per-sample) log normalizer.
    NOTE(review): for 1-D input the subtraction happens in place (`vec -= log_norm`),
    mutating the caller's array, while the 2-D path allocates a new array --
    confirm callers do not rely on either behavior before changing it.
    """
    log_max = 100.0  # cap so the shifted exponentials stay comfortably inside float range
    if len(vec.shape) == 1:
        max_val = np.max(vec)
        # shift values up toward log_max before exponentiating to avoid underflow
        log_shift = log_max - np.log(len(vec) + 1.0) - max_val
        tot = np.sum(np.exp(vec + log_shift))
        # the shift cancels: log_norm == log(sum(exp(vec)))
        log_norm = np.log(tot) - log_shift
        vec -= log_norm
    else:
        if axis == 1:  # independently normalize each sample
            max_val = np.max(vec, 1)
            log_shift = log_max - np.log(vec.shape[1] + 1.0) - max_val
            tot = np.sum(np.exp(vec + log_shift[:, np.newaxis]), 1)
            log_norm = np.log(tot) - log_shift
            vec = vec - log_norm[:, np.newaxis]
        elif axis == 0:  # normalize each feature
            # reuse the axis=1 path on the transpose
            k = ret_log_normalize_vec(vec.T)
            return k[0].T, k[1]
        else:
            raise ValueError("'%s' is not a supported axis" % axis)
    return vec, log_norm
blas_nrm2 = blas('nrm2', np.array([], dtype=float))
blas_scal = blas('scal', np.array([], dtype=float))
def unitvec(vec, norm='l2', return_norm=False):
    """Scale a vector to unit length.

    Parameters
    ----------
    vec : {numpy.ndarray, scipy.sparse, list of (int, float)}
        Input vector in any format
    norm : {'l1', 'l2', 'unique'}, optional
        Metric to normalize in.
    return_norm : bool, optional
        Return the length of vector `vec`, in addition to the normalized vector itself?

    Returns
    -------
    numpy.ndarray, scipy.sparse, list of (int, float)}
        Normalized vector in same format as `vec`.
    float
        Length of `vec` before normalization, if `return_norm` is set.

    Notes
    -----
    Zero-vector will be unchanged.

    Fix: integer inputs were upcast with ``vec.astype(np.float)``; the ``np.float``
    alias was deprecated in NumPy 1.20 and removed in 1.24, so that path raised
    ``AttributeError`` on modern NumPy. The builtin ``float`` (== ``np.float64``)
    is the equivalent, supported spelling.

    """
    supported_norms = ('l1', 'l2', 'unique')
    if norm not in supported_norms:
        raise ValueError("'%s' is not a supported norm. Currently supported norms are %s." % (norm, supported_norms))

    if scipy.sparse.issparse(vec):
        vec = vec.tocsr()
        if norm == 'l1':
            veclen = np.sum(np.abs(vec.data))
        if norm == 'l2':
            veclen = np.sqrt(np.sum(vec.data ** 2))
        if norm == 'unique':
            veclen = vec.nnz
        if veclen > 0.0:
            if np.issubdtype(vec.dtype, np.integer):
                # upcast so the in-place division below produces fractions
                vec = vec.astype(float)
            vec /= veclen
            if return_norm:
                return vec, veclen
            else:
                return vec
        else:
            if return_norm:
                return vec, 1.0
            else:
                return vec

    if isinstance(vec, np.ndarray):
        if norm == 'l1':
            veclen = np.sum(np.abs(vec))
        if norm == 'l2':
            if vec.size == 0:
                veclen = 0.0
            else:
                veclen = blas_nrm2(vec)
        if norm == 'unique':
            veclen = np.count_nonzero(vec)
        if veclen > 0.0:
            if np.issubdtype(vec.dtype, np.integer):
                vec = vec.astype(float)  # np.float is gone in NumPy >= 1.24
            if return_norm:
                return blas_scal(1.0 / veclen, vec).astype(vec.dtype), veclen
            else:
                return blas_scal(1.0 / veclen, vec).astype(vec.dtype)
        else:
            if return_norm:
                return vec, 1.0
            else:
                return vec

    try:
        first = next(iter(vec))  # is there at least one element?
    except StopIteration:
        if return_norm:
            return vec, 1.0
        else:
            return vec

    if isinstance(first, (tuple, list)) and len(first) == 2:  # gensim sparse format
        if norm == 'l1':
            length = float(sum(abs(val) for _, val in vec))
        if norm == 'l2':
            length = 1.0 * math.sqrt(sum(val ** 2 for _, val in vec))
        if norm == 'unique':
            length = 1.0 * len(vec)
        assert length > 0.0, "sparse documents must not contain any explicit zero entries"
        if return_norm:
            return ret_normalized_vec(vec, length), length
        else:
            return ret_normalized_vec(vec, length)
    else:
        raise ValueError("unknown input type")
def cossim(vec1, vec2):
    """Cosine similarity between two sparse BoW vectors.

    A number in `<-1.0, 1.0>`; higher means more similar.

    Parameters
    ----------
    vec1 : list of (int, float)
        Vector in BoW format.
    vec2 : list of (int, float)
        Vector in BoW format.

    Returns
    -------
    float
        Cosine similarity between `vec1` and `vec2`; 0.0 if either is empty.
    """
    d1, d2 = dict(vec1), dict(vec2)
    if not d1 or not d2:
        return 0.0
    len1 = math.sqrt(sum(weight * weight for weight in d1.values()))
    len2 = math.sqrt(sum(weight * weight for weight in d2.values()))
    assert len1 > 0.0 and len2 > 0.0, "sparse documents must not contain any explicit zero entries"
    if len(d2) < len(d1):
        # iterate over the shorter vector when forming the dot product
        d1, d2 = d2, d1
    dot = sum(weight * d2.get(term_id, 0.0) for term_id, weight in d1.items())
    return dot / (len1 * len2)
@deprecated(
    "Function will be removed in 4.0.0, use "
    "gensim.similarities.termsim.SparseTermSimilarityMatrix.inner_product instead")
def softcossim(vec1, vec2, similarity_matrix):
    """Get Soft Cosine Measure between two vectors given a term similarity matrix.
    Return Soft Cosine Measure between two sparse vectors given a sparse term similarity matrix
    in the :class:`scipy.sparse.csc_matrix` format. The similarity is a number between `<-1.0, 1.0>`,
    higher is more similar.
    Notes
    -----
    Soft Cosine Measure was perhaps first defined by `Grigori Sidorov et al.,
    "Soft Similarity and Soft Cosine Measure: Similarity of Features in Vector Space Model"
    <http://www.cys.cic.ipn.mx/ojs/index.php/CyS/article/view/2043/1921>`_.
    Parameters
    ----------
    vec1 : list of (int, float)
        A query vector in the BoW format.
    vec2 : list of (int, float)
        A document vector in the BoW format.
    similarity_matrix : {:class:`scipy.sparse.csc_matrix`, :class:`scipy.sparse.csr_matrix`}
        A term similarity matrix. If the matrix is :class:`scipy.sparse.csr_matrix`, it is going
        to be transposed. If you rely on the fact that there is at most a constant number of
        non-zero elements in a single column, it is your responsibility to ensure that the matrix
        is symmetric.
    Returns
    -------
    `similarity_matrix.dtype`
        The Soft Cosine Measure between `vec1` and `vec2`.
    Raises
    ------
    ValueError
        When the term similarity matrix is in an unknown format.
    See Also
    --------
    :meth:`gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.similarity_matrix`
        A term similarity matrix produced from term embeddings.
    :class:`gensim.similarities.docsim.SoftCosineSimilarity`
        A class for performing corpus-based similarity queries with Soft Cosine Measure.
    """
    if not isinstance(similarity_matrix, scipy.sparse.csc_matrix):
        if isinstance(similarity_matrix, scipy.sparse.csr_matrix):
            # CSR transposes cheaply to CSC; only equivalent if the matrix is symmetric (see docstring)
            similarity_matrix = similarity_matrix.T
        else:
            raise ValueError('unknown similarity matrix format')
    if not vec1 or not vec2:
        return 0.0
    vec1 = dict(vec1)
    vec2 = dict(vec2)
    # restrict the computation to the union of term ids that actually occur in either vector
    word_indices = sorted(set(chain(vec1, vec2)))
    dtype = similarity_matrix.dtype
    # densify both BoW vectors over the shared index set, preserving the matrix dtype
    vec1 = np.fromiter((vec1[i] if i in vec1 else 0 for i in word_indices), dtype=dtype, count=len(word_indices))
    vec2 = np.fromiter((vec2[i] if i in vec2 else 0 for i in word_indices), dtype=dtype, count=len(word_indices))
    # fancy indexing pulls out the relevant submatrix; `[[i] for i in ...]` makes a column selector
    dense_matrix = similarity_matrix[[[i] for i in word_indices], word_indices].todense()
    # generalized (S-weighted) squared norms x^T * S * x
    vec1len = vec1.T.dot(dense_matrix).dot(vec1)[0, 0]
    vec2len = vec2.T.dot(dense_matrix).dot(vec2)[0, 0]
    assert \
        vec1len > 0.0 and vec2len > 0.0, \
        u"sparse documents must not contain any explicit zero entries and the similarity matrix S " \
        u"must satisfy x^T * S * x > 0 for any nonzero bag-of-words vector x."
    result = vec1.T.dot(dense_matrix).dot(vec2)[0, 0]
    result /= math.sqrt(vec1len) * math.sqrt(vec2len)  # rescale by vector lengths
    # numerical round-off can push the value slightly outside the valid range; clamp it
    return np.clip(result, -1.0, 1.0)
def isbow(vec):
    """Check whether `vec` looks like a sparse Gensim bag-of-words vector.

    Parameters
    ----------
    vec : object
        Candidate object.

    Returns
    -------
    bool
        True if `vec` is (or could be) a list of ``(id, weight)`` pairs, False otherwise.

    """
    if scipy.sparse.issparse(vec):
        vec = vec.todense().tolist()
    try:
        term_id, weight = vec[0]  # probe the first entry: it must unpack to a pair...
        int(term_id), float(weight)  # ...whose parts are numeric
    except IndexError:
        return True  # an empty input counts as a (trivially) valid BoW vector
    except (ValueError, TypeError):
        return False
    return True
def _convert_vec(vec1, vec2, num_features=None):
    # Normalize two input vectors to a common representation that the
    # distance functions below can consume.
    if scipy.sparse.issparse(vec1):
        vec1 = vec1.toarray()
    if scipy.sparse.issparse(vec2):
        vec2 = vec2.toarray()  # both inputs are dense from here on
    if not (isbow(vec1) and isbow(vec2)):
        # Not BoW: conversion from sparse may leave a nested singleton list,
        # which scipy's Kullback-Leibler implementation rejects -- unwrap it.
        if len(vec1) == 1:
            vec1 = vec1[0]
        if len(vec2) == 1:
            vec2 = vec2[0]
        return vec1, vec2
    # BoW inputs: expand both to dense vectors of one common length.
    target_len = num_features if num_features is not None else max(len(vec1), len(vec2))
    return sparse2full(vec1, target_len), sparse2full(vec2, target_len)
def kullback_leibler(vec1, vec2, num_features=None):
    """Calculate the Kullback-Leibler distance between two probability
    distributions, delegating to `scipy.stats.entropy`.

    Parameters
    ----------
    vec1 : {scipy.sparse, numpy.ndarray, list of (int, float)}
        Distribution vector.
    vec2 : {scipy.sparse, numpy.ndarray, list of (int, float)}
        Distribution vector.
    num_features : int, optional
        Number of features in the vectors.

    Returns
    -------
    float
        Kullback-Leibler distance between `vec1` and `vec2`.
        Value in range [0, +∞) where values closer to 0 mean less distance (higher similarity).

    """
    dense1, dense2 = _convert_vec(vec1, vec2, num_features=num_features)
    return entropy(dense1, dense2)
def jensen_shannon(vec1, vec2, num_features=None):
    """Calculate the Jensen-Shannon distance between two probability
    distributions, via `scipy.stats.entropy`.

    A symmetric and finite "version" of :func:`gensim.matutils.kullback_leibler`:
    each input is compared against the average of the two distributions.

    Parameters
    ----------
    vec1 : {scipy.sparse, numpy.ndarray, list of (int, float)}
        Distribution vector.
    vec2 : {scipy.sparse, numpy.ndarray, list of (int, float)}
        Distribution vector.
    num_features : int, optional
        Number of features in the vectors.

    Returns
    -------
    float
        Jensen-Shannon distance between `vec1` and `vec2`.

    """
    dense1, dense2 = _convert_vec(vec1, vec2, num_features=num_features)
    midpoint = 0.5 * (dense1 + dense2)
    return 0.5 * (entropy(dense1, midpoint) + entropy(dense2, midpoint))
def hellinger(vec1, vec2):
    """Calculate the Hellinger distance between two probability distributions.

    Parameters
    ----------
    vec1 : {scipy.sparse, numpy.ndarray, list of (int, float)}
        Distribution vector.
    vec2 : {scipy.sparse, numpy.ndarray, list of (int, float)}
        Distribution vector.

    Returns
    -------
    float
        Hellinger distance between `vec1` and `vec2`.
        Value in range `[0, 1]`: 0 is min distance (max similarity), 1 is max distance.

    """
    if scipy.sparse.issparse(vec1):
        vec1 = vec1.toarray()
    if scipy.sparse.issparse(vec2):
        vec2 = vec2.toarray()
    if isbow(vec1) and isbow(vec2):
        # Sparse BoW inputs: compute over the id -> weight mappings directly
        # instead of densifying.
        bag1, bag2 = dict(vec1), dict(vec2)
        term_ids = set(list(bag1.keys()) + list(bag2.keys()))
        return np.sqrt(
            0.5 * sum((np.sqrt(bag1.get(term, 0.0)) - np.sqrt(bag2.get(term, 0.0))) ** 2 for term in term_ids)
        )
    # Dense inputs: vectorized form of the same formula.
    return np.sqrt(0.5 * ((np.sqrt(vec1) - np.sqrt(vec2)) ** 2).sum())
def jaccard(vec1, vec2):
    """Calculate the Jaccard distance between two vectors (1 - Jaccard similarity).

    Parameters
    ----------
    vec1 : {scipy.sparse, numpy.ndarray, list of (int, float)}
        Distribution vector.
    vec2 : {scipy.sparse, numpy.ndarray, list of (int, float)}
        Distribution vector.

    Returns
    -------
    float
        Jaccard distance between `vec1` and `vec2`.
        Value in range `[0, 1]`: 0 is min distance (max similarity), 1 is max distance.

    """
    # converting from sparse for easier manipulation
    if scipy.sparse.issparse(vec1):
        vec1 = vec1.toarray()
    if scipy.sparse.issparse(vec2):
        vec2 = vec2.toarray()
    if isbow(vec1) and isbow(vec2):
        # BoW inputs, weighted definition:
        #   union        = sum of the weights of both bags
        #   intersection = per-id minimum weight (shared mass)
        union = sum(weight for _, weight in vec1) + sum(weight for _, weight in vec2)
        bag1, bag2 = dict(vec1), dict(vec2)
        intersection = 0.0
        for term_id, weight in bag1.items():
            intersection += min(weight, bag2.get(term_id, 0.0))
        return 1 - float(intersection) / float(union)
    # Otherwise fall back to plain set semantics.
    if isinstance(vec1, np.ndarray):
        vec1 = vec1.tolist()
    if isinstance(vec2, np.ndarray):
        vec2 = vec2.tolist()
    set1, set2 = set(vec1), set(vec2)
    return 1 - float(len(set1 & set2)) / float(len(set1 | set2))
def jaccard_distance(set1, set2):
    """Calculate the Jaccard distance between two sets.

    Parameters
    ----------
    set1 : set
        Input set.
    set2 : set
        Input set.

    Returns
    -------
    float
        Jaccard distance between `set1` and `set2`.
        Value in range `[0, 1]`: 0 is min distance (max similarity), 1 is max distance.

    """
    union_size = len(set1 | set2)
    if union_size == 0:
        return 1.  # both sets empty: define distance as maximal
    return 1. - len(set1 & set2) / float(union_size)
try:
    # try to load fast, cythonized code if possible
    from gensim._matutils import logsumexp, mean_absolute_difference, dirichlet_expectation
except ImportError:
    # Pure-Python fallbacks with identical signatures, used when the compiled
    # extension is unavailable.
    def logsumexp(x):
        """Log of sum of exponentials.
        Parameters
        ----------
        x : numpy.ndarray
            Input 2d matrix.
        Returns
        -------
        float
            log of sum of exponentials of elements in `x`.
        Warnings
        --------
        For performance reasons, doesn't support NaNs or 1d, 3d, etc arrays like :func:`scipy.special.logsumexp`.
        """
        # subtract the max before exponentiating, then add it back: the
        # standard log-sum-exp trick that avoids overflow in exp()
        x_max = np.max(x)
        x = np.log(np.sum(np.exp(x - x_max)))
        x += x_max
        return x
    def mean_absolute_difference(a, b):
        """Mean absolute difference between two arrays.
        Parameters
        ----------
        a : numpy.ndarray
            Input 1d array.
        b : numpy.ndarray
            Input 1d array.
        Returns
        -------
        float
            mean(abs(a - b)).
        """
        return np.mean(np.abs(a - b))
    def dirichlet_expectation(alpha):
        """Expected value of log(theta) where theta is drawn from a Dirichlet distribution.
        Parameters
        ----------
        alpha : numpy.ndarray
            Dirichlet parameter 2d matrix or 1d vector, if 2d - each row is treated as a separate parameter vector.
        Returns
        -------
        numpy.ndarray
            Log of expected values, dimension same as `alpha.ndim`.
        """
        # `psi` is the digamma function (imported at module level from scipy.special)
        if len(alpha.shape) == 1:
            result = psi(alpha) - psi(np.sum(alpha))
        else:
            # 2d case: row-wise, broadcasting the per-row digamma of the row sum
            result = psi(alpha) - psi(np.sum(alpha, 1))[:, np.newaxis]
        return result.astype(alpha.dtype, copy=False)  # keep the same precision as input
def qr_destroy(la):
    """Get QR decomposition of `la[0]`.
    Parameters
    ----------
    la : list of numpy.ndarray
        Run QR decomposition on the first elements of `la`. Must not be empty.
    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Matrices :math:`Q` and :math:`R`.
    Notes
    -----
    Using this function is less memory intense than calling `scipy.linalg.qr(la[0])`,
    because the memory used in `la[0]` is reclaimed earlier. This makes a difference when
    decomposing very large arrays, where every memory copy counts.
    Warnings
    --------
    Content of `la` as well as `la[0]` gets destroyed in the process. Again, for memory-effiency reasons.
    """
    # LAPACK routines want Fortran (column-major) layout
    a = np.asfortranarray(la[0])
    del la[0], la  # now `a` is the only reference to the input matrix
    m, n = a.shape
    # perform q, r = QR(a); code hacked out of scipy.linalg.qr
    logger.debug("computing QR of %s dense matrix", str(a.shape))
    geqrf, = get_lapack_funcs(('geqrf',), (a,))
    # lwork=-1 is the LAPACK workspace-size query; the second call does the real work in-place
    qr, tau, work, info = geqrf(a, lwork=-1, overwrite_a=True)
    qr, tau, work, info = geqrf(a, lwork=work[0], overwrite_a=True)
    del a  # free up mem
    assert info >= 0
    r = triu(qr[:n, :n])
    if m < n:  # rare case, #features < #topics
        qr = qr[:, :m]  # retains fortran order
    # orgqr reconstructs the explicit Q factor from the Householder reflectors in `qr`/`tau`
    gorgqr, = get_lapack_funcs(('orgqr',), (qr,))
    q, work, info = gorgqr(qr, tau, lwork=-1, overwrite_a=True)
    q, work, info = gorgqr(qr, tau, lwork=work[0], overwrite_a=True)
    assert info >= 0, "qr failed"
    assert q.flags.f_contiguous
    return q, r
class MmWriter(object):
    """Store a corpus in `Matrix Market format <https://math.nist.gov/MatrixMarket/formats.html>`_,
    using :class:`~gensim.corpora.mmcorpus.MmCorpus`.
    Notes
    -----
    The output is written one document at a time, not the whole matrix at once (unlike e.g. `scipy.io.mmread`).
    This allows you to write corpora which are larger than the available RAM.
    The output file is created in a single pass through the input corpus, so that the input can be
    a once-only stream (generator).
    To achieve this, a fake MM header is written first, corpus statistics are collected
    during the pass (shape of the matrix, number of non-zeroes), followed by a seek back to the beginning of the file,
    rewriting the fake header with the final values.
    """
    HEADER_LINE = b'%%MatrixMarket matrix coordinate real general\n'  # the only supported MM format
    def __init__(self, fname):
        """
        Parameters
        ----------
        fname : str
            Path to output file.
        """
        self.fname = fname
        if fname.endswith(".gz") or fname.endswith('.bz2'):
            # compressed streams are not seekable, and the header rewrite needs seek()
            raise NotImplementedError("compressed output not supported with MmWriter")
        self.fout = utils.open(self.fname, 'wb+')  # open for both reading and writing
        # write_vector() asserts on this flag: headers must be written first
        self.headers_written = False
    def write_headers(self, num_docs, num_terms, num_nnz):
        """Write headers to file.
        Parameters
        ----------
        num_docs : int
            Number of documents in corpus.
        num_terms : int
            Number of term in corpus.
        num_nnz : int
            Number of non-zero elements in corpus.
        """
        self.fout.write(MmWriter.HEADER_LINE)
        if num_nnz < 0:
            # we don't know the matrix shape/density yet, so only log a general line
            logger.info("saving sparse matrix to %s", self.fname)
            # placeholder stats line, later overwritten in place by fake_headers()
            self.fout.write(utils.to_utf8(' ' * 50 + '\n'))  # 48 digits must be enough for everybody
        else:
            logger.info(
                "saving sparse %sx%s matrix with %i non-zero entries to %s",
                num_docs, num_terms, num_nnz, self.fname
            )
            self.fout.write(utils.to_utf8('%s %s %s\n' % (num_docs, num_terms, num_nnz)))
        # tracks the last document id written, to enforce ascending order in write_vector()
        self.last_docno = -1
        self.headers_written = True
    def fake_headers(self, num_docs, num_terms, num_nnz):
        """Write "fake" headers to file, to be rewritten once we've scanned the entire corpus.
        Parameters
        ----------
        num_docs : int
            Number of documents in corpus.
        num_terms : int
            Number of term in corpus.
        num_nnz : int
            Number of non-zero elements in corpus.
        """
        stats = '%i %i %i' % (num_docs, num_terms, num_nnz)
        if len(stats) > 50:
            # the placeholder written by write_headers() is only 50 chars wide
            raise ValueError('Invalid stats: matrix too large!')
        # seek back to just past the format line and overwrite the placeholder in place
        self.fout.seek(len(MmWriter.HEADER_LINE))
        self.fout.write(utils.to_utf8(stats))
    def write_vector(self, docno, vector):
        """Write a single sparse vector to the file.
        Parameters
        ----------
        docno : int
            Number of document.
        vector : list of (int, number)
            Document in BoW format.
        Returns
        -------
        (int, int)
            Max word index in vector and len of vector. If vector is empty, return (-1, 0).
        """
        assert self.headers_written, "must write Matrix Market file headers before writing data!"
        assert self.last_docno < docno, "documents %i and %i not in sequential order!" % (self.last_docno, docno)
        vector = sorted((i, w) for i, w in vector if abs(w) > 1e-12)  # ignore near-zero entries
        for termid, weight in vector:  # write term ids in sorted order
            # +1 because MM format starts counting from 1
            self.fout.write(utils.to_utf8("%i %i %s\n" % (docno + 1, termid + 1, weight)))
        self.last_docno = docno
        return (vector[-1][0], len(vector)) if vector else (-1, 0)
    @staticmethod
    def write_corpus(fname, corpus, progress_cnt=1000, index=False, num_terms=None, metadata=False):
        """Save the corpus to disk in `Matrix Market format <https://math.nist.gov/MatrixMarket/formats.html>`_.
        Parameters
        ----------
        fname : str
            Filename of the resulting file.
        corpus : iterable of list of (int, number)
            Corpus in streamed bag-of-words format.
        progress_cnt : int, optional
            Print progress for every `progress_cnt` number of documents.
        index : bool, optional
            Return offsets?
        num_terms : int, optional
            Number of terms in the corpus. If provided, the `corpus.num_terms` attribute (if any) will be ignored.
        metadata : bool, optional
            Generate a metadata file?
        Returns
        -------
        offsets : {list of int, None}
            List of offsets (if index=True) or nothing.
        Notes
        -----
        Documents are processed one at a time, so the whole corpus is allowed to be larger than the available RAM.
        See Also
        --------
        :func:`gensim.corpora.mmcorpus.MmCorpus.save_corpus`
            Save corpus to disk.
        """
        mw = MmWriter(fname)
        # write empty headers to the file (with enough space to be overwritten later)
        mw.write_headers(-1, -1, -1)  # will print 50 spaces followed by newline on the stats line
        # calculate necessary header info (nnz elements, num terms, num docs) while writing out vectors
        _num_terms, num_nnz = 0, 0
        docno, poslast = -1, -1
        offsets = []
        if hasattr(corpus, 'metadata'):
            orig_metadata = corpus.metadata
            corpus.metadata = metadata
            if metadata:
                docno2metadata = {}
        else:
            # corpus cannot yield metadata, so silently disable the option
            metadata = False
        for docno, doc in enumerate(corpus):
            if metadata:
                bow, data = doc
                docno2metadata[docno] = data
            else:
                bow = doc
            if docno % progress_cnt == 0:
                logger.info("PROGRESS: saving document #%i", docno)
            if index:
                posnow = mw.fout.tell()
                # an unchanged position means the previous document was empty;
                # mark it with the sentinel offset -1 (see MmReader.docbyoffset)
                if posnow == poslast:
                    offsets[-1] = -1
                offsets.append(posnow)
                poslast = posnow
            max_id, veclen = mw.write_vector(docno, bow)
            _num_terms = max(_num_terms, 1 + max_id)
            num_nnz += veclen
        if metadata:
            utils.pickle(docno2metadata, fname + '.metadata.cpickle')
            corpus.metadata = orig_metadata
        num_docs = docno + 1
        # NOTE(review): an explicit num_terms=0 falls back to the scanned value (0 is falsy)
        num_terms = num_terms or _num_terms
        if num_docs * num_terms != 0:
            logger.info(
                "saved %ix%i matrix, density=%.3f%% (%i/%i)",
                num_docs, num_terms, 100.0 * num_nnz / (num_docs * num_terms), num_nnz, num_docs * num_terms
            )
        # now write proper headers, by seeking and overwriting the spaces written earlier
        mw.fake_headers(num_docs, num_terms, num_nnz)
        mw.close()
        if index:
            return offsets
    def __del__(self):
        """Close `self.fout` file. Alias for :meth:`~gensim.matutils.MmWriter.close`.
        Warnings
        --------
        Closing the file explicitly via the close() method is preferred and safer.
        """
        self.close()  # does nothing if called twice (on an already closed file), so no worries
    def close(self):
        """Close `self.fout` file."""
        logger.debug("closing %s", self.fname)
        # guard with hasattr: __init__ may have raised before self.fout existed
        if hasattr(self, 'fout'):
            self.fout.close()
try:
    # try to load fast, cythonized code if possible
    from gensim.corpora._mmreader import MmReader
except ImportError:
    # pure-Python fallback; -1 marks that the compiled version is unavailable
    FAST_VERSION = -1
    class MmReader(object):
        """Matrix market file reader, used internally in :class:`~gensim.corpora.mmcorpus.MmCorpus`.
        Wrap a term-document matrix on disk (in matrix-market format), and present it
        as an object which supports iteration over the rows (~documents).
        Attributes
        ----------
        num_docs : int
            Number of documents in market matrix file.
        num_terms : int
            Number of terms.
        num_nnz : int
            Number of non-zero terms.
        Notes
        -----
        Note that the file is read into memory one document at a time, not the whole matrix at once
        (unlike e.g. `scipy.io.mmread` and other implementations).
        This allows us to process corpora which are larger than the available RAM.
        """
        def __init__(self, input, transposed=True):
            """
            Parameters
            ----------
            input : {str, file-like object}
                Path to the input file in MM format or a file-like object that supports `seek()`
                (e.g. smart_open objects).
            transposed : bool, optional
                Do lines represent `doc_id, term_id, value`, instead of `term_id, doc_id, value`?
            """
            logger.info("initializing corpus reader from %s", input)
            self.input, self.transposed = input, transposed
            with utils.open_file(self.input) as lines:
                try:
                    # the first line must declare the (only supported) MM variant
                    header = utils.to_unicode(next(lines)).strip()
                    if not header.lower().startswith('%%matrixmarket matrix coordinate real general'):
                        raise ValueError(
                            "File %s not in Matrix Market format with coordinate real general; instead found: \n%s" %
                            (self.input, header)
                        )
                except StopIteration:
                    pass
                self.num_docs = self.num_terms = self.num_nnz = 0
                # skip remaining '%' comment lines; the first data line holds the matrix shape
                for lineno, line in enumerate(lines):
                    line = utils.to_unicode(line)
                    if not line.startswith('%'):
                        self.num_docs, self.num_terms, self.num_nnz = (int(x) for x in line.split())
                        if not self.transposed:
                            self.num_docs, self.num_terms = self.num_terms, self.num_docs
                        break
            logger.info(
                "accepted corpus with %i documents, %i features, %i non-zero entries",
                self.num_docs, self.num_terms, self.num_nnz
            )
        def __len__(self):
            """Get the corpus size: total number of documents."""
            return self.num_docs
        def __str__(self):
            return ("MmCorpus(%i documents, %i features, %i non-zero entries)" %
                    (self.num_docs, self.num_terms, self.num_nnz))
        def skip_headers(self, input_file):
            """Skip file headers that appear before the first document.
            Parameters
            ----------
            input_file : iterable of str
                Iterable taken from file in MM format.
            """
            # NOTE(review): compares against b'%', so the stream is expected to
            # yield bytes here -- confirm for text-mode inputs.
            for line in input_file:
                if line.startswith(b'%'):
                    continue
                break
        def __iter__(self):
            """Iterate through all documents in the corpus.
            Notes
            ------
            Note that the total number of vectors returned is always equal to the number of rows specified
            in the header: empty documents are inserted and yielded where appropriate, even if they are not explicitly
            stored in the Matrix Market file.
            Yields
            ------
            (int, list of (int, number))
                Document id and document in sparse bag-of-words format.
            """
            with utils.file_or_filename(self.input) as lines:
                self.skip_headers(lines)
                previd = -1
                for line in lines:
                    docid, termid, val = utils.to_unicode(line).split()  # needed for python3
                    if not self.transposed:
                        termid, docid = docid, termid
                    # -1 because matrix market indexes are 1-based => convert to 0-based
                    docid, termid, val = int(docid) - 1, int(termid) - 1, float(val)
                    assert previd <= docid, "matrix columns must come in ascending order"
                    if docid != previd:
                        # change of document: return the document read so far (its id is prevId)
                        if previd >= 0:
                            yield previd, document  # noqa:F821
                        # return implicit (empty) documents between previous id and new id
                        # too, to keep consistent document numbering and corpus length
                        for previd in range(previd + 1, docid):
                            yield previd, []
                        # from now on start adding fields to a new document, with a new id
                        previd = docid
                        document = []
                    document.append((termid, val,))  # add another field to the current document
            # handle the last document, as a special case
            if previd >= 0:
                yield previd, document
            # return empty documents between the last explicit document and the number
            # of documents as specified in the header
            for previd in range(previd + 1, self.num_docs):
                yield previd, []
        def docbyoffset(self, offset):
            """Get the document at file offset `offset` (in bytes).
            Parameters
            ----------
            offset : int
                File offset, in bytes, of the desired document.
            Returns
            ------
            list of (int, str)
                Document in sparse bag-of-words format.
            """
            # empty documents are not stored explicitly in MM format, so the index marks
            # them with a special offset, -1.
            if offset == -1:
                return []
            if isinstance(self.input, string_types):
                fin, close_fin = utils.open(self.input, 'rb'), True
            else:
                fin, close_fin = self.input, False
            fin.seek(offset)  # works for gzip/bz2 input, too
            previd, document = -1, []
            for line in fin:
                docid, termid, val = line.split()
                if not self.transposed:
                    termid, docid = docid, termid
                # -1 because matrix market indexes are 1-based => convert to 0-based
                docid, termid, val = int(docid) - 1, int(termid) - 1, float(val)
                assert previd <= docid, "matrix columns must come in ascending order"
                if docid != previd:
                    # stop as soon as a line belonging to the NEXT document is seen
                    if previd >= 0:
                        break
                    previd = docid
                document.append((termid, val,))  # add another field to the current document
            if close_fin:
                fin.close()
            return document
| true |
eff8eba6cf12ea7bb75d0b97dcdfb06a7af2bfbf | Python | Dainius-P/scrapyd-dash | /scrapyd_dash/operations/versions_delete.py | UTF-8 | 425 | 2.625 | 3 | [
"MIT"
] | permissive | import requests
"""
Delete a project version.
If there are no more versions available for a given project, that project will be deleted too.
"""
def delete_version(server, project, version):
url = "http://{}/delversion.json".format(server)
data = {
"project": project,
"version": version
}
with requests.Session() as session:
try:
r = session.post(url, data=data)
except:
return None
return r.json()
| true |
8f2c621cedb94d9f1b82e5037a520b6e1409b000 | Python | steinstadt/Python_Algorithm | /chapter6/exam_2.py | UTF-8 | 1,009 | 4.03125 | 4 | [] | no_license | # 式の木
class TreeNode():
    """A node of a binary expression tree.

    `ope` holds either an operator character or an operand character;
    `left` / `right` are the child subtrees (None for leaves).
    """
    def __init__(self, left, ope, right):
        self.left, self.ope, self.right = left, ope, right
def gentree(p, w):
    """Recursively build an expression tree from a postfix string.

    The string `w` is consumed from its END: the last character becomes this
    node's operator/operand, and for operators the right subtree is built
    before the left one (matching postfix order).

    Returns the tuple (p, remaining) where `remaining` is the unconsumed
    prefix of `w`.
    """
    p.ope = w[-1]  # take the last character as this node's token
    s = w[:-1]
    # Idiom fix: membership test instead of a four-way `or` chain
    # (p.ope is always a single character here).
    if p.ope in '+-*/':
        p.right = TreeNode(None, None, None)  # initialize the right subtree
        p.left = TreeNode(None, None, None)  # initialize the left subtree
        p.right, s = gentree(p.right, s)
        p.left, s = gentree(p.left, s)
    else:
        # operand: this node is a leaf
        p.left = None
        p.right = None
    return p, s
def postfix(p):
    """Print the postfix (left, right, node) traversal of the tree rooted at
    `p`, with no separators and no trailing newline."""
    if p is None:
        return
    postfix(p.left)
    postfix(p.right)
    print(p.ope, end='')
def main():
    """Build an expression tree from a fixed postfix string and print its
    postfix traversal (which reproduces the input)."""
    expression = "ab*cd+e/-"
    root, _ = gentree(TreeNode(None, None, None), expression)  # build the tree
    # print the result
    print("postfix = ", end="")
    postfix(root)
if __name__ == "__main__":
    main()
| true |
699b989fec92d4d68781fb792d6d3cc671a850c4 | Python | lkwatson/hack-a-wall | /cam_client/cam_client_sim.py | UTF-8 | 949 | 2.53125 | 3 | [] | no_license | from socketIO_client_nexus import SocketIO, LoggingNamespace
import json
import time
import random
import math
sock_client = SocketIO('localhost', 3000, LoggingNamespace)
i = 0
while True:
if i < 10:
x_1, x_2 = (25*math.sin(i/10.) + 50), (10*math.sin(i/89.) + 20)
y_1, y_2 = (10*math.cos(i/80.) + 10), (5*math.sin(i/70.) + 20)
sock_client.emit('messages',
json.dumps([
{'person_id': 'Billy', 'x': x_1, 'y': y_1},
{'person_id': 'Tommy', 'x': x_2, 'y': y_2}
]))
else:
x_1 = (25*math.sin(i/100.) + 75)
y_1 = (10*math.cos(i/80.) + 8)
sock_client.emit('messages',
json.dumps([
{'person_id': 'Billy', 'x': x_1, 'y': y_1},
]))
i += 1
if i > 40:
i = 0
time.sleep(0.2 + 0.2*random.random())
| true |
9d100e83391cb071b7818126ca4c5f4f902abba9 | Python | devin730/OfficeAutomation | /EditWordFile.py | UTF-8 | 2,206 | 3 | 3 | [] | no_license | #!/usr/bin/python
# coding: utf-8
from docx import Document
from convDoc2Docx import convDocToDocx as cnv
#! there are mainly three parameters in this class:
# @params in_dict is a dictionary,key: origin string,value: replaced string.
# @params origin_word_file: the path of origin docx file (or template word file).
# @params new_word_file: the path of docx file (saved file path).
class WordPro():
    """Replace placeholder strings in a Word document's tables and paragraphs.

    On construction, opens `origin_word_file` (.docx directly, .doc after
    conversion via `cnv`), applies every key -> value replacement from
    `in_dict`, and saves the result to `new_word_file`.
    """
    # NOTE(review): the mutable default argument `in_dict={...}` is shared
    # across calls; harmless here because it is never mutated, but a
    # None-default would be safer.
    def __init__(self, in_dict={"target str": "new str"}, origin_word_file='./old.docx', new_word_file='./new.docx'):
        self.replace_DICT = in_dict
        if origin_word_file.split(".")[-1] == 'docx':
            document = Document(origin_word_file)
            document = self.process(document)
            document.save(new_word_file)
        elif origin_word_file.split(".")[-1] == 'doc':
            # legacy .doc: convert to .docx first, then process the conversion result
            new_docx_file = cnv(origin_word_file)
            document = Document(new_docx_file)
            document = self.process(document)
            document.save(new_word_file)
        else:
            print("input file is illegal.")
    def process(self, document):
        """Apply all replacements from self.replace_DICT to `document` in place
        and return it. Prints each paragraph before and after, and each
        replacement performed (debug output)."""
        for para in document.paragraphs:
            print(para.text)
        # tables
        for table in document.tables:
            for row in range(len(table.rows)):
                for col in range(len(table.columns)):
                    for key, value in self.replace_DICT.items():
                        if key in table.cell(row, col).text:
                            print(key+"->"+value)
                            table.cell(row, col).text = table.cell(row, col).text.replace(key, value)
        # paragraphs
        # NOTE(review): replacing at run level preserves formatting, but a key
        # split across two runs will not be matched -- confirm acceptable.
        for para in document.paragraphs:
            for i in range(len(para.runs)):
                for key, value in self.replace_DICT.items():
                    if key in para.runs[i].text:
                        print(key+"->"+value)
                        para.runs[i].text = para.runs[i].text.replace(key, value)
        for para in document.paragraphs:
            print(para.text)
        return document
if __name__ == '__main__':
    # demo run: replace 'http' with 'Devin' in the default ./old.docx -> ./new.docx
    dict1 = {'http': 'Devin'}
    WordPro(in_dict=dict1)
    # origin_word_file and new_word_file use their default values
| true |
e695e78c4a4a2c6c18e53da38bbc5da3b5fd7f69 | Python | Embot2000/IE_playground | /first_model/backend/manager.py | UTF-8 | 672 | 3.546875 | 4 | [] | no_license | from pathlib import Path
import os
def save_to_file(file_name, text):
    """Append `text` to `file_name` if it exists, otherwise create the file.

    When appending, the new text is written on a fresh line.

    :param file_name: (str) The file name.
    :param text: (str) Input string obtained from GUI.
    :return: (str, str) A status message and the file's absolute path.
    """
    file = Path(file_name)
    # FIX: use context managers so the handle is always closed, even if the
    # write raises (the original opened/closed manually and could leak the
    # handle on error).
    if file.is_file():
        with open(file, "a") as existing_file:
            existing_file.write(f'\n{text}')
        return "Appended to file.", os.path.abspath(file)
    # "x" = exclusive creation, matching the original's intent of only
    # creating when the file does not exist yet
    with open(file, "x") as new_file:
        new_file.write(text)
    return "Created a new file.", os.path.abspath(file)
fbc29f2d4190c90e7d597cb317a31e473008b196 | Python | AlexKurs/seo-helpers | /url_status_checker.py | UTF-8 | 904 | 3.21875 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
import csv
# Wordpress SEO Yast Sitemaps Checker
def checker():
    """Crawl a WordPress (Yoast SEO) sitemap index and print the HTTP status
    code of every URL found in its sub-sitemaps; return the list of URLs."""
    website = 'https://'  # your WordPress website base URL (fill in before running)
    sitemap = website + '/sitemap_index.xml'
    # parse the sitemap index for the category sitemaps
    r = requests.get(sitemap)
    doc = BeautifulSoup(r.text, 'html.parser')
    links = doc.find_all('loc')
    date = []  # URLs of the sub-sitemaps
    for i in links:
        url = i.text
        date.append(url)
    # parse the URLs out of each category sitemap
    liste = []
    for link in date:
        source = link
        response = requests.get(source)
        doc = BeautifulSoup(response.text, 'html.parser')
        urls = doc.find_all('loc')
        for x in urls:
            url = x.text
            liste.append(url)
    # fetch each URL and print its status code
    for url in liste:
        r = requests.get(url)
        status = r.status_code
        print(status, url)
    return liste
checker()
print('Fertig')
| true |
19cab517ebdbba5d2027525325be9760efb12cb8 | Python | alanbly/ProjectEuler | /56.py | UTF-8 | 271 | 3.296875 | 3 | [
"MIT"
def digitSum(num):
    """Return the sum of the decimal digits of the non-negative integer `num`.

    BUG FIX: the original used ``value /= 10`` which, under Python 3, is true
    division -- `value` becomes a float and the digit sums are wrong (and the
    loop runs until float underflow). Integer `divmod` keeps the arithmetic
    exact and works on both Python 2 and 3.
    """
    total = 0
    value = num
    while value > 0:
        value, digit = divmod(value, 10)
        total += digit
    return total
# Project Euler 56: the maximum digital sum of a**b for natural a, b < 100.
maxSum = 0
for i in range(1, 100):
    for j in range(1, 100):
        value = digitSum(i**j)
        if value > maxSum:
            maxSum = value
print(maxSum)  # function-call form; the original used the Python 2 print statement
9ad55b7d3fd3a61ab1188187b226306b4837cf00 | Python | mverrilli/python_snippets | /sql_functions.py | UTF-8 | 6,181 | 3.140625 | 3 | [
"MIT"
] | permissive | import re
def regexp_flags(regexp_modifier=None):
    """Translate a SQL-style regexp modifier string into Python `re` flags.

    Supported modifier characters (mirroring the SQL REGEXP_* modifiers):
      b - treat strings as binary octets rather than UTF-8 characters
          (disables the UNICODE flag)
      c - case-sensitive match (the default; adds no flag)
      i - case-insensitive match
      m - multi-line mode: ^ and $ also match at line breaks (\\n)
      n - let the . operator match a newline
      x - verbose mode: unescaped whitespace and # comments are ignored
    Unknown characters are ignored.

    BUG FIX: the original iterated over the default value None
    (``for c in None``) and raised TypeError when called without an argument;
    None/'' now mean "defaults only".

    :param regexp_modifier: str or None -- modifier characters
    :return: int -- bitwise OR of the matching `re` flags
             (re.UNICODE included unless 'b' was given)
    """
    char_to_flag = {
        'i': re.IGNORECASE,
        'm': re.MULTILINE,
        'n': re.DOTALL,
        'x': re.VERBOSE,
    }
    flags = 0
    set_unicode = True
    for c in regexp_modifier or '':
        if c == 'b':
            set_unicode = False
        else:
            # 'c' (case sensitive) and unknown characters map to 0 (no flag)
            flags |= char_to_flag.get(c, 0)
    if set_unicode:
        flags |= re.UNICODE
    return flags
def sql_regexp_substr(txt, pattern, position=1, occurrence=1, regexp_modifier='c', captured_subexp=0):
    """Emulate SQL REGEXP_SUBSTR: return the text of the `occurrence`-th match
    of `pattern` in `txt` (or of one captured subexpression of it), searching
    from 1-based `position`. Returns '' when there is no such match or the
    requested group does not exist."""
    occurrence = max(occurrence, 1)
    compiled = re.compile(pattern, regexp_flags(regexp_modifier))
    for count, match in enumerate(compiled.finditer(txt, position - 1), start=1):
        if count == occurrence:
            try:
                return match.group(captured_subexp)
            except IndexError:
                return ''
    return ''
def sql_regexp_count(txt, pattern, position=1, regexp_modifier='c'):
    """Emulate SQL REGEXP_COUNT: the number of matches of `pattern` in `txt`,
    searching from 1-based `position`."""
    compiled = re.compile(pattern, regexp_flags(regexp_modifier))
    return len(compiled.findall(txt, position - 1))
def sql_regexp_like(txt, pattern, regexp_modifier='c'):
    """Emulate SQL REGEXP_LIKE: True if `pattern` matches anywhere in `txt`.

    Idiom fix: `re.search` answers "is there at least one match" directly;
    the original iterated `finditer` and returned on the first hit.
    """
    flags = regexp_flags(regexp_modifier)
    return re.search(pattern, txt, flags) is not None
def sql_regexp_instr(txt, pattern, position=1, occurrence=1, return_position=0, regexp_modifier='c', captured_subexp=0):
    """Emulate SQL REGEXP_INSTR: the 1-based position of the `occurrence`-th
    match of `pattern` in `txt` (or of one captured subexpression of it),
    searching from 1-based `position`.

    With return_position > 0 the returned value is based on the match END
    instead of its start. Returns 0 when there is no such match or the
    requested group does not exist.
    """
    occurrence = max(occurrence, 1)
    compiled = re.compile(pattern, regexp_flags(regexp_modifier))
    for count, match in enumerate(compiled.finditer(txt, position - 1), start=1):
        if count != occurrence:
            continue
        try:
            if return_position > 0:
                return match.end(captured_subexp) + return_position
            return match.start(captured_subexp) + 1
        except IndexError:
            return 0
    return 0
# NOTE(review): dead code -- this definition is immediately shadowed by the
# second sql_regexp_replace defined right below it, and it is broken as
# written: it references undefined names (`pattern`, `matches`,
# `captured_subexp`) and would raise NameError if it were ever called.
# Kept byte-identical; consider deleting it.
def sql_regexp_replace( txt, target, replacement=None, position=1, occurrence=1, regexp_modifier='c'):
    occurrence = 1 if occurrence < 1 else occurrence
    flags = regexp_flags(regexp_modifier)
    rx = re.compile(pattern, flags)  # BUG: `pattern` is undefined (should be `target`?)
    retval = rx.sub(replacement, txt, position-1)
    cnt = 0
    for match in matches:  # BUG: `matches` is undefined
        cnt += 1
        if occurrence == cnt:
            try:
                return match.group(captured_subexp)  # BUG: `captured_subexp` is undefined
            except IndexError:
                return ''
    return ''
def sql_regexp_replace(txt, pattern, replacement='', position=1, occurrence=0, regexp_modifier='c'):
    """Oracle-style REGEXP_REPLACE, starting at the 1-based position.

    occurrence == 0 replaces every match; occurrence == n replaces only
    the n-th match.  Text before `position` is passed through untouched.
    Backreferences in `replacement` are expanded via Match.expand().

    NOTE: this definition shadows the broken sql_regexp_replace above.
    Fix applied here: the original wrapped `match.expand` handling in a
    try/except IndexError whose two branches were identical
    (`return match.group(0)`) -- dead code, removed; the counter class
    is replaced by a small closure.
    """
    occurrence = max(occurrence, 0)
    state = {'count': 0}

    def _repl(match):
        state['count'] += 1
        if occurrence == 0 or occurrence == state['count']:
            return match.expand(replacement)
        # leave non-selected occurrences unchanged
        return match.group(0)

    rx = re.compile(pattern, regexp_flags(regexp_modifier))
    return txt[0:position - 1] + rx.sub(_repl, txt[position - 1:])
def sql_left(txt, i):
    """Return the leftmost i characters of txt (SQL LEFT)."""
    return txt[0:i]
def sql_right(txt, i):
    """Return txt from 1-based character position i to the end.

    NOTE(review): SQL RIGHT(s, n) conventionally returns the *last* n
    characters (txt[-n:]); this implementation instead drops the first
    i-1 characters -- confirm which behavior callers expect before
    changing it."""
    return txt[(i - 1):]
def sql_split_part(txt, delim, field):
    """Return the field-th (1-based) piece of txt split on delim, or ''
    when there are fewer pieces.  (field == 0 falls through to Python's
    negative indexing and yields the last piece, as the original did.)"""
    pieces = txt.split(delim)
    try:
        return pieces[field - 1]
    except IndexError:
        return ''
def sql_substr(txt, pos, extent=None):
    """SQL SUBSTR with a 1-based start position: the whole tail of txt
    when extent is None, otherwise up to extent characters.

    Fix: the original's `elif extent == 0` branch was unreachable
    (extent == 0 already satisfied the `extent is not None` test), so it
    has been removed -- extent == 0 yields '' through the slice anyway.
    """
    if extent is None:
        return txt[pos - 1:]
    return txt[pos - 1:pos - 1 + extent]
def sql_instr(txt, subtxt, pos=1, occurrence=1):
    """SQL INSTR: 1-based position of the occurrence-th appearance of
    subtxt at or after the 1-based position pos; 0 when not found.

    Fix: with occurrence < 1 the original while-loop never ran, leaving
    `idx` unbound and raising NameError on the final return; such calls
    now return 0.
    """
    if occurrence < 1:
        return 0
    cnt = 0
    while cnt < occurrence:
        idx = txt.find(subtxt, pos - 1)
        if idx == -1:
            return 0
        cnt += 1
        pos = idx + 2  # pos is used starting at 1, plus need the next char
    return idx + 1
def sql_concat(txt1, txt2):
    """Concatenate two strings, treating None as '' (NULL-safe concat).

    Fix: the original one-liner
        return '' if txt1 is None else None + '' if txt2 is None else None
    parsed as `'' if txt1 is None else (None + ...)` due to conditional-
    expression precedence, so it either dropped both inputs or raised
    TypeError; it never returned the concatenation.
    """
    left = '' if txt1 is None else txt1
    right = '' if txt2 is None else txt2
    return left + right
def sql_nvl(expr1, expr2):
    """Oracle NVL: return expr2 when expr1 is NULL-ish (None or the
    empty string), otherwise expr1."""
    return expr2 if expr1 is None or expr1 == '' else expr1
def sql_nvl2(expr1, expr2, expr3):
    """Oracle NVL2: return expr3 when expr1 is NULL-ish (None or ''),
    otherwise expr2."""
    return expr3 if expr1 is None or expr1 == '' else expr2
def sql_null_if_zero(expr):
    """NULLIFZERO: return '' (the NULL stand-in used throughout these
    helpers) when expr is None or 0, otherwise return expr itself.

    Fix: the original fell off the end of the function and implicitly
    returned None for every non-zero value instead of the value.
    """
    if expr is None or expr == 0:
        return ''
    return expr
def sql_zero_if_null(expr):
    """ZEROIFNULL: return 0 when expr is NULL-ish (None or ''),
    otherwise return expr itself.

    Fix: the original fell off the end of the function and implicitly
    returned None for every non-NULL value instead of the value.
    """
    if expr is None or expr == '':
        return 0
    return expr
def sql_coalesce(*values):
    """Return the first argument that is not NULL-ish (None or ''), or
    '' when every argument is NULL-ish (or there are none)."""
    return next((v for v in values if v is not None and v != ''), '')
def sql_decode(expr, *pairs):
    """Oracle DECODE: compare expr against every (test, result) pair in
    order and return the first matching result.  A trailing unpaired
    argument is the default; with no match and no default, return ''."""
    for idx in range(0, len(pairs) - 1, 2):
        if expr == pairs[idx]:
            return pairs[idx + 1]
    if len(pairs) % 2 == 1:
        # odd argument count: the last value is the default
        return pairs[-1]
    return ''
| true |
b2e32b1103441ba725904f1d73c07de2c91ab0f6 | Python | JoshBradshaw/AOC2020 | /day19.py | UTF-8 | 1,845 | 3.078125 | 3 | [] | no_license | import fileinput
import re
from functools import lru_cache
def rules_to_regex(rules):
    """Compile rule 0 of an AoC-2020 day-19 grammar into a regex string.

    `rules` is indexed by rule number; an entry is either a terminal
    string or a list of alternatives, each a list of sub-rule numbers.
    Memoized so shared sub-rules are only expanded once."""
    @lru_cache(maxsize=None)
    def expand(rule_idx):
        rule = rules[rule_idx]
        if isinstance(rule, str):
            # terminal: the literal character(s)
            return rule
        branches = ["".join(expand(sub) for sub in group) for group in rule]
        if len(branches) == 1:
            return branches[0]
        return "({})".format("|".join(branches))
    return expand(0)
def part1(rules, messages):
    """Count the messages that completely match rule 0."""
    matcher = re.compile(rules_to_regex(rules))
    return sum(1 for msg in messages if matcher.fullmatch(msg))
def part2(rules, messages):
    """Part 2 rewrites rules 8 and 11 as recursive rules:

        8: 42 | 42 8          -> one or more rule-42 blocks
        11: 42 31 | 42 11 31  -> n rule-42 blocks then n rule-31 blocks

    The recursion is unrolled to a fixed depth (6 and 4 repetitions,
    enough for the puzzle input) so a plain regex can still be used,
    then messages are counted exactly like part 1.
    :param rules:
    :param messages:
    :return:
    """
    rules[8] = [[42] * reps for reps in range(1, 7)]
    rules[11] = [[42] * reps + [31] * reps for reps in range(1, 5)]
    rule_regex = re.compile(rules_to_regex(rules))
    return sum(1 for message in messages if rule_regex.fullmatch(message))
# --- input parsing: rule lines ("N: ...") first, then the messages ---
rule_lines = []
messages = []
for line in fileinput.input():
    if line[0].isdigit():
        rule_lines.append(line.strip())
    elif not line.strip():
        # blank separator between the rule section and the messages
        continue
    else:
        messages.append(line.strip())
# sort rules by their numeric id so list position == rule number
rule_lines.sort(key=lambda s: int(s.split(':')[0]))
rules = []
for line in rule_lines:
    rule = line.split(':')[1]
    if '"' in rule:
        # terminal rule, e.g. 4: "a" -> store the bare character
        rules.append(rule.strip('\n "'))
    else:
        # compound rule: '|'-separated alternatives, each a list of rule ids
        rules.append([list(map(int, subrule.split())) for subrule in rule.split('|')])
print(part1(rules, messages))
# NOTE(review): the trailing "| true |" below looks like dataset-export residue
print(part2(rules, messages)) | true |
95fe6a2fd3f0b8e32cabd39896d1535e1252e421 | Python | anthony-symphony/symphony-api-client-python | /sym_api_client_python/loaders.py | UTF-8 | 3,681 | 2.734375 | 3 | [
"MIT"
] | permissive | import logging
import os
from .auth.auth import Auth
from .auth.rsa_auth import SymBotRSAAuth
from .configure.configure import SymConfig
def load_from_env_var(env_var, delimiter=":"):
    """Look for an environment variable with the format:

        env_var=[RSA|CERT]:/path/to/config

    E.g. SYMPHONY_CONFIG=RSA:resources/config.json

    The env_var parameter describes which environment variable to check.

    config.configure() and auth.authenticate() are called within this method
    so they don't need to be called again.

    Returns a tuple of configuration, auth.
    Raises ValueError when the variable is missing or malformed.

    Fixes: error messages previously contained a stray "f" ("recognise
    fRSA") from a typo inside an f-string, and the underscore typo
    "environment_variable".
    """
    env_value = os.environ.get(env_var)
    if env_value is None:
        raise ValueError("Unable to find environment variable at: " + env_var)

    split = env_value.split(delimiter)
    if len(split) == 1:
        raise ValueError(f"Did not find {delimiter} in environment variable: {env_value}")
    elif len(split) > 2:
        # On windows the path itself can contain a colon (e.g. C:\...),
        # producing three pieces; re-join the drive letter with the path
        if len(split) == 3 and os.name == "nt":
            split = [split[0], split[1] + ":" + split[2]]
        else:
            raise ValueError(f"Found more than one {delimiter} in environment variable: {env_value}")

    if split[0].lower() not in ["rsa", "cert"]:
        raise ValueError(f"Didn't recognise {split[0]}, expected one of: RSA, CERT")

    logging.debug(f"Loading config from {split[1]} with authentication mode: {split[0]}")
    conf = SymConfig(split[1], split[1])
    conf.load_config()
    if split[0].lower() == "rsa":
        auth = SymBotRSAAuth(conf)
    else:
        auth = Auth(conf)
    auth.authenticate()

    return conf, auth
def configure_logging(filename=None, directory=None, log_level=logging.DEBUG, format=None, filemode='a'):
    """Set up the loggers with basic defaults.

    Set filename and directory to both be None to not save to file.

    filename: The filename to give to the logs. If this has paths as well they
        will be relative to the directory. Defaults to log.log.
    directory: Directory to save the logs in. Defaults to the directory
        containing the main script. Put "." to make this the current working
        directory. The folder is created if it doesn't already exist.
    log_level: One of logging.levels
    format: Defaults to '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    filemode: How to open the logfile, 'w' rewrites, 'a' appends to existing

    Fixes relative to the original:
    - `sys.curdir` raised NameError (`sys` is not imported; `curdir` lives in
      `os` anyway) -- now uses os.curdir.
    - the "log.log" default sat on the same if/elif chain as the directory
      handling, so it was skipped whenever directory was None or ".", making
      os.path.join crash on a None filename -- the defaults are now applied
      independently.
    - the `import __main__` path resolution only runs when file logging is
      actually requested, so loading from a REPL no longer risks a crash.
    """
    file_log = not (directory is None and filename is None)

    full_path = None
    if file_log:
        if directory is None:
            # It's generally bad practice to import in functions, but this
            # import might fail (if loaded from a REPL etc.), so it's best to
            # leave it until it's definitely needed.
            import __main__
            directory = os.path.dirname(__main__.__file__)
        elif directory == ".":
            directory = os.curdir
        if filename is None:
            filename = "log.log"

        full_path = os.path.join(directory, filename)
        # filename may itself have contained path components
        full_dir = os.path.dirname(full_path)
        if full_dir and not os.path.exists(full_dir):
            os.makedirs(full_dir, exist_ok=True)

    if format is None:
        format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'

    logging.basicConfig(
        filename=full_path,
        format=format,
        filemode=filemode, level=log_level
    )
    # urllib3 is extremely chatty at DEBUG; keep it at WARNING
    logging.getLogger("urllib3").setLevel(logging.WARNING)
| true |
384b390d3d8ad62cb3e98493a1ccf46a6efdee88 | Python | ricardoleopoldo/crackSenhaUserLinux | /crackSenhaUserLinux.py | UTF-8 | 653 | 3.21875 | 3 | [] | no_license | #!/usr/bin/python
#coding: utf-8
import crypt
import sys
import time
# Dictionary attack against a Linux crypt(3) password hash using wl.txt.
# NOTE(review): Python 2 script (raw_input / print statements); `hash` and
# `file` shadow builtins; the wordlist file is not closed on the success path.
hash = raw_input('Digite o hash completo: ')
salt = raw_input('Digite o salt:')

file = open('wl.txt','r') #loads the wordlist
senhas = file.read().split('\n')

inicio = time.time()

print "\nTestando as senhas, por favor aguarde ..."
# try each candidate: crypt it with the given salt and compare to the hash
for senha in senhas:
    resultado = crypt.crypt(senha,salt)
    if (resultado == hash):
        print "Senha encontrada: "+senha
        final = time.time()
        print "Tempo de execução: %.2f" % (final - inicio)+" ms"
        sys.exit(0)
file.close()
# exhausted the wordlist without a match
print "Senha não encontrada"
final = time.time()
print "Tempo de execução: %.2f" % (final - inicio)+" ms"
sys.exit(0)
| true |
f5bd5fbeb3daaacc1dd030b15d56e874436212ce | Python | miroslavvidovic/gcalcli-gui | /gcalcli-gui.py | UTF-8 | 4,392 | 2.65625 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
import sys
from subprocess import call
from PyQt4.QtCore import pyqtSlot
from PyQt4 import QtGui, QtCore
from PyQt4.QtGui import QFormLayout, QHBoxLayout, QFrame
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from Task import Task
class TaskForm(QtGui.QWidget):
    """Single-window PyQt4 form for composing a Google Calendar event; on
    Ok the field values are handed to the Task model, which submits the
    event (presumably via gcalcli -- confirm in Task.add_to_calendar)."""
    def __init__(self):
        super(TaskForm, self).__init__()
        self.initUI()
    def initUI(self):
        """Build the form: title, date, time, location, description,
        duration/reminder row, and the Ok/Cancel buttons."""
        self.form = QFormLayout()
        # --- title (required: its textChanged signal gates the Ok button) ---
        self.labelTitle = QtGui.QLabel(self)
        self.labelTitle.setText("Title:")
        self.textboxTitle = QtGui.QLineEdit(self)
        self.textboxTitle.textChanged.connect(self.toggle_ok_button)
        self.form.addRow(self.labelTitle, self.textboxTitle)
        # --- date: calendar widget plus a label echoing the selection ---
        self.labelDate = QtGui.QLabel(self)
        self.labelDate.setText("Date:")
        self.cal = QtGui.QCalendarWidget(self)
        self.cal.setGridVisible(True)
        self.cal.clicked[QtCore.QDate].connect(self.showDate)
        self.form.addRow(self.labelDate, self.cal)
        self.lbl = QtGui.QLabel(self)
        date = self.cal.selectedDate()
        self.lbl.setText(date.toString())
        self.lbl.setAlignment(Qt.AlignCenter)
        self.form.addRow(self.lbl)
        # --- start time ---
        self.labelTime= QtGui.QLabel(self)
        self.labelTime.setText("Time:")
        self.textboxTime = QtGui.QTimeEdit(self)
        self.form.addRow(self.labelTime, self.textboxTime)
        # --- location ---
        self.labelLocation= QtGui.QLabel(self)
        self.labelLocation.setText("Location:")
        self.textboxLocation = QtGui.QLineEdit(self)
        self.form.addRow(self.labelLocation, self.textboxLocation)
        # --- free-form description ---
        self.labelDescription = QtGui.QLabel(self)
        self.labelDescription.setText("Description:")
        self.plainTextDesc = QtGui.QPlainTextEdit(self)
        self.form.addRow(self.labelDescription, self.plainTextDesc)
        # --- duration + reminder share one row; integer-only (0..100) inputs ---
        self.hbox = QHBoxLayout()
        self.labelDuration = QtGui.QLabel(self)
        self.labelDuration.setText("Duration:")
        self.textboxDuration = QtGui.QLineEdit(self)
        self.textboxDuration.setValidator(QIntValidator(0,100))
        self.hbox.addWidget(self.textboxDuration)
        self.labelReminder = QtGui.QLabel(self)
        self.labelReminder.setText(" Reminder:")
        self.textboxReminder = QtGui.QLineEdit(self)
        self.textboxReminder.setValidator(QIntValidator(0,100))
        self.hbox.addWidget(self.labelReminder)
        self.hbox.addWidget(self.textboxReminder)
        self.form.addRow(self.labelDuration,self.hbox)
        # --- horizontal separator above the button row ---
        self.separator = QFrame()
        self.separator.setFrameShape(QFrame.HLine)
        self.separator.setFrameShadow(QFrame.Sunken)
        self.separator.setFixedHeight(20)
        self.form.addWidget(self.separator)
        # --- Ok / Cancel (NOTE(review): Cancel is not wired to any slot) ---
        self.buttons = QHBoxLayout()
        self.btnOk = QtGui.QPushButton('Ok', self)
        self.btnOk.clicked.connect(self.ok_click)
        self.buttons.addWidget(self.btnOk)
        self.btnCancel = QtGui.QPushButton('Cancel', self)
        self.buttons.addWidget(self.btnCancel)
        self.form.addItem(self.buttons)
        # --- window chrome ---
        self.setGeometry(500, 50, 550, 530)
        self.setWindowTitle('Google calendar task')
        self.setWindowIcon(QtGui.QIcon('calendar.png'))
        self.setLayout(self.form)
        self.show()
    @pyqtSlot()
    def ok_click(self):
        """Collect all field values, create/submit the Task, then exit.
        NOTE(review): the gmail address is hard-coded here."""
        gmail = "vidovic.miroslav.vm@gmail.com"
        title = self.textboxTitle.text()
        time = self.textboxTime.text()
        location = self.textboxLocation.text()
        duration = self.textboxDuration.text()
        reminder = self.textboxReminder.text()
        date = self.lbl.text()
        description = self.plainTextDesc.toPlainText()
        print(title, location, date, time, duration, description, reminder)
        task = Task(gmail, title, location, date, time, duration, description,
                    reminder)
        task.add_to_calendar()
        sys.exit()
    def showDate(self, date):
        # echo the picked date as M/dd/yyyy (the format Task expects, presumably)
        self.lbl.setText(date.toString("M/dd/yyyy"))
    # TODO: Fix the issue where the ok button is enabled on start
    def toggle_ok_button(self):
        # enable Ok only while the title field is non-empty
        text = self.textboxTitle.text()
        if text:
            self.btnOk.setEnabled(True)
        else:
            self.btnOk.setEnabled(False)
def main():
    """Launch the Qt application and show the task form."""
    application = QtGui.QApplication(sys.argv)
    # keep a reference so the window is not garbage-collected
    form = TaskForm()
    sys.exit(application.exec_())


if __name__ == '__main__':
    main()
| true |
666d472013c59cd6ad7452b5826994e9db2d0622 | Python | thacdtd/AAI | /IRM/irm.py | UTF-8 | 6,150 | 3.390625 | 3 | [] | no_license | """Chinese Restaurant Process Infinite Mixture Model
This implementation assumes that the likelihood function and the prior distribution
over the parameters are conjugate pairs.
"""
import random
import numpy
import scipy.stats as stats
import scipy.cluster.vq as vq
class CRPClusterModel(object):
    """Chinese Restaurant Process Infinite Mixture Model
    Non-parametric Bayesian clustering with Chinese Restaurant Process prior
    The parameters for Gibbs sampling can be specified:
    num_iter : number of iterations to run. One iteration cycles through every data point once.
    eb_start : The trial where Empirical Bayes alpha adjustment begins
    eb_interval : The interval (number of trials) at which we adjust alpha
    """
    def __init__(self, alpha, likelihood_fn):
        """Initialize with the concentration hyperparameter alpha and likelihood function
        The likelihood function must have this form
        def likelihood_fn(data, i, clustering, cluster_assn):
            Returns a vector x of length len(clustering) + 1
              x[j] = P(data[i] | the cluster assignment so far AND data[i] assign to cluster j)
        where
            clustering - a list of clusters. Each cluster is a set of indices in the data
            cluster assignment - per data point, a reference to its cluster
        NOTE(review): unlike the example below suggests, this implementation
        stores each cluster as a set and cluster_assn[i] is a reference to
        the set containing i, not an integer cluster id.
        Examples
            Cluster 0 contains data from [1, 2, 5]
            Cluster 1 contains data from [0, 3, 4]
            Then clustering == [ {1,2,5}, {0,3,4} ]
            AND cluster_assn[i] is the set that contains i
        Note that the two formats are redundant.
        """
        self.alpha = alpha
        self.likelihood_fn = likelihood_fn
        # gibbs sampling parameters
        self.num_iter = 100
        self.eb_start = 20
        self.eb_interval = 5
    def cluster(self, data):
        """Cluster the data based on CRP prior infinite mixture model
        Args
            data must be a list of data points. Each data point can be any form.
            but self.likelihood_fn should be implemented accordingly.
        Returns
            clustering - a list of clusters. Each cluster is a set of indices in the data
            cluster assignment - per point, a reference to its cluster set
        """
        return self._gibbs_sampling_crp(data)
    def _initialize_assn(self, data):
        """Initial cluster assignment before Gibbs sampling Process
        Seats each point in turn by sampling from CRP prior * likelihood."""
        clustering = []
        cluster_assn = []
        for i in range(len(data)):
            # CRP prior: P(existing cluster) ~ cluster size, P(new cluster) ~ alpha
            crp_prior = [(len(x) + 0.0) / (i + self.alpha) for j, x in enumerate(clustering)]
            crp_prior.append(self.alpha / (i + self.alpha))
            crp_prior = numpy.array(crp_prior)
            likelihood = self.likelihood_fn(data, i, clustering, cluster_assn)
            probs = crp_prior * likelihood
            cluster = sample_with_weights(probs)
            if cluster == len(clustering):
                # the extra final slot means "open a brand-new cluster"
                s = set([i])
                clustering.append(s)
            else:
                clustering[cluster].add(i)
            # store a reference to the cluster *set*, not an index
            cluster_assn.append(clustering[cluster])
        return clustering, cluster_assn
    def _gibbs_sampling_crp(self, data):
        """Run Gibbs sampling to get the cluster assignment """
        num_data = len(data)
        clustering, cluster_assn = self._initialize_assn(data)
        for t in range(self.num_iter):
            num_new_clusters = 0.0
            for i in range(num_data):
                # unseat point i, dropping its cluster entirely if now empty
                cluster_assn[i].remove(i)
                if len(cluster_assn[i]) == 0:
                    clustering.remove(cluster_assn[i])
                # reseat i: CRP prior over remaining clusters plus a new one
                crp_prior = [(len(x) + 0.0) / (num_data - 1 + self.alpha) for j, x in enumerate(clustering)]
                crp_prior.append(self.alpha / (num_data - 1 + self.alpha))
                crp_prior = numpy.array(crp_prior)
                likelihood = self.likelihood_fn(data, i, clustering, cluster_assn)
                probs = crp_prior * likelihood
                cluster = sample_with_weights(probs)
                if cluster == len(clustering):
                    s = set([i])
                    clustering.append(s)
                    num_new_clusters += 1
                else:
                    clustering[cluster].add(i)
                cluster_assn[i] = clustering[cluster]
            # Empirical Bayes for adjusting hyperparameters:
            # crude update, alpha <- number of clusters born this sweep
            if t % self.eb_interval == 0 and t > self.eb_start:
                self.alpha = num_new_clusters
        return clustering, cluster_assn
def sample_with_weights(weights, sum_weights=None):
    """Draw an index from a multinomial distribution (roulette-wheel sampling)

    Args:
        weights - array-like of non-negative weights, one per index
        sum_weights - optional precomputed sum of `weights`; supplying it
            when sampling repeatedly from the same weights saves time

    Returns:
        the sampled index, or -1 when a weight is negative (invalid) or
        the spin falls past the end (e.g. all weights are zero)
    """
    if sum_weights is None:
        sum_weights = numpy.sum(weights)
    spin = random.uniform(0, sum_weights)
    cumulative = 0
    for idx, weight in enumerate(weights):
        if weight < 0:
            return -1
        cumulative += weight
        if spin < cumulative:
            return idx
    return -1
def example_likelihood_fn(data, i, clustering, cluster_assn):
    """Example likelihood: Gaussian of std 1 around each existing cluster's
    mean, plus a broad N(0, 10) component for opening a new cluster."""
    cluster_means = [numpy.mean(data[list(members)]) for members in clustering] + [0]
    cluster_stds = [1] * len(clustering) + [10]
    return stats.norm.pdf(data[i], cluster_means, cluster_stds)
if __name__ == '__main__':
    # Demo (Python 2 prints): three Gaussian clusters, shuffled so that
    # data ordering carries no signal, then recovered by the CRP sampler.
    true_means = [5, 10, 12]
    data = numpy.concatenate(
        (stats.norm.rvs(5, 1, size=200), stats.norm.rvs(10, 1, size=200), stats.norm.rvs(12, 1, size=200)))
    random.shuffle(data)
    crp_model = CRPClusterModel(1.0, example_likelihood_fn)
    # NOTE(review): calls the private _gibbs_sampling_crp directly; the
    # public cluster() wrapper does exactly the same thing.
    clustering, cluster_assn = crp_model._gibbs_sampling_crp(data)
    means = [numpy.mean(data[list(cluster)]) for cluster in clustering]
    print 'True means are %s.\nCluster means are %s.' % (true_means, means)
    print clustering
    print cluster_assn | true |
acd2b2538dda1b3c5bd5fd0ae8151c7047862299 | Python | prensing/fll_scheduling | /schedulingModel.py | UTF-8 | 36,419 | 2.84375 | 3 | [] | no_license | #!/usr/bin/python3
import sys
import datetime
import math
import re
import csv
import copy
import functools
from dateutil.parser import parse as dateparse
from random import shuffle, seed
# from pprint import pprint
@functools.total_ordering
class EventTime(object):
    """A time-of-day stored as integer minutes since the event start.

    Constructed from either a parseable time string (relative to the
    shared eventStartTime) or a raw minute count.  Supports +/- integer
    arithmetic and value-based comparison/hashing.  When eventEndTime is
    set, every value is clamped to it.
    """
    # shared across all instances: wall-clock datetime corresponding to minute 0
    eventStartTime = None
    # shared: event length in minutes; when set (int), times clamp to it
    eventEndTime = None  # integer

    def __init__(self, tm):
        if isinstance(tm, str):
            dt = dateparse(tm)
            self._minutes = int((dt - self.eventStartTime).total_seconds() // 60)
        else:
            self._minutes = tm
        if self.eventEndTime is not None:
            self._minutes = min(self.eventEndTime, self._minutes)

    def __int__(self):
        return self._minutes

    def __hash__(self):
        return hash(self._minutes)

    def __incr__(self, min_incr):
        """In-place add of min_incr minutes (clamped); returns self.

        NOTE: despite the dunder-style name this is NOT a Python operator
        hook; it must be called explicitly.
        Fix: the type check previously tested the *builtin* ``min``
        (``type(min) != int``), so every call raised TypeError; it now
        tests the ``min_incr`` argument as intended.
        """
        if type(min_incr) != int:
            raise TypeError
        val = self._minutes + min_incr
        if self.eventEndTime is not None:
            val = min(self.eventEndTime, val)
        self._minutes = val
        return self

    def __add__(self, min_incr):
        """Return a new EventTime min_incr minutes later (clamped)."""
        if type(min_incr) != int:
            raise TypeError
        val = self._minutes + min_incr
        if self.eventEndTime is not None:
            val = min(self.eventEndTime, val)
        return EventTime(val)

    def __sub__(self, other):
        """Difference between two EventTimes, in minutes (an int)."""
        if type(other) != type(self):
            raise TypeError
        return self._minutes - other._minutes

    def __eq__(self, other):
        if type(other) != type(self):
            raise TypeError("Unsupported type %s" % type(other))
        return self._minutes == other._minutes

    def __lt__(self, other):
        if type(other) != type(self):
            raise TypeError
        return self._minutes < other._minutes

    def __str__(self):
        # wall-clock HH:MM; requires eventStartTime to be set
        return (self.eventStartTime + datetime.timedelta(minutes=self._minutes)).strftime('%H:%M')

    def __repr__(self):
        return "EventTime('%s')" % str(self)
class Team(object):
    """A competing team plus its accumulating personal schedule of judge
    sessions and robot matches."""
    def __init__(self, index, teamNumber, name):
        self.index = index
        self.teamNumber = teamNumber
        self.name = name
        # list of [event, slot] pairs; lists (not tuples) so the slot can
        # be swapped in place later (see Match.swapTeams)
        self.schedule = []
        return
    def designation(self):
        # real teams are shown by number; the dummy/filler team by name
        if self.teamNumber > 0:
            return self.teamNumber
        return self.name
    def addEvent(self, evt, slot):
        self.schedule.append([evt, slot])  # use a list so we can swap slots later
        return
    def __lt__(self, other):
        if type(other) != type(self):
            raise TypeError
        return self.teamNumber < other.teamNumber
    def __str__(self):
        return str(self.teamNumber)
    def travelTime(self):
        """Minimum gap (minutes) between any two consecutive bookings."""
        minTravel = 10000
        prevET = None
        for evt, slot in sorted(self.schedule, key=lambda e: e[0].startTime()):
            if prevET is not None:
                dt = evt.startTime() - prevET
                minTravel = min(minTravel, dt)
            prevET = evt.endTime()
        return minTravel
    def inventoryTables(self):
        '''Inventory tables the team uses
        (maps table-side name -> list of matches played there)'''
        tables = {}
        for evt, slot in self.schedule:
            if isinstance(evt, Match):
                tbl = evt.tableName(slot)
                tables.setdefault(tbl, []).append(evt)
        return tables
    def countDuplicateTables(self):
        '''Count the number of duplicate tables the team uses'''
        return sum([len(t) - 1 for t in self.inventoryTables().values()])
    def minimizeDuplicateTables(self):
        """Try one accepted side-swap on the most-repeated table; returns
        True when a swap improved things (callers can loop until False)."""
        tbls = self.inventoryTables()
        matchsets = sorted(tbls.values(), key=lambda t: len(t), reverse=True)
        swapped = False
        for mlist in matchsets:
            if len(mlist) == 1:
                # sorted descending: nothing duplicated from here on
                break
            for match in mlist:
                if match.trySwapTeams():
                    swapped = True
                    break
        return swapped
    def outputSchedule(self, outCSV):
        """Write this team's personal schedule: team header row, column
        header row, then each event in time order."""
        outCSV.writerow((self.teamNumber, self.name))
        outCSV.writerow(('Event', 'Room/Table', 'StartTime', 'EndTime'))
        for evt, slot in sorted(self.schedule):
            evt.outputTeamSchedule(slot, outCSV)
        return
@functools.total_ordering
class TimeSlot(object):
    """Base class for anything occupying a time span (judge sessions,
    matches).  Also owns the class-level registry of time-block
    boundaries shared by the LP model."""

    # minutes of padding after a slot's end (set from config)
    travelTime = 0
    # sorted boundary times, and a lookup from boundary -> ordinal
    timeBlockBoundaries = []
    timeBlock2Index = {}

    def __init__(self, index, startT, endT):
        self.index = index
        self._startT = copy.copy(startT)
        self._endT = copy.copy(endT)
        # extra minutes tacked onto the padded end (e.g. over lunch)
        self.extendEnd = 0

    @classmethod
    def setTimeBlocks(cls, alltimes):
        """Record the global set of block boundary times."""
        ordered = sorted(alltimes)
        cls.timeBlockBoundaries = ordered
        cls.timeBlock2Index = dict((t, i) for i, t in enumerate(ordered))

    @classmethod
    def numTimeBlocks(cls):
        # boundaries delimit regions, so one fewer block than boundaries
        return len(cls.timeBlock2Index) - 1

    def startTime(self):
        return self._startT

    def endTime(self, padded=False):
        """End time; with padded=True, travel time and any extension are added."""
        if not padded:
            return self._endT
        return self._endT + (self.travelTime + self.extendEnd)

    def timeBlockRange(self, padded=False):
        """Range of time-block ordinals this slot covers."""
        first = self.timeBlock2Index[self._startT]
        last = self.timeBlock2Index[self.endTime(padded)]
        return range(first, last)

    def __eq__(self, other):
        if not isinstance(other, TimeSlot):
            raise TypeError("Unsupported type %s" % type(other))
        return self._startT == other._startT

    def __lt__(self, other):
        if not isinstance(other, TimeSlot):
            raise TypeError
        return self._startT < other._startT
class JudgeEvent(object):
    """One judged event (e.g. Project): its rooms and ordered list of
    JudgeSessions, plus an optional 'block' sub-schedule that rotates
    teams through several sub-events within each session."""
    def __init__(self, index, name):
        self.index = index
        self.name = name
        self.rooms = []
        self.sessions = []
        self.subSchedule = None  # for block scheduling
        # sub-event name -> its room list (block scheduling only)
        self.blockEvents = {}
        return
    def findSession(self, sessIndex):
        # sessions are stored in order with 1-based indices
        s = self.sessions[sessIndex - 1]
        assert(s.index == sessIndex)
        return s
    def buildBlockSubschedule(self, config):
        """Build self.subSchedule: for each room of this event, the
        rotation of (sub-event, sub-room, relative times) a team seated
        in that room follows across the block's back-to-back sessions.

        NOTE(review): the room/step arithmetic below assumes every
        sub-event has the same number of rooms (nRooms1 keeps the value
        from the last sub-event) -- confirm against real configs.
        """
        nSessions = len(config['events'])
        sessLen = config['sessionLen']
        deltaT = sessLen + config['sessionBreak']
        # relative (start, end) minutes of each sub-session within the block
        times = []
        sT = 0
        evtRoom = []  # NOTE(review): built but never used
        for se in config['events']:
            eT = sT + sessLen
            times.append((sT, eT))
            self.blockEvents[se['name']] = se['rooms']
            nRooms1 = len(se['rooms'])
            for r in se['rooms']:
                evtRoom.append((se['name'], r))
            sT += deltaT
        step = 0
        results = {}
        for rmIndex, rm in enumerate(self.rooms):
            reslist = []
            used = set()
            seRmIndex = rmIndex % nRooms1
            for seLoop in range(nSessions):
                # advance to a sub-room this team has not used yet
                trials = 0
                while step > 0 and seRmIndex in used:
                    seRmIndex = (seRmIndex + 1) % nRooms1
                    trials += 1
                    if trials >= nRooms1:
                        print("Error: no available room")
                        sys.exit(10)
                seIndex = (seLoop + step) % nSessions
                reslist.append({'session': seLoop,
                                'event': config['events'][seIndex]['name'],
                                'room': config['events'][seIndex]['rooms'][seRmIndex],
                                'startTM': times[seLoop][0],
                                'endTM': times[seLoop][1]})
                used.add(seRmIndex)
                seRmIndex = (seRmIndex + step) % nRooms1
            # results[rm] = sorted(reslist, key=lambda x: x['session'])
            results[rm] = reslist
            if (rmIndex + 1) % nRooms1 == 0:
                # bump the rotation offset once per full set of rooms
                step += 1
        self.subSchedule = results
        return
    def outputSchedule(self, outstrm):
        """Write this event's judge-facing schedule as CSV, one titled
        section per (sub-)event name, sessions sorted by start time."""
        allSessions = {}
        for sess in self.sessions:
            sess.judgeScheduleEntries(allSessions)
        for evtName, sessions in sorted(allSessions.items()):
            outstrm.write(evtName + '\r\n')  # CSV files use DOS eol
            fields = ['Session', 'StartTime', 'EndTime', ]
            if self.subSchedule is None:
                fields.extend(self.rooms)
            else:
                fields.extend(self.blockEvents[evtName])
            outCSV = csv.DictWriter(outstrm, fieldnames=fields)
            outCSV.writeheader()
            index = 1
            for sess in sorted(sessions.values(), key=lambda r: r['StartTime']):
                sess['Session'] = index
                outCSV.writerow(sess)
                index += 1
            outstrm.write('\r\n')
        return
class JudgeSession(TimeSlot):
    """One judging time slot of a JudgeEvent: one team slot per room.

    penalty > 0 marks overflow sessions (beyond the minimum needed to
    see every team), which the solver is discouraged from using.
    """
    def __init__(self, event, index, startT, endT, penalty=0):
        TimeSlot.__init__(self, index, startT, endT)
        self.event = event
        self.penalty = penalty
        # one slot per room; None = empty
        self.teams = len(self.event.rooms) * [None, ]
        return
    def assignTeam(self, team):
        """Seat the team in the first free room (keeps empties last)."""
        # No need for shuffling, and that way, empty room is always latest possible
        for i in range(len(self.event.rooms)):
            if self.teams[i] is None:
                self.teams[i] = team
                team.addEvent(self, i)
                return
        raise Exception("Too many teams for judge rooms")
    def __str__(self):
        args = [self.index, self.startTime(), self.endTime(), self.event.name]
        args.extend(self.teams)
        return ' '.join([str(x) for x in args])
    def outputTeamSchedule(self, slot, outCSV):
        """Write this session's row(s) for one team's personal schedule:
        either the whole block rotation, or the single session."""
        if self.event.subSchedule is not None:
            for subE in self.event.subSchedule[self.event.rooms[slot]]:
                outCSV.writerow((subE['event'], subE['room'], self.startTime() + subE['startTM'], self.startTime() + subE['endTM']))
        else:
            outCSV.writerow((self.event.name, self.event.rooms[slot], self.startTime(), self.endTime()))
        return
    def judgeScheduleEntries(self, entries):
        """Accumulate this session into `entries`, keyed by event name and
        then by start time, for the judges' master schedule output."""
        if self.event.subSchedule is not None:
            for i in range(len(self.event.rooms)):
                t = self.teams[i]
                for subE in self.event.subSchedule[self.event.rooms[i]]:
                    r1 = entries.get(subE['event'], None)
                    if r1 is None:
                        r1 = {}
                        entries[subE['event']] = r1
                    st = self.startTime() + subE['startTM']
                    r2 = r1.get(st, None)
                    if r2 is None:
                        r2 = {'StartTime': st, 'EndTime': self.startTime() + subE['endTM']}
                        r1[st] = r2
                    r2[subE['room']] = t.teamNumber if t is not None else ''
        else:
            if self.event.name not in entries:
                entries[self.event.name] = {}
            row = {'StartTime': self.startTime(), 'EndTime': self.endTime()}
            for i in range(len(self.teams)):
                if self.teams[i] is not None:
                    row[self.event.rooms[i]] = self.teams[i].teamNumber
            # Fix: was `self.startT`, an attribute TimeSlot never sets (it
            # stores `_startT`), so this path raised AttributeError.
            entries[self.event.name][self.startTime()] = row
        return
class MatchList(object):
    """The complete robot-game schedule: every Match plus round-level
    settings copied from the config (tableNames is attached externally
    in ScheduleModel._createMatches)."""
    def __init__(self):
        self.matches = []
        self.nGamesPerTeam = 0
        # truthy (1) when an odd slot count forces a filler opponent
        self.dummyTeam = False
        self.breakTime = 0
        self.maxTeamMatchesPerFields = None
        return
    def outputSchedule(self, outstrm):
        """Write the whole match grid as CSV: one row per match number,
        one column per table side, with gaps longer than the normal
        break flagged as 'Break' rows."""
        fields = ['Match', 'StartTime', 'EndTime']
        for tl in self.tableNames:
            for t in tl:
                for i in range(2):
                    # two sides per table, e.g. "Red 1" / "Red 2"
                    tn = '{} {}'.format(t, i+1)
                    fields.append(tn)
        outCSV = csv.DictWriter(outstrm, fieldnames=fields)
        outCSV.writeheader()
        row = {'Match': -1}
        prevEndTime = None
        for m in self.matches:
            if row['Match'] != m.matchNum:
                # starting a new match number: flush the accumulated row
                if row['Match'] > 0:
                    outCSV.writerow(row)
                # be nice and flag breaks
                if prevEndTime is not None and m.startTime() - prevEndTime > self.breakTime:
                    outCSV.writerow({'Match': '', 'StartTime': prevEndTime, 'EndTime': m.startTime(), fields[3]: 'Break'})
                row = {'Match': m.matchNum,
                       'StartTime': m.startTime(),
                       'EndTime': m.endTime()}
            prevEndTime = m.endTime()
            for i in range(len(m.teams)):
                if m.teams[i] is not None:
                    row[m.tableName(i)] = m.teams[i].designation()
        # flush the final accumulated row
        outCSV.writerow(row)
        return
class Match(TimeSlot):
    """One table assignment within a match round: two team slots at one
    table during one numbered match.

    NOTE(review): overriding __eq__ without defining __hash__ makes
    Match unhashable under Python 3 (TimeSlot's total_ordering supplies
    the remaining comparisons) -- fine as long as matches are never used
    as dict/set keys.
    """
    def __init__(self, index, startT, endT, matchNum, table):
        TimeSlot.__init__(self, index, startT, endT)
        self.matchNum = matchNum
        self.table = table
        # self.penalty = penalty
        self.teams = [None, None]
        return
    def assignTeam(self, team):
        # pick a random free side so neither slot is systematically favored
        slots = [0, 1]
        shuffle(slots)
        for i in slots:
            if self.teams[i] is None:
                self.teams[i] = team
                team.addEvent(self, i)
                return
        raise Exception("Too many teams for match slots")
    def __eq__(self, other):
        if type(other) != type(self):
            raise TypeError("Unsupported type %s" % type(other))
        return self.index == other.index and self.table == other.table
    def __str__(self):
        args = [self.index, self.startTime(), self.endTime(), self.table]
        args.extend(self.teams)
        return ' '.join([str(x) for x in args])
    def tableName(self, slot):
        # e.g. "Red 1" / "Red 2" for the two sides of table "Red"
        return '{} {}'.format(self.table, slot+1)
    def swapTeams(self):
        '''Swap the team slots'''
        t0 = self.teams[0]
        t1 = self.teams[1]
        self.teams[0] = t1
        self.teams[1] = t0
        # make sure to change the entries in the team
        for e in t0.schedule:
            if e[0] == self:
                e[1] = 1
                break
        for e in t1.schedule:
            if e[0] == self:
                e[1] = 0
                break
        return
    def trySwapTeams(self):
        '''Try to swap the team slots to reduce table duplication in the teams'''
        cnts_start = [t.countDuplicateTables() for t in self.teams]
        self.swapTeams()
        cnts_end = [t.countDuplicateTables() for t in self.teams]
        if max(cnts_end) < max(cnts_start) or sum(cnts_end) < sum(cnts_start):
            # success
            return True
        # no improvement: undo the swap
        self.swapTeams()
        return False
    def outputTeamSchedule(self, slot, outCSV):
        """Write this match as one row of a team's personal schedule."""
        outCSV.writerow(('Match {}'.format(self.matchNum), self.tableName(slot), self.startTime(), self.endTime()))
        return
class ScheduleModel(object):
    """Builds the tournament data (teams, matches, judge sessions) from a
    config dict and writes a MathProg model; also parses solver output."""
    # GLPK solver
    # lineExpr = r'^\s*[0-9]+ matchAssign\[(?P<m>[^,]+),(?P<t1>[^,]+),(?P<t2>[^,]+)\]\s+\*\s+(?P<val>[0-9]+)'
    # CBC solver (seems to be faster)
    # regex matching one assignment line of the solver's solution output
    modelLineExpr = r'^\s*[0-9]+ (?P<type>match|judge)Assign\[(?P<i1>[^,]+),(?P<i2>[^,]+),(?P<i3>[^,]+)\]\s+(?P<val>[0-9]+)'
    def __init__(self, config):
        """Build the full model from the parsed config: anchor EventTime
        to the event start, read teams, generate matches and judging
        sessions, then derive the global time-block boundaries."""
        EventTime.eventStartTime = dateparse(config['startTime'])
        self.eventDuration = 0
        TimeSlot.travelTime = config['travelTime']
        self.teams = ScheduleModel._readTeams(config['teams'])
        self._createMatches(config['matchInfo'])
        self.hasJudgePenalty = False
        self._createJudgeSessions(config['judgeEvents'])
        # everything is laid out now, so clamp any further EventTimes
        EventTime.eventEndTime = self.eventDuration
        self.scheduleBlocks = config.get('scheduleBlocks', None)
        # now that the schedule is defined, find all the time periods
        self.setTimeBlocks()
        return
def findTeam(self, index):
# quick method, but double check
if index == len(self.teams) + 1:
return Team(index, -1, 'DUMMY')
t = self.teams[index - 1]
assert(t.index == index)
return t
def findMatch(self, index):
m = self.matchList.matches[index - 1]
assert(m.index == index)
return m
def findJudgeEvent(self, index):
for e in self.judgeEvents.values():
if e.index == index:
return e
return None
def setTimeBlocks(self):
alltimes = set()
for e in self.judgeEvents.values():
for s in e.sessions:
alltimes.add(s.startTime())
alltimes.add(s.endTime(padded=True))
for e in self.matchList.matches:
alltimes.add(e.startTime())
alltimes.add(e.endTime(padded=True))
TimeSlot.setTimeBlocks(alltimes)
return
# ----------------------------------------------------------------------------------------------------
# Config methods
@staticmethod
def _readTeams(tList):
result = []
index = 1
for num, name in tList:
result.append(Team(index, num, name))
index += 1
return result
def _readBreaks(self, times):
breaks = []
for brkSt, brkEt in times:
st = EventTime(brkSt)
et = EventTime(brkEt)
breaks.append((st, et))
return breaks
    def _createMatches(self, config):
        """Generate every Match slot from the match config: alternating
        table sets, configured breaks, optional one-field periods,
        table-set resets after breaks, and optional session stretching.
        Also decides whether a dummy filler team is needed and updates
        self.eventDuration."""
        self.matchList = MatchList()
        self.matchList.nGamesPerTeam = config['gamesPerTeam']
        self.matchList.tableNames = config['tableNames']
        self.matchList.breakTime = config['matchBreak']
        self.matchList.maxTeamMatchesPerFields = config.get('maxTeamMatchesPerFields', None)
        resetAfterBreak = config.get('resetAfterBreak', False)
        nTeams = len(self.teams)
        # total matches needed; if it rounds up, a dummy team fills the odd slot
        nMatchesFloat = float(nTeams) * self.matchList.nGamesPerTeam / 2 + config.get('extraMatches', 0)
        nMatches = int(math.ceil(nMatchesFloat))
        self.matchList.dummyTeam = 1 if int(nMatchesFloat) != nMatches else 0
        breakTimes = self._readBreaks(config['breakTimes'])
        # NOTE(review): resetAfterBreak is read as a bool above and then
        # unconditionally overwritten here -- the bool default is dead.
        if 'resetAfterBreak' in config:
            resetAfterBreak = [EventTime(t) for t in config['resetAfterBreak']]
        else:
            resetAfterBreak = ()
        if 'oneFieldOnly' in config:
            oneFieldOnly = [EventTime(t) for t in config['oneFieldOnly']]
        else:
            oneFieldOnly = {}
        # ------------------------------
        # compute matches times, names
        startT = EventTime(0)
        mLen = config['matchLen']
        dt = mLen + self.matchList.breakTime
        nTableSets = len(self.matchList.tableNames)
        tblSet = 0
        matchSession = 1
        matchIndex = 0
        while matchIndex < nMatches:
            endT = startT + mLen
            oneField = startT in oneFieldOnly
            for tbl in config['tableNames'][tblSet]:
                matchIndex += 1
                self.matchList.matches.append(Match(matchIndex, startT, endT, matchSession, tbl))
                if matchIndex >= nMatches:
                    break
                if oneField:
                    # only the first table of the set runs in a one-field period
                    break
            matchSession += 1
            startT += dt
            # jump past any configured break that the next start falls into
            for brkST, brkET in breakTimes:
                if startT >= brkST and startT < brkET:
                    startT = brkET
                    break
            if startT in resetAfterBreak:
                tblSet = 0
            else:
                tblSet = (tblSet + 1) % nTableSets
        print('Matches end at {}'.format(startT), file=sys.stderr)
        self.eventDuration = max(self.eventDuration, int(startT))
        if 'extendSessions' in config:
            # extend over breaks so teams have decent amount of time
            # can help if breaks don't happen because of overruns
            for st, et, delT in config['extendSessions']:
                startT1 = EventTime(st)
                endT1 = EventTime(et)
                for m in self.matchList.matches:
                    if m.startTime() >= startT1 and m.startTime() < endT1:
                        m.extendEnd = delT
        return
    def _createJudgeSessions(self, config):
        """Build one JudgeEvent (with timed JudgeSessions) per judging area.

        config is a list of dicts, one per judging area, providing
        'name', 'rooms', 'breakTimes', 'sessionLen' and 'sessionBreak',
        and optionally 'extendSessions' and 'subEvents'.  Updates
        self.judgeEvents, self.hasJudgePenalty and self.eventDuration.
        """
        self.judgeEvents = {}
        eventIndex = 1
        for judgeInfo in config:
            event = JudgeEvent(eventIndex, judgeInfo['name'])
            self.judgeEvents[judgeInfo['name']] = event
            event.rooms = judgeInfo['rooms']
            breakTimes = self._readBreaks(judgeInfo['breakTimes'])
            startT = EventTime(0)
            sLen = judgeInfo['sessionLen']
            # Stride between session starts = session length + gap.
            dt = sLen + judgeInfo['sessionBreak']
            # NOTE(review): nSessFull is a float (true division) while nSess is
            # its ceiling; sessions beyond the "full" count get a penalty —
            # presumably these are the partially-filled overflow sessions.
            nSessFull = len(self.teams) / len(judgeInfo['rooms'])
            nSess = int(math.ceil(len(self.teams) / len(judgeInfo['rooms'])))
            for sessIndex in range(1, nSess+1):
                pen = 0
                if sessIndex > nSessFull:
                    pen = 10
                    self.hasJudgePenalty = True
                endT = startT + sLen
                session = JudgeSession(event, sessIndex, startT, endT, pen)
                event.sessions.append(session)
                startT += dt
                # Push the next session past any break it would start inside.
                for brkST, brkET in breakTimes:
                    if startT >= brkST and startT < brkET:
                        startT = brkET
                        break
            if 'extendSessions' in judgeInfo:
                # extend over lunch so teams have decent amount of time
                st = EventTime(judgeInfo['extendSessions'][0])
                et = EventTime(judgeInfo['extendSessions'][1])
                delT = judgeInfo['extendSessions'][2]
                for jS in event.sessions:
                    if jS.startT >= st and jS.startT < et:
                        jS.extendEnd = delT
            if 'subEvents' in judgeInfo:
                event.buildBlockSubschedule(judgeInfo['subEvents'])
            eventIndex += 1
            print('Judge {} ends at {}'.format(judgeInfo['name'], startT), file=sys.stderr)
            self.eventDuration = max(self.eventDuration, int(startT))
        return
# ----------------------------------------------------------------------------------------------------
# Output model file
    def writeModel(self):
        """Emit the complete GMPL model (params, objective, constraints, data)
        to stdout, in the order the solver expects."""
        self._writeParams()
        self._writeObjective()
        self._handleScheduleBlocks()
        self._handleFieldDistribution()
        self._writeData()
        return
    def _writeParams(self):
        """Print the GMPL set/param/var declarations for the model."""
        maxTeams = len(self.teams)
        # A dummy team pads an odd total so every match has two slots.
        if self.matchList.dummyTeam:
            maxTeams += 1
        print('# Model created', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        print()
        print('set teams := 1 .. %d;' % len(self.teams))
        print('set matches := 1 .. %d;' % len(self.matchList.matches))
        print('set judgeEvents := 1 .. %d;' % len(self.judgeEvents))
        print('param nJudgeSessions{en in judgeEvents};')
        print('set judgeSessions{en in judgeEvents} := 1 .. nJudgeSessions[en];')
        print('set times := 0 .. %d;' % (TimeSlot.numTimeBlocks() - 1))
        print()
        # matchAssign uses t2 > t1 so each unordered pair appears once.
        print('var matchAssign{m in matches, t1 in teams, t2 in t1+1 .. %d}, binary;' % maxTeams)
        # if self.hasMatchPenalty:
        # print('param matchPenalties{m in matches}, default 0, >= 0;')
        print('var judgeAssign{en in judgeEvents, j in judgeSessions[en], t in teams}, binary;')
        if self.hasJudgePenalty:
            print('param judgePenalties{en in judgeEvents,j in judgeSessions[en]}, default 0, >= 0;')
        print()
        print('param judgeEventTimes{en in judgeEvents,judgeSessions[en], tm in times}, binary, default 0;')
        print('param matchTimes{m in matches, tm in times}, binary, default 0;')
        return
    def _writeObjective(self):
        """Print the GMPL objective plus the core assignment constraints
        (matches per team, dummy handling, no re-matches, judging counts,
        and the one-place-at-a-time exclusion)."""
        maxTeams = len(self.teams)
        if self.matchList.dummyTeam:
            maxTeams += 1
        print()
        print('minimize f:')
        done = False
        # if self.hasMatchPenalty:
        # print(' sum{m in matches,t in teams,t2 in t+1 ..', nTeams+1, '} matchPenalties[m] * matchAssign[m,t,t2] +'.format_map(params))
        # print(' sum{m in matches,t in 1 ..', nTeams, ',t2 in 1 .. t-1} matchPenalties[m] * matchAssign[m,t2,t]', end='')
        # done = True
        if self.hasJudgePenalty:
            if done:
                print(' +')
            print(' sum{en in judgeEvents,j in judgeSessions[en],t in teams} judgePenalties[en,j] * judgeAssign[en,j,t]', end='')
            done = True
        if not done:
            # Constant objective: this becomes a pure feasibility problem.
            print(' 1', end='')
        print(';')
        print()
        print('# number of matches for each team')
        print('s.t. teamMatches{t in teams}:')
        print(' (sum{m in matches, t2 in 1 .. t-1} matchAssign[m,t2,t]) +')
        print(' (sum{m in matches, t2 in t+1 .. %d} matchAssign[m,t,t2]) = %d;' % (maxTeams, self.matchList.nGamesPerTeam))
        if self.matchList.dummyTeam:
            # The dummy team plays exactly once, in the final match only.
            print('s.t. dummyMatch:')
            print(' (sum{{t in teams}} matchAssign[{},t,{}]) = 1;'.format(len(self.matchList.matches), maxTeams))
            print('s.t. dummyNonMatch{{t in teams,m in 1 .. {}}}:'.format(len(self.matchList.matches)-1))
            print(' matchAssign[m,t,{}] = 0;'.format(maxTeams))
        print('# only one pair per match')
        print('s.t. teamsPerMatch{m in matches}:')
        print(' sum{t1 in teams, t2 in t1+1 .. %d} matchAssign[m,t1,t2] <= 1;' % maxTeams)
        print('# no re-matches')
        print('s.t. rematches{t1 in teams, t2 in t1+1 .. %d}:' % maxTeams)
        print(' sum{m in matches} matchAssign[m,t1,t2] <= 1;')
        print()
        print('# number of teams per judge slot')
        for jName, event in self.judgeEvents.items():
            print('s.t. {}Slots{{j in judgeSessions[{}]}}:'.format(jName, event.index))
            print(' sum{t in teams} judgeAssign[%d,j,t] <= %d;' % (event.index, len(event.rooms)))
        print('# teams get judged exactly once per event type')
        print('s.t. teamJudgings{en in judgeEvents,t in teams}:')
        print(' sum{j in judgeSessions[en]} judgeAssign[en,j,t] = 1;')
        print()
        print('# team can only be in once place at a time')
        print('s.t. teamLocation{t in teams,tm in times}:')
        print(' (sum{je in judgeEvents, j in judgeSessions[je]} judgeAssign[je,j,t] * judgeEventTimes[je,j,tm]) +')
        print(' (sum{t2 in 1 .. t-1, m in matches} matchAssign[m,t2,t] * matchTimes[m,tm]) +')
        print(' (sum{t2 in t+1 .. %d, m in matches} matchAssign[m,t,t2] * matchTimes[m,tm]) <= 1;' % maxTeams)
        return
def _handleScheduleBlocks(self):
if self.scheduleBlocks is None:
return
maxTeams = len(self.teams)
if self.matchList.dummyTeam:
maxTeams += 1
print()
index = 0
for st, et, judgeEvts in self.scheduleBlocks:
startT = EventTime(st)
endT = EventTime(et)
startMatch = None
endMatch = None
for m in self.matchList.matches:
if startMatch is None and m.startTime() >= startT:
startMatch = m.index
if m.endTime() <= endT:
endMatch = m.index
if startMatch is not None and endMatch is None:
endMatch = self.matches.matchList[-1].index
print('s.t. scheduleBlock{}{{t in teams}}:'.format(index))
print(' (sum{{m in {0} .. {1}, t2 in t+1 .. {2}}} matchAssign[m,t,t2])'.format(startMatch, endMatch, maxTeams))
print(' + (sum{{m in {0} .. {1}, t2 in 1 .. t-1}} matchAssign[m,t2,t])'.format(startMatch, endMatch))
for m in judgeEvts:
startSess = None
endSess = None
evtIndex = self.judgeEvents[m].index
for s in self.judgeEvents[m].sessions:
if startSess is None and s.startTime() >= startT:
startSess = s.index
if s.endTime() <= endT:
endSess = s.index
if startSess is not None:
if startSess == endSess:
print(' + judgeAssign[{0},{1},t]'.format(evtIndex, startSess))
else:
print(' + (sum{{j in {1} .. {2}}} judgeAssign[{0},j,t])'.format(evtIndex, startSess, endSess))
print(' >= 1;')
index += 1
return
    def _handleFieldDistribution(self):
        """Optionally print constraints capping how often any team plays on
        the same field.  Skipped when maxTeamMatchesPerFields is None.

        Matches rotate over fields, so match indices sm, sm+nFields, ...
        all take place on the same table.
        """
        maxMatch = self.matchList.maxTeamMatchesPerFields
        if maxMatch is None:
            return
        nMatches = len(self.matchList.matches)
        nFields = sum([len(x) for x in self.matchList.tableNames])
        print()
        print('# Add constraints to spread the teams across the different fields')
        print('# Not required, and may slow down the model solving!!')
        print('s.t. maxPerField{t in teams, sm in 1 .. %d}:' % nFields)
        print(' (sum{{m in sm .. {} by {}, t2 in t+1 .. {}}} matchAssign[m,t,t2])'.format(nMatches, nFields, len(self.teams)))
        print(' + (sum{{m in sm .. {} by {}, t2 in 1 .. t-1}} matchAssign[m,t2,t]) <= {};'.format(nMatches, nFields, maxMatch))
        return
    def _writeData(self):
        """Print the GMPL 'data;' section: session counts, judge penalties,
        and the time-occupancy tables for judging sessions and matches."""
        print()
        print('data;')
        print('param nJudgeSessions :=')
        first = True
        for name, event in self.judgeEvents.items():
            if not first:
                print(',')
            print(' {} {}'.format(event.index, len(event.sessions)), end='')
            first = False
        print(';')
        if self.hasJudgePenalty:
            print('param judgePenalties :=')
            first = True
            for name, event in self.judgeEvents.items():
                for s in event.sessions:
                    if s.penalty > 0:
                        if not first:
                            print(',')
                        print(' [{},{}] {}'.format(event.index, s.index, s.penalty), end='')
                        first = False
            print(';')
        # if self.hasMatchPenalty:
        # print('# applied only to non-dummy groups')
        # print('param matchPenalties :=')
        # # first = True
        # # for match, mName, st, et, p in matchList:
        # # if p > 0:
        # # if not first: print ','
        # # print " '%s' %d" % (mName, p),
        # # first = False
        # print(';')
        # Sparse 0/1 tables: only the occupied time blocks are written; each
        # entry is trailed by a comment naming the block's wall-clock bounds.
        print('param judgeEventTimes :=')
        first = True
        commentStr = ''
        for name, event in self.judgeEvents.items():
            for s in event.sessions:
                for t in s.timeBlockRange(padded=True):
                    if not first:
                        print(', # %s' % commentStr)
                    print(" [%d,%d,%d] 1" % (event.index, s.index, t), end='')
                    commentStr = str(TimeSlot.timeBlockBoundaries[t])
                    first = False
        print('; # %s' % commentStr)
        print('param matchTimes :=')
        first = True
        commentStr = ''
        for match in self.matchList.matches:
            for t in match.timeBlockRange(padded=True):
                if not first:
                    print(', # %s' % commentStr)
                print(" [%d,%d] 1" % (match.index, t), end='')
                commentStr = str(TimeSlot.timeBlockBoundaries[t])
                first = False
        print('; # %s' % commentStr)
        print('end;')
# ----------------------------------------------------------------------------------------------------
    def outputMatches(self):
        """Write the match schedule to stdout."""
        self.matchList.outputSchedule(sys.stdout)
        return
def outputJudging(self):
for jN, judgeEvt in sorted(self.judgeEvents.items()):
judgeEvt.outputSchedule(sys.stdout)
return
    def formatOutput(self, outputBase, results):
        """Read solver output from *results* and write CSV schedules.

        Produces <outputBase>_matches.csv, _judging.csv and _teams.csv,
        printing per-team travel/duplicate-table diagnostics along the way.
        """
        with open(results) as infile:
            self.readResults(infile)
        # Post-process: swap matches to reduce repeated tables per team.
        self.minimizeDuplicateTables()
        fname = '{}_matches.csv'.format(outputBase)
        with open(fname, 'w') as outfile:
            self.matchList.outputSchedule(outfile)
        fname = '{}_judging.csv'.format(outputBase)
        with open(fname, 'w') as outfile:
            for jN, judgeEvt in sorted(self.judgeEvents.items()):
                judgeEvt.outputSchedule(outfile)
        fname = '{}_teams.csv'.format(outputBase)
        totalDuplicateTables = 0
        with open(fname, 'w') as outfile:
            outCSV = csv.writer(outfile)
            for team in sorted(self.teams):
                d = team.countDuplicateTables()
                totalDuplicateTables += d
                print('Team {} min travel = {} duplicate tables = {}'.format(team.teamNumber, team.travelTime(), d))
                team.outputSchedule(outCSV)
                # Blank row between teams.
                outCSV.writerow(())
        print('Total duplicate tables =', totalDuplicateTables)
        return
    def readResults(self, infile):
        """Parse GLPK solution output and apply the team assignments.

        Scans for matchAssign/judgeAssign variable lines (joining the
        two-line wrapped form the solver sometimes emits) and, for each
        variable whose value is 1, records the team in the corresponding
        match or judging session.
        """
        while 1:
            line = infile.readline()
            if not line:
                break
            if re.match(r'^\s*[0-9]+ (matchAssign|judgeAssign)\[.*\]\s*$', line):
                # line must be continued
                line2 = infile.readline()
                line += line2
            # 14914 matchAssign[50,7,17] 1 0
            m = re.search(ScheduleModel.modelLineExpr, line)
            if m:
                if m.group('type') == 'match':
                    # match assignment
                    val = int(m.group('val'))
                    if val:
                        match = self.findMatch(int(m.group('i1')))
                        match.assignTeam(self.findTeam(int(m.group('i2'))))
                        match.assignTeam(self.findTeam(int(m.group('i3'))))
                else:
                    # judge assignment
                    val = int(m.group('val'))
                    if val:
                        evtInd = int(m.group('i1'))
                        sessInd = int(m.group('i2'))
                        session = self.findJudgeEvent(evtInd).findSession(sessInd)
                        session.assignTeam(self.findTeam(int(m.group('i3'))))
        return
def minimizeDuplicateTables(self):
dopass = True
while dopass:
dopass = False
cnt = 0
for team in self.teams:
cnt += team.countDuplicateTables()
if team.minimizeDuplicateTables():
dopass = True
print("Minimize dup tables: total =", cnt)
return
    def assignFakeSchedule(self):
        """Fill every match and judging slot round-robin with teams so an
        empty (placeholder) schedule can be printed before solving."""
        it = 0
        nt = len(self.teams)
        for m in self.matchList.matches:
            # Two teams per match, cycling through the team list.
            for ii in range(2):
                m.assignTeam(self.teams[it])
                it = (it + 1) % nt
        it = 0
        for evt in self.judgeEvents.values():
            for s in evt.sessions:
                # One team per room, until the team list runs out.
                for r in s.event.rooms:
                    if it < len(self.teams):
                        s.assignTeam(self.teams[it])
                        it += 1
        return
# ====================================================================================================
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='FLL Schedule creater')
    parser.add_argument('-b', '--build', action='store_true', help='Build model file')
    parser.add_argument('-o', '--output', help='Formatted output base name')
    parser.add_argument('-m', '--matches', action='store_true', help='Output (empty) match schedule')
    parser.add_argument('-j', '--judging', action='store_true', help='Output (empty) judge schedule')
    parser.add_argument('configfile', help='Config file (python)')
    parser.add_argument('resultfile', nargs='?', help='Model result file')
    args = parser.parse_args()
    # Load the configuration file
    # SECURITY NOTE: exec() runs arbitrary Python from the config file;
    # only use config files from trusted sources.
    config = {}
    with open(args.configfile, 'rb') as file:
        exec(file.read(), None, config)
    # seed the random generator
    seed()
    model = ScheduleModel(config)
    # for e in model.judgeEvents['Judging'].subSchedule.items():
    # print(e)
    # sys.exit(1)
    if args.build:
        model.writeModel()
    elif args.matches:
        model.assignFakeSchedule()
        model.outputMatches()
    elif args.judging:
        model.assignFakeSchedule()
        model.outputJudging()
    elif args.output:
        model.formatOutput(args.output, args.resultfile)
    else:
        parser.print_help()
| true |
9e83cfca28dfcdbeeef01f52082f359c143724a6 | Python | fengyehong123/Python_skill | /22-字典按照key和value进行排序.py | UTF-8 | 1,265 | 3.5 | 4 | [] | no_license | d = {
'zhao': 68,
'qian': 80,
'sun': 72,
'li': 90,
'zhou': 83,
'wu': 79,
'zheng': 62,
'wang': 87
}
# dict.items() yields an iterable view of (key, value) pairs
print(d.items())
"""
dict_items([('zhao', 68), ('qian', 80), ('sun', 72), ('li', 90), ('zhou', 83), ('wu', 79), ('zheng', 62), ('wang', 87)])
"""
# print(d.items().sort(key=lambda x: x[1]))
# Sort by value via a lambda key; ascending by default, reverse=True flips it to descending
obj = sorted(d.items(), key=lambda y: y[1], reverse=True)
print(obj)
"""
[('li', 90), ('wang', 87), ('zhou', 83), ('qian', 80), ('wu', 79), ('sun', 72), ('zhao', 68), ('zheng', 62)]
"""
# dict() rebuilds a dict from the sorted pair list, preserving the value-descending order
print(dict(obj))
"""
{'li': 90, 'wang': 87, 'zhou': 83, 'qian': 80, 'wu': 79, 'sun': 72, 'zhao': 68, 'zheng': 62}
"""
# Sorting by key
NN = {
    'zhao': 68,
    'qian': 80,
    'sun': 72,
    'li': 90,
    'zhou': 83,
    'wu': 79,
    'zheng': 62,
    'wang': 87
}
print(dict(sorted(NN.items(), key=lambda x: x[0], reverse=False)))
"""
{'li': 90, 'qian': 80, 'sun': 72, 'wang': 87, 'wu': 79, 'zhao': 68, 'zheng': 62, 'zhou': 83}
"""
| true |
c67d58844943c4ce33da4270fce0e91b3ee4b09b | Python | amolika20424/Simple_assembler_simulator | /Simple-Assembler/main.py | UTF-8 | 9,051 | 2.890625 | 3 | [] | no_license | import sys
output = []  # assembled 16-bit machine-code strings, in program order
error = {}  # source line number -> first error message for that line
label_error = {}  # source line number -> label-related error message
# Register name -> 3-bit encoding; FLAGS is addressable only via mov.
REGISTERS = {"R0": "000", "R1": "001", "R2": "010", "R3": "011", "R4": "100", "R5": "101", "R6": "110", "FLAGS": "111"}
# Mnemonic -> 5-bit opcode.
ISA = {
    "add": "00000",
    "sub": "00001",
    "mov": "00010",
    "ld": "00100",
    "st": "00101",
    "mul": "00110",
    "div": "00111",
    "rs": "01000",
    "ls": "01001",
    "xor": "01010",
    "or": "01011",
    "and": "01100",
    "not": "01101",
    "cmp": "01110",
    "jmp": "01111",
    "jlt": "10000",
    "jgt": "10001",
    "je": "10010",
    "hlt": "10011"}
reg_values = {"R0": 0, "R1": 0, "R2": 0, "R3": 0, "R4": 0, "R5": 0, "R6": 0, "FLAGS": 0}
hlt_status = False
# when hlt appears status becomes True after this print error
var_status = False
# when var status is true variables can't be declared error
var = {}  # variable name -> source line of its declaration
labels = {}
# label name =[current_count,instruction]
############################# different types of ISA
def type_A(l):
    """Encode a type-A (three-register) instruction: add/sub/mul/xor/or/and.

    l is the tokenized line [mnemonic, Rx, Ry, Rz].  On success the 16-bit
    encoding (5-bit opcode, 2 unused bits, three 3-bit registers) is
    appended to the global output list; otherwise an error is recorded
    for the current source line.
    """
    if len(l) != 4:
        error[current_count] = str("Line " + str(current_count) + ": Invalid Syntax of instruction")
        return
    if (l[1][0] != 'R' or l[2][0] != 'R' or l[3][0] != 'R'):
        error[current_count] = str("Line " + str(current_count) + ": Invalid Syntax - error in instruction syntax")
        return
    elif (l[1] not in REGISTERS or l[2] not in REGISTERS or l[3] not in REGISTERS):
        error[current_count] = str("Line " + str(current_count) + ": Invalid Register - Register does not exist")
        return
    opcode = str(ISA[l[0]])
    unused = "00"
    reg1 = str(REGISTERS[l[1]])
    reg2 = str(REGISTERS[l[2]])
    reg3 = str(REGISTERS[l[3]])
    output.append(opcode + unused + reg1 + reg2 + reg3)
def type_B(l):
    """Encode a type-B (register + immediate) instruction: mov/rs/ls Rx $imm.

    l is the tokenized line [mnemonic, Rx, $imm] with 0 <= imm <= 255.
    On success the 16-bit encoding (5-bit opcode, 3-bit register, 8-bit
    immediate) is appended to output; otherwise an error is recorded.
    """
    if len(l) != 3:
        error[current_count] = str("Line " + str(current_count) + ": Syntax Error: Invalid Syntax of instruction")
        return
    # BUG FIX: this test used 'and', so it only fired when *both* operands
    # were malformed; a line like "rs 5 R1" slipped through and crashed on
    # int('') below.  Either malformed operand is now rejected.
    if (l[1][0] != 'R' or l[2][0] != "$"):
        error[current_count] = str(
            "Line " + str(current_count) + ": Syntax Error: Invalid Syntax - error in instruction syntax")
        return
    elif (l[1] not in REGISTERS):
        error[current_count] = str(
            "Line " + str(current_count) + ": Syntax Error: Invalid Register - Register does not exist")
        return
    elif not l[2][1:].isdigit():
        # Reject non-numeric immediates (e.g. "$abc", "$-1") instead of
        # letting int() raise an uncaught ValueError.
        error[current_count] = str(
            "Line " + str(current_count) + ": Syntax Error: Immediate must be a number")
        return
    elif int(l[2][1:]) < 0 or int(l[2][1:]) > 255:
        error[current_count] = str(
            "Line " + str(current_count) + ": Syntax Error: Syntax Error: Value of Immediate must be between 0 and 255")
        return
    # BUG FIX: the original compared ISA[l[0]] (a 5-bit opcode string)
    # against "mov", which was never true; compare the mnemonic itself.
    # (The result was unaffected only because ISA["mov"] happens to equal
    # the hard-coded "00010".)
    if l[0] == "mov":
        opcode = "00010"
    else:
        opcode = str(ISA[l[0]])
    reg1 = str(REGISTERS[l[1]])
    im = int(l[2][1:])
    imm = '{0:08b}'.format(im)  # zero-padded 8-bit binary
    output.append(opcode + reg1 + imm)
def type_C(l):
    """Encode a type-C (two-register) instruction: mov/not/cmp Rx Ry.

    On success the 16-bit encoding (5-bit opcode, 5 unused bits, two
    3-bit registers) is appended to output; otherwise an error is
    recorded for the current source line.
    """
    if len(l) != 3:
        error[current_count] = str("Line " + str(current_count) + ": Syntax Error: Invalid Syntax of instruction")
        return
    # BUG FIX: this test used 'and', so a single malformed operand fell
    # through to the REGISTERS membership check and produced the wrong
    # ("Invalid Register") diagnostic; either bad operand now reports the
    # intended syntax error.
    if (l[1][0] != 'R' or l[2][0] != 'R'):
        error[current_count] = str(
            "Line " + str(current_count) + ": Syntax Error: Invalid Syntax - error in instruction syntax")
        return
    elif (l[1] not in REGISTERS or l[2] not in REGISTERS):
        error[current_count] = str(
            "Line " + str(current_count) + ": Syntax Error: Invalid Register - Register does not exist")
        return
    # Register-to-register mov uses the alternate opcode 00011.
    if l[0] == "mov":
        opcode = "00011"
    else:
        opcode = str(ISA[l[0]])
    unused = "00000"
    reg1 = str(REGISTERS[l[1]])
    reg2 = str(REGISTERS[l[2]])
    output.append(opcode + unused + reg1 + reg2)
def type_D(l):
    """Encode a type-D (memory) instruction: ld/st Rx varname.

    The variable must already have been declared with 'var'.  On success
    the 16-bit encoding (5-bit opcode, 3-bit register, 8-bit address) is
    appended to output; otherwise an error is recorded.
    """
    if len(l) != 3:
        error[current_count] = str("Line " + str(current_count) + ": Syntax Error: Invalid Syntax of instruction")
        return
    if (l[1][0] != 'R'):
        error[current_count] = str(
            "Line " + str(current_count) + ": Syntax Error: Invalid Syntax - error in load instruction syntax")
        return
    elif (l[1] not in REGISTERS):
        error[current_count] = str(
            "Line " + str(current_count) + ": Syntax Error: Invalid Register - Register does not exist")
        return
    elif (l[2] not in var.keys()):
        error[current_count] = str(
            "Line " + str(current_count) + ": Syntax Error: Invalid Memory address: Variable has not been declared")
        return
    opcode = str(ISA[l[0]])
    reg1 = str(REGISTERS[l[1]])
    # Address = (program length excluding var declarations) + variable's
    # declaration order - 1; presumably variables live right after the
    # code section — TODO confirm against the simulator's memory layout.
    mem = '{0:08b}'.format((total_count - len(var)) + var[l[2]] - 1)
    output.append(opcode + reg1 + mem)
def type_E(l):
    """Encode a type-E (jump) instruction: jmp/jlt/jgt/je labelname.

    On success the 16-bit encoding (5-bit opcode, 3 unused bits, 8-bit
    target address derived from the label's line) is appended to output;
    otherwise an error is recorded.
    """
    if len(l) != 2:
        error[current_count] = str("Line " + str(current_count) + ": Syntax Error: Invalid Syntax of instruction")
        return
    # BUG FIX: the original also accepted names found only in 'var', then
    # crashed with a KeyError on the labels[...] lookup below.  A jump
    # target must be a declared label.
    if l[1] not in labels.keys():
        error[current_count] = str("Line " + str(
            current_count) + ":Syntax Error: Invalid Memory address: Variable or label has not been declared")
        return
    opcode = str(ISA[l[0]])
    unused = "000"
    mem = '{0:08b}'.format(int(labels[l[1]][0]) - 2)
    output.append(opcode + unused + str(mem))
#############################checking for ISA type
def check_ISA(l, current_count):
    """Dispatch one tokenized instruction to its type encoder.

    mov is ambiguous: register/register is type C, register/immediate
    is type B; every other mnemonic maps to exactly one type.
    """
    mnemonic = l[0]
    if mnemonic == "mov":
        if l[2] in REGISTERS.keys():
            type_C(l)
        else:
            type_B(l)
    elif mnemonic in ("add", "sub", "mul", "xor", "or", "and"):
        type_A(l)
    elif mnemonic in ("rs", "ls"):
        type_B(l)
    elif mnemonic in ("not", "cmp"):
        type_C(l)
    elif mnemonic in ("ld", "st"):
        type_D(l)
    elif mnemonic in ("jmp", "jlt", "jgt", "je"):
        type_E(l)
def check_label(l, current_count):
    """Register a label definition, rejecting name clashes and duplicates.

    l[0] is the 'name:' token; the remaining tokens (the instruction on
    the label's line, possibly empty) are stored alongside the line number.
    """
    name = l[0][:-1]  # strip the trailing ':'
    if name in var:
        label_error[current_count] = str("Line " + str(current_count) + " Error:Misuse of variable name as label and vice versa")
    elif name in labels:
        label_error[current_count] = str("Line " + str(current_count) + " Error: Duplicate label")
    else:
        labels[name] = [current_count, l[1:]]
def f1(line, current_count):
    """Assemble one source line: handle hlt/var/FLAGS special cases,
    unwrap labelled lines (recursively), and hand real instructions to
    check_ISA.  Errors are recorded keyed by source line number.
    """
    global hlt_status
    global var_status
    l = line.split()
    if l[0] == "hlt" and hlt_status == False:
        hlt_status = True
        output.append("1001100000000000")
        return
    elif l[0] == "hlt" and hlt_status == True:
        # BUG FIX: the original called str() with two arguments
        # (str("Line " + ..., " Error: ...")), which raises TypeError;
        # build the message with plain concatenation instead.
        error[current_count] = "Line " + str(current_count) + " Error: multiple hlt instructions"
        return
    elif line == lines[-1] and l[0] == "hlt":
        hlt_status = True
        output.append("1001100000000000")
        return
    elif l[0] == "var" and var_status == True:
        # Variables are only legal before the first instruction.
        error[current_count] = str("Line " + str(current_count) + " Error: Variable must be declared at the starting ")
        return
    elif l[0] == "var":
        if(len(l)==1):
            error[current_count] = str(
                "Line " + str(current_count) + " Syntax Error: nothing declared in front of var ")
            return
        var[l[1]] = current_count
        return
    elif "FLAGS" in line and l[0] != "mov":
        # FLAGS may only be touched via mov.
        error[current_count] = str("Line " + str(current_count) + " Error: illegal use of flags ")
        return
    if l[0][0:-1] in labels.keys():
        # Labelled line: strip the 'name:' token and re-assemble the rest.
        if len(l) == 1:
            error[current_count] = str("Line " + str(current_count) + " Error: label is empty ")
            return
        a = ""
        for b in range(1, len(l) - 1):
            a = a + l[b] + " "
        a = a + l[-1]
        f1(a, current_count)
        return
    if l[0] in ISA.keys():
        # First real instruction closes the variable-declaration section.
        if var_status != True:
            var_status = True
        check_ISA(l, current_count)
        return
    else:
        error[current_count] = str("Line " + str(current_count) + " Syntax Error: Not a valid instruction ")
        return
# input - assembler starts from here
# Pass 1: read stdin, collecting every line and registering labels up front
# so forward jumps resolve; pass 2 assembles each line via f1().
total_count = 0
current_count = 0
lines = []
label_count = 1
for line in sys.stdin.readlines():
    l = line.split()
    if l[0][-1] == ":" and l[0] not in ISA:
        check_label(l, label_count)
    lines.append(line)
    total_count += 1
    label_count += 1
for line in lines:
    current_count += 1
    # print(current_count)
    if line == "":
        continue
    elif (line.split()[0] == "hlt"):
        hlt_status = True
        output.append("1001100000000000")
    elif (hlt_status == True):
        error[current_count] = str("Line " + str(current_count) + " Error: Instruction not allowed after hlt")
    else:
        f1(line, current_count)
# Fold label errors into the main error map before reporting.
for lerror in label_error.keys():
    error[lerror] = label_error[lerror]
if len(error) == 0:
    # Whole-program checks on hlt placement before emitting the binary.
    if "1001100000000000" in output and output[-1] != "1001100000000000":
        print("Syntax Error: Hlt instruction should be in the end")
    elif output[-1] != "1001100000000000":
        print("Syntax Error :Hlt instruction missing from end")
    elif output.count("1001100000000000") > 1:
        print("Syntax Error:Multiple hlt statements")
    else:
        for a in output:
            print(a)
elif len(error) != 0:
    # Only the first (lowest-numbered) error is reported — the break
    # appears intentional.
    for errors in sorted(error):
        print(error[errors])
        break
# /Users/amolikabansal/Downloads/CO_M21_Assignment_submission-main_two
| true |
0f53cd5c9fbafcb5ef4e9772b885f5ff950392ca | Python | umika/NonBlockingReaderThread | /NonBlockingReaderThread.py | UTF-8 | 2,077 | 2.859375 | 3 | [] | no_license | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
'''NonBlockingReaderThread
SetConsoleMode
http://msdn.microsoft.com/en-us/library/ms686033%28v=vs.85%29
ENABLE_LINE_INPUT = 0x0002
'''
import sys, os
import time
import threading
import Queue
class NonBlockingReaderThread(threading.Thread):
    """Daemon thread that reads *pin* one byte at a time into a queue,
    letting the main thread poll for input without blocking.

    Python 2 code (old-style except clause, Queue module).
    """
    def __init__(self, pin, *args, **kwargs):
        # pin: a file-like object with a fileno(), typically sys.stdin.
        self.pin = pin
        self.skip = threading.Event()
        self.skip.clear()
        self.que = Queue.Queue()
        super(NonBlockingReaderThread, self).__init__(target=self.worker,
                                                      *args, **kwargs)
        self.setDaemon(True) # to terminate this thread with the process
    def run(self, *args, **kwargs):
        super(NonBlockingReaderThread, self).run(*args, **kwargs)
    def finalize(self):
        """Stop the worker.  NOTE(review): _Thread__stop() pokes a private
        CPython 2 internal to break out of the blocking os.read()."""
        self.skip.set()
        self._Thread__stop()
    def worker(self):
        """Loop forever: read one byte and enqueue it; errors are ignored."""
        fd = self.pin.fileno()
        while True: # set 'True' with handling exception or 'not self.skip.isSet()'
            try:
                time.sleep(0.04) # 40ms
                s = os.read(fd, 1) # DO NOT use sys.stdin.read(), it is buffered
                if s is None: continue
                if len(s) > 0: self.que.put(s, False) # non blocking
            except:
                pass
    def get_nonblocking(self):
        """Return the next queued byte, or '' if nothing is available."""
        try:
            return self.que.get(False) # non blocking
        except Queue.Empty, e:
            return ''
    def set_stdin_unbuffered(self):
        """Windows only: clear ENABLE_LINE_INPUT on the console so stdin
        delivers characters without waiting for Enter."""
        import ctypes
        m = ctypes.c_ulong(0)
        h = ctypes.windll.kernel32.GetStdHandle(-10) # stdin
        ctypes.windll.kernel32.GetConsoleMode(h, ctypes.byref(m)) # (m=487)
        ENABLE_LINE_INPUT = 0x0002
        n = ctypes.c_ulong(m.value & ~ENABLE_LINE_INPUT)
        ctypes.windll.kernel32.SetConsoleMode(h, n) # (n=485)
if __name__ == '__main__':
    # Demo: run the reader for ~10 seconds while the main loop keeps printing,
    # showing that stdin reads never block it.
    rt = NonBlockingReaderThread(sys.stdin)
    rt.start()
    for i in xrange(20):
        print 'main: %d' % i
        time.sleep(0.5)
    rt.finalize() # _Thread__stop() is the best way to terminate os.read(fd, 1).
    # del rt # Delete thread but not terminate os.read(fd, 1) in worker thread.
    # os.close(0) # force close stdin to terminate os.read(fd, 1) but *DEAD LOCK*
    # rt.join(timeout=1.0) # must not call rt.join() because self.setDaemon(True)
| true |
934d0e773b9c9d5166d0a9f62431c2c09f585919 | Python | bestchanges/hello_python | /sessions/22/pavel_shchegolevatykh/philosophers_waiter.py | UTF-8 | 2,809 | 3.609375 | 4 | [] | no_license | # solution using waiter (arbitrator) to limit taking forks if only one left
# that way deadlocks are prevented but less people can eat at the same time
# compared to monitor (philosophers_better) solution
import threading
import time
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
log_formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
log_stream_handler = logging.StreamHandler()
log_stream_handler.setFormatter(log_formatter)
logger.addHandler(log_stream_handler)
class Waiter:
    """Arbitrator that admits at most max_value philosophers at once.

    BUG FIX: the original had the semaphore operations swapped —
    down() released and up() acquired — so the first thing each
    philosopher did was *release* the semaphore, it never blocked
    anyone, and the N-1 deadlock-avoidance limit was a no-op.
    down() is now P/wait (acquire) and up() is V/signal (release),
    matching how Philosopher.run() uses them: down() before picking
    up forks, up() after putting them back.
    """
    def __init__(self, max_value):
        self.lock = threading.Semaphore(max_value)
    def up(self):
        # V: free a seat for the next philosopher.
        self.lock.release()
    def down(self):
        # P: block while max_value philosophers are already admitted.
        self.lock.acquire()
class Fork:
    """A fork that at most one philosopher can hold at a time.

    BUG FIX: the original take() acquired and immediately released the
    semaphore, so two neighbours could "hold" the same fork at once and
    the taken flag provided no mutual exclusion.  take() now holds the
    semaphore until put() releases it, so possession is exclusive and a
    second take() blocks until the fork is returned.
    """
    def __init__(self, number):
        self.number = number
        self.lock = threading.Semaphore(1)
        self.taken = False
    def take(self):
        # Block until the fork is free, then keep it.
        self.lock.acquire()
        self.taken = True
    def put(self):
        self.taken = False
        self.lock.release()
class Philosopher(threading.Thread):
    """Dining philosopher thread: alternates thinking and eating forever.

    Eating requires permission from the shared waiter plus both
    neighbouring forks; everything is released afterwards.  run() never
    returns, and the threads are not daemons.
    """
    def __init__(self, philosopher_id, left_fork, right_fork, waiter):
        threading.Thread.__init__(self)
        self.philosopher_id = philosopher_id
        self.left_fork = left_fork
        self.right_fork = right_fork
        self.waiter = waiter
    def think(self):
        # Simulated one-second thinking phase.
        logger.info(f'Philosopher {self.philosopher_id} is thinking...')
        time.sleep(1)
    def eat(self):
        # Simulated one-second eating phase.
        logger.info(f'Philosopher {self.philosopher_id} is eating...')
        time.sleep(1)
    def run(self):
        # Cycle: think -> waiter permission -> forks -> eat -> release all.
        while True:
            self.think()
            self.waiter.down()
            logger.info(f'Philosopher {self.philosopher_id} calls waiter')
            self.left_fork.take()
            logger.info(f'Philosopher {self.philosopher_id} takes left fork')
            self.right_fork.take()
            logger.info(f'Philosopher {self.philosopher_id} takes right fork')
            self.eat()
            self.right_fork.put()
            logger.info(f'Philosopher {self.philosopher_id} put right fork')
            self.left_fork.put()
            logger.info(f'Philosopher {self.philosopher_id} put left fork')
            self.waiter.up()
            logger.info(f'Philosopher {self.philosopher_id} says good bye to waiter')
def main():
    """Wire up 5 philosophers in a ring sharing 5 forks and one waiter
    limited to N-1 concurrent diners, then start all threads (they run
    forever)."""
    number_of_philosophers = 5
    # waiter for deadlock avoidance (n-1 available)
    waiter = Waiter(number_of_philosophers-1)
    forks = [Fork(i) for i in range(number_of_philosophers)]
    # Philosopher i shares fork i with his left neighbour and fork i+1
    # (mod N) with his right neighbour.
    philosophers = [Philosopher(i, forks[i], forks[(i+1) % number_of_philosophers], waiter) for i in range(number_of_philosophers)]
    for i in range(number_of_philosophers):
        philosophers[i].start()
if __name__ == '__main__':
    main()
| true |
5dd17002be58e6709f030fb201e97c56fe999a25 | Python | IrshadBadarpura/Web-Search-Engine | /pr.py | UTF-8 | 2,368 | 2.546875 | 3 | [] | no_license | import pickle, os
def loadPickle(name):
with open(name, 'rb') as f:
return pickle.load(f)
def inlinkFunc(tfidf, crawled):
    """Build the inlink map: url -> ids of crawled pages linking to it.

    Each crawled record is indexed as rec[0] = page id and rec[2] = the
    list of outgoing links; a page is an inlink of url when url appears
    among its outlinks.
    """
    return {
        url: [rec[0] for rec in crawled.values() if url in rec[2]]
        for url in tfidf
    }
def pqitoj(term, i, j, tfidf):
    """Query-dependent transition weight from page i to page j for *term*:
    j's tf-idf for the term divided by the summed tf-idf of the term over
    all of i's outlinks.

    NOTE(review): reads the module-level webCrawled global (assigned in
    __main__) instead of taking the crawl data as a parameter; also
    raises ZeroDivisionError when no outlink of i carries the term —
    confirm callers guarantee s > 0.
    """
    s = 0
    for doc in webCrawled[i][2]:
        if doc in tfidf and term in tfidf[doc]:
            s += tfidf[doc][term]
    return (tfidf[j][term] if term in tfidf[j] else 0)/s
def queryDependentPageRank(tfidf, crawled, inlink):
    """Run 10 iterations of query-dependent PageRank per (url, term) pair.

    Scores start uniform over each page's terms and are updated with
    damping alpha: (1 - alpha) * term-prior + alpha * inlink contribution.
    Returns {url: {term: score}}.  NOTE(review): the *crawled* parameter
    is unused here; the link data comes in via pqitoj's global.
    """
    alpha = 0.85
    # Local name shadows the function; harmless since there is no recursion.
    queryDependentPageRank = {}
    for url in tfidf:
        queryDependentPageRank[url] = {}
        for token in tfidf[url]:
            queryDependentPageRank[url][token] = 1/len(tfidf[url])
    iteration = 0
    while( iteration < 10) :
        corr = 0
        for url in tfidf:
            corr += 1
            for token in tfidf[url]:
                # Accumulate rank mass flowing in from pages linking here.
                s = 0
                for i in inlink[url]:
                    # if token in queryDependentPageRank[i]:
                    # s += queryDependentPageRank[i][url] * pqitoj(token, i, url, tfidf)
                    # print("Here")
                    # else :
                    # s += 0
                    s += (queryDependentPageRank[i][token] if token in queryDependentPageRank[i] else 0) * pqitoj(token, i, url, tfidf)
                # print(s)
                # Prior: this page's share of the term's total tf-idf mass.
                prQuery = tfidf[url][token]/ sum(tfidf[i][token] if token in tfidf[i] else 0 for i in tfidf)
                queryDependentPageRank[url][token] = (1 - alpha) * prQuery + (alpha * s)
        iteration += 1
    return queryDependentPageRank
if __name__ == "__main__":
    # Load the crawl and tf-idf artifacts produced by earlier pipeline stages.
    webCrawled = loadPickle("webCrawled_pages.pkl")
    # print(webCrawled)
    tfidf = loadPickle("tf-idf.pkl")
    # Reuse a cached inlink map when present; otherwise build and cache it.
    if os.path.exists('inlink.pkl'):
        inlink = loadPickle("inlink.pkl")
    else:
        inlink = inlinkFunc(tfidf,webCrawled)
        with open('inlink.pkl', 'wb') as f:
            pickle.dump(inlink, f)
    qr = queryDependentPageRank(tfidf, webCrawled, inlink)
    # print(qr)
    with open('queryDependentPageRank.pkl', 'wb') as f:
        pickle.dump(qr, f)
    qr = loadPickle('queryDependentPageRank.pkl')
| true |
78140e0002fa352afbddd1210b7de254afc23db6 | Python | prawnrao/NeuralNet | /NN/NeuralNet.py | UTF-8 | 6,631 | 3.4375 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive | from NN.Matrix import Matrix
import math
from random import choice
def __sig(x):
    """f(x) = 1 / (1 + e^(-x))"""
    denominator = 1 + math.e ** (-x)
    return 1 / denominator
def __dsig(x):
    """f'(x) = f(x) * (1 - f(x))"""
    complement = 1 - x
    return x * complement
def __relu(x):
    """f(x) = 0 for x < 0 \n\t = x for x >= 0"""
    if x > 0:
        return x
    return 0
def __drelu(x):
    """f'(x) = 0 for x <= 0 \n\t = 1 for x >= 0"""
    if x <= 0:
        return 0
    return 1
class ActivationFunction(object):
    """Pairs an activation callable with its derivative for NeuralNet.

    Attributes:
        name: human-readable label for the activation.
        func: the activation f(x).
        dfunc: the derivative f'(x), expressed in terms of f's output.
    """
    def __init__(self, func, dfunc, name=""):
        """Store the activation callable, its derivative and an optional name."""
        self.name = name
        self.func = func
        self.dfunc = dfunc
    def __repr__(self):
        # Name followed by the docstrings of the function and its derivative,
        # separated by escaped newline/tab pairs (same layout as before).
        return f"""{self.name} \n\t{self.func.__doc__} \n\t{self.dfunc.__doc__}\n"""
sigmoid = ActivationFunction(__sig, __dsig, 'Sigmoid')
ReLU = ActivationFunction(__relu, __drelu, 'ReLU')
class NeuralNet(object):
"""Neural Network Object"""
def __init__(self, input_nodes, hidden_nodes, output_nodes,
activation_func=sigmoid, learning_rate=0.1):
"""Constuctor for the neural network"""
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
self.ih_weights = Matrix.random_matrix(self.hidden_nodes,
self.input_nodes)
self.hidden_bias = Matrix(self.hidden_nodes, 1)
self.ho_weights = Matrix.random_matrix(self.output_nodes,
self.hidden_nodes)
self.output_bias = Matrix(self.output_nodes, 1)
self.__activation_function = activation_func
self.__learning_rate = learning_rate
@property
def activation_function(self):
return self.__activation_function
@activation_function.setter
def activation_function(self, activation_func):
self.__activation_function = activation_func
@property
def learning_rate(self):
return self.__learning_rate
@learning_rate.setter
def learning_rate(self, val):
self.__learning_rate = val
def feed_forward(self, inputs):
""" Applying weights, bias and activation function to
input and hiddens layer to get the output.
H = activation(W_ih x I + bias_h)
O = sigmoid(W_ho x H + bias_o)
"""
# Hidden nodes values
hidden = Matrix.matmul(self.ih_weights, inputs)
hidden.add(self.hidden_bias, inplace=True)
hidden.apply(self.activation_function.func, inplace=True)
# Output nodes values
output = Matrix.matmul(self.ho_weights, hidden)
output.add(self.output_bias, inplace=True)
output.apply(sigmoid.func, inplace=True)
return hidden, output
def errors(self, output, labels):
""" Calcultes the errors for the output and hidden layers"""
labels = Matrix.from_array(labels)
output_errors = labels.add(output.negative)
hidden_errors = Matrix.matmul(self.ho_weights.T, output_errors)
return hidden_errors, output_errors
def back_propogate(self, inputs, hidden, output, labels):
""" Propogates and assigns the weights back from the outputs
to the inputs
"""
hidden_errors, output_errors = self.errors(output, labels)
output_gradient = output.apply(sigmoid.dfunc)
output_gradient = self.learning_rate * Matrix.Hadamard(output_errors,
output_gradient)
delta_w_ho = Matrix.matmul(output_gradient, hidden.T)
hidden_gradient = hidden.apply(self.activation_function.dfunc)
hidden_gradient = self.learning_rate * Matrix.Hadamard(hidden_errors,
hidden_gradient)
delta_w_ih = Matrix.matmul(hidden_gradient, inputs.T)
return hidden_gradient, delta_w_ih, output_gradient, delta_w_ho
def train(self, inputs, labels, stocastic=True):
""" Trains the neural net, with the option of stocastic
or batch training
"""
# Converting inputs to matrix object
inputs = Matrix.from_array(inputs)
hidden, output = self.feed_forward(inputs)
if stocastic:
hidden_gradient, delta_w_ih, output_gradient, delta_w_ho = \
self.back_propogate(inputs, hidden, output, labels)
self.output_bias.add(output_gradient, inplace=True)
self.ho_weights.add(delta_w_ho, inplace=True)
self.hidden_bias.add(hidden_gradient, inplace=True)
self.ih_weights.add(delta_w_ih, inplace=True)
else:
return self.back_propogate(inputs, hidden, output, labels)
def batch_train(self, inputs_array, label_function,
                batch_size=100):
    """Train on ``batch_size`` randomly chosen samples as one batch.

    Gradient deltas from every sample are summed and applied to the
    weights/biases once at the end, instead of after each sample.
    ``label_function`` maps a chosen input to its expected label(s).
    """
    for i in range(batch_size):
        sample = choice(inputs_array)
        deltas = self.train(sample, label_function(sample), stocastic=False)
        if i == 0:
            # First sample: its delta matrices become the accumulators.
            totals = list(deltas)
        else:
            for acc, delta in zip(totals, deltas):
                acc.add(delta, inplace=True)
    grad_hidden, delta_ih, grad_out, delta_ho = totals
    self.output_bias.add(grad_out, inplace=True)
    self.ih_weights.add(delta_ih, inplace=True)
    self.hidden_bias.add(grad_hidden, inplace=True)
    self.ho_weights.add(delta_ho, inplace=True)
def predict(self, inputs):
    """Feed ``inputs`` forward and return the first output node's value."""
    _, output = self.feed_forward(Matrix.from_array(inputs))
    return output.data[0][0]
def __repr__(self):
    """Summarise the network topology and hyper-parameters."""
    # NOTE(review): the double-underscore attributes are the (name-mangled)
    # private fields presumably backing the activation/learning-rate
    # properties used elsewhere -- confirm against __init__ (not visible here).
    fields = [
        ("Inputs", self.input_nodes),
        ("Hidden", self.hidden_nodes),
        ("output", self.output_nodes),  # lowercase kept: output is part of repr text
        ("Activation", self.__activation_function.name),
        ("Learning Rate", self.__learning_rate),
    ]
    return "\n".join("\t{}: {}".format(label, value) for label, value in fields)
| true |
a5490b04eff403732becd302fcf6b79b43470db7 | Python | buq2/autopi-additions | /test_my_network.py | UTF-8 | 4,291 | 2.734375 | 3 | [
"BSD-3-Clause"
] | permissive | import my_network
import pytest
import datetime
import numpy as np
use_real_time = True
fake_time = datetime.datetime.now()


def faked_current_time(usage_reason):
    """Replacement clock injected into my_network.

    Returns the real wall-clock time while ``use_real_time`` is True,
    otherwise the frozen module-level ``fake_time`` (advanced by
    ``FakedNet.progress_time``).  ``usage_reason`` is accepted to match
    the real callback's signature but ignored.
    """
    if use_real_time:
        return datetime.datetime.now()
    return fake_time
class FakedNet:
    """Simulated network interface for exercising my_network.

    Tracks a current byte counter per direction (which a ``reset``
    drops, like a real counter wrapping or an interface restart) plus a
    running grand total that only ever grows -- the ground truth the
    tests compare my_network's output against.
    """

    def __init__(self):
        # Current counter values, as the fake interface would report them.
        self.received = 0
        self.transmitted = 0
        # Ground-truth cumulative totals across resets.
        self.total_received = 0
        self.total_transmitted = 0
        self.name = 'net'

    def progress_time(self, timedelta=datetime.timedelta(seconds=60 * 60)):
        """Advance the module-level fake clock by ``timedelta``."""
        global fake_time
        fake_time += timedelta

    def get_fake_network_interface_state(self):
        """Mimic my_network's interface-state dict for this one interface."""
        return {
            self.name: {
                my_network.RECEIVED: self.received,
                my_network.TRANSMITTED: self.transmitted,
            }
        }

    def increase_net_usage(self):
        """Add random traffic (1-9 units each way) to counters and totals."""
        delta_rx = np.random.randint(1, 10)
        delta_tx = np.random.randint(1, 10)
        self.received += delta_rx
        self.total_received += delta_rx
        self.transmitted += delta_tx
        self.total_transmitted += delta_tx

    def reset(self):
        """Restart the counters at a small value strictly below the old one.

        The restart values still count as real traffic, so they are
        folded into the grand totals.
        """
        self.received = int(min(np.random.randint(0, 10), self.received - 1))
        self.transmitted = int(min(np.random.randint(0, 10),
                                   self.transmitted - 1))
        self.total_received += self.received
        self.total_transmitted += self.transmitted
@pytest.fixture
def net():
    """Wire my_network up to a fresh FakedNet and the fake clock.

    Disables the minimum spacing between timeseries points, installs
    ``faked_current_time`` as the time source, clears leftover state and
    replaces the real interface probe with the fake's.  Returns the
    FakedNet so tests can drive traffic through it.
    """
    my_network.MIN_DIFFERENCE_BETWEEN_TIMESERIES_POINTS = datetime.timedelta(seconds=0)
    my_network.get_current_time = faked_current_time
    my_network.clear()
    fake = FakedNet()
    # Bound method behaves identically to a zero-arg lambda wrapper.
    my_network.__get_network_interface_state = fake.get_fake_network_interface_state
    return fake
def check(net, state):
    """Assert that ``state`` reports exactly the fake's running totals."""
    iface = state[net.name]
    assert iface[my_network.RECEIVED] == net.total_received
    assert iface[my_network.TRANSMITTED] == net.total_transmitted
def test_init(net):
    """A freshly wired-up fake interface reports zero traffic."""
    assert (net.received, net.transmitted) == (0, 0)
def test_without_reset(net):
    """Reported usage tracks the counters while they only ever grow."""
    for _ in range(5):
        check(net, my_network.get_network_usage())
        net.increase_net_usage()
def test_with_reset(net):
    """Totals stay correct across a single counter reset."""
    def verify():
        check(net, my_network.get_network_usage())

    for _ in range(5):
        verify()
        net.increase_net_usage()
    verify()
    net.reset()
    verify()
def test_with_reset2(net):
    """Totals stay correct through a reset followed by more traffic."""
    def verify():
        check(net, my_network.get_network_usage())

    def grow():
        for _ in range(5):
            verify()
            net.increase_net_usage()

    grow()
    verify()
    net.reset()
    verify()
    grow()
def test_with_reset3(net):
    """Totals stay correct through two reset/grow cycles."""
    def verify():
        check(net, my_network.get_network_usage())

    def grow():
        for _ in range(5):
            verify()
            net.increase_net_usage()

    for _ in range(2):
        grow()
        verify()
        net.reset()
        verify()
    grow()
def test_removing_old_data(net):
    """Usage older than MAX_TIMESERIESPOINT_LIFE is dropped entirely.

    Runs on the frozen fake clock so time can be advanced deterministically.
    """
    global use_real_time
    use_real_time = False
    # Bug fix: the original restored use_real_time only after the last
    # assertion, so any failure here left the module clock frozen and
    # silently broke every later test in the session.  try/finally
    # guarantees the flag is restored.
    try:
        for i in range(5):
            state = my_network.get_network_usage()
            check(net, state)
            net.increase_net_usage()
            net.progress_time()
        state = my_network.get_network_usage()
        check(net, state)
        # Jump past the retention window so every recorded point expires.
        net.progress_time(my_network.MAX_TIMESERIESPOINT_LIFE)
        state = my_network.get_network_usage()
        # Old data has been deleted
        assert state[net.name][my_network.RECEIVED] == 0
        assert state[net.name][my_network.TRANSMITTED] == 0
    finally:
        use_real_time = True
# Allow running this module directly (``python test_my_network.py``)
# instead of via the pytest CLI.
if __name__ == '__main__':
    pytest.main()
| true |
a3fb88ab15c8dff974f7463781ad2743e22aea51 | Python | MSnaker/Djinnerator | /key.py | UTF-8 | 713 | 2.53125 | 3 | [
"Unlicense"
] | permissive | import password
import base64
from cryptography.fernet import Fernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
# Generate key, start fernet.
# Read the stored salt: first line of salt.txt.  NOTE(review): readlines()[0]
# keeps a trailing newline if one exists, and the derived key depends on the
# exact salt bytes -- confirm salt.txt is written without a newline.
with open('salt.txt', 'rb') as fsalt:
    salt = fsalt.readlines()[0]

# NOTE(review): hard-coded password in source; fine for a toy, but consider
# prompting or an environment variable otherwise.
pwd = b'banananas'

# Derive a 32-byte urlsafe-base64 key from the password via
# PBKDF2-HMAC-SHA256 (Fernet's expected key format).
kdf = PBKDF2HMAC(
    algorithm=hashes.SHA256(),
    length=32,
    salt=salt,
    iterations=420069,
)
key = base64.urlsafe_b64encode(kdf.derive(pwd))

# Encrypt a known plaintext so key.txt carries verifiable proof material.
f = password.start_Fernet(salt, pwd)
proof = b'Bananarama was pretty good!'
enc_proof = f.encrypt(proof)

# key.txt layout: key, plaintext proof, encrypted proof -- one per line,
# no trailing newline (identical bytes to sequential writes).
with open('key.txt', 'wb') as fkey:
    fkey.write(b'\n'.join([key, proof, enc_proof]))
| true |