hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a6987b7f5d13e03773ebbfeb594e4576981e0f1d | 72 | py | Python | Python/Matts_Lessons/comments.py | Josh-Luedke/Vision-Notes | 544e9ef53dbf34e19af5144012b90bfa19012c16 | [
"MIT"
] | null | null | null | Python/Matts_Lessons/comments.py | Josh-Luedke/Vision-Notes | 544e9ef53dbf34e19af5144012b90bfa19012c16 | [
"MIT"
] | null | null | null | Python/Matts_Lessons/comments.py | Josh-Luedke/Vision-Notes | 544e9ef53dbf34e19af5144012b90bfa19012c16 | [
"MIT"
] | null | null | null | # Here is something that I am typing
# print("I am printing something") | 36 | 37 | 0.736111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.986111 |
a698ed363c72fb8db096b455d881f891dde49eb2 | 6,282 | py | Python | src/src/graph_environments.py | aka-cs/ia-sim-cmp | fa26f3d961a992698ca08f4213d6eae39f3ec039 | [
"MIT"
] | null | null | null | src/src/graph_environments.py | aka-cs/ia-sim-cmp | fa26f3d961a992698ca08f4213d6eae39f3ec039 | [
"MIT"
] | null | null | null | src/src/graph_environments.py | aka-cs/ia-sim-cmp | fa26f3d961a992698ca08f4213d6eae39f3ec039 | [
"MIT"
] | null | null | null | from __future__ import annotations
from .base_classes import Event, SetEvent, DeleteEvent, GenerateEvent, MapObject, Agent, Position, Generator, \
Environment
class GraphEnvironment(Environment):
    """
    Environment implementation with a notion of locations and of adjacency
    between them, represented as a weighted graph.
    """
    graph: {str: {str: float}}
    objects: {str: {int: MapObject}}
    generators: {str: Generator}

    def __init__(self, graph: {str: {str: float}}, objects: {str: {int: MapObject}}, generators: {str: Generator}):
        # Keep references to the graph, the object map and the generators.
        self.graph = graph
        self.objects = objects
        self.generators = generators
        self.counter = 0

        # Normalize the object map: every location of the graph must have an
        # entry, even when it holds no objects.
        for location in graph:
            self.objects.setdefault(location, {})

        # Track the highest identifier already in use, and reject object
        # listings that mention a location missing from the graph.
        for location in objects:
            for identifier in self.objects[location]:
                self.counter = max(self.counter, identifier)
            if location not in graph:
                raise Exception("Invalid objects list.")

    def next(self):
        # Hand out a fresh, strictly increasing object identifier.
        self.counter += 1
        return self.counter

    def get_places(self) -> [str]:
        """
        Return the locations of the simulated environment.
        """
        return list(self.graph)

    def get_objects(self):
        """
        Return every object present in the environment.
        """
        # Flatten the per-location object maps into a single list.
        return [map_object
                for location in self.objects
                for map_object in self.get_all_objects(location)]

    def update_state(self, event: Event) -> [Event]:
        """
        Apply the given event to the simulated environment and return the
        follow-up events it produces.
        """
        events = []
        if isinstance(event, DeleteEvent):
            # Remove the referenced object from the given position.
            self.remove_object(event.position, event.object_id)
        elif isinstance(event, SetEvent):
            # Register the new object under a fresh identifier.
            event.object.identifier = self.next()
            self.set_object(event.object)
        elif isinstance(event, GenerateEvent) and event.generator_name in self.generators:
            # Let the named generator produce a new object somewhere on the map.
            generator = self.generators[event.generator_name]
            map_object = generator.generate(self.get_places())
            map_object.identifier = self.next()
            self.set_object(map_object)
            # Schedule the generator's next emission, if it lies in the future.
            next_genesis = generator.next(event.time)
            if next_genesis > event.time:
                events.append(GenerateEvent(next_genesis, event.issuer_id, event.generator_name))

        # Give every agent in the environment the chance to react to the event.
        for map_object in self.get_objects():
            if isinstance(map_object, Agent):
                events.extend(map_object.update_state(event, self))
        return events

    def get_all_objects(self, position: str) -> [MapObject]:
        """
        Return the objects located at the given position.
        """
        return list(self.objects.get(position, {}).values())

    def get_object(self, position: str, identifier: int) -> MapObject:
        """
        Return the object with the given identifier at the given position,
        or None if there is no such object.
        """
        return self.objects.get(position, {}).get(identifier)

    def set_object(self, element: MapObject) -> None:
        """
        Place the given element at its own position, if that position exists.
        """
        if element.position in self.graph:
            self.objects[element.position][element.identifier] = element

    def remove_object(self, position: str, identifier: int) -> None:
        """
        Remove the object with the given identifier from the given position,
        if present.
        """
        self.objects.get(position, {}).pop(identifier, None)
class MapEnvironment(GraphEnvironment):
    """
    Graph environment whose locations additionally carry map coordinates.
    """
    positions: {str: Position}

    def __init__(self, graph: {str: {str: float}}, objects: {str: {int: MapObject}}, positions: {str: Position},
                 generators: {str: Generator}):
        # Initialize the underlying graph environment.
        super().__init__(graph, objects, generators)
        # Keep the coordinates of every location.
        self.positions = positions

        # The located places and the graph places must match exactly;
        # otherwise the environment is inconsistent.
        for name in positions:
            if name not in graph:
                raise Exception("Invalid positions list.")
        for name in graph:
            if name not in positions:
                raise Exception("Invalid positions list.")

    def get_position(self, name: str) -> Position:
        """
        Return the coordinates of the given location (None if unknown).
        """
        return self.positions.get(name)
| 40.269231 | 115 | 0.639128 | 6,140 | 0.973213 | 0 | 0 | 0 | 0 | 0 | 0 | 2,437 | 0.386274 |
a69907bb61f411a2c1211e81e8529c57659a934d | 3,003 | py | Python | jsk_arc2017_common/node_scripts/candidates_publisher.py | pazeshun/jsk_apc | 0ff42000ad5992f8a31e719a5360a39cf4fa1fde | [
"BSD-3-Clause"
] | null | null | null | jsk_arc2017_common/node_scripts/candidates_publisher.py | pazeshun/jsk_apc | 0ff42000ad5992f8a31e719a5360a39cf4fa1fde | [
"BSD-3-Clause"
] | 2 | 2019-04-11T05:36:23.000Z | 2019-08-19T12:58:10.000Z | jsk_arc2017_common/node_scripts/candidates_publisher.py | pazeshun/jsk_apc | 0ff42000ad5992f8a31e719a5360a39cf4fa1fde | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import dynamic_reconfigure.server
from jsk_topic_tools import ConnectionBasedTransport
import json
import os.path as osp
import rospy
from std_msgs.msg import String
from jsk_arc2017_common.cfg import CandidatesPublisherConfig
from jsk_recognition_msgs.msg import Label
from jsk_recognition_msgs.msg import LabelArray
class CandidatesPublisher(ConnectionBasedTransport):
    """Periodically publishes the label candidates for the current target location.

    The candidate set is read from ``item_location_file.json`` inside the
    configured json_dir and filtered by the dynamically reconfigurable
    target location (a bin or the tote).
    """

    def __init__(self):
        super(CandidatesPublisher, self).__init__()
        self.pub = self.advertise(
            '~output/candidates', LabelArray, queue_size=1)
        self.srv = dynamic_reconfigure.server.Server(
            CandidatesPublisherConfig, self._config_cb)
        self.label_names = rospy.get_param('~label_names')
        self.json_dir = rospy.get_param('~json_dir', None)
        rate_hz = rospy.get_param('~hz', 10.0)
        self.timer = rospy.Timer(rospy.Duration(1.0 / rate_hz), self._timer_cb)

    def subscribe(self):
        # The json_dir can also be supplied at runtime via a topic.
        self.sub = rospy.Subscriber('~input/json_dir', String, self._cb)

    def unsubscribe(self):
        self.sub.unregister()

    def _config_cb(self, config, level):
        # Dynamic-reconfigure callback: remember the requested location.
        self.target_location = config.target_location
        return config

    def _cb(self, msg):
        self.json_dir = msg.data

    def _timer_cb(self, event):
        if self.json_dir is None:
            rospy.logwarn_throttle(10, 'Input json_dir is not set.')
            return
        if not osp.isdir(self.json_dir):
            rospy.logfatal_throttle(
                10, 'Input json_dir is not directory: %s' % self.json_dir)
            return
        filename = osp.join(self.json_dir, 'item_location_file.json')
        if not osp.exists(filename):
            return
        with open(filename) as location_f:
            data = json.load(location_f)

        # Contents per bin, plus the tote contents.
        bin_contents = {bin_['bin_id']: bin_['contents']
                        for bin_ in data['bins']}
        tote_contents = data['tote']['contents']
        if self.target_location[:3] == 'bin':
            contents = bin_contents[self.target_location[4]]
        elif self.target_location == 'tote':
            contents = tote_contents
        else:
            return

        # Labels whose name starts with '__' are always candidates
        # (presumably meta labels such as background — confirm upstream).
        candidates_fixed = [l for l in self.label_names
                            if l.startswith('__')]
        label_ids = sorted(self.label_names.index(x)
                           for x in candidates_fixed + contents)
        labels = []
        for label_id in label_ids:
            label_msg = Label()
            label_msg.id = label_id
            label_msg.name = self.label_names[label_id]
            labels.append(label_msg)

        msg = LabelArray()
        msg.labels = labels
        msg.header.stamp = rospy.Time.now()
        self.pub.publish(msg)
if __name__ == '__main__':
    # Start the publisher node and process callbacks until shutdown.
    rospy.init_node('candidates_publisher')
    candidates_publisher = CandidatesPublisher()
    rospy.spin()
| 34.918605 | 74 | 0.615718 | 2,520 | 0.839161 | 0 | 0 | 0 | 0 | 0 | 0 | 265 | 0.088245 |
a699cfaf6878aec2d41bc0a3750f8c53d818736b | 638 | py | Python | award/migrations/0002_auto_20190701_0754.py | maurinesinami/awards | 0f8e390a41a0c462cdb2104797daa4b59c986656 | [
"MIT"
] | null | null | null | award/migrations/0002_auto_20190701_0754.py | maurinesinami/awards | 0f8e390a41a0c462cdb2104797daa4b59c986656 | [
"MIT"
] | null | null | null | award/migrations/0002_auto_20190701_0754.py | maurinesinami/awards | 0f8e390a41a0c462cdb2104797daa4b59c986656 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-07-01 04:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename Image.image_caption to image_description and add Image.live_link."""

    dependencies = [
        ('award', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='image',
            old_name='image_caption',
            new_name='image_description',
        ),
        migrations.AddField(
            model_name='image',
            name='live_link',
            # default=2 only backfills existing rows; preserve_default=False
            # drops the default from the model afterwards.
            field=models.CharField(default=2, max_length=30),
            preserve_default=False,
        ),
    ]
| 23.62963 | 61 | 0.587774 | 482 | 0.755486 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.233542 |
a69c3846d34f45eb89021386411cd01ce1729087 | 1,480 | py | Python | tic_tac_toe_main.py | ilyakz/alpha-game2 | fe492a59cd99b99b74979f81d407156dc7d0a08d | [
"MIT"
] | null | null | null | tic_tac_toe_main.py | ilyakz/alpha-game2 | fe492a59cd99b99b74979f81d407156dc7d0a08d | [
"MIT"
] | null | null | null | tic_tac_toe_main.py | ilyakz/alpha-game2 | fe492a59cd99b99b74979f81d407156dc7d0a08d | [
"MIT"
] | null | null | null | from framework.Coach import Coach
from tic_tac_toe.TicTacToeGame import TicTacToeGame
#from tic_tac_toe.tensorflow.NNet import NNetWrapper as nn
from tic_tac_toe.keras.NNet import NNetWrapper as nn
#from cube_tic_tac_toe.CubeTicTacToeGame import CubeTicTacToeGame
#from cube_tic_tac_toe.tensorflow.NNet import NNetWrapper as nn
#from cube_tic_tac_toe.keras.NNet import NNetWrapper as nn
#from othello.OthelloGame import OthelloGame
#from othello.tensorflow.NNet import NNetWrapper as nn
from framework.utils import *
# Training hyper-parameters consumed by framework.Coach.
args = dotdict({
    'numIters': 1000,
    'numEps': 10,  # self-play episodes per iteration (was 100)
    'tempThreshold': 15,
    'updateThreshold': 0.51,
    'maxlenOfQueue': 200000,
    'numMCTSSims': 10,  # MCTS simulations per move (was 50)
    'arenaCompare': 50,
    'cpuct': 1,

    'checkpoint': './temp/keras_ttt/',
    'load_folder_file': ('./temp/keras_ttt/', 'best.pth.tar'),
    # Alternative checkpoint locations for the cube variant:
    #'checkpoint': './temp/tensorflow_cube/',
    #'load_folder_file': ('./temp/tensorflow_cube/', 'best.pth.tar'),
    'load_model': False,
    'numItersForTrainExamplesHistory': 20,
    # Alternative checkpoint locations for Othello:
    #'checkpoint': './temp/tensorflow_othello/',
    #'load_folder_file': ('./temp/tensorflow_othello/', 'best.pth.tar'),
})
if __name__ == "__main__":
    # Build a 3x3 tic-tac-toe game and its neural-network wrapper.
    g = TicTacToeGame(3)
    nnet = nn(g)

    # Optionally resume from the checkpoint configured in `args`.
    if args.load_model:
        nnet.load_checkpoint(args.load_folder_file[0], args.load_folder_file[1])

    c = Coach(g, nnet, args)
    if args.load_model:
        print("Load trainExamples from file")
        c.loadTrainExamples()
    # Run the self-play / training loop.
    c.learn()
| 31.489362 | 80 | 0.7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 842 | 0.567385 |
a69c6da5a24875fb6294bf1da51edf46e3db4865 | 475 | py | Python | hw/hw09/tests/q1_3.py | ds-modules/Colab-demo | cccaff13633f8a5ec697cd4aeca9087f2feec2e4 | [
"BSD-3-Clause"
] | null | null | null | hw/hw09/tests/q1_3.py | ds-modules/Colab-demo | cccaff13633f8a5ec697cd4aeca9087f2feec2e4 | [
"BSD-3-Clause"
] | null | null | null | hw/hw09/tests/q1_3.py | ds-modules/Colab-demo | cccaff13633f8a5ec697cd4aeca9087f2feec2e4 | [
"BSD-3-Clause"
] | null | null | null | test = { 'name': 'q1_3',
'points': 1,
'suites': [ { 'cases': [ {'code': '>>> type(max_estimate) in set([int, np.int32, np.int64])\nTrue', 'hidden': False, 'locked': False},
{'code': '>>> max_estimate in observations.column(0)\nTrue', 'hidden': False, 'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| 52.777778 | 144 | 0.414737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.498947 |
a69c988cc5ea3fab5e018e4dee803ea9a79a1f3a | 938 | py | Python | backend/gsr_booking/management/commands/load_gsrs.py | pennlabs/penn-mobile | fb3b514a55afbf6f29dd8bd589b4e76bf52e3e90 | [
"MIT"
] | 2 | 2021-11-23T18:06:40.000Z | 2022-01-05T19:13:33.000Z | backend/gsr_booking/management/commands/load_gsrs.py | pennlabs/penn-mobile | fb3b514a55afbf6f29dd8bd589b4e76bf52e3e90 | [
"MIT"
] | 30 | 2021-10-17T23:29:44.000Z | 2022-03-31T02:03:13.000Z | backend/gsr_booking/management/commands/load_gsrs.py | pennlabs/penn-mobile | fb3b514a55afbf6f29dd8bd589b4e76bf52e3e90 | [
"MIT"
] | null | null | null | import csv
from django.core.management.base import BaseCommand
from gsr_booking.models import GSR
class Command(BaseCommand):
    """Management command that seeds GSR rooms from the bundled CSV file."""

    def handle(self, *args, **kwargs):
        with open("gsr_booking/data/gsr_data.csv") as data:
            reader = csv.reader(data)
            # The first row is a header; skip it.
            next(reader, None)
            for lid, gid, name, service in reader:
                # Image is hosted on S3 and addressed by lid/gid.
                # TODO: fix image url!
                image_url = (
                    f"https://s3.us-east-2.amazonaws.com/labs.api/gsr/lid-{lid}-gid-{gid}.jpg"
                )
                kind = GSR.KIND_WHARTON if service == "wharton" else GSR.KIND_LIBCAL
                GSR.objects.create(lid=lid, gid=gid, name=name, kind=kind, image_url=image_url)
        self.stdout.write("Uploaded GSRs!")
a69cf74b6c616c2a2b00c96da7d545e93b1d79f2 | 3,143 | py | Python | src/types_constraintes.py | julien-antigny/JConfig | ba344a3ecd5b104fb361733cb789aa6b03c41c28 | [
"Apache-2.0"
] | null | null | null | src/types_constraintes.py | julien-antigny/JConfig | ba344a3ecd5b104fb361733cb789aa6b03c41c28 | [
"Apache-2.0"
] | null | null | null | src/types_constraintes.py | julien-antigny/JConfig | ba344a3ecd5b104fb361733cb789aa6b03c41c28 | [
"Apache-2.0"
] | null | null | null | TYPES = {"int","float","str","bool","list"}
MATCH_TYPES = {"int": int, "float": float, "str": str, "bool": bool, "list": list}
# Constraint predicates, keyed by type name and then constraint name.
# Every predicate takes (value, constraint_parameter) and returns True
# when the value satisfies the constraint.
CONSTRAINTS = {
    # True when both values have exactly the same type.
    "is_same_type": lambda x, y: type(x) == type(y),
    "int": {
        "min_inc": lambda value, lower: value >= lower,
        "min_exc": lambda value, lower: value > lower,
        # BUGFIX: the "max" checks previously used >= / >, i.e. they accepted
        # values ABOVE the maximum instead of below it.
        "max_inc": lambda value, upper: value <= upper,
        "max_exc": lambda value, upper: value < upper,
        "value_in": lambda value, allowed: value in allowed,
        "value_out": lambda value, forbidden: value not in forbidden
    },
    "float": {
        "min_inc": lambda value, lower: value >= lower,
        "min_exc": lambda value, lower: value > lower,
        # BUGFIX: same inverted comparison as for "int".
        "max_inc": lambda value, upper: value <= upper,
        "max_exc": lambda value, upper: value < upper,
        "value_in": lambda value, allowed: value in allowed,
        "value_out": lambda value, forbidden: value not in forbidden
    },
    "str": {
        "value_in": lambda value, allowed: value in allowed,
        "value_out": lambda value, forbidden: value not in forbidden
    },
    "list": {
        # Exact length of the list.
        "equal_len": lambda items, length: len(items) == length,
        # Minimum length of the list.
        "min_len": lambda items, lower: len(items) >= lower,
        # Element-wise bounds applied to every item of the list.
        "min_inc": lambda items, lower: all(x >= lower for x in items),
        "min_exc": lambda items, lower: all(x > lower for x in items),
        "max_inc": lambda items, upper: all(x <= upper for x in items),
        # BUGFIX: previously `x > max`, which inverted the check.
        "max_exc": lambda items, upper: all(x < upper for x in items)
    }
}
# Validators for the constraint *parameters* themselves, keyed like
# CONSTRAINTS: each callable reports whether the parameter supplied for a
# given constraint has an acceptable type.  Note that `type(...) in (...)`
# deliberately rejects bool (a subclass of int).
CONSTRAINT_CHECKS = {
    "int": {
        "min_inc": lambda bound: type(bound) in (int, float),
        "min_exc": lambda bound: type(bound) in (int, float),
        "max_inc": lambda bound: type(bound) in (int, float),
        "max_exc": lambda bound: type(bound) in (int, float),
        "value_in": lambda values: all(type(value) == int for value in values),
        "value_out": lambda values: all(type(value) == int for value in values)
    },
    "float": {
        "min_inc": lambda bound: type(bound) in (int, float),
        "min_exc": lambda bound: type(bound) in (int, float),
        "max_inc": lambda bound: type(bound) in (int, float),
        "max_exc": lambda bound: type(bound) in (int, float),
        "value_in": lambda values: all(type(value) in (int, float) for value in values),
        "value_out": lambda values: all(type(value) in (int, float) for value in values)
    },
    "str": {
        "value_in": lambda values: all(type(value) == str for value in values),
        "value_out": lambda values: all(type(value) == str for value in values)
    },
    "list": {
        "equal_len": lambda length: type(length) == int and length > 0,
        "min_len": lambda length: type(length) == int and length >= 0,
        "min_inc": lambda bound: type(bound) in (int, float),
        "min_exc": lambda bound: type(bound) in (int, float),
        "max_inc": lambda bound: type(bound) in (int, float),
        "max_exc": lambda bound: type(bound) in (int, float),
    }
}
| 40.818182 | 96 | 0.560929 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 500 | 0.159084 |
a69fa6818360c418e25ad57fb09af90cea7a16ff | 3,655 | py | Python | office365/sharepoint/changes/change_query.py | wreiner/Office365-REST-Python-Client | 476bbce4f5928a140b4f5d33475d0ac9b0783530 | [
"MIT"
] | null | null | null | office365/sharepoint/changes/change_query.py | wreiner/Office365-REST-Python-Client | 476bbce4f5928a140b4f5d33475d0ac9b0783530 | [
"MIT"
] | null | null | null | office365/sharepoint/changes/change_query.py | wreiner/Office365-REST-Python-Client | 476bbce4f5928a140b4f5d33475d0ac9b0783530 | [
"MIT"
] | null | null | null | from office365.runtime.client_value import ClientValue
class ChangeQuery(ClientValue):
    """Defines a query that is performed against the change log.

    Boolean flags select which object categories (site, web, list, item,
    user, ...) and which change kinds (add, update, delete, role assignment
    changes, ...) are included; the change tokens bound the queried window.
    """

    def __init__(self,
                 alert=False, site=False, web=False, list_=False, item=False, activity=False, file=False, folder=False,
                 user=False, group=False,
                 view=False, content_type=False,
                 add=True, update=True, system_update=True, delete_object=True,
                 role_assignment_add=True, role_assignment_delete=True,
                 change_token_start=None, change_token_end=None, fetch_limit=None):
        """
        :param bool alert: whether changes to alerts are included in the query.
        :param bool site: whether changes to site collections are included in the query.
        :param bool web: whether changes to Web sites are included in the query.
        :param bool list_: whether changes to lists are included in the query.
        :param bool item: whether general changes to list items are included in the query.
        :param bool activity: whether activity changes are included (not documented upstream).
        :param bool file: whether changes to files are included in the query.
        :param bool folder: whether changes to folders are included in the query.
        :param bool user: whether changes to users are included in the query.
        :param bool group: presumably whether changes to groups are included (inferred from name).
        :param bool view: whether changes to views are included in the query.
        :param bool content_type: whether changes to content types are included in the query.
        :param bool add: whether add changes are included in the query.
        :param bool update: whether update changes are included in the query.
        :param bool system_update: whether updates made via the item SystemUpdate method are included.
        :param bool delete_object: whether delete changes are included in the query.
        :param bool role_assignment_add: whether adding role assignments is included in the query.
        :param bool role_assignment_delete: whether deleting role assignments is included in the query.
        :param change_token_start: office365.sharepoint.changes.changeToken.ChangeToken
        :param change_token_end: office365.sharepoint.changes.changeToken.ChangeToken
        :param int fetch_limit: maximum number of changes to fetch.
        """
        super().__init__()
        # Assignment order is preserved from the original implementation.
        self.Item = item
        self.Alert = alert
        self.ContentType = content_type
        self.Web = web
        self.Site = site
        self.List = list_
        self.Activity = activity
        self.File = file
        self.Folder = folder
        self.User = user
        self.Group = group
        self.View = view
        self.Add = add
        self.Update = update
        self.SystemUpdate = system_update
        self.ChangeTokenStart = change_token_start
        self.ChangeTokenEnd = change_token_end
        self.DeleteObject = delete_object
        self.RoleAssignmentAdd = role_assignment_add
        self.RoleAssignmentDelete = role_assignment_delete
        self.FetchLimit = fetch_limit

    @property
    def entity_type_name(self):
        # Server-side OData type name of this value object.
        return 'SP.ChangeQuery'
| 55.378788 | 120 | 0.691108 | 3,597 | 0.984131 | 0 | 0 | 73 | 0.019973 | 0 | 0 | 2,277 | 0.622982 |
a6a06d990fb107f6d173990c09e667ce4615e46e | 283 | py | Python | backend0bit/serializers.py | piotrb5e3/0bit-backend | 1df105ab57d0ddde5868ae4b03b359e0c3f00b13 | [
"Apache-2.0"
] | null | null | null | backend0bit/serializers.py | piotrb5e3/0bit-backend | 1df105ab57d0ddde5868ae4b03b359e0c3f00b13 | [
"Apache-2.0"
] | 1 | 2020-06-05T19:21:04.000Z | 2020-06-05T19:21:04.000Z | backend0bit/serializers.py | piotrb5e3/0bit-backend | 1df105ab57d0ddde5868ae4b03b359e0c3f00b13 | [
"Apache-2.0"
] | null | null | null | from rest_framework import serializers
from backend0bit.models import StaticPage
class StaticPageSerializer(serializers.ModelSerializer):
    """Serializes StaticPage instances for the REST API."""

    class Meta:
        model = StaticPage
        fields = ('id', 'title', 'url', 'contents', 'order')
        # 'order' is exposed but cannot be written through the API.
        read_only_fields = ('order',)
| 25.727273 | 60 | 0.696113 | 198 | 0.699647 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.141343 |
a6a07499f9e41376f3c3ce20f6b665363c5200f6 | 1,529 | py | Python | visual_diarization_svm/clear_opt_flow_features_train.py | cvossos2046/visual_speaker_diarization_svm_project | bdc57d893ad9c04145568310c068f9c3e2305cf8 | [
"MIT"
] | null | null | null | visual_diarization_svm/clear_opt_flow_features_train.py | cvossos2046/visual_speaker_diarization_svm_project | bdc57d893ad9c04145568310c068f9c3e2305cf8 | [
"MIT"
] | null | null | null | visual_diarization_svm/clear_opt_flow_features_train.py | cvossos2046/visual_speaker_diarization_svm_project | bdc57d893ad9c04145568310c068f9c3e2305cf8 | [
"MIT"
] | null | null | null | def main():
video = "NET20070330_thlep_1_2"
file_path = "optical_flow_features_train/" + video
optical_flow_features_train = open(file_path + "/optical_flow_features_train.txt", "r")
clear_of_features_train = open(file_path + "/clear_opt_flow_features_train.txt", "w")
visual_ids = []
for line in optical_flow_features_train:
words = line.rstrip().split(' ')
if len(words) == 2:
if not int(words[1]) in visual_ids:
visual_ids.append(int(words[1]))
print("visual_ids", visual_ids)
optical_flow_features_train.close()
optical_flow_features_train = open(file_path + "/optical_flow_features_train.txt", "r")
for line in optical_flow_features_train:
words = line.rstrip().split(' ')
if len(words) == 2:
frame = int(words[0])
speaker_id = int(words[1])
else:
# every id that each speaker gets during the video
if speaker_id == 0 or speaker_id == 6 or speaker_id == 13 or speaker_id == 15 or speaker_id == 5 or speaker_id == 16 or speaker_id == 4 or speaker_id == 17 or speaker_id == 3 or speaker_id == 18:
clear_of_features_train.write(str(frame) + ' ' + str(speaker_id) + '\n')
for features in words:
clear_of_features_train.write(str(features) + ' ')
clear_of_features_train.write('\n')
optical_flow_features_train.close()
clear_of_features_train.close()
# Run the filter when executed as a script.
if __name__ == "__main__":
    main()
| 39.205128 | 207 | 0.63244 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 258 | 0.168738 |
a6a08446e9c3af18bd3e2b4bc0b3c57b0b814990 | 2,906 | py | Python | src/executor/run_mol.py | ferchault/clockwork | bfc852b34095401adeb4e2b4fcaa4320f60960c7 | [
"MIT"
] | null | null | null | src/executor/run_mol.py | ferchault/clockwork | bfc852b34095401adeb4e2b4fcaa4320f60960c7 | [
"MIT"
] | 21 | 2019-01-04T10:55:39.000Z | 2019-09-23T11:02:59.000Z | src/executor/run_mol.py | ferchault/clockwork | bfc852b34095401adeb4e2b4fcaa4320f60960c7 | [
"MIT"
] | 1 | 2019-02-23T15:15:00.000Z | 2019-02-23T15:15:00.000Z | #!/usr/bin/env python
from redis import Redis
import uuid
import sys
import os
import subprocess
import shutil
import numpy as np
import itertools as it
import json
from rdkit import Chem
from rdkit.Chem import AllChem, ChemicalForceFields
# Shared connection to the work-queue/result store; EXECUTOR_CONSTR holds
# "host:port/db" (defaults to a local redis instance).
redis = Redis.from_url("redis://" + os.environ.get("EXECUTOR_CONSTR", "127.0.0.1:6379/0"))

ENERGY_THRESHOLD = 1e-4  # intended for the (not yet implemented) duplicate-energy filter
ANGLE_DELTA = 1e-7  # half-width (degrees) of the torsion constraint window
FF_RELAX_STEPS = 50  # max MMFF minimization iterations per geometry
def clockwork(resolution):
    """Return the angle grid (start, step, n_steps) for a clockwork level.

    Level 0 scans the single angle 0 with a full 360-degree step; every
    further level halves both the starting offset and the step width while
    doubling the number of steps.
    """
    if resolution == 0:
        return 0, 360, 1
    half = 2.0 ** (resolution - 1)
    start = (360.0 / 2.0) / half
    step = 360.0 / half
    n_steps = 2 ** (resolution - 1)
    return start, step, n_steps
def get_classical_constrained_geometry(sdfstr, torsions, molname, dihedrals, angles):
    """Set the requested dihedral angles on the molecule and briefly relax it
    with MMFF while the torsions stay pinned.

    :param sdfstr: SDF mol-block of the molecule (hydrogens kept).
    :param torsions: list of 4-atom index tuples, one per known torsion.
    :param molname: molecule name (unused here; kept for interface compatibility).
    :param dihedrals: indices into ``torsions`` selecting the scanned dihedrals.
    :param angles: target angle in degrees for each selected dihedral.
    :return: XYZ-format string of the constrained, relaxed geometry.
    """
    mol = Chem.MolFromMolBlock(sdfstr, removeHs=False)
    ffprop = ChemicalForceFields.MMFFGetMoleculeProperties(mol)
    ffc = ChemicalForceFields.MMFFGetMoleculeForceField(mol, ffprop)
    conformer = mol.GetConformer()

    # Set angles and constrains for all torsions
    for dih_id, angle in zip(dihedrals, angles):
        # Set clockwork angle; best effort — skip dihedrals RDKit rejects.
        # BUGFIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit.
        try:
            Chem.rdMolTransforms.SetDihedralDeg(conformer, *torsions[dih_id], float(angle))
        except Exception:
            pass
        # Pin the torsion with a near-rigid force-field constraint.
        ffc.MMFFAddTorsionConstraint(*torsions[dih_id], False, angle-ANGLE_DELTA, angle+ANGLE_DELTA, 1.0e10)

    # Short minimization to reduce bad contacts; failures are tolerated.
    try:
        ffc.Minimize(maxIts=FF_RELAX_STEPS, energyTol=1e-2, forceTol=1e-3)
    except RuntimeError:
        pass

    atoms = [atom.GetSymbol() for atom in mol.GetAtoms()]
    coordinates = conformer.GetPositions()
    return f'{len(atoms)}\n\n' + '\n'.join([f'{element} {coords[0]} {coords[1]} {coords[2]}' for element, coords in zip(atoms, coordinates)])
def do_workpackage(molname, dihedrals, resolution):
    """Scan the selected dihedrals of *molname* on the clockwork grid.

    Reads the molecule's SDF and torsion list from redis, generates one
    constrained geometry per angle combination and returns a JSON summary.
    NOTE: the xTB re-optimization and duplicate filtering steps are not
    implemented yet, so the accepted lists remain empty.
    """
    ndih = len(dihedrals)
    start, step, n_steps = clockwork(resolution)
    scanangles = np.arange(start, start + step * n_steps, step)

    # Fetch the molecule definition from redis.
    sdfstr = redis.get(f'clockwork:{molname}:sdf').decode("ascii")
    torsions = json.loads(redis.get(f'clockwork:{molname}:dihedrals').decode("ascii"))

    accepted_geometries = []
    accepted_energies = []
    for angles in it.product(scanangles, repeat=ndih):
        xyzfile = get_classical_constrained_geometry(sdfstr, torsions, molname, dihedrals, angles)
        print(xyzfile)

    results = {
        'mol': molname,
        'ndih': ndih,
        'res': resolution,
        'geometries': accepted_geometries,
        'energies': accepted_energies,
    }
    return json.dumps(results)
# Ad-hoc smoke run: scan torsions 1-3 of molecule "debug" at resolution 2.
do_workpackage("debug", (1, 2, 3), 2)
| 30.914894 | 139 | 0.714728 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 724 | 0.24914 |
a6a1239b6aa0686d999ec021a092f3122039794c | 3,855 | py | Python | eventsourcing/tests/processrecorder_testcase.py | h11r/eventsourcing | e53ef697bfef8b78a468dc52d342b0e39b7cb889 | [
"BSD-3-Clause"
] | null | null | null | eventsourcing/tests/processrecorder_testcase.py | h11r/eventsourcing | e53ef697bfef8b78a468dc52d342b0e39b7cb889 | [
"BSD-3-Clause"
] | null | null | null | eventsourcing/tests/processrecorder_testcase.py | h11r/eventsourcing | e53ef697bfef8b78a468dc52d342b0e39b7cb889 | [
"BSD-3-Clause"
] | null | null | null | from abc import ABC, abstractmethod
from timeit import timeit
from unittest.case import TestCase
from uuid import uuid4
from eventsourcing.persistence import (
RecordConflictError,
StoredEvent,
Tracking,
)
class ProcessRecordsTestCase(TestCase, ABC):
    """Abstract test case for process recorders.

    Subclasses provide a concrete recorder via :meth:`create_recorder`
    and inherit the insert/select behaviour test and a simple
    performance measurement.  The assertions are order-dependent: each
    step builds on the recorder state left by the previous one.
    """
    @abstractmethod
    def create_recorder(self):
        """Return the process recorder instance under test."""
        pass
    def test_insert_select(self):
        """The tracking position advances only on successful inserts that
        carry tracking info; duplicate tracking info is rejected."""
        # Construct the recorder.
        recorder = self.create_recorder()
        # Get current position.
        self.assertEqual(
            recorder.max_tracking_id("upstream_app"),
            0,
        )
        # Write two stored events.
        originator_id1 = uuid4()
        originator_id2 = uuid4()
        stored_event1 = StoredEvent(
            originator_id=originator_id1,
            originator_version=1,
            topic="topic1",
            state=b"state1",
        )
        stored_event2 = StoredEvent(
            originator_id=originator_id1,
            originator_version=2,
            topic="topic2",
            state=b"state2",
        )
        stored_event3 = StoredEvent(
            originator_id=originator_id2,
            originator_version=1,
            topic="topic3",
            state=b"state3",
        )
        stored_event4 = StoredEvent(
            originator_id=originator_id2,
            originator_version=2,
            topic="topic4",
            state=b"state4",
        )
        tracking1 = Tracking(
            application_name="upstream_app",
            notification_id=1,
        )
        tracking2 = Tracking(
            application_name="upstream_app",
            notification_id=2,
        )
        # Insert two events with tracking info.
        recorder.insert_events(
            stored_events=[
                stored_event1,
                stored_event2,
            ],
            tracking=tracking1,
        )
        # Get current position.
        self.assertEqual(
            recorder.max_tracking_id("upstream_app"),
            1,
        )
        # Check can't insert third event with same tracking info.
        with self.assertRaises(RecordConflictError):
            recorder.insert_events(
                stored_events=[stored_event3],
                tracking=tracking1,
            )
        # Get current position (unchanged: the conflicting insert was rejected).
        self.assertEqual(
            recorder.max_tracking_id("upstream_app"),
            1,
        )
        # Insert third event with different tracking info.
        recorder.insert_events(
            stored_events=[stored_event3],
            tracking=tracking2,
        )
        # Get current position.
        self.assertEqual(
            recorder.max_tracking_id("upstream_app"),
            2,
        )
        # Insert fourth event without tracking info.
        recorder.insert_events(
            stored_events=[stored_event4],
        )
        # Get current position (unchanged: no tracking info was supplied).
        self.assertEqual(
            recorder.max_tracking_id("upstream_app"),
            2,
        )
    def test_performance(self):
        """Time `number` single-event inserts (each with unique tracking
        info) and print the mean seconds-per-insert."""
        # Construct the recorder.
        recorder = self.create_recorder()
        number = 100
        # Each insert consumes the next notification id from this iterator.
        notification_ids = iter(range(1, number + 1))
        def insert():
            originator_id = uuid4()
            stored_event = StoredEvent(
                originator_id=originator_id,
                originator_version=0,
                topic="topic1",
                state=b"state1",
            )
            tracking1 = Tracking(
                application_name="upstream_app",
                notification_id=next(notification_ids),
            )
            recorder.insert_events(
                stored_events=[
                    stored_event,
                ],
                tracking=tracking1,
            )
        duration = timeit(insert, number=number)
        print(self, f"{duration / number:.9f}")
| 26.047297 | 65 | 0.54345 | 3,633 | 0.942412 | 0 | 0 | 59 | 0.015305 | 0 | 0 | 604 | 0.15668 |
a6a173dfa16946d1b343a80b4e42d5cc67ea6e07 | 397 | py | Python | lolexport/log.py | dleiferives/lolexport | 894c97240893da829e96f46e2c4cdebf85846412 | [
"MIT"
] | 2 | 2021-02-23T09:21:07.000Z | 2022-03-25T15:02:50.000Z | lolexport/log.py | dleiferives/lolexport | 894c97240893da829e96f46e2c4cdebf85846412 | [
"MIT"
] | 5 | 2021-02-24T01:26:36.000Z | 2022-02-27T13:05:27.000Z | lolexport/log.py | dleiferives/lolexport | 894c97240893da829e96f46e2c4cdebf85846412 | [
"MIT"
] | 1 | 2022-02-27T02:17:17.000Z | 2022-02-27T02:17:17.000Z | from os import environ
import logging
from logzero import setup_logger # type: ignore[import]
# https://docs.python.org/3/library/logging.html#logging-levels
loglevel: int = logging.DEBUG # (10)
if "LOLEXPORT" in environ:
loglevel = int(environ["LOLEXPORT"])
# logzero handles this fine, can be imported/configured
# multiple times
logger = setup_logger(name="lolexport", level=loglevel)
| 28.357143 | 63 | 0.758186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.491184 |
a6a1b6d7205087acf58bd99258d54c81c8cb5272 | 6,538 | py | Python | test_routes.py | Varini/CHALLENGE_SFNewsAPI | 439bf7dfc10f83466786f6bd054afc8ab07b1b58 | [
"MIT"
] | null | null | null | test_routes.py | Varini/CHALLENGE_SFNewsAPI | 439bf7dfc10f83466786f6bd054afc8ab07b1b58 | [
"MIT"
] | null | null | null | test_routes.py | Varini/CHALLENGE_SFNewsAPI | 439bf7dfc10f83466786f6bd054afc8ab07b1b58 | [
"MIT"
] | null | null | null | from operator import index
from fastapi.testclient import TestClient
from index import app
client = TestClient(app)
def test_get_item():
    """GET /articles/{id} for a seeded id returns 200 and the full record."""
    expected = {
        "id": 10000,
        "title": "NASA TV to Air Launch of Space Station Module, Departure of Another",
        "url": "http://www.nasa.gov/press-release/nasa-tv-to-air-launch-of-space-station-module-departure-of-another",
        "imageUrl": "https://www.nasa.gov/sites/default/files/thumbnails/image/mlm_at_baikonur.jpg?itok=SrfC6Yzm",
        "newsSite": "NASA",
        "summary": "NASA will provide live coverage of a new Russian science module’s launch and automated docking to the International Space Station, and the undocking of another module that has been part of the orbital outpost for the past 20 years.",
        "publishedAt": "2021-07-13T20:22:00.000Z",
        "updatedAt": "2021-07-13T20:22:06.617Z",
        "featured": False,
        "launches": [{"id": "27fd5d5a-6935-4697-98b4-b409f029e2f0", "provider": "Launch Library 2"}],
        "events": [{"id": 268, "provider": "Launch Library 2"}],
    }
    response = client.get("/articles/10000")
    assert response.status_code == 200
    assert response.json() == expected
def test_get_invalid_id():
    """GET on a non-existent article id returns 404 with a detail message."""
    # Named `missing_id` (not `id`) to avoid shadowing the builtin.
    missing_id = 9999999999
    response = client.get(f"/articles/{missing_id}")
    assert response.status_code == 404
    assert response.json() == {"detail": f"Article ID: {missing_id} not found"}
def test_get_non_integer_id():
    """A non-integer path parameter is rejected by validation (422)."""
    response = client.get("/articles/abc")
    assert response.status_code == 422
    detail = response.json()["detail"]
    assert detail[0]["msg"] == "value is not a valid integer"
def test_create_item():
    """POST /articles/ creates an article and echoes it back with the new id.

    Improvements: the request payload and expected response shared one big
    duplicated literal (now defined once), and the local `id` no longer
    shadows the builtin.
    """
    payload = {
        "title": "No commercial crew test flights expected this year",
        "url": "https://spaceflightnow.com/2018/10/06/no-commercial-crew-test-flights-expected-this-year/",
        "imageUrl": "https://mk0spaceflightnoa02a.kinstacdn.com/wp-content/uploads/2018/10/ccp-countdown-header-326x245.jpg",
        "newsSite": "Spaceflight Now",
        "summary": "",
        "publishedAt": "2018-10-05T22:00:00.000Z",
        "updatedAt": "2021-05-18T13:43:19.589Z",
        "featured": False,
        "launches": [],
        "events": []
    }
    response = client.post("/articles/", json=payload)
    # The listing endpoint returns newest-first, so the first row is the
    # article just created; use it to learn the generated id.
    request = client.get("/articles/?page_size=1")
    created_id = request.json()[0]["id"]
    assert response.status_code == 201
    assert response.json() == {"id": created_id, **payload}
def test_update_item():
    """PUT /articles/{id} replaces an article and returns the updated record.

    Improvements: the request payload and expected response shared one big
    duplicated literal (now defined once), and the local `id` no longer
    shadows the builtin.
    """
    # Use the newest article (listing is newest-first) as the update target.
    request = client.get("/articles/?page_size=1")
    target_id = request.json()[0]["id"]
    payload = {
        "title": "Altered Title",
        "url": "www.domain.com",
        "imageUrl": "IMAGE.img",
        "newsSite": "",
        "summary": "",
        "publishedAt": "2018-10-05T22:00:00.000Z",
        "updatedAt": "2021-05-18T13:43:19.589Z",
        "featured": True,
        "launches": [
            {
                "id": "Altered",
                "provider": "Altered Launch"
            }
        ],
        "events": [
            {
                "id": 1037,
                "provider": "Altered Provider"
            }
        ]
    }
    response = client.put(f"/articles/{target_id}", json=payload)
    assert response.status_code == 200
    assert response.json() == {"id": target_id, **payload}
def test_update_invalid_id():
    """PUT on a non-existent article id returns 404 with a detail message."""
    # Named `missing_id` (not `id`) to avoid shadowing the builtin.
    missing_id = 9999999999
    response = client.put(f"/articles/{missing_id}", json={"title": "Altered Title"})
    assert response.status_code == 404
    assert response.json() == {"detail": f"Article ID: {missing_id} not found"}
def test_update_non_integer_id():
    """PUT with a non-integer path parameter is rejected by validation (422)."""
    response = client.put("/articles/abc")
    assert response.status_code == 422
    detail = response.json()["detail"]
    assert detail[0]["msg"] == "value is not a valid integer"
def test_delete_item():
    """DELETE /articles/{id} removes the newest article and confirms it."""
    # Named `target_id` (not `id`) to avoid shadowing the builtin.
    request = client.get("/articles/?page_size=1")
    target_id = request.json()[0]["id"]
    response = client.delete(f"/articles/{target_id}")
    assert response.status_code == 200
    assert response.json() == f"Article ID: {target_id} was deleted."
def test_delete_invalid_id():
    """DELETE on a non-existent article id returns 404 with a detail message."""
    # Named `missing_id` (not `id`) to avoid shadowing the builtin.
    missing_id = 9999999999
    response = client.delete(f"/articles/{missing_id}")
    assert response.status_code == 404
    assert response.json() == {"detail": f"Article ID: {missing_id} not found"}
def test_delete_non_integer_id():
    """DELETE with a non-integer path parameter is rejected by validation (422)."""
    response = client.delete("/articles/abc")
    assert response.status_code == 422
    detail = response.json()["detail"]
    assert detail[0]["msg"] == "value is not a valid integer"
| 40.608696 | 277 | 0.487611 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,647 | 0.40474 |
a6a36c3ad6ad312dcf0a3b5b7faffce0dc407190 | 19,787 | py | Python | applications/javelin/controllers/jadmin.py | jjacobson93/javelin-web2py | d4de493156c6893acca74d4be7f4597c90c418f3 | [
"BSD-3-Clause"
] | null | null | null | applications/javelin/controllers/jadmin.py | jjacobson93/javelin-web2py | d4de493156c6893acca74d4be7f4597c90c418f3 | [
"BSD-3-Clause"
] | null | null | null | applications/javelin/controllers/jadmin.py | jjacobson93/javelin-web2py | d4de493156c6893acca74d4be7f4597c90c418f3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Javelin Web2Py Admin Controller
"""
# metadata
__author__ = "Jeremy Jacobson"
__copyright__ = "(c) 2013, Jacobson and Varni, LLC"
__date__ = "7/12/2013"
__email__ = "jjacobson93@gmail.com"
__data__ = {'name' : 'jadmin', 'label' : 'Admin', 'description' : 'Only accessible to admins',
'icon' : 'briefcase', 'u-icon' : u'\uf0b1', 'color':'orange', 'required' : True}
import time
from datetime import datetime
from applications.javelin.ctr_data import ctr_enabled, get_ctr_data
from gluon.contrib import simplejson as json
from gluon.tools import Service
from gluon.storage import Storage
service = Service(globals())
DOC_TYPES = Storage(
CALLSLIP=Storage(value=0, label="Call Slips"),
ATTSHEETS=Storage(value=1, label="Attendance Sheets"),
NAMETAGS=Storage(value=2, label="Nametags")
)
@auth.requires_login()
@auth.requires_membership('admin')
def index():
    """Render the admin landing page.

    Gathers controller metadata, every user account, and the accounts
    still awaiting approval, and hands them to the view with 'jadmin'
    marked as the active module.
    """
    ctr_data = get_ctr_data()
    all_users = db().select(db.auth_user.ALL)
    pending = db(db.auth_user.registration_key == 'pending').select(db.auth_user.ALL)
    return dict(
        ctr_enabled=ctr_enabled,
        ctr_data=ctr_data,
        active_module='jadmin',
        users=all_users,
        approvals=pending,
        doctypes=DOC_TYPES,
    )
def _resolve_person_ids(people):
    """Expand a mixed selection list into person record ids.

    ``people`` may mix raw person ids with the tokens 'group_<id>'
    (task-team members), 'grade_<n>' (whole grade), 'all_leaders' and
    'all_people'.  Returns a de-duplicated list preserving first-seen order.
    """
    persons = list()

    def _add(pid):
        # De-duplicate while preserving order.
        if pid not in persons:
            persons.append(pid)

    for p in people:
        if p.startswith('group_'):
            members = db(db.group_rec.group_id == p.replace('group_', '')).select(
                db.person.id,
                join=db.group_rec.on(db.person.id == db.group_rec.person_id))
            for m in members:
                _add(m.id)
        elif p.startswith('grade_'):
            for g in db(db.person.grade == p.replace('grade_', '')).select(db.person.id):
                _add(g.id)
        elif p == 'all_leaders':
            for l in db(db.person.leader == True).select(db.person.id):
                _add(l.id)
        elif p == 'all_people':
            for a in db().select(db.person.id):
                _add(a.id)
        else:
            _add(p)
    return persons


@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def create_doc(doctype, data):
    """Build a PDF (call slips or nametags), store it in ``db.file`` and
    return the stored file name.

    :param doctype: integer value from ``DOC_TYPES``
    :param data: JSON-encoded dict of options (people selection, message,
        event name, ...)
    :returns: dict with the stored file name under ``filename``

    Fixes over the previous revision:
    - the ATTSHEETS branch left ``doc_title`` undefined (NameError below);
    - the nametag header referenced an undefined name ``year`` (NameError);
    - the final partially-filled page of call slips and the final partial
      row of nametag labels were silently dropped.
    """
    logger.debug("CREATE DOC CALLED")
    import StringIO
    from reportlab.platypus import SimpleDocTemplate, Paragraph, Table, TableStyle, Image, Spacer
    from reportlab.platypus.flowables import PageBreak
    from reportlab.lib.styles import ParagraphStyle
    from reportlab.lib.enums import TA_CENTER, TA_LEFT
    from reportlab.lib.pagesizes import letter, inch
    from reportlab.lib import colors
    io = StringIO.StringIO()
    doc = SimpleDocTemplate(io, pagesize=letter,
        rightMargin=0.18*inch, leftMargin=0.18*inch, topMargin=0.18*inch, bottomMargin=0)
    elements = list()
    doctype = int(doctype)
    if data:
        data = json.loads(data)
    # Fallback so the filename below is always defined (was a NameError
    # for the unimplemented ATTSHEETS branch).
    doc_title = "Document"
    if doctype == DOC_TYPES.CALLSLIP.value:
        doc_title = "Call_Slips"
        message = data['message']
        persons = _resolve_person_ids(data['people'])
        # Materialize display data: names plus "period: room" per course.
        people = [Storage(id=pid, last_name=db(db.person.id==pid).select(db.person.last_name).first().last_name,
            first_name=db(db.person.id==pid).select(db.person.first_name).first().first_name,
            courses=['{}: {}'.format(c.period, c.room) for c in db().select(db.course.period, db.course.room,
                join=db.course_rec.on((db.course.id==db.course_rec.course_id) & (db.course_rec.student_id==pid)),
                orderby=db.course.period)]
            ) for pid in persons]
        leftStyle = ParagraphStyle(name='Left', alignment=TA_LEFT)
        tableStyle = TableStyle([('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black)])

        def _flush_page(page):
            # Emit one page holding up to four call slips.
            table = Table(page, colWidths=[8*inch], rowHeights=[2.5*inch]*len(page))
            table.setStyle(tableStyle)
            elements.append(table)

        page = list()
        for person in people:
            page.append([Paragraph("<para alignment='left'><br></para>" +\
                "<para alignment='center'><font face='Times-Bold' size=16>Vintage Crusher Crew</font><br><br><br></para>" +\
                "<para alignment='left'><font face='Times' size=14><b>Name:</b> {} {}</font><br><br></para>".format(person.first_name, person.last_name) +\
                "<para alignment='left'><font face='Times' size=12><b>Rooms:</b> {}</font><br><br></para>".format(', '.join(person.courses)) +\
                "<para alignment='left'><font face='Times' size=12><b>Message:</b></font><br></para>" +\
                "<para alignment='left'><font face='Times' size=12>{}</font></para>".format(message), leftStyle)])
            if len(page) == 4:
                _flush_page(page)
                elements.append(PageBreak())
                page = list()
        if page:
            # BUGFIX: a final page with fewer than four slips used to be lost.
            _flush_page(page)
    elif doctype == DOC_TYPES.ATTSHEETS.value:
        # Not implemented yet; the generated PDF will be empty.
        doc_title = "Attendance_Sheets"
    elif doctype == DOC_TYPES.NAMETAGS.value:
        event_name = data['event_name']
        # Read (and thereby require) the remaining options even though the
        # current layout does not render them yet.
        events = data['events']
        present = data['present']
        persons = _resolve_person_ids(data['people'])
        centerStyle = ParagraphStyle(name='Center', alignment=TA_CENTER)
        leftStyle = ParagraphStyle(name='Left', alignment=TA_LEFT)
        tableStyle = TableStyle([('VALIGN', (0, -1), (-1, -1), 'TOP')])
        # BUGFIX: the header previously used an undefined name `year`.
        year = datetime.now().year

        def _flush_labels(labels):
            # Emit one row of (up to) two nametag labels plus the gutter.
            table = Table([labels], colWidths=[4*inch, 0.14*inch, 4*inch],
                rowHeights=[2*inch]*(len(labels)/2))
            table.setStyle(tableStyle)
            elements.append(table)

        label_num = 0
        row_num = 0
        labels = list()
        for pid in persons:
            row = db(db.person.id == pid).select(db.person.ALL).first()
            label = list()
            if label_num == 2:
                _flush_labels(labels)
                label_num = 0
                labels = list()
                row_num += 1
                if row_num == 5:
                    # Five label rows fit per page.
                    row_num = 0
                    elements.append(PageBreak())
            label.append(Paragraph("<font face='Times-Bold' size=11>{} {}</font>".format(year, event_name), centerStyle))
            label.append(Spacer(1, 11))
            label.append(Paragraph("<font face='Times-Bold' size=18>{}</font>".format(row.first_name), centerStyle))
            label.append(Spacer(1, 11))
            label.append(Paragraph("<font face='Times-Roman' size=11>{}</font>".format(row.last_name), centerStyle))
            label.append(Spacer(1, 20))
            label.append(Paragraph("<font face='Times-Roman' size=11>ID#: {}</font>".format(row.student_id), leftStyle))
            label.append(Paragraph("<font face='Times-Roman' size=11>Crew #: {}</font>".format(row.crew), leftStyle))
            labels.append(label)
            if label_num == 0:
                # Horizontal gutter between the two labels in a row.
                labels.append(Spacer(14, 144))
            label_num += 1
        if labels:
            # BUGFIX: a final partial row of labels used to be lost.
            _flush_labels(labels)
        doc_title = '_'.join(event_name.split())
    doc.build(elements)
    io.seek(0)
    now = datetime.now().strftime('%Y-%m-%d')
    filename = "{}_{}_{}.pdf".format(doc_title, now, int(time.time()))
    file_id = db.file.insert(name=filename, file=db.file.file.store(io, filename))
    db_file = db.file(file_id).file
    return dict(filename=db_file)
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def update_names(names):
    """Upsert module display labels.

    :param names: JSON-encoded list of {'name': ..., 'value': ...} dicts;
        each entry updates (or inserts) the matching ``module_names`` row.
    :returns: dict with the entries whose upsert reported failure (result 0)

    Improvement: pairs each result with its input via ``zip`` instead of
    the index-based ``for i in range(len(...))`` loop.
    """
    names = json.loads(names)
    response = [db.module_names.update_or_insert(name=n['name'], label=n['value'])
                for n in names]
    errors = [name for result, name in zip(response, names) if result == 0]
    return dict(errors=errors)
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def approve_user(id):
    """Approve a pending registration by clearing the user's registration key."""
    rows_updated = db(db.auth_user.id == id).update(registration_key='')
    return dict(response=rows_updated)
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def disapprove_user(id):
    """Reject a pending registration by deleting the user record outright."""
    rows_deleted = db(db.auth_user.id == id).delete()
    return dict(response=rows_deleted)
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def import_from_csv(csv_file):
    """Imports person records into the database from CSV text.

    :param csv_file: raw CSV content; the first row is a header whose
        labels are lower-cased and underscore-joined to form field names
    :returns: a dictionary with the per-row upsert results

    Improvement: uses the ``csv`` module (as ``import_from_query`` already
    does) instead of a naive ``split(',')``, so quoted fields containing
    commas parse correctly.  Rows are keyed on their ``id`` column.
    """
    import csv
    import StringIO
    response = list()
    rows = list(csv.reader(StringIO.StringIO(csv_file), skipinitialspace=True))
    if rows:
        columns = ['_'.join(c.lower().split()) for c in rows.pop(0)]
        for row in rows:
            if not row:
                # Skip blank lines.
                continue
            record = dict(zip(columns, row))
            # Keep only keys that are actual person fields.
            record = dict((k, v) for k, v in record.items() if k in db.person.fields)
            # NOTE(review): assumes every data row carries an 'id' column,
            # as the original did — verify against the export format.
            response.append(db.person.update_or_insert(db.person.id == record['id'], **record))
    return dict(response=response)
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def import_from_query(csv_file, leaders):
    """Imports records into the database from a CSV file (in the form of the
    queries from VHS).

    :param csv_file: raw CSV content
    :param leaders: the string "true" to import leader sign-ups; anything
        else imports the student/teacher/course query export
    :returns: dict of names that failed to import (leaders mode) or True

    Fixes over the previous revision:
    - the final linking loop iterated ``tasks_to_add`` but inserted the
      stale ``task_id`` from the preceding loop, so only the last-seen
      task could ever be linked (and it was linked repeatedly);
    - the three identical per-priority task loops are collapsed into one.
    """
    import csv
    import StringIO
    leaders = True if leaders == "true" else False

    def phone_format(n):
        # Render a digit string as 123-456-7890; None when unparsable.
        try:
            return format(int(n[:-1]), ",").replace(",", "-") + n[-1]
        except:
            return None

    if not leaders:
        file_string = StringIO.StringIO(csv_file)
        lines = list(csv.reader(file_string, skipinitialspace=True))
        del file_string
        del csv_file
        # INSERT STUDENTS, TEACHERS, COURSES AND THEIR LINK RECORDS
        student_ids = list()
        teacher_ids = list()
        course_ids = list()
        columns = lines.pop(0)
        while len(lines) > 0:
            record = dict()
            line = lines.pop(0)
            student_id = line[columns.index('student_id')]
            teacher_id = line[columns.index('teacher_id')]
            course_id = line[columns.index('course_id')]
            if student_id and student_id not in student_ids:
                student_ids.append(student_id)
                for i in range(len(line)):
                    record[columns[i]] = line[i]
                record = dict((k, v) for k, v in record.items() if k in db.person.fields)
                if record.get('cell_phone', None):
                    record['cell_phone'] = phone_format(record['cell_phone'])
                if record.get('home_phone', None):
                    record['home_phone'] = phone_format(record['home_phone'])
                db.person.update_or_insert(db.person.student_id == student_id, **record)
            if teacher_id and teacher_id not in teacher_ids:
                teacher_ids.append(teacher_id)
                db.teacher.update_or_insert(db.teacher.teacher_id == teacher_id, **{
                    'teacher_id': line[columns.index('teacher_id')],
                    'teacher_name': line[columns.index('teacher_name')]})
            if course_id and teacher_id and course_id not in course_ids:
                course_ids.append(course_id)
                teacher = db(db.teacher.teacher_id == teacher_id).select(db.teacher.id).first()
                if teacher:
                    db.course.update_or_insert(db.course.course_id == course_id, **{
                        'course_id': line[columns.index('course_id')],
                        'code': line[columns.index('course_code')],
                        'title': line[columns.index('course_title')],
                        'period': line[columns.index('period')],
                        'room': line[columns.index('room')],
                        'teacher_id': teacher.id})
            if course_id and student_id:
                course = db(db.course.course_id == course_id).select().first()
                student = db(db.person.student_id == student_id).select().first()
                if course and student:
                    db.course_rec.update_or_insert((db.course_rec.course_id == course.id) &
                        (db.course_rec.student_id == student.id),
                        course_id=course.id,
                        student_id=student.id)
            db.commit()
            del record
            del line
        return dict(response=True)
    else:
        errors = list()
        lines = list(csv.reader(StringIO.StringIO(csv_file), skipinitialspace=True))
        columns = lines.pop(0)
        # Maps the long sign-up-form option text to short task-team names.
        short_tasks = {
            'Team Sacrifice (Must have a car and willingness to work later than others)' : 'Team Sacrifice',
            "Peer Support (Must be enrolled in Mr. Ward's Psychology or Peer Support class)" : 'Peer Support',
            "Tutor/Study Buddy (Academic credits are available for this option)" : 'Tutor/Study Buddy',
            "Database Manager (Must know Excel, Mail merge, and other technologies)" : 'Database Manager',
            "Facebook Maintenance (You are responsible for up keeping on our page. Must be a FB addict)" : "Facebook Maintenance",
            "Fundraising Team" : "Fundraising Team",
            "TAs (Work with freshmen and Mr. Varni, Mr. Ward, or Mrs. Housley during the school day (Academic credits are available for this option)": "TAs",
            "Posters & Propaganda" : "Posters & Propaganda",
            "Public Outreach (Attend Parent Night, Back-to-School, other public events)" : 'Public Outreach',
            "ASB Support (Those enrolled in 4th period Leadership class should check this option, but others are welcome as well)" : "ASB Support",
            "L.O.C.s (Loyal Order of the Crushers. Attend home athletic and extracurricular events)": "L.O.C.s",
            "Dirty 30 (Explain various aspects of high school culture to freshmen on Orientation Day afternoon)" : "Dirty 30",
            "Set-up (Room Mapping) and Clean-up (Orientation Day only)": "Set-up and Clean-up",
            "Homecoming Parade (Dress up and ride on our float! Easy!)" : "Homecoming Parade",
            "Security/Safety (Helps keep freshmen in line; works with Peer Support on Orientation Day)": "Security/Safety",
            "Food Prep & Clean-up (Orientation Day only)": "Food Prep & Clean-up",
            "Fashion (Make costumes for House Hotties and Homecoming Parade)" : "Fashion",
            'Burgundy Beauties and Golden Guns (Formerly "House Hotties")' : "Burgundy Beauties and Golden Guns",
            "Audio-Visual (Responsible for music and videos during Orientation)" : "Audio-Visual",
            "A-Team (Alumni only)": "A-Team"
        }
        task_teams = [task.name for task in db().select(db.groups.name)]
        for line in lines:
            record = dict()
            for i in range(len(line)):
                if columns[i] == 'last_name' or columns[i] == 'first_name':
                    line[i] = line[i].capitalize()
                record[columns[i]] = line[i]
            record = dict((k, v) for k, v in record.items() if k in db.person.fields)
            if record.get('cell_phone', None):
                record['cell_phone'] = phone_format(record['cell_phone'])
            try:
                person = db((db.person.last_name == record['last_name']) &
                    (db.person.first_name == record['first_name'])).select(db.person.ALL).first()
                if person:
                    person_id = person.id
                    db(db.person.id == person_id).update(**record)
                    db(db.person.id == person_id).update(leader=True)
                    aTasks = line[columns.index('a_tasks')].split(',')
                    bTasks = line[columns.index('b_tasks')].split(',')
                    cTasks = line[columns.index('c_tasks')].split(',')
                    tasks_to_add = list()
                    # One pass over all three priority lists (identical logic
                    # was previously copy-pasted three times).
                    for task in aTasks + bTasks + cTasks:
                        if task not in task_teams and task in short_tasks.values():
                            task_id = db.groups.insert(name=task)
                            tasks_to_add.append(task_id)
                            task_teams.append(task)
                        elif task in task_teams and task in short_tasks.values():
                            task_row = db(db.groups.name == task).select().first()
                            if task_row:
                                task_id = task_row.id
                                tasks_to_add.append(task_id)
                    # BUGFIX: link each collected group id; the old code used
                    # the leftover loop variable `task_id` here.
                    for task in tasks_to_add:
                        if not db((db.group_rec.group_id == task) & (db.group_rec.person_id == person_id)).select().first():
                            db.group_rec.insert(group_id=task, person_id=person_id)
            except:
                errors.append(record['last_name'] + ", " + record['first_name'])
        return dict(errors=errors)
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def get_person_group_data(query=None):
    """Return people/group choices for a selection widget.

    With ``query``: case-insensitive name search, grouped into 'All',
    'By Grade', 'Task Teams' and 'Students' sections (empty sections are
    omitted).  Without ``query``: the complete listing of all sections.
    """
    if query:
        qlist = query.split()
        query = query.lower()
        # Students whose last or first name contains every query token.
        students = db(((db.person.last_name.contains(qlist, all=True)) |
            (db.person.first_name.contains(qlist, all=True))) ).select(
            db.person.id, db.person.last_name, db.person.first_name,
            orderby=db.person.last_name|db.person.first_name).as_list()
        # Synthetic 'All Students'/'All Leaders' entries matching the query.
        allfields = [{'text': 'All', 'children':[d for d in [{'id':'all_people', 'last_name':'All Students', 'first_name' : ''},
            {'id':'all_leaders', 'last_name':'All Leaders', 'first_name' : ''}] if query in d['last_name'].lower()]}]
        allfields = [] if not allfields[0]['children'] else allfields
        # Per-grade pseudo-entries matching the query.
        gradefields = [{'text': 'By Grade', 'children':[d for d in [{'id':'grade_9', 'last_name': 'Freshmen', 'first_name': ''},
            {'id':'grade_10', 'last_name': 'Sophomores', 'first_name': ''},
            {'id':'grade_11', 'last_name': 'Juniors', 'first_name': ''},
            {'id':'grade_12', 'last_name': 'Seniors', 'first_name': ''}] if query in d['last_name'].lower()]}]
        gradefields = [] if not gradefields[0]['children'] else gradefields
        # Task teams whose name contains the query tokens; ids are prefixed
        # with 'group_' so they can be told apart from person ids.
        taskteams = [{'text': 'Task Teams', 'children': [{'id':'group_' + str(g.id),
            'last_name': g.name,
            'first_name':''}
            for g in db(db.groups.name.contains(qlist)).select(db.groups.ALL, orderby=db.groups.name)]}]
        taskteams = [] if not taskteams[0]['children'] else taskteams
        students = [] if not students else [{'text': 'Students', 'children':students}]
        people = allfields +\
            gradefields +\
            taskteams +\
            students
    else:
        # No filter: return every section fully populated.
        students = db().select(db.person.id, db.person.last_name, db.person.first_name,
            orderby=db.person.last_name|db.person.first_name).as_list()
        people = [{'text': 'All', 'children':[{'id':'all_people', 'last_name':'All Students', 'first_name' : ''},
            {'id':'all_leaders', 'last_name':'All Leaders', 'first_name' : ''}]}] +\
            [{'text': 'By Grade', 'children':[{'id':'grade_9', 'last_name': 'Freshmen', 'first_name': ''},
            {'id':'grade_10', 'last_name': 'Sophomores', 'first_name': ''},
            {'id':'grade_11', 'last_name': 'Juniors', 'first_name': ''},
            {'id':'grade_12', 'last_name': 'Seniors', 'first_name': ''} ]}] +\
            [{'text': 'Task Teams', 'children': [{'id':'group_' + str(g.id),
            'last_name': g.name,
            'first_name':''}
            for g in db().select(db.groups.ALL, orderby=db.groups.name)]}] +\
            [{'text': 'Students', 'children':students}]
    return people
@auth.requires_login()
@auth.requires_membership('admin')
def call():
    """Service dispatcher: routes HTTP requests to the ``@service.json``
    functions defined in this controller (admin-only)."""
    return service()
a6a3e38e319e18c135bd1b7301d2bdabc8eba4b7 | 36,084 | py | Python | Testing.py | Eric-Fernandes-529/Final_Project_SW555 | 45498ed718e67ee082182ead526cb43eedd2ac56 | [
"MIT"
] | null | null | null | Testing.py | Eric-Fernandes-529/Final_Project_SW555 | 45498ed718e67ee082182ead526cb43eedd2ac56 | [
"MIT"
] | 1 | 2020-10-07T02:48:52.000Z | 2020-10-07T02:48:52.000Z | Testing.py | Eric-Fernandes-529/Final_Project_SW555 | 45498ed718e67ee082182ead526cb43eedd2ac56 | [
"MIT"
] | 1 | 2020-10-07T01:53:44.000Z | 2020-10-07T01:53:44.000Z | import unittest
import sys
#sys.path.append('../')
from models.Individual import Individual
from models.Family import Family
from models.Gedcom import Gedcom
class TestSprint1(unittest.TestCase):
    def setUp(self):
        """Build fresh fixtures before every test: two parsed GEDCOM files
        (one valid, one invalid) plus blank individuals and families."""
        # GEDCOM tags the parser recognizes.
        SUPPORT_TAGS = {"INDI", "NAME", "SEX", "BIRT", "DEAT", "FAMC", "FAMS", "FAM", "MARR", "HUSB", "WIFE", "CHIL",
                        "DIV", "DATE", "HEAD", "TRLR", "NOTE"}
        self.G1 = Gedcom('../testing_files/right.ged', SUPPORT_TAGS)
        self.G2 = Gedcom('../testing_files/wrong.ged', SUPPORT_TAGS)
        self.ind_1 = Individual("01")
        self.ind_2 = Individual("02")
        self.ind_3 = Individual("03")
        self.fam_1 = Family("01")
        self.fam_2 = Family("02")
def tearDown(self):
self.ind_1 = Individual("01")
self.ind_2 = Individual("02")
self.ind_3 = Individual("03")
self.fam_1 = Family("01")
self.fam_2 = Family("02")
    def test_US11_no_bigamy(self):
        """US11: no_bigamy() is False whenever two of a person's marriages
        overlap in time, and True once the overlap is resolved."""
        self.ind_1.set_birthDate(["09", "APR", "1997"])
        self.ind_2.set_birthDate(["19", "DEC", "1997"])
        self.ind_1.add_to_family(self.fam_1)
        self.fam_1.set_marriedDate(["01", "JUN", "2017"])
        # A single marriage is never bigamy.
        self.assertTrue(self.ind_1.no_bigamy())
        self.fam_2.set_marriedDate(["05", "JUN", "2016"])
        self.ind_1.add_to_family(self.fam_2)
        # Two concurrent, undivorced marriages: bigamy.
        self.assertFalse(self.ind_1.no_bigamy())
        self.fam_2.set_divorcedDate(("01", "JAN", "2017"))
        # Second marriage ended before the first began: no overlap.
        self.assertTrue(self.ind_1.no_bigamy())
        self.fam_2.set_divorcedDate(("01", "AUG", "2017"))
        # Divorce moved past the first wedding date: overlap again.
        self.assertFalse(self.ind_1.no_bigamy())
        self.fam_1.set_divorcedDate(("01", "DEC", "2018"))
        # Both marriages bounded but still overlapping during 2017.
        self.assertFalse(self.ind_1.no_bigamy())
        self.fam_2.set_divorcedDate(("01", "JAN", "2017"))
        # Overlap resolved once more.
        self.assertTrue(self.ind_1.no_bigamy())
def test_US02_birth_before_marriage(self):
self.ind_1.set_birthDate(["09", "APR", "1997"])
self.ind_1.add_to_family(self.fam_1)
self.fam_1.set_marriedDate(["01", "JUN", "2018"])
self.assertTrue(self.ind_1.birth_before_marriage())
def test_US03_birth_before_death(self):
self.ind_1.set_birthDate(["09", "APR", "1997"])
self.ind_1.set_deathDate(["01", "JUN", "2018"])
self.assertTrue(self.ind_1.birth_before_death())
def test_US07_less_then_150_years_old(self):
self.ind_1.set_birthDate(["09", "APR", "1997"])
self.assertTrue(self.ind_1.less_then_150_years_old())
self.ind_2.set_birthDate(["09", "APR", "997"])
self.assertFalse(self.ind_2.less_then_150_years_old())
def test_US04_marriage_before_divorce(self):
t1 = Family("F01")
male1 = Individual("P01")
female1 = Individual("P02")
male1.set_deathDate(['8', 'SEP', '2010'])
female1.set_deathDate(['8', 'SEP', '2011'])
t1.set_husband(male1)
t1.set_wife(female1)
t1.set_marriedDate(['8', 'SEP', '2000'])
t1.set_divorcedDate(['8', 'SEP', '2009'])
# ---------------------------------
t2 = Family("F02")
male2 = Individual("P03")
female2 = Individual("P04")
male2.set_deathDate(['8', 'SEP', '2012'])
female2.set_deathDate(['8', 'SEP', '2013'])
t2.set_husband(male2)
t2.set_wife(female2)
t2.set_marriedDate(['8', 'SEP', '2005'])
t2.set_divorcedDate(['8', 'SEP', '2004'])
# ---------------------------------
assert t1.marriage_before_divorce() == True
assert t2.marriage_before_divorce() == False
def test_US05_marriage_before_death(self):
t1 = Family("F01")
male1 = Individual("P01")
female1 = Individual("P02")
male1.set_deathDate(['8', 'SEP', '2010'])
female1.set_deathDate(['8', 'SEP', '2011'])
t1.set_husband(male1)
t1.set_wife(female1)
t1.set_marriedDate(['8', 'SEP', '2000'])
# ---------------------------------
t2 = Family("F02")
male2 = Individual("P03")
female2 = Individual("P04")
male2.set_deathDate(['8', 'SEP', '1999'])
female2.set_deathDate(['9', 'SEP', '2011'])
t2.set_husband(male2)
t2.set_wife(female2)
t2.set_marriedDate(['8', 'SEP', '2000'])
# ---------------------------------
t3 = Family("F03")
male3 = Individual("P05")
female3 = Individual("P06")
male3.set_deathDate(['8', 'SEP', '2003'])
female3.set_deathDate(['9', 'SEP', '1998'])
t3.set_husband(male3)
t3.set_wife(female3)
t3.set_marriedDate(['8', 'SEP', '2000'])
# ---------------------------------
t4 = Family("F04")
male4 = Individual("P07")
female4 = Individual("P08")
male4.set_deathDate(['8', 'SEP', '1998'])
female4.set_deathDate(['9', 'SEP', '1999'])
t4.set_husband(male4)
t4.set_wife(female4)
t4.set_marriedDate(['8', 'SEP', '2000'])
# ---------------------------------
t5 = Family("F05")
male5 = Individual("P09")
female5 = Individual("P10")
male5.set_deathDate(['8', 'SEP', '2009'])
female5.set_deathDate(['8', 'SEP', '2009'])
t5.set_husband(male5)
t5.set_wife(female5)
t5.set_marriedDate(['8', 'SEP', '2009'])
# ---------------------------------
assert t1.marriage_before_death() == True
assert t2.marriage_before_death() == False
assert t3.marriage_before_death() == False
assert t4.marriage_before_death() == False
assert t5.marriage_before_death() == True
def test_US06_divorse_before_death(self):
t1 = Family("F01")
male1 = Individual("P01")
female1 = Individual("P02")
male1.set_deathDate(["5", "MAR", "2000"])
female1.set_deathDate(["9", "APR", "2002"])
t1.set_husband(male1)
t1.set_wife(female1)
t1.set_divorcedDate(["1", "JAN", "1999"])
# ---------------------------------
t2 = Family("F02")
male2 = Individual("P03")
female2 = Individual("P04")
male2.set_deathDate(["5", "MAR", "2000"])
female2.set_deathDate(["9", "APR", "2002"])
t2.set_husband(male2)
t2.set_wife(female2)
t2.set_divorcedDate(["1", "JAN", "2003"])
# ---------------------------------
assert t1.divorce_before_death() == True
assert t2.divorce_before_death() == False
def test_US08_birth_before_marriage_of_parents(self):
t1 = Family("F01")
male1 = Individual("P01")
female1 = Individual("P02")
child1 = Individual("P03")
t1.add_child(child1)
t1.set_marriedDate(['8', 'SEP', '2000'])
child1.set_birthDate(["6", "JAN", "1998"])
t1.set_husband(male1)
t1.set_wife(female1)
# ---------------------------------
t2 = Family("F02")
male2 = Individual("P04")
female2 = Individual("P05")
child2 = Individual("P06")
t2.add_child(child2)
t2.set_marriedDate(['8', 'SEP', '2000'])
child2.set_birthDate(["6", "JAN", "2001"])
t2.set_husband(male2)
t2.set_wife(female2)
# ---------------------------------
t3 = Family("F03")
male3 = Individual("P07")
female3 = Individual("P08")
child3 = Individual("P09")
t3.add_child(child3)
t3.set_marriedDate(['6', 'MAR', '2000'])
child3.set_birthDate(["6", "MAR", "2000"])
t3.set_husband(male3)
t3.set_wife(female3)
# ---------------------------------
assert t1.birth_before_marriage_of_parents() == False
assert t2.birth_before_marriage_of_parents() == True
assert t3.birth_before_marriage_of_parents() == False
def test_US09_birth_before_death_of_parent(self):
t1 = Family("F01")
male1 = Individual("P01")
female1 = Individual("P02")
child1 = Individual("P03")
t1.add_child(child1)
male1.set_deathDate(["5", "MAR", "2000"])
female1.set_deathDate(["9", "APR", "2002"])
child1.set_birthDate(["6", "JAN", "1998"])
t1.set_husband(male1)
t1.set_wife(female1)
# ---------------------------------
t2 = Family("F02")
male2 = Individual("P04")
female2 = Individual("P05")
child2 = Individual("P06")
t2.add_child(child2)
male2.set_deathDate(["5", "MAR", "2000"])
female2.set_deathDate(["9", "APR", "2002"])
child2.set_birthDate(["6", "JAN", "2001"])
t2.set_husband(male2)
t2.set_wife(female2)
# ---------------------------------
t3 = Family("F03")
male3 = Individual("P07")
female3 = Individual("P08")
child3 = Individual("P09")
t3.add_child(child3)
male3.set_deathDate(["5", "MAR", "2000"])
female3.set_deathDate(["9", "APR", "2002"])
child3.set_birthDate(["6", "MAR", "2000"])
t3.set_husband(male3)
t3.set_wife(female3)
# ---------------------------------
assert t1.birth_before_death_of_parents() == True
assert t2.birth_before_death_of_parents() == False
assert t3.birth_before_death_of_parents() == True
def test_US10_marriage_after_14(self):
t1 = Family("F01")
male1 = Individual("P01")
female1 = Individual("P02")
male1.set_birthDate(['8', 'SEP', '2000'])
female1.set_birthDate(['8', 'SEP', '2000'])
t1.set_husband(male1)
t1.set_wife(female1)
t1.set_marriedDate(['8', 'SEP', '2014'])
# --------------------------------------------------
t2 = Family("F02")
male2 = Individual("P03")
female2 = Individual("P04")
male2.set_birthDate(['7', 'SEP', '2000'])
female2.set_birthDate(['8', 'SEP', '2000'])
t2.set_husband(male2)
t2.set_wife(female2)
t2.set_marriedDate(['8', 'SEP', '2014'])
# --------------------------------------------------
t3 = Family("F03")
male3 = Individual("P05")
female3 = Individual("P06")
male3.set_birthDate(['8', 'SEP', '2000'])
female3.set_birthDate(['7', 'SEP', '2000'])
t3.set_husband(male3)
t3.set_wife(female3)
t3.set_marriedDate(['8', 'SEP', '2014'])
# --------------------------------------------------
t4 = Family("F04")
male4 = Individual("P07")
female4 = Individual("P08")
male4.set_birthDate(['1', 'SEP', '1990'])
female4.set_birthDate(['2', 'SEP', '1990'])
t4.set_husband(male4)
t4.set_wife(female4)
t4.set_marriedDate(['8', 'SEP', '2014'])
# --------------------------------------------------
t5 = Family("F05")
male5 = Individual("P09")
female5 = Individual("P10")
male5.set_birthDate(['09', 'APR', '1997'])
female5.set_birthDate(['19', 'DEC', '1997'])
t5.set_husband(male5)
t5.set_wife(female5)
t5.set_marriedDate(['1', 'JUN', '2007'])
# --------------------------------------------------
assert t1.marriage_after_14() == False
assert t2.marriage_after_14() == False
assert t3.marriage_after_14() == False
assert t4.marriage_after_14() == True
assert t5.marriage_after_14() == False
def test_US13_siblings_spacing(self):
t1 = Family("t1")
t2 = Family("t2")
t3 = Family("t3")
t4 = Family("t4")
t5 = Family("t5")
t6 = Family("t6")
p1 = Individual("p1")
p1.set_birthDate(("1", "JAN", "1990"))
p2 = Individual("p2")
p2.set_birthDate(("1", "JAN", "1990"))
p3 = Individual("p3")
p3.set_birthDate(("1", "SEP", "1990"))
p4 = Individual("p4")
p4.set_birthDate(("2", "JAN", "1990"))
p5 = Individual("p5")
p5.set_birthDate(("3", "JAN", "1990"))
p6 = Individual("p6")
p6.set_birthDate(("30", "MAY", "1990"))
# --------------------------------------------------
t1.add_child(p1)
t1.add_child(p2)
t2.add_child(p1)
t2.add_child(p3)
t3.add_child(p1)
t3.add_child(p4)
t4.add_child(p1)
t4.add_child(p5)
t5.add_child(p1)
t5.add_child(p6)
t6.add_child(p1)
t6.add_child(p3)
t6.add_child(p6)
# --------------------------------------------------
assert t1.siblings_spacing() == True
assert t2.siblings_spacing() == True
assert t3.siblings_spacing() == True
assert t4.siblings_spacing() == False
assert t5.siblings_spacing() == False
assert t6.siblings_spacing() == False
def test_US14_multiple_births_lessOrEqual_than_5(self):
t1 = Family("t1")
t2 = Family("t2")
t3 = Family("t3")
p1 = Individual("p1")
p1.set_birthDate(("1", "JAN", "1990"))
p2 = Individual("p2")
p2.set_birthDate(("1", "JAN", "1990"))
p3 = Individual("p3")
p3.set_birthDate(("1", "JAN", "1990"))
p4 = Individual("p4")
p4.set_birthDate(("3", "JAN", "1990"))
p5 = Individual("p5")
p5.set_birthDate(("2", "JAN", "1990"))
p6 = Individual("p6")
p6.set_birthDate(("30", "MAY", "1990"))
p7 = Individual("p7")
p7.set_birthDate(("2", "JAN", "1990"))
p8 = Individual("p8")
p8.set_birthDate(("2", "JAN", "1990"))
p9 = Individual("p9")
p9.set_birthDate(("2", "SEP", "1990"))
p10 = Individual("p10")
p10.set_birthDate(("2", "SEP", "1990"))
p11 = Individual("p11")
p11.set_birthDate(("3", "SEP", "1990"))
p12 = Individual("p12")
p12.set_birthDate(("3", "SEP", "1990"))
p13 = Individual("p13")
p13.set_birthDate(("3", "SEP", "1990"))
# -------------------------------
t1.add_child(p1)
t1.add_child(p2)
t1.add_child(p3)
t1.add_child(p4)
t1.add_child(p5)
t1.add_child(p6)
t1.add_child(p7)
t1.add_child(p8)
t2.add_child(p1)
t2.add_child(p2)
t2.add_child(p3)
t2.add_child(p4)
t2.add_child(p5)
t2.add_child(p6)
t3.add_child(p3)
t3.add_child(p4)
t3.add_child(p5)
t3.add_child(p6)
t3.add_child(p7)
t3.add_child(p8)
t3.add_child(p9)
t3.add_child(p10)
t3.add_child(p11)
t3.add_child(p12)
t3.add_child(p13)
# ---------------------------------
assert t1.multiple_births_lessOrEqual_than_5() == False
assert t2.multiple_births_lessOrEqual_than_5() == True
assert t3.multiple_births_lessOrEqual_than_5() == False
def test_US12_parents_not_too_old(self):
t1 = Family("t1")
t2 = Family("t2")
p1 = Individual("p1")
p2 = Individual("p2")
p3 = Individual("p3")
p4 = Individual("p4")
p5 = Individual("p5")
p6 = Individual("p6")
t1.set_wife(p1)
t1.set_husband(p2)
t1.add_child(p3)
t2.set_wife(p4)
t2.set_husband(p5)
t2.add_child(p6)
p1.set_birthDate(["1", "JAN", "1990"])
p2.set_birthDate(["1", "JAN", "1990"])
p4.set_birthDate(["1", "JAN", "1790"])
p5.set_birthDate(["1", "JAN", "1790"])
p3.set_birthDate(["1", "JAN", "2010"])
p6.set_birthDate(["1", "JAN", "2000"])
# ---------------------------------
assert t1.parents_not_too_old() == True
assert t2.parents_not_too_old() == False
def test_US15_Fewer_than_15_siblings(self):
t1 = Family("t1")
t2 = Family("t2")
p1 = Individual("p1")
p2 = Individual("p2")
p3 = Individual("p3")
p4 = Individual("p4")
p5 = Individual("p5")
p6 = Individual("p6")
p7 = Individual("p7")
p8 = Individual("p8")
p9 = Individual("p9")
p10 = Individual("p10")
p11 = Individual("p11")
p12 = Individual("p12")
p13 = Individual("p13")
p14 = Individual("p14")
p15= Individual("p15")
p16= Individual("p16")
p17= Individual("p17")
p18= Individual("p18")
p19= Individual("p19")
p20= Individual("p20")
p21= Individual("p21")
p22 = Individual("p22")
p23 = Individual("p23")
p24 = Individual("p24")
p25= Individual("p25")
p26 = Individual("p26")
p27 = Individual("p27")
p28 = Individual("p28")
# ---------------------------------
t1.add_child(p1)
t1.add_child(p2)
t1.add_child(p3)
t1.add_child(p4)
t1.add_child(p5)
t1.add_child(p6)
t1.add_child(p7)
t1.add_child(p8)
t1.add_child(p9)
t1.add_child(p10)
t1.add_child(p11)
t1.add_child(p12)
t2.add_child(p13)
t2.add_child(p14)
t2.add_child(p15)
t2.add_child(p16)
t2.add_child(p17)
t2.add_child(p18)
t2.add_child(p19)
t2.add_child(p20)
t2.add_child(p21)
t2.add_child(p22)
t2.add_child(p23)
t2.add_child(p24)
t2.add_child(p25)
t2.add_child(p26)
t2.add_child(p27)
t2.add_child(p28)
# ---------------------------------
assert t1.fewer_than_15_siblings() == True
assert t2.fewer_than_15_siblings() == False
def test_US21_Correct_Gender_For_Role(self):
t1 = Family("t1")
t2 = Family("t2")
p1 = Individual("p1")
p2 = Individual("p2")
p3 = Individual("p3")
p4 = Individual("p4")
# ---------------------------------
t1.set_wife(p1)
t1.set_husband(p2)
t2.set_wife(p3)
t2.set_husband(p4)
p1.set_gender('F')
p2.set_gender('M')
p4.set_gender('F')
p3.set_gender('M')
# ---------------------------------
assert t1.correct_gender_for_role() == True
assert t2.correct_gender_for_role() == False
def test_US24_Unique_families_by_spouses(self):
SUPPORT_TAGS = {"INDI", "NAME", "SEX", "BIRT", "DEAT", "FAMC", "FAMS", "FAM", "MARR", "HUSB", "WIFE", "CHIL",
"DIV", "DATE", "HEAD", "TRLR", "NOTE"}
self.G1 = Gedcom('testing_files/Jiashu_Wang.ged', SUPPORT_TAGS)
G2 = Gedcom('testing_files/MichealFahimGEDCOM.ged', SUPPORT_TAGS)
G3 = Gedcom('testing_files/mock-family.ged', SUPPORT_TAGS)
# ---------------------------------
assert self.G1.unique_families_by_spouses() == True
assert G2.unique_families_by_spouses() == True
assert G3.unique_families_by_spouses() == True
def test_US25_Unique_first_names_in_families(self):
SUPPORT_TAGS = {"INDI", "NAME", "SEX", "BIRT", "DEAT", "FAMC", "FAMS", "FAM", "MARR", "HUSB", "WIFE", "CHIL",
"DIV", "DATE", "HEAD", "TRLR", "NOTE"}
self.G1 = Gedcom('testing_files/Jiashu_Wang.ged', SUPPORT_TAGS)
G2 = Gedcom('testing_files/MichealFahimGEDCOM.ged', SUPPORT_TAGS)
G3 = Gedcom('testing_files/mock-family.ged', SUPPORT_TAGS)
# ---------------------------------
assert self.G1.unique_first_names_in_families() == True
assert G2.unique_first_names_in_families() == True
assert G3.unique_first_names_in_families() == True
    def test_US22_UniqueId(self):
        """US22: all individual and family IDs must be unique.

        Intentionally empty here: the check is exercised in the main function.
        """
        pass
        # finished in main function
def test_US23_unique_name_and_birth_date(self):
SUPPORT_TAGS = {"INDI", "NAME", "SEX", "BIRT", "DEAT", "FAMC", "FAMS", "FAM", "MARR", "HUSB", "WIFE", "CHIL",
"DIV", "DATE", "HEAD", "TRLR", "NOTE"}
self.G1 = Gedcom('testing_files/Jiashu_Wang.ged', SUPPORT_TAGS)
G2 = Gedcom('testing_files/MichealFahimGEDCOM.ged', SUPPORT_TAGS)
G3 = Gedcom('testing_files/mock-family.ged', SUPPORT_TAGS)
# --------------------------------------------------
assert self.G1.unique_name_and_birth_date() == True
assert G2.unique_name_and_birth_date() == True
assert G3.unique_name_and_birth_date() == True
def test_US18_Siblings_should_not_marry(self):
t1 = Family("t1")
t2 = Family("t2")
t3 = Family("t3")
t4 = Family("t4")
t5 = Family("t5")
t6 = Family("t6")
p1 = Individual("p1")
p2 = Individual("p2")
p3 = Individual("p3")
p4 = Individual("p4")
# --------------------------------------------------
t1.set_husband(p1)
t1.set_wife(p2)
t4.set_husband(p3)
t4.set_wife(p4)
'''
t2.add_child(p1)
t3.add_child(p2)
t4.set_husband(p3)
t4.set_wife(p4)
t5.add_child(p3)
t5.add_child(p4)
'''
# --------------------------------------------------
p1.set_parentFamily(t2)
p2.set_parentFamily(t3)
p3.set_parentFamily(t5)
p4.set_parentFamily(t5)
# --------------------------------------------------
assert t1.siblings_should_not_marry() == True
#assert t2.siblings_should_not_marry() == True
assert t4.siblings_should_not_marry() == False
#assert t5.siblings_should_not_marry() == False
def test_US19_First_cousins_should_not_marry(self):
t1 = Family("t1")
t2 = Family("t2")
t3 = Family("t3")
t4 = Family("t4")
t5 = Family("t5")
t6 = Family("t6")
t7 = Family("t7")
t8 = Family("t8")
t9 = Family("t9")
p1 = Individual("p1")
p2 = Individual("p2")
p3 = Individual("p3")
p4 = Individual("p4")
p5 = Individual("p5")
p6 = Individual("p6")
p7 = Individual("p7")
p8 = Individual("p8")
# --------------------------------------------------
'''
t1.add_child(p1)
t1.add_child(p2)
t2.set_wife(p1)
t2.add_child(p3)
t3.set_wife(p2)
t3.add_child(p4)
t4.set_husband(p3)
t5.set_wife(p4)
t6.add_child(p5)
t6.add_child(p6)
t7.set_wife(p5)
t8.set_wife(p6)
t7.add_child(p7)
t8.add_child(p8)
t9.set_wife(p7)
t9.set_husband(p8)
'''
# --------------------------------------------------
p3.set_parentFamily(t1)
t1.set_husband(p8)
t1.set_wife(p7)
p8.set_parentFamily(t2)
p7.set_parentFamily(t3)
t2.add_child(p8)
t3.add_child(p7)
assert p3.first_cousins_should_not_marry()==True
#assert p4.first_cousins_should_not_marry()==True
def test_US16_Male_last_names(self):
t1 = Family("t1")
t2 = Family("t2")
t3 = Family("t3")
t4 = Family("t4")
t5 = Family("t5")
t6 = Family("t6")
t7 = Family("t7")
t8 = Family("t8")
t9 = Family("t9")
t10 = Family("t10")
p1 = Individual("p1")
p2 = Individual("p2")
p3 = Individual("p3")
p4 = Individual("p4")
p5 = Individual("p5")
p6 = Individual("p6")
p7 = Individual("p7")
p8 = Individual("p8")
p9 = Individual("p9")
p10 = Individual("p10")
# --------------------------------------------------
t1.set_husband(p1)
t1.add_child(p2)
t1.add_child(p3)
t2.set_husband(p2)
t3.set_husband(p3)
t2.add_child(p4)
t3.add_child(p5)
t4.set_husband(p4)
t5.set_husband(p5)
t6.set_husband(p6)
t6.add_child(p7)
t6.add_child(p8)
t7.set_husband(p7)
t8.set_husband(p8)
t7.add_child(p9)
t8.add_child(p10)
t9.set_husband(p9)
t10.set_husband(p10)
p1.set_gender("M")
p1.set_name("Charles Glass")
p2.set_gender("M")
p2.set_name("Charles Glass")
p3.set_gender("M")
p3.set_name("Charles Glass")
p4.set_gender("M")
p4.set_name("Charles Glass")
p5.set_gender("M")
p5.set_name("Charles Glass")
p6.set_gender("M")
p6.set_name("Charles Glass")
p7.set_gender("M")
p7.set_name("Charles Glass")
p8.set_gender("M")
p8.set_name("Charles WDNMD")
p9.set_gender("M")
p9.set_name("Charles Glass")
p10.set_gender("M")
p10.set_name("Charles Glass")
# --------------------------------------------------
assert t3.male_last_names()==True
assert t8.male_last_names()==False
def test_US17_No_marriages_to_descendants(self):
t1 = Family("t1")
t2 = Family("t2")
t3 = Family("t3")
t4 = Family("t4")
p1 = Individual("p1")
p2 = Individual("p2")
p3 = Individual("p3")
p4 = Individual("p4")
p5 = Individual("p5")
p6 = Individual("p6")
p7 = Individual("p7")
p8 = Individual("p8")
p9 = Individual("p9")
t1.set_husband(p1)
t1.set_wife(p2)
t1.add_child(p3)
t2.set_wife(p3)
t2.set_husband(p4)
t2.add_child(p5)
t3.set_husband(p6)
t3.set_wife(p7)
t3.add_child(p8)
t4.set_husband(p6)
t4.set_wife(p8)
t4.add_child(p9)
# --------------------------------------------------
#assert p3.no_marriages_to_descendants()==True
#assert p6.no_marriages_to_descendants()==False
#assert p8.no_marriages_to_descendants()==True
def test_US27_eInclude_individual_ags(self):
SUPPORT_TAGS = {"INDI", "NAME", "SEX", "BIRT", "DEAT", "FAMC", "FAMS", "FAM", "MARR", "HUSB", "WIFE", "CHIL",
"DIV", "DATE", "HEAD", "TRLR", "NOTE"}
self.G1 = Gedcom('testing_files/Jiashu_Wang.ged', SUPPORT_TAGS)
G2 = Gedcom('testing_files/MichealFahimGEDCOM.ged', SUPPORT_TAGS)
G3 = Gedcom('testing_files/mock-family.ged', SUPPORT_TAGS)
# --------------------------------------------------
'''
assert self.G1.include_individual_ages() == True
assert G2.include_individual_ages() == True
assert G3.include_individual_ages() == True
'''
def test_US28_Order_siblings_by_age(self):
t1 = Family("t1")
t2 = Family("t2")
p1 = Individual("p1")
p1.set_birthDate((1990, 4, 1))
p2 = Individual("p2")
p2.set_birthDate((1990, 1, 1))
p3 = Individual("p3")
p3.set_birthDate((1990, 9, 1))
p4 = Individual("p4")
p4.set_birthDate((1987, 1, 1))
p5 = Individual("p5")
p5.set_birthDate((2019, 1, 1))
p6 = Individual("p6")
p6.set_birthDate((2017, 5, 30))
p7 = Individual("p7")
p7.set_birthDate((2018, 3, 30))
p8 = Individual("p8")
p8.set_birthDate((2019, 8, 30))
# --------------------------------------------------
t1.add_child(p1)
t1.add_child(p2)
t1.add_child(p3)
t1.add_child(p4)
t1.add_child(p5)
t1.add_child(p6)
t2.add_child(p1)
t2.add_child(p2)
t2.add_child(p3)
t2.add_child(p4)
t2.add_child(p7)
t2.add_child(p8)
# --------------------------------------------------
assert t1.order_siblings_by_age() == [p4, p2, p1, p3, p6, p5]
assert t2.order_siblings_by_age() == [p4, p2, p1, p3,p7,p8]
def test_US20_Aunts_and_uncles(self):
t1 = Family("t1")
t2 = Family("t2")
t3 = Family("t3")
t4 = Family("t4")
t5 = Family("t5")
t6 = Family("t6")
t7 = Family("t7")
t8 = Family("t8")
t9 = Family("t9")
t10 = Family("t10")
t11 = Family("t11")
t12 = Family("t12")
p1 = Individual("p1")
p2 = Individual("p2")
p3 = Individual("p3")
p4 = Individual("p4")
p5 = Individual("p5")
p6 = Individual("p6")
p7 = Individual("p7")
p8 = Individual("p8")
p9 = Individual("p9")
p10 = Individual("p10")
p11 = Individual("p11")
# --------------------------------------------------
p11.set_parentFamily(t1)
t1.set_husband(p1)
t1.set_wife(p2)
p1.set_parentFamily(t2)
p2.set_parentFamily(t3)
#t2.set_husband(p3)
#t2.set_wife(p4)
#t3.set_husband(p5)
#t3.set_wife(p6)
t2.set_children([p1, p7, p8])
t3.set_children([p2, p9, p10])
'''
t1.add_child(p3)
t1.add_child(p4)
t2.set_husband(p3)
t3.set_wife(p4)
t2.add_child(p5)
t3.add_child(p6)
t4.set_husband(p7)
t4.set_wife(p8)
t4.add_child(p9)
t4.add_child(p10)
t5.set_husband(p9)
t5.add_child(p11)
t6.set_husband(p10)
t6.set_wife(p11)
'''
# --------------------------------------------------
assert p11.aunts_and_uncles()==True
#assert p10.aunts_and_uncles()==False
def test_US26_Corresponding_entries(self):
SUPPORT_TAGS = {"INDI", "NAME", "SEX", "BIRT", "DEAT", "FAMC", "FAMS", "FAM", "MARR", "HUSB", "WIFE", "CHIL",
"DIV", "DATE", "HEAD", "TRLR", "NOTE"}
self.G1 = Gedcom('testing_files/Jiashu_Wang.ged', SUPPORT_TAGS)
G2 = Gedcom('testing_files/MichealFahimGEDCOM.ged', SUPPORT_TAGS)
G3 = Gedcom('testing_files/mock-family.ged', SUPPORT_TAGS)
# --------------------------------------------------
assert self.G1.corresponding_entries() == True
assert G2.corresponding_entries() == True
assert G3.corresponding_entries() == True
def test_US29_list_deceased(self):
self.assertEqual(self.G1.listDeceased().len(),5 )
self.assertNotEqual(self.G1.listDeceased().len(),3 )
deceasedPeople = []
for indi in deceasedPeople:
self.assertIn(indi, self.G1.listDeceased())
#List all living married people in a GEDCOM file
def test_US30_list_living_married(self):
self.assertEqual(self.G1.listLivingMarried().len(),5 )
self.assertNotEqual(self.G1.listLivingMarried().len(),3 )
marriedPeople = []
for indi in marriedPeople:
self.assertIn(indi, self.G1.listLivingmarried())
#List all living people over 30 who have never been married in a GEDCOM file
def test_US31_list_living_single(self):
self.assertEqual(self.G1.listLivingSingle().len(),5 )
self.assertNotEqual(self.G1.listLivingSingle().len(),3 )
singlePeople = []
for indi in singlePeople:
self.assertIn(indi, self.G1.listLivingSingle())
#List all multiple births in a GEDCOM file
def test_US32_list_multiple_births(self):
self.assertEqual(self.G1.listMultipleBirths().len(),4 )
MultipleBirths = []
for birt in MultipleBirths:
self.assertIn(birt, self.G1.listMultipleBirths())
#List all orphaned children (both parents dead and child < 18 years old) in a GEDCOM file
def test_US33_list_orphans(self):
self.assertEqual(self.G1.listOrphans().len(),4)
OrphansPeople = []
for indi in OrphansPeople:
self.assertIn(indi, self.G1.listOrphans())
#List all couples who were married when the older spouse was more than twice as old as the younger spouse
def test_US34_list_large_age_differences(self):
self.assertEqual(self.G1.listLargeAgeDifferences().len(),4 )
ageDifferences = []
for birt in ageDifferences:
self.assertIn(birt, self.G1.listLargeAgeDifferences())
#List all people in a GEDCOM file who were born in the last 30 days
def test_US35_list_recent_births(self):
self.assertEqual(self.G1.listRecentBirths().len(),5 )
self.assertNotEqual(self.G1.listRecentBirths().len(),3 )
bornPeople =[]
for indi in bornPeople:
self.assertIn(indi, self.G1.listRecentBirths())
#list all people in a GEDCOM file who died in the last 30 days
def test_US36_ListRecentDeaths(self):
self.assertEqual(self.G1.listRecentDeaths().len(), 5)
self.assertNotEqual(self.G1.listRecentDeaths().len(), 3)
#manually input deceased people and append to the array
deceasedProple =[]
for indi in deceasedProple:
self.assertIn(indi, self.G1.listRecentDeaths())
#list all living spouses and descendants of people in the GEDCOM who died in the last 30 days
def test_US37_listRecentSurvivors(self):
self.assertEqual(self.G1.listRecentSurviors().len(),7)
self.assertNotEqual(self.G1.listRecentSurviors().len(), 8)
# manually input deceased people's relatives and append to the array
deceasedProple = []
for indi in deceasedProple:
self.assertIn(indi, self.G1.listRecentSurviors())
#list all living people in a GEDCOM file whose birthdays occur in the next 30 days
def test_US38_listUpcomingBirthdays(self):
self.assertEqual(self.G1.listUpcomingBirthdays().len(),6)
#manually input people with birthdays
birthdayPeople =[]
for indi in birthdayPeople:
self.assertIn(indi, self.G1.listUpcomingBirthdays())
# list all living people in a GEDCOM file whose marriage anniversaries occur in the next 30 days
def test_US39_UpcomingAnniversaries(self):
self.assertEqual(self.G1.upcomingAnniversaries().len(),4)
#manually input individuals who have anniversaries coming up
AnniversaryIndi = []
for indi in AnniversaryIndi:
self.assertIn(indi, self.G1.upcomingAnniversaries())
# list line numbers from GEDCOM source file when reporting errors
def test_US40_includeInputLineNumbers(self):
self.assertEqual(self.G1.includeInputLineNumbers().len(), 2)
self.assertTrue(self.G1.includeInputLineNumbers() == ['20','25'])
self.assertTrue(self.G2.includeInputLineNumbers() == ['15'])
# Accept and use dates without days or without days and months
    def test_US41_IncludePartialDates(self):
        """US41: dates without days, or without days and months, are accepted."""
        # NOTE(review): assumes self.G1 was assigned by an earlier test method,
        # i.e. depends on test execution order -- fragile; confirm.
        self.assertTrue(self.G1.IncludePartialDates())
# All dates should be legitimate dates for the months specified(e.g. 2/30/2015 is not legitimate)
    def test_US42_RejectIllegitimateDates(self):
        """US42: impossible calendar dates (e.g. 2/30/2015) must be rejected."""
        self.assertTrue(self.G1.rejectIllegitimateDates())
        # NOTE(review): self.G2 appears to never be assigned in this suite
        # (other tests keep G2 local) -- verify this attribute exists.
        self.assertFalse(self.G2.rejectIllegitimateDates())
    def testInputValidation(self):
        """Placeholder for input-validation checks (not yet implemented)."""
        pass
# Run every TestCase in this module when the file is executed as a script.
if __name__ == '__main__':
    print('Running unit tests')
    unittest.main()
a6a5d4525cdd7bb5aa80c9c5bcf3a0e256af0a40 | 9,307 | py | Python | energyPATHWAYS/supply_classes.py | Dhruv325/energypath-hub | 0b6b5d1a40faf5a686f5b479d61b9971084494fd | [
"MIT"
] | null | null | null | energyPATHWAYS/supply_classes.py | Dhruv325/energypath-hub | 0b6b5d1a40faf5a686f5b479d61b9971084494fd | [
"MIT"
] | null | null | null | energyPATHWAYS/supply_classes.py | Dhruv325/energypath-hub | 0b6b5d1a40faf5a686f5b479d61b9971084494fd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 09:36:07 2015
@author: Ben
"""
from shared_classes import Stock, StockItem, SpecifiedStock
from datamapfunctions import DataMapFunctions, Abstract
import util
import numpy as np
import config as cfg
class SupplyStock(Stock, StockItem):
def __init__(self, id, drivers, sql_id_table='SupplyStock', sql_data_table='SupplyStockData',
primary_key='node_id', **kwargs):
Stock.__init__(self, id, drivers, sql_id_table='SupplyStock', sql_data_table='SupplyStockData',
primary_key='node_id', **kwargs)
StockItem.__init__(self)
def return_stock_slice(self, elements):
group = self.specified.loc[elements].transpose()
return group
class SupplySales(Abstract, DataMapFunctions):
def __init__(self, id, supply_node_id, sql_id_table, sql_data_table, primary_key, data_id_key, reference=False, scenario=None):
self.id = id
self.input_type = 'total'
self.supply_node_id = supply_node_id
self.sql_id_table = sql_id_table
self.sql_data_table = sql_data_table
self.scenario = scenario
self.mapped = False
if reference:
for col, att in util.object_att_from_table(self.sql_id_table, self.supply_node_id, primary_key):
setattr(self, col, att)
DataMapFunctions.__init__(self, data_id_key)
self.read_timeseries_data(supply_node_id=self.supply_node_id)
self.raw_values = util.remove_df_levels(self.raw_values, 'supply_technology')
else:
# measure specific sales does not require technology filtering
Abstract.__init__(self, self.id, primary_key=primary_key, data_id_key=data_id_key)
def calculate(self, vintages, years, interpolation_method=None, extrapolation_method=None):
self.vintages = vintages
self.years = years
self.remap(time_index_name='vintage',fill_timeseries=True, interpolation_method=interpolation_method, extrapolation_method=extrapolation_method, fill_value=np.nan)
self.convert()
def convert(self):
model_energy_unit = cfg.calculation_energy_unit
model_time_step = cfg.cfgfile.get('case', 'time_step')
if self.time_unit is not None:
# if sales has a time_unit, then the unit is energy and must be converted to capacity
self.values = util.unit_convert(self.values, unit_from_num=self.capacity_or_energy_unit,
unit_from_den=self.time_unit, unit_to_num=model_energy_unit,
unit_to_den=model_time_step)
else:
# if sales is a capacity unit, the model must convert the unit type to an energy unit for conversion ()
self.values = util.unit_convert(self.values, unit_from_num=cfg.ureg.Quantity(self.capacity_or_energy_unit)
* cfg.ureg.Quantity(model_time_step),
unit_from_den=model_time_step,
unit_to_num=model_energy_unit,
unit_to_den=model_time_step)
def reconcile_with_stock_levels(self, needed_sales_share_levels, needed_sales_names):
if not set(needed_sales_names).issubset(self.values.index.names):
# we can't have more specificity in sales share than in stock
raise ValueError('Sales share expressed as an intensity cannot have levels not in stock')
# pick up extra levels
self.values = util.expand_multi(self.values, needed_sales_share_levels,
needed_sales_names).sort_index()
class SupplySalesShare(Abstract, DataMapFunctions):
def __init__(self, id, supply_node_id, sql_id_table, sql_data_table, primary_key, data_id_key, reference=False, scenario=None):
self.id = id
self.supply_node_id = supply_node_id
self.sql_id_table = sql_id_table
self.sql_data_table = sql_data_table
self.scenario = scenario
self.mapped = False
self.input_type = 'intensity'
if reference:
for col, att in util.object_att_from_table(self.sql_id_table, self.supply_node_id, primary_key):
if att is not None:
setattr(self, col, att)
DataMapFunctions.__init__(self, data_id_key)
self.read_timeseries_data(supply_node_id=self.supply_node_id)
self.raw_values = util.remove_df_levels(self.raw_values, ['supply_node', 'supply_technology'])
else:
# measure specific sales share does not require technology filtering
Abstract.__init__(self, self.id, primary_key=primary_key, data_id_key=data_id_key)
def calculate(self, vintages, years):
self.vintages = vintages
self.years = years
self.remap(time_index_name='vintage')
def reconcile_with_stock_levels(self, needed_sales_share_levels, needed_sales_share_names):
if self.input_type == 'intensity':
if not set(self.values.index.names).issubset(needed_sales_share_names):
# we can't have more specificity in sales share than in stock
raise ValueError('Sales share expressed as an intensity cannot have levels not in stock')
# pick up extra levels
self.values = util.expand_multi(self.values, needed_sales_share_levels,
needed_sales_share_names).sort_index()
self.values.fillna(0, inplace=True)
elif self.input_type == 'total':
raise ValueError(
'A sales share type of total is not currently supported. Please normalize to sales share as a percentage')
# if not set(sales_share.values.index.names).issubset(stock.values.index.names):
# we have extra salesshare levels and we need to do a groupby sum
# sales_share.values = sales_share.values.groupby(level=needed_sales_share_levels).sum()
# todo: add logic here so that if stock and service demand
# has more specificity than sales share, we raise an exception
@staticmethod
def scale_reference_array_to_gap(ss_array, space_for_reference):
num_years, num_techs, num_techs = np.shape(ss_array)
ref_sums = np.sum(ss_array, axis=1)
# ignore where no reference is specified to avoid dividing by zero
vintage_no_ref, retiring_no_ref = np.nonzero(ref_sums)
factors = np.zeros(np.shape(ref_sums))
factors[vintage_no_ref, retiring_no_ref] += space_for_reference[vintage_no_ref, retiring_no_ref] / ref_sums[
vintage_no_ref, retiring_no_ref]
factors = np.reshape(np.repeat(factors, num_techs, axis=0), (num_years, num_techs, num_techs))
# gross up reference sales share with the need
return ss_array * factors
@staticmethod
def normalize_array(ss_array, retiring_must_have_replacement=True):
# Normalize to 1
sums = np.sum(ss_array, axis=1)
if np.any(sums == 0) and retiring_must_have_replacement:
raise ValueError('Every retiring technology must have a replacement specified in sales share')
# indicies needing scaling
vintage, retiring = np.nonzero(sums != 1)
# normalize all to 1
ss_array[vintage, :, retiring] = (ss_array[vintage, :, retiring].T / sums[vintage, retiring]).T
return ss_array
@staticmethod
def cap_array_at_1(ss_array):
# Normalize down to 1
sums = np.sum(ss_array, axis=1)
vintage, retiring = np.nonzero(sums > 1)
# normalize those greater than 1
ss_array[vintage, :, retiring] = (ss_array[vintage, :, retiring].T / sums[vintage, retiring]).T
return ss_array
class SupplySpecifiedStock(SpecifiedStock):
def __init__(self, id, sql_id_table, sql_data_table, scenario):
SpecifiedStock.__init__(self, id, sql_id_table, sql_data_table, scenario)
def convert(self):
"""
convert values to model currency and capacity (energy_unit/time_step)
"""
if self.values is not None:
model_energy_unit = cfg.calculation_energy_unit
model_time_step = cfg.cfgfile.get('case', 'time_step')
if self.time_unit is not None:
self.values = util.unit_convert(self.values, unit_from_num=self.capacity_or_energy_unit,
unit_from_den=self.time_unit, unit_to_num=model_energy_unit,
unit_to_den=model_time_step)
else:
self.values = util.unit_convert(self.values, unit_from_num=cfg.ureg.Quantity(self.capacity_or_energy_unit)
* cfg.ureg.Quantity(model_time_step),
unit_from_den = model_time_step,
unit_to_num=model_energy_unit,
unit_to_den=model_time_step)
| 51.137363 | 171 | 0.640701 | 8,992 | 0.966155 | 0 | 0 | 1,633 | 0.175459 | 0 | 0 | 1,777 | 0.190932 |
a6a6431d51a235cb7ff69e63e81efdad25385e1c | 2,485 | py | Python | 3.4 Knapsack Problem (Recursion with Memorization).py | INOS-soft/MOmmentum-SECList | 779db12933a5c351c3a5f3a3bc70d5f122033aba | [
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | null | null | null | 3.4 Knapsack Problem (Recursion with Memorization).py | INOS-soft/MOmmentum-SECList | 779db12933a5c351c3a5f3a3bc70d5f122033aba | [
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | null | null | null | 3.4 Knapsack Problem (Recursion with Memorization).py | INOS-soft/MOmmentum-SECList | 779db12933a5c351c3a5f3a3bc70d5f122033aba | [
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | 1 | 2021-04-20T18:57:55.000Z | 2021-04-20T18:57:55.000Z | """
2.Question 2
This problem also asks you to solve a knapsack instance, but a much bigger one.
This file (knapsack_big.txt) describes a knapsack instance, and it has the following format:
[knapsack_size][number_of_items]
[value_1] [weight_1]
[value_2] [weight_2]
...
For example, the third line of the file is "50074 834558", indicating that the second item has value 50074 and size 834558, respectively. As before, you should assume that item weights and the knapsack capacity are integers.
This instance is so big that the straightforward iterative implemetation uses an infeasible amount of time and space. So you will have to be creative to compute an optimal solution. One idea is to go back to a recursive implementation, solving subproblems --- and, of course, caching the results to avoid redundant work --- only on an "as needed" basis. Also, be sure to think about appropriate data structures for storing and looking up solutions to subproblems.
In the box below, type in the value of the optimal solution.
ADVICE: If you're not getting the correct answer, try debugging your algorithm using some small test cases. And then post them to the discussion forum!
"""
import sys
sys.setrecursionlimit(10**6) # set larger limit of recursion
def dataReader(filePath):
with open(filePath) as f:
data = f.readlines()
size, numItems = list(map(int, data[0].split()))
values, weights = [], []
for i in range(1, len(data)):
v, w = list(map(int, data[i].split()))
values.append(v)
weights.append(w)
return size, numItems, values, weights
def knapsackMemorization(size, numItems, values, weights):
# use recursion with memorization to calculate the "needed" values instead of every single value
def helper(size, numItems):
if size < 0: return None
if (numItems, size) in dp.keys():
return dp[(numItems, size)]
op1 = helper(size - weights[numItems - 1], numItems - 1)
op2 = helper(size, numItems - 1)
dp[(numItems, size)] = max(op1 + values[numItems - 1], op2) if op1 != None else op2
return dp[(numItems, size)]
# use dict instead of list to make better usage of memory
dp = {}
for i in range(size + 1):
dp[(0, i)] = 0
return helper(size, numItems)
def main():
filePath = "data/knapsack_big.txt"
size, numItems, values, weights = dataReader(filePath)
ans = knapsackMemorization(size, numItems, values, weights)
print(ans)
if __name__ == "__main__":
main()
| 36.014493 | 464 | 0.711871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,424 | 0.573038 |
a6a650997a0e40d341164565e71707c592c13050 | 1,372 | py | Python | src/producto18.py | alonsosilvaallende/Datos-COVID19 | a52b586ce0c9eb41a3f7443a164402124ffef504 | [
"MIT"
] | null | null | null | src/producto18.py | alonsosilvaallende/Datos-COVID19 | a52b586ce0c9eb41a3f7443a164402124ffef504 | [
"MIT"
] | null | null | null | src/producto18.py | alonsosilvaallende/Datos-COVID19 | a52b586ce0c9eb41a3f7443a164402124ffef504 | [
"MIT"
] | null | null | null | '''
MIT License
Copyright (c) 2020 Sebastian Cornejo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import csv
import pandas as pd
from os import listdir
if __name__ == '__main__':
# producto 18: tasa de incidencia total e histórica
df = pd.read_csv('../input/Tasadeincidencia.csv')
df.dropna(how='all', inplace=True)
df.to_csv('../output/producto18/TasadeIncidencia.csv') | 41.575758 | 78 | 0.78207 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,222 | 0.890022 |
a6a7113e619a2809e17b57f976bcac4480ea4305 | 2,125 | py | Python | src/linebot/app.py | zaurus-yusya/AtCoderStalker | e3ab389f00995d752d87710c03da905ed89554c4 | [
"MIT"
] | 7 | 2021-07-11T15:50:52.000Z | 2021-09-18T15:49:27.000Z | src/linebot/app.py | zaurus-yusya/AtCoderStalker | e3ab389f00995d752d87710c03da905ed89554c4 | [
"MIT"
] | 2 | 2020-05-30T16:18:33.000Z | 2020-05-30T16:23:40.000Z | src/linebot/app.py | zaurus-yusya/AtCoderStalker | e3ab389f00995d752d87710c03da905ed89554c4 | [
"MIT"
] | null | null | null | import json
import urllib.request
from linebot import (LineBotApi, WebhookHandler)
from linebot.models import (MessageEvent, TextMessage, PostbackEvent, FollowEvent, UnfollowEvent)
from linebot.exceptions import (LineBotApiError, InvalidSignatureError)
import os
import sys
import logging
import boto3
from boto3.dynamodb.conditions import Key
#他ファイルimport
from textmessage import textmessage
from postbackevent import postbackevent
import dynamodbfunctions
dynamodb = boto3.resource('dynamodb')
#channelの環境変数読み込み
channel_secret = os.environ["CHANNEL_SECRET"]
channel_access_token = os.environ["ACCESS_TOKEN"]
#値がなかったら実行終了
if (channel_secret is None) or (channel_access_token is None):
sys.exit(1)
line_bot_api = LineBotApi(channel_access_token)
handler = WebhookHandler(channel_secret)
def lambda_handler(event, context):
#署名の検証
if "x-line-signature" in event["headers"]:
signature = event["headers"]["x-line-signature"]
elif "X-Line-Signature" in event["headers"]:
signature = event["headers"]["X-Line-Signature"]
body = event["body"]
#テキストメッセージ受信時
@handler.add(MessageEvent, message=TextMessage)
def message(line_event):
textmessage(line_event, line_bot_api)
#ポストバック時
@handler.add(PostbackEvent)
def message(line_event):
postbackevent(line_event, line_bot_api)
#友だち追加時
@handler.add(FollowEvent)
def handle_follow(line_event):
line_user_id = line_event.source.user_id
#IDは昇順で連番 DBから割り振るユーザーIDを取得してから登録
user_id = dynamodbfunctions.get_new_user_id(os.environ["LINE_USER_TABLE"])
dynamodbfunctions.user_regist(line_user_id, user_id)
#友だち削除・ブロック時
@handler.add(UnfollowEvent)
def handle_unfollow(line_event):
line_user_id = line_event.source.user_id
dynamodbfunctions.user_delete(line_user_id)
try:
handler.handle(body, signature)
except LineBotApiError as e:
return{
'statusCode': 400,
}
except InvalidSignatureError:
return{
'statusCode': 400,
}
return{
'statusCode': 200,
}
| 28.716216 | 97 | 0.722353 | 0 | 0 | 0 | 0 | 738 | 0.319619 | 0 | 0 | 511 | 0.221308 |
a6a8e49393235dbee8fb04867d787d7bb8bf15f1 | 1,918 | py | Python | setup.py | burhan/solace | 40d2bc025ac3a78e67602f374c32355badafb4d2 | [
"BSD-3-Clause"
] | null | null | null | setup.py | burhan/solace | 40d2bc025ac3a78e67602f374c32355badafb4d2 | [
"BSD-3-Clause"
] | null | null | null | setup.py | burhan/solace | 40d2bc025ac3a78e67602f374c32355badafb4d2 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Solace
======
*a multilingual support system*
Solace is a multilingual support system developed at Plurk
for end user support. The application design is heavily
influenced by bulletin boards like phpBB and the new
stackoverflow programming community site.
For more information consult the `README` file or have a
look at the `website <http://opensource.plurk.com/solace/>`_.
"""
# we require setuptools because of dependencies and testing.
# we may provide a distutils fallback later.
from setuptools import setup
extra = {}
try:
import babel
except ImportError:
pass
else:
extra['message_extractors'] = {
'solace': [
('**.py', 'python', None),
('**/templates/**', 'jinja2', None),
('**.js', 'javascript', None)
]
}
try:
from solace import scripts
except ImportError:
pass
else:
extra['cmdclass'] = {
'runserver': scripts.RunserverCommand,
'initdb': scripts.InitDatabaseCommand,
'reset': scripts.ResetDatabaseCommand,
'make_testdata': scripts.MakeTestDataCommand,
'compile_catalog': scripts.CompileCatalogExCommand,
'compress_deps': scripts.CompressDependenciesCommand
}
setup(
name='Solace',
version='0.2',
license='BSD',
author='Armin Ronacher',
author_email='armin.ronacher@active-4.com',
description='Multilangual User Support Platform',
long_description=__doc__,
packages=['solace', 'solace.views', 'solace.i18n', 'solace.utils'],
zip_safe=False,
platforms='any',
test_suite='solace.tests.suite',
install_requires=[
'Werkzeug>=0.5.1',
'Jinja2>=2.4',
'Babel',
'SQLAlchemy>=0.5.5',
'creoleparser',
'simplejson',
'translitcodec'
],
tests_require=[
'lxml',
'html5lib'
], **extra
)
| 25.236842 | 71 | 0.624609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 970 | 0.505735 |
a6a8e6192908dea4aad06b73839c0578612b9ce2 | 4,583 | py | Python | relation_engine/taxa/ncbi/loaders/ncbi_taxa_delta_loader.py | jayrbolton/arangodb_biochem_importer | b1c3eb16908ce47bf4c0b2ed792262612b6a019b | [
"MIT"
] | null | null | null | relation_engine/taxa/ncbi/loaders/ncbi_taxa_delta_loader.py | jayrbolton/arangodb_biochem_importer | b1c3eb16908ce47bf4c0b2ed792262612b6a019b | [
"MIT"
] | 17 | 2019-07-15T16:55:22.000Z | 2021-11-02T18:49:56.000Z | relation_engine/taxa/ncbi/loaders/ncbi_taxa_delta_loader.py | jayrbolton/arangodb_biochem_importer | b1c3eb16908ce47bf4c0b2ed792262612b6a019b | [
"MIT"
] | 6 | 2019-08-05T17:02:22.000Z | 2021-05-13T15:52:11.000Z | #!/usr/bin/env python
# TODO TEST
import argparse
import getpass
import os
from arango import ArangoClient
from relation_engine.taxa.ncbi.parsers import NCBINodeProvider
from relation_engine.taxa.ncbi.parsers import NCBIEdgeProvider
from relation_engine.taxa.ncbi.parsers import NCBIMergeProvider
from relation_engine.batchload.delta_load import load_graph_delta
from relation_engine.batchload.time_travelling_database import ArangoBatchTimeTravellingDB
_LOAD_NAMESPACE = 'ncbi_taxa'
NAMES_IN_FILE = 'names.dmp'
NODES_IN_FILE = 'nodes.dmp'
MERGED_IN_FILE = 'merged.dmp'
def parse_args():
parser = argparse.ArgumentParser(description="""
Load a NCBI taxonomy dump into an ArangoDB time travelling database, calculating and applying the
changes between the prior load and the current load, and retaining the prior load.
""".strip())
parser.add_argument('--dir', required=True,
help='the directory containing the unzipped dump files')
parser.add_argument(
'--arango-url',
required=True,
help='The url of the ArangoDB server (e.g. http://localhost:8528')
parser.add_argument(
'--database',
required=True,
help='the name of the ArangoDB database that will be altered')
parser.add_argument(
'--user',
help='the ArangoDB user name; if --pwd-file is not included a password prompt will be ' +
'presented. Omit to connect with default credentials.')
parser.add_argument(
'--pwd-file',
help='the path to a file containing the ArangoDB password and nothing else; ' +
'if --user is included and --pwd-file is omitted a password prompt will be presented.')
parser.add_argument(
'--load-registry-collection',
required=True,
help='the name of the ArangoDB collection where the load will be registered. ' +
'This is typically the same collection for all delta loaded data.')
parser.add_argument(
'--node-collection',
required=True,
help='the name of the ArangoDB collection into which taxa nodes will be loaded')
parser.add_argument(
'--edge-collection',
required=True,
help='the name of the ArangoDB collection into which taxa edges will be loaded')
parser.add_argument(
'--merge-edge-collection',
required=True,
help='the name of the ArangoDB collection into which merge edges will be loaded')
parser.add_argument(
'--load-version',
required=True,
help='the version of this load. This version will be added to a field in the nodes and ' +
'edges and will be used as part of the _key field.')
parser.add_argument(
'--load-timestamp',
type=int,
required=True,
help='the timestamp to be applied to the load, in unix epoch milliseconds. Any nodes ' +
'or edges created in this load will start to exist with this time stamp. ' +
'NOTE: the user is responsible for ensuring this timestamp is greater than any ' +
'other timestamps previously used to load data into the NCBI taxonomy DB.')
parser.add_argument(
'--release-timestamp',
type=int,
required=True,
help='the timestamp, in unix epoch milliseconds, when the data was released ' +
'at the source.')
return parser.parse_args()
def main():
a = parse_args()
nodes = os.path.join(a.dir, NODES_IN_FILE)
names = os.path.join(a.dir, NAMES_IN_FILE)
merged = os.path.join(a.dir, MERGED_IN_FILE)
client = ArangoClient(hosts=a.arango_url)
if a.user:
if a.pwd_file:
with open(a.pwd_file) as pwd_file:
pwd = pwd_file.read().strip()
else:
pwd = getpass.getpass()
db = client.db(a.database, a.user, pwd, verify=True)
else:
db = client.db(a.database, verify=True)
attdb = ArangoBatchTimeTravellingDB(
db,
a.load_registry_collection,
a.node_collection,
default_edge_collection=a.edge_collection,
merge_collection=a.merge_edge_collection)
with open(nodes) as in1, open(names) as namesfile, open(nodes) as in2, open(merged) as merge:
nodeprov = NCBINodeProvider(namesfile, in1)
edgeprov = NCBIEdgeProvider(in2)
merge = NCBIMergeProvider(merge)
load_graph_delta(_LOAD_NAMESPACE, nodeprov, edgeprov, attdb,
a.load_timestamp, a.release_timestamp, a.load_version, merge_source=merge)
if __name__ == '__main__':
main()
| 38.512605 | 99 | 0.670521 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,827 | 0.398647 |
a6a93fa998370452499035e80f03dcb57488747c | 516 | py | Python | listwords.py | corbinmcneill/bonkbot | 5d355d9b8d2377176fc8ec317a7cba03ff3bb37a | [
"MIT"
] | 1 | 2020-12-07T06:58:38.000Z | 2020-12-07T06:58:38.000Z | listwords.py | corbinmcneill/bonkbot | 5d355d9b8d2377176fc8ec317a7cba03ff3bb37a | [
"MIT"
] | 1 | 2021-01-06T06:36:11.000Z | 2021-01-06T09:06:15.000Z | listwords.py | corbinmcneill/bonkbot | 5d355d9b8d2377176fc8ec317a7cba03ff3bb37a | [
"MIT"
] | 2 | 2021-01-06T06:34:42.000Z | 2021-01-28T08:41:40.000Z | #!/usr/bin/env python3
import discord
import config
import util
from functools import reduce
from handler import Handler
class ListWordsHandler(Handler):
name = "listwords"
async def message_handler(self, message, jail, bonkbot):
print("Starting listwords handler")
if self.cf.get("list_words_trigger_phrase") in message.content.lower() and util.is_mentioned(message, bonkbot):
await message.channel.send(util.list_trigger_words())
return True
return False
| 28.666667 | 119 | 0.718992 | 392 | 0.75969 | 0 | 0 | 0 | 0 | 331 | 0.641473 | 88 | 0.170543 |
a6ab15c2903f3b2f130c56a38afb23e92c3c2863 | 12,984 | py | Python | threedi_custom_stats/presets.py | threedi/beta-plugins | 530a5542deda73201626f7a429f87ce64cbac51a | [
"MIT"
] | 1 | 2022-02-14T10:31:51.000Z | 2022-02-14T10:31:51.000Z | threedi_custom_stats/presets.py | threedi/beta-plugins | 530a5542deda73201626f7a429f87ce64cbac51a | [
"MIT"
] | 11 | 2019-04-08T14:11:45.000Z | 2021-07-02T14:28:04.000Z | threedi_custom_stats/presets.py | threedi/beta-plugins | 530a5542deda73201626f7a429f87ce64cbac51a | [
"MIT"
] | null | null | null | from typing import List
try:
from .threedi_result_aggregation import *
# from .aggregation_classes import *
# from .constants import *
from .style import *
except ImportError:
from threedi_result_aggregation import *
# from constants import *
from style import *
class Preset:
def __init__(self,
name: str,
description: str = '',
aggregations=None,
resample_point_layer: bool = False,
flowlines_style: Style = None,
cells_style: Style = None,
nodes_style: Style = None,
flowlines_style_param_values: dict = None,
cells_style_param_values: dict = None,
nodes_style_param_values: dict = None
):
if aggregations is None:
aggregations = list()
self.name = name
self.description = description
self.__aggregations = aggregations
self.resample_point_layer = resample_point_layer
self.flowlines_style = flowlines_style
self.cells_style = cells_style
self.nodes_style = nodes_style
self.flowlines_style_param_values = flowlines_style_param_values
self.cells_style_param_values = cells_style_param_values
self.nodes_style_param_values = nodes_style_param_values
def add_aggregation(self, aggregation: Aggregation):
self.__aggregations.append(aggregation)
def aggregations(self):
return self.__aggregations
# No preset selected
NO_PRESET = Preset(name='(no preset selected)',
aggregations=[]
)
# Maximum water level
max_wl_aggregations = [Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('s1'),
method=AGGREGATION_METHODS.get_by_short_name('max'),
)
]
MAX_WL_PRESETS = Preset(name='Maximum water level',
description='Calculates the maximum water level for nodes and cells within the chosen '
'time filter.',
aggregations=max_wl_aggregations,
nodes_style=STYLE_SINGLE_COLUMN_GRADUATED_NODE,
cells_style=STYLE_SINGLE_COLUMN_GRADUATED_CELL,
nodes_style_param_values={'column': 's1_max'},
cells_style_param_values={'column': 's1_max'}
)
# Change in water level
change_wl_aggregations = [Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('s1'),
method=AGGREGATION_METHODS.get_by_short_name('first'),
),
Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('s1'),
method=AGGREGATION_METHODS.get_by_short_name('last'),
),
Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('s1'),
method=AGGREGATION_METHODS.get_by_short_name('min'),
),
Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('s1'),
method=AGGREGATION_METHODS.get_by_short_name('max'),
)
]
CHANGE_WL_PRESETS = Preset(name='Change in water level',
description='Calculates the difference in water level (last - first). In the styling '
'NULL values (when the cell is dry) are replaced by the cells lowest '
'pixel elevation (z_coordinate).',
aggregations=change_wl_aggregations,
cells_style=STYLE_CHANGE_WL,
cells_style_param_values={'first': 's1_first', 'last': 's1_last'}
)
# Flow pattern
flow_pattern_aggregations = [Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('q_out_x'),
method=AGGREGATION_METHODS.get_by_short_name('sum'),
),
Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('q_out_y'),
method=AGGREGATION_METHODS.get_by_short_name('sum'),
)]
FLOW_PATTERN_PRESETS = Preset(name='Flow pattern',
description='Generates a flow pattern map. The aggregation calculates total outflow per '
'node in x and y directions, resampled to grid_space. In the styling that is '
'applied, the shade of blue and the rotation of the arrows are based on the '
'resultant of these two.\n\n'
'To save the output to disk, save to GeoPackage (Export > Save features as),'
'copy the styling to the new layer (Styles > Copy Style / Paste Style). Then '
'save the styling as default in the GeoPackage (Properties > Style > Save as '
'Default > Save default style to Datasource Database). ',
aggregations=flow_pattern_aggregations,
resample_point_layer=True,
nodes_style=STYLE_VECTOR,
nodes_style_param_values={'x': 'q_out_x_sum',
'y': 'q_out_y_sum'}
)
# Timestep reduction analysis
ts_reduction_analysis_aggregations = [Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('ts_max'),
method=AGGREGATION_METHODS.get_by_short_name('below_thres'),
threshold=1.0
),
Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('ts_max'),
method=AGGREGATION_METHODS.get_by_short_name('below_thres'),
threshold=3.0
),
Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('ts_max'),
method=AGGREGATION_METHODS.get_by_short_name('below_thres'),
threshold=5.0
)]
TS_REDUCTION_ANALYSIS_PRESETS = Preset(name='Timestep reduction analysis',
description='Timestep reduction analysis calculates the % of time that the flow '
'through each flowline limits the calculation timestep to below 1, '
'3, '
'or 5 seconds. \n\n'
'The styling highlights the flowlines that have a timestep of \n'
' < 1 s for 10% of the time and/or\n'
' < 3 s for 50% of the time and/or\n'
' < 5 s for 80% of the time;'
'\n\n'
'Replacing these flowlines with orifices may speed up the '
'simulation '
'without large impact on the results. Import the highlighted lines '
'from the aggregation result into your 3Di spatialite as '
'\'ts_reducers\' and use this query to replace line elements ('
'example '
'for v2_pipe):\n\n'
'-- Add orifice:\n'
'INSERT INTO v2_orifice(display_name, code, crest_level, sewerage, '
'cross_section_definition_id, friction_value, friction_type, '
'discharge_coefficient_positive, discharge_coefficient_negative, '
'zoom_category, crest_type, connection_node_start_id, '
'connection_node_end_id)\n'
'SELECT display_name, code, max(invert_level_start_point, '
'invert_level_end_point) AS crest_level, TRUE AS sewerage, '
'cross_section_definition_id, friction_value, friction_type, '
'1 AS discharge_coefficient_positive, '
'1 AS discharge_coefficient_negative, zoom_category, '
'4 AS crest_type, '
'connection_node_start_id, connection_node_end_id\n'
'FROM v2_pipe\n'
'WHERE id IN (SELECT spatialite_id FROM ts_reducers WHERE '
'content_type=\'v2_pipe\');\n\n'
'-- Remove pipe\n'
'DELETE FROM v2_pipe WHERE id IN (SELECT spatialite_id FROM '
'ts_reducers WHERE content_type=\'v2_pipe\');',
aggregations=ts_reduction_analysis_aggregations,
flowlines_style=STYLE_TIMESTEP_REDUCTION_ANALYSIS,
flowlines_style_param_values={'col1': 'ts_max_below_thres_1_0',
'col2': 'ts_max_below_thres_3_0',
'col3': 'ts_max_below_thres_5_0'
}
)
# Source or sink (mm)
source_sink_mm_aggregations = [Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('rain_depth'),
method=AGGREGATION_METHODS.get_by_short_name('sum')
),
Aggregation(
variable=AGGREGATION_VARIABLES.get_by_short_name('infiltration_rate_simple_mm'),
method=AGGREGATION_METHODS.get_by_short_name('sum')
),
Aggregation(variable=AGGREGATION_VARIABLES.get_by_short_name('intercepted_volume_mm'),
method=AGGREGATION_METHODS.get_by_short_name('last')
)
]
SOURCE_SINK_MM_PRESETS = Preset(name='Source or sink (mm)',
description='Calculate by how many mm a node or cell is a net source or sink.'
'A positive results indicates a source, negative result a sink.',
aggregations=source_sink_mm_aggregations,
cells_style=STYLE_BALANCE,
cells_style_param_values={'positive_col1': 'rain_depth_sum',
'positive_col2': '',
'positive_col3': '',
'negative_col1': 'infiltration_rate_simple_mm_sum',
'negative_col2': 'intercepted_volume_mm_last',
'negative_col3': '',
}
)
PRESETS = [NO_PRESET, MAX_WL_PRESETS, CHANGE_WL_PRESETS, SOURCE_SINK_MM_PRESETS, FLOW_PATTERN_PRESETS,
TS_REDUCTION_ANALYSIS_PRESETS]
| 64.277228 | 120 | 0.456716 | 1,249 | 0.096195 | 0 | 0 | 0 | 0 | 0 | 0 | 3,380 | 0.26032 |
a6ae30671d32b4a1a187c08b2e996358d70fedaa | 4,776 | py | Python | script/build_template.py | lzztt/bbs.backend | ca83ba8badfd7cbdf85c75323aa9df751b5d11fe | [
"MIT"
] | null | null | null | script/build_template.py | lzztt/bbs.backend | ca83ba8badfd7cbdf85c75323aa9df751b5d11fe | [
"MIT"
] | null | null | null | script/build_template.py | lzztt/bbs.backend | ca83ba8badfd7cbdf85c75323aa9df751b5d11fe | [
"MIT"
] | 1 | 2018-09-18T02:14:34.000Z | 2018-09-18T02:14:34.000Z | #! python3
from sys import argv
from pathlib import Path
from re import compile
from enum import Enum
from inflection import camelize
RE_CLASS = compile(r'^use [a-zA-Z_][a-zA-Z0-9_\\]*;$')
RE_PARAM = compile(r'^[a-zA-Z_][a-zA-Z0-9_]* \$[a-zA-Z_][a-zA-Z0-9_]*,?$')
PARAM_BEGIN = r'function ('
PARAM_END = r') {'
PARAM_EMPTY = r'function () {'
HEAD_END = r'?>'
FOOT_BEGIN = r'<?php'
FOOT_END = r'};'
class Stage(Enum):
CLASS = 1
PARAMETER = 2
TEMPLATE = 3
def partition(file):
classes = [
r'use Exception;',
r'use lzx\html\Template;',
]
parameters = []
template = []
stage = Stage.CLASS
for line_no, line in enumerate(file, 1):
if line_no == 1:
continue
line = line.strip()
if stage == Stage.CLASS:
if not line:
continue
if RE_CLASS.fullmatch(line):
classes.append(line)
elif PARAM_BEGIN == line or PARAM_EMPTY == line:
stage = Stage.PARAMETER
continue
else:
raise Exception(f'Error: line {line_no}: {line}')
elif stage == Stage.PARAMETER:
if RE_PARAM.fullmatch(line):
parameters.append(line.rstrip(','))
elif PARAM_END == line:
continue
elif HEAD_END == line:
stage = Stage.TEMPLATE
continue
else:
raise Exception(f'Error: line {line_no}: {line}')
elif stage == Stage.TEMPLATE:
template.append(line)
if len(template) < 2 or template[-2] != FOOT_BEGIN or template[-1] != FOOT_END:
raise Exception(
f'Error: template should end with {FOOT_BEGIN}' + "\n" + FOOT_END)
template = template[:-2]
classes = list(set(classes))
classes.sort()
return (classes, parameters, template)
def func(parameter):
var_type = parameter.split(' ')[0]
var_name = parameter.split('$')[-1]
return '''
public function get''' + camelize(var_name) + f'(): ?{var_type}' + '''
{
if (array_key_exists(''' + f"'{var_name}'" + ''', $this->data)) {
return $this->data''' + f"['{var_name}'];" + '''
}
return null;
}
public function set''' + camelize(var_name) + f'({parameter}): self' + '''
{
if ($this->cache) {
throw new Exception(self::FINALIZED);
}
$this->data''' + f"['{var_name}'] = ${var_name};" + '''
return $this;
}
'''
def tpl_str(lines):
for i in range(len(lines)):
if lines[i] == r'<?php':
lines[i] = r'<?php '
elif lines[i] == r'?>':
lines[i] = r' ?>'
out = ''.join(lines)
# preserve newline for TEXT (non-HTML) template
if out.replace('?>', '').find('>') < 0:
out = "\n".join(lines)
return out
def php(namespace, cls_name, classes, parameters, template):
return r'''<?php
declare(strict_types=1);
/**
* DO NOT EDIT
* generated by script/build_template.py
*/
namespace ''' + namespace + ''';
''' + "\n".join(classes) + r'''
class ''' + cls_name + r''' extends Template
{
public function __construct()
{
}
''' + ''.join(func(p) for p in parameters) + r'''
public function __toString()
{
if (!$this->cache) {
foreach ($this->onBeforeRender as $callback) {
$callback($this);
}
extract($this->data);
ob_start();
?>
''' + tpl_str(template) + r'''
<?php
$output = ob_get_clean();
$this->cache = trim($output);
}
return $this->cache;
}
}
'''
if __name__ == '__main__':
for input_file in (Path(__file__).parent.parent / 'server' / 'theme' / 'roselife').glob('**/*.tpl.php'):
cls_name = camelize(input_file.name.replace(
'.tpl.php', '').replace('.', '_'))
p = input_file.absolute().with_name(f'{cls_name}.php').parts
i = p.index('theme')
output_file = Path(*p[:i]).joinpath('gen', *p[i:])
output_file.parent.mkdir(parents=True, exist_ok=True)
p = output_file.parent.parts
i = p.index('theme')
namespace = 'site\\gen\\' + '\\'.join(p[i:])
php_current = ''
if output_file.exists():
with output_file.open() as output:
php_current = output.read()
with input_file.open() as input:
classes, parameters, template = partition(input)
php_new = php(namespace, cls_name, classes, parameters, template)
if php_new != php_current:
with output_file.open('w') as output:
output.write(php_new)
print(f'updated: {input_file}')
| 25.677419 | 108 | 0.527219 | 67 | 0.014028 | 0 | 0 | 0 | 0 | 0 | 0 | 1,616 | 0.338358 |
a6b00053cb475a5d6e67bb02e64f93f6e7d1106d | 21,606 | py | Python | app.py | ucsc-cgp/cgp-dashboard | 0ec5d3e2374751be02487caf4efe7cd9dae18522 | [
"Apache-2.0"
] | 1 | 2018-07-09T16:21:29.000Z | 2018-07-09T16:21:29.000Z | app.py | ucsc-cgp/cgp-dashboard | 0ec5d3e2374751be02487caf4efe7cd9dae18522 | [
"Apache-2.0"
] | 46 | 2018-05-04T17:05:58.000Z | 2019-01-23T18:39:08.000Z | app.py | DataBiosphere/cgp-dashboard | 0ec5d3e2374751be02487caf4efe7cd9dae18522 | [
"Apache-2.0"
] | 5 | 2016-12-20T02:03:13.000Z | 2018-03-13T19:51:44.000Z | import os
import requests
from bouncer import Bouncer
from flask import Flask, url_for, redirect, \
render_template, session, request, Response, \
flash, get_flashed_messages, jsonify
from flask_login import LoginManager, login_required, login_user, \
logout_user, current_user, UserMixin
from oauthlib.oauth2 import OAuth2Error
from elasticsearch_dsl import Search
from requests_oauthlib import OAuth2Session
from requests.exceptions import HTTPError
from oauth2client.client import verify_id_token
from oauth2client.crypt import AppIdentityError
from urllib import urlencode
import urllib2
from decode_cookie import decodeFlaskCookie
from utils import redact_email, decrypt, encrypt, new_iv
import logging
# Absolute path of the directory containing this module (used for
# filesystem-relative lookups).
basedir = os.path.abspath(os.path.dirname(__file__))
"""App Configuration"""
class Auth:
    """Google OAuth2 project credentials and endpoint configuration.

    CLIENT_ID / CLIENT_SECRET are mandatory: a missing environment variable
    raises KeyError at import time so a misconfigured deployment fails fast.
    The dashboard host/port/protocol fall back to local-development defaults
    when their environment variables are unset.
    """
    CLIENT_ID = os.environ['GOOGLE_CLIENT_ID']
    CLIENT_SECRET = os.environ['GOOGLE_CLIENT_SECRET']
    # Optional overrides with sensible local defaults (replaces the previous
    # "set default, then `if key in os.environ.keys()` override" pattern).
    DCC_DASHBOARD_HOST = os.environ.get('DCC_DASHBOARD_HOST', 'localhost')
    DCC_DASHBOARD_PORT = os.environ.get('DCC_DASHBOARD_PORT', '5000')
    DCC_DASHBOARD_PROTOCOL = os.environ.get('DCC_DASHBOARD_PROTOCOL', 'https')
    # OAuth callback registered with Google for this deployment.
    REDIRECT_URI = DCC_DASHBOARD_PROTOCOL+'://'+DCC_DASHBOARD_HOST+'/gCallback'
    AUTH_URI = 'https://accounts.google.com/o/oauth2/auth'
    TOKEN_URI = 'https://accounts.google.com/o/oauth2/token'
    USER_INFO = 'https://www.googleapis.com/userinfo/v2/me'
    REVOKE_TOKEN = 'https://accounts.google.com/o/oauth2/revoke'
    # Scopes requested: basic profile plus the account's email address.
    SCOPE = ['https://www.googleapis.com/auth/userinfo.profile',
             'https://www.googleapis.com/auth/userinfo.email']
class Config:
    """Base configuration shared by every environment profile."""

    APP_NAME = "Test Google Login"
    # Fall back to the hard-coded value when the variable is unset *or* empty
    # (the `or` deliberately treats "" as missing).
    SECRET_KEY = os.getenv("SECRET_KEY") or "somethingsecret"
    GOOGLE_SITE_VERIFICATION_CODE = os.getenv("GOOGLE_SITE_VERIFICATION_CODE") or ""
    # Only ever send the session/remember cookies over HTTPS so the tokens
    # stored in them cannot leak over plain HTTP.
    SESSION_COOKIE_SECURE = True
    REMEMBER_COOKIE_SECURE = True
class DevConfig(Config):
    """Dev config"""
    # Enable Flask debug mode (interactive debugger and auto-reload).
    DEBUG = True
class ProdConfig(Config):
    """Production config"""
    # Debug must stay off in production (no debugger, no code disclosure).
    DEBUG = False
# Map of configuration-profile names to their config classes; the app is
# instantiated below with config['prod'].
config = {
    "dev": DevConfig,
    "prod": ProdConfig,
    "default": DevConfig
}
"""APP creation and configuration"""
def set_prod_logging_level(logger, level):
    """Lower the threshold of *logger*'s production handler(s) to *level*.

    Every handler whose class is named ``ProductionHandler`` gets its level
    set to *level*; the logger itself is adjusted only when *level* records
    would currently be filtered out.
    """
    for handler in logger.handlers:
        if type(handler).__name__ == 'ProductionHandler':
            handler.level = level
    if not logger.isEnabledFor(level):
        logger.setLevel(level)
"""APP creation and configuration"""
app = Flask(__name__)
app.config.from_object(config['prod'])
set_prod_logging_level(app.logger, logging.INFO)
login_manager = LoginManager(app)
login_manager.login_view = "login"
login_manager.session_protection = "strong"
# make a global bouncer instance to avoid needless re-instantiation
if os.getenv('EMAIL_WHITELIST_NAME'):
whitelist_checker = Bouncer(os.getenv('EMAIL_WHITELIST_NAME'))
else:
whitelist_checker = None
class User(UserMixin):
    """Flask-Login user whose entire state lives in the Flask session cookie.

    The session is the single source of truth: every property reads and
    writes session keys directly.  OAuth tokens are stored encrypted, each
    with its own initialization vector, so they never appear in the cookie
    as plaintext (the cookie itself is also marked Secure via the config).
    """

    def __init__(self, user=None, name=None, picture=None):
        """
        Pulls the user's info from the session. We use @property to keep the
        session as the one source of truth, but allow access and setting of
        user properties here.
        """
        if user is not None:
            session['email'] = user
        if name is not None:
            session['name'] = name
        if picture is not None:
            session['avatar'] = picture

    @property
    def email(self):
        return session.get('email', None)

    @email.setter
    def email(self, value):
        session['email'] = value

    @property
    def name(self):
        return session.get('name', None)

    @name.setter
    def name(self, value):
        session['name'] = value

    @property
    def picture(self):
        return session.get('avatar', None)

    @picture.setter
    def picture(self, value):
        session['avatar'] = value

    @property
    def is_active(self):
        return self.email is not None

    @property
    def is_authenticated(self):
        return self.refresh_token is not None

    @property
    def is_anonymous(self):
        return self.email is None

    def get_id(self):
        return self.email

    @property
    def access_token(self):
        """Decrypted Google access token, or None when none is stored."""
        encrypted_token = session.get('access_token', None)
        if encrypted_token is None:
            # Fix: previously this fell through to session['access_iv'] and
            # raised KeyError when no token had ever been stored.
            return None
        return decrypt(encrypted_token, session['access_iv'])

    @access_token.setter
    def access_token(self, value):
        # A fresh IV per stored value; the IV itself does not need to be secret.
        iv = new_iv()
        session['access_iv'] = iv
        session['access_token'] = encrypt(value, iv)

    @property
    def refresh_token(self):
        """Decrypted Google refresh token, or None when none is stored."""
        encrypted_token = session.get('refresh_token', None)
        if encrypted_token is None:
            # Fix: avoids KeyError on the missing IV; also lets
            # is_authenticated safely return False for anonymous sessions.
            return None
        return decrypt(encrypted_token, session['refresh_iv'])

    @refresh_token.setter
    def refresh_token(self, value):
        # store the initialization vector in the session. It doesn't need to be secure
        iv = new_iv()
        session['refresh_iv'] = iv
        session['refresh_token'] = encrypt(value, iv)

    def logout(self):
        """Revoke the Google tokens and clear all user keys from the session."""
        # As per Google's docs: "The token can be an access token or a refresh
        # token. If the token is an access token and it has a corresponding
        # refresh token, the refresh token will also be revoked."
        if session.get('access_token'):
            # Fix: the session holds the *encrypted* token; Google's revoke
            # endpoint needs the decrypted one, so go through the property.
            res = requests.post(Auth.REVOKE_TOKEN, params={'token': self.access_token},
                                headers={'content-type': 'application/x-www-form-urlencoded'})
            if res.status_code != 200:
                print('Failed to revoke tokens. Expected 200 response, received '
                      '{} with message: {}'.format(res.status_code, res.text))
        for attr in 'email', 'name', 'avatar', 'access_token', 'refresh_token':
            try:
                del session[attr]
            except KeyError:
                print('Could not clear {} from session'.format(attr))
                pass
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: all user state lives in the session cookie, so
    # user_id is ignored and a session-backed User is rehydrated directly.
    return User()
""" OAuth Session creation """
def get_google_auth(state=None, token=None):
    """Build an OAuth2Session for Google in one of three modes.

    With *token*: an already-authorized session ready for API calls.
    With *state*: a session resuming the authorization-code callback.
    Otherwise: a fresh session for starting the authorization flow,
    carrying the redirect URI and the requested scopes.
    """
    if token:
        return OAuth2Session(Auth.CLIENT_ID, token=token)
    if state:
        return OAuth2Session(
            Auth.CLIENT_ID,
            state=state,
            redirect_uri=Auth.REDIRECT_URI)
    return OAuth2Session(
        Auth.CLIENT_ID,
        redirect_uri=Auth.REDIRECT_URI,
        scope=Auth.SCOPE)
def query_es_rna_seq(es_object, index, query_params, cardinality):
    """Return the approximate number of distinct *cardinality* values that
    match every query in *query_params*.

    Equivalent Elasticsearch request::

        GET <index>/_search
        {
          "query": {"bool": {"must": [
              {"<type>": {"<field>": "<value>"}}, ...
          ]}},
          "aggs": {"filtered_jobs": {"cardinality": {"field": "<cardinality>"}}}
        }

    es_object -- the es object to query against
    index -- the name of the index to query on
    query_params -- iterable of (query type, field, value) tuples
    cardinality -- field to get the cardinality from
    """
    # Create search object
    s = Search(using=es_object, index=index)
    # Chain every filter; Search.query returns a new object each time.
    # (Replaces a reduce() call: `reduce` is a builtin only under Python 2,
    # so the explicit loop keeps this working under Python 3 as well.)
    for query_type, field, value in query_params:
        s = s.query(query_type, **{field: value})
    # Approximate distinct count of the cardinality field.
    s.aggs.metric("filtered_jobs", 'cardinality', field=cardinality,
                  precision_threshold="40000")
    # Execute the query
    response = s.execute()
    return response.aggregations.filtered_jobs.value
@app.route('/')
def index():
    """
    Render the main page.
    """
    # Delegate to the shared template renderer with the 'index' template.
    return html_rend('index')
def parse_token():
    """
    Extract the Authorization token from the request headers.
    Raises AssertionError when the header is absent.
    :return: the bearer and token string
    """
    header_value = request.headers.get("Authorization", None)
    assert header_value is not None, "No Authorization header in the request"
    pieces = header_value.split()
    # First piece is the scheme (expected "Bearer"), second is the token.
    return pieces[0], pieces[1]
def new_google_access_token():
    """
    Refresh the current user's Google access token.
    Raises OAuth2Error when the refresh request is rejected.
    """
    credentials = {
        'client_id': Auth.CLIENT_ID,
        'client_secret': Auth.CLIENT_SECRET,
    }
    oauth = get_google_auth()
    # refresh_token() raises OAuth2Error on failure.
    token_response = oauth.refresh_token(
        Auth.TOKEN_URI, refresh_token=current_user.refresh_token,
        **credentials)
    current_user.access_token = token_response['access_token']
    return token_response['access_token']
def make_request(url, headers):
    """
    Proxy a GET request to *url* and wrap the upstream reply in a Flask
    Response, forwarding content-type and, if present, content-encoding.
    On an upstream HTTP error the error message and status code are
    returned instead.
    """
    try:
        upstream = urllib2.urlopen(urllib2.Request(url, headers=headers))
        proxied = Response(upstream.read(),
                           mimetype=upstream.headers['content-type'])
        enc_key = 'content-encoding'
        if enc_key in upstream.headers.keys():
            proxied.headers[enc_key] = upstream.headers[enc_key]
        return proxied
    except urllib2.HTTPError as e:
        return e.message, e.code
@app.route('/check_session/<cookie>')
def check_session(cookie):
    """
    Validate the service auth token and report which user (if any) is
    stored in the given Flask-encoded session cookie.
    """
    if not request.headers.get("Authorization", None):
        return jsonify({"error": "No Authorization header in the request"})
    # Verify the shared-secret bearer token before touching the cookie.
    try:
        bearer, auth_token = parse_token()
        assert bearer == "Bearer", "Authorization must start with Bearer"
        assert auth_token == os.getenv("LOG_IN_TOKEN", 'ITS_A_SECRET!')
    except AssertionError as e:
        return jsonify({'error': e.message})
    # The cookie travels between node, client and dashboard rather than
    # arriving through Flask, so it must be decoded by hand.
    session_data = decodeFlaskCookie(
        os.getenv('SECRET_KEY', 'somethingsecret'), cookie)
    email = session_data['email']
    if email is None:
        payload = {
            'error': 'No user is stored in the session. The user is not '
                     'logged in.'
        }
    else:
        payload = {
            'email': email,
            'name': session_data['name'],
            'avatar': session_data['avatar']
        }
    return jsonify(payload)
def _get_user_info_from_token(token=None):
    """
    Fetch the user's profile from Google and return the raw response.
    Uses the current user's access token unless an explicit one is given.
    """
    access_token = token if token is not None else current_user.access_token
    google = get_google_auth(token={'access_token': access_token})
    return google.get(Auth.USER_INFO)
def get_user_info(token=None):
    """
    Fetch the user's profile, refreshing the session token once on a 4xx
    response; raises ValueError or OAuth2Error on failure.
    When an explicit access token is supplied it is used as-is and never
    refreshed.
    """
    profile = _get_user_info_from_token(token=token)
    is_client_error = 400 <= profile.status_code < 500
    if is_client_error and token:
        # A caller-supplied token cannot be refreshed here.
        raise ValueError('The provided token was not accepted')
    if is_client_error:
        # Session token probably expired — refresh and retry exactly once.
        try:
            new_google_access_token()
        except OAuth2Error:
            # Drop broken / expired tokens before propagating the failure.
            app.logger.warning('Could not refresh access token')
            session.pop('access_token')
            session.pop('refresh_token')
            raise
        profile = _get_user_info_from_token()
    # 5xx (or unexpected 4xx) responses are reported without clearing the
    # tokens, since the tokens are not necessarily to blame.
    if profile.status_code != 200:
        raise ValueError(profile.text)
    return profile.json()
@app.route('/me')
def me():
    """
    Return the requesting user's name, email and avatar.
    Any problem resolving the user (not logged in, token failure, …)
    yields the anonymous user instead of an error.
    """
    if current_user.is_anonymous:
        app.logger.debug('Request %s by user anonymous', request.path)
        return jsonify({'name': 'anonymous'})
    try:
        profile = get_user_info()
    except (ValueError, OAuth2Error):
        app.logger.error('Request path %s by unknown user', request.path)
        return jsonify({'name': 'anonymous'})
    result = dict((key, profile[key]) for key in ('name', 'email'))
    result['avatar'] = profile['picture']
    app.logger.info('Request path %s by user with email %s', request.path,
                    profile['email'])
    return jsonify(result)
@app.route('/authorization')
def authorization():
    """
    Report whether the caller is authorized.

    A bearer token from the Authorization header is preferred; otherwise
    the access token stored in the session is used (refreshed if needed).
    With a working token the user's email is fetched from Google and, if
    a whitelist is configured, checked against it.

    HTTP status codes:
        204  user is authorized (or no whitelist is configured)
        401  user info is not available
        403  user is not authorized
    """
    try:
        bearer, auth_token = parse_token()
    except AssertionError:
        # No Authorization header: fall back to the session token.
        auth_token = None
    else:
        if bearer != "Bearer":
            return "Authorization must start with Bearer", 401
    if auth_token is None and current_user.is_anonymous:
        return "No token provided", 401
    try:
        user_data = get_user_info(auth_token)
    except ValueError as e:
        return e.message, 401
    except OAuth2Error as e:
        return 'Failed to get user info: ' + e.message, 401
    # We have the user's data — verify the email against the whitelist.
    email = user_data['email']
    if whitelist_checker is None:
        app.logger.info(
            'Request path %s. No whitelist; User with email %s is logged in',
            request.path, email)
        return '', 204
    if whitelist_checker.is_authorized(email):
        app.logger.info(
            'Request path %s. User with email %s is authorized',
            request.path, email)
        return '', 204
    app.logger.info(
        'Request path %s. User with email %s is not authorized',
        request.path, email)
    return '', 403
@app.route('/<name>.html')
def html_rend(name):
    """
    Render the template named <name>.html.

    The index template additionally receives the whitelist flag and the
    contact email; all other templates are rendered without extra context.
    """
    # Read the dashboard-service endpoint up front so a missing
    # configuration fails loudly (KeyError) before rendering.
    # NOTE(review): the value itself is unused here — presumably consumed
    # by templates in an earlier revision; confirm before removing.
    os.environ['DCC_DASHBOARD_SERVICE']
    if name == 'index':
        whitelist_validation_required = bool(os.getenv('EMAIL_WHITELIST_NAME'))
        contact_email = os.getenv('CONTACT_EMAIL', '')
        return render_template(name + '.html',
                               whitelist_validation_required=whitelist_validation_required,
                               contact_email=contact_email)
    # The former 'unauthorized' special case was byte-identical to the
    # fallthrough, so one default render covers it and every other page.
    # The unused DCC_CORE_CLIENT_VERSION lookup was removed as dead code.
    return render_template(name + '.html')
@app.route('/file_browser/')
def html_rend_file_browser():
    """
    Redirect URLs of the form <url>/file_browser/ to the rendered
    file browser page.
    """
    target = url_for('html_rend', name='file_browser')
    return redirect(target)
@app.route('/boardwalk')
def boardwalk():
    """Serve the boardwalk page via the generic template renderer."""
    # BUG FIX: this endpoint previously did redirect(url_for('boardwalk')),
    # i.e. redirected to itself, producing an infinite redirect loop.
    # Redirect to the rendered template instead, mirroring the
    # html_rend_file_browser pattern used elsewhere in this file.
    return redirect(url_for('html_rend', name='boardwalk'))
@app.route('/privacy')
def privacy():
    """Serve the privacy page via the generic template renderer."""
    # BUG FIX: this endpoint previously did redirect(url_for('privacy')),
    # i.e. redirected to itself, producing an infinite redirect loop.
    # Redirect to the rendered template instead, mirroring the
    # html_rend_file_browser pattern used elsewhere in this file.
    return redirect(url_for('html_rend', name='privacy'))
@app.route('/unauthorized')
def unauthorized():
    """Render the unauthorized page for the given (redacted) account."""
    return render_template(
        'unauthorized.html',
        contact=os.getenv('CONTACT_EMAIL', ''),
        project=os.getenv('PROJECT_NAME', ''),
        account=request.args.get('account'))
@app.route('/login')
def login():
    """
    Start the Google OAuth login flow, or go straight to the index page
    when the caller is already authenticated.
    """
    if current_user.is_authenticated:
        app.logger.info('Request path %s. Current user with ID %s is authenticated; redirecting to index URL', request.path, current_user.get_id())
        return redirect(url_for('index'))
    oauth_session = get_google_auth()
    authorization_url, csrf_state = oauth_session.authorization_url(
        Auth.AUTH_URI, access_type='offline',
        prompt='select_account consent')
    # Keep the CSRF state so the OAuth callback can verify it.
    session['oauth_state'] = csrf_state
    app.logger.info('Request path %s. Redirecting current user with ID %s to authorization URL', request.path, current_user.get_id())
    return redirect(authorization_url)
@app.route('/gCallback')
def callback():
    """
    Callback endpoint for Google's OAuth 2.0 flow: exchanges the
    authorization code for tokens, verifies the identity token, optionally
    enforces the email whitelist, and logs the user in.
    """
    # Already-authenticated users skip the whole exchange.
    if current_user is not None and current_user.is_authenticated:
        app.logger.info('Request path %s. Current user with ID %s is authenticated; redirecting to index URL', request.path, current_user.get_id())
        return redirect(url_for('index'))
    # Google reports user-facing failures via the 'error' query parameter.
    if 'error' in request.args:
        if request.args.get('error') == 'access_denied':
            if current_user is not None:
                app.logger.error('Request path %s. Current user with ID %s access is denied', request.path, current_user.get_id())
            else:
                app.logger.error('Request path %s. Access is denied for current user None', request.path)
            return 'You are denied access.'
        return 'Error encountered.'
    # NOTE(review): with 'and', a request carrying only one of
    # 'code'/'state' falls through to the token exchange below; this may
    # have been intended as 'or' — confirm against the OAuth flow.
    if 'code' not in request.args and 'state' not in request.args:
        if current_user is not None:
            app.logger.info('Request path %s. Redirecting current user with ID %s to login URL', request.path, current_user.get_id())
        else:
            app.logger.info('Request path %s. Redirecting current user None to login URL', request.path)
        return redirect(url_for('login'))
    else:
        google = get_google_auth(state=session['oauth_state'])
        try:
            token = google.fetch_token(
                Auth.TOKEN_URI,
                client_secret=Auth.CLIENT_SECRET,
                authorization_response=request.url)
        except HTTPError:
            if current_user is not None:
                app.logger.error('Request path %s. Could not fetch token for current user with ID %s', request.path, current_user.get_id())
            else:
                app.logger.error('Request path %s. Could not fetch token for current user None', request.path)
            return 'HTTPError occurred.'
        # Testing the token verification step.
        try:
            # jwt = verify_id_token(token['id_token'], Auth.CLIENT_ID)
            verify_id_token(token['id_token'], Auth.CLIENT_ID)
        except AppIdentityError:
            # NOTE(review): current_user.get_id() is logged here although
            # current_user may still be anonymous at this point — verify.
            app.logger.error('Request path %s. Could not verify token for current user with ID %s', request.path, current_user.get_id())
            return 'Could not verify token.'
        # Check if you have the appropriate domain
        # Commenting this section out to let anyone with
        # a google account log in.
        # if 'hd' not in jwt or jwt['hd'] != 'ucsc.edu':
        # flash('You must login with a ucsc.edu account. \
        # Please try again.', 'error')
        # return redirect(url_for('index'))
        google = get_google_auth(token=token)
        resp = google.get(Auth.USER_INFO)
        if resp.status_code == 200:
            user_data = resp.json()
            email = user_data['email']
            # If so configured, check for whitelist and redirect to
            # unauthorized page if not in whitelist, e.g.,
            if whitelist_checker is not None and not whitelist_checker.is_authorized(email):
                app.logger.info('Request path %s. User with email %s is not authorized', request.path, user_data['email'])
                return redirect(url_for('unauthorized', account=redact_email(email)))
            user = User()
            for attr in 'email', 'name', 'picture':
                setattr(user, attr, user_data[attr])
            # NOTE(review): Google only returns 'refresh_token' when consent
            # is (re-)granted; prompt='consent' in login() should force it,
            # but a KeyError here remains possible — confirm.
            user.refresh_token = token['refresh_token']
            user.access_token = token['access_token']
            login_user(user)
            # Empty flashed messages
            get_flashed_messages()
            # Set a new success flash message
            flash('You are now logged in!', 'success')
            app.logger.info('Request path %s. User with email %s was logged in; redirecting to index URL', request.path, user_data['email'])
            return redirect(url_for('boardwalk'))
        app.logger.error('Could not fetch information for current user')
        return 'Could not fetch your information.'
@app.route('/logout')
@login_required
def logout():
    """Log the current user out and return to the index page."""
    app.logger.info('Request path %s. Current user with ID %s will be logged out', request.path, current_user.get_id())
    # Clear the user's Google tokens/session state (User.logout), then end
    # the Flask-Login session.
    current_user.logout()
    logout_user()
    return redirect(url_for('index'))
if __name__ == '__main__':
    # Development entry point: listen on all interfaces, port 80 (no TLS).
    app.run(host='0.0.0.0', port=80)
| 34.349762 | 147 | 0.643201 | 4,714 | 0.21818 | 0 | 0 | 12,146 | 0.562159 | 0 | 0 | 8,512 | 0.393965 |
a6b278baa9a78edbd910abdfa8c4d04de51fc371 | 1,260 | py | Python | setup.py | toinsson/pynatnetclient | 9743fb33e668b209022ab06e61bc4816e9ad9355 | [
"Apache-2.0"
] | 1 | 2021-03-05T18:23:06.000Z | 2021-03-05T18:23:06.000Z | setup.py | toinsson/pynatnetclient | 9743fb33e668b209022ab06e61bc4816e9ad9355 | [
"Apache-2.0"
] | null | null | null | setup.py | toinsson/pynatnetclient | 9743fb33e668b209022ab06e61bc4816e9ad9355 | [
"Apache-2.0"
] | null | null | null | import io, os, re
from os import path
from setuptools import find_packages
from distutils.core import setup
# pip's single-source version method as described here:
# https://python-packaging-user-guide.readthedocs.io/single_source_version/
def read(*names, **kwargs):
    """Return the contents of a file as a single string.

    Path components in *names* are joined relative to this module's
    directory; the encoding defaults to UTF-8 and can be overridden with
    an ``encoding`` keyword argument.
    """
    path = os.path.join(os.path.dirname(__file__), *names)
    encoding = kwargs.get("encoding", "utf8")
    with io.open(path, encoding=encoding) as fp:
        return fp.read()
def find_version(*file_paths):
    """Extract the ``__version__`` string from the given module file,
    raising RuntimeError when no assignment is found."""
    contents = read(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                      contents, re.M)
    if not match:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
# Package metadata; the version is read from pynatnetclient/__init__.py so
# it is declared in exactly one place (pip's single-source-version pattern).
setup(name='pynatnetclient',
    version=find_version('pynatnetclient', '__init__.py'),
    description='Python client to Optitrack.',
    # long_description=long_description,
    author='Antoine Loriette',
    author_email='antoine.loriette@gmail.com',
    url='https://github.com/toinsson/pynatnetclient',
    license='Apache',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
    ],
    keywords='optitrack',
    packages=find_packages(),
    )
a6b27c6859301d1606a8d5f073f133b9b73bc5e5 | 188 | py | Python | Code/circle.py | notha99y/Satellite-Scheduling | 6231eccf353f37ba643a7e37aa60525355f5d005 | [
"MIT"
] | 14 | 2018-04-06T22:36:30.000Z | 2022-02-15T02:36:58.000Z | Code/circle.py | notha99y/Satellite-Scheduling | 6231eccf353f37ba643a7e37aa60525355f5d005 | [
"MIT"
] | null | null | null | Code/circle.py | notha99y/Satellite-Scheduling | 6231eccf353f37ba643a7e37aa60525355f5d005 | [
"MIT"
] | 4 | 2018-04-06T22:36:57.000Z | 2022-02-15T02:37:00.000Z | import matplotlib.pyplot as plt
# Build an unfilled circle of radius 5 centred at the origin and attach it
# to a fresh matplotlib figure.
circle = plt.Circle((0,0),5, fill=False)
fig, ax = plt.subplots()
ax.add_artist(circle)
# Widen the axes so the whole circle is visible, then display the figure.
ax.set_xlim((-10, 10))
ax.set_ylim((-10, 10))
plt.show()
| 18.8 | 41 | 0.654255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a6b3572cf5f220699e22344092a457efc15b97ad | 358 | py | Python | cwk_3b.py | mas250/Python3 | 6ac6f0ffe7869cd7520b2ae0debf3650116a97b1 | [
"MIT"
] | 1 | 2019-12-28T12:31:28.000Z | 2019-12-28T12:31:28.000Z | cwk_3b.py | mas250/Python3 | 6ac6f0ffe7869cd7520b2ae0debf3650116a97b1 | [
"MIT"
] | null | null | null | cwk_3b.py | mas250/Python3 | 6ac6f0ffe7869cd7520b2ae0debf3650116a97b1 | [
"MIT"
] | null | null | null | prize_file_1 = open("/Users/MatBook/Downloads/prize3.txt")
# Parse one integer per line from the prize file opened above; the first
# number is the target sum, the remaining ones are candidate prize values.
List = []
prizes = []
for line in prize_file_1:
    List.append(int(line))
first_line = List.pop(0)
# Collect every ordered pair of prizes that sums to the target.
# NOTE(review): both (i, j) and (j, i) are appended, and an element pairs
# with itself when 2*i equals the target — confirm that is intended.
for i in List:
    print(i)
    for j in List:
        if i + j == first_line:
            prizes.append((i,j))
print (List)
print( "you can have:", prizes)
| 13.769231 | 58 | 0.547486 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.145251 |
a6b3808aec7e81eab7bb1df7481e77ef7f5b409c | 1,684 | py | Python | DevNetworkPython/tagAnalysis.py | ManikHossain08/Metrics-Extraction-from-GitHub-MSR_Python | b846c8ea6d37246af1c202466b60c15d06c5ba8b | [
"MIT"
] | 1 | 2020-06-13T22:30:17.000Z | 2020-06-13T22:30:17.000Z | DevNetworkPython/tagAnalysis.py | ManikHossain08/Metrics-Extraction-from-GitHub-MSR | b846c8ea6d37246af1c202466b60c15d06c5ba8b | [
"MIT"
] | null | null | null | DevNetworkPython/tagAnalysis.py | ManikHossain08/Metrics-Extraction-from-GitHub-MSR | b846c8ea6d37246af1c202466b60c15d06c5ba8b | [
"MIT"
] | null | null | null | import os
import git
import csv
from datetime import datetime
from progress.bar import Bar
def tagAnalysis(repo: git.Repo, outputDir: str):
    """Collect per-tag commit counts for *repo* and append the results to
    project.csv and tags.csv inside *outputDir*."""
    print("Analyzing tags")
    tag_rows = []
    ordered_tags = sorted(repo.tags, key=getTaggedDate)
    previous_tag = None
    for tag in Bar('Processing').iter(ordered_tags):
        if previous_tag is None:
            # First tag: count every commit reachable from it.
            commits = tag.commit.iter_items(repo, tag.commit)
        else:
            # Later tags: only commits since the previous tag's date.
            since = formatDate(getTaggedDate(previous_tag))
            commits = tag.commit.iter_items(repo, tag.commit, after=since)
        tag_rows.append(dict(
            path=tag.path,
            date=formatDate(getTaggedDate(tag)),
            commitCount=len(list(commits))
        ))
        previous_tag = tag
    # Append the aggregate tag count to the project summary.
    with open(os.path.join(outputDir, 'project.csv'), 'a', newline='') as f:
        csv.writer(f, delimiter=',').writerow(['Tag Count', len(tag_rows)])
    # Emit the per-tag rows, ordered chronologically.
    print("Outputting CSVs")
    with open(os.path.join(outputDir, 'tags.csv'), 'a', newline='') as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerow(['Path', 'Date', 'Commit Count'])
        for row in sorted(tag_rows, key=lambda r: r['date']):
            writer.writerow([row['path'], row['date'], row['commitCount']])
def getTaggedDate(tag):
    """Return the tag's timestamp as a datetime.

    Lightweight tags carry no tag object, so their commit date is used;
    annotated tags use the tagged date.
    """
    if tag.tag is None:
        timestamp = tag.commit.committed_date
    else:
        timestamp = tag.tag.tagged_date
    return datetime.fromtimestamp(timestamp)
def formatDate(value):
    """Format a datetime as an ISO-style YYYY-MM-DD string."""
    return '{:%Y-%m-%d}'.format(value)
a6b564871deacaebf5076f647494e02a77ffcc72 | 2,129 | py | Python | keystoneworkout/benchmark.py | dstanek/keystone-exercises | 5023fe87896ffefb462936ca9e6a982b9d099d6c | [
"Apache-2.0"
] | null | null | null | keystoneworkout/benchmark.py | dstanek/keystone-exercises | 5023fe87896ffefb462936ca9e6a982b9d099d6c | [
"Apache-2.0"
] | null | null | null | keystoneworkout/benchmark.py | dstanek/keystone-exercises | 5023fe87896ffefb462936ca9e6a982b9d099d6c | [
"Apache-2.0"
] | null | null | null | import shelve
import sys
import threading
import time
class Benchmark(object):
    """Decorator that runs the wrapped function on `concurrency` threads,
    times the total wall-clock duration, and compares it against the
    previous run persisted via Shelf.
    """
    def __init__(self, concurrency=10, iterations=10):
        # NOTE(review): each thread invokes the wrapped function exactly
        # once, yet the per-task math below divides by
        # concurrency * iterations — presumably the wrapped function loops
        # `iterations` times internally; confirm against its callers.
        self.concurrency = concurrency
        self.iterations = iterations
        self.shelf = Shelf()
    def __call__(self, f):
        def wrapped(*args, **kwargs):
            # NOTE(review): wrapped() returns None, discarding f's result.
            print 'Benchmarking %s...' % f.__name__,
            sys.stdout.flush()
            # build threads
            threads = [threading.Thread(target=f, args=args, kwargs=kwargs)
                       for _ in range(self.concurrency)]
            start = time.time()
            for thread in threads:
                thread.start()
            # Busy-wait until every thread finishes (burns a core; timing
            # behavior kept as-is rather than switching to join()).
            while any(thread.is_alive() for thread in threads):
                pass
            end = time.time()
            total_time = end - start
            mean_time = total_time / (self.concurrency * self.iterations)
            task_per_sec = (self.concurrency * self.iterations) / total_time
            # Compare against the previously persisted total for this
            # function, then store the new total.
            previous = self.shelf.get(f.__name__)
            self.shelf.set(f.__name__, total_time)
            if previous is not None:
                percent_diff = 100.0 * (total_time - previous) / previous
                print ('%2.3f seconds total (%+2.3f%%), %2.3f seconds per task, %2.3f tasks per second'
                    % (total_time, percent_diff, mean_time, task_per_sec))
            else:
                print ('%2.3f seconds total, %2.3f seconds per task, %2.3f tasks per second'
                    % (total_time, mean_time, task_per_sec))
        return wrapped
class Shelf(object):
    """Tiny persistent key/value store backed by the shelve module.

    Every operation opens and closes the underlying shelf file, so values
    survive across processes.
    """
    def __init__(self):
        # Hidden file in the working directory holding the stored values.
        self.filename = '.keystoneworkout-benchmark-shelf'
    def _apply(self, operation):
        """Open the shelf, run *operation* on it, always close it."""
        db = shelve.open(self.filename)
        try:
            return operation(db)
        finally:
            db.close()
    def get(self, key):
        """Return the stored value for *key*, or None when absent."""
        return self._apply(lambda db: db.get(key))
    def set(self, key, value):
        """Store *value* under *key*."""
        def write(db):
            db[key] = value
        self._apply(write)
    def delete(self, key):
        """Remove *key*; raises KeyError when it does not exist."""
        def remove(db):
            del db[key]
        self._apply(remove)
| 30.855072 | 103 | 0.550493 | 2,069 | 0.971818 | 0 | 0 | 0 | 0 | 0 | 0 | 218 | 0.102395 |
a6b8534dfb59965e01c0a204829dc917ef20d463 | 5,938 | py | Python | src/dipus/search_js_t.py | shirou/dipus | 1c8a9cc89fb95a5c6ae99e692488496bd3fbec34 | [
"BSD-2-Clause"
] | null | null | null | src/dipus/search_js_t.py | shirou/dipus | 1c8a9cc89fb95a5c6ae99e692488496bd3fbec34 | [
"BSD-2-Clause"
] | null | null | null | src/dipus/search_js_t.py | shirou/dipus | 1c8a9cc89fb95a5c6ae99e692488496bd3fbec34 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
template = """/*
* * search_dipus
* * ~~~~~~~~~~~~~~
* *
* * Dipus JavaScript utilties for the full-text search.
* * This files is based on searchtools.js of Sphinx.
* *
* * :copyright: Copyright 2007-2012 by the Sphinx team.
* * :license: BSD, see LICENSE for details.
* *
* */
/**
* * helper function to return a node containing the
* * search summary for a given text. keywords is a list
* * of stemmed words, hlwords is the list of normal, unstemmed
* * words. the first one is used to find the occurance, the
* * latter for highlighting it.
* */
jQuery.makeSearchSummary = function(text, keywords, hlwords) {{
var textLower = text.toLowerCase();
var start = 0;
$.each(keywords, function() {{
var i = textLower.indexOf(this.toLowerCase());
if (i > -1)
start = i;
}});
start = Math.max(start - 120, 0);
var excerpt = ((start > 0) ? '...' : '') +
$.trim(text.substr(start, 240)) +
((start + 240 - text.length) ? '...' : '');
var rv = $('<div class="context"></div>').text(excerpt);
$.each(hlwords, function() {{
rv = rv.highlightText(this, 'highlighted');
}});
return rv;
}};
/**
* Search Module
*/
var Search = {{
_dipus_url: "{dipus_url}",
_index: null,
_pulse_status : -1,
init : function (){{
var params = $.getQueryParameters();
if (params.q) {{
var query = params.q[0];
$('input[name="q"]')[0].value = query;
this.performSearch(query);
}}
}},
stopPulse : function() {{
this._pulse_status = 0;
}},
startPulse : function() {{
if (this._pulse_status >= 0)
return;
function pulse() {{
Search._pulse_status = (Search._pulse_status + 1) % 4;
var dotString = '';
for (var i = 0; i < Search._pulse_status; i++)
dotString += '.';
Search.dots.text(dotString);
if (Search._pulse_status > -1)
window.setTimeout(pulse, 500);
}};
pulse();
}},
/**
* perform a search for something
*/
performSearch : function(query) {{
// create the required interface elements
this.out = $('#search-results');
this.title = $('<h2>' + _('Searching') + '</h2>').appendTo(this.out);
this.dots = $('<span></span>').appendTo(this.title);
this.status = $('<p style="display: none"></p>').appendTo(this.out);
this.output = $('<ul class="search"/>').appendTo(this.out);
$('#search-progress').text(_('Preparing search...'));
this.startPulse();
this.query(query);
}},
query : function(query) {{
var hlterms = [];
var highlightstring = '?highlight=' + $.urlencode(hlterms.join(" "));
$('#search-progress').empty();
var url = this._dipus_url + "?q=" + $.urlencode(query);
$.ajax({{
url: url,
dataType: 'jsonp',
success: function(json){{
for(var i = 0; i < json.hits.length; i++){{
var hit = json.hits[i];
var listItem = $('<li style="display:none"></li>');
var msgbody = hit._source.message;
if (DOCUMENTATION_OPTIONS.FILE_SUFFIX == '') {{
// dirhtml builder
var dirname = hit._source.path;
if (dirname.match(/\/index\/$/)) {{
dirname = dirname.substring(0, dirname.length-6);
}} else if (dirname == 'index/') {{
dirname = '';
}}
listItem.append($('<a/>').attr('href',
DOCUMENTATION_OPTIONS.URL_ROOT + dirname +
highlightstring + query).html(hit._source.title));
}} else {{
// normal html builders
listItem.append($('<a/>').attr('href',
hit._source.path + DOCUMENTATION_OPTIONS.FILE_SUFFIX +
highlightstring + query).html(hit._source.title));
}}
if (msgbody) {{
listItem.append($.makeSearchSummary(msgbody, Array(query), Array(query)));
Search.output.append(listItem);
listItem.slideDown(5);
}} else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) {{
$.get(DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/' +
hit._source.path + '.txt', function(data) {{
if (data != '') {{
listItem.append($.makeSearchSummary(data, Array(query), hlterms));
Search.output.append(listItem);
}}
listItem.slideDown(5);
}});
}} else {{
// no source available, just display title
Search.output.append(listItem);
listItem.slideDown(5);
}}
}};
Search.stopPulse();
Search.title.text(_('Search Results'));
if (json.hits.length === 0){{
Search.status.text(_('Your search did not match any documents. Please make sure that all words are spelled correctly and that you\\'ve selected enough categories.'));
}}else{{
Search.status.text(_('Search finished, found %s page(s) matching the search query.').replace('%s', json.hits.length));
}}
Search.status.fadeIn(500);
}},
error: function(XMLHttpRequest, textStatus, errorThrown) {{
console.log(textStatus, errorThrown);
}}
}});
}}
}};
$(document).ready(function() {{
Search.init();
}});
"""
| 35.556886 | 184 | 0.492253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,923 | 0.997474 |
a6b86b9c9b7aaf978ba0544515d3c12d570959c7 | 508 | py | Python | src/offensivetextdetectionservice/src/config.py | alejgh/easidiomas | abe9e4dc6ccf27d28ea3b14ef0251f044a8c2261 | [
"MIT"
] | 1 | 2022-01-24T16:56:42.000Z | 2022-01-24T16:56:42.000Z | src/offensivetextdetectionservice/src/config.py | alejgh/easidiomas | abe9e4dc6ccf27d28ea3b14ef0251f044a8c2261 | [
"MIT"
] | null | null | null | src/offensivetextdetectionservice/src/config.py | alejgh/easidiomas | abe9e4dc6ccf27d28ea3b14ef0251f044a8c2261 | [
"MIT"
] | null | null | null | """ Configuration variables used in the application.
These variables should be setup as environment variables
in the docker-compose.yml file when launching all the services.
If these environment variables are not present, default values
are asigned to them.
"""
import os
# Address of the Kafka broker; defaults to a local single-node setup.
KAFKA_ENDPOINT = os.environ.get('KAFKA_ENDPOINT') or 'localhost:9092'
# Topic consumed for incoming posts.
KAFKA_INPUT_TOPIC = os.environ.get('INPUT_TOPIC') or 'posts'
# Topic used for publishing service log messages.
KAFKA_LOGGING_TOPIC = os.environ.get('LOGGING_TOPIC') or 'service_logs'
# Identifier attached to this service's messages/logs.
# NOTE(review): the value says 'topic_modeling' although the file lives in
# the offensive-text-detection service — possible copy/paste; confirm.
SERVICE_KEY = 'topic_modeling'
a6b882cf867dd1d00939f625ef0b69c7a7489074 | 13,896 | py | Python | tsp_ac.py | Architecton/route-optimization-using-machine-learning | ae641bba4ac9a15656658f8cb1abdb83451aa753 | [
"MIT"
] | null | null | null | tsp_ac.py | Architecton/route-optimization-using-machine-learning | ae641bba4ac9a15656658f8cb1abdb83451aa753 | [
"MIT"
] | null | null | null | tsp_ac.py | Architecton/route-optimization-using-machine-learning | ae641bba4ac9a15656658f8cb1abdb83451aa753 | [
"MIT"
] | null | null | null | import numpy as np
import networkx as nx
import argparse
import random
from models.distance import get_dist_func
def get_fitness(solution, initial_node, node_list):
    """
    Compute the total length of the closed tour encoded by a permutation.

    Args:
        solution (numpy.ndarray): Tour encoded as a permutation of node indices
        initial_node (int): Index of the tour's starting node, appended at the
            end to close the cycle (redundant with the first element)
        node_list (list): Node IDs in the network, indexed by the permutation

    Returns:
        (float): Sum of edge lengths along the closed tour
    """
    # Close the cycle by returning to the initial node.
    closed_tour = np.hstack((solution, initial_node))
    # Sum the length of every consecutive edge along the closed tour.
    return np.sum([dist_func(node_list[src], node_list[dst])
                   for src, dst in zip(closed_tour[:-1], closed_tour[1:])])
def get_inv_dist_mat(node_list):
    """
    Build the symmetric pairwise inverse-distance matrix for the given nodes.

    Args:
        node_list (list): Nodes for which to compute pairwise inverse distances

    Returns:
        (numpy.ndarray): Matrix of pairwise inverse distances (diagonal is 0)

    NOTE(review): assumes dist_func never returns 0 for two distinct
    nodes — a zero distance would raise ZeroDivisionError; confirm upstream.
    """
    n = len(node_list)
    inv_dist = np.zeros((n, n), dtype=float)
    # Fill the upper triangle and mirror it into the lower one.
    for i in range(n - 1):
        for j in range(i + 1, n):
            value = 1 / dist_func(node_list[i], node_list[j])
            inv_dist[i, j] = value
            inv_dist[j, i] = value
    return inv_dist
def aco(network, n_ants=100, max_it=500, rho=0.1, alpha=1.0, beta=1.0, q=1.0,
        aug='relinking', p_mut=0.08, p_accept_worse=0.1, breeding_coeff=0.5):
    """
    Perform ant colony optimization to estimate solution for travelling salesman problem.
    Args:
        network (object): Networkx representation of the graph
        n_ants (int): Number of ants to use
        max_it (int): Maximum number of iterations to perform
        rho (float): Evaporation rate
        alpha (float): Pheromone matrix power in transition probability matrix construction
        beta (float): Inverse distance matrix power in transition probability matrix construction
        q (float): Pheromone trail coefficient
        aug (str): Algorithm augmentation to use. If None, use no augmentation. If equal to 'relinking' use path
        relinking method. If equal to 'genetic' use replacement of worst ants with crossovers of best ants.
        p_mut (float): Mutation probability
        p_accept_worse (float): Probability of accepting a relinked solution that is worse than original.
        breeding_coeff (float): Fraction of best ants to use in crossover and fraction of worst ants to
        replace with offspring (genetic augmentation)
    Returns:
        (tuple): Best found solution, fitness of best solution, edgelists corresponding to solutions representing
        the new global best solution.
    """
    # Check aug parameter.
    if aug is not None:
        if aug not in {'relinking', 'genetic'}:
            raise(ValueError('unknown value specified for aug parameter'))
    # Initialize list for storing edge lists (for animating).
    edgelists = []
    # Initialize list of nodes (for converting enumerations to actual node IDs).
    node_list = list(network.nodes())
    # Set initial node.
    initial_node = 0
    # Initilize best found solution.
    best_solution = {
            'fitness' : np.inf,
            'solution' : None
            }
    # Compute distance matrix for locations.
    inv_dist_mat = get_inv_dist_mat(node_list)
    # Initialize pheromone matrix.
    pher_mat = 0.01*np.ones_like(inv_dist_mat, dtype=float)
    # Initialize iteration index.
    it_idx = 0
    # Main iteration loop.
    while it_idx < max_it:
        # Increment iteration counter.
        it_idx += 1
        # Print iteration index and best fitness.
        print('iteration: {0}'.format(it_idx))
        print('best fitness: {0}'.format(best_solution['fitness']))
        # Initialize array for storing ant solutions.
        ant_solutions = np.empty((n_ants, len(node_list)), dtype=int)
        # Initialize array for storing ant fitness values.
        ant_fitness_vals = np.empty(n_ants, dtype=float)
        # Build transition probability matrix.
        p_mat = (pher_mat**alpha) * (inv_dist_mat**beta)
        # Run ACO step.
        for ant_idx in range(n_ants):
            # Set initial node.
            current_node = initial_node
            # Get set of unvisited nodes.
            unvisited = set(range(len(node_list)))
            unvisited.remove(initial_node)
            # Build ant's solution.
            solution_nxt = np.empty(len(node_list), dtype=int)
            solution_nxt[0] = initial_node
            for step_idx in range(len(node_list) - 1):
                unvisited_list = list(unvisited)
                probs = p_mat[current_node, unvisited_list] / np.sum(p_mat[current_node, unvisited_list])
                node_nxt = np.random.choice(unvisited_list, size=1, p=probs)[0]
                unvisited.remove(node_nxt)
                solution_nxt[step_idx+1] = node_nxt
                current_node = node_nxt
            # Compute fitness of solution and compare to global best.
            fitness_solution = get_fitness(solution_nxt, initial_node, node_list)
            ant_fitness_vals[ant_idx] = fitness_solution
            if fitness_solution < best_solution['fitness']:
                best_solution['fitness'] = fitness_solution
                best_solution['solution'] = solution_nxt
                solution_nxt_aug = np.hstack((solution_nxt, initial_node))
                # Store edge list (for animating).
                edgelists.append([(node_list[solution_nxt_aug[idx]], node_list[solution_nxt_aug[idx+1]])
                    for idx in range(len(solution_nxt_aug) - 1)])
            # Store ant's solution.
            ant_solutions[ant_idx, :] = solution_nxt
        # Initialize matrix for accumulating pheromones (for pheromone update).
        pher_add_mat = np.zeros_like(pher_mat, dtype=float)
        if aug == 'relinking':
            # If using relinking augmentation.
            # Go over solutions.
            for idx_solution in range(ant_solutions.shape[0]):
                # Split solution at random point.
                sec1, sec2 = np.split(ant_solutions[idx_solution], \
                        indices_or_sections=[np.random.randint(1, len(ant_solutions[idx_solution]))])
                # Relink.
                solution_mod = np.hstack((sec1, list(reversed(sec2))))
                # Apply mutation with probability.
                if np.random.rand() < p_mut:
                    p1 = np.random.randint(0, len(solution_mod))
                    p2 = np.random.randint(0, len(solution_mod))
                    solution_mod[[p1, p2]] = solution_mod[[p2, p1]]
                # Compute fitness value of relinked solution.
                fitness_mod = get_fitness(solution_mod, initial_node, node_list)
                # If fitness better accept. Also accept with specified probability.
                if (fitness_mod < ant_fitness_vals[idx_solution]) or (np.random.rand() < p_accept_worse):
                    ant_solutions[idx_solution, :] = solution_mod
                    ant_fitness_vals[idx_solution] = fitness_mod
        if aug == 'genetic':
            # If using genetic augmentation.
            # Sort ants ant fitness values from best to worst.
            p = ant_fitness_vals.argsort()
            ant_fitness_vals = ant_fitness_vals[p]
            ant_solutions = ant_solutions[p, :]
            # Get number of new ants and initialize array for crossovers.
            n_new_ants = int(np.ceil(breeding_coeff*ant_solutions.shape[0]))
            ant_solutions_new = np.empty((n_new_ants, ant_solutions.shape[1]), dtype=int)
            ant_fitness_vals_new = np.empty(ant_solutions_new.shape[0], dtype=float)
            # Go over solutions for which to perform crossover.
            # NOTE(review): this loop reads pairs (idx, idx+1); when
            # n_new_ants is odd the final idx+1 overruns the array —
            # confirm breeding_coeff/n_ants always yield an even count.
            for idx in range(0, ant_solutions_new.shape[0], 2):
                # Get solutions and cut at random point.
                ant_sol_1 = ant_solutions[idx, :]
                ant_sol_2 = ant_solutions[idx+1, :]
                c1 = ant_sol_1[:np.random.randint(1, len(ant_sol_1))]
                c2 = ant_sol_2[:np.random.randint(1, len(ant_sol_2))]
                # Append elements in second solution in order found.
                offspring1 = np.hstack((c1, ant_sol_2[~np.in1d(ant_sol_2, c1)]))
                offspring2 = np.hstack((c2, ant_sol_1[~np.in1d(ant_sol_1, c2)]))
                # Apply mutations with specified probability.
                if np.random.rand() < p_mut:
                    p1 = np.random.randint(0, len(offspring1))
                    p2 = np.random.randint(0, len(offspring1))
                    offspring1[[p1, p2]] = offspring1[[p2, p1]]
                if np.random.rand() < p_mut:
                    p1 = np.random.randint(0, len(offspring2))
                    p2 = np.random.randint(0, len(offspring2))
                    offspring2[[p1, p2]] = offspring2[[p2, p1]]
                # Set offspring and fitness values.
                ant_solutions_new[idx, :] = offspring1
                ant_solutions_new[idx+1, :] = offspring2
                ant_fitness_vals_new[idx] = get_fitness(offspring1, initial_node, node_list)
                ant_fitness_vals_new[idx+1] = get_fitness(offspring2, initial_node, node_list)
            # Replace worst ants with offspring of best.
            ant_solutions[-ant_solutions_new.shape[0]:] = ant_solutions_new
            ant_fitness_vals[-len(ant_fitness_vals_new):] = ant_fitness_vals_new
        # Compute and print diversity of solutions.
        # NOTE(review): raises ZeroDivisionError when every ant has the same
        # fitness (max == min) — confirm this cannot occur in practice.
        diversity = (np.mean(ant_fitness_vals) - np.min(ant_fitness_vals))/(np.max(ant_fitness_vals) - np.min(ant_fitness_vals))
        print(diversity)
        # Add pheromones to pheromone accumulation matrix (for next iteration).
        for idx_sol, solution in enumerate(ant_solutions):
            for idx in range(len(solution)-1):
                pher_add_mat[solution[idx], solution[idx+1]] += q*(1/ant_fitness_vals[idx_sol])
                pher_add_mat[solution[idx+1], solution[idx]] += q*(1/ant_fitness_vals[idx_sol])
        # Update pheromone matrix.
        pher_mat = (1-rho)*pher_mat + pher_add_mat
    # Return best found solution, fitness value of best found solution and edgelist of network states
    # corresponding to global best position updates.
    return best_solution['solution'], best_solution['fitness'], edgelists
if __name__ == '__main__':
    ### PARSE ARGUMENTS ###
    parser = argparse.ArgumentParser(description='Approximate solution to TSP using ant colony optimization.')
    parser.add_argument('--num-nodes', type=int, default=50, help='Number of nodes to use')
    parser.add_argument('--dist-func', type=str, default='geodesic', choices=['geodesic', 'learned'],
                        help='Distance function to use')
    parser.add_argument('--prediction-model', type=str, default='gboosting', choices=['gboosting', 'rf'],
                        help='Prediction model to use for learned distance function')
    parser.add_argument('--max-it', type=int, default=100, help='Maximum iterations to perform')
    parser.add_argument('--n-ants', type=int, default=100, help='Number of ants to use')
    parser.add_argument('--rho', type=float, default=0.1, help='Evaporation rate parameter')
    parser.add_argument('--alpha', type=float, default=1.0, help='Alpha parameter in transition probability matrix update')
    parser.add_argument('--beta', type=float, default=1.0, help='Beta parameter in transition probability matrix update')
    parser.add_argument('--q', type=float, default=1.0, help='Pheromone update coefficient')
    parser.add_argument('--aug', type=str, default=None, choices=['relinking', 'genetic'], help='Augmentation to use')
    parser.add_argument('--p-mut', type=float, default=0.08, help='Mutation rate (augmentation)')
    parser.add_argument('--p-accept-worse', type=float, default=0.08,
            help='Probability of accepting a worse result of relinking (relinking augmentation)')
    parser.add_argument('--breeding-coeff', type=float, default=0.5,
            help='Fraction of best solution for which to perform crossover and fraction of worst solution to replace by offspring (genetic augmentation)')
    args = parser.parse_args()
    #######################
    # Parse problem network.
    network = nx.read_gpickle('./data/grid_data/grid_network.gpickle')
    # Number of nodes to remove so that exactly args.num_nodes remain.
    to_remove = network.number_of_nodes() - args.num_nodes
    # Remove randomly sampled nodes to get specified number of nodes.
    network.remove_nodes_from(random.sample(list(network.nodes), to_remove))
    # Get distance function.
    # NOTE(review): dist_func is computed here but never passed to aco() below;
    # presumably aco() obtains the distance function internally — confirm.
    dist_func = get_dist_func(network, which=args.dist_func, prediction_model=args.prediction_model)
    # Run ant colony optimization: returns the best tour found, its fitness
    # and the edge lists recorded on global-best updates (used for animation).
    solution_position, solution_fitness, edgelists = aco(network, n_ants=args.n_ants, max_it=args.max_it, rho=args.rho,
            alpha=args.alpha, beta=args.beta, q=args.q, aug=args.aug, p_mut=args.p_mut,
            p_accept_worse=args.p_accept_worse, breeding_coeff=args.breeding_coeff)
    # Save list of edge lists for animation, plus the reduced network itself.
    np.save('./results/edgelists/edgelist_tsp_ac.npy', list(map(np.vstack, edgelists)))
    nx.write_gpickle(network, './results/networks/network_tsp_ac.gpickle')
    # Print best solution fitness.
    print('Fitness of best found solution: {0:.3f}'.format(solution_fitness))
| 45.411765 | 154 | 0.63637 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,295 | 0.381045 |
a6ba1034d83cf267912fbc83efe67828fa37bf25 | 2,393 | py | Python | samples/sample_file_handling.py | Wacom-Developer/universal-ink-library | 689ed90e09e912b8fc9ac249984df43a7b59aa59 | [
"Apache-2.0"
] | 5 | 2021-09-06T11:45:37.000Z | 2022-03-24T15:56:06.000Z | samples/sample_file_handling.py | Wacom-Developer/universal-ink-library | 689ed90e09e912b8fc9ac249984df43a7b59aa59 | [
"Apache-2.0"
] | null | null | null | samples/sample_file_handling.py | Wacom-Developer/universal-ink-library | 689ed90e09e912b8fc9ac249984df43a7b59aa59 | [
"Apache-2.0"
] | 2 | 2021-09-03T09:08:45.000Z | 2021-12-15T14:03:16.000Z | # -*- coding: utf-8 -*-
# Copyright © 2021 Wacom Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
from uim.codec.parser.uim import UIMParser
from uim.codec.parser.will import WILL2Parser
from uim.codec.writer.encoder.encoder_3_1_0 import UIMEncoder310
from uim.model.ink import InkModel
if __name__ == '__main__':
    # Demonstrates parsing ink files in three formats and re-encoding each
    # of them as Universal Ink Model (UIM) v3.1.0.
    parser: UIMParser = UIMParser()
    # Parse UIM v3.0.0
    ink_model: InkModel = parser.parse('../ink/uim_3.0.0/1) Value of Ink 1.uim')
    # Save the model, this will overwrite an existing file
    with io.open('1) Value of Ink 1_3_0_0_to_3_1_0.uim', 'wb') as uim:
        # Encode as UIM v3.1.0
        uim.write(UIMEncoder310().encode(ink_model))
    # ------------------------------------------------------------------------------------------------------------------
    # Parse UIM v3.1.0 (the same UIMParser instance handles both versions)
    # ------------------------------------------------------------------------------------------------------------------
    ink_model: InkModel = parser.parse('../ink/uim_3.1.0/1) Value of Ink 1 (3.1 delta).uim')
    # Save the model, this will overwrite an existing file
    with io.open('1) Value of Ink 1_3_1_0.uim', 'wb') as uim:
        # Encode as UIM v3.1.0
        uim.write(UIMEncoder310().encode(ink_model))
    # ------------------------------------------------------------------------------------------------------------------
    # Parse WILL 2 file from Inkspace (https://inkspace.wacom.com/)
    # ------------------------------------------------------------------------------------------------------------------
    parser: WILL2Parser = WILL2Parser()
    ink_model_2: InkModel = parser.parse('../ink/will/elephant.will')
    # Save the model, this will overwrite an existing file
    with io.open('elephant.uim', 'wb') as uim:
        # Encode as UIM v3.1.0
        uim.write(UIMEncoder310().encode(ink_model_2))
| 50.914894 | 120 | 0.558713 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,636 | 0.683375 |
a6ba3b9a6eb8e00e8a17e5342d7063e3223bfdac | 8,504 | py | Python | net/LcaNet.py | SoufiyanBAHADI/ALCA | 7eea7402eb00f410a6fd9c6734f1926b1e31cff8 | [
"BSD-2-Clause"
] | 3 | 2021-10-04T02:00:54.000Z | 2022-03-09T18:52:31.000Z | net/LcaNet.py | SoufiyanBAHADI/ALCA | 7eea7402eb00f410a6fd9c6734f1926b1e31cff8 | [
"BSD-2-Clause"
] | 2 | 2022-01-23T22:32:28.000Z | 2022-03-12T02:21:11.000Z | net/LcaNet.py | SoufiyanBAHADI/ALCA | 7eea7402eb00f410a6fd9c6734f1926b1e31cff8 | [
"BSD-2-Clause"
] | null | null | null | """
Created on 30.09.2020
@author: Soufiyan Bahadi
@director: Jean Rouat
@co-director: Eric Plourde
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from managers.ContextManager import ContextManager
from managers.LearningManager import LearningManager
from managers.PlottingManager import PlottingManager
from net.HardShrink import HardShrink
from net.Sparsity import Sparsity
from utils import reconstruct, size
from constants import Example
class Lca(nn.Module):
    """Locally Competitive Algorithm (LCA) sparse-coding layer.

    The filter bank (dictionary) is produced by the ContextManager's
    `compute_weights()` from its learnable parameters `c`, `b` and
    `filter_ord`.  `forward()` runs the LCA membrane dynamics over the
    stored mini-batch and leaves the results on the instance:
    `spikegram`, `residual`, `loss`, `mse` and `sp_nb`.
    """
    def __init__(self,
                 cm: ContextManager,
                 lm: LearningManager,
                 pm: PlottingManager = None):
        """Keep references to the managers and initialize result holders.

        Args:
            cm: Context manager holding the LCA hyper-parameters used below
                (dt, tau, stride, threshold, iters, num_channels, device)
                and the learnable filter parameters (c, b, filter_ord).
            lm: Learning manager holding the optimizer, the loss weights
                alpha/beta and the truncated-backprop buffer size.
            pm: Optional plotting manager used for loss tracking.
        """
        super(Lca, self).__init__()
        # managers init
        self.cm = cm
        self.lm = lm
        self.pm = pm
        # Filter bank; recomputed at the start of every forward pass.
        self.weights = None
        # NOTE(review): forward() assigns self.num_shifts (with underscore);
        # this `numshifts` attribute looks unused — confirm.
        self.numshifts = None
        # Input mini-batch (private; set through the `mini_batch` property).
        self.__mini_batch = None
        # Final LCA outputs
        self.spikegram = None
        self.residual = None
        # Final LCA error statistics (per batch element)
        self.loss = None
        self.mse = None
        self.sp_nb = None
    @property
    def mini_batch(self):
        # Current mini-batch as a (batch, 1, samples) tensor on cm.device.
        return self.__mini_batch
    @mini_batch.setter
    def mini_batch(self, value):
        # Accepts a (batch, samples) numpy array; a channel axis is inserted.
        if value is not None:
            self.__mini_batch = torch.from_numpy(value[:, None, :]).to(
                self.cm.device)
    def _criterion(self, activation):
        """Return the per-example reconstruction error and the reconstruction.

        The error is 1/2 * the sum of squared differences over the time axis,
        i.e. one scalar per batch element.
        """
        mse = torch.nn.MSELoss(reduction='none')
        recons = reconstruct(self.weights, activation, self.cm.stride)
        err = 1 / 2 * torch.sum(mse(recons, self.mini_batch), dim=2)[:, 0]
        return err, recons
    def train(self, mode: bool = True):
        """Toggle train mode; gradients on the filter parameters follow it."""
        super().train(mode)
        self.cm.c.requires_grad_(mode)
        self.cm.b.requires_grad_(mode)
        self.cm.filter_ord.requires_grad_(mode)
    def eval(self):
        """Switch to evaluation mode (filter parameters frozen)."""
        self.train(False)
    def forward(self):
        """Run `cm.iters` steps of the LCA dynamics on the stored mini-batch.

        In train mode the hidden states are buffered and, once the buffer is
        full (`lm.buffer_size`), the accumulated loss is backpropagated
        through the buffered window to update the filter parameters
        (truncated backpropagation through time); the last states are then
        detached and the window restarts.  In eval mode the membrane
        potential is updated in place from the gradient of the per-step
        loss, without touching the filter parameters.
        """
        learning = self.cm.c.requires_grad  # train mode iff filter params require grad
        sparsity = Sparsity.apply
        shrink = HardShrink.apply
        self.weights = self.cm.compute_weights()
        # Number of valid filter positions for the strided convolution.
        self.num_shifts = (self.mini_batch.shape[-1] -
                           self.weights.shape[-1]) // self.cm.stride + 1
        if learning:
            # init states
            # intern potential
            init_u = torch.zeros(self.mini_batch.shape[0],
                                 self.cm.num_channels,
                                 self.num_shifts,
                                 dtype=torch.float64,
                                 requires_grad=True,
                                 device=self.cm.device)
            # activation
            init_a = torch.zeros(self.mini_batch.shape[0],
                                 self.cm.num_channels,
                                 self.num_shifts,
                                 dtype=torch.float64,
                                 requires_grad=True,
                                 device=self.cm.device)
            # Hidden states: first Euler step of the LCA ODE
            # (input drive - leak - lateral inhibition + self term).
            # intern potentials
            state_u = [
                init_u - self.cm.dt / self.cm.tau *
                (-F.conv1d(
                    self.mini_batch, self.weights, stride=self.cm.stride) +
                 init_u + F.conv1d(F.conv_transpose1d(
                     init_a, self.weights, stride=self.cm.stride),
                                   self.weights,
                                   stride=self.cm.stride) - init_a)
            ]
            # activations
            state_a = [shrink(state_u[-1], self.cm.threshold)]
            del init_u
            del init_a
            loss_, _ = self._criterion(state_a[-1])
            loss_ = torch.mean(
                self.lm.alpha * loss_ + self.lm.beta *
                sparsity(state_u[-1], state_a[-1], self.cm.threshold))
        else:
            # if learning is not activated there is no need to define
            # a buffer for hidden states
            u = torch.zeros(self.mini_batch.shape[0],
                            self.cm.num_channels,
                            self.num_shifts,
                            requires_grad=True,
                            dtype=torch.float64,
                            device=self.cm.device)
        for it in range(self.cm.iters):
            if learning:
                if len(state_u) < self.lm.buffer_size:
                    # Dynamics: one Euler step, kept in the autograd graph.
                    state_u.append(
                        state_u[-1] + self.cm.dt / self.cm.tau *
                        (F.conv1d(self.mini_batch,
                                  self.weights,
                                  stride=self.cm.stride) - state_u[-1] -
                         F.conv1d(F.conv_transpose1d(
                             state_a[-1], self.weights, stride=self.cm.stride),
                                  self.weights,
                                  stride=self.cm.stride) + state_a[-1]))
                    # Activation
                    state_a.append(shrink(state_u[-1], self.cm.threshold))
                else:
                    # Buffer full: backprop the accumulated loss through the
                    # buffered window (truncated BPTT) ...
                    loss_.backward(retain_graph=True)
                    # ... optimize c, b, filter_order ...
                    self.lm.optimizer.step()
                    self.lm.optimizer.zero_grad()
                    # ... and recompute the filter bank from the new params.
                    with torch.no_grad():
                        self.weights.data = self.cm.compute_weights().data
                    # reset Loss
                    loss_ = 0
                    # Last states (detached) become the new initial states.
                    init_u = state_u[-1].detach().clone()
                    init_a = state_a[-1].detach().clone()
                    # clear the memory of all states whose loss was backpropagated
                    state_u.clear()
                    state_a.clear()
                    # compute first hidden states of the new window
                    state_u.append(
                        init_u + self.cm.dt / self.cm.tau *
                        (F.conv1d(self.mini_batch,
                                  self.weights,
                                  stride=self.cm.stride) - init_u -
                         F.conv1d(F.conv_transpose1d(
                             init_a, self.weights, stride=self.cm.stride),
                                  self.weights,
                                  stride=self.cm.stride) + init_a))
                    state_a.append(shrink(state_u[-1], self.cm.threshold))
                    del init_a
                    del init_u
                # Loss computation
                mse, recons = self._criterion(state_a[-1])
                sp_err = sparsity(state_u[-1], state_a[-1], self.cm.threshold)
                # Accumulate loss over the current window
                loss_ += torch.mean(self.lm.alpha * mse +
                                    self.lm.beta * sp_err)
            else:
                # Activation
                a = shrink(u, self.cm.threshold)
                # Loss computation
                mse, recons = self._criterion(a)
                sp_err = sparsity(u, a, self.cm.threshold)
                loss = mse + sp_err
                # Computing loss gradients w.r.t. the membrane potential u
                loss.sum().backward()
                with torch.no_grad():
                    # Dynamics: gradient step on u, in place
                    u.data.sub_(u.grad, alpha=self.cm.dt / self.cm.tau)
                    u.grad.zero_()
            if self.pm is not None:
                if self.pm.track:
                    # Tracking data for one example.
                    # NOTE(review): in train mode `a` is only bound after the
                    # loop, so tracking here assumes eval mode — confirm.
                    self.pm.track_loss(2 * mse[Example.SIG_ID.value] / self.mini_batch.shape[-1], a[Example.SIG_ID.value], it)
        if learning:
            a = state_a[-1]
        # Save residual
        self.residual = self.mini_batch - recons
        # Save spikegram
        self.spikegram = a.detach().cpu().numpy().reshape(
            (self.mini_batch.shape[0], self.cm.num_channels, -1))
        # Save loss at the end of lca
        mse = mse.detach().cpu().numpy()
        sp_err = sp_err.detach().cpu().numpy()
        self.loss = mse + sp_err
        self.mse = 2 * mse / self.mini_batch.shape[-1]  # residual energy divided by its dimension
        # L0 norm of each example's spikegram = number of spikes.
        self.sp_nb = np.linalg.norm(self.spikegram.reshape(
            (self.spikegram.shape[0], -1)),
                                    ord=0,
                                    axis=1)
        if self.pm is not None:
            if not self.pm.track:
                self.pm.append(self.mse, self.sp_nb)
| 37.795556 | 166 | 0.485889 | 8,015 | 0.942498 | 0 | 0 | 253 | 0.029751 | 0 | 0 | 891 | 0.104774 |
a6ba8aa475057ee102e8d38ba6b8631bbfab4990 | 8,254 | py | Python | util_tools/L2L_analysis_module.py | WeilabMSU/PretrainModels | 15370ded8c1c03ba0b9e123fe4c125815300d157 | [
"MIT"
] | 4 | 2021-12-22T08:35:47.000Z | 2022-02-04T23:05:19.000Z | util_tools/L2L_analysis_module.py | WeilabMSU/PretrainModels | 15370ded8c1c03ba0b9e123fe4c125815300d157 | [
"MIT"
] | null | null | null | util_tools/L2L_analysis_module.py | WeilabMSU/PretrainModels | 15370ded8c1c03ba0b9e123fe4c125815300d157 | [
"MIT"
] | null | null | null | '''
Analytic Hierarchy Process, AHP.
Base on Wasserstein distance
'''
from scipy.stats import wasserstein_distance
from sklearn.decomposition import PCA
import scipy
import numpy as np
import pandas as pd
import sys
import argparse
import os
import glob
import datasets_analysis_module as dam
class idx_analysis(object):
    """Lookup table mapping tracked symbols (SMILES tokens plus the two
    aggregate statistics 'length' and 'symbol_type') to distribution indices.
    """
    # Position in this tuple is the distribution index (0..62).
    _SYMBOLS = (
        'c', 'C', '(', ')', '1', 'O', '=', '2', 'N', 'n',
        '3', '[', ']', '@', 'H', 'F', '-', '4', 'S', 'Cl',
        '/', 's', 'o', '.', 'Br', '5', '+', '#', '\\', '6',
        'I', 'P', 'Si', '7', '8', 'B', '%', 'Na', '9', '0',
        'K', 'Sn', 'Se', 'Li', 'Zn', 'Al', 'b', 'As', 'Mg', 'p',
        'Ca', 'se', 'Ag', 'Te', 'Ba', 'Bi', 'Rb', 'Cs', 'Sr', 'te',
        'Be', 'length', 'symbol_type',
    )

    def __init__(self):
        # symbol -> index
        self.all_distribution_idx = {
            symbol: idx for idx, symbol in enumerate(self._SYMBOLS)
        }
        # index -> symbol
        self.all_distribution_idx_reversed = dict(enumerate(self._SYMBOLS))
def wasserstein_dis(distr_dict_0, distr_dict_1, dis_type='wasserstein'):
    """Distance between two {key: count} distributions after normalization.

    Each distribution is read in key-sorted order, scaled by its peak value
    and offset by a tiny epsilon (keeps the KL divergence finite).
    `dis_type` selects the metric: 'wasserstein' (default), 'KL', or any
    other value falls back to the Euclidean norm of the difference.
    The result is rounded to 4 decimals.
    """
    eps = 1e-15

    def normalized(distr):
        keys = np.sort(list(distr.keys()))
        peak = max(distr.values())
        return eps + np.array([distr[k] for k in keys]) / peak

    values_0 = normalized(distr_dict_0)
    values_1 = normalized(distr_dict_1)
    if dis_type == 'wasserstein':
        dist = wasserstein_distance(values_0, values_1)
    elif dis_type == 'KL':
        dist = np.mean(scipy.special.kl_div(values_0, values_1))
    else:
        dist = np.linalg.norm(np.array(values_0) - np.array(values_1))
    return np.round(dist, 4)
def datasets_pair_analysis(
        target_set_distribution,
        pretrain_sets_distribution_path='PretrainedSetsDistribution.npy'
):
    """Compare a target dataset's distributions against the pretrained sets.

    Args:
        target_set_distribution: Dict mapping the stringified distribution
            index ('0'..'62') to a {key: count} distribution.
        pretrain_sets_distribution_path: Path of the .npy file holding the
            pretrained sets' distributions, keyed as '<prefix>-<index>'.

    Returns:
        Dict {prefix: {index: wasserstein distance}} for the three prefixes
        'c', 'cp' and 'cpz' (presumably chembl / chembl+pubchem /
        chembl+pubchem+zinc, matching the model names used in main_L2L).

    Raises:
        FileNotFoundError: If the distributions file does not exist.
    """
    if not os.path.exists(pretrain_sets_distribution_path):
        # Fail explicitly instead of printing and then crashing in np.load.
        raise FileNotFoundError(
            f'{pretrain_sets_distribution_path} not the right file: '
            'PretrainedSetsDistribution.npy can not be found')
    pretrained_sets_distribution = np.load(pretrain_sets_distribution_path, allow_pickle=True).item()
    three_sets_prefix = ['c', 'cp', 'cpz']
    all_wd_values = {prefix: {} for prefix in three_sets_prefix}
    for prefix in three_sets_prefix:
        for j in range(63):
            all_wd_values[prefix][j] = wasserstein_dis(
                target_set_distribution[str(j)],
                pretrained_sets_distribution[f"{prefix}-{j}"]
            )
    return all_wd_values
def rerange_distribution(target, combined_result):
    """Re-bin raw distribution counts onto the fixed key range for `target`.

    'length' uses integer bins 1..256 and 'symbol_type' uses 1..61; keys
    outside the range are folded into the boundary bins.  Any other target
    uses the fixed frequency grid 0.00, 0.01, ..., 1.00; counts for keys
    not on the grid are dropped with a warning print.
    """
    if target == 'length':
        low, high = 1, 256
        distribute_dict = dict.fromkeys(range(low, high + 1), 0)
        for key, count in combined_result.items():
            if key <= low:
                distribute_dict[low] += count
            elif key >= high:
                distribute_dict[high] += count
            elif low < key < high:
                distribute_dict[key] = count
            else:
                print('Unexpected key from combined_result.(target: length)')
    elif target == 'symbol_type':
        low, high = 1, 61
        distribute_dict = dict.fromkeys(range(low, high + 1), 0)
        for key, count in combined_result.items():
            if key <= low:
                distribute_dict[low] += count
            elif key >= high:
                distribute_dict[high] += count
            elif low < key < high:
                distribute_dict[key] = count
            else:
                print('Unexpected key from combined_result.(target: symbol_type)')
    else:
        grid = [np.round(w, 2) for w in np.arange(0.0, 1.001, 0.01)]
        distribute_dict = dict.fromkeys(grid, 0)
        for key, count in combined_result.items():
            if key in distribute_dict:
                distribute_dict[key] += count
            else:
                print('Unexpected key {:s} from combined_result.(consider_symbol {:s})'.format(str(key), target))
    return distribute_dict
def linear_ridgeclassifier(x, y):
    """Fit a ridge classifier on (x, y) and report its training accuracy.

    Returns:
        Tuple of (training accuracy, intercept array, fitted classifier).
    """
    from sklearn import linear_model
    classifier = linear_model.RidgeClassifier()
    classifier.fit(x, y)
    return classifier.score(x, y), classifier.intercept_, classifier
def data_norm(*args):
    """Standardize one or more feature matrices with a single scaler.

    The StandardScaler is fitted on all datasets stacked vertically, so every
    dataset is transformed with the same statistics.  Returns a list of
    transformed arrays, or a single array when only one dataset is given.
    """
    assert len(args) > 0, "Datasets' length needs > 0"
    from sklearn.preprocessing import StandardScaler
    scaler = StandardScaler().fit(np.vstack(args))
    transformed = [scaler.transform(dataset) for dataset in args]
    if len(args) == 1:
        return transformed[0]
    return transformed
def main_get_dis_customized_dataset(file='./temp_data/bbbp.smi', num_workers=1):
    """Build the 1 x 189 Wasserstein-distance feature row for a .smi dataset.

    For each of the 63 tracked statistics the dataset's distribution is
    computed (via dam.parallel_operation), re-binned, and compared against
    the three pretrained-set distributions; the resulting 3 x 63 distances
    are flattened into a single-row DataFrame indexed by the dataset name.
    """
    dataset_name = os.path.split(file)[-1].split('.')[0]
    symbol_index = idx_analysis()
    target_set_distribution = {}
    for symbol, index in symbol_index.all_distribution_idx.items():
        analyzer = dam.target_analysis(symbol)
        # Pick the analysis routine matching the tracked statistic.
        if symbol == 'length':
            analysis_func = analyzer.length_analysis
        elif symbol == 'symbol_type':
            analysis_func = analyzer.symbol_type_analysis
        else:
            analysis_func = analyzer.symbol_analysis
        raw_distribution = dam.parallel_operation(file, num_workers, analysis_func)
        target_set_distribution[str(index)] = rerange_distribution(symbol, raw_distribution)
    all_wd_values = datasets_pair_analysis(
        target_set_distribution,
        pretrain_sets_distribution_path='PretrainedSetsDistribution.npy',
    )
    # Flatten the per-prefix distance dicts (3 prefixes x 63 distances)
    # into one feature row.
    feature_rows = [list(wd_dict.values()) for wd_dict in all_wd_values.values()]
    return pd.DataFrame(
        np.reshape(feature_rows, [1, 63*3]),
        index=[dataset_name],
        columns=list(range(63*3)),
    )
def main_L2L(args):
    """Select the best-matching pretrained model for a customized dataset.

    Loads the reference Wasserstein features, appends the customized
    dataset's features, normalizes everything together, then trains a ridge
    classifier on an increasing number of PCA components until the
    reference labels are classified perfectly.  The customized dataset is
    scored by that first perfect model and the selected pretrained model is
    printed.

    Raises:
        FileNotFoundError: If ./wasserstein.csv is missing.
        RuntimeError: If no PCA size reaches a perfect training score.
    """
    # This file contains the features used to train the decision model.
    filename = './wasserstein.csv'
    if not os.path.exists(filename):
        # Fail explicitly instead of printing and then crashing in read_csv.
        raise FileNotFoundError('No wasserstein.csv exists')
    data_df = pd.read_csv(filename, header=0, index_col=0)
    label = data_df['label'].values
    features = data_df[[str(i) for i in range(np.shape(data_df.values)[-1]-1)]].values
    customized_dataset_feature = main_get_dis_customized_dataset(
        file=args.input_dataset, num_workers=args.num_workers).values
    # Normalize reference and customized features together, then split again.
    all_features = np.vstack([features, customized_dataset_feature])
    norm_all_features = data_norm(all_features)
    features = norm_all_features[0: -1, :]
    customized_dataset_feature = norm_all_features[-1, :]
    all_score = []
    all_inter = []
    get_scores = None
    for redu_i in range(1, np.shape(features)[0]+1):
        reducer = PCA(n_components=redu_i)
        features_ = reducer.fit_transform(features)
        score, inter_, model = linear_ridgeclassifier(features_, label)
        all_score.append(score)
        all_inter.append(inter_[0])
        if score - 1 == 0 and get_scores is None:
            # First component count that classifies the references perfectly:
            # project the customized dataset and keep its decision scores.
            customized_dataset_feature_ = reducer.transform(customized_dataset_feature[None, :])
            get_scores = model.decision_function(customized_dataset_feature_)
    if get_scores is None:
        # Bug fix: the original referenced get_scores unconditionally below,
        # raising a bare NameError when no PCA size reached a perfect score.
        raise RuntimeError('No PCA dimensionality reached a perfect training '
                           'score; cannot select a pretrained model.')
    select_models = {0: 'model_chembl27', 1: 'model_chembl27_pubchem', 2: 'model_chembl27_pubchem_zinc'}
    print(f'Select the pretrained {select_models[np.argmax(get_scores)]}, and the score is {np.max(get_scores)}')
def main(args):
    """Entry point: run the learn-to-learn pretrained-model selection."""
    main_L2L(args)
def parse_args(args):
    """Parse command-line options for the dataset analysis.

    Args:
        args: List of argument strings (e.g. sys.argv[1:]).

    Returns:
        The parsed argparse.Namespace with `input_dataset` and `num_workers`.
    """
    parser = argparse.ArgumentParser(description='Datasets analysis')
    parser.add_argument('--input_dataset', default='test.smi', type=str)
    parser.add_argument('--num_workers', default=1, type=int)
    # Bug fix: the original ignored the `args` parameter and always parsed
    # sys.argv, defeating the explicit argument list passed by cli_main().
    args = parser.parse_args(args)
    return args
def cli_main():
    """Command-line entry point: parse argv and run the selection pipeline."""
    main(parse_args(sys.argv[1:]))
if __name__ == "__main__":
cli_main()
print('End!')
| 36.522124 | 113 | 0.63024 | 875 | 0.106009 | 0 | 0 | 0 | 0 | 0 | 0 | 1,358 | 0.164526 |
a6bb1c54381bdffa7fdca88fa66dee0901c84e20 | 7,581 | py | Python | src/opendr/_setup.py | daoran/opendr | bca25f6a43244fe9c219a24576181f94a0726923 | [
"Apache-2.0"
] | null | null | null | src/opendr/_setup.py | daoran/opendr | bca25f6a43244fe9c219a24576181f94a0726923 | [
"Apache-2.0"
] | null | null | null | src/opendr/_setup.py | daoran/opendr | bca25f6a43244fe9c219a24576181f94a0726923 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
import os
from os.path import join
from configparser import ConfigParser
from setuptools import find_packages
from setuptools.command.install import install
import sys
# Package metadata shared by every generated distribution.
author = 'OpenDR consortium'
author_email = 'tefas@csd.auth.gr'
url = 'https://github.com/opendr-eu/opendr'
# NOTE(review): `license` shadows the site-provided builtin of the same
# name; harmless here since it is only passed through to setup().
license = 'LICENSE'
# Retrieve version by executing _version.py, which defines __version__.
exec(open('src/opendr/_version.py').read())
try:
    # Fall back to a dummy version if _version.py did not define one.
    __version__
except NameError:
    __version__ = '0.0'
# Read the long description
with open("description.txt") as f:
    long_description = f.read()
# Disable AVX2 for BCOLZ to ensure wider compatibility
os.environ['DISABLE_BCOLZ_AVX2'] = 'true'
def get_packages(module=None):
    """Resolve the distribution name and package list for a build.

    With a module (e.g. 'engine' or 'perception/object_detection_2d') only
    the matching packages discovered under ./src are kept and the wheel is
    named 'opendr-toolkit-<short-name>'.  Without one, the metapackage
    'opendr-toolkit' with the top-level namespace packages is returned.
    """
    if module:
        discovered = find_packages(where="./src")
        if module == 'engine':
            short_name = module
        else:
            # 'category/tool' -> filter on the tool part.
            short_name = module.split("/")[1]
        packages = [pkg for pkg in discovered if short_name in pkg]
        name = "opendr-toolkit-" + short_name.replace("_", "-")
    else:
        name = "opendr-toolkit"
        packages = [
            'opendr.utils',
            'opendr.perception',
            'opendr.engine',
            'opendr.control',
            'opendr.planning',
            'opendr.simulation',
            'opendr',
        ]
    return name, packages
def generate_manifest(module=None):
    """Write MANIFEST.in for the requested (sub)module build.

    Module-specific include rules come first (none for the metapackage),
    followed by the files every distribution ships.
    """
    lines = []
    if module == "engine":
        lines.append("recursive-include src/opendr/engine *\n")
        lines.append("include src/opendr/engine *\n")
        lines.append("include src/opendr/utils *\n")
    elif module:
        lines.append("recursive-include " + join("src/opendr", module) + " *\n")
        lines.append("include " + join("src/opendr", module.split("/")[0]) + " *\n")
    lines.append("exclude src/opendr/__init__.py \n")
    lines.append("include description.txt \n")
    lines.append("include packages.txt \n")
    lines.append("include README.md \n")
    lines.append("include src/opendr/_version.py \n")
    lines.append("include src/opendr/_setup.py \n")
    with open("MANIFEST.in", "w") as manifest:
        manifest.writelines(lines)
def get_description(module=None):
    """Return the package description, mentioning the submodule if given."""
    base = 'Open Deep Learning Toolkit for Robotics'
    if not module:
        return base
    return base + ' (submodule: ' + module + ')'
def get_dependencies(current_module):
    """Collect runtime dependencies for a module (or the metapackage).

    For a concrete module, every dependencies.ini in the module folder and
    its direct subfolders is scanned: '[runtime] python' entries become pip
    dependencies (git-based and ${OPENDR_HOME}-local ones are split off for
    post-install handling), '[runtime] opendr' entries are added as-is, and
    '[runtime] post-install' commands are gathered.  For the metapackage
    (current_module falsy) the dependency list is one opendr-toolkit-* wheel
    per entry of packages.txt.

    Returns:
        Tuple of (dependencies, skipped_dependencies, post_install) lists;
        the last two stay empty in the metapackage case.
    """
    dependencies = []
    skipped_dependencies = []
    post_install = []
    # Read all the dependencies.ini for each tool category
    if current_module:
        # Get all subfolders
        paths = ['.']
        for file in os.listdir(join("src/opendr", current_module)):
            if os.path.isdir(join("src/opendr", current_module, file)):
                paths.append(file)
        for path in paths:
            try:
                parser = ConfigParser()
                parser.read(join("src/opendr", current_module, path, 'dependencies.ini'))
                try:
                    cur_deps = parser.get("runtime", "python").split('\n')
                except Exception:
                    cur_deps = []
                try:
                    opendr_deps = parser.get("runtime", "opendr").split('\n')
                except Exception:
                    opendr_deps = []
                try:
                    scripts = parser.get("runtime", "post-install").split('\n')
                    for x in scripts:
                        post_install.append(x)
                except Exception:
                    pass
            except Exception:
                # NOTE(review): if this outer block fails before cur_deps /
                # opendr_deps are (re)assigned, the loop below reuses values
                # from the previous path — or raises NameError on the first
                # one.  Presumably never hit in practice; confirm.
                pass
            # Add dependencies found (filter git-based ones and local ones)
            for x in cur_deps:
                if 'git' in x or '${OPENDR_HOME}' in x:
                    skipped_dependencies.append(x)
                else:
                    dependencies.append(x)
            for x in opendr_deps:
                dependencies.append(x)
        # De-duplicate; note that ordering is not preserved.
        dependencies = list(set(dependencies))
        skipped_dependencies = list(set(skipped_dependencies))
        post_install = list(set(post_install))
    else:
        with open("packages.txt", "r") as f:
            packages = [x.strip() for x in f.readlines()]
        for package in packages:
            # 'category/tool' entries map to opendr-toolkit-<tool>; plain
            # entries (except the namespace 'opendr' itself) map directly.
            if '/' in package:
                dependencies.append('opendr-toolkit-' + package.split('/')[1].replace('_', '-'))
            elif package != 'opendr':
                dependencies.append('opendr-toolkit-' + package.replace('_', '-'))
    return dependencies, skipped_dependencies, post_install
def get_data_files(module):
    """Collect the module's package data files: everything under its source
    tree except Python sources (.py) and shared libraries (.so)."""
    if not module:
        return []
    data_files = []
    for root, _, files in os.walk(join("src", "opendr", module)):
        # Paths are stored relative to the package root inside src/opendr.
        relative_root = root.replace("src/opendr/", "")
        for filename in files:
            if filename.split(".")[-1] not in ("so", "py"):
                data_files.append(join(relative_root, filename))
    return data_files
def build_package(module):
    """Build one pip distribution: a submodule wheel or the metapackage.

    Args:
        module: Module path such as 'engine' or
            'perception/object_detection_2d'; the special value 'opendr'
            selects the metapackage build.
    """
    if module == "opendr":
        # Flag to enable building opendr-metapackage
        module = None
    if module == 'perception/object_detection_2d':
        # This module ships Cython extensions that must be compiled.
        from Cython.Build import cythonize
        import numpy
        extra_params = {
            'ext_modules':
            cythonize([join("src/opendr/perception/object_detection_2d/retinaface/algorithm/cython/*.pyx")]),
            'include_dirs': [numpy.get_include()]}
    else:
        extra_params = {}
    name, packages = get_packages(module)
    dependencies, skipped_dependencies, post_install = get_dependencies(module)
    generate_manifest(module)
    # Define class for post installation scripts; it closes over the
    # git/local dependencies and post-install commands gathered above.
    class PostInstallScripts(install):
        def run(self):
            install.run(self)
            import subprocess
            # Install potential git and local repos during post installation
            for package in skipped_dependencies:
                if 'git' in package:
                    subprocess.call([sys.executable, '-m', 'pip', 'install', package])
                if '${OPENDR_HOME}' in package:
                    subprocess.call([sys.executable, '-m', 'pip', 'install', package.replace('${OPENDR_HOME}', '.')])
            if post_install:
                for cmd in post_install:
                    print("Running ", cmd)
                    subprocess.call(cmd.split(' '))
    setup(
        name=name,
        version=__version__,
        description=get_description(module),
        long_description=long_description,
        author=author,
        author_email=author_email,
        packages=packages,
        url=url,
        license=license,
        package_dir={"": "./src"},
        install_requires=dependencies,
        cmdclass={
            'develop': PostInstallScripts,
            'install': PostInstallScripts,
        },
        package_data={'': get_data_files(module)},
        **extra_params
    )
| 34.616438 | 117 | 0.597019 | 700 | 0.092336 | 0 | 0 | 0 | 0 | 0 | 0 | 2,336 | 0.308139 |
a6bf3ee264874a265742ece6f2b12936cf879cc1 | 13,836 | py | Python | modules/auto_split/splitter/mxnet_splitter.py | sophon-ai-algo/sophon-inference | f923413b76615e265af28fd1dd2b43e5eb303dcd | [
"Apache-2.0"
] | 18 | 2020-02-21T03:06:33.000Z | 2022-03-21T03:41:56.000Z | modules/auto_split/splitter/mxnet_splitter.py | sophon-ai-algo/sophon-inference | f923413b76615e265af28fd1dd2b43e5eb303dcd | [
"Apache-2.0"
] | null | null | null | modules/auto_split/splitter/mxnet_splitter.py | sophon-ai-algo/sophon-inference | f923413b76615e265af28fd1dd2b43e5eb303dcd | [
"Apache-2.0"
] | 6 | 2020-07-10T08:55:38.000Z | 2021-12-28T01:36:04.000Z | """ Copyright 2016-2022 by Bitmain Technologies Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
#import re
import json
import copy
#import numpy as np
import mxnet as mx
from mxnet import gluon
import bmnetm
from ..common.base_splitter import Splitter
from ..external.mxnet_functions import load_json_file
from ..external.mxnet_functions import get_index_dict
from ..external.mxnet_functions import get_input_names_from_json
from ..external.mxnet_functions import get_output_names_from_json
from ..external.mxnet_functions import node_is_weight
from ..external.mxnet_functions import get_all_ops
from ..external.mxnet_functions import get_input_names_from_file
from ..external.mxnet_functions import get_output_names_from_file
from ..external.mxnet_functions import sym_has_params
from ..external.mxnet_functions import get_prefix_and_epoch
from ..external.mxnet_functions import load_mxnet_model
from ..external.mxnet_functions import infer_mxnet
def get_more_than_x(numbers, value):
    """ Return the numbers that are greater than or equal to value.
    """
    return [number for number in numbers if number >= value]
def get_input_tensors(sub_graph):
    """ Get all input tensor names of a sub_graph: its own input ops plus
    the tensors coming from each of its input subgraphs.

    Args:
      sub_graph: A SubGraph instance.

    Returns:
      A set contains all input tensor names of the sub_graph.
    """
    tensors = copy.deepcopy(sub_graph.input_ops)
    for names in sub_graph.input_subgraphs.values():
        tensors |= names
    return tensors
def get_output_tensors(sub_graph):
    """ Get all output tensor names of a sub_graph: its own output ops plus
    the tensors flowing to each of its output subgraphs.

    Args:
      sub_graph: A SubGraph instance.

    Returns:
      A set contains all output tensor names of the sub_graph.
    """
    tensors = copy.deepcopy(sub_graph.output_ops)
    for names in sub_graph.output_subgraphs.values():
        tensors |= names
    return tensors
def find_arg_nodes(nodes, input_names, ops, index_dict):
    """ Find indexes of all argument nodes (input tensors and weights).

    Input tensors are represented by the placeholder indexes
    -len(input_names)..-1; for every listed op, the indexes of its weight
    parents are added.

    Args:
      nodes: A json object contain all the nodes in a mxnet json file.
      input_names: Names of input tensors.
      ops: Names of operaters.
      index_dict: A dict denotes relationships between name and index of nodes.

    Returns:
      A sorted list contains indexes of all argument nodes.
    """
    arg_ids = set(range(-len(input_names), 0))
    for op_name in ops:
        op_inputs = nodes[index_dict[op_name]]["inputs"]
        arg_ids.update(parent[0] for parent in op_inputs
                       if node_is_weight(nodes[parent[0]]))
    return sorted(arg_ids)
def find_heads(output_tensors, index_dict):
    """ Find indexes of all heads. Heads stand for output tensors.
    Args:
        output_tensors: Names of output tensors.
        index_dict: A dict denotes relationships between name and index of nodes.
    Returns:
        A sorted list of the (de-duplicated) node indexes of the heads.
    """
    return sorted({index_dict[name] for name in output_tensors})
def find_split_sons(raw_nodes, parent_id, sub_ops_ids):
    """ Find output slot ids ("sons") of a SliceChannel parent used by a sub graph.
    Args:
        raw_nodes: A json object contain all the nodes of the raw mxnet json file.
        parent_id: Id of a node.
        sub_ops_ids: Ids of all ops in a sub graph.
    Returns:
        A sorted list of the output slot ids of the specified parent that
        are consumed by the sub graph's ops.  Empty when the parent is not
        a SliceChannel node or none of its outputs are referenced.
    """
    # Only SliceChannel nodes have multiple output slots to track.
    if raw_nodes[parent_id]["op"] != "SliceChannel":
        # Bug fix: this early return previously produced an empty set()
        # while the normal path returned a sorted list; return a list in
        # both cases so the return type is consistent for callers.
        return []
    split_ids = set()
    for op_id in sub_ops_ids:
        # Each entry in "inputs" is [node_id, output_slot, version]; keep
        # the slot of every reference to the parent node.
        for node_input in raw_nodes[op_id]["inputs"]:
            if node_input[0] == parent_id:
                split_ids.add(node_input[1])
    return sorted(split_ids)
def gen_json(raw_json, sub_graph, index_dict, sub_json_path):
    """ Generate json file of a subgraph.

    Builds a new mxnet symbol json containing only the sub graph's nodes,
    rewriting node input references to the new numbering and synthesizing
    placeholder input nodes (named "<tensor>_<slot>_sophon_auto") for
    tensors that come out of a SliceChannel node.
    Args:
        raw_json: Json object read from json file of raw model.
        sub_graph: A SubGraph instance.
        index_dict: A dict denotes relationships between name and index of nodes.
        sub_json_path: Path of json file to save.
    Returns:
        None.
    """
    data = {"nodes":list(), "arg_nodes":list(), "heads":list(), "attrs":dict()}
    nodes = raw_json["nodes"]
    input_tensors = get_input_tensors(sub_graph)
    output_tensors = get_output_tensors(sub_graph)
    ops_ids = [index_dict[op] for op in sub_graph.ops]
    input_ids = [index_dict[op] for op in input_tensors]
    input_split_ids = list()
    input_names = list()
    # Emit one "null" (input) node per input tensor.  A tensor produced by
    # a SliceChannel parent becomes one synthetic input per consumed slot.
    for tensor in input_tensors:
        parent_id = index_dict[tensor]
        split_ids = find_split_sons(nodes, parent_id, ops_ids)
        if not split_ids:
            input_names.append(tensor)
            data["nodes"].append({"op":"null", "name":tensor, "inputs":[]})
            continue
        input_split_ids.append(parent_id)
        for i in split_ids:
            name = tensor + "_" + str(i) + "_sophon_auto"
            input_names.append(name)
            data["nodes"].append({"op":"null", "name":name, "inputs":[]})
    arg_nodes = find_arg_nodes(nodes, input_names, \
                               sub_graph.ops, index_dict)
    # All nodes to copy from the raw graph: weights + ops, minus the
    # tensors that were replaced by the synthetic inputs above.
    total_node_ids = list((set(arg_nodes) | set(ops_ids)) - set(input_ids))
    total_node_ids.sort()
    # heads = find_heads(nodes, output_tensors, index_dict)
    heads = find_heads(output_tensors, index_dict)
    # Negative ids stand for the synthetic inputs; only non-negative ids
    # refer to real nodes of the raw graph.
    tmp_total_node_ids = get_more_than_x(total_node_ids, 0)
    for i in tmp_total_node_ids:
        #if i >= 0:
        data["nodes"].append(nodes[i])
    # NOTE(review): get_index_dict is expected to be defined elsewhere in
    # this module (not visible in this chunk).
    new_index_dict = get_index_dict(data["nodes"])
    # Remap every input reference [node_id, output_slot, version] to the
    # new numbering; references into a split parent are redirected to the
    # corresponding "_sophon_auto" synthetic input (slot 0).
    for node in data["nodes"]:
        inputs = list()
        for i in node["inputs"]:
            if i[0] in input_split_ids:
                new_input_name = nodes[i[0]]["name"] + "_" + str(i[1]) + \
                                 "_sophon_auto"
                inputs.append([new_index_dict[new_input_name], 0, 0])
            else:
                inputs.append([new_index_dict[nodes[i[0]]["name"]], i[1], i[2]])
        node["inputs"] = inputs
    data["arg_nodes"] = [total_node_ids.index(i) for i in arg_nodes]
    data["attrs"] = raw_json["attrs"]
    data["heads"] = list()
    # A SliceChannel head exposes each of its num_outputs slots; any other
    # head exposes a single output slot.
    for i in heads:
        if nodes[i]["op"] == "SliceChannel":
            for j in range(int(nodes[i]["attrs"]["num_outputs"])):
                data["heads"].append([new_index_dict[nodes[i]["name"]], j, 0])
        else:
            data["heads"].append([new_index_dict[nodes[i]["name"]], 0, 0])
    formatted = json.dumps(data, indent=2, sort_keys=False)
    with open(sub_json_path, 'w') as f_save:
        f_save.write(formatted)
def gen_params(raw_params_path, sub_json_path, sub_params_path, input_tensors):
    """ Get features which are intermediate results of the model.

    Loads the submodel symbol, binds it as a gluon SymbolBlock with the
    raw model's parameters, runs one forward pass on the provided inputs
    and exports the submodel (symbol + params) next to sub_params_path.
    Args:
        raw_params_path: Path of params file of the raw mxnet model.
        sub_json_path: Path of json file of the submodel.
        sub_params_path: Path of params file of the submodel.
        input_tensors: A list contains all input tensor names and shapes.
                       Format: [(tensor_name, numpy.ndarray), ]
    Returns:
        True for save parameters to file, False for no parameters and not save.
        NOTE(review): returns None (not False) when an output name cannot
        be resolved, and the model is exported even when has_params is
        False -- confirm both behaviours are intended.
    """
    sym = mx.sym.load(sub_json_path)
    has_params = sym_has_params(sym, [item[0] for item in input_tensors])
    output_names = get_output_names_from_file(sub_json_path)
    internals = sym.get_internals()
    outputs_ops = sym.get_internals().list_outputs()
    outputs = list()
    for name in output_names:
        # Synthetic inputs are named "<tensor>_<slot>_sophon_auto"; map
        # them back to mxnet's "<tensor>_output<slot>" internal names.
        if name.endswith("sophon_auto"):
            tokens = name.split('_')
            out_name = "_".join(tokens[0:-3] + ["output" + tokens[-3]])
        else:
            out_name = name + '_output'
        if out_name not in outputs_ops:
            print("Wrong name: {}".format(name))
            return None
        outputs.append(internals[out_name])
    inputs = list()
    for item in input_tensors:
        tensor_name = item[0]
        inputs.append(mx.sym.var(tensor_name))
    net = gluon.nn.SymbolBlock(outputs=outputs, inputs=inputs)
    # Set the params
    net.collect_params().load(raw_params_path, ctx=mx.cpu(), ignore_extra=True)
    input_data = [mx.nd.array(item[1]) for item in input_tensors]
    outputs = net(*input_data)
    prefix, epoch = get_prefix_and_epoch(sub_params_path)
    prefix = os.path.join(os.path.dirname(sub_params_path), prefix)
    net.export(prefix, epoch=epoch)
    return has_params
class MxnetSplitter(Splitter):
    """ Split a Mxnet model into submodels.

    Implements the Splitter interface on top of a mxnet symbol/params
    file pair described by self.model_descriptor.
    """
    def initialize(self):
        """ Load graph information from mxnet model descriptor.
        ops: Information of all operators, exluding weight nodes.
             Format: {op_name: (op_type, [parent_name])}.
        input_ops: list, names of all input tensors.
        output_ops: list, names of all output tensors.
        json_path: Path to symbol file.
        params_path: Path to parameter file.
        is_dynamic: True means input tensor shapes may change.
        sym_json: Json read from symbol file.
        index_dict: Relationships between name and index of nodes.
                    Format: {node_name: node_index}
        input_names: Input tensor names.
        output_names: Output tensor names.
        prefix: Prefix of saved model.
        epoch: Epoch number of saved model.
        """
        self.platform = 'mxnet'
        # Fail early if the descriptor is missing a required key.
        required_args = ["json_path", "params_path", "dynamic", "input_tensors"]
        for arg in required_args:
            assert arg in self.model_descriptor.keys()
        self.json_path = self.model_descriptor["json_path"]
        self.ops, self.input_ops, self.output_ops = get_all_ops(self.json_path)
        self.params_path = self.model_descriptor["params_path"]
        # NOTE(review): load_json_file / get_index_dict /
        # get_*_names_from_json are expected to be defined elsewhere in
        # this module (not visible in this chunk).
        self.sym_json = load_json_file(self.json_path)
        self.index_dict = get_index_dict(self.sym_json["nodes"])
        self.input_names = get_input_names_from_json(self.sym_json)
        self.output_names = get_output_names_from_json(self.sym_json)
        self.prefix, self.epoch = get_prefix_and_epoch(self.params_path)
        self.input_tensors = self.model_descriptor["input_tensors"]
    def get_op_name(self, op_name):
        """ Mxnet node names are used directly as op names. """
        return op_name
    def is_op_support(self, op_name):
        """ Return True if the op is a "null" node or supported by bmnetm. """
        param = {"op": self.ops[op_name][0]}
        if self.ops[op_name][0] == 'null' or bmnetm.op_support(param):
            return True
        return False
    def is_op_compute(self, op_name):
        """ Return True if the op type is in the compute-heavy whitelist. """
        compute_list = [
            'Convolution',
            'Pooling',
            'Activation',
            'elemwise_add',
            'FullyConnected',
            'BatchNorm'
        ]
        if self.ops[op_name][0] in compute_list:
            return True
        return False
    def is_op_dangerous(self, op_name):
        """ Return True if the op type is blacklisted; the list is empty
        for mxnet, so this always returns False. """
        dangerous_list = [
        ]
        if self.ops[op_name][0] in dangerous_list:
            return True
        return False
    def is_input_op(self, op_name):
        """ Return True if op_name is one of the model's input tensors. """
        if op_name in self.input_ops:
            return True
        return False
    def is_output_op(self, op_name):
        """ Return True if op_name is one of the model's output tensors. """
        if op_name in self.output_ops:
            return True
        return False
    def get_inputs_list(self, op_name):
        """ Return the parent (input) names of the op. """
        return self.ops[op_name][1]
    def destroy(self):
        """ Nothing to clean up for the mxnet splitter. """
        pass
    def save_subgraph(self, graph, save_folder, index, tensors):
        """ Save submodel to files.
        Args:
            graph: A SubGraph instances.
            save_folder: Folder path to save json file and params file.
            index: Index of subgraph.
            tensors: A dict contains tensor names and values.
        Returns:
            model_info: A dict contains model information.
                        Format: {"json": json_name, "params": params_name}
            input_names: list, input tensor names of the submodel.
            ouput_names: list, output tensor names of the submodel.
        """
        model_info = dict()
        json_name = '{}_{}-symbol.json'.format(self.prefix, index)
        params_name = '{}_{}-{:0>4}.params'.format(self.prefix, index, self.epoch)
        json_path = os.path.join(save_folder, json_name)
        gen_json(self.sym_json, graph, self.index_dict, json_path)
        input_names = get_input_names_from_file(json_path)
        input_tensors = [(i, tensors[i]) for i in input_names]
        params_path = os.path.join(save_folder, params_name)
        has_params = gen_params(self.params_path, json_path, \
                                params_path, input_tensors)
        model_info["json"] = json_name
        # Only record a params file when the submodel actually has weights.
        if has_params:
            model_info["params"] = params_name
        input_names = get_input_names_from_file(json_path)
        output_names = get_output_names_from_file(json_path)
        return model_info, input_names, output_names
    def infer_output_tensors(self, save_folder, model_info, input_names, \
                             output_names, tensors):
        """ Get output shapes of the model.
        Args:
            save_folder: Folder path to save json files.
            model_info: A dict contains model information.
                        Format: {"json": json_name, "params": params_name}
            input_names: list, input tensor names.
            ouput_names: list, output tensor names.
            tensor_tensors: A dict contains tensor names and values.
        Returns:
            A list of numpy.ndarray, contains the output tensors.
        """
        if "params" in model_info:
            model = load_mxnet_model(device='cpu', folder=save_folder, \
                json_file=model_info["json"], params=model_info['params'])
        else:
            model = load_mxnet_model(device='cpu', folder=save_folder, \
                json_file=model_info["json"])
        input_tensors = [(name, tensors[name]) for name in input_names]
        required_outputs = [(name, None) for name in output_names]
        outputs = infer_mxnet(model, input_tensors, required_outputs, device='cpu')
        ret = [outputs[name] for name in output_names]
        return ret
    def get_tensor_dtype(self, tensor_name):
        """ Tensor dtypes are not tracked for mxnet; always returns 0. """
        return 0
| 35.386189 | 79 | 0.700347 | 5,154 | 0.372507 | 0 | 0 | 0 | 0 | 0 | 0 | 5,215 | 0.376915 |
a6c033633c6c6cc98bdf46fde938f21a68a4d3ac | 1,221 | py | Python | src/heuristic.py | Maasouza/MinVertexCover | 3edf31bfa9a8979e86094961034efce61d5c6b86 | [
"MIT"
] | null | null | null | src/heuristic.py | Maasouza/MinVertexCover | 3edf31bfa9a8979e86094961034efce61d5c6b86 | [
"MIT"
] | null | null | null | src/heuristic.py | Maasouza/MinVertexCover | 3edf31bfa9a8979e86094961034efce61d5c6b86 | [
"MIT"
] | null | null | null | import networkx as nx
from util import *
def heuristic_cover(graph, preprocess=False):
    """Greedy heuristic for (approximate) minimum vertex cover.

    If ``preprocess`` is true, first runs ``pre_process`` which (per the
    original notes) removes vertices that have only one neighbour and
    returns the vertices already visited.  Then, while unvisited vertices
    remain:
      - pick v, the highest-degree vertex of the working graph,
      - add v to the cover and remove v and all its neighbours.

    Args:
        graph: A networkx graph whose nodes are integer indexes.
        preprocess: Run the single-neighbour preprocessing step first.

    Returns:
        A list of booleans indexed by vertex; True marks cover members.
    """
    start = time.time()
    # Work on a copy so the caller's graph is left intact.
    g = nx.Graph()
    g.add_edges_from(graph.edges())
    if preprocess:
        cover, marked, visited = pre_process(g)
    else:
        cover = [False for x in range(len(g.nodes()))]
        marked = [False for x in range(len(g.nodes()))]
        visited = 0
    while visited != len(graph.nodes()):
        v = max_degree_vertex(g)
        cover[v] = True
        visited += 1
        # Bug fix: materialize the neighbour list before mutating the
        # graph -- removing nodes while iterating the live neighbour view
        # raises RuntimeError on networkx >= 2.0.
        for u in list(g.neighbors(v)):
            visited += 1
            g.remove_node(u)
        g.remove_node(v)
    end = time.time()
    print("--- Heuristica")
    print("\tExec time: "+str((end-start))+" sec")
    return cover
| 27.133333 | 82 | 0.556921 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 532 | 0.435708 |
a6c04b1be112a409e5c402b61de90de419055381 | 389 | py | Python | autharch_sharc/editor/migrations/0042_sharciiif_order.py | kingsdigitallab/autharch_sharc | 92de5fbec8cc72ce48a9e25eb634d40ac2cc83ca | [
"MIT"
] | null | null | null | autharch_sharc/editor/migrations/0042_sharciiif_order.py | kingsdigitallab/autharch_sharc | 92de5fbec8cc72ce48a9e25eb634d40ac2cc83ca | [
"MIT"
] | null | null | null | autharch_sharc/editor/migrations/0042_sharciiif_order.py | kingsdigitallab/autharch_sharc | 92de5fbec8cc72ce48a9e25eb634d40ac2cc83ca | [
"MIT"
] | null | null | null | # Generated by Django 3.0.10 on 2021-07-09 09:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an integer ``order`` field (default 1) to the ``sharciiif`` model."""
    # Must be applied after the themeobject sort_order migration.
    dependencies = [
        ('editor', '0041_themeobject_sort_order'),
    ]
    operations = [
        migrations.AddField(
            model_name='sharciiif',
            name='order',
            field=models.IntegerField(default=1),
        ),
    ]
| 20.473684 | 50 | 0.601542 | 295 | 0.758355 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.264781 |
a6c079133086435312474069fd2c024714d94107 | 13,761 | py | Python | turtlebot3_dqn/src/turtlebot3_dqn/simulation_environment_real.py | 2529342549/turtlebot3_m_learning | 19fc961de8a993eafcd421186ad1c38473d04818 | [
"Apache-2.0"
] | 3 | 2020-01-27T09:23:50.000Z | 2022-03-24T09:58:48.000Z | turtlebot3_dqn/src/turtlebot3_dqn/simulation_environment_real.py | 2529342549/turtlebot3_machine_learning | bdb8cc0fa0110269cd3573d3f78011c3e0201e09 | [
"Apache-2.0"
] | null | null | null | turtlebot3_dqn/src/turtlebot3_dqn/simulation_environment_real.py | 2529342549/turtlebot3_machine_learning | bdb8cc0fa0110269cd3573d3f78011c3e0201e09 | [
"Apache-2.0"
] | 2 | 2020-01-27T09:23:54.000Z | 2021-09-20T04:07:13.000Z | #!/usr/bin/env python
#################################################################################
# Copyright 2018 ROBOTIS CO., LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################################
# Authors: Gilbert #
import rospy
import numpy as np
import math
import time
from math import pi
from geometry_msgs.msg import Twist, Point, Pose, PoseWithCovarianceStamped
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Odometry
from std_srvs.srv import Empty
from std_msgs.msg import String
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from simulation_respawn_real import Respawn
# from nodes.turtlebot3_real_transmission_2 import Sender
# from gazebo_msgs.msg import ModelStates, ModelState
class Env():
    """Real-robot DQN environment for a TurtleBot3 (Python 2 / ROS).

    Publishes velocity commands on ``cmd_vel``, subscribes to ``odom`` and
    ``scan``, and exposes a gym-like reset()/step() interface.  The state
    vector is the filtered laser scan followed by
    [heading, goal distance, min obstacle range, obstacle angle].
    """
    def __init__(self, action_size):
        # Goal/start poses are filled in by reset() via the Respawn helper.
        self.goal_x = 0
        self.goal_y = 0
        self.start_x = 0
        self.start_y = 0
        self.start_orientation = PoseWithCovarianceStamped()
        self.heading = 0
        # Step counter, used only to throttle debug printing in step().
        self.count = 0
        self.action_size = action_size
        self.initGoal = True
        self.get_goalbox = False
        self.position = Pose()
        self.position_x, self.position_y = 0, 0
        self.pub_cmd_vel = rospy.Publisher('cmd_vel', Twist, queue_size=1, latch = True)
        self.sub_odom = rospy.Subscriber('odom', Odometry, self.getOdometry)
        self.respawn_goal = Respawn()
        self.action_memory = []
        self.time_start = time.time()
        self.orientation, self.yaw_init = 0, 0
        self.goal_x_map, self.goal_y_map = 0, 0
    def getGoalDistace(self):
        """Euclidean distance from the current odom position to the goal."""
        goal_distance = round(math.hypot(self.goal_x - self.position.x, self.goal_y - self.position.y), 2)
        return goal_distance
    def getOdometry(self, odom):
        """Odometry callback: cache pose and update heading toward the goal."""
        self.position = odom.pose.pose.position
        self.position_x, self.position_y = self.position.x, self.position.y
        orientation = odom.pose.pose.orientation
        self.orientation = orientation
        orientation_list = [orientation.x, orientation.y, orientation.z, orientation.w]
        _, _, yaw = euler_from_quaternion(orientation_list)
        # print "odom yaw: ", yaw
        goal_angle = math.atan2(self.goal_y - self.position.y , self.goal_x - self.position.x)
        heading = goal_angle - yaw
        # Normalize the heading error into [-pi, pi].
        if heading > pi:
            heading -= 2 * pi
        elif heading < -pi:
            heading += 2 * pi
        self.heading = round(heading, 2)
    def getState(self, scan):
        """Build the state vector from a LaserScan message.

        Inf readings become 3.5; runs of zero readings are patched with
        neighbouring values; readings below 0.12 are clamped to 0.12.
        Returns (scan + [heading, goal distance, min range, min-range
        beam index], done), with done set when an obstacle is closer than
        min_range.  Assumes a 360-beam scan (see the k == 359 check) --
        TODO confirm.
        """
        scan_range = []
        scan_range2 = []
        # print scan.ranges
        heading = self.heading
        min_range = 0.3
        done = False
        # no filter
        # for i in range(len(scan.ranges)):
        #     if scan.ranges[i] == float('Inf'):
        #         scan_range.append(3.5)
        # # zero Problem
        #     # elif np.isnan(scan.ranges[i]):
        #     #     scan_range.append(0)
        #     elif scan.ranges[i] <= 0.07:
        #         scan_range.append(3.5)
        #     else:
        #         scan_range.append(scan.ranges[i])
        # Filter
        i = 0
        while i <= len(scan.ranges)-1:
            # print "length", len(scan_range)
            if scan.ranges[i] == float('Inf'):
                scan_range.append(3.5)
                i += 1
            elif scan.ranges[i] == 0:
                k = 1
                t = 0
                if i == 0:
                    # Leading zeros: copy the first non-zero reading back.
                    while scan.ranges[k]==0:
                        k += 1
                    while t <= k:
                        scan_range.append(scan.ranges[k])
                        t += 1
                    i = k + 1
                else:
                    k = i
                    m = i
                    a = scan.ranges[i-1]
                    while scan.ranges[k]==0:
                        if k == 359:
                            # Trailing zeros reach the end of the scan:
                            # pad with the last good value and return the
                            # finished state from inside the loop.
                            while m <= k:
                                scan_range.append(a)
                                m += 1
                            for i in range(len(scan_range)):
                                if scan_range[i] < 0.12:
                                    scan_range2.append(0.12)
                                else:
                                    scan_range2.append(scan_range[i])
                            current_distance = round(math.hypot(self.goal_x - self.position.x, self.goal_y - self.position.y),2)
                            # if current_distance < 0.2:
                            if current_distance < 0.15:
                                vel_cmd = Twist()
                                self.get_goalbox = True
                            obstacle_min_range = round(min(scan_range), 2)
                            obstacle_angle = np.argmin(scan_range)
                            if min_range > min(scan_range) > 0:
                                done = True
                            return scan_range2 + [heading, current_distance, obstacle_min_range, obstacle_angle], done
                        k += 1
                    # Interior zeros: fill with the larger neighbour.
                    b = scan.ranges[k]
                    while m < k:
                        scan_range.append(max(a, b))
                        m += 1
                    i = k
            else:
                scan_range.append(scan.ranges[i])
                i += 1
        i=0
        # Clamp very small readings to the sensor's minimum (0.12 m).
        for i in range(len(scan_range)):
            if scan_range[i] < 0.12:
                scan_range2.append(0.12)
            else:
                scan_range2.append(scan_range[i])
        obstacle_min_range = round(min(scan_range), 2)
        obstacle_angle = np.argmin(scan_range)
        if min_range > min(scan_range) > 0:
            done = True
        current_distance = round(math.hypot(self.goal_x - self.position.x, self.goal_y - self.position.y),2)
        # if current_distance < 0.2:
        if current_distance < 0.15:
            vel_cmd = Twist()
            self.get_goalbox = True
        return scan_range2 + [heading, current_distance, obstacle_min_range, obstacle_angle], done
    def setReward(self, state, done, action):
        """Reward for the last transition; also handles terminal cases.

        Near-collision: reward -200 and the robot is stopped for ~1 s.
        Goal reached: reward 500 and the robot is stopped for ~5 s.
        Otherwise: heading-alignment reward scaled by distance, minus an
        obstacle-proximity penalty.
        """
        yaw_reward = []
        obstacle_min_range = state[-2]
        current_distance = state[-3]
        heading = state[-4]
        # One alignment score per discrete action.
        for i in range(5):
            angle = -pi / 4 + heading + (pi / 8 * i) + pi / 2
            tr = 1 - 4 * math.fabs(0.5 - math.modf(0.25 + 0.5 * angle % (2 * math.pi) / math.pi)[0])
            yaw_reward.append(tr)
        distance_rate = 2 ** (current_distance / self.goal_distance)
        if obstacle_min_range < 0.5:
            ob_reward = -5
        else:
            ob_reward = 0
        reward = ((round(yaw_reward[action] * 5, 2)) * distance_rate) + ob_reward
        if done:
            rospy.loginfo("Near Collision!!")
            reward = -200
            # driving backwards last 25 actions ~5 seconds
            t = 0
            # NOTE(review): t and l are now only used by the commented-out
            # backwards-driving block below.
            l = len(self.action_memory)
            vel_cmd = Twist()
            # while t <= 10:
            #     if len(self.action_memory) > 20:
            #         max_angular_vel = -1.5
            #         action = self.action_memory[l-t-1]
            #         ang_vel = ((-self.action_size + 1)/2 - action) * max_angular_vel * 0.5
            #         vel_cmd.linear.x = -0.15
            #         # vel_cmd.angular.z = ang_vel
            #         vel_cmd.angular.z = 0
            #         time_start = time.time()
            #         a=0
            #         self.pub_cmd_vel.publish(vel_cmd)
            #         t += 1
            #     else:
            #         t = 10
            # stand still after collision
            vel_cmd.linear.x = 0
            vel_cmd.angular.z = 0
            time_start = time.time()
            a=0
            # Keep publishing the stop command for one second.
            while a < 1:
                self.pub_cmd_vel.publish(vel_cmd)
                a = time.time() - time_start
        if self.get_goalbox:
            rospy.loginfo("Goal!!")
            print "start_position: ", self.start_x,"/ ", self.start_y
            print "odom_position:", self.position.x,"/ " ,self.position.y
            print "goal_position: ", self.goal_x,"/ ", self.goal_y
            print "action: ", action
            print "_______________________________________________________________"
            reward = 500
            self.get_goalbox = False
            done = True
            vel_cmd = Twist()
            vel_cmd.linear.x = 0
            vel_cmd.angular.z = 0
            start = 0
            start_1 = time.time()
            # Keep publishing the stop command for five seconds.
            while start - 5 < 0:
                self.pub_cmd_vel.publish(vel_cmd)
                start = time.time() - start_1
            # self.pub_cmd_vel.publish(vel_cmd)
            # self.goal_x, self.goal_y = self.respawn_goal.getPosition()
            # self.goal_distance = self.getGoalDistace()
        return reward, done
    def speed(self, state):
        """Linear speed from obstacle proximity and goal distance.

        Slows down below 1 m obstacle range and within 0.5 m of the goal.
        """
        # Calculate the data new with a filter
        scan_range = []
        speed = 0.15
        speed_goal = 0
        # NOTE(review): this loop's result is discarded -- scan_range is
        # overwritten by "scan_range = state" just below.
        for i in range(len(state)):
            if state[i] < 0.30:
                scan_range.append(3.5)
            else:
                scan_range.append(state[i])
        scan_range = state
        obstacle_min_range = round(min(scan_range), 2)
        # Index 361 is the goal distance appended by getState (assuming a
        # 360-beam scan) -- TODO confirm.
        goal_distance = scan_range[361]
        # print obstacle_min_range
        if obstacle_min_range >= 1:
            speed = 0.15
        elif obstacle_min_range < 1 and obstacle_min_range >= 0.3:
            speed = 0.15 + ((obstacle_min_range-1)/7)
            speed_goal = speed
        if goal_distance < 0.5:
            speed_goal = 0.15 + (goal_distance - 0.)/8
            speed = min([speed, speed_goal])
        return speed
    def step(self, action):
        """Apply one discrete action and return (state, reward, done)."""
        time1 = time.time()
        data = None
        # Block until a laser scan arrives.
        while data is None:
            try:
                data = rospy.wait_for_message('scan', LaserScan, timeout=5)
            except:
                pass
        vel_cmd = Twist()
        vel_cmd.linear.x = 0
        state, done = self.getState(data)
        reward, done = self.setReward(state, done, action)
        if not done:
            max_angular_vel = 1.5
            # max_angular_vel = 0.15
            # Map the discrete action index onto an angular velocity.
            ang_vel = ((self.action_size - 1)/2 - action) * max_angular_vel * 0.5
            vel_cmd = Twist()
            vel_cmd.linear.x = self.speed(state)
            # vel_cmd.linear.x = 0.15
            vel_cmd.angular.z = ang_vel
            self.action_memory.append(-1*action)
            time_start = time.time()
            self.pub_cmd_vel.publish(vel_cmd)
        # Print debug info on every other step.
        if self.count % 2 == 0:
            print "start_position: ", self.start_x,"/ ", self.start_y
            print "odom_position:", self.position.x,"/ " ,self.position.y
            print "goal_position: ", self.goal_x,"/ ", self.goal_y
            print "goal_distance: ", state[-3],"/ obstacle_distance: ", state[-2]
            print "Vel_linear: ",vel_cmd.linear.x , "action: ", action
            print done
            print "_____________________________________________________________"
        self.count += 1
        return np.asarray(state), reward, done
    def reset(self):
        """Fetch the next goal, transform it into the odom frame, and
        return the initial state vector."""
        # corrdinate receive, transformation
        yaw_neu = 0
        if self.initGoal:
            # First episode: also fetch the start pose/orientation.
            self.start_x_map, self.start_y_map, start_orientation_2 = self.respawn_goal.getstartPosition()
            self.goal_x_map, self.goal_y_map = self.respawn_goal.getPosition()
            start_orientation_list = [start_orientation_2.x, start_orientation_2.y, start_orientation_2.z, start_orientation_2.w]
            _, _, self.yaw_init = euler_from_quaternion(start_orientation_list)
            self.initGoal = False
            # self.goal_x, self.goal_y = self.goal_x_map, self.goal_y_map
        else:
            # Later episodes: the previous goal becomes the new start.
            self.start_x_map, self.start_y_map = self.goal_x_map, self.goal_y_map
            self.goal_x_map, self.goal_y_map = self.respawn_goal.getPosition()
            orientation = self.orientation
            orientation_list = [orientation.x, orientation.y, orientation.z, orientation.w]
            _, _, yaw_neu = euler_from_quaternion(orientation_list)
            print "yaw_neu:", yaw_neu
            # self.goal_x_map, self.goal_y_map = self.goal_x, self.goal_y
        print "Wait 3 sec"
        time.sleep(3)
        # in map coordinates
        # diff_x = self.goal_x - self.start_x + self.position
        # diff_y = self.goal_y - self.start_y + self.position
        diff_x = self.goal_x_map - self.start_x_map
        diff_y = self.goal_y_map - self.start_y_map
        print "diff_x: ", diff_x
        print "diff_y: ", diff_y
        print "yaw_neu: ", yaw_neu
        # yaw = yaw_neu + self.yaw_init
        # print "yaw: ",yaw
        # Transformation
        # Rotate the map-frame offset by the initial yaw and translate by
        # the current odom position to get the goal in the odom frame.
        yaw = self.yaw_init
        self.goal_x = math.cos(yaw)*diff_x + math.sin(yaw)*diff_y + self.position_x
        self.goal_y = -1*math.sin(yaw)*diff_x + math.cos(yaw)*diff_y + self.position_y
        self.goal_distance = self.getGoalDistace()
        data = None
        # Block until a laser scan arrives.
        while data is None:
            try:
                data = rospy.wait_for_message('scan', LaserScan, timeout=5)
            except:
                pass
        self.goal_distance = self.getGoalDistace()
        state, done = self.getState(data)
        return np.asarray(state)
| 37.70137 | 129 | 0.534263 | 12,450 | 0.904731 | 0 | 0 | 0 | 0 | 0 | 0 | 2,877 | 0.209069 |
a6c1c0b4ec9e4d0b37b006d5d64e485ed7f8cc62 | 417 | py | Python | codeforces/anirudhak47/1328/B.py | anirudhakulkarni/codes | d7a907951033b57314dfc0b837123aaa5c25a39a | [
"MIT"
] | 3 | 2020-07-09T16:15:42.000Z | 2020-07-17T13:19:42.000Z | codeforces/anirudhak47/1328/B.py | anirudhakulkarni/codes | d7a907951033b57314dfc0b837123aaa5c25a39a | [
"MIT"
] | null | null | null | codeforces/anirudhak47/1328/B.py | anirudhakulkarni/codes | d7a907951033b57314dfc0b837123aaa5c25a39a | [
"MIT"
] | 1 | 2020-07-17T13:19:48.000Z | 2020-07-17T13:19:48.000Z | def classfinder(k):
res=1
while res*(res+1)//2<k:
res+=1
return res
# cook your dish here
for t in range(int(input())):
#n=int(input())
n,k=map(int,input().split())
clas=classfinder(k)
i=k-clas*(clas-1)//2
str=""
for z in range(n):
if z==n-clas-1 or z==n-i:
str+="b"
else:
str+="a"
print(str) | 18.954545 | 34 | 0.443645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.110312 |
a6c2f3f7fadfd4ca0984002c0c949dd1121f320e | 3,827 | py | Python | ch05/ch0501_convnet.py | zhuyuanxiang/deep-learning-with-python-notebooks | 6b6b5670193f5a26321c36de3b547203e30dc8c7 | [
"MIT"
] | 6 | 2019-11-30T01:34:24.000Z | 2021-11-28T10:53:22.000Z | ch05/ch0501_convnet.py | zhuyuanxiang/deep-learning-with-python-notebooks | 6b6b5670193f5a26321c36de3b547203e30dc8c7 | [
"MIT"
] | null | null | null | ch05/ch0501_convnet.py | zhuyuanxiang/deep-learning-with-python-notebooks | 6b6b5670193f5a26321c36de3b547203e30dc8c7 | [
"MIT"
] | 4 | 2020-04-11T10:46:17.000Z | 2021-11-09T08:04:55.000Z | # -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : 526614962@qq.com
@site : https://zhuyuanxiang.github.io
---------------------------
@Software : PyCharm
@Project : deep-learning-with-python-notebooks
@File : ch0501_convnet.py
@Version : v0.1
@Time : 2019-11-20 10:18
@License : (C)Copyright 2018-2019, zYx.Tom
@Reference : 《Python 深度学习,Francois Chollet》, Sec05,P
@Desc : 深度学习用于计算机视觉,卷积神经网络简介
"""
import os
import sys
import matplotlib.pyplot as plt
import numpy as np # pip install numpy<1.17,小于1.17就不会报错
import winsound
from keras import activations
from keras import layers
from keras import losses
from keras import metrics
from keras import models
from keras import optimizers
from keras.datasets import mnist
from keras.utils import to_categorical
# Suppress the TensorFlow warning: "Your CPU supports instructions that this
# TensorFlow binary was not compiled to use: AVX2 FMA".
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Print numpy data with 3 decimal places, no scientific notation.
np.set_printoptions(precision = 3, suppress = True, threshold = np.inf, linewidth = 200)
# to make this notebook's output stable across runs
seed = 42
np.random.seed(seed)
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# numpy 1.16.4 is required
assert np.__version__ in ["1.16.5", "1.16.4"]
def get_convnet_model():
    """Build and return the small MNIST convnet (3 conv + 2 dense layers).

    Input shape is (28, 28, 1); output is a 10-way softmax over digits.
    """
    print("构造卷积神经网络模型")
    model = models.Sequential()
    # Each convolution/pooling layer outputs a tensor of shape
    # (height, width, channels).
    model.add(layers.Conv2D(32, (3, 3), activation = activations.relu, input_shape = (28, 28, 1)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation = activations.relu))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation = activations.relu))
    model.add(layers.Flatten())
    model.add(layers.Dense(64, activation = activations.relu))
    model.add(layers.Dense(10, activation = activations.softmax))
    # print(model.summary())
    return model
print("* Code 3-1:加载数据集...")
# Load MNIST: 60000 training and 10000 test images of handwritten digits.
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
print("\t训练数据集(train_labels):60000 条数据;测试数据集(test_labels):10000 条数据")
print("\t\t train_images.shape =", train_images.shape)
print("\t\t train_lables.shape =", train_labels.shape)
print("\t\t test_images.shape =", test_images.shape)
print("\t\t test_labels.shape =", test_labels.shape)
print("\t数据集中每条数据是一张图片")
print("\t\t train_images[0].shape =", train_images[0].shape)
print("\t\t test_images[0].shape =", test_images[0].shape)
print("\t每条数据描述一个图片对应的数字:0~9")
print("\t\t train_lables[:10] =", train_labels[:10])
print("\t\t test_labels[:10] =", test_labels[:10])
# Reshape to (N, 28, 28, 1) and scale the pixel values into [0, 1].
train_images = train_images.reshape((60000, 28, 28, 1)).astype('float32') / 255
test_images = test_images.reshape((10000, 28, 28, 1)).astype('float32') / 255
# One-hot encode the digit labels.
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
model = get_convnet_model()
model.compile(optimizer = optimizers.rmsprop(lr = 0.001),
              loss = losses.categorical_crossentropy, metrics = [metrics.categorical_accuracy])
history = model.fit(train_images, train_labels, epochs = 20, batch_size = 64, verbose = 2, use_multiprocessing = True)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose = 2, use_multiprocessing = True)
print("测试集的评估精度 =", test_acc)
# Plot the training-loss and training-accuracy curves.
loss = history.history['loss']
epochs_range = range(1, len(loss) + 1)
categorical_acc = history.history['categorical_accuracy']
plt.plot(epochs_range, loss, 'bo', label = "训练集的损失")
plt.title('不同数据集的损失')
plt.xlabel('Epochs--批次')
plt.ylabel('Loss--损失')
plt.legend()
plt.plot(epochs_range, categorical_acc, 'bo', label = "训练集的精确度")
plt.title('不同数据集的精确度')
plt.xlabel('Epochs--批次')
plt.ylabel('Accuracy--精确度')
plt.legend()
# Beep to signal the run has finished (Windows only).
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
    plt.show()
    pass
| 35.435185 | 118 | 0.709694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,731 | 0.410871 |
a6c390b45f2052455e5898ffbc22af9be8ea36fa | 878 | py | Python | run.py | sulavmhrzn/facebook-autoreply-bot | 2196f392c03305a9d9eca9bd70e2e6dafc38c995 | [
"MIT"
] | null | null | null | run.py | sulavmhrzn/facebook-autoreply-bot | 2196f392c03305a9d9eca9bd70e2e6dafc38c995 | [
"MIT"
] | null | null | null | run.py | sulavmhrzn/facebook-autoreply-bot | 2196f392c03305a9d9eca9bd70e2e6dafc38c995 | [
"MIT"
] | null | null | null | from utils.app import SendBot
try:
    from dotenv import load_dotenv
    import os
except ModuleNotFoundError:
    print('Required modules not found.')
    exit()
# Load variables from a local .env file (if present) into the environment.
load_dotenv()
env = input('Load environment variables? (y/n): ').lower()
options = ['y', 'n']
if env in options:
    if env == 'n':
        # Manual credential entry.
        email = input('Email: ')
        password = input('Password: ')
        if email and password:
            client = SendBot(email, password, max_tries=100)
            # Sets active status
            client.setActiveStatus(markAlive=False)
            client.listen()
        else:
            print('Enter credentials.')
    else:
        # Credentials come from the EMAIL/PASSWORD environment variables.
        client = SendBot(os.getenv('EMAIL'), os.getenv(
            'PASSWORD'), max_tries=100)
        # Sets active status
        client.setActiveStatus(markAlive=False)
        client.listen()
else:
    print('Please type y or n')
| 24.388889 | 60 | 0.595672 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 193 | 0.219818 |
a6c3f96b7909d2e2755a500bcd6ce3c2ca94c43c | 11,416 | py | Python | template/tests/load_dat.py | ajmaurais/peptide_analyzer | 62f37d88fefd0a8cfb57a8c157cfc85692956360 | [
"MIT"
] | null | null | null | template/tests/load_dat.py | ajmaurais/peptide_analyzer | 62f37d88fefd0a8cfb57a8c157cfc85692956360 | [
"MIT"
] | null | null | null | template/tests/load_dat.py | ajmaurais/peptide_analyzer | 62f37d88fefd0a8cfb57a8c157cfc85692956360 | [
"MIT"
] | null | null | null |
import sys
import os
from collections import Counter
import pandas as pd
# Make the package under test importable when running from template/tests/.
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
# Expected ("gold standard") output table the tests compare results against
# -- presumably produced by the analyzer under test; confirm with the tests.
dat_std = pd.read_csv(os.path.dirname(os.path.abspath(__file__)) + '/data/std_output.tsv', sep='\t')
# All elements tracked for every residue, in canonical display order.
_ELEMENT_KEYS = ('C', 'H', 'O', 'N', 'S', 'P', '(15)N', '(2)H', '(13)C', 'Se', 'Cl', 'Br')


def _residue_counts(c=0, h=0, o=0, n=0, s=0, se=0):
    """Build a zero-filled element Counter for one residue/terminus."""
    counts = dict.fromkeys(_ELEMENT_KEYS, 0)
    counts.update({'C': c, 'H': h, 'O': o, 'N': n, 'S': s, 'Se': se})
    return Counter(counts)


# Expected elemental composition of each amino-acid residue (one-letter
# code), the peptide termini ('N_term', 'C_term'), and the '*' modification.
atom_counts = {'A': _residue_counts(c=3, h=5, o=1, n=1),
               'C': _residue_counts(c=5, h=8, o=2, n=2, s=1),
               'D': _residue_counts(c=4, h=5, o=3, n=1),
               'E': _residue_counts(c=5, h=7, o=3, n=1),
               'F': _residue_counts(c=9, h=9, o=1, n=1),
               'G': _residue_counts(c=2, h=3, o=1, n=1),
               'H': _residue_counts(c=6, h=7, o=1, n=3),
               'I': _residue_counts(c=6, h=11, o=1, n=1),
               'K': _residue_counts(c=6, h=12, o=1, n=2),
               'L': _residue_counts(c=6, h=11, o=1, n=1),
               'M': _residue_counts(c=5, h=9, o=1, n=1, s=1),
               'N': _residue_counts(c=4, h=6, o=2, n=2),
               'P': _residue_counts(c=5, h=7, o=1, n=1),
               'Q': _residue_counts(c=5, h=8, o=2, n=2),
               'R': _residue_counts(c=6, h=12, o=1, n=4),
               'S': _residue_counts(c=3, h=5, o=2, n=1),
               'T': _residue_counts(c=4, h=7, o=2, n=1),
               'V': _residue_counts(c=5, h=9, o=1, n=1),
               'W': _residue_counts(c=11, h=10, o=1, n=2),
               'Y': _residue_counts(c=9, h=9, o=2, n=1),
               'U': _residue_counts(c=5, h=8, o=2, n=2, se=1),
               'C_term': _residue_counts(h=1, o=1),
               'N_term': _residue_counts(h=1),
               '*': _residue_counts(c=24, h=36, o=3, n=6)}
| 38.053333 | 100 | 0.102313 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,314 | 0.115102 |
a6c79b97d437b297bf666481ac065486d65e213c | 8,576 | py | Python | tools/nntool/graph/manipulations/dimensions.py | gemenerik/gap_sdk | afae64d239db6d73f79c90c2ca2c832b6361f109 | [
"Apache-2.0"
] | null | null | null | tools/nntool/graph/manipulations/dimensions.py | gemenerik/gap_sdk | afae64d239db6d73f79c90c2ca2c832b6361f109 | [
"Apache-2.0"
] | null | null | null | tools/nntool/graph/manipulations/dimensions.py | gemenerik/gap_sdk | afae64d239db6d73f79c90c2ca2c832b6361f109 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
from typing import Sequence
from generation.naming_convension import (DefaultNamingConvension,
NamingConvension)
from utils.graph import GraphView
# from graph.verify import verify_graph
from ..dim import Dim, MissMatchedInputsError, MoreThanOneInputError
from ..types import (ConcatParameters, ConstantInputParameters, EdgeParameters,
InputParameters, OutputParameters, Parameters,
SingleInputAndOutput)
from .set_aliases import set_aliases
LOG = logging.getLogger("nntool." + __name__)
def set_out_edges_multi(G, node: Parameters, dims: Sequence[Dim], step_idx: int,
naming_convension: NamingConvension, update_graph, edge_type: str = "in_out"):
# clone the dims first so that the edge dims are the same objects as the node output dims
if update_graph:
dims = node.set_output_size(dims)
out_edges = G.indexed_out_edges(node)
is_multi_out = len(out_edges) > 1
for edge_idx, edge_group in enumerate(out_edges):
if not edge_group:
continue
edge_order = edge_idx if is_multi_out else None
ename = naming_convension.get_edge_name(
node, step_idx, edge_type, edge_order=edge_order)
eparams = EdgeParameters(
ename, dims[edge_idx], node, edge_idx, step_idx, edge_type, edge_order=edge_order)
for edge in edge_group:
edge.params = eparams
LOG.debug("%s %s", node.name, ename)
def set_out_edges_one(G, node: Parameters, dim: Dim, step_idx: int,
naming_convension: NamingConvension, update_graph, edge_type: str = "in_out"):
ename = naming_convension.get_edge_name(node, step_idx, edge_type)
eparams = EdgeParameters(ename, dim, node, 0, step_idx, edge_type)
for edge in G.out_edges(node.name):
assert edge.from_idx == 0, "Only for use with nodes that have one output"
edge.params = eparams
LOG.debug("%s %s", node.name, ename)
if update_graph:
eparams.dims = node.set_output_size([dim])[0]
else:
eparams.dims = node.out_dims[0]
def validate_one_in_edge(G, node: Parameters, update_graph, expect_named: bool = True):
edges = G.in_edges(node.name)
if len(edges) != 1:
if len(edges) > 1:
raise MoreThanOneInputError(f'more than one edge on {node.name}')
else:
raise ValueError(f'{node.name} is not connected')
eparams = edges[0].params
assert eparams is not None, "edge parameters not yet set"
assert not expect_named or eparams.dims.has_keys(
['c', 'h', 'w']), "dimensions not yet set"
if update_graph:
eparams.dims = node.set_input_size([eparams.dims])[0]
return eparams
def validate_multi_in_edge(G, node: Parameters, update_graph, expect_named: bool = True):
dims = []
for edge in G.indexed_in_edges(node.name):
if edge is None:
dims.append(None)
continue
eparams = edge.params
assert eparams is not None, "edge parameters not yet set"
assert not expect_named or eparams.dims.has_keys(
['c', 'h', 'w']), "dimensions not yet set"
dims.append(eparams.dims)
if update_graph:
try:
dims = node.set_input_size(dims)
except MissMatchedInputsError as exc:
raise ValueError(f'missmatched inputs on node {node.name}') from exc
return dims
def add_dimensions_concat(G, node: Parameters, step_idx: int,
naming_convension: NamingConvension,
indexes, update_graph):
del indexes
in_dims = validate_multi_in_edge(G, node, update_graph, expect_named=False)
if update_graph:
out_dims = node.get_output_size(in_dims)
else:
out_dims = node.out_dims
set_out_edges_one(G, node, out_dims[0], step_idx, naming_convension, update_graph )
def add_dimensions_constant(G, node: Parameters, step_idx: int,
naming_convension: NamingConvension, indexes, update_graph):
node.index = indexes['constant']
indexes['constant'] += 1
constant_dims = node.get_output_size(None)
set_out_edges_one(G, node, constant_dims[0], step_idx,
naming_convension, update_graph, edge_type="in")
def add_dimensions_input(G, node: Parameters, step_idx: int,
naming_convension: NamingConvension, indexes, update_graph):
node.index = indexes['input']
indexes['input'] += 1
input_dims = node.get_output_size(None)
node.set_input_size(input_dims)
set_out_edges_one(G, node, input_dims[0], step_idx,
naming_convension, update_graph , edge_type="in")
def add_dimensions_output(G, node: Parameters, step_idx: int,
naming_convension: NamingConvension, indexes, update_graph):
node.index = indexes['output']
indexes['output'] += 1
eparams = validate_one_in_edge(G, node, update_graph, expect_named=False)
eparams.edge_type = "out"
eparams.name = naming_convension.get_edge_name(node, step_idx, "out")
# set the dimensions of the output node
if update_graph:
node.set_output_size(node.get_output_size([eparams.dims]))
def add_dimensions_unknown_single(G, node: Parameters, step_idx: int,
naming_convension: NamingConvension, indexes, update_graph):
del indexes
eparams = validate_one_in_edge(G, node, update_graph, expect_named=False)
if update_graph:
out_dims = node.get_output_size([eparams.in_dims])
else:
out_dims = node.out_dims
set_out_edges_one(G, node, out_dims[0], step_idx, naming_convension, update_graph)
def add_dimensions_unknown(G, node: Parameters, step_idx: int,
naming_convension: NamingConvension, update_graph):
in_dims = validate_multi_in_edge(G, node, update_graph, expect_named=False)
if update_graph:
out_dims = node.get_output_size(in_dims)
else:
out_dims = node.out_dims
set_out_edges_multi(G, node, out_dims,
step_idx, naming_convension, update_graph)
OP_ROUTINES = {
InputParameters: add_dimensions_input,
OutputParameters: add_dimensions_output,
ConcatParameters: add_dimensions_concat,
ConstantInputParameters: add_dimensions_constant,
SingleInputAndOutput: add_dimensions_unknown_single
}
def add_dimensions(G: GraphView, naming_convension: NamingConvension = None, update_graph=True) -> list:
""" Walks graph setting all edge names and dimensions
"""
if naming_convension is None:
naming_convension = DefaultNamingConvension(G)
for edge in G.edges():
edge.params = None
steps = []
indexes = {'input': 0, 'output': 0, 'constant': 0}
inputs = G.inputs()
# inputs = sorted(
# G.inputs(),
# key=lambda node: ("a" + node.name if isinstance(node, InputParameters)
# else "b" + (str(node.step_idx) if node.step_idx else node.name)))
LOG.debug("inputs: %s", [node.name for node in inputs])
def add_step(step, idx):
if len(steps) <= idx:
steps.extend([None] * (idx + 1 - len(steps)))
steps[idx] = step
for node in G.topological_sort(inputs):
LOG.debug("add dimensions to: %s", node.name)
if update_graph:
node.step_idx = len(steps)
add_step({'node': node}, node.step_idx)
if node.__class__ in OP_ROUTINES:
OP_ROUTINES[node.__class__](
G, node, node.step_idx, naming_convension, indexes, update_graph)
else:
add_dimensions_unknown(G, node, node.step_idx, naming_convension, update_graph)
set_aliases(G)
# verify_graph(G, throw_exception=True)
return steps
| 40.64455 | 104 | 0.672225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,610 | 0.187733 |
a6c7dfeaf23ecebc079761c27c16e7671b2bf8e6 | 334 | py | Python | data/ratings/tezxt.py | SLAB-NLP/Akk | baa07b0fdf8c7d8623fbd78508867c30a8a7ff6d | [
"MIT"
] | 5 | 2021-09-14T07:09:07.000Z | 2021-11-15T19:43:13.000Z | data/ratings/tezxt.py | SLAB-NLP/Akk | baa07b0fdf8c7d8623fbd78508867c30a8a7ff6d | [
"MIT"
] | null | null | null | data/ratings/tezxt.py | SLAB-NLP/Akk | baa07b0fdf8c7d8623fbd78508867c30a8a7ff6d | [
"MIT"
] | 1 | 2021-11-15T19:43:19.000Z | 2021-11-15T19:43:19.000Z | with open(r"D:\Drive\לימודים\מאגרי מידע\זמני\ancient-text-processing\jsons_unzipped\saao\saa01\catalogue.json","r",encoding="utf_8") as file:
catalog = eval(file.read())["members"]
rulers = []
for c in catalog:
cat = catalog[c]
if cat["period"] == "Neo-Assyrian" and cat.get("ruler"):
rulers += cat["ruler"]
| 33.4 | 141 | 0.652695 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 175 | 0.49435 |
a6c9ce3f66a98ad170b4f87e31f76e548b232e16 | 528 | py | Python | abc185/abc185_d.py | Vermee81/practice-coding-contests | 78aada60fa75f208ee0eef337b33b27b1c260d18 | [
"MIT"
] | null | null | null | abc185/abc185_d.py | Vermee81/practice-coding-contests | 78aada60fa75f208ee0eef337b33b27b1c260d18 | [
"MIT"
] | null | null | null | abc185/abc185_d.py | Vermee81/practice-coding-contests | 78aada60fa75f208ee0eef337b33b27b1c260d18 | [
"MIT"
] | null | null | null | # https://atcoder.jp/contests/abc185/tasks/abc185_d
from math import ceil
N, M = map(int, input().split())
if M == 0:
print(1)
exit()
a_arr = list(map(int, input().split()))
a_arr.sort()
blanks = [0]
for i in range(M):
if i == 0:
blanks.append(a_arr[i] - 1)
continue
blanks.append(a_arr[i] - a_arr[i - 1] - 1)
blanks.append(N - a_arr[-1])
minimum = N
for b in blanks:
if b == 0:
continue
minimum = min(minimum, b)
ans = 0
for b in blanks:
ans += ceil(b / minimum)
print(ans)
| 20.307692 | 51 | 0.585227 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.096591 |
a6ca3adc2fc881cf47636f14b970e81283f26529 | 9,771 | py | Python | code_rejected/model_resnet_and_new_loss.py | rcalfredson/objects_counting_dmap | 5194306d60f987741c4ec0f22eff990422fbd405 | [
"Apache-2.0"
] | null | null | null | code_rejected/model_resnet_and_new_loss.py | rcalfredson/objects_counting_dmap | 5194306d60f987741c4ec0f22eff990422fbd405 | [
"Apache-2.0"
] | null | null | null | code_rejected/model_resnet_and_new_loss.py | rcalfredson/objects_counting_dmap | 5194306d60f987741c4ec0f22eff990422fbd405 | [
"Apache-2.0"
] | null | null | null | """The implementation of U-Net and FCRN-A models."""
from typing import Tuple
import numpy as np
import torch
from torch import nn
from torchvision.models import resnet
from model_config import DROPOUT_PROB
class UOut(nn.Module):
"""Add random noise to every layer of the net."""
def forward(self, input_tensor: torch.Tensor):
if not self.training:
return input_tensor
with torch.cuda.device(0):
return input_tensor + 2*DROPOUT_PROB*torch.cuda.FloatTensor(
input_tensor.shape).uniform_() - DROPOUT_PROB
class ResNet(nn.Module):
def __init__(self, module, in_channels, out_channels, stride):
super().__init__()
self.module = module
self.in_channels = in_channels
self.out_channels = out_channels
self.stride = stride
def forward(self, inputs):
output = self.module(inputs)
skip = None
if self.stride != 1 or self.in_channels != self.out_channels:
skip = nn.Sequential(
nn.Conv2d(in_channels=self.in_channels, out_channels=self.out_channels,
kernel_size=1, stride=self.stride, bias=False),
nn.BatchNorm2d(self.out_channels))
identity = inputs
if skip is not None:
skip = skip.cuda()
identity = skip(inputs)
output += identity
return output
class BlockBuilder:
"""Create convolutional blocks for building neural nets."""
def __init__(self, dropout: bool):
self.dropout = dropout
def conv_block(self, channels: Tuple[int, int],
size: Tuple[int, int],
stride: Tuple[int, int] = (1, 1),
N: int = 1):
"""
Create a block with N convolutional layers with ReLU activation function.
The first layer is IN x OUT, and all others - OUT x OUT.
Args:
channels: (IN, OUT) - no. of input and output channels
size: kernel size (fixed for all convolution in a block)
stride: stride (fixed for all convolution in a block)
N: no. of convolutional layers
Returns:
A sequential container of N convolutional layers.
"""
# a single convolution + batch normalization + ReLU block
def block(in_channels):
# layers = [
# nn.Conv2d(in_channels=in_channels,
# out_channels=channels[1],
# kernel_size=size,
# stride=stride,
# bias=False,
# padding=(size[0] // 2, size[1] // 2)),
# nn.ReLU()
# ]
# if self.dropout:
# layers.append(UOut())
# layers.append(nn.BatchNorm2d(num_features=channels[1]))
layers = [ResNet(nn.Sequential(nn.Conv2d(in_channels=in_channels,
out_channels=channels[1],
kernel_size=size,
stride=stride,
bias=False,
padding=(size[0] // 2, size[1] // 2))), in_channels, channels[1], stride),
nn.ReLU()]
if self.dropout:
layers.append(UOut())
postActivation = nn.Sequential(*layers)
return nn.Sequential(postActivation, nn.BatchNorm2d(num_features=channels[1]))
# create and return a sequential container of convolutional layers
# input size = channels[0] for first block and channels[1] for all others
return nn.Sequential(*[block(channels[bool(i)]) for i in range(N)])
class ConvCat(nn.Module):
"""Convolution with upsampling + concatenate block."""
def __init__(self,
channels: Tuple[int, int],
size: Tuple[int, int],
stride: Tuple[int, int] = (1, 1),
N: int = 1,
dropout: bool = False):
"""
Create a sequential container with convolutional block (see conv_block)
with N convolutional layers and upsampling by factor 2.
"""
super(ConvCat, self).__init__()
bb = BlockBuilder(dropout)
self.conv = nn.Sequential(
bb.conv_block(channels, size, stride, N),
nn.Upsample(scale_factor=2)
)
def forward(self, to_conv: torch.Tensor, to_cat: torch.Tensor):
"""Forward pass.
Args:
to_conv: input passed to convolutional block and upsampling
to_cat: input concatenated with the output of a conv block
"""
return torch.cat([self.conv(to_conv), to_cat], dim=1)
class FCRN_A(nn.Module):
"""
Fully Convolutional Regression Network A
Ref. W. Xie et al. 'Microscopy Cell Counting with Fully Convolutional
Regression Networks'
"""
def __init__(self, N: int = 1, input_filters: int = 3, dropout: bool = True, ** kwargs):
"""
Create FCRN-A model with:
* fixed kernel size = (3, 3)
* fixed max pooling kernel size = (2, 2) and upsampling factor = 2
* no. of filters as defined in an original model:
input size -> 32 -> 64 -> 128 -> 512 -> 128 -> 64 -> 1
Args:
N: no. of convolutional layers per block (see conv_block)
input_filters: no. of input channels
"""
super(FCRN_A, self).__init__()
bb = BlockBuilder(dropout)
self.model = nn.Sequential(
# downsampling
bb.conv_block(channels=(input_filters, 32), size=(3, 3), N=N),
nn.MaxPool2d(2),
bb.conv_block(channels=(32, 64), size=(3, 3), N=N),
nn.MaxPool2d(2),
bb.conv_block(channels=(64, 128), size=(3, 3), N=N),
nn.MaxPool2d(2),
# "convolutional fully connected"
bb.conv_block(channels=(128, 512), size=(3, 3), N=N),
# upsampling
nn.Upsample(scale_factor=2),
bb.conv_block(channels=(512, 128), size=(3, 3), N=N),
nn.Upsample(scale_factor=2),
bb.conv_block(channels=(128, 64), size=(3, 3), N=N),
nn.Upsample(scale_factor=2),
bb.conv_block(channels=(64, 1), size=(3, 3), N=N),
)
def forward(self, input: torch.Tensor):
"""Forward pass."""
return self.model(input)
class UNet(nn.Module):
"""
U-Net implementation.
Ref. O. Ronneberger et al. "U-net: Convolutional networks for biomedical
image segmentation."
"""
def __init__(self, filters: int = 64, input_filters: int = 3,
dropout: bool = False, **kwargs):
"""
Create U-Net model with:
* fixed kernel size = (3, 3)
* fixed max pooling kernel size = (2, 2) and upsampling factor = 2
* fixed no. of convolutional layers per block = 2 (see conv_block)
* constant no. of filters for convolutional layers
Args:
filters: no. of filters for convolutional layers
input_filters: no. of input channels
"""
super(UNet, self).__init__()
# first block channels size
initial_filters = (input_filters, filters)
# channels size for downsampling
down_filters = (filters, filters)
# channels size for upsampling (input doubled because of concatenate)
up_filters = (2 * filters, filters)
bb = BlockBuilder(dropout)
# downsampling
self.block1 = bb.conv_block(channels=initial_filters, size=(3, 3), N=2)
self.block2 = bb.conv_block(channels=down_filters, size=(3, 3), N=2)
self.block3 = bb.conv_block(channels=down_filters, size=(3, 3), N=2)
# upsampling
self.block4 = ConvCat(channels=down_filters, size=(3, 3), N=2)
self.block5 = ConvCat(channels=up_filters, size=(3, 3), N=2)
self.block6 = ConvCat(channels=up_filters, size=(3, 3), N=2)
# density prediction
self.block7 = bb.conv_block(channels=up_filters, size=(3, 3), N=2)
self.density_pred = nn.Conv2d(in_channels=filters, out_channels=1,
kernel_size=(1, 1), bias=False)
def forward(self, input: torch.Tensor):
"""Forward pass."""
# use the same max pooling kernel size (2, 2) across the network
pool = nn.MaxPool2d(2)
# downsampling
block1 = self.block1(input)
pool1 = pool(block1)
block2 = self.block2(pool1)
pool2 = pool(block2)
block3 = self.block3(pool2)
pool3 = pool(block3)
# upsampling
block4 = self.block4(pool3, block3)
block5 = self.block5(block4, block2)
block6 = self.block6(block5, block1)
# density prediction
block7 = self.block7(block6)
return self.density_pred(block7)
# --- PYTESTS --- #
def run_network(network: nn.Module, input_channels: int):
"""Generate a random image, run through network, and check output size."""
sample = torch.ones((1, input_channels, 224, 224))
result = network(input_filters=input_channels)(sample)
assert result.shape == (1, 1, 224, 224)
def test_UNet_color():
"""Test U-Net on RGB images."""
run_network(UNet, 3)
def test_UNet_grayscale():
"""Test U-Net on grayscale images."""
run_network(UNet, 1)
def test_FRCN_color():
"""Test FCRN-A on RGB images."""
run_network(FCRN_A, 3)
def test_FRCN_grayscale():
"""Test FCRN-A on grayscale images."""
run_network(FCRN_A, 1)
| 34.896429 | 127 | 0.566472 | 8,855 | 0.906253 | 0 | 0 | 0 | 0 | 0 | 0 | 3,393 | 0.347252 |
a6ca5dc72a2a53aa7dccb88c27d8a3fa44186150 | 1,175 | py | Python | tests/conftest.py | dclimber/python-kzt-exchangerates | 60eca52b776f889848d631be43c843bd9bd50d06 | [
"MIT"
] | 1 | 2021-05-15T15:19:00.000Z | 2021-05-15T15:19:00.000Z | tests/conftest.py | dclimber/python-kzt-exchangerates | 60eca52b776f889848d631be43c843bd9bd50d06 | [
"MIT"
] | null | null | null | tests/conftest.py | dclimber/python-kzt-exchangerates | 60eca52b776f889848d631be43c843bd9bd50d06 | [
"MIT"
] | null | null | null | import pytest
from pathlib import Path
import xml.etree.ElementTree as ET
@pytest.fixture
def date_for_tests():
return '24.04.2013'
@pytest.fixture
def result_date():
return '2013-04-24'
@pytest.fixture
def latest_url():
return 'https://nationalbank.kz/rss/rates_all.xml'
@pytest.fixture
def dated_url(date_for_tests):
return 'https://nationalbank.kz/rss/get_rates.cfm?fdate={}'.format(
date_for_tests)
@pytest.fixture
def sample_rss():
# rss file for 2013-04-24 (date for tests)
file = Path("sample_rss.xml")
text = file.read_text()
rss = ET.fromstring(text)
return rss
@pytest.fixture
def supported_currencies():
# currencies from sample_rss.xml
return ['AUD', 'GBP', 'BYR', 'BRL', 'HUF', 'HKD', 'DKK', 'AED', 'USD',
'EUR', 'CAD', 'CNY', 'KWD', 'KGS', 'LVL', 'LTL', 'MYR', 'MDL',
'NOK', 'PLN', 'SAR', 'RUB', 'XDR', 'SGD', 'TJS', 'TRY', 'UZS',
'UAH', 'CZK', 'SEK', 'CHF', 'ZAR', 'KRW', 'JPY', 'KZT']
@pytest.fixture
def target_currencies():
return ['AUD', 'GBP', 'DKK', 'AED', 'USD', 'EUR', 'CAD', 'CNY', 'KWD']
| 24.479167 | 79 | 0.577872 | 0 | 0 | 0 | 0 | 1,080 | 0.919149 | 0 | 0 | 429 | 0.365106 |
a6ca838341623a89031c7d01f921b6b241d6b6fa | 10,756 | py | Python | tests/test_UI.py | HubLot/buildH | 21201b55991b46337cab05508c0942611d00f477 | [
"BSD-3-Clause"
] | 13 | 2020-12-21T14:43:08.000Z | 2022-02-16T03:35:14.000Z | tests/test_UI.py | HubLot/buildH | 21201b55991b46337cab05508c0942611d00f477 | [
"BSD-3-Clause"
] | 137 | 2019-08-14T17:00:15.000Z | 2022-03-29T14:48:38.000Z | tests/test_UI.py | HubLot/buildH | 21201b55991b46337cab05508c0942611d00f477 | [
"BSD-3-Clause"
] | 6 | 2019-08-30T08:00:22.000Z | 2022-01-19T20:06:24.000Z | """
Unit tests for buildH.
Test functions from module UI
"""
import os
import pathlib
import pytest
from buildh import UI, lipids, BuildHError
import sys
DIR_DATA = "test_data"
PATH_ROOT_DATA = pathlib.Path(__file__).parent / DIR_DATA
# Ignore some MDAnalysis warnings for this test file
pytestmark = pytest.mark.filterwarnings('ignore::UserWarning')
# path for the Berger POPC files
PATH_DATA = PATH_ROOT_DATA / "Berger_POPC"
@pytest.fixture
def cd_tmp_dir(tmp_path):
"""Change directory to a temporary one."""
os.chdir(tmp_path)
# Move to a temporary directory because some output files are written in the current directory.
@pytest.mark.usefixtures("cd_tmp_dir")
class TestMain:
"""Test class for the main function of buildH."""
# Arguments of the main function
args = {
"coord_file" : str(PATH_DATA / "2POPC.pdb"),
"traj_file" : None,
"def_file" : str(PATH_DATA / "OP_def_BergerPOPC.def"),
"out_file" : "OP_buildH.out",
"prefix_traj_ouput" : None,
"begin" : None,
"end" : None,
"dic_lipid" : None
}
# Default output used for assessement
stdout_output = "Results written to OP_buildH.out"
def setup_class(self):
"""Initialize attributes."""
# Create correct lipid topology dict
lipids_tops = lipids.read_lipids_topH([lipids.PATH_JSON/"Berger_POPC.json"])
dic_lipid = lipids_tops["Berger_POPC"]
self.args["dic_lipid"] = dic_lipid
def test_main_minimal(self, capsys):
"""Test main with minimal arguments."""
UI.main(**self.args)
captured = capsys.readouterr().out
assert self.stdout_output in captured
def test_main_output(self, capsys):
"""Test main with user defined output name."""
args = self.args.copy()
args["out_file"] = "text.txt"
UI.main(**args)
captured = capsys.readouterr().out
assert "Results written to text.txt" in captured
def test_main_subdef(self, capsys):
"""Test main with partial def file."""
args = self.args.copy()
args["def_file"] = str(PATH_DATA / "OP_def_HP_BergerPOPC.def")
UI.main(**args)
captured = capsys.readouterr().out
assert self.stdout_output in captured
def test_main_traj(self, capsys):
"""Test main with trajectory."""
args = self.args.copy()
args["traj_file"] = str(PATH_DATA / "2POPC.xtc")
UI.main(**args)
captured = capsys.readouterr().out
assert self.stdout_output in captured
assert "Dealing with frame 10 at 10000.0 ps." in captured
def test_main_traj_slice(self, capsys):
"""Test main with sliced trajectory."""
args = self.args.copy()
args["traj_file"] = str(PATH_DATA / "2POPC.xtc")
args["end"] = 3000
UI.main(**args)
captured = capsys.readouterr().out
assert self.stdout_output in captured
assert "Dealing with frame 3 at 3000.0 ps." in captured
# Make sure we stop at frame 3
assert "Dealing with frame 10 at 10000.0 ps." not in captured
def test_main_traj_output(self, capsys):
"""Test main with trajectory and output trajectory."""
args = self.args.copy()
args["traj_file"] = str(PATH_DATA / "2POPC.xtc")
args["prefix_traj_ouput"] = "test"
UI.main(**args)
captured = capsys.readouterr().out
assert self.stdout_output in captured
assert "Writing new pdb with hydrogens." in captured
assert "Writing trajectory with hydrogens in xtc file." in captured
def test_fail_main_coord_topology_mismatch(self):
"""Test when coord file and topology doesn't match."""
args = self.args.copy()
lipids_tops = lipids.read_lipids_topH([lipids.PATH_JSON/"CHARMM36_POPC.json"])
dic_lipid = lipids_tops["CHARMM36_POPC"]
args["dic_lipid"] = dic_lipid
with pytest.raises(BuildHError) as err:
UI.main(**args)
assert "The topology chosen does not match the structure provided" in str(err.value)
def test_fail_main_coord_def_mismatch(self):
"""Test when coord file and def file doesn't match."""
args = self.args.copy()
args["def_file"] = str(PATH_DATA / "op_wrong1.def")
with pytest.raises(BuildHError) as err:
UI.main(**args)
assert f"Atoms defined in {args['def_file']} are missing in the structure" in str(err.value)
def test_fail_main_subdef_traj(self,):
"""Test main with partial def file and a output trajectory. Must fail."""
args = self.args.copy()
args["def_file"] = str(PATH_DATA / "OP_def_HP_BergerPOPC.def")
args["traj_file"] = str(PATH_DATA / "2POPC.xtc")
args["prefix_traj_ouput"] = "test"
with pytest.raises(BuildHError) as err:
UI.main(**args)
assert "Error on the number of H's to rebuild" in str(err.value)
# Move to a temporary directory because some output files are written in the current directory.
@pytest.mark.usefixtures("cd_tmp_dir")
class TestCLI:
"""Test class for the entry point of buildH.
Mimic the command line arguments.
"""
# Arguments of the CLI
common_args = [
"buildH",
"-c", str(PATH_DATA / "2POPC.pdb"),
"-d", str(PATH_DATA / "OP_def_BergerPOPC.def")
]
def test_CLI_minimal(self, capsys):
"""Test working CLI with minimal arguments."""
sys.argv = self.common_args + ["-l", "Berger_POPC"]
UI.entry_point()
captured = capsys.readouterr().out
assert "Results written to OP_buildH.out" in captured
def test_CLI_traj(self, capsys):
"""Test working CLI with all trajectory arguments."""
sys.argv = (self.common_args + ["-t", str(PATH_DATA / "2POPC.xtc")]
+ ["-l", "Berger_POPC"] + ["-o", "out.txt"]
+ ["-opx", "base"] + ["-b", "1000", "-e", "10000"])
UI.entry_point()
captured = capsys.readouterr().out
assert "Results written to out.txt" in captured
assert "Dealing with frame 10 at 10000.0 ps." in captured
assert "Writing new pdb with hydrogens." in captured
assert "Writing trajectory with hydrogens in xtc file." in captured
def test_CLI_user_json(self, capsys):
"""Test working CLI with JSON topology file."""
sys.argv = (self.common_args + ["-l", "Berger_POPC"]
+ ["-lt", str(PATH_ROOT_DATA / "Berger_POPC.json")])
UI.entry_point()
captured = capsys.readouterr().out
assert "Results written to OP_buildH.out" in captured
def test_fails_CLI_lipidtype(self, capsys):
"""Fail tests when passing a wrong lipid type."""
sys.argv = self.common_args + ["-l", "PPHA"]
with pytest.raises(SystemExit) as err:
UI.entry_point()
# Make sur the exception is thrown
assert err.type == SystemExit
assert "Lipid PPHA is not supported" in capsys.readouterr().err
def test_fails_CLI_slice(self, capsys):
"""Fail tests when passing a slice option without a trajectory."""
sys.argv = sys.argv = self.common_args + ["-l", "Berger_POPC", "-e", "1000"]
with pytest.raises(SystemExit) as err:
UI.entry_point()
assert err.type == SystemExit
assert "Slicing is only possible with a trajectory file." in capsys.readouterr().err
# Move to a temporary directory because some output files are written in the current directory.
@pytest.mark.usefixtures("cd_tmp_dir")
class TestLaunch:
"""Test class for the launch function of buildH.
This is the function called when using buildH as a module.
"""
# Arguments of the main function
args = {
"coord_file" : str(PATH_DATA / "2POPC.pdb"),
"def_file" : str(PATH_DATA / "OP_def_BergerPOPC.def"),
"lipid_type" : "Berger_POPC",
"traj_file" : None,
"out_file" : "OP_buildH.out",
"prefix_traj_ouput" : None,
"begin" : None,
"end" : None,
"lipid_jsons" : None
}
# Default output used for assessement
stdout_output = "Results written to OP_buildH.out"
def test_launch_minimal(self, capsys):
"""Test launch with minimal arguments."""
UI.launch(**self.args)
captured = capsys.readouterr().out
assert "Results written to OP_buildH.out" in captured
def test_launch_traj(self, capsys):
"""Test launch with all trajectory arguments."""
args = self.args.copy()
args["traj_file"] = str(PATH_DATA / "2POPC.xtc")
args["out_file"] = "out.txt"
args["prefix_traj_ouput"] = "basename"
args["begin"] = 0
args["end"] = 10000
UI.launch(**args)
captured = capsys.readouterr().out
assert "Results written to out.txt" in captured
assert "Dealing with frame 10 at 10000.0 ps." in captured
assert "Writing new pdb with hydrogens." in captured
assert "Writing trajectory with hydrogens in xtc file." in captured
def test_launch_user_json(self, capsys):
"""Test launch with JSON topology file."""
args = self.args.copy()
args["lipid_jsons"] = [str(PATH_ROOT_DATA / "Berger_POPC.json")]
UI.launch(**args)
captured = capsys.readouterr().out
assert "Results written to OP_buildH.out" in captured
def test_fail_launch_json(self):
"""Test fail launch with a wrong argument for JSON topology file."""
args = self.args.copy()
# Pass a string instead of a list
args["lipid_jsons"] = str(PATH_ROOT_DATA / "Berger_POPC.json")
with pytest.raises(TypeError) as err:
UI.launch(**args)
assert "a list is expected for argument 'lipid_jsons'" in str(err.value)
def test_fail_launch_file(self):
"""Test fail launch with a non existant file."""
args = self.args.copy()
# Pass a string instead of a list
args["traj_file"] = "nofile.xtc"
with pytest.raises(FileNotFoundError) as err:
UI.launch(**args)
assert "nofile.xtc does not exist." in str(err.value)
def test_fails_launch_lipidtype(self, capsys):
"""Fail tests when passing a wrong lipid type."""
args = self.args.copy()
args["lipid_type"] = "PPHA"
with pytest.raises(BuildHError) as err:
UI.launch(**args)
assert err.type == BuildHError
assert "Lipid PPHA is not supported" in str(err.value)
| 35.853333 | 100 | 0.622257 | 9,794 | 0.910562 | 0 | 0 | 10,022 | 0.931759 | 0 | 0 | 4,271 | 0.397081 |
a6ccca58625760a3a85f825d48b3bbe69cfdf917 | 7,844 | py | Python | train.py | s-broda/capstoneproject | 816fe144db6dc7eb430e5e1cc14937c63a8fc4b0 | [
"MIT"
] | null | null | null | train.py | s-broda/capstoneproject | 816fe144db6dc7eb430e5e1cc14937c63a8fc4b0 | [
"MIT"
] | 7 | 2020-03-24T18:13:33.000Z | 2022-02-10T01:12:31.000Z | train.py | s-broda/capstoneproject | 816fe144db6dc7eb430e5e1cc14937c63a8fc4b0 | [
"MIT"
] | null | null | null | import os
import argparse
import json
from datetime import datetime
import numpy as np
from sklearn.utils.class_weight import compute_class_weight
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import confusion_matrix
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
import bert # https://github.com/kpe/bert-for-tf2/
from onecycle import OneCycleScheduler # https://www.avanwyk.com/tensorflow-2-super-convergence-with-the-1cycle-policy/
from imdb import get_imdb_data
from tweets import get_tweets_data
from amazon import get_reviews_data
# Command-line interface.  Data/task selection, model location and training
# hyper-parameters are all exposed as flags; task-dependent options left
# unset (--model_name, --num_categories, --test_size) are resolved later.
parser = argparse.ArgumentParser()
current_time = datetime.now().strftime("%Y%m%d-%H%M%S")
# experiment, task and model selection
parser.add_argument("--experiment_name", type=str, default=current_time, help="Insert string defining your experiment. Defaults to datetime.now()")
parser.add_argument("--task", type=str, required=True, help="One of imdb, reviews, or tweets.")
parser.add_argument("--subtask", type=str, default="german", help="One of german or multi. Ignored for imdb task.")
parser.add_argument("--ckpt_name", type=str, default="bert_model.ckpt", help="Name of BERT checkpoint to load.")
parser.add_argument("--bert_base_path", type=str, default="D:/bert_models/", help="Where to find BERT models.")
parser.add_argument("--model_name", type=str, default=None, help="Name of BERT model. Default depends on task.")
parser.add_argument("--data_dir", type=str, default="data", help="Data directory.")
parser.add_argument("--log_dir", type=str, default="D:\\logs", help="Log directory.")
# training parameters
parser.add_argument("--batch_size", type=int, default=2, help="Batch size.")
parser.add_argument("--patience", type=int, default=3, help="Patience for early stopping.")
parser.add_argument("--learning_rate", type=float, default=2e-5, help="Learning rate.")
parser.add_argument("--max_seq_length", type=int, default=512, help="Maximum sequence length.")
parser.add_argument("--no_class_weights", action='store_true', help="Don't use class weights.")
parser.add_argument("--num_epochs", type=int, default=3, help="Maximum number of epochs.")
parser.add_argument("--test_size", type=float, default=None, help="Test size. Default depends on task.")
parser.add_argument("--num_categories", type=int, default=None, help="Number of categories. Defaults to 2 for imdb, 3 otherwise.")
parser.add_argument("--polarized", action='store_true', help="For reviews data: if true and num_categories=3, count only 1 and 5 as pos/neg")
# Unpack the parsed CLI options into module-level names that the rest of
# the script (default resolution below and the __main__ block) reads.
ARGS = parser.parse_args()
experiment_name = ARGS.experiment_name
batch_size = ARGS.batch_size
learning_rate = ARGS.learning_rate
max_seq_length = ARGS.max_seq_length
ckpt_name = ARGS.ckpt_name
# the CLI flag is negative ("--no_class_weights"); flip it into a positive switch
use_class_weights = not ARGS.no_class_weights
num_epochs = ARGS.num_epochs
task = ARGS.task
bert_base_path = ARGS.bert_base_path
# model_name, num_categories and test_size may still be None here; they are
# given task-dependent defaults further below.
num_categories = ARGS.num_categories
model_name = ARGS.model_name
test_size = ARGS.test_size
subtask = ARGS.subtask
data_dir = ARGS.data_dir
log_dir = ARGS.log_dir
patience = ARGS.patience
polarized = ARGS.polarized
print('Experiment name is ' + experiment_name + '.')

# Validate subtask up front, before any directories or files are created
# (previously this check ran only after the config had been written out).
if subtask != 'german' and subtask != 'multi':
    raise Exception("No such subtask.")

# Resolve task-dependent defaults for any options the user left unset.
if task == "imdb":
    # English IMDB reviews: binary sentiment; the dataset ships with a
    # fixed train/test split, so test_size stays None.
    if model_name is None:
        model_name = "uncased_L-12_H-768_A-12"
    if num_categories is None:
        num_categories = 2
elif task == "tweets":
    if model_name is None:
        model_name = "bert_base_german_cased" if subtask == "german" else "multi_cased_L-12_H-768_A-12"
    if num_categories is None:
        num_categories = 3
    if test_size is None:
        test_size = 0.2
elif task == "reviews":
    if model_name is None:
        model_name = "bert_base_german_cased" if subtask == "german" else "multi_cased_L-12_H-768_A-12"
    if num_categories is None:
        num_categories = 3
    if test_size is None:
        test_size = 0.5
else:
    raise Exception('No such task.')

# Write the resolved values back into ARGS so the dumped config reflects
# the settings actually used, not the unset CLI defaults.
ARGS.model_name = model_name
ARGS.num_categories = num_categories
ARGS.test_size = test_size

log_dir = os.path.join(log_dir, experiment_name)
data_dir = os.path.join(data_dir, task)
os.makedirs(log_dir, exist_ok=True)

# Persist the full configuration next to the logs for reproducibility;
# a context manager guarantees the file handle is closed.
config = vars(ARGS)
with open(os.path.join(log_dir, 'config.json'), 'w') as config_file:
    json.dump(config, config_file, indent=4, sort_keys=True)
def get_data(task, subtask, num_categories, data_dir, tokenizer, max_seq_length, test_size):
    """Load train/test splits for the requested task.

    Args:
        task: One of 'imdb', 'tweets' or 'reviews'.
        subtask: Language variant ('german' or 'multi'); unused for imdb.
        num_categories: Number of target classes.
        data_dir: Directory containing the task's data.
        tokenizer: BERT tokenizer used to encode the text.
        max_seq_length: Maximum token sequence length.
        test_size: Fraction used as the test split (ignored for imdb).

    Returns:
        Whatever the task-specific loader returns (train/test ids, masks,
        segment ids and labels).

    Raises:
        Exception: if ``task`` is not one of the three known tasks.
    """
    if task == "imdb":
        # Typo fix: message previously read "Ignoging".
        print("Ignoring test_size for imdb data.")
        return get_imdb_data(data_dir, tokenizer, max_seq_length)
    elif task == "tweets":
        return get_tweets_data(data_dir, subtask, num_categories, tokenizer, max_seq_length, test_size)
    elif task == "reviews":
        # NOTE(review): relies on the module-level `polarized` flag rather
        # than taking it as a parameter - confirm this is intentional.
        return get_reviews_data(data_dir, subtask, num_categories, tokenizer, max_seq_length, test_size, polarized)
    else:
        raise Exception('No such task.')
if __name__ == "__main__":
    # Resolve checkpoint and vocab paths for the chosen BERT model.
    bert_path = os.path.join(bert_base_path, model_name)
    model_ckpt = os.path.join(bert_path, ckpt_name)
    # "uncased" models require lower-casing; verify against the checkpoint.
    do_lower_case = model_name.find("uncased") != -1
    bert.bert_tokenization.validate_case_matches_checkpoint(do_lower_case, model_ckpt)
    vocab_file = os.path.join(bert_path, "vocab.txt")
    tokenizer = bert.bert_tokenization.FullTokenizer(vocab_file, do_lower_case)
    # Load tokenized train/test splits for the selected task.
    ( train_input_ids,
      train_input_masks,
      train_segment_ids,
      train_labels,
      test_input_ids,
      test_input_masks,
      test_segment_ids,
      test_labels
    ) = get_data(task, subtask, num_categories, data_dir, tokenizer, max_seq_length, test_size)
    # One-cycle learning-rate schedule over the whole training run.
    steps = np.ceil(train_input_ids.shape[0] / batch_size) * num_epochs
    lr_schedule = OneCycleScheduler(learning_rate, steps)
    # Early stopping and best-weights checkpointing on validation accuracy.
    es = EarlyStopping(monitor='val_SparseCategoricalAccuracy', mode='max', verbose=1, patience=patience)
    mc = ModelCheckpoint(os.path.join(log_dir, 'best_model.h5'), monitor='val_SparseCategoricalAccuracy', mode='max', save_best_only=True, save_weights_only=True)
    # Build the classifier: BERT [CLS] embedding -> dropout -> dense -> logits.
    bert_params = bert.params_from_pretrained_ckpt(bert_path)
    l_bert = bert.BertModelLayer.from_params(bert_params, name="bert")
    in_id = keras.layers.Input(shape=(max_seq_length,), name="input_ids")
    # [:, 0, :] selects the first ([CLS]) token's embedding.
    bert_output = l_bert(in_id)[:, 0, :]
    dropout = keras.layers.Dropout(0.5)(bert_output)
    dense = keras.layers.Dense(768, activation="relu")(dropout)
    dropout = keras.layers.Dropout(0.5)(dense)
    pred = keras.layers.Dense(num_categories, activation=None)(dropout)
    model = keras.models.Model(inputs=in_id, outputs=pred)
    opt = keras.optimizers.Nadam()
    # Logits output, so from_logits=True in the loss.
    model.compile(loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=opt, metrics=['SparseCategoricalAccuracy'])
    bert.load_bert_weights(l_bert, model_ckpt)
    model.summary()
    tensorboard_callback = keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=0,
                                                       write_graph=False, write_images=False, update_freq=1000)
    # Balanced class weights computed over the combined label distribution.
    # NOTE(review): newer scikit-learn requires keyword arguments here
    # (compute_class_weight(class_weight=..., classes=..., y=...)) - confirm version.
    y = np.concatenate([train_labels, test_labels]).flatten()
    wgt = compute_class_weight('balanced', np.unique(y), y)
    if not use_class_weights:
        # Replace with uniform weights of the same shape.
        wgt = (wgt * 0 + 1) / num_categories
    print('Class weights:', wgt)
    # NOTE(review): Keras documents class_weight as a dict; passing an
    # ndarray may rely on older behavior - confirm.
    model.fit(
        train_input_ids,
        train_labels,
        class_weight=wgt,
        validation_data=(test_input_ids, test_labels),
        shuffle=True,
        epochs=num_epochs,
        batch_size=batch_size,
        callbacks=[tensorboard_callback, es, mc, lr_schedule]
    )
    # Evaluate using the best (checkpointed) weights, not the final epoch.
    model.load_weights(os.path.join(log_dir, 'best_model.h5'))
    print("Reloaded best parameters.")
    y_pred = model.predict(test_input_ids)
    y_pred = np.argmax(y_pred, axis=1)
    # Per-class recall (confusion-matrix diagonal over row sums), then
    # balanced accuracy on the test split.
    matrix = confusion_matrix(test_labels, y_pred)
    print(matrix.diagonal()/matrix.sum(axis=1))
    BMAC = balanced_accuracy_score(test_labels, y_pred)
    print(BMAC)
a6cd531ba3259edc8b54ccf233a89ab0a561de13 | 2,179 | py | Python | tests/events/test_api_gateway_authorizer.py | chuckwondo/aws-lambda-typing | 8417ab67f2492be1508fe38b2c34bc106619a56d | [
"MIT"
] | 29 | 2021-01-07T13:35:16.000Z | 2022-03-25T07:20:54.000Z | tests/events/test_api_gateway_authorizer.py | chuckwondo/aws-lambda-typing | 8417ab67f2492be1508fe38b2c34bc106619a56d | [
"MIT"
] | 13 | 2021-02-28T00:31:00.000Z | 2022-03-29T15:24:01.000Z | tests/events/test_api_gateway_authorizer.py | chuckwondo/aws-lambda-typing | 8417ab67f2492be1508fe38b2c34bc106619a56d | [
"MIT"
] | 5 | 2021-02-27T13:50:42.000Z | 2022-01-13T15:05:44.000Z | from aws_lambda_typing.events import (
APIGatewayRequestAuthorizerEvent,
APIGatewayTokenAuthorizerEvent,
)
def test_api_gateway_token_authorizer_event() -> None:
    """Type-check a minimal TOKEN authorizer event literal."""
    method_arn = "arn:aws:execute-api:us-west-2:123456789012:ymy8tbxw7b/*/GET/"  # noqa: E501
    event: APIGatewayTokenAuthorizerEvent = {
        "type": "TOKEN",
        "authorizationToken": "allow",
        "methodArn": method_arn,
    }
def test_api_gateway_request_authorizer_event() -> None:
    """Type-check a representative REQUEST authorizer event literal."""
    headers = {
        "X-AMZ-Date": "20170718T062915Z",
        "Accept": "*/*",
        "HeaderAuth1": "headerValue1",
        "CloudFront-Viewer-Country": "US",
        "CloudFront-Forwarded-Proto": "https",
        "CloudFront-Is-Tablet-Viewer": "false",
        "CloudFront-Is-Mobile-Viewer": "false",
        "User-Agent": "...",
    }
    client_cert = {
        "clientCertPem": "CERT_CONTENT",
        "subjectDN": "www.example.com",
        "issuerDN": "Example issuer",
        "serialNumber": "a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1",  # noqa: E501
        "validity": {
            "notBefore": "May 28 12:30:02 2019 GMT",
            "notAfter": "Aug 5 09:36:04 2021 GMT",
        },
    }
    request_context = {
        "path": "/request",
        "accountId": "123456789012",
        "resourceId": "05c7jb",
        "stage": "test",
        "requestId": "...",
        "identity": {
            "apiKey": "...",
            "sourceIp": "...",
            "clientCert": client_cert,
        },
        "resourcePath": "/request",
        "httpMethod": "GET",
        "apiId": "abcdef123",
    }
    event: APIGatewayRequestAuthorizerEvent = {
        "type": "REQUEST",
        "methodArn": "arn:aws:execute-api:us-east-1:123456789012:abcdef123/test/GET/request",  # noqa: E501
        "resource": "/request",
        "path": "/request",
        "httpMethod": "GET",
        "headers": headers,
        "queryStringParameters": {"QueryString1": "queryValue1"},
        "pathParameters": {},
        "stageVariables": {"StageVar1": "stageValue1"},
        "requestContext": request_context,
    }
| 36.316667 | 107 | 0.502065 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,083 | 0.497017 |
a6ce33e31ccfc0113bca5c9e83c41d2a6a6f5182 | 37,722 | py | Python | Tools/SinGED/scene_manager.py | willcassella/SinGE | f4c1a5736585c0f154cc6aabe7f7aa634175ebfd | [
"MIT"
] | 9 | 2016-10-14T16:18:31.000Z | 2021-12-13T00:36:49.000Z | Tools/SinGED/scene_manager.py | willcassella/SinGE | f4c1a5736585c0f154cc6aabe7f7aa634175ebfd | [
"MIT"
] | null | null | null | Tools/SinGED/scene_manager.py | willcassella/SinGE | f4c1a5736585c0f154cc6aabe7f7aa634175ebfd | [
"MIT"
] | null | null | null | # scene_manager.py
from copy import deepcopy
from . import editor_session
def recursive_update(a, b):
    """Recursively merge dictionary `b` into `a` in place.

    `b` must be a structural subset of `a` (every key in `b` exists in `a`,
    with dicts matching dicts). Returns True when any stored value changed.
    """
    modified = False
    for key, new_value in b.items():
        current = a[key]
        if isinstance(new_value, dict):
            # Descend into nested dictionaries instead of replacing them.
            if recursive_update(current, new_value):
                modified = True
        else:
            if current != new_value:
                modified = True
            a[key] = new_value
    return modified
class Node(object):
    """Client-side scene-graph node.

    A node created locally carries a negative "fake" id until the server
    assigns a real (positive) one; NULL_ID (0) means "no node".
    """

    NULL_ID = 0

    def __init__(self):
        self.id = None
        self.fake_id = None
        self.root = None  # Parent node object, or None for top-level nodes.
        self.name = ""
        # Local (parent-relative) transform: position, scale, rotation quaternion.
        self.local_position = [0.0, 0.0, 0.0]
        self.local_scale = [1.0, 1.0, 1.0]
        self.local_rotation = [0.0, 0.0, 0.0, 1.0]
        self.user_data = None
        self.destroyed = False

    def get_root_id(self):
        """Return the parent's id, or NULL_ID when there is no parent."""
        if self.root is None:
            return Node.NULL_ID
        return self.root.id

    def is_real(self):
        """True once the server has assigned a positive id."""
        return self.id > Node.NULL_ID

    def is_fake(self):
        """True while the node only has a locally assigned negative id."""
        return self.id < Node.NULL_ID
class ComponentType(object):
    """Registry for all instances of one component type.

    Buffers created/changed/destroyed instances between network polls and
    forwards lifecycle events to optional user callbacks.
    """

    def __init__(self, type_name):
        self.type_name = type_name
        # Node -> ComponentInstance for every live instance of this type.
        self.instances = dict()
        # Instances not yet announced to the server.
        self.new_instances = set()
        self.changed_instances = set()
        self.destroyed_instances = set()
        # Optional user lifecycle callbacks, each taking the instance.
        self.new_instance_callback = None
        self.update_instance_callback = None
        self.destroy_instance_callback = None

    def get_instance(self, node):
        """Return the instance attached to `node`, or None."""
        return self.instances.get(node)

    def set_new_instance_callback(self, callback):
        """Register `callback(instance)` for newly created instances."""
        self.new_instance_callback = callback

    def set_update_instance_callback(self, callback):
        """Register `callback(instance)` for modified instances."""
        self.update_instance_callback = callback

    def set_destroy_instance_callback(self, callback):
        """Register `callback(instance)` for destroyed instances."""
        self.destroy_instance_callback = callback

    def request_new_instance(self, node):
        """Attach a new instance of this type to `node` and return it."""
        assert(not node.destroyed and node not in self.instances)
        created = ComponentInstance(node, self)
        self.instances[node] = created
        # Queue it for the next 'new_component' query.
        self.new_instances.add(created)
        if self.new_instance_callback is not None:
            self.new_instance_callback(created)
        return created

    def request_destroy_instance(self, node):
        """Mark the instance on `node` (if any) for destruction."""
        target = self.instances.get(node)
        if target is None or target.destroyed:
            return
        target.destroyed = True
        # Queue it for the next 'destroy_component' query.
        self.destroyed_instances.add(target)
        if self.destroy_instance_callback is not None:
            self.destroy_instance_callback(target)
class ComponentInstance(object):
    """Client-side mirror of a single component attached to a single node.

    An instance begins unloaded (`is_loaded` False, `value` None): reads and
    writes made before the server delivers the value are deferred through
    `loaded_callbacks`. `changed_props` maps a property name to the sequence
    number of the query that carried our edit (None while unsent), which
    lets `server_set_value` ignore stale echoes of our own in-flight edits.
    """

    def __init__(self, node, type_v):
        self.type = type_v
        self.node = node
        self.destroyed = False
        self.is_loaded = False
        self.value = None
        # Property name -> seq number of the in-flight edit (None = unsent).
        self.changed_props = dict()
        # Deferred operations to run once the value is loaded.
        self.loaded_callbacks = list()

    def _set_property(self, prop_name, value):
        """Store one property value; return whether anything changed."""
        old_value = self.value[prop_name]
        if isinstance(value, dict):
            # Dict values are merged into the existing dict in place.
            assert(isinstance(old_value, dict))
            changed = recursive_update(old_value, value)
        else:
            changed = old_value != value
            self.value[prop_name] = value
        return changed

    def get_value_immediate(self, default=None):
        """Return the full value, or `default` if not loaded yet."""
        if not self.is_loaded:
            return default
        return self.value

    def get_value_async(self, callback):
        """Invoke `callback(value)` now, or once the value is loaded."""
        if not self.is_loaded:
            self.loaded_callbacks.append(lambda instance: callback(instance.value))
            return
        callback(self.value)

    def set_value(self, value):
        """Set several properties at once (see `set_property`)."""
        for prop_name, prop_val in value.items():
            self.set_property(prop_name, prop_val)

    def server_set_value(self, seq_num, value):
        """Apply a server-sent value; return whether anything changed.

        A property is applied only when it has no in-flight local edit, or
        when `seq_num` confirms our latest edit (seq 0 forces application).
        """
        modified = False
        for prop_name, prop_val in value.items():
            # If this property was not expected to be changed, or it's the final change
            if seq_num == 0 or prop_name not in self.changed_props or self.changed_props[prop_name] == seq_num:
                modified = self._set_property(prop_name, prop_val) or modified
                # Remove it from the change table
                self.changed_props.pop(prop_name, None)
        return modified

    def get_property_immediate(self, prop_name, default=None):
        """Return one property, or `default` if not loaded yet."""
        if not self.is_loaded:
            return default
        return self.value[prop_name]

    def get_sub_property_immediate(self, prop_path, default=None):
        """Return the nested property addressed by `prop_path`, or `default`."""
        if not self.is_loaded:
            return default
        value = self.value
        for prop_name in prop_path:
            value = value[prop_name]
        return value

    def get_property_async(self, prop_name, callback):
        """Invoke `callback(prop_value)` now, or once the value is loaded."""
        if not self.is_loaded:
            self.loaded_callbacks.append(lambda instance: callback(instance.value[prop_name]))
            return
        callback(self.value[prop_name])

    def set_property(self, prop_name, value):
        """Set one property, deferring the write if not loaded yet."""
        if not self.is_loaded:
            # Copy so later caller-side mutation cannot alter the queued write.
            value = deepcopy(value)
            self.loaded_callbacks.append(lambda instance: instance.set_property(prop_name, value))
            return
        changed = self._set_property(prop_name, value)
        if changed:
            # Mark the edit as unsent so the next query picks it up.
            self.changed_props[prop_name] = None
            self.type.changed_instances.add(self)
        # Run the modified callback (fires even when nothing changed).
        if self.type.update_instance_callback is not None:
            self.type.update_instance_callback(self)

    def set_sub_property_immediate(self, prop_path, value):
        """Set a nested property immediately.

        Returns False when the value is not loaded yet; True otherwise
        (even when the write turned out to be a no-op).
        """
        if not self.is_loaded:
            # BUG FIX: an unreachable bare `return` used to follow this line.
            return False
        outer_prop_name = prop_path[0]
        inner_prop_name = prop_path[-1]
        # Walk to the dict that owns the innermost property.
        old_value = self.value
        for prop_name in prop_path[:-1]:
            old_value = old_value[prop_name]
        modified = recursive_update(old_value, {inner_prop_name: value})
        if modified:
            self.changed_props[outer_prop_name] = None
            self.type.changed_instances.add(self)
        # Run the update callback
        if self.type.update_instance_callback is not None:
            self.type.update_instance_callback(self)
        return True
class SceneManager(object):
    def __init__(self):
        """Client-side mirror of the editor scene graph.

        Tracks nodes and component types/instances, buffers local edits,
        and reconciles them with the server through the query/response
        handlers wired up in `register_handlers`.
        """
        # Fake (negative) ids are handed out locally until the server
        # assigns a real id via the 'new_node' response.
        self._next_fake_node_id = -1
        # User callbacks for node lifecycle events.
        self._new_node_callback = None
        self._update_node_callback = None
        self._destroy_node_callback = None
        # All known nodes, keyed by id (negative ids are local-only fakes).
        self._nodes = dict()
        # Fake id -> node, for nodes not yet announced to the server.
        self._unsent_new_nodes = dict()
        # Query sequence number -> {fake id: node} for in-flight creations.
        self._sent_new_nodes = dict()
        self._destroyed_nodes = set()
        # Node -> sequence number of the sent edit (None while unsent).
        self._node_changed_roots = dict()
        self._node_changed_names = dict()
        self._node_changed_local_transforms = dict()
        # NOTE(review): _new_components appears unused in this class - confirm.
        self._new_components = dict()
        # Component type name -> ComponentType.
        self._components = dict()
        self._sent_scene_query = False
        self._save_scene_path = ''
        self._generate_lightmaps_query = None
        self._lightmaps_generated_callback = None
def register_handlers(self, session):
session.add_query_handler('get_scene', self._get_scene_query)
session.add_response_handler('get_scene', self._get_scene_response)
session.add_query_handler('new_node', self._new_node_query)
session.add_response_handler('new_node', self._new_node_response)
session.add_query_handler('destroy_node', self._destroy_node_query)
session.add_response_handler('destroy_node', self._destroy_node_response)
session.add_query_handler('node_root_update', self._node_root_update_query)
session.add_response_handler('node_root_update', self._node_root_update_response)
session.add_query_handler('node_name_update', self._node_name_update_query)
session.add_response_handler('node_name_update', self._node_name_update_response)
session.add_query_handler('node_local_transform_update', self._node_local_transform_update_query)
session.add_response_handler('node_local_transform_update', self._node_local_transform_update_response)
session.add_query_handler('new_component', self._new_component_query)
session.add_response_handler('new_component', self._new_component_response)
session.add_query_handler('destroy_component', self._destroy_component_query)
session.add_response_handler('destroy_component', self._destroy_component_response)
session.add_query_handler('component_property_update', self._component_property_update_query)
session.add_response_handler('component_property_update', self._component_property_update_response)
session.add_query_handler('save_scene', self._save_scene_query)
session.add_query_handler('gen_lightmaps', self._gen_lightmaps_query)
session.add_response_handler('gen_lightmaps', self._gen_lightmaps_response)
def _get_scene_query(self, seq_number, priority):
# Unused arguments
del seq_number, priority
if not self._sent_scene_query:
self._sent_scene_query = True
return True # Actual value doesn't matter
    def _get_scene_response(self, seq_number, response):
        """Populate local state from the server's full 'get_scene' dump."""
        # Unused arguments
        del seq_number
        if response is None:
            return
        # Store all new nodes
        new_nodes = set()
        root_ids = dict()
        new_components = dict()
        # For each node in the scene
        for node_id_str, value in response['nodes'].items():
            node_id = int(node_id_str)
            # Insert a new entry into the nodes table
            node = Node()
            node.id = node_id
            self._nodes[node_id] = node
            # Roots are linked in a second pass, after all nodes exist.
            root_ids[node] = value['root']
            # Initialize the node
            node.name = value['name']
            node.local_position = value['lpos']
            node.local_rotation = value['lrot']
            node.local_scale = value['lscale']
            # Add the node to the list of newly created nodes
            new_nodes.add(node)
        # Add nodes to roots
        for node, root_id in root_ids.items():
            node.root = self.get_node(root_id)
        # For each component type
        for component_type_name, instances in response['components'].items():
            component_type = self._components.setdefault(component_type_name, ComponentType(component_type_name))
            # Stupid serialization system corner case
            if instances is None:
                continue
            new_instances = list()
            # For each instance of this component type
            for node_id_str, value in instances.items():
                node_id = int(node_id_str)
                node = self._nodes[node_id]
                # Add the component instance object, already loaded with its value.
                instance = ComponentInstance(node, component_type)
                component_type.instances[node] = instance
                instance.value = value
                instance.is_loaded = True
                instance.loaded_callbacks = None
                new_instances.append(instance)
            if component_type.new_instance_callback is not None:
                new_components[component_type] = new_instances
        # Run the 'new_node' callback on all new nodes
        if self._new_node_callback is not None:
            for node in new_nodes:
                self._new_node_callback(self, node)
        # Run the 'update_node' callback on all new nodes
        if self._update_node_callback is not None:
            for node in new_nodes:
                self._update_node_callback(self, node)
        # Run the 'new_instance' callback on all components
        for component_type, instances in new_components.items():
            for instance in instances:
                component_type.new_instance_callback(instance)
def _new_node_query(self, seq_number, priority):
# Unused arguments
del priority
if len(self._unsent_new_nodes) == 0:
return None
message = dict()
for fake_id, node in self._unsent_new_nodes.items():
if node.destroyed:
continue
# Only send fake id and name, other properties will be updated later
node_dict = message[fake_id] = dict()
node_dict['name'] = node.name
# Reset the table of unsent nodes
self._sent_new_nodes[seq_number] = self._unsent_new_nodes
self._unsent_new_nodes = dict()
return message
    def _new_node_response(self, seq_number, response):
        """Bind server-assigned real ids to nodes.

        When `seq_number` matches a creation query we sent, the response
        maps our fake ids to real ids; otherwise the nodes were created by
        another client and are instantiated fresh here.
        """
        # Check if these nodes correspond to nodes that we requested
        if seq_number not in self._sent_new_nodes:
            # Create new nodes
            new_nodes = list()
            root_ids = dict()
            for node_response in response.values():
                node = Node()
                node.id = node_response['id']
                node.name = node_response['name']
                node.local_position = node_response.get('lpos', [0.0, 0.0, 0.0])
                node.local_rotation = node_response.get('lrot', [0.0, 0.0, 0.0, 1.0])
                node.local_scale = node_response.get('lscale', [1.0, 1.0, 1.0])
                root_ids[node] = node_response.get('root', Node.NULL_ID)
                self._nodes[node.id] = node
                new_nodes.append(node)
                print("Received unrequested new node, id={}".format(node.id))
            # Set node roots
            for node, root_id in root_ids.items():
                node.root = self.get_node(root_id)
            # Call 'new_node' on all created nodes
            if self._new_node_callback is not None:
                for new_node in new_nodes:
                    self._new_node_callback(self, new_node)
            # Call 'update_node' on all created nodes
            if self._update_node_callback is not None:
                for new_node in new_nodes:
                    self._update_node_callback(self, new_node)
            return
        # Get the nodes that were supposed to go with this sequence number
        pending_nodes = self._sent_new_nodes[seq_number]
        del self._sent_new_nodes[seq_number]
        assert(len(pending_nodes) == len(response))
        updated_nodes = list()
        for fake_id_str, node_response in response.items():
            fake_id = int(fake_id_str)
            node = pending_nodes[fake_id]
            # Apply Id
            # NOTE(review): the fake-id entry is left behind in self._nodes -
            # confirm that is intentional.
            node.id = node_response['id']
            self._nodes[node.id] = node
            # If the node has been destroyed, don't add it to be updated
            if node.destroyed:
                continue
            updated_nodes.append(node)
            print("Allocated node id {} for fake node {}".format(node.id, node.fake_id))
        # Call the update function on updated nodes
        if self._update_node_callback is not None:
            for node in updated_nodes:
                self._update_node_callback(self, node)
def _destroy_node_query(self, seq_number, priority):
# Unused arguments
del seq_number, priority
if len(self._destroyed_nodes) == 0:
return None
message = list()
remaining = set()
for node in self._destroyed_nodes:
# If the node isn't real yet (so they created it and then immediately destroyed it), don't destroy it yet
if node.is_fake():
remaining.add(node)
continue
message.append(node.id)
self._destroyed_nodes = remaining
return message
def _destroy_node_response(self, seq_number, response):
# Unused arguments
del seq_number
destroyed_nodes = list()
# Figure out which ones haven't actually been destroyed yet
for node_id in response:
if node_id in self._nodes:
# Destroy the node
node = self._nodes[node_id]
destroyed_nodes.append(node)
# Destroy them
for node in destroyed_nodes:
self.request_destroy_node(node)
    def _node_root_update_query(self, seq_number, priority):
        """Collect pending re-parenting edits into a 'node_root_update' query."""
        # Unused arguments
        del priority
        if len(self._node_changed_roots) == 0:
            return None
        message = dict()
        for node, existing_seq_num in list(self._node_changed_roots.items()):
            # If the node was destroyed, remove it from the list and continue
            if node.destroyed:
                del self._node_changed_roots[node]
                continue
            # If this node is fake, don't add it to the query yet
            if node.is_fake():
                continue
            # If this message has already been sent out skip it
            if existing_seq_num is not None:
                continue
            # If this node's root is null, add it to the query
            if node.root is None:
                message[node.id] = Node.NULL_ID
                self._node_changed_roots[node] = seq_number
                continue
            # If this node's root is fake, don't add it to the query yet
            if node.root.is_fake():
                continue
            # Otherwise, add it to the message
            message[node.id] = node.root.id
            self._node_changed_roots[node] = seq_number
        if len(message) == 0:
            return None
        return message
def _node_root_update_response(self, seq_number, response):
updated_nodes = list()
# For each node and root in the response
for node_id_str, root_id in response.items():
node_id = int(node_id_str)
node = self._nodes[node_id]
# If this node's root was not expected to be changed, or the change is final
if seq_number == 0 or node not in self._node_changed_roots or self._node_changed_roots[node] == seq_number:
# If the new root is different than the old
if node.get_root_id() != root_id:
node.root = self.get_node(root_id)
updated_nodes.append(node)
# Remove it from the changed root table
self._node_changed_roots.pop(node, None)
# Call the update callback, if any
if self._update_node_callback is not None:
for node in updated_nodes:
self._update_node_callback(node)
def _node_name_update_query(self, seq_number, priority):
# Unused parameters
del priority
if len(self._node_changed_names) == 0:
return None
message = dict()
for node, existing_seq_num in list(self._node_changed_names.items()):
# If the node was destroyed, remove it from the table and continue
if node.destroyed:
del self._node_changed_names[node]
continue
# If the node is fake, don't add it yet
if node.is_fake():
continue
# If the node's query hasn't been responded to yet, ignore it
if existing_seq_num is not None:
continue
# Add it to the query
message[node.id] = node.name
self._node_changed_names[node] = seq_number
if len(message) == 0:
return None
return message
def _node_name_update_response(self, seq_number, response):
updated_nodes = list()
# For each node and name in the response
for node_id_str, name in response.items():
node_id = int(node_id_str)
node = self._nodes[node_id]
# If the node's name was not expected to be changed, or the change is final
if seq_number == 0 or node not in self._node_changed_names or self._node_changed_names[node] == seq_number:
# If the new name is different from the old one
if node.name != name:
node.name = name
updated_nodes.append(node)
# Remove it from the changed table
self._node_changed_names.pop(node, None)
# Call the user callback on all updated nodes
if self._update_node_callback is not None:
for node in updated_nodes:
self._update_node_callback(self, node)
    def _node_local_transform_update_query(self, seq_number, priority):
        """Collect pending local-transform edits (low-priority polls only)."""
        # Setting the transform is not a high priority update
        if priority != editor_session.EditorSession.PRIORITY_ANY:
            return None
        if len(self._node_changed_local_transforms) == 0:
            return None
        message = dict()
        for node, existing_seq_num in list(self._node_changed_local_transforms.items()):
            # If the node was destroyed, remove it and continue
            if node.destroyed:
                del self._node_changed_local_transforms[node]
                continue
            # If the node is fake, don't add it yet
            if node.is_fake():
                continue
            # If the node is in the table for a previously sent query
            if existing_seq_num is not None:
                continue
            # Add it to the query (copies so later edits don't mutate the message)
            entry = message[node.id] = dict()
            entry['lpos'] = node.local_position.copy()
            entry['lrot'] = node.local_rotation.copy()
            entry['lscale'] = node.local_scale.copy()
            self._node_changed_local_transforms[node] = seq_number
        if len(message) == 0:
            return None
        return message
    def _node_local_transform_update_response(self, seq_number, response):
        """Apply server-confirmed local transforms to nodes."""
        updated_nodes = list()
        # For each transformed node, and it's new transform
        for node_id_str, trans in response.items():
            node_id = int(node_id_str)
            node = self._nodes[node_id]
            # If the node's transform was not expected to be changed, or the change is final
            if seq_number == 0 \
                or node not in self._node_changed_local_transforms \
                or self._node_changed_local_transforms[node] == seq_number:
                # If the new transform is different than the old one
                different = node.local_position != trans['lpos']
                different = different or node.local_scale != trans['lscale']
                different = different or node.local_rotation != trans['lrot']
                # Update the node
                if different:
                    node.local_position = trans['lpos']
                    node.local_scale = trans['lscale']
                    node.local_rotation = trans['lrot']
                    updated_nodes.append(node)
                # Remove it from the change table
                self._node_changed_local_transforms.pop(node, None)
        # Call the update callback
        if self._update_node_callback is not None:
            for node in updated_nodes:
                self._update_node_callback(self, node)
    def _new_component_query(self, seq_number, priority):
        """Announce locally added component instances (low-priority polls only)."""
        # Unused arguments
        del seq_number
        if priority != editor_session.EditorSession.PRIORITY_ANY:
            return
        # Construct the message
        message = dict()
        # For each component type
        for component_type_name, component_type in self._components.items():
            remaining = set()
            new_instances = list()
            for instance in component_type.new_instances:
                # If the node was destroyed, don't add it
                if instance.node.destroyed:
                    continue
                # If the node is fake, don't add it YET
                if instance.node.is_fake():
                    remaining.add(instance)
                    continue
                # Add it to the message
                new_instances.append(instance.node.id)
            # Reset the new instance set
            component_type.new_instances = remaining
            # Add it to the message only if new components were actually created
            if len(new_instances) == 0:
                continue
            message[component_type_name] = new_instances
        if len(message) == 0:
            return None
        return message
    def _new_component_response(self, seq_number, response):
        """Reconcile server-confirmed component creations with local state.

        Each confirmed instance falls into one of three cases: unknown
        locally (create it), known but unloaded (load its value and run the
        deferred callbacks), or already loaded (merge the value).
        """
        # For each component type and set of instances in the response
        for component_type_name, instances in response.items():
            # Get the component type object
            component_type = self._components[component_type_name]
            new_instances = list()
            loaded_instances = list()
            updated_instances = list()
            # For each newly created instance
            for node_id_str, value in instances.items():
                node_id = int(node_id_str)
                node = self._nodes[node_id]
                # If an instance doesn't already exist, create it
                if node not in component_type.instances:
                    instance = ComponentInstance(node, component_type)
                    component_type.instances[node] = instance
                    instance.is_loaded = True
                    instance.value = value
                    new_instances.append(instance)
                    continue
                # Get the existing instance
                instance = component_type.instances[node]
                # If the instance hasn't been loaded
                if not instance.is_loaded:
                    instance.value = value
                    instance.is_loaded = True
                    loaded_instances.append(instance)
                    continue
                # Update the value
                modified = instance.server_set_value(seq_number, value)
                if modified:
                    updated_instances.append(instance)
            # Call the new instance callback, if one exists
            if component_type.new_instance_callback is not None:
                for instance in new_instances:
                    component_type.new_instance_callback(instance)
            # Run callbacks for loaded instances
            for instance in loaded_instances:
                for callback in instance.loaded_callbacks:
                    callback(instance)
                instance.loaded_callbacks = None
            # Run the instance update callback, if one exists
            if component_type.update_instance_callback is not None:
                for instance in updated_instances:
                    component_type.update_instance_callback(instance)
    def _destroy_component_query(self, seq_number, priority):
        """Report locally destroyed component instances to the server."""
        # Unused arguments
        del seq_number, priority
        # Create the message
        message = dict()
        for component_type_name, component_type in self._components.items():
            destroyed_instances = list()
            remaining = set()
            for instance in component_type.destroyed_instances:
                # If the node was destroyed, don't add it; it will be destroyed anyway (or was already)
                if instance.node.destroyed:
                    continue
                # If the node is fake, don't add it YET
                if instance.node.is_fake():
                    remaining.add(instance)
                    continue
                # If the instance hasn't been loaded yet, don't add it YET (it might not have been created yet)
                if not instance.is_loaded:
                    remaining.add(instance)
                    continue
                # Add it to the destroyed list
                destroyed_instances.append(instance.node.id)
            # Reset the destroyed instance set
            component_type.destroyed_instances = remaining
            # Only add the list to the query if it actually has anything
            if len(destroyed_instances) == 0:
                continue
            message[component_type_name] = destroyed_instances
        if len(message) == 0:
            return None
        return message
    def _destroy_component_response(self, seq_number, response):
        """Remove component instances the server confirmed as destroyed."""
        # Unused arguments
        del seq_number
        # For each component type with destroyed instances
        for component_type_name, instance_ids in response.items():
            component_type = self._components[component_type_name]
            destroyed_instances = list()
            # For each destroyed instance
            for node_id in instance_ids:
                # If the node has been destroyed, skip it
                if node_id not in self._nodes:
                    continue
                # Get the node
                node = self._nodes[node_id]
                # If the instance has already been destroyed, skip it
                if node not in component_type.instances:
                    continue
                # Get the instance
                instance = component_type.instances[node]
                destroyed_instances.append(instance)
                # Remove the instance
                instance.destroyed = True
                del component_type.instances[node]
            # Run the user callback
            if component_type.destroy_instance_callback is not None:
                for instance in destroyed_instances:
                    component_type.destroy_instance_callback(instance)
    def _component_property_update_query(self, seq_number, priority):
        """Send unsent component property edits, tagging them with `seq_number`."""
        # Unused parameters
        del priority
        message = dict()
        # For each component type
        for component_type_name, component_type in self._components.items():
            updated_instances = dict()
            # For each instance of this component type that was changed
            remaining = set()
            for changed_instance in component_type.changed_instances:
                updated_props = dict()
                # If this instance is destroyed, skip it
                if changed_instance.destroyed or changed_instance.node.destroyed:
                    continue
                # If the instance is not real, or it hasn't been loaded yet, don't add it YET
                if changed_instance.node.is_fake() or not changed_instance.is_loaded:
                    remaining.add(changed_instance)
                    continue
                # For each property of this instance that was changed
                for changed_prop_name, existing_seq_num in changed_instance.changed_props.items():
                    # If this property change has not been sent yet, add it to the query
                    # (deepcopy so later local edits can't mutate the outgoing message)
                    if existing_seq_num is None:
                        updated_props[changed_prop_name] = deepcopy(changed_instance.value[changed_prop_name])
                        changed_instance.changed_props[changed_prop_name] = seq_number
                # Only add this instance as changed if something was actually changed
                if len(updated_props) == 0:
                    continue
                updated_instances[changed_instance.node.id] = updated_props
            # Reset the set of changed instances
            component_type.changed_instances = remaining
            # Only add this component type if something was actually changed
            if len(updated_instances) == 0:
                continue
            message[component_type_name] = updated_instances
        # Only send the message if something was changed
        if len(message) == 0:
            return None
        return message
def _component_property_update_response(self, seq_number, response):
# For each component type in the response
for component_type_name, instances in response.items():
component_type = self._components[component_type_name]
updated_instances = list()
# For each instance in the response
for node_id_str, value in instances.items():
node_id = int(node_id_str)
node = self._nodes[node_id]
# Get the component instance
instance = component_type.instances[node]
# Set the value
modified = instance.server_set_value(seq_number, value)
if modified:
updated_instances.append(instance)
# If there's a callback for this component type
if component_type.update_instance_callback is not None:
for instance in updated_instances:
component_type.update_instance_callback(instance)
def _save_scene_query(self, seq_number, priority):
# Unused arguments
del seq_number, priority
if len(self._save_scene_path) == 0:
return None
message = {
'path': self._save_scene_path,
}
self._save_scene_path = ''
return message
def _gen_lightmaps_query(self, seq_number, priority):
# Unused arguments
del seq_number, priority
message = self._generate_lightmaps_query
self._generate_lightmaps_query = None
return message
def _gen_lightmaps_response(self, seq_number, response):
# Unused parameters
del seq_number
if self._lightmaps_generated_callback is not None:
self._lightmaps_generated_callback(response)
def get_node(self, node_id):
if node_id == Node.NULL_ID:
return None
return self._nodes[node_id]
    def get_component_type(self, component_type_name):
        """Return the ComponentType registered under `component_type_name`.

        Raises KeyError when the type has never been seen or registered.
        """
        return self._components[component_type_name]
def get_node_components(self, node):
result = list()
for component_type in self._components.values():
instance = component_type.get_instance(node)
if instance is not None:
result.append(instance)
return result
    def set_new_node_callback(self, callback):
        """Register `callback(scene_manager, node)` for newly created nodes."""
        self._new_node_callback = callback
    def set_update_node_callback(self, callback):
        """Register `callback(scene_manager, node)` invoked when a node changes."""
        self._update_node_callback = callback
    def set_destroy_node_callback(self, callback):
        """Register a callback invoked when a node is destroyed."""
        self._destroy_node_callback = callback
def set_new_component_callback(self, component_type_name, callback):
        """Register the new-instance callback for a component type, creating the
        ComponentType entry on demand."""
        # Get or set the component type, since this may be called before any queries are run
        component = self._components.setdefault(component_type_name, ComponentType(component_type_name))
        component.set_new_instance_callback(callback)
def set_update_component_callback(self, component_type_name, callback):
        """Register the update-instance callback for a component type, creating the
        ComponentType entry on demand."""
        # Get or set the component type, since this may be called before any queries are run
        component = self._components.setdefault(component_type_name, ComponentType(component_type_name))
        component.set_update_instance_callback(callback)
def set_destroy_component_callback(self, component_type_name, callback):
        """Register the destroy-instance callback for a component type, creating the
        ComponentType entry on demand."""
        # Get or set the component type, since this may be called before any queries are run
        component = self._components.setdefault(component_type_name, ComponentType(component_type_name))
        component.set_destroy_instance_callback(callback)
def set_lightmaps_generated_callback(self, callback):
        """Register the callback invoked with the server's lightmap-generation response."""
        self._lightmaps_generated_callback = callback
def save_scene(self, path):
        """Queue a scene save to *path*; consumed by the next _save_scene_query poll."""
        self._save_scene_path = path
def generate_lightmaps(self, light_dir, light_intensity, ambient, num_indirect_sample_sets, num_accumulation_steps, num_post_steps, lightmap_path):
    """Queue a lightmap-generation request; consumed by the next _gen_lightmaps_query poll."""
    self._generate_lightmaps_query = dict(
        light_direction=light_dir,
        light_intensity=light_intensity,
        ambient=ambient,
        num_indirect_sample_sets=num_indirect_sample_sets,
        num_accumulation_steps=num_accumulation_steps,
        post_process_steps=num_post_steps,
        lightmap_path=lightmap_path,
    )
def request_new_node(self, user_data):
    """Create a client-side node under a provisional (negative, decreasing) id.

    The node is registered in the node table and queued in _unsent_new_nodes
    until the server acknowledges it.
    """
    provisional_id = self._next_fake_node_id
    self._next_fake_node_id -= 1
    node = Node()
    node.id = provisional_id
    node.fake_id = provisional_id
    node.user_data = user_data
    self._nodes[provisional_id] = node
    self._unsent_new_nodes[provisional_id] = node
    return node
def request_destroy_node(self, node):
    """Recursively destroy *node*: children first, then its components, then the node itself."""
    if node.destroyed:
        return
    # Depth-first: destroy every direct child before this node.
    children = [child for child in self._nodes.values() if child.root == node]
    for child in children:
        self.request_destroy_node(child)
    # Tear down every component instance attached to this node.
    for component_type in self._components.values():
        component_type.request_destroy_instance(node)
    # Drop both the real-id and (if present) provisional-id table entries.
    # NOTE(review): if node.id == node.fake_id (node never acknowledged by the
    # server) the second del raises KeyError -- confirm ids diverge after ack.
    del self._nodes[node.id]
    if node.fake_id is not None:
        del self._nodes[node.fake_id]
    self._destroyed_nodes.add(node)
    if self._destroy_node_callback is not None:
        self._destroy_node_callback(self, node)
def mark_name_dirty(self, node):
    """Flag *node*'s name as changed so it is included in the next sync."""
    assert node in self._nodes.values()
    self._node_changed_names[node] = None
def mark_root_dirty(self, node):
    """Flag *node*'s root (parent) as changed so it is included in the next sync."""
    assert node in self._nodes.values()
    self._node_changed_roots[node] = None
def mark_local_transform_dirty(self, node):
    """Flag *node*'s local transform as changed so it is included in the next sync."""
    assert node in self._nodes.values()
    self._node_changed_local_transforms[node] = None
| 37.05501 | 151 | 0.616961 | 37,210 | 0.986427 | 0 | 0 | 0 | 0 | 0 | 0 | 6,512 | 0.172631 |
a6cea9ab25c6ee3d7b3a6630ab209a88876c39c1 | 713 | py | Python | airflow/pyspark/weekday/avg_temperature.py | juliocnsouzadev/gcp-data-engineer | c32a516440c8989f28a33234a05a02873c7fc5b8 | [
"MIT"
] | null | null | null | airflow/pyspark/weekday/avg_temperature.py | juliocnsouzadev/gcp-data-engineer | c32a516440c8989f28a33234a05a02873c7fc5b8 | [
"MIT"
] | null | null | null | airflow/pyspark/weekday/avg_temperature.py | juliocnsouzadev/gcp-data-engineer | c32a516440c8989f28a33234a05a02873c7fc5b8 | [
"MIT"
] | null | null | null | #!/usr/bin/python
from pyspark.sql import SparkSession
# Spark session on a YARN cluster; the app name identifies this analytic job.
spark = (
    SparkSession.builder.master("yarn")
    .appName("bigquery-analytics-avg-temperature")
    .getOrCreate()
)
# GCS bucket used by the BigQuery connector for temporary staging files.
bucket = "01-logistics-backup"
spark.conf.set("temporaryGcsBucket", bucket)
# Load the raw vehicle telemetry table from BigQuery.
history = (
    spark.read.format("bigquery").option("table", "vehicle_analytics.history").load()
)
history.createOrReplaceTempView("history")
# Aggregate: average temperature per vehicle per day.
avg_temperature = spark.sql(
    "SELECT vehicle_id, date, AVG(temperature) AS avg_temperature FROM history GROUP BY vehicle_id, date"
)
avg_temperature.show()
avg_temperature.printSchema()
# Append the aggregate back into BigQuery.
avg_temperature.write.format("bigquery").option(
    "table", "vehicle_analytics.avg_temperature"
).mode("append").save()
a6cf3da01e7d9f5279818dd26034a1596d124cb2 | 1,814 | py | Python | chapter09/idqn/ddqn_agent.py | roiyeho/drl-book | 1db635fd508e5b17ef8bfecbe49a79f55503a1f1 | [
"MIT"
] | null | null | null | chapter09/idqn/ddqn_agent.py | roiyeho/drl-book | 1db635fd508e5b17ef8bfecbe49a79f55503a1f1 | [
"MIT"
] | null | null | null | chapter09/idqn/ddqn_agent.py | roiyeho/drl-book | 1db635fd508e5b17ef8bfecbe49a79f55503a1f1 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from tensorflow import keras
from idqn.dqn_agent import DQNAgent
class DoubleDQNAgent(DQNAgent):
    """DQN variant using the Double-DQN target: the online network selects the
    next action while the target network evaluates it, which reduces the
    Q-value overestimation of vanilla DQN."""
    def __init__(self, env, config):
        """
        :param env: the gym environment where the agent will run
        :param config: a set of hyperparameters
        """
        super().__init__(env, config)
    def train(self, observations, actions, rewards, next_observations, dones):
        """Run one gradient step on a batch of transitions.

        All arguments are parallel arrays over the batch; *dones* is 0/1 and
        zeroes the bootstrap term for terminal transitions.
        """
        # Use the online network to select the best actions for the next observations
        next_q_values = self.q_network.predict(next_observations)
        best_next_actions = np.argmax(next_q_values, axis=1)
        # Use the target network to estimate the Q-values of these best actions
        next_best_q_values = self.target_network.predict(next_observations)
        next_best_q_values = next_best_q_values[np.arange(len(next_best_q_values)), best_next_actions]
        # Bellman target; (1 - dones) drops the future term on terminal steps.
        target_q_values = rewards + (1 - dones) * self.config.gamma * next_best_q_values
        with tf.GradientTape() as tape:
            # Forward pass: compute the Q-values for the observations in the batch
            all_q_values = self.q_network(observations)
            # Mask out the Q-values for the non-chosen actions
            mask = tf.one_hot(actions, self.n_actions)
            q_values = tf.reduce_sum(all_q_values * mask, axis=1)
            # Compute the loss between the targets and the Q-values
            loss_fn = keras.losses.Huber()
            loss = loss_fn(target_q_values, q_values)
        # Perform a gradient descent step to minimize the loss with respect
        # to the model's trainable variables
        gradients = tape.gradient(loss, self.q_network.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.q_network.trainable_variables))
a6d0eab6e2d70e95ab1bd7c0ef87edee8250bf73 | 3,267 | py | Python | reader.py | SimGGG/abae-pytorch | a3be72738204d1a61879fb84754ff28febff52d5 | [
"MIT"
] | null | null | null | reader.py | SimGGG/abae-pytorch | a3be72738204d1a61879fb84754ff28febff52d5 | [
"MIT"
] | null | null | null | reader.py | SimGGG/abae-pytorch | a3be72738204d1a61879fb84754ff28febff52d5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import gensim
import numpy as np
from sklearn.cluster import MiniBatchKMeans
def read_data_batches(path, batch_size=50, minlength=5):
    """
    Yield batches of tokenized sentences from a text file.

    Each line of the file is one whitespace-tokenized sentence; lines with
    fewer than *minlength* tokens are skipped.  A final, partially filled
    batch is yielded too.

    :param path: path to the text file ``one line -- one normalized sentence''
    :return: batches iterator
    """
    batch = []
    for raw_line in open(path, encoding="utf-8"):
        tokens = raw_line.strip().split()
        if len(tokens) < minlength:
            continue  # sentence too short, omit it
        batch.append(tokens)
        if len(batch) >= batch_size:
            yield batch
            batch = []
    if batch:
        yield batch
def text2vectors(text, w2v_model, maxlen, vocabulary):
    """
    Convert a token sequence to a list of exactly *maxlen* word vectors.

    Tokens absent from the w2v model (or from *vocabulary*, when given) are
    skipped.  The result is truncated at *maxlen* (the original docstring
    promised this but the code never cut; callers had to slice) and, when
    shorter, padded with zero vectors up to *maxlen*.

    :param text: list of tokens
    :param w2v_model: gensim w2v model
    :param maxlen: max. length of the sentence; the rest is cut away
    :param vocabulary: optional collection of allowed words (None = no restriction)
    :return: list of maxlen word vectors
    """
    # Set membership is O(1); the original scanned the whole index list per token.
    known_words = set(w2v_model.wv.index_to_key)
    acc_vecs = []
    for word in text:
        if len(acc_vecs) == maxlen:
            break  # honor the maxlen contract instead of over-collecting
        if word in known_words and (vocabulary is None or word in vocabulary):
            acc_vecs.append(w2v_model.wv[word])
    # padding for consistent length with ZERO vectors
    if len(acc_vecs) < maxlen:
        acc_vecs.extend([np.zeros(w2v_model.vector_size)] * (maxlen - len(acc_vecs)))
    return acc_vecs
def get_w2v(path):
    """
    Reading word2vec model given the path
    :return: a loaded gensim Word2Vec model
    """
    return gensim.models.Word2Vec.load(path)
def read_data_tensors(path, word_vectors_path=None,
                      batch_size=50, vocabulary=None,
                      maxlen=100, pad_value=0, minsentlength=5):
    """
    Yield NN training batches: text file -> (word-vector tensor, texts) pairs.

    Each batch of sentences read from *path* is mapped to a float32 array of
    shape (batch, maxlen, vector_size), padded/truncated per text2vectors.

    :param path: path to the sentence file
    :param word_vectors_path: path of the gensim w2v model
    :param batch_size: sentences per yielded batch
    :param vocabulary: optional word whitelist
    :param maxlen: vectors kept per sentence
    :param pad_value: unused, kept for interface compatibility
    :param minsentlength: minimum tokens for a sentence to be used
    :return: iterator of (numpy array, list of token lists)
    """
    w2v_model = get_w2v(word_vectors_path)
    for sentences in read_data_batches(path, batch_size, minsentlength):
        tensors = [
            np.asarray(text2vectors(s, w2v_model, maxlen, vocabulary)[:maxlen],
                       dtype=np.float32)
            for s in sentences
        ]
        yield np.stack(tensors, axis=0), list(sentences)
def get_centroids(w2v_model, aspects_count):
    """
    Clustering all word vectors with K-means and returning L2-normalized
    cluster centroids; used for ABAE aspects matrix initialization.

    :param w2v_model: gensim w2v model whose whole vocabulary is clustered
    :param aspects_count: number of clusters / aspects
    :return: (aspects_count, vector_size) array of unit-norm centroids
    """
    km = MiniBatchKMeans(n_clusters=aspects_count, verbose=0, n_init=100)
    # np.matrix is deprecated; a plain 2-D array behaves identically here.
    m = np.vstack([w2v_model.wv[k] for k in w2v_model.wv.key_to_index])
    km.fit(m)
    clusters = km.cluster_centers_
    # L2 normalization, row-wise
    norm_aspect_matrix = clusters / np.linalg.norm(clusters, axis=-1, keepdims=True)
    return norm_aspect_matrix
if __name__ == "__main__":
    # Smoke test: stream a few batches and print tensor shape plus sample texts.
    for b in read_data_tensors("preprocessed_data/listings.txt", "word_vectors/listings.w2v", batch_size=3):
        print(b[0].shape, b[1][:2])
| 28.408696 | 108 | 0.642792 | 0 | 0 | 1,492 | 0.456688 | 0 | 0 | 0 | 0 | 1,217 | 0.372513 |
a6d132395b8e7021c469015245123b41854214f8 | 2,375 | py | Python | service.py | deeso/python-listcurator | 238bbc45eb53f93f3c01a9b2052938e598770dd1 | [
"Apache-2.0"
] | null | null | null | service.py | deeso/python-listcurator | 238bbc45eb53f93f3c01a9b2052938e598770dd1 | [
"Apache-2.0"
] | null | null | null | service.py | deeso/python-listcurator | 238bbc45eb53f93f3c01a9b2052938e598770dd1 | [
"Apache-2.0"
] | null | null | null | import web, argparse, os, logging, sys
from listcurator.service.listcurator import *
from listcurator.service.listcurator import *
from sqlalchemy import *
# Default bind address and on-disk layout for the list-curator service.
PORT = 45000
HOST = '0.0.0.0'
ML_SAVE_DIR = 'managed_lists'
SAVE_DIR = os.path.join(os.getcwd(), ML_SAVE_DIR)
LOGGER_NAME = "managed_lists_webservice"
ML_LOG_FILE = "managed_lists_webservice.log"
ML_SQLITE_FILE = "managed_lists.db"
LOGGER_LOCATION = os.path.join(os.getcwd(), ML_LOG_FILE)
# Command-line flags; None defaults are resolved relative to -working_dir at startup.
parser = argparse.ArgumentParser(description="List Management Web Service")
parser.add_argument('-host', default=HOST, type=str)
parser.add_argument('-port', default=PORT, type=int)
parser.add_argument('-working_dir', default=SAVE_DIR, type=str)
parser.add_argument('-save_loc', default=None, type=str)
parser.add_argument('-sqlite_db', default=None, type=str)
parser.add_argument('-sqlite_uri', default=None, type=str)
parser.add_argument('-log', default=None, type=str)
parser.add_argument('-log_console', default=False, action="store_true")
parser.add_argument('-config_file', default=None, type=str)
parser.add_argument('-no_auth_users', default=False, action="store_true")
class AppOverride(web.application):
    """web.py application whose run() accepts a bind host (stock run() only takes middleware)."""
    def run(self, host=HOST, port = PORT, *middleware):
        return web.httpserver.runsimple(self.wsgifunc(*middleware), (host, port))
def run_server(host, port, config_file, working_location, sqlitefile, auth_users=True):
    """Initialize logging, auth and list managers, then serve the curator app on host:port."""
    log_mgr = InitializeLogMgr()
    auth_mgr = InitializeAuth(sourcetype="rawconfig", source=config_file, auth_users=auth_users)
    list_mgr = InitializeListMgr(sqlitefile, working_location=working_location)
    app = AppOverride(ListCuratorUrls, globals())
    app.run(host=host, port=port)
if __name__ == "__main__":
    args = parser.parse_args()
    # Resolve log / save-location defaults relative to the working directory.
    args.log = os.path.join(args.working_dir, ML_LOG_FILE) if args.log is None else args.log
    args.save_loc = os.path.join(args.working_dir, ML_SAVE_DIR) if args.save_loc is None \
        else args.save_loc
    # NOTE(review): AUTH_USERS mirrors the *no*_auth_users flag without negating
    # it and looks unused here -- confirm nothing imports it before relying on it.
    AUTH_USERS = args.no_auth_users
    sqlitefile = args.sqlite_db if not args.sqlite_db is None \
        else os.path.join(args.working_dir, ML_SQLITE_FILE)
    # use_uri / sqlitecon are computed but not passed to run_server below.
    use_uri = True if not args.sqlite_uri is None else False
    sqlitecon = None
    run_server(args.host, args.port, args.config_file, args.working_dir, sqlitefile, auth_users=not args.no_auth_users)
a6d31d9e8aa8ac0ef8afc1f1dce26c8c8fb6d88e | 422 | py | Python | py/tests/test_cli.py | sthagen/odata-url-parser | b05397c5fb9f33bcd2b883f82bda0a5a388eadae | [
"MIT"
] | 2 | 2020-09-11T20:01:08.000Z | 2020-09-12T11:40:43.000Z | py/tests/test_cli.py | sthagen/python-odata_url_parser | b05397c5fb9f33bcd2b883f82bda0a5a388eadae | [
"MIT"
] | 11 | 2020-09-10T20:55:45.000Z | 2020-09-12T12:51:02.000Z | py/tests/test_cli.py | sthagen/python-odata_url_parser | b05397c5fb9f33bcd2b883f82bda0a5a388eadae | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# pylint: disable=missing-docstring,unused-import,reimported
import io
import pytest # type: ignore
import odata_url_parser.cli as cli
import odata_url_parser.odata_url_parser as oup
def test_main_ok_minimal(capsys):
    """cli.main returns None and echoes the job argument back on stdout."""
    job = ['does not matter']
    expected_report = job[0]
    assert cli.main(job) is None
    captured_out, _captured_err = capsys.readouterr()
    assert captured_out.strip() == expected_report.strip()
| 26.375 | 60 | 0.725118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.270142 |
a6d3a31bdbcc36b5cf3b5e120684f5d29b4647d6 | 3,400 | py | Python | mathics/core/attributes.py | tirkarthi/mathics-core | 6b07500b935f23dc332f4ec3fac1d71ac4c8fc04 | [
"Apache-2.0"
] | 90 | 2021-09-11T14:14:00.000Z | 2022-03-29T02:08:29.000Z | mathics/core/attributes.py | tirkarthi/mathics-core | 6b07500b935f23dc332f4ec3fac1d71ac4c8fc04 | [
"Apache-2.0"
] | 187 | 2021-09-13T01:00:41.000Z | 2022-03-31T11:52:52.000Z | mathics/core/attributes.py | tirkarthi/mathics-core | 6b07500b935f23dc332f4ec3fac1d71ac4c8fc04 | [
"Apache-2.0"
] | 10 | 2021-10-05T15:44:26.000Z | 2022-03-21T12:34:33.000Z | # -*- coding: utf-8 -*-
# The builtin's attributes are stored in a bit set.
# Each bit represets a attribute, if that is 0, the builtin doesn't has the
# property, if that is 1, the builtin has the property.
# The Builtin class has the property Protected by default, but if you overrides
# the attributes you need to add Protected if the builtin is not Unprotected
# (the most of the cases).
# To check if a builtin has an attribute, you do:
# ATTRIBUTE_NAME & attributes
# To set all the attributes of a builtin you do:
# attributes = ATTRIBUTE1 | ATTRIBUTE2 | ATTRIBUTE3 | ...
# To add an attribute to a builtin you do:
# attributes = ATTRIBUTE_NAME | attributes
# To remove an attribute you do:
# attributes = ~ATTRIBUTE_NAME & attributes
from typing import Dict, Iterator, List
# fmt: off
# One bit per attribute: an attribute set is the bitwise OR of these flags.
no_attributes     = 0b0000000000000000
# alphabetical order
constant          = 0b00000000000000001
flat              = 0b00000000000000010
hold_all          = 0b00000000000000100
hold_all_complete = 0b00000000000001000
hold_first        = 0b00000000000010000
hold_rest         = 0b00000000000100000
listable          = 0b00000000001000000
locked            = 0b00000000010000000
n_hold_all        = 0b00000000100000000
n_hold_first      = 0b00000001000000000
n_hold_rest       = 0b00000010000000000
numeric_function  = 0b00000100000000000
one_identity      = 0b00001000000000000
orderless         = 0b00010000000000000
protected         = 0b00100000000000000
read_protected    = 0b01000000000000000
sequence_hold     = 0b10000000000000000
# fmt: on
# Maps each attribute bit to its fully qualified System` symbol name.
attribute_number_to_string: Dict[int, str] = {
    constant: "System`Constant",
    flat: "System`Flat",
    hold_all: "System`HoldAll",
    hold_all_complete: "System`HoldAllComplete",
    hold_first: "System`HoldFirst",
    hold_rest: "System`HoldRest",
    listable: "System`Listable",
    locked: "System`Locked",
    n_hold_all: "System`NHoldAll",
    n_hold_first: "System`NHoldFirst",
    n_hold_rest: "System`NHoldRest",
    numeric_function: "System`NumericFunction",
    one_identity: "System`OneIdentity",
    orderless: "System`Orderless",
    protected: "System`Protected",
    read_protected: "System`ReadProtected",
    sequence_hold: "System`SequenceHold",
}
# Inverse of attribute_number_to_string: symbol name -> attribute bit.
attribute_string_to_number: Dict[str, int] = {
    "System`Constant": constant,
    "System`Flat": flat,
    "System`HoldAll": hold_all,
    "System`HoldAllComplete": hold_all_complete,
    "System`HoldFirst": hold_first,
    "System`HoldRest": hold_rest,
    "System`Listable": listable,
    "System`Locked": locked,
    "System`NHoldAll": n_hold_all,
    "System`NHoldFirst": n_hold_first,
    "System`NHoldRest": n_hold_rest,
    "System`NumericFunction": numeric_function,
    "System`OneIdentity": one_identity,
    "System`Orderless": orderless,
    "System`Protected": protected,
    "System`ReadProtected": read_protected,
    "System`SequenceHold": sequence_hold,
}
def attributes_bitset_to_list(attributes_bitset: int) -> Iterator[str]:
    """Yield the symbol name of every attribute bit set in *attributes_bitset*.

    This is a generator of strings (the original annotation said List[int],
    which matched neither the yielded type nor the generator nature).
    """
    bit = 1
    while attributes_bitset >= bit:
        # Bitwise AND.
        # e.g.: 0b1000101 & 0b0000100 = 0b0000100
        # e.g.: 0b0100110 & 0b0011000 = 0b0000000
        if attributes_bitset & bit:
            # Convert the attribute to a string.
            yield attribute_number_to_string[attributes_bitset & bit]
        # Go to the next attribute by doubling "bit".
        # e.g.: 0b010 (2) -> 0b100 (4)
        bit <<= 1
| 34 | 79 | 0.701471 | 0 | 0 | 506 | 0.148824 | 0 | 0 | 0 | 0 | 1,601 | 0.470882 |
a6d47efb044c92d4dfec30a904a3f3088fdb915c | 1,005 | py | Python | openapi_documentor/openapi/views.py | codeasashu/openapi-documentor | dde825edaac85bb117d06adf0a4eabf1f5da44f5 | [
"MIT"
] | null | null | null | openapi_documentor/openapi/views.py | codeasashu/openapi-documentor | dde825edaac85bb117d06adf0a4eabf1f5da44f5 | [
"MIT"
] | 5 | 2021-04-06T07:46:47.000Z | 2022-03-02T13:12:20.000Z | openapi_documentor/openapi/views.py | codeasashu/openapi-documentor | dde825edaac85bb117d06adf0a4eabf1f5da44f5 | [
"MIT"
] | null | null | null | from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import get_object_or_404
from django.views.generic import DetailView, ListView
from taggit.models import Tag
from .models import Document
class OpenapiListView(LoginRequiredMixin, ListView):
    """Login-required, paginated (10/page) list over all Document objects."""
    model = Document
    context_object_name = "apis"
    paginate_by = 10
# Callable view entry point used by the URLconf.
api_list_view = OpenapiListView.as_view()
class OpenapiDetailView(LoginRequiredMixin, DetailView):
    """Login-required detail page for a single Document."""
    model = Document
    context_object_name = "api"
# Callable view entry point used by the URLconf.
api_detail_view = OpenapiDetailView.as_view()
class OpenapiTaggedView(LoginRequiredMixin, ListView):
    """Login-required list of Documents carrying the tag named by the 'tag' URL kwarg."""
    context_object_name = "apis"
    paginate_by = 10
    template_name = "document_list.html"
    def get_queryset(self):
        """Documents filtered by tag slug; empty queryset when no slug, 404 on unknown slug."""
        slug = self.kwargs.get("tag", None)
        if slug:
            tag = get_object_or_404(Tag, slug=slug)
            return Document.objects.filter(tags=tag)
        else:
            return Document.objects.none()
# Callable view entry point used by the URLconf.
api_tagged_view = OpenapiTaggedView.as_view()
| 24.512195 | 57 | 0.734328 | 637 | 0.633831 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.041791 |
a6d6233f8b8a3e3c27f3cfdb765619296305bc07 | 2,798 | py | Python | scripts/version.py | DrSensor/scdlang | 0152b8ea7d3da79a6bcce1144d59e5b037316cb9 | [
"UPL-1.0"
] | 80 | 2019-04-25T09:54:53.000Z | 2022-01-02T03:08:31.000Z | scripts/version.py | DrSensor/scdlang | 0152b8ea7d3da79a6bcce1144d59e5b037316cb9 | [
"UPL-1.0"
] | 32 | 2019-02-05T00:35:49.000Z | 2019-08-30T13:16:35.000Z | scripts/version.py | DrSensor/scdlang | 0152b8ea7d3da79a6bcce1144d59e5b037316cb9 | [
"UPL-1.0"
] | 2 | 2019-10-01T23:01:16.000Z | 2022-03-22T14:19:47.000Z | #!/usr/bin/env python
from tomlkit.toml_file import TOMLFile
from glob import glob
from os import path
from sys import argv, stdin
from pampy import match
from functools import reduce
import operator as op
import re
re_version = r"\d+\.\d+\.\d+-?"
def increment(version, major=None, minor=None, patch=None):
    """Bump one field of a dotted "x.y.z" version string by an integer delta.

    Bumping a field resets every less-significant field to 0.  When several
    deltas are given, the least significant one wins; with none, the parsed
    version is returned unchanged.
    """
    parts = [int(part) for part in version.split(".")]
    bumped = parts
    if isinstance(major, int):
        bumped = [parts[0] + major, 0, 0]
    if isinstance(minor, int):
        bumped = [parts[0], parts[1] + minor, 0]
    if isinstance(patch, int):
        bumped = [parts[0], parts[1], parts[2] + patch]
    return ".".join(str(part) for part in bumped)
# fmt: off
def change_version(version):
    """Map argv[1] onto a new version string via pampy's match.

    'major'/'minor'/'patch' bump that field; a trailing '-' decrements it;
    an explicit "x.y.z[-]" argument is returned verbatim (minus the dash).
    """
    return match(
        argv[1],
        "major", increment(version, major=1),
        "minor", increment(version, minor=1),
        "patch", increment(version, patch=1),
        re.compile(re_version), lambda target: target.strip("-"),
        "major-", increment(version, major=-1),
        "minor-", increment(version, minor=-1),
        "patch-", increment(version, patch=-1),
    )
def docker_release():
    """Rewrite the `version` label in every docker/*.Dockerfile in place."""
    re_sep = r"(?:=|\s+)"
    # Group 1: full "version=<x.y.z>" fragment; group 2: the bare version.
    re_version_label = r"(version%s[\"']?(%s)[\"']?)" % (re_sep, re_version)
    for docker_file in glob("docker/*.Dockerfile"):
        with open(docker_file, "r+") as file:
            dockerfile = file.read()
            (version, v) = re.findall(re_version_label, dockerfile, re.IGNORECASE)[0]
            new_version = re.sub(re_version, change_version(v), version)
            file.seek(0) # workaround for read & overwrite file
            file.write(dockerfile.replace(version, new_version))
            file.truncate()
def cargo_release(project, internal_dependencies=[None]):
    """Bump <project>/Cargo.toml's package version, updating listed internal deps too.

    NOTE(review): the mutable default list is only iterated, never mutated,
    so it is benign here.
    """
    project_path = path.join(project, "Cargo.toml")
    file = TOMLFile(project_path)
    content = file.read()
    dependencies = content.get('dependencies') or {}
    build_dependencies = content.get('build-dependencies') or {}
    new_version = change_version(content['package']['version'])
    content['package']['version'] = new_version
    # Keep workspace-internal dependency requirements in lockstep.
    for local in internal_dependencies:
        if dependencies.get(local) is not None:
            dependencies[local]['version'] = new_version
        if build_dependencies.get(local) is not None:
            build_dependencies[local]['version'] = new_version
    file.write(content)
def cargo_workspace_release():
    """Bump every member crate of the cargo workspace, syncing internal dep versions."""
    workspace = TOMLFile("Cargo.toml").read()['workspace']
    paths = reduce(op.concat, [glob(p) for p in workspace['members']], [])
    project_names = [TOMLFile(f"{path}/Cargo.toml").read()['package']['name'] for path in paths]
    for project in paths:
        cargo_release(project, project_names)
if not stdin.isatty():
    # Piped mode: read a version on stdin, print the bumped version.
    print(change_version(stdin.read()))
else:
    # Interactive mode: bump the whole cargo workspace and the Dockerfiles.
    cargo_workspace_release()
    docker_release()
| 33.710843 | 96 | 0.640815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 375 | 0.134024 |
a6d814af1778f60073b49257626a57797ebe43ba | 1,326 | py | Python | qhcfp/urls.py | kilinger/firon-qhcst | 057e5b65f454083599502c3b178c88399f11c8b7 | [
"BSD-3-Clause"
] | null | null | null | qhcfp/urls.py | kilinger/firon-qhcst | 057e5b65f454083599502c3b178c88399f11c8b7 | [
"BSD-3-Clause"
] | null | null | null | qhcfp/urls.py | kilinger/firon-qhcst | 057e5b65f454083599502c3b178c88399f11c8b7 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
# Core routes: registration landing page, admin, and app includes.
urlpatterns = patterns(
    '',
    url(r'^$', 'regist.views.regist_index'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'', include('accounts.urls')),
    url(r'^regist/', include('regist.urls')),
    url(r'^member/', include('member.urls')),
    url(r'^message/', include('message.urls')),
    url(r'^wechat/', include('wechat.urls')),
    url(r'^captcha/', include('captcha.urls')),
    url(r'^i18n/setlang/$', 'django.views.i18n.set_language', name='set_language'),
)
from django.conf import settings
# Serve uploaded media and static files locally while DEBUG is on.
if settings.DEBUG:
    urlpatterns = patterns('',
        url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
            'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
        url(r'', include('django.contrib.staticfiles.urls')),
    ) + urlpatterns
from django.views.generic import TemplateView
# Catch-all magic pages plus a static landing page.
urlpatterns += patterns('',
    url(r'', include('doodoll_kit.magicpages.urls')),
    url(r'^$', TemplateView.as_view(template_name='index.html')),
)
# Mount the debug toolbar only when the app is installed.
if 'debug_toolbar' in settings.INSTALLED_APPS:
    import debug_toolbar
    urlpatterns += patterns(
        '',
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )
| 30.837209 | 83 | 0.634238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 472 | 0.355958 |
a6d89dd7c7ead3e11cb5faaf6c84a4ae68e8bc94 | 10,123 | py | Python | ecosante/newsletter/tasks/import_in_sb.py | betagouv/ecosante | cc7dd76bb65405ba44f432197de851dc7e22ed38 | [
"MIT"
] | null | null | null | ecosante/newsletter/tasks/import_in_sb.py | betagouv/ecosante | cc7dd76bb65405ba44f432197de851dc7e22ed38 | [
"MIT"
] | 167 | 2020-06-30T08:59:38.000Z | 2021-03-18T14:36:22.000Z | ecosante/newsletter/tasks/import_in_sb.py | betagouv/ecosante | cc7dd76bb65405ba44f432197de851dc7e22ed38 | [
"MIT"
] | 2 | 2020-04-08T11:56:17.000Z | 2020-04-09T14:04:15.000Z | from flask import current_app
from datetime import datetime
from uuid import uuid4
import os
from flask.helpers import url_for
import sib_api_v3_sdk
from sib_api_v3_sdk.rest import ApiException
from ecosante.newsletter.models import Newsletter, NewsletterDB, Inscription
from ecosante.extensions import db, sib, celery
from ecosante.utils import send_log_mail
def get_all_contacts(limit=100):
    """Page through the Sendinblue contacts API and return every contact.

    :param limit: page size requested from the API
    :return: list of contact dicts as returned by Sendinblue
    """
    contacts_api = sib_api_v3_sdk.ContactsApi(sib)
    contacts = []
    offset = 0
    while True:
        # Bug fix: the page size was hard-coded to 100, ignoring *limit*,
        # which broke the `< limit` stop condition for any other value.
        result = contacts_api.get_contacts(limit=limit, offset=offset)
        contacts += result.contacts
        if len(result.contacts) < limit:
            break
        offset += limit
    return contacts
def get_blacklisted_contacts():
    """Return the Sendinblue contacts flagged emailBlacklisted."""
    return [c for c in get_all_contacts() if c['emailBlacklisted']]
def deactivate_contacts():
    """Unsubscribe, in our DB, every active inscription whose SIB contact is blacklisted."""
    for contact in get_blacklisted_contacts():
        db_contact = Inscription.active_query().filter(Inscription.mail==contact['email']).first()
        # Skip contacts unknown to us or already inactive.
        if not db_contact or not db_contact.is_active:
            continue
        db_contact.unsubscribe()
def import_and_send(task, seed, preferred_reco, remove_reco, only_to, force_send=False):
    """Full pipeline: sync unsubscribes, drop stale SIB lists, build the
    newsletter batch, then hand it to import_.

    Progress is pushed through task.update_state (French user-facing labels);
    returns the import_ summary dict with progress forced to 100.
    """
    task.update_state(
        state='STARTED',
        meta={
            "progress": 0,
            "details": "Prise en compte de la désincription des membres"
        }
    )
    deactivate_contacts()
    task.update_state(
        state='STARTED',
        meta={
            "progress": 0,
            "details": "Suppression des anciennes listes"
        }
    )
    list_ids_to_delete = get_lists_ids_to_delete()
    contacts_api = sib_api_v3_sdk.ContactsApi(sib)
    for i, list_id in enumerate(list_ids_to_delete, 1):
        contacts_api.delete_list(list_id)
        task.update_state(
            state='STARTED',
            meta={
                "progress": 0,
                "details": f"Suppression des anciennes listes ({i}/{len(list_ids_to_delete)})"
            }
        )
    task.update_state(
        state='STARTED',
        meta={
            "progress": 0,
            "details": "Constitution de la liste"
        }
    )
    # Build one NewsletterDB row per exported newsletter.
    newsletters = list(
        map(
            NewsletterDB,
            Newsletter.export(
                preferred_reco=preferred_reco,
                user_seed=seed,
                remove_reco=remove_reco,
                only_to=only_to
            )
        )
    )
    # Newsletters are only persisted in production.
    if current_app.config['ENV'] == 'production':
        db.session.add_all(newsletters)
        db.session.commit()
    task.update_state(
        state='STARTED',
        meta={
            "progress" :0,
            "details": "Construction des listes SIB d'envoi"
        }
    )
    result = import_(task, newsletters, force_send, 2)
    result['progress'] = 100
    if current_app.config['ENV'] == 'production':
        db.session.commit()
    return result
def send(campaign_id, test=False):
    """Trigger immediate sending of a SIB e-mail campaign (production only, unless *test*)."""
    if current_app.config['ENV'] == 'production' or test:
        send_email_api = sib_api_v3_sdk.EmailCampaignsApi(sib)
        send_email_api.send_email_campaign_now(campaign_id=campaign_id)
def import_(task, newsletters, force_send=False, overhead=0, test=False, mail_list_id=None):
    """Create (if needed) a SIB mail list, tag importable newsletters with it,
    and launch the asynchronous contact import on Sendinblue.

    Newsletters without air-quality data (or with nothing to show when
    *force_send*) are skipped and reported in the returned errors list.

    :return: summary dict with state/progress/details/errors
    """
    mail_list_id_set = mail_list_id is not None
    errors = []
    now = datetime.now()
    total_nb_requests = 4 + len(newsletters) + overhead
    nb_requests = 0
    # Idiom fix: `is None` instead of `== None` for the None check.
    if mail_list_id is None:
        lists_api = sib_api_v3_sdk.ListsApi(sib)
        r = lists_api.create_list(
            sib_api_v3_sdk.CreateList(
                name=f'{now} - mail',
                folder_id=int(os.getenv('SIB_FOLDERID', 5)) if not test else int(os.getenv('SIB_FOLDERID', 1653))
            )
        )
        mail_list_id = r.id
        nb_requests += 1
        if task:
            task.update_state(
                state='STARTED',
                meta={
                    "progress": (nb_requests/total_nb_requests)*100,
                    "details": f"Création de la liste"
                }
            )
    for i, nl in enumerate(newsletters):
        if nl.label is None and not force_send:
            # No air-quality index available for this commune.
            errors.append({
                "type": "no_air_quality",
                "nl_id": nl.id,
                "region": nl.inscription.commune.departement.region.nom,
                "ville": nl.inscription.commune.nom,
                "insee": nl.inscription.commune.insee
            })
            current_app.logger.error(f"No qai for {nl.inscription.mail}")
        elif not nl.something_to_show and force_send:
            errors.append({
                "type": "nothing_to_show",
                "nl_id": nl.id,
                "region": nl.inscription.commune.departement.region.nom,
                "ville": nl.inscription.commune.nom,
                "insee": nl.inscription.commune.insee
            })
            current_app.logger.error(f"Nothing to show for {nl.inscription.mail}")
        else:
            if current_app.config['ENV'] == 'production' and not mail_list_id_set:
                nl.mail_list_id = mail_list_id
                db.session.add(nl)
            # Flush periodically to keep the session small.
            if i % 100 == 0:
                db.session.commit()
    if current_app.config['ENV'] == 'production' or test:
        db.session.commit()
    contact_api = sib_api_v3_sdk.ContactsApi(sib)
    request_contact_import = sib_api_v3_sdk.RequestContactImport()
    request_contact_import.list_ids = [mail_list_id]
    request_contact_import.email_blacklist = False
    request_contact_import.sms_blacklist = False
    request_contact_import.update_existing_contacts = True
    request_contact_import.empty_contacts_attributes = True
    # SIB pulls the contacts CSV from our export endpoint...
    request_contact_import.file_url = url_for(
        'newsletter.export',
        secret_slug=os.getenv("CAPABILITY_ADMIN_TOKEN"),
        mail_list_id=mail_list_id,
        _external=True,
        _scheme='https'
    )
    # ...and notifies this endpoint when the import finishes, triggering the send.
    request_contact_import.notify_url = url_for(
        'newsletter.send_campaign',
        secret_slug=os.getenv("CAPABILITY_ADMIN_TOKEN"),
        now=now,
        mail_list_id=mail_list_id,
        _external=True,
        _scheme='https'
    )
    current_app.logger.debug("About to send newsletter with params")
    current_app.logger.debug(request_contact_import)
    try:
        contact_api.import_contacts(request_contact_import)
        current_app.logger.debug("Newsletter sent")
    except ApiException as e:
        current_app.logger.error("Exception when calling ContactsApi->import_contacts: %s\n" % e)
    return {
        "state": "STARTED",
        "progress": (nb_requests/total_nb_requests)*100,
        "details": "Terminé",
        "errors": errors
    }
def create_campaign(now, mail_list_id, test=False):
    """Create the SIB e-mail campaign for *mail_list_id* from the stored template.

    :return: the new campaign id, or 0 outside production unless *test*
    """
    if current_app.config['ENV'] == 'production' or test:
        template_id = int(os.getenv('SIB_EMAIL_TEMPLATE_ID', 526))
        email_campaign_api = sib_api_v3_sdk.EmailCampaignsApi(sib)
        transactional_api = sib_api_v3_sdk.TransactionalEmailsApi(sib)
        # Reuse sender and subject from the transactional template.
        template = transactional_api.get_smtp_template(int(template_id))
        r = email_campaign_api.create_email_campaign(
            sib_api_v3_sdk.CreateEmailCampaign(
                sender=sib_api_v3_sdk.CreateEmailCampaignSender(
                    email=template.sender.email,
                    name=template.sender.name
                ),
                name = f'{now}',
                template_id = template_id,
                subject = template.subject,
                reply_to = "newsletter@recosante.beta.gouv.fr",
                recipients = sib_api_v3_sdk.CreateEmailCampaignRecipients(
                    list_ids=[mail_list_id]
                ),
                header="Aujourd'hui, la qualité de l'air autour de chez vous est…",
                tag='newsletter' if not test else 'test_newsletter'
            )
        )
        email_campaign_id = r.id
    else:
        email_campaign_id = 0
    return email_campaign_id
def format_errors(errors):
    """Build the human-readable (French) error report for the send summary.

    Lists every error, then per-region counts, then a CSV-like recap of the
    failing communes; returns '' when there is nothing to report.
    """
    if not errors:
        return ''
    labels = {
        "no_air_quality": "Pas de qualité de l’air",
        "nothing_to_show": "Aucune donnée à montrer"
    }
    detail_lines = []
    recap_lines = []
    per_region = {}
    for error in errors:
        label = labels.get(error['type'], error['type'])
        detail_lines.append(
            f"{label} pour la ville de {error['ville']} ({error['insee']}) région: '{error['region']}'\n"
        )
        recap_lines.append(f"{error['ville']}, {error['insee']}, {error['region']}\n")
        per_region[error['region']] = per_region.get(error['region'], 0) + 1
    region_lines = [
        f'La région {name} a eu {count} erreurs\n'
        for name, count in per_region.items()
    ]
    return ''.join(detail_lines) + '\n' + ''.join(region_lines) + '\n' + ''.join(recap_lines)
@celery.task(bind=True)
def import_send_and_report(self, only_to=None, force_send=False, report=False):
    """Celery task: run the full import+send pipeline; optionally e-mail an error report."""
    current_app.logger.error("Début !")
    new_task_id = str(uuid4())
    self.update_state(
        state='STARTED',
        meta={
            "progress": 0,
            "details": f"Lancement de la tache: '{new_task_id}'",
        }
    )
    result = import_and_send(self, str(uuid4()), None, [], only_to, force_send)
    if report:
        errors = format_errors(result['errors'])
        body = """
Bonjour,
Il n’y a pas eu d’erreur lors de l’envoi de la newsletter
Bonne journée !
""" if not errors else f"""
Bonjour,
Il y a eu des erreurs lors de l’envoi de la newsletter :
{errors}
Bonne journée
"""
        send_log_mail("Rapport d’envoi de la newsletter", body, name="Rapport recosante", email="rapport-envoi@recosante.beta.gouv.fr")
    # NOTE(review): 'SUCESS' is misspelled (celery's standard state is
    # 'SUCCESS') -- confirm no consumer matches this exact string before fixing.
    self.update_state(
        state='SUCESS',
        meta={
            "progress": 100,
            "details": f"Fin",
        }
    )
    return result
def get_lists_ids_to_delete():
    """Collect the ids of SIB contact lists from offset 10 onward.

    NOTE(review): the scan deliberately starts at offset=10, which skips the
    first ten lists -- presumably permanent ones; confirm before changing.
    """
    api_instance = sib_api_v3_sdk.ContactsApi(sib)
    offset = 10
    api_response = api_instance.get_lists(limit=10, offset=offset)
    ids = []
    while True:
        ids = ids + [r['id'] for r in api_response.lists]
        # An empty page marks the end of the listing.
        if not api_response.lists:
            break
        offset += 10
        api_response = api_instance.get_lists(limit=10, offset=offset)
    return ids
a6d986c40e24945e64957667011e08483bed5806 | 672 | py | Python | RecoVertex/BeamSpotProducer/scripts/copyFromCastor.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | RecoVertex/BeamSpotProducer/scripts/copyFromCastor.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | RecoVertex/BeamSpotProducer/scripts/copyFromCastor.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | #!/usr/bin/env python
import sys,os,commands
from CommonMethods import *
def main():
    """Copy files from the user's CASTOR area to a local directory.

    Usage: cpFromCastor fromDir toDir (optional filter)
    """
    # Source and destination directories are mandatory.
    if len(sys.argv) < 3:
        exit("Usage: cpFromCastor fromDir toDir (optional filter)")
    name_filter = sys.argv[3] if len(sys.argv) > 3 else ""
    source_dir = "/castor/cern.ch/cms/store/caf/user/" + os.getenv("USER") + "/" + sys.argv[1] + "/"
    target_dir = sys.argv[2]
    to_copy = ls(source_dir, name_filter)
    copied = cp(source_dir, target_dir, to_copy)
    # Abort with an error if any file failed to copy.
    if len(copied) != len(to_copy):
        exit("ERROR: I couldn't copy all files from castor")
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 29.217391 | 86 | 0.604167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 181 | 0.269345 |
a6da3998b2da3e2208b50eecda9469f494a116aa | 630 | py | Python | app/config.py | midnights-straychild/weatherman | 50354f0639fbcdde01e1ac6290bf71379581868b | [
"MIT"
] | null | null | null | app/config.py | midnights-straychild/weatherman | 50354f0639fbcdde01e1ac6290bf71379581868b | [
"MIT"
] | null | null | null | app/config.py | midnights-straychild/weatherman | 50354f0639fbcdde01e1ac6290bf71379581868b | [
"MIT"
] | null | null | null | class Config:
conf = {
"labels": {
"pageTitle": "Weatherman V0.0.1"
},
"db.database": "weatherman",
"db.username": "postgres",
"db.password": "postgres",
"navigation": [
{
"url": "/",
"name": "Home"
},
{
"url": "/cakes",
"name": "Cakes"
},
{
"url": "/mqtt",
"name": "MQTT"
}
]
}
    def get_config(self):
        """Return the full configuration dictionary."""
        return self.conf
    def get(self, key):
        """Return the configuration value stored under ``key``.

        Raises KeyError for unknown keys.
        """
        return self.conf[key]
| 21 | 44 | 0.339683 | 629 | 0.998413 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.303175 |
a6db049249a0dd90f23b37710611b72301a81d77 | 9,521 | py | Python | app.py | Terrence-Cummings/sqlalchemy-challenge | 6426661fcb1430beb66a90a0cc59a0cec9df8575 | [
"ADSL"
] | null | null | null | app.py | Terrence-Cummings/sqlalchemy-challenge | 6426661fcb1430beb66a90a0cc59a0cec9df8575 | [
"ADSL"
] | null | null | null | app.py | Terrence-Cummings/sqlalchemy-challenge | 6426661fcb1430beb66a90a0cc59a0cec9df8575 | [
"ADSL"
] | null | null | null | #Dependencies, libraries, and imports
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
#SQLalchemy libraries and functions
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect, MetaData
from sqlalchemy import Column, Integer, String, Float
from sqlalchemy.ext.declarative import declarative_base
#VROOM, VROOM!
# Open the SQLite database holding the Hawaii weather observations.
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
#Use automap to get table structures and reflect into classes
Base = automap_base()
Base.prepare(engine, reflect=True)
#See what classes have been created. Classes created should match tables found by Inspector
classes_created = Base.classes.keys()
#Single variable to represent each Class associated with the automapped Base
Measurement = Base.classes.measurement
Station = Base.classes.station
#Classes are now all setup. Start query session.
session = Session(engine)
# Design a query to retrieve the last 12 months of precipitation data and plot the results
#Find the earliest date in the Measurement table by query. Convert to python dictionary, read date as text, convert to datetime.
earliest_date_query = session.query(Measurement.date).order_by(Measurement.date).first()
ed_dict=earliest_date_query._asdict()
earliest_date = ed_dict['date']
earliest_date_dt = dt.datetime.strptime(earliest_date, "%Y-%m-%d")
#Find the latest date in the Measurement table by query. Convert to python dictionary, read date as text, convert to datetime.
latest_date_query = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
ld_dict=latest_date_query._asdict()
latest_date = ld_dict['date']
latest_date_dt = dt.datetime.strptime(latest_date, "%Y-%m-%d")
# Calculate the date 1 year ago from the latest data point in the database
year_ago_latest_dt = latest_date_dt - dt.timedelta(days=365)
year_ago_latest = dt.datetime.strftime(year_ago_latest_dt, "%Y-%m-%d")
# What are the most active stations? (i.e. what stations have the most rows)?
stat_freq = session.query(Measurement.station, func.count(Measurement.station)).group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()
# Station id with the most observations; used by the /temperature route.
max_stat_freq = stat_freq[0][0]
session.close()
#BEGIN FLASK APP
from flask import Flask, jsonify
app = Flask(__name__)
@app.route("/")
def welcome():
print("Server received request for 'Home' page...")
return (
f"Welcome to Surf's Up weather API!<br>"
f"We collect precipitation and temperature data from weather stations on the island of Oahu in Hawaii.<br><br>"
f"Earliest date of data = {earliest_date}<br>"
f"Latest date of data = {latest_date}<br><br>"
f"Available URL Routes:<br><br>"
f"Below URL returns JSON of precipitation on Oahu on each day between {year_ago_latest} and {latest_date}.<br>"
f"Copy this URL to browser:<br>"
f"/api/v1.0/precipitation<br><br>"
f"Below URL returns JSON of temperature at station {max_stat_freq} on Oahu on each day between {year_ago_latest} and {latest_date}.<br>"
f"Copy this URL to browser:<br>"
f"/api/v1.0/temperature<br><br>"
f"Below URL returns JSON of the weather stations on Oahu.<br>"
f"Copy this URL to browser:<br>"
f"/api/v1.0/stations<br><br>"
f"Below URL returns the max, min, and avg temperature on Oahu encompassing the START and END dates provided by the user in the URL.<br>"
f"If no END date provided in the URL then END date is assume to be {latest_date}<br>"
f"Copy this URL to browser and replace START/END with dates in YYYY-MM-DD format:<br>"
f"/api/v1.0/START/END"
)
@app.route("/api/v1.0/precipitation/")
def precipitation():
print("Server received request for 'Precipitation' page...")
session = Session(engine)
#Query precipitation observations for last year
date_prcp_query = session.query(Measurement.date, Measurement.prcp).\
filter(Measurement.date<=latest_date_dt).filter(Measurement.date>=year_ago_latest_dt)
session.close()
#Save as df and grab only the max precip observation for each day in the last year
date_prcp_df = pd.DataFrame(date_prcp_query, columns=['Date', 'Precipitation'])
date_prcp_df.set_index('Date', inplace=True)
date_prcp_df.dropna(inplace=True)
date_prcp_df.sort_index(inplace=True)
date_prcp_max = date_prcp_df.groupby('Date')[['Precipitation']].max()
#Turn into dictionary for jsonification
prcp_query_dict = date_prcp_max.to_dict()
return jsonify(prcp_query_dict)
@app.route("/api/v1.0/stations/")
def stations():
print("Server received request for 'Stations' page...")
session = Session(engine)
#Query all the weather station details
station_query = session.query(Station.station, Station.name, Station.latitude, Station.longitude, Station.elevation)
station_df = pd.DataFrame(station_query, columns = ['station', 'name', 'latitude', 'longitude', 'elevation'])
station_df.set_index('station', inplace=True)
station_df.dropna(inplace=True)
session.close()
#Make a dictionary of weather station characteristics for jsonification
station_dict = station_df.to_dict(orient='index')
return jsonify(station_dict)
@app.route("/api/v1.0/temperature/")
def temperatures():
print("Server received request for 'Temperatures' page...")
session = Session(engine)
#Query temperature observations for the last year at the station with the most observations
tobs_date_query = session.query(Measurement.date, Measurement.tobs).\
filter(Measurement.date<=latest_date_dt).filter(Measurement.date>=year_ago_latest_dt).\
filter(Measurement.station==max_stat_freq)
session.close()
#Save query as df
tobs_date_df = pd.DataFrame(tobs_date_query, columns=['Date','Temperature'])
tobs_date_df.set_index('Date', inplace=True)
tobs_date_df.dropna(inplace=True)
#Transform df into dictionary for jsonification
tobs_date_dict = tobs_date_df.to_dict()
return jsonify(tobs_date_dict)
@app.route("/api/v1.0/<start>/")
def temp_start(start):
#Control on START date within database date range
if start<earliest_date or start>latest_date:
return (
f"START must be between {earliest_date} and {latest_date}.<br>"
f"/api/v1.0/START"
)
print("Server received request for 'Min, Max, Avg Start End' page...")
session = Session(engine)
#Query max, min, and avg temperature between START date and last date in database
TMAX = session.query(func.max(Measurement.tobs)).\
filter(Measurement.date<=latest_date).filter(Measurement.date>=start).all()
TMIN = session.query(func.min(Measurement.tobs)).\
filter(Measurement.date<=latest_date).filter(Measurement.date>=start).all()
TAVG = session.query(func.avg(Measurement.tobs)).\
filter(Measurement.date<=latest_date).filter(Measurement.date>=start).all()
session.close()
#Round TAVG for presentation
TAVG = round(TAVG[0][0],1)
#Calc number of days in the query for information
days_obs = latest_date_dt - dt.datetime.strptime(start, "%Y-%m-%d")
days_obs = days_obs.days
return (
f"The maximum temperature on Oahu for the {days_obs} days between {start} and {latest_date} was {TMAX[0][0]}.<br>"
f"The minimum temperature on Oahu for the {days_obs} days between {start} and {latest_date} was {TMIN[0][0]}.<br>"
f"The average temperature on Oahu for the {days_obs} days between {start} and {latest_date} was {TAVG}.<br>"
)
@app.route("/api/v1.0/<start>/<end>/")
def temp_start_end(start, end):
#Check START and END dates are within the range of database dates
if start<earliest_date or start>latest_date or end<earliest_date or end>latest_date:
return (
f"START and END must be between {earliest_date} and {latest_date}.<br>"
f"/api/v1.0/START/END"
)
#Allow for START and END interchanged in URL
if end<start:
start_temp = start
start = end
end = start_temp
print("Server received request for 'Min, Max, Avg Start End' page...")
session = Session(engine)
#Query max, min, and avg temperature between START date and END date in database
TMAX = session.query(func.max(Measurement.tobs)).\
filter(Measurement.date<=end).filter(Measurement.date>=start).all()
TMIN = session.query(func.min(Measurement.tobs)).\
filter(Measurement.date<=end).filter(Measurement.date>=start).all()
TAVG = session.query(func.avg(Measurement.tobs)).\
filter(Measurement.date<=end).filter(Measurement.date>=start).all()
session.close()
#Round TAVG for presentation
TAVG = round(TAVG[0][0],1)
#Calc number of days in the query for information
days_obs = dt.datetime.strptime(end, "%Y-%m-%d") - dt.datetime.strptime(start, "%Y-%m-%d")
days_obs = days_obs.days
return (
f"The maximum temperature on Oahu for the {days_obs} days between {start} and {end} was {TMAX[0][0]}.<br>"
f"The minimum temperature on Oahu for the {days_obs} days between {start} and {end} was {TMIN[0][0]}.<br>"
f"The average temperature on Oahu for the {days_obs} days between {start} and {end} was {TAVG}.<br>"
)
# Start the Flask development server when run directly.
if __name__ == "__main__":
    app.run(debug=True)
| 44.283721 | 164 | 0.714316 | 0 | 0 | 0 | 0 | 7,029 | 0.738263 | 0 | 0 | 4,417 | 0.463922 |
a6db579ee6c1383d11f407805e230fc8f9c23875 | 5,257 | py | Python | eval/gen_histories.py | DBCobra/CobraBench | d48697248948decc206cfba0a6e40fea8a772ff9 | [
"MIT"
] | 1 | 2021-03-03T06:52:50.000Z | 2021-03-03T06:52:50.000Z | eval/gen_histories.py | DBCobra/CobraBench | d48697248948decc206cfba0a6e40fea8a772ff9 | [
"MIT"
] | 1 | 2021-03-05T09:36:50.000Z | 2021-03-08T12:02:53.000Z | eval/gen_histories.py | DBCobra/CobraBench | d48697248948decc206cfba0a6e40fea8a772ff9 | [
"MIT"
] | 1 | 2021-03-03T06:57:02.000Z | 2021-03-03T06:57:02.000Z | #!/usr/bin/python
import subprocess
import sys
import os
from gen_config import Config
def set_default(config):
config.set_db('rocksdb')
config.confs['THREAD_NUM'] = 24
config.confs['MAX_FZ_TXN_NUM'] = 20
config.confs['LOCAL_LOG'] = True
config.confs['CLOUD_LOG'] = False
config.confs['COBRA_FD'] = "/tmp/cobra/"
config.confs['COBRA_FD_LOG'] = "/tmp/cobra/log/"
config.confs['USE_NEW_EPOCH_TXN'] = False
def set_benchmark(config, bench):
config.confs['SKIP_LOADING'] = False
if bench == "chengRW":
config.confs['BENCH_TYPE'] = 0
config.confs['NUM_KEYS'] = 10000
config.confs['OP_PER_CHENGTXN'] = 8
config.confs['RATIO_READ'] = 50
config.confs['RATIO_UPDATE'] = 50
elif bench == "chengRM":
config.confs['BENCH_TYPE'] = 0
config.confs['NUM_KEYS'] = 10000
config.confs['OP_PER_CHENGTXN'] = 8
config.confs['RATIO_READ'] = 90
config.confs['RATIO_UPDATE'] = 10
elif bench == "tpcc":
config.confs['BENCH_TYPE'] = 1
#config.confs['SKIP_LOADING'] = True
elif bench == "rubis":
config.confs['BENCH_TYPE'] = 3
config.confs['RUBIS_USERS_NUM'] = 20000
elif bench == "twitter":
config.confs['BENCH_TYPE'] = 4
config.confs['TWITTER_USERS_NUM'] = 1000
else:
assert False, "no such workload: " + bench
def decide_experiments(bench_type):
ks = []
for i in range(200):
ks.append(i*1000)
exp1 = {
'tpcc' : [ks[10]],
'rubis' : [ks[10]],
'chengRM' : [ks[10]],
'twitter' : [ks[10]],
'chengRW' : [100, 200, 300, 400, 500, ks[1], ks[2], ks[4], ks[6], ks[8], ks[10], ks[12], ks[14], ks[16]]
}
exp2 = {
'tpcc' : [ks[100]],
'rubis' : [ks[100]],
'chengRM' : [ks[100]],
'twitter' : [ks[100]],
'chengRW' : [ks[100]]
}
if bench_type == "one-shot":
return exp1
elif bench_type == "scaling":
return exp2
assert False
def long_run(dst_path, exps):
assert len(exps) == 1
subprocess.call('mkdir -p ' + dst_path, shell=True)
for bench in exps:
for txn_num in exps[bench]:
# clear database, old traces
subprocess.call('rm -r /tmp/cobra/log; rm -r /tmp/rocksdb/', shell=True)
# re-construct folders
subprocess.call('mkdir -p /tmp/cobra/log; mkdir /tmp/rocksdb/', shell=True)
# set up different config
config = Config("../config.yaml.default")
set_default(config)
config.confs['MAX_FZ_TXN_NUM'] = 100 # 100*24=2.4k
config.confs['TXN_NUM'] = txn_num
# remote verifier
config.confs['LOCAL_REMOTE_LOG'] = True
config.confs['WAIT_BETWEEN_TXNS'] = 100
config.confs['THROUGHPUT_PER_WAIT'] = 200 # 2k throughput
config.confs['THREAD_NUM'] = 24
config.confs['VERIFIER_HOSTNAME'] = "13.59.213.34"
# config.confs['DEBUG_LIB_FLAG'] = True
set_benchmark(config, bench)
config.all_set = True # hacky way
# dump as config
config.dump_to()
# run the benchmarks
subprocess.call('java -ea -jar ../target/txnTest-1-jar-with-dependencies.jar local', shell=True)
# save the traces
subprocess.call('mv /tmp/cobra/log/ ' + dst_path + "/" + bench + "-" + str(txn_num), shell=True)
def gen_hist(dst_path, exps):
# a loop of all different configs
#size=[1000, 2000, 4000, 6000, 8000, 10000, 100000, 1000000]
#benchmark = ['tpcc', 'chengRW', 'chengRM', 'rubis', 'twitter']
subprocess.call('mkdir -p ' + dst_path, shell=True)
for bench in exps:
for txn_num in exps[bench]:
# clear database, old traces
subprocess.call('rm -r /tmp/cobra/log; rm -r /tmp/rocksdb/', shell=True)
# re-construct folders
subprocess.call('mkdir -p /tmp/cobra/log; mkdir /tmp/rocksdb/', shell=True)
# set up different config
config = Config("../config.yaml.default")
set_default(config)
config.confs['TXN_NUM'] = txn_num
set_benchmark(config, bench)
config.all_set = True # hacky way
# dump as config
config.dump_to()
# run the benchmarks
subprocess.call('java -ea -jar ../target/txnTest-1-jar-with-dependencies.jar local', shell=True)
# save the traces
subprocess.call('mv /tmp/cobra/log/ ' + dst_path + "/" + bench + "-" + str(txn_num), shell=True)
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: gen_histories.py [one-shot|scaling|longrun] <location>")
exit(1)
mode = sys.argv[1]
tpath = sys.argv[2]
if not os.listdir(tpath) :
print("Target %s is empty" % tpath)
else:
print("Target %s is not empty!" % tpath)
exit(1)
if mode == "longrun":
exps = {
#'twitter' : [100000008],
'chengRM' : [100000007], #10M
}
long_run(tpath, exps)
else:
exps = decide_experiments(mode)
print(exps)
gen_hist(tpath, exps)
| 32.450617 | 112 | 0.56439 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,816 | 0.345444 |
a6dc9452149f59b3c7ccae49e0070336f130ec2c | 16,339 | py | Python | src/backend/api/utils/rclone_connection.py | dtenenba/motuz | 1b54295d2790f756bcb2de61f667f60b7fd5c340 | [
"MIT"
] | null | null | null | src/backend/api/utils/rclone_connection.py | dtenenba/motuz | 1b54295d2790f756bcb2de61f667f60b7fd5c340 | [
"MIT"
] | null | null | null | src/backend/api/utils/rclone_connection.py | dtenenba/motuz | 1b54295d2790f756bcb2de61f667f60b7fd5c340 | [
"MIT"
] | null | null | null | from collections import defaultdict
import functools
import json
import logging
import re
import subprocess
import threading
import time
import os
from .abstract_connection import AbstractConnection, RcloneException
class RcloneConnection(AbstractConnection):
    def __init__(self):
        # Per-job bookkeeping, all keyed by integer job id.
        self._job_status = defaultdict(functools.partial(defaultdict, str)) # Mapping from id to status dict
        self._job_text = defaultdict(str)        # human-readable progress text
        self._job_error_text = defaultdict(str)  # accumulated ERROR/stderr lines
        self._job_percent = defaultdict(int)     # last reported completion percentage
        self._job_exitstatus = {}                # rclone process exit codes
        self._stop_events = {} # Mapping from id to threading.Event
        self._latest_job_id = 0  # monotonically increasing id allocator
def verify(self, data):
credentials = self._formatCredentials(data, name='current')
user = data.owner
bucket = getattr(data, 'bucket', None)
if bucket is None:
bucket = ''
command = [
'sudo',
'-E',
'-u', user,
'rclone',
'lsjson',
'current:{}'.format(bucket),
]
self._logCommand(command, credentials)
try:
result = self._execute(command, credentials)
return {
'result': True,
'message': 'Success',
}
except subprocess.CalledProcessError as e:
returncode = e.returncode
return {
'result': False,
'message': 'Exit status {}'.format(returncode),
}
def ls(self, data, path):
credentials = self._formatCredentials(data, name='current')
user = data.owner
command = [
'sudo',
'-E',
'-u', user,
'rclone',
'lsjson',
'current:{}'.format(path),
]
self._logCommand(command, credentials)
try:
result = self._execute(command, credentials)
files = json.loads(result)
return {
'files': files,
'path': path,
}
except subprocess.CalledProcessError as e:
raise RcloneException(sanitize(str(e)))
def mkdir(self, data, path):
credentials = self._formatCredentials(data, name='current')
user = data.owner
command = [
'sudo',
'-E',
'-u', user,
'rclone',
'touch',
'current:{}/.keep'.format(path),
]
self._logCommand(command, credentials)
try:
result = self._execute(command, credentials)
return {
'message': 'Success',
}
except subprocess.CalledProcessError as e:
raise RcloneException(sanitize(str(e)))
def copy(self,
src_data,
src_resource_path,
dst_data,
dst_resource_path,
user,
copy_links,
job_id=None
):
credentials = {}
if src_data is None: # Local
src = src_resource_path
else:
credentials.update(self._formatCredentials(src_data, name='src'))
src = 'src:{}'.format(src_resource_path)
if dst_data is None: # Local
dst = dst_resource_path
else:
credentials.update(self._formatCredentials(dst_data, name='dst'))
dst = 'dst:{}'.format(dst_resource_path)
if copy_links:
option_copy_links = '--copy-links'
else:
option_copy_links = ''
command = [
'sudo',
'-E',
'-u', user,
'rclone',
'copyto',
src,
dst,
option_copy_links,
'--progress',
'--stats', '2s',
]
command = [cmd for cmd in command if len(cmd) > 0]
self._logCommand(command, credentials)
if job_id is None:
job_id = self._get_next_job_id()
else:
if self._job_id_exists(job_id):
raise ValueError('rclone copy job with ID {} already exists'.fromat(job_id))
self._stop_events[job_id] = threading.Event()
try:
self._execute_interactive(command, credentials, job_id)
except subprocess.CalledProcessError as e:
raise RcloneException(sanitize(str(e)))
return job_id
    def copy_text(self, job_id):
        """Return the latest human-readable progress text for ``job_id``."""
        return self._job_text[job_id]
    def copy_error_text(self, job_id):
        """Return the accumulated error output for ``job_id``."""
        return self._job_error_text[job_id]
    def copy_percent(self, job_id):
        """Return the last reported completion percentage for ``job_id``."""
        return self._job_percent[job_id]
    def copy_stop(self, job_id):
        """Request that copy job ``job_id`` stop."""
        self._stop_events[job_id].set()
    def copy_finished(self, job_id):
        """Return True once the job's stop event is set (finished or stopped)."""
        return self._stop_events[job_id].is_set()
    def copy_exitstatus(self, job_id):
        """Return the process exit status, or -1 if not recorded yet."""
        return self._job_exitstatus.get(job_id, -1)
    def _logCommand(self, command, credentials):
        """Log the shell-equivalent command line with credentials sanitized."""
        bash_command = "{} {}".format(
            ' '.join("{}='{}'".format(key, value) for key, value in credentials.items()),
            ' '.join(command),
        )
        logging.info(sanitize(bash_command))
    def _formatCredentials(self, data, name):
        """Build the ``RCLONE_CONFIG_*`` environment variables for a connection.

        Variables are of the form::

            RCLONE_CONFIG_CURRENT_TYPE=s3
            [ mandatory ][ name ][key]=[value]

        Only attributes actually present on ``data`` are emitted; password-like
        values are run through `rclone obscure` where rclone expects that.
        Unknown connection types are logged and yield only the _TYPE variable.
        """
        prefix = "RCLONE_CONFIG_{}".format(name.upper())
        credentials = {}
        credentials['{}_TYPE'.format(prefix)] = data.type
        # Helper: copy data.<data_key> into credentials[env_key] when present,
        # optionally transforming the value first (e.g. obscuring passwords).
        def _addCredential(env_key, data_key, *, value_functor=None):
            value = getattr(data, data_key, None)
            if value is not None:
                if value_functor is not None:
                    value = value_functor(value)
                credentials[env_key] = value
        if data.type == 's3':
            _addCredential(
                '{}_REGION'.format(prefix),
                's3_region'
            )
            _addCredential(
                '{}_ACCESS_KEY_ID'.format(prefix),
                's3_access_key_id'
            )
            _addCredential(
                '{}_SECRET_ACCESS_KEY'.format(prefix),
                's3_secret_access_key'
            )
            _addCredential(
                '{}_ENDPOINT'.format(prefix),
                's3_endpoint'
            )
            _addCredential(
                '{}_V2_AUTH'.format(prefix),
                's3_v2_auth'
            )
        elif data.type == 'azureblob':
            _addCredential(
                '{}_ACCOUNT'.format(prefix),
                'azure_account'
            )
            _addCredential(
                '{}_KEY'.format(prefix),
                'azure_key'
            )
            _addCredential(
                '{}_SAS_URL'.format(prefix),
                'azure_sas_url'
            )
        elif data.type == 'swift':
            _addCredential(
                '{}_USER'.format(prefix),
                'swift_user'
            )
            _addCredential(
                '{}_KEY'.format(prefix),
                'swift_key'
            )
            _addCredential(
                '{}_AUTH'.format(prefix),
                'swift_auth'
            )
            _addCredential(
                '{}_TENANT'.format(prefix),
                'swift_tenant'
            )
        elif data.type == 'google cloud storage':
            _addCredential(
                '{}_CLIENT_ID'.format(prefix),
                'gcp_client_id'
            )
            _addCredential(
                '{}_SERVICE_ACCOUNT_CREDENTIALS'.format(prefix),
                'gcp_service_account_credentials'
            )
            _addCredential(
                '{}_PROJECT_NUMBER'.format(prefix),
                'gcp_project_number'
            )
            _addCredential(
                '{}_OBJECT_ACL'.format(prefix),
                'gcp_object_acl'
            )
            _addCredential(
                '{}_BUCKET_ACL'.format(prefix),
                'gcp_bucket_acl'
            )
        elif data.type == 'sftp':
            _addCredential(
                '{}_HOST'.format(prefix),
                'sftp_host',
            )
            _addCredential(
                '{}_PORT'.format(prefix),
                'sftp_port',
            )
            _addCredential(
                '{}_USER'.format(prefix),
                'sftp_user',
            )
            _addCredential(
                '{}_PASS'.format(prefix),
                'sftp_pass',
                value_functor=self._obscure,
            )
        elif data.type == 'dropbox':
            _addCredential(
                '{}_TOKEN'.format(prefix),
                'dropbox_token',
            )
        elif data.type == 'onedrive':
            _addCredential(
                '{}_TOKEN'.format(prefix),
                'onedrive_token',
            )
            _addCredential(
                '{}_DRIVE_ID'.format(prefix),
                'onedrive_drive_id',
            )
            _addCredential(
                '{}_DRIVE_TYPE'.format(prefix),
                'onedrive_drive_type',
            )
        elif data.type == 'webdav':
            _addCredential(
                '{}_URL'.format(prefix),
                'webdav_url',
            )
            _addCredential(
                '{}_USER'.format(prefix),
                'webdav_user',
            )
            _addCredential(
                '{}_PASS'.format(prefix),
                'webdav_pass',
                value_functor=self._obscure,
            )
        else:
            logging.error("Connection type unknown: {}".format(data.type))
        return credentials
    def _get_next_job_id(self):
        """Allocate the next unused integer job id."""
        self._latest_job_id += 1
        while self._job_id_exists(self._latest_job_id):
            self._latest_job_id += 1
        return self._latest_job_id
    def _job_id_exists(self, job_id):
        """Return True if ``job_id`` already has status bookkeeping."""
        return job_id in self._job_status
    def _obscure(self, password):
        """
        Calls `rclone obscure password` and returns the result
        """
        return self._execute(["rclone", "obscure", password])
    def _execute(self, command, env={}):
        """Run ``command`` synchronously and return its stripped stdout.

        ``env`` entries are overlaid on a copy of the current environment.
        Raises RcloneException carrying the captured stderr when available;
        otherwise the original CalledProcessError is re-raised.
        NOTE(review): the mutable default ``env={}`` is never mutated here so
        it is harmless, but a ``None`` default would be more conventional.
        """
        full_env = os.environ.copy()
        full_env.update(env)
        try:
            byteOutput = subprocess.check_output(
                command,
                stderr=subprocess.PIPE,
                env=full_env
            )
            output = byteOutput.decode('UTF-8').rstrip()
            return output
        except subprocess.CalledProcessError as err:
            # Prefer surfacing rclone's stderr; fall back to re-raising when
            # nothing useful was captured.
            if (err.stderr is None):
                raise
            stderr = err.stderr.decode('UTF-8').strip()
            if len(stderr) == 0:
                raise
            raise RcloneException(stderr)
    def _execute_interactive(self, command, env, job_id):
        """Run ``command`` on a daemon thread, streaming progress into the
        per-job bookkeeping (see __execute_interactive)."""
        thread = threading.Thread(target=self.__execute_interactive, kwargs={
            'command': command,
            'env': env,
            'job_id': job_id,
        })
        thread.daemon = True
        thread.start()
    def __execute_interactive(self, command, env={}, job_id=0):
        """Worker: run ``command``, parse rclone's --progress output and update
        status/percent/error bookkeeping until the job finishes or is stopped
        via copy_stop()."""
        stop_event = self._stop_events[job_id]
        full_env = os.environ.copy()
        full_env.update(env)
        process = subprocess.Popen(
            command,
            env=full_env,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        # ANSI escape sequences rclone emits to redraw its progress display;
        # they are stripped below so the remaining text can be parsed.
        reset_sequence1 = '\x1b[2K\x1b[0' # + 'G'
        reset_sequence2 = '\x1b[2K\x1b[A\x1b[2K\x1b[A\x1b[2K\x1b[A\x1b[2K\x1b[A\x1b[2K\x1b[A\x1b[2K\x1b[A\x1b[2K\x1b[0' # + 'G'
        while not stop_event.is_set():
            line = process.stdout.readline().decode('utf-8')
            if len(line) == 0:
                # EOF: either the process ended, or output is just slow.
                if process.poll() is not None:
                    stop_event.set()
                else:
                    time.sleep(0.5)
                continue
            line = line.strip()
            q1 = line.find(reset_sequence1)
            if q1 != -1:
                line = line[q1 + len(reset_sequence1):]
            q2 = line.find(reset_sequence2)
            if q2 != -1:
                line = line[q2 + len(reset_sequence1):]
            line = line.replace(reset_sequence1, '')
            line = line.replace(reset_sequence2, '')
            match = re.search(r'(ERROR.*)', line)
            if match is not None:
                error = match.groups()[0]
                logging.error(error)
                self._job_error_text[job_id] += error
                self._job_error_text[job_id] += '\n'
                continue
            # Progress lines look like "Key: value".
            match = re.search(r'([A-Za-z ]+):\s*(.*)', line)
            if match is None:
                logging.info("No match in {}".format(line))
                time.sleep(0.5)
                continue
            key, value = match.groups()
            self._job_status[job_id][key] = value
            self.__process_status(job_id)
        self._job_percent[job_id] = 100
        self.__process_status(job_id)
        exitstatus = process.poll()
        self._job_exitstatus[job_id] = exitstatus
        # Drain up to 1000 lines of stderr into the job's error text.
        for _ in range(1000):
            line = process.stderr.readline().decode('utf-8')
            if len(line) == 0:
                break
            line = line.strip()
            self._job_error_text[job_id] += line
            self._job_error_text[job_id] += '\n'
        logging.info("Copy process exited with exit status {}".format(exitstatus))
        stop_event.set() # Just in case
    def __process_status(self, job_id):
        """Refresh the derived text and percent views of the raw status."""
        self.__process_text(job_id)
        self.__process_percent(job_id)
    def __process_text(self, job_id):
        """Render selected status fields into the multi-line progress text."""
        headers = [
            'GTransferred',
            'Errors',
            'Checks',
            'Transferred',
            'Elapsed time',
            'Transferring',
        ]
        status = self._job_status[job_id]
        text = '\n'.join(
            '{:>12}: {}'.format(header, status[header])
            for header in headers
        )
        self._job_text[job_id] = text
def __process_percent(self, job_id):
status = self._job_status[job_id]
match = re.search(r'(\d+)\%', status['GTransferred'])
if match is not None:
self._job_percent[job_id] = match[1]
return
match = re.search(r'(\d+)\%', status['Transferred'])
if match is not None:
self._job_percent[job_id] = match[1]
return
self._job_percent[job_id] = -1
def sanitize(string):
    """Mask credential values (keys, tokens, passwords) embedded in a string.

    Access-key ids keep their last four characters so log lines stay
    identifiable; all other secrets are replaced wholesale.
    """
    rules = [
        # s3
        (r"(RCLONE_CONFIG_\S*_ACCESS_KEY_ID=')(\S*)(\S\S\S\S')", r"\1***\3"),
        (r"(RCLONE_CONFIG_\S*_SECRET_ACCESS_KEY=')(\S*)(')", r"\1***\3"),
        # Azure
        (r"(RCLONE_CONFIG_\S*_KEY=')(\S*)(')", r"\1***\3"),
        (r"(RCLONE_CONFIG_\S*_SAS_URL=')(\S*)(')", r"\1***\3"),
        # Swift
        (r"(RCLONE_CONFIG_\S*_KEY=')(\S*)(')", r"\1***\3"),
        # GCP
        (r"(RCLONE_CONFIG_\S*_CLIENT_ID=')(\S*)(\S\S\S\S')", r"\1***\3"),
        (r"(RCLONE_CONFIG_\S*_SERVICE_ACCOUNT_CREDENTIALS=')([^']*)(')", r"\1{***}\3"),
        # SFTP / WebDAV
        (r"(RCLONE_CONFIG_\S*_PASS=')([^']*)(')", r"\1{***}\3"),
        # Dropbox / Onedrive
        (r"(RCLONE_CONFIG_\S*_TOKEN=')([^']*)(')", r"\1{***}\3"),
    ]
    result = string
    for pattern, replacement in rules:
        result = re.sub(pattern, replacement, result)
    return result
def main():
    """Manual smoke test: copy a local file to S3 using MOTUZ_* env credentials."""
    import time
    import os
    # Minimal stand-in for the project's connection model object.
    class CloudConnection:
        pass
    data = CloudConnection()
    data.__dict__ = {
        'type': 's3',
        'region': os.environ['MOTUZ_REGION'],
        'access_key_id': os.environ['MOTUZ_ACCESS_KEY_ID'],
        'secret_access_key': os.environ['MOTUZ_SECRET_ACCESS_KEY'],
    }
    connection = RcloneConnection()
    # result = connection.ls('/fh-ctr-mofuz-test/hello/world')
    job_id = 123
    import random
    connection.copy(
        src_data=None, # Local
        src_resource_path='/tmp/motuz/mb_blob.bin',
        dst_data=data,
        dst_resource_path='/fh-ctr-mofuz-test/hello/world/{}'.format(random.randint(10, 10000)),
        job_id=job_id
    )
    # Poll until the background copy thread signals completion.
    while not connection.copy_finished(job_id):
        print(connection.copy_percent(job_id))
        time.sleep(0.1)
# Run the smoke test only when executed directly.
if __name__ == '__main__':
    main()
| 27.787415 | 127 | 0.505723 | 14,347 | 0.878083 | 0 | 0 | 0 | 0 | 0 | 0 | 2,899 | 0.177428 |
a6df1b58a3503a041b065c001a210524d030e7a9 | 493 | py | Python | src/apps/account/test_utils.py | plitzenberger/graphene-auth-examples | 93694f10977feb35f73ffe1f84dea631fd6d17dc | [
"MIT"
] | 71 | 2017-06-09T13:02:15.000Z | 2021-06-15T20:00:38.000Z | src/apps/account/test_utils.py | plitzenberger/graphene-auth-examples | 93694f10977feb35f73ffe1f84dea631fd6d17dc | [
"MIT"
] | 13 | 2017-07-11T16:08:40.000Z | 2019-07-01T04:33:17.000Z | src/apps/account/test_utils.py | plitzenberger/graphene-auth-examples | 93694f10977feb35f73ffe1f84dea631fd6d17dc | [
"MIT"
] | 14 | 2017-05-18T16:27:30.000Z | 2019-09-20T12:57:17.000Z | import pytest
from django.core import mail
from test_fixtures.users import user
from .utils import send_activation_email, send_password_reset_email
@pytest.mark.django_db
def test_send_activtion_email(user, rf):
    """Sending an activation email puts exactly one message in the outbox."""
    # NOTE(review): "activtion" is a typo for "activation"; kept as-is so the
    # test id stays stable — worth renaming in a dedicated change.
    request = rf.request()
    send_activation_email(user, request)
    assert len(mail.outbox) == 1
@pytest.mark.django_db
def test_send_password_reset_email(user, rf):
    """Sending a password-reset email puts exactly one message in the outbox."""
    request = rf.request()
    send_password_reset_email(user, request)
    assert len(mail.outbox) == 1
| 23.47619 | 67 | 0.770791 | 0 | 0 | 0 | 0 | 337 | 0.68357 | 0 | 0 | 0 | 0 |
a6e0bdf1eeef5e0d534429ccf40177b615819121 | 1,749 | py | Python | mmfashion/apis/test_fashion_recommender.py | RyanJiang0416/mmfashion | 89f56e3e631b4f5c1403f7e8897396cc02b5aa91 | [
"Apache-2.0"
] | 952 | 2019-10-31T01:49:07.000Z | 2022-03-29T11:33:27.000Z | mmfashion/apis/test_fashion_recommender.py | RyanJiang0416/mmfashion | 89f56e3e631b4f5c1403f7e8897396cc02b5aa91 | [
"Apache-2.0"
] | 135 | 2019-11-02T07:09:04.000Z | 2022-03-17T06:08:11.000Z | mmfashion/apis/test_fashion_recommender.py | RyanJiang0416/mmfashion | 89f56e3e631b4f5c1403f7e8897396cc02b5aa91 | [
"Apache-2.0"
] | 239 | 2019-10-31T02:08:40.000Z | 2022-03-22T03:14:38.000Z | from __future__ import division
import torch
from mmcv.parallel import MMDataParallel
from ..datasets import build_dataloader
from .env import get_root_logger
def test_fashion_recommender(model,
                             dataset,
                             cfg,
                             distributed=False,
                             validate=False,
                             logger=None):
    """Evaluate a fashion recommender model on ``dataset``.

    Dispatches to the distributed or single-process test path; the
    distributed path is not implemented yet.
    """
    if logger is None:
        logger = get_root_logger(cfg.log_level)
    # start testing predictor
    if distributed:  # to do: _dist_test is currently a stub
        _dist_test(model, dataset, cfg, validate=validate)
    else:
        _non_dist_test(model, dataset, cfg, validate=validate)
def _process_embeds(dataset, model, cfg):
    """Run ``model`` over every batch of ``dataset`` and concatenate the
    resulting embeddings into a single CPU tensor."""
    data_loader = build_dataloader(
        dataset,
        cfg.data.imgs_per_gpu,
        cfg.data.workers_per_gpu,
        len(cfg.gpus.test),
        dist=False,
        shuffle=False)
    print('dataloader built')
    embeds = []
    # Inference only: disable gradient tracking and move results to the CPU.
    with torch.no_grad():
        for data in data_loader:
            embed = model(data['img'], return_loss=False)
            embeds.append(embed.data.cpu())
    embeds = torch.cat(embeds)
    return embeds
def _non_dist_test(model, dataset, cfg, validate=False):
    """Single-machine evaluation: compatibility AUC and fill-in-the-blank
    accuracy, printed to stdout."""
    model = MMDataParallel(model, device_ids=cfg.gpus.test).cuda()
    model.eval()
    embeds = _process_embeds(dataset, model, cfg)
    # Distance metric used to score outfit compatibility.
    metric = model.module.triplet_net.metric_branch
    # compatibility auc
    auc = dataset.test_compatibility(embeds, metric)
    # fill-in-blank accuracy
    acc = dataset.test_fitb(embeds, metric)
    print('Compat AUC: {:.2f} FITB: {:.1f}\n'.format(
        round(auc, 2), round(acc * 100, 1)))
def _dist_test(model, dataset, cfg, validate=False):
    # Distributed evaluation is not implemented yet (see caller's "to do").
    raise NotImplementedError
| 26.5 | 66 | 0.62207 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.076043 |
a6e0d961ca054cd2c16be2efbdd6fc8a2f205473 | 4,834 | py | Python | pyvx/build_cbackend.py | hakanardo/pyvx | 683eeabfa1932b9e8038848356790ea822bb0007 | [
"MIT"
] | 7 | 2015-03-07T18:58:48.000Z | 2020-12-02T15:47:42.000Z | pyvx/build_cbackend.py | hakanardo/pyvx | 683eeabfa1932b9e8038848356790ea822bb0007 | [
"MIT"
] | 3 | 2016-12-02T19:04:33.000Z | 2020-12-09T05:09:06.000Z | pyvx/build_cbackend.py | hakanardo/pyvx | 683eeabfa1932b9e8038848356790ea822bb0007 | [
"MIT"
] | 3 | 2016-08-15T03:16:04.000Z | 2020-01-17T06:42:52.000Z | import os
import re
import sys
from cffi import FFI
from pyvx import __backend_version__
# Absolute directory containing this file; used to locate the cdefs headers.
mydir = os.path.dirname(os.path.abspath(__file__))
def build(name, openvx_install, default):
    """Compile a CFFI-based OpenVX backend as ``pyvx.backend.<name>``.

    Parses the bundled ``cdefs`` headers into ``ffi.cdef()`` declarations,
    compiles an extension module linked against the OpenVX installation at
    ``openvx_install``, optionally (re)writes the default-backend shim, and
    verifies the freshly built module by importing it and cross-checking
    its embedded metadata.

    Inputs: name - backend name ('default' is reserved and rejected)
            openvx_install - path to an OpenVX installation
            default - if True, rewrite pyvx/backend/_default.py to point here
    Side effects: changes the process working directory; exits with -1 when
    the OpenVX header or shared library cannot be found.
    """
    # NOTE(review): pwd is captured but never used - presumably intended to
    # restore the working directory after the os.chdir below; confirm.
    pwd = os.getcwd()
    os.chdir(os.path.dirname(mydir))
    assert name != 'default'
    # Verify the OpenVX installation provides the header and library we link.
    hdr = os.path.join(openvx_install, 'include', 'VX', 'vx.h')
    if not os.path.exists(hdr):
        print("ERROR: Can't find header", hdr)
        exit(-1)
    lib = os.path.join(openvx_install, 'bin', 'libopenvx.so')
    if not os.path.exists(lib):
        print("ERROR: Can't find lib", lib)
        exit(-1)
    # Macro substitutions applied to the cdef headers; the calling-convention
    # macros expand to nothing except on Windows.
    defs= dict(VX_API_ENTRY='', VX_API_CALL='', VX_CALLBACK='', VX_MAX_KERNEL_NAME='256')
    if os.name == 'nt':
        defs['VX_API_CALL'] = '__stdcall'
        defs['VX_CALLBACK'] = '__stdcall'
    ffi = FFI()
    # vx.h
    vx = open(os.path.join(mydir, "cdefs", "vx.h")).read()
    vx = re.subn(r'(#define\s+[^\s]+)\s.*', r'\1 ...', vx)[0]  # Remove specifics from #defines
    ffi.cdef(vx)
    # vx_vendors.h
    ffi.cdef(open(os.path.join(mydir, "cdefs", "vx_vendors.h")).read())
    # vx_types.h - strip everything cffi cannot parse down to '...' stubs.
    types = open(os.path.join(mydir, "cdefs", "vx_types.h")).read()
    for k,v in defs.items():
        types = types.replace(k, v)
    types = re.subn(r'(#define\s+[^\s]+)\s.*', r'\1 ...', types)[0]  # Remove specifics from #defines
    types = re.subn(r'(/\*.*?\*/)', r'', types)[0]  # Remove some one line comments
    types = re.subn(r'=.*,', r'= ...,', types)[0]  # Remove specifics from enums
    types = re.subn(r'\[\s*[^\s]+?.*?\]', r'[...]', types)[0]  # Remove specific array sizes
    ffi.cdef(types)
    # Helper shims (implemented in the C source below) exposing macro values
    # and backend metadata to Python at runtime.
    ffi.cdef('''
        char *_get_FMT_REF(void);
        char *_get_FMT_SIZE(void);
        int _get_KERNEL_BASE(int vendor, int lib);
        char *_get_backend_version();
        char *_get_backend_name();
        char *_get_backend_install_path();
    ''')
    # vx_kernels.h
    kernels = open(os.path.join(mydir, "cdefs", "vx_kernels.h")).read()
    kernels = re.subn(r'=.*,', r'= ...,', kernels)[0]  # Remove specifics from enums
    ffi.cdef(kernels)
    # vx_api.h
    api = open(os.path.join(mydir, "cdefs", "vx_api.h")).read()
    for k, v in defs.items():
        api = api.replace(k, v)
    ffi.cdef(api)
    # vx_nodes.h
    nodes = open(os.path.join(mydir, "cdefs", "vx_nodes.h")).read()
    for k, v in defs.items():
        nodes = nodes.replace(k, v)
    ffi.cdef(nodes)
    # vxu.h
    vxu = open(os.path.join(mydir, "cdefs", "vxu.h")).read()
    for k, v in defs.items():
        vxu = vxu.replace(k, v)
    ffi.cdef(vxu)
    # Generate and compile the extension; the rpath is baked in so the OpenVX
    # shared libraries are found at import time without LD_LIBRARY_PATH.
    ffi.set_source("pyvx.backend.%s" % name, """
        #include <VX/vx.h>
        #include <VX/vxu.h>
        char *_get_FMT_REF(void) {return VX_FMT_REF;}
        char *_get_FMT_SIZE(void) {return VX_FMT_SIZE;}
        int _get_KERNEL_BASE(int vendor, int lib) {return VX_KERNEL_BASE(vendor, lib);}
        char *_get_backend_version() {return "%s";}
        char *_get_backend_name() {return "%s";}
        char *_get_backend_install_path() {return "%s";}
        """ % (__backend_version__.decode("utf8"), name, openvx_install),
        include_dirs=[os.path.join(openvx_install, 'include')],
        library_dirs=[os.path.join(openvx_install, 'bin')],
        extra_link_args=['-Wl,-rpath=' + os.path.abspath(os.path.join(openvx_install, 'bin'))],
        libraries=['openvx', 'vxu'])
    ffi.compile()
    # Optionally make this backend the default (or create the default shim if
    # none exists yet), then verify the default backend imports correctly.
    default_file_name = os.path.join('pyvx', 'backend', '_default.py')
    if default or not os.path.exists(default_file_name):
        fd = open(default_file_name, 'w')
        fd.write("from pyvx.backend.%s import ffi, lib\n" % name)
        fd.close()
        import pyvx.backend as backend
        assert backend.ffi.string(backend.lib._get_backend_version()) == __backend_version__
        assert backend.ffi.string(backend.lib._get_backend_name()).decode("utf8") == name
        assert backend.ffi.string(backend.lib._get_backend_install_path()).decode("utf8") == openvx_install
    # Import the just-built backend by name and cross-check its metadata.
    names = {}
    exec("import pyvx.backend.%s as backend" % name, names)
    backend = names['backend']
    assert backend.ffi.string(backend.lib._get_backend_version()) == __backend_version__
    assert backend.ffi.string(backend.lib._get_backend_name()).decode("utf8") == name
    assert backend.ffi.string(backend.lib._get_backend_install_path()).decode("utf8") == openvx_install
    print('')
    print("Succesfully built backend pyvx.backend.%s in %s" % (name, mydir))
    print('')
if __name__ == '__main__':
    # Command-line entry point for the backend builder.
    cli_args = list(sys.argv[1:])
    make_default = '--default' in cli_args
    if make_default:
        cli_args.remove('--default')
    if len(cli_args) != 2:
        print("Usage: %s [--default] <name> <openvx install path>" % sys.argv[0])
    else:
        backend_name, install_path = cli_args
        build(backend_name, install_path, make_default)
| 36.074627 | 107 | 0.604055 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,713 | 0.354365 |
a6e312bee0b6e88f70b4dd7852b96dd49c779b25 | 1,774 | py | Python | t2s/trackinfo.py | frenchytheasian/tracklist-to-spotify | d227c7b2d68ac130a35a218492cd307727906098 | [
"MIT"
] | null | null | null | t2s/trackinfo.py | frenchytheasian/tracklist-to-spotify | d227c7b2d68ac130a35a218492cd307727906098 | [
"MIT"
] | null | null | null | t2s/trackinfo.py | frenchytheasian/tracklist-to-spotify | d227c7b2d68ac130a35a218492cd307727906098 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
class TrackInfo:
    """
    An object containing track information and operations necessary for
    scraping the info off of 1001tracklists.com
    """
    def __init__(self, url):
        """Fetch the tracklist page at ``url`` and parse its tracks."""
        self.url = url
        self.tracklist_id = self.url.split('tracklist/')[1].split('/')[0]  # Get id from url
        self.tracks = []        # raw "artist - title" strings
        self.track_names = []   # parsed track titles
        self.artist_names = []  # parsed artist names
        # NOTE(review): spotify_links is never populated in this class;
        # presumably filled in elsewhere - confirm before relying on it.
        self.spotify_links = []
        self._soup = self._get_soup()
        self._track_soup = self._soup.find_all("div", class_="fontL")
        self.fill_info()

    def _get_soup(self):
        """Get HTML soup of current webpage"""
        # A browser-like User-Agent is required or the site rejects requests.
        headers = {'User-Agent': 'Mozilla/5.0'}
        page = requests.get(self.url, headers=headers)
        soup = BeautifulSoup(page.content, "html.parser")
        return soup

    def get_tracklist_title(self):
        """Scrapes the webpage for the tracklist title"""
        title = self._soup.find("h1", id="pageTitle")
        return title.text

    def fill_info(self):
        """Fill class arrays with all links, artist, and track on the page"""
        # Bug fix: the original message was missing a space after "for".
        print(f"Generating data for {self.get_tracklist_title()}")
        for entry in self._track_soup:
            track = entry.find("meta", itemprop="name")['content']
            self.tracks.append(track)
            # Split only on the first " - " so titles that themselves contain
            # " - " (e.g. remix suffixes) are kept whole instead of truncated.
            parts = track.split(' - ', 1)
            if len(parts) != 2:
                # Skip malformed entries instead of crashing mid-scrape.
                continue
            artist_name, track_name = parts
            self.track_names.append(track_name)
            self.artist_names.append(artist_name)
def main():
    """Demo: scrape a known tracklist and print every track on it."""
    info = TrackInfo("https://www.1001tracklists.com/tracklist/9l2wdv1/two-friends-big-bootie-mix-018-2020-10-26.html")
    for entry in info.tracks:
        print(entry)


if __name__ == "__main__":
    main()
a6e381a0ddef00ffd917ab6e0453f03462c4d295 | 47,823 | py | Python | gbmodel/model_sqlalchemy.py | BartMassey/capstone360 | 4079ce9963e30e38bb1ce5d35fde438af20cd7bd | [
"MIT"
] | null | null | null | gbmodel/model_sqlalchemy.py | BartMassey/capstone360 | 4079ce9963e30e38bb1ce5d35fde438af20cd7bd | [
"MIT"
] | 89 | 2019-04-04T06:05:51.000Z | 2019-12-03T06:51:22.000Z | gbmodel/model_sqlalchemy.py | BartMassey/capstone360 | 4079ce9963e30e38bb1ce5d35fde438af20cd7bd | [
"MIT"
] | 1 | 2019-10-22T22:00:55.000Z | 2019-10-22T22:00:55.000Z | import os
import sys
import datetime
import logging
import traceback
from extensions import db
from sqlalchemy import exc, func
sys.path.append(os.getcwd())
def handle_exception():
    """Recover from a database error inside an ``except`` block.

    Rolls back the active SQLAlchemy session (so it remains usable),
    logs the exception type and message, and prints the traceback.
    Must be called from within an exception handler.
    """
    exc_type, exc_value, exc_tb = sys.exc_info()
    # Roll back first so the session does not stay in a failed state.
    db.session.rollback()
    logging.error("Gbmodel - {}: {}".format(exc_type.__name__, exc_value))
    traceback.print_tb(exc_tb)
class professors(db.Model):
    """
    Class for the professors table
    Table column data imported automatically
    """
    __table__ = db.Model.metadata.tables['professors']

    def get_professor(self, id):
        """
        Get a professor with the given id
        Input: professor id
        Output: the professor object associated with the given id,
                or False when not found / on a DB error
        """
        try:
            result = professors.query.filter(professors.id == id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            result = None
        if result is None:
            # NOTE: returns False (not None) on failure; callers depend on it.
            return False
        return result

    def get_all_professors(self):
        """
        Get a list of all professors in the database (by id)
        Input: none
        Output: a list of professor objects, or False on a DB error
        """
        try:
            profs = professors().query.all()
            lists = []
            for i in profs:
                temp = i
                lists.append(temp)
        except exc.SQLAlchemyError:
            handle_exception()
            profs = None
        if profs is None:
            return False
        return lists

    def check_professor(self, prof_id):
        """
        Checks if professor ID exists in the DB
        Input: professor ID given (matched case-insensitively, trimmed)
        Output: True if it exists, False otherwise
        """
        try:
            prof_id = prof_id.strip().lower()
            result = professors().query.filter_by(id=prof_id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            result = None
        if result is not None:
            return True
        return False

    def prof_id(self, name):
        """
        Gets the id of the professor with the given name, if he is found. Returns -1 otherwise
        Input: professor name
        Output: return professor's id, or -1 when not found / on a DB error
        """
        try:
            prof = professors.query.filter_by(name=name).first()
        except exc.SQLAlchemyError:
            handle_exception()
            prof = None
        if prof is None:
            return -1
        return prof.id
class teams(db.Model):
    """Model for the teams table; one row per team within a capstone session.

    Table columns are reflected automatically from the database schema.
    """
    __table__ = db.Model.metadata.tables['teams']

    def get_max_team_id(self):
        """
        Calculate the next id for a newly added team
        if the table is empty, returns 1
        Otherwise, return the max id+1
        """
        try:
            max_id = db.session.query(func.max(teams.id)).scalar()
        except exc.SQLAlchemyError:
            handle_exception()
            max_id = None
        if max_id is None:
            return 1
        else:
            return max_id + 1

    def check_dup_team(self, t_name, session_id):
        """
        Check if the new team name already existed in the given session
        Input: name of the new team and session id of the selected session
        Output: return False if the team already exists, True otherwise
        """
        try:
            result = teams().query.filter_by(name=t_name,
                                             session_id=session_id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            result = None
        if result is not None:
            return False
        return True

    def insert_team(self, session_id, t_name):
        """
        Insert a team to database
        Input: self, session id and name of the new team
        Output: the id assigned to the new team
        """
        id = self.get_max_team_id()
        new_team = teams(id=id, session_id=session_id, name=t_name)
        db.session.add(new_team)
        db.session.commit()
        return id

    def get_team_session_id(self, session_id):
        """
        Get a list of all of the teams in a session
        Input: session id of the selected session
        Output: list of teams and their info from the selected session,
                or None when no session id is given / on a DB error
        """
        try:
            # session id 0 is falsy, so it gets its own explicit branch.
            if str(session_id) == '0':
                team = teams.query.filter_by(session_id=session_id).all()
                return team
            elif session_id:
                team = teams.query.filter_by(session_id=session_id).all()
                return team
            else:
                return None
        except exc.SQLAlchemyError:
            handle_exception()
            return None

    def remove_team_from_session(self, name, session_id):
        """
        Remove a team and all the students from that team
        Input: name of the team and session id
        Output: True if the operation completed successfully. False if something went wrong
        """
        try:
            student = students()
            removed_student = removed_students()
            result = teams.query.filter(teams.name == name,
                                        teams.session_id == session_id).first()
            # get students to delete
            tid = result.id
            list_students = student.get_students(tid)
            if list_students is not None:
                # Archive each student into removed_students before deletion.
                for i in list_students:
                    result = students.query.filter(students.name == i,
                                                   students.session_id == session_id).first()
                    removed_student.add_student(result)
            student_list = students.query.filter(students.tid == tid,
                                                 students.session_id == session_id).all()
            # remove reports
            reviews = reports.query.filter(reports.tid == tid).all()
            for review in reviews:
                db.session.delete(review)
            # remove students
            for i in student_list:
                db.session.delete(i)
            db.session.commit()
            team = teams.query.filter(teams.id == tid, teams.session_id == session_id).first()
            db.session.delete(team)
            db.session.commit()
            return True
        except exc.SQLAlchemyError:
            handle_exception()
            return False

    def remove_team(self, name, session_id):
        """
        Remove a team and all the students from that team
        Input: name of the team and session id
        Output: delete a team
                move all student in the team to unassigned student
        """
        try:
            # Get the team slated for removal
            teams_obj = teams()
            team = teams_obj.query.filter(teams.name == name,
                                          teams.session_id == session_id).first()
            # Get the students on the team
            student_list = students.query.filter(students.tid == team.id,
                                                 students.session_id == session_id).all()
            # If we are trying to remove a team with students on it...
            if student_list:
                # Jump ship if the team is the empty team. We don't delete the empty team if there are
                # students in it
                if name == "":
                    return False
                # Otherwise, move all the students on the team to the empty team
                empty_team_id = teams_obj.get_tid_from_name("", session_id)
                if empty_team_id is None:
                    empty_team_id = teams_obj.insert_team(session_id, "")
                for student in student_list:
                    student.midterm_done = False
                    student.final_done = False
                    student.tid = empty_team_id
            # Remove all of the review submitted with team id
            reviews = reports.query.filter(reports.tid == team.id).all()
            for review in reviews:
                db.session.delete(review)
            # Now, remove the team
            db.session.delete(team)
            # Commit db changes
            db.session.commit()
            # Indicate operation successful
            return True
        except exc.SQLAlchemyError:
            # Log exception, and rollback db changes
            handle_exception()
            return False

    def dashboard(self, session_id):
        """
        Return a lists of sessions from the database
        and a list of teams + students from a selected session
        Input: session id of the selected session
        Output: (per-team lists, sessions) tuple; (None, sessions) when the
                session has no teams; the string 'Error' on a DB failure
        """
        student = students()
        session = capstone_session()
        today = datetime.datetime.now()
        sessions = session.get_sessions()
        if self.get_team_session_id(session_id) is None:
            return None, sessions
        tids = [row.id for row in self.get_team_session_id(session_id)]
        team_names = [row.name for row in self.get_team_session_id(session_id)]
        lists = [[] for _ in range(len(tids))]
        # flag marks whether a review-point entry was appended for a student.
        flag = 0
        for i in range(len(tids)):
            # Get min and max
            try:
                # Query to get the min & max student points of their final
                final_points = db.session.query(
                    func.max(reports.points).label("max_points"),
                    func.min(reports.points).label("min_points"),
                    reports.reviewee).filter_by(tid=tids[i], is_final=True).filter(
                    reports.reviewee != reports.reviewer).group_by(reports.reviewee)
                # Query to get the min & max student points of their midterm
                midterm_points = db.session.query(
                    func.max(reports.points).label("max_points"),
                    func.min(reports.points).label("min_points"),
                    reports.reviewee).filter_by(tid=tids[i], is_final=False).filter(
                    reports.reviewee != reports.reviewer).group_by(reports.reviewee)
                # Query to get the students in the students table
                team_members = student.query.filter_by(tid=tids[i], session_id=session_id)
            except exc.SQLAlchemyError:
                handle_exception()
                return 'Error'
            temp = [team_names[i]]
            for team_member in team_members:
                # Checks whether the review is within the midterm dates
                if session.check_review_state(session_id, today) == "midterm":
                    for m in midterm_points:
                        if (team_member.id == m.reviewee):  # If the student's ID matches the review ID
                            params = {"name": team_member.name,
                                      "id": team_member.id,
                                      "active": "Midterm: ",
                                      "min_points": m.min_points,
                                      "max_points": m.max_points,
                                      "lead": int(team_member.is_lead)}
                            temp.append(params)
                            flag = 1
                # Checks whether the review is within the final dates
                elif session.check_review_state(session_id, today) == "final":
                    for f in final_points:
                        if (team_member.id == f.reviewee):  # If the student's ID matches the review ID
                            params = {"name": team_member.name,
                                      "id": team_member.id,
                                      "active": "Final: ",
                                      "min_points": f.min_points,
                                      "max_points": f.max_points,
                                      "lead": int(team_member.is_lead)}
                            temp.append(params)
                            flag = 1
                # No review entry matched this student: append a blank record.
                if flag == 0:
                    params = {"name": team_member.name,
                              "id": team_member.id,
                              "active": "",
                              "min_points": "",
                              "max_points": "",
                              "lead": int(team_member.is_lead)}
                    temp.append(params)
                # Reset the marker for the next student.
                flag = 0
            lists[i] = temp
        return lists, sessions

    def get_team_from_id(self, team_id):
        """
        Get the team object associated with the given id
        Input: team_id
        Output: a team object, if found. None otherwise
        """
        try:
            result = teams.query.filter(teams.id == team_id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            return None
        return result

    # Return a tid.
    def get_tid_from_name(self, team_name, ses_id):
        """
        Get the team with the given name in the session identified by the given session id
        Input: self, team_name, session_id
        Output: the team's id, if we found it; None otherwise
        """
        try:
            result = teams.query.filter(teams.name == team_name,
                                        teams.session_id == ses_id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            return None
        if result is not None:
            return result.id
        else:
            return None
class students(db.Model):
    """Model for the students table; one row per student per capstone session.

    Table columns are reflected automatically from the database schema.
    """
    __table__ = db.Model.metadata.tables['students']

    def check_dup_student(self, id, session_id):
        """
        Check if a student already exits in a session
        Input: id of the student and selected session id
        Output: return False if the student was already in
                return True otherwise
        """
        try:
            result = students.query.filter_by(id=id, session_id=session_id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            result = None
        if result is not None:
            return False
        return True

    def insert_student(self, name, email_address, id, session_id, t_name):
        """
        Add new student
        Input: student name, student email address, student id, team name and id of the selected session
        Output: return False if student id already exists in the current session
                add student to the database and return True otherwise
        """
        try:
            result = teams.query.filter(teams.name == t_name, teams.session_id == session_id).first()
            tid = result.id
            new_student = students(id=id,
                                   tid=tid,
                                   session_id=session_id,
                                   name=name,
                                   email_address=email_address,
                                   is_lead=False,
                                   midterm_done=False,
                                   final_done=False,
                                   active="open")
            db.session.add(new_student)
            db.session.commit()
        except exc.SQLAlchemyError:
            handle_exception()
            return False
        return True

    def get_students(self, tid):
        """
        Get a list of the names of all students from a given team
        Input: team id, session id
        Output: list of student names, if everything succeeds. None otherwise
        """
        try:
            result = [r.name for r in students.query.filter_by(tid=tid)]
        except exc.SQLAlchemyError:
            handle_exception()
            return None
        return result

    def get_team_members(self, tid):
        """
        Get all members of a team
        Input: team id as tid
        Output: A list of student objects representing the students on that team
        """
        try:
            mems = students.query.filter_by(tid=tid).distinct().all()
        except exc.SQLAlchemyError:
            handle_exception()
            return None
        return mems

    def get_students_in_session(self, session_id):
        """
        Gets a list of students in the given session, ordered by team (in ascending order)
        Input: session_id
        Output: the list of students
        """
        # https://stackoverflow.com/questions/4186062/sqlalchemy-order-by-descending
        # https://docs.sqlalchemy.org/en/13/orm/query.html
        try:
            results = students.query.filter(
                students.session_id == session_id).order_by(students.tid.asc()).all()
        except exc.SQLAlchemyError:
            handle_exception()
            return None
        return results

    def get_user_sessions(self, student_id):
        """
        Returns all capstone sessions that a user belongs to
        Input: student_id: The database id of the student to retrieve capstone session ids for
        output: an array of objects representing the rows for each capstone the student belongs to
        """
        try:
            results = []  # to store objects
            # get all matching records
            student_records = students.query.filter_by(id=student_id).all()
            if student_records is not None:
                # for each record, add the capstone the id points to
                for rec in student_records:
                    cap = capstone_session().get_sess_by_id(rec.session_id)
                    if cap is not None:
                        results.append(cap)
            return results
        except exc.SQLAlchemyError:
            handle_exception()
            return None

    def get_student_in_session(self, sid, session_id):
        """
        Get a student from the students table
        Input: student id, session id
        Output: the student that we found, or none if nothing was found
        """
        try:
            result = students.query.filter(students.id == sid, students.session_id == session_id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            return None
        return result

    def remove_student(self, sts, t_name, session_id):
        """
        Remove a list of selected students
        Input: list of students, team name and session id
        Output: return False of the list of student is empty or if something went wrong
                otherwise, remove student from the team
        """
        try:
            if t_name is None or sts is None:
                return False
            removed_student = removed_students()
            team = teams.query.filter(teams.name == t_name,
                                      teams.session_id == session_id).first()
            for i in sts:
                # Archive the student before deleting their row.
                student = students.query.filter(students.name == i,
                                                students.tid == team.id,
                                                students.session_id == session_id).first()
                removed_student.add_student(student)
                st = students.query.filter(students.id == student.id,
                                           students.session_id == session_id).first()
                db.session.delete(st)
                db.session.commit()
        except exc.SQLAlchemyError:
            handle_exception()
            return False
        return True

    def validate(self, id):
        """
        validate cas username with student id in the database
        Input: student id
        Output: object of found student, or False when not found / on error
        """
        try:
            result = students.query.filter_by(id=id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            result = None
        if result is None:
            return False
        else:
            return result

    # Get the single student matching the id passed in
    # input: student id of the student to retrieve
    # output: the student's capstone session id value
    def get_student(self, s_id):
        try:
            return students.query.filter_by(id=s_id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            return None

    def update_team(self, name, s_id, t_id):
        """Move the named student (in session s_id) onto team t_id.

        Output: True on success, False on a DB error.
        """
        try:
            students.query.filter_by(name=name,
                                     session_id=s_id).\
                update(dict(tid=t_id))
            db.session.commit()
            return True
        except exc.SQLAlchemyError:
            handle_exception()
            return False

    def check_team_lead(self, s_id, sess_id):
        """
        Check if the student passed in by id is the team lead
        Input: student id of the student to check
        Output: True if the student is a team lead, False otherwise
        """
        try:
            student = students.query.filter(students.id == s_id, students.session_id == sess_id).first()
            if student.is_lead == 1:
                return True
            else:
                return False
        except exc.SQLAlchemyError:
            handle_exception()
            return False

    def get_unassigned_students(self, s_id):
        """
        Get students from a session that do not have a team.
        Input: session id to grab students
        Output: Students who have no team.
        """
        try:
            # Unassigned students live on the team whose name is "".
            empty_team = teams.query.filter_by(name="", session_id=s_id).first()
            if empty_team:
                return students.query.filter_by(session_id=s_id, tid=empty_team.id).all()
            else:
                return None
        # https://stackoverflow.com/questions/6470428/catch-multiple-exceptions-in-one-line-except-block
        except (exc.SQLAlchemyError, AttributeError):
            handle_exception()
            return None

    def edit_student(self, id, new_name, new_email):
        """
        Allows students to edit their name and email address
        Input: student's new email and name and current user id
        Output: apply new name and email to students in student table
        """
        try:
            # Find the student (one row per session the student belongs to)
            student = students.query.filter(students.id == id).all()
            if student is None:
                return False
            # Change name and/or email, if either of them are non-blank
            for i in student:
                if new_name != '':
                    i.name = new_name
                if new_email != '':
                    i.email_address = new_email
            db.session.commit()
            return True
        except exc.SQLAlchemyError:
            handle_exception()
            return False

    def set_lead(self, session_id, team_name, lead):
        """
        Professor can set a lead for each team
        Input: self, chosen session id, team name and lead name
        Output: set True to team lead and False to the rest of students in the team
        """
        # Sanity check inputs
        if team_name is None or lead is None:
            return False
        # Set team lead status
        try:
            # Find the team
            team = teams.query.filter(teams.session_id == session_id, teams.name == team_name).first()
            if team is None:
                return False
            # Get list of students in the given team
            student = students.query.filter(students.tid == team.id).all()
            for i in student:
                if i.name == lead:
                    i.is_lead = True
                else:
                    i.is_lead = False
            db.session.commit()
            return True
        except exc.SQLAlchemyError:
            handle_exception()
            return False

    def set_active(self, session_id, option):
        """
        Sets the active attribute in student
        For a student to be able to access their reviews, "open" must be set
        Inputs: The capstone session id of the class to set as active or not. Option as 'open' or 'close'.
                "Open" to allow students to submit/edit reviews, "close" to not allow review submission.
        Outputs: True to indicate success, False to indicate an error.
        """
        try:
            student = students.query.filter(students.session_id == session_id).all()
            # check option, set accordingly
            if option == "open":
                for i in student:
                    i.active = 'open'
                db.session.commit()
            elif option == "close":
                for i in student:
                    i.active = 'close'
                db.session.commit()
            else:
                # mismatch, return false
                return False
            # success, so return true
            return True
        except exc.SQLAlchemyError:
            handle_exception()
            return False
class capstone_session(db.Model):
__table__ = db.Model.metadata.tables['capstone_session']
def get_max(self):
"""
Calculate the next id for a newly added session
if the table is empty, returns 1
Otherwise, return the max id+1
"""
try:
max_id = db.session.query(func.max(capstone_session.id)).scalar()
except exc.SQLAlchemyError:
handle_exception()
max_id = None
if max_id is None:
return 1
else:
return max_id + 1
def insert_session(self, term, year, professor_id):
"""
Add a current session (only if it wasn't in the database)
Input: starting term and year of the session
Output: return id of the added session
"""
term = term.strip().lower()
year = year.strip().lower()
e_term = None
e_year = 0
terms = ["fall", "winter", "spring", "summer"]
for i in range(len(terms)):
if terms[i] == term:
e_term = terms[(i+1) % 4]
e_term = e_term.capitalize()
if term == 'fall':
e_year = int(year)+1
else:
e_year = year
id = self.get_max()
term = term.capitalize()
year = year.capitalize()
prof_id = professor_id.lower()
new_sess = capstone_session(id=id,
start_term=term,
start_year=year,
end_term=e_term,
end_year=e_year,
professor_id=prof_id)
db.session.add(new_sess)
db.session.commit()
return id
def remove_session(self, session_id):
"""
Removes an entire session with all the teams and students
Input: session id
"""
try:
team = teams()
session_teams = team.query.filter_by(session_id=session_id).all()
del_session = capstone_session.query.filter(capstone_session.id == session_id).first()
for t in session_teams:
team_name = t.name
team.remove_team_from_session(team_name, session_id)
db.session.delete(del_session)
db.session.commit()
return True
except exc.SQLAlchemyError:
handle_exception()
return None
def get_sess_by_id(self, id):
"""
Get the capstone session object associated with the given id
inputs: id of capstone session to retrieve
outputs: capstone session object if found, none otherwise
"""
try:
# query for session and return
return capstone_session.query.filter_by(id=id).first()
except exc.SQLAlchemyError:
handle_exception()
return None
def check_term_name(self, s_term):
"""
Checks if the name of the term is valid
Input: start term of new session
Output: return True if valid, False otherwise
"""
s_term = s_term.strip().lower()
terms = ["fall", "winter", "spring", "summer"]
for i in range(len(terms)):
if terms[i] == s_term:
return True
return False
def check_term_year(self, s_year):
"""
Checks if the year of the term is valid
Input: start year of new session
Output: return False if invalid, True otherwise
"""
check_year = s_year.isdigit()
if not check_year:
return False
return True
def check_session_id_valid(self, v_id):
"""
Checks if the returned session ID is greater than
or equal to 0
"""
check_id = v_id.isdigit()
if check_id < 0:
return False
return True
def check_dup_session(self, s_term, s_year, p_id):
"""
Check if the new session name already exists in the database
Input: start term & year of the new session
Output: return False if the team already exists, True otherwise
"""
try:
s_term = s_term.strip().lower().capitalize()
s_year = s_year.strip().lower().capitalize()
p_id = p_id.strip().lower()
result = capstone_session().query.filter_by(
start_term=s_term, start_year=s_year, professor_id=p_id).first()
except exc.SQLAlchemyError:
handle_exception()
result = None
if result is not None:
return False
return True
def get_session_id(self, term, year, prof):
"""
Get id of a selected session
Input: term and year
Output: if the term and year are not found, add them to the database and
return added session id. Otherwise, return the id of the session
"""
prof_id = professors().prof_id(prof)
try:
id = capstone_session.query.filter(capstone_session.start_term == term,
capstone_session.start_year == year,
capstone_session.professor_id == prof_id).first()
except exc.SQLAlchemyError:
handle_exception()
id = None
if id is None:
prof_id = professors().prof_id(prof)
return self.insert_session(term, str(year), prof_id)
else:
return id.id
def get_sessions(self):
"""
Get a list of session to display on the drop downs
Input: only self
Output: list of sessions (includes start term, year and professor name)
"""
caps = capstone_session.query.all()
lists = []
for i in caps:
prof = professors.query.filter(professors.id == i.professor_id).first()
if prof is not None:
temp = str(i.start_term) + " - " + str(i.start_year) + " (" + str(prof.name) + ")"
lists.append(temp)
return lists
def get_active_sessions(self):
"""
Get a list of active capstone sessions
Input: self
Output: the list of currently active capstone sessions
"""
# Calculate the start term and year of the sessions we expect to be active
currentDate = datetime.datetime.now()
month = int(currentDate.month)
if month in range(1, 3):
# Fall term of last year
start_term_1 = "Fall"
start_year_1 = currentDate.year - 1
# Winter term of current year
start_term_2 = "Winter"
start_year_2 = currentDate.year
else:
# Both terms will start in the same year
start_year_1 = currentDate.year
start_year_2 = currentDate.year
# Winter and Spring terms
if month in range(3, 6):
start_term_1 = "Winter"
start_term_2 = "Spring"
# Spring and Summer terms
elif month in range(6, 9):
start_term_1 = "Spring"
start_term_2 = "Summer"
# Summer and Fall terms
else:
start_term_1 = "Summer"
start_term_2 = "Fall"
# Query the db for active sessions using the start term and year information we calculated above
try:
# https://stackoverflow.com/questions/7942547/using-or-in-sqlalchemy
# Algorithm: SELECT * FROM CAPSTONE_SESSION WHERE
# (start_term = start_term_1 AND start_year = start_year_1)
# OR
# (start_term = start_term_2 AND start_year = start_year_2)
return capstone_session.query.filter(((capstone_session.start_year == start_year_1) &
(capstone_session.start_term == start_term_1)) |
((capstone_session.start_year == start_year_2) &
(capstone_session.start_term == start_term_2))).all()
except exc.SQLAlchemyError:
handle_exception()
return None
def check_dates(self, start, end):
"""
Check if start and end dates are valid
Input: start and end dates
Output: Return 0 if valid (both start and end date being empty is valid)
Return 1 if start date is after the end date
Return 2 if either start date or end date is empty (but not both)
"""
params = {'start': start, 'end': end}
if params['start'] and params['end']:
if int(params['start']) > int(params['end']):
return 1
else:
return 0
elif params['start'] is None and params['end'] is None:
return 0
return 2
def date_error(self, params):
"""
This method handles error message for inserting dates
Input: parameter of dates (start/end dates for midterm/final)
Output: error message
"""
error_msg = None
for i in params:
if params[i]:
params[i] = params[i].replace('-', '')
else:
params[i] = None
mid = self.check_dates(params['midterm_start'], params['midterm_end'])
final = self.check_dates(params['final_start'], params['final_end'])
if mid == 2:
error_msg = "Please fill out both start and end dates for the Midterm dates"
return error_msg
if final == 2:
error_msg = "Please fill out both start and end dates for the Final dates"
return error_msg
elif mid == 1 or final == 1:
error_msg = "Please choose an end date that starts after the start date"
return error_msg
return error_msg
def split_dates(self, params):
"""
Split dates into integer year, month and day
to convert the string to datetime object
Input: parameter of dates
Outout: parameter of datetime objects
"""
for i in params:
if params[i]:
params[i] = params[i].split('-')
params[i] = datetime.datetime(int(params[i][0]), int(params[i][1]), int(params[i][2]))
else:
params[i] = None
return params
def insert_dates(self, midterm_start, midterm_end, final_start, final_end, session_id):
"""
Insert a start and end date for midterm and final review
Input: start and end date for midterm review and final reviews
Output: update the dates in the database
"""
review_dates = {'midterm_start': midterm_start,
'midterm_end': midterm_end,
'final_start': final_start,
'final_end': final_end}
dates = self.split_dates(review_dates)
params = {'midterm_start': dates['midterm_start'],
'midterm_end': dates['midterm_end'],
'final_start': dates['final_start'],
'final_end': dates['final_end'],
'session_id': session_id}
for i in params:
if params[i]:
params[i] = params[i]
else:
params[i] = None
session = capstone_session.query.filter(capstone_session.id == session_id).first()
session.midterm_start = params['midterm_start']
session.midterm_end = params['midterm_end']
session.final_start = params['final_start']
session.final_end = params['final_end']
db.session.commit()
return True
def check_review_state(self, session_id, date):
"""
Given a capstone session id to check and a date,
this method determines the currently available review if any
Inputs: a capstone session id and a date which should be a python date time object
Outputs: 'final' if date is after the final start date for the session
'midterm' if the date is between the midterm and final start dates.
'error' otherwise
"""
try:
# get the session
session = capstone_session.query.filter(capstone_session.id == session_id).first()
# check if final exists:
if session.final_start is not None:
# if after final period, return final
if date >= session.final_start:
return 'final'
elif session.midterm_start is not None:
# otherwise if midterm exists, check if after midterm and return if so
if date >= session.midterm_start:
return 'midterm'
else:
return 'Error'
elif session.midterm_start is not None:
# if only midterm exists, check midterm
if date >= session.midterm_start:
return 'midterm'
else:
# no dates set, so error
return 'Error'
except exc.SQLAlchemyError:
handle_exception()
return 'Error'
def check_not_late(Self, session_id, date, type):
"""
This method is for determining is a review is late. It receives the type of review to check
and compares the date sent into the method with the review's end period
Inputs: session_id -- the value of the id for the capstone session to check
date: the date that the review is submitted, type: "midterm" or "final" should be received
Outputs: True -- the review is within the open period (the review is NOT late)
or False -- the review IS late or an error was experienced
"""
try:
# get the session
session = capstone_session.query.filter(capstone_session.id == session_id).first()
# check the type:
if type == 'midterm':
# check if midterm date exists
if session.midterm_end is not None:
# check date to see if its currently or before the midterm start state
if date <= session.midterm_end:
# on time
return True
else:
# late
return False
else:
# error
return False
elif type == 'final':
# check if final date exists
if session.final_end is not None:
# check date
if date <= session.final_end:
# on time
return True
else:
# late
return False
else:
# error
return False
else:
# error
return False
except exc.SQLAlchemyError:
handle_exception()
return False
class reports(db.Model):
    """Peer-review report model, mapped to the existing 'reports' table."""
    __table__ = db.Model.metadata.tables['reports']

    def get_reports_for_student(self, student_id, session_id, is_final=None):
        """
        Gets all available reports for a student, optionally filtering to only midterms or finals
        Input: student id, session_id and is_final (is_final indicates if we are filtering for final reviews
               or not. is_final = true indicates we are looking for final reviews. is_final = false indicates
               we are looking for midterm reviews. is_final = None indicates we want both.)
        Output: the available reports for the student, or None on a database error
        """
        try:
            # (removed a dead 'reviews = {}' pre-initialization -- both
            # branches below always rebind the name to a list)
            if is_final is not None:
                reviews = reports.query.filter(reports.reviewee == student_id,
                                               reports.session_id == session_id,
                                               reports.is_final == is_final).all()
            else:
                reviews = reports.query.filter(reports.reviewee == student_id,
                                               reports.session_id == session_id).all()
            return reviews
        except exc.SQLAlchemyError:
            handle_exception()
            return None

    def get_report(self, reviewer_id, reviewee_id, team_id, is_final):
        """
        Get a review from the database using the given information
        Input: reviewer_id (a student id), reviewee_id (a student id), team_id, is_final (indicates if the
               review is a final review or not)
        Output: the review, if it was found, or None if it wasn't or if there was a problem
        """
        try:
            return reports.query.filter(reports.reviewer == reviewer_id,
                                        reports.tid == team_id,
                                        reports.is_final == is_final,
                                        reports.reviewee == reviewee_id).first()
        except exc.SQLAlchemyError:
            handle_exception()
            return None

    def get_team_reports(self, tid, is_final):
        """
        This method is for getting the reports of an entire team
        Inputs: tid -- team id of reports to retrieve, is_final - if it's the second term
        Outputs: result - all report objects for the team, or None on a database error
        """
        try:
            result = reports.query.filter(reports.tid == tid,
                                          reports.is_final == is_final).distinct().all()
            return result
        except exc.SQLAlchemyError:
            handle_exception()
            return None

    def insert_report(self, sess_id, time, reviewer, tid, reviewee, tech,
                      ethic, com, coop, init, focus, cont, lead, org, dlg,
                      points, strn, wkn, traits, learned, proud, is_final, late):
        """
        Stages a report to be inserted into the database -- This does NOT commit the add!
        Inputs: Arguments for each individual field of the report
        Outputs: true if adding was successful, false if not
        """
        try:
            # Build Report object from method input
            new_report = reports(session_id=sess_id,
                                 time=time,
                                 reviewer=reviewer,
                                 tid=tid,
                                 reviewee=reviewee,
                                 tech_mastery=tech,
                                 work_ethic=ethic,
                                 communication=com,
                                 cooperation=coop,
                                 initiative=init,
                                 team_focus=focus,
                                 contribution=cont,
                                 leadership=lead,
                                 organization=org,
                                 delegation=dlg,
                                 points=points,
                                 strengths=strn,
                                 weaknesses=wkn,
                                 traits_to_work_on=traits,
                                 what_you_learned=learned,
                                 proud_of_accomplishment=proud,
                                 is_final=is_final,
                                 is_late=late)
            # add the report and return true for success
            db.session.add(new_report)
            print('Adding Report to Session')
            return True
        except exc.SQLAlchemyError:
            # if error, return false
            handle_exception()
            return False

    def commit_reports(self, id, state, sess_id, success):
        """
        Method to commit staged reports to the DB while updating the student's state
        Input: id -- the student's id, state -- 'midterm' or 'final',
               sess_id -- the capstone session id,
               success -- whether staging the reports succeeded; False rolls back
        Output: True if successful, false otherwise
        (Docstring previously claimed "input: None", which was wrong.)
        """
        # if adding reports was not successful, rollback changes to session
        try:
            if success is False:
                try:
                    print('Rolling Back Reports')
                    db.session.rollback()
                except exc.SQLAlchemyError:
                    return False
                return False
            # update appropriate student 'done' attribute
            print('Finding Student')
            student = students.query.filter_by(id=id, session_id=sess_id).first()
            if state == 'midterm':
                student.midterm_done = 1
            elif state == 'final':
                student.final_done = 1
            else:
                return False
            print('Committing Reports')
            db.session.commit()
            return True
        except exc.SQLAlchemyError:
            handle_exception()
            print('Rolling Back Reports')
            return False

    def commit_updates(self, success):
        """
        This method is for committing review updates
        input: success -- a boolean object indicating whether to proceed
               with committing (true) or to roll back (false)
        output: False -- commit was not made, True - commit was made successfully
        """
        try:
            if success is False:
                print('Rolling Back Edits')
                db.session.rollback()
                return False
            else:
                print('Committing Edits')
                db.session.commit()
                return True
        except exc.SQLAlchemyError:
            handle_exception()
            print('Rolling Back Edits')
            return False
class removed_students(db.Model):
    """Archive of students removed from a session, mapped to 'removed_students'."""
    __table__ = db.Model.metadata.tables['removed_students']

    def add_student(self, s):
        """
        Insert a removed student into the removed_students table
        (fixed docstring typo: was "remocved_students")
        Input: s -- the student record being removed (None is rejected)
        Output: return False if the info is empty
                Otherwise, archive the student with the removal date and return True
        """
        if s is None:
            return False
        # Timestamp the removal so the archive records when it happened.
        current_date = datetime.datetime.now()
        removed_student = removed_students(id=s.id,
                                           tid=s.tid,
                                           session_id=s.session_id,
                                           name=s.name,
                                           is_lead=s.is_lead,
                                           midterm_done=s.midterm_done,
                                           final_done=s.final_done,
                                           removed_date=current_date)
        db.session.add(removed_student)
        db.session.commit()
        return True
| 38.1668 | 109 | 0.533885 | 47,280 | 0.988646 | 0 | 0 | 0 | 0 | 0 | 0 | 15,309 | 0.320118 |
a6e3c3ffa2830e6dc6e8d6bc0393272aef1d0fd6 | 349 | py | Python | tab2comma.py | Guerillero/GeolocationFun | f61be4f2b3e0a6a2c4641f83ae29ff161eb861fe | [
"MIT"
] | 1 | 2016-03-11T10:26:08.000Z | 2016-03-11T10:26:08.000Z | tab2comma.py | Guerillero/GeolocationFun | f61be4f2b3e0a6a2c4641f83ae29ff161eb861fe | [
"MIT"
] | null | null | null | tab2comma.py | Guerillero/GeolocationFun | f61be4f2b3e0a6a2c4641f83ae29ff161eb861fe | [
"MIT"
] | null | null | null | #Converts the geo_data tvs into a more ArcMap friendly csv
import csv
import sys
fin = open('geo_data.tsv', 'r')
fout = open('geo_data.csv', 'w')
csv.field_size_limit(sys.maxsize)
tabfile = csv.reader(fin, dialect=csv.excel_tab)
commafile = csv.writer(fout, dialect=csv.excel)
for row in tabfile:
commafile.writerow(row)
print "done"
| 20.529412 | 58 | 0.724928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.280802 |
a6e4911a102a56bec265217b599cb065b431fc4f | 106 | py | Python | srv/service/__init__.py | mantou22/SC_system | 0c048c1ba678e378e62bb046b39c1a0f7792adee | [
"MulanPSL-1.0"
] | null | null | null | srv/service/__init__.py | mantou22/SC_system | 0c048c1ba678e378e62bb046b39c1a0f7792adee | [
"MulanPSL-1.0"
] | 1 | 2021-09-01T03:28:39.000Z | 2021-09-01T03:28:39.000Z | srv/service/__init__.py | mantou22/SC_system | 0c048c1ba678e378e62bb046b39c1a0f7792adee | [
"MulanPSL-1.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@author:MT
@file:__init__.py.py
@time:2021/8/21 23:02
""" | 15.142857 | 24 | 0.575472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.981132 |
a6e5dd3f1d3b5d0f461d38590a4be156f592fc44 | 600 | py | Python | Python/Introduction/write-a-function.py | lakshika1064/Hackerrank_Solutions-Python | 50ca205c5a3a9a4f294dcda077c390209eb57ecc | [
"MIT"
] | 1 | 2020-08-18T08:14:41.000Z | 2020-08-18T08:14:41.000Z | Python/Introduction/write-a-function.py | lakshika1064/Hackerrank_Solutions-Python | 50ca205c5a3a9a4f294dcda077c390209eb57ecc | [
"MIT"
] | null | null | null | Python/Introduction/write-a-function.py | lakshika1064/Hackerrank_Solutions-Python | 50ca205c5a3a9a4f294dcda077c390209eb57ecc | [
"MIT"
] | null | null | null | def is_leap(year):
    leap = False
    # Gregorian rule: divisible by 4 -> leap year, unless also divisible
    # by 100, unless additionally divisible by 400.
    if year % 4 == 0:
        if year % 100 == 0:
            if year % 400 == 0:
                # centuries divisible by 400 (e.g. 2000) are leap years
                leap = True
            else:
                # other centuries (e.g. 1900) are not
                leap = False
        else:
            leap = True
    return leap

# Read a year from stdin and print whether it is a leap year.
year = int(input())
print(is_leap(year))
'''
In the Gregorian calendar, three conditions are used to identify leap years:
The year can be evenly divided by 4, is a leap year, unless:
The year can be evenly divided by 100, it is NOT a leap year, unless:
The year is also evenly divisible by 400. Then it is a leap year.
''' | 23.076923 | 77 | 0.553333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 307 | 0.511667 |
a6e636c3407d17a05d2d215c473b50da1c8bf471 | 464 | py | Python | systemfixtures/tests/test_users.py | alejdg/systemfixtures | d1c42d83c3dca2a36b52e8fc214639ebcb1cd8a1 | [
"MIT"
] | 13 | 2017-01-24T15:25:47.000Z | 2022-01-06T23:56:06.000Z | systemfixtures/tests/test_users.py | cjwatson/systemfixtures | 6ff52e224585d8fab2908dc08a22fe36dcaf93d4 | [
"MIT"
] | 10 | 2017-03-08T09:36:01.000Z | 2022-02-09T11:08:00.000Z | systemfixtures/tests/test_users.py | cjwatson/systemfixtures | 6ff52e224585d8fab2908dc08a22fe36dcaf93d4 | [
"MIT"
] | 5 | 2017-03-08T09:30:51.000Z | 2022-02-05T23:22:25.000Z | import pwd
from testtools import TestCase
from ..users import FakeUsers
class FakeUsersTest(TestCase):
    """Tests for the FakeUsers fixture layered over the pwd database."""

    def setUp(self):
        super(FakeUsersTest, self).setUp()
        self.users = self.useFixture(FakeUsers())

    def test_real(self):
        # Entries from the real system database remain visible.
        root_entry = pwd.getpwnam("root")
        self.assertEqual(0, root_entry.pw_uid)

    def test_fake(self):
        # Entries added through the fixture are served by pwd lookups.
        self.users.add("foo", 123)
        fake_entry = pwd.getpwnam("foo")
        self.assertEqual(123, fake_entry.pw_uid)
| 21.090909 | 49 | 0.642241 | 387 | 0.834052 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.034483 |
a6e82e8401f083b412aeb15f384d0aa8ee6b7b91 | 5,666 | py | Python | StatisticalModelling.py | bdolenc/Zemanta-challenge | 5ece77c48bf6da4e96de6bceb910ac77496f54e2 | [
"MIT"
] | null | null | null | StatisticalModelling.py | bdolenc/Zemanta-challenge | 5ece77c48bf6da4e96de6bceb910ac77496f54e2 | [
"MIT"
] | null | null | null | StatisticalModelling.py | bdolenc/Zemanta-challenge | 5ece77c48bf6da4e96de6bceb910ac77496f54e2 | [
"MIT"
] | null | null | null | #The code is published under MIT license.
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import cross_validation
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import roc_curve, auc
from sklearn.linear_model import LogisticRegression
import pandas as pd
import csv
import numpy as np
def prepare_data(learn_file, labels_file):
    """
    Open learning set, cluster labels
    and change ZIP codes with corresponding
    cluster label. Return X and y for learning.
    """
    print "---preparing data...",
    l_set = pd.read_csv(learn_file, sep='\t')
    # t_set = pd.read_csv(test_file, sep='\t', header=None, names=['click', 'creative_id', 'zip', 'domain', 'page'])
    # t_set = pd.read_csv(test_file, sep='\t', header=None, names=['creative_id', 'zip', 'domain', 'page'])
    # Subsample: keep every 5th row to reduce memory and training time.
    l_set = l_set.iloc[::5, :]
    # t_set = t_set.iloc[::5, :]
    #replace NaN values with zero.
    l_set = l_set.fillna(0)
    # t_set = t_set.fillna(0)
    # Load the ZIP -> cluster-label mapping (CSV rows of "zip,label").
    with open(labels_file, mode='r') as file_in:
        reader = csv.reader(file_in)
        c_labels = {float(rows[0]): rows[1] for rows in reader}
    #change ZIP with label
    # NOTE(review): Series.convert_objects was removed in pandas 0.25;
    # modern pandas needs pd.to_numeric(..., errors='coerce') here.
    l_set['zip'] = l_set['zip'].convert_objects(convert_numeric=True).dropna()
    l_set['zip'] = l_set['zip'].map(c_labels.get)
    # Change ZIP with label
    # t_set['zip'] = t_set['zip'].convert_objects(convert_numeric=True).dropna()
    # t_set['zip'] = t_set['zip'].map(c_labels.get)
    # Shuffle the rows (non-deterministic unless numpy's RNG is seeded).
    l_set = l_set.reindex(np.random.permutation(l_set.index))
    print "done---"
    #remove where ZIP None - for testing on part data
    # l_set = l_set[l_set.zip.notnull()]
    # t_set = t_set[t_set.zip.notnull()]
    #X for learning features, y for click
    X = l_set[['creative_id', 'zip', 'domain']]
    y = l_set['click']
    # X_sub = t_set[['creative_id', 'zip', 'domain']]
    # y_sub = t_set['click']
    #Replace domain with numeric
    # Enumerate the unique domains into integer codes (set order is arbitrary,
    # so the encoding differs between runs).
    unique_d = set(X['domain'])
    # print len(unique_d)
    # unique_d |= set(X_sub['domain'])
    dict_d = {}
    for c, d in enumerate(unique_d):
        dict_d[d] = c
    X['domain'] = X['domain'].map(dict_d.get)
    X = X.fillna(0)
    # X_sub['domain'] = X_sub['domain'].map(dict_d.get)
    # X_sub = X_sub.fillna(0)
    return X, y, # X_sub, y_sub
def random_forest(X, y, n_estimators):
"""
Scikit Random Forest implementation
with 100 trees, testing on 0.4 part
of the data, and train on 0.6.
"""
#Scale data
#X = StandardScaler().fit_transform(X)
#split data to train and test
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.4)
# print X_train
# print y_train
# create rfc object
forest = RandomForestClassifier(n_estimators=n_estimators)
#fit training data
prob = forest.fit(X_train, y_train, ).predict_proba(X_test)
#compute ROC
fpr, tpr, thresholds = roc_curve(y_test, prob[:, 1])
roc_auc = auc(fpr, tpr)
#print fpr, tpr, thresholds
print "AUC Random Forest: " + str(roc_auc)
def stacking_scikit(X, y, n_estimators):
    """
    Stacking with classifiers from scikit-learn
    library. Based on example
    https://github.com/log0/vertebral/blob/master/stacked_generalization.py
    """
    # NOTE(review): DataFrame.as_matrix and the sklearn.cross_validation
    # module were removed in later library versions (use .values /
    # sklearn.model_selection on modern stacks).
    X = X.as_matrix()
    y = y.as_matrix()
    # Level-0 models whose out-of-fold predictions feed the meta-learner.
    base_classifiers = [RandomForestClassifier(n_estimators=n_estimators),
                        ExtraTreesClassifier(n_estimators=n_estimators),
                        GradientBoostingClassifier(n_estimators=n_estimators)]
    clf_names = ["Random Forest", "Extra Trees Classifier", "Gradient Boosting Classifier"]
    # Divide data on training and test set
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2)
    # Arrays for classifier results: one column per base classifier.
    out_train = np.zeros((X_train.shape[0], len(base_classifiers)))
    out_test = np.zeros((X_test.shape[0], len(base_classifiers)))
    t_cv = list(StratifiedKFold(y_train, n_folds=5))
    for i, clf in enumerate(base_classifiers):
        print "Training classifier " + clf_names[i]
        cv_probabilities = np.zeros((X_test.shape[0], len(t_cv)))
        # cross validation train
        for j, (train_i, test_i) in enumerate(t_cv):
            X_train_0 = X_train[train_i]
            y_train_0 = y_train[train_i]
            X_test_0 = X_train[test_i]
            # train each classifier
            clf.fit(X_train_0, y_train_0)
            # Get probabilities for click on internal test data
            proba = clf.predict_proba(X_test_0)
            out_train[test_i, i] = proba[:, 1]
            # Probabilities for test data
            proba_test = clf.predict_proba(X_test)
            cv_probabilities[:, j] = proba_test[:, 1]
        # Average of predictions across the folds
        out_test[:, i] = cv_probabilities.mean(1)
    # Level-1 model: logistic regression over the base predictions.
    print "Stacking with Logistic regression"
    stack_clf = LogisticRegression(C=10)
    stack_clf.fit(out_train, y_train)
    stack_prediction = stack_clf.predict_proba(out_test)
    #compute ROC
    fpr, tpr, thresholds = roc_curve(y_test, stack_prediction[:, 1])
    roc_auc = auc(fpr, tpr)
    print "AUC Stacking: " + str(roc_auc)
    #write to file
    np.savetxt(fname="results.txt", X=stack_prediction[:, 1], fmt="%0.6f")
# Input data locations (Windows-style paths).
# NOTE(review): the backslashes survive only because \B and \Z are not
# recognized string escapes; raw strings would be safer.
learning_set = "C:\BigData\Zemanta_challenge_1_data/training_set.tsv"
learning_part = "C:\BigData\Zemanta_challenge_1_data/training_part.tsv"
test_set = "C:\BigData\Zemanta_challenge_1_data/test_set.tsv"
labels = "hc_results.csv"
# Build features/labels, then evaluate a plain random forest and the
# stacked ensemble, each with 10 estimators per base classifier.
# (learning_part and test_set are currently unused.)
X, y = prepare_data(learning_set, labels)
random_forest(X, y, 10)
stacking_scikit(X, y, 10)
| 35.192547 | 116 | 0.667137 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,248 | 0.396753 |
a6e8d7ed37187c0ba83698ca8f5232fa4215e1e9 | 487 | py | Python | setup.py | bkbilly/AlarmPI | 8106769d83c1f1d697173c5e352e4e3cb3d5c4ec | [
"MIT"
] | 55 | 2016-03-08T19:24:28.000Z | 2022-02-16T22:10:39.000Z | setup.py | bkbilly/AlarmPI | 8106769d83c1f1d697173c5e352e4e3cb3d5c4ec | [
"MIT"
] | 18 | 2017-09-02T10:40:58.000Z | 2020-09-25T20:46:11.000Z | setup.py | bkbilly/AlarmPI | 8106769d83c1f1d697173c5e352e4e3cb3d5c4ec | [
"MIT"
] | 9 | 2018-05-17T12:54:11.000Z | 2021-07-23T01:40:22.000Z | from setuptools import setup, find_packages
# Third-party packages required at runtime.
REQUIRES = [
    'Flask>=1.1.1',
    'Flask-SocketIO>=4.2.1',
    'Flask-Login>=0.4.1',
    'requests>=2.22.0',
    'pytz>=2019.2',
    'paho-mqtt>=1.4.0',
    'RPi.GPIO>=0.7.0',
]
# Package metadata for the AlarmPI home security system.
setup(
    name='AlarmPI',
    version='4.7',
    description='Home Security System',
    author='bkbilly',
    author_email='bkbilly@hotmail.com',
    packages=find_packages(),
    install_requires=REQUIRES,
    # long_description=open('README.md').read()
)
| 20.291667 | 47 | 0.61191 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 233 | 0.478439 |
a6e98b236e9721997e3c77aa490d7419aefda036 | 5,115 | py | Python | AntiFire/model_utils.py | MikhailKitikov/AntiFire | 5b148a4f1b8f9be402a30af6dc2b0a5982327a71 | [
"MIT"
] | null | null | null | AntiFire/model_utils.py | MikhailKitikov/AntiFire | 5b148a4f1b8f9be402a30af6dc2b0a5982327a71 | [
"MIT"
] | null | null | null | AntiFire/model_utils.py | MikhailKitikov/AntiFire | 5b148a4f1b8f9be402a30af6dc2b0a5982327a71 | [
"MIT"
] | null | null | null | from keras.layers.pooling import AveragePooling2D, MaxPooling2D
from keras.applications.mobilenet_v2 import MobileNetV2
from keras.applications.nasnet import NASNetMobile
from keras.applications import ResNet50V2
from keras.layers.core import Dropout
from keras.layers.core import Flatten
from keras.layers.core import Dense
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers import LSTM
from keras.models import Sequential
from keras.layers import BatchNormalization
from keras.layers import Input, Conv2D
from keras.models import Model
from collections import deque
import tensorflow as tf
import numpy as np
def create_model_head(baseModel):
    """Attach a small binary-classification head to a convolutional base model."""
    x = baseModel.output
    x = AveragePooling2D(pool_size=(3, 3))(x)
    x = Flatten(name="flatten")(x)
    x = Dense(64, activation="relu")(x)
    x = Dropout(0.5)(x)
    x = Dense(1, activation="sigmoid")(x)
    return Model(inputs=baseModel.input, outputs=x)
def load_mobilenetv2():
    """Build the MobileNetV2-based classifier and load its trained weights."""
    base = MobileNetV2(weights=None, include_top=False,
                       input_tensor=Input(shape=(224, 224, 3)))
    model = create_model_head(base)
    model.load_weights('../Models/Trained models/mobileNetv2.h5')
    return model
def load_nasnetmobile():
    """Build the NASNetMobile-based classifier and load its trained weights."""
    base = NASNetMobile(weights=None, include_top=False,
                        input_tensor=Input(shape=(224, 224, 3)))
    model = create_model_head(base)
    model.load_weights('../Models/Trained models/nasnetMobile.h5')
    return model
def load_resnet50():
    """Build the ResNet50V2-based classifier and load its trained weights."""
    base = ResNet50V2(weights=None, include_top=False,
                      input_tensor=Input(shape=(224, 224, 3)))
    model = create_model_head(base)
    model.load_weights('../Models/Trained models/resnet50v2.h5')
    return model
def load_FireNet():
    """Construct the large FireNet CNN (224x224x3 input) and load trained weights."""
    input_shape = (224, 224, 3)
    model = Sequential()
    # Four conv blocks: (filters, pooling layer, dropout rate).
    conv_blocks = [(128, MaxPooling2D, 0.1),
                   (64, AveragePooling2D, 0.2),
                   (128, AveragePooling2D, 0.2),
                   (64, MaxPooling2D, 0.1)]
    for index, (filters, pool_cls, drop_rate) in enumerate(conv_blocks):
        if index == 0:
            model.add(Convolution2D(filters, (3, 3), padding='same',
                                    activation='relu', input_shape=input_shape))
        else:
            model.add(Convolution2D(filters, (3, 3), padding='same', activation='relu'))
        model.add(BatchNormalization())
        model.add(pool_cls(pool_size=(2, 2)))
        model.add(Dropout(drop_rate))
    # Classifier head; the 'high_output' layer is reused by load_FireNetStack.
    model.add(Flatten())
    model.add(Dense(64, activation='relu', name='high_output'))
    model.add(Dropout(0.5))
    model.add(Dense(32, activation='relu'))
    model.add(Dropout(0.25))
    model.add(Dense(1, activation='sigmoid'))
    model.load_weights('../Models/Trained models/FireNet_large_new.h5')
    return model
def load_FireNetStack():
    """Return FireNet truncated at 'high_output', exposing its 64-d feature layer."""
    full_model = load_FireNet()
    return Model(inputs=full_model.input,
                 outputs=full_model.get_layer('high_output').output)
def load_FireNetMobile():
    """Construct the lightweight FireNet CNN (64x64x3 input) and load trained weights."""
    input_shape = (64, 64, 3)
    model = Sequential()
    # Three conv blocks: (filters, kernel size, pooling layer); all use dropout 0.1.
    conv_blocks = [(64, (3, 3), MaxPooling2D),
                   (32, (5, 5), AveragePooling2D),
                   (32, (3, 3), AveragePooling2D)]
    for index, (filters, kernel, pool_cls) in enumerate(conv_blocks):
        if index == 0:
            model.add(Convolution2D(filters, kernel, padding='same',
                                    activation='relu', input_shape=input_shape))
        else:
            model.add(Convolution2D(filters, kernel, padding='same', activation='relu'))
        model.add(BatchNormalization())
        model.add(pool_cls(pool_size=(2, 2)))
        model.add(Dropout(0.1))
    # Classifier head; the 'low_output' layer is reused by load_FireNetMobileStack.
    model.add(Flatten())
    model.add(Dense(64, activation='relu', name='low_output'))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))
    model.load_weights('../Models/Trained models/FireNetMobile.h5')
    return model
def load_FireNetMobileStack():
    """Return FireNetMobile truncated at 'low_output', exposing its 64-d feature layer."""
    full_model = load_FireNetMobile()
    return Model(inputs=full_model.input,
                 outputs=full_model.get_layer('low_output').output)
def load_LSTM():
    """Build the stacked-LSTM sequence classifier (10 steps x 640 features) and load weights."""
    timesteps, features = 10, 640
    net = Sequential()
    net.add(LSTM(100, input_shape=(timesteps, features), return_sequences=True))
    net.add(Dropout(0.5))
    net.add(LSTM(200, return_sequences=False))
    net.add(Dropout(0.5))
    net.add(Dense(100, activation='relu'))
    net.add(Dense(1, activation='sigmoid'))
    net.load_weights('../Models/Trained models/LSTM.h5')
    return net
| 33.214286 | 103 | 0.71652 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 456 | 0.08915 |
a6ec7b00bceb0ccf36922a5e75396e5957211a11 | 2,379 | py | Python | test/functional/omni_graceperiod.py | fiscalobject/uniasset | 54337e5bfae4af6b1ac453937038201835de15c4 | [
"MIT"
] | 39 | 2021-09-07T18:17:20.000Z | 2022-02-25T19:10:34.000Z | test/functional/omni_graceperiod.py | fiscalobject/uniasset | 54337e5bfae4af6b1ac453937038201835de15c4 | [
"MIT"
] | 2 | 2021-12-31T20:42:29.000Z | 2022-01-06T09:05:10.000Z | test/functional/omni_graceperiod.py | fiscalobject/uniasset | 54337e5bfae4af6b1ac453937038201835de15c4 | [
"MIT"
] | 10 | 2021-09-09T09:33:23.000Z | 2022-02-11T15:37:50.000Z | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test grace period."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class OmniGracePeriod(BitcoinTestFramework):
    """Exercise the grace-period rules for Omni feature-activation transactions."""

    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True
        self.extra_args = [['-omniactivationallowsender=any']]

    def sendactivation(self, address, coinbase_address, heights, expected):
        """Broadcast one activation per relative height and check its validity flag."""
        # Minimum client version required for the feature activation.
        min_client_version = 0
        node = self.nodes[0]
        for height in heights:
            target_block = node.getblockcount() + height + 1
            txid = node.omni_sendactivation(address, 3, target_block, min_client_version)
            node.generatetoaddress(1, coinbase_address)
            # The activation must be exactly as (in)valid as the caller expects.
            outcome = node.omni_gettransaction(txid)
            assert_equal(outcome['valid'], expected)

    def run_test(self):
        self.log.info("test grace period")

        # Mature some coinbase outputs so we have spendable funds.
        coinbase_address = self.nodes[0].getnewaddress()
        self.nodes[0].generatetoaddress(101, coinbase_address)

        # Master address used as activation sender; fund it for fees.
        address = self.nodes[0].getnewaddress()
        self.nodes[0].sendtoaddress(address, 0.1)
        self.nodes[0].generatetoaddress(1, coinbase_address)

        # Relative heights smaller than the grace period are rejected.
        self.sendactivation(address, coinbase_address, [-100, 0, 1, 2, 4], False)

        # Relative heights too far in the future are rejected.
        self.sendactivation(address, coinbase_address, [11, 288, 12289, 999999], False)

        # Relative heights within the grace period are accepted.
        activationMinBlocks = 5
        activationMaxBlocks = 10
        self.sendactivation(address, coinbase_address, [activationMinBlocks, activationMinBlocks + 1, activationMaxBlocks - 1, activationMaxBlocks], True)
# Run the functional test when this file is invoked directly.
if __name__ == '__main__':
    OmniGracePeriod().main()
| 42.482143 | 154 | 0.701976 | 1,972 | 0.82892 | 0 | 0 | 0 | 0 | 0 | 0 | 759 | 0.319042 |
a6efb681feeb49e4829de2d74d70c18a039c51a6 | 719 | py | Python | PythonEdition/03_lengthOfLongestSubstring.py | cxiaolong/Algorithm-Practice | 6f3d3f4b14a3fc170a3dc47b2ab24f8e37cb941c | [
"MIT"
] | null | null | null | PythonEdition/03_lengthOfLongestSubstring.py | cxiaolong/Algorithm-Practice | 6f3d3f4b14a3fc170a3dc47b2ab24f8e37cb941c | [
"MIT"
] | null | null | null | PythonEdition/03_lengthOfLongestSubstring.py | cxiaolong/Algorithm-Practice | 6f3d3f4b14a3fc170a3dc47b2ab24f8e37cb941c | [
"MIT"
] | null | null | null | class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
occ = set()
n = len(s)
max_length = 0
cur = 0
for i in range(n):
if i != 0:
# 左指针向右移动一格,移除一个字符
occ.remove(s[i-1])
while cur < n and s[cur] not in occ:
occ.add(s[cur])
cur += 1
max_length = max(max_length, cur-i)
return max_length
if __name__ == '__main__':
s = Solution()
s1 = "abcabcbb"
s2 = "bbbbb"
s3 = "pwwkew"
print(s.lengthOfLongestSubstring(s1))
print(s.lengthOfLongestSubstring(s2))
print(s.lengthOfLongestSubstring(s3))
print(s.lengthOfLongestSubstring("")) | 27.653846 | 54 | 0.527121 | 480 | 0.639148 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.115846 |
a6efcbaa9ab60d9cd78abbd8c9ede3ab97ef6d45 | 3,739 | py | Python | main.py | eternal-forces/profielwerkstuk | efcd5a7b796dec66b95b99a40f4c43ea5958fb8f | [
"Apache-2.0"
] | 1 | 2020-12-07T07:24:25.000Z | 2020-12-07T07:24:25.000Z | main.py | eternal-forces/profielwerkstuk | efcd5a7b796dec66b95b99a40f4c43ea5958fb8f | [
"Apache-2.0"
] | null | null | null | main.py | eternal-forces/profielwerkstuk | efcd5a7b796dec66b95b99a40f4c43ea5958fb8f | [
"Apache-2.0"
] | null | null | null | import pyglet
import os
from classes.car import Car
from classes.improvedCircuit import circuit
from classes.Vector import Vector2D
### MAIN LOOP
# config = pyglet.gl.Config(sample_buffers=1, samples=4)
window = pyglet.window.Window(resizable=False, width=1920, height=1080, vsync=True)

# NOTE: hard-coded inner/outer boundary points and checkpoint pairs for the
# "Bonk Circuit" and "Sigma Falls" tracks previously lived here as
# commented-out data; the circuit is loaded from the JSON file below instead.

# Resolve the circuit definition relative to this script's directory.
dir_path = os.path.dirname(os.path.realpath(__file__))
path = dir_path + '/' + 'circuits/SIGMA_FALLS_GA.json'

batch = pyglet.graphics.Batch()
# Ordered groups control draw order: circuit at the bottom, car above it,
# and the "topper" overlay drawn last.
topper = pyglet.graphics.OrderedGroup(3)
foreground = pyglet.graphics.OrderedGroup(2)
background = pyglet.graphics.OrderedGroup(1)
circuitLayer = pyglet.graphics.OrderedGroup(0)

# Main-loop flag; cleared by the on_close handler.
running = True

# Build the circuit and place the car at its starting point.
circ = circuit.fromJSON(path, window=[1920,1080], method="fromFullPoints")
car = Car(circ.startingPoint.x,circ.startingPoint.y)
car.position = circ.startingPoint
backGround = pyglet.sprite.Sprite(circ.background, x=0,y=0, batch=batch, group=circuitLayer)
foreGround = pyglet.sprite.Sprite(circ.backgroundTopper, x=0,y=0, batch=batch, group=topper)

# Keyboard state polled each frame by the car's update.
key = pyglet.window.key
key_handler = key.KeyStateHandler()
speed = 1.0
@window.event
def on_close():
    # Signal the update loop to stop. Without the global declaration the
    # original assignment only created an unused local, so closing the
    # window never cleared the module-level 'running' flag.
    global running
    running = False
@window.event
def on_draw():
    # Delegate all per-frame drawing to render().
    render()
def update(dt):
    """Advance the simulation by dt seconds: move the car, test checkpoints
    and wall collisions, and reset on a crash; exit once running is False."""
    # NOTE(review): pushing the key handler on every tick grows pyglet's
    # handler stack; this should ideally happen once at startup -- confirm
    # before relocating it.
    window.push_handlers(key_handler)
    if running:
        car.update(dt, key, key_handler)
        circ.carCollidedWithCheckpoint(car)
        hitbox = car.generateHitbox()
        car.mathIntersect(circ.vertices)
        # Idiomatic truth test (was '== True'); a wall hit kills and resets.
        if circ.collidedWithCar(hitbox):
            car.dead = True
            circ.reset()
            car.reset()
    else:
        pyglet.app.exit()
def render():
    # Redraw the whole scene for the current frame.
    window.clear()
    e = car.draw(batch, foreground)
    #a = car.eyes(batch, background)
    #b = circ.draw(batch, window.get_size(), background)
    #c = car.hitbox(batch, background)
    d = car.intersectEyes(batch, circ.vertices, background)
    #f = circ.generateVisualCheckpoints(batch, foreground)
    moreLines = []
    # NOTE(review): every drawing call in this loop is commented out, so the
    # loop only unpacks each wall segment's end points; presumably kept for
    # debugging -- confirm getEndPoints() has no side effects before removal.
    for line in circ.vertices:
        pointA, pointB = line.getEndPoints()
        #moreLines.append(pyglet.shapes.Line(pointA.x, pointA.y, pointB.x, pointB.y, width=5, color=(255,0,0), batch=batch, group=foreground))
        #moreLines.append(pyglet.shapes.Circle(pointA.x, pointA.y, 10, color=(0,255,0), batch=batch, group=foreground))
        #moreLines.append(pyglet.shapes.Circle(pointB.x, pointB.y, 10, color=(255,0,0),batch=batch, group=foreground))
    batch.draw()
if __name__ == "__main__":
    # Tick update() at 60 Hz and enter pyglet's event loop.
    pyglet.clock.schedule_interval(update, 1/60.0)
    pyglet.app.run()
a6f04c096ac02af94095dc0b90b868e0e2b87e2f | 2,750 | py | Python | gefen-hdsdi2dvi.py | timvideos/panacontrol | 3fbaec8d9491255735b8f685fc05bd1abc078a96 | [
"Apache-2.0"
] | null | null | null | gefen-hdsdi2dvi.py | timvideos/panacontrol | 3fbaec8d9491255735b8f685fc05bd1abc078a96 | [
"Apache-2.0"
] | 2 | 2015-01-06T02:36:27.000Z | 2015-01-20T00:06:51.000Z | gefen-hdsdi2dvi.py | timvideos/panacontrol | 3fbaec8d9491255735b8f685fc05bd1abc078a96 | [
"Apache-2.0"
] | 2 | 2015-01-05T21:20:54.000Z | 2022-01-13T00:20:48.000Z | #!/usr/bin/python
import fcntl
import struct
import sys
import termios
import time
import math
import os
class SerialPort(object):
def __init__(self, tty_name):
self.tty_name = tty_name
self.tty = None
self.old_termios = None
self.InitTTY()
def __del__(self):
if self.tty and self.old_termios:
fd = self.tty.fileno()
termios.tcsetattr(fd, termios.TCSAFLUSH, self.old_termios)
def InitTTY(self):
#self.tty = open(self.tty_name, 'rb+', 0)
#fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY | O_NONBLOCK);
#fcntl(fd, F_SETFL, 0);
ttyfd = os.open(self.tty_name, os.O_RDWR | os.O_NOCTTY | os.O_NONBLOCK)
fcntl.fcntl(ttyfd, fcntl.F_SETFL, 0)
self.tty = os.fdopen(ttyfd, 'rb+', 0)
fd = self.tty.fileno()
self.old_termios = termios.tcgetattr(fd)
new_termios = [termios.IGNPAR, # iflag
0, # oflag
termios.B115200 | termios.CS8 |
termios.CLOCAL | termios.CREAD, # cflag
0, # lflag
termios.B115200, # ispeed
termios.B115200, # ospeed
self.old_termios[6] # special characters
]
termios.tcsetattr(fd, termios.TCSANOW, new_termios)
#fcntl.ioctl(self.fd, termios.TIOCMBIS, TIOCM_RTS_str)
#control = fcntl.ioctl(fd, termios.TIOCMGET, struct.pack('I', 0))
#print '%04X' % struct.unpack('I',control)[0]
#fcntl.ioctl(fd, termios.TIOCMBIC, struct.pack('I', termios.TIOCM_RTS))
#fcntl.ioctl(fd, termios.TIOCMBIC, struct.pack('I', termios.TIOCM_DTR))
#control = fcntl.ioctl(fd, termios.TIOCMGET, struct.pack('I', 0))
#print '%04X' % struct.unpack('I',control)[0]
def ReadByte(self):
return self.tty.read(1)
def WriteByte(self, byte):
return self.tty.write(byte)
pass
def main():
input_buffer = []
try:
tty_name = sys.argv[1]
except IndexError:
tty_name = '/dev/ttyS0'
port = SerialPort(tty_name)
for i in "\r\r\r":
port.WriteByte(i)
for s in ["#FRAME 8\r","#OUTPUT 8\r"]: #LIST\r",]: #"#DEVTYPE\r","#DEVERSION\r",'#LIST\r',"#OUTPUT_8\r",]:
for i in s:
port.WriteByte(i)
print "Wrote %r\nWaiting for response!" % (s,)
response = False
while True:
r = ['']
while r[-1] != '\r':
r.append(port.ReadByte())
#sys.stdout.write(repr(r[-1]))
#sys.stdout.flush()
if "".join(r).strip() != "":
print "Response %r" % ("".join(r),)
response = True
break
else:
print "Empty"
if response:
break
if __name__ == '__main__':
main()
| 26.960784 | 108 | 0.560727 | 1,834 | 0.666909 | 0 | 0 | 0 | 0 | 0 | 0 | 861 | 0.313091 |
a6f141e9d9f97e34bca576e7230af21be66d021b | 18,603 | py | Python | main.py | 3ntr0phy/Binance_New_Coins_Scraper | 8d5dadf937f818f079aa64b3bc56381d7caff56b | [
"MIT"
] | null | null | null | main.py | 3ntr0phy/Binance_New_Coins_Scraper | 8d5dadf937f818f079aa64b3bc56381d7caff56b | [
"MIT"
] | null | null | null | main.py | 3ntr0phy/Binance_New_Coins_Scraper | 8d5dadf937f818f079aa64b3bc56381d7caff56b | [
"MIT"
] | null | null | null | import os
import re
import time
import json
import requests
import threading
import traceback
from json_manage import *
from binance_key import *
from config import *
from datetime import datetime, timedelta
import dateutil.parser as dparser
ARTICLES_URL = 'https://www.binance.com/bapi/composite/v1/public/cms/article/catalog/list/query?catalogId=48&pageNo=1&pageSize=30'
ARTICLE = 'https://www.binance.com/bapi/composite/v1/public/cms/article/detail/query?articleCode='
existing_assets = ["BTC","LTC","ETH","NEO","BNB","QTUM","EOS","SNT","BNT","GAS","BCC","USDT","HSR","OAX","DNT","MCO","ICN","ZRX","OMG","WTC","YOYO","LRC","TRX","SNGLS","STRAT","BQX","FUN","KNC","CDT","XVG","IOTA","SNM","LINK","CVC","TNT","REP","MDA","MTL","SALT","NULS","SUB","STX","MTH","ADX","ETC","ENG","ZEC","AST","GNT","DGD","BAT","DASH","POWR","BTG","REQ","XMR","EVX","VIB","ENJ","VEN","ARK","XRP","MOD","STORJ","KMD","RCN","EDO","DATA","DLT","MANA","PPT","RDN","GXS","AMB","ARN","BCPT","CND","GVT","POE","BTS","FUEL","XZC","QSP","LSK","BCD","TNB","ADA","LEND","XLM","CMT","WAVES","WABI","GTO","ICX","OST","ELF","AION","WINGS","BRD","NEBL","NAV","VIBE","LUN","TRIG","APPC","CHAT","RLC","INS","PIVX","IOST","STEEM","NANO","AE","VIA","BLZ","SYS","RPX","NCASH","POA","ONT","ZIL","STORM","XEM","WAN","WPR","QLC","GRS","CLOAK","LOOM","BCN","TUSD","ZEN","SKY","THETA","IOTX","QKC","AGI","NXS","SC","NPXS","KEY","NAS","MFT","DENT","IQ","ARDR","HOT","VET","DOCK","POLY","VTHO","ONG","PHX","HC","GO","PAX","RVN","DCR","USDC","MITH","BCHABC","BCHSV","REN","BTT","USDS","FET","TFUEL","CELR","MATIC","ATOM","PHB","ONE","FTM","BTCB","USDSB","CHZ","COS","ALGO","ERD","DOGE","BGBP","DUSK","ANKR","WIN","TUSDB","COCOS","PERL","TOMO","BUSD","BAND","BEAM","HBAR","XTZ","NGN","DGB","NKN","GBP","EUR","KAVA","RUB","UAH","ARPA","TRY","CTXC","AERGO","BCH","TROY","BRL","VITE","FTT","AUD","OGN","DREP","BULL","BEAR","ETHBULL","ETHBEAR","XRPBULL","XRPBEAR","EOSBULL","EOSBEAR","TCT","WRX","LTO","ZAR","MBL","COTI","BKRW","BNBBULL","BNBBEAR","HIVE","STPT","SOL","IDRT","CTSI","CHR","BTCUP","BTCDOWN","HNT","JST","FIO","BIDR","STMX","MDT","PNT","COMP","IRIS","MKR","SXP","SNX","DAI","ETHUP","ETHDOWN","ADAUP","ADADOWN","LINKUP","LINKDOWN","DOT","RUNE","BNBUP","BNBDOWN","XTZUP","XTZDOWN","AVA","BAL","YFI","SRM","ANT","CRV","SAND","OCEAN","NMR","LUNA","IDEX","RSR","PAXG","WNXM","TRB","EGLD","BZRX","WBTC","KSM","SUSHI","YFII","DIA","BEL","UMA","EOSUP","TRXUP","EOSDOWN","TRXDOWN","XRPUP","XRPDOWN","DOTUP","DOTDOWN","NB
S","WING","SWRV","LTCUP","LTCDOWN","CREAM","UNI","OXT","SUN","AVAX","BURGER","BAKE","FLM","SCRT","XVS","CAKE","SPARTA","UNIUP","UNIDOWN","ALPHA","ORN","UTK","NEAR","VIDT","AAVE","FIL","SXPUP","SXPDOWN","INJ","FILDOWN","FILUP","YFIUP","YFIDOWN","CTK","EASY","AUDIO","BCHUP","BCHDOWN","BOT","AXS","AKRO","HARD","KP3R","RENBTC","SLP","STRAX","UNFI","CVP","BCHA","FOR","FRONT","ROSE","HEGIC","AAVEUP","AAVEDOWN","PROM","BETH","SKL","GLM","SUSD","COVER","GHST","SUSHIUP","SUSHIDOWN","XLMUP","XLMDOWN","DF","JUV","PSG","BVND","GRT","CELO","TWT","REEF","OG","ATM","ASR","1INCH","RIF","BTCST","TRU","DEXE","CKB","FIRO","LIT","PROS","VAI","SFP","FXS","DODO","AUCTION","UFT","ACM","PHA","TVK","BADGER","FIS","OM","POND","ALICE","DEGO","BIFI","LINA"]
key_words = ['Futures', 'Isolated', 'Margin', 'Launchpool', 'Launchpad', 'Cross', 'Perpetual']
filter_List = ['body', 'type', 'catalogId', 'catalogName', 'publishDate']
file = 'announcements.json'
schedules_file = 'scheduled_order.json'
executed_trades_file = 'executed_trades.json'
executed_sells_file = 'executed_sells_trades.json'
executed_queque = []
pair_Dict = {}
cnf = load_config('config.yml')
client = load_binance_creds(r'auth.yml')
telegram_status = True
telegram_keys=[]
if os.path.exists('telegram.yml'):
telegram_keys = load_config('telegram.yml')
else: telegram_status = False
def telegram_bot_sendtext(bot_message):
send_text = 'https://api.telegram.org/bot' + str(telegram_keys['telegram_key']) + '/sendMessage?chat_id=' + str(telegram_keys['chat_id']) + '&parse_mode=Markdown&text=' + bot_message
response = requests.get(send_text)
return response.json()['result']['message_id']
def telegram_delete_message(message_id):
send_text = 'https://api.telegram.org/bot' + str(telegram_keys['telegram_key']) + '/deleteMessage?chat_id=' + str(telegram_keys['chat_id']) + '&message_id=' + str(message_id)
requests.get(send_text)
class Send_Without_Spamming():
def __init__(self):
self.id =0000
self.first = True
def send(self, message):
if telegram_status:
if self.first:
self.first = False
self.id = telegram_bot_sendtext(message)
else:
telegram_delete_message(self.id)
self.id = telegram_bot_sendtext(message)
else:
print(message)
def kill(self, pair):
if telegram_status:
telegram_delete_message(self.id)
del pair_Dict[pair]
def killSpam(pair):
try:
pair_Dict[pair].kill(pair)
except Exception:
pass
def sendSpam(pair, message):
try:
pair_Dict[pair].send(message)
except Exception:
pair_Dict[pair] = Send_Without_Spamming()
pair_Dict[pair].send(message)
tp = cnf['TRADE_OPTIONS']['TP']
sl = cnf['TRADE_OPTIONS']['SL']
tsl_mode = cnf['TRADE_OPTIONS']['ENABLE_TSL']
tsl = cnf['TRADE_OPTIONS']['TSL']
ttp = cnf['TRADE_OPTIONS']['TTP']
pairing = cnf['TRADE_OPTIONS']['PAIRING']
ammount = cnf['TRADE_OPTIONS']['QUANTITY']
frequency = cnf['TRADE_OPTIONS']['RUN_EVERY']
test_mode = cnf['TRADE_OPTIONS']['TEST']
delay_mode = cnf['TRADE_OPTIONS']['CONSIDER_DELAY']
percentage = cnf['TRADE_OPTIONS']['PERCENTAGE']
existing_assets.remove(pairing)
regex = '\S{2,6}?/'+ pairing
def sendmsg(message):
print(message)
if telegram_status:
threading.Thread(target=telegram_bot_sendtext, args=(message,)).start()
else:
print(message)
def ping_binance():
sum = 0
for i in range(3):
time_before = datetime.timestamp(datetime.now())
client.ping()
time_after = datetime.timestamp(datetime.now())
sum += (time_after - time_before)
return (sum / 3)
####announcements
def get_Announcements():
unfiltered_Articles = requests.get(ARTICLES_URL).json()['data']['articles']
articles = []
for article in unfiltered_Articles:
flag = True
for word in key_words:
if word in article['title']:
flag = False
if flag:
articles.append(article)
for article in articles:
for undesired_Data in filter_List:
if undesired_Data in article:
del article[undesired_Data]
return articles
def get_Pair_and_DateTime(ARTICLE_CODE):
new_Coin = requests.get(ARTICLE+ARTICLE_CODE).json()['data']['seoDesc']
try:
datetime = dparser.parse(new_Coin, fuzzy=True, ignoretz=True)
raw_pairs = re.findall(regex, new_Coin)
pairs = []
for pair in raw_pairs:
present= False
for j in existing_assets:
if j in pair:
present = True
break
if present == False:
pairs.append(pair.replace('/', ''))
return [datetime, pairs]
except Exception as e:
print(e)
return None
####orders
def get_price(coin):
return client.get_ticker(symbol=coin)['lastPrice']
def create_order(pair, usdt_to_spend, action):
try:
order = client.create_order(
symbol = pair,
side = action,
type = 'MARKET',
quoteOrderQty = usdt_to_spend,
recvWindow = "10000"
)
except Exception as exception:
wrong = traceback.format_exc(limit=None, chain=True)
sendmsg(wrong)
return order
def executed_orders():
global executed_queque
while True:
if len(executed_queque) > 0:
if os.path.exists(executed_trades_file):
existing_file = load_json(executed_trades_file)
existing_file += executed_queque
else:
existing_file = executed_queque
save_json(executed_trades_file, existing_file)
executed_queque = []
time.sleep(0.1)
def schedule_Order(time_And_Pair, announcement):
try:
scheduled_order = {'time':time_And_Pair[0].strftime("%Y-%m-%d %H:%M:%S"), 'pairs':time_And_Pair[1]}
sendmsg(f'Scheduled an order for: {time_And_Pair[1]} at: {time_And_Pair[0]}')
update_json(schedules_file, scheduled_order)
update_json(file, announcement)
except Exception as exception:
wrong = traceback.format_exc(limit=None, chain=True)
sendmsg(wrong)
def place_Order_On_Time(time_till_live, pair, threads):
delay = 0
global executed_queque
try:
if delay_mode:
delay = (ping_binance() * percentage)
time_till_live = (time_till_live - timedelta(seconds = delay))
time_to_wait = ((time_till_live - datetime.utcnow()).total_seconds() - 10)
time.sleep(time_to_wait)
order = {}
if test_mode:
price = get_price(pair)
while True:
if (datetime.utcnow() - timedelta(seconds = 1) <= time_till_live <= datetime.utcnow() - timedelta(seconds = delay * 0.9)):
order = {
"symbol": pair,
"transactTime": datetime.timestamp(datetime.now()),
"price": price,
"origQty": ammount/float(price),
"executedQty": ammount/float(price),
"cummulativeQuoteQty": ammount,
"status": "FILLED",
"type": "MARKET",
"side": "BUY"
}
break
else:
while True:
if (datetime.utcnow() - timedelta(seconds = 1) <= time_till_live <= datetime.utcnow() - timedelta(seconds = delay * 0.9)):
order = create_order(pair, ammount, 'BUY')
break
order['tp'] = tp
order['sl'] = sl
amount = order['executedQty']
price =order['price']
if price <= 0.00001:
price = get_price(pair)
sendmsg(f'Bougth {amount} of {pair} at {price}')
executed_queque.append(order)
except Exception as exception:
wrong = traceback.format_exc(limit=None, chain=True)
sendmsg(wrong)
######
def check_Schedules():
try:
if os.path.exists(schedules_file):
unfiltered_schedules = load_json(schedules_file)
schedules = []
for schedule in unfiltered_schedules:
flag = True
datetime = dparser.parse(schedule['time'], fuzzy=True, ignoretz=True)
if datetime < datetime.utcnow():
flag = False
if flag:
schedules.append(schedule)
for pair in schedule['pairs']:
threading.Thread(target=place_Order_On_Time, args=(datetime, pair, threading.active_count() + 1)).start()
sendmsg(f'Found scheduled order for: {pair} adding it to new thread')
save_json(schedules_file, schedules)
except Exception as exception:
wrong = traceback.format_exc(limit=None, chain=True)
sendmsg(wrong)
def sell():
while True:
try:
flag_update = False
not_sold_orders = []
order = []
if os.path.exists(executed_trades_file):
order = load_json(executed_trades_file)
if len(order) > 0:
for coin in list(order):
# store some necesarry trade info for a sell
stored_price = float(coin['fills'][0]['price'])
coin_tp = coin['tp']
coin_sl = coin['sl']
volume = round(float(coin['executedQty']) - float(coin['fills'][0]['commission']),2)
symbol = coin['symbol']
last_price = get_price(symbol)
# update stop loss and take profit values if threshold is reached
if float(last_price) > stored_price + (stored_price * float(coin_tp) /100) and tsl_mode:
# increase as absolute value for TP
new_tp = float(last_price) + (float(last_price)*ttp /100)
# convert back into % difference from when the coin was bought
new_tp = float( (new_tp - stored_price) / stored_price*100)
# same deal as above, only applied to trailing SL
new_sl = float(last_price) - (float(last_price)*tsl /100)
new_sl = float((new_sl - stored_price) / stored_price*100)
# new values to be added to the json file
coin['tp'] = new_tp
coin['sl'] = new_sl
not_sold_orders.append(coin)
flag_update = True
threading.Thread(target=sendSpam, args=(symbol, f'Updated tp: {round(new_tp, 3)} and sl: {round(new_sl, 3)} for: {symbol}')).start()
# close trade if tsl is reached or trail option is not enabled
elif float(last_price) < stored_price - (stored_price*sl /100) or float(last_price) > stored_price + (stored_price*tp /100) and not tsl_mode:
try:
# sell for real if test mode is set to false
if not test_mode:
sell = client.create_order(symbol = symbol, side = 'SELL', type = 'MARKET', quantity = volume, recvWindow = "10000")
sendmsg(f"Sold {symbol} at {(float(last_price) - stored_price) / float(stored_price)*100}")
killSpam(symbol)
flag_update = True
# remove order from json file by not adding it
except Exception as exception:
wrong = traceback.format_exc(limit=None, chain=True)
sendmsg(wrong)
# store sold trades data
else:
if os.path.exists(executed_sells_file):
sold_coins = load_json(executed_sells_file)
else:
sold_coins = []
if not test_mode:
sold_coins.append(sell)
else:
sell = {
'symbol':symbol,
'price':last_price,
'volume':volume,
'time':datetime.timestamp(datetime.now()),
'profit': float(last_price) - stored_price,
'relative_profit': round((float(last_price) - stored_price) / stored_price*100, 3)
}
sold_coins.append(sell)
save_json(executed_sells_file, sold_coins)
else:
not_sold_orders.append(coin)
if flag_update: save_json(executed_trades_file, not_sold_orders)
time.sleep(0.2)
except Exception as exception:
wrong = traceback.format_exc(limit=None, chain=True)
sendmsg(wrong)
def main():
if os.path.exists(file):
existing_Anouncements = load_json(file)
else:
existing_Anouncements = get_Announcements()
for announcement in existing_Anouncements:
time_And_Pair = get_Pair_and_DateTime(announcement['code'])
if time_And_Pair is not None:
if time_And_Pair[0] >= datetime.utcnow() and len(time_And_Pair[1]) > 0:
schedule_Order(time_And_Pair, announcement)
sendmsg(f'Found new announcement preparing schedule for: {time_And_Pair[1]}')
save_json(file, existing_Anouncements)
threading.Thread(target=check_Schedules, args=()).start()
threading.Thread(target=sell, args=()).start()
threading.Thread(target=executed_orders, args=()).start()
while True:
new_Anouncements = get_Announcements()
for announcement in new_Anouncements:
if not announcement in existing_Anouncements:
time_And_Pair = get_Pair_and_DateTime(announcement['code'])
if time_And_Pair is not None:
if time_And_Pair[0] >= datetime.utcnow() and len(time_And_Pair[1]) > 0 :
schedule_Order(time_And_Pair, announcement)
for pair in time_And_Pair[1]:
threading.Thread(target=place_Order_On_Time, args=(time_And_Pair[0], pair, threading.active_count() + 1)).start()
sendmsg(f'Found new announcement preparing schedule for {pair}')
existing_Anouncements = load_json(file)
threading.Thread(target=sendSpam, args=("sleep", f'Done checking announcements going to sleep for: {frequency} seconds&disable_notification=true')).start()
threading.Thread(target=sendSpam, args=("ping", f'Current Average delay: {ping_binance()}&disable_notification=true')).start()
time.sleep(frequency)
#TODO:
# posible integration with AWS lambda ping it time before the coin is listed so it can place a limit order a little bti more than opening price
if __name__ == '__main__':
try:
if not test_mode:
sendmsg('Warning runnig it on live mode')
sendmsg('starting')
sendmsg(f'Aproximate delay: {ping_binance()}')
main()
except Exception as exception:
wrong = traceback.format_exc(limit=None, chain=True)
sendmsg(wrong)
#debuggin order
#{
# "time": "2021-09-24 10:00:00",
# "pairs": [
# "DFUSDT",
# "SYSUSDT"
# ]
#} | 40.975771 | 2,739 | 0.563619 | 593 | 0.031877 | 0 | 0 | 0 | 0 | 0 | 0 | 5,165 | 0.277643 |
a6f54e48526554985a473b45ed63c07b4e9862ba | 522 | py | Python | misc/osutils.py | KartikaySrivadtava/dl-for-har-ea1e9babb2b178cc338dbc72db974325c193c781 | f4fa436000a46df80ec083c8e3692cd21787e5b3 | [
"MIT"
] | null | null | null | misc/osutils.py | KartikaySrivadtava/dl-for-har-ea1e9babb2b178cc338dbc72db974325c193c781 | f4fa436000a46df80ec083c8e3692cd21787e5b3 | [
"MIT"
] | null | null | null | misc/osutils.py | KartikaySrivadtava/dl-for-har-ea1e9babb2b178cc338dbc72db974325c193c781 | f4fa436000a46df80ec083c8e3692cd21787e5b3 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import os
import errno
import numpy as np
def mkdir_if_missing(dir_path):
try:
os.makedirs(dir_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def get_free_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
print('Assigning workflow to GPU: ' + str(np.argmax(memory_available)))
return np.argmax(memory_available)
| 24.857143 | 80 | 0.670498 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.17433 |
a6f626e57a1d82eddc8ff7025f8974f6515c91cf | 113 | py | Python | docs/constants.py | djangothon/django-mptt-docs | 802aebdbd3181ec006f4711b0a03d3d2a00d6af9 | [
"BSD-3-Clause"
] | 1 | 2017-09-02T20:06:41.000Z | 2017-09-02T20:06:41.000Z | docs/constants.py | djangothon/django-mptt-docs | 802aebdbd3181ec006f4711b0a03d3d2a00d6af9 | [
"BSD-3-Clause"
] | null | null | null | docs/constants.py | djangothon/django-mptt-docs | 802aebdbd3181ec006f4711b0a03d3d2a00d6af9 | [
"BSD-3-Clause"
] | null | null | null | """HackerEarth docs constants"""
DOC_URL_DICT = {
# 'doc_name': 'doc_url1',
# 'doc_name': 'doc_url2',
}
| 16.142857 | 32 | 0.60177 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 82 | 0.725664 |
a6f7219a8f54935a9b88e311e47f1df9fac18b53 | 7,618 | py | Python | main.py | wsuzume/proj | 127c9793b8fd85024aa99840bcf60b6d781702e4 | [
"MIT"
] | null | null | null | main.py | wsuzume/proj | 127c9793b8fd85024aa99840bcf60b6d781702e4 | [
"MIT"
] | null | null | null | main.py | wsuzume/proj | 127c9793b8fd85024aa99840bcf60b6d781702e4 | [
"MIT"
] | null | null | null | import os
import sys
import json
import argparse
script_content = """\
#!/bin/sh
gpython=${PYENV_ROOT}/versions/$(pyenv global)/bin/python
gproj=${PYENV_ROOT}/versions/$(pyenv global)/bin/proj
if [[ $1 =~ ^[^\-] ]] ; then
result=$(exec $gpython $gproj --echo $1)
exit_code=$?
if test $exit_code -eq 0 ; then
if test $# -eq 1 ; then
unset PROJ_ARGS
else
PROJ_ARGS=${@:2}
fi
# deactivate if the end script is setted
deactivate_script=$(exec $gpython $gproj --deactivate)
deactivate_exit_code=$?
if test $deactivate_exit_code -eq 0 ; then
source $deactivate_script
fi
# change directory
cd $result
echo "Project:" `pwd`
# activate if the start script is setted
activate_script=$(exec $gpython $gproj --activate)
activate_exit_code=$?
if test $activate_exit_code -eq 0 ; then
source $activate_script
fi
elif test $exit_code -eq 1 ; then
echo $result
fi
elif [ $# -eq 1 ] && [ "$1" = "--activate" ] || [ "$1" = "--deactivate" ] ; then
specified_script=$(exec $gpython $gproj $1)
exit_code=$?
if test $exit_code -eq 0 ; then
source $specified_script
fi
else
(exec $gpython $gproj "$@")
fi
"""
projrc_content ="""\
alias proj='source ~/.config/proj/proj'
"""
conf_dir = os.path.expanduser('~/.config/proj')
proj_script = os.path.join(conf_dir, 'proj')
projrc = os.path.join(conf_dir, 'projrc')
project_settings = os.path.join(conf_dir, 'projects.json')
local_conf_dir = os.path.expanduser('./.proj')
def check_config():
    """Make sure ~/.config/proj exists and holds the bootstrap files.

    Creates the configuration directory on first run, then writes the
    shell wrapper (``proj``) and the rc snippet (``projrc``) whenever
    either is missing.  Existing files are never overwritten.
    """
    if not os.path.exists(conf_dir):
        print('proj config directory does not exists.')
        print(f'Creating at \'{conf_dir}\'')
        os.makedirs(conf_dir)
    # (destination path, file body) pairs that must exist inside conf_dir.
    bootstrap_files = (
        (proj_script, script_content),
        (projrc, projrc_content),
    )
    for path, content in bootstrap_files:
        if os.path.exists(path):
            continue
        with open(path, 'w') as f:
            f.write(content)
def load_config():
    """Return the ``{project_name: directory}`` mapping from projects.json.

    An empty mapping is returned when no project has been registered yet,
    i.e. the settings file does not exist.
    """
    try:
        with open(project_settings, 'r') as f:
            return json.load(f)
    except FileNotFoundError:
        return {}
def _local_hook_script(key):
    """Return the path of the local hook script registered under *key*.

    *key* is ``'start'`` (run when entering the project) or ``'end'``
    (run when leaving it).  The script name is read from
    ``./.proj/config.json`` and the file itself is looked up under
    ``./.proj/scripts/``.  Returns None when no usable script is set.
    """
    conf_file = os.path.join(local_conf_dir, 'config.json')
    if not os.path.exists(conf_file):
        return None
    with open(conf_file, 'r') as f:
        local_conf = json.load(f)
    script_name = local_conf.get(key, '')
    if not script_name:
        return None
    script_file = os.path.join(os.path.abspath(local_conf_dir), 'scripts', script_name)
    if not os.path.exists(script_file):
        return None
    return script_file


def main():
    """Entry point of the proj CLI.

    Parses the command-line options, dispatches to the matching action and
    terminates with an explicit exit status (0 on success, 1 on error),
    which the shell wrapper inspects.  With no recognised option the
    registered projects are listed.
    """
    check_config()
    projects = load_config()

    parser = argparse.ArgumentParser()
    # echo project path
    parser.add_argument('--echo', nargs='?', default=None, const='', metavar='project_name')
    # register current directory as [project_name]
    parser.add_argument('--init', nargs='?', default=None, const='', metavar='project_name')
    # remove registered project from list
    parser.add_argument('--remove', nargs='?', default=None, const='', metavar='project_name')

    # register startup script for the current project
    ## startup script is executed when you enter the project by proj command
    parser.add_argument('--startwith', nargs='?', default=None, const='', metavar='file_name')
    parser.add_argument('--echo-startwith', nargs='?', default=None, const='', metavar='file_name')

    # register leaving script for the current project
    ## leaving script is executed when you leave the project by proj command
    parser.add_argument('--endwith', nargs='?', default=None, const='', metavar='file_name')
    parser.add_argument('--echo-endwith', nargs='?', default=None, const='', metavar='file_name')

    # set alias
    ## local aliases are activated/unaliased automatically when entering or
    ## leaving the project ('.proj/aliases'); global aliases are always
    ## active ('~/.config/proj/aliases')
    parser.add_argument('--alias')
    # remove alias
    parser.add_argument('--unalias')

    # activate local project settings (local aliases + --startwith script)
    parser.add_argument('--activate', action='store_true')
    # deactivate local project settings (--endwith script + unalias)
    parser.add_argument('--deactivate', action='store_true')

    # backup/restore local settings to/from the --set-origin directory
    parser.add_argument('--backup')
    parser.add_argument('--restore')
    parser.add_argument('--set-origin')
    parser.add_argument('--remote-backup')
    # show config and status of the project
    parser.add_argument('--show')
    # TODO: --global flag to edit the global configuration

    args = parser.parse_args()

    # --echo: print the directory registered for the project; the shell
    # wrapper consumes this output and cd's into it.
    if args.echo is not None:
        if args.echo in projects:
            print(projects[args.echo])
            sys.exit(0)
        print(f'Error: project \'{args.echo}\' is not registered.')
        sys.exit(1)

    # --activate / --deactivate: print the hook script to be sourced by the
    # shell wrapper; exit status 1 tells the wrapper there is nothing to run.
    if args.activate:
        script_file = _local_hook_script('start')
        if script_file is not None:
            print(script_file)
            sys.exit(0)
        sys.exit(1)

    if args.deactivate:
        script_file = _local_hook_script('end')
        if script_file is not None:
            print(script_file)
            sys.exit(0)
        sys.exit(1)

    # --init: register the current directory as a new project.
    if args.init is not None:
        if os.path.exists(os.path.join(local_conf_dir, 'config.json')):
            print('already registered')
            sys.exit(0)
        if args.init == '':
            print('Error: project name required.')
            sys.exit(1)
        if args.init in projects:
            print(f'Error: project \'{args.init}\' is already registered.')
            print(f'project directory -> {projects[args.init]}')
            sys.exit(1)
        print('OK:', os.getcwd())
        projects[args.init] = os.getcwd()
        with open(project_settings, 'w') as f:
            json.dump(projects, f, indent=2)
        # Bug fix: ./.proj did not necessarily exist yet, which made the
        # open() below fail with FileNotFoundError on a fresh project.
        os.makedirs(local_conf_dir, exist_ok=True)
        with open(os.path.join(local_conf_dir, 'config.json'), 'w') as f:
            json.dump({'start': '', 'end': ''}, f, indent=2)
        sys.exit(0)

    # --remove: forget a registered project (its files are left untouched).
    if args.remove is not None:
        if args.remove in projects:
            path = projects.pop(args.remove)
            with open(project_settings, 'w') as f:
                json.dump(projects, f, indent=2)
            print('removed:', args.remove, path)
            sys.exit(0)
        # Bug fix: the original message interpolated args.echo (always None
        # on this path) instead of the name passed to --remove.
        print(f'Error: project \'{args.remove}\' is not registered.')
        sys.exit(1)

    # TODO: --startwith/--endwith, --alias/--unalias, --backup/--restore,
    # --set-origin, --remote-backup and --show are declared but not
    # implemented yet.

    # Default action: list all registered projects.
    for name, directory in projects.items():
        print(name, ':', directory)
    sys.exit(0)
# Run the CLI only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| 34.627273 | 99 | 0.606327 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,534 | 0.463536 |
a6f88fc52c3755838f73b21f8411977db57b5ed5 | 1,114 | py | Python | tools_box/tools_box/report/helpdesk_report/helpdesk_report.py | maisonarmani/Tools_Box | 4f8cc3a0deac1be50a3ac80758a10608faf58454 | [
"MIT"
] | 4 | 2017-09-25T23:34:08.000Z | 2020-07-17T23:52:26.000Z | tools_box/tools_box/report/helpdesk_report/helpdesk_report.py | maisonarmani/Tools_Box | 4f8cc3a0deac1be50a3ac80758a10608faf58454 | [
"MIT"
] | null | null | null | tools_box/tools_box/report/helpdesk_report/helpdesk_report.py | maisonarmani/Tools_Box | 4f8cc3a0deac1be50a3ac80758a10608faf58454 | [
"MIT"
] | 5 | 2017-06-02T01:58:32.000Z | 2022-02-22T16:59:01.000Z | # Copyright (c) 2013, bobzz.zone@gmail.com and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
def execute(filters=None):
#S/N Date Doc id Request Type Subject Raised By Status Assigned to
columns, data = ["Date:Datetime:200","Doc id:Link/Helpdesk Ticket:200","Request Type:Link/Request Type:150","Subject:Data:200","Raised By::150","Status:Data:150","Assigned to:Link/Employee:200"], []
request=""
status=""
raised=""
if filters.get("request"):
request = """ and request_type = '{}' """.format(filters.get("request"))
if filters.get("status"):
status = """ and status = '{}' """.format(filters.get("status"))
if filters.get("raised"):
raised = """ and raised_by = '{}' """.format(filters.get("raised"))
data=frappe.db.sql("""select addtime(opening_date,opening_time) as "date",name,request_type,subject,raised_by_name,status,assigned_to_name
from `tabHelpdesk Ticket`
where (opening_date between "{}" and "{}") {} {} {} """.format(filters.get("from"),filters.get("to"),request,status,raised),as_list=1)
return columns, data
| 48.434783 | 199 | 0.708259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 697 | 0.625673 |
a6fa7ec8acfd9ea1e153741c57cc223290236b4d | 5,516 | py | Python | relay/__init__.py | ldesgoui/relay | 263c3245052c501b0285be9ebab3dcb58ca8bfec | [
"MIT"
] | null | null | null | relay/__init__.py | ldesgoui/relay | 263c3245052c501b0285be9ebab3dcb58ca8bfec | [
"MIT"
] | null | null | null | relay/__init__.py | ldesgoui/relay | 263c3245052c501b0285be9ebab3dcb58ca8bfec | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Relay
~~~~~
Relay is an irc micro-framework that smells too much like a web framework
Copyright (c) 2015, ldesgoui <relay at ldesgoui dot xyz>
See LICENSE for more informations.
"""
from collections import defaultdict
import logging
import os
import socket
from . import constants
from . import parse
class Relay(object):
DEFAULT_ROUTE = ":{sender} {command} {args}"
DEFAULT_CONFIG = dict(user="", port=6667)
def __init__(self, name):
self.handlers = defaultdict(set)
self.client = dict(Relay.DEFAULT_CONFIG)
self.logger = logging.getLogger(name)
self.state = defaultdict(dict)
def __repr__(self):
classname = self.__class__.__name__
try:
client = "{nick}!{user}@{host}:{port}".format(**self.client)
except KeyError:
client = "not fully configured"
routes = len(self.handlers)
handlers = sum(map(len, self.handlers.values()))
return "<{} {}, {} routes, {} handlers>".format(
classname, client, routes, handlers)
__str__ = __repr__
def handler(self, arg):
""" @register decorator """
def decorator(func, route=arg):
func.relay_route = route
self.register(func)
return func
if callable(arg):
""" decorator was not given arguments, it takes DEFAULT_ROUTE """
return decorator(func=arg, route=Relay.DEFAULT_ROUTE)
return decorator
def register(self, func, route=None):
"""
Used to register a function as a handler
This function's arguments should match the routes's results
or at least catch *args and **kwargs.
This cannot be used with bound methods, as of yet.
"""
if route is not None and hasattr(func, "relay_route"):
self.logger.warn("Overriding route for `{}`: from `{}` to `{}`"
.format(func, func.relay_route, route))
if route is None:
if not hasattr(func, "relay_route"):
raise AttributeError("Cannot register a handler with no route")
else:
route = func.relay_route
self.logger.debug("Registering handle: `{route}` -> `{func}`"
.format(route=route, func=func.__qualname__))
self.handlers[route].add(func)
def _from_env(self, values):
if values is True:
values = ["host", "port", "user", "nick", "password"]
if not isinstance(values, dict):
values = {key: "RELAY_{}".format(key.upper()) for key in values}
config = dict()
for key, env_key in values.items():
val = os.getenv(env_key, None)
if not val:
continue
config[key] = val
self.config(**config)
def config(self, **options):
for key, val in options.items():
if key == 'from_env':
self._from_env(val)
continue
if key not in ["host", "port", "user", "nick", "password"]:
continue
self.client[key] = val
return self
def run(self, **options):
"""
The client in itself
TODO: make this better, faster, stronger :)
"""
if 'host' not in self.client or 'nick' not in self.client:
raise ValueError("Cannot run, missing configuration.")
self.logger.info("Connecting")
sock = socket.socket()
sock.connect((self.client['host'], self.client['port']))
self.logger.info("Connected")
def send(message):
sock.send(("{message}\r\n".format(message=message)).encode())
self.logger.debug("Send: {message}".format(message=message))
self.send = send
send("NICK {nick}".format(**self.client))
user = self.client.get('user', None) or self.client['nick']
send("USER {0} {0} {0} :{0}".format(user))
if 'password' in self.client:
send("PASS {password}".format(**self.client))
data = sock.makefile()
while 42:
for line in data:
line = line.strip()
if not line:
continue
self.logger.debug("Recv: {message}".format(message=line))
for route, handlers in self.handlers.items():
try:
args, kwargs = parse.match(route, line)
except ValueError:
continue
for handler in handlers:
outs = handler(*args, state=self.state[handler], **kwargs)
for out in outs or []:
send(out.format(*args, **kwargs))
def _register(route):
def decorator(func):
func.relay_route = route
return func
return decorator
@_register("PING :{ball}")
def auto_pong(*args, **kwargs):
""" answer to PING requests """
yield "PONG :{ball}"
def auto_join(channels, nick=None):
    """Build a handler that keeps the client joined to *channels*.

    The handler rejoins all *channels* on end-of-MOTD (numeric 376)
    and, when *nick* is given, rejoins a channel the client was just
    kicked from.

    Backward-compatible fix: the original referenced an undefined
    ``self`` (``self.config['nick']``) to learn its own nick, which
    raised NameError on every KICK line.  The nick is now an explicit
    optional argument; without it the KICK rejoin is simply skipped.
    """
    @_register(Relay.DEFAULT_ROUTE)
    def auto_join_closure(*args, **kwargs):
        """Always re-join the configured channels."""
        # (The original built the docstring with ``"...".format()``,
        # which makes it a discarded expression, not a docstring.)
        command = kwargs['command']
        if command == '376':
            # End of MOTD: the server is ready for JOINs.
            yield "JOIN {}".format(", ".join(channels))
        parts = kwargs['arguments'].split(' ')
        if command == 'KICK' and nick is not None and nick in parts[1]:
            # We were the kick target: immediately rejoin that channel.
            yield "JOIN {}".format(parts[0])
    return auto_join_closure
| 32.069767 | 82 | 0.553662 | 4,470 | 0.81037 | 561 | 0.101704 | 530 | 0.096084 | 0 | 0 | 1,350 | 0.244743 |
a6faa0e8a2b4027e6b39c35c091009b368bde331 | 1,192 | py | Python | src/views/panels/grade.py | abelfodil/inf1900-grader | bc3522eb8bf03ce08914c6988e43cdff919fe352 | [
"MIT"
] | 1 | 2022-03-22T07:10:52.000Z | 2022-03-22T07:10:52.000Z | src/views/panels/grade.py | abelfodil/inf1900-grader | bc3522eb8bf03ce08914c6988e43cdff919fe352 | [
"MIT"
] | 11 | 2019-01-05T02:07:29.000Z | 2021-04-21T06:17:31.000Z | src/views/panels/grade.py | abelfodil/inf1900-grader | bc3522eb8bf03ce08914c6988e43cdff919fe352 | [
"MIT"
] | 5 | 2018-12-24T17:56:18.000Z | 2021-03-13T05:44:46.000Z | from urwid import Edit, IntEdit, LineBox
from src.models.grade import AssignmentType, grade
from src.models.state import state
from src.views.widgets.form import Form
from src.views.widgets.radio import RadioGroup
class GradePanel(Form):
    """Form panel that collects the parameters needed to grade an
    assignment.  Every field is pre-populated from the persisted
    application ``state``; submitting the form invokes ``grade``."""

    def __init__(self):
        def boxed(caption, initial):
            # LineBox-framed Edit widget with a styled header caption.
            return LineBox(Edit(("header", caption + "\n\n"), initial))

        grading_directory = boxed("Grading directory", state.grading_directory)
        subdirectories = boxed("Subdirectories", state.subdirectories)
        assignment_type = RadioGroup("Assignment type", AssignmentType, state.assignment_type)
        deadline = boxed("Deadline", state.deadline)
        assignment_sname = boxed("Assignment short name", state.assignment_sname)
        assignment_lname = boxed("Assignment long name", state.assignment_lname)

        # Three grid rows of two widgets each.
        grid_elements = [
            {"grading_directory": grading_directory, "subdirectories": subdirectories},
            {"assignment_type": assignment_type, "deadline": deadline},
            {"assignment_sname": assignment_sname, "assignment_lname": assignment_lname},
        ]
        super().__init__("Grade", grid_elements, grade)
| 45.846154 | 105 | 0.707215 | 974 | 0.817114 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.228188 |
a6fb4989bf3b2c02f28bef63f1a4592c2f3ef589 | 751 | py | Python | Python3/no53_Maximum_Subarray.py | mistwave/leetcode | 38eb0556f865fd06f517ca45253d00aaca39d70b | [
"MIT"
] | null | null | null | Python3/no53_Maximum_Subarray.py | mistwave/leetcode | 38eb0556f865fd06f517ca45253d00aaca39d70b | [
"MIT"
] | null | null | null | Python3/no53_Maximum_Subarray.py | mistwave/leetcode | 38eb0556f865fd06f517ca45253d00aaca39d70b | [
"MIT"
] | null | null | null | class Solution(object):
def maxSubArray1(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
this = maxsum = 0
for i in range(len(nums)):
this += nums[i]
if this > maxsum:
maxsum = this
elif this < 0:
this = 0
return maxsum if maxsum != 0 else max(nums)
def maxSubArray(self, nums):
"""
http://alfred-sun.github.io/blog/2015/03/11/ten-basic-algorithms-for-programmers/
:type nums: List[int]
:rtype: int
"""
start = maxsum = nums[0]
for num in nums[1:]:
start = max(num, start + num)
maxsum = max(maxsum, start)
return maxsum
| 25.896552 | 89 | 0.480692 | 750 | 0.998668 | 0 | 0 | 0 | 0 | 0 | 0 | 220 | 0.292943 |
a6fe29f9e05c9d7234744039de99dfa3803cb44a | 7,536 | py | Python | software/stdMac_python/main.py | ATMakersOrg/IKeysAdapter | 49da2b0f6a4399d322f998337d1d40fe1db2c725 | [
"MIT"
] | null | null | null | software/stdMac_python/main.py | ATMakersOrg/IKeysAdapter | 49da2b0f6a4399d322f998337d1d40fe1db2c725 | [
"MIT"
] | null | null | null | software/stdMac_python/main.py | ATMakersOrg/IKeysAdapter | 49da2b0f6a4399d322f998337d1d40fe1db2c725 | [
"MIT"
] | null | null | null | # Trinket IO demo
# Welcome to CircuitPython 3.1.1 :)
import board
import adafruit_dotstar as dotstar
import time
import busio
import struct
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.mouse import Mouse
# Overlay tables, the shortcuts table and the MOUSE_*/Keycode constants
# all come from this star import — TODO confirm exact names exported.
from qwertyMAC import *

# Active overlay (key map) table; the main loop switches it at runtime.
overlay = webaccess
# One pixel connected internally!
dot = dotstar.DotStar(board.APA102_SCK, board.APA102_MOSI, 1, brightness=0.7)
# Serial link to the external key-panel controller.
uart = busio.UART(board.TX, board.RX, baudrate=115200)
# Commented-out LED test loop, kept for bench debugging:
#while True:
#    for led in range(0,9):
#        onMsg = struct.pack('bbb',2,led,3)
#        uart.write(onMsg)
#        time.sleep(.3)
#        offMsg = struct.pack('bbb',2,led,0)
#        uart.write(offMsg)
#        time.sleep(.2)
kbd = Keyboard()
mouse = Mouse()
# Keycode currently held down over HID (0 = nothing held).
lastKey = 0
######################### MAIN LOOP ##############################
CLIENT = True
# Status colour on the on-board DotStar; overridden below when CLIENT.
dot[0] = (0,0,60)
if CLIENT:
    dot[0] = (255,0,255)
# Pause between consecutive UART writes (seconds).
WRITE_DELAY=.005
# One-byte poll request — presumably understood by the panel firmware;
# verify against the panel side.
POLL = struct.pack('b',1)
# Reusable 2-byte receive buffer for cell indices.
cellData = bytearray(2)
i = 0
# Latched ("sticky") modifier states, mirrored as LEDs on the panel.
shiftState= False
altState = False
ctrlState = False
commandState = False
dragState = False
# Superseded by pressKey() below; kept commented for reference:
#def updateToggles(key, pressed):
#    print(("TOGGLE:",key, pressed))
#    if (key == Keycode.LEFT_SHIFT):
#        shiftState = pressed
#        if (pressed):
#            msg = struct.pack('bbb',1,1)
#            uart.write(msg)
#        else:
#            msg = struct.pack('bbb',1,0)
#            uart.write(msg)
def pressKey(newKey):
    """Send *newKey* over USB HID, implementing "sticky" modifier keys.

    A modifier key (shift/ctrl/alt/command) toggles a latched state and
    updates its LED on the remote panel (UART message: 2, led, value).
    A non-modifier key is pressed together with every latched modifier,
    and each consumed modifier is un-latched with its LED turned off.

    LED indices: shift=1, alt=2, ctrl=5, command=6.
    """
    global kbd, shiftState, ctrlState, altState, commandState, uart
    if (newKey == Keycode.LEFT_SHIFT):
        shiftState = not shiftState
        uart.write(struct.pack('bbb', 2, 1, 1 if shiftState else 0))
        kbd.press(newKey)
        return
    if (newKey == Keycode.CONTROL):
        ctrlState = not ctrlState
        uart.write(struct.pack('bbb', 2, 5, 1 if ctrlState else 0))
        kbd.press(newKey)
        return
    if (newKey == Keycode.LEFT_ALT):
        altState = not altState
        uart.write(struct.pack('bbb', 2, 2, 1 if altState else 0))
        kbd.press(newKey)
        return
    if (newKey == Keycode.COMMAND):
        commandState = not commandState
        uart.write(struct.pack('bbb', 2, 6, 1 if commandState else 0))
        kbd.press(newKey)
        return
    # Non-modifier key: combine with all latched modifiers, then clear
    # each latch and switch its LED off.
    keys = [newKey]
    if (shiftState):
        print("Adding Shift")
        keys.append(Keycode.LEFT_SHIFT)
        shiftState = False
        uart.write(struct.pack('bbb', 2, 1, 0))
    if (altState):
        print("Adding ALT")
        keys.append(Keycode.LEFT_ALT)
        altState = False
        uart.write(struct.pack('bbb', 2, 2, 0))
    if (ctrlState):
        print("Adding CONTROl")
        keys.append(Keycode.CONTROL)
        ctrlState = False
        # BUG FIX: this branch originally sent LED index 1 (shift's
        # LED), leaving the ctrl LED (index 5) lit after the combo —
        # the toggle branch above and the main loop both use index 5.
        uart.write(struct.pack('bbb', 2, 5, 0))
    if (commandState):
        print("Adding COMMAND")
        keys.append(Keycode.COMMAND)
        commandState = False
        uart.write(struct.pack('bbb', 2, 6, 0))
    kbd.press(*keys)
overlayId = 0
# Main poll loop: ask the key panel which cells are pressed, translate
# each cell through the active overlay table, and emit HID events.
while True:
    time.sleep(0.025) # make bigger to slow down
    uart.reset_input_buffer()
    # Poll request, followed by the current latched-modifier states so
    # the panel can mirror them on its LEDs.
    uart.write(POLL)
    uart.write(struct.pack('BBB', 1 if shiftState else 0,
                           1 if altState else 0,
                           1 if ctrlState else 0))
    time.sleep(WRITE_DELAY)
    response = uart.read(1)
    if response is None:
        print("No response")
        continue
    # First reply byte: id of the overlay (key map) to use.
    newOverlay = response[0]
    if (newOverlay != overlayId):
        print(("New Overlay: ", newOverlay))
        overlayId = newOverlay
        if (overlayId == 0):
            overlay = webaccess
        elif (overlayId == 5):
            overlay = qwerty
    time.sleep(WRITE_DELAY)
    # Second reply byte: number of pressed-cell records that follow.
    response = uart.read(1)
    if response is None:
        continue
    numCells = response[0]
    cellCount = 0
    while (cellCount < numCells):
        # Each record is a little-endian uint16 cell index.
        uart.readinto(cellData)
        (idx,) = struct.unpack('<H', cellData)
        col = idx // 24
        row = idx % 24
        # Cells map 3 rows x 2 cols per logical key in the overlay table.
        action = overlay[row // 3][col // 2]
        newKey = action
        if (action > 0):
            # Positive actions are plain HID keycodes.
            if (lastKey != 0):
                if (lastKey != newKey):
                    kbd.release(lastKey)
                    pressKey(newKey)
            else:
                pressKey(newKey)
            lastKey = newKey
        else:
            if (action < -99):
                # Actions <= -100 index the shortcuts table.
                index = (-1 * action) - 100
                sc = shortcuts[index]
                # Reset the modifier latches & their LEDs before a shortcut.
                shiftState = False
                uart.write(struct.pack('bbb', 2, 1, 0))
                altState = False
                uart.write(struct.pack('bbb', 2, 2, 0))
                ctrlState = False
                uart.write(struct.pack('bbb', 2, 5, 0))
                commandState = False
                uart.write(struct.pack('bbb', 2, 6, 0))
                # A list shortcut is sent item by item: tuples are chords
                # (pressed together), single codes are tap-and-release.
                if (type(sc) is list):
                    print(sc)
                    for codes in sc:
                        if (type(codes) is tuple):
                            kbd.press(*codes)
                            kbd.release_all()
                        else:
                            kbd.press(codes)
                            kbd.release(codes)
                        time.sleep(.1)
                else:
                    kbd.press(*sc)
                    # BUG FIX: was ``kbd.release_all`` with no call
                    # parentheses (a no-op), so the chord stayed held
                    # forever; compare the correct call in the list
                    # branch above.
                    kbd.release_all()
            elif (action == MOUSE_NW):
                mouse.move(-MOUSE_INCR, -MOUSE_INCR)
            elif (action == MOUSE_N):
                mouse.move(0, -MOUSE_INCR)
            elif (action == MOUSE_NE):
                mouse.move(MOUSE_INCR, -MOUSE_INCR)
            elif (action == MOUSE_W):
                mouse.move(-MOUSE_INCR, 0)
            elif (action == MOUSE_E):
                mouse.move(MOUSE_INCR, 0)
            elif (action == MOUSE_SW):
                mouse.move(-MOUSE_INCR, MOUSE_INCR)
            elif (action == MOUSE_S):
                mouse.move(0, MOUSE_INCR)
            elif (action == MOUSE_SE):
                mouse.move(MOUSE_INCR, MOUSE_INCR)
            elif (action == MOUSE_CLICK):
                mouse.click(Mouse.LEFT_BUTTON)
                time.sleep(.3)
            elif (action == MOUSE_RIGHT_CLICK):
                mouse.click(Mouse.RIGHT_BUTTON)
                time.sleep(.3)
            elif (action == MOUSE_DBL_CLICK):
                mouse.click(Mouse.LEFT_BUTTON)
                mouse.click(Mouse.LEFT_BUTTON)
                time.sleep(.3)
            elif (action == MOUSE_DRAG):
                # First hit starts the drag (button held); second ends it.
                print(("Mouse drag: ", dragState))
                if lastKey != MOUSE_DRAG:
                    dragState = True
                    lastKey = MOUSE_DRAG
                    mouse.press(Mouse.LEFT_BUTTON)
                else:
                    dragState = False
                    mouse.release(Mouse.LEFT_BUTTON)
                time.sleep(.3)
            lastKey = newKey
        cellCount = cellCount + 1
    if (cellCount == 0):
        # Nothing pressed this poll: release whatever was held.
        if (lastKey != 0):
            lastKey = 0
            kbd.release_all()
a6fed8cafb5d0edd2dc00b848834bd0e891d7ca5 | 379 | py | Python | Drawing Book/code.py | swy20190/HackerRankChallenge | c7f73e72daa5a9f892e07ab8fc1bc4d71f240c2a | [
"MIT"
] | null | null | null | Drawing Book/code.py | swy20190/HackerRankChallenge | c7f73e72daa5a9f892e07ab8fc1bc4d71f240c2a | [
"MIT"
] | null | null | null | Drawing Book/code.py | swy20190/HackerRankChallenge | c7f73e72daa5a9f892e07ab8fc1bc4d71f240c2a | [
"MIT"
] | null | null | null | import os
def pageCount(n, p):
    """Return the minimum number of page turns needed to reach page *p*
    in an *n*-page book, whether you start from the front cover or the
    back cover (HackerRank "Drawing Book")."""
    turns_from_front = int(p / 2)
    turns_from_back = int(n / 2) - turns_from_front
    return min(turns_from_front, turns_from_back)
if __name__ == '__main__':
    # HackerRank harness: read n (page count) and p (target page) from
    # stdin, write the answer to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(input().strip())
    p = int(input().strip())
    result = pageCount(n, p)
    fptr.write(str(result) + '\n')
    fptr.close()
| 16.478261 | 47 | 0.591029 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.137203 |
a6ff14ad05f8ebe1c83e39372f5bf76d98c7dc1d | 4,944 | py | Python | pylearn2/sampling/replace_samples.py | CKehl/pylearn2 | 086a198b9f437cf03c35d606e6b3b56b4634ebd8 | [
"BSD-3-Clause"
] | null | null | null | pylearn2/sampling/replace_samples.py | CKehl/pylearn2 | 086a198b9f437cf03c35d606e6b3b56b4634ebd8 | [
"BSD-3-Clause"
] | null | null | null | pylearn2/sampling/replace_samples.py | CKehl/pylearn2 | 086a198b9f437cf03c35d606e6b3b56b4634ebd8 | [
"BSD-3-Clause"
] | null | null | null | '''
Created on Apr 16, 2015
@author: christian
'''
from optparse import OptionParser
import numpy
import os
from os import listdir
from os.path import isfile, join
import cPickle
import glob
def unpickle(file):
    """Load and return the object pickled in *file* (a path string).

    Fix: the original opened the handle without a ``with`` block, so it
    leaked if ``cPickle.load`` raised; the context manager guarantees
    the file is closed either way.  (The parameter name shadows the
    ``file`` builtin; kept for backward compatibility with keyword
    callers.)
    """
    with open(file, 'rb') as fo:
        return cPickle.load(fo)
if __name__ == '__main__':
    # Command-line tool (Python 2): relabel every training/test sample of
    # one CIFAR-style class (-t) as another class (-r), drop the source
    # class from the label dictionary, and write *_rp pickle files.
    optionParser = OptionParser("usage: %prog -i INPUT_FILE -m META_FILE -t SOURCE_TAG -r DESTINATION_TAG")
    optionParser.add_option("-i", "--input", action="store", dest="input", type="string", metavar="FILE", help="pickled python dataset file")
    optionParser.add_option("-m", "--meta", action="store", dest="meta", type="string", metavar="FILE", help="pickled python metadata file")
    optionParser.add_option("-t", "--tag", action="store", dest="tag", type="int", help="selected tag to add the image", default = 0)
    optionParser.add_option("-r", "--replace_tag", action="store", dest="rtag", type="int", help="replacement tag number for <tag>", default = 0)
    (options, args) = optionParser.parse_args()
    my_obj = dict()
    meta_obj = dict()
    label_str = ''
    meta_label_str = ''
    test_img_array = None
    test_array = []
    test_classes = []
    test_obj = dict()
    if options.input != None:
        # Training batch plus the sibling "test" batch from the same dir.
        my_obj = unpickle(options.input)
        in_dir = os.path.dirname(options.input)
        test_obj = unpickle(os.path.join(in_dir, "test"))
    else:
        exit()
    # Detect the dataset flavour from the label key it carries.
    ds = 0
    if("fine_labels" in my_obj.keys()):
        ds = 1 #CIFAR-100
        label_str = 'fine_labels'
        meta_label_str = 'fine_label_names'
    else:
        ds = 0 #CIFAR-10 and combined
        label_str = 'labels'
        meta_label_str = 'label_names'
    # Metadata file: explicit via -m, otherwise the first *meta* file
    # found next to the input.
    meta_inputs = []
    if(options.meta == None) or (options.meta == ""):
        meta_inputs = glob.glob(os.path.dirname(options.input)+os.path.sep+"*meta*")
    else:
        meta_inputs.append(options.meta)
    meta_obj = unpickle(meta_inputs[0])
    # Per-class image counts.  NOTE(review): sized by the number of
    # samples rather than the number of classes — harmlessly oversized,
    # but worth confirming the intent.
    num_img_base_array = [0]*(len(my_obj[label_str]))
    # Row template is aliased ([[0]*3072]*N), but numpy.array() below
    # copies it into independent rows, so the aliasing is benign.
    img_base_array = [[0]*3072]*(my_obj['data'].shape[0])
    img_array = numpy.array(img_base_array, dtype=numpy.uint8)
    class_array = [0]*(my_obj['data'].shape[0])
    for i in range(0, my_obj['data'].shape[0]):
        data_entry = my_obj['data'][i]
        tag_no = my_obj[label_str][i]
        img_array[i] = data_entry
        class_array[i] = tag_no
        num_img_base_array[tag_no]+=1
    # Test array generation
    tcursor_point = 0
    #print "Test Data fieldsize: "+str(test_obj['data'].shape[0])
    for i in range(0, test_obj['data'].shape[0]):
        data_entry = test_obj['data'][i]
        tag_no = test_obj['labels'][i]
        #print "Test Image: "+str(i)+" => Tag: "+str(tag_no)
        test_array.append(data_entry.tolist())
        test_classes.append(tag_no)
        tcursor_point+=1
    # Collect indices of all training samples carrying the source tag.
    tag_img_number = num_img_base_array[options.tag]
    img_of_tag = []
    for i in range(0, len(class_array)):
        if(class_array[i] == options.tag):
            img_of_tag.append(i)
    print "Data with selected tag: "+str(img_of_tag)+" ("+str(tag_img_number)+")"
    print "Dataset size before replacement: "+str(len(class_array))+" | "+str(img_array.shape[0])
    # Relabel them with the replacement tag and drop the source class.
    for i in range(0, len(img_of_tag)):
        class_array[img_of_tag[i]]=options.rtag
    del num_img_base_array[options.tag]
    print "Dataset size after replacement: "+str(len(class_array))+" | "+str(img_array.shape[0])
    print "Label dictionary before replacement: "+str(len(meta_obj[meta_label_str]))
    del meta_obj[meta_label_str][options.tag]
    print "Label dictionary after replacement: "+str(len(meta_obj[meta_label_str]))
    # re-adapt mapping: shift every label above the removed tag down by
    # one so labels stay contiguous with the shortened dictionary.
    for i in range(0, len(class_array)):
        if(class_array[i] > options.tag):
            class_array[i]-=1
    ################
    # TESTING DATA #
    ################
    # Same replace-and-compact pass over the test labels.
    del img_of_tag[:]
    for i in range(0, len(test_classes)):
        if(test_classes[i] == options.tag):
            img_of_tag.append(i)
    for i in range(0, len(img_of_tag)):
        test_classes[img_of_tag[i]]=options.rtag
    # re-adapt mapping
    for i in range(0, len(test_classes)):
        if(test_classes[i] > options.tag):
            test_classes[i]-=1
    # Write the three *_rp pickles next to the input file.
    out_obj = dict()
    out_obj['data']=img_array
    out_obj['labels']=class_array
    out_dir = os.path.dirname(options.input)
    #fo = open(os.path.join(options.output,"experiment"), 'wb')
    cPickle.dump(out_obj, open(os.path.join(out_dir,"experiment_rp"), "wb"), protocol=2)
    test_n_obj = dict()
    test_img_array = numpy.array(test_array, dtype=numpy.uint8)
    test_n_obj['data']=test_img_array
    test_n_obj['labels']=test_classes
    cPickle.dump(test_n_obj, open(os.path.join(out_dir,"test_rp"), "wb"), protocol=2)
    meta_obj_out = dict()
    meta_obj_out['label_names'] = meta_obj[meta_label_str]
    cPickle.dump(meta_obj_out, open(os.path.join(out_dir,"meta_rp"), "wb"), protocol=2)
a6ff7fff18ec58c6987302477424ca3924375eef | 4,332 | py | Python | GAN/CGAN/cgan_mnist.py | fengjiran/scholar_project | 35e86b7a8d0226ad0fee3b2983821a3f331f68aa | [
"Apache-2.0"
] | 3 | 2017-08-20T08:47:18.000Z | 2019-06-21T06:09:27.000Z | GAN/CGAN/cgan_mnist.py | fengjiran/scholar_project | 35e86b7a8d0226ad0fee3b2983821a3f331f68aa | [
"Apache-2.0"
] | null | null | null | GAN/CGAN/cgan_mnist.py | fengjiran/scholar_project | 35e86b7a8d0226ad0fee3b2983821a3f331f68aa | [
"Apache-2.0"
] | null | null | null | from __future__ import division
import numpy as np
from keras.models import Model, Sequential
from keras.optimizers import SGD, Adam
from keras.layers import Input, Dense, Dropout, LeakyReLU, concatenate
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l1, l1_l2
from keras.datasets import mnist
from keras.utils.np_utils import to_categorical
from PIL import Image
import h5py
class CGAN(object):
    """Simple MLP CGAN (conditional GAN) for MNIST, Keras functional API.

    Generator: (noise 100-d, one-hot label 10-d) -> 784-d tanh image.
    Discriminator: (784-d image, one-hot label 10-d) -> real/fake score.
    """
    def __init__(self,
                 latent_dim=100,
                 image_shape=(28, 28),
                 batch_size=100,
                 epochs=100):
        self.latent_dim = latent_dim
        self.image_shape = image_shape
        self.batch_size = batch_size
        self.epochs = epochs
        # Construct the generator.
        # NOTE(review): the noise input is hard-coded to shape (100,)
        # rather than using latent_dim — confirm they are meant to agree.
        p_z = Input(shape=(100,))
        x = Dense(units=200,
                  kernel_regularizer=l1(1e-5))(p_z)
        x = LeakyReLU(0.2)(x)
        condition_y = Input(shape=(10,))
        y = Dense(units=1000,
                  kernel_regularizer=l1(1e-5))(condition_y)
        y = LeakyReLU(0.2)(y)
        # Concatenate the noise and label branches before the output layer.
        merge_xy = concatenate([x, y], axis=1)
        g_outputs = Dense(units=784,
                          activation='tanh',
                          kernel_regularizer=l1(1e-5))(merge_xy)
        self.generator = Model(inputs=[p_z, condition_y],
                               outputs=g_outputs)
        # Construct the discriminator: image and label are concatenated
        # directly at the input.
        d_x = Input(shape=(784,))
        d_condition_y = Input(shape=(10,))
        d_input = concatenate([d_x, d_condition_y], axis=1)
        d_input = Dense(units=128,
                        kernel_regularizer=l1(1e-5))(d_input)
        d_input = LeakyReLU(0.2)(d_input)
        d_output = Dense(units=1,
                         activation='sigmoid',
                         kernel_regularizer=l1(1e-5))(d_input)
        self.discriminator = Model(inputs=[d_x, d_condition_y],
                                   outputs=d_output)
        print self.generator.summary()
        print self.discriminator.summary()

    def train(self):
        # Separate optimizers for the two networks (standard GAN setup).
        d_optim = Adam(lr=2e-4, beta_1=0.5)
        g_optim = Adam(lr=2e-4, beta_1=0.5)
        self.discriminator.compile(optimizer=d_optim,
                                   loss='binary_crossentropy')
        self.generator.compile(optimizer=g_optim,
                               loss='binary_crossentropy')
        latent = Input(shape=(self.latent_dim,))
        g_condition = Input(shape=(10,))
        d_condition = Input(shape=(10,))
        # Get the fake image
        fake = self.generator([latent, g_condition])
        # we only want to be able to train generation for the combined model
        self.discriminator.trainable = False
        d_output = self.discriminator([fake, d_condition])
        combined_model = Model(inputs=[latent, g_condition, d_condition],
                               outputs=d_output)
        combined_model.compile(optimizer=g_optim,
                               loss='binary_crossentropy')
        # Scale MNIST pixels to [-1, 1] to match the generator's tanh
        # output, and flatten each 28x28 image to 784.
        (X_train, y_train), (X_test, y_test) = mnist.load_data('/home/richard/datasets/mnist.npz')
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_train = X_train.reshape((X_train.shape[0], X_train.shape[1] * X_train.shape[1]))
        # Fixed condition batch: ten one-hot copies of each digit 0-9
        # (batch_size = 100).
        condition = []
        for i in range(10):
            condition.extend([i] * 10)
        condition = np.asarray(condition)
        # one-hot encode
        condition = to_categorical(condition, 10)
        for epoch in range(self.epochs):
            print 'Epoch {} of {}'.format(epoch + 1, self.epochs)
            num_batches = int(X_train.shape[0] / self.batch_size)
            for index in range(num_batches):
                noise = np.random.normal(loc=0.0,
                                         scale=1.0,
                                         size=(self.batch_size, self.latent_dim))
                image_batch = X_train[index * self.batch_size:(index + 1) * self.batch_size]
                generated_images = self.generator.predict([noise, condition], verbose=0)
                # NOTE(review): X is built but never used — the actual
                # train_on_batch steps appear to be missing; the method
                # looks unfinished as stored.
                X = np.concatenate((image_batch, generated_images))
if __name__ == '__main__':
    # Constructing the model prints both network summaries as a side
    # effect; note that train() is never invoked here.
    model = CGAN()
a6ffb2cce71e31e979d8246a2d1f4abb5ecfebb0 | 1,516 | py | Python | user/migrations/0002_auto_20200816_0510.py | moewahed/trade_cycle | 8ace51f08781a568ef087234b65a7864236dfcaf | [
"MIT"
] | null | null | null | user/migrations/0002_auto_20200816_0510.py | moewahed/trade_cycle | 8ace51f08781a568ef087234b65a7864236dfcaf | [
"MIT"
] | null | null | null | user/migrations/0002_auto_20200816_0510.py | moewahed/trade_cycle | 8ace51f08781a568ef087234b65a7864236dfcaf | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2020-08-16 02:10
import django.core.validators
from django.db import migrations, models
import user.model_addon
class Migration(migrations.Migration):
    # Auto-generated Django migration: renames the User model's verbose
    # names to "Account(s)", defaults is_active to False, and tightens
    # the cover/profile image fields (upload path helper, extension
    # validators, size/dimension help text).  Auto-generated code —
    # keep the operation bodies verbatim.

    dependencies = [
        ('user', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='user',
            options={'verbose_name': 'Account', 'verbose_name_plural': 'Accounts'},
        ),
        migrations.AlterField(
            model_name='user',
            name='cover_pic',
            field=models.ImageField(default='default/img/cover.png', help_text='Limits:<ul><li>Size 4MB</li><li>Dimensions Range: Width & height (400-2600)</li></ul>', upload_to=user.model_addon.UploadToPathAndRename('upload/img/cover'), validators=[django.core.validators.FileExtensionValidator(['png', 'jpg', 'jpeg', 'PNG', 'JPG'])], verbose_name='Cover Image'),
        ),
        migrations.AlterField(
            model_name='user',
            name='is_active',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='user',
            name='profile_pic',
            field=models.ImageField(default='default/img/profile.png', help_text='Limits:<ul><li>Size 2MB</li><li>Dimensions Range: Width & height (200-1600)</li></ul>', upload_to=user.model_addon.UploadToPathAndRename('upload/img/profile'), validators=[django.core.validators.FileExtensionValidator(['png', 'jpg', 'jpeg'])], verbose_name='Profile Image'),
        ),
    ]
| 43.314286 | 364 | 0.64314 | 1,371 | 0.904354 | 0 | 0 | 0 | 0 | 0 | 0 | 508 | 0.335092 |