max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
backend/entities/i_serializable.py | GroupLe/grouple-face-tagger | 0 | 12766051 | <filename>backend/entities/i_serializable.py
from typing import Dict
class ISerializable:
    """Interface for objects that can be serialized to a JSON-compatible dict."""

    def __init__(self, *args, **kwargs):
        # Bug fix: the original raised the NotImplemented singleton, which in
        # Python 3 is a TypeError ("exceptions must derive from BaseException").
        # NotImplementedError is the correct exception for abstract members.
        raise NotImplementedError

    def to_json(self) -> Dict:
        """Return a JSON-serializable dict representation of the object."""
        raise NotImplementedError
tests/core/test_test.py | adrianvisovan/rasa | 0 | 12766052 | from pathlib import Path
import pytest
import rasa.core.test
from _pytest.capture import CaptureFixture
from rasa.core.agent import Agent
async def test_testing_warns_if_action_unknown(
    capsys: CaptureFixture,
    e2e_bot_agent: Agent,
    e2e_bot_test_stories_with_unknown_bot_utterances: Path,
):
    """E2E testing with an unknown bot utterance should warn on stdout."""
    await rasa.core.test.test(
        e2e_bot_test_stories_with_unknown_bot_utterances, e2e_bot_agent
    )
    captured = capsys.readouterr().out
    expected_fragments = (
        "Test story",
        "contains the bot utterance",
        "which is not part of the training data / domain",
    )
    for fragment in expected_fragments:
        assert fragment in captured
| 2.328125 | 2 |
demo.py | Tilix4/cgwire-demo | 0 | 12766053 | <reponame>Tilix4/cgwire-demo
import os
import random
import gazu
# Point the gazu client at the local Kitsu/Zou API instance.
gazu.set_host("http://localhost:8080/api")
# NOTE(review): credential strings are dataset-scrub placeholders -- replace
# with real login/password before running this demo.
gazu.log_in("<EMAIL>", "<PASSWORD>tp<PASSWORD>")
# Demo user roster, one tuple per user:
# (first_name, last_name, email, phone, role, short fixture name).
_PERSON_FIELDS = ("first_name", "last_name", "email", "phone", "role", "name")
_PERSON_ROWS = [
    ("Alicia", "Cooper", "<EMAIL>", "+33 6 82 38 19 08", "user", "alicia"),
    ("Michael", "Byrd", "<EMAIL>", "+33 6 32 45 12 45", "user", "michael"),
    ("Ann", "Kennedy", "<EMAIL>", "+33 6 32 45 12 45", "user", "ann"),
    ("Brennan", "Mason", "<EMAIL>", "+33 6 43 42 13 21", "user", "brennan"),
    ("David", "Penna", "<EMAIL>", "+33 6 08 98 92 12", "user", "david"),
    ("Rachel", "Shelton", "<EMAIL>", "+33 6 92 38 91 23", "user", "rachel"),
    ("Frank", "Rousseau", "<EMAIL>", "+33 6 22 18 13 88", "admin", "frank"),
]
persons = [dict(zip(_PERSON_FIELDS, row)) for row in _PERSON_ROWS]
# Create each demo user in Kitsu and attach an avatar image; avatar
# fixtures are keyed by the short "name" field.
for person in persons:
    personfull = gazu.person.new_person(
        person["first_name"],
        person["last_name"],
        person["email"],
        person["phone"],
        person["role"]
    )
    gazu.person.set_avatar(personfull, "fixtures/fake_user/%s.png" % person["name"])
# Look up a few users for task assignments later on.
# NOTE(review): "<NAME>" placeholders come from the dataset scrub; the real
# script presumably passed full names matching the users created above.
alicia = gazu.person.get_person_by_full_name("<NAME>")
brennan = gazu.person.get_person_by_full_name("<NAME>")
david = gazu.person.get_person_by_full_name("<NAME>")
# Create demo productions; only "Caminandes" (a TV show) is populated below.
bbb = gazu.project.new_project("Big Buck Bunny")
agent327 = gazu.project.new_project("Agent 327")
caminandes = gazu.project.new_project("Caminandes", production_type="tvshow")
# Asset categories used by the Caminandes assets.
characters = gazu.asset.new_asset_type("Characters")
props = gazu.asset.new_asset_type("Props")
environment = gazu.asset.new_asset_type("Environment")
fx = gazu.asset.new_asset_type("FX")
# (asset type, asset name) pairs to register on the Caminandes project.
asset_desc = [
    (characters, "Lama"),
    (characters, "Oti"),
    (characters, "Pingoo"),
    (environment, "Mine"),
    (environment, "Pool"),
    (environment, "Railroad"),
    (environment, "Oil Machine"),
    (fx, "Smoke"),
    (fx, "Wind"),
    (props, "Berry"),
    (props, "Flower"),
    (props, "Mine Cart"),
    (props, "Train")
]
# Register each asset; `assets` keeps the API results in asset_desc order.
assets = [
    gazu.asset.new_asset(caminandes, kind, title)
    for kind, title in asset_desc
]
shots = []
sequences = []
# Episode layout: (episode name, sequence names, shots per sequence).
# Shots are named SH001..SH0NN within every sequence, matching the
# original per-episode loops.
_EPISODE_PLAN = [
    ("E01", ["SE01", "SE02", "SE03"], 11),
    ("E02", ["SE01", "SE02"], 3),
    ("E03", ["SE01", "SE02", "SE03"], 7),
]
for episode_name, sequence_names, shot_count in _EPISODE_PLAN:
    episode = gazu.shot.new_episode(caminandes, episode_name)
    for sequence_name in sequence_names:
        sequence = gazu.shot.new_sequence(
            caminandes, sequence_name, episode=episode)
        sequences.append(sequence)
        for shot_number in range(1, shot_count + 1):
            shots.append(
                gazu.shot.new_shot(
                    caminandes,
                    sequence,
                    "SH%03d" % shot_number,
                    # random frame count just to make the demo data look real
                    nb_frames=random.randrange(20, 90, 1)
                )
            )
# Resolve the task types provided by the default Kitsu install.
modeling = gazu.task.get_task_type_by_name("Modeling")
setup = gazu.task.get_task_type_by_name("Rigging")
storyboard = gazu.task.get_task_type_by_name("Storyboard")
layout = gazu.task.get_task_type_by_name("Layout")
animation = gazu.task.get_task_type_by_name("Animation")
render = gazu.task.get_task_type_by_name("Rendering")
compositing = gazu.task.get_task_type_by_name("Compositing")
# Every asset gets modeling and rigging tasks.
for asset in assets:
    gazu.task.new_task(asset, modeling)
    gazu.task.new_task(asset, setup)
# Every shot gets storyboard/render/compositing/animation tasks; the
# animation task is assigned to a different user per sequence (only the
# first three sequences get assignments).
for shot in shots:
    gazu.task.new_task(shot, storyboard)
    gazu.task.new_task(shot, render)
    gazu.task.new_task(shot, compositing)
    animation_task = gazu.task.new_task(shot, animation)
    if shot["parent_id"] == sequences[0]["id"]:
        gazu.task.assign_task(animation_task, alicia)
    if shot["parent_id"] == sequences[1]["id"]:
        gazu.task.assign_task(animation_task, brennan)
    if shot["parent_id"] == sequences[2]["id"]:
        gazu.task.assign_task(animation_task, david)
# Build a shared casting (which assets appear in a shot, and how often).
lama = gazu.asset.get_asset_by_name(caminandes, "Lama")
pingoo = gazu.asset.get_asset_by_name(caminandes, "Pingoo")
berry = gazu.asset.get_asset_by_name(caminandes, "Berry")
casting = [
    {
        "asset_id": lama["id"],
        "nb_occurences": 1
    },
    {
        "asset_id": pingoo["id"],
        "nb_occurences": 1
    },
    {
        "asset_id": berry["id"],
        "nb_occurences": 2
    }
]
# Apply the same casting to the first four shots.
gazu.casting.update_shot_casting(caminandes, shots[0], casting)
gazu.casting.update_shot_casting(caminandes, shots[1], casting)
gazu.casting.update_shot_casting(caminandes, shots[2], casting)
gazu.casting.update_shot_casting(caminandes, shots[3], casting)
# Set the project thumbnail image.
gazu.client.upload(
    "/pictures/thumbnails/projects/%s" % caminandes["id"],
    "fixtures/v1.png"
)
# Modeling-task thumbnails, indexed in the same order as `assets`.
# Only lama.png lives at the top of th_assets; everything else is in ep01/.
_ASSET_TH_DIR = "fixtures/th_assets/ep01/"
file_paths_modeling = ["fixtures/th_assets/lama.png"] + [
    _ASSET_TH_DIR + basename
    for basename in [
        "oti.png",
        "pingoo.png",
        "mine.png",
        "pool.png",
        "railroad.jpg",
        "oil_machine.png",
        "smoke.png",
        "wind.png",
        "berry.png",
        "flower.png",
        "cart.png",
        "train.png",
    ]
]
# Storyboard thumbnails: one per shot slot of E01 (sequences SE01-SE03,
# shot numbers SH01-SH11), in sequence-major order.
file_paths_sb = [
    "fixtures/th_shots/ep01/SB/caminandes_llamigos_E01_SE%02d_SH%02d.png"
    % (seq_no, shot_no)
    for seq_no in range(1, 4)
    for shot_no in range(1, 12)
]
# Animation thumbnails: same E01 sequence/shot grid as the storyboard
# images, but under the Anim/ fixture directory.
file_paths_animation = [
    "fixtures/th_shots/ep01/Anim/caminandes_llamigos_E01_SE%02d_SH%02d.png"
    % (seq_no, shot_no)
    for seq_no in range(1, 4)
    for shot_no in range(1, 12)
]
# Render thumbnails: same E01 sequence/shot grid, under render/.
file_paths_render = [
    "fixtures/th_shots/ep01/render/caminandes_llamigos_E01_SE%02d_SH%02d.png"
    % (seq_no, shot_no)
    for seq_no in range(1, 4)
    for shot_no in range(1, 12)
]
# Animation preview movies exist only for the first six shots of E01/SE01.
movie_file_paths_animation = [
    "fixtures/th_shots/ep01/Anim/caminandes_llamigos_E01_SE01_SH%02d.mp4" % shot_no
    for shot_no in range(1, 7)
]
# Render preview movies exist only for the first six shots of E01/SE01.
movie_file_paths_render = [
    "fixtures/th_shots/ep01/render/caminandes_llamigos_E01_SE01_SH%02d.mp4" % shot_no
    for shot_no in range(1, 7)
]
# Task statuses used when simulating work below.
done = gazu.task.get_task_status_by_name("Done")
wfa = gazu.task.get_task_status_by_name("Waiting For Approval")
wip = gazu.task.get_task_status_by_name("Work In Progress")
# For each asset: when its modeling thumbnail fixture exists, upload it as
# a preview, make it the asset thumbnail, and mark modeling done; always
# start the rigging task.
for (index, asset) in enumerate(assets):
    task_modeling = gazu.task.get_task_by_name(asset, modeling)
    if index < len(file_paths_modeling) and \
       os.path.exists(file_paths_modeling[index]):
        comment = gazu.task.add_comment(task_modeling, wfa, "New preview")
        preview_file = gazu.task.add_preview(
            task_modeling,
            comment,
            file_paths_modeling[index]
        )
        gazu.task.set_main_preview(asset, preview_file)
        comment = gazu.task.add_comment(task_modeling, done, "Done")
    task_setup = gazu.task.get_task_by_name(asset, setup)
    comment = gazu.task.add_comment(task_setup, wip, "Getting started")
def _attach_preview(shot, task_type, paths, index, make_main):
    """Upload paths[index] as a preview on the shot's task of task_type.

    No-op when the index is out of range or the fixture file is missing.
    The task is commented "Waiting For Approval" for the upload, then
    "Done". When make_main is set, the preview becomes the shot thumbnail.
    """
    if index >= len(paths) or not os.path.exists(paths[index]):
        return
    task = gazu.task.get_task_by_name(shot, task_type)
    comment = gazu.task.add_comment(task, wfa, "New preview")
    preview_file = gazu.task.add_preview(task, comment, paths[index])
    if make_main:
        gazu.task.set_main_preview(shot, preview_file)
    gazu.task.add_comment(task, done, "Done")


for (index, shot) in enumerate(shots):
    # Still images set the shot thumbnail; movie previews do not.
    _attach_preview(shot, storyboard, file_paths_sb, index, True)
    _attach_preview(shot, animation, file_paths_animation, index, True)
    _attach_preview(shot, render, file_paths_render, index, True)
    _attach_preview(shot, animation, movie_file_paths_animation, index, False)
    _attach_preview(shot, render, movie_file_paths_render, index, False)
| 2.234375 | 2 |
stompy/model/delft/dfm_to_ptm.py | rustychris/stompy | 17 | 12766054 | <reponame>rustychris/stompy
"""
Augment DFM/DWAQ output so that it can be used as hydro input
for FISH-PTM.
in FISH_PTM.inp, set subgrid bathy to false.
Requirements on the run:
DFM and DWAQ output must be synchronized, with the same start/stop/interval
settings. In most cases this just means that MapInterval and WaqInterval
have the same value in the MDU file.
MapFormat: in theory this can be 1,3 or 4, meaning old-style netcdf (1,3) or
UGRID-ish netcdf (4). Experience with subversion dflowfm rev 52184 and 53925
shows a bug when combining MPI, DWAQ output, and UGRID.
To get to a working setup as quickly as possible, development is focusing on
converting single-core UGRID w/ DWAQ output.
Variables required by PTM at each output interval include (those listed
in get_netcdf_hydro_record):
h_flow_avg
v_flow_avg
Mesh2_edge_wet_area
Mesh2_face_wet_area
Mesh2_face_water_volume
Mesh2_salinity_3d
Mesh2_vertical_diffusivity_3D
Mesh2_sea_surface_elevation
Mesh2_edge_bottom_layer
Mesh2_edge_top_layer
Mesh2_face_bottom_layer
Mesh2_face_bottom_layer
An example metadata description:
double h_flow_avg(nMesh2_edge=76593, nMesh2_layer_3d=54, nMesh2_data_time=483);
:standard_name = "ocean_volume_transport_across_line";
:long_name = "horizontal volume flux average over integration interval";
:coordinates = "Mesh2_edge_x Mesh2_edge_y Mesh2_edge_lon Mesh2_edge_lat Mesh2_edge_z_3d";
:mesh = "Mesh2";
:grid_mapping = "Mesh2_crs";
:location = "edge";
:units = "m3 s-1";
:_ChunkSizes = 11949, 18, 1; // int
This is the average volume flux at an edge (each j,k) over the preceeding time interval.
"""
##
import sys
import six
import os
import glob
import netCDF4
import numpy as np
import xarray as xr
import logging
log=logging.getLogger('dfm_to_ptm')
# When not running under kernprof/line_profiler (which injects a global
# `profile`), install a no-op decorator so @profile-decorated code still runs.
try:
    profile
except NameError:
    def profile(x):
        return x
from ... import utils
from ... import io as dio
from . import dfm_grid
from ...grid import unstructured_grid
from . import waq_scenario as waq
from . import dflow_model as dfm
class DFlowToPTMHydro(object):
    """Convert DFM map output plus DWAQ hydro output into a FISH-PTM
    compatible hydro netCDF file.
    """
    # If False, refuse to clobber existing grid output (see write_grd).
    overwrite=False
    # Subset of map-output time steps to convert.
    time_slice=slice(None)
    # Optional path of a PTM grid file to write alongside the netCDF.
    grd_fn=None
    # Whether to write the hydro netCDF output at all.
    write_nc=True
    # Allow for the possibility that DWAQ cells are not in the same order
    # as the DFM map output cells
    remap_waq_elements=True
    def __init__(self,mdu_path,output_fn,**kwargs):
        """
        mdu_path: path to the MDU file of a completed DFM run.
        output_fn: path of the PTM hydro netCDF to create.
        Remaining keywords override class attributes (overwrite, time_slice,
        grd_fn, write_nc, remap_waq_elements) via utils.set_keywords.
        """
        utils.set_keywords(self,kwargs)
        self.mdu_path=mdu_path
        self.output_fn=output_fn
        self.model=dfm.DFlowModel.load(mdu_path)
        self.nprocs=self.model.num_procs
        if self.nprocs>1:
            log.warning("Brave - trying an MPI run")
        # check the naming of DFM output files
        self.open_dflow_output()
        # this relies on some info from open_dflow_output
        self.open_waq_output()
        if self.grd_fn is not None:
            self.write_grd(self.grd_fn)
        if self.write_nc:
            self.initialize_output()
            try:
                self.initialize_output_variables()
                self.write_time_steps()
            finally:
                # helps netCDF4 release the dataset and not block
                # subsequent runs in the case of an error on this
                # run.
                self.close()
    def open_dflow_output(self):
        """
        open dfm netcdf output as (1) original (2) with renames,
        and (3) as unstructured_grid

        Sets self.map_ds (raw), self.mod_map_ds (renamed, time-sliced) and
        self.g (grid with PTM-style depth/mark fields), plus self.flipped.
        """
        # HERE - this only works with serial runs.
        # maybe I can use the info already assembled in waq to merge
        # things here?
        # incoming dataset from DFM:
        map_fns=self.model.map_outputs()
        if len(map_fns)>1:
            # prefer a single merged map file over per-processor files
            map_fns=[map_fn for map_fn in map_fns if '_merged_' in map_fn]
            if len(map_fns)==1:
                log.info("Found multiple map files but only 1 merged map file. Will use that")
        assert len(map_fns)==1,"Not ready for multi processor or time-divided output"
        self.map_ds=xr.open_dataset(map_fns[0])
        # Additionally trim to subset of times here:
        subset_ds=self.map_ds.isel(time=self.time_slice)
        if 'mesh2d' in subset_ds:
            # UGRID output: take dimension names from the mesh topology attrs
            face_dim=subset_ds.mesh2d.attrs.get('face_dimension','nmesh2d_face')
            edge_dim=subset_ds.mesh2d.attrs.get('edge_dimension','nmesh2d_edge')
            node_dim=subset_ds.mesh2d.attrs.get('node_dimension','nmesh2d_node')
            max_side_dim=subset_ds.mesh2d.attrs.get('max_face_nodes_dimension',
                                                    'max_nmesh2d_face_nodes')
        else:
            face_dim='nmesh2d_face'
            edge_dim='nmesh2d_edge'
            node_dim='nmesh2d_node'
            # NOTE(review): max_side_dim is not set on this branch, so the
            # rename below would raise NameError -- confirm whether non-UGRID
            # output ever reaches this code path.
        # shallow copy of that with renames for PTM compatibility
        self.mod_map_ds=subset_ds.rename({'time':'nMesh2_data_time',
                                          face_dim:'nMesh2_face',
                                          edge_dim:'nMesh2_edge',
                                          node_dim:'nMesh2_node',
                                          max_side_dim:'nMaxMesh2_face_nodes',
                                          'mesh2d_face_x':'Mesh2_face_x',
                                          'mesh2d_face_y':'Mesh2_face_y',
                                          'mesh2d_edge_x':'Mesh2_edge_x',
                                          'mesh2d_edge_y':'Mesh2_edge_y'
                                          })
        self.g=unstructured_grid.UnstructuredGrid.from_ugrid(self.map_ds)
        # copy depth into a field where it is expected by the code that
        # writes a ptm grid. note this is a positive:up quantity
        self.g.add_cell_field('depth',self.g.cells['mesh2d_flowelem_bl'])
        # set markers as ptm expects:
        # 0: internal, 1 external, 2 flow, 3 open
        # from DFM: 0=> internal closed, 1=>internal, 2=>flow or stage bc, 3=>closed
        # map to -1 (error), 0=>internal, 1=>closed, 2=>flow. no easy way to
        # distinguish flow from stage bc right here.
        translator=np.array([-1,0,2,1])
        self.g.edges['mark'][:]=translator[ self.map_ds.mesh2d_edge_type.values.astype(np.int32) ]
        self.g.cells['mark'][:]=0
        # punt, and call any cell adjacent to a marked edge BC a stage-bc cell
        bc_edges=np.nonzero(self.g.edges['mark']>1)[0]
        bc_cells=self.g.edge_to_cells(bc_edges).max(axis=1) # drop the negative neighbors
        self.g.cells['mark'][bc_cells]=1
        # regardless of the how DFM was configured, we will set edge
        # depths to the shallower of the cells
        e2c=self.g.edge_to_cells()
        n1=e2c[:,0] ; n2=e2c[:,1]
        # but no info right here on flow/open boundaries.
        # substitute the valid neighbor for boundary edges (negative index)
        n1=np.where(n1>=0,n1,n2)
        n2=np.where(n2>=0,n2,n1)
        edge_depths=np.maximum( self.g.cells['depth'][n1],
                                self.g.cells['depth'][n2] )
        self.g.add_edge_field('depth',edge_depths)
        # flip edges to keep invariant that external cells are always
        # second.
        e2c=self.g.edge_to_cells()
        to_flip=e2c[:,0]<0
        for fld in ['nodes','cells']:
            a=self.g.edges[fld][to_flip][:,0].copy()
            b=self.g.edges[fld][to_flip][:,1].copy()
            self.g.edges[fld][to_flip,0] = b
            self.g.edges[fld][to_flip,1] = a
        self.flipped=to_flip
    def write_grd(self,grd_fn):
        """Write a FISH-PTM grid file (with subgrid info) for this run's grid."""
        self.g.write_ptm_gridfile(grd_fn,overwrite=self.overwrite,
                                  subgrid=True)
    def template_ds(self):
        """
        Construct an xarray dataset with static geometry and basic
        dimensions.

        Returns the dataset; time-varying hydro variables are added later
        via the netCDF4 API (see initialize_output_variables).
        """
        out_ds=self.g.write_to_xarray(mesh_name="Mesh2",
                                      node_coordinates="Mesh2_node_x Mesh2_node_y",
                                      face_node_connectivity='Mesh2_face_nodes',
                                      edge_node_connectivity='Mesh2_edge_nodes',
                                      face_dimension='nMesh2_face',
                                      edge_dimension='nMesh2_edge',
                                      node_dimension='nMesh2_node')
        out_ds=out_ds.rename({
            'maxnode_per_face':'nMaxMesh2_face_nodes',
            'node_per_edge':'Two'
        })
        # Additional grid information:
        # xarray wants the dimension made explicit here -- don't know why.
        out_ds['Mesh2_face_x']=('nMesh2_face',),self.mod_map_ds['Mesh2_face_x']
        out_ds['Mesh2_face_y']=('nMesh2_face',),self.mod_map_ds['Mesh2_face_y']
        out_ds['Mesh2_edge_x']=('nMesh2_edge',),self.mod_map_ds['Mesh2_edge_x']
        out_ds['Mesh2_edge_y']=('nMesh2_edge',),self.mod_map_ds['Mesh2_edge_y']
        e2c=self.g.edge_to_cells()
        out_ds['Mesh2_edge_faces']=('nMesh2_edge','Two'),e2c
        face_edges=np.array([self.g.cell_to_edges(c,pad=True)
                             for c in range(self.g.Ncells())] )
        out_ds['Mesh2_face_edges']=('nMesh2_face','nMaxMesh2_face_nodes'),face_edges
        # PTM wants positive-down depths; DFM bed level is positive-up.
        out_ds['Mesh2_face_depth']=('nMesh2_face',),-self.mod_map_ds['mesh2d_flowelem_bl'].values
        out_ds['Mesh2_face_depth'].attrs.update({'positive':'down',
                                                 'unit':'m',
                                                 'standard_name':'sea_floor_depth_below_geoid',
                                                 'mesh':'Mesh2',
                                                 'long_name':'Mean elevation of bed in face'})
        # recreate edge bed level based on a constant bedlevtype
        bedlevtype=int(self.model.mdu['geometry','BedLevType'])
        if bedlevtype==3:
            # mean of the edge's node elevations
            edge_z=self.map_ds.mesh2d_node_z.values[self.g.edges['nodes']].mean(axis=1)
        elif bedlevtype==4:
            # min (deepest... see sign note below) of the node elevations
            edge_z=self.map_ds.mesh2d_node_z.values[self.g.edges['nodes']].min(axis=1)
        else:
            raise Exception("Only know how to deal with bed level type 3,4 not %d"%bedlevtype)
        # mindful of positive-down sign convention needed by PTM
        out_ds['Mesh2_edge_depth']=('nMesh2_edge',),-edge_z
        out_ds['Mesh2_edge_depth'].attrs.update({'positive':'down',
                                                 'unit':'m',
                                                 'standard_name':'sea_floor_depth_below_geoid',
                                                 'mesh':'Mesh2',
                                                 'long_name':'Mean elevation of bed on edge'})
        out_ds['Mesh2_data_time']=self.mod_map_ds.nMesh2_data_time
        if 1:
            # This may not be necessary -- this keeps the freesurface
            # from appearing below the bed in cells, but these should
            # appear as dry based on ktop/kbot.
            # also, writing anything time-varying beyond the time stamps themselves
            # should probably be handled in the time loop [TODO]
            s1=np.maximum( self.mod_map_ds.mesh2d_s1, -out_ds['Mesh2_face_depth'])
            out_ds['Mesh2_sea_surface_elevation']=('nMesh2_face','nMesh2_data_time'),s1.T
        if 1: # edge and cell marks
            edge_marks=self.g.edges['mark']
            assert not np.any(edge_marks<0),"Need to implement internal closed edges"
            # this gets us to 0: internal, 1:boundary, 2: boundary_closed
            # this looks like 1 for stage or flow BC, 2 for land, 0 for internal.
            out_ds['Mesh2_edge_bc']=('nMesh2_edge',),edge_marks
            # 'facemark':'Mesh2_face_bc'
            out_ds['Mesh2_face_bc']=('nMesh2_face',),self.g.cells['mark']
        if 1: # layers
            # infer the vertical dimension from the cell-velocity variable
            ucx=self.mod_map_ds['mesh2d_ucx']
            if ucx.ndim==2:
                self.nkmax=1
                self.map_2d=True
            else:
                self.nkmax=ucx.shape[-1] # is it safe to assume nkmax is last in DFM?
                self.map_2d=False
            out_ds['nMesh2_layer_3d']=('nMesh2_layer_3d',),np.arange(self.nkmax)
            # based on sample output, the last of these is not used,
            # so this would be interfaces, starting with the top of the lowest layer.
            # fabricate something in sigma coordinates for now.
            sigma_layers=np.linspace(-1,0,self.nkmax+1)[1:]
            out_ds['Mesh2_layer_3d']=('nMesh2_layer_3d',),sigma_layers
            attrs=dict(standard_name="ocean_sigma_coordinate",
                       dz_min=0.001, # not real, but ptm tries to read this.
                       long_name="sigma layer coordinate at flow element top",
                       units="",
                       positive="up", # kind of baked into the sigma definition
                       formula_terms="sigma: Mesh2_layer_3d eta: Mesh2_sea_surface_elevation bedlevel: Mesh2_face_depth"
                       )
            out_ds['Mesh2_layer_3d'].attrs.update(attrs)
        # this would be for adding more scalars to be extracted at particle positions
        # Just guessing with 1 -- maybe 0 is more appropriate?
        out_ds['nsp']=('nsp',),np.arange(1)
        # from http://cfconventions.org/Data/cf-conventions/cf-conventions-1.0/build/apd.html
        # z(n,k,j,i) = eta(n,j,i) + sigma(k)*(depth(j,i)+eta(n,j,i))
        # sigma coordinate definition has z=positive:up baked in, likewise depth is
        # positive down, and eta positive up, with sigma ranging from -1 (bed) to 0 (surface)
        # for writing the output, use xarray to initialize the file, but
        # the big data part is best handled directly by netCDF4 so we can
        # control how much data is in RAM at a time.
        return out_ds
def initialize_output(self):
base_ds=self.template_ds()
if os.path.exists(self.output_fn):
if self.overwrite:
os.unlink(self.output_fn)
# maybe not strictly necessary, but might be more
# scalable or flexible in the future
base_ds.encoding['unlimited_dims']=['nMesh2_data_time']
base_ds.to_netcdf(self.output_fn)
# and re-open as direct netCDF4 for heavy writing
self.out_nc=netCDF4.Dataset(self.output_fn,mode="a")
    def initialize_output_variables(self):
        """ Add the time-varying variables to the netcdf output.
        """
        # PTM expects time last
        self.cell_3d_data_dims=('nMesh2_face','nMesh2_layer_3d','nMesh2_data_time')
        self.edge_3d_data_dims=('nMesh2_edge','nMesh2_layer_3d','nMesh2_data_time')
        self.cell_2d_data_dims=('nMesh2_face','nMesh2_data_time')
        self.edge_2d_data_dims=('nMesh2_edge','nMesh2_data_time')
        # Scalar-ish variables
        if 'mesh2d_sa1' not in self.mod_map_ds:
            print("Will fabricate salinity=0")
        # seems that PTM wants this to exist regardless, so better
        # to fabricate salinity.
        self.salt_var=self.out_nc.createVariable('Mesh2_salinity_3d',
                                                 np.float64, self.cell_3d_data_dims)
        # NOTE(review): the module docstring lists 'Mesh2_vertical_diffusivity_3D'
        # (capital D) while this creates '_3d' -- confirm which spelling
        # FISH-PTM actually reads.
        self.nut_var=self.out_nc.createVariable('Mesh2_vertical_diffusivity_3d',
                                                np.float64, self.cell_3d_data_dims)
        # Layer index variables (per-column wet layer ranges)
        self.edge_k_bot_var=self.out_nc.createVariable('Mesh2_edge_bottom_layer',
                                                       np.int32,
                                                       self.edge_2d_data_dims)
        self.edge_k_top_var=self.out_nc.createVariable('Mesh2_edge_top_layer',
                                                       np.int32,
                                                       self.edge_2d_data_dims)
        self.cell_k_bot_var=self.out_nc.createVariable('Mesh2_face_bottom_layer',
                                                       np.int32,
                                                       self.cell_2d_data_dims)
        self.cell_k_top_var=self.out_nc.createVariable('Mesh2_face_top_layer',
                                                       np.int32,
                                                       self.cell_2d_data_dims)
        # hydro variables
        self.h_flow_var=self.out_nc.createVariable('h_flow_avg',np.float64,self.edge_3d_data_dims)
        # what are the expectations for surface/bed vertical velocity?
        self.v_flow_var=self.out_nc.createVariable('v_flow_avg',np.float64,self.cell_3d_data_dims)
        self.vol_var=self.out_nc.createVariable('Mesh2_face_water_volume',np.float64,self.cell_3d_data_dims)
        self.A_edge_var=self.out_nc.createVariable('Mesh2_edge_wet_area',np.float64,self.edge_3d_data_dims)
        self.A_face_var=self.out_nc.createVariable('Mesh2_face_wet_area',np.float64,self.cell_3d_data_dims)
def write_time_strings(self):
# special handling for character array which xarray botches
time_strings=[ utils.to_datetime(t).strftime('%Y-%m-%d %H:%M:%S')
for t in self.mod_map_ds.nMesh2_data_time.values ]
# fish ptm expects this to be a 2D array, with string length
# first, followed by time index.
time_string_array=np.array( [ np.frombuffer( t.encode(),dtype='S1' )
for t in time_strings ] )
self.out_nc.createDimension('date_string_length',time_string_array.shape[1])
time_string_var=self.out_nc.createVariable('Mesh2_data_time_string','c',
('date_string_length','nMesh2_data_time'))
time_string_var[:]=time_string_array.T
    def open_waq_output(self):
        """Open the DWAQ hydro output associated with this DFM run and
        build the link/element mappings (see init_waq_mappings)."""
        # non-working multiprocessor code
        # self.hyd=waq.HydroMultiAggregator(run_prefix=self.model.mdu.name,
        #                                   path=self.model.run_dir,
        #                                   agg_shp=self.model.grid)
        # assert self.nprocs==self.waq.infer_nprocs(),"Failed to determine number of processors"
        # DWAQ output is written next to the MDU under DFM_DELWAQ_<name>/<name>.hyd
        self.hyd_fn=os.path.join(self.model.mdu.base_path,
                                 "DFM_DELWAQ_%s"%self.model.mdu.name,
                                 "%s.hyd"%self.model.mdu.name)
        self.hyd=waq.HydroFiles(self.hyd_fn)
        self.hyd.infer_2d_links()
        # DWAQ pointers are 1-based; keep a 0-based copy for indexing.
        self.poi0=self.hyd.pointers-1
        self.init_waq_mappings()
def init_waq_mappings(self):
"""
establish mapping from hydro links to grid edges.
sets
self.link_to_edge_sign: [ (j from grid, +-1 to indicate flipped), ...]
(indexed by waq link indexes)
self.element_to_cell: [i from grid, ...] (indexed by waq 2D element)
- if self.remap_waq_elements is False, this is just np.arange(g.Ncells()).
Otherwise it will be decided based on geometry
"""
if self.remap_waq_elements:
hg=self.hyd.grid()
node_map,edge_map,cell_map=hg.match_to_grid(self.g)
self.element_to_cell=cell_map
else:
self.element_to_cell=np.arange(self.hyd.n_seg)
assert self.hyd.n_seg == self.g.Ncells()
self.cell_to_element=utils.invert_permutation(self.element_to_cell)
# link_to_edge_sign=[] # an edge index in g.edges, and a +-1 sign for whether the link is aligned the same.
# use array to allow for vector operations later
link_to_edge_sign=np.zeros( (len(self.hyd.links),2), np.int32)
link_to_edge_sign[:,0]=-1 # no edge
link_to_edge_sign[:,0]=0 # 0 sign
mapped_edges={} # make sure we don't map multiple links onto the same edge
for link_idx,(l_from,l_to) in enumerate(self.hyd.links):
if l_from>=0:
i_from=self.element_to_cell[l_from]
else:
i_from=l_from
if l_to>=0:
i_to =self.element_to_cell[l_to]
else:
i_to = l_to
if l_from>=0 and l_to>=0:
j=self.g.cells_to_edge(i_from,i_to)
assert j is not None
j_cells=self.g.edge_to_cells(j)
if j_cells[0]==i_from and j_cells[1]==i_to:
sign=1
elif j_cells[1]==i_from and j_cells[0]==i_to:
sign=-1
else:
assert False,"We have lost our way"
link_to_edge_sign[link_idx,:]=[j,sign]
assert j not in mapped_edges
mapped_edges[j]=link_idx
else:
assert l_to>=0,"Was only expecting 'from' for the link to be negative"
nbr_cells=np.array(self.g.cell_to_cells(i_to))
nbr_edges=np.array(self.g.cell_to_edges(i_to))
potential_edges=nbr_edges[nbr_cells<0]
if len(potential_edges)==1:
j=potential_edges[0]
elif len(potential_edges)==0:
print("No boundary edge for link %d->%d to an edge"%(l_from,l_to))
link_to_edge_sign[link_idx,:]=[9999999,0] # may be able to relax this
continue
else:
print("Link %d->%d could map to %d edges - will choose first unclaimed"
%(l_from,l_to,len(potential_edges)))
# may not have enough information to know which boundary
for j in potential_edges:
if j in mapped_edges:
continue
break
else:
raise Exception("Couldn't find an edge for link %d->%d"%(l_from,l_to))
mapped_edges[j]=link_idx
j_cells=self.g.edge_to_cells(j)
if j_cells[0]==i_to:
link_to_edge_sign[link_idx,:]=[j,-1]
elif j_cells[1]==i_to:
link_to_edge_sign[link_idx,:]=[j,1]
else:
assert False,"whoa there"
self.link_to_edge_sign=link_to_edge_sign
@profile
def write_time_steps(self):
"""
The heavy lifting writing out hydro fields at each time step.
"""
times=self.mod_map_ds.nMesh2_data_time.values
e2c=self.g.edge_to_cells()
# when possible use the DWAQ areas, but for 2D, will use this
# area since there isn't a dwaq cell area.
Ac=self.g.cells_area()
# this writes all time strings at once -- maybe that keeps those
# small data contiguous for fast scanning. Calling it from this
# method maybe consolidates time step selection locations.
self.write_time_strings()
# "safe" versions of the cells on either side of an edge
c1=e2c[:,0].copy() ; c2=e2c[:,1].copy()
c2[c2<0]=c1[c2<0]
c1[c1<0]=c2[c1<0] # unnecessary, but hey..
# Make it clear that the step-integrated values are not valid
# for i=0
self.h_flow_var[:,:,0]=np.nan
self.A_edge_var[:,:,0]=np.nan
self.edge_k_top_var[:,0]=0
self.v_flow_var[:,:,0]=np.nan
self.A_face_var[:,:,0]=np.nan
# will need seg_k below
self.hyd.infer_2d_elements()
for ti,t in enumerate(times):
if True: # ti%24==0:
print("%d/%d t=%s"%(ti,len(times),t))
cell_water_depth=self.mod_map_ds.mesh2d_waterdepth.isel(nMesh2_data_time=ti)
if 0:
# try setting edges to be dry by k_top=0. this is specific to nk=1
# all or nothing for sigma layers
self.cell_k_top_var[:,ti] = np.where(cell_water_depth>0,self.nkmax,0)
else:
# based on looking at untrim output, seems that cells are *not*
# dried out by setting cell_top=0, though they do show a zero wet area.
self.cell_k_top_var[:,ti] = self.nkmax
# HERE: this should probably use edge area and length, so that
# fluxes
# edge eta is taken from the higher freesurface
eta_cell=self.out_nc['Mesh2_sea_surface_elevation'][:,ti]
edge_eta=np.maximum( eta_cell[c1], eta_cell[c2] )
edge_water_depth=edge_eta + self.out_nc['Mesh2_edge_depth'][:]
# bed never moves in this code
self.cell_k_bot_var[:,ti] = 1
self.edge_k_bot_var[:,ti] = 1
def copy_3d_cell(src,dst):
# Copies single time step of cell-centered 3D data.
# src: string name of variable in mod_map_ds
# dst: netCDF variable to assign to.
src_data=self.mod_map_ds[src].isel(nMesh2_data_time=ti).values
if self.map_2d:
dst[:,0,ti]=src_data
else:
dst[:,:,ti]=src_data
if 'mesh2d_sa1' in self.mod_map_ds:
copy_3d_cell('mesh2d_sa1',self.salt_var)
else:
self.salt_var[:,:,ti]=0.0
if self.nkmax>1:
copy_3d_cell('mesh2d_viw',self.nut_var)
else:
# punt - would be nice to calculate something based on
# velocity, roughness, etc.
self.nut_var[:,:,ti]=0.0
# h_flow gets interesting as we have to read dwaq output
# dwaq uses seconds from reference time
hyd_t_sec=(t-utils.to_dt64(self.hyd.time0))/np.timedelta64(1,'s')
# time-step integrated quantities in PTM reflect the preceding interval
# but DWAQ integrated quantities reflect the following interval.
if ti+1<len(times):
# flows is all horizontal flows, layer by layer, surface to bed,
# and then vertical flows.
# only flow edges get flows, though.
flows=self.hyd.flows(hyd_t_sec)
areas=self.hyd.areas(hyd_t_sec)
# compose exch_to_2d_link,link_to_edge_sign, weed out unmapped
# links, and copy into h_flow_avg. start with naive loops
h_flow_avg=np.zeros((self.g.Nedges(),self.nkmax),np.float64)
h_area_avg=np.zeros_like(h_flow_avg)
# vectorized
# just the horizontal exchanges
exchs=np.arange(self.hyd.n_exch_x+self.hyd.n_exch_y)
Qs=flows[exchs]
# for horizontal exchanges in a sigma grid, this is a safe way
# to get layer:
ks=self.hyd.seg_k[self.poi0[exchs][:,1]]
links=self.hyd.exch_to_2d_link['link'][exchs]
link_sgns=self.hyd.exch_to_2d_link['sgn'][exchs]
js_j_sgns=self.link_to_edge_sign[links,:]
js=js_j_sgns[:,0]
j_sgns=js_j_sgns[:,1]
sgns=np.where(js>=0,link_sgns*j_sgns,0)
# so far this is k in the DWAQ world, surface to bed.
# but now we assign in the untrim sense, bed to surface.
ptm_ks=self.nkmax-ks-1
h_flow_avg[js,ptm_ks]=Qs*sgns
h_area_avg[js,ptm_ks]=np.where(js>=0,areas[exchs],0.0)
self.h_flow_var[:,:,ti+1]=h_flow_avg
self.A_edge_var[:,:,ti+1]=h_area_avg
# edge ktop is based on presence of flux rather than geometry of freesurface
# and bed, since eta is instantaneous. Could also use areas, but fluxes
# are more 'fundamental'
edge_is_wet=np.any( h_flow_avg!=0.0, axis=1)
self.edge_k_top_var[:,ti+1] = np.where(edge_is_wet,self.nkmax,0)
v_flow_avg=np.zeros((self.g.Ncells(),self.nkmax), np.float64)
v_area_avg=np.zeros_like(v_flow_avg)
if self.nkmax>1:
# this is here for future reference, but most of the
# other code is not ready for 3D, and this code has not been
# tested.
for exch in range(self.hyd.n_exch_x+self.hyd.n_exch_y,self.hyd.n_exch):
# negate, because dwaq records this relative to the exchange,
# which is top-segment to next segment down.
Q=-flows[exch]
assert self.poi0[exch][0] >=0,"Wasn't expecting BC exchanges in the vertical"
seg_up,seg_down=self.poi0[exch][0,:2]
k_upper=self.hyd.seg_k[seg_up]
k_lower=self.hyd.seg_k[seg_down]
elt=self.hyd.seg_to_2d_element[seg_up]
assert k_upper+1==k_lower,"Thought this was a given"
assert elt==self.hyd.seg_to_2d_element[seg_down],"Maybe this wasn't a vertical exchange"
# based on looking at the untrim output, this should be recorded to
# the k of the lower layer, but also flipped to be bed->surface
# ordered
# no longer assume that dwaq cells are numbered the same as dfm cells.
cell=self.element_to_cell[elt]
v_flow_avg[cell,nkmax-k_lower-1]=Q
if k_upper==0: # repeat top flux
v_flow_avg[cell,nkmax-k_upper-1]=Q
else:
# At least populate the area, though it may not make a difference
v_area_avg[:,0] = np.where(cell_water_depth>0,Ac,0.0)
self.v_flow_var[:,:,ti+1]=v_flow_avg
self.A_face_var[:,:,ti+1]=v_area_avg
# Instantaneous values
# Mesh2_face_water_volume
vols=self.hyd.volumes(hyd_t_sec)
# assume again that cells are numbered the same.
# they come to us ordered by first all the top layer, then the second
# layer, on down to the bed. convert to 3D, and reorder the layers
# used to reshape by self.g.Ncells().
vols=vols.reshape( (self.nkmax,self.hyd.n_2d_elements) )[::-1,:] # [k,element]
vols=vols.T # [element,k]
vols=vols[self.cell_to_element,:] # [cell,k]
self.vol_var[:,:,ti] = vols
def close(self):
self.out_nc.close()
self.out_nc=None
# for vertical flows, there are nkmax layers, but nkmax-1
# internal flux faces, or nkmax+1 total faces. how is the
# staggering expected to be handled? best guess:
# v_flow_avg[ k=10 ] is the volume flux between volume[k=10]
# and volume[k=11], i.e. transport with the volume above
# this one. it looks like, at least in the untrim output, that
# the last flux is repeated. one confusing point is that in the
# case of a dry surface layer, we'd expect to see something
# like [Qa, Qb, Qc, Qc, 0], but instead I've seen
# [Qa, Qb, Qc, Qd, Qd]. I don't understand what that's about.
# look at cell 18, around time index 200, 201
if 0:
    # Testing
    # hard-coded local test case; flip the `if 0` to enable
    mdu_path="/home/rusty/src/csc/dflowfm/runs/20180807_grid98_17/flowfm.mdu"
    converter=DFlowToPTMHydro(mdu_path,'test_hydro.nc',time_slice=slice(0,2),
                              grd_fn='test_sub.grd',overwrite=True)
elif __name__=='__main__':
    # Command line use:
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("mdu",help="MDU filename, e.g. 'my_run/flowfm.mdu'")
    parser.add_argument("output",help="Output filename, e.g. 'hydro.nc'" )
    parser.add_argument("--times",help="Time indexes, e.g. 0:10, 5")
    parser.add_argument("--subgrid","-s",help="Write fake subgrid output_sub.grd too",
                        action='store_true')
    parser.add_argument("--skip-nc","-n",help="Do not write netcdf, usu. in conjunction with --subgrid",
                        action='store_true')
    args=parser.parse_args()
    kwargs={}
    if args.times is not None:
        # parsing python slice syntax ("a:b", "a:", ":b", or a bare index)
        parts=[int(p) if p else None
               for p in args.times.split(':')]
        kwargs['time_slice']=slice(*parts)
    if args.subgrid:
        # derive the subgrid filename from the output name
        kwargs['grd_fn']=args.output.replace('.nc','_sub.grd')
        assert kwargs['grd_fn']!=args.output,"Output filename should end in .nc"
    if args.skip_nc:
        kwargs['write_nc']=False
    converter=DFlowToPTMHydro(args.mdu,args.output,**kwargs)
| 1.257813 | 1 |
wsabie.py | xdshang/wsabie | 19 | 12766055 | from itertools import cycle
import numpy as np
from scipy import sparse
import h5py
from evaluation import load_nuswide, normalize
rng = np.random.RandomState(1701)
transformer = []
batch_size = 100
def load():
    """Load the NUS-WIDE training split.

    Returns (data, label, label_name): dense features (row-normalized via
    evaluation.normalize with axis=1), the label matrix converted to LIL
    sparse format (train() reads label.rows for fast per-sample access),
    and the list of label names.
    """
    _, label, _, label_name, _, data = load_nuswide('nuswide-decaf.npz', 'train')
    data = data.toarray()
    data = normalize(data, axis = 1)
    label = label.tolil()
    return data, label, label_name
def save(fname, I, W):
    """Persist the image map I and label embedding W to '<fname>.h5'."""
    out = h5py.File(fname + '.h5', 'w')
    out.create_dataset('I', data = I)
    out.create_dataset('W', data = W)
    out.close()
def projection(X):
    """Project each row of X onto the closed unit L2 ball, in place.

    Rows with Euclidean norm > 1 are rescaled to unit norm; other rows are
    left untouched.  X is modified in place and also returned.
    """
    norms = np.linalg.norm(X, axis = 1)
    over = norms > 1
    # vectorized equivalent of the original per-row loop: multiply the
    # offending rows by their reciprocal norm, broadcast over columns
    X[over] *= (1. / norms[over])[:, None]
    return X
def initialize_word_embeddings(label_name, embed_dim):
    """Initialize label embeddings from pretrained word2vec vectors.

    Requires the GoogleNews word2vec binary next to the script; embed_dim
    must equal the model's vector size (300 for GoogleNews).  Each label
    name must exist in the model's vocabulary or lookup raises KeyError.
    """
    import gensim
    model = gensim.models.Word2Vec.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)
    assert model.syn0.shape[1] == embed_dim
    W = []
    for name in label_name:
        W.append(model[name])
    return np.asarray(W)
def train(I, W, data, label, lr_I = 0.001, lr_W = 0.001, maxIter = None):
    """WSABIE training loop (WARP loss, SGD).

    I: [feat_dim, embed_dim] image map; W: [n_labels, embed_dim] label
    embeddings; data: dense features; label: LIL sparse label matrix.
    Both I and W are kept inside the unit ball via projection().
    Returns the trained (I, W).

    Fixes: sampleIter.next() -> next(sampleIter) (Python 3), and integer
    floor division for the rank-transformer index (was float `/` which
    cannot index a list on Python 3).
    """
    it = 0
    loss = 0
    sampleIter = cycle(rng.permutation(label.shape[0]))
    universe = set(range(label.shape[1]))
    I = projection(I)
    W = projection(W)
    print('Start training with lr_I {}, lr_W {}, maxIter {}'.format(lr_I, lr_W, maxIter))
    while True:
        # update
        sampleId = next(sampleIter)
        feat = np.dot(data[sampleId], I)
        # obtain label and vlabel (violate label)
        l = label.rows[sampleId]
        if len(l) == 0:
            continue
        vl = list(universe.difference(l))
        vllen = len(vl)
        delta_feat = np.zeros(feat.shape)
        delta_W = np.zeros(W.shape)
        for y in l:
            score = np.dot(W[y, :], feat)
            margin = -1
            esN = 0
            # WARP sampling: draw negatives until one violates the margin
            while margin <= 0 and esN < (vllen - 1):
                vy = vl[rng.randint(vllen)]
                vscore = np.dot(W[vy, :], feat)
                margin = vscore - score + 1
                esN += 1
            if margin > 0:
                # rank weight approximated from the number of draws needed
                rank = transformer[(vllen - 1) // esN]
                loss += rank * margin
                # gradient
                delta_feat += (W[y, :] - W[vy, :]) * rank
                temp = feat * rank
                delta_W[y, :] += temp
                delta_W[vy, :] -= temp
        I += np.tensordot(data[sampleId], delta_feat, axes = 0) * (lr_I / len(l))
        W += delta_W * (lr_W / len(l))
        if lr_I > 0.:
            I = projection(I)
        if lr_W > 0.:
            W = projection(W)
        it += 1
        if maxIter is not None and it == maxIter:
            print('Finished training at iteration {} with loss: {}'.format(it, loss / ((it - 1) % batch_size + 1)))
            break
        if it % batch_size == 0:
            print('\titer: {}\tloss: {}'.format(it, loss / batch_size))
            loss = 0
        # save a checkpoint once per epoch
        if it % label.shape[0] == 0:
            print('saving model...')
            save('models/wsabie_model_iter_{}'.format(it), I, W)
    return I, W
if __name__ == '__main__':
    embed_dim = 300
    random_init_W = True
    # load data
    data, label, label_name = load()
    print('Data shape: {}'.format(data.shape))
    print('Label shape: {}'.format(label.shape))
    # initialize transformer: transformer[k] = harmonic number H_k, used as
    # the WARP rank weighting table in train()
    transformer = [0] * (label.shape[1] + 1)
    for i in range(label.shape[1]):
        transformer[i + 1] = transformer[i] + 1. / (i + 1)
    # initialize model (word2vec init available via random_init_W=False)
    I = rng.rand(data.shape[1], embed_dim).astype(data.dtype)
    if random_init_W:
        W = rng.rand(label.shape[1], embed_dim).astype(data.dtype)
    else:
        W = initialize_word_embeddings(label_name, embed_dim)
    # train loop (2 epochs worth of iterations)
    I, W = train(I, W, data, label, lr_I = 0.001, lr_W = 0.00001,
                 maxIter = 2 * data.shape[0])
    # save to hdf5 file
    save('models/wsabie_model', I, W)
release/stubs.min/System/Windows/Forms/__init___parts/ToolStripItemEventArgs.py | YKato521/ironpython-stubs | 0 | 12766056 | <filename>release/stubs.min/System/Windows/Forms/__init___parts/ToolStripItemEventArgs.py
# Auto-generated IronPython stub: bodies are placeholders for editor
# support only; the real implementation lives in the .NET assembly.
class ToolStripItemEventArgs(EventArgs):
    """
    Provides data for System.Windows.Forms.ToolStripItem events.
    ToolStripItemEventArgs(item: ToolStripItem)
    """

    def __getitem__(self, *args):
        """ x.__getitem__(y) <==> x[y] """
        pass

    @staticmethod
    def __new__(self, item):
        """ __new__(cls: type,item: ToolStripItem) """
        pass

    Item = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Gets a System.Windows.Forms.ToolStripItem for which to handle events.
    Get: Item(self: ToolStripItemEventArgs) -> ToolStripItem
    """
xlib/avecl/_internal/info/TileInfo.py | jkennedyvz/DeepFaceLive | 3 | 12766057 | import numpy as np
from ..AShape import AShape, AShape
class TileInfo:
    """
    Tile info.

    arguments

     shape     AShape
     tiles     Iterable of ints (one repeat count per axis)

    errors during the construction:

     ValueError

    result:

     .o_shape        AShape: shape with every axis multiplied by its tile count
     .axes_slices    tuple of slice-tuples, one per tile, to fetch the
                     original shape from o_shape for each tile
    """
    __slots__ = ['o_shape', 'axes_slices']

    def __init__(self, shape, tiles):
        if len(tiles) != shape.ndim:
            raise ValueError(f'tiles should match shape.ndim {shape.ndim}')

        self.o_shape = AShape(dim*tiles[i] for i,dim in enumerate(shape))

        # Enumerate tile offsets in C order (last axis fastest); np.ndindex
        # replaces the hand-rolled odometer loop with the same ordering.
        axes_slices = []
        for tile_idx in np.ndindex(*tiles):
            sl = tuple( slice(shape[axis]*t, shape[axis]*(t+1))
                        for axis,t in enumerate(tile_idx) )
            axes_slices.append(sl)
        self.axes_slices = tuple(axes_slices)
data/data_utils.py | stalhabukhari/HDC-Net | 23 | 12766058 | import random
import pickle
import numpy as np
import torch
M = 2**32 - 1
def init_fn(worker):
    """Per-worker RNG seeding (DataLoader worker_init_fn).

    Derives a seed from torch's RNG plus the worker id, reduced modulo
    M (= 2**32 - 1) so it fits numpy's seed range, then seeds both numpy
    and the stdlib random module.
    """
    base_seed = torch.LongTensor(1).random_().item()
    worker_seed = (base_seed + worker) % M
    np.random.seed(worker_seed)
    random.seed(worker_seed)
def add_mask(x, mask, dim=1):
    """Prepend a 21-slot one-hot encoding of `mask` to `x` along `dim`.

    x:    tensor whose `dim` axis grows by 21 in the result.
    mask: integer index tensor shaped like x without the `dim` axis;
          presumably class ids in [0, 21) -- TODO confirm against callers.
    Returns a new tensor whose first 21 channels along `dim` hold the
    scattered one-hot values and whose remaining channels hold x.
    """
    mask = mask.unsqueeze(dim)
    shape = list(x.shape); shape[dim] += 21
    new_x = x.new(*shape).zero_()
    new_x = new_x.scatter_(dim, mask, 1.0)
    s = [slice(None)]*len(shape)
    s[dim] = slice(21, None)
    # index with a tuple: indexing tensors with a plain list of slices is
    # deprecated and an error in recent torch releases
    new_x[tuple(s)] = x
    return new_x
def sample(x, size):
    """Draw `size` rows from `x` uniformly without replacement, returned
    as an int16 torch tensor.
    (index-sampling approach: https://gist.github.com/yoavram/4134617)
    """
    chosen = random.sample(range(x.shape[0]), size)
    picked = x[chosen]
    return torch.tensor(picked, dtype=torch.int16)
def pkload(fname):
    """Unpickle and return the object stored in file `fname`."""
    with open(fname, 'rb') as fh:
        return pickle.load(fh)
_shape = (240, 240, 155)
def get_all_coords(stride):
    """Centers of a regular grid with spacing `stride` over the fixed
    volume `_shape`, as an [N,3] int16 tensor (ij-ordered)."""
    axes = [stride//2 + np.arange(0, s, stride) for s in _shape]
    grids = np.meshgrid(*axes, indexing='ij')
    flat = np.stack([g.reshape(-1) for g in grids], -1)
    return torch.tensor(flat, dtype=torch.int16)
_zero = torch.tensor([0])
def gen_feats():
    """Normalized voxel-coordinate features for the fixed 240x240x155
    volume: a (240,240,155,3) float32 array with each axis centered and
    scaled into roughly [-0.5, 0.5)."""
    dims = (240, 240, 155)
    grids = np.meshgrid(
        np.arange(dims[0]), np.arange(dims[1]), np.arange(dims[2]),
        indexing='ij')
    feats = np.stack(grids, -1).astype('float32')
    half = np.array(dims)
    feats -= half/2.0
    feats /= half
    return feats
| 2.28125 | 2 |
main.py | Magicalbat/Tilemap-Editor | 0 | 12766059 | <reponame>Magicalbat/Tilemap-Editor
import pygame
pygame.init()
width = int(320 * 3)
height = int(180 * 3)
win = pygame.display.set_mode((width, height), pygame.SCALED | pygame.RESIZABLE, 8)
pygame.display.set_caption("Tilemap Editor")
clock = pygame.time.Clock()
fps = 60
import json, copy, sys, os, math
from easygui import buttonbox
data = ""
with open("profile.json", 'r') as f:
data = f.read()
profile = json.loads(data)
if not profile["tileset"]:
print("Error: Tileset required!")
sys.exit()
else:
with open(profile["tileset"], 'r') as f:
data = f.read()
tileset = json.loads(data)
tileSize = tileset['tileSize']
defaultAutotile = -1
collisionTiles = set([i for i, tile in enumerate(tileset["tiles"]) if tile["collision"]])
autotiles = {}
for i, tile in enumerate(tileset["tiles"]):
if tile["enableAutotile"]:
autotiles[int(tile["autotile"], 2)] = i
if "defaultAutotile" in tile:
defaultAutotile = i
if defaultAutotile == -1:
defaultAutotile = [i for i in range(len(tileset["tiles"])) if tileset["tiles"][i]["enableAutotile"]][0]
from scripts.input import Input
from scripts.text import Text
from scripts.common import *
inp = Input()
inp.loadWithDictionary(profile["input"])
text = Text()
text.loadFontImg("res/text.png", scale=(2,2))
# TILES
tileSize = tileset["tileSize"]
tileImgs = loadSpriteSheet(\
tileset["imgPath"],\
(tileset["tileSize"], tileset["tileSize"]),\
tileset["imgTileSheetDim"],\
tileset["imgTilePadding"], tileset["tileNum"],\
(0,0,0)\
)
#tileImgs = loadSpriteSheet("res/tiles.png", (12,12), (4,4), (1,1), 16, (0,0,0))
currentTile = 0
prevTile = 0
# TILEMAP
layers = 2
currentLayer = 0
drawTiles = [{} for _ in range(layers)]
def getSurroundingBitwise(x1, y1):
    """4-bit occupancy mask of the tiles neighbouring (x1, y1) on the
    current layer.  Bit order follows the offsets (-1,0),(1,0),(0,-1),(0,1)
    and matches the tileset's autotile encoding."""
    bits = 0b0000
    neighbours = ((-1, 0), (1, 0), (0, -1), (0, 1))
    for bit, (dx, dy) in enumerate(neighbours):
        key = f"{int(x1 + dx)};{int(y1 + dy)}"
        if key in drawTiles[currentLayer]:
            bits = modifyBit(bits, bit, 1)
    return bits
# UNDO / REDO
changeHistory = []
currentChangeLog = [[{}, {}] for _ in range(layers)]
undoing = False
undoIndex = 0
def tryResetUndo():
    """If the user was mid-undo, finalize it: leave undo mode and discard
    the redo tail beyond the current undo position."""
    global undoing, changeHistory, undoIndex
    if not undoing:
        return
    undoing = False
    changeHistory = changeHistory[:undoIndex]
def saveChange():
    """Commit the pending per-layer change log onto the undo history and
    start a fresh one.  History is capped at the 10 most recent entries."""
    global changeHistory, currentChangeLog
    changeHistory.append(copy.deepcopy(currentChangeLog))
    # fresh, empty (before, after) pair for every layer
    currentChangeLog = [[{}, {}] for _ in range(len(drawTiles))]
    if len(changeHistory) > 10:
        changeHistory = changeHistory[-10:]
# SCROLL
scroll = pygame.math.Vector2((0,0))
startScrollDrag = pygame.math.Vector2((0,0))
prevState = None
# GRID
gridVisible = False
gridSurf = createGrid(width + 2 * tileSize, height + 2 * tileSize, tileSize)
# BOX SELECT
startSelectionPos = endSelectionPos = pygame.math.Vector2((0,0))
def getSelectionTileRect():
    """Axis-aligned tile rect spanned by the two selection corners,
    inclusive of both end tiles (hence the +1 on each dimension)."""
    left = min(startSelectionPos.x, endSelectionPos.x)
    top = min(startSelectionPos.y, endSelectionPos.y)
    width = abs(startSelectionPos.x - endSelectionPos.x) + 1
    height = abs(startSelectionPos.y - endSelectionPos.y) + 1
    return pygame.Rect((left, top, width, height))
# CURSOR INIT
editState = EditStates.PENCIL
changeCursorFromState(editState)
def changeState(state):
    # Switch the active edit tool and update the mouse cursor to match
    # (changeCursorFromState comes from scripts.common).
    global editState
    editState = state
    changeCursorFromState(editState)
sideBarFraction = 0.2
sideBarDim = (int(width * sideBarFraction), height)
sideBar = pygame.Surface(sideBarDim).convert()
sideBar.set_colorkey((0,0,0))
sideBarCol = profile["colors"]["Side Bar"]
tempDim = (int(sideBarDim[0] * 0.8), int(sideBarDim[1] * 0.6))
normalTS = TileSelection(
tempDim, (max(0, sideBarCol[0] - 15), max(0, sideBarCol[1] - 15), max(0, sideBarCol[2] - 15)),\
pygame.math.Vector2((int((sideBarDim[0] - tempDim[0]) / 2), 56)), tileSize / 2, profile["scroll speed"],\
-1 + profile["reverse scroll"] * 2, tileImgs, tileSize
)
del tempDim
currentTS = normalTS
# EXTRA DATA
extraDataKeys = profile["extra data"]
extraData = {key : [] for key in extraDataKeys}
extraDataCols = profile["extra data colors"]
extraDataImgs = []
extraDataAlphaImgs = []
for i, data in enumerate(extraDataKeys):
tempSurf = pygame.Surface((tileSize, tileSize)).convert()
tempSurf.fill(extraDataCols[i % len(extraDataCols)])
tempSurf.blit(text.createTextSurf(data[0]), (0,0))
extraDataImgs.append(tempSurf.copy())
tempSurf.set_alpha(128)
extraDataAlphaImgs.append(tempSurf.copy())
extraDataTS = TileSelection(
normalTS.dim, normalTS.col, normalTS.pos, normalTS.indent,\
normalTS.scrollSpeed, normalTS.scrollDir, extraDataImgs, tileSize
)
extraDataMode = False
tileViewDim = (int(width * (1 - sideBarFraction)), height)
tileViewPos = pygame.math.Vector2((int(width * sideBarFraction), 0))
tileView = pygame.Surface(tileViewDim).convert()
tileView.set_colorkey((0,0,0))
tileViewCol = profile["colors"]["Tileview"]
tilePreviewSurf = pygame.Surface((tileSize, tileSize)).convert()
tilePreviewSurf.fill((0,255,0))
tilePreviewSurf.set_alpha(64)
def loadMap(filePath):
    """Replace the current map state with the contents of a saved map file.

    Merges saved extraData entries into the existing dict (so keys from the
    active profile that the file lacks keep their defaults) and resets the
    layer count from the loaded tile data.
    """
    global drawTiles, extraData, layers
    if not os.path.exists(filePath):
        print(f"Could not open file at \"{filePath}\".")
        return
    with open(filePath, 'r') as f:
        loadedMap = json.loads(f.read())
    drawTiles = loadedMap["drawTiles"]
    for key, value in loadedMap["extraData"].items():
        extraData[key] = value
    layers = len(drawTiles)
if profile["load map"]: loadMap(profile["load map"])
elif len(sys.argv) > 1: loadMap(sys.argv[1])
def getSaveData():
    # Snapshot of everything serialized to disk: the raw tile layers,
    # derived collision chunks (generateChunks from scripts.common), and
    # the extra-data marker positions.
    return {
        "drawTiles" : drawTiles,
        "chunks" : generateChunks(drawTiles, collisionTiles, tileSize),
        "extraData" : extraData
    }
currentSavedData = copy.deepcopy(getSaveData())
def saveMap(filePath="output.json"):
    """Write the current map (tiles, derived chunks, extra data) to disk,
    honoring the profile's export indent setting, and remember the saved
    snapshot so unsaved-changes detection works."""
    global currentSavedData
    currentSavedData = copy.deepcopy(getSaveData())
    indent = profile["export"]["indent"]
    if indent:
        payload = json.dumps(currentSavedData, indent=indent)
    else:
        payload = json.dumps(currentSavedData)
    with open(filePath, 'w') as f:
        f.write(payload)
running = True
while running:
clock.tick(fps)
delta = clock.get_time() / 1000
inp.passiveUpdate()
mousePos = pygame.math.Vector2(pygame.mouse.get_pos())
for event in pygame.event.get():
if event.type == pygame.QUIT:
if getSaveData() != currentSavedData:
choice = buttonbox("", "Save changes before closing?", ("Save", "Do not save", "Cancel"), default_choice="Save", cancel_choice="Cancel")
if choice == "Save":
saveMap()
if choice != "Cancel":
running = False
else:
running = False
if event.type == pygame.KEYDOWN:
inp.eventUpdate(event.key, True)
if event.type == pygame.KEYUP:
inp.eventUpdate(event.key, False)
if event.type == pygame.MOUSEWHEEL:
if mousePos.x < tileViewPos.x:
scrollAmount = currentTS.scrollSpeed * delta * event.y * currentTS.scrollDir
currentTS.scroll += scrollAmount
for r in currentTS.rects:
r[1] -= scrollAmount
if inp.isActionJustPressed("Change Layer"):
currentLayer += 1
currentLayer %= len(drawTiles)
if inp.isActionJustPressed("Extra Data"):
extraDataMode = not extraDataMode
currentTS = normalTS if not extraDataMode else extraDataTS
if extraDataMode:
normalTS.prevTile = currentTile
currentTile = extraDataTS.prevTile
changeCursorFromState(EditStates.PENCIL)
else:
extraDataTS.prevTile = currentTile
currentTile = normalTS.prevTile
changeCursorFromState(editState)
if inp.isActionJustPressed("Save") and inp.isActionPressed("Control"):
saveMap()
if editState != EditStates.SCROLL_GRAB and not extraDataMode:
if inp.isActionJustPressed("Pencil"): changeState(EditStates.PENCIL)
if inp.isActionJustPressed("Box Select") and inp.isActionReleased("Control"): changeState(EditStates.BOX_SELECT)
if inp.isActionJustPressed("Bucket"): changeState(EditStates.BUCKET)
if inp.isActionJustPressed("Color Picker"): changeState(EditStates.COLOR_PICKER)
tvMousePos = pygame.math.Vector2((mousePos.x - tileViewPos.x, mousePos.y)) # Tile View Mouse Pos
tvMousePos += scroll
tileMousePos = pygame.math.Vector2((math.floor(tvMousePos.x / tileSize), math.floor(tvMousePos.y / tileSize)))
clampedListMousePos = [math.floor(tileMousePos.x * tileSize), math.floor(tileMousePos.y * tileSize)]
mousePosStr = f"{math.floor(tileMousePos.x)};{math.floor(tileMousePos.y)}"
if mousePos.x < tileViewPos.x:
if inp.isMouseButtonJustPressed(0):
for i, r in enumerate(currentTS.rects):
rect = pygame.Rect(r)
if rect.collidepoint(mousePos):
currentTile = i
else:
if inp.isMouseButtonJustPressed(1) or inp.isActionJustPressed("Alt Scroll Grab"):
startScrollDrag = tvMousePos
prevState = editState
changeState(EditStates.SCROLL_GRAB)
if inp.isMouseButtonPressed(1) or inp.isActionPressed("Alt Scroll Grab"):
scroll += startScrollDrag - tvMousePos
if inp.isMouseButtonJustReleased(1) or inp.isActionJustReleased("Alt Scroll Grab"):
changeState(prevState)
prevState = EditStates.NONE
if inp.isMouseButtonJustReleased(0) or inp.isMouseButtonJustReleased(2):
if currentChangeLog != [[{}, {}] for _ in range(len(drawTiles))]:
saveChange()
if inp.isActionJustPressed("Undo") and inp.isActionPressed("Control"):
if not undoing:
undoing = True
undoIndex = len(changeHistory)
if undoing:
undoIndex -= 1
undoIndex = max(0, undoIndex)
if undoIndex >= 0:
for i in range(len(drawTiles)):
updateDictionary(drawTiles[i], changeHistory[undoIndex][currentLayer][0], changeHistory[undoIndex][currentLayer][1], True)
if undoing and inp.isActionJustPressed("Undo") and inp.isActionPressed("Control") and inp.isActionPressed("Shift"):
undoIndex = min(len(changeHistory) - 1, undoIndex)
if undoIndex >= 0:
for i in range(len(drawTiles)):
updateDictionary(drawTiles[i], changeHistory[undoIndex][currentLayer][0], changeHistory[undoIndex][currentLayer][1], False)
undoIndex += 1
if not extraDataMode:
if inp.isMouseButtonPressed(2):
tryResetUndo()
if editState == EditStates.PENCIL:
if mousePosStr in drawTiles[currentLayer]:
currentChangeLog[currentLayer][0][mousePosStr] = drawTiles[currentLayer][mousePosStr]
drawTiles[currentLayer].pop(mousePosStr)
currentChangeLog[currentLayer][1][mousePosStr] = None
if inp.isMouseButtonJustPressed(0):
tryResetUndo()
if editState == EditStates.COLOR_PICKER:
if mousePosStr in drawTiles[currentLayer]:
currentTile = drawTiles[currentLayer][mousePosStr]
changeState(EditStates.PENCIL)
elif editState == EditStates.BOX_SELECT:
startSelectionPos = tileMousePos
elif editState == EditStates.BUCKET: # BUCKET FILL
startPos = (tileMousePos.x, tileMousePos.y)
queue = [startPos]
while queue:
curPos = queue.pop()
if abs(curPos[0] - startPos[0]) > 20 or abs(curPos[1] - startPos[1]) > 20: continue
pStr = f"{int(curPos[0])};{int(curPos[1])}"
if pStr in drawTiles[currentLayer]: continue
currentChangeLog[currentLayer][0][pStr] = None
drawTiles[currentLayer][pStr] = currentTile
currentChangeLog[currentLayer][1][pStr] = currentTile
queue.insert(0, (curPos[0] + 1, curPos[1]))
queue.insert(0, (curPos[0] - 1, curPos[1]))
queue.insert(0, (curPos[0], curPos[1] + 1))
queue.insert(0, (curPos[0], curPos[1] - 1))
if inp.isMouseButtonPressed(0):
tryResetUndo()
if editState == EditStates.PENCIL:
if mousePosStr not in currentChangeLog[currentLayer][0]:
currentChangeLog[currentLayer][0][mousePosStr] = drawTiles[currentLayer][mousePosStr] if mousePosStr in drawTiles[currentLayer] else None
drawTiles[currentLayer][mousePosStr] = currentTile
currentChangeLog[currentLayer][1][mousePosStr] = currentTile
if editState == EditStates.BOX_SELECT:
endSelectionPos = tileMousePos
if editState == EditStates.BOX_SELECT:
if inp.isActionJustPressed("Selection Delete"):
sRect = getSelectionTileRect()
for x in range(sRect.w):
for y in range(sRect.h):
pStr = f"{int(sRect.x + x)};{int(sRect.y + y)}"
if pStr in drawTiles[currentLayer]:
currentChangeLog[currentLayer][0][pStr] = drawTiles[currentLayer][pStr]
drawTiles[currentLayer].pop(pStr)
currentChangeLog[currentLayer][1][pStr] = None
saveChange()
elif inp.isActionJustPressed("Selection Fill"):
sRect = getSelectionTileRect()
for x in range(sRect.w):
for y in range(sRect.h):
pStr = f"{int(sRect.x + x)};{int(sRect.y + y)}"
currentChangeLog[currentLayer][0][pStr] = drawTiles[currentLayer][pStr] if pStr in drawTiles[currentLayer] else None
drawTiles[currentLayer][pStr] = currentTile
currentChangeLog[currentLayer][1][pStr] = currentTile
saveChange()
elif inp.isActionJustPressed("Selection Autotile"):
sRect = getSelectionTileRect()
for x in range(sRect.w):
for y in range(sRect.h):
pStr = f"{int(sRect.x + x)};{int(sRect.y + y)}"
if pStr in drawTiles[currentLayer]:
surrounding = getSurroundingBitwise(sRect.x + x, sRect.y + y)
t = autotiles[surrounding] if surrounding in autotiles else defaultAutotile
currentChangeLog[currentLayer][0][pStr] = drawTiles[currentLayer][pStr]
drawTiles[currentLayer][pStr] = t
currentChangeLog[currentLayer][1][pStr] = t
saveChange()
else:
if inp.isMouseButtonPressed(0):
if clampedListMousePos not in extraData[extraDataKeys[currentTile]]:
extraData[extraDataKeys[currentTile]].append(clampedListMousePos)
elif inp.isMouseButtonPressed(2):
if clampedListMousePos in extraData[extraDataKeys[currentTile]]:
extraData[extraDataKeys[currentTile]].remove(clampedListMousePos)
# TILE VIEW DRAW
if inp.isActionJustPressed("Grid"):
gridVisible = not gridVisible
tileView.fill(tileViewCol)
if gridVisible:
#pos = (-scroll.x % tileSize, -scroll.y % tileSize)
tileView.blit(gridSurf, pygame.math.Vector2((-scroll.x % tileSize - tileSize, -scroll.y % tileSize - tileSize)))
for i, layer in enumerate(drawTiles):
tempSurf = pygame.Surface(tileViewDim).convert()
tempSurf.set_colorkey((0,0,0))
for pStr, imgIndex in layer.items():
pStr = pStr.split(';')
tilePos = pygame.math.Vector2((int(pStr[0]), int(pStr[1])))
tempSurf.blit(tileImgs[imgIndex], tilePos * tileSize - scroll)
if i != currentLayer:
tempSurf.set_alpha(64)
tileView.blit(tempSurf, (0,0))
for i, tiles in enumerate(extraData.values()):
for pos in tiles:
tileView.blit(extraDataAlphaImgs[i], (pos[0] - scroll.x, pos[1] - scroll.y))
if editState != EditStates.BOX_SELECT and prevState != EditStates.BOX_SELECT: tileView.blit(tilePreviewSurf, (tileMousePos * tileSize) - scroll)
else:
r = getSelectionTileRect()
pygame.draw.rect(tileView, (255,255,255), (r.x * tileSize - scroll.x, r.y * tileSize - scroll.y, r.w * tileSize, r.h * tileSize), width=1)
# SIDEBAR DRAW
sideBar.fill(sideBarCol)
pygame.draw.rect(sideBar, currentTS.col, currentTS.rect)
sideBar.blit(currentTS.surf, (currentTS.pos[0], currentTS.pos[1] - currentTS.scroll))
stRect = currentTS.rects[currentTile % len(currentTS.rects)] # Selected tile rect
pygame.draw.rect(sideBar, (0,255,255), (stRect[0] - 1, stRect[1] - 1, stRect[2] + 1, stRect[3] + 1), width=1)
#for r in tselectionRects:
# pygame.draw.rect(sideBar, (0,255,0), r)
pygame.draw.rect(sideBar, sideBarCol, (0, 0, sideBarDim[0], currentTS.rect.y))
pygame.draw.rect(sideBar, sideBarCol, (0, currentTS.rect.bottom, sideBarDim[0], sideBarDim[1] - currentTS.rect.bottom))
sideBar.blit(text.createTextSurf(f"({tileMousePos.x},{tileMousePos.y})"), (2, 2))
sideBar.blit(text.createTextSurf(f"Layer: {currentLayer}"), (2, 18))
sideBar.blit(currentTS.imgs[currentTile], (2, 36))
if extraDataMode:
sideBar.blit(text.createTextSurf(extraDataKeys[currentTile]), (2, sideBar.get_height()-20))
win.blit(tileView, tileViewPos)
win.blit(sideBar, (0,0))
pygame.display.update()
pygame.quit()
| 2.796875 | 3 |
upbit/migrations/0016_auto_20210618_0735.py | lindychi/longbit | 0 | 12766060 | <filename>upbit/migrations/0016_auto_20210618_0735.py
# Generated by Django 3.2 on 2021-06-18 07:35
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration: adds ``CoinMarket.ticker_update`` and
    resets the default on ``Market.update_date``.

    NOTE(review): both defaults are frozen timestamps captured at
    ``makemigrations`` time (a common auto-generation artifact); they are
    applied once to existing rows, not evaluated per-insert.
    """
    dependencies = [
        ('upbit', '0015_auto_20210618_0725'),
    ]
    operations = [
        migrations.AddField(
            model_name='coinmarket',
            name='ticker_update',
            # Static default used to back-fill existing rows.
            field=models.DateTimeField(default=datetime.datetime(2021, 6, 18, 7, 35, 0, 604091, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='market',
            name='update_date',
            # Static default; note it is a week *earlier* than the field above.
            field=models.DateTimeField(default=datetime.datetime(2021, 6, 11, 7, 35, 0, 603556, tzinfo=utc)),
        ),
    ]
| 1.679688 | 2 |
smartshark/migrations/0006_auto_20160607_1703.py | benjaminLedel/serverSHARK | 1 | 12766061 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-07 15:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: introduces the ``Project`` model and links
    each ``PluginExecution`` to a project via a required foreign key.
    """
    dependencies = [
        ('smartshark', '0005_auto_20160607_1657'),
    ]
    operations = [
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('url', models.URLField()),
            ],
        ),
        migrations.AddField(
            model_name='pluginexecution',
            name='project',
            # default=None + preserve_default=False: the default is only used
            # to populate existing rows during the migration itself.
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='smartshark.Project'),
            preserve_default=False,
        ),
    ]
| 1.664063 | 2 |
iiopoll2Influx.py | wiederma/roomclimate | 0 | 12766062 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Requires python3 package for influxDB to import InfluxDBClient:
sudo apt install python3-influxdb
API documentation for python InfluxDBClient:
https://influxdb-python.readthedocs.io/en/latest/api-documentation.html#
users = db_client.get_list_users()
print (users)
API documentation for influxdb
https://docs.influxdata.com/influxdb/v1.4/introduction/
conntect to command line interface of influxdb:
influx
> show users
> show databases
> use <DATABASE>
> show series
'''
import time
import json
import sys
import argparse
from influxdb import InfluxDBClient
def main():
    """Read JSON sensor records from stdin and push them to an InfluxDB.

    Each stdin line is expected to be a JSON object of the form::

        {
            "location": "wohnzimmer",
            "sensor": "dht11@0",
            "host": "the-crowsnest",
            "time": "2018-02-18T17:21:46.617120",
            "temperature": "18000",
            "pressure": null,
            "humidity_relative": "45000"
        }

    Records are written as the "roomclimate" measurement, tagged by
    host/sensor/location.  In ``--testing`` mode records are only echoed to
    stderr and no database connection is made.

    Raises:
        OSError: if the InfluxDB connection cannot be established.
    """
    parser = argparse.ArgumentParser(
        description='''This application reads json from std in,
        and pushes received data to an InfluxDB''',
    )
    parser.add_argument(
        '--db_name',
        type=str,
        default='',
        help='the name of the InfluxDB data is written to',
    )
    parser.add_argument(
        '--db_host',
        type=str,
        default='localhost',
        help='the host providing the influxdb',
    )
    parser.add_argument(
        '--db_port',
        type=int,
        default=8086,
        help='the port of the influxdb',
    )
    parser.add_argument(
        '--db_user',
        type=str,
        default='',
        help='the user allowed to write to the InfluxDB',
    )
    parser.add_argument(
        '--db_password',
        type=str,
        default='',
        help='the passwort for the specified user to access the InfluxDB',
    )
    parser.add_argument(
        '--testing',
        action='store_true',
        default=False,
        help='enabling testing mode without writing to InfluxDB',
    )
    args = parser.parse_args()
    #
    # Try to establish a DB connection (skipped entirely in testing mode).
    #
    if args.testing:
        print('testing mode, no DB used ...', file=sys.stderr)
    else:
        print('connection to InfluxDB ...', file=sys.stderr)
        try:
            print('db_host: ' + args.db_host, file=sys.stderr)
            print('db_name: ' + args.db_name, file=sys.stderr)
            db_client = InfluxDBClient(
                args.db_host,
                args.db_port,
                args.db_user,
                args.db_password,
                args.db_name
            )
        except Exception as e:
            print(e, file=sys.stderr)
            # Chain the original cause so the traceback shows why it failed.
            raise OSError('connection to InfluxDB cannot be established!') from e
        print('... connection established', file=sys.stderr)
        print('... waiting for incomming data', file=sys.stderr)
    #
    # Read from stdin and write to InfluxDB.  The outer loop exists so that
    # a single malformed line does not terminate the whole consumer: an
    # exception aborts the inner for-loop, gets logged, and reading resumes
    # from the current stdin position.
    #
    # Expected write_points() payload shape:
    # json_body = [
    #     {
    #         "measurement": "cpu_load_short",
    #         "tags": {"host": "server01", "region": "us-west"},
    #         "time": "2009-11-10T23:00:00Z",
    #         "fields": {"Float_value": 0.64, "Int_value": 3},
    #     }
    # ]
    while True:
        try:
            for line in sys.stdin:
                # json.loads is for loading from strings
                data = json.loads(line)
                mymeasurement = "roomclimate"
                myhost = data['host']
                mysensor = data['sensor']
                mylocation = data['location']
                mytime = data['time']
                # sensor readings are pushed to database as they are
                # further processing needs to be done by display / grafana
                if data['temperature'] is not None:
                    mytemperature = int(data['temperature'])
                else:
                    # catch edge case, if sensor has no temperature
                    mytemperature = 0
                if data['humidity_relative'] is not None:
                    # BME280 Sensor has humidity_relative data as float
                    # 2019-02-06: nach update hat der BME280 die Luftfeuchtigkeit im passenden Format, *1000 nun nicht mehr erforderlich
                    # if mysensor == 'bme280':
                    #     myhumidity = int(float(data['humidity_relative'])*1000)
                    # else:
                    myhumidity = int(data['humidity_relative'])
                else:
                    # catch edge case, if sensor has no humidity_relative
                    myhumidity = 0
                if data['pressure'] is not None:
                    # BME280 Sensor has pressure data as float
                    if mysensor == 'bme280':
                        mypressure = int(float(data['pressure'])*1000)
                    else:
                        mypressure = int(data['pressure'])
                else:
                    # catch edge case, if sensor has no pressure
                    mypressure = 0
                json_body = [
                    {
                        "measurement": mymeasurement,
                        "tags": {
                            "host": myhost,
                            "sensor": mysensor,
                            "location": mylocation,
                        },
                        "time": mytime,
                        "fields": {
                            "temperature": mytemperature,
                            "humidity_relative": myhumidity,
                            "pressure": mypressure,
                        },
                    }
                ]
                if args.testing:
                    print("measurement:"+ mymeasurement, file=sys.stderr)
                    print("host:" + myhost, file=sys.stderr)
                    print("sensor:" + mysensor, file=sys.stderr)
                    print("location:" + mylocation, file=sys.stderr)
                    print("time:" + mytime, file=sys.stderr)
                    print("temperature:"+ str(mytemperature), file=sys.stderr)
                    print("humidity_relative:"+ str(myhumidity), file=sys.stderr)
                    print("pressure:" + str(mypressure), file=sys.stderr)
                    print("\n", file=sys.stderr)
                else:
                    db_client.write_points(json_body)
        except Exception as e:
            print(e, file=sys.stderr)
            continue
        # stdin is exhausted (EOF).  The original code looped forever here,
        # busy-spinning on an empty iterator; exit cleanly instead.
        break
#
# main function
#
#
# main function
#
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the consumer; say goodbye on stderr.
        print('Shutting down iiopoll2influx.py', file=sys.stderr)
    except Exception as e:
        # NOTE(review): any other failure is only printed (to stdout) and the
        # process exits 0 — consider logging to stderr / non-zero exit.
        print(e)
| 2.8125 | 3 |
my_app/__main__.py | fschuch/fastapi_project | 0 | 12766063 | <gh_stars>0
import uvicorn
# Import the FastAPI application object from the package root.
from . import app
# Serve the app with uvicorn's default host/port (127.0.0.1:8000).
uvicorn.run(app)
botlet/utils/queue.py | Youka/botlet | 0 | 12766064 | <reponame>Youka/botlet<filename>botlet/utils/queue.py
""" Convenience queues """
from queue import Empty, Full, Queue
from typing import Generic, Optional, TypeVar
# Any type variable for following generics
# Any type variable for following generics
T = TypeVar('T')  # pylint: disable=C0103


class SafeQueue(Generic[T]):
    """A thin, generic wrapper around :class:`queue.Queue`.

    Instead of raising :class:`queue.Empty` / :class:`queue.Full`, the
    accessors report failure via ``None`` / ``False`` return values.
    """

    def __init__(self, maxsize: int = 256):
        # The underlying thread-safe FIFO; bounded to *maxsize* items.
        self._items = Queue(maxsize)

    def get(self, timeout: Optional[float] = None) -> Optional[T]:
        """Pop the next item, or return ``None`` if none arrives in time."""
        try:
            if timeout is None:
                return self._items.get_nowait()
            return self._items.get(timeout=timeout)
        except Empty:
            return None

    def put(self, item: T) -> bool:
        """Enqueue *item*; ``True`` on success, ``False`` when the queue is full."""
        try:
            self._items.put_nowait(item)
        except Full:
            return False
        return True
| 2.609375 | 3 |
tests/test_set_questions_file_path.py | MTelford/diary-tool | 0 | 12766065 | <filename>tests/test_set_questions_file_path.py
import unittest
import pathlib
from diary_tool_main.diary_tool import set_questions_file_path
class TestSetQuestionsFilePath(unittest.TestCase):
    """Unit test for :func:`diary_tool_main.diary_tool.set_questions_file_path`."""

    def test_set_questions_file_path(self):
        """
        Tests that function sets file path for questions
        correctly
        """
        # actual path to function
        main_path = set_questions_file_path()
        current_directory = str(pathlib.Path(__file__).parent.resolve())
        # our fabricated path to test against actual path in main
        # NOTE(review): str.replace substitutes *every* occurrence of
        # 'tests' — this breaks if any parent directory name contains
        # 'tests'; consider replacing only the final path component.
        func_path = current_directory.replace(
            'tests', 'diary_tool_main/questions')
        self.assertEqual(main_path, func_path)


# Allow running this test module directly with `python test_... .py`.
if __name__ == '__main__':
    unittest.main()
| 3.0625 | 3 |
models/mlhead_clus_server.py | caifederated/mlhead-release | 1 | 12766066 | <filename>models/mlhead_clus_server.py
# This is my code to train n model and saved them
import random
import os
import tensorflow as tf
import numpy as np
import shutil
import tempfile
from tensorflow.python import pywrap_tensorflow
from baseline_constants import BYTES_WRITTEN_KEY, BYTES_READ_KEY, LOCAL_COMPUTATIONS_KEY
from mh_constants import VARIABLE_PARAMS, MODEL_PARAMS
from mlhead_utilfuncs import input_fn
from kmean_model import KmeanModel
class Mlhead_Clus_Server:
    """Federated-learning server that clusters clients by their model weights.

    Keeps per-round client selections, drives per-client checkpointing, and
    groups clients either with a custom :class:`KmeanModel` or with
    TensorFlow's ``KMeansClustering`` estimator.
    """

    def __init__(self, client_model, dataset, model, num_clusters, num_clients):
        """Set up paths, dimensions and (optionally) the clustering model.

        Args:
            client_model: model object providing ``get_params()``.
            dataset: dataset name, used to look up model parameters.
            model: model name, used to look up model parameters.
            num_clusters: number of clusters; ``-1`` disables the global
                model snapshot (the rest of the setup still runs).
            num_clients: number of clients handed to :class:`KmeanModel`.
        """
        self.modeldir = "/scratch/tmpmodel"
        # exist_ok avoids the check-then-create race of the original code.
        os.makedirs(self.modeldir, exist_ok=True)
        # if num_clusters < 2:
        #     raise Exception("Sorry, cluster number must be 2 or more")
        if num_clusters > 10:
            raise ValueError("Sorry, cluster number must less than 10")
        if num_clusters != -1:
            self.model = client_model.get_params()  # global model of the server.
        self.selected_clients = []  # this variable keeps random, unclustered clients
        self.set_model_path(dataset, model)
        self._x_dimensions = self.get_model_x_dimensions(dataset, model)
        self._variable = self.get_model_variable(dataset, model)
        self._num_clusters = num_clusters
        self._learned = None        # latest per-sample cluster assignments
        self._shuffledkeys = None   # client-id ordering used for clustering
        self._clusterModel = KmeanModel(num_clients, self._x_dimensions,
                                        self._num_clusters, 99999)
        # cluster_membership is a list of cluster dictionaries, each:
        #   {'member': list of clients, 'center': a center vector,
        #    'attention': an attention vector,
        #    'loss': mean validation loss of the cluster's clients}
        self._cluster_membership = list()
        for _ in range(num_clusters):
            self._cluster_membership.append(
                {"member": [], "center": None, "attention": [], "loss": None})

    @property
    def path(self):
        """Checkpoint directory for this dataset/model pair."""
        return self._path

    @property
    def x_dimensions(self):
        """Flattened feature dimensionality (rows * cols of MODEL_PARAMS)."""
        return self._x_dimensions

    @property
    def variable(self):
        """Checkpoint variable name(s) configured for this dataset/model."""
        return self._variable

    @property
    def selected(self):
        """A shallow copy of the currently selected clients."""
        return [c for c in self.selected_clients]

    @property
    def clusters(self):
        """A shallow copy of the cluster-membership dictionaries.

        BUGFIX: the original read ``self._clusters_membership`` (a
        nonexistent attribute), which raised AttributeError on access.
        """
        return [c for c in self._cluster_membership]

    def select_clients(self, my_round, possible_clients):
        """Selects num_clients clients randomly from possible_clients.

        Note that within function, num_clients is set to
        min(num_clients, len(possible_clients)).

        Args:
            my_round: round index, used to derive a deterministic seed.
            possible_clients: Clients from which the server can select.
        Return:
            list of (num_train_samples, num_test_samples)
        """
        num_clients = len(possible_clients)
        np.random.seed(my_round * 50)
        self.selected_clients = np.random.choice(possible_clients, num_clients, replace=False)
        return [(c.num_train_samples, c.num_test_samples) for c in self.selected_clients]

    def train_model(self):
        """Checkpoints each selected client's model, printing 25%-progress."""
        clients = self.selected_clients
        tot_clients = len(clients)
        done_idx = 0
        for counter, c in enumerate(clients, 1):
            # Trick: saving/reloading forces each client's graph to be
            # re-initialized rather than sharing one session.
            c_file = self.get_chkpfile('write_%s.ckpt' % c.id)
            # Faster than training: values are freshly initialized with a
            # normal distribution, no first-round training needed.
            if os.path.exists(c_file):
                os.remove(c_file)
            c.save_model(c_file)
            done_percentil = float(done_idx + 1) * 25
            if self.get_percentil(counter, tot_clients) >= done_percentil:
                done_idx += 1
                print("%g%% clients has done" % done_percentil)

    def get_percentil(self, counter, len):
        """Return ``(counter + 1) / len`` as a percentage.

        NOTE(review): the parameter named ``len`` shadows the builtin, and
        callers already pass a 1-based counter, so the extra ``+ 1`` makes
        the reported progress run one step ahead — confirm intent before
        changing; the name is kept for signature compatibility.
        """
        fraction = float((counter + 1) / len)
        return fraction * 100

    def set_model_path(self, dataset, model):
        """Create (if needed) and remember the per-model checkpoint path."""
        self._path = os.path.join('/scratch/leaf/ckpt_runtime', dataset, model)
        os.makedirs(self._path, exist_ok=True)

    def get_model_x_dimensions(self, dataset, model):
        """Flattened dimensionality from MODEL_PARAMS['<dataset>.<model>']."""
        key = "%s.%s" % (dataset, model)
        d = MODEL_PARAMS[key]
        return d[0] * d[1]

    def get_model_variable(self, dataset, model):
        """Variable spec from VARIABLE_PARAMS['<dataset>.<model>']."""
        key = "%s.%s" % (dataset, model)
        v = VARIABLE_PARAMS[key]
        return v

    def get_chkpfile(self, id_ckpt):
        """Absolute checkpoint filename for the given checkpoint id."""
        return os.path.join(self._path, id_ckpt)

    def train_iteation(self, data):
        """Run one KMeansClustering training pass; return the score.

        (Name kept as-is — 'iteation' — for backward compatibility.)
        """
        train_data = lambda: input_fn(data)
        self._kmeans.train(train_data)
        cluster_centers = self._kmeans.cluster_centers()
        # print("cluster centers:", cluster_centers)
        score = self._kmeans.score(train_data)
        return score

    def get_init_point_data(self):
        """Assign each selected client a random feature vector.

        Returns a dict of client-id -> uniform(-0.149, 0.149) vector and
        records the key ordering in ``self._shuffledkeys``.
        """
        # points = np.random.normal(loc=0.5, scale=0.5, size=(len(self.selected), self._x_dimensions))
        points = np.random.uniform(-0.149, 0.149,
                                   (len(self.selected), self._x_dimensions))
        c_dict = {}
        for x, client in enumerate(self.selected):
            c_dict[client.id] = points[x]
        self._shuffledkeys = list(c_dict.keys())
        return c_dict

    def run_clustering(self, prev_score, data):
        """Cluster clients with the custom KmeanModel.

        Args:
            prev_score: unused, kept for interface symmetry with train_kmeans.
            data: dict of client-id -> feature vector.
        Return:
            (None, list of (cluster_size, member_clients))
        """
        random.shuffle(self._shuffledkeys)
        features = list()
        for x in self._shuffledkeys:
            features.append(data[x])
        labels = self._clusterModel.assign_clusters(np.array(features))
        return None, self.eval_clustermembership(labels)

    def train_kmeans(self, prev_score, data):
        """Train a fresh tf.contrib KMeansClustering estimator on *data*.

        We are using pre-made tensorflow estimators to train and predict.

        Args:
            prev_score: a sum of the distance between each sample
                to their nearest center (currently unused).
            data: list of weights of user model
        Return:
            (None, updated score)
        """
        seed = np.random.randint(5667799881, size=1)[0]
        temp_name = next(tempfile._get_candidate_names())
        temp_modeldir = os.path.join(self.modeldir, temp_name)
        os.makedirs(temp_modeldir, exist_ok=True)
        self._kmeans = tf.contrib.factorization.KMeansClustering(
            model_dir=temp_modeldir,
            random_seed=seed,
            num_clusters=self._num_clusters, use_mini_batch=False)
        # composed of weights of every client model
        score = self.train_iteation(data)
        # Evaluate the samples to compute the distance between each sample
        # and each center: one row per sample, one column per center.
        y = self._kmeans.transform(lambda: input_fn(data))
        point_distance = list(y)
        self._learned = np.argmin(point_distance, 1)
        # removing not used files of this model
        # shutil.rmtree(temp_modeldir, ignore_errors=True)
        return None, score

    def eval_clustermembership(self, labels):
        """Rebuild cluster membership from per-sample *labels*.

        Each sample's label is the index of its nearest cluster center.
        Sets ``self._clusterModel.first = True`` when any cluster ends up
        empty, and returns a list of (num_clients, clients) per cluster.
        """
        for _, cluster in enumerate(self._cluster_membership):
            cluster["member"] = list()
        sort_clients = list()
        # Re-align the client objects with the shuffled key order used
        # when the feature matrix was built.
        for i in self._shuffledkeys:
            for client in self.selected_clients:
                if client.id == i:
                    sort_clients.append(client)
                    break
        for x in range(len(labels)):
            grp_id = labels[x]
            self._cluster_membership[grp_id]["member"].append(sort_clients[x])
        clus = [(len(cluster["member"]), cluster["member"])
                for cl_id, cluster in enumerate(self._cluster_membership)]
        for c in clus:
            # Flag the model for re-initialization if any cluster is empty.
            if c[0] == 0:
                self._clusterModel.first = True
                break
        return clus
| 2.765625 | 3 |
ml/rl/polyfill/decorators.py | sailfish009/ReAgent | 0 | 12766067 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
class ClassPropertyDescriptor:
    """Descriptor exposing a classmethod as a read-only class-level property."""

    def __init__(self, fget):
        # *fget* is expected to be a classmethod (or staticmethod) wrapper.
        self.fget = fget

    def __get__(self, obj, klass=None):
        """Call the wrapped getter bound to *klass* (or to type(obj))."""
        owner = klass if klass is not None else type(obj)
        bound_getter = self.fget.__get__(obj, owner)
        return bound_getter()
def classproperty(func):
    """
    Allow for getter property usage on classmethods

    cf: https://stackoverflow.com/questions/5189699/how-to-make-a-class-property
    """
    # Wrap plain functions in classmethod; pass class/static methods through.
    wrapped = func if isinstance(func, (classmethod, staticmethod)) else classmethod(func)
    return ClassPropertyDescriptor(wrapped)
| 3.109375 | 3 |
wt-app/apps/wt_articles/management/commands/import_turk_result.py | NickRuiz/wikitrans | 1 | 12766068 | <reponame>NickRuiz/wikitrans
from django.db.transaction import commit_on_success
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.utils.encoding import smart_str
from wt_articles.models import SourceSentence, SourceArticle, TranslatedSentence, TranslatedArticle
from wt_articles.management.commands.import_turk_source import unicode_csv_reader
import sys
import os
import csv
import re
from optparse import make_option
from datetime import datetime
# Ensure `set` exists: builtin on Python 2.4+, imported from `sets` before.
try:
    set
except NameError:
    from sets import Set as set # Python 2.3 fallback
class Command(BaseCommand):
    """Django (Python 2) management command that imports a Mechanical Turk
    translation result CSV into SourceArticle/SourceSentence and
    TranslatedArticle/TranslatedSentence records.
    """
    option_list = BaseCommand.option_list + (
        make_option('--source-lang', dest='source_lang',
            help='The two-letter language expression of the source language: e.g. hi'),
        make_option('--target-lang', dest='target_lang',
            help='The two-letter language expression of the target language: e.g. en'),
        make_option('--result-file', dest='result_file',
            help='The result input for Amazon: Hindi-Batch_188107_result.csv'),
    )
    help = 'Installs the named mechanical turk result files in the database.'
    args = "--source-lang <lang> --target-lang <lang> --result-file <file>"

    def handle(self, *labels, **options):
        """Validate the CLI options, then delegate to parse_result_file."""
        from django.db.models import get_apps
        from django.core import serializers
        from django.db import connection, transaction
        from django.conf import settings

        self.style = no_style()

        source_lang = options.get('source_lang', None)
        target_lang = options.get('target_lang', None)
        result_file = options.get('result_file', None)

        # Both language codes must be present and exactly two letters.
        if not source_lang:
            print 'source-lang is not specified'
            return
        elif len(source_lang) != 2:
            print 'source-lang ' + source_lang + ' is not valid'
            return
        elif not target_lang:
            print 'target-lang is not specified'
            return
        elif len(target_lang) != 2:
            print 'target-lang ' + target_lang + ' is not valid'
            return

        if not os.path.exists(result_file):
            print 'result-file does not exist'
            return

        # Generate dictionary of article id => article title
        return self.parse_result_file(result_file, source_lang, target_lang)

    @commit_on_success
    def parse_result_file(self, result_file, source_lang, target_lang):
        """Stream the Turk result CSV and upsert articles/sentences.

        Each CSV row carries up to ten (segment id, segment, translation)
        triples; segment ids have the form '<article_id>_<segment_id>'.
        Runs inside a single DB transaction (commit_on_success).
        """
        f = open(result_file, 'r')
        csv_reader = unicode_csv_reader(f)
        headers = csv_reader.next()
        header_map = {}
        for i,h in enumerate(headers):
            header_map[h] = i
        # not assuming a specific order for the fields
        sa = None          # SourceArticle currently being filled
        cur_aid = -1       # article id of `sa`; -1 means "none yet"
        segment_ids = [header_map[x] for x in ['Input.seg_id%d' % i for i in range(1,11)]]
        segments = [header_map[x] for x in ['Input.seg%d' % i for i in range(1,11)]]
        translations = [header_map[x] for x in ['Answer.translation%d' % i for i in range(1,11)]]
        ta = None          # TranslatedArticle paired with `sa`
        has_title = 'Input.article' in header_map
        for line in csv_reader:
            if has_title:
                title = line[header_map['Input.article']] + ' (translated)'
            else:
                title = 'Noname (translated)'
            approved = (line[header_map['AssignmentStatus']] == 'Approved')
            for i in range(10):
                try:
                    (aid, seg_id) = line[segment_ids[i]].split('_')
                except ValueError:
                    # treating this basically like an eof
                    break
                # A new article id starts a new SourceArticle/TranslatedArticle
                # pair, saving the previous ones first.
                if cur_aid != int(aid):
                    if sa:
                        # save the previous SourceArticle
                        sa.save(manually_splitting=True)
                    # check if the document is already imported
                    if not has_title:
                        title = aid + ' ' + title
                    try:
                        sa = SourceArticle.objects.filter(language = source_lang).get(doc_id = aid)
                        sa.sentences_processed = True
                        cur_aid = int(aid)
                        sa.language = source_lang
                        sa.doc_id = aid
                        sa.timestamp = datetime.now()
                        sa.title = title
                        sa.save(manually_splitting=True)
                        # get an id for the SourceArticle instance
                    except SourceArticle.DoesNotExist:
                        # make a new sa object
                        sa = SourceArticle()
                        sa.sentences_processed = True
                        cur_aid = int(aid)
                        language = source_lang
                        sa.language = language
                        sa.doc_id = aid
                        sa.timestamp = datetime.now()
                        sa.title = title
                        sa.save(manually_splitting=True)
                        # get an id for the SourceArticle instance
                    if ta:
                        # save the previous target article
                        ta.save()
                    # check if the target article has been translated and imported
                    try:
                        ta = TranslatedArticle.objects.filter(article = sa).get(language = target_lang)
                        # if there is one, do not touch unknown fields.
                        ta.title = title
                        ta.timestamp = datetime.now()
                        ta.language = target_lang
                        ta.approved = approved
                        ta.save()
                    except TranslatedArticle.DoesNotExist:
                        # make a new TranslatedSentence object
                        ta = TranslatedArticle()
                        ta.article = sa
                        ta.title = title
                        ta.timestamp = datetime.now()
                        ta.language = target_lang
                        ta.approved = approved
                        ta.save()
                # Per-segment tag column (optional) marks paragraph ends.
                end_of_paragraph = True
                tag_id = 'Input.tag%d' % i
                if tag_id in header_map:
                    tag = line[header_map[tag_id]]
                    end_of_paragraph = re.search("LastSentence", tag) or False
                seg = line[segments[i]]
                try:
                    # do not touch end_of_paragraph because we do not know
                    ss = sa.sourcesentence_set.get(segment_id = seg_id)
                    ss.text = seg
                    ss.segment_id = seg_id
                    ss.end_of_paragraph = end_of_paragraph
                    ss.save()
                except SourceSentence.DoesNotExist:
                    ss = SourceSentence()
                    ss.article = sa
                    ss.text = seg
                    ss.segment_id = seg_id
                    ss.end_of_paragraph = end_of_paragraph
                    ss.save()
                # NOTE(review): assumes SourceArticle.source_text defaults to
                # an empty string on new instances — confirm on the model.
                sa.source_text += seg + u'\n'
                translation = line[translations[i]]
                try:
                    ts = ta.sentences.get(segment_id = seg_id)
                    ts.source_sentence = ss
                    ts.text = translation
                    ts.translated_by = line[header_map['WorkerId']]
                    ts.language = target_lang
                    # Parse the Turk SubmitTime, e.g. 'Mon Feb 18 17:21:46 PST 2018'.
                    date_string = line[header_map['SubmitTime']]
                    df = date_string.split(' ')
                    tf = df[3].split(':')
                    ts.translation_date = datetime(int(df[5]), ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'].index(df[1]) + 1,
                                                   int(df[2]), int(tf[0]), int(tf[1]), int(tf[2]))
                    ts.approved = approved
                    ts.end_of_paragraph = ss.end_of_paragraph
                    ts.save()
                except TranslatedSentence.DoesNotExist:
                    ts = TranslatedSentence()
                    ts.segment_id = seg_id
                    ts.source_sentence = ss
                    ts.text = translation
                    ts.translated_by = line[header_map['WorkerId']]
                    ts.language = target_lang
                    date_string = line[header_map['SubmitTime']]
                    df = date_string.split(' ')
                    tf = df[3].split(':')
                    ts.translation_date = datetime(int(df[5]), ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'].index(df[1]) + 1,
                                                   int(df[2]), int(tf[0]), int(tf[1]), int(tf[2]))
                    ts.approved = approved
                    ts.end_of_paragraph = ss.end_of_paragraph
                    ts.save()
                ta.sentences.add(ts)
        # Flush the articles still in progress when the CSV ends.
        if sa:
            sa.save(manually_splitting=True)
        if ta:
            ta.save()
| 2.171875 | 2 |
example_site/example_site/urls.py | madtreat/django-address | 0 | 12766069 | <gh_stars>0
from django.contrib import admin
from django.urls import path
from person import views as person
# URL routes for the example site: the person-app home page and the admin.
urlpatterns = [
    path('', person.home, name='home'),
    path('admin/', admin.site.urls),
]
| 1.609375 | 2 |
MineAchieve.py | VanderCat/QuoteVkBot | 0 | 12766070 | <reponame>VanderCat/QuoteVkBot<gh_stars>0
#32 + txtw + 6
#bottom offset 17
from PIL import ImageFont, Image, ImageDraw, ImageFilter
import os
import random
def text_wrap(text, font, writing, max_width):
    """Greedily word-wrap *text* so each rendered line fits *max_width*.

    *writing* is a PIL ``ImageDraw`` object (its ``multiline_textsize``
    measures the widest line when rendered with *font*).  A word that is
    placed and found too wide is moved onto a fresh line.
    Based on https://stackoverflow.com/users/13639308/chris-jones.
    """
    lines = [[]]
    for word in text.split():
        # Tentatively place the word on the current line, then measure.
        lines[-1].append(word)
        rendered = '\n'.join(' '.join(chunk) for chunk in lines)
        if writing.multiline_textsize(rendered, font=font)[0] > max_width:
            # Too wide: move the word onto its own new line.
            lines.append([lines[-1].pop()])
    return '\n'.join(' '.join(chunk) for chunk in lines)
def mk(object, alpha=False):
    """Render a Minecraft-style achievement toast as a PIL image.

    Args:
        object: holder with ``textbody.maintext[0]`` (a string of the form
            "title\\nsubtitle[\\nitem-name]") and ``settings.size`` (integer
            upscaling factor).  (Parameter name kept for compatibility even
            though it shadows the builtin.)
        alpha: when True the background is fully transparent instead of white.

    Returns:
        The composed image, nearest-neighbour upscaled by ``settings.size``.
    """
    # Setup font
    Font = ImageFont.truetype("font/mineedit.ttf", 8)
    # Setup message: line 0 = title, line 1 = subtitle, optional line 2 = item.
    print(object.textbody.maintext[0])
    message = object.textbody.maintext[0].split("\n")
    print(message)
    # Throwaway 1x1 canvas used only to measure text extents.
    img = Image.new("1", (1, 1), 1)
    d = ImageDraw.Draw(img)
    txtmeasure1 = d.multiline_textsize(message[0],Font)
    txtmeasure2 = d.multiline_textsize(message[1],Font)
    # Pick layout widths from whichever line dominates; #32 + txtw + 6.
    if txtmeasure1[0]+32>txtmeasure2[0]:
        class width:
            front = txtmeasure1[0]
            back = txtmeasure1[0] +32 +6
            backend = -4
            image = txtmeasure1[0] +32 +6
    else:
        class width:
            front = txtmeasure2[0] -29
            back = txtmeasure2[0]
            backend = +5
            image = txtmeasure2[0] +9
    print(width)
    # Slice the toast textures (front/back plates and the icon blob) from the atlas.
    texture_atlas = Image.open("Images/index.png")
    class texture:
        class front:
            start = texture_atlas.crop((0,3,4,23))
            loop = texture_atlas.crop((4,3,5,23))
            end = texture_atlas.crop((196,3,200,23))
        class back:
            start = texture_atlas.crop((0,55,4,75))
            loop = texture_atlas.crop((4,55,5,75))
            end = texture_atlas.crop((196,55,200,75))
        blob = texture_atlas.crop((1,155,25,179))
    # prepare image (transparent vs. opaque white background)
    if alpha:
        color = (0,0,0,0)
    else:
        color = (255,255,255,255)
    img = Image.new("RGBA", (width.image, 37), color) #(32 + txtmeasure[0] + 6)
    d = ImageDraw.Draw(img)
    # Back plate: start cap, stretched loop, end cap.
    temp = Image.new("RGBA", img.size, (0,0,0,0))
    temp.paste(texture.back.start, (0,17))
    temp.paste(texture.back.loop.resize((width.back+1,20),0), (4,17))
    temp.paste(texture.back.end, (width.back+width.backend,17))
    img.alpha_composite(temp)
    # Front plate on top of the back plate.
    temp = Image.new("RGBA", img.size, (0,0,0,0))
    temp.paste(texture.front.start, (0,2))
    temp.paste(texture.front.loop.resize((32+width.front+6,20),0), (4,2))
    temp.paste(texture.front.end, (32+width.front+2,2))
    img.alpha_composite(temp)
    # Decorative blob in the icon corner.
    temp = Image.new("RGBA", img.size, (0,0,0,0))
    temp.paste(texture.blob, (4,0))
    img.alpha_composite(temp)
    # Pick a random item icon as the fallback.
    rand = os.listdir(path="Images/items/")
    rand = rand[random.randint(0,len(rand)-1)]
    if len(message) == 2:
        # BUGFIX: the original appended the `random` *module* here instead of
        # the chosen filename, which always forced the fallback branch below.
        message.append(rand)
    else:
        message[2] = message[2]+".png"
    temp = Image.new("RGBA", img.size, (0,0,0,0))
    try:
        temp.paste(Image.open("Images/items/"+message[2]), (8,4))
    except Exception:
        # Requested icon missing/unreadable: fall back to the random one.
        temp.paste(Image.open("Images/items/"+rand), (8,4))
    img.alpha_composite(temp)
    # Title with a 1px drop shadow (#7shad 6txt), then the green subtitle.
    d.text((33,9),message[0],fill=(0,0,0),font=Font)
    d.text((32,8),message[0],fill=(255,255,255),font=Font)
    d.text((5,25),message[1],fill=(110,235,110),font=Font)
    return img.resize((img.size[0]*object.settings.size,img.size[1]*object.settings.size),0)
if __name__ == "__main__":
class obj:
class textbody:
maintext = ["цвл майsdasdasн\nверхний "]
class settings:
size = 8
width = 512
minheight = 128
margin = [40,40,40,40] # [top,right,bottom,left]
mk(obj).show()#.save("test.png") | 2.65625 | 3 |
test/test_harness.py | DivyanshuLohani/doctor | 11 | 12766071 | <reponame>DivyanshuLohani/doctor<filename>test/test_harness.py
import pytest
from doctor.docs.base import BaseHarness
from doctor.resource import ResourceAnnotation
from .types import Age, Color, ItemId, Name
from .utils import add_doctor_attrs
@pytest.fixture
def annotation():
    """Build a ResourceAnnotation around a doctor-typed no-op GET handler."""
    def logic(age: Age, name: Name, item_id: ItemId=None, color: Color='green'):
        pass
    # Attach the doctor metadata the harness expects on logic functions.
    logic = add_doctor_attrs(logic)
    annotation = ResourceAnnotation(logic, 'GET')
    return annotation
def test_get_example_values(annotation):
    """_get_example_values should yield each parameter type's example value."""
    harness = BaseHarness('/api')
    route = '/foo'
    actual = harness._get_example_values(route, annotation)
    # Expected values come from the Age/Color/ItemId/Name type examples.
    expected = {
        'age': 34,
        'color': 'blue',
        'item_id': 1,
        'name': 'John',
    }
    assert expected == actual
| 2.359375 | 2 |
tests/data/pipeline/test_regex.py | vikigenius/fintopics | 0 | 12766072 | # -*- coding: utf-8 -*-
import pytest
from fintopics.data.pipeline.regex import RegexExtractionPipeline
@pytest.fixture()
def regex_pipeline():
    """Provide a fresh RegexExtractionPipeline instance per test."""
    return RegexExtractionPipeline()
@pytest.mark.asyncio
async def test_header_removal(datadir, regex_pipeline):
    """The pipeline should strip a header-only document down to nothing."""
    header_file = datadir / 'header.txt'
    text = header_file.read_text()
    actual_text = await regex_pipeline.coroutine(text)
    # Everything in the fixture is header material, so nothing should remain.
    assert actual_text['text'].strip() == ''
@pytest.mark.asyncio
async def test_exhibit_removal(datadir, regex_pipeline):
    """The pipeline should strip an exhibit-only document down to nothing."""
    exhibit_file = datadir / 'exhibit.txt'
    text = exhibit_file.read_text()
    actual_text = await regex_pipeline.coroutine(text)
    # Everything in the fixture is exhibit material, so nothing should remain.
    assert actual_text['text'].strip() == ''
| 2.46875 | 2 |
funnel/views/__init__.py | jace/goafunnel | 0 | 12766073 | # -*- coding: utf-8 -*-
from .index import *
from .login import *
from .space import *
from .section import *
from .usergroup import *
from .proposal import *
from .commentvote import *
from .venue import *
from .schedule import *
from .session import *
| 1.03125 | 1 |
modules/callbacks.py | jeusgao/jobot_factory_nlp_simple | 0 | 12766074 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2021-01-05 14:57:15
# @Author : <NAME> (<EMAIL>)
import os
import json
import time
from backend import keras
class TrainingCallbacks(keras.callbacks.Callback):
    """Keras callback that appends JSON-line progress records to
    ``<task_path>/<log_name>_logs.json`` and removes the task's
    ``state.json`` marker when training finishes.
    """

    def __init__(self, task_path='', log_name='training'):
        # task_path: directory holding the log and state files.
        self.task_path = task_path
        # log_name: prefix of the JSON-lines log file.
        self.log_name = log_name

    def on_train_begin(self, logs):
        """Truncate the log file at the start of a training run."""
        with open(f'{self.task_path}/{self.log_name}_logs.json', 'w') as f:
            f.write('')
        # if os.path.exists(f'{self.task_path}/{self.log_name}_logs.json'):
        #     os.remove(f'{self.task_path}/{self.log_name}_logs.json')

    def on_train_end(self, logs):
        """Write the final record and clear the running-state marker."""
        txt = json.dumps({
            'EPOCH': 'Finished',
            'time': time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()),
            # NOTE(review): unlike the other hooks this serializes `logs`
            # directly (not str(logs)) — confirm whether that is intended.
            'scores': logs
        })
        with open(f'{self.task_path}/{self.log_name}_logs.json', 'a') as f:
            f.write(f'{txt}\n')
        # Removing state.json signals to watchers that the task is done.
        os.remove(f'{self.task_path}/state.json')

    def on_epoch_begin(self, epoch, logs):
        """Append an epoch-start record."""
        txt = json.dumps({
            'EPOCH': epoch,
            'state': 'Begin',
            'time': time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime())
        })
        with open(f'{self.task_path}/{self.log_name}_logs.json', 'a') as f:
            f.write(f'{txt}\n')

    def on_epoch_end(self, epoch, logs):
        """Append an epoch-end record including the epoch's metrics."""
        txt = json.dumps({
            'EPOCH': epoch,
            'state': 'end',
            'time': time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()),
            'scores': str(logs)
        })
        with open(f'{self.task_path}/{self.log_name}_logs.json', 'a') as f:
            f.write(f'{txt}\n')

    def on_train_batch_end(self, epoch, logs):
        """Append an (indented) per-batch record; `epoch` is the batch index."""
        txt = json.dumps({
            'batch': epoch,
            'time': time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()),
            'scores': str(logs)
        })
        with open(f'{self.task_path}/{self.log_name}_logs.json', 'a') as f:
            f.write(f'\t{txt}\n')
class EvaluatingCallbacks(keras.callbacks.Callback):
    """Keras callback mirroring TrainingCallbacks for evaluation runs:
    JSON-line records go to ``<task_path>/<log_name>_logs.json`` and the
    ``state.json`` marker is removed when evaluation ends.
    """

    def __init__(self, task_path='', log_name='evaluating'):
        # task_path: directory holding the log and state files.
        self.task_path = task_path
        # log_name: prefix of the JSON-lines log file.
        self.log_name = log_name

    def on_test_begin(self, logs=None):
        """Truncate the log file at the start of an evaluation run."""
        print('Evaluating ...')
        with open(f'{self.task_path}/{self.log_name}_logs.json', 'w') as f:
            f.write('')
        # if os.path.exists(f'{self.task_path}/{self.log_name}_logs.json'):
        #     os.remove(f'{self.task_path}/{self.log_name}_logs.json')

    def on_test_end(self, logs=None):
        """Write the final record and clear the running-state marker."""
        txt = json.dumps({
            'EPOCH': 'Finished',
            'time': time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()),
            'scores': str(logs)
        })
        with open(f'{self.task_path}/{self.log_name}_logs.json', 'a') as f:
            f.write(f'{txt}\n')
        # Removing state.json signals to watchers that the task is done.
        os.remove(f'{self.task_path}/state.json')

    def on_test_batch_end(self, batch, logs=None):
        """Append an (indented) per-batch record with the batch metrics."""
        txt = json.dumps({
            'batch': batch,
            'time': time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()),
            'scores': str(logs)
        })
        with open(f'{self.task_path}/{self.log_name}_logs.json', 'a') as f:
            f.write(f'\t{txt}\n')
| 2.453125 | 2 |
copulpy/clsCES.py | briqInstitute/copulpy | 1 | 12766075 | """Provide the constant elasticity of substitution function."""
import numpy as np
from copulpy.config_copulpy import IS_DEBUG
from copulpy.clsMeta import MetaCls
class CESCls(MetaCls):
    """Constant-elasticity-of-substitution (CES) utility function.

    Evaluates ``discount_factor * (v_1 ** alpha + y_weight * v_2 ** alpha)
    ** (1 / alpha)`` with optional debug-mode sanity checks on inputs and
    output.
    """

    def __init__(self, alpha, y_weight, discount_factor):
        """Store the (non-negative) CES parameters and validate them."""
        self.attr = dict()

        self.attr['discount_factor'] = discount_factor
        self.attr['y_weight'] = y_weight
        self.attr['alpha'] = alpha

        self._check_attributes()

    def evaluate(self, v_1, v_2):
        """Evaluate the CES function at (v_1, v_2); both must be >= 0."""
        self._additional_checks('evaluate_in', v_1, v_2)

        y_weight, discount_factor, alpha = self.get_attr('y_weight', 'discount_factor', 'alpha')

        rslt = (v_1 ** alpha + y_weight * v_2 ** alpha) ** (1 / alpha)
        rslt = discount_factor * rslt

        # BUGFIX: the result must be passed along — the 'evaluate_out' branch
        # unpacks exactly one argument (`rslt, = args`), so the original
        # zero-argument call raised ValueError whenever IS_DEBUG was enabled.
        self._additional_checks('evaluate_out', rslt)

        return rslt

    def _check_attributes(self):
        """Check that all stored CES parameters are non-negative."""
        alpha, y_weights, discount_factors = self.get_attr('alpha', 'y_weight', 'discount_factor')
        np.testing.assert_equal(alpha >= 0, True)
        np.testing.assert_equal(np.all(y_weights >= 0), True)
        np.testing.assert_equal(np.all(discount_factors >= 0), True)

    @staticmethod
    def _additional_checks(label, *args):
        """Perform some additional checks on selected features of the class instance."""
        # We only run these tests during debugging as otherwise the performance deteriorates.
        if not IS_DEBUG:
            return

        if label in ['evaluate_in']:
            for var in args:
                np.testing.assert_equal(np.all(var >= 0), True)
        elif label in ['evaluate_out']:
            rslt, = args
            np.testing.assert_equal(np.all(0.0 <= rslt), True)
        else:
            raise NotImplementedError
| 2.625 | 3 |
SimpleCV/examples/manipulation/RotationExample.py | nikhilgk/SimpleCV | 2 | 12766076 | #!/usr/bin/python
from SimpleCV import *
from numpy import linspace
from scipy.interpolate import UnivariateSpline
import sys, time, socket
# Settings for the demo.
srcImg = "../../sampleimages/orson_welles.jpg"
font_size = 20
sleep_for = 3  # seconds to sleep for
draw_color = Color.RED


def _display(img, caption):
    """Label *img* with *caption*, show it, and pause for sleep_for seconds."""
    img.drawText(caption, 10, 10, color=draw_color, fontsize=font_size)
    img.show()
    time.sleep(sleep_for)


# Cycle forever through the transformation showcase.
while True:
    image = Image(srcImg)
    _display(image, "Original Size")
    _display(image.rotate(45), "Rotated 45 degrees")
    _display(image.rotate(45, scale=0.5), "Rotated 45 degrees and scaled")
    _display(image.rotate(45, scale=0.5, point=(0, 0)), "Rotated 45 degrees and scaled around a point")
    _display(image.rotate(45, "full"), "Rotated 45 degrees and full")
    atrans = image.shear([(image.width/2, 0), (image.width-1, image.height/2), (image.width/2, image.height-1)])
    _display(atrans, "Affine Transformation")
    ptrans = image.warp([(image.width*0.05, image.height*0.03), (image.width*0.9, image.height*0.1), (image.width*0.8, image.height*0.7), (image.width*0.2, image.height*0.9)])
    _display(ptrans, "Perspective Transformation")
| 2.75 | 3 |
scoreboard_auroc.py | nialky/credef | 0 | 12766077 | <gh_stars>0
from sklearn.metrics import classification_report, accuracy_score
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, plot_roc_curve, roc_auc_score
# Plot ROC curves for every classifier on a shared axis, then chart each
# model's accuracy score.
# NOTE(review): this script expects `classifiers`, `X_balanced`, `y_balanced`,
# `X_test_balanced`, `y_test_balanced`, `keys` and `val` to be defined by
# earlier (notebook-style) code -- confirm before running standalone.
import pandas as pd  # bug fix: `pd` was used below but never imported

ax = plt.gca()
for clf in classifiers:
    clf.fit(X_balanced, y_balanced)
    plot_roc_curve(clf, X_test_balanced, y_test_balanced, ax=ax)
plt.xlabel('False Positive Rate', fontsize=15)
plt.ylabel('True Positive Rate', fontsize=15)

# keys = pd.Series(data = keys, index = val).sort_index().tolist()
accuracy_table = {'Models': keys, 'Accuracy Score': val}
final_score = pd.DataFrame(accuracy_table)
ax = final_score.plot('Models', 'Accuracy Score', color='black', linestyle='solid', linewidth=2,
                      marker='o', markersize=10, figsize=(20, 5), ylim=(0.4, 1),
                      fontsize=12, rot=0, xlabel='Models').legend(loc='upper left', prop={'size': 15})
| 2.671875 | 3 |
pySDC/projects/FastWaveSlowWave/plot_stifflimit_specrad.py | brownbaerchen/pySDC | 20 | 12766078 | <gh_stars>10-100
import matplotlib
matplotlib.use('Agg')
import numpy as np
from matplotlib import pyplot as plt
from pylab import rcParams
from pySDC.implementations.problem_classes.FastWaveSlowWave_0D import swfw_scalar
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.core.Step import step
# noinspection PyShadowingNames
def compute_specrad():
    """
    Routine to compute spectral radius and norm of the error propagation matrix E

    Returns:
        numpy.nparray: list of number of nodes
        numpy.nparray: list of fast lambdas
        numpy.nparray: list of spectral radii
        numpy.nparray: list of norms
    """
    problem_params = dict()
    # SET VALUE FOR lambda_slow AND VALUES FOR lambda_fast ###
    problem_params['lambda_s'] = np.array([1.0 * 1j], dtype='complex')
    problem_params['lambda_f'] = np.array([50.0 * 1j, 100.0 * 1j], dtype='complex')
    problem_params['u0'] = 1.0

    # initialize sweeper parameters
    sweeper_params = dict()
    # SET TYPE OF QUADRATURE NODES ###
    sweeper_params['collocation_class'] = CollGaussRadau_Right

    # initialize level parameters
    level_params = dict()
    level_params['dt'] = 1.0

    t0 = 0.0

    # fill description dictionary for easy step instantiation
    description = dict()
    description['problem_class'] = swfw_scalar  # pass problem class
    description['problem_params'] = problem_params  # pass problem parameters
    description['sweeper_class'] = imex_1st_order  # pass sweeper (see part B)
    description['level_params'] = level_params  # pass level parameters
    description['step_params'] = dict()  # pass step parameters

    # sweep over the number of collocation nodes M = 2..9; row 0 of the result
    # arrays holds the stiff limit (lambda_fast -> infinity), rows 1..2 the
    # two finite fast lambdas.
    nodes_v = np.arange(2, 10)
    specrad = np.zeros((3, np.size(nodes_v)))
    norm = np.zeros((3, np.size(nodes_v)))
    for i in range(0, np.size(nodes_v)):
        sweeper_params['num_nodes'] = nodes_v[i]
        description['sweeper_params'] = sweeper_params  # pass sweeper parameters

        # now the description contains more or less everything we need to create a step
        S = step(description=description)

        L = S.levels[0]
        P = L.prob

        u0 = S.levels[0].prob.u_exact(t0)
        S.init_step(u0)
        # strip the first row/column belonging to the initial value node
        QE = L.sweep.QE[1:, 1:]
        QI = L.sweep.QI[1:, 1:]
        Q = L.sweep.coll.Qmat[1:, 1:]
        nnodes = L.sweep.coll.num_nodes
        dt = L.params.dt

        assert nnodes == nodes_v[i], 'Something went wrong during instantiation, nnodes is not correct, got %s' % nnodes

        # iteration matrix inv(LHS) * RHS for each finite fast lambda
        for j in range(0, 2):
            LHS = np.eye(nnodes) - dt * (P.params.lambda_f[j] * QI + P.params.lambda_s[0] * QE)
            RHS = dt * ((P.params.lambda_f[j] + P.params.lambda_s[0]) * Q -
                        (P.params.lambda_f[j] * QI + P.params.lambda_s[0] * QE))
            evals, evecs = np.linalg.eig(np.linalg.inv(LHS).dot(RHS))
            specrad[j + 1, i] = np.linalg.norm(evals, np.inf)
            norm[j + 1, i] = np.linalg.norm(np.linalg.inv(LHS).dot(RHS), np.inf)

        if L.sweep.coll.left_is_node:
            # For Lobatto nodes, first column and row are all zeros, since q_1 = q_0; hence remove them
            QI = QI[1:, 1:]
            Q = Q[1:, 1:]
            # Eigenvalue of error propagation matrix in stiff limit: E = I - inv(QI)*Q
            evals, evecs = np.linalg.eig(np.eye(nnodes - 1) - np.linalg.inv(QI).dot(Q))
            norm[0, i] = np.linalg.norm(np.eye(nnodes - 1) - np.linalg.inv(QI).dot(Q), np.inf)
        else:
            evals, evecs = np.linalg.eig(np.eye(nnodes) - np.linalg.inv(QI).dot(Q))
            norm[0, i] = np.linalg.norm(np.eye(nnodes) - np.linalg.inv(QI).dot(Q), np.inf)
        specrad[0, i] = np.linalg.norm(evals, np.inf)

    # report the first node count for which the spectral radius exceeds 1
    print("Spectral radius of infinitely fast wave case > 1.0 for M=%2i" % nodes_v[np.argmax(specrad[0, :] > 1.0)])
    print("Spectral radius of > 1.0 for M=%2i" % nodes_v[np.argmax(specrad[1, :] > 1.0)])

    return nodes_v, problem_params['lambda_f'], specrad, norm
# noinspection PyShadowingNames
def plot_specrad(nodes_v, lambda_f, specrad, norm):
    """
    Plotting function for spectral radii and norms

    Args:
        nodes_v (numpy.nparray): list of number of nodes
        lambda_f (numpy.nparray): list of fast lambdas
        specrad (numpy.nparray): list of spectral radii
        norm (numpy.nparray): list of norms
    """
    fs = 8
    rcParams['figure.figsize'] = 2.5, 2.5
    rcParams['pgf.rcfonts'] = False

    # One (marker-style, label) pair per data row: stiff limit first, then the
    # two finite fast lambdas.
    styles = ['rd-', 'bo-', 'gs-']
    labels = [r'$\lambda_{fast} = \infty$',
              r'$\lambda_{fast} = %2.0f $' % lambda_f[0].imag,
              r'$\lambda_{fast} = %2.0f $' % lambda_f[1].imag]

    def _decorate_and_save(fig, ylabel, upper, filename):
        """Apply the shared axis cosmetics and write the figure to disk."""
        plt.xlabel(r'Number of nodes $M$', fontsize=fs)
        plt.ylabel(ylabel, fontsize=fs, labelpad=2)
        plt.legend(loc='lower right', fontsize=fs, prop={'size': fs})
        plt.xlim([np.min(nodes_v), np.max(nodes_v)])
        plt.ylim([0, upper])
        plt.yticks(fontsize=fs)
        plt.xticks(fontsize=fs)
        fig.savefig(filename, bbox_inches='tight')

    fig = plt.figure()
    for row, style, label in zip(specrad, styles, labels):
        plt.plot(nodes_v, row, style, markersize=fs - 2, label=label)
    _decorate_and_save(fig, r'Spectral radius $\sigma\left( \mathbf{E} \right)$',
                       1.0, 'data/stifflimit-specrad.png')

    fig = plt.figure()
    for row, style, label in zip(norm, styles, labels):
        plt.plot(nodes_v, row, style, markersize=fs - 2, label=label)
    _decorate_and_save(fig, r'Norm $\left|| \mathbf{E} \right||_{\infty}$',
                       2.4, 'data/stifflimit-norm.png')
if __name__ == "__main__":
    # Compute spectral radii/norms of the error propagation matrix, then plot
    # both figures into data/.
    nodes_v, lambda_f, specrad, norm = compute_specrad()
    plot_specrad(nodes_v, lambda_f, specrad, norm)
| 2.34375 | 2 |
misc/dosvfont.py | frank-deng/fun-ucdos | 16 | 12766079 | #!/usr/bin/env python3
import struct, sys, traceback;
# Convert DOS/V font files into a BMP: argv[1] = half-width (ANK) font,
# argv[2] = kanji font, argv[3] = destination bitmap (modified in place).
if (len(sys.argv) < 4):
    print("Usage: %s ANK16.FNT KANJI16.FNT FONT.BMP"%sys.argv[0]);
    exit(1);

# Load the half-width font: 256 glyphs, 16 bytes (one byte per scanline) each.
ank16 = None;
with open(sys.argv[1], 'rb') as f:
    data = f.read();
    ank16 = [data[i:i+16] for i in range(0, 256*16, 16)];

# Build the character table keyed by the 4-hex-digit JIS code; each entry
# records the EUC-JP byte pair, the decoded character, the Shift-JIS byte
# pair, and (later) the glyph pixel data.
kanji16 = None;
charTable = {};
for row in range(0xa1, 0xff):
    for col in range(0xa1, 0xff):
        code = "%02x%02x"%(row & 0x7F, col & 0x7F);
        char = struct.pack('BB', row, col).decode('euc-jp', errors='ignore');
        sjis = char.encode('shift-jis', errors='ignore');
        # Keep only byte pairs that round-trip through both encodings.
        if (len(char) > 0 and len(sjis) > 0):
            charTable[code] = {
                'eucjp': (row & 0x7f, col & 0x7f),
                'utf8': char,
                'sjis': (sjis[0], sjis[1]),
                'pixel': None,
            };

# Read the 32-byte (16x16, 2 bytes per scanline) glyph for every character.
# The seek math mirrors the kanji font's Shift-JIS layout: 512-byte header,
# 189 glyphs per lead-byte row -- assumed from the file format; TODO confirm
# against the font's specification.
with open(sys.argv[2], 'rb') as f:
    for k in sorted(charTable.keys()):
        ch = charTable[k];
        row, col = ch['sjis'];
        if row <= 0x84:
            f.seek(512 + ((row - 0x81) * 189 + (col - 0x40)) * 32);
        else:
            f.seek(512 + ((row - 0x81) * 189 + (col - 0x40) - 378) * 32);
        pixelData = f.read(32);
        # Skip fully-blank glyphs (missing characters).
        if pixelData != b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0':
            ch['pixel'] = pixelData;
def writeBMP(f, metadata, x, y, data):
    """Write *data* at byte column *x*, pixel row *y* of a bottom-up 1bpp BMP.

    metadata[2] is the pixel-data offset, metadata[4] the width in pixels and
    metadata[5] the height in pixels (from the unpacked BMP header).
    """
    pixel_offset, width_px, height_px = metadata[2], metadata[4], metadata[5]
    if x < 0 or y < 0:
        print("Error pos: ", x, y)
        return
    # BMP rows are stored bottom-up: row y maps to (height - y - 1).  Each row
    # occupies width/8 bytes at 1 bit per pixel, hence the >> 3.
    position = pixel_offset + (((height_px - y - 1) * width_px) >> 3) + x
    f.seek(position)
    f.write(data)
def writeChar(f, metadata, x, y, data):
    """Blit one 16x16 glyph (32 bytes, two bytes per scanline) into the BMP.

    *x* is the glyph column (two BMP byte columns wide), *y* the glyph row
    (16 pixel rows tall).
    """
    for scanline in range(16):
        left_byte = data[scanline * 2]
        right_byte = data[scanline * 2 + 1]
        # Bits are inverted (~) -- presumably so set font bits render dark on
        # the target palette; confirm against the BMP's color table.
        writeBMP(f, metadata, x * 2, y * 16 + scanline, struct.pack('B', ~left_byte & 0xFF))
        writeBMP(f, metadata, x * 2 + 1, y * 16 + scanline, struct.pack('B', ~right_byte & 0xFF))
# Open the destination BMP in-place and paint the fonts into it.
f = open(sys.argv[3], 'r+b');
try:
    # Unpack the BMP file header (14 bytes) + info header (40 bytes).
    metadata = struct.unpack('<HLxxxxLLLLHHLLLLLL', f.read(14+40));
    # Pass 1: half-width glyphs for printable ASCII and half-width katakana.
    for ch, ank in enumerate(ank16):
        if not ((ch >= 0x20 and ch <= 0x7e) or (ch >= 0xA1 and ch <= 0xdf)):
            continue;
        for i, d in enumerate(ank):
            writeBMP(f, metadata, ch, i, struct.pack('B', (~d & 0xFF)));
    # Pass 2: full-width kanji glyphs positioned by their EUC-JP code.
    for k in sorted(charTable.keys()):
        ch = charTable[k];
        row, col = ch['eucjp'];
        if (None != ch['pixel'] and len(ch['pixel']) == 32):
            writeChar(f, metadata, row - 0x20, col, ch['pixel']);
    # Pass 3: half-width glyphs widened to 16 pixels (padded with a zero byte
    # per scanline) into fixed glyph columns 0x29/0x2A.
    for ch, ank in enumerate(ank16):
        if (ch >= 0x21 and ch <= 0x7e):
            ank2 = b'';
            for i, d in enumerate(ank):
                ank2 += (struct.pack('B', d) + b'\0');
            writeChar(f, metadata, 0x29 - 0x20, ch, ank2);
        elif (ch >= 0xA1 and ch <= 0xDF):
            ank2 = b'';
            for i, d in enumerate(ank):
                ank2 += (struct.pack('B', d) + b'\0');
            writeChar(f, metadata, 0x2A - 0x20, ch & 0x7f, ank2);
except Exception as e:
    traceback.print_exc();
finally:
    f.close();
# NOTE: the triple-quoted block below is dead debug/inspection code kept for
# reference (glyph dumping via printPixels); it is never executed.
'''
def printPixels(data):
    if (None == data):
        return;
    pixels = '';
    for i, row in enumerate(data):
        for j in range(8):
            if (row & (1 << (7-j))):
                pixels += '#';
            else:
                pixels += '.';
        if (i & 1):
            pixels += '\n';
    print(pixels);
    print('\n');

if (len(sys.argv) > 1 and sys.argv[1] == 'l'):
    with open('KANJI16.FNT', 'rb') as f:
        for i in range(8192):
            f.seek(512 + i * 32);
            print("%04x %d"%(i, i));
            printPixels(f.read(32));
    exit();

for k in sorted(charTable.keys()):
    ch = charTable[k];
    print(k, "%02x%02x"%ch['sjis'], ch['utf8']);
    printPixels(ch['pixel']);
'''
| 2.359375 | 2 |
expenseManager/app/frmPageReport.py | PacktPublishing/Python-Programming | 0 | 12766080 | from UtilGp import UtilGp
from dBase import ndb
from TranData import DbTranData
from frmPageShop import Shop
class Report:
    """Console report screens for the expense manager (menu-driven)."""

    @staticmethod
    def show():
        """Entry point: require login, then open the report menu."""
        UtilGp.Login(Report.reportMenu, ndb.loadTranByItem,'Reports (Login)')

    @staticmethod
    def showDateRange():
        """Print all transactions between two user-supplied dates."""
        UtilGp.sleep(2);UtilGp.clear();UtilGp.title('Reports(%s)' % ('Transactions between two dates',))
        fromDate = input("From Date(dd-MM-YYYY):")
        toDate = input("To Date (dd-MM-YYYY):")
        # Expand to full-day boundaries before parsing.
        fromDate = UtilGp.strToDate(fromDate + ' 00:00:00')
        toDate = UtilGp.strToDate(toDate + ' 23:59:00')
        #print(fromDate,toDate)
        dbTran=DbTranData.queryByDateRange(ndb.dbTran,fromDate,toDate)
        DbTranData.printTran(dbTran)
        print("****End of Report****");print();
        from frmPageShop import Shop
        Shop.showFailure()

    @staticmethod
    def showBySortedAmount():
        """Print all transactions sorted ascending by amount."""
        UtilGp.sleep(2);UtilGp.clear();UtilGp.title('Reports(%s)' % ('All Transactions sorted on amount',))
        dbTran = DbTranData.queryAll(ndb.dbTran)
        dbTran.sort(key=lambda x:x.price)
        #print(dbTran)
        DbTranData.printTran(dbTran)
        print("****End of Report****");print();
        from frmPageShop import Shop
        Shop.showFailure()

    @staticmethod
    def showAmountRange():
        """Print all transactions whose amount lies in a user-supplied range."""
        UtilGp.sleep(2);UtilGp.clear();UtilGp.title('Reports(%s)' % ('Transactions within amount range',))
        fromAmount = float(input("From Amount:"))
        toAmount = float(input("To Amount:"))
        #print(fromDate,toDate)
        dbTran=DbTranData.queryByAmount(ndb.dbTran,fromAmount,toAmount)
        DbTranData.printTran(dbTran)
        print("****End of Report****");print();
        Shop.showFailure()

    @staticmethod
    def showByCat():
        """Print all transactions belonging to a user-chosen category."""
        UtilGp.sleep(2);UtilGp.clear();UtilGp.title('Reports(%s)' % ('Transactions of a Category',))
        for valueCat in ndb.dbCat:
            print("%d %s"%(valueCat.catId,valueCat.category))
        choiceCatId = int(input("Enter Catgory ID\nYour Choice:"))
        # Linear scan to validate the chosen category id.
        isExist=False;resCat=None;
        for valueCat in ndb.dbCat:
            if valueCat.catId==choiceCatId:
                isExist=True;resCat=valueCat;
                break
        if isExist:
            dbTran = DbTranData.queryByCategory(ndb.dbTran, choiceCatId)
            DbTranData.printTran(dbTran)
            print("****End of Report****");print();
            Shop.showFailure()
        else:
            print("You Category Not Exist.")
            Shop.showFailure()

    @staticmethod
    def showCatTot():
        """Print the total amount spent on a user-chosen category.

        NOTE(review): the category-selection scan duplicates showByCat();
        consider extracting a shared helper.
        """
        UtilGp.sleep(2);UtilGp.clear();UtilGp.title('Reports(%s)'%('Total Amount Spent on a Category',))
        for valueCat in ndb.dbCat:
            print("%d %s"%(valueCat.catId,valueCat.category))
        choiceCatId = int(input("Enter Catgory ID\nYour Choice:"))
        isExist=False;resCat=None;
        for valueCat in ndb.dbCat:
            if valueCat.catId==choiceCatId:
                isExist=True;resCat=valueCat;
                break
        if isExist:
            dbTran = DbTranData.queryByCategory(ndb.dbTran, choiceCatId)
            # Sum the price of every matching transaction.
            amount = sum(list(map(lambda x:x.price,dbTran)))
            UtilGp.printCaptionData(('Category','Total Amount Spent is'),
                                    (valueCat.category,'Rs.%.2f'%(amount,)), 45)
            #print("Total Amount Spent for category %s is Rs.%f"%(valueCat.category,amount))
            print("****End of Report****");print();
            Shop.showFailure()
        else:
            print("You Category Not Exist.")
            Shop.showFailure()

    @staticmethod
    def reportMenu():
        """Show the numbered report menu and dispatch the chosen report."""
        UtilGp.sleep(2);UtilGp.clear();UtilGp.title('Reports')
        print("1-All transactions done between two dates")
        print("2-All transactions that fall within a specified amount range")
        print("3-All transactions done on a category")
        print("4-Total amount spent on a category")
        print("5-Transactions sorted based on amount")
        choice=int(input("Your Choice:"))
        if choice == 1:
            Report.showDateRange()
        elif choice == 2:
            Report.showAmountRange()
        elif choice == 3:
            Report.showByCat()
        elif choice == 4:
            Report.showCatTot()
        elif choice == 5:
            Report.showBySortedAmount()
plugins/show_source/show_source.py | likev/gauravssnl.github.io | 2 | 12766081 | import os
import logging
from six.moves.urllib.parse import urljoin
import six
from pelican import signals
from pelican.utils import pelican_open
if not six.PY3:
from codecs import open
logger = logging.getLogger(__name__)
# Accumulates {'copy_raw_from': ..., 'copy_raw_to': ...} dicts during
# generation; consumed later by write_source_files().
source_files = []
# Generator attributes to scan for content objects.
PROCESS = ['articles', 'pages', 'drafts']
def link_source_files(generator):
    """
    Processes each article/page object and formulates copy from and copy
    to destinations, as well as adding a source file URL as an attribute.
    """
    # Get all attributes from the generator that are articles or pages
    posts = [
        getattr(generator, attr, None) for attr in PROCESS
        if getattr(generator, attr, None) is not None]
    # Work on each item
    # NOTE(review): only posts[0] (the first non-empty attribute, typically
    # articles) is processed; pages/drafts collected above are ignored --
    # confirm whether this is intentional.
    for post in posts[0]:
        # Bail out entirely unless one of the display settings is enabled.
        if not 'SHOW_SOURCE_ON_SIDEBAR' in generator.settings and \
           not 'SHOW_SOURCE_IN_SECTION' in generator.settings:
            return

        # Only try this when specified in metadata or SHOW_SOURCE_ALL_POSTS
        # override is present in settings
        if 'SHOW_SOURCE_ALL_POSTS' in generator.settings or \
           'show_source' in post.metadata:
            # Source file name can be optionally set in config
            show_source_filename = generator.settings.get(
                'SHOW_SOURCE_FILENAME', '{}.txt'.format(post.slug)
            )
            try:
                # Get the full path to the original source file
                source_out = os.path.join(
                    post.settings['OUTPUT_PATH'], post.save_as
                )
                # Get the path to the original source file
                source_out_path = os.path.split(source_out)[0]
                # Create 'copy to' destination for writing later
                copy_to = os.path.join(
                    source_out_path, show_source_filename
                )
                # Add file to published path
                source_url = urljoin(
                    post.save_as, show_source_filename
                )
            except Exception:
                return
            # Format post source dict & populate
            out = dict()
            out['copy_raw_from'] = post.source_path
            out['copy_raw_to'] = copy_to
            logger.debug('Linked %s to %s', post.source_path, copy_to)
            source_files.append(out)
            # Also add the source path to the post as an attribute for tpls
            post.show_source_url = source_url
def _copy_from_to(from_file, to_file):
    """Copy the contents of *from_file* into *to_file* (UTF-8) and log the write."""
    with pelican_open(from_file) as source_text:
        with open(to_file, 'w', encoding='utf-8') as destination:
            destination.write(source_text)
    logger.info('Writing %s', to_file)
def write_source_files(*args, **kwargs):
    """
    Called by the `page_writer_finalized` signal: flush every queued source copy.
    """
    for entry in source_files:
        _copy_from_to(entry['copy_raw_from'], entry['copy_raw_to'])
def register():
    """
    Wire the plugin into Pelican's signal pipeline.
    """
    for generator_signal in (signals.article_generator_finalized,
                             signals.page_generator_finalized):
        generator_signal.connect(link_source_files)
    signals.page_writer_finalized.connect(write_source_files)
| 2.171875 | 2 |
main.py | darrenlee01/awap2021-public | 0 | 12766082 | <gh_stars>0
from game.game import Game
import time
BOARD_NAME = './board/board1.yaml'
def example_network_generator(station_locations, feedback):
    """Return a hard-coded example network mapping trainColorID -> station IDs.

    The inputs are accepted (so smarter generators share the signature) but
    this example ignores them.
    """
    red_line = [0, 1, 2]
    blue_line = [2, 3, 4, 2]
    return {0: red_line, 1: blue_line}
if __name__ == '__main__':
    # Load the board file with station locations/types
    g = Game(BOARD_NAME)
    SIM_ATTEMPTS = g.getGameConstants('SIM_ATTEMPTS')
    MAX_TRAINS = g.getGameConstants('MAX_TRAINS')

    """
    Get a dictionary that maps a location (x, y) to a tuple (stationID, shapeID),
    where (x, y) is a cartesian location of the station stationID with shape shapeID.
    """
    station_locations = g.getStationLocations()

    # No feedback is available before the first simulation run.
    feedback = None
    for i in range(SIM_ATTEMPTS):

        tic = time.perf_counter()  # Start a timer

        """
        Write your code here to take create a network!
        The network should be a dictionary that maps (trainColorID)->[stationID1,
        stationID2, stationID3...].
        Routes have to be path or single cycle (no "lollipops or figure 8's etc).
        Hint: you may want to pass `station_locations` and `feedback` from the
        previous run into your function.
        """
        network = example_network_generator(station_locations, feedback)

        # Get the run time of your code
        runtime = time.perf_counter() - tic
        # Contest rule: network generation must finish within 10 seconds.
        if runtime > 10.0:
            raise TimeoutError("Run time for your network generator cannot exceed 10 seconds.")

        """
        Simulate commute using your network, and get back a feedback object. You
        can query the object to get helpful stats like feedback.get_avg_wait_time().
        """
        feedback = g.simulate(network)

        # Print your score from this run
        feedback.print_score(runtime)

    """
    Display an animation of the simulation. You can comment this out if you
    want to skip the animation
    """
    g.visualize(speed=10)
extras/utils.py | narutonz/analyser | 0 | 12766083 | <filename>extras/utils.py
from configparser import ConfigParser
from pymongo import MongoClient
from math import pow, sqrt
__author__ = '<NAME>'
__version__ = '1.5'
class Database(object):
    """Thin wrapper around the local MongoDB 'Tweets' database."""

    def __init__(self, col_name):
        self.client = MongoClient()
        self.db = self.client['Tweets']
        # Fall back to the default collection when no name is supplied.
        collection = 'final_db' if col_name is None else col_name
        self.col = self.db[collection]
def secs_to_hrs(seconds, process):
    """Print *seconds* as a human-readable duration, prefixed by *process*."""
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    # Only mention days when the duration actually spans at least one.
    if days > 0:
        suffix = ' {} days: {} hours: {} mins: {:.2f} secs'.format(days, hours, minutes, secs)
    else:
        suffix = ' {} hours: {} mins: {:.2f} secs'.format(hours, minutes, secs)
    print(process + suffix)
def get_params(config_filename='./extras/config.ini', **kwargs):
    """Read selected parameter groups from an INI configuration file.

    Keyword flags select which sections to extract: ``mongo``, ``tw_auth``,
    ``tw_user_auth`` and ``lda``.  Returns a flat dict of the requested
    parameters; an empty dict when ``config_filename`` is None.
    """
    config_object = {}

    # Bug fix: the section-extraction code used to run even when
    # config_filename was None, hitting an unbound `config_file` (NameError).
    # With no file there is nothing to read, so return early.
    if config_filename is None:
        return config_object

    config_file = ConfigParser()
    config_file.read(config_filename)

    if kwargs.pop('mongo', False) is True:
        mongo_dict = {'db_name': config_file['MONGO_PARAMETERS']['db_name'],
                      'col_name': config_file['MONGO_PARAMETERS']['col_name']}
        config_object.update(mongo_dict)

    if kwargs.pop('tw_auth', False) is True:
        tw_auth_dict = {'cons_key': config_file['TWITTER_STREAM_AUTH']['consumer_key'],
                        'cons_sec': config_file['TWITTER_STREAM_AUTH']['consumer_secret'],
                        'acc_token': config_file['TWITTER_STREAM_AUTH']['access_token'],
                        'acc_token_sec': config_file['TWITTER_STREAM_AUTH']['access_token_secret']}
        config_object.update(tw_auth_dict)

    if kwargs.pop('tw_user_auth', False) is True:
        tw_auth_dict = {'cons_key': config_file['TWITTER_USER_TWEETS_AUTH']['consumer_key'],
                        'cons_sec': config_file['TWITTER_USER_TWEETS_AUTH']['consumer_secret'],
                        'acc_token': config_file['TWITTER_USER_TWEETS_AUTH']['access_token'],
                        'acc_token_sec': config_file['TWITTER_USER_TWEETS_AUTH']['access_token_secret']}
        config_object.update(tw_auth_dict)

    if kwargs.pop('lda', False) is True:
        lda_dict = {'corpus_file': config_file['LDA_FILES']['corpus_filename'],
                    'dict_file': config_file['LDA_FILES']['dict_filename'],
                    'lda_file': config_file['LDA_FILES']['lda_filename'],
                    'num_topics': int(config_file['LDA_PARAMETERS']['num_topics']),
                    'workers': int(config_file['LDA_PARAMETERS']['workers']),
                    'chunks': int(config_file['LDA_PARAMETERS']['chunks']),
                    'passes': int(config_file['LDA_PARAMETERS']['passes']),
                    'alpha': config_file['LDA_PARAMETERS']['alpha']}
        config_object.update(lda_dict)

    return config_object
def cosine_similarity(a, b):
    """Cosine similarity of two sparse dict-vectors; 0 when either has zero magnitude."""
    try:
        return dot_product(a, b) / magnitude(a, b)
    except ZeroDivisionError:
        # An all-zero / empty vector has no direction; define similarity as 0.
        return 0
def dot_product(a, b):
    """Dot product over the keys shared by both sparse dict-vectors."""
    shared_keys = set(a) & set(b)
    return sum(a[key] * b[key] for key in shared_keys)
def magnitude(a, b):
    """Product of the Euclidean norms of two sparse dict-vectors."""
    norm_a = sqrt(sum(pow(value, 2) for value in a.values()))
    norm_b = sqrt(sum(pow(value, 2) for value in b.values()))
    return norm_a * norm_b
def normalize_counts(x):
    """Normalize the counts in *x* in place so they sum to 1; returns *x*."""
    total = sum(x.values())
    for key in x:
        x[key] /= total
    return x
def reset_tweets(db_name='final_db'):
    """Reset the per-tweet processing fields of every document in *db_name*.

    Clears tokens/topics/doc_sim and marks each tweet as not tokenized.
    """
    _mongo = Database(db_name)
    cursor = _mongo.col.find()
    update = {'$set': {'tokens': [],
                       'topics': [],
                       'doc_sim': [],
                       'tokenized': False}}
    for doc in cursor:
        # Bug fix: the update document was built but never passed, so
        # update_one() raised TypeError (missing required 'update' argument).
        _mongo.col.update_one({'_id': doc['_id']}, update)
| 2.59375 | 3 |
docs/configuration/properties.py | keotl/jivago | 12 | 12766084 | from jivago.config.properties.application_properties import ApplicationProperties
from jivago.inject.annotation import Component
from jivago.lang.annotations import Inject
@Component
class MyComponent(object):
    """Example component: receives ApplicationProperties via injection."""

    @Inject
    def __init__(self, application_properties: ApplicationProperties):
        self.application_properties = application_properties

    def do_something(self):
        """Print the configured 'my_property' value."""
        value = self.application_properties["my_property"]
        print(value)
| 2.296875 | 2 |
u2pl/models/decoder.py | Haochen-Wang409/U2PL | 96 | 12766085 | import torch
import torch.nn as nn
from torch.nn import functional as F
from .base import ASPP, get_syncbn
class dec_deeplabv3(nn.Module):
    """DeepLabV3 decoder head: ASPP followed by a 3x3 conv classifier."""

    def __init__(
        self,
        in_planes,
        num_classes=19,
        inner_planes=256,
        sync_bn=False,
        dilations=(12, 24, 36),
    ):
        super(dec_deeplabv3, self).__init__()
        # Use synchronized BatchNorm for multi-GPU training when requested.
        norm_layer = get_syncbn() if sync_bn else nn.BatchNorm2d
        self.aspp = ASPP(
            in_planes, inner_planes=inner_planes, sync_bn=sync_bn, dilations=dilations
        )
        # Projection + dropout + per-pixel classification over num_classes.
        self.head = nn.Sequential(
            nn.Conv2d(
                self.aspp.get_outplanes(),
                256,
                kernel_size=3,
                padding=1,
                dilation=1,
                bias=False,
            ),
            norm_layer(256),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(256, num_classes, kernel_size=1, stride=1, padding=0, bias=True),
        )

    def forward(self, x):
        """Return per-pixel class logits for backbone features *x*."""
        aspp_out = self.aspp(x)
        res = self.head(aspp_out)
        return res
class dec_deeplabv3_plus(nn.Module):
    """DeepLabV3+ decoder: ASPP on deep features fused with low-level features.

    Optionally exposes a representation head ("rep") used for contrastive /
    pixel-embedding objectives in addition to the classification logits.
    """

    def __init__(
        self,
        in_planes,
        num_classes=19,
        inner_planes=256,
        sync_bn=False,
        dilations=(12, 24, 36),
        rep_head=True,
    ):
        super(dec_deeplabv3_plus, self).__init__()
        norm_layer = get_syncbn() if sync_bn else nn.BatchNorm2d
        self.rep_head = rep_head

        # 1x1 projection of the low-level (stage-1) backbone features.
        self.low_conv = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=1), norm_layer(256), nn.ReLU(inplace=True)
        )

        self.aspp = ASPP(
            in_planes, inner_planes=inner_planes, sync_bn=sync_bn, dilations=dilations
        )

        self.head = nn.Sequential(
            nn.Conv2d(
                self.aspp.get_outplanes(),
                256,
                kernel_size=3,
                padding=1,
                dilation=1,
                bias=False,
            ),
            norm_layer(256),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
        )

        # Classifier over the concatenated (low-level + ASPP) features.
        self.classifier = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1, bias=True),
            norm_layer(256),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),
            norm_layer(256),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(256, num_classes, kernel_size=1, stride=1, padding=0, bias=True),
        )

        if self.rep_head:
            # Parallel head producing a 256-d per-pixel representation.
            self.representation = nn.Sequential(
                nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1, bias=True),
                norm_layer(256),
                nn.ReLU(inplace=True),
                nn.Dropout2d(0.1),
                nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),
                norm_layer(256),
                nn.ReLU(inplace=True),
                nn.Dropout2d(0.1),
                nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0, bias=True),
            )

    def forward(self, x):
        """Expects the four backbone stage outputs; returns dict with 'pred' (+ 'rep')."""
        x1, x2, x3, x4 = x
        aspp_out = self.aspp(x4)
        low_feat = self.low_conv(x1)
        aspp_out = self.head(aspp_out)
        h, w = low_feat.size()[-2:]
        # Upsample deep features to the low-level resolution before fusing.
        aspp_out = F.interpolate(
            aspp_out, size=(h, w), mode="bilinear", align_corners=True
        )
        aspp_out = torch.cat((low_feat, aspp_out), dim=1)

        res = {"pred": self.classifier(aspp_out)}

        if self.rep_head:
            res["rep"] = self.representation(aspp_out)

        return res
class Aux_Module(nn.Module):
    """Auxiliary segmentation head attached to an intermediate backbone stage."""

    def __init__(self, in_planes, num_classes=19, sync_bn=False):
        super(Aux_Module, self).__init__()

        norm_layer = get_syncbn() if sync_bn else nn.BatchNorm2d
        self.aux = nn.Sequential(
            nn.Conv2d(in_planes, 256, kernel_size=3, stride=1, padding=1),
            norm_layer(256),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(256, num_classes, kernel_size=1, stride=1, padding=0, bias=True),
        )

    def forward(self, x):
        """Return auxiliary per-pixel class logits for features *x*."""
        res = self.aux(x)
        return res
| 2.296875 | 2 |
address_parser/__init__.py | lenshin/address | 3 | 12766086 | from .parser import AddressParser | 1.132813 | 1 |
20211217/simulation_betas.py | dongxulee/lifeCycleRefine | 0 | 12766087 | <gh_stars>0
from simulation import *
import os.path
from multiprocessing import Pool
# Agent archetypes and the sweep grids over interest rate (Beta_r) and risk
# aversion (Gamma) used by the simulation fan-out below.
AgentType = ["poorHigh", "poorLow", "richHigh", "richLow"]
Beta_r = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10]
Gamma = [3.0]
def mapOverBeta(beta_r):
    """Run the simulation for one interest rate across all agent types / gammas."""
    for agentType in AgentType:
        for gamma in Gamma:
            fileName = agentType + "_" + str(beta_r) + "_" + str(gamma)
            # NOTE(review): both `break`s below abandon the remaining gammas
            # for this agent type -- the second one even when the output file
            # does NOT yet exist, which looks inverted for a "skip finished
            # runs" guard; confirm the intended resume semantics.
            if os.path.exists("waseozcbkhm_" + fileName + ".npy"):
                break
            if not os.path.exists(fileName + ".npy"):
                break
            '''
            Constants
            '''
            # discounting factor
            beta = 1/(1+beta_r)
            # utility function parameter
            print("agentType: ", agentType)
            print("beta: ", beta)
            print("gamma: ", gamma)
            simulation(beta_r, agentType, gamma, fileName)
# Fan the interest-rate sweep out over 10 worker processes.
# NOTE(review): there is no `if __name__ == "__main__"` guard; on spawn-based
# platforms (Windows/macOS) this re-imports the module in each worker --
# confirm the target platform uses fork semantics.
p = Pool(10)
p.map(mapOverBeta, Beta_r)
src/03_processing/process_reviews.py | williecostello/BetterReads | 2 | 12766088 | import argparse
import os
from process_utils import clean_reviews, make_sentences, clean_sentences
# Set script arguments
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--goodreads', action='store_true', help='Set collection to GoodReads (as opposed to UCSD)')
args = parser.parse_args()

# Set whether to scrape just the top 300 reviews or 300 reviews from each rating
# NOTE(review): the comment above looks stale -- the flag actually selects the
# input collection (GoodReads vs UCSD); confirm and update.
goodreads = args.goodreads

# Set file directories
read_dir = 'data/01_raw/goodreads/' if goodreads \
    else f'data/02_intermediate/ucsd_reviews/'
write_dir = f'data/03_processed/'

# If output directory does not exist, create it
if not os.path.isdir(write_dir):
    os.mkdir(write_dir)

# List review files to be read in, ignoring hidden files
file_list = [f for f in os.listdir(read_dir) if f.endswith('.csv')]

# Set loop variables
num_files = len(file_list)
file_index = 0

# Loop through files in file list
for file_name in file_list[file_index:]:

    file_index += 1

    print('-------------------------------------------------------------------')
    print(f'Processing file {file_index} of {num_files}: {file_name}\n')

    # Clean file's review text
    reviews_df = clean_reviews(file_name, read_dir)

    # Tokenize reviews into sentences
    all_sentences_df = make_sentences(reviews_df)

    # Clean sentences
    sentences_df = clean_sentences(all_sentences_df)

    # Write the processed sentences next to the original file name.
    sentences_df.to_csv(f'{write_dir}{file_name}', index=False)
| 2.921875 | 3 |
TensorRT/python/trt_run.py | xiaochus/DeepModelDeploy | 4 | 12766089 | import os
import numpy as np
import tensorrt as trt
from .utils import common, calibrator
class TRTModel:
def __init__(self, onnx_path, plan_path, mode="fp16", calibration_cache="calibration.cache",
calibration_dataset="", calibration_image_size="",
calibration_mean=[], calibration_std=[]):
"""
:param onnx_path: local path of onnx file.
:param plan_path: trt plan file to read/save.
:param mode: inference mode, fp16/int8.
:param calibration_cache: int8 cache file of calibration.
:param calibration_dataset: dataset.txt for calibration.
:param calibration_image_size: iamge size (w, h) for calibration.
:param calibration_mean: image mean for calibration.
:param calibration_std: image std for calibration.
"""
self.trt_logger = trt.Logger()
self.onnx_path = onnx_path
self.plan_path = plan_path
self.mode = mode
# for int8 calibration
if self.mode == "int8":
self.calib = self._get_calibrator(calibration_cache, calibration_dataset,
calibration_image_size, calibration_mean, calibration_std)
# init
self.engine = self._get_engine()
self.execution_context = self.engine.create_execution_context()
self.inputs, self.outputs, self.bindings, self.stream = common.allocate_buffers(self.engine)
def _get_calibrator(self, cache, dataset, size, mean, std):
if not os.path.exists(dataset):
raise Exception("Calibration dataset: {} not exist!".format(self.calibration_dataset))
calib = calibrator.EntropyCalibrator(dataset, cache, size, mean, std)
return calib
def _check_network(self, network):
"""check network
:param network: INetworkDefinition
"""
if not network.num_outputs:
raise Exception("No output node found!")
input_nodes = [network.get_input(i) for i in range(network.num_inputs)]
output_nodes = [network.get_output(i) for i in range(network.num_outputs)]
print("Network description")
for i, inp in enumerate(input_nodes):
print("Input node {} | Name {} | Shape {}".format(i, inp.name, inp.shape))
print("Total layers: {}".format(network.num_layers))
for i in range(network.num_layers):
layer = network.get_layer(i)
print("index {}, layer name: {}".format(i, layer.name))
for i, out in enumerate(output_nodes):
print("Output node {} | Name {} | Shape {}".format(i, out.name, out.shape))
def _parse_onnx(self):
"""takes an ONNX file and creates a TensorRT engine to run inference with
"""
dynamic = False
flag = common.EXPLICIT_BATCH
with trt.Builder(self.trt_logger) as builder, builder.create_network(flag) as network, builder.create_builder_config() as config, trt.OnnxParser(network, self.trt_logger) as parser, trt.Runtime(self.trt_logger) as runtime:
config.max_workspace_size = common.GiB(1)
builder.max_batch_size = 1
if self.mode == "fp16":
config.set_flag(trt.BuilderFlag.FP16)
print("set FP16 mode.")
if self.mode == "int8":
config.set_flag(trt.BuilderFlag.INT8)
config.int8_calibrator = self.calib
print("set INT8 mode.")
# Parse model file
print('Loading ONNX file from path {}...'.format(self.onnx_path))
with open(self.onnx_path, 'rb') as model:
print('Beginning ONNX file parsing')
if not parser.parse(model.read()):
print('ERROR: Failed to parse the ONNX file.')
for error in range(parser.num_errors):
print(parser.get_error(error))
return None
print('Completed parsing of ONNX file')
# check netowrk
self._check_network(network)
# build engine
print('Building an engine from file {}; this may take a while...'.format(self.onnx_path))
plan = builder.build_serialized_network(network, config)
engine = runtime.deserialize_cuda_engine(plan)
print("Completed creating Engine")
# save engine
with open(self.plan_path, "wb") as f:
f.write(plan)
return engine
def _get_engine(self):
"""generate tensorrt runtime engine
"""
if os.path.exists(self.plan_path):
print('Load trt plan from: {}'.format(self.plan_path))
with open(self.plan_path, "rb") as f, trt.Runtime(self.trt_logger) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
if os.path.exists(self.onnx_path):
return self._parse_onnx()
else:
raise Exception("ONNX model file {} not exist!".format(self.onnx_path))
    def forward(self, image_tensors):
        """Run one inference pass.

        :param image_tensors: list, one array-like per model input, in the
            same order as ``self.inputs``; each is converted to float32
            (assumes float32 model inputs -- TODO confirm) and copied into
            the corresponding input binding's host buffer.
        :return outputs: list, raw output buffers produced by the engine.
        """
        for i, image_tensor in enumerate(image_tensors):
            # Wrap in a leading batch dimension of 1 and force C-contiguity
            # before copying into the host-side input buffer.
            image = np.array([image_tensor], dtype=np.float32, order='C')
            self.inputs[i].host = image
        trt_outputs = common.do_inference_v2(self.execution_context,
                                             bindings=self.bindings,
                                             inputs=self.inputs,
                                             outputs=self.outputs,
                                             stream=self.stream)
        return trt_outputs
| 2.28125 | 2 |
eclcli/network/v2/gateway_interface.py | hanasuke/eclcli | 32 | 12766090 | <filename>eclcli/network/v2/gateway_interface.py
from eclcli.common import command
from eclcli.common import utils
from ..networkclient.common import utils as to_obj
class ListGwInterface(command.Lister):
    """List Gateway Interfaces, optionally filtered by attribute values.

    Every ``--<attr>`` option below maps one-to-one onto an API filter of
    the same name; unset (falsy) options are simply omitted.
    """

    # Option names forwarded verbatim as API search filters.  Keep in sync
    # with the ``--...`` arguments declared in get_parser().
    _FILTER_ATTRIBUTES = (
        'name', 'id', 'status', 'service_type',
        'interdc_gw_id', 'internet_gw_id', 'aws_gw_id',
        'vpn_gw_id', 'fic_gw_id',
        'netmask', 'network_id',
        'gw_vipv4', 'gw_vipv6',
        'primary_ipv4', 'primary_ipv6',
        'secondary_ipv4', 'secondary_ipv6',
        'vrid',
    )

    def get_parser(self, prog_name):
        parser = super(ListGwInterface, self).get_parser(prog_name)
        parser.add_argument(
            '--name',
            metavar="name",
            help="filter by name")
        parser.add_argument(
            '--id',
            metavar="id",
            help="filter by id")
        parser.add_argument(
            '--status',
            metavar="status",
            help="filter by status")
        parser.add_argument(
            '--service_type',
            metavar="service_type",
            help="filter by service_type")
        parser.add_argument(
            '--internet_gw_id',
            metavar="internet_gw_id",
            help="filter by internet gateway id")
        parser.add_argument(
            '--aws_gw_id',
            metavar="aws_gw_id",
            help="filter by aws gateway id")
        parser.add_argument(
            '--interdc_gw_id',
            metavar="interdc_gw_id",
            help="filter by interdc gateway id")
        parser.add_argument(
            '--vpn_gw_id',
            metavar="vpn_gw_id",
            help="filter by vpn gateway id")
        parser.add_argument(
            '--fic_gw_id',
            metavar="fic_gw_id",
            help="filter by fic gateway id")
        parser.add_argument(
            '--netmask',
            metavar="netmask",
            help="filter by netmask")
        parser.add_argument(
            '--network_id',
            metavar="network_id",
            help="filter by network id")
        parser.add_argument(
            '--gw_vipv4',
            metavar="gw_vipv4",
            help="filter by gateway ipv4")
        parser.add_argument(
            '--gw_vipv6',
            metavar="gw_vipv6",
            help="filter by gateway ipv6")
        parser.add_argument(
            '--primary_ipv4',
            metavar="primary_ipv4",
            help="filter by primary ipv4")
        parser.add_argument(
            '--primary_ipv6',
            metavar="primary_ipv6",
            help="filter by primary ipv6")
        parser.add_argument(
            '--secondary_ipv4',
            metavar="secondary_ipv4",
            help="filter by secondary ipv4")
        parser.add_argument(
            '--secondary_ipv6',
            metavar="secondary_ipv6",
            help="filter by secondary ipv6")
        parser.add_argument(
            '--vrid',
            metavar="vrid",
            help="filter by vrid")
        return parser

    def take_action(self, parsed_args):
        network_client = self.app.client_manager.network

        columns = (
            'id',
            'name',
            'service_type',
            'network_id',
            'status',
        )
        column_headers = (
            'ID',
            'Name',
            'Service Type',
            'Network',
            'Status',
        )

        # Build the filter dict from whichever options were supplied.
        # Falsy (unset) values are skipped, matching the previous behavior
        # of one `if parsed_args.X:` block per attribute.
        search_opts = {}
        for attr in self._FILTER_ATTRIBUTES:
            value = getattr(parsed_args, attr)
            if value:
                search_opts[attr] = value

        data = [to_obj.GwInterface(gw_interface)
                for gw_interface in network_client.list_gw_interfaces(
                    **search_opts).get('gw_interfaces')]

        return (column_headers,
                (utils.get_item_properties(
                    s, columns,
                ) for s in data))
class ShowGwInterface(command.ShowOne):
    """Show details of a single Gateway Interface."""

    def get_parser(self, prog_name):
        parser = super(ShowGwInterface, self).get_parser(prog_name)
        parser.add_argument(
            'gw_interface_id',
            metavar="GATEWAY_INTERFACE_ID",
            help="ID of Gateway Interface to show."
        )
        return parser

    def take_action(self, parsed_args):
        client = self.app.client_manager.network
        response = client.show_gw_interface(parsed_args.gw_interface_id)
        record = response.get('gw_interface')
        # The raw dict itself serves as the column source (iterating it
        # yields its keys); sibling commands use utils.get_columns instead.
        columns = record
        row = utils.get_item_properties(to_obj.GwInterface(record), columns,)
        return columns, row
class CreateGwInterface(command.ShowOne):
    """Create a Gateway Interface.

    At most one of --internet_gw_id / --interdc_gw_id / --vpn_gw_id /
    --fic_gw_id may be given (mutually exclusive); it should match the
    chosen --service_type.
    """

    def get_parser(self, prog_name):
        parser = super(CreateGwInterface, self).get_parser(prog_name)
        parser.add_argument(
            '--name',
            metavar='<string>',
            help='Name of Gateway interface to create.')
        parser.add_argument(
            '--description',
            metavar='<string>',
            help='Description of Gateway interface to create.')
        parser.add_argument(
            '--service_type',
            metavar='{vpn|internet|interdc|fic}',
            choices=["vpn", "internet", "interdc", "fic"],
            required=True,
            help='Service type of Gateway interface to create')
        parser.add_argument(
            '--vrid',
            metavar='VRID',
            type=int,
            required=True,
            help='VRRP ID of Gateway interface to create.')
        parser.add_argument(
            '--network_id',
            metavar='NETWORK_ID',
            required=True,
            help='Network ID of Gateway interface to create.')
        parser.add_argument(
            '--netmask',
            metavar='NETMASK',
            required=True,
            type=int,
            help='Netmask of Gateway interface to create.')
        parser.add_argument(
            '--primary_ipv4',
            metavar='<ipv4>',
            required=True,
            help='Primary IPv4 of Gateway interface to create.')
        parser.add_argument(
            '--secondary_ipv4',
            metavar='<ipv4>',
            required=True,
            help='Secondary IPv4 of Gateway interface to create.')
        parser.add_argument(
            '--gw_vipv4',
            metavar='<ipv4>',
            required=True,
            # BUG FIX: help previously read "Secondary IPv4" (copy/paste).
            help='Gateway virtual IPv4 of Gateway interface to create.')
        group = parser.add_mutually_exclusive_group()
        group.add_argument(
            '--internet_gw_id',
            metavar='INTERNET_GATEWAY_ID',
            help='Internet Gateway ID of Gateway interface to create.')
        group.add_argument(
            '--interdc_gw_id',
            metavar='INTERDC_GATEWAY_ID',
            help='InterDC Gateway ID of Gateway interface to create.')
        group.add_argument(
            '--vpn_gw_id',
            metavar='VPN_GATEWAY_ID',
            help='VPN Gateway ID of Gateway interface to create.')
        group.add_argument(
            '--fic_gw_id',
            metavar='FIC_GATEWAY_ID',
            help='FIC Gateway ID of Gateway interface to create.')
        parser.add_argument(
            '--primary_ipv6',
            metavar='<ipv6>',
            help='Primary IPv6 of Gateway interface to create.')
        parser.add_argument(
            '--secondary_ipv6',
            metavar='<ipv6>',
            help='Secondary IPv6 of Gateway interface to create.')
        parser.add_argument(
            '--gw_vipv6',
            metavar='<ipv6>',
            # BUG FIX: help previously read "Secondary IPv6" (copy/paste).
            help='Gateway virtual IPv6 of Gateway interface to create.')
        return parser

    def take_action(self, parsed_args):
        network_client = self.app.client_manager.network
        body = {'gw_interface': {}}
        # Presumably copies the listed parsed_args attributes into the
        # request body -- see utils.update_dict for exact semantics.
        utils.update_dict(
            parsed_args,
            body['gw_interface'],
            ['name', 'description', 'service_type',
             'primary_ipv4', 'secondary_ipv4', 'gw_vipv4',
             'internet_gw_id', 'vpn_gw_id', 'fic_gw_id', 'interdc_gw_id',
             'primary_ipv6', 'secondary_ipv6', 'gw_vipv6',
             'vrid', 'network_id', 'netmask'])
        dic = network_client.create_gw_interface(body).get('gw_interface')
        columns = utils.get_columns(dic)
        obj = to_obj.GwInterface(dic)
        data = utils.get_item_properties(
            obj, columns, )
        return columns, data
class SetGwInterface(command.ShowOne):
    """Update the name and/or description of a Gateway Interface."""

    def get_parser(self, prog_name):
        parser = super(SetGwInterface, self).get_parser(prog_name)
        parser.add_argument(
            'gw_interface',
            metavar='GATEWAY_INTERFACE_ID',
            help='ID of gateway interface to update.')
        parser.add_argument(
            '--name',
            metavar='<string>',
            # BUG FIX: help previously said "to create" (copied from Create).
            help='Name of Gateway interface to update.')
        parser.add_argument(
            '--description',
            metavar='<string>',
            # BUG FIX: help previously said "to create" (copied from Create).
            help='Description of Gateway interface to update.')
        return parser

    def take_action(self, parsed_args):
        network_client = self.app.client_manager.network
        body = {'gw_interface': {}}
        gw_interface_id = parsed_args.gw_interface
        # Presumably copies the listed parsed_args attributes into the
        # request body -- see utils.update_dict for exact semantics.
        utils.update_dict(
            parsed_args,
            body['gw_interface'],
            ['name', 'description'])
        dic = network_client.update_gw_interface(
            gw_interface_id, body).get('gw_interface')
        columns = utils.get_columns(dic)
        obj = to_obj.GwInterface(dic)
        data = utils.get_item_properties(
            obj, columns, )
        return columns, data
class DeleteGwInterface(command.Command):
    """Delete one or more Gateway Interfaces."""

    def get_parser(self, prog_name):
        parser = super(DeleteGwInterface, self).get_parser(prog_name)
        parser.add_argument(
            'gw_interface_id',
            metavar="GATEWAY_INTERFACE_ID",
            nargs="+",
            help="ID(s) of Gateway Interface to delete."
        )
        return parser

    def take_action(self, parsed_args):
        client = self.app.client_manager.network
        # Delete each requested interface in turn.
        for interface_id in parsed_args.gw_interface_id:
            client.delete_gw_interface(interface_id)
| 2.265625 | 2 |
flask/migrations/versions/324666fdfa8a_.py | schinke/solid-fortnight-ba | 1 | 12766091 | """empty message
Revision ID: 324666fdfa8a
Revises: <PASSWORD>
Create Date: 2016-08-04 13:45:37.492317
"""
# revision identifiers, used by Alembic.
# NOTE(review): these values look like redacted placeholders; the header
# docstring says the revision is 324666fdfa8a -- restore the real
# identifiers before running this migration.
revision = '<KEY>'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Recreate prod_process_association.product_id FK with ON DELETE CASCADE."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(u'prod_process_association_product_id_fkey', 'prod_process_association', type_='foreignkey')
    # Name is None: the database generates the new constraint's name;
    # downgrade() restores the original explicitly-named FK.
    op.create_foreign_key(None, 'prod_process_association', 'product', ['product_id'], ['id'], ondelete='CASCADE')
    ### end Alembic commands ###
def downgrade():
    """Revert the product_id FK to its original form (no ON DELETE CASCADE)."""
    ### commands auto generated by Alembic - please adjust! ###
    # Name is None here because upgrade() created the constraint unnamed.
    op.drop_constraint(None, 'prod_process_association', type_='foreignkey')
    op.create_foreign_key(u'prod_process_association_product_id_fkey', 'prod_process_association', 'product', ['product_id'], ['id'])
    ### end Alembic commands ###
| 1.492188 | 1 |
webauthn/helpers/decode_credential_public_key.py | MasterKale/py_webauthn | 0 | 12766092 | from typing import Union
from attr import define
from cbor2 import decoder
from .cose import COSECRV, COSEKTY, COSEAlgorithmIdentifier, COSEKey
from .exceptions import InvalidPublicKeyStructure, UnsupportedPublicKeyType
@define
class DecodedOKPPublicKey:
    """COSE Octet Key Pair (OKP) public key fields decoded from CBOR."""
    kty: COSEKTY
    alg: COSEAlgorithmIdentifier
    crv: COSECRV
    x: bytes
@define
class DecodedEC2PublicKey:
    """COSE elliptic-curve (EC2) public key fields decoded from CBOR."""
    kty: COSEKTY
    alg: COSEAlgorithmIdentifier
    crv: COSECRV
    x: bytes
    y: bytes
@define
class DecodedRSAPublicKey:
    """COSE RSA public key fields (modulus n, exponent e) decoded from CBOR."""
    kty: COSEKTY
    alg: COSEAlgorithmIdentifier
    n: bytes
    e: bytes
def decode_credential_public_key(
    key: bytes,
) -> Union[DecodedOKPPublicKey, DecodedEC2PublicKey, DecodedRSAPublicKey]:
    """
    Decode a CBOR-encoded public key and turn it into a data structure.

    Supports OKP, EC2, and RSA public keys.

    Raises:
        InvalidPublicKeyStructure: a required COSE field is missing
        UnsupportedPublicKeyType: kty is not OKP, EC2, or RSA
    """
    # Occasionally we might be given a public key in an "uncompressed" format,
    # typically from older U2F security keys. As per the FIDO spec this is indicated by
    # a leading 0x04 "uncompressed point compression method" format byte. In that case
    # we need to fill in some blanks to turn it into a full EC2 key for signature
    # verification
    #
    # See https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-registry-v2.0-id-20180227.html#public-key-representation-formats
    if key[0] == 0x04:
        return DecodedEC2PublicKey(
            kty=COSEKTY.EC2,
            alg=COSEAlgorithmIdentifier.ECDSA_SHA_256,
            crv=COSECRV.P256,
            x=key[1:33],
            y=key[33:65],
        )

    decoded_key: dict = decoder.loads(key)

    # BUG FIX: use dict.get() so a missing field reaches the explicit checks
    # below and raises InvalidPublicKeyStructure; indexing with [] raised a
    # bare KeyError first, making every "missing ..." check dead code.
    kty = decoded_key.get(COSEKey.KTY)
    alg = decoded_key.get(COSEKey.ALG)

    if not kty:
        raise InvalidPublicKeyStructure("Credential public key missing kty")
    if not alg:
        raise InvalidPublicKeyStructure("Credential public key missing alg")

    if kty == COSEKTY.OKP:
        crv = decoded_key.get(COSEKey.CRV)
        x = decoded_key.get(COSEKey.X)

        if not crv:
            raise InvalidPublicKeyStructure("OKP credential public key missing crv")
        if not x:
            raise InvalidPublicKeyStructure("OKP credential public key missing x")

        return DecodedOKPPublicKey(
            kty=kty,
            alg=alg,
            crv=crv,
            x=x,
        )
    elif kty == COSEKTY.EC2:
        crv = decoded_key.get(COSEKey.CRV)
        x = decoded_key.get(COSEKey.X)
        y = decoded_key.get(COSEKey.Y)

        if not crv:
            raise InvalidPublicKeyStructure("EC2 credential public key missing crv")
        if not x:
            raise InvalidPublicKeyStructure("EC2 credential public key missing x")
        if not y:
            raise InvalidPublicKeyStructure("EC2 credential public key missing y")

        return DecodedEC2PublicKey(
            kty=kty,
            alg=alg,
            crv=crv,
            x=x,
            y=y,
        )
    elif kty == COSEKTY.RSA:
        n = decoded_key.get(COSEKey.N)
        e = decoded_key.get(COSEKey.E)

        if not n:
            raise InvalidPublicKeyStructure("RSA credential public key missing n")
        if not e:
            raise InvalidPublicKeyStructure("RSA credential public key missing e")

        return DecodedRSAPublicKey(
            kty=kty,
            alg=alg,
            n=n,
            e=e,
        )

    raise UnsupportedPublicKeyType(f'Unsupported credential public key type "{kty}"')
| 2.46875 | 2 |
derivations/clustering.py | probcomp/distributions | 1 | 12766093 | <filename>derivations/clustering.py<gh_stars>1-10
# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
A parameter-free clustering prior based on partition entropy.
The Dirichlet prior is often used in nonparametric mixture models to model
the partitioning of observations into clusters.
This class implements a parameter-free alternative to the Dirichlet prior
that preserves exchangeability, preserves asymptotic convergence rate of
density estimation, and has an elegant interpretation as a
minimum-description-length prior.
Motivation
----------
In conjugate mixture models, samples from the posterior are sufficiently
represented by a partitioning of observations into clusters, say as an
assignment vector X with cluster labels X_i for each observation i.
In our ~10^6-observation production system, we found the data size of this
assignment vector to be a limiting factor in query latency, even after
lossless compression. To address this problem, we tried to incorporate the
Shannon entropy of this assignment vector directly into the prior, as
an information criterion. Surprisingly, the resulting low-entropy prior
enjoys a number of properties:
- The low-entropy prior enjoys similar asymptotic convergence as the
Dirichlet prior.
- The probability of a clustering is elegant and easy to evaluate
(up to an unknown normalizing constant).
- The resulting distribution resembles a CRP distribution with parameter
alpha = exp(-1), but slightly avoids small clusters.
- MAP estimates are minimum-description-length, as measured by assignment
vector complexity.
- The low-entropy prior is parameter free, unlike the CRP, Pitman-Yor, or
Mixture of Finite Mixture models.
A difficulty is that the prior depends on dataset size, and is hence not a
proper nonparametric generative model.
'''
import os
from collections import defaultdict
import numpy
from numpy import log, exp
import math
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot, font_manager
import parsable
from distributions.lp.special import fast_log
from distributions.io.stream import json_stream_load, json_stream_dump
parsable = parsable.Parsable()  # rebind module name to a command dispatcher
assert exp  # pacify pyflakes
DEFAULT_MAX_SIZE = 47  # largest sample size for which exact tables are built
ROOT = os.path.dirname(os.path.abspath(__file__))
TEMP = os.path.join(ROOT, 'clustering.data')  # on-disk cache + plot directory
if not os.path.exists(TEMP):
    os.makedirs(TEMP)
CACHE = {}  # in-process memo: cache file path -> loaded/computed table
def savefig(stem):
    '''Save the current pyplot figure as TEMP/<stem>.png and TEMP/<stem>.pdf.'''
    for extension in ['png', 'pdf']:
        name = '{}/{}.{}'.format(TEMP, stem, extension)
        print 'saving', name
        # tight_layout is re-applied before each save so both formats match
        pyplot.tight_layout()
        pyplot.savefig(name)
def get_larger_counts(small_counts):
    '''
    Given shape counts for partitions of n items, return shape counts for
    partitions of n + 1 items: the new item either starts a fresh singleton
    cluster or joins each existing cluster in turn.
    '''
    large_counts = defaultdict(float)
    for small_shape, count in small_counts.items():
        # the new item starts its own cluster
        large_counts[(1,) + small_shape] += count
        # or the new item joins one of the existing clusters
        for position in range(len(small_shape)):
            grown = list(small_shape)
            grown[position] += 1
            large_counts[tuple(sorted(grown))] += count
    return dict(large_counts)
def get_smaller_probs(small_counts, large_counts, large_probs):
    '''
    Marginalize shape probabilities over n + 1 items down to n items, by
    summing the per-partition probability of every placement the extra
    item could have had.
    '''
    assert len(large_counts) == len(large_probs)

    def per_partition(shape):
        # probability mass of one individual partition with this shape
        return large_probs[shape] / large_counts[shape]

    small_probs = {}
    for small_shape, count in small_counts.items():
        # the extra item formed its own singleton cluster ...
        total = per_partition((1,) + small_shape)
        # ... or it was absorbed into one of the existing clusters
        for position in range(len(small_shape)):
            grown = list(small_shape)
            grown[position] += 1
            total += per_partition(tuple(sorted(grown)))
        small_probs[small_shape] = count * total
    return small_probs
def get_counts(size):
    '''
    Count partition shapes of a given sample size.

    A shape is a sorted tuple of cluster sizes; the count is the number of
    distinct partitions of `size` labeled items with that shape.  Results
    are memoized both in-process (CACHE) and on disk (bz2'd JSON stream).

    Inputs:
        size = sample_size
    Returns:
        dict : shape -> count
    '''
    assert 0 <= size
    cache_file = '{}/counts.{}.json.bz2'.format(TEMP, size)
    if cache_file not in CACHE:
        if os.path.exists(cache_file):
            # disk cache hit: JSON turned tuple keys into lists, restore them
            flat = json_stream_load(cache_file)
            large = {tuple(key): val for key, val in flat}
        else:
            if size == 0:
                # base case: the single (empty) partition of zero items
                large = {(): 1.0}
            else:
                # recursively extend the table for one fewer item
                small = get_counts(size - 1)
                large = get_larger_counts(small)
            print 'caching', cache_file
            json_stream_dump(large.iteritems(), cache_file)
        CACHE[cache_file] = large
    return CACHE[cache_file]
def enum_counts(max_size):
    '''Return the list [get_counts(0), ..., get_counts(max_size)].'''
    return list(map(get_counts, range(max_size + 1)))
def get_log_z(shape):
    '''Return the unnormalized log weight sum(n log n) over a shape's cluster sizes.'''
    total = 0.0
    for cluster_size in shape:
        total += cluster_size * math.log(cluster_size)
    return total
def get_log_Z(counts):
    '''Return log of the partition function: log sum over shapes of count * z(shape).'''
    log_terms = []
    for shape, count in counts.items():
        # log z(shape) = sum(n log n), inlined from get_log_z
        log_z = sum(n * math.log(n) for n in shape)
        log_terms.append(log_z + math.log(count))
    return numpy.logaddexp.reduce(log_terms)
def get_probs(size):
    '''
    Return the normalized shape distribution over partitions of `size`
    items, weighting each shape by count * z(shape).
    '''
    log_weights = {}
    for shape, count in get_counts(size).items():
        log_weights[shape] = get_log_z(shape) + math.log(count)
    log_total = numpy.logaddexp.reduce(list(log_weights.values()))
    return {
        shape: math.exp(log_weight - log_total)
        for shape, log_weight in log_weights.items()
    }
def get_subprobs(size, max_size):
    '''
    Compute probabilities of shapes of partial assignment vectors.

    Marginalizes the full shape distribution over `max_size` items down to
    subsamples of `size` items, recursing one item at a time.  Results are
    memoized both in-process (CACHE) and on disk (bz2'd JSON stream).

    Inputs:
        size = sample_size
        max_size = dataset_size
    Returns:
        dict : shape -> prob
    '''
    assert 0 <= size
    assert size <= max_size
    cache_file = '{}/subprobs.{}.{}.json.bz2'.format(TEMP, size, max_size)
    if cache_file not in CACHE:
        if os.path.exists(cache_file):
            # disk cache hit: JSON turned tuple keys into lists, restore them
            flat = json_stream_load(cache_file)
            small_probs = {tuple(key): val for key, val in flat}
        else:
            if size == max_size:
                # base case: the distribution over the full dataset
                small_probs = get_probs(size)
            else:
                # marginalize down from the distribution over size + 1 items
                small_counts = get_counts(size)
                large_counts = get_counts(size + 1)
                large_probs = get_subprobs(size + 1, max_size)
                small_probs = get_smaller_probs(
                    small_counts,
                    large_counts,
                    large_probs)
            print 'caching', cache_file
            json_stream_dump(small_probs.iteritems(), cache_file)
        CACHE[cache_file] = small_probs
    return CACHE[cache_file]
def enum_probs(max_size):
    '''Return the list [get_probs(0), ..., get_probs(max_size)].'''
    return list(map(get_probs, range(max_size + 1)))
@parsable.command
def priors(N=100):
    '''
    Plots different partition priors.

    Compares posterior predictive curves of CRP priors (several alphas)
    against the parameter-free low-entropy prior, for category sizes 1..N.
    '''
    X = numpy.array(range(1, N + 1))
    def plot(Y, *args, **kwargs):
        # normalize log-probabilities so all curves are comparable
        Y = numpy.array(Y)
        Y -= numpy.logaddexp.reduce(Y)
        pyplot.plot(X, Y, *args, **kwargs)
    def crp(alpha):
        # CRP predictive: mass alpha for a new table, (n - 1) for size n
        assert 0 < alpha
        prob = numpy.zeros(len(X))
        prob[1:] = log(X[1:] - 1)
        prob[0] = log(alpha)
        return prob
    def n_log_n(n):
        return n * log(n)
    def entropy():
        # low-entropy predictive: n log n - (n - 1) log(n - 1)
        prob = numpy.zeros(len(X))
        prob[1:] = n_log_n(X[1:]) - n_log_n(X[1:] - 1)
        return prob
    def plot_crp(alpha):
        # alpha is a string so it doubles as the legend label;
        # eval() turns e.g. 'exp(-1)' into its numeric value
        plot(crp(eval(alpha)), label='CRP({})'.format(alpha))
    def plot_entropy():
        plot(entropy(), 'k--', linewidth=2, label='low-entropy')
    pyplot.figure(figsize=(8, 4))
    plot_entropy()
    plot_crp('0.01')
    plot_crp('0.1')
    plot_crp('exp(-1)')
    plot_crp('1.0')
    plot_crp('10.0')
    pyplot.title('Posterior Predictive Curves of Clustering Priors')
    pyplot.xlabel('category size')
    pyplot.ylabel('log(probability)')
    pyplot.xscale('log')
    pyplot.legend(loc='best')
    savefig('priors')
def get_pairwise(counts):
    '''
    Return the probability that two distinct items land in the same
    cluster, under a shape distribution `counts` (shape tuple -> prob).
    '''
    # every shape partitions the same number of items, so the total can
    # be read off an arbitrary key
    size = sum(next(iter(counts)))
    all_pairs = float(size * (size - 1))
    paired = 0.0
    for shape, prob in counts.items():
        same_cluster_pairs = sum(n * (n - 1) for n in shape)
        paired += prob * same_cluster_pairs / all_pairs
    return paired
@parsable.command
def pairwise(max_size=DEFAULT_MAX_SIZE):
    '''
    Plot probability that two points lie in same cluster,
    as function of data set size.
    '''
    # NOTE: despite the name, all_counts holds shape *probabilities*
    # (enum_probs), which is what get_pairwise expects.
    all_counts = enum_probs(max_size)
    sizes = range(2, len(all_counts))
    probs = [get_pairwise(all_counts[i]) for i in sizes]
    pyplot.figure()
    pyplot.plot(sizes, probs, marker='.')
    pyplot.title('\n'.join([
        'Cohabitation probability depends on dataset size',
        '(unlike the CRP or PYP)'
    ]))
    pyplot.xlabel('# objects')
    pyplot.ylabel('P[two random objects in same cluster]')
    pyplot.xscale('log')
    # pyplot.yscale('log')
    pyplot.ylim(0, 1)
    savefig('pairwise')
def get_color_range(size):
    '''Return `size` RGB tuples interpolating the red and blue channels.'''
    step = 1.0 / (size - 1.0)
    colors = []
    for index in range(size):
        red = step * index
        blue = step * (size - index - 1)
        colors.append((red, 0.5, blue))
    return colors
def approximate_postpred_correction(subsample_size, dataset_size):
    '''
    Empirical approximation to the log correction applied to the singleton
    posterior predictive when scoring a subsample of a larger dataset.
    '''
    log_ratio = numpy.log(float(dataset_size) / subsample_size)
    slope = 0.45 - 0.1 / subsample_size - 0.1 / dataset_size
    return log_ratio * slope
def ad_hoc_size_factor(subsample_size, dataset_size):
    '''Multiplicative (non-log) form of the approximate postpred correction.'''
    correction = approximate_postpred_correction(subsample_size, dataset_size)
    return numpy.exp(correction)
@parsable.command
def postpred(subsample_size=10):
    '''
    Plot posterior predictive probability and approximations,
    fixing subsample size and varying cluster size and dataset size.
    '''
    size = subsample_size
    max_sizes = [size] + [2, 3, 5, 8, 10, 15, 20, 30, 40, 50]
    max_sizes = sorted(set(s for s in max_sizes if s >= size))
    colors = get_color_range(len(max_sizes))
    pyplot.figure(figsize=(12, 8))
    Y_max = 0
    large_counts = get_counts(size)
    # one scatter color per dataset size
    for max_size, color in zip(max_sizes, colors):
        large_probs = get_subprobs(size, max_size)
        small_probs = get_subprobs(size - 1, max_size)
        def plot(X, Y, **kwargs):
            pyplot.scatter(
                X, Y,
                color=color,
                edgecolors='none',
                **kwargs)
        plot([], [], label='max_size = {}'.format(max_size))
        max_small_prob = max(small_probs.itervalues())
        # for each (size - 1)-shape, evaluate every way to add one item
        for small_shape, small_prob in small_probs.iteritems():
            X = []
            Y = []
            # create a new partition
            n = 1
            large_shape = (1,) + small_shape
            prob = large_probs[large_shape] / large_counts[large_shape]
            X.append(n)
            Y.append(prob)
            singleton_prob = prob
            # add to each existing partition
            for i in range(len(small_shape)):
                n = small_shape[i] + 1
                large_shape = list(small_shape)
                large_shape[i] += 1
                large_shape.sort()
                large_shape = tuple(large_shape)
                prob = large_probs[large_shape] / large_counts[large_shape]
                X.append(n)
                Y.append(prob)
            X = numpy.array(X)
            Y = numpy.array(Y)
            # normalize so the singleton move has probability 1
            Y /= singleton_prob
            # fade points in proportion to how likely the shape is
            alpha = small_prob / max_small_prob
            plot(X, Y, alpha=alpha)
            Y_max = max(Y_max, max(Y))
    X = numpy.array(range(1, size + 1))
    # entropy
    entropy = numpy.array([
        x * (x / (x - 1.0)) ** (x - 1.0) if x > 1 else 1
        for x in X
    ])
    Y = entropy / entropy.min()
    pyplot.plot(X, Y, 'k--', label='entropy', linewidth=2)
    # CRP
    alpha = math.exp(-1)
    Y = numpy.array([x - 1 if x > 1 else alpha for x in X])
    Y /= Y.min()
    pyplot.plot(X, Y, 'g-', label='CRP(exp(-1))'.format(alpha))
    # ad hoc
    factors = ad_hoc_size_factor(size, numpy.array(max_sizes))
    for factor in factors:
        Y = entropy.copy()
        Y[0] *= factor
        Y /= Y.min()
        pyplot.plot(X, Y, 'r--')
    pyplot.plot([], [], 'r--', label='ad hoc')
    pyplot.yscale('log')
    pyplot.xscale('log')
    # NOTE: max_size here is the last value of the loop variable above
    pyplot.title(
        'Adding 1 point to subsample of {} points out of {} total'.format(
            size, max_size))
    pyplot.xlabel('cluster size')
    pyplot.ylabel('posterior predictive probability')
    pyplot.xlim(1, size * 1.01)
    pyplot.ylim(1, Y_max * 1.01)
    pyplot.legend(
        prop=font_manager.FontProperties(size=10),
        loc='upper left')
    savefig('postpred')
def true_postpred_correction(subsample_size, dataset_size):
    '''
    Compute true postpred constant according to size-based approximation.

    Averages, over shapes and non-singleton moves, the gap between the
    exact log singleton/non-singleton predictive ratio and the entropy
    baseline; the average is weighted by posterior mass.
    '''
    large_counts = get_counts(subsample_size)
    large_probs = get_subprobs(subsample_size, dataset_size)
    small_probs = get_subprobs(subsample_size - 1, dataset_size)
    numer = 0
    denom = 0
    for small_shape, small_prob in small_probs.iteritems():
        # enumerate (new cluster size, per-partition prob) for each way
        # the extra item could be placed
        probs = []
        # create a new partition
        n = 1
        large_shape = (1,) + small_shape
        prob = large_probs[large_shape] / large_counts[large_shape]
        probs.append((n, prob))
        # add to each existing partition
        for i in range(len(small_shape)):
            n = small_shape[i] + 1
            large_shape = list(small_shape)
            large_shape[i] += 1
            large_shape.sort()
            large_shape = tuple(large_shape)
            prob = large_probs[large_shape] / large_counts[large_shape]
            probs.append((n, prob))
        total = sum(prob for _, prob in probs)
        singleton_prob = probs[0][1]
        # skip probs[0]: the correction concerns non-singleton moves only
        for n, prob in probs[1:]:
            weight = small_prob * prob / total
            baseline = -math.log(n * (n / (n - 1.0)) ** (n - 1.0))
            correction = math.log(singleton_prob / prob) - baseline
            numer += weight * correction
            denom += weight
    return numer / denom if denom > 0 else 1.0
@parsable.command
def dataprob(subsample_size=10, dataset_size=50):
    '''
    Plot data prob approximation.
    This tests the accuracy of LowEntropy.score_counts(...).

    Scatter-plots naive and size-corrected shape probabilities against
    the exact subsample probabilities, on log-log axes.
    '''
    true_probs = get_subprobs(subsample_size, dataset_size)
    naive_probs = get_probs(subsample_size)
    shapes = true_probs.keys()
    # apply ad hoc size factor
    approx_probs = naive_probs.copy()
    factor = ad_hoc_size_factor(subsample_size, dataset_size)
    print 'factor =', factor
    for shape in shapes:
        approx_probs[shape] *= factor ** (len(shape) - 2)
    X = numpy.array([true_probs[shape] for shape in shapes])
    Y0 = numpy.array([naive_probs[shape] for shape in shapes])
    Y1 = numpy.array([approx_probs[shape] for shape in shapes])
    pyplot.figure()
    pyplot.scatter(X, Y0, color='blue', edgecolors='none', label='naive')
    pyplot.scatter(X, Y1, color='red', edgecolors='none', label='approx')
    pyplot.xlabel('true probability')
    pyplot.ylabel('approximation')
    # square the axes and draw the y = x reference line
    LB = min(X.min(), Y0.min(), Y1.min())
    UB = max(X.max(), Y0.max(), Y1.max())
    pyplot.xlim(LB, UB)
    pyplot.ylim(LB, UB)
    pyplot.plot([LB, UB], [LB, UB], 'k--')
    pyplot.xscale('log')
    pyplot.yscale('log')
    pyplot.title('\n'.join([
        'Approximate data probability',
        'subsample_size = {}, dataset_size = {}'.format(
            subsample_size,
            dataset_size),
    ]))
    pyplot.legend(
        prop=font_manager.FontProperties(size=10),
        loc='lower right')
    savefig('dataprob')
def true_dataprob_correction(subsample_size, dataset_size):
    '''
    Compute the exact normalization correction: the negative log of the
    total mass remaining after applying the ad hoc size factor.
    '''
    naive_probs = get_probs(subsample_size)
    factor = ad_hoc_size_factor(subsample_size, dataset_size)
    total = 0.0
    for shape, prob in naive_probs.items():
        total += prob * factor ** (len(shape) - 1)
    return -math.log(total)
def approximate_dataprob_correction(subsample_size, dataset_size):
    '''
    Empirical fit to the dataprob normalization correction, expressed in
    terms of the log subsample size and log dataset size.
    '''
    log_small = math.log(subsample_size)
    log_large = math.log(dataset_size)
    gap = log_small - log_large
    return 0.061 * log_small * gap * (log_small + log_large) ** 0.75
@parsable.command
def normalization(max_size=DEFAULT_MAX_SIZE):
    '''
    Plot approximation to partition function of low-entropy clustering
    distribution for various set sizes.

    Tries several coefficients in log(Z) ~ log(z_max) * (1 + c * n**-0.75)
    and plots the relative error of each.
    '''
    pyplot.figure()
    all_counts = enum_counts(max_size)
    sizes = numpy.array(range(1, 1 + max_size))
    log_Z = numpy.array([
        get_log_Z(all_counts[size]) for size in sizes
    ])
    log_z_max = numpy.array([get_log_z([size]) for size in sizes])
    # candidate coefficients: exact fits at the smallest and largest sizes,
    # plus a few hand-picked values in between
    coeffs = [
        (log_Z[i] / log_z_max[i] - 1.0) * sizes[i] ** 0.75
        for i in [1, -1]
    ]
    coeffs += [0.27, 0.275, 0.28]
    for coeff in coeffs:
        print coeff
        approx = log_z_max * (1 + coeff * sizes ** -0.75)
        X = sizes ** -0.75
        Y = (log_Z - approx) / log_Z
        pyplot.plot(X, Y, marker='.', label='coeff = {}'.format(coeff))
    pyplot.xlim(0, 1)
    pyplot.xlabel('1 / size')
    pyplot.ylabel('approx error')
    pyplot.title(
        'log(Z) ~ log(z_max) * (1 + coeff * size ** -0.75)')
    pyplot.legend(loc='best')
    savefig('normalization')
@parsable.command
def approximations(max_size=DEFAULT_MAX_SIZE):
    '''
    Plot both main approximations for many (subsample, dataset) sizes:
    (1) normalization constant, and
    (2) postpred size factor
    '''
    sizes = [1, 2, 3, 4, 5, 7, 10, 15, 20, 30, 40, 50, 60]
    sizes = [size for size in sizes if size <= max_size]
    keys = [(x, y) for x in sizes for y in sizes if x <= y]
    truth1 = {}
    truth2 = {}
    approx1 = {}
    approx2 = {}
    for key in keys:
        size, max_size = key
        # postpred correction
        if size > 1:
            truth1[key] = true_postpred_correction(size, max_size)
            approx1[key] = approximate_postpred_correction(size, max_size)
        # normalization correction
        truth2[key] = true_dataprob_correction(size, max_size)
        approx2[key] = approximate_dataprob_correction(size, max_size)
    fig, (ax1, ax2) = pyplot.subplots(2, 1, sharex=True, figsize=(12, 8))
    ax1.set_title('Approximation accuracies of postpred and dataprob')
    ax2.set_ylabel('log(Z correction)')
    ax1.set_ylabel('log(singleton postpred correction)')
    ax2.set_xlabel('subsample size')
    ax1.set_xlim(min(sizes) * 0.95, max(sizes) * 1.05)
    ax1.set_xscale('log')
    ax2.set_xscale('log')
    def plot(ax, X, y, values, *args, **kwargs):
        # only plot points whose (x, y) pair was actually computed
        Y = [values[x, y] for x in X if (x, y) in values]
        X = [x for x in X if (x, y) in values]
        ax.plot(X, Y, *args, alpha=0.5, marker='.', **kwargs)
    for max_size in sizes:
        X = [n for n in sizes if n <= max_size]
        plot(ax1, X, max_size, truth1, 'k-')
        plot(ax1, X, max_size, approx1, 'r-')
        plot(ax2, X, max_size, truth2, 'k-')
        plot(ax2, X, max_size, approx2, 'r-')
    # empty plots exist only to create one legend entry per color
    plot(ax1, [], None, {}, 'r-', label='approximation')
    plot(ax1, [], None, {}, 'k-', label='truth')
    ax1.legend(loc='upper right')
    savefig('approximations')
@parsable.command
def fastlog():
    '''
    Plot accuracy of fastlog term in cluster_add_score.

    Compares n * fast_log(1 + 1/n) against exact math.log and the
    asymptote 1, over n = 1 .. 2**20.
    '''
    X = numpy.array([2.0 ** i for i in range(20 + 1)])
    Y0 = numpy.array([x * math.log(1. + 1. / x) for x in X])
    Y1 = numpy.array([x * fast_log(1. + 1. / x) for x in X])
    Y2 = numpy.array([1.0 for x in X])
    fig, (ax1, ax2) = pyplot.subplots(2, 1, sharex=True)
    ax1.plot(X, Y0, 'ko', label='math.log')
    ax1.plot(X, Y1, 'r-', label='lp.special.fast_log')
    ax1.plot(X, Y2, 'b-', label='asymptote')
    ax2.plot(X, numpy.abs(Y1 - Y0), 'r-', label='lp.special.fast_log')
    ax2.plot(X, numpy.abs(Y2 - Y0), 'b-', label='asymptote')
    ax1.set_title('lp.special.fast_log approximation')
    ax1.set_ylabel('n log(1 + 1 / n)')
    ax2.set_ylabel('approximation error')
    ax2.set_xlabel('n')
    ax1.set_xscale('log')
    ax2.set_xscale('log')
    ax2.set_yscale('log')
    ax1.legend(loc='best')
    ax2.legend(loc='best')
    savefig('fastlog')
def number_table(numbers, width=5):
    '''
    Format numbers as a comma-separated table, `width` values per row,
    each rendered with 8 decimal places and a leading space.
    '''
    rows = []
    for start in range(0, len(numbers), width):
        chunk = numbers[start:start + width]
        rows.append(' ' + ', '.join('%0.8f' % value for value in chunk))
    # trailing comma on every row but the last, as in the original output
    return ',\n'.join(rows)
@parsable.command
def code(max_size=DEFAULT_MAX_SIZE):
    '''
    Generate C++ code for clustering partition function.

    Prints two snippets to stdout: a C++ table + function for
    src/clustering.cc and a Python equivalent for
    distributions/dbg/clustering.py.  Both use an exact table up to
    max_size and the fitted asymptotic formula beyond it.
    '''
    all_counts = enum_counts(max_size)
    sizes = range(1 + max_size)
    log_Z = [
        get_log_Z(all_counts[size]) if size else 0
        for size in sizes
    ]
    # fit the asymptotic coefficient at the largest tabulated size
    size = sizes[-1]
    coeff = (log_Z[-1] / get_log_z([size]) - 1.0) * size ** 0.75
    print '# Insert this in src/clustering.cc:'
    lines = [
        '// this code was generated by derivations/clustering.py',
        'static const float log_partition_function_table[%d] =' %
        (max_size + 1),
        '{',
        number_table(log_Z),
        '};',
        '',
        '// this code was generated by derivations/clustering.py',
        'template<class count_t>',
        'float Clustering<count_t>::LowEntropy::log_partition_function (',
        '    count_t sample_size) const',
        '{',
        '    // TODO incorporate dataset_size for higher accuracy',
        '    count_t n = sample_size;',
        '    if (n < %d) {' % (max_size + 1),
        '        return log_partition_function_table[n];',
        '    } else {',
        '        float coeff = %0.8ff;' % coeff,
        '        float log_z_max = n * fast_log(n);',
        '        return log_z_max * (1.f + coeff * powf(n, -0.75f));',
        '    }',
        '}',
    ]
    print '\n'.join(lines)
    print
    print '# Insert this in distributions/dbg/clustering.py:'
    lines = [
        '# this code was generated by derivations/clustering.py',
        'log_partition_function_table = [',
        number_table(log_Z),
        ']',
        '',
        '',
        '# this code was generated by derivations/clustering.py',
        'def log_partition_function(sample_size):',
        '    # TODO incorporate dataset_size for higher accuracy',
        '    n = sample_size',
        '    if n < %d:' % (max_size + 1),
        '        return LowEntropy.log_partition_function_table[n]',
        '    else:',
        '        coeff = %0.8f' % coeff,
        '        log_z_max = n * log(n)',
        '        return log_z_max * (1.0 + coeff * n ** -0.75)',
    ]
    print '\n'.join(lines)
    print
@parsable.command
def plots():
    '''
    Generate all plots.
    '''
    # Run every plotting command, in the same order as before.
    for make_plot in (priors, pairwise, postpred, dataprob,
                      approximations, normalization, fastlog):
        make_plot()
if __name__ == '__main__':
    # Dispatch the subcommand named on the command line to the matching
    # @parsable.command function defined above.
    parsable.dispatch()
google-basketball-scores.py | beepboop271/python-scripts | 0 | 12766094 | # Sketchy script to pull basketball scores from the google
# thingy for the 2019 nba finals, raptors v warriors
# PYTHON 2 (old lol)
import urllib2
import re
import time
headers = {"User-Agent": "Mozilla/5.0"}
url = "https://www.google.com/search?q=raptors+game&rlz=1C1CHBF_enCA722CA722&oq=raptors+game&aqs=chrome..69i57j69i60l5.1126j0j7&sourceid=chrome&ie=UTF-8#sie=m;/g/11h1m4tzw3;3;/m/05jvx;dt;fp;1;;"
request = urllib2.Request(url, headers=headers)
response = urllib2.urlopen(request)
page = response.read()
i = page.index("imso_mh__l-tm-sc imso_mh__scr-it")
s = page[i+20:i+200]
match = re.match(r"[^0-9]+([0-9]+)[^0-9]+([0-9]+)[^0-9]+", s)
g, r = map(int, match.group(1, 2))
print "g r"
print g, r
gl = g
rl = r
while True:
request = urllib2.Request(url, headers=headers)
response = urllib2.urlopen(request)
page = response.read()
try:
i = page.index("imso_mh__l-tm-sc imso_mh__scr-it")
s = page[i+20:i+200]
match = re.match(r"[^0-9]+([0-9]+)[^0-9]+([0-9]+)[^0-9]+", s)
g, r = map(int, match.group(1, 2))
if g-gl > 0:
print g, r
if g-gl == 3:
print "unepic gamer moment"
if r-rl > 0:
print g, r
if r-rl == 3:
print "epic gamer moment"
gl = g
rl = r
except(ValueError):
print "error"
time.sleep(1)
| 3.09375 | 3 |
vito/pyutils.py | snototter/vito | 0 | 12766095 | <filename>vito/pyutils.py
#!/usr/bin/env python
# coding=utf-8
"""Utilities for timing, logging, etc."""
import timeit
import re
import sys
import os
import datetime
import argparse
import subprocess
import traceback
# Timing code, similar to MATLAB's tic/toc
# Module-level registry: timer label -> start timestamp from
# timeit.default_timer().
__tictoc_timers = {}
def tic(label='default'):
    """Start (or restart) the timer identified by `label`."""
    __tictoc_timers[label] = timeit.default_timer()
def toc(label='default', seconds=False):
    """Stop timer and print elapsed time."""
    # Silently ignore labels that were never started with tic().
    if label not in __tictoc_timers:
        return
    elapsed = timeit.default_timer() - __tictoc_timers[label]
    if seconds:
        print('[{:s}] Elapsed time: {:.3f} s'.format(label, elapsed))
    else:
        print('[{:s}] Elapsed time: {:.2f} ms'.format(label, 1000.0*elapsed))
def ttoc(label='default', seconds=False):
    """Stop timer and return elapsed time (None for unknown labels)."""
    # Unknown label: mirror the original implicit-None behaviour.
    if label not in __tictoc_timers:
        return None
    elapsed = timeit.default_timer() - __tictoc_timers[label]
    return elapsed if seconds else 1000.0 * elapsed
def toc_nsec(label='default', nsec=0.5, seconds=False):
    """Stop timer and print elapsed time, muting output for nsec seconds."""
    if label not in __tictoc_timers:
        return
    elapsed = timeit.default_timer() - __tictoc_timers[label]
    if seconds:
        message = '[{:s}] Elapsed time: {:.3f} s'.format(label, elapsed)
    else:
        message = '[{:s}] Elapsed time: {:.2f} ms'.format(label, 1000.0*elapsed)
    # log_nsec rate-limits the actual printing.
    log_nsec(message, nsec, label)
# Log only once every x sec
# Maps label -> timestamp of the last emitted message.
__log_timers = {}
def log_nsec(string, nsec, label='default'):
    """Print `string` at most once every `nsec` seconds per `label`.

    Use it to avoid spamming your terminal.
    """
    last_emit = __log_timers.get(label)
    if last_emit is not None and timeit.default_timer() - last_emit < nsec:
        return
    print(string)
    __log_timers[label] = timeit.default_timer()
# Math
# def rand_mod(m):
# """Correctly sample a random number modulo m (avoiding modulo bias)"""
# # python's random lib has random.uniform(a,b), a <= N <= b
# return random.uniform(0, m-1)
#
# Problem in C/C++:
# rand() returns a number in [0, RAND_MAX], assume RAND_MAX=10, we want mod 3:
# rand() = 0, 3, 6, 9; then mod3 = 0; prob(0) = 4/11
# rand() = 1, 4, 7, 10; then mod3 = 1; prob(1) = 4/11
# rand() = 2, 5, 8; then mod3 = 2; prob(2) = 3/11 !!!
# see also: https://stackoverflow.com/a/10984975/400948
def compare(a, b):
    """Python3 replacement for Python 2.x cmp(), see
    https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons
    """
    # Return +1 / -1 / 0, like the classic three-way comparison.
    if a > b:
        return 1
    if a < b:
        return -1
    return 0
def compare_version_strings(v1, v2):
    """Compares version strings, returns -1/0/+1 if v1 less, equal or greater v2"""
    # Based on https://stackoverflow.com/a/1714190/400948
    def as_int_fields(v):
        # Drop trailing ".0" groups so e.g. "1.0" equals "1", then split.
        return [int(field) for field in re.sub(r'(\.0+)*$', '', v).split(".")]
    lhs = as_int_fields(v1)
    rhs = as_int_fields(v2)
    # Lexicographic list comparison, folded into a three-way result.
    return (lhs > rhs) - (lhs < rhs)
# Make unicode strings, works for Python 2 & 3
try:
    to_unicode = unicode  # Python 2: the builtin unicode type
except NameError:
    to_unicode = str  # Python 3: str is already unicode
def slugify(s):
    """Converts a string to a slug (strip special characters,
    replace white space, convert to lowercase...) to be used for file names or
    URLs."""
    import unicodedata
    # Transliterate to plain ASCII, dropping characters that do not map.
    ascii_text = unicodedata.normalize(
        'NFKD', to_unicode(s)).encode('ascii', 'ignore').decode('ascii')
    # Remove everything except word chars, whitespace and dashes.
    cleaned = to_unicode(re.sub(r'[^\w\s-]', '', ascii_text).strip().lower())
    # Collapse runs of whitespace/dashes into a single dash.
    return to_unicode(re.sub(r'[-\s]+', '-', cleaned))
def find_first_index(l, x):
    """Return the index of the first occurrence of `x` in `l`.

    Raises:
        ValueError: if `x` does not occur in `l`.
    """
    # enumerate instead of the non-idiomatic range(len(l)) index loop.
    for idx, element in enumerate(l):
        if element == x:
            return idx
    raise ValueError("'{}' is not in list".format(x))
def find_last_index(l, x):
    """Returns the last index of element x within the list l"""
    # Walk backwards from the tail until a match is found.
    idx = len(l) - 1
    while idx >= 0:
        if l[idx] == x:
            return idx
        idx -= 1
    raise ValueError("'{}' is not in list".format(x))
def argsort(seq, indices_only=False):
    """Returns the sorted indices and the sorted array (seq) if indices_only=False."""
    # Stable sort of (index, value) pairs by value.
    ordered_pairs = sorted(enumerate(seq), key=lambda pair: pair[1])
    if indices_only:
        return [index for index, _ in ordered_pairs]
    # Transpose into (indices_tuple, values_tuple) just like the original.
    return zip(*ordered_pairs)
def in_ospath(name):
    """Check whether 'name' is on PATH."""
    # Scan every PATH entry for a file of that name
    # (https://stackoverflow.com/a/5227009).
    search_dirs = os.environ['PATH'].split(os.pathsep)
    return any(os.path.exists(os.path.join(d, name)) for d in search_dirs)
def is_tool(name):
    """Check whether 'name' is on PATH and marked as executable."""
    if sys.version_info < (3, 3):
        # Old interpreters lack shutil.which; fall back to a plain PATH scan.
        return in_ospath(name)  # pragma: no cover
    # https://stackoverflow.com/a/34177358
    from shutil import which
    return which(name) is not None
def safe_shell_output(*args):
    """Executes the given shell command and returns the output
    with leading/trailing whitespace trimmed. For example:
    * safe_shell_output('ls')
    * safe_shell_output('ls', '-l', '-a')

    Returns the tuple (True/False, output/error_message)
    """
    try:
        by = subprocess.check_output(list(args))
        out = by.decode('utf-8').strip()
        success = True
    except Exception:
        # Was a bare `except:` which also swallowed KeyboardInterrupt and
        # SystemExit; catch only real errors (non-zero exit status, missing
        # binary, decode failure) and return the traceback as the message.
        out = traceback.format_exc(limit=3)
        success = False
    return success, out
def date_str(delimiter=('', '', '-', '', ''), dt=None):
    """Return a YYYY*MM*DD*hh*mm*ss string using the given delimiters.

    Provide fewer delimiters to return shorter strings, e.g.
      delimiter=['-']     returns YYYY-MM
      delimiter=['', '']  returns YYYYMMDD
    Useful to generate timestamped output folder/file names.

    :param delimiter: up to five separators, placed before month, day,
        hour, minute and second respectively. More than five raises
        RuntimeError (seconds is the finest supported resolution).
    :param dt: custom datetime.datetime; None means datetime.datetime.now().
    """
    # Validate up front (the old code only raised after building the string).
    if len(delimiter) > 5:
        raise RuntimeError('Too many delimiter, currently we only support formating up until seconds')
    # Note: default changed from a mutable list to an equivalent tuple.
    now = datetime.datetime.now() if dt is None else dt
    result = now.strftime('%Y')
    # Append one field per provided delimiter, most significant first;
    # zip stops as soon as the delimiters run out.
    for sep, fmt in zip(delimiter, ('%m', '%d', '%H', '%M', '%S')):
        result += '{:s}{:s}'.format(sep, now.strftime(fmt))
    return result
################################################################################
# Data validation (e.g. argument parsing)
def check_positive_int(value):
    """argparse type validator: parse `value` as a strictly positive int."""
    number = int(value)
    if number > 0:
        return number
    raise argparse.ArgumentTypeError("%s must be > 0" % value)
def check_positive_real(value):
    """argparse type validator: parse `value` as a strictly positive float."""
    number = float(value)
    if number > 0:
        return number
    raise argparse.ArgumentTypeError("%s must be > 0.0" % value)
| 2.53125 | 3 |
softlearning/samplers/utils.py | abhishekunique/RND-ashwin | 0 | 12766096 | <gh_stars>0
from collections import defaultdict
import numpy as np
from softlearning import replay_pools
from . import (
dummy_sampler,
remote_sampler,
base_sampler,
simple_sampler,
active_sampler,
goal_sampler,
pool_sampler,
nn_sampler,
classifier_sampler)
def get_sampler_from_variant(variant, *args, **kwargs):
    """Instantiate the sampler described by ``variant['sampler_params']``.

    ``sampler_params`` must contain a ``'type'`` key naming one of the
    classes below; optional ``'args'``/``'kwargs'`` are forwarded to its
    constructor, followed by any extra ``*args``/``**kwargs`` given here.
    """
    # Maps the variant 'type' string to the sampler class.
    # (The original dict listed 'PoolSampler' twice; the duplicate is removed.)
    SAMPLERS = {
        'DummySampler': dummy_sampler.DummySampler,
        'RemoteSampler': remote_sampler.RemoteSampler,
        'Sampler': base_sampler.BaseSampler,
        'SimpleSampler': simple_sampler.SimpleSampler,
        'ActiveSampler': active_sampler.ActiveSampler,
        'GoalSampler': goal_sampler.GoalSampler,
        'PoolSampler': pool_sampler.PoolSampler,
        'NNSampler': nn_sampler.NNSampler,
        'ClassifierSampler': classifier_sampler.ClassifierSampler,
    }

    sampler_params = variant['sampler_params']
    sampler_type = sampler_params['type']

    sampler_args = sampler_params.get('args', ())
    # Copy so constructor-side mutations never leak back into the variant.
    sampler_kwargs = sampler_params.get('kwargs', {}).copy()

    sampler = SAMPLERS[sampler_type](
        *sampler_args, *args, **sampler_kwargs, **kwargs)

    return sampler
# Defaults merged under the caller's render_kwargs by `rollout` when the
# render mode is 'rgb_array' (off-screen frame capture).
DEFAULT_PIXEL_RENDER_KWARGS = {
    'mode': 'rgb_array',
    'width': 256,
    'height': 256,
}

# Defaults merged in when the render mode is 'human' (on-screen viewer).
DEFAULT_HUMAN_RENDER_KWARGS = {
    'mode': 'human',
    'width': 500,
    'height': 500,
}
def rollout(env,
            policy,
            path_length,
            sampler_class=simple_sampler.SimpleSampler,
            sampler_kwargs=None,
            callback=None,
            render_kwargs=None,
            break_on_terminal=True):
    """Collect a single trajectory of up to `path_length` steps from `env`
    using `policy`, optionally rendering each step.

    :param env: environment to sample from.
    :param policy: policy queried by the sampler; `reset()` is called on it
        at episode termination.
    :param path_length: maximum number of environment steps.
    :param sampler_class: sampler type used to step the environment.
    :param sampler_kwargs: extra keyword arguments for the sampler.
    :param callback: optional callable invoked with each observation.
    :param render_kwargs: dict with at least 'mode' ('rgb_array' or 'human')
        to enable rendering; any other mode disables rendering.
    :param break_on_terminal: stop at the first terminal step instead of
        always running `path_length` steps.
    :returns: batch dict of the collected transitions, with 'infos' added
        and, for 'rgb_array' mode, a stacked 'images' array.
    """
    # Pool sized exactly for one path; filled by the sampler below.
    pool = replay_pools.SimpleReplayPool(env, max_size=path_length)
    if sampler_kwargs:
        sampler = sampler_class(
            max_path_length=path_length,
            min_pool_size=None,
            batch_size=None,
            **sampler_kwargs)
    else:
        sampler = sampler_class(
            max_path_length=path_length,
            min_pool_size=None,
            batch_size=None)
    sampler.initialize(env, policy, pool)

    # Merge caller-provided render settings over the module defaults;
    # unknown modes disable rendering entirely.
    render_mode = (render_kwargs or {}).get('mode', None)
    if render_mode == 'rgb_array':
        render_kwargs = {
            **DEFAULT_PIXEL_RENDER_KWARGS,
            **render_kwargs
        }
    elif render_mode == 'human':
        render_kwargs = {
            **DEFAULT_HUMAN_RENDER_KWARGS,
            **render_kwargs
        }
    else:
        render_kwargs = None

    images = []
    infos = defaultdict(list)

    t = 0
    for t in range(path_length):
        observation, reward, terminal, info = sampler.sample()
        for key, value in info.items():
            infos[key].append(value)

        if callback is not None:
            callback(observation)

        if render_kwargs:
            if render_mode == 'rgb_array':
                #note: this will only work for mujoco-py environments
                if hasattr(env.unwrapped, 'imsize'):
                    imsize = env.unwrapped.imsize
                else:
                    imsize = 200
                imsize_flat = imsize*imsize*3
                #for goal conditioned stuff
                #if observation['observations'].shape[0] == 2*imsize_flat:
                #    image1 = observation['observations'][:imsize_flat].reshape(48,48,3)
                #    image2 = observation['observations'][imsize_flat:].reshape(48,48,3)
                #    image1 = (image1*255.0).astype(np.uint8)
                #    image2 = (image2*255.0).astype(np.uint8)
                #    image = np.concatenate([image1, image2], axis=1)
                # Six-channel pixel observations are assumed to be two
                # stacked RGB frames; show them side by side.
                # TODO(review): confirm the channel-split convention.
                if 'pixels' in observation.keys() and observation['pixels'].shape[-1] == 6:
                    pixels = observation['pixels']
                    image1 = pixels[:, :, :3]
                    image2 = pixels[:, :, 3:]
                    image = np.concatenate([image1, image2], axis=1)
                else:
                    image = env.render(**render_kwargs)
                images.append(image)
            else:
                image = env.render(**render_kwargs)
                images.append(image)

        if terminal:
            policy.reset()
            if break_on_terminal: break

    # Sanity check: the pool holds exactly the steps taken.
    assert pool._size == t + 1

    path = pool.batch_by_indices(np.arange(pool._size))
    path['infos'] = infos

    if render_mode == 'rgb_array':
        path['images'] = np.stack(images, axis=0)

    return path
def rollouts(n_paths, *args, **kwargs):
    """Collect `n_paths` independent rollouts; extra arguments are
    forwarded to `rollout`."""
    return [rollout(*args, **kwargs) for _ in range(n_paths)]
| 2.0625 | 2 |
parser/fase2/team07/Tytus_SQLPARSER_G8/Instrucciones/plpgsql/condicional_if.py | Josue-Zea/tytus | 35 | 12766097 | from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.TablaSimbolos.Tipo import Tipo_Dato
from Instrucciones.Excepcion import Excepcion
class If(Instruccion):
    '''
    Represents a plain IF statement (no ELSE).

    Receives the boolean condition and the list of statements to execute
    when the condition is true.
    '''
    def __init__(self,expLogica,instrucciones,strGram, linea, columna, strSent):
        Instruccion.__init__(self,None,linea,columna,strGram,strSent)
        # Boolean condition of the IF.
        self.expLogica = expLogica
        # Statements executed when the condition holds.
        self.instrucciones = instrucciones

    def ejecutar(self, tabla, arbol):
        # Interpretation is not implemented for this node; only translation.
        pass

    def traducir(self, tabla, arbol,cadenaTraducida):
        """Emit three-address code for the IF. Returns the code string, or an
        Excepcion on semantic error. `cadenaTraducida` carries the shared
        exit label when this IF is an ELSIF branch ("" otherwise)."""
        # If translating the condition produced an error, propagate it.
        expresion_logica = self.expLogica.traducir(tabla, arbol,cadenaTraducida)
        if isinstance(expresion_logica, Excepcion):
            return expresion_logica
        if expresion_logica.tipo.tipo == Tipo_Dato.BOOLEAN or expresion_logica.tipo.tipo == Tipo_Dato.ID:
            # Begin code generation.
            codigo = expresion_logica.codigo
            codigo += "\tlabel " + expresion_logica.etiquetaV.replace(":","") + "\n"
            for i in self.instrucciones:
                instruccion_if = i.traducir(tabla, arbol,cadenaTraducida)
                if isinstance(instruccion_if, Excepcion):
                    return instruccion_if
                codigo += instruccion_if
            if cadenaTraducida == "":
                codigo += "\tlabel " + expresion_logica.etiquetaF.replace(":","") + "\n"
            else:
                # cadenaTraducida carries the exit label when this is an elsif.
                codigo += "\tgoto " + cadenaTraducida + "\n"
                codigo += "\tlabel " + expresion_logica.etiquetaF.replace(":","") + "\n"
            return codigo
        # Generated shape:
        # ...
        # if temp_logic:
        #   goto L1
        # goto L2
        # label L1
        #   if-body instructions
        # label L2
        # ...
        else:
            error = Excepcion('42804',"Semántico","La expresion logica debe ser de tipo boolean",self.linea,self.columna)
            arbol.excepciones.append(error)
            arbol.consola.append(error.toString())
            return error
class Ifelse(Instruccion):
    '''
    Represents an IF ... ELSE statement.

    Receives the boolean condition plus the statement lists to execute when
    the condition is true and when it is false.
    '''
    def __init__(self,expLogica,instrIfVerdadero,instrIfFalso,strGram, linea, columna, strSent):
        Instruccion.__init__(self,None,linea,columna,strGram,strSent)
        # Boolean condition.
        self.expLogica = expLogica
        # Statements for the true (IF) branch.
        self.instrIfVerdadero = instrIfVerdadero
        # Statements for the false (ELSE) branch.
        self.instrIfFalso = instrIfFalso

    def ejecutar(self, tabla, arbol):
        # Interpretation is not implemented for this node; only translation.
        pass

    def traducir(self, tabla, arbol,cadenaTraducida):
        """Emit three-address code for IF/ELSE. Note the false branch is
        emitted first, then the true branch, then the shared exit label."""
        # If translating the condition produced an error, propagate it.
        expresion_logica = self.expLogica.traducir(tabla, arbol,cadenaTraducida)
        if isinstance(expresion_logica, Excepcion):
            return expresion_logica
        if expresion_logica.tipo.tipo == Tipo_Dato.BOOLEAN or expresion_logica.tipo.tipo == Tipo_Dato.ID:
            # Begin code generation.
            codigo = expresion_logica.codigo
            etiquetaSalida = arbol.generaEtiqueta()
            codigo += "\tlabel " + expresion_logica.etiquetaF.replace(":","") + "\n"
            for inst in self.instrIfFalso:
                instruccion_ifFalso = inst.traducir(tabla, arbol,cadenaTraducida)
                if isinstance(instruccion_ifFalso, Excepcion):
                    return instruccion_ifFalso
                codigo += instruccion_ifFalso
            codigo += "\tgoto ." + etiquetaSalida + "\n"
            codigo += "\tlabel " + expresion_logica.etiquetaV.replace(":","") + "\n"
            for i in self.instrIfVerdadero:
                instruccion_if = i.traducir(tabla, arbol,cadenaTraducida)
                if isinstance(instruccion_if, Excepcion):
                    return instruccion_if
                codigo += instruccion_if
            codigo += "\tlabel ." + etiquetaSalida + "\n"
            return codigo
        # Generated shape:
        # ...
        # if temp_logic
        #   goto L1
        # goto L2
        # label L2
        #   else-branch instructions
        # goto L3
        # label L1
        #   if-branch instructions
        # label L3
        # ...
        else:
            error = Excepcion('42804',"Semántico","La expresion logica debe ser de tipo boolean",self.linea,self.columna)
            arbol.excepciones.append(error)
            arbol.consola.append(error.toString())
            return error
class IfElseIf(Instruccion):
    '''
    Represents an IF ... ELSIF statement (without a final ELSE).

    Receives the main boolean condition and its statement list, plus a list
    of elsif nodes, each carrying its own condition and statements.
    '''
    def __init__(self,expLogica,instrIfVerdadero,l_elseif,strGram, linea, columna, strSent):
        Instruccion.__init__(self,None,linea,columna,strGram,strSent)
        # Boolean condition of the leading IF.
        self.expLogica = expLogica
        # Statements of the leading IF branch.
        self.instrIfVerdadero = instrIfVerdadero
        # List of elsif branches (each an If node).
        self.l_elseif = l_elseif

    def ejecutar(self, tabla, arbol):
        # Interpretation is not implemented for this node; only translation.
        pass

    def traducir(self, tabla, arbol,cadenaTraducida):
        """Emit three-address code for IF/ELSIF. A single exit label is
        generated here and passed to every elsif branch so they all jump
        to the same place after executing."""
        # If translating the condition produced an error, propagate it.
        expresion_logica = self.expLogica.traducir(tabla, arbol,cadenaTraducida)
        if isinstance(expresion_logica, Excepcion):
            return expresion_logica
        if expresion_logica.tipo.tipo == Tipo_Dato.BOOLEAN or expresion_logica.tipo.tipo == Tipo_Dato.ID:
            # Begin code generation.
            etiquetaSalida = arbol.generaEtiqueta()
            codigo = expresion_logica.codigo
            codigo += "\tlabel " + expresion_logica.etiquetaF.replace(":","") + "\n"
            # elsif branches, each receiving the shared exit label.
            for s_if in self.l_elseif:
                sentencia_if = s_if.traducir(tabla,arbol,etiquetaSalida)
                if isinstance(sentencia_if, Excepcion):
                    return sentencia_if
                codigo += sentencia_if
            # Label for the case where the first if was true.
            codigo += "\tgoto " + etiquetaSalida + "\n"
            codigo += "\tlabel " + expresion_logica.etiquetaV.replace(":","") + "\n"
            # Statements of the main if branch.
            for i in self.instrIfVerdadero:
                instruccion_if = i.traducir(tabla, arbol,cadenaTraducida)
                if isinstance(instruccion_if, Excepcion):
                    return instruccion_if
                codigo += instruccion_if
            codigo += "\tlabel " + etiquetaSalida + "\n"
            return codigo
        # Generated shape:
        # ...
        # if temp_logic:
        #   goto L1
        # goto L10
        # label L10
        # ................
        #
        # if temp_logic2:
        #   goto L3
        # goto L4
        # label L3
        #   elsif-branch instructions
        # goto L2
        # label L4
        #
        # ....................
        # label L1
        #   if-branch instructions
        # label L2
        # ...
        else:
            error = Excepcion('42804',"Semántico","La expresion logica debe ser de tipo boolean",self.linea,self.columna)
            arbol.excepciones.append(error)
            arbol.consola.append(error.toString())
            return error
class IfElseIfElse(Instruccion):
    '''
    Represents an IF ... ELSIF ... ELSE statement.

    Receives the main boolean condition and its statement list, a list of
    elsif nodes (each with its own condition and statements), and the
    statements to execute when every condition is false.
    '''
    def __init__(self,expLogica,instrIfVerdadero,l_elseif,instrIfFalso,strGram, linea, columna, strSent):
        Instruccion.__init__(self,None,linea,columna,strGram,strSent)
        # Boolean condition of the leading IF.
        self.expLogica = expLogica
        # Statements of the leading IF branch.
        self.instrIfVerdadero = instrIfVerdadero
        # List of elsif branches (each an If node).
        self.l_elseif = l_elseif
        # Statements of the final ELSE branch.
        self.instrIfFalso = instrIfFalso

    def ejecutar(self, tabla, arbol):
        # Interpretation is not implemented for this node; only translation.
        pass

    def traducir(self, tabla, arbol,cadenaTraducida):
        """Emit three-address code for IF/ELSIF/ELSE. The shared exit label
        is handed to every elsif; the ELSE body runs when all conditions
        were false."""
        # If translating the condition produced an error, propagate it.
        expresion_logica = self.expLogica.traducir(tabla, arbol,cadenaTraducida)
        if isinstance(expresion_logica, Excepcion):
            return expresion_logica
        if expresion_logica.tipo.tipo == Tipo_Dato.BOOLEAN or expresion_logica.tipo.tipo == Tipo_Dato.ID:
            # Begin code generation.
            codigo = expresion_logica.codigo
            etiquetaF = arbol.generaEtiqueta()
            codigo += "\tlabel " + expresion_logica.etiquetaF.replace(":","") + "\n"
            # elsif branches, each receiving the shared exit label.
            for s_if in self.l_elseif:
                sentencia_if = s_if.traducir(tabla,arbol,etiquetaF)
                if isinstance(sentencia_if, Excepcion):
                    return sentencia_if
                codigo += sentencia_if
            # ELSE statements, executed when every condition was false.
            for instr in self.instrIfFalso:
                instruccion_falsa = instr.traducir(tabla, arbol,cadenaTraducida)
                if isinstance(instruccion_falsa, Excepcion):
                    return instruccion_falsa
                codigo += instruccion_falsa
            codigo += "\tgoto " + etiquetaF + "\n"
            # Label for the case where the first if was true.
            codigo += "\tlabel " + expresion_logica.etiquetaV.replace(":","") + "\n"
            # Statements of the main if branch.
            for i in self.instrIfVerdadero:
                instruccion_if = i.traducir(tabla, arbol,cadenaTraducida)
                if isinstance(instruccion_if, Excepcion):
                    return instruccion_if
                codigo += instruccion_if
            codigo += "\tlabel " + etiquetaF + "\n"
            return codigo
        # Generated shape:
        # ...
        # if temp_logic:
        #   goto L1
        # goto L10
        # label L10
        # ................
        #
        # if temp_logic2:
        #   goto L3
        # goto L4
        # label L3
        #   elsif-branch instructions
        # goto L2
        # label L4
        #
        # ....................
        #   else-branch instructions
        # goto L2
        # label L1
        #   if-branch instructions
        # label L2
        # ...
        else:
            error = Excepcion('42804',"Semántico","La expresion logica debe ser de tipo boolean",self.linea,self.columna)
            arbol.excepciones.append(error)
            arbol.consola.append(error.toString())
            return error
Word Embedding - Brown corpus - Kmeans - Nearest neighbour/src/create_representation.py | farzanaaswin0708/Data-Science-Projects | 0 | 12766098 | #!/usr/bin/env python
import nltk
from nltk.corpus import brown
import numpy as np
from math import log
from config import *
"""
convert word list file to a map from word to id
"""
def word2map(filename):
    """
    convert word list file to a map from word to id
    (one word per line; ids are assigned in file order)
    """
    mapping = {}
    with open(filename) as word_file:
        for raw_line in word_file:
            # Same id-assignment rule as before: the id is the map's size
            # at insertion time.
            mapping[raw_line.strip('\n')] = len(mapping)
    return mapping
if __name__ == "__main__":
# add nltk serach path
nltk.data.path.append(DATA_HOME);
# get brown text stream
print ("getting text stream...")
brown_text = list(filter(lambda x: x.isalpha(), map(lambda x: x.lower(), brown.words())));
M = len(brown_text);
# mapping word to index
print ("generating word map...")
V2id = word2map(DATA_HOME + "V.txt");
C2id = word2map(DATA_HOME + "C.txt");
print (V2id);
print (C2id);
# prepare for the calculation of Pr(c) and Pr(c|w)
# use ones to apply laplace smoothing
print ("counting context appearance...");
window_count = np.ones((V_SIZE, C_SIZE));
core_count = np.ones((1, C_SIZE));
for i in range(M):
w = brown_text[i];
if w not in V2id:#has_key(w):
continue;
wid = V2id[w];
for j in range(i - HALF_WINDOW, i + HALF_WINDOW + 1):
if j < 0 or j >= M or j == i:
continue;
c = brown_text[j];
if c not in C2id:
continue;
cid = C2id[c];
window_count[wid][cid] += 1;
core_count[0][cid] += 1;
#print (window_count)
#print (core_count)
# calculate Pr(c) and Pr(c|w)
print ("calculating probability...");
pcw, pc = window_count, core_count;
for i in range(len(pcw)):
pcw[i] = pcw[i] / pcw[i].sum();
pc = pc / pc.sum();
# calculate pointwise mutual information
phi = np.zeros((V_SIZE, C_SIZE));
for i in range(V_SIZE):
for j in range(C_SIZE):
phi[i][j] = max(0, log(pcw[i][j] / pc[0][j]));
# save representation matrix to file
print ("saving representation...");
np.save("representation-" + str(C_SIZE) + ".npy", phi);
| 3.078125 | 3 |
LeetCode/daily/820.单词的压缩编码.py | xmmmmmovo/MyAlgorithmSolutions | 1 | 12766099 | <reponame>xmmmmmovo/MyAlgorithmSolutions
#
# @lc app=leetcode.cn id=820 lang=python3
#
# [820] 单词的压缩编码
#
from typing import List
# @lc code=start
from collections import defaultdict
from functools import reduce
class Solution:
def minimumLengthEncoding(self, words: List[str]) -> int:
"""
反转后缀排序判断
o(n∑len(wi))
"""
res = 0
# 这里先转set去重 可以省不少时间
words = list(set(words))
words.sort(key=lambda w: w[::-1])
for i in range(len(words)):
if i+1 < len(words) and words[i+1].endswith(words[i]):
pass
else:
res += len(words[i]) + 1
return res
class Solution2:
def minimumLengthEncoding(self, words: List[str]) -> int:
"""
字典树解法
单词倒序可以存字典树
算叶子节点深度
"""
words = list(set(words))
# 命名lambda
def Trie(): return defaultdict(Trie)
# 相当于一个套娃 就是一个字典里面套另一个lambda
trie = Trie()
#reduce(..., S, trie) is trie[S[0]][S[1]][S[2]][...][S[S.length - 1]]
nodes = [reduce(dict.__getitem__, word[::-1], trie)
for word in words]
# Add word to the answer if it's node has no neighbors
return sum(len(word) + 1
for i, word in enumerate(words)
if len(nodes[i]) == 0)
# @lc code=end
if __name__ == "__main__":
assert Solution2().minimumLengthEncoding(["time", "me", "bell"]) == 10
| 3.171875 | 3 |
src/pyai/loss/mse.py | lab-a1/pyai | 0 | 12766100 | <reponame>lab-a1/pyai
import numpy as np
from pyai.loss.base import BaseLoss
from pyai import Tensor
class MSELoss(BaseLoss):
"""
Mean-Squared Error, or L2 loss.
Equation: MSE = \frac{\sum_{i=1}^{n}(y_{true}-y_{predicted})^2}{n}
"""
def loss(self, y_true: Tensor, y_predicted: Tensor) -> Tensor:
return np.mean(np.square(y_true - y_predicted))
def gradients(self, y_true: Tensor, y_predicted: Tensor) -> Tensor:
"""
Equation: \frac{d_{loss}}{d_{y\_predicted}}
"""
return (y_true - y_predicted) * 2
if __name__ == "__main__":
criterion = MSELoss()
a = np.array([2.2, 3])
b = np.array([1.8, 4])
loss = criterion(a, b)
gradients = criterion.gradients(a, b)
gradients_lim = criterion.gradients_lim(a, b)
print(loss, loss.shape)
print(gradients, gradients.shape)
print(gradients_lim, gradients_lim.shape)
| 2.828125 | 3 |
tests/test_noncoap_tcp_client.py | mguc/aiocoap | 229 | 12766101 | # This file is part of the Python aiocoap library project.
#
# Copyright (c) 2012-2014 <NAME> <http://sixpinetrees.blogspot.com/>,
# 2013-2014 <NAME> <<EMAIL>>
#
# aiocoap is free software, this file is published under the MIT license as
# described in the accompanying LICENSE file.
"""Confront a CoAP over TCP server with a client that speaks so bad protocol it
is easier to mock with sending byte sequences than with aiocoap"""
import asyncio
import unittest
import aiocoap
from .test_server import WithTestServer, precise_warnings, no_warnings, asynctest
from .common import tcp_disabled
@unittest.skipIf(tcp_disabled, "TCP disabled in environment")
class TestNoncoapTCPClient(WithTestServer):
def setUp(self):
super().setUp()
self.mock_r, self.mock_w = self.loop.run_until_complete(
asyncio.open_connection(
self.serveraddress,
aiocoap.COAP_PORT))
def tearDown(self):
self.mock_w.close()
super().tearDown()
@staticmethod
def _read_as_messages(encoded: bytes):
"""Process the encoded data into CoAP-over-TCP messages, return them as
a list and trailing (unrecognized / incomplete) data."""
messages = []
while True:
size = aiocoap.transports.tcp._extract_message_size(encoded)
if size is not None:
size = sum(size)
if size is None or size > len(encoded):
return messages, encoded
messages.append(aiocoap.transports.tcp._decode_message(encoded[:size]))
encoded = encoded[size:]
async def should_abort_early(self, request: bytes):
"""Send request bytes, expect that the server closes the connection
after having sent possibly a CSM and an abort"""
self.mock_w.write(request)
r = await self.mock_r.read() # timing out would be a typical failure case here too
parsed, trail = self._read_as_messages(r)
self.assertEqual(trail, b"", "Leftover data after closing message")
if parsed[0].code == aiocoap.CSM:
# don't discard the CSM unconditionallly: the server might have
# read the request data before sending its own initial CSM.
parsed.pop(0)
self.assertEqual(len(parsed), 1, "Not exactly one (presumably abort) message received")
self.assertEqual(parsed[0].code, aiocoap.ABORT, "Received message is not an abort message")
async def should_idle(self, request: bytes, timeout=0.1):
"""Send request bytes, expect that the server sends CSM and does not
close the connection, awaiting more from the client.
Returns all messages received until the timeout."""
self.mock_w.write(request)
triggered_eof = False
async def kill_read():
"""After a timeout, synthesize an end-of-file condition into the
reader, hoping this doesn't beak too much."""
nonlocal triggered_eof
await asyncio.sleep(timeout)
triggered_eof = True
self.mock_r.feed_eof()
self.loop.create_task(kill_read())
r = await self.mock_r.read() # timing out would be a typical failure case here too
self.assertEqual(triggered_eof, True, "Server closed connection prematurely")
parsed, trail = self._read_as_messages(r)
# if this happens, the server is either sending garbage (announcing
# something long and not following up), or the timeout should be
# increased
self.assertEqual(trail, b"", "Leftover data after reading timeout")
if parsed[0].code == aiocoap.CSM:
# don't discard the CSM unconditionallly: the server might have
# read the request data before sending its own initial CSM.
parsed.pop(0)
return parsed
async def should_idle_quietly(self, request: bytes, timeout=0.1):
"""should_idle, but assert that no messages were returned"""
messages = await self.should_idle(request, timeout)
# it's not a per-spec wrong thing to do, but highly unusual
self.assertEqual(messages, [], "Server sent messages on its own")
@precise_warnings(["Aborting connection: Failed to parse message"])
@asynctest
async def test_http_get(self):
await self.should_abort_early(b'GET /.well-known/core HTTP/1.0')
@precise_warnings(["Aborting connection: No CSM received"])
@asynctest
async def test_early_get(self):
await self.should_abort_early(b'\0\x01')
@no_warnings
@asynctest
async def test_incomplete_small(self):
await self.should_idle_quietly(b'\0')
@no_warnings
@asynctest
async def test_incomplete_large1(self):
# announcing but not sending 1 bytes extlen
await self.should_idle_quietly(b'\xd0')
@no_warnings
@asynctest
async def test_incomplete_large2(self):
# sending one out of four bytes extlen
# a server could in theory reject this on grounds of "no matter what
# you say next, my buffer ain't large enough"
await self.should_idle_quietly(b'\xf0\0')
@no_warnings
@asynctest
async def test_incomplete_large3(self):
# announcing a 269 byte long message, but not even sendin the code
await self.should_idle_quietly(b'\xe0\0\0')
@precise_warnings(['Aborting connection: Overly large message announced'])
@asynctest
async def test_incomplete_large4(self):
# announcing the longest possible message, this should excede
# everyone's max-message-size.
#
# blocking to read more would be acceptable behavior as well.
await self.should_abort_early(b'\xf0\xff\xff\xff\xff')
@precise_warnings(['Aborting connection: Failed to parse message'])
@asynctest
async def test_wrong_tkl(self):
# send an unspecified token length of 15.
# the rest of the message is an empty CSM, so if the server were to
# extrapolate from the meaning of tkl 0..8, it'd read it as OK.
await self.should_abort_early(b'\x0fxxxxxxxxxxxxxxx\xe1')
# Fun inside the CSM
@no_warnings
@asynctest
async def test_exotic_elective_csm_option(self):
# send option number something-even (something-odd plus 269) as an empty option
await self.should_idle_quietly(b'\x30\xe1\xe0\xf1\xf1')
@precise_warnings(['Aborting connection: Option not supported'])
@asynctest
async def test_exotic_compulsory_csm_option(self):
# send option number something-odd (something-even plus 269) as an empty option
await self.should_abort_early(b'\x30\xe1\xe0\xf2\xf2')
@precise_warnings(['Aborting connection: Option not supported'])
@asynctest
async def test_exotic_compulsory_csm_option_late(self):
    # send an empty CSM first, and after that the unknown-critical-option
    # CSM from test_exotic_compulsory_csm_option -- must still abort.
    await self.should_abort_early(b'\0\xe1\x30\xe1\xe0\xf2\xf2')
| 2.359375 | 2 |
settings.py | jwross24/twitoff | 0 | 12766102 | <reponame>jwross24/twitoff
"""Allow the application to see the environment variables."""
from dotenv import load_dotenv
load_dotenv()
| 1.375 | 1 |
Audio_to_Text/audio2text.py | navanil018/TemoCtor | 0 | 12766103 | #!/usr/bin/python3
# <NAME>
# audio to speech using google speech api
# 11/7/19
# Mac speech_recognition library installation
# pip3 install SpeechRecognition
# brew install portaudio
# pip3 install pyaudio
# pip3 install pydub
# Testing speech_recognition
# python3 -m speech_recognition
#Program usage
#usage: python3 ./audio2text.py audio.wav
#import library
import speech_recognition as sr
import sys
import os
from pydub import AudioSegment
from pydub.silence import split_on_silence
from textblob import TextBlob
# a function that splits the audio file into chunks
# and applies speech recognition
def silence_based_conversion(path):
    """Split the wav file at *path* on silences, run Google speech
    recognition on every chunk, and append the recognized text to
    ``output.txt`` in the current working directory.

    Side effects: creates a scratch ``audio_chunks/`` directory (removed
    again when done), temporarily chdirs into it, and writes output.txt.
    """
    song = AudioSegment.from_wav(path)

    # Keep the transcript handle in a context manager so it is flushed and
    # closed even if recognition fails part-way through (the original
    # handle was never closed).
    with open("output.txt", "w+") as transcript:
        # split track where silence is 0.4 seconds or more and get chunks
        chunks = split_on_silence(
            song,
            # must be silent for at least 400 ms; increase if the speaker
            # pauses longer, decrease otherwise.
            min_silence_len=400,
            # consider it silent if quieter than -16 dBFS
            # NOTE(review): -16 dBFS is a very high threshold -- confirm it
            # matches the recording levels.
            silence_thresh=-16
        )

        # scratch directory for the exported chunk wav files
        try:
            os.mkdir('audio_chunks')
        except FileExistsError:
            pass
        os.chdir('audio_chunks')
        try:
            # one recognizer is enough; it is stateless across chunks
            recognizer = sr.Recognizer()
            for index, chunk in enumerate(chunks):
                # export the chunk so SpeechRecognition can read it back
                chunk_name = "chunk{0}.wav".format(index)
                chunk.export(chunk_name, format="wav")
                with sr.AudioFile(chunk_name) as source:
                    audio = recognizer.record(source)
                try:
                    recognized = recognizer.recognize_google(audio)
                    transcript.write(recognized + ". ")
                except sr.UnknownValueError:
                    print("Could not understand audio")
                except sr.RequestError:
                    print("Could not request results. check your internet connection")
        finally:
            # always restore the cwd and drop the scratch directory
            # (portable replacement for the previous os.system('rm -rf ...'))
            os.chdir('..')
            import shutil
            shutil.rmtree('audio_chunks', ignore_errors=True)
def textAnalysis(filename = 'output.txt'):
    """Print the transcript in *filename* together with a 0-10 "essay score".

    The score is ``(polarity + subjectivity) * 5`` where TextBlob reports
    polarity in [-1, 1] and subjectivity in [0, 1].

    :param filename: path of the transcript to score (default: output.txt)
    """
    # Read once for sentiment analysis; close the handle (it used to leak).
    with open(filename) as fh:
        text = fh.read()
    blob = TextBlob(text)
    polarity = blob.sentiment[0]
    subjectivity = blob.sentiment[1]
    result = polarity * 5 + subjectivity * 5
    print("The Response: ")
    # Re-open *filename* (the original hard-coded 'output.txt' here,
    # silently ignoring the parameter) to echo the transcript.
    with open(filename) as log:
        for line in log:
            print(line)
    print("\n\nThe essay score out of 10: ")
    print(result)
# the main driver program
def main():
    """Entry point: transcribe the wav file given as the first CLI argument
    (usage: python3 ./audio2text.py audio.wav) and print its score."""
    silence_based_conversion(sys.argv[1])
    textAnalysis()

if __name__ == '__main__':
    main()
| 3.828125 | 4 |
server/lib/python/cartodb_services/cartodb_services/metrics/user.py | digideskio/dataservices-api | 0 | 12766104 | <gh_stars>0
from datetime import date, timedelta
from dateutil.relativedelta import relativedelta
from calendar import monthrange
def last_day_of_month(year, month):
    """Number of days in *month* of *year* (leap years handled)."""
    _, num_days = monthrange(year, month)
    return num_days
def latest_valid_date(year, month, day):
    """Latest real date in *year*/*month* that is not later than *day*
    (clamps e.g. day 31 in February to the 28th/29th)."""
    return date(year, month, min(day, monthrange(year, month)[1]))
class UserMetricsService:
""" Class to manage all the user info """
SERVICE_GEOCODER_NOKIA = 'geocoder_here'
SERVICE_GEOCODER_CACHE = 'geocoder_cache'
SERVICE_HERE_ISOLINES = 'here_isolines'
SERVICE_MAPZEN_ISOLINES = 'mapzen_isolines'
SERVICE_MAPBOX_ISOLINES = 'mapbox_isolines'
SERVICE_TOMTOM_ISOLINES = 'tomtom_isolines'
SERVICE_MAPZEN_ROUTING = 'routing_mapzen'
SERVICE_MAPBOX_ROUTING = 'routing_mapbox'
SERVICE_TOMTOM_ROUTING = 'routing_tomtom'
SERVICE_OBSERVATORY = 'obs_general'
DAY_OF_MONTH_ZERO_PADDED = '%d'
def __init__(self, user_geocoder_config, redis_connection):
self._user_geocoder_config = user_geocoder_config
self._redis_connection = redis_connection
self._username = user_geocoder_config.username
self._orgname = user_geocoder_config.organization
def used_quota(self, service_type, date):
if service_type in [self.SERVICE_HERE_ISOLINES,
self.SERVICE_MAPZEN_ISOLINES,
self.SERVICE_MAPBOX_ISOLINES,
self.SERVICE_TOMTOM_ISOLINES]:
return self.__used_isolines_quota(service_type, date)
elif service_type in [self.SERVICE_MAPZEN_ROUTING,
self.SERVICE_MAPBOX_ROUTING,
self.SERVICE_TOMTOM_ROUTING]:
return self.__used_routing_quota(service_type, date)
elif service_type == self.SERVICE_OBSERVATORY:
return self.__used_observatory_quota(service_type, date)
else:
return self.__used_geocoding_quota(service_type, date)
def __used_geocoding_quota(self, service_type, date):
""" Recover the used quota for the user in the current month """
date_from, date_to = self.__current_billing_cycle()
current_use = 0
success_responses = self.get_metrics(service_type,
'success_responses', date_from,
date_to)
empty_responses = self.get_metrics(service_type,
'empty_responses', date_from,
date_to)
current_use += (success_responses + empty_responses)
if service_type == self.SERVICE_GEOCODER_NOKIA:
cache_hits = self.get_metrics(self.SERVICE_GEOCODER_CACHE,
'success_responses', date_from,
date_to)
current_use += cache_hits
return current_use
def __used_isolines_quota(self, service_type, date):
""" Recover the used quota for the user in the current month """
date_from, date_to = self.__current_billing_cycle()
current_use = 0
isolines_generated = self.get_metrics(service_type,
'isolines_generated', date_from,
date_to)
empty_responses = self.get_metrics(service_type,
'empty_responses', date_from,
date_to)
current_use += (isolines_generated + empty_responses)
return current_use
def __used_routing_quota(self, service_type, date):
""" Recover the used quota for the user in the current month """
date_from, date_to = self.__current_billing_cycle()
current_use = 0
success_responses = self.get_metrics(service_type,
'success_responses', date_from,
date_to)
empty_responses = self.get_metrics(service_type,
'empty_responses', date_from,
date_to)
current_use += (success_responses + empty_responses)
return current_use
def __used_observatory_quota(self, service_type, date):
date_from, date_to = self.__current_billing_cycle()
current_use = 0
success_responses = self.get_metrics(service_type,
'success_responses', date_from,
date_to)
empty_responses = self.get_metrics(service_type,
'empty_responses', date_from,
date_to)
current_use += (success_responses + empty_responses)
return current_use
def increment_service_use(self, service_type, metric, date=date.today(),
amount=1):
""" Increment the services uses in monthly and daily basis"""
self.__increment_user_uses(service_type, metric, date, amount)
if self._orgname:
self.__increment_organization_uses(service_type, metric, date,
amount)
def get_metrics(self, service, metric, date_from, date_to):
aggregated_metric = 0
key_prefix = "org" if self._orgname else "user"
entity_name = self._orgname if self._orgname else self._username
for date in self.__generate_date_range(date_from, date_to):
redis_prefix = self.__parse_redis_prefix(key_prefix, entity_name,
service, metric, date)
zero_padded_day = date.strftime(self.DAY_OF_MONTH_ZERO_PADDED)
score = self._redis_connection.zscore(redis_prefix, zero_padded_day)
aggregated_metric += int(score) if score else 0
return aggregated_metric
# Private functions
def __increment_user_uses(self, service_type, metric, date, amount):
redis_prefix = self.__parse_redis_prefix("user", self._username,
service_type, metric, date)
self._redis_connection.zincrby(redis_prefix,
date.strftime(self.DAY_OF_MONTH_ZERO_PADDED),
amount)
def __increment_organization_uses(self, service_type, metric, date, amount):
redis_prefix = self.__parse_redis_prefix("org", self._orgname,
service_type, metric, date)
self._redis_connection.zincrby(redis_prefix,
date.strftime(self.DAY_OF_MONTH_ZERO_PADDED),
amount)
def __parse_redis_prefix(self, prefix, entity_name, service_type, metric,
date):
yearmonth_key = date.strftime('%Y%m')
redis_name = "{0}:{1}:{2}:{3}:{4}".format(prefix, entity_name,
service_type, metric,
yearmonth_key)
return redis_name
def __current_billing_cycle(self):
""" Return the begining and end date for the current billing cycle """
end_period_day = self._user_geocoder_config.period_end_date.day
today = date.today()
if end_period_day > today.day:
temp_date = today + relativedelta(months=-1)
date_from = latest_valid_date(temp_date.year, temp_date.month, end_period_day)
else:
date_from = latest_valid_date(today.year, today.month, end_period_day)
return date_from, today
def __generate_date_range(self, date_from, date_to):
for n in range(int((date_to - date_from).days + 1)):
yield date_from + timedelta(n)
| 2.359375 | 2 |
generateAndersonLayer.py | caf-ltar/LtarModelingAndersonClassification_ScriptConvertCdlToLvl1Anderson | 0 | 12766105 | <reponame>caf-ltar/LtarModelingAndersonClassification_ScriptConvertCdlToLvl1Anderson<gh_stars>0
import arcpy
from arcpy import env
from arcpy.sa import *
import pandas as pd
import os.path
import errno
import sys
import math
import glob
import shutil
# --- FUNCTIONS ----------------------------------------------------------------
def getRasterCalcArgument(df, categories, rasterValue,
                          gisDataLayerName,
                          shoulSetNoDataToZero = False):
    """Build an arcpy raster-calculator ``Con()`` expression string that
    maps every CDL code belonging to Anderson class ``categories[0]`` to
    ``rasterValue`` (optionally forcing NoData cells to 0).

    Only the first entry of *categories* is used; callers pass ``[cat]``.
    (Parameter name ``shoulSetNoDataToZero`` kept as-is for compatibility.)
    """
    matching = df.loc[df['anderson-code'] == categories[0]]
    comparisons = " | ".join(
        "(%s == %s)" % (gisDataLayerName, code)
        for code in matching['cdl-code']
    )
    expression = "Con(" + comparisons + "," + str(rasterValue)
    if shoulSetNoDataToZero:
        expression += ",0"
    return expression + ")\n"
def createAndersonLayer(cdlRasterBasename, cdlRasterYear, cdlRasterExtension, rasterCalcArgs, resultDirName, workingDirName):
    """Build the Anderson level-1 raster for one CDL year and return the
    path of the mosaic written into *resultDirName*.

    ``rasterCalcArgs`` holds raster-calculator expression strings from
    getRasterCalcArgument(); each references the local variable
    ``rasterIn`` by name.
    """
    cdlRasterName = cdlRasterBasename + str(cdlRasterYear) + cdlRasterExtension
    print("Starting raster: " + cdlRasterName)
    # IMPORTANT: the expressions in rasterCalcArgs reference this exact
    # variable name (see _layerNameForRasterCalcArgs)
    rasterIn = Raster("Working" + os.path.sep + cdlRasterName)

    # Create one layer per Anderson class.
    # NOTE(review): index 0 is skipped, mirroring the original loop;
    # confirm the first category is intentionally excluded.
    rasterLayers = []
    print("... generating layers")
    count = len(rasterCalcArgs)
    for i in range(1, count):
        print("... generating layer " + str(i))
        # eval() instead of exec(): in Python 3, exec("tempRaster = ...")
        # cannot create a local variable visible afterwards, so the old
        # code raised NameError on the following append.
        rasterLayers.append(eval(rasterCalcArgs[i]))

    # Combine all class layers into a single 8-bit raster.
    print("... Generating mosaic")
    rasterOutFileName = cdlRasterBasename + "anderson-" + str(cdlRasterYear) + ".tif"
    arcpy.MosaicToNewRaster_management(rasterLayers,
        resultDirName, rasterOutFileName, "", "8_BIT_UNSIGNED", "",
        1, "FIRST", "FIRST")
    return os.path.join(resultDirName, rasterOutFileName)
def createDynamicMap(andersonMapPaths, outputDirWorking, outputDirPathResult):
    """Classify each cell across the yearly Anderson maps as stable (one
    class over all years), dynamic (few classes) or unstable (many
    classes), and write per-class rasters plus a combined mosaic into
    *outputDirPathResult*.  Intermediates go to *outputDirWorking*.
    """
    print("Creating dynamic map...")
    # Turn filenames into Rasters
    andersonMaps = []
    for andersonMapPath in andersonMapPaths:
        andersonMaps.append(Raster(andersonMapPath))
    print("... running cell statistics")
    majorityRasterTempPath = os.path.join(outputDirWorking, "majorityRasterTemp.tif")
    majorityPath = os.path.join(outputDirWorking, "majorityRaster.tif")
    # Create MAJORITY Cell Statistic layer (most frequent class per cell)
    majorityRasterTemp = arcpy.gp.CellStatistics_sa(andersonMaps,
        majorityRasterTempPath,
        "MAJORITY", "DATA")
    # Returns largest YYYY in list of anthromeYYYYn.tif
    # (relies on the year being the only varying, sortable part of the name)
    andersonPathCurrYearPath = sorted(andersonMapPaths, reverse=True)[0]
    # The MAJORITY function in Cell Statistics returns NoData if a tie for
    # majority, so fill these with the current year's value
    majorityRaster = Con(IsNull(majorityRasterTempPath), andersonPathCurrYearPath, majorityRasterTempPath)
    majorityRaster.save(majorityPath)
    # VARIETY = number of distinct classes a cell took across the years
    varietyRaster = arcpy.gp.CellStatistics_sa(andersonMaps,
        os.path.join(outputDirWorking, "varietyRaster.tif"),
        "VARIETY", "DATA")
    varietyPath = os.path.join(outputDirWorking, "varietyRaster.tif")
    # Get cutoff value, should be greater than 50%
    #dynamicUnstableCuttoff = len(andersonMapPaths)/2
    # (rounded half of the number of years; cells changing class more often
    # than this are considered unstable)
    dynamicUnstableCuttoff = int((len(andersonMapPaths)/2) + 0.5)
    print("... generating stable, dynamic, and unstable rasters")
    # stable: one class; dynamic: majority class + 100; unstable: + 200
    stableRaster = Con(varietyPath, majorityPath, "", "Value = 1")
    #dynamicRaster = Con(varietyPath, Raster(majorityPath) + 100, "", "Value > 1 AND Value < " + str(dynamicUnstableCuttoff))
    #unstableRaster = Con(varietyPath, Raster(majorityPath) + 200, "", "Value >= " + str(dynamicUnstableCuttoff))
    dynamicRaster = Con(varietyPath, Raster(majorityPath) + 100, "", "Value > 1 AND Value <= " + str(dynamicUnstableCuttoff))
    unstableRaster = Con(varietyPath, Raster(majorityPath) + 200, "", "Value > " + str(dynamicUnstableCuttoff))
    stableRaster.save(os.path.join(outputDirPathResult, "andersonStable.tif"))
    dynamicRaster.save(os.path.join(outputDirPathResult, "andersonDynamic.tif"))
    unstableRaster.save(os.path.join(outputDirPathResult, "andersonUnstable.tif"))
    print("... generating mosaic")
    arcpy.MosaicToNewRaster_management(
        [stableRaster, dynamicRaster, unstableRaster],
        outputDirPathResult, "anderson-athrome-mandan.tif",
        "",
        "8_BIT_UNSIGNED", "", 1, "FIRST", "FIRST")
    #print("... cleaning up")
    #arcpy.Delete_management(majorityRaster)
    #arcpy.Delete_management(majorityRasterTemp)
    #arcpy.Delete_management(varietyRaster)
# --- PARAMETERS ---------------------------------------------------------------
_cdlToAndersonMapFilename = "CdlToAndersonMap.csv"
# Must match the local variable name in createAndersonLayer(), since the
# generated raster-calculator expressions reference it verbatim.
_layerNameForRasterCalcArgs = "rasterIn"
# Input raster filenames should be of the form {basename}{year}{extension}
# Provides basename to be combined with years and extension to create
# e.g. CDL_CAF_2016.tif, basename = "CDL_CAF_"
_inputRasterBasename = "cdl-mandan-"
# CDL years to process (newest first)
_years = [
    2016,
    2015,
    2014,
    2013,
    2012,
    2011,
    2010]
_inputRasterExtension = ".tif"
_workingDirName = "WorkingTemp"
_resultDirName = "Results"
_tempFolderName = "temp"
# NOTE(review): only referenced from commented-out code -- confirm whether
# intermediate-layer saving is still wanted.
shouldSaveIntermediateLayers = True
# Environment Parameters
arcpy.env.workspace = r"C:\OneDrive\OneDrive - Washington State University (email.wsu.edu)\Projects\CafModelingAndersonClassification\Working\ArcMap"
arcpy.env.overwriteOutput = True
#arcpy.env.snapRaster = arcpy.env.workspace + os.path.sep + _irrigatedPath
# --- MAIN ---------------------------------------------------------------------
# Setup: recreate a clean scratch workspace for arcpy intermediates.
tempFolderPath = arcpy.env.workspace + os.path.sep + _tempFolderName
shutil.rmtree(tempFolderPath, ignore_errors=True)
os.makedirs(tempFolderPath)
arcpy.env.scratchWorkspace = tempFolderPath

# Read in the CDL -> Anderson code mapping table.
try:
    df = pd.read_csv(_cdlToAndersonMapFilename)
except Exception as e:
    # Fixed format string ('%sn' was missing the backslash) and bail out:
    # continuing would crash later on an undefined `df`.
    sys.stderr.write('ERROR: %s\n' % str(e))
    sys.exit(1)

# Get unique categories in the data
categories = df["anderson-code"].unique()

arcpy.CheckOutExtension("spatial")

# Get raster-calculator expression string for each category
rasterStrings = []
for cat in categories:
    rasterStrings.append(
        getRasterCalcArgument(df, [cat], cat, _layerNameForRasterCalcArgs))

# Generate a map of all Anderson level 1 classifications for all years
andersonMapPaths = []
for year in _years:
    andersonMapPaths.append(
        createAndersonLayer(_inputRasterBasename, year, _inputRasterExtension,
                            rasterStrings, _resultDirName, _workingDirName))

# Determine stable, unstable, dynamic layers then compress into single map
createDynamicMap(andersonMapPaths,
                 os.path.join(arcpy.env.workspace, _workingDirName),
                 os.path.join(arcpy.env.workspace, _resultDirName))

arcpy.CheckInExtension("spatial")
alfred/alfred.py | mengfu188/alfred | 1 | 12766106 | #!/usr/bin/env python3
#
# Copyright (c) 2020 JinTian.
#
# This file is part of alfred
# (see http://jinfagang.github.io).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
main entrance of Alfred
"""
import os
import sys
import argparse
from colorama import Fore, Back, Style
import traceback
from .modules.vision.video_extractor import VideoExtractor
from .modules.scrap.image_scraper import ImageScraper
from .modules.vision.to_video import VideoCombiner
from .modules.vision.video_reducer import VideoReducer
from .modules.data.view_voc import vis_voc
from .modules.data.view_coco import vis_coco
from .modules.data.view_txt import vis_det_txt
from .modules.data.gather_voclabels import gather_labels
from .modules.data.voc2coco import convert
from .modules.data.eval_voc import eval_voc
from .modules.cabinet.count_file import count_file
from .modules.cabinet.split_txt import split_txt_file
from .modules.cabinet.license import apply_license
from .modules.cabinet.stack_imgs import stack_imgs
from alfred.utils.log import logger as logging
# Package metadata shown by print_welcome_msg().
__VERSION__ = '2.7.1'
__AUTHOR__ = '<NAME>'
# Fixed the '20202' year typo in the displayed date string.
__DATE__ = '2020.10.01, since 2019.11.11'
__LOC__ = 'Shenzhen, China'
__git__ = 'http://github.com/jinfagang/alfred'
def arg_parse():
"""
parse arguments
:return:
"""
parser = argparse.ArgumentParser(prog="alfred")
parser.add_argument('--version', '-v', action="store_true", help='show version info.')
# vision, text, scrap
main_sub_parser = parser.add_subparsers()
# =============== vision part ================
vision_parser = main_sub_parser.add_parser('vision', help='vision related commands.')
vision_sub_parser = vision_parser.add_subparsers()
vision_extract_parser = vision_sub_parser.add_parser('extract', help='extract image from video: alfred vision '
'extract -v tt.mp4')
vision_extract_parser.set_defaults(which='vision-extract')
vision_extract_parser.add_argument('--video', '-v', help='video to extract')
vision_extract_parser.add_argument('--jumps', '-j', help='jump frames for wide extract')
vision_reduce_parser = vision_sub_parser.add_parser('reduce', help='reduce video by drop frames'
'\nalfred vision reduce -v a.mp4 -j 10')
vision_reduce_parser.set_defaults(which='vision-reduce')
vision_reduce_parser.add_argument('--video', '-v', help='video to extract')
vision_reduce_parser.add_argument('--jumps', '-j', help='jump frames for wide extract')
vision_2video_parser = vision_sub_parser.add_parser('2video', help='combine into a video: alfred vision '
'2video -d ./images')
vision_2video_parser.set_defaults(which='vision-2video')
vision_2video_parser.add_argument('--dir', '-d', help='dir contains image sequences.')
vision_clean_parser = vision_sub_parser.add_parser('clean', help='clean images in a dir.')
vision_clean_parser.set_defaults(which='vision-clean')
vision_clean_parser.add_argument('--dir', '-d', help='dir contains images.')
vision_getface_parser = vision_sub_parser.add_parser('getface', help='get all faces inside an image and save it.')
vision_getface_parser.set_defaults(which='vision-getface')
vision_getface_parser.add_argument('--dir', '-d', help='dir contains images to extract faces.')
# =============== text part ================
text_parser = main_sub_parser.add_parser('text', help='text related commands.')
text_sub_parser = text_parser.add_subparsers()
text_clean_parser = text_sub_parser.add_parser('clean', help='clean text.')
text_clean_parser.set_defaults(which='text-clean')
text_clean_parser.add_argument('--file', '-f', help='file to clean')
text_translate_parser = text_sub_parser.add_parser('translate', help='translate')
text_translate_parser.set_defaults(which='text-translate')
text_translate_parser.add_argument('--file', '-f', help='translate a words to target language')
# =============== scrap part ================
scrap_parser = main_sub_parser.add_parser('scrap', help='scrap related commands.')
scrap_sub_parser = scrap_parser.add_subparsers()
scrap_image_parser = scrap_sub_parser.add_parser('image', help='scrap images.')
scrap_image_parser.set_defaults(which='scrap-image')
scrap_image_parser.add_argument('--query', '-q', help='query words.')
# =============== cabinet part ================
cabinet_parser = main_sub_parser.add_parser('cab', help='cabinet related commands.')
cabinet_sub_parser = cabinet_parser.add_subparsers()
count_file_parser = cabinet_sub_parser.add_parser('count', help='scrap images.')
count_file_parser.set_defaults(which='cab-count')
count_file_parser.add_argument('--dir', '-d', default='./', help='dir to count.')
count_file_parser.add_argument('--type', '-t', help='dir to count.')
split_txt_parser = cabinet_sub_parser.add_parser('split', help='split txt file.')
split_txt_parser.set_defaults(which='cab-split')
split_txt_parser.add_argument('--file', '-f', required=True, help='file to split.')
split_txt_parser.add_argument('--ratios', '-r', help='ratios.')
split_txt_parser.add_argument('--names', '-n', help='names.')
stackimgs_parser = cabinet_sub_parser.add_parser('stackimgs', help='stack images into one')
stackimgs_parser.set_defaults(which='cab-stackimgs')
stackimgs_parser.add_argument('--imgs', '-i', required=True, nargs='+', help='images list.')
stackimgs_parser.add_argument('--dim', '-d', help='dims like 2x3.')
apply_license_parser = cabinet_sub_parser.add_parser('license', help='automatically add/update license.')
apply_license_parser.set_defaults(which='cab-license')
apply_license_parser.add_argument('--owner', '-o', required=True, help='owner of license.')
apply_license_parser.add_argument('--name', '-n', help='project name.')
apply_license_parser.add_argument('--year', '-y', help='project year: 2016-2020.')
apply_license_parser.add_argument('--url', '-u', default='manaai.cn', help='your website url.')
apply_license_parser.add_argument('--dir', '-d', default='./', help='to apply license dir.')
apply_license_parser.add_argument('--except', '-e', help='except extensions: xml,cc,h')
# =============== data part ================
data_parser = main_sub_parser.add_parser('data', help='data related commands.')
data_sub_parser = data_parser.add_subparsers()
view_voc_parser = data_sub_parser.add_parser('vocview', help='view voc.')
view_voc_parser.set_defaults(which='data-vocview')
view_voc_parser.add_argument('--image_dir', '-i', help='Root path of VOC image.')
view_voc_parser.add_argument('--label_dir', '-l', help='Root path of VOC label.')
view_txt_parser = data_sub_parser.add_parser('txtview', help='view voc.')
view_txt_parser.set_defaults(which='data-txtview')
view_txt_parser.add_argument('--image_dir', '-i', help='Root path of VOC image.')
view_txt_parser.add_argument('--label_dir', '-l', help='Root path of VOC label.')
view_coco_parser = data_sub_parser.add_parser('cocoview', help='view voc.')
view_coco_parser.set_defaults(which='data-cocoview')
view_coco_parser.add_argument('--image_dir', '-i', help='Root path of COCO images.')
view_coco_parser.add_argument('--json', '-j', help='Root path of COCO annotations.json .')
voc_label_parser = data_sub_parser.add_parser('voclabel', help='gather labels from annotations dir.')
voc_label_parser.set_defaults(which='data-voclabel')
voc_label_parser.add_argument('--anno_dir', '-d', help='dir to annotations.')
split_voc_parser = data_sub_parser.add_parser('splitvoc', help='split VOC to train and val.')
split_voc_parser.set_defaults(which='data-splitvoc')
split_voc_parser.add_argument('--image_dir', '-i', help='Root path of VOC image.')
split_voc_parser.add_argument('--label_dir', '-l', help='Root path of VOC label.')
labelone2voc_parser = data_sub_parser.add_parser('labelone2voc', help='convert labelone to VOC.')
labelone2voc_parser.set_defaults(which='data-labelone2voc')
labelone2voc_parser.add_argument('--json_dir', '-j', help='Root of labelone json dir.')
voc2coco_parser = data_sub_parser.add_parser('voc2coco', help='convert VOC to coco.')
voc2coco_parser.set_defaults(which='data-voc2coco')
voc2coco_parser.add_argument('--xml_dir', '-d', help='Root of xmls dir (Annotations/).')
evalvoc_parser = data_sub_parser.add_parser('evalvoc', help='evaluation on VOC.')
evalvoc_parser.set_defaults(which='data-evalvoc')
evalvoc_parser.add_argument('-g', '--gt_dir', type=str, required=True, help="Ground truth path (can be xml dir or txt dir, coco json will support soon)")
evalvoc_parser.add_argument('-d', '--det_dir', type=str, required=True, help="Detection result (should saved into txt format)")
evalvoc_parser.add_argument('-im', '--images_dir', type=str, default='images', help="Raw images dir for animation.")
evalvoc_parser.add_argument('-na', '--no-animation', help="no animation is shown.", action="store_true")
evalvoc_parser.add_argument('-np', '--no-plot', help="no plot is shown.", action="store_true")
evalvoc_parser.add_argument('-q', '--quiet', help="minimalistic console output.", action="store_true")
evalvoc_parser.add_argument('--min_overlap', type=float, default=0.5, help="min overlap, default is 0.5")
evalvoc_parser.add_argument('-i', '--ignore', nargs='+', type=str, help="ignore a list of classes.")
evalvoc_parser.add_argument('--set-class-iou', nargs='+', type=str, help="set IoU for a specific class.")
return parser.parse_args()
def print_welcome_msg():
    """Print the colorized alfred banner (author, date, location, repo,
    version) to stdout."""
    bright = Style.BRIGHT
    reset = Style.RESET_ALL
    print(Fore.BLUE + bright + 'Alfred ' + reset +
          Fore.WHITE + '- Valet of Artificial Intelligence.' + reset)
    for label, value in (
        ('Author: ', __AUTHOR__),
        ('At : ', __DATE__),
        ('Loc : ', __LOC__),
        ('Star : ', __git__),
        ('Ver. : ', __VERSION__),
    ):
        print(label + Fore.RED + bright + value + reset)
def main(args=None):
    """CLI entry point: print the banner and dispatch to the requested
    ``<module>-<action>`` handler.

    ``args`` is accepted for interface compatibility but the command line
    is always re-parsed from sys.argv via arg_parse().
    """
    args = arg_parse()
    if args.version:
        # print_welcome_msg() prints directly; wrapping it in print() used
        # to emit a stray "None" line.
        print_welcome_msg()
        exit(0)
    else:
        args_dict = vars(args)
        print_welcome_msg()
        try:
            # leaf sub-parsers store which='<module>-<action>'
            module = args_dict['which'].split('-')[0]
            action = args_dict['which'].split('-')[1]

            print(Fore.GREEN + Style.BRIGHT)
            print('=> Module: ' + Fore.WHITE + Style.BRIGHT + module + Fore.GREEN + Style.BRIGHT)
            print('=> Action: ' + Fore.WHITE + Style.BRIGHT + action)

            if module == 'vision':
                if action == 'extract':
                    v_f = args_dict['video']
                    j = args_dict['jumps']
                    print(Fore.BLUE + Style.BRIGHT + 'Extracting from {}'.format(v_f))
                    video_extractor = VideoExtractor(jump_frames=j)
                    video_extractor.extract(v_f)
                elif action == 'reduce':
                    v_f = args_dict['video']
                    j = args_dict['jumps']
                    print(Fore.BLUE + Style.BRIGHT + 'Reduce from {}, jumps: {}'.format(v_f, j))
                    video_reducer = VideoReducer(jump_frames=j)
                    video_reducer.act(v_f)
                elif action == '2video':
                    d = args_dict['dir']
                    combiner = VideoCombiner(img_dir=d)
                    print(Fore.BLUE + Style.BRIGHT + 'Combine video from {}'.format(d))
                    # TODO(review): leftover debug output? confirm it can go.
                    print(Fore.BLUE + Style.BRIGHT + 'What the hell.. {}'.format(d))
                    combiner.combine()
                elif action == 'clean':
                    d = args_dict['dir']
                    print(Fore.BLUE + Style.BRIGHT + 'Cleaning from {}'.format(d))
                elif action == 'getface':
                    try:
                        # dlib is optional; import lazily so the rest of
                        # alfred works without it
                        from .modules.vision.face_extractor import FaceExtractor
                        import dlib
                        d = args_dict['dir']
                        print(Fore.BLUE + Style.BRIGHT + 'Extract faces from {}'.format(d))
                        face_extractor = FaceExtractor()
                        face_extractor.get_faces(d)
                    except ImportError:
                        print('This action needs to install dlib first. http://dlib.net')
            elif module == 'text':
                if action == 'clean':
                    f = args_dict['file']
                    print(Fore.BLUE + Style.BRIGHT + 'Cleaning from {}'.format(f))
                elif action == 'translate':
                    # fixed: Namespace has no attribute `v`; the option's
                    # dest is `file` (previously `f = args.v` raised
                    # AttributeError)
                    f = args_dict['file']
                    print(Fore.BLUE + Style.BRIGHT + 'Translate from {}'.format(f))
            elif module == 'scrap':
                if action == 'image':
                    q = args_dict['query']
                    q_list = q.split(',')
                    q_list = [i.replace(' ', '') for i in q_list]
                    image_scraper = ImageScraper()
                    image_scraper.scrap(q_list)
            elif module == 'cab':
                if action == 'count':
                    d = args_dict['dir']
                    t = args_dict['type']
                    logging.info('dir: {}, types: {}'.format(d, t))
                    count_file(d, t)
                elif action == 'split':
                    f = args_dict['file']
                    r = args_dict['ratios']
                    n = args_dict['names']
                    logging.info('files: {}, ratios: {}, names: {}'.format(f, r, n))
                    split_txt_file(f, r, n)
                elif action == 'stackimgs':
                    f = args_dict['imgs']
                    r = args_dict['dim']
                    logging.info('files: {}, dim: {}'.format(f, r))
                    stack_imgs(f, r)
                elif action == 'license':
                    owner = args_dict['owner']
                    project_name = args_dict['name']
                    year = args_dict['year']
                    url = args_dict['url']
                    d = args_dict['dir']
                    apply_license(owner, project_name, year, url, d)
            elif module == 'data':
                if action == 'vocview':
                    image_dir = args_dict['image_dir']
                    label_dir = args_dict['label_dir']
                    vis_voc(img_root=image_dir, label_root=label_dir)
                elif action == 'cocoview':
                    img_d = args_dict['image_dir']
                    json_f = args_dict['json']
                    vis_coco(img_d, json_f)
                elif action == 'txtview':
                    image_dir = args_dict['image_dir']
                    label_dir = args_dict['label_dir']
                    vis_det_txt(img_root=image_dir, label_root=label_dir)
                elif action == 'voclabel':
                    anno_dir = args_dict['anno_dir']
                    gather_labels(anno_dir)
                elif action == 'splitvoc':
                    logging.info('split VOC to train and val not implement yet.')
                    pass
                elif action == 'labelone2voc':
                    logging.info('labelone2voc not implement yet.')
                    pass
                elif action == 'voc2coco':
                    logging.info('start convert VOC to coco... Annotations root: {}'.format(args_dict['xml_dir']))
                    convert(args_dict['xml_dir'])
                elif action == 'evalvoc':
                    logging.info('start eval on VOC dataset..')
                    eval_voc(args)
        except Exception as e:
            # also raised when no sub-command was given ('which' missing)
            traceback.print_exc()
            print(Fore.RED, 'parse args error, type -h to see help. msg: {}'.format(e))
if __name__ == '__main__':
main()
| 1.421875 | 1 |
Software/dsmr.py | AvansETI/SmartMeterDIY_schematics | 0 | 12766107 | <filename>Software/dsmr.py
from __future__ import annotations
from abc import ABC, abstractmethod
import re
import json
class DSMR_Parser(object):
    """
    P1 datagram parser for Dutch Smart Meter Readings.

    Selects a version-specific parsing Strategy from the DSMR version
    OBIS object (1-3:0.2.8) in the datagram: (42) -> DSMR 4.x,
    (50) -> DSMR 5.x, otherwise the DSMR v2 fallback (pre-4.0 datagrams
    carry no version object).
    """
    def __init__(self, datagram) -> None:
        # default until a version marker is recognised below
        self._strategy = DSMR_UNKNOWN()
        self._datagram = datagram
        if re.search(r'1-3:0\.2\.8\(42\)', self._datagram) != None:
            self._strategy = DSMR_40()
        elif re.search(r'1-3:0\.2\.8\(50\)', self._datagram) != None:
            self._strategy = DSMR_50()
        else:
            self._strategy = DSMR_V2()
    @property
    def strategy(self) -> Strategy:
        """Currently selected version-specific parser."""
        return self._strategy
    # @property
    # def datagram(self) :
    #     return self._datagram
    @strategy.setter
    def strategy(self, strategy: Strategy) -> None:
        self._strategy = strategy
    # @datagram.setter
    # def datagram(self, datagram):
    #     self._datagram = datagram
    def parse(self) -> dict:
        """Delegate parsing of the stored datagram to the active strategy."""
        return self._strategy.parse(self._datagram)
class Strategy(ABC):
    """Interface for version-specific DSMR datagram parsers."""

    @abstractmethod
    def parse(self, datagram):
        """Parse a raw P1 datagram string into a dict of readings."""
        ...
class DSMR_UNKNOWN(Strategy):
def parse(self, datagram):
return {}
class DSMR_40(Strategy):
def parse(self, datagram):
# info
version = re.search(r'1-3:0\.2\.8\(([0-9]+)\)', datagram).group(1)
# Manufacturer
manufacturer = re.search(r'([A-Z]{3})[0-9]{1}([a-zA-Z0-9]+)', datagram).group()
# Power DELIVERED by client
power_delivered = re.search(r'1-0:1\.7\.0\(([0-9]*\.[0-9]*)\*(kW)\)', datagram).group(1,2)
# Power RECEIVED by client
power_received = re.search(r'1-0:2\.7\.0\(([0-9]*\.[0-9]*)\*(kW)\)', datagram).group(1,2)
# Energy DELIVERED tariff 1 TO client
energy_to_t1 = re.search(r'1-0:1\.8\.1\(0*([0-9]*\.[0-9]*)\*(kWh)\)', datagram).group(1,2)
energy_to_t2 = re.search(r'1-0:1\.8\.2\(0*([0-9]*\.[0-9]*)\*(kWh)\)', datagram).group(1,2)
# Energy DELIVERED tariff 2 BY client
energy_by_t1 = re.search(r'1-0:2\.8\.1\(0*([0-9]*\.[0-9]*)\*(kWh)\)', datagram).group(1,2)
energy_by_t2 = re.search(r'1-0:2\.8\.2\(0*([0-9]*\.[0-9]*)\*(kWh)\)', datagram).group(1,2)
# Current TARIFF
tariff = re.search(r'0-0:96\.14\.0\(([0-9]*)\)', datagram).group(1)
# Equipment ID
equipment_id = re.search(r'0-0:96\.1\.1(\(([0-9]{1,96})\))', datagram).group(2)
# Bundle all in dictionary
retval = {
'manufacturer': manufacturer,
'version' : version,
'equipment_id': equipment_id,
'tariff': int(tariff),
'power' : [
{'delivered' : {'value': float(power_delivered[0]), 'unit': power_delivered[1] }},
{'received': {'value': float(power_received[0]), 'unit': power_received[1] }}
],
'energy': [
{'tariff' : 1,
'delivered': {'value': float(energy_by_t1[0]), 'unit': energy_by_t1[1]},
'received': {'value': float(energy_to_t1[0]), 'unit': energy_to_t1[1]}
},
{'tariff' : 2,
'delivered': {'value': float(energy_by_t2[0]), 'unit': energy_by_t2[1]},
'received': {'value': float(energy_to_t2[0]), 'unit': energy_to_t2[1]}
},
]
}
# Optionals
# Gas DELIVERED TO client (from clients view: consumed)
gas_delivered = re.search(r'0-1:24\.2\.1\((0*[0-w]*)\)\(*(0*[0-9]*\.[0-9]*)\*(m3)\)', datagram)
if gas_delivered is not None:
gas_delivered = gas_delivered.group(1, 2, 3)
gas_dict = {
"delivered": {'value': float(gas_delivered[1]), 'unit': gas_delivered[2]},
"received": {'value': 0.0, 'unit': 'm3'}
}
retval['gas'] = gas_dict
return retval
class DSMR_50(Strategy):
    """Parser for DSMR 5.0 datagrams (version marker ``1-3:0.2.8(50)``)."""

    def parse(self, datagram):
        """Extract readings from a DSMR 5.0 P1 *datagram*.

        Same result shape as :class:`DSMR_40`; only the manufacturer line
        is matched more loosely (the whole '/XXX5...' header line).
        Raises AttributeError when a mandatory OBIS field is missing.
        """
        # Protocol version (contents of 1-3:0.2.8).
        version = re.search(r'1-3:0\.2\.8\(([0-9]+)\)', datagram).group(1)
        # Manufacturer / meter identification header line.
        manufacturer = re.search(r'([ /].*)', datagram).group()
        # Instantaneous power delivered TO the client (1-0:1.7.0).
        power_delivered = re.search(r'1-0:1\.7\.0\(([0-9]*\.[0-9]*)\*(kW)\)', datagram).group(1, 2)
        # Instantaneous power received FROM the client (1-0:2.7.0).
        power_received = re.search(r'1-0:2\.7\.0\(([0-9]*\.[0-9]*)\*(kW)\)', datagram).group(1, 2)
        # Cumulative energy delivered to the client, tariff 1 / tariff 2.
        energy_to_t1 = re.search(r'1-0:1\.8\.1\(0*([0-9]*\.[0-9]*)\*(kWh)\)', datagram).group(1, 2)
        energy_to_t2 = re.search(r'1-0:1\.8\.2\(0*([0-9]*\.[0-9]*)\*(kWh)\)', datagram).group(1, 2)
        # Cumulative energy produced by the client, tariff 1 / tariff 2.
        energy_by_t1 = re.search(r'1-0:2\.8\.1\(0*([0-9]*\.[0-9]*)\*(kWh)\)', datagram).group(1, 2)
        energy_by_t2 = re.search(r'1-0:2\.8\.2\(0*([0-9]*\.[0-9]*)\*(kWh)\)', datagram).group(1, 2)
        # Currently active tariff indicator (0-0:96.14.0).
        tariff = re.search(r'0-0:96\.14\.0\(([0-9]*)\)', datagram).group(1)
        # Equipment identifier (0-0:96.1.1). Hex-encoded serial, so letters
        # A-F can occur. Fix: the old pattern [0-9]{1,96} failed on ids with
        # hex letters, crashing with AttributeError; now consistent with
        # DSMR_V2.
        equipment_id = re.search(r'0-0:96\.1\.1(\(([0-9A-F]{1,96})\))', datagram).group(2)

        # Bundle all mandatory fields in a dictionary.
        retval = {
            'manufacturer': manufacturer,
            'version': version,
            'equipment_id': equipment_id,
            'tariff': int(tariff),
            'power': [
                {'delivered': {'value': float(power_delivered[0]), 'unit': power_delivered[1]}},
                {'received': {'value': float(power_received[0]), 'unit': power_received[1]}}
            ],
            'energy': [
                {'tariff': 1,
                 'delivered': {'value': float(energy_by_t1[0]), 'unit': energy_by_t1[1]},
                 'received': {'value': float(energy_to_t1[0]), 'unit': energy_to_t1[1]}
                 },
                {'tariff': 2,
                 'delivered': {'value': float(energy_by_t2[0]), 'unit': energy_by_t2[1]},
                 'received': {'value': float(energy_to_t2[0]), 'unit': energy_to_t2[1]}
                 },
            ]
        }

        # Optional: gas delivered to the client (0-1:24.2.1); absent on
        # installations without a gas meter.
        gas_delivered = re.search(r'0-1:24\.2\.1\((0*[0-w]*)\)\(*(0*[0-9]*\.[0-9]*)\*(m3)\)', datagram)
        if gas_delivered is not None:
            gas_delivered = gas_delivered.group(1, 2, 3)
            gas_dict = {
                "delivered": {'value': float(gas_delivered[1]), 'unit': gas_delivered[2]},
                "received": {'value': 0.0, 'unit': 'm3'}
            }
            retval['gas'] = gas_dict
        return retval
class DSMR_V2(Strategy):
    """Parser for legacy DSMR v2 datagrams (no 1-3:0.2.8 version marker)."""

    def parse(self, datagram):
        """Extract readings from a DSMR v2 P1 *datagram*.

        Same result shape as :class:`DSMR_40` minus the gas reading;
        the version is hard-coded to 'V2' because v2 datagrams carry no
        version field. Raises AttributeError when a mandatory OBIS field
        is missing.
        """
        # Manufacturer / meter identification header line.
        manufacturer = re.search(r'([ /].*)', datagram).group()
        # Instantaneous power delivered TO the client (1-0:1.7.0).
        power_delivered = re.search(r'1-0:1\.7\.0\(([0-9]*\.[0-9]*)\*(kW)\)', datagram).group(1, 2)
        # Instantaneous power received FROM the client (1-0:2.7.0).
        power_received = re.search(r'1-0:2\.7\.0\(([0-9]*\.[0-9]*)\*(kW)\)', datagram).group(1, 2)
        # Cumulative energy delivered to the client, tariff 1 / tariff 2.
        energy_to_t1 = re.search(r'1-0:1\.8\.1\(0*([0-9]*\.[0-9]*)\*(kWh)\)', datagram).group(1, 2)
        energy_to_t2 = re.search(r'1-0:1\.8\.2\(0*([0-9]*\.[0-9]*)\*(kWh)\)', datagram).group(1, 2)
        # Cumulative energy produced by the client, tariff 1 / tariff 2.
        energy_by_t1 = re.search(r'1-0:2\.8\.1\(0*([0-9]*\.[0-9]*)\*(kWh)\)', datagram).group(1, 2)
        energy_by_t2 = re.search(r'1-0:2\.8\.2\(0*([0-9]*\.[0-9]*)\*(kWh)\)', datagram).group(1, 2)
        # Currently active tariff indicator (0-0:96.14.0).
        tariff = re.search(r'0-0:96\.14\.0\(([0-9]*)\)', datagram).group(1)
        # Equipment identifier (0-0:96.1.1), hex-encoded serial.
        equipment_id = re.search(r'0-0:96\.1\.1(\(([0-9A-F]{1,96})\))', datagram).group(2)

        # Bundle all fields in a dictionary.
        # Fix: the result previously contained the *string literal*
        # 'equipment_id' instead of the parsed value; it now returns the
        # extracted identifier.
        return {
            'manufacturer': manufacturer,
            'version': 'V2',
            'equipment_id': equipment_id,
            'tariff': int(tariff),
            'power': [
                {'delivered': {'value': float(power_delivered[0]), 'unit': power_delivered[1]}},
                {'received': {'value': float(power_received[0]), 'unit': power_received[1]}}
            ],
            'energy': [
                {'tariff': 1,
                 'delivered': {'value': float(energy_by_t1[0]), 'unit': energy_by_t1[1]},
                 'received': {'value': float(energy_to_t1[0]), 'unit': energy_to_t1[1]}
                 },
                {'tariff': 2,
                 'delivered': {'value': float(energy_by_t2[0]), 'unit': energy_by_t2[1]},
                 'received': {'value': float(energy_to_t2[0]), 'unit': energy_to_t2[1]}
                 },
            ]
        }
# if __name__ == '__main__':
# datagram = "/KFM5KAIFA-METER\r\n\r\n1-3:0.2.8(42)\r\n0-0:1.0.0(200213170457W)\r\n0-0:96.1.1(4530303236303030303333333338343136)\r\n1-0:1.8.1(015001.164*kWh)\r\n1-0:1.8.2(012236.435*kWh)\r\n1-0:2.8.1(000942.859*kWh)\r\n1-0:2.8.2(002395.253*kWh)\r\n0-0:96.14.0(0002)\r\n1-0:1.7.0(00.299*kW)\r\n1-0:2.7.0(00.000*kW)\r\n0-0:96.7.21(00001)\r\n0-0:96.7.9(00001)\r\n1-0:99.97.0(2)(0-0:96.7.19)(180712201124S)(0000004179*s)(000101000006W)(2147483647*s)\r\n1-0:32.32.0(00000)\r\n1-0:52.32.0(00000)\r\n1-0:72.32.0(00000)\r\n1-0:32.36.0(00000)\r\n1-0:52.36.0(00000)\r\n1-0:72.36.0(00000)\r\n0-0:96.13.1()\r\n0-0:96.13.0()\r\n1-0:31.7.0(000*A)\r\n1-0:51.7.0(000*A)\r\n1-0:71.7.0(001*A)\r\n1-0:21.7.0(00.101*kW)\r\n1-0:41.7.0(00.038*kW)\r\n1-0:61.7.0(00.158*kW)\r\n1-0:22.7.0(00.000*kW)\r\n1-0:42.7.0(00.000*kW)\r\n1-0:62.7.0(00.000*kW)\r\n0-1:24.1.0(003)\r\n0-1:96.1.0(4730303332353631323831363736343136)\r\n0-1:24.2.1(200213170000W)(06136.485*m3)\r\n!2236\r\n"
# for x in range(1):
# parsed = DSMR_Parser(datagram).parse()
# jsonStr = json.dumps( parsed)
# print(jsonStr)
| 3.09375 | 3 |
Sim_core_user_merged.py | thomasspiesser/MYpop | 0 | 12766108 | <reponame>thomasspiesser/MYpop
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
#Import modules
from numpy import *
# import random as rd
from scipy import integrate
import sys, re, math
import merged_eq_system
from MYpop_functions import process_condition, check_free, initiate_core_cell, find_and_replace
def Sim_core_user_merged(user_model, matrix_dim, initial_cell_nr, events, G1_details, S_details, G2_details, M_details, parameter_values, precision,print_phase=False):
    """Simulate a growing yeast cell population on a matrix_dim x matrix_dim grid.

    Each occupied grid field is one cell cycling through G1/S/G2/M; phase
    membership is decided by the user-supplied *_details conditions, the
    ODE system (core model merged with the user model) is integrated one
    time unit per step, and division spawns a daughter on a free field.
    Runs until the grid is full (Python 2 code: uses print statements).
    Returns the structured `culture` array with per-cell histories.
    NOTE(review): the module does `from numpy import *`, so `array`,
    `zeros`, `random`, `linspace`, `nonzero`, `ndenumerate` come from
    numpy -- confirm before refactoring imports.
    """
    t = 0 # time point zero
    # Map the requested precision to odeint tolerances.
    if precision=='low':
        tol=0.01
    elif precision=='middle':
        tol=0.0001
    elif precision=='high':
        tol=1.49012e-8
    my_model_species = ['mCLN','Cln','B_R','B_Am','B_Ad','mB_R','mB_A','mCLB','Clb']
    my_species_init_cond = dict([('mCLN',0),('Cln',0),('B_R',25),('B_Am',8.5),('B_Ad',0),('mB_R',1),('mB_A',1),('mCLB',0),('Clb',0)])
    my_species_init_cond.update(user_model.species_values)
    # Structured dtype: one record per grid field holding phase timers,
    # per-step histories and all (core + user) species trajectories.
    my_cell = [('field',int),('generation',int),('t_in_G1',int),('t_in_S',int),('t_in_G2',int),('t_in_M',int),('times_in_G1',list),('times_in_S',list),('times_in_G2',list),('times_in_M',list),('V_div',list),('V',list),('ratios',list),('V0',list)] + [(i, list) for i in my_model_species] + [(i, list) for i in user_model.species]
    culture = zeros((matrix_dim,matrix_dim), dtype=my_cell) # initiate virtual culture grid (n*m fields)
    ############################################################
    # initiate new cells, start population:
    for i in range(initial_cell_nr):
        culture = initiate_core_cell(culture, my_species_init_cond)
    ############################################################
    individuals = []
    core_parameters = ['k_d1', 'k_p1', 'k_d2', 'k_R_G1', 'k_R_SG2M', 'k_Am_G1', 'k_Am_SG2M', 'k_Ad_G1', 'k_Ad_SG2M', 'growth', 'k_d3', 'k_p2', 'k_d4']
    # core_parameter_values = dict(k_d1 = 0.1, k_p1 = 0.35, k_d2 = 0.1, k_R_G1 = 4.75, k_R_SG2M = 2, k_Am_G1 = 1, k_Am_SG2M = 0, k_Ad_G1 = 0, k_Ad_SG2M = 1, growth=0.02, k_d3 = 0.1, k_p2 = 0.25, k_d4 = 0.1)
    core_parameter_values=parameter_values.copy()
    parameter_restore_per_event = []
    ############################################################
    #clean events (para_ids instead of names and put values in dict para_restore to restore to original value of event)
    for i, event in enumerate(events):
        parameter_restore = dict()
        e_trigger, e_vars = event
        for my_p in core_parameters:
            if re.search(r'%s(?=\W|$)'%(my_p),e_vars):
                parameter_restore[my_p] = core_parameter_values[my_p]
        for pid, pname in user_model.parameter_names.items():
            if re.search(r'%s(?=\W|$)'%(pname),e_vars):
                events[i][1] = find_and_replace(pattern=pname, dest_str=events[i][1], repl_str=pid)
                parameter_restore[pid] = user_model.parameter_values[pid]
        print parameter_restore, ': par restore', i
        parameter_restore_per_event.append(parameter_restore)
    print parameter_restore_per_event, ': per event'
    ############################################################
    # Compile the user-supplied phase-entry conditions into eval-able form.
    G1_condition = process_condition(G1_details, user_model.species+my_model_species, user_model.species_names)
    S_condition = process_condition(S_details, user_model.species+my_model_species, user_model.species_names)
    G2_condition = process_condition(G2_details, user_model.species+my_model_species, user_model.species_names)
    M_condition = process_condition(M_details, user_model.species+my_model_species, user_model.species_names)
    flags = [False] * len(events) # flags default to False for every event
    # Main time loop: one iteration = one time unit for every living cell.
    while True:
        cells = culture['field']==1
        i_s,j_s = nonzero(cells)
        #global events:
        for i, event in enumerate(events):
            e_trigger, e_vars = event
            e_target, e_expr = e_vars.split('=')
            e_target = e_target.strip()
            e_expr = e_expr.strip()
            if eval(e_trigger, {'t':t,"__builtins__": None}) and not flags[i]: # {"__builtins__": {}} protects from attacks in eval of unknown expressions
                z0 = core_parameter_values.copy() # namespace for eval dict(my_paras, user_paras, security_thingi)
                z0.update(user_model.parameter_values)
                z0.update({"__builtins__": None})
                z0.update(vars(math)) # so all math.functions are allowed, like pi or sin()
                if e_target in core_parameters:
                    core_parameter_values[e_target] = eval(e_expr, z0 )
                elif e_target in user_model.parameters:
                    user_model.parameter_values[e_target] = eval(e_expr, z0 )
                flags[i] = True
            elif not eval(e_trigger, {'t':t,"__builtins__": None}) and flags[i]: # reset the values to original ones
                if e_target in core_parameters:
                    core_parameter_values[e_target] = parameter_restore_per_event[i][e_target]
                elif e_target in user_model.parameters:
                    user_model.parameter_values[e_target] = parameter_restore_per_event[i][e_target]
                flags[i] = False
        # Advance every living cell by one step, dispatching on cell-cycle phase.
        for i,j in zip(i_s,j_s):
            if (eval(G1_condition, {'culture':culture, 'i':i, 'j':j, "__builtins__": None}) and culture[i,j]['t_in_S']<1 and culture[i,j]['t_in_G2']<1 and culture[i,j]['t_in_M']<1):
                # G1-phase
                if print_phase: print 'G1'
                V_t=culture[i,j]['V'][-1]
                A_t=culture[i,j]['B_Am'][-1] + culture[i,j]['B_Ad'][-1] # current Area
                ## save V0 data
                if culture[i,j]['t_in_G1'] == 0:
                    culture[i,j]['V0'].append(V_t)
                #stochastic transcription: # one promoter
                # NOTE(review): random.sample() here is numpy.random.sample
                # (uniform [0,1) float), not the stdlib random.sample.
                transcription = random.sample()
                if transcription > core_parameter_values['P_Cln']: mCLN_burst = 0.
                else: mCLN_burst = 1
                # setup integration input:
                Y0=array([culture[i,j][my_species][-1] for my_species in my_model_species] + [ culture[i,j][species][-1] for species in user_model.species])
                Y0[0] = Y0[0]+mCLN_burst # add transcript
                t_step = linspace(t,t+1,2)
                p=array([core_parameter_values[para] for para in ['k_d1','k_p1','k_d2','k_R_G1','k_Am_G1','k_Ad_G1','growth','k_d3','k_p2','k_d4']] + [A_t, V_t] + [user_model.parameter_values[p] for p in user_model.parameters])
                # integration:
                Y = integrate.odeint(merged_eq_system.dY_dt, Y0, t_step,(p,),rtol=tol,atol=tol) # hard coded equations from SBML file and scipy's odeint
                # Y,out = integrate.odeint(merged_eq_system.dY_dt, Y0, t_step,(p,),full_output=1) # hard coded equations from SBML file and scipy's odeint
                # print out
                # if out['message']=='Integration successful.':
                #     pass
                # else:
                #     print Y0
                #     print p
                #     print Y
                #     print out
                #     sys.exit(1)
                #save results
                Y = Y.T
                for index, my_species in enumerate(my_model_species):
                    culture[i,j][my_species].append(Y[index][-1])
                for index2, species in enumerate(user_model.species):
                    culture[i,j][species].append(Y[1+index+index2][-1])
                culture[i,j]['V'].append(culture[i,j]['B_Am'][-1]**1.5 + culture[i,j]['B_Ad'][-1]**1.5)
                culture[i,j]['t_in_G1']+=1
            elif (eval(S_condition, {'culture':culture, 'i':i, 'j':j, "__builtins__": None}) and culture[i,j]['t_in_G2']<1 and culture[i,j]['t_in_M']<1):
                # now S-phase:
                if print_phase: print 'S'
                if culture[i,j]['t_in_S']==0: # first S entry save time in G1
                    culture[i,j]['times_in_G1'].append(culture[i,j]['t_in_G1'])
                # setup integration input:
                Y0=array([culture[i,j][my_species][-1] for my_species in my_model_species] + [ culture[i,j][species][-1] for species in user_model.species])
                t_step = linspace(t,t+1,2)
                V_t=culture[i,j]['V'][-1]
                A_t=culture[i,j]['B_Am'][-1] + culture[i,j]['B_Ad'][-1] # current Area
                p=array([core_parameter_values[para] for para in ['k_d1','k_p1','k_d2','k_R_SG2M','k_Am_SG2M','k_Ad_SG2M','growth','k_d3','k_p2','k_d4']] + [A_t, V_t] + [user_model.parameter_values[p] for p in user_model.parameters])
                # integration:
                Y = integrate.odeint(merged_eq_system.dY_dt, Y0, t_step,(p,),rtol=tol,atol=tol) # hard coded equations from SBML file and scipy's odeint
                #save results
                Y=Y.T
                for index, my_species in enumerate(my_model_species):
                    culture[i,j][my_species].append(Y[index][-1])
                for index2, species in enumerate(user_model.species):
                    culture[i,j][species].append(Y[1+index+index2][-1])
                culture[i,j]['V'].append(culture[i,j]['B_Am'][-1]**1.5 + culture[i,j]['B_Ad'][-1]**1.5)
                culture[i,j]['t_in_S']+=1
            elif (eval(G2_condition, {'culture':culture, 'i':i, 'j':j, "__builtins__": None}) and culture[i,j]['t_in_M']<1):
                # now G2-phase:
                if print_phase: print 'G2'
                if culture[i,j]['t_in_G2']==0: # first S entry save time in G1
                    culture[i,j]['times_in_S'].append(culture[i,j]['t_in_S'])
                #stochastic transcription: # one promoter
                transcription = random.sample()
                if transcription > core_parameter_values['P_Clb']: mCLB_burst = 0.
                else: mCLB_burst = 1
                # setup integration input:
                Y0=array([culture[i,j][my_species][-1] for my_species in my_model_species] + [ culture[i,j][species][-1] for species in user_model.species])
                # Index 7 is mCLB in my_model_species.
                Y0[7] = Y0[7]+mCLB_burst # add transcript
                t_step = linspace(t,t+1,2)
                V_t=culture[i,j]['V'][-1]
                A_t=culture[i,j]['B_Am'][-1] + culture[i,j]['B_Ad'][-1] # current Area
                p=array([core_parameter_values[para] for para in ['k_d1','k_p1','k_d2','k_R_SG2M','k_Am_SG2M','k_Ad_SG2M','growth','k_d3','k_p2','k_d4']] + [A_t, V_t] + [user_model.parameter_values[p] for p in user_model.parameters])
                # integration:
                Y = integrate.odeint(merged_eq_system.dY_dt, Y0, t_step,(p,),rtol=tol,atol=tol) # hard coded equations from SBML file and scipy's odeint
                # print Y
                #save results
                Y=Y.T
                for index, my_species in enumerate(my_model_species):
                    culture[i,j][my_species].append(Y[index][-1])
                for index2, species in enumerate(user_model.species):
                    culture[i,j][species].append(Y[1+index+index2][-1])
                culture[i,j]['V'].append(culture[i,j]['B_Am'][-1]**1.5 + culture[i,j]['B_Ad'][-1]**1.5)
                culture[i,j]['t_in_G2']+=1
            elif (eval(M_condition, {'culture':culture, 'i':i, 'j':j, "__builtins__": None})):
                # now M-phase:
                if print_phase: print 'M'
                if culture[i,j]['t_in_M']== 0: # first S entry save time in G1 and the geneological age
                    culture[i,j]['times_in_G2'].append(culture[i,j]['t_in_G2'])
                # setup integration input:
                Y0 = array([ culture[i,j][my_species][-1] for my_species in my_model_species] + [ culture[i,j][species][-1] for species in user_model.species])
                t_step = linspace(t,t+1,2)
                V_t=culture[i,j]['V'][-1]
                A_t=culture[i,j]['B_Am'][-1] + culture[i,j]['B_Ad'][-1] # current Area
                p= array([core_parameter_values[para] for para in ['k_d1','k_p1','k_d2','k_R_SG2M','k_Am_SG2M','k_Ad_SG2M','growth','k_d3','k_p2','k_d4']] + [A_t, V_t] + [user_model.parameter_values[p] for p in user_model.parameters])
                # integration:
                Y = integrate.odeint(merged_eq_system.dY_dt, Y0, t_step,(p,),rtol=tol,atol=tol) # hard coded equations from SBML file and scipy's odeint
                #save results
                Y=Y.T
                for index, my_species in enumerate(my_model_species):
                    culture[i,j][my_species].append(Y[index][-1])
                for index2, species in enumerate(user_model.species):
                    culture[i,j][species].append(Y[1+index+index2][-1])
                culture[i,j]['V'].append(culture[i,j]['B_Am'][-1]**1.5 + culture[i,j]['B_Ad'][-1]**1.5)
                culture[i,j]['t_in_M']+=1
            else:
                # None of the phase conditions hold: the cell divides.
                if print_phase: print 'division'
                culture[i,j]['times_in_M'].append(culture[i,j]['t_in_M']) # save time in M
                #ready to divide..then let's check if there is enough space around
                n_ij = check_free(culture)
                # n_ij = 1
                if n_ij: # if there is space: divide
                    m_d=culture[i,j]['B_Am'][-1]**1.5/culture[i,j]['V'][-1] # division ratio mum: Area_m^2/3 / ( Area_m^2/3 + Area_d^2/3 )
                    d_d=1-m_d # division ratio daughter
                    culture[i,j]['V_div'].append(culture[i,j]['V'][-1]) # stats: add [V] at division to list
                    culture[i,j]['ratios'].append(d_d) # stats: add fraction given to daughter at division to list
                    d_species_init_cond = my_species_init_cond.copy() # update initial cond for new daughter
                    for partition_species in ['mCLN','Cln','B_R','mCLB','Clb']+user_model.species:
                        d_species_init_cond[partition_species] = culture[i,j][partition_species][-1] * d_d
                        culture[i,j][partition_species].append(culture[i,j][partition_species][-1] * m_d) # update mommy species values accounting for loss to daughter
                    d_species_init_cond['B_Am'] = culture[i,j]['B_Ad'][-1] # new daughter has size of mummies bud
                    d_species_init_cond['B_Ad'] = 0 # and daughter does not have a bud yet
                    culture[i,j]['B_Am'].append(culture[i,j]['B_Am'][-1])# mummy can grow a new daughter now, need to append here, or else this list is one value short every division
                    culture[i,j]['B_Ad'].append(0)# mummy can grow a new daughter now
                    culture[i,j]['V'].append(culture[i,j]['B_Am'][-1]**1.5) # update the volume of the mum
                    culture[i,j]['t_in_G1']=0 # reset mummy time in G1
                    culture[i,j]['t_in_S'] = 0 # mummy can enter S again
                    culture[i,j]['t_in_G2'] = 0 # reset mummy time in G2
                    culture[i,j]['t_in_M'] = 0 # mummy can enter M again
                    culture[i,j]['generation'] += 1 # update no. of generations mother
                    culture = initiate_core_cell(culture, d_species_init_cond) # new daughter with attributes inherited from mother
            # NOTE(review): population count is recomputed once per *cell*
            # inside this loop (not once per time step) -- confirm intended.
            d= culture['field']==0
            individuals.append(len([value for position, value in ndenumerate(d) if not value]))
        t += 1
        # if individuals: print individuals[-1], t
        # Stop once the grid is completely occupied.
        if individuals[-1] >= matrix_dim**2: break
        # if individuals[-1] >= 2: break
        # if t >= 1000:
        #     print 'time_break',individuals
        #     break
    ################# round all to 2 after komma and return culture ####################
    attrs = ['V_div','V','ratios','times_in_G1','times_in_G2','times_in_S','times_in_M','V0'] + my_model_species + user_model.species
    for X in attrs:
        for index,cell in enumerate(culture[X].flat):
            if cell: culture[X].flat[index]=list([[round(x,2), x][x<0.02] for x in cell])
    return culture
| 1.875 | 2 |
time_utils/__init__.py | weihuchao/py2_tools | 0 | 12766109 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-10-12 10:42
# @Author : weihuchao
import datetime
import os
import time
# Fallback UTC offset (in hours) used when the environment variable named
# below is not set; read in Chronos.__init__ via os.environ.get.
CHRONOS_DEFAULT_TIME_ZONE = 8
CHRONOS_DEFAULT_TIME_ZONE_NAME = "CHRONOS_DEFAULT_TIME_ZONE"
class ChronosTable(object):
    """Conversion table between time representations (stamp/str/datetime).

    NOTE(review): work in progress -- transform() and the _one_to_* helpers
    are unimplemented stubs.
    """

    def __init__(self, chronos):
        # Back-reference to the owning Chronos instance.
        self._chronos = chronos

    def transform(self, from_type, to_type):
        # Unimplemented; the sketched logic below was left commented out.
        # if from_type == 1:
        #     if self._chronos._stamp == 0:
        #         self._chronos.now()
        # elif from_type == 2:
        #     if not self._chronos._str:
        #         self.
        pass

    def _one_to_one(self):
        # Stub: stamp -> stamp conversion not implemented.
        pass

    def _one_to_two(self):
        # Stub: stamp -> string conversion not implemented.
        pass

    def _one_to_three(self):
        # Stub: stamp -> datetime conversion not implemented.
        pass
# class ChronosUtils(object):
#
# def __init__(self, *args):
# self._stamp = 0
# self._datetime = None
# self._str = ""
#
# self._table = ChronosTable(self)
# self._auto_now = init_value == 0
#
# if isinstance(init_value, datetime.datetime):
# self._datetime = init_value
# self.from_type = 3
# else:
# try:
# self._stamp = float(init_value)
# self.from_type = 1
# if self._stamp > 9999999999:
# self._stamp = self._stamp / 1000.0
# elif self._stamp <= 0:
# self._stamp = time.time()
# except ValueError:
# self._str = init_value
# self.from_type = 2
#
# @classmethod
# def get_stamp(cls, *args):
#
# def get_result(self, func_name, **kwargs):
# return getattr(self, func_name)(**kwargs)
#
# def int(self):
# # self._check_table(1)
# return int(self._stamp)
#
# def float(self):
# # self._check_table(1)
# return self._stamp
#
# def millisecond(self):
# # self._check_table(1)
# return int(self._stamp * 1000)
#
# def str(self):
# return self._str
#
# def dt(self):
# pass
# import requests
#
# requests.get()
class Chronos(object):
    """
    Chronos (Greek: Χρόνος) -- the primordial god of time in Greek
    mythology; here, a wrapper holding one or more timestamps (seconds
    since the epoch) as ``self.children``.
    """

    def __init__(self, *args, **kwargs):
        time_zone = kwargs.get("time_zone")
        # NOTE(review): when a time_zone kwarg IS supplied, self.time_zone
        # is never assigned -- likely a bug; confirm intended behavior.
        if not time_zone:
            self.time_zone = os.environ.get(CHRONOS_DEFAULT_TIME_ZONE_NAME, CHRONOS_DEFAULT_TIME_ZONE)
        self.children = []
        # NOTE(review): each positional arg is expected to be an iterable
        # of (init_value[, format_str]); the default `[0]` yields the bare
        # int 0, and `*0` below raises TypeError -- verify callers.
        for init_args in args or [0]:
            self.children.append(self._get_init_stamp(*init_args))

    def _get_init_stamp(self, init_value, format_str=None):
        # Normalize one input (datetime / numeric / string) to a float
        # Unix timestamp in seconds.
        if format_str:
            return time.mktime(time.strptime(init_value, format_str))
        else:
            if isinstance(init_value, datetime.datetime):
                return time.mktime(init_value.timetuple())
            else:
                try:
                    stamp = float(init_value)
                    # Values above 10 digits are taken as milliseconds.
                    if stamp > 9999999999:
                        stamp = stamp / 1000.0
                    # Non-positive values mean "now".
                    elif stamp <= 0:
                        stamp = time.time()
                except ValueError:
                    # NOTE(review): format_str is None on this branch, so
                    # strptime raises TypeError here -- confirm.
                    return time.mktime(time.strptime(init_value, format_str))
                return stamp

    def _pack_single(self, func_name, **kwargs):
        # NOTE(review): broken WIP -- children are plain floats (no
        # get_result method) and self.single is never assigned, so this
        # raises at runtime. Presumably children were meant to be
        # ChronosShip-like objects; verify before use.
        ret = [child.get_result(func_name, **kwargs) for child in self.children]
        return ret[0] if self.single else ret

    # Mutating helpers
    # ---------
    def delta(self, year=0, month=0, day=0, hour=0, minute=0, second=0):
        # Shift every stored timestamp by the given offset. Years/months
        # are approximated as 365/30 days respectively.
        delta_val = 0
        if year != 0:
            delta_val += year * 365 * 24 * 3600
        if month != 0:
            delta_val += month * 30 * 24 * 3600
        if day != 0:
            delta_val += day * 24 * 3600
        if hour != 0:
            delta_val += hour * 3600
        if minute != 0:
            delta_val += minute * 60
        if second != 0:
            delta_val += second
        self.children = [child + delta_val for child in self.children]

    # Output helpers
    # ---------
    def int(self, **kwargs):
        return self._pack_single("int", **kwargs)

    def float(self, **kwargs):
        return self._pack_single("float", **kwargs)

    def millisecond(self, **kwargs):
        # NOTE(review): unlike its siblings, kwargs are accepted but not
        # forwarded here -- confirm whether that is intentional.
        return self._pack_single("millisecond")

    def str(self, **kwargs):
        return self._pack_single("str", **kwargs)

    def dt(self, **kwargs):
        return self._pack_single("dt", **kwargs)
"""
时间戳
时间字符串
datetime对象
所有计算都是以时间戳为基本
now = datetime.datetime.now()
time.mktime(now.timetuple())
"""
class ChronosShip(object):
    """Holder for one point in time in three forms: float stamp, datetime
    object and formatted string.

    NOTE(review): work in progress -- the conversion helpers are partly
    stubs and int() returns None.
    """

    # Format used when rendering the timestamp as a string.
    STR_FORMAT = "%Y-%m-%d %H:%M:%S"

    def __init__(self):
        self.stamp = 0        # Unix timestamp (seconds)
        self.datetime = None  # datetime.datetime mirror of `stamp`
        self.str = ""         # formatted mirror of `datetime`

    # Earlier draft of the stamp -> string conversion, kept for reference:
    # def _stamp_to_str(self, stamp=0):
    #     if stamp:
    #         self.stamp = stamp
    #     else:
    #         self.now()
    #
    #     self.datetime = datetime.datetime.fromtimestamp(self.stamp)
    #     return self.datetime.strftime(self.STR_FORMAT)

    def _str_to_stamp(self):
        # Stub: parsing self.str back into a timestamp not implemented.
        pass

    def _stamp_to_datetime(self):
        # Derive the datetime mirror from the stored stamp (local time).
        self.datetime = datetime.datetime.fromtimestamp(self.stamp)

    def _stamp_to_str(self):
        # Render the datetime mirror; requires _stamp_to_datetime first.
        self.str = self.datetime.strftime(self.STR_FORMAT)

    def int(self):
        # NOTE(review): bare return -- yields None instead of int(self.stamp).
        return
if __name__ == '__main__':
    # Manual smoke test (Python 2 print statements).
    # NOTE(review): Chronos() with no args iterates the default [0] and
    # unpacks the int 0 with `*`, which raises TypeError -- see __init__.
    print Chronos().int()
    print Chronos().float()
    print Chronos().millisecond()
| 3.078125 | 3 |
src/server/requestHandlers/defaultHandler.py | Hiestaa/my-tornado-media-library | 1 | 12766110 | <filename>src/server/requestHandlers/defaultHandler.py
# -*- coding: utf8 -*-
from __future__ import unicode_literals
from tornado.web import RequestHandler, HTTPError
import random
import logging
from conf import Conf
class DefaultHandler(RequestHandler):
    """
    Handle every request that has no request handler.
    This could display a proper 404 Error template.
    For now, it only raises a 404 Http error.
    """
    def get(self, request):
        # The /login route must never reach this fallback: it should be
        # served by the main app's login page.
        if request == 'login':
            raise Exception('/login route should not be hit. \
                Try to login using main app login page.')
        # Anything else is an unknown route: log it and answer 404.
        logging.error("Unable to find item: %s" % request)
        raise HTTPError(404)
| 2.859375 | 3 |
git_sentry/parsing/team_config.py | git-sentry/git-sentry | 0 | 12766111 | <reponame>git-sentry/git-sentry
from __future__ import annotations
from git_sentry.logging.printer import pad
class TeamConfig:
    """Desired team configuration: member and admin logins plus repository
    permissions."""

    def __init__(self, members, admins, repos):
        self._members = members  # list of plain member logins
        self._admins = admins    # list of admin (maintainer) logins
        self._repos = repos      # mapping: repository name -> permission

    def members(self):
        """Return the list of plain member logins."""
        return self._members

    def admins(self):
        """Return the list of admin logins."""
        return self._admins

    def repos(self):
        """Return the mapping of repository name to permission."""
        return self._repos

    def diff(self, older_config: TeamConfig) -> TeamConfig:
        """Return what *self* adds relative to *older_config*.

        A member is new when it was neither a member nor an admin before;
        an admin is new when it was not an admin before; a repo entry is
        new when its permission differs from (or is absent in) the old
        config.
        """
        new_members = [m for m in self.members() if m not in older_config.members() and m not in older_config.admins()]
        new_admins = [m for m in self.admins() if m not in older_config.admins()]
        new_repos = {r: permission for r, permission in self.repos().items() if permission != older_config.repos().get(r)}
        return TeamConfig(new_members, new_admins, new_repos)

    def length(self):
        """Total number of configured entries (members + admins + repos)."""
        return len(self.members()) + len(self.admins()) + len(self.repos())

    def __repr__(self):
        output = []
        if self.members():
            output += [pad('Members:', 1)]
            output += [pad(member, 2) for member in self.members()]
        if self.admins():
            output += [pad('Admins:', 1)]
            output += [pad(admin, 2) for admin in self.admins()]
        repo_configs = self.repos()
        if repo_configs:
            output += [pad('Repositories:', 1)]
            output += [pad(f'{repo_name} : {permission}', 2) for repo_name, permission in repo_configs.items()]
        return '\n'.join(output)

    def __eq__(self, other):
        # Fix: return NotImplemented for foreign types so Python falls back
        # to the other operand's __eq__ instead of raising AttributeError
        # on other.members().
        if not isinstance(other, TeamConfig):
            return NotImplemented
        return self.members() == other.members() and self.admins() == other.admins() and self.repos() == other.repos()
| 2.40625 | 2 |
src/waldur_azure/migrations/0019_add_offer_field.py | geant-multicloud/MCMS-mastermind | 26 | 12766112 | # Generated by Django 2.2.20 on 2021-05-21 15:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds ``Image.offer`` and re-orders images."""

    dependencies = [
        ('waldur_azure', '0018_drop_spl'),
    ]

    operations = [
        # Images are now ordered by publisher, offer, name, then sku.
        migrations.AlterModelOptions(
            name='image', options={'ordering': ['publisher', 'offer', 'name', 'sku']},
        ),
        # New non-null 'offer' column; the 'offer' default only backfills
        # existing rows (preserve_default=False drops it afterwards).
        migrations.AddField(
            model_name='image',
            name='offer',
            field=models.CharField(default='offer', max_length=255),
            preserve_default=False,
        ),
    ]
| 1.625 | 2 |
spml/trainer/misc_utils.py | hellomoto-ai/splatoon2-ml | 0 | 12766113 | import os
import csv
import collections
import numpy as np
class StatsTracker(collections.defaultdict):
    """Running aggregator over repeated metric dicts.

    Keys ending in ``_min``/``_max`` track the extreme seen so far; all
    other keys track an incremental mean over the update steps. Missing
    keys read as 0.0 (defaultdict of float).
    """

    def __init__(self):
        super().__init__(float)
        # Number of the *next* update step (1-based).
        self.step = 1

    def update(self, data):
        """Fold one dict of (possibly array-valued) metrics into the stats."""
        for key, raw in data.items():
            if key.endswith('_min'):
                candidate = np.min(raw)
                self[key] = min(self.get(key, candidate), candidate)
            elif key.endswith('_max'):
                candidate = np.max(raw)
                self[key] = max(self.get(key, candidate), candidate)
            else:
                # Incremental mean: m_k = m_{k-1} + (x_k - m_{k-1}) / k
                mean_val = np.mean(raw)
                self[key] = self[key] + (mean_val - self[key]) / self.step
        self.step += 1
class CSVWriter:
    """Row-by-row CSV writer that flushes *fileobj* after every row."""

    def __init__(self, fields, fileobj):
        """Bind to *fileobj* and immediately emit the header for *fields*."""
        writer = csv.DictWriter(fileobj, fieldnames=fields)
        writer.writeheader()
        self.fileobj = fileobj
        self.writer = writer

    def write(self, **kwargs):
        """Write one row; keyword names must match the declared fields."""
        self.writer.writerow(dict(kwargs))
        self.fileobj.flush()
def ensure_dir(filepath):
    """Create the parent directory of *filepath* if it does not exist.

    Fix: a bare filename (no directory component) is now a no-op --
    os.path.dirname returns '' in that case and makedirs('') would raise
    FileNotFoundError.
    """
    dirpath = os.path.dirname(filepath)
    if dirpath:
        os.makedirs(dirpath, exist_ok=True)
| 3.203125 | 3 |
overwatch.py | Kami-no/xerxes | 1 | 12766114 | #!/usr/bin/env python3
"""
Daemon to watch over Zabbix
"""
from pyzabbix import ZabbixAPI
from aiohttp import web
from os import getenv
import logging
# Zabbix endpoint; credentials are read from the environment so they are
# never committed to source control.
zabbix_srv = 'https://zabbix.company.ru'
zabbix_user = getenv('secret_zabbix_user')
zabbix_pass = getenv('secret_zabbix_pass')
# Only hosts in these Zabbix host groups are inspected.
zabbix_groups = ['Production']
def get_all_versions():
    """Collect per-application version counts from Zabbix.

    Logs in with the module-level credentials, restricts the query to the
    host groups in ``zabbix_groups`` and scans every
    ``service_ping[*,service,version]`` item.

    :return: nested dict ``{app_name: {version_string: host_count}}``;
        items whose last value is '0' (no version reported) are skipped.
    """
    zapi = ZabbixAPI(zabbix_srv)
    zapi.login(zabbix_user, zabbix_pass)
    # Lazy %-style args instead of eager string formatting.
    logging.info('Connected to Zabbix API Version %s', zapi.api_version())

    # Resolve the IDs of the host groups we are interested in.
    groups = zapi.hostgroup.get(output=['itemid', 'name'])
    listed = [group['groupid'] for group in groups if group['name'] in zabbix_groups]

    # Fetch all version items for those groups.
    query = {'key_': 'service_ping[*,service,version]'}
    items = zapi.item.get(
        groupids=listed,
        search=query,
        searchWildcardsEnabled=True,
        output=['name', 'lastvalue'])

    output = {}
    for item in items:
        # '0' means the service reported no version; skip it.
        if item['lastvalue'] != '0':
            # Item names look like 'service_ping["<app>",...]'; the app
            # name is the first quoted segment.
            app = item['name'].split('"')[1]
            ver = item['lastvalue']
            versions = output.setdefault(app, {})
            versions[ver] = versions.get(ver, 0) + 1
    return output
def get_current_versions(data):
    """Reduce raw version counts to the dominant version per application.

    :param data: nested dict ``{app: {version: host_count}}``
    :return: dict with two keys -- ``'most'`` maps every app to its most
        common version; ``'multi'`` maps only the apps running several
        versions to the full list, ordered most common first.
    """
    output = {'multi': {}, 'most': {}}
    for app, versions in data.items():
        if len(versions) == 1:
            # Single version deployed: shortcut, no 'multi' entry.
            output['most'][app] = next(iter(versions))
        else:
            ranked = sorted(versions, key=versions.get, reverse=True)
            output['most'][app] = ranked[0]
            output['multi'][app] = ranked
    return output
async def get_it(request):
    """
    Get data from Zabbix.

    Serves both routes: '/' returns the full most/multi version report;
    '/{data}' returns just ``{"version": ...}`` for that application
    ('N/A' when the app is unknown).

    :param request: parameters
    :type request: aiohttp.web_request.Request
    :return: information about versions in json
    :rtype: aiohttp.json_response.Response
    """
    # Optional path segment selecting a single application.
    app = request.match_info.get('data', None)
    logging.info('incoming: %s' % app)
    data = get_all_versions()
    output = get_current_versions(data)
    if app:
        # Single-app mode: answer with just that app's dominant version.
        if app in output['most']:
            version = output['most'][app]
        else:
            version = 'N/A'
            logging.error('app not found: %s' % app)
        output = {'version': version}
    logging.info('output: %s' % output)
    return web.json_response(output)
if __name__ == "__main__":
    # NOTE(review): the bare string below is a no-op statement used as a
    # comment, kept byte-identical here.
    'Setup logging'
    logging.basicConfig(format='xerxes_overwatch - %(levelname)s - %(message)s', level=logging.WARNING)
    # Both the bare root and the single-app route go to the same handler.
    app = web.Application()
    app.add_routes([
        web.get('/{data}', get_it),
        web.get('/', get_it)])
    web.run_app(app, port=8080)
| 2.359375 | 2 |
students/K33401/Tikhonova_Elena/Lr2/django_project_tikhonova/project_first_app/migrations/0005_auto_20201019_1532.py | TikhonovaElena/ITMO_ICT_WebDevelopment_2020-2021 | 0 | 12766115 | <filename>students/K33401/Tikhonova_Elena/Lr2/django_project_tikhonova/project_first_app/migrations/0005_auto_20201019_1532.py
# Generated by Django 3.1.2 on 2020-10-19 12:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: tweaks License.type and Owner.birthday."""

    dependencies = [
        ('project_first_app', '0004_auto_20201019_0022'),
    ]

    operations = [
        # NOTE(review): choices=[] leaves License.type with no valid
        # choices at this point in history -- verify against the model.
        migrations.AlterField(
            model_name='license',
            name='type',
            field=models.CharField(choices=[], max_length=3),
        ),
        # Owner.birthday becomes a plain (required) DateField.
        migrations.AlterField(
            model_name='owner',
            name='birthday',
            field=models.DateField(),
        ),
    ]
| 1.679688 | 2 |
packaging/setup/plugins/ovirt-engine-common/ovirt-engine/db/config.py | UranusBlockStack/ovirt-engine | 0 | 12766116 | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Config plugin."""
import gettext
import os
from otopi import plugin, util
from ovirt_engine_setup.engine import constants as oenginecons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
    """Config plugin.

    Seeds the engine DB backup directory default and validates that the
    configured path exists before setup proceeds.
    """
    def __init__(self, context):
        super(Plugin, self).__init__(context=context)
    @plugin.event(
        stage=plugin.Stages.STAGE_INIT,
    )
    def _init(self):
        # Seed the backup dir with the packaged default only if unset.
        self.environment.setdefault(
            oenginecons.ConfigEnv.OVIRT_ENGINE_DB_BACKUP_DIR,
            oenginecons.FileLocations.OVIRT_ENGINE_DEFAULT_DB_BACKUP_DIR
        )
    @plugin.event(
        stage=plugin.Stages.STAGE_VALIDATION,
        condition=lambda self: self.environment[oenginecons.CoreEnv.ENABLE],
    )
    def _validation(self):
        # Fail early with a translated message when the backup dir is missing.
        path = self.environment[
            oenginecons.ConfigEnv.OVIRT_ENGINE_DB_BACKUP_DIR
        ]
        if not os.path.exists(path):
            raise RuntimeError(
                _(
                    'Backup path {path} not found'
                ).format(
                    path=path,
                )
            )
| 1.8125 | 2 |
config.py | karan28598/sherlack | 0 | 12766117 | wolframalpha = dict(
app_id = ''
) | 1.125 | 1 |
acceptance_tests/04-xanes-scan.py | mrakitin/profile_collection-srx | 3 | 12766118 | <filename>acceptance_tests/04-xanes-scan.py
print('4. testing integrated scan functions:')
print('testing xanes')
# Acceptance test: scan around 7112 eV (presumably the Fe K-edge — confirm),
# 2 eV steps below the edge and 5 eV steps across/above it, 0.2 s per point.
xanes(erange = [7112-30, 7112-20, 7112+30],
      estep = [2, 5],
      harmonic = None,
      acqtime=0.2, roinum=1, i0scale = 1e8, itscale = 1e8,samplename='test',filename='test')
| 1.59375 | 2 |
sga/tests/management_test.py | mitodl/sga-l | 3 | 12766119 | """
Test management commands
"""
from io import StringIO
from sga.management.commands.createmockdata import CreateMockDataCommand
from sga.tests.common import SGATestCase
class ManagementTest(SGATestCase):
    """Tests for the project's custom Django management commands."""

    def test_create_mock_data(self):
        """The createmockdata command reports success on its stdout stream."""
        captured = StringIO()
        CreateMockDataCommand().execute(stdout=captured)
        self.assertIn("Successfully created mock data.", captured.getvalue())
| 2.46875 | 2 |
savvy_scheduler/check_scheduler.py | roshanxzy/system-programming | 2 | 12766120 | <gh_stars>1-10
#!/usr/bin/python3
from sys import argv
from os.path import isfile
# Module-level scratch state; LOGFILE is the gthread scheduler trace to verify.
processes = []
counter = 0
LOGFILE = "../gthread.log"
def fail():
    """Report a verification failure and abort with exit status 1."""
    print("Failed!")
    raise SystemExit(1)
def main():
    """Parse the scheme argument, load the scheduler log, dispatch a checker.

    Exits with status 1 when no scheme is given. Reads LOGFILE and splits
    each line into whitespace-separated tokens before validation.
    """
    if len(argv) != 2:
        print("Usage: python3 check_scheduler.py <scheme>")
        print("Acceptable schemes are: fcfs, sjf, psrtf, pri, ppri, rr")
        exit(1)

    # Fix: the original leaked the log file handle; 'with' closes it.
    with open(LOGFILE, 'r') as log:
        lines = [l.split() for l in log.readlines()]

    # Dispatch table replaces the original if/elif chain.
    checkers = {
        'sjf': check_sjf,
        'psrtf': check_psrtf,
        'fcfs': check_fcfs,
        'rr': check_rr,
        'pri': check_pri,
        'ppri': check_ppri,
    }
    checker = checkers.get(argv[1])
    # As in the original, an unrecognized scheme is silently ignored.
    if checker is not None:
        checker(lines)
def my_find(processes, name):
    """Return the index of the process dict whose 'name' is *name*, else -1."""
    for index, proc in enumerate(processes):
        if proc['name'] == name:
            return index
    return -1
# For comparators that can't get interrupted
def validate_simple_processes(lines, key):
    """Replay a non-preemptive scheduler trace and check pick order.

    *lines* are tokenized log lines; *key* is the comparator a correct
    scheduler would minimize over the ready set when switching. Each
    'Switched' must pick min(processes, key=key); each 'Ended' must match
    the last started thread. Exits via fail()/assert on violation.
    """
    i = 0
    processes = []
    last_started = None
    while i < len(lines):
        if lines[i][0] == "Registered":
            # Arrival time is approximated by the line index.
            processes.append({'name': lines[i][1], 'arrival':i,'length':lines[i][2], 'priority':lines[i][3]})
            i += 1
        elif lines[i][0] == "Switched":
            tid = lines[i][2]
            # tid '000' marks the idle/terminator thread: stop checking.
            if tid == '000':
                break
            p = min(processes, key=key)
            assert(p['name'] == tid)
            last_started = tid
            i += 1
        elif lines[i][0] == "Ended":
            assert(lines[i][1] == last_started)
            # NOTE(review): 'tid' here is the one bound by the last 'Switched'
            # branch — a NameError if 'Ended' ever precedes any 'Switched'.
            idx = my_find(processes, tid)
            if idx == -1:
                fail()
            processes.pop(idx)
            i += 1
    assert(len(processes) == 0)
    print("Success!")
    return
def check_sjf(lines):
    # Shortest job first: shortest length wins, arrival index breaks ties.
    validate_simple_processes(lines, lambda obj: (obj['length'], obj['arrival']))
def check_psrtf(lines):
    # Preemptive shortest remaining time first: not implemented.
    print("PSRTF not yet supported")
def check_fcfs(lines):
    # First come, first served: earliest arrival wins.
    validate_simple_processes(lines, lambda obj: (obj['arrival']))
def check_rr(lines):
    # Round robin: not implemented.
    print("RR not yet supported")
def check_pri(lines):
    # Non-preemptive priority: smallest priority value wins.
    validate_simple_processes(lines, lambda obj: (obj['priority']))
def check_ppri(lines):
    # Preemptive priority: not implemented.
    print("PPRI not yet supported")
if __name__ == "__main__":
    main()
| 2.703125 | 3 |
Blackjack.py | dprange/Blackjack | 0 | 12766121 | #Name: Blackjack
#Version: v.010
#Authour: dp
#Date: Aug2019
import sys
import random
import time
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
class Card(object):
    """A single playing card identified by a face value and a suit."""

    def __init__(self, value, suit):
        self.value = value
        self.suit = suit

    def __str__(self):
        description = '%s of %s' % (self.value, self.suit)
        return description
class Deck(object):
    """A standard 52-card deck that shuffles itself and deals into hands."""

    def __init__(self):
        self.cards = []
        self.build()

    def build(self):
        """(Re)populate the deck with all 52 cards and shuffle it."""
        suits = ["spade", "club", "heart", "diamond"]
        faces = [2, 3, 4, 5, 6, 7, 8, 9, 10, "jack", "queen", "king", "ace"]
        for suit in suits:
            for face in faces:
                self.cards.append(Card(face, suit))
        self.shuffle()

    def add_card(self, card):
        self.cards.append(card)

    def pop_card(self, i=-1):
        # Cards are handed out as strings; Hand.total() keys off the first
        # character of that string.
        return str(self.cards.pop(i))

    def move_card(self, hand, num):
        """Deal *num* cards into *hand*, rebuilding the deck when empty.

        Bug fix: the original tested the module-level ``deck`` global here,
        which only worked because the method happened to be called on that
        same global. Check this instance's own cards instead.
        """
        for i in range(num):
            if not self.cards:
                self.build()
            newcard = self.pop_card()
            hand.add_card(newcard)

    def __str__(self):
        res = []
        for card in self.cards:
            res.append(str(card))
        return '\n'.join(res)

    def shuffle(self):
        random.shuffle(self.cards)
class Hand(Deck):
    """A dealt hand of card strings; inherits deck plumbing but starts empty."""
    def __init__(self, label=''):
        self.label = label
        self.cards = []
    def total(self):
        """Blackjack value of the hand; aces drop from 11 to 1 on bust."""
        # Keyed on the *first character* of the card string; '1' covers "10 of ...".
        rank_values = {'2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8,'9':9, '1':10, 'j':10, 'q':10, 'k':10, 'a':11}
        hand_total = 0
        ace_counter = 0
        for i in range(len(self.cards)):
            cardvalue = self.cards[i][0]
            hand_total += rank_values[cardvalue]
            if cardvalue == 'a':
                ace_counter += 1
            # Demote one soft ace (11 -> 1) as soon as the running total busts;
            # checked every card, so multiple aces demote one at a time.
            if (ace_counter > 0 and hand_total > 21):
                hand_total -= 10
                ace_counter -= 1
        return hand_total
# Shared game state: the two hands in play and the shoe they draw from.
player_hand = Hand()
dealer_hand = Hand()
deck = Deck()
# Next free row index in the info listbox ('global' at module level is a no-op).
global info_i
info_i = 0
def update_info_gui(text):
    """Append *text* as the next line of the info listbox."""
    global info_i
    top.info_listbox.insert(info_i,text)
    info_i += 1
def update_player_gui():
    """Redraw the player listbox and hand-total label from player_hand."""
    top.player_listbox.delete(0,100)
    top.player_hand_total_lbl.configure(text=player_hand.total())
    for i in range(len(player_hand.cards)):
        top.player_listbox.insert(i,player_hand.cards[i])
def update_dealer_gui(show):
    """Redraw the dealer listbox; show=1 hides the hole card, show=2 reveals all."""
    top.dealer_listbox.delete(0,100)
    if show == 1:
        # Hole card hidden: blank the total and show only the up-card.
        top.dealer_hand_total_lbl.configure(text='-')
        top.dealer_listbox.insert(0,'Hidden')
        top.dealer_listbox.insert(1,dealer_hand.cards[1])
    if show == 2:
        top.dealer_hand_total_lbl.configure(text=dealer_hand.total())
        for i in range(len(dealer_hand.cards)):
            top.dealer_listbox.insert(i,dealer_hand.cards[i])
def deal_button_action():
    """Start a new round: clear the UI, deal two cards each, refresh displays."""
    top.info_listbox.delete(0,100)
    #top.player_listbox2.place_forget()
    # NOTE(review): this binds a *local* info_i (no 'global' statement), so the
    # module-level insert counter is never actually reset — confirm intent.
    info_i = 0
    update_info_gui('Dealer Deals a new hand.')
    player_hand.cards = []
    dealer_hand.cards = []
    # Alternate cards between player and dealer, as at a real table.
    deck.move_card(player_hand, 1)
    deck.move_card(dealer_hand, 1)
    deck.move_card(player_hand, 1)
    deck.move_card(dealer_hand, 1)
    update_player_gui()
    time.sleep(.3)
    update_dealer_gui(1)
    if player_hand.total() == 21:
        update_info_gui('BLACKJACK!!!')
    # Insurance/ace handling is deliberately a no-op (no betting implemented).
    if dealer_hand.cards[1][0] == 'a':
        pass
        #print("Dealer is showing an Ace")
        #print("but we are not betting so it does not matter.")
    #does player have doubles to split.
    #double down - take one card and stay.
    sys.stdout.flush()
def hit_button_action():
    """Deal the player one more card, refresh the display, flag a bust."""
    update_info_gui('---Player Hits---')
    deck.move_card(player_hand, 1)
    update_player_gui()
    new_total = player_hand.total()
    if new_total > 21:
        update_info_gui("BUST!")
    sys.stdout.flush()
def stand_button_action():
    """Play out the dealer (hits below 17), then announce the round result."""
    while dealer_hand.total() < 17:
        update_info_gui("---Dealer Hits---")
        deck.move_card(dealer_hand, 1)
        update_dealer_gui(2)
        if dealer_hand.total() > 21:
            update_info_gui("Dealer BUST!")
    update_dealer_gui(2)
    # NOTE(review): a player who already busted can still "win" here when the
    # dealer also busts — the player's own bust is never re-checked. Confirm.
    if (player_hand.total() > dealer_hand.total() or dealer_hand.total() > 21):
        update_info_gui("Player Wins!")
    elif player_hand.total() == dealer_hand.total():
        update_info_gui("Push!")
    else:
        update_info_gui("Dealer Wins!")
    sys.stdout.flush()
def split_button_action():
    """Unfinished split feature: briefly places a second hand listbox, then
    immediately hides and recreates it. The TODO steps are sketched below."""
    pass
    #top.player_listbox2 = tk.Listbox(top)
    top.player_listbox2.place(relx=0.400, rely=0.549, relheight=0.324
            , relwidth=0.352)
    top.player_listbox2.configure(background="white")
    top.player_listbox2.configure(font="TkFixedFont")
    top.player_listbox2.configure(selectbackground="#c4c4c4")
    top.player_listbox2.configure(width=124)
    #move one card to this list.
    #play hand one
    #play hand two
    #win/lose
    #clean up
    #time.sleep(1)
    top.player_listbox2.place_forget()
    top.player_listbox2 = tk.Listbox(top)
    sys.stdout.flush()
def init(top, gui, *args, **kwargs):
    """PAGE boilerplate: stash the root window and GUI object in globals."""
    global w, top_level, root
    w = gui
    top_level = top
    root = top
def destroy_window():
    """PAGE boilerplate: tear down the top-level window."""
    global top_level
    top_level.destroy()
    top_level = None
def vp_start_gui():
    '''Starting point when module is the main routine.'''
    global val, w, root, top
    root = tk.Tk()
    top = Toplevel1 (root)
    init(root, top)
    root.mainloop()
    # NOTE(review): leftover debug print after the main loop exits.
    print(top)
    w = None
def create_Toplevel1(root, *args, **kwargs):
    '''Starting point when module is imported by another program.'''
    global w, w_win, rt, top
    rt = root
    w = tk.Toplevel (root)
    top = Toplevel1 (w)
    init(w, top, *args, **kwargs)
    return (w, top)
def destroy_Toplevel1():
    """Destroy the window created by create_Toplevel1."""
    global w
    w.destroy()
    w = None
class Toplevel1:
    def __init__(self, top=None):
        '''This class configures and populates the toplevel window.
           top is the toplevel containing window.'''
        # Widget geometry and colors below are PAGE-generated; prefer editing
        # them through the PAGE designer rather than by hand.
        _bgcolor = '#d9d9d9'  # X11 color: 'gray85'
        _fgcolor = '#000000'  # X11 color: 'black'
        _compcolor = '#d9d9d9' # X11 color: 'gray85'
        _ana1color = '#d9d9d9' # X11 color: 'gray85'
        _ana2color = '#ececec' # Closest X11 color: 'gray92'
        font9 = "-family gothic -size 15 -weight normal -slant roman " \
            "-underline 0 -overstrike 0"
        top.geometry("352x346+2220+7")
        top.title("Blackjack")
        top.configure(highlightcolor="black")
        # Player's cards.
        self.player_listbox = tk.Listbox(top)
        self.player_listbox.place(relx=0.057, rely=0.549, relheight=0.324
            , relwidth=0.352)
        self.player_listbox.configure(background="white")
        self.player_listbox.configure(font="TkFixedFont")
        self.player_listbox.configure(selectbackground="#c4c4c4")
        self.player_listbox.configure(width=124)
        # Second hand listbox (split feature); created but not placed yet.
        self.player_listbox2 = tk.Listbox(top)
        self.Label1 = tk.Label(top)
        self.Label1.place(relx=0.028, rely=0.029, height=15, width=109)
        self.Label1.configure(activebackground="#f9f9f9")
        self.Label1.configure(text='''Dealers Hand''')
        self.Label2 = tk.Label(top)
        self.Label2.place(relx=0.028, rely=0.491, height=15, width=109)
        self.Label2.configure(activebackground="#f9f9f9")
        self.Label2.configure(text='''Player Hand''')
        # Dealer's cards.
        self.dealer_listbox = tk.Listbox(top)
        self.dealer_listbox.place(relx=0.057, rely=0.087, relheight=0.353
            , relwidth=0.352)
        self.dealer_listbox.configure(background="white")
        self.dealer_listbox.configure(font="TkFixedFont")
        self.dealer_listbox.configure(selectbackground="#c4c4c4")
        self.dealer_listbox.configure(width=124)
        # Running game commentary.
        self.info_listbox = tk.Listbox(top)
        self.info_listbox.place(relx=0.450, rely=0.087, relheight=0.353
            , relwidth=0.500)
        self.info_listbox.configure(background="white")
        self.info_listbox.configure(font="TkFixedFont")
        self.info_listbox.configure(selectbackground="#c4c4c4")
        self.info_listbox.configure(width=124)
        self.deal_button = tk.Button(top)
        self.deal_button.place(relx=0.057, rely=0.896, height=25, width=56)
        self.deal_button.configure(activebackground="#f9f9f9")
        self.deal_button.configure(command=deal_button_action)
        self.deal_button.configure(text='''Deal''')
        self.hit_button = tk.Button(top)
        self.hit_button.place(relx=0.199, rely=0.896, height=25, width=49)
        self.hit_button.configure(activebackground="#f9f9f9")
        self.hit_button.configure(command=hit_button_action)
        self.hit_button.configure(text='''Hit''')
        self.stand_button = tk.Button(top)
        self.stand_button.place(relx=0.313, rely=0.896, height=25, width=63)
        self.stand_button.configure(activebackground="#f9f9f9")
        self.stand_button.configure(command=stand_button_action)
        self.stand_button.configure(text='''Stand''')
        self.split_button = tk.Button(top)
        self.split_button.place(relx=0.483, rely=0.896, height=25, width=63)
        self.split_button.configure(activebackground="#f9f9f9")
        self.split_button.configure(command=split_button_action)
        self.split_button.configure(text='''Split''')
        # Double Down button has no command bound yet.
        self.Button5 = tk.Button(top)
        self.Button5.place(relx=0.653, rely=0.896, height=25, width=105)
        self.Button5.configure(activebackground="#f9f9f9")
        self.Button5.configure(text='''Double Down''')
        self.dealer_hand_total_lbl = tk.Label(top)
        self.dealer_hand_total_lbl.place(relx=0.313, rely=0.015, height=22
            , width=19)
        self.dealer_hand_total_lbl.configure(activebackground="#f9f9f9")
        self.dealer_hand_total_lbl.configure(font=font9)
        #self.dealer_hand_total_lbl.configure(text='''0''')
        self.player_hand_total_lbl = tk.Label(top)
        self.player_hand_total_lbl.place(relx=0.313, rely=0.477, height=22
            , width=20)
        self.player_hand_total_lbl.configure(activebackground="#f9f9f9")
        self.player_hand_total_lbl.configure(font=font9)
        self.player_hand_total_lbl.configure(text='''0''')
if __name__ == '__main__':
    # Launch the Tkinter main loop when run as a script.
    vp_start_gui()
semana-02/lista-exercicio/lista-3/exercicio_1.py | larissajusten/ufsc-object-oriented-programming | 6 | 12766122 | <reponame>larissajusten/ufsc-object-oriented-programming
"""
Exercício 1. Escreva uma função que conta a frequência de ocorrência de cada
palavra em um texto (arquivo txt) e armazena tal quantidade em um dicionário, onde a
chave é a vogal considerada.
Correção: "onde a chave é a PALAVRA considerada"
"""
from collections import Counter
def count_palavras(nome_arquivo: str) -> dict:
    """Count word occurrences in ``<nome_arquivo>.txt``.

    Splits on single spaces (matching the original behavior, so newlines
    stay attached to words) and returns a dict mapping word -> count.
    """
    # Fix: the original never closed the file handle; 'with' guarantees it.
    with open(f'{nome_arquivo}.txt', 'rt') as file:
        texto = file.read()
    # The original built an identical copy via a comprehension; split() already
    # returns the list we need.
    palavras = texto.split(' ')
    return dict(Counter(palavras))
if __name__ == '__main__':
    nome_arquivo = input('Digite o nome do arquivo de texto: ')
    dicionario = count_palavras(nome_arquivo)
    print(dicionario)
    # Bug fix: the original then called delete_stopwords(dicionario), a
    # function that is not defined anywhere in this module, so the script
    # always crashed with NameError after printing. The call is removed.
| 3.953125 | 4 |
jobboard/forms.py | YarinBou/SJMaster | 1 | 12766123 | from django import forms
from jobboard.models import Job
class FormControl(forms.ModelForm):
    """ModelForm that tags every visible widget with Bootstrap's form-control class."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for visible in self.visible_fields():
            visible.field.widget.attrs['class'] = 'form-control'
class CreateNewJobForm(FormControl):
    """Form for posting a new Job; inherits Bootstrap widget styling."""
    class Meta:
        model = Job
        fields = ['title', 'job_type', 'major', 'work_from', 'description', 'city', 'address', 'title_keywords']
        # Keep the description textarea compact.
        widgets = {'description': forms.Textarea(attrs={'rows': '5'}), }
| 2.3125 | 2 |
fexm/helpers/exceptions.py | fgsect/fexm | 105 | 12766124 | <filename>fexm/helpers/exceptions.py<gh_stars>100-1000
# Fix: these exceptions derived from BaseException, which broad
# ``except Exception`` handlers cannot catch; user-defined errors should
# derive from Exception.
class CouldNotConfigureException(Exception):
    """Raised when a repository cannot be configured."""
    def __str__(self):
        return "Could not configure the repository."


class NotABinaryExecutableException(Exception):
    """Raised when the given file is not a binary executable."""
    def __str__(self):
        return "The file given is not a binary executable"


class ParametersNotAcceptedException(Exception):
    """Raised when the GitHub API rejects the given search parameters."""
    def __str__(self):
        return "The search parameters given were not accepted by the github api"
class NoCoverageInformation(Exception):
    """Raised when no coverage information could be collected for a binary.

    Fix: originally derived from BaseException; user-defined errors should
    derive from Exception so broad ``except Exception`` handlers catch them.
    """
    def __init__(self, binary_path):
        self.binary_path = binary_path

    def __str__(self):
        return "Could not get any coverage information for " + str(self.binary_path)
| 2.25 | 2 |
tests/test_1_jwt.py | levkowetz/pyjwkest | 76 | 12766125 | import pytest
from jwkest.jwt import JWT, b2s_conv
__author__ = 'roland'
def _eq(l1, l2):
return set(l1) == set(l2)
def test_pack_jwt():
    """Packing an unsigned JWT yields the standard three dot-separated parts."""
    _jwt = JWT(**{"alg": "none", "cty": "jwt"})
    jwt = _jwt.pack(parts=[{"iss": "joe", "exp": 1300819380,
                            "http://example.com/is_root": True}, ""])
    p = jwt.split('.')
    assert len(p) == 3
def test_unpack_pack():
    """unpack followed by pack round-trips to the identical token string."""
    _jwt = JWT(**{"alg": "none"})
    payload = {"iss": "joe", "exp": 1300819380,
               "http://example.com/is_root": True}
    jwt = _jwt.pack(parts=[payload, ""])
    repacked = JWT().unpack(jwt).pack()
    assert jwt == repacked
def test_pack_unpack():
    """Unpacking a packed token recovers every payload claim."""
    _jwt = JWT(**{"alg": "none"})
    payload = {"iss": "joe", "exp": 1300819380,
               "http://example.com/is_root": True}
    jwt = _jwt.pack(parts=[payload, ""])
    _jwt2 = JWT().unpack(jwt)
    assert _jwt2
    out_payload = _jwt2.payload()
    assert _eq(out_payload.keys(), ["iss", "exp", "http://example.com/is_root"])
    assert out_payload["iss"] == payload["iss"]
    assert out_payload["exp"] == payload["exp"]
    assert out_payload["http://example.com/is_root"] == payload[
        "http://example.com/is_root"]
def test_pack_with_headers():
    """Extra headers passed at pack time survive an unpack."""
    _jwt = JWT()
    jwt = _jwt.pack(parts=["", ""], headers={"foo": "bar"})
    assert JWT().unpack(jwt).headers["foo"] == "bar"
def test_unpack_str():
    """Unpacking a str token succeeds."""
    _jwt = JWT(**{"alg": "none"})
    payload = {"iss": "joe", "exp": 1300819380,
               "http://example.com/is_root": True}
    jwt = _jwt.pack(parts=[payload, ""])
    _jwt2 = JWT().unpack(jwt)
    assert _jwt2
    # NOTE(review): out_payload is computed but never asserted on — this test
    # looks unfinished; confirm whether payload checks were intended here.
    out_payload = _jwt2.payload()
def test_b2s_conv_raise_exception_on_bad_value():
    """b2s_conv rejects objects it cannot convert."""
    with pytest.raises(ValueError):
        b2s_conv(object())
if __name__ == "__main__":
    test_unpack_str()
| 2.515625 | 3 |
example.py | Septaris/declic | 0 | 12766126 | <reponame>Septaris/declic<gh_stars>0
from declic import group, argument, command
# on_before callbacks are executed if:
# - the group itself is called
# - if any of the child of the group is called
def before_bar():
    """on_before hook for the root group."""
    print('before bar')
def before_sub(tata):
    """on_before hook for the sub-group; receives the --tata argument."""
    print('before sub: %s' % tata)
# define the root command (a group)
@group(description='my description', on_before=before_bar)
@argument('--version', action='version', version='<the version>')
@argument('--foo', type=int, default=1)
def bar():
    print('bar')
# define a sub-group
@bar.group(invokable=True, on_before=before_sub)
@argument('--toto', type=int, default=2)
@argument('--tata', type=str, default='aaa')
def sub(toto, tata):
    print('toto: %s' % toto)
    print('tata: %s' % tata)
# define a sub-command of the sub-group
# chain option allows to execute each parent group (if they are invokable) before the command call
# each on_before functions will be executed anyway
@sub.command(chain=True)
def mop(toto, **kwargs):
    print('kwargs: %s' % kwargs)
    print('toto: %s' % toto)
# define a sub-command of the root group
@bar.command()
@argument('-x', type=int, default=1)
@argument('y', type=float)
def foo(x, y):
    print(x, y)
if __name__ == '__main__':
    import sys
    # Dispatch on the command-line arguments (excluding the program name).
    bar(sys.argv[1:])
    # or bar()
util.py | ahrnbom/utocs | 0 | 12766127 | """
Copyright (C) 2022 <NAME>
This work is released under the MIT License.
See the file LICENSE for details
Utility functions
"""
from math import sqrt
from typing import List
import numpy as np
import carla
import io
def loc_dist(a, b):
    """Euclidean distance between two locations exposing x/y/z attributes."""
    dx, dy, dz = a.x - b.x, a.y - b.y, a.z - b.z
    return sqrt(dx * dx + dy * dy + dz * dz)
def vector_normalize(v: carla.Vector3D):
    """Return *v* scaled to unit length.

    Bug fix: the original divided by the *squared* norm (the sqrt call was
    missing), so the result was never a unit vector. Division by zero for a
    zero vector is preserved from the original behavior.
    """
    norm = sqrt(v.x ** 2 + v.y ** 2 + v.z ** 2)
    new = carla.Vector3D(x=v.x / norm, y=v.y / norm, z=v.z / norm)
    return new
def vector_from_to(a: carla.Vector3D, b: carla.Vector3D):
    """Return the displacement vector pointing from *a* to *b*."""
    return carla.Vector3D(b.x - a.x, b.y - a.y, b.z - a.z)
def scalar_product(a: carla.Vector3D, b: carla.Vector3D):
    """Dot product of two 3-D vectors (any objects exposing x/y/z)."""
    products = (a.x * b.x, a.y * b.y, a.z * b.z)
    return sum(products)
def vector_dist(a, b):
    """Euclidean distance between two numpy vectors."""
    return np.linalg.norm(np.subtract(a, b))
def normalize_numpy_vector(x: np.ndarray):
    """Return *x* scaled to unit length, or None when its norm is ~0."""
    magnitude = np.linalg.norm(x)
    if magnitude <= 0.00001:
        return None
    return x / magnitude
# long_str(2) -> '0002'
# long_str(42, 3) -> '042'
def long_str(i:int, N:int=4, padding='0'):
s = str(i)
n = len(s)
if n < N:
s = padding*(N-n) + s
return s
# Removes 'intro' from left part of 'text', raises error if not found
def good_lstrip(text, intro):
    """Strip *intro* off the front of *text*; assert it is actually there."""
    n = len(intro)
    assert n <= len(text)
    assert text[:n] == intro
    return text[n:]
def intr(x):
    """Round *x* (anything float() accepts, e.g. a string) to the nearest int."""
    as_float = float(x)
    return int(round(as_float))
# Projective flattening, scales homogeneous coordinates so that last coordinate is always one
def pflat(x):
    """Divide *x* in place by its last coordinate (or last row) and return it."""
    if x.ndim == 1:
        x /= x[-1]
    else:
        x /= x[-1, :]
    return x
def print_table(row_names: List[str], col_names: List[str], matrix: np.ndarray,
                decimals=2):
    """Pretty-print *matrix* with row/column headers, rounded to *decimals*."""
    matrix = np.around(matrix, decimals=decimals)
    # Prepend the row names as a string column; hstack coerces the numeric
    # matrix to a string dtype.
    # NOTE(review): the resulting fixed-width '<U' dtype is sized from the
    # inputs and may truncate longer padded cells below — confirm.
    row_names = np.array(row_names, dtype=str).reshape((len(row_names), 1))
    matrix = np.hstack([row_names, matrix])
    col_names = np.array(['', *col_names], dtype=str)
    col_names = col_names.reshape((1, len(col_names)))
    matrix = np.vstack([col_names, matrix])
    # Pad every cell to the widest entry so columns line up.
    max_len = max([len(v) for v in matrix.flatten()])
    for i in range(matrix.shape[0]):
        for j in range(matrix.shape[1]):
            val = matrix[i,j]
            matrix[i, j] = long_str(val, max_len, padding=' ')
    print(np.array2string(matrix, max_line_width=200))
| 3.265625 | 3 |
tensorflow_probability/python/experimental/mcmc/sequential_monte_carlo_kernel.py | brianwa84/probability | 1 | 12766128 | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sequential Monte Carlo."""
from __future__ import print_function
import collections
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.experimental.mcmc import weighted_resampling
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.mcmc import kernel as kernel_base
__all__ = [
'SequentialMonteCarlo',
'SequentialMonteCarloResults',
'WeightedParticles',
'ess_below_threshold',
]
# SequentialMonteCarlo `state` structure.
# SequentialMonteCarlo `state` structure.
class WeightedParticles(collections.namedtuple(
    'WeightedParticles', ['particles', 'log_weights'])):
  """Particles with corresponding log weights.

  This structure serves as the `state` for the `SequentialMonteCarlo` transition
  kernel.

  Elements:
    particles: a (structure of) Tensor(s) each of shape
      `concat([[num_particles, b1, ..., bN], event_shape])`, where `event_shape`
      may differ across component `Tensor`s.
    log_weights: `float` `Tensor` of shape
      `[num_particles, b1, ..., bN]` containing a log importance weight for
      each particle, typically normalized so that
      `exp(reduce_logsumexp(log_weights, axis=0)) == 1.`. These must be used in
      conjunction with `particles` to compute expectations under the target
      distribution.

  In some contexts, particles may be stacked across multiple inference steps,
  in which case all `Tensor` shapes will be prefixed by an additional dimension
  of size `num_steps`.
  """
  # NOTE(review): unlike SequentialMonteCarloResults, this subclass does not
  # declare `__slots__ = ()`, so instances carry a per-instance dict — confirm
  # whether that asymmetry is intentional.
# SequentialMonteCarlo `kernel_results` structure.
# SequentialMonteCarlo `kernel_results` structure.
class SequentialMonteCarloResults(collections.namedtuple(
    'SequentialMonteCarloResults',
    ['steps',
     'parent_indices',
     'incremental_log_marginal_likelihood',
     # Track both incremental and accumulated likelihoods so that users can get
     # the accumulated likelihood without needing to trace every step.
     'accumulated_log_marginal_likelihood',
     'seed',
    ])):
  """Auxiliary results from a Sequential Monte Carlo step.

  This structure serves as the `kernel_results` for the `SequentialMonteCarlo`
  transition kernel.

  Elements:
    steps: scalar int `Tensor` number of inference steps completed so far.
    parent_indices: `int` `Tensor` of shape `[num_particles, b1, ..., bN]`,
      such that `parent_indices[k]` gives the indice(s) of the particle(s) at
      the previous step from which the the `k`th current particle is
      immediately descended. See also
      `tfp.experimental.mcmc.reconstruct_trajectories`.
    incremental_log_marginal_likelihood: float `Tensor` of shape
      `[b1, ..., bN]`, giving the natural logarithm of an unbiased estimate of
      the ratio in normalizing constants incurred in the most recent step
      (typically this is the likelihood of observed data).
      Note that (by [Jensen's inequality](
      https://en.wikipedia.org/wiki/Jensen%27s_inequality))
      this is *smaller* in expectation than the true log ratio.
    accumulated_log_marginal_likelihood: float `Tensor` of shape
      `[b1, ..., bN]`, giving the natural logarithm of an unbiased estimate of
      the ratio in normalizing constants incurred since the initial step
      (typically this is the likelihood of observed data).
      Note that (by [Jensen's inequality](
      https://en.wikipedia.org/wiki/Jensen%27s_inequality))
      this is *smaller* in expectation than the true log ratio.
    seed: The seed used in one_step.

  In some contexts, results may be stacked across multiple inference steps,
  in which case all `Tensor` shapes will be prefixed by an additional dimension
  of size `num_steps`.
  """
  # Docstring fix above: the field was documented under the nonexistent name
  # `cumulative_log_marginal_likelihood`.
  __slots__ = ()
def _dummy_indices_like(indices):
  """Returns dummy indices ([0, 1, 2, ...]) with batch shape like `indices`."""
  indices_shape = ps.shape(indices)
  num_particles = indices_shape[0]
  # Reshape [0..num_particles) to [num_particles, 1, ..., 1] (rank matching
  # `indices`) and broadcast across the batch dimensions, so each batch slice
  # carries the identity permutation.
  return tf.broadcast_to(
      ps.reshape(
          ps.range(num_particles),
          ps.pad([num_particles],
                 paddings=[[0, ps.rank_from_shape(indices_shape) - 1]],
                 constant_values=1)),
      indices_shape)
def ess_below_threshold(weighted_particles, threshold=0.5):
  """Determines if the effective sample size is much less than num_particles."""
  with tf.name_scope('ess_below_threshold'):
    num_particles = ps.size0(weighted_particles.log_weights)
    log_weights = tf.math.log_softmax(weighted_particles.log_weights, axis=0)
    # ESS = 1 / sum(w_i^2) for normalized weights; computed in log space.
    log_ess = -tf.math.reduce_logsumexp(2 * log_weights, axis=0)
    # Resample when ESS < threshold * num_particles (compared in log space).
    return log_ess < (ps.log(num_particles) +
                      ps.log(threshold))
class SequentialMonteCarlo(kernel_base.TransitionKernel):
  """Sequential Monte Carlo transition kernel.

  Sequential Monte Carlo maintains a population of weighted particles
  representing samples from a sequence of target distributions. It is
  *not* a calibrated MCMC kernel: the transitions step through a sequence of
  target distributions, rather than trying to maintain a stationary
  distribution.
  """

  def __init__(self,
               propose_and_update_log_weights_fn,
               resample_fn=weighted_resampling.resample_systematic,
               resample_criterion_fn=ess_below_threshold,
               name=None):
    """Initializes a sequential Monte Carlo transition kernel.

    Args:
      propose_and_update_log_weights_fn: Python `callable` with signature
        `new_weighted_particles = propose_and_update_log_weights_fn(step,
        weighted_particles, seed=None)`. Its input is a
        `tfp.experimental.mcmc.WeightedParticles` structure representing
        weighted samples (with normalized weights) from the `step`th
        target distribution, and it returns another such structure representing
        unnormalized weighted samples from the next (`step + 1`th) target
        distribution. This will typically include particles
        sampled from a proposal distribution `q(x[step + 1] | x[step])`, and
        weights that account for some or all of: the proposal density,
        a transition density `p(x[step + 1] | x[step]),
        observation weights `p(y[step + 1] | x[step + 1])`, and/or a backwards
        or 'L'-kernel `L(x[step] | x[step + 1])`. The (log) normalization
        constant of the weights is interpreted as the incremental (log) marginal
        likelihood.
      resample_fn: Resampling scheme specified as a `callable` with signature
        `indices = resample_fn(log_probs, event_size, sample_shape, seed)`,
        where `log_probs` is a `Tensor` of the same shape as `state.log_weights`
        containing a normalized log-probability for every current
        particle, `event_size` is the number of new particle indices to
        generate,  `sample_shape` is the number of independent index sets to
        return, and the return value `indices` is an `int` Tensor of shape
        `concat([sample_shape, [event_size, B1, ..., BN])`. Typically one of
        `tfp.experimental.mcmc.resample_deterministic_minimum_error`,
        `tfp.experimental.mcmc.resample_independent`,
        `tfp.experimental.mcmc.resample_stratified`, or
        `tfp.experimental.mcmc.resample_systematic`.
        Default value: `tfp.experimental.mcmc.resample_systematic`.
      resample_criterion_fn: optional Python `callable` with signature
        `do_resample = resample_criterion_fn(weighted_particles)`,
        passed an instance of `tfp.experimental.mcmc.WeightedParticles`. The
        return value `do_resample`
        determines whether particles are resampled at the current step. The
        default behavior is to resample particles when the effective
        sample size falls below half of the total number of particles.
        Default value: `tfp.experimental.mcmc.ess_below_threshold`.
      name: Python `str` name for ops created by this kernel.
    """
    self._propose_and_update_log_weights_fn = propose_and_update_log_weights_fn
    self._resample_fn = resample_fn
    self._resample_criterion_fn = resample_criterion_fn
    self._name = name or 'SequentialMonteCarlo'

  @property
  def is_calibrated(self):
    # SMC targets a *sequence* of distributions, so it is not calibrated.
    return False

  @property
  def name(self):
    return self._name

  @property
  def propose_and_update_log_weights_fn(self):
    return self._propose_and_update_log_weights_fn

  @property
  def resample_criterion_fn(self):
    return self._resample_criterion_fn

  @property
  def resample_fn(self):
    return self._resample_fn

  def one_step(self, state, kernel_results, seed=None):
    """Takes one Sequential Monte Carlo inference step.

    Args:
      state: instance of `tfp.experimental.mcmc.WeightedParticles` representing
        the current particles with (log) weights. The `log_weights` must be
        a float `Tensor` of shape `[num_particles, b1, ..., bN]`. The
        `particles` may be any structure of `Tensor`s, each of which
        must have shape `concat([log_weights.shape, event_shape])` for some
        `event_shape`, which may vary across components.
      kernel_results: instance of
        `tfp.experimental.mcmc.SequentialMonteCarloResults` representing results
        from a previous step.
      seed: Optional seed for reproducible sampling.

    Returns:
      state: instance of `tfp.experimental.mcmc.WeightedParticles` representing
        new particles with (log) weights.
      kernel_results: instance of
        `tfp.experimental.mcmc.SequentialMonteCarloResults`.
    """
    with tf.name_scope(self.name):
      with tf.name_scope('one_step'):
        seed = samplers.sanitize_seed(seed)
        proposal_seed, resample_seed = samplers.split_seed(seed)

        state = WeightedParticles(*state)  # Canonicalize.
        num_particles = ps.size0(state.log_weights)

        # Propose new particles and update weights for this step, unless it's
        # the initial step, in which case, use the user-provided initial
        # particles and weights.
        proposed_state = self.propose_and_update_log_weights_fn(
            # Propose state[t] from state[t - 1].
            ps.maximum(0, kernel_results.steps - 1),
            state,
            seed=proposal_seed)
        is_initial_step = ps.equal(kernel_results.steps, 0)
        # TODO(davmre): this `where` assumes the state size didn't change.
        state = tf.nest.map_structure(
            lambda a, b: tf.where(is_initial_step, a, b), state, proposed_state)

        normalized_log_weights = tf.nn.log_softmax(state.log_weights, axis=0)
        # Every entry of `log_weights` differs from `normalized_log_weights`
        # by the same normalizing constant. We extract that constant by
        # examining an arbitrary entry.
        incremental_log_marginal_likelihood = (state.log_weights[0] -
                                               normalized_log_weights[0])

        do_resample = self.resample_criterion_fn(state)

        # Some batch elements may require resampling and others not, so
        # we first do the resampling for all elements, then select whether to
        # use the resampled values for each batch element according to
        # `do_resample`. If there were no batching, we might prefer to use
        # `tf.cond` to avoid the resampling computation on steps where it's not
        # needed---but we're ultimately interested in adaptive resampling
        # for statistical (not computational) purposes, so this isn't a
        # dealbreaker.
        resampled_particles, resample_indices = weighted_resampling.resample(
            state.particles,
            state.log_weights,
            self.resample_fn,
            seed=resample_seed)
        # After resampling, all particles carry weight 1/num_particles.
        uniform_weights = tf.fill(
            ps.shape(state.log_weights),
            value=-tf.math.log(tf.cast(num_particles, state.log_weights.dtype)))
        (resampled_particles,
         resample_indices,
         log_weights) = tf.nest.map_structure(
             lambda r, p: ps.where(do_resample, r, p),
             (resampled_particles, resample_indices, uniform_weights),
             (state.particles, _dummy_indices_like(resample_indices),
              normalized_log_weights))

      return (WeightedParticles(particles=resampled_particles,
                                log_weights=log_weights),
              SequentialMonteCarloResults(
                  steps=kernel_results.steps + 1,
                  parent_indices=resample_indices,
                  incremental_log_marginal_likelihood=(
                      incremental_log_marginal_likelihood),
                  accumulated_log_marginal_likelihood=(
                      kernel_results.accumulated_log_marginal_likelihood +
                      incremental_log_marginal_likelihood),
                  seed=seed))

  def bootstrap_results(self, init_state):
    """Builds zero-initialized kernel results matching `init_state`'s batch shape."""
    with tf.name_scope(self.name):
      with tf.name_scope('bootstrap_results'):
        init_state = WeightedParticles(*init_state)

        batch_zeros = tf.zeros(
            ps.shape(init_state.log_weights)[1:],
            dtype=init_state.log_weights.dtype)

        return SequentialMonteCarloResults(
            steps=0,
            parent_indices=_dummy_indices_like(init_state.log_weights),
            incremental_log_marginal_likelihood=batch_zeros,
            accumulated_log_marginal_likelihood=batch_zeros,
            seed=samplers.zeros_seed())
| 2.03125 | 2 |
ivf/batch/initial_normal.py | tody411/ImageViewerFramework | 0 | 12766129 | # -*- coding: utf-8 -*-
## @package ivf.batch.initial_normal
#
# ivf.batch.initial_normal utility package.
# @author tody
# @date 2016/02/19
import numpy as np
import cv2
import matplotlib.pyplot as plt
from ivf.batch.batch import DatasetBatch
from ivf.io_util.image import loadNormal, saveNormal
from ivf.core.sfs import amg_constraints
from ivf.core.solver import amg_solver
from ivf.core.sfs.lumo import computeNz
from ivf.cv.normal import normalizeImage
from ivf.np.norm import normalizeVectors
class InitialNormalBatch(DatasetBatch):
    """Batch job that estimates an initial normal map for each dataset
    entry by solving a Laplacian system constrained at the silhouette."""

    def __init__(self, name="InitialNormal", dataset_name="3dmodel"):
        """Register the batch as *name* over dataset *dataset_name*."""
        super(InitialNormalBatch, self).__init__(name, dataset_name)

    def _runImp(self):
        """Process the current data file and save the estimated normal map.

        Files without normal data are silently skipped.
        """
        normal_data = loadNormal(self._data_file)
        if normal_data is None:
            return
        # N0_32F: loaded normal image (unused here); A_8U: 8-bit alpha mask.
        N0_32F, A_8U = normal_data
        h, w = A_8U.shape[:2]
        # Hard constraints on the silhouette (flat normals there) combined
        # with a Laplacian smoothness term over the full (h, w) grid.
        A_c, b_c = amg_constraints.silhouetteConstraints(A_8U, is_flat=True)
        A_L = amg_constraints.laplacianMatrix((h, w), num_elements=3)
        A = A_c + A_L
        b = b_c
        # Solve for per-pixel normals, recover the z component, renormalize.
        N = amg_solver.solve(A, b).reshape(-1, 3)
        N = computeNz(N)
        N = normalizeVectors(N)
        N_32F = N.reshape(h, w, 3)
        file_path = self.resultFile(self._data_file_name)
        saveNormal(file_path, N_32F, A_8U)
if __name__ == '__main__':
    # Run the batch directly when invoked as a script.
    # (Dataset-dump score junk "| 2.1875 | 2 |" removed from this line.)
    InitialNormalBatch().run()
online_attacks/utils/optimizer/extragradient.py | hugobb/OnlineAttacks | 15 | 12766130 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch import optim
import copy
class Extragradient(optim.Optimizer):
    """Extragradient wrapper around an existing optimizer.

    Calls to :meth:`step` alternate between two phases:

    1. *Extrapolation*: remember the current iterate, then take one step
       of the wrapped optimizer.
    2. *Update*: rewind the parameters to the remembered iterate and take
       one step from there, using the gradients computed at the
       extrapolated point.
    """

    def __init__(self, optimizer, params):
        super().__init__(params, optimizer.defaults)
        self.params_copy = []
        self.optimizer = optimizer
        self.extrapolation_flag = False

    def step(self, closure=None):
        """Perform one extragradient half-step; returns closure loss if any."""
        loss = closure() if closure is not None else None

        if not self.extrapolation_flag:
            # Extrapolation phase: snapshot the iterate, then step.
            for group in self.param_groups:
                group["params_copy"] = copy.deepcopy(group["params"])
            self.optimizer.step()
            self.extrapolation_flag = True
            return loss

        # Update phase: rewind to the snapshot and step from there.
        for group in self.param_groups:
            for param, snapshot in zip(group["params"], group["params_copy"]):
                param.data = snapshot.data
        self.optimizer.step()
        self.extrapolation_flag = False
        return loss
| 2.296875 | 2 |
accountsynchr/ucalgroup/group_manager.py | uw-it-aca/eventcal | 0 | 12766131 | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
"""
This class provides GWS Group related methods
"""
import logging
from uw_trumba.models import TrumbaCalendar
from accountsynchr.models import (
UwcalGroup, EDITOR, SHOWON, new_editor_group, new_showon_group)
from accountsynchr.dao.gws import Gws
logger = logging.getLogger(__name__)
class GroupManager:
    """Caches the UW calendar editor/showon GWS groups for every campus
    and provides lookup plus create/update operations on them."""

    def __init__(self):
        """Fetch all campus editor and showon groups from GWS up front."""
        self.gws = Gws()
        # {campus_code: {group-id: UwcalGroup}}
        self.campus_editor_groups = {}
        self.campus_showon_groups = {}
        for choice in TrumbaCalendar.CAMPUS_CHOICES:
            campus_code = choice[0]
            result = self.gws.get_campus_groups(campus_code)
            campus_editor_groups = result[EDITOR]
            self.campus_editor_groups[campus_code] = campus_editor_groups
            self.campus_showon_groups[campus_code] = result[SHOWON]

    def get_all_editors(self):
        # Editors accumulated by the GWS client across all campuses.
        return self.gws.all_editors

    def get_campus_editor_groups(self, campus_code):
        """
        :return: the list of UwcalGroup object in the given campus
        """
        return self.campus_editor_groups[campus_code].values()

    def get_campus_showon_groups(self, campus_code):
        """
        :return: the list of UwcalGroup object in the given campus
        """
        return self.campus_showon_groups[campus_code].values()

    def get_editor_group(self, trumba_cal):
        """
        :return: the UwcalGroup object of the corresponding
        editor group for the given TrumbaCalendar object
        """
        return self.campus_editor_groups[trumba_cal.campus].get(
            trumba_cal.get_group_name(EDITOR))

    def get_showon_group(self, trumba_cal):
        """
        :return: the UwcalGroup object of the corresponding
        showon group for the given TrumbaCalendar object
        """
        return self.campus_showon_groups[trumba_cal.campus].get(
            trumba_cal.get_group_name(SHOWON))

    def has_editor_group(self, trumba_cal):
        """
        :param trumba_cal: a TrumbaCalendar object
        :return: True if the corresponding editor UwcalGroup exists
        """
        return self.get_editor_group(trumba_cal) is not None

    def has_showon_group(self, trumba_cal):
        """
        :param trumba_cal: a TrumbaCalendar object
        :return: True if the corresponding showon UwcalGroup exists
        """
        return self.get_showon_group(trumba_cal) is not None

    def put_editor_group(self, trumba_cal):
        """
        Create or update the editor group for the trumba calendar
        :param trumba_cal: a TrumbaCalendar object
        :return: the UwcalGroup object created, None is failed
        """
        uwcal_group = self.get_editor_group(trumba_cal)
        if uwcal_group is not None:
            # Group exists: only push an update if the calendar was renamed.
            if uwcal_group.same_name(trumba_cal):
                return uwcal_group
            uwcal_group.set_calendar_name(trumba_cal.name)
        else:
            uwcal_group = new_editor_group(trumba_cal)
        return self._execute_put(uwcal_group)

    def put_showon_group(self, trumba_cal):
        """
        Create or update the showon group for the trumba calendar
        :param trumba_cal: a TrumbaCalendar object
        :return: the UwcalGroup object created, None is failed
        """
        uwcal_group = self.get_showon_group(trumba_cal)
        if uwcal_group is not None:
            # Group exists: only push an update if the calendar was renamed.
            if uwcal_group.same_name(trumba_cal):
                return uwcal_group
            uwcal_group.set_calendar_name(trumba_cal.name)
        else:
            uwcal_group = new_showon_group(trumba_cal)
        return self._execute_put(uwcal_group)

    def _execute_put(self, uwcal_group):
        """Write *uwcal_group* to GWS.

        :return: the UwcalGroup with its group_ref attached on success
            (returned group id matches), None otherwise.
        """
        gwsgroup = self.gws.put_group(uwcal_group)
        if (gwsgroup is not None and
                gwsgroup.name == uwcal_group.get_group_name()):
            # group id match
            uwcal_group.group_ref = gwsgroup
            return uwcal_group
        return None
| 2.28125 | 2 |
features/regnet.py | roatienza/agmax | 2 | 12766132 | <gh_stars>1-10
import torch
import torch.nn as nn
import timm
from . import extractor
class RegNetX002(extractor.BaseModule):
def __init__(self, config, name):
super(RegNetX002, self).__init__()
self.name = name
self.features = timm.create_model('regnetx_002')
self.n_features = 368
def forward(self, x):
return self.features.forward_features(x)
class RegNetY004(extractor.BaseModule):
def __init__(self, config, name):
super(RegNetY004, self).__init__()
self.name = name
self.features = timm.create_model('regnety_004')
self.n_features = 440
def forward(self, x):
return self.features.forward_features(x)
| 2.28125 | 2 |
Common_Questions/TextBookQuestions/PythonCrashCourse/Chapter_8/8_10.py | tegamax/ProjectCode | 0 | 12766133 | '''
8-10. Sending Messages: Start with a copy of your program from Exercise 8-9.
Write a function called send_messages() that prints each text message and moves each message to a new list called sent_messages as it’s printed.
After calling the function, print both of your lists to make sure the messages were moved correctly.
'''
def send_messages(short_message):
sent_messages=[]
while short_message:
current_message = short_message.pop()
sent_messages.append(current_message)
print(f'Original list {short_message}')
print(f'Updated List {sent_messages}')
messages = ['apple','mango','guava']
print(send_messages(messages))
'''
def send_messages(short_list,sent_messages):
while short_list:
curent_message = short_list.pop()
print(f'THe item {curent_message} is now removed from short_list')
sent_messages.append(curent_message)
def printitng_messages(sent_messages):
for sent_message in sent_messages:
print(sent_message)
messages = ['monitor','mouse','laptop','keyboard']
sent_messages = []
send_messages(messages,sent_messages)
printitng_messages(sent_messages)
''' | 4.375 | 4 |
sv_core/core/evt/models.py | ekryukov/sv_core | 0 | 12766134 | <reponame>ekryukov/sv_core
# ************************************************************
# * Events API.
# * Created by <NAME>.(<EMAIL>) at 10.05.2016
# *************************************************************
import logging
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.translation import ugettext_lazy as _
from sequences import get_next_value
from sv_core.core.com.api import get_dict_choice
from sv_core.core.com.models import BaseLang, Dictionary
from sv_core.core.ost.models import DEFAULT_INST
from sv_core.core.prc.models import get_session_id
from sv_core.core.rul.models import load_params, execute_rule_set
from sv_core.share.common import get_hash
from sv_core.share.models import Base
logger = logging.getLogger(__name__)
event_shared_data = {}
event_object_tab = []
event_rule_tab = []
event_params = {}
EVENT_STATUS_KEY = 'EVST'
EVENT_STATUS_READY = 'EVST0001'
EVENT_STATUS_PROCESSED = 'EVST0002'
EVENT_STATUS_DO_NOT_PROCES = 'EVST0003'
class Event(BaseLang):
"""Events raising in the system for exact institution."""
event_type = models.CharField(_("Event type"), max_length=8, help_text=_("Event type code."), unique=True)
is_cached = models.BooleanField(_("Is cached"), default=False, help_text=_("Cached (delayed) rule set execution."))
status_lov = models.ForeignKey('com.Lov', on_delete=models.SET_NULL, null=True)
class Meta:
db_table = "evt_event"
verbose_name = _("Event")
def save(self, *args, **kwargs):
dict_code = 'EVNT'
self.event_type = self.event_type.upper()
code = self.event_type[4:].upper()
d, created = Dictionary.objects.get_or_create(dict_code=dict_code, code=code)
if created:
d.save()
super().save(*args, **kwargs)
def __str__(self):
return self.event_type
class EventObject(Base):
"""Objects awaiting processing by subscribers."""
event = models.ForeignKey('Event', on_delete=models.CASCADE, help_text=_("Reference to event."))
procedure_name = models.CharField(_("Procedure name"), max_length=200, help_text=_("Subscriber procedure name."))
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, help_text=_("Business-entity type."))
object_id = models.PositiveIntegerField(_("Reference to the object."))
content_object = GenericForeignKey('content_type', 'object_id')
eff_date = models.DateTimeField(_("Effective date"), help_text=_("Event effective date"))
session = models.ForeignKey('prc.Session', related_name=_("Session"), help_text=_("Session identifier."),
on_delete=models.CASCADE)
split_hash = models.PositiveIntegerField(_("Split hash"), default=-1,
help_text=_("Hash value to split further processing."))
status = models.CharField(_("Status"), help_text=_("Event status"), max_length=8, choices=get_dict_choice('EVST'))
class Meta:
db_table = "evt_event_object"
verbose_name = _("Event object")
class EventType(Base):
"""Event types"""
event_type = models.CharField(_("Event type"), max_length=8, help_text=_("Event type code."),
choices=get_dict_choice('EVNT'))
entity_type = models.CharField(_("Entity type"), max_length=8, help_text=_("Entity type code."),
choices=get_dict_choice("ENTT"))
class Meta:
db_table = "evt_event_type"
verbose_name = _("Event type")
def __str__(self):
return "{}::{}".format(self.event_type, self.entity_type)
@property
def event_type_raw(self):
return self.event_type
@property
def entity_type_raw(self):
return self.entity_type
class Subscriber(models.Model):
"""Processes subscribed on events to process objects linked with event."""
procedure_name = models.CharField(_("Procedure name"), max_length=200, help_text=_("Subscriber procedure name."))
event_type = models.CharField(_("Event type"), max_length=8, help_text=_("Reference to event."))
priority = models.PositiveSmallIntegerField(_("Priority"), default=10, help_text=_(
"Event processing priority when subscriber process few events."))
event = models.ManyToManyField(Event, through='Subscription')
def __str__(self):
return "{}::{}".format(self.event_type, self.procedure_name)
class Meta:
db_table = "evt_subscriber"
verbose_name = _("Subscriber")
class Subscription(models.Model):
event = models.ForeignKey(Event, on_delete=models.CASCADE)
subscriber = models.ForeignKey(Subscriber, on_delete=models.CASCADE)
modifier = models.ForeignKey("rul.Modifier", on_delete=models.CASCADE, null=True, blank=True)
class Meta:
db_table = 'evt_subscription'
verbose_name = _("Subscription")
class EventRuleSet(models.Model):
"""Rule sets executing when events raised"""
event = models.ForeignKey('Event', on_delete=models.CASCADE, help_text=_("Reference to event."))
rule_set = models.ForeignKey('rul.RuleSet', on_delete=models.CASCADE, help_text=_("Reference to rule set."))
modifier = models.ForeignKey('rul.Modifier', on_delete=models.CASCADE, help_text=_(
"Modifier containing filter on objects will processed by current rule set."), null=True)
def get_subscriber_tab(event_type):
"""Get list of subscribers for event type"""
subscriber_tab = []
a = Subscription.objects.filter(event__event_type=event_type)
if a.count() > 0:
subscriber_tab = [{"event_id": x.event_id, "mod_id": x.modifier.id, "proc_name": x.subscriber.procedure_name}
for x in a]
return subscriber_tab
def get_rule_set_tab(event_type):
"""Get list"""
rule_set_tab = []
a = EventRuleSet.objects.filter(event__event_type=event_type)
if a.count() > 0:
rule_set_tab = [(x.modifier_id, x.rule_set_id, x.event.is_cached) for x in a]
return rule_set_tab
def register_event(event_type, eff_date, entity_type, object_id, param_tab, status=None, inst_id=DEFAULT_INST):
"""Register event"""
rec_tab = {}
logger.debug(_("Incoming event {} {} {} {}".format(
event_type,
eff_date,
":".join((entity_type, str(object_id))),
status
)))
split_hash = get_hash(object_id, settings.SPLIT_DEGREE)
l_param_tab = param_tab
l_param_tab["EVENT_TYPE"] = event_type
l_param_tab["EVENT_DATE"] = eff_date
l_param_tab["ENTITY_TYPE"] = entity_type
l_param_tab["OBJECT_ID"] = object_id
l_param_tab["INST_ID"] = inst_id
l_param_tab["SPLIT_HASH"] = split_hash
load_params(entity_type=entity_type, object_id=object_id, param_tab=l_param_tab)
subscriber_tab = get_subscriber_tab(event_type)
cnt = 0
for rec in subscriber_tab:
logger.debug("Asserting modifier {}".format(rec["mod_id"]))
# TODO check condition - check modifier parameters
rec_tab.clear()
rec_tab["event_object_id"] = get_next_value('event_object_id')
rec_tab["event_id"] = rec["event_id"]
rec_tab["procedure_name"] = rec["proc_name"]
rec_tab["entity_tab"] = entity_type
rec_tab["object_id"] = object_id
rec_tab["eff_date"] = eff_date
rec_tab["inst_id"] = inst_id
rec_tab["split_hash"] = split_hash
rec_tab["status"] = status or EVENT_STATUS_READY
rec_tab["session_id"] = get_session_id
event_object_tab.extend(rec_tab)
cnt += 1
if cnt > 1000:
flush_events()
flush_events()
def cancel_events():
event_object_tab.clear()
event_rule_tab.clear()
def flush_events():
logger.debug("Going to flush {} subscriptions".format(len(event_object_tab)))
content_object = None
cnt = 0
for rec in event_object_tab:
e = EventObject()
e.pk = rec["event_object_id"]
e.event_id = rec["event_id"]
e.procedure_name = rec["procedure_name"]
e.eff_date = rec["eff_date"]
e.content_object = content_object
e.session_id = rec["session_id"]
e.split_hash = rec["split_hash"]
e.status = rec["status"]
e.save()
cnt += 1
logger.debug(' {} Subscriptions saved'.format(cnt))
event_object_tab.clear()
for rec in event_rule_tab:
event_params.clear()
event_params['EVENT_TYPE'] = rec["event_type"]
event_params['EVENT_DATE'] = rec["event_date"]
event_params['ENTITY_TYPE'] = rec["entity_type"]
event_params['OBJECT_ID'] = rec["object_id"]
event_params['INST_ID'] = rec["inst_id"]
event_params['SPLIT_HASH'] = rec["split_hash"]
# get object parameters
load_params(entity_type=rec["entity_type"], object_id=rec["object_id"], param_tab=event_params)
cnt = execute_rule_set(rec.rule_set_id, event_params)
logger.debug("Count of runs: %d" % cnt)
event_rule_tab.clear()
| 1.6875 | 2 |
"""Packaging script for vaex-gql-schema.

Fixes applied: dataset metadata fused into the first/last lines removed;
the deprecated ``imp.load_source`` (``imp`` was removed in Python 3.12)
replaced with the supported ``importlib`` machinery; the ``version`` name
is no longer reused for both a module and a string.
"""
import os
from importlib import util as _importlib_util

from setuptools import setup, find_packages


def _read_version(path):
    """Execute a ``_version.py`` file by path and return its ``__version__``."""
    spec = _importlib_util.spec_from_file_location('version', path)
    module = _importlib_util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module.__version__


dirname = os.path.dirname(__file__)
path_version = os.path.join(dirname, 'vaex_gql_schema/_version.py')

name = 'vaex-gql-schema'
author = '<NAME>'
author_email = '<EMAIL>'
license = 'MIT'
version = _read_version(path_version)
url = 'https://www.github.com/gmcbretas/vaex-graphql'
# Runtime dependencies, each pinned below its next major release.
install_requires_graphql = ['vaex-core>=4.1.0,<5', 'graphene>=3.0b7,<4', 'vaex>=4.1.0,<5', 'pandas>=1.2.4,<2']

setup(
    name=name,
    version=version,
    description='GraphQL support for accessing vaex DataFrame',
    url=url,
    author=author,
    author_email=author_email,
    install_requires=install_requires_graphql,
    license=license,
    packages=find_packages(exclude=['tests*']),
    zip_safe=False,
    entry_points={
        'vaex.dataframe.accessor': ['graphql = vaex_gql_schema:DataFrameAccessorGraphQL'],
    },
)
# Record the ground-truth world pose of selected Blender scene objects.
# Output format: one line per object, "<label> tx ty tz q1 q2 q3 q4"
# (translation, then the quaternion components in mathutils order).
import bpy
import mathutils
import os

out_path = os.path.join(os.path.dirname(bpy.data.filepath), 'objectmap.txt')

# Objects with the same class label must share a name prefix, e.g. "chair1"
# and "chair1.001" are two instances of the same class.
objects = [bpy.data.objects["chair1"],
           bpy.data.objects["chair1.002"],
           bpy.data.objects["chair1.003"],
           bpy.data.objects["chair2"],
           bpy.data.objects["chair2.001"],
           bpy.data.objects["chair3"],
           bpy.data.objects["chair3.001"],
           bpy.data.objects["table1"],
           bpy.data.objects["table1.001"],
           bpy.data.objects["table2"],
           bpy.data.objects["table3"],
           bpy.data.objects["room4"]]

# Context manager guarantees the file is closed even if an object lookup
# or matrix access raises (the original leaked the handle on error).
with open(out_path, "w") as pose_file:
    for item in objects:
        world = item.matrix_world
        trans = world.to_translation()
        orien = world.to_quaternion()
        # Strip the instance suffix ("chair1.001" -> "chair1") for the label.
        label = item.name.partition(".")[0]
        trans_str = " ".join(str(round(v, 6)) for v in trans)
        orien_str = " ".join(str(round(v, 6)) for v in orien)
        pose_file.write("%s %s %s\n" % (label, trans_str, orien_str))
ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/version.py | lzchen/opentelemetry-python | 0 | 12766137 | __version__ = "0.3dev0"
| 1.046875 | 1 |
# -*- coding: utf-8 -*-
"""
Copyright © 2017, <NAME>
Contributed by <NAME> (<EMAIL>)
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
import os

from channels.asgi import get_channel_layer

# The settings module must be configured before the channel layer is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ci.settings")

channel_layer = get_channel_layer()
api/handy_tools_apple_devices.py | jsugg/locate-apple-devices | 1 | 12766139 | <reponame>jsugg/locate-apple-devices
from flask import Flask, request
from flask_restful import Resource, Api
from json import dumps
from subprocess import call
import urllib
import subprocess
from pyicloud import PyiCloudService
import os
import cStringIO
import sys
# iCloud credentials are read at import time; a missing variable raises
# KeyError and aborts startup (fail fast rather than failing per request).
user = os.environ['APPLE_DEVICES_ACCOUNT_USER']
password = os.environ['APPLE_DEVICES_ACCOUNT_PASSWORD']
# my_env aliases os.environ, so the assignment below also mutates the
# real process environment for any child processes.
my_env = os.environ
my_env['PYTHONIOENCODING'] = 'utf-8'
def touch(fname, times=None):
    """Ensure *fname* exists and set its access/modification times.

    ``times`` follows ``os.utime``: an ``(atime, mtime)`` pair, or
    ``None`` to use the current time.
    """
    # Append mode creates the file when missing without truncating it.
    handle = open(fname, 'a')
    try:
        os.utime(fname, times)
    finally:
        handle.close()
# Flask application plus the Flask-RESTful wrapper hosting the resources below.
apple_devices_handy_tools = Flask(__name__)
api = Api(apple_devices_handy_tools)
class PlaySound(Resource):
    """POST /beeps/ -- ask an iCloud device to play a "find my" sound."""

    def post(self):
        """Expects form field ``device_id``; returns (message, status)."""
        if 'device_id' not in request.form:
            return "Missing device id.", 400
        device_id = request.form['device_id']
        try:
            # NOTE(review): re-authenticates against iCloud on every request,
            # which is slow -- consider caching the session if this is hot.
            icloud_api = PyiCloudService(user, password)
            icloud_api.devices[device_id].play_sound()
            return "Beeping " + device_id, 200
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            return 'Could not send the beep.', 400
class SendMessage(Resource):
    """POST /messages/ -- display a message on an iCloud device."""

    def post(self):
        """Form fields: ``device_id`` (required); ``title``, ``message``,
        ``beep`` (optional). Returns (message, status)."""
        subject = request.form.get('title', 'Subject')
        message = request.form.get('message', 'Message')
        beep = request.form.get('beep', False)
        device_id = request.form.get('device_id', False)
        if not device_id:
            return 'Missing device id.', 400
        try:
            icloud_api = PyiCloudService(user, password)
            icloud_api.devices[device_id].display_message(subject, message, beep)
            return 'Message sent to device id: ' + device_id, 200
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            return 'Could not send the message.', 400
# Route registration: POST /beeps/ plays a sound, POST /messages/ sends text.
api.add_resource(PlaySound, '/beeps/')
api.add_resource(SendMessage, '/messages/')

if __name__ == '__main__':
    # Listens on all interfaces; debug is disabled for anything exposed.
    apple_devices_handy_tools.run(debug=False, host='0.0.0.0', port=3010)
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
###############################################################
# © kenwaldek MIT-license
#
# Title: pandas 3                       Version: 1.0
# Date: 01-01-2017                      Language: python3
# Description: concatenation and appending
#
###############################################################

import pandas as pd

# Yearly sample data: df1 and df2 cover disjoint year ranges; df3 repeats
# df1's years but with a different column set (Low_tier_HPI instead of GDP).
df1 = pd.DataFrame({'HPI': [80, 85, 88, 85],
                    'Int_rate': [2, 3, 2, 2],
                    'US_GDP_Thousands': [50, 55, 65, 55]},
                   index=[2001, 2002, 2003, 2004])

df2 = pd.DataFrame({'HPI': [80, 85, 88, 85],
                    'Int_rate': [2, 3, 2, 2],
                    'US_GDP_Thousands': [50, 55, 65, 55]},
                   index=[2005, 2006, 2007, 2008])

df3 = pd.DataFrame({'HPI': [80, 85, 88, 85],
                    'Int_rate': [2, 3, 2, 2],
                    'Low_tier_HPI': [50, 52, 50, 53]},
                   index=[2001, 2002, 2003, 2004])

# Concatenating frames with different columns fills the gaps with NaN.
concat = pd.concat([df1, df2, df3])
print(concat)

# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported replacement and produces the same frame.
df4 = pd.concat([df1, df2])
print(df4)
"""Development configuration for the audio aligner app."""
import os

# ---- Static-file layout ----
APP_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
APP_STATIC_DIR = os.path.join(APP_PATH, "static")
APP_LOG_PATH = os.path.join(APP_PATH, 'logs', 'app.log')
APP_STATIC_URL_PATH = '/public_resource'

# ---- Logging ----
LOG_ENABLED = True  # master switch for logging
LOG_TO_CONSOLE = True  # also log to the console
LOG_TO_FILE = True  # also log to a file
LOG_TO_ES = False  # also ship logs to Elasticsearch
LOG_LEVEL = 'DEBUG'  # log level
# Per-record output format.
LOG_FORMAT = '%(levelname)s - %(asctime)s - process: %(process)d - %(filename)s - %(name)s - %(lineno)d - %(module)s - %(message)s'

# ---- Elasticsearch (currently unused) ----
# ELASTIC_SEARCH_HOST = 'localhost'  # elasticsearch cluster host
# ELASTIC_SEARCH_PORT = 9200  # elasticsearch port
# ELASTIC_SEARCH_INDEX = 'runtime'  # elasticsearch index name

# ---- Auth tokens ----
# NOTE(review): the secret key is regenerated on every import, which
# invalidates all outstanding tokens on restart -- confirm intended.
TOKEN_SECRET_KEY = os.urandom(24)
TOKEN_EXPIRES_IN = 86400  # seconds (24 h)

# ---- Database ----
MONGO_HOST = '127.0.0.1'
MONGO_PORT = 27017

# ---- Upload / volume directories ----
APP_UPLOAD_PATH = os.path.join(APP_STATIC_DIR, 'upload')
APP_VOLUME_PATH = os.path.join(APP_STATIC_DIR, 'volume')
APP_STATIC_URL_UPLOAD_PATH = os.path.join(APP_STATIC_URL_PATH, 'upload')
APP_STATIC_URL_VOLUME_PATH = os.path.join(APP_STATIC_URL_PATH, 'volume')

# ---- Kaldi ASR paths ----
KALDI_BASE = '/media/xddz/xddz/code/kaldi-trunk'
ENGLIST_KALDI_PATH = KALDI_BASE + '/egs/librispeech/s5'
CHINESE_KALDI_PATH = KALDI_BASE + '/egs/aidatatang_200zh/s5'
script_train_error.py | xiaoyanLi629/single_cell_data_analysis | 0 | 12766142 | <gh_stars>0
# Dependencies:
# pip: scikit-learn, anndata, scanpy
#
# Python starter kit for the NeurIPS 2021 Single-Cell Competition.
# Parts with `TODO` are supposed to be changed by you.
#
# More documentation:
#
# https://viash.io/docs/creating_components/python/
# ./scripts/1_unit_test.sh
# ./scripts/2_generate_submission.sh
# ./scripts/3_evaluate_submission.sh
import logging
import anndata as ad
import gc
import sys
from scipy.sparse import csc_matrix
from sklearn.model_selection import train_test_split
from sklearn.decomposition import TruncatedSVD
from sklearn.linear_model import LinearRegression
import random
import numpy as np
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
import matplotlib.pyplot as plt
import pickle
logging.basicConfig(level=logging.INFO)
start = time.perf_counter()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
## VIASH START
# Anything within this block will be removed by `viash` and will be
# replaced with the parameters as specified in your config.vsh.yaml.
# par = {
# 'input_train_mod1': 'sample_data/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.train_mod1.h5ad',
# 'input_train_mod2': 'sample_data/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.train_mod2.h5ad',
# 'input_test_mod1': 'sample_data/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.test_mod1.h5ad',
# 'input_test_mod2': 'sample_data/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.test_mod2.h5ad',
# 'distance_method': 'minkowski',
# 'output': 'output.h5ad',
# 'n_pcs': 50,
# }
def _censor_dataset_par(dataset):
    """Build the parameter dict for one censor-dataset modality task.

    All four tasks share the same file layout; only the dataset directory
    name differs, so the paths are generated instead of hand-copied
    (the original four dicts were near-identical copy-paste blocks).
    """
    base = ('output/datasets/predict_modality/{0}/'
            '{0}.censor_dataset.output_'.format(dataset))
    return {
        'input_train_mod1': base + 'train_mod1.h5ad',
        'input_train_mod2': base + 'train_mod2.h5ad',
        'input_test_mod1': base + 'test_mod1.h5ad',
        'input_test_mod2': base + 'test_mod2.h5ad',
        'distance_method': 'minkowski',
        'output': 'output.h5ad',
        'n_pcs': 50,
    }


# RNA -> DNA (train mod1: 16394 x 13431, train mod2: 16394 x 10000)
par_RNA_DNA = _censor_dataset_par('openproblems_bmmc_multiome_phase1_rna')
# DNA -> RNA (train mod1: 16394 x 116490, train mod2: 16394 x 13431)
par_DNA_RNA = _censor_dataset_par('openproblems_bmmc_multiome_phase1_mod2')
# RNA -> Protein (train mod1: 29077 x 13953, train mod2: 29077 x 134)
par_RNA_Pro = _censor_dataset_par('openproblems_bmmc_cite_phase1_rna')
# Protein -> RNA (train mod1: 29077 x 134, train mod2: 29077 x 13953)
par_Pro_RNA = _censor_dataset_par('openproblems_bmmc_cite_phase1_mod2')
par_list = [par_RNA_DNA, par_DNA_RNA, par_RNA_Pro, par_Pro_RNA]
index = int(sys.argv[1])
# print('index:', type(int(sys.argv[1])))
par = par_list[index]
## VIASH END
# TODO: change this to the name of your method
# par = par_list[int(sys.argv[1])]
method_id = "WRENCH;)"
# logging.info('Reading `h5ad` files...')
# logging.info(par['input_train_mod1'])
# logging.info(par['input_train_mod2'])
# logging.info(par['input_test_mod1'])
input_train_mod1 = ad.read_h5ad(par['input_train_mod1'])
input_train_mod2 = ad.read_h5ad(par['input_train_mod2'])
input_test_mod1 = ad.read_h5ad(par['input_test_mod1'])
s2d4 = input_train_mod1[input_train_mod1.obs["batch"] == "s2d4", :]
input_train_mod1 = s2d4
s2d1 = input_train_mod2[input_train_mod2.obs["batch"] == "s2d4", :]
input_train_mod2 = s2d1
# s3d6 = RNA_data[RNA_data.obs["batch"] == "s3d6", :]
# s2d1 = RNA_data[RNA_data.obs["batch"] == "s2d1", :]
# s1d1 = RNA_data[RNA_data.obs["batch"] == "s1d1", :]
mod1 = input_train_mod1.var['feature_types'][0]
mod2 = input_train_mod2.var['feature_types'][0]
model_path = mod1 +'_to_' + mod2 + '.pth'
# print('model path:', model_path)
# input_train = ad.concat(
# {"train": input_train_mod1, "test": input_test_mod1},
# axis=0,
# join="outer",
# label="group",
# fill_value=0,
# index_unique="-"
# )
#######################################################################
X_train, X_val, Y_train, Y_val = train_test_split(input_train_mod1, input_train_mod2, test_size=0.2, random_state=42)
train_inputs = torch.from_numpy(np.array(X_train.X.toarray()))
train_targets = torch.from_numpy(np.array(Y_train.X.toarray()))
val_inputs = torch.from_numpy(np.array(X_val.X.toarray()))
val_targets = torch.from_numpy(np.array(Y_val.X.toarray()))
test_inputs = torch.from_numpy(np.array(input_test_mod1.X.toarray()))
train_inputs = train_inputs.float()
train_targets = train_targets.float()
val_inputs = val_inputs.float()
val_targets = val_targets.float()
test_inputs = test_inputs.float()
# print(train_inputs.shape, train_targets.shape)
if train_inputs.shape[1] == 600:
model_path = 'sample_' + model_path
print('model path:', model_path)
test_targets = None
#########
input_test_mod2 = ad.read_h5ad(par['input_test_mod2'])
test_targets = torch.from_numpy(np.array(input_test_mod2.X.toarray()))
test_targets = test_targets.float()
#########
train_inputs = train_inputs.to(device)
train_targets = train_targets.to(device)
val_inputs = val_inputs.to(device)
val_targets = val_targets.to(device)
test_inputs = test_inputs.to(device)
test_targets = test_targets.to(device)
##############################
# num_epochs = int(sys.argv[1])
# learning_rate = float(sys.argv[2])
num_epochs = 500
learning_rate = 0.01
latent_dim = 50
loss_fn = F.mse_loss
batch_size = 1024*8
# model_path = 'auto_encoder_model.pth'
print('Epochs:', num_epochs, 'Learning rate:', learning_rate)
print('latent dim:', latent_dim)
print('batch size:', batch_size)
# Define data loader
train_ds = TensorDataset(train_inputs, train_targets)
train_dl = DataLoader(train_ds, batch_size, shuffle=True)
input_feature = train_inputs.shape[1]
output_feature = train_targets.shape[1]
class Autoencoder_model(nn.Module):
    """Fully connected encoder/decoder mapping one modality's features to
    another through a `latent_dim`-sized bottleneck.

    Layer widths come from the module-level `input_feature`,
    `output_feature` and `latent_dim` values.
    """

    def __init__(self):
        super().__init__()
        hidden = input_feature // 16
        # Compression path: input -> hidden -> hidden -> latent.
        self.encoder = nn.Sequential(
            nn.Linear(input_feature, hidden),
            nn.Dropout(0.25),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, latent_dim),
            nn.ReLU(),
        )
        # Expansion path: latent -> hidden -> hidden -> output
        # (no activation on the final layer; targets can be negative).
        self.decoder = nn.Sequential(
            nn.Linear(latent_dim, hidden),
            nn.Dropout(0.25),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, output_feature),
        )

    def forward(self, x):
        latent = self.encoder(x)
        reconstruction = self.decoder(latent)
        return reconstruction.float()
def fit(num_epochs, model, loss_fn):
    """Train *model* and return its final predictions on all three splits.

    Uses the module-level ``train_dl``, ``opt`` and the
    train/val/test input/target tensors. Every 100 epochs the RMSE on each
    split is printed as a progress report.

    Args:
        num_epochs: number of full passes over the training DataLoader.
        model: the network to optimise (already on the right device).
        loss_fn: elementwise loss (MSE here); RMSE is derived for logging.

    Returns:
        (train_pred, val_pred, test_pred): predictions of the *final* model.
        (Previously the function returned predictions from the last logging
        epoch -- up to 99 epochs stale -- and raised UnboundLocalError when
        num_epochs < 1.)
    """
    for epoch in range(num_epochs):
        for x, y in train_dl:
            model = model.train()
            pred = model(x)
            loss = loss_fn(pred, y)
            loss = loss.float()
            loss.backward()
            opt.step()
            opt.zero_grad()
        if epoch % 100 == 0:
            # Periodic progress report. no_grad avoids building autograd
            # graphs for these full-split evaluation passes.
            model = model.eval()
            with torch.no_grad():
                train_loss = torch.sqrt(loss_fn(model(train_inputs), train_targets)).cpu().numpy()
                val_loss = torch.sqrt(loss_fn(model(val_inputs), val_targets)).cpu().numpy()
                test_loss = torch.sqrt(loss_fn(model(test_inputs), test_targets)).cpu().numpy()
            print('Epoch ', epoch, 'Train_loss: ', train_loss, ' Validation_loss: ', val_loss, ' Test_loss: ', test_loss)
    # Compute final, up-to-date predictions for the caller.
    model = model.eval()
    with torch.no_grad():
        train_pred = model(train_inputs)
        val_pred = model(val_inputs)
        test_pred = model(test_inputs)
    return train_pred, val_pred, test_pred
# Instantiate the model, move it to the device, and train with plain SGD.
model = Autoencoder_model()
model = model.to(device)
opt = torch.optim.SGD(model.parameters(), lr=learning_rate)
print('Training the model')
(train_pred, val_pred, test_pred) = fit(num_epochs, model, loss_fn)
print('train pred:')
# print(train_pred)
# Visualise the signed residuals (prediction - target) for each split.
plt.matshow((train_pred-train_targets).cpu().detach().numpy())
train_error = (train_pred-train_targets).cpu().detach().numpy()
print('val pred:')
# print(val_pred)
plt.matshow((val_pred-val_targets).cpu().detach().numpy())
val_error = (val_pred-val_targets).cpu().detach().numpy()
print('test pred:')
# print(test_pred)
plt.matshow((test_pred-test_targets).cpu().detach().numpy())
test_error = (test_pred-test_targets).cpu().detach().numpy()
plt.show()
# Persist raw residuals, squared residuals, and per-feature squared-error
# totals for offline analysis.
res = [train_error, val_error, test_error]
with open('error.pickle', 'wb') as handle:
    pickle.dump(res, handle)
train_error_square = np.square(train_error)
val_error_square = np.square(val_error)
test_error_square = np.square(test_error)
error_square = [train_error_square, val_error_square, test_error_square]
with open('error_square.pickle', 'wb') as handle:
    pickle.dump(error_square, handle)
# axis=0 sums over samples, leaving one squared-error total per feature.
train_error_square_gene = train_error_square.sum(axis = 0)
val_error_square_gene = val_error_square.sum(axis = 0)
test_error_square_gene = test_error_square.sum(axis = 0)
error_square_gene = [train_error_square_gene, val_error_square_gene, test_error_square_gene]
with open('error_square_gene.pickle', 'wb') as handle:
    pickle.dump(error_square_gene, handle)
# with open('error.pickle', 'rb') as handle:
# error = pickle.load(handle)
# torch.save(model.state_dict(), model_path)
# # model = Autoencoder_model()
# model.load_state_dict(torch.load(model_path))
# model.eval()
# Y_pred = model(test_inputs)
# ###############
# # ## this is for self testing
# # input_test_mod2 = ad.read_h5ad(par['input_test_mod2'])
# # test_targets = torch.from_numpy(np.array(input_test_mod2.X.toarray()))
# # test_targets = test_targets.float()
# # test_loss = loss_fn(Y_pred, test_targets)
# # print('Testing loss:', test_loss.cpu().detach().numpy())
# ###############
# Y_pred = Y_pred.cpu().detach().numpy()
# Y_pred = csc_matrix(Y_pred)
# adata = ad.AnnData(
# X=Y_pred,
# obs=input_test_mod1.obs,
# var=input_train_mod2.var,
# uns={
# 'dataset_id': input_train_mod1.uns['dataset_id'],
# 'method_id': method_id,
# },
# )
# logging.info('Storing annotated data...')
# adata.write_h5ad(par['output'], compression = "gzip")
# # adata.write_h5ad(par[model_path], compression = "gzip")
# end = time.perf_counter()
# print(f'Training finished in {end-start} seconds')
#######################################################################
# # TODO: implement own method
# # Do PCA on the input data
# print('Build the model')
# logging.info('Performing dimensionality reduction on modality 1 values...')
# embedder_mod1 = TruncatedSVD(n_components=50)
# mod1_pca = embedder_mod1.fit_transform(input_train.X)
# logging.info('Performing dimensionality reduction on modality 2 values...')
# embedder_mod2 = TruncatedSVD(n_components=50)
# mod2_pca = embedder_mod2.fit_transform(input_train_mod2.X)
# # split dimred back up
# X_train = mod1_pca[input_train.obs['group'] == 'train']
# X_test = mod1_pca[input_train.obs['group'] == 'test']
# y_train = mod2_pca
# # assert len(X_train) + len(X_test) == len(mod1_pca)
# # Get all responses of the training data set to fit the
# # KNN regressor later on.
# #
# # Make sure to use `toarray()` because the output might
# # be sparse and `KNeighborsRegressor` cannot handle it.
# logging.info('Running Linear regression...')
# reg = LinearRegression()
# # Train the model on the PCA reduced modality 1 and 2 data
# reg.fit(X_train, y_train)
# print('Prediction...')
# y_pred = reg.predict(X_test)
# # Project the predictions back to the modality 2 feature space
# y_pred = y_pred @ embedder_mod2.components_
# # Store as sparse matrix to be efficient. Note that this might require
# # different classifiers/embedders before-hand. Not every class is able
# # to support such data structures.
# y_pred = csc_matrix(y_pred)
# print(len(input_train_mod1))
# print(input_train_mod1.uns['dataset_id'])
# adata = ad.AnnData(
# X=y_pred,
# obs=input_test_mod1.obs,
# var=input_train_mod2.var,
# uns={
# 'dataset_id': input_train_mod1.uns['dataset_id'],
# 'method_id': method_id,
# },
# )
# logging.info('Storing annotated data...')
# adata.write_h5ad(par['output'], compression = "gzip")
| 2.140625 | 2 |
test_main.py | GrivIN/intern-api | 0 | 12766143 | from fastapi.testclient import TestClient
from main import *
client = TestClient(app)
def test_index():
    """The root endpoint responds 200 with the hello-world greeting."""
    res = client.get("/")
    assert res.status_code == 200
    payload = res.json()
    assert payload == {"msg": "Hello World"}
def test_health():
    """The health endpoint reports an 'ok' status with HTTP 200."""
    res = client.get("/health")
    assert res.status_code == 200
    payload = res.json()
    assert payload == {"status": "ok"}
def test_jokes():
    """Listing /jokes returns the full jokes dataset verbatim."""
    res = client.get("/jokes")
    assert res.status_code == 200
    payload = res.json()
    assert payload == data
def test_random_jokes():
    """GET /jokes/random returns 200 and a joke drawn from the known set."""
    response = client.get("/jokes/random")
    # The status code was previously unchecked.
    assert response.status_code == 200
    # Assert membership directly instead of `assert flag is True`.
    assert response.json() in data["jokes"]
def test_same_random_jokes():
    """Two requests to the deterministic /jokes endpoint return identical data.

    The original version compared the ``Response`` objects themselves with
    ``!=``; responses are always distinct instances, so that assertion passed
    vacuously. Compare the parsed JSON bodies instead.
    NOTE(review): the test name suggests /jokes/random may have been the
    intended endpoint -- confirm against the API's contract.
    """
    first = client.get("/jokes")
    second = client.get("/jokes")
    assert first.json() == second.json()
| 3 | 3 |
sdk/python/pulumi_aws/route53/health_check.py | dixler/pulumi-aws | 0 | 12766144 | <filename>sdk/python/pulumi_aws/route53/health_check.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class HealthCheck(pulumi.CustomResource):
    """A Route53 health check resource.

    Auto-generated by the Pulumi Terraform Bridge (tfgen); see the file
    header warning -- avoid hand-editing the generated logic.
    """
    child_health_threshold: pulumi.Output[float]
    """
    The minimum number of child health checks that must be healthy for Route 53 to consider the parent health check to be healthy. Valid values are integers between 0 and 256, inclusive
    """
    child_healthchecks: pulumi.Output[list]
    """
    For a specified parent health check, a list of HealthCheckId values for the associated child health checks.
    """
    cloudwatch_alarm_name: pulumi.Output[str]
    """
    The name of the CloudWatch alarm.
    """
    cloudwatch_alarm_region: pulumi.Output[str]
    """
    The CloudWatchRegion that the CloudWatch alarm was created in.
    """
    enable_sni: pulumi.Output[bool]
    """
    A boolean value that indicates whether Route53 should send the `fqdn` to the endpoint when performing the health check. This defaults to AWS' defaults: when the `type` is "HTTPS" `enable_sni` defaults to `true`, when `type` is anything else `enable_sni` defaults to `false`.
    """
    failure_threshold: pulumi.Output[float]
    """
    The number of consecutive health checks that an endpoint must pass or fail.
    """
    fqdn: pulumi.Output[str]
    """
    The fully qualified domain name of the endpoint to be checked.
    """
    insufficient_data_health_status: pulumi.Output[str]
    """
    The status of the health check when CloudWatch has insufficient data about the state of associated alarm. Valid values are `Healthy` , `Unhealthy` and `LastKnownStatus`.
    """
    invert_healthcheck: pulumi.Output[bool]
    """
    A boolean value that indicates whether the status of health check should be inverted. For example, if a health check is healthy but Inverted is True , then Route 53 considers the health check to be unhealthy.
    """
    ip_address: pulumi.Output[str]
    """
    The IP address of the endpoint to be checked.
    """
    measure_latency: pulumi.Output[bool]
    """
    A Boolean value that indicates whether you want Route 53 to measure the latency between health checkers in multiple AWS regions and your endpoint and to display CloudWatch latency graphs in the Route 53 console.
    """
    port: pulumi.Output[float]
    """
    The port of the endpoint to be checked.
    """
    reference_name: pulumi.Output[str]
    """
    This is a reference name used in Caller Reference
    (helpful for identifying single health_check set amongst others)
    """
    regions: pulumi.Output[list]
    """
    A list of AWS regions that you want Amazon Route 53 health checkers to check the specified endpoint from.
    """
    request_interval: pulumi.Output[float]
    """
    The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health-check request.
    """
    resource_path: pulumi.Output[str]
    """
    The path that you want Amazon Route 53 to request when performing health checks.
    """
    search_string: pulumi.Output[str]
    """
    String searched in the first 5120 bytes of the response body for check to be considered healthy. Only valid with `HTTP_STR_MATCH` and `HTTPS_STR_MATCH`.
    """
    tags: pulumi.Output[dict]
    """
    A mapping of tags to assign to the health check.
    """
    type: pulumi.Output[str]
    """
    The protocol to use when performing health checks. Valid values are `HTTP`, `HTTPS`, `HTTP_STR_MATCH`, `HTTPS_STR_MATCH`, `TCP`, `CALCULATED` and `CLOUDWATCH_METRIC`.
    """
    def __init__(__self__, resource_name, opts=None, child_health_threshold=None, child_healthchecks=None, cloudwatch_alarm_name=None, cloudwatch_alarm_region=None, enable_sni=None, failure_threshold=None, fqdn=None, insufficient_data_health_status=None, invert_healthcheck=None, ip_address=None, measure_latency=None, port=None, reference_name=None, regions=None, request_interval=None, resource_path=None, search_string=None, tags=None, type=None, __props__=None, __name__=None, __opts__=None):
        """
        Provides a Route53 health check.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[float] child_health_threshold: The minimum number of child health checks that must be healthy for Route 53 to consider the parent health check to be healthy. Valid values are integers between 0 and 256, inclusive
        :param pulumi.Input[list] child_healthchecks: For a specified parent health check, a list of HealthCheckId values for the associated child health checks.
        :param pulumi.Input[str] cloudwatch_alarm_name: The name of the CloudWatch alarm.
        :param pulumi.Input[str] cloudwatch_alarm_region: The CloudWatchRegion that the CloudWatch alarm was created in.
        :param pulumi.Input[bool] enable_sni: A boolean value that indicates whether Route53 should send the `fqdn` to the endpoint when performing the health check. This defaults to AWS' defaults: when the `type` is "HTTPS" `enable_sni` defaults to `true`, when `type` is anything else `enable_sni` defaults to `false`.
        :param pulumi.Input[float] failure_threshold: The number of consecutive health checks that an endpoint must pass or fail.
        :param pulumi.Input[str] fqdn: The fully qualified domain name of the endpoint to be checked.
        :param pulumi.Input[str] insufficient_data_health_status: The status of the health check when CloudWatch has insufficient data about the state of associated alarm. Valid values are `Healthy` , `Unhealthy` and `LastKnownStatus`.
        :param pulumi.Input[bool] invert_healthcheck: A boolean value that indicates whether the status of health check should be inverted. For example, if a health check is healthy but Inverted is True , then Route 53 considers the health check to be unhealthy.
        :param pulumi.Input[str] ip_address: The IP address of the endpoint to be checked.
        :param pulumi.Input[bool] measure_latency: A Boolean value that indicates whether you want Route 53 to measure the latency between health checkers in multiple AWS regions and your endpoint and to display CloudWatch latency graphs in the Route 53 console.
        :param pulumi.Input[float] port: The port of the endpoint to be checked.
        :param pulumi.Input[str] reference_name: This is a reference name used in Caller Reference
               (helpful for identifying single health_check set amongst others)
        :param pulumi.Input[list] regions: A list of AWS regions that you want Amazon Route 53 health checkers to check the specified endpoint from.
        :param pulumi.Input[float] request_interval: The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health-check request.
        :param pulumi.Input[str] resource_path: The path that you want Amazon Route 53 to request when performing health checks.
        :param pulumi.Input[str] search_string: String searched in the first 5120 bytes of the response body for check to be considered healthy. Only valid with `HTTP_STR_MATCH` and `HTTPS_STR_MATCH`.
        :param pulumi.Input[dict] tags: A mapping of tags to assign to the health check.
        :param pulumi.Input[str] type: The protocol to use when performing health checks. Valid values are `HTTP`, `HTTPS`, `HTTP_STR_MATCH`, `HTTPS_STR_MATCH`, `TCP`, `CALCULATED` and `CLOUDWATCH_METRIC`.

        > This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/route53_health_check.html.markdown.
        """
        # Backwards-compatibility shims for the deprecated __name__/__opts__
        # keyword spellings of resource_name/opts.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        # __props__ may only be supplied together with opts.id (lookup path).
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['child_health_threshold'] = child_health_threshold
            __props__['child_healthchecks'] = child_healthchecks
            __props__['cloudwatch_alarm_name'] = cloudwatch_alarm_name
            __props__['cloudwatch_alarm_region'] = cloudwatch_alarm_region
            __props__['enable_sni'] = enable_sni
            __props__['failure_threshold'] = failure_threshold
            __props__['fqdn'] = fqdn
            __props__['insufficient_data_health_status'] = insufficient_data_health_status
            __props__['invert_healthcheck'] = invert_healthcheck
            __props__['ip_address'] = ip_address
            __props__['measure_latency'] = measure_latency
            __props__['port'] = port
            __props__['reference_name'] = reference_name
            __props__['regions'] = regions
            __props__['request_interval'] = request_interval
            __props__['resource_path'] = resource_path
            __props__['search_string'] = search_string
            __props__['tags'] = tags
            # `type` is the only required input property.
            if type is None:
                raise TypeError("Missing required property 'type'")
            __props__['type'] = type
        super(HealthCheck, __self__).__init__(
            'aws:route53/healthCheck:HealthCheck',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None, child_health_threshold=None, child_healthchecks=None, cloudwatch_alarm_name=None, cloudwatch_alarm_region=None, enable_sni=None, failure_threshold=None, fqdn=None, insufficient_data_health_status=None, invert_healthcheck=None, ip_address=None, measure_latency=None, port=None, reference_name=None, regions=None, request_interval=None, resource_path=None, search_string=None, tags=None, type=None):
        """
        Get an existing HealthCheck resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[float] child_health_threshold: The minimum number of child health checks that must be healthy for Route 53 to consider the parent health check to be healthy. Valid values are integers between 0 and 256, inclusive
        :param pulumi.Input[list] child_healthchecks: For a specified parent health check, a list of HealthCheckId values for the associated child health checks.
        :param pulumi.Input[str] cloudwatch_alarm_name: The name of the CloudWatch alarm.
        :param pulumi.Input[str] cloudwatch_alarm_region: The CloudWatchRegion that the CloudWatch alarm was created in.
        :param pulumi.Input[bool] enable_sni: A boolean value that indicates whether Route53 should send the `fqdn` to the endpoint when performing the health check. This defaults to AWS' defaults: when the `type` is "HTTPS" `enable_sni` defaults to `true`, when `type` is anything else `enable_sni` defaults to `false`.
        :param pulumi.Input[float] failure_threshold: The number of consecutive health checks that an endpoint must pass or fail.
        :param pulumi.Input[str] fqdn: The fully qualified domain name of the endpoint to be checked.
        :param pulumi.Input[str] insufficient_data_health_status: The status of the health check when CloudWatch has insufficient data about the state of associated alarm. Valid values are `Healthy` , `Unhealthy` and `LastKnownStatus`.
        :param pulumi.Input[bool] invert_healthcheck: A boolean value that indicates whether the status of health check should be inverted. For example, if a health check is healthy but Inverted is True , then Route 53 considers the health check to be unhealthy.
        :param pulumi.Input[str] ip_address: The IP address of the endpoint to be checked.
        :param pulumi.Input[bool] measure_latency: A Boolean value that indicates whether you want Route 53 to measure the latency between health checkers in multiple AWS regions and your endpoint and to display CloudWatch latency graphs in the Route 53 console.
        :param pulumi.Input[float] port: The port of the endpoint to be checked.
        :param pulumi.Input[str] reference_name: This is a reference name used in Caller Reference
               (helpful for identifying single health_check set amongst others)
        :param pulumi.Input[list] regions: A list of AWS regions that you want Amazon Route 53 health checkers to check the specified endpoint from.
        :param pulumi.Input[float] request_interval: The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health-check request.
        :param pulumi.Input[str] resource_path: The path that you want Amazon Route 53 to request when performing health checks.
        :param pulumi.Input[str] search_string: String searched in the first 5120 bytes of the response body for check to be considered healthy. Only valid with `HTTP_STR_MATCH` and `HTTPS_STR_MATCH`.
        :param pulumi.Input[dict] tags: A mapping of tags to assign to the health check.
        :param pulumi.Input[str] type: The protocol to use when performing health checks. Valid values are `HTTP`, `HTTPS`, `HTTP_STR_MATCH`, `HTTPS_STR_MATCH`, `TCP`, `CALCULATED` and `CLOUDWATCH_METRIC`.

        > This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/route53_health_check.html.markdown.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()
        __props__["child_health_threshold"] = child_health_threshold
        __props__["child_healthchecks"] = child_healthchecks
        __props__["cloudwatch_alarm_name"] = cloudwatch_alarm_name
        __props__["cloudwatch_alarm_region"] = cloudwatch_alarm_region
        __props__["enable_sni"] = enable_sni
        __props__["failure_threshold"] = failure_threshold
        __props__["fqdn"] = fqdn
        __props__["insufficient_data_health_status"] = insufficient_data_health_status
        __props__["invert_healthcheck"] = invert_healthcheck
        __props__["ip_address"] = ip_address
        __props__["measure_latency"] = measure_latency
        __props__["port"] = port
        __props__["reference_name"] = reference_name
        __props__["regions"] = regions
        __props__["request_interval"] = request_interval
        __props__["resource_path"] = resource_path
        __props__["search_string"] = search_string
        __props__["tags"] = tags
        __props__["type"] = type
        return HealthCheck(resource_name, opts=opts, __props__=__props__)

    def translate_output_property(self, prop):
        # Map provider camelCase property names back to Python snake_case.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Map Python snake_case property names to provider camelCase.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 2.25 | 2 |
aram_predictor/get_matches.py | ericmarkmartin/aram_predictor | 0 | 12766145 | <filename>aram_predictor/get_matches.py
from match_crawler import cass_setup, crawl_matches
from settings import RIOT_API_KEY, SEED_SUMMONER
from itertools import islice
import cassiopeia as cass
import pandas as pd
# Output schema: the match id followed by one column per player slot,
# five per side (blue_1..blue_5, red_1..red_5).
DF_COLUMNS = ["match_id"] + [
    "{}_{}".format(side, i) for side in ["blue", "red"] for i in range(1, 6)
]
def champions_from_match(match):
    """Collect the champion played by every participant across both teams,
    in team order then participant order."""
    champions = []
    for side in match.teams:
        for member in side.participants:
            champions.append(member.champion)
    return champions
def row_from_match(match):
    """Flatten a match into one row: the match id followed by the id of
    each participant's champion (see DF_COLUMNS for the ordering)."""
    row = [match.id]
    for champ in champions_from_match(match):
        row.append(champ.id)
    return row
def df_from_matches(matches):
    """Build a DataFrame with one champion-id row per match."""
    rows = [row_from_match(m) for m in matches]
    return pd.DataFrame(rows, columns=DF_COLUMNS)
if __name__ == "__main__":
    # Authenticate against the Riot API for the NA region, then crawl
    # matches outward from the configured seed summoner.
    cass_setup(RIOT_API_KEY, "NA")
    seed_summoner = cass.get_summoner(name=SEED_SUMMONER)
    # Keep only Poro King matches from the crawl and stop after 20 of them.
    poro_kings = islice(
        (
            match
            for match in crawl_matches(seed_summoner, 10)
            if match.mode == cass.GameMode.poro_king
        ),
        20,
    )
    df = df_from_matches(poro_kings)
    df.to_csv(path_or_buf="matches.csv")
| 2.71875 | 3 |
cvpl-homepage/homepage/site.py | robinsax/canvas-plugin-multirepo | 0 | 12766146 | # coding: utf-8
'''
Pages.
'''
import re
import canvas as cv
from canvas.plugins import users
@cv.alter_root_page_view
def alter_root_page_view(PageView):
    """Wrap the root page view to inject site-wide assets and normalise
    the page title into "<page title> | canvas" form."""
    class CustomPageView(PageView):

        def setup(self):
            # Site-wide assets wrap whatever the page itself declared.
            self.assets = ('site.js', 'site.css', *self.assets, 'decor.js')
            if self.title is None:
                self.title = 'canvas | modern web apps'
            else:
                title = self.title.lower()
                # Strip a leading HTTP status code (e.g. "404 ") from error
                # page titles. lstrip() removes the whitespace matched by \s;
                # previously only 3 characters were sliced off, which left a
                # leading space in the final title.
                if re.match(r'[0-9]{3}\s', title):
                    title = title[3:].lstrip()
                self.title = ' | '.join((title, 'canvas'))

    return CustomPageView
# Landing page. title=None lets the wrapped page view substitute the
# default site title.
@cv.page('/', title=None, assets=('home.js', 'home.css'))
class Homepage: pass
# Login page, served with its page-specific script.
@cv.page('/login', title='log in', assets=('login.js',))
class LoginPage: pass
@cv.page('/new-plugin', title='register a plugin', assets=('plugins.js',))
class PluginRegisterPage:

    @users.require_user
    def on_get(self, context):
        # Only authenticated users may load the registration page.
        return super().on_get(context)
@cv.page('/dashboard', title='my dashboard', assets=('dash.js',))
class DashboardPage:

    @users.require_user
    def on_get(self, context):
        # The dashboard is only available to authenticated users.
        return super().on_get(context)
# Public plugin listing page.
@cv.page('/plugins', title='plugins', assets=('plugins.js', 'plugins.css'))
class PluginPage: pass
| 2.21875 | 2 |
source/control_plane/python/lambda/ttl_checker/ttl_checker.py | aneoconsulting/aws-htc-grid | 1 | 12766147 | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 https://aws.amazon.com/apache-2-0/
import boto3
import time
import os
from botocore.exceptions import ClientError
from boto3.dynamodb.conditions import Key, Attr
from utils.performance_tracker import EventsCounter, performance_tracker_initializer
from utils.state_table_common import *
from utils import grid_error_logger as errlog
from api.queue_manager import queue_manager
# Deployment region and metrics plumbing, all injected via the Lambda
# environment.
region = os.environ["REGION"]
perf_tracker = performance_tracker_initializer(
    os.environ["METRICS_ARE_ENABLED"],
    os.environ["METRICS_TTL_CHECKER_LAMBDA_CONNECTION_STRING"],
    os.environ["METRICS_GRAFANA_PRIVATE_IP"])

# dynamodb = boto3.resource('dynamodb')
# table = dynamodb.Table(os.environ['TASKS_STATUS_TABLE_NAME'])
from api.state_table_manager import state_table_manager
# Abstraction over the task status table (backend chosen by environment).
state_table = state_table_manager(
    os.environ['TASKS_STATUS_TABLE_SERVICE'],
    os.environ['TASKS_STATUS_TABLE_CONFIG'],
    os.environ['TASKS_STATUS_TABLE_NAME'],
    os.environ['DYNAMODB_ENDPOINT_URL'])

# sqs_res = boto3.resource('sqs', region_name=region, endpoint_url=os.environ['SQS_PORT'])
# sqs_cli = boto3.client('sqs', endpoint_url=os.environ['SQS_PORT'])
# queue = sqs_res.get_queue_by_name(QueueName=os.environ['TASKS_QUEUE_NAME'])
# dlq = sqs_res.get_queue_by_name(QueueName=os.environ['TASKS_QUEUE_DLQ_NAME'])
# Main task queue abstraction.
queue = queue_manager(
    grid_queue_service=os.environ['GRID_QUEUE_SERVICE'],
    grid_queue_config=os.environ['GRID_QUEUE_CONFIG'],
    endpoint_url=os.environ["SQS_ENDPOINT_URL"],
    queue_name=os.environ['TASKS_QUEUE_NAME'],
    region=region)
# Dead-letter queue for tasks this checker cannot repair.
dlq = queue_manager(
    grid_queue_service="SQS",  # TODO extend parameters to configure this queue.
    grid_queue_config=os.environ['GRID_QUEUE_CONFIG'],
    endpoint_url=os.environ["SQS_ENDPOINT_URL"],
    queue_name=os.environ['TASKS_QUEUE_DLQ_NAME'],
    region=region)

# A task is failed permanently once it has been retried this many times.
MAX_RETRIES = 5
RETRIEVE_EXPIRED_TASKS_LIMIT = 200

# TODO: implement archival after 10 days in S3
def lambda_handler(event, context):
    """Scan for tasks whose heartbeat has expired and retry or fail them.

    Triggered by a CloudWatch scheduled event. For each expired task:
      1. conditionally re-acquire it from its last owner (a no-op if the
         owner refreshed the heartbeat in the meantime);
      2. fail it once it has used up MAX_RETRIES, otherwise bump its retry
         count and make its SQS message visible again;
      3. route anything unrepairable to the dead-letter queue.

    Args:
        event(dict): a CloudWatch Event generated every minute
        context: Lambda context object (unused)

    Returns:
        None. Counters/timings are submitted via the module-level perf_tracker.
    """
    stats_obj = {'01_invocation_tstmp': {"label": "None", "tstmp": int(round(time.time() * 1000))}}

    event_counter = EventsCounter(
        ["counter_expired_tasks", "counter_failed_to_acquire",
         "counter_failed_tasks", "counter_released_tasks", "counter_inconsistent_state", "counter_tasks_queue_size"])

    for expired_tasks in state_table.query_expired_tasks():

        event_counter.increment("counter_expired_tasks", len(expired_tasks))
        event_counter.increment("counter_tasks_queue_size", queue.get_queue_length())

        for item in expired_tasks:
            print("Processing expired task: {}".format(item))
            task_id = item.get('task_id')
            owner_id = item.get('task_owner')
            current_heartbeat_timestamp = item.get('heartbeat_expiration_timestamp')

            try:
                # Conditional acquire: only succeeds if nobody refreshed the
                # heartbeat since we read it.
                is_acquired = state_table.acquire_task_for_ttl_lambda(
                    task_id, owner_id, current_heartbeat_timestamp)

                if not is_acquired:
                    # task has been updated at the very last second...
                    event_counter.increment("counter_failed_to_acquire")
                    continue

                # retreive current number of retries and SQS_handler
                retries, sqs_handler_id, task_priority = retreive_retries_and_sqs_handler_and_priority(task_id)
                print("Number of retires for task[{}]: {} Priority: {}".format(task_id, retries, task_priority))
                print("Last owner for task [{}]: {}".format(task_id, owner_id))

                # TODO: MAX_RETRIES should be extracted from task definition... Store in DDB?
                # >= (was ==) so tasks whose count somehow overshot the limit
                # still get failed instead of retrying forever.
                if retries >= MAX_RETRIES:
                    print("Failing task {} after {} retries".format(task_id, retries))
                    event_counter.increment("counter_failed_tasks")
                    fail_task(task_id, sqs_handler_id, task_priority)
                    continue

                event_counter.increment("counter_released_tasks")

                state_table.retry_task(task_id, retries + 1)

                try:
                    # Task can be acquired by an agent from this point
                    reset_sqs_vto(sqs_handler_id, task_priority)
                    print("SUCCESS FIX for {}".format(task_id))
                except ClientError:
                    try:
                        errlog.log('Failed to reset VTO trying to delete: {} '.format(task_id))
                        # BUGFIX: task_priority was previously omitted, which
                        # raised TypeError instead of deleting the message.
                        delete_message_from_queue(sqs_handler_id, task_priority)
                    except ClientError:
                        errlog.log('Inconsistent task: {} sending do DLQ'.format(task_id))
                        event_counter.increment("counter_inconsistent_state")
                        set_task_inconsistent(task_id)
                        send_to_dlq(item)

            except ClientError as e:
                errlog.log('Lambda ttl error: {}'.format(e.response['Error']['Message']))
                print("Cannot process task {} : {}".format(task_id, e))
                print("Sending task {} to DLQ...".format(task_id))
                send_to_dlq(item)

            except Exception as e:
                print("Cannot process task {} : {}".format(task_id, e))
                print("Sending task {} to DLQ...".format(task_id))
                errlog.log('Lambda ttl error: {}'.format(e))
                send_to_dlq(item)

    stats_obj['02_completion_tstmp'] = {"label": "ttl_execution_time", "tstmp": int(round(time.time() * 1000))}
    perf_tracker.add_metric_sample(
        stats_obj,
        event_counter=event_counter,
        from_event="01_invocation_tstmp",
        to_event="02_completion_tstmp"
    )
    perf_tracker.submit_measurements()
def fail_task(task_id, sqs_handler_id, task_priority):
    """Permanently fail *task_id*: drop its queue message, then flip its
    state-table status to failed.

    Args:
        task_id(str): the id of the task to update
        sqs_handler_id(str): the sqs handler associated to this task
        task_priority(int): the priority of the task

    Raises:
        ClientError: propagated after logging if either step fails.
    """
    try:
        delete_message_from_queue(sqs_handler_id, task_priority)
        state_table.update_task_status_to_failed(task_id)
    except ClientError as exc:
        errlog.log(f"Cannot fail task {task_id} : {exc}")
        raise exc
def set_task_inconsistent(task_id):
    """Flag *task_id* as being in an inconsistent state in the state table.

    Raises:
        ClientError: propagated after logging if the table update fails.
    """
    try:
        state_table.update_task_status_to_inconsistent(task_id)
    except ClientError as exc:
        errlog.log(f"Cannot set task to inconsystent {task_id} : {exc}")
        raise exc
def delete_message_from_queue(sqs_handler_id, task_priority):
    """Remove the message identified by *sqs_handler_id* from the task queue.

    Args:
        sqs_handler_id(str): handle of the message to delete
        task_priority(int): priority lane the message belongs to

    Raises:
        ClientError: propagated after logging if the deletion fails.
    """
    try:
        queue.delete_message(sqs_handler_id, task_priority)
    except ClientError as exc:
        errlog.log(f"Cannot delete message {sqs_handler_id} : {exc}")
        raise exc
def retreive_retries_and_sqs_handler_and_priority(task_id):
    """Look up a task's retry count, SQS handler id and priority.

    Args:
        task_id(str): the id of the expired task

    Returns:
        (retries, sqs_handler_id, task_priority) as stored in the state table.

    Raises:
        ClientError: propagated after logging if the lookup fails.
    """
    try:
        task_record = state_table.get_task_by_id(task_id)
        # CHeck if 1 and only 1
        return (
            task_record.get('retries'),
            task_record.get('sqs_handler_id'),
            task_record.get('task_priority'),
        )
    except ClientError as exc:
        errlog.log(f"Cannot retreive retries and handler for task {task_id} : {exc}")
        raise exc
def reset_sqs_vto(handler_id, task_priority):
    """Zero the visibility timeout of the queued message *handler_id* so an
    agent can pick the task up again immediately.

    Raises:
        ClientError: propagated after logging if the update fails.
    """
    try:
        queue.change_visibility(handler_id, 0, task_priority)
    except ClientError as exc:
        errlog.log(f"Cannot reset VTO for message {handler_id} : {exc}")
        raise exc
def send_to_dlq(task):
    """Forward an unprocessable *task* record to the dead-letter queue."""
    print(f"Sending task [{task}] to DLQ")
    dlq.send_message(message_bodies=[str(task)])
| 1.617188 | 2 |
python/day20.py | kdrag0n/aoc2020 | 5 | 12766148 | <reponame>kdrag0n/aoc2020<gh_stars>1-10
#!/usr/bin/env python3
import sys
from collections import namedtuple
def ints(itr):
    """Convert every element of *itr* to int, preserving order."""
    return list(map(int, itr))
# Input file comes from argv[1]; tiles are separated by blank lines.
with open(sys.argv[1], "r") as f:
    lines = [l for l in f.read().split("\n\n") if l]

# Template accumulators; ilist/imap are unused in this file, and
# total/result/other are only printed in the summary at the bottom.
ilist = []
imap = {}

total = 0
result = 0
other = 0

#
# 123
# 456
# 789
#
# 741
# 852
# 963
#
# 0,0 -> 2,0
# 1,0 -> 2,1
# 2,0 ->

# Per-side lists of tiles whose border can sit adjacent to each of a
# tile's four borders.
BorderCandidates = namedtuple('BorderCandidates', ['top', 'bottom', 'left', 'right'])
class Tile():
    """A square puzzle tile with rotation/flip helpers and border accessors."""

    def __init__(self, id, view):
        self.id = id
        self.view = view  # list of rows, each a list of cells

    def __hash__(self):
        return hash((self.id, tuple(tuple(row) for row in self.view)))

    def __eq__(self, other):
        # Defined alongside __hash__ (which was previously missing its eq
        # counterpart) so content-equal tiles collapse when stored in sets.
        if not isinstance(other, Tile):
            return NotImplemented
        return self.id == other.id and self.view == other.view

    def hflip(self):
        """Return a new Tile mirrored left-right."""
        nv = [list(reversed(row)) for row in self.view]
        return Tile(self.id, nv)

    def vflip(self):
        """Return a new Tile mirrored top-bottom."""
        return Tile(self.id, list(reversed(self.view)))

    def rot(self):
        """Return a new Tile rotated 90 degrees clockwise."""
        nv = [[" "] * len(self.view[0]) for _ in range(len(self.view))]
        for y, row in enumerate(self.view):
            for x, c in enumerate(row):
                nv[x][len(self.view) - 1 - y] = c
        return Tile(self.id, nv)

    def topb(self):
        return tuple(self.view[0])

    def bottomb(self):
        return tuple(self.view[-1])

    def leftb(self):
        return tuple([x[0] for x in self.view])

    def rightb(self):
        return tuple([x[-1] for x in self.view])

    def __str__(self):
        return "\n".join("".join(map(str, x)) for x in self.view)

    def __repr__(self):
        return str(self)
# Sanity check: print a 3x3 tile before and after one clockwise rotation.
t = Tile(1, [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(t)
print()
t = t.rot()
print(t)
# Parse each input block into a Tile: the first line holds "Tile <id>:",
# the remaining lines the grid.
tiles = []
for l in lines:
    a = [x for x in l.split("\n") if x]
    tid = int(a[0].replace(":", "").replace("Tile ", ""))
    view = [list(x) for x in a[1:]]
    tile = Tile(tid, view)
    tiles += [tile]
    # Orientation variants (rotations/flips) are currently disabled; with
    # them commented out, each tile appears only in its original orientation.
    rots = [tile] #, tile.rot(), tile.rot().rot(), tile.rot().rot().rot()]
    for rott in rots:
        tiles += [rott] #, rott.hflip(), rott.vflip()]
# NOTE(review): each tile object is appended twice above; the set collapses
# those duplicates again.
tiles = set(tiles)
tile_cands = {}

# For every tile, collect the tiles whose opposite border matches each of its
# four sides (prints are debug output).
for mtile in tiles:
    print()
    print()
    print(mtile)
    top_adj = []
    left_adj = []
    right_adj = []
    bottom_adj = []
    # NOTE(review): mtile itself is among the candidates and is not excluded;
    # a tile whose own top equals its bottom would match itself.
    for cand in tiles:
        # A candidate sits above mtile when its bottom border equals our top
        # border, and symmetrically for the other three sides.
        if mtile.topb() == cand.bottomb():
            top_adj += [cand]
        if mtile.bottomb() == cand.topb():
            bottom_adj += [cand]
        if mtile.leftb() == cand.rightb():
            left_adj += [cand]
        if mtile.rightb() == cand.leftb():
            right_adj += [cand]
    print([top_adj, left_adj, right_adj, bottom_adj])
    print()
    tile_cands[mtile] = BorderCandidates(top_adj, bottom_adj, left_adj, right_adj)
# Corners: a tile with neighbours only to the right and below (none above or
# to the left) is the top-left corner in its current orientation.
print('\n\n\n\n\n\n')
for mtile, bcands in tile_cands.items():
    if len(bcands.right) == 1 and len(bcands.bottom) == 1 and len(bcands.top) == 0 and len(bcands.left) == 0:
        print(f'top left\n{mtile}', '\n\n\n')

# NOTE(review): total/result/other are never assigned after their zero
# initialisation above — this looks like leftover puzzle-template scaffolding.
print(f"Total: {total}")
print(f"Result: {result}")
print(f"Other: {other}")
| 3.0625 | 3 |
systori/lib/middleware.py | systori/systori | 12 | 12766149 | import time
from django.utils.deprecation import MiddlewareMixin
class StatsMiddleware(MiddlewareMixin):
    """Django middleware that measures how long each request/response cycle takes."""

    def process_request(self, request):
        # Fixes the original parameter name typo (`selfs` -> `self`).
        # Stamp the request with its arrival time for process_response.
        request.start_time = time.time()

    def process_response(self, request, response):
        total = time.time() - request.start_time
        # NOTE(review): consider the `logging` module instead of print for
        # production use; print kept to preserve current behavior.
        print(f"cycle took {total}")
        return response
src/pages/gd_solution_page.py | kryvokhyzha/gradient-descent | 1 | 12766150 | <filename>src/pages/gd_solution_page.py
import streamlit as st
import pandas as pd
import numpy as np
import os
import time
from collections import namedtuple
from sklearn.datasets import make_regression, make_classification
import matplotlib.pyplot as plt
from utils.constants import *
from db import db_insert
from plot import plot_regression_all, plot_classification_all
def show_side_bar():
    """Render the sidebar controls and return the chosen GD configuration.

    Returns:
        (Properties, Choice): ``Properties`` holds the resolved values used
        by the solver (callables/objects looked up in the MODIFICATIONS /
        HYPOTHESES / COST_FUNCTIONS / SCALE / REGULARIZATION mappings plus
        the numeric settings); ``Choice`` keeps the raw human-readable
        selections (e.g. for persisting alongside results).
    """
    st.sidebar.header('Algorithm modification')
    modification = st.sidebar.selectbox('', key='modification_slbox',
                                        options=list(MODIFICATIONS.keys()))

    st.sidebar.header('Hypothesis function')
    hypothesis = st.sidebar.selectbox('', key='hypothesis_slbox',
                                      options=list(HYPOTHESES.keys()))

    # The polynomial-degree input only makes sense for non-linear hypotheses.
    if hypothesis != 'Linear':
        st.sidebar.header('Polynomial degree')
        degree = int(st.sidebar.number_input('', key='degree', min_value=1, max_value=4, value=2, step=1))
    else:
        degree = 1

    st.sidebar.header('Cost function')
    cost_function = st.sidebar.selectbox('', key='costf_slbox',
                                         options=list(COST_FUNCTIONS.keys()))

    st.sidebar.header('Regularization')
    regularization = st.sidebar.selectbox('', key='regularization_slbox',
                                          options=list(REGULARIZATION.keys()))

    st.sidebar.header('Scaling function')
    scaler = st.sidebar.selectbox('', key='scale_slbox', options=list(SCALE.keys()))

    # Only ask for a regularization coefficient when regularization is on.
    if regularization != 'None':
        st.sidebar.header('Regularization coeff')
        reg_coef = float(st.sidebar.number_input('', key='reg_coef', min_value=0.0, value=1.0, step=0.1))
    else:
        reg_coef = 0.0

    st.sidebar.header('Learning rate')
    alpha = st.sidebar.slider('', 0.001, 0.1, step=0.001, format='%f', key='learning_rate')

    st.sidebar.header('Early stopping')
    eps = st.sidebar.slider('', 0.0, 0.1, step=0.001, format='%f', key='early_stopping')

    st.sidebar.header('Max number of itteration')
    max_num_itter = int(st.sidebar.number_input('', key='max_num_itter', min_value=1, max_value=10000, value=100, step=1))

    Properties = namedtuple('Properties', ['modification', 'hypothesis', 'degree', 'cost_function',
                                           'scaler', 'regularization', 'reg_coef', 'alpha', 'eps', 'max_num_itter'])
    Choice = namedtuple('Choice', ['modification', 'hypothesis', 'cost_function',
                                   'scaler', 'regularization'])

    return Properties(modification=MODIFICATIONS[modification], hypothesis=HYPOTHESES[hypothesis], degree=degree,
                      cost_function=COST_FUNCTIONS[cost_function], scaler=SCALE[scaler],
                      regularization=REGULARIZATION[regularization], reg_coef=reg_coef,
                      eps=eps, alpha=alpha, max_num_itter=max_num_itter), Choice(modification=modification,
                      hypothesis=hypothesis, cost_function=cost_function, scaler=scaler, regularization = regularization)
def select_task_type():
    """Render the task-type selector and return the chosen option string."""
    st.header('Please, select type of task:')
    choices = ['Individual', 'Generate regression task', 'Generate classification task']
    return st.selectbox('', key='type_slbox', options=choices)
def params_for_generate_regression():
    """Collect parameters for regression-dataset generation from the user.

    Returns:
        tuple: (kwargs dict for the dataset generator, polynomial degree to
        apply to the generated targets).
    """
    st.header('Please, select parameters for dataset generation')
    samples = int(st.number_input('The number of samples', key='n_samples_r', min_value=1, max_value=1000, value=100, step=1))
    features = int(st.number_input('The number of features', key='n_features_r', min_value=1, max_value=10, value=1, step=1))
    # Informative features cannot exceed the total feature count.
    informative = int(st.number_input('The number of informative features', key='n_informative_r', min_value=1, max_value=features, value=1, step=1))
    poly_degree = int(st.number_input('The number of degree', key='degree_r', min_value=1, max_value=4, value=1, step=1))
    noise_std = float(st.number_input('The standard deviation of the gaussian noise applied to the output',
                                      key='noise_r', min_value=0.0, value=10.0, step=0.1))
    kwargs = {
        'n_samples': samples,
        'n_features': features,
        'n_informative': informative,
        'noise': noise_std,
    }
    return kwargs, poly_degree
def params_for_generate_classification():
    """Collect parameters for classification-dataset generation from the user.

    Returns:
        dict: kwargs for the dataset generator.
    """
    st.header('Please, select parameters for dataset generation')
    samples = int(st.number_input('The number of samples', key='n_samples_c', min_value=1, max_value=1000, value=100, step=1))
    features = int(st.number_input('The number of features', key='n_features_c', min_value=1, max_value=10, value=1, step=1))
    informative = int(st.number_input('The number of informative features', key='n_informative_c', min_value=1, max_value=features, value=1, step=1))
    # Redundant features are capped so informative + redundant <= features.
    redundant_label = 'The number of redundant features. These features are generated as random linear combinations of the informative features'
    redundant = int(st.number_input(redundant_label, key='n_redundant_c', min_value=0, max_value=int(features - informative), value=0, step=1))
    clusters = int(st.number_input('The number of clusters per class.', key='n_clusters_per_class_c', min_value=1, max_value=informative, value=1, step=1))
    return {
        'n_samples': samples,
        'n_features': features,
        'n_redundant': redundant,
        'n_informative': informative,
        'n_clusters_per_class': clusters,
    }
def generate_regression_task(h_type, degree, scaler, data_degree, **kwargs):
    """Build a synthetic regression task and wrap it in a hypothesis object.

    Targets are reshaped to a column vector, optionally scaled, then raised
    element-wise to *data_degree* to make the relation non-linear.

    Args:
        h_type: hypothesis class/factory taking (X, y, degree=...).
        degree: polynomial degree for the hypothesis itself.
        scaler: fitted-transform scaler instance, or None to skip scaling.
        data_degree: exponent applied to the (scaled) targets.
        **kwargs: forwarded to sklearn's make_regression.
    """
    features, targets = make_regression(**kwargs)
    targets = targets.reshape((len(targets), 1))
    if scaler is not None:
        features = scaler.fit_transform(features)
        targets = scaler.fit_transform(targets)
    return h_type(features, targets ** data_degree, degree=degree)
def generate_clasiffication_task(h_type, degree, scaler, **kwargs):
    """Build a synthetic classification task and wrap it in a hypothesis.

    NOTE(review): the function name misspells "classification" but is kept
    unchanged for its existing call sites.

    Args:
        h_type: hypothesis class/factory taking (X, y, degree=...).
        degree: polynomial degree for the hypothesis.
        scaler: fitted-transform scaler instance, or None to skip scaling.
        **kwargs: forwarded to sklearn's make_classification.
    """
    features, labels = make_classification(**kwargs)
    if scaler is not None:
        features = scaler.fit_transform(features)
    return h_type(features, labels.reshape((len(labels), 1)), degree=degree)
def individual_task(h_type, degree, scaler):
    """Load the bundled restaurant-revenue dataset and wrap it in a hypothesis.

    Args:
        h_type: hypothesis class/factory taking (X, y, degree=...).
        degree: polynomial degree for the hypothesis.
        scaler: fitted-transform scaler instance, or None to skip scaling.
    """
    df = pd.read_csv('data/restaurant_revenue.txt', header=None, sep=',')
    # Column 0 is the feature, column 1 the target (two-column CSV).
    X = df[[0]].values
    y = df[[1]].values
    if scaler is not None:
        X = scaler.fit_transform(X)
        y = scaler.fit_transform(y)
    return h_type(X, y, degree=degree)
def solve_btn(h, properties, choice, task_type):
    """Render the Solve button; on click, run gradient descent on *h* and show results.

    Args:
        h: hypothesis object (already holding data and initial weights).
        properties: resolved configuration returned by show_side_bar().
        choice: raw human-readable selections (stored via db_insert).
        task_type: 'regression' or 'classification' — selects the plot helper.
    """
    if st.button('Solve', key='solve_btn'):
        st.text('Started weights:')
        st.write(pd.DataFrame(h.weight, columns=['w']))

        start_time = time.time()
        with st.spinner('waiting...'):
            # Run the selected GD modification; the returned histories are
            # consumed by the plotting helpers below.
            loss_history, weights_history, y_pred_history = properties.modification(h, properties.max_num_itter, properties.cost_function,
                                                           regularization=properties.regularization, C=properties.reg_coef,
                                                           alpha=properties.alpha, eps=properties.eps)
        st.success('Calculations have already finished! Look at the results:')

        # A diverged run leaves NaNs in the weights; report instead of plotting.
        if np.isnan(h.weight).any():
            st.error("Result approximates to infinity. Please, select another parameters.")
        else:
            st.text('Finished weights:')
            st.write(pd.DataFrame(h.weight, columns=['w']))
            # Record the run (hypothesis, config, elapsed time, raw choices).
            db_insert(h, properties, time.time() - start_time, choice)
            if task_type == 'classification':
                plot_classification_all(h, properties, weights_history, loss_history, y_pred_history)
            elif task_type == 'regression':
                plot_regression_all(h, properties, weights_history, loss_history, y_pred_history)
def gd_solution_page():
    """Main page: wire the sidebar configuration, task selection and solver together."""
    st.title('Gradient Descent')
    properties, choice = show_side_bar()
    task_type = select_task_type()

    # Guard against incompatible combinations.  Branch order matters: the
    # first two give targeted hints (BCE <-> Sigmoid pairing), the last two
    # catch the remaining cost-function/task mismatches with a generic message.
    if choice.cost_function == 'BCE' and choice.hypothesis != 'Sigmoid' and task_type == 'Generate classification task':
        st.warning('Please, for BCE cost function use Sigmoid hypothesis!')
        return
    elif choice.cost_function != 'BCE' and choice.hypothesis == 'Sigmoid' and task_type == 'Generate classification task':
        st.warning('Please, for Sigmoid hypothesis use BCE cost function!')
        return
    elif task_type == 'Generate classification task' and choice.cost_function != 'BCE':
        st.warning('Please, select another cost function for correct working!')
        return
    elif task_type != 'Generate classification task' and choice.cost_function == 'BCE':
        st.warning('Please, select another cost function for correct working!')
        return

    # Build the hypothesis for the selected task; task_type is normalized to
    # 'regression' / 'classification' for the downstream plotting code.
    if task_type == 'Individual':
        task_type = 'regression'
        h = individual_task(properties.hypothesis, properties.degree, properties.scaler)
    elif task_type == 'Generate regression task':
        task_type = 'regression'
        kwargs, degree = params_for_generate_regression()
        h = generate_regression_task(properties.hypothesis, properties.degree, properties.scaler, degree, **kwargs)
    elif task_type == 'Generate classification task':
        task_type = 'classification'
        kwargs = params_for_generate_classification()
        h = generate_clasiffication_task(properties.hypothesis, properties.degree, properties.scaler, **kwargs)

    solve_btn(h, properties, choice, task_type)
| 2.734375 | 3 |