| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
| fatiando/seismic/tests/test_seismic_conv.py | XuesongDing/fatiando | 179 | 12774951 |
from __future__ import absolute_import, division
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_allclose
from pytest import raises
from fatiando.seismic import conv
def test_impulse_response():
"""
conv.convolutional_model returns the source wavelet when the model is a
centred spike and the model has the same length as the source wavelet.
"""
w = conv.rickerwave(30., 2.e-3)
rc_test = np.zeros((w.shape[0], 20))
rc_test[w.shape[0]//2, :] = 1.
spike = conv.convolutional_model(rc_test, 30., conv.rickerwave, dt=2.e-3)
for j in range(0, rc_test.shape[1]):
assert_array_almost_equal(spike[:, j], w, 9)
def test_rc_shorter_than_wavelet():
"""
When the reflectivity series is shorter than the wavelet, the spike
response is still observed, as in the opposite case. The difference is that
the Ricker wavelet (or any other symmetric wavelet) is truncated in the result.
"""
w = conv.rickerwave(30., 2.e-3)
rc_test = np.zeros((21, 20))
rc_test[rc_test.shape[0]//2, :] = 1
spike = conv.convolutional_model(rc_test, 30., conv.rickerwave, dt=2.e-3)
for j in range(0, rc_test.shape[1]):
wmin = (w.shape[0] - rc_test.shape[0])//2
wmax = -(w.shape[0] - rc_test.shape[0])//2
assert_array_almost_equal(spike[:, j], w[wmin:wmax], 9)
def test_reflectivity_wrong_dimensions():
"""
Velocity and density are provided as matrix or vector to reflectivity
calculation, so they must have the same dimension.
"""
vel = np.ones((10, 10))
dens = np.ones((11, 11))
raises(AssertionError, conv.reflectivity, vel, dens)
vel = np.ones((10))
dens = np.ones((11))
raises(AssertionError, conv.reflectivity, vel, dens)
def test_depth_2_time_wrong_dimensions():
"""
Velocity and property are provided as matrices to the depth-to-time conversion,
so they must have the same dimension.
"""
vel = np.ones((10, 10))
dens = np.ones((11, 11))
dt = 2.e-3
dz = 1.
raises(AssertionError, conv.depth_2_time, vel, dens, dt, dz)
def test_ricker():
"""
conv.rickerwave inputs must satisfy the sampling and stability condition,
otherwise an error is raised.
"""
f = 50.
dt = 2.e-3
raises(AssertionError, conv.rickerwave, f, dt)
| 2.4375 | 2 |
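A minimal sketch (not part of the dataset tooling) of how one row of the table above could be represented in Python; the field names come from the header, the values from the row just shown, and the score-to-int_score relationship is only an observed assumption.

row = {
    "max_stars_repo_path": "fatiando/seismic/tests/test_seismic_conv.py",
    "max_stars_repo_name": "XuesongDing/fatiando",
    "max_stars_count": 179,
    "id": "12774951",
    "content": "...",  # the test module shown above
    "score": 2.4375,
}
# Assumption: int_score appears to be the score rounded to the nearest integer (2.4375 -> 2)
row["int_score"] = round(row["score"])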
| private_files/views.py | vilamatica/django-private-files | 4 | 12774952 |
<filename>private_files/views.py<gh_stars>1-10
try:
from urllib.parse import unquote
except ImportError:
from urllib import unquote
from django.conf import settings
from django.http import Http404
from django.core.exceptions import PermissionDenied
from django.apps import apps
from django.shortcuts import get_object_or_404
from django.core.cache import cache
from private_files.signals import pre_download
try:
from django.urls.utils import get_callable
except ImportError:
from django.core.urlresolvers import get_callable
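# get_file serves a protected file field: it resolves the model, optionally enforces a
# single-use access key stored in the cache, checks the field's access condition, fires the
# pre_download signal and delegates the response to the configured PRIVATE_DOWNLOAD_HANDLER.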
def get_file(request, app_label, model_name, field_name, object_id, filename):
handler = get_callable(getattr(settings, 'PRIVATE_DOWNLOAD_HANDLER', 'private_files.handlers.basic'))
model = apps.get_model(app_label, model_name)
instance = get_object_or_404(model, pk=unquote(object_id))
condition = getattr(instance, field_name).condition
single_use = getattr(instance, field_name).single_use
if single_use:
value = cache.get(request.GET.get('access-key', 'no-access-key'), None)
cache.delete(request.GET.get('access-key', 'no-access-key'))
if value != '%s-%s-%s-%s-%s' % (app_label, model_name, field_name, object_id, filename):
raise PermissionDenied()
if not model:
raise Http404("")
if not hasattr(instance, field_name):
raise Http404("")
if condition(request, instance):
pre_download.send(sender=model, instance=instance, field_name=field_name, request=request)
return handler(request, instance, field_name)
else:
raise PermissionDenied()
| 2.15625 | 2 |
| machine-learning-gists/7e6c7875761f293ba12d882f1cf723e48e0b0350/snippet.py | qwbjtu2015/dockerizeme | 0 | 12774953 |
#!/usr/bin/env python
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# @Author: <NAME>
# @Lab of Machine Learning and Data Mining, TianJin University
# @Email: <EMAIL>
# @Date: 2018-10-26 15:32:34
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from __future__ import print_function
from __future__ import absolute_import
import argparse
import os
import subprocess
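# NvidiaSuper parses the text output of `nvidia-smi` and cross-references each listed
# GPU process with `ps -u -p <pid>` so it can print the owning user, state, start time
# and command line alongside the GPU index.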
class NvidiaSuper:
NVIDIA_COMMAND = 'nvidia-smi'
def __init__(self):
self.source = None
self.gpu_process = []
self._get_source()
self._get_process_pool()
def _get_source(self):
try:
res = subprocess.Popen(
self.NVIDIA_COMMAND,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
# uncomment the line below on Python 3 so the output is decoded to text
# encoding='utf-8'
)
self.source = res.stdout.readlines()
except Exception:
raise EnvironmentError('No GPU driver.')
def _get_process_line(self):
for idx, line in enumerate(self.source):
if 'Processes' in line:
return idx
def _get_process_pool(self):
idx_line = self._get_process_line() + 3
for line in self.source[idx_line:]:
if line.startswith('+-'):
break
if 'No running processes found' in line:
return []
info_lst = line.strip().split()
idx_gpu = info_lst[1]
pid = info_lst[2]
s = self.ps_info(pid)
s.append('\n')
info = []
info.append(idx_gpu)
# user
info.append(s[0])
# pid
info.append(s[1])
# stat
info.append(s[7])
# start
info.append(s[8])
# time
info.append(s[9])
command = ' '.join(s[10:])
info.append(command)
self.gpu_process.append('\t'.join(info))
return self.gpu_process
@staticmethod
def ps_info(pid):
res = subprocess.Popen(
'ps -u -p ' + pid,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
# uncomment the line below on Python 3 so the output is decoded to text
# encoding='utf-8'
)
return res.stdout.readlines()[1].split()
def print_to(self):
print(''.join(self.source))
title = ['GPU', 'USER', 'PID', 'STAT', 'START', 'TIME', 'COMMAND']
print('\t'.join(title))
print(''.join(self.gpu_process))
if __name__ == '__main__':
monitor = NvidiaSuper()
monitor.print_to()
| 2.578125 | 3 |
| main_game.py | Matistjati/Simple-console-game | 0 | 12774954 |
<reponame>Matistjati/Simple-console-game<filename>main_game.py
# todo Better drop system
# Return values:
# 0: Failed for something like an error in internal structure
# 1: Success (even if nothing changed, it may be considered success)
# 2: "Failed" due to some sort of intended reason
import random
import time
import os
import sys
import ctypes
import logging
import json
import subprocess
import winreg
import platform
from enum import Enum
# Importing modules not in standard library
try:
import colorama
import win32gui
import pynput
except ModuleNotFoundError as missing_module:
print("Missing module error: {}".format(missing_module))
# Returns the closest matching integer/float to the passed value in an array of integers/floats
def closest_match(number, array):
if isinstance(array, dict):
return min(list(array.keys()), key=lambda x: abs(x - number))
else:
return min(array, key=lambda x: abs(x - number))
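# Illustrative calls (values are made up):
# closest_match(7, [2, 5, 10, 20]) -> 5
# closest_match(7, {5: 'rare', 10: 'uncommon'}) -> 5  (nearest key of the dict)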
# Returns a bool representing if the entered variable is an int or not
def isint(variable_to_test):
try:
variable_to_test = int(variable_to_test)
variable_to_test += 1
return True
except ValueError:
return False
# Starts a subprocess playing the passed wav file
# We only need to pass the name since it looks in the game's music folder
# A subprocess is used so that it runs in the background and stops if the game is exited
def play_wav(file_name):
# Ensuring that we don't try to play a nonexistent file
if file_name not in GameMaster.missing_audio:
project_path = os.path.dirname(sys.argv[0])
subprocess.Popen(["python", "{}\\Scripts\\play_wav.py".format(project_path),
"{}\\Audio\\{}".format(project_path, file_name)], shell=False)
else:
return
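# Usage sketch (the file name is hypothetical): play_wav("battle_theme.wav") resolves to
# <project>\Audio\battle_theme.wav and is played in the background by Scripts\play_wav.py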
class ColoredString(str):
"""
A custom data type intended to contain strings with ANSI codes
The main purpose of it is to return the length of the string that will be displayed, not the ANSI
"""
def __new__(cls, string: str, reset_colors: bool=True, colored_chars=0):
if reset_colors and string[-4:] != colorama.Style.RESET_ALL:
string += colorama.Style.RESET_ALL
colored_chars += len(colorama.Style.RESET_ALL)
new_str = super(ColoredString, cls).__new__(cls, string)
new_str.string = string
new_str.colored_chars = colored_chars
return new_str
def __len__(self):
temp_str_len = super(ColoredString, self).__len__()
temp_str_len -= self.colored_chars
return temp_str_len
def __add__(self, s):
if isinstance(s, ColoredString):
return ColoredString((self.string + s), colored_chars=(self.colored_chars + s.colored_chars),
reset_colors=False)
else:
return ColoredString((self.string + s), colored_chars=self.colored_chars, reset_colors=False)
def __radd__(self, other):
if isinstance(other, ColoredString):
return ColoredString((other + self.string), colored_chars=(self.colored_chars + other.colored_chars),
reset_colors=False)
else:
return ColoredString((other + self.string), colored_chars=self.colored_chars, reset_colors=False)
def __repr__(self):
return ColoredString(self.string)
def __str__(self):
return ColoredString(self.string)
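# A minimal sketch of the point of ColoredString (assumes colorama is available):
# s = ColoredString(colorama.Fore.RED + "hp", colored_chars=len(colorama.Fore.RED))
# len(s) == 2   # the ANSI colour codes (and the appended RESET_ALL) are excluded from the length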
class Console:
# A class for collecting all methods related to the console
@staticmethod
def clear():
# Removes all written characters in the console
os.system('cls' if os.name == 'nt' else 'clear')
@staticmethod
def size_reset():
# Sets the console to a desired size
os.system("mode con cols=120 lines=30")
@staticmethod
def print_with_layout(extra_text=None, battle=False):
""" Method for printing text along with other things, for example a layout
The layout will remain at static location, even if other text is printed to the same line
If battle is passed as true, the battle layout containing healthbars,
an action log and turn meter will be printed too
"""
Console.clear()
enemy = player.current_enemy
# Determining the type of extra_text in order to handle correctly
if extra_text is not None:
if isinstance(extra_text, list):
lines_in = extra_text
elif isinstance(extra_text, str):
# Splitting input into a list
lines_in = extra_text.split("\n")
else:
error_logger.error("Unknown extra text type: {}, {}".format(type(extra_text), extra_text))
lines_in = extra_text
else:
lines_in = []
# Filling up so that the list will contain 30 entries
# This is to ensure that even if less than 30 lines of input is entered, the code will not cause an exception
for i in range(len(lines_in), 31):
lines_in.append("")
# Adding the lines to be printed
# If input was entered, it will contain that input to be printed, otherwise it will contain an empty string
(line_1, line_2, line_3, line_4, line_5, line_6, line_7, line_8, line_9, line_10,
line_11, line_12, line_13, line_14, line_15, line_16, line_17, line_18, line_19, line_20,
line_21, line_22, line_23, line_24, line_25, line_26, line_27, line_28, line_29) = lines_in[:29]
# Declaring ASCII characters for the health_bars and the action log
standing_line = chr(124)
block = chr(9608)
top_line = chr(175)
# Checking if we want the battle layout
if battle:
# Declaring the resource bars, actions log and turn meter
# This is done in such a way that they will remain at a static position in the console
# A turn meter at the upper-right corner
turn = (" " * ((119 - (len("Turn ") + len(str(GameMaster.turn)))) - len(line_1)) +
"Turn {}".format(GameMaster.turn))
# Player's resource bar
# The top and bottom of the player's resource bar
# How many characters from left the resource bar is
player_health_bar_spacing = 26
player_top_resource_bar = (' ' * (player_health_bar_spacing - len(line_22)) + " " + ("_" * 10) +
player.name + " ")
player_bot_resource_bar = ' ' * (player_health_bar_spacing - len(line_26)) + " " + top_line * 10
# Adding symbols for easy information of current statuses
for status in player.Statuses:
if status == Statuses.stun:
player_top_resource_bar += ColoredString("{}*".format(colorama.Fore.LIGHTYELLOW_EX),
colored_chars=len(colorama.Fore.LIGHTYELLOW_EX))
elif status == Statuses.apply_bleed:
player_top_resource_bar += ColoredString("{}{}".format(colorama.Fore.LIGHTRED_EX, chr(191)),
colored_chars=len(colorama.Fore.LIGHTRED_EX))
# Checking if the effect is a stat boost
elif status in [i.value for i in Stats]:
if player.Statuses[status]['amount'] >= 0:
player_top_resource_bar += ColoredString("{}^".format(colorama.Fore.LIGHTBLUE_EX),
colored_chars=len(colorama.Fore.LIGHTBLUE_EX))
else:
player_top_resource_bar += ColoredString("{}v".format(colorama.Fore.YELLOW),
colored_chars=len(colorama.Fore.YELLOW))
# Calculating and displaying the player's resources
player_hp = int((player.current_hp / player.max_hp) * 10)
player_mp = int((player.current_mp / player.max_mp) * 10)
player_stamina = int((player.current_stamina / player.max_stamina) * 10)
player_mid_health_bar = (' ' * (player_health_bar_spacing - len(line_23)) + standing_line +
ColoredString("{}{}".format(colorama.Fore.RED, (block * player_hp)),
colored_chars=len(colorama.Fore.RED))
+ " " * (10 - player_hp) + standing_line +
"{}/{} hp".format(player.current_hp, player.max_hp))
player_mid_mp_bar = (' ' * (player_health_bar_spacing - len(line_24)) + standing_line +
ColoredString("{}{}".format(colorama.Fore.BLUE, (block * player_mp)),
colored_chars=len(colorama.Fore.BLUE))
+ " " * (10 - player_mp) + standing_line +
"{}/{} mp".format(player.current_mp, player.max_mp))
player_mid_stamina_bar = (' ' * (player_health_bar_spacing - len(line_25)) + standing_line +
ColoredString("{}{}".format(colorama.Fore.GREEN, (block * player_stamina)),
colored_chars=len(colorama.Fore.GREEN))
+ " " * (10 - player_stamina) + standing_line +
"{}/{} stamina".format(player.current_stamina, player.max_stamina))
# Enemy's resources
# The top and bottom of the enemy's resource bar
# How many characters from left to right the resource bar is
enemy_health_bar_spacing = 80
enemy_top_resource_bar = (' ' * (enemy_health_bar_spacing - len(line_6))
+ " " + "_" * 10 + player.current_enemy.name + " ")
enemy_bot_resource_bar = ' ' * (enemy_health_bar_spacing - len(line_10)) + " " + top_line * 10
# Adding symbols for easy information of current statuses
for status in player.current_enemy.Statuses:
if status == Statuses.stun:
enemy_top_resource_bar += "{}*{}".format(colorama.Fore.LIGHTYELLOW_EX, colorama.Style.RESET_ALL)
elif status == Statuses.apply_bleed:
enemy_top_resource_bar += "{}{}{}".format(colorama.Fore.LIGHTRED_EX, chr(191),
colorama.Style.RESET_ALL)
# Checking if the effect is a stat boost
elif status in [i.value for i in Stats]:
if enemy.Statuses[status]['amount'] >= 0:
enemy_top_resource_bar += "{}^{}".format(colorama.Fore.LIGHTBLUE_EX, colorama.Style.RESET_ALL)
else:
enemy_top_resource_bar += "{}v{}".format(colorama.Fore.YELLOW, chr(8673),
colorama.Style.RESET_ALL)
# Calculating and displaying the enemy's resources
enemy_hp = int((player.current_enemy.current_hp / player.current_enemy.max_hp) * 10)
enemy_mp = int((player.current_enemy.current_mp / player.current_enemy.max_mp) * 10)
enemy_stamina = int((player.current_enemy.current_stamina / player.current_enemy.max_stamina) * 10)
enemy_mid_health_bar = (' ' * (enemy_health_bar_spacing - len(line_7)) + standing_line +
ColoredString("{}{}".format(colorama.Fore.RED, (block * enemy_hp)),
colored_chars=len(colorama.Fore.RED))
+ " " * (10 - enemy_hp) + standing_line +
'{}/{} hp'.format(player.current_enemy.current_hp, player.current_enemy.max_hp))
enemy_mid_mp_bar = (' ' * (enemy_health_bar_spacing - len(line_8)) + standing_line +
ColoredString("{}{}".format(colorama.Fore.BLUE, (block * enemy_mp)),
colored_chars=len(colorama.Fore.BLUE))
+ " " * (10 - enemy_mp) + standing_line +
'{}/{} mp'.format(player.current_enemy.current_mp, player.current_enemy.max_mp))
enemy_mid_stamina_bar = (' ' * (enemy_health_bar_spacing - len(line_9)) + standing_line +
ColoredString("{}{}".format(colorama.Fore.GREEN, (block * enemy_stamina)),
colored_chars=len(colorama.Fore.GREEN))
+ " " * (10 - enemy_stamina) + standing_line +
'{}/{} stamina'.format(player.current_enemy.current_stamina,
player.current_enemy.max_stamina))
# Calculating some spacing for the action log
overlapping_action_log_spacing_special = 30
overlapping_action_log_spacing = 22
normal_action_log_spacing = 52
log_lines = 5
max_spacing = max(
list(len(GameMaster.action_log[- (i + 1)]) for i in range(log_lines)))
spacing_1 = " " * (max_spacing - len(GameMaster.action_log[-1]))
spacing_2 = " " * (max_spacing - len(GameMaster.action_log[-2]))
spacing_3 = " " * (max_spacing - len(GameMaster.action_log[-3]))
spacing_4 = " " * (max_spacing - len(GameMaster.action_log[-4]))
spacing_5 = " " * (max_spacing - len(GameMaster.action_log[-5]))
# Defining the action log parts
action_log_top = (' ' * (overlapping_action_log_spacing - (len(player_top_resource_bar)) +
(overlapping_action_log_spacing_special - len(line_22))) + " " +
"_" * max_spacing + "Action log")
action_log_bot = (" " + ' ' * (normal_action_log_spacing - len(line_27)) + top_line * max_spacing)
action_log_mid_1 = (' ' * (overlapping_action_log_spacing - (len(player_mid_health_bar)) +
(overlapping_action_log_spacing_special - len(line_23)))
+ standing_line +
GameMaster.action_log[len(GameMaster.action_log) - 1]
+ spacing_1 + standing_line)
action_log_mid_2 = (' ' * (overlapping_action_log_spacing - (len(player_mid_mp_bar)) +
(overlapping_action_log_spacing_special - len(line_24)))
+ standing_line +
GameMaster.action_log[len(GameMaster.action_log) - 2]
+ spacing_2 + standing_line)
action_log_mid_3 = (' ' * (overlapping_action_log_spacing - (len(player_mid_stamina_bar)) +
(overlapping_action_log_spacing_special - len(line_25)))
+ standing_line +
GameMaster.action_log[len(GameMaster.action_log) - 3]
+ spacing_3 + standing_line)
action_log_mid_4 = (' ' * (overlapping_action_log_spacing - (len(player_bot_resource_bar)) +
(overlapping_action_log_spacing_special - len(line_26)))
+ standing_line +
GameMaster.action_log[len(GameMaster.action_log) - 4]
+ spacing_4 + standing_line)
action_log_mid_5 = (' ' * (normal_action_log_spacing - len(line_27)) + standing_line +
GameMaster.action_log[len(GameMaster.action_log) - 5]
+ spacing_5 + standing_line)
# If we don't want the battle layout, the health_bars and the log will instead be empty strings
else:
turn = ""
enemy_top_resource_bar = ""
enemy_bot_resource_bar = ""
enemy_mid_health_bar = ""
enemy_mid_mp_bar = ""
enemy_mid_stamina_bar = ""
player_top_resource_bar = ""
player_bot_resource_bar = ""
player_mid_health_bar = ""
player_mid_stamina_bar = ""
player_mid_mp_bar = ""
action_log_top = ""
action_log_bot = ""
action_log_mid_1 = ""
action_log_mid_2 = ""
action_log_mid_3 = ""
action_log_mid_4 = ""
action_log_mid_5 = ""
# Joining all the strings to be printed
lines = {0: line_1 + turn, 1: line_2, 2: line_3, 3: line_4, 4: line_5,
5: line_6 + enemy_top_resource_bar,
6: line_7 + enemy_mid_health_bar,
7: line_8 + enemy_mid_mp_bar,
8: line_9 + enemy_mid_stamina_bar,
9: line_10 + enemy_bot_resource_bar,
10: line_11, 11: line_12, 12: line_13,
13: line_14, 14: line_15, 15: line_16, 16: line_17, 17: line_18,
18: line_19, 19: line_20, 20: line_21,
21: line_22 + player_top_resource_bar + action_log_top,
22: line_23 + player_mid_health_bar + action_log_mid_1,
23: line_24 + player_mid_mp_bar + action_log_mid_2,
24: line_25 + player_mid_stamina_bar + action_log_mid_3,
25: line_26 + player_bot_resource_bar + action_log_mid_4,
26: line_27 + action_log_mid_5,
27: line_28 + action_log_bot,
28: line_29}
# Printing the strings
for i in range(0, 29):
print(lines[i])
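# Usage sketch (the text is illustrative):
# Console.print_with_layout(extra_text="You wake up in a forest") prints 29 lines with the text
# on the first line; with battle=True the resource bars, action log and turn meter are added.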
@staticmethod
def interactive_choice(cases: list, head_string: str, back_want: bool = False,
battle: bool = False, enumerated: bool = False, custom_area=()):
# This method makes use of the print_with_layout method in order to make some printed objects clickable
# Cases is a list of the clickable strings
# Head_string will be printed at the top of the console and will not be clickable
# If battle is True, an enemy must be supplied and print_with_layout will use the battle layout
# If back_want is True, a back option will be added
# Returns the name of the string clicked(or None, signaling that back was clicked)
if len(custom_area) == 0:
GameMaster.last_interactive_choice_call['cases'] = cases
GameMaster.last_interactive_choice_call['head_string'] = head_string
GameMaster.last_interactive_choice_call['back_want'] = back_want
GameMaster.last_interactive_choice_call['battle'] = battle
GameMaster.last_interactive_choice_call['enumerated'] = enumerated
# Console borders need to be accounted for
console_x_border: int = GameMaster.x_to_console # pixels
console_y_border: int = GameMaster.y_to_console # pixels
font_size_x: int = GameMaster.font_size_x # pixels
font_size_y: int = GameMaster.font_size_y # pixels
# Some lines are not clickable
uninteractive_lines = head_string.count("\n") + 1
# Adding a back option if desired
if back_want and "back" not in cases:
cases.append("back")
# If you're bug hunting a displaced turn meter, this is the root of the problem
# The split method returns a string, therefore removing the coloredstring's custom len
# The easiest way to solve this is to never have two colored strings as head strings
if len(custom_area) == 0:
if head_string.count("\n") != 0:
head_string_list = head_string.split("\n")
temp_cases = head_string_list + cases
else:
temp_cases = cases.copy()
temp_cases.insert(0, head_string)
if len(custom_area) == 0:
# noinspection PyUnboundLocalVariable
temp_cases = ["*" + case for case in temp_cases]
else:
temp_cases = cases.copy()
# Printing everything
Console.print_with_layout(extra_text=temp_cases, battle=battle)
case = None
if len(custom_area) == 0:
line_areas = []
for i in range(0, 31):
line_areas.append([])
for move in cases:
line_areas[cases.index(move)].append(console_x_border)
line_areas[cases.index(move)].append((len(move) * font_size_x) + console_x_border)
line_areas[cases.index(move)].append(((cases.index(move)) * font_size_y
+ console_y_border + font_size_y * uninteractive_lines) -
8)
line_areas[cases.index(move)].append((cases.index(move) + 1) * font_size_y
+ console_y_border + font_size_y * uninteractive_lines)
# Removing empty nested lists
line_areas = [x for x in line_areas if x != []]
else:
line_areas = [list(row) for row in custom_area]
for sublist in line_areas:
line_areas[line_areas.index(sublist)][0] *= font_size_x
line_areas[line_areas.index(sublist)][0] += console_x_border
line_areas[line_areas.index(sublist)][1] *= font_size_x
line_areas[line_areas.index(sublist)][1] += console_x_border
line_areas[line_areas.index(sublist)][2] *= font_size_y
line_areas[line_areas.index(sublist)][2] += console_y_border
line_areas[line_areas.index(sublist)][3] *= font_size_y
line_areas[line_areas.index(sublist)][3] += console_y_border
def update_area():
x_y_window = []
# noinspection PyUnusedLocal
def callback(hwnd, extra):
rect = win32gui.GetWindowRect(hwnd)
x_window = rect[0]
y_window = rect[1]
w = rect[2] - x_window
h = rect[3] - y_window
if win32gui.GetWindowText(hwnd) == GameMaster.game_name:
nonlocal x_y_window
x_y_window = [x_window, y_window]
win32gui.EnumWindows(callback, None)
# TODO Support stationary console location
if len(x_y_window) == 0:
listener.stop()
return
temp_console_x_border = x_y_window[0]
temp_console_y_border = x_y_window[1]
# Calculating the areas which are clickable
# First two x values, then two y values in the dict
temp_line_areas = list(sub__list.copy() for sub__list in line_areas)
for sub_list in temp_line_areas:
temp_line_areas[temp_line_areas.index(sub_list)][0] += temp_console_x_border
temp_line_areas[temp_line_areas.index(sub_list)][1] += temp_console_x_border
temp_line_areas[temp_line_areas.index(sub_list)][2] += temp_console_y_border
temp_line_areas[temp_line_areas.index(sub_list)][3] += temp_console_y_border
return temp_line_areas
def on_click(x, y, button, pressed):
temp_line_areas = update_area()
if temp_line_areas is None:
return
# Checking whether a left click is performed
if pressed and button == pynput.mouse.Button.left:
if len(custom_area) == 0:
for x_y in temp_line_areas:
# Checking if the mouse input is within the desired area
if (x in range(temp_line_areas[temp_line_areas.index(x_y)][0],
temp_line_areas[temp_line_areas.index(x_y)][1]) and
y in range(temp_line_areas[temp_line_areas.index(x_y)][2],
temp_line_areas[temp_line_areas.index(x_y)][3])):
# For the listener to exit, we need to return false
# Therefore, in order to return other values, we use a global variable
nonlocal case
case = cases[temp_line_areas.index(x_y)]
return False
else:
for x_y in temp_line_areas:
# Checking if the mouse input is within the desired area
if (x in range(temp_line_areas[temp_line_areas.index(x_y)][0],
temp_line_areas[temp_line_areas.index(x_y)][1]) and
y in range(temp_line_areas[temp_line_areas.index(x_y)][2],
temp_line_areas[temp_line_areas.index(x_y)][3])):
if cases[temp_line_areas.index(x_y)] != "":
global case_custom_area
case_custom_area = cases[temp_line_areas.index(x_y)]
return False
# Checks for mouse clicks, if there are any it calls on_click
with pynput.mouse.Listener(on_click=on_click) as listener:
listener.join()
if len(custom_area) == 0:
try:
_ = case
del _
except NameError:
print("It seems that you aren't running this game through a console. Please do")
input()
raise SystemExit
finally:
if case is None:
print("It seems that you aren't running this game through a console. Please do")
input()
raise SystemExit
if case == "*back" or case == "back":
# If the input is back, return None
return None
else:
# If a clickable case was clicked, return which one
# If enumerated is true, we return the index of the case
if enumerated:
return cases.index(case)
else:
if case[0] == "*":
case = case[1:]
return case
else:
if case_custom_area == "back":
# If the input is back, return None
return None
else:
# If a clickable case was clicked, return which one
# If enumerated is true, we return the index of the case
if enumerated:
return cases.index(case_custom_area)
else:
return case_custom_area
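# Usage sketch (choices are illustrative):
# choice = Console.interactive_choice(["attack", "defend"], "What do you do?", back_want=True, battle=True)
# returns the clicked string, or None when "back" is clicked.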
class Statuses:
# A collection class of all the statuses a character can have
@staticmethod
def apply_bleed(target):
# Applies DOT damage at the start of a turn
damage_taken = (int(target.max_hp * 0.15))
return damage_taken
@staticmethod
def stun():
pass
supported_Statuses = {
# Information about the previous class's statuses
# head_type sorting whether it's a positive or negative effect.
# head_type is also used in curses, effects that don't go away by simply playing
# apply_type is used to determine when during combat the effect takes effect
# type doesn't serve any purpose at the moment
# description is used when inspecting someone who is afflicted
# on_apply_message_player and enemy are to be used in the action log
Statuses.apply_bleed:
{
'head_type': 'debuff',
'apply_type': 'start_dot',
'type': 'burning',
'description': 'Bleeding',
'description_nerd': 'Bleed',
'on_apply_message_player': 'You better stop this bleeding soon... You take',
'on_apply_message_enemy': 'Blood spills forth as the enemy takes'
},
Statuses.stun:
{
'head_type': 'debuff',
'apply_type': '',
'description': 'Stunned',
'description_nerd': 'Stun',
'on_apply_message_player': 'Your head feels too dizzy to do anything.',
'on_apply_message_enemy': 'Looks like {} is too dizzy to act'
}
}
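# Example lookup: supported_Statuses[Statuses.apply_bleed]['description'] == 'Bleeding'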
WeaponEffect = {
}
class Item:
""" name -- the name of the item
weight -- the item's weight
value -- the item's raw value, not accounting for merchant rates etc
item_type -- determines its uses, i.e. weapon, material, consumable
description -- the item's flavor text
rarity -- the rate at which the item is dropped
"""
def __init__(self, name: str, weight: int, value: int, item_type: str, item_id: int, description: str, rarity: int,
max_stack: int):
self.rarity = rarity
self.name = name
self.weight = weight
self.value = value
self.item_type = item_type
self.item_id = item_id
self.description = description
self.max_stack = max_stack
# Keep in mind that this is far from optimal
# The code below returns a string containing the closest match to the item's rarity
# The data structures are for making the method rarity_level work
#
# It works by having a 'hierarchy' of rarities and a dictionary of keys being the same values as the hierarchy
# The function takes the item's rarity, finds its closest match in the hierarchy
# It then returns a string according to its level in the hierarchy from the dictionary
rarity_levels = {75: 'extremely common', 40: "very common", 20: "common", 10: "uncommon",
5: "rare", 2: "legendary"}
rarity_hierarchy = (2, 5, 10, 20, 40, 75)
def rarity_level(self):
return Item.rarity_levels[closest_match(self.rarity, Item.rarity_hierarchy)]
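# Worked example: an item with rarity=18 matches 20 in the hierarchy, so rarity_level() -> "common"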
# Displaying information about the item when inspected
def inspect(self):
# Checking whether to use "a" or "an"
a_or_an = "an" if str(self.item_type)[0] in GameMaster.vowels else "a"
# Instead of "it weighs 0", it becomes "it weighs nothing"
weight = self.weight
if weight == 0:
weight = "nothing"
# Some flavor texts contain question marks or exclamation marks at the end
# We do not want "(flavor text)?.", hence the following check
temp_description = self.description
sentence_endings = ('!', '?', '.')
if not temp_description[-1] in sentence_endings:
temp_description = temp_description + "."
# Concatenating it all together
if GameMaster.settings['nerd mode']:
return ("{}\nIt is worth {} gold and weighs {}.\nIt is {} {} whose droprate is {}%.".format
(temp_description, self.value, weight, a_or_an, self.item_type, self.rarity))
else:
return ("{}\nIt is worth {} gold and weighs {}.\nIt is {} {} that is {}.".format
(temp_description, self.value, weight, a_or_an, self.item_type, Item.rarity_level(self)))
class Wearable(Item):
# This class exists only for the wearable class to inherit the inspect method
#
""" Name: The armor's name
Weight: The armor's weight
Value: The armor's value
item_type: Should always be armor
item_id: The id of the item
Description: The item's flavor text
Rarity: The rate at which the item will be dropped
Armor_weight: The weight of the armor
Defense: How much damage the armor will defend against
Armor_effect: A special effect bound to the armor
Set_member: What armor set the armor is part of (used for set bonuses)
"""
effect_inspect_text = "Error: no inspection text"
# noinspection PyMethodOverriding
def inspect(self):
# noinspection PyUnresolvedReferences
return Item.inspect(self) + "\n{}".format(self.parent.effect_inspect_text)
class Weapon(Item):
def __init__(self, name, weight, value, item_type, item_id, description, rarity, max_stack,
weapon_damage, crit, special_effect=None, effect_rate=0):
super().__init__(name, weight, value, item_type, item_id, description, rarity, max_stack)
self.weapon_damage = weapon_damage
self.crit = crit
self.special_effect = special_effect
self.effect_rate = effect_rate
likeliness_levels = {80: "very likely", 60: "likely", 40: "unlikely",
20: "very unlikely"}
crit_levels = {33: 'very likely', 20: 'highly likely', 10: 'likely', 5: 'unlikely', 2: 'very unlikely'}
def likeliness_level(self):
return self.likeliness_levels[closest_match(self.effect_rate, self.likeliness_levels)]
def crit_level(self):
return self.crit_levels[closest_match(self.crit, self.crit_levels)]
def inspect(self):
a_or_an = "an" if str(self.item_type)[0] in GameMaster.vowels else "a"
if self.special_effect is not None:
if GameMaster.settings['nerd mode']:
special_effect_text = ("{} that works {}% of the time".format
(WeaponEffect[self.special_effect]['description'],
self.effect_rate))
else:
special_effect_text = ("{} and is {} affect the enemy".format
(WeaponEffect[self.special_effect]['description'],
self.likeliness_level()))
else:
special_effect_text = ""
if GameMaster.settings['nerd mode']:
return ("{}.\nGold worth: {}, weight: {}. Damage: {}, crit: {}%, droprate: {}%. {}".format
(self.description, self.value, self.weight, a_or_an, self.item_type, self.rarity,
special_effect_text))
else:
return ("{}.\nIt is worth {} gold and weighs {}.\n"
"You will deal around {} damage when attacking with this and are {} to deal double damage.\n"
"It is {}. {}".format
(self.description, self.value if self.value != 0 else 'nothing',
self.weight if self.weight != 0 else 'nothing', self.weapon_damage, self.crit_level(),
self.rarity_level(),
special_effect_text))
# feather = Item('feather', 1, 10, 'material', 1, 'A feather from a hen', 'common')
# radish = Item('radish', 1, 20, 'food', 2, 'Fresh from The earth!', 'uncommon')
#
# feather.inspect()
# b = Wearable()
class Bare(Wearable):
item_id = 1
set_effect_description_good = 'People are astonished by your amazing body, increasing your charisma by '
set_effect_description_bad = "People won't trust you, running around without clothes, decreasing your charisma by "
effect_inspect_text = "If you're weak and naked, no one will trust you, making negotiating harder.\n" \
"However, if you're buff, people will be amazed, making negotiating easier"
change_type = "charisma"
inspect_flavor_text = 'Get some real clothes, you hobo'
class Head:
def __init__(self, parent):
self.parent = parent
set_part = "head"
description = "Just a plain old ugly head"
item_type = "wearable"
name = "Bare"
weight = 0
value = 'unsellable'
rarity = 'unobtainable'
defense = 1
dodge_mod = 4
crit_mod = 0
speed_mod = 10
damage_mod = 0
charisma_mod = 0
intelligence_mod = 0
hp_regen_mod = 0
mp_regen_mod = 0
stamina_regen_mod = 0
description_good = 'Even though your face looks terrible, people are distracted by your glorious body,'
effect_amount_good = 0
description_bad = 'Your face looks terrible, it will make negotiating harder'
effect_amount_bad = -2
class Chest:
def __init__(self, parent):
self.parent = parent
set_part = "chest"
description = "Just a plain old torso"
item_type = "wearable"
name = "Bare"
weight = 0
value = 'unsellable'
rarity = 'unobtainable'
defense = 3
dodge_mod = 4
crit_mod = 0
speed_mod = 10
damage_mod = 0
charisma_mod = 0
intelligence_mod = 0
hp_regen_mod = 0
mp_regen_mod = 0
stamina_regen_mod = 1
description_good = 'Nice gains, bro'
effect_amount_good = 4
description_bad = 'You even lift, bro?'
effect_amount_bad = -1
class Legs:
def __init__(self, parent):
self.parent = parent
set_part = "legs"
description = "What humans use to walk"
item_type = "wearable"
name = "Bare"
weight = 0
value = 'unsellable'
rarity = 'unobtainable'
defense = 2
dodge_mod = 4
crit_mod = 0
speed_mod = 10
damage_mod = 0
charisma_mod = 0
intelligence_mod = 0
hp_regen_mod = 0
mp_regen_mod = 0
stamina_regen_mod = 0
description_good = 'Not wearing pants only seems to be in your flavor with such a body'
effect_amount_good = 1
description_bad = 'Oh please, at least put some pants on'
effect_amount_bad = -7
@staticmethod
def get_set_part_description(set_part, user) -> str:
if user.strength > 50:
return set_part.description_good
else:
return set_part.description_bad
@staticmethod
def get_set_effect(user, head: bool = False, chest: bool = False, legs: bool = False):
change_amount = 0
if hasattr(user, 'parent'):
user_strength = user.parent.strength
else:
user_strength = user.strength
if user_strength > 50:
if head:
change_amount += Bare.Head.effect_amount_good
if chest:
change_amount += Bare.Chest.effect_amount_good
if legs:
change_amount += Bare.Legs.effect_amount_good
return "charisma", change_amount, Bare.set_effect_description_good
else:
if head:
change_amount += Bare.Head.effect_amount_bad
if chest:
change_amount += Bare.Chest.effect_amount_bad
if legs:
change_amount += Bare.Legs.effect_amount_bad
# The *_bad amounts are already negative, so adding them lowers charisma as described
return "charisma", change_amount, Bare.set_effect_description_bad
class Leaves(Wearable):
item_id = 2
set_effect_description_good = "People are happy that you're hiding at least a little of your weak body, " \
"increasing your charisma by "
set_effect_description_bad = "People are disappointed that you're hiding your glorious body, decreasing " \
"your charisma by "
effect_inspect_text = "If you're weak, people will respect you for hiding your weak body, increasing your " \
"charisma\nHowever, if you're buff, people will become angry for not showing yourself," \
" decreasing your charisma"
change_type = "charisma"
inspect_flavor_text = 'Mother nature to the rescue!'
class Head:
def __init__(self, parent):
self.parent = parent
set_part = "head"
description = "A pretty leaf crown"
item_type = "wearable"
name = "leaf crown"
weight = 0
value = 2
rarity = 3
defense = 2
dodge_mod = 4
crit_mod = 0
speed_mod = 13
damage_mod = 0
charisma_mod = 0
intelligence_mod = 3
hp_regen_mod = 0
mp_regen_mod = 0
stamina_regen_mod = 0
description_good = 'Your leaf crown actually hides your horrible face pretty well'
effect_amount_good = 1
description_bad = "People don't really mind your face since your body is so muscular"
effect_amount_bad = -0
class Chest:
def __init__(self, parent):
self.parent = parent
set_part = "chest"
description = "A well-made leaf chestmail"
item_type = "wearable"
name = "leaf chestmail"
weight = 0
value = 4
rarity = 3
defense = 2
dodge_mod = 4
crit_mod = 0
speed_mod = 13
damage_mod = 0
charisma_mod = 0
intelligence_mod = 3
hp_regen_mod = 0
mp_regen_mod = 1
stamina_regen_mod = 0
description_good = 'This finely crafted leaf chestmail hides your weak chest perfectly'
effect_amount_good = 2
description_bad = 'Why hide your amazing chest?'
effect_amount_bad = -4
class Legs:
def __init__(self, parent):
self.parent = parent
set_part = "legs"
description = "Just some leafs to cover the private parts. The leggings part was a lie"
item_type = "wearable"
name = "<NAME>"
weight = 0
value = 3
rarity = 3
defense = 2
dodge_mod = 4
crit_mod = 0
speed_mod = 13
damage_mod = 0
charisma_mod = 0
intelligence_mod = 3
hp_regen_mod = 0
mp_regen_mod = 0
stamina_regen_mod = 0
description_good = 'People are looking happy that you at least covered up your private parts'
effect_amount_good = 4
description_bad = 'People look angry that you hide your amazing body'
effect_amount_bad = -2
@staticmethod
def get_set_part_description(set_part, user) -> str:
if user.strength < 50:
return set_part.description_good
else:
return set_part.description_bad
@staticmethod
def get_set_effect(user, head: bool = False, chest: bool = False, legs: bool = False):
change_amount = 0
# noinspection PyUnresolvedReferences
if hasattr(user, 'parent'):
user_strength = user.parent.strength
else:
user_strength = user.strength
if user_strength < 50:
if head:
change_amount += Leaves.Head.effect_amount_good
if chest:
change_amount += Leaves.Chest.effect_amount_good
if legs:
change_amount += Leaves.Legs.effect_amount_good
return "charisma", change_amount, Leaves.set_effect_description_good
else:
if head:
change_amount += Leaves.Head.effect_amount_bad
if chest:
change_amount += Leaves.Chest.effect_amount_bad
if legs:
change_amount += Leaves.Legs.effect_amount_bad
# The *_bad amounts are already negative, so adding them lowers charisma as described
return "charisma", change_amount, Leaves.set_effect_description_bad
# Initiating all items
Gold = Item('Gold', 0, 1, 'valuable', 0, 'The foundation of modern society.. or perhaps its worst mistake?', 75,
50000)
Leaves.Head = Leaves.Head(Leaves)
Leaves.Chest = Leaves.Chest(Leaves)
Leaves.Legs = Leaves.Legs(Leaves)
Bare.Head = Bare.Head(Bare)
Bare.Chest = Bare.Chest(Bare)
Bare.Legs = Bare.Legs(Bare)
Fist = Weapon('Fist', 0, 0, 'weapon', 3, 'A plain old fist', 75, 1, 2, 3)
WoodenSword = Weapon('Wooden sword', 5, 10, 'weapon', 4, 'A plain old sword out of sturdy oak', 20, 1, 4, 4,)
# An enumeration of all the game stats
class Stats(Enum):
crit = 'crit'
charisma = 'charisma'
speed = 'speed'
awareness = 'awareness'
strength = 'strength'
intelligence = 'intelligence'
dodge = 'dodge'
prot = 'prot'
hp_regen = 'hp_regen'
mp_regen = 'mp_regen'
stamina_regen = 'stamina_regen'
class GameMaster:
# Runtime computations
# Tuples that can be iterated through to check for various things
percent_stats = ('crit', 'dodge', 'prot')
Bare_set = (Bare.Head, Bare.Chest, Bare.Legs, Fist)
no_s_at_end_exceptions = ('Gold',)
vowels = ("a", "o", "u", "e", "i", "A", "O", "U", "E", "I")
# The game settings
# Should contain nerd mode, quickedit and forcev2
settings = {}
# The name of the game, used for setting the title of the process and getting a handle to it
game_name = "Please select a game name"
# Used to ensure that an audio file exists when trying to play it
missing_audio = []
# Some info about the console
y_to_console = 0
x_to_console = 0
font_size_x = 0
font_size_y = 0
# Used at the death screen
last_damage_player = ""
# Counter for displaying the current turn during combat
# It gets incremented and reset from the combat function
turn = 1
# A dict containing the last call to interactive choice
# This is used in case we need to update the action log
last_interactive_choice_call = {'cases': [], 'head_string': '', 'battle': False, 'back_want': False}
# A list containing actions made by the player and the enemy to be displayed
# This should never be appended to directly
action_log = [' ', ' ', ' ', ' ', ' ',
' ']
# A method for appending to the action log
# Defined as a classmethod because it is called on the class itself elsewhere in the file
@classmethod
def extend_action_log(cls, new_action):
# Ensuring that the message will never be too long
if len(new_action) > 56:
cls.action_log.append('Message too long. Show the developer your error log')
error_logger.error("Message longer than 56 chars found at action_log: {}. len: {}".format(new_action,
len(new_action)))
else:
# Re-printing the last layout via print_with_layout so the new entry shows up immediately
cls.action_log.append(new_action)
if cls.last_interactive_choice_call['head_string'] != "":
temp_cases = GameMaster.last_interactive_choice_call['cases'].copy()
temp_head_string = GameMaster.last_interactive_choice_call['head_string'][:]
if (GameMaster.last_interactive_choice_call['back_want'] and
"back" not in GameMaster.last_interactive_choice_call['cases']):
temp_cases.append("back")
if temp_head_string.count("\n") != 0:
temp_head_string_list = temp_head_string.split("\n")
temp_cases = temp_head_string_list + temp_cases
else:
temp_cases.insert(0, temp_head_string)
temp_cases = ["*" + case for case in temp_cases]
Console.print_with_layout(extra_text=temp_cases, battle=cls.last_interactive_choice_call['battle'])
time.sleep(1)
class Character:
awareness: int
speed: int
def __init__(self, name, gender, dodge, speed, intelligence, prot,
crit, charisma, awareness, max_hp, max_stamina, max_mp, hp_regen,
stamina_regen, mp_regen, strength, description=""):
self.name = name
name_split = name.split()
self.first_name = name_split[0]
self.gender = gender
self.intelligence = intelligence
self.dodge = dodge
self.speed = speed
self.gender = gender
self.prot = prot
self.crit = crit
self.charisma = charisma
self.awareness = awareness
self.max_hp = max_hp
self.current_hp = max_hp
self.max_mp = max_mp
self.current_mp = max_mp
self.max_stamina = max_stamina
self.current_stamina = max_stamina
self.hp_regen = hp_regen
self.mp_regen = mp_regen
self.stamina_regen = stamina_regen
self.strength = strength
self.description = ""
self.Statuses = {}
self.current_enemy = None
self.description = description
# noinspection PyUnresolvedReferences
def calculate_stat_change(self, stat, stat_value):
if stat not in [i.value for i in Stats] and stat != 'damage':
error_logger.error("Unknown stat type at calculate_stat_change: {}".format(stat))
return stat_value
for status in self.Statuses:
try:
if status == stat:
stat_value += self.Statuses[status]['amount']
except KeyError:
pass
# This code is messy, let's pray that it doesn't break
effect_level_head = 1
effect_level_chest = 1
effect_level_legs = 1
if player.inventory.current_equips['head'].parent == player.inventory.current_equips['chest'].parent:
effect_level_head += 1
effect_level_chest += 1
if player.inventory.current_equips['head'].parent == player.inventory.current_equips['legs'].parent:
effect_level_head += 1
effect_level_legs += 1
if player.inventory.current_equips['chest'].parent == player.inventory.current_equips['legs'].parent:
effect_level_legs += 1
effect_level_chest += 1
if effect_level_chest == 3:
effect_level_chest += 2
if effect_level_head == 3:
effect_level_head += 2
if effect_level_legs == 3:
effect_level_legs += 2
change_types = []
armor_effect_amounts = []
if self == player:
for set_part in self.inventory.current_equips:
if set_part == "head":
change_type, amount, __ = (self.inventory.current_equips[set_part].parent
.get_set_effect(player, head=True))
elif set_part == "chest":
change_type, amount, __ = (self.inventory.current_equips[set_part].parent
.get_set_effect(player, chest=True))
elif set_part == "legs":
change_type, amount, __ = (self.inventory.current_equips[set_part].parent
.get_set_effect(player, legs=True))
else:
amount = 0
change_type = ""
change_types.append(change_type)
armor_effect_amounts.append(amount)
elif hasattr(self, 'current_equips'):
for set_part in self.current_equips:
if set_part == "head":
change_type, amount, __ = (self.current_equips[set_part].parent
.get_set_effect(player, head=True))
elif set_part == "chest":
change_type, amount, __ = (self.current_equips[set_part].parent
.get_set_effect(player, chest=True))
elif set_part == "legs":
change_type, amount, __ = (self.current_equips[set_part].parent
.get_set_effect(player, legs=True))
else:
amount = 0
change_type = ""
change_types.append(change_type)
armor_effect_amounts.append(amount)
elif isinstance(self, Enemy):
for set_part in self.inventory.current_equips:
if set_part == "head":
change_type, amount, __ = (self.inventory.current_equips[set_part].parent
.get_set_effect(player, head=True))
elif set_part == "chest":
change_type, amount, __ = (self.inventory.current_equips[set_part].parent
.get_set_effect(player, chest=True))
elif set_part == "legs":
change_type, amount, __ = (self.inventory.current_equips[set_part].parent
.get_set_effect(player, legs=True))
else:
amount = 0
change_type = ""
change_types.append(change_type)
armor_effect_amounts.append(amount)
else:
error_logger.error("Unknown self: {}".format(self))
try:
armor_effect_amounts[0] *= effect_level_head
except IndexError:
pass
try:
armor_effect_amounts[1] *= effect_level_chest
except IndexError:
pass
try:
armor_effect_amounts[2] *= effect_level_legs
except IndexError:
pass
change = False
try:
if change_types[0] == change_types[1]:
del change_types[1]
armor_effect_amounts[0] += armor_effect_amounts[1]
del armor_effect_amounts[1]
change = True
except IndexError:
pass
if change:
try:
if change_types[0] == change_types[1]:
del change_types[1]
armor_effect_amounts[0] += armor_effect_amounts[1]
del armor_effect_amounts[1]
except IndexError:
pass
else:
try:
if change_types[0] == change_types[2]:
del change_types[2]
armor_effect_amounts[0] += armor_effect_amounts[2]
del armor_effect_amounts[2]
except IndexError:
pass
try:
if change_types[1] == change_types[2]:
del change_types[2]
armor_effect_amounts[1] += armor_effect_amounts[2]
del armor_effect_amounts[2]
except IndexError:
pass
for change_type in change_types:
if change_type == stat:
stat_value += armor_effect_amounts[change_types.index(change_type)]
if self == player:
head = self.inventory.current_equips['head']
chest = self.inventory.current_equips['chest']
legs = self.inventory.current_equips['legs']
elif hasattr(self, 'current_equips'):
head = self.current_equips['head']
chest = self.current_equips['chest']
legs = self.current_equips['legs']
elif isinstance(self, Enemy):
head = self.inventory.current_equips['head']
chest = self.inventory.current_equips['chest']
legs = self.inventory.current_equips['legs']
else:
error_logger.error("Unknown self: {}".format(self))
head = Bare.Head
chest = Bare.Chest
legs = Bare.Legs
if stat == 'crit':
stat_value += head.crit_mod
stat_value += chest.crit_mod
stat_value += legs.crit_mod
elif stat == 'intelligence':
stat_value += head.intelligence_mod
stat_value += chest.intelligence_mod
stat_value += legs.intelligence_mod
elif stat == 'dodge':
stat_value += head.dodge_mod
stat_value += chest.dodge_mod
stat_value += legs.dodge_mod
elif stat == 'speed':
stat_value += head.speed_mod
stat_value += chest.speed_mod
stat_value += legs.speed_mod
elif stat == 'damage':
stat_value += head.damage_mod
stat_value += chest.damage_mod
stat_value += legs.damage_mod
elif stat == 'charisma':
stat_value += head.charisma_mod
stat_value += chest.charisma_mod
stat_value += legs.charisma_mod
elif stat == 'hp_regen':
stat_value += head.hp_regen_mod
stat_value += chest.hp_regen_mod
stat_value += legs.hp_regen_mod
elif stat == 'mp_regen':
stat_value += head.mp_regen_mod
stat_value += chest.mp_regen_mod
stat_value += legs.mp_regen_mod
elif stat == 'stamina_regen':
stat_value += head.stamina_regen_mod
stat_value += chest.stamina_regen_mod
stat_value += legs.stamina_regen_mod
if stat_value > 100:
return 100
elif stat_value < -100:
return -100
else:
return stat_value
class Inventory:
def __init__(self, parent, max_spaces: int = 10):
self.max_spaces = max_spaces
self.parent = parent
items = {}
current_equips = {'head': Leaves.Head, 'chest': Leaves.Chest, 'legs': Leaves.Legs, 'left hand': WoodenSword,
'right hand': WoodenSword}
# noinspection PyUnresolvedReferences
@staticmethod
def get_plural(words):
# Dicts are only used in the case of the inventory
if isinstance(words, dict):
plural_words = []
for item in words:
if words[item] > 1:
if item.name[-1] == "h" or item.name[-1] == "H":
plural_words.append('{} {}es'.format(words[item], item.name))
# Some things shouldn't have s at the end, even in plural. Example: golds
elif item.name not in GameMaster.no_s_at_end_exceptions:
plural_words.append('{} {}s'.format(words[item], item.name))
else:
plural_words.append('{} {}'.format(words[item], item.name))
else:
plural_words.append('{} {}'.format(words[item], item.name))
return plural_words
else:
item = words
if isinstance(item, type):
if item.name[-1] == "h" or item.name[-1] == "H":
final_word = ('{}es'.format(item.name))
# Some things shouldn't have s at the end, even in plural. Example: golds is wrong
elif item.name not in GameMaster.no_s_at_end_exceptions:
final_word = ('{}s'.format(item.name))
else:
final_word = ('{}'.format(item.name))
else:
if item[-1] == "h" or item[-1] == "H":
final_word = ('{}es'.format(item))
# Some things shouldn't have s at the end, even in plural. Example: golds
elif item not in GameMaster.no_s_at_end_exceptions:
final_word = ('{}s'.format(item))
else:
final_word = ('{}'.format(item))
return final_word
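# Worked examples: get_plural('radish') -> 'radishes'; get_plural('sword') -> 'swords';
# get_plural('Gold') -> 'Gold' (it is listed in GameMaster.no_s_at_end_exceptions)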
def unequip(self, slot: str):
if self.current_equips[slot] != Bare:
try_unequip = self.add_item(self.current_equips[slot])
if try_unequip == "bag_full":
return
else:
if slot == "head":
self.current_equips[slot] = Bare.Head
elif slot == "chest":
self.current_equips[slot] = Bare.Chest
elif slot == "legs":
self.current_equips[slot] = Bare.Legs
elif slot == "left hand":
self.current_equips[slot] = Fist
elif slot == "right hand":
self.current_equips[slot] = Fist
else:
error_logger.error("slot at unequip={}".format(slot))
else:
error_logger.error('Unhandled case of trying to unequip bare in the {} slot'.format(slot))
def throw_away(self, item):
# Method for removing an item from the inventory
if item in self.items:
# Checking if the item actually "exists", otherwise deletes it and logs it
if self.items[item] <= 0:
del self.items[item]
error_logger.error("An item of amount {} was found in inventory at throw_away".format(item))
elif self.items[item] == 1:
# If only one of the item exists, check if the player is sure
# If the player does want to throw it away, it does so and informs the player via the action log
confirmation = Console.interactive_choice(['Yes', 'No'],
('Are you sure that you want to throw away the {} ?'.
format(item.name)),
battle=True)
if confirmation == "Yes":
del self.items[item]
GameMaster.extend_action_log("You threw away the {}".format(item.name))
return "all"
elif confirmation == "No":
return
else:
error_logger.error('Unknown case "{}"'.format(confirmation))
else:
amount = Console.interactive_choice(['all', 'specific amount'],
'How many do you want to throw away?',
battle=True, back_want=True)
if amount == 'all':
confirmation = (Console.interactive_choice
(['Yes', 'No'], ('Are you sure that you want to throw away all of the {} ?'.
format(item.name)), battle=True,))
if confirmation == "Yes":
GameMaster.extend_action_log("You threw away all the {}".format(item.name))
del self.items[item]
return 'all'
elif confirmation == "No":
return
else:
error_logger.error('Unknown case "{}"'.format(confirmation))
elif amount == 'specific amount':
if item.name in GameMaster.no_s_at_end_exceptions:
head_string = "How much of the {} do you want to throw away?".format(self.
get_plural(item.name))
else:
head_string = "How many of the {} do you want to throw away?".format(self.
get_plural(item.name))
while True:
while True:
Console.clear()
amount_to_throw_away: int = input(head_string + "\n"
"If you do not want to throw away any, enter 0\n")
if isint(amount_to_throw_away):
amount_to_throw_away = int(amount_to_throw_away)
break
if amount_to_throw_away >= self.items[item]:
GameMaster.extend_action_log("You threw away all the {}".format(item.name))
del self.items[item]
return 'all'
elif amount_to_throw_away <= 0:
return
else:
(GameMaster.extend_action_log("You threw away {} {}"
.format(amount_to_throw_away,
self.get_plural(item.name))))
self.items[item] -= amount_to_throw_away
return
elif item in self.current_equips:
confirmation = Console.interactive_choice(['Yes', 'No'],
('Are you sure that you want to throw away the {} ?'.
format(self.current_equips[item].name)),
battle=True)
# You can't throw away your own body
if confirmation == "Yes":
if item == "head":
self.current_equips[item] = Bare.Head
elif item == "chest":
self.current_equips[item] = Bare.Chest
elif item == "legs":
self.current_equips[item] = Bare.Legs
elif item == "left hand":
self.current_equips[item] = Fist
elif item == "right hand":
self.current_equips[item] = Fist
else:
error_logger.error('Error trying to unequip unknown type: {}'.format(item))
else:
error_logger.error("Trying to remove the item {}"
" that isn't in the inventory or current equips: {}, {}".format(item, self.items,
self.current_equips))
def add_item(self, item, amount: int = 1):
current_weight = 0
if not len(self.items) == 0:
for thing in self.items:
current_weight = current_weight + (thing.weight * self.items[thing])
if (current_weight + item.weight) <= self.max_spaces:
if item not in self.items:
self.items[item] = amount
else:
self.items[item] += amount
else:
print("Your bag can't fit this item")
return 2
else:
self.items[item] = amount
# Method for equipping an armor
def equip(self, item, hand=""):
# Checking if the item is an armor piece
if hasattr(item, "parent"):
# Checking that it exists
if not self.items[item] <= 0:
# noinspection PyUnresolvedReferences
if self.current_equips[item.set_part].parent == Bare:
self.current_equips[item.set_part] = item
GameMaster.extend_action_log("You equip a {}".format(item.name))
if self.items[item] == 1:
del self.items[item]
else:
self.items[item] -= 1
return 1
else:
error_logger.error("Equip called on non-bare Bare")
return 2
else:
error_logger.error("{} {} found in inventory".format(self.items[item], item.name))
del self.items[item]
elif isinstance(item, Weapon):
# Checking that it exists
if not self.items[item] <= 0:
if hand != "":
if self.current_equips[hand] == Fist:
self.current_equips[hand] = item
GameMaster.extend_action_log("You equip a {}".format(item.name))
if self.items[item] == 1:
del self.items[item]
else:
self.items[item] -= 1
return 1
else:
GameMaster.extend_action_log("Your {} is already using something else".format(hand))
return 1
else:
error_logger.error("Trying to equip weapon {}, {} without hand provided".format(item.name,
item))
else:
error_logger.error("{} {} found in inventory".format(self.items[item], item.name))
del self.items[item]
else:
error_logger.error("trying to equip {}, which is not an armor or weapon".format(item))
def view(self):
# Returns a list of your current items and an informative string that will not be clickable
if len(self.items) != 0:
head_string = "You have:"
else:
head_string = "You have nothing at all, you poor peasant"
# Formatting the items to be grammatically proper
item_list = self.get_plural(self.items)
# Returning the items in the inventory
return head_string, item_list
def view_raw_names(self):
# Returns a list of all the object names in the inventory
item_list = []
for item in self.items:
item_list.append(item)
return item_list
# noinspection PyUnresolvedReferences
def view_equips(self):
# Returns a string with the current equips and the effects of the armor
# This code is messy, I don't want to talk about it
# It works(Probably)
effect_level_head = 1
effect_level_chest = 1
effect_level_legs = 1
if self.current_equips['head'].parent == self.current_equips['chest'].parent:
effect_level_head += 1
effect_level_chest += 1
if self.current_equips['head'].parent == self.current_equips['legs'].parent:
effect_level_head += 1
effect_level_legs += 1
if self.current_equips['chest'].parent == self.current_equips['legs'].parent:
effect_level_legs += 1
effect_level_chest += 1
if effect_level_chest == 3:
effect_level_chest += 2
if effect_level_head == 3:
effect_level_head += 2
if effect_level_legs == 3:
effect_level_legs += 2
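# In effect: each pair of matching set pieces raises both pieces' effect level by one, so a full
# matching set puts every piece at level 3, which the +2 bonus above then raises to level 5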
head_string = "Equipment bonuses:\n"
armor_effect_descriptions = []
armor_effect_amounts = []
for set_part in self.current_equips:
if set_part == "head":
__, amount, description = (self.current_equips[set_part].parent
.get_set_effect(self, head=True))
elif set_part == "chest":
__, amount, description = (self.current_equips[set_part].parent
.get_set_effect(self, chest=True))
elif set_part == "legs":
__, amount, description = (self.current_equips[set_part].parent
.get_set_effect(self, legs=True))
elif set_part == "left hand" or set_part == "right hand":
pass
else:
error_logger.error("A key dict which does not belong there is in current_equips: {}"
.format(self.current_equips))
description = "Something has gone terribly wrong and will be fixed soon"
amount = 0
# noinspection PyUnboundLocalVariable
armor_effect_descriptions.append(description)
# noinspection PyUnboundLocalVariable
armor_effect_amounts.append(amount)
armor_effect_amounts[0] *= effect_level_head
armor_effect_amounts[1] *= effect_level_chest
armor_effect_amounts[2] *= effect_level_legs
change = False
if armor_effect_descriptions[0] == armor_effect_descriptions[1]:
del armor_effect_descriptions[1]
armor_effect_amounts[0] += armor_effect_amounts[1]
del armor_effect_amounts[1]
change = True
if change:
if armor_effect_descriptions[0] == armor_effect_descriptions[1]:
del armor_effect_descriptions[1]
armor_effect_amounts[0] += armor_effect_amounts[1]
del armor_effect_amounts[1]
else:
if armor_effect_descriptions[0] == armor_effect_descriptions[2]:
del armor_effect_descriptions[2]
armor_effect_amounts[0] += armor_effect_amounts[2]
del armor_effect_amounts[2]
try:
if armor_effect_descriptions[1] == armor_effect_descriptions[2]:
del armor_effect_descriptions[2]
armor_effect_amounts[1] += armor_effect_amounts[2]
del armor_effect_amounts[2]
except IndexError:
pass
armor_effect_descriptions = set(armor_effect_descriptions)
armor_effect_descriptions = list(armor_effect_descriptions)
for armor_effect in armor_effect_descriptions:
head_string = (head_string + armor_effect +
str(abs(armor_effect_amounts[armor_effect_descriptions.index(armor_effect)])) + "\n")
head_string = head_string + "Current equips:"
return ['Head: {}'.format(self.current_equips['head'].name),
'Chest: {}'.format(self.current_equips['chest'].name),
'Legs: {}'.format(self.current_equips['legs'].name),
'Left Hand: {}'.format(self.current_equips['left hand'].name),
'Right Hand: {}'.format(self.current_equips['right hand'].name)], head_string
class Moves:
# A collection of all possible moves
# The general blueprint is:
# does an action based on why made the move (caster)
# creates a string to add to the action log and returns it
def __init__(self, parent):
self.parent = parent
self.unlocked_Moves = {}
self.supported_moves = {
self.calming_heal:
{
'type': 'heal',
},
self.intense_heal:
{
'type': 'heal',
}
}
def add_move(self, new_move):
if new_move in self.supported_moves:
if new_move not in self.unlocked_Moves:
self.unlocked_Moves[new_move] = {}
self.unlocked_Moves[new_move]['type'] = self.supported_moves[new_move]['type']
else:
return 2
else:
error_logger.error("unknown move: {} at add_move".format(new_move.__name__))
def calming_heal(self):
if self.parent.current_mp < 5:
if self.parent == player:
return "You do not have enough mp to use this move"
else:
return
else:
self.parent.current_mp -= 5
amount_healed = int((self.parent.current_hp / 5) + (self.parent.max_hp / 10))
amount_healed = int(amount_healed * ((self.parent.calculate_stat_change(
Stats.intelligence, self.parent.intelligence) / 100) + 1))
self.parent.current_hp += amount_healed
if self.parent.current_hp >= self.parent.max_hp:
self.parent.current_hp = self.parent.max_hp
awareness_bonus = 20
self.parent.awareness += awareness_bonus
if self == player.moves:
if GameMaster.settings['nerd mode']:
return "You heal for {} hp and your awareness increases by {}".format(amount_healed,
awareness_bonus)
else:
return "You heal for {} hp and feel a bit calmer".format(amount_healed)
else:
if self.parent.gender == "male":
gender_pronoun = "his"
else:
gender_pronoun = "her"
if GameMaster.settings['nerd mode']:
return "{} heals {} hp and {} awareness is raised by {}".format(self.parent.first_name,
amount_healed,
gender_pronoun,
awareness_bonus)
else:
return "{} heals for {} hp and becomes calmer".format(self.parent.first_name, amount_healed)
def intense_heal(self):
if self.parent.current_mp < 7:
if self.parent == player:
return "You do not have enough mp to use this move"
else:
return
else:
self.parent.current_mp -= 7
amount_healed = int((self.parent.current_hp / 3) + (self.parent.max_hp / 4))
amount_healed = int(amount_healed * ((self.parent.calculate_stat_change(
Stats.intelligence, self.parent.intelligence) / 100) + 1))
self.parent.current_hp += amount_healed
if self.parent.current_hp >= self.parent.max_hp:
self.parent.current_hp = self.parent.max_hp
if self == player.moves:
return "You heal for {} hp".format(amount_healed)
else:
return "{} enemy heals for {} hp".format(self.parent.first_name, amount_healed)
def apply_status(self, status, duration=0, effect_amount=0):
stat_statuses = ('crit', 'prot', 'intelligence', 'dodge', 'strength', 'charisma')
if status in supported_Statuses:
if status in self.Statuses:
if status == Statuses.stun:
pass
else:
self.Statuses[status]['amount'] += effect_amount
self.Statuses[status]['duration'] += duration
else:
if status == Statuses.stun:
self.Statuses[status] = {}
self.Statuses[status]['duration'] = duration
else:
self.Statuses[status] = {}
self.Statuses[status]['duration'] = duration
self.Statuses[status]['amount'] = effect_amount
elif status in stat_statuses:
if status in self.Statuses:
self.Statuses[status]['duration'] += duration
self.Statuses[status]['amount'] += effect_amount
else:
self.Statuses[status] = {}
self.Statuses[status]['duration'] = duration
self.Statuses[status]['amount'] = effect_amount
else:
error_logger.error("Unknown Effect: {}".format(status))
awareness_levels = {95: "paranoid", 90: "on guard", 80: "alert",
60: "drowsy", 30: "distracted", 20: "panicking"}
speed_levels = {90: "fast as fuck boiii", 80: "fast", 70: "fleet",
40: "tired", 30: "sluggish", 20: "injured"}
crit_levels = {33: 'very likely', 20: 'highly likely', 10: 'likely', 5: 'unlikely', 2: 'very unlikely'}
dodge_levels = {80: 'very likely', 60: 'highly likely', 45: 'likely', 20: 'unlikely', 10: 'very unlikely'}
prot_levels = {80: 'the majority', 60: 'a big part of', 45: 'half', 20: 'a small bit', 10: 'very little'}
def stat_level(self, stat, custom_stat=None):
if stat == "crit":
stat = self.calculate_stat_change(Stats.crit, self.crit)
stat_levels = self.crit_levels
elif stat == "awareness":
stat = self.calculate_stat_change(Stats.awareness, self.awareness)
stat_levels = self.awareness_levels
elif stat == "speed":
stat = self.calculate_stat_change(Stats.speed, self.speed)
stat_levels = self.speed_levels
elif stat == "dodge":
stat = self.calculate_stat_change(Stats.dodge, self.dodge)
stat_levels = self.dodge_levels
elif stat == "prot":
stat = self.calculate_stat_change(Stats.prot, self.prot)
stat_levels = self.prot_levels
else:
error_logger.error("Unknown stat: {}".format(stat))
stat_levels = {0: 'Something failed, please send your logs to the dev'}
stat = 0
if custom_stat is None:
return stat_levels[closest_match(stat, stat_levels)]
else:
return stat_levels[closest_match(custom_stat, stat_levels)]
def deal_damage(self, damage):
self.current_enemy.current_hp -= damage
if self.current_enemy.current_hp <= 0 and self.current_enemy != player:
player.loot_drop()
def inspect(self, target):
if self.gender == "male":
gender_pronoun_1 = "his"
gender_pronoun_2 = "he"
else:
gender_pronoun_1 = "her"
gender_pronoun_2 = "she"
# Checking Whether to add descriptions for ones statuses
# If so, creates a list with all the current statuses' descriptions
if not len(self.Statuses) == 0:
if GameMaster.settings['nerd mode']:
status_string = ""
temp_descritions = []
for status in self.Statuses:
if status in supported_Statuses:
new_status = ""
new_status += "{}: ".format(supported_Statuses[status]['description_nerd'])
try:
if self.Statuses[status]['duration'] > 1:
end = "s"
else:
end = ""
duration = self.Statuses[status]['duration']
except KeyError:
error_logger.error("Status with unknown duration: {}, a {}'s statuses {}"
.format(self.name, self.__class__.__name__, self.Statuses))
duration = "?"
end = "s"
new_status += "{} turn{}".format(duration, end)
else:
new_status = "{} increased by {} for {} turn".format(status.capitalize(),
self.Statuses[status]['amount'],
self.Statuses[status]['duration'])
if self.Statuses[status]['duration'] > 1:
end = "s"
else:
end = ""
new_status += end
temp_descritions.append(new_status)
counter = 0
for status in temp_descritions:
counter += 1
if temp_descritions.index(status) == 0:
status_string += status
else:
if counter >= 3:
try:
_ = temp_descritions[temp_descritions.index(status) + 1]
del _
status_string += ", {}\n".format(status)
counter = 0
except IndexError:
status_string += ", {}".format(status)
counter = 0
else:
if status.endswith("\n"):
status_string += status
else:
status_string += ", {}".format(status)
if self == player:
current_states = "\nCurrent statuses:\n{}".format(status_string)
else:
current_states = "\n{}s statuses:\n{}".format(self.first_name.capitalize(), status_string)
else:
status_descriptions = []
for status in self.Statuses:
if status in supported_Statuses:
status_descriptions.append(supported_Statuses[status]['description'])
else:
new_status = "{} is {} increased by {} for {} turn".format(("your" if self == player else
gender_pronoun_1),
status.capitalize(),
self.Statuses[status]['amount'],
self.Statuses[status]['duration'])
if self.Statuses[status]['duration'] > 1:
end = "s"
else:
end = ""
new_status += end
status_descriptions.append(new_status)
# Creates a pretty string, properly joining the descriptions with " ", "," and "and"
status_string = ""
for status in status_descriptions:
if status_descriptions.index(status) == (len(status_descriptions) - 2):
status_string = status_string + status + " "
elif status_descriptions.index(status) == (len(status_descriptions) - 1):
if not len(status_string) == 0:
status_string = status_string + "and " + status + "."
else:
status_string = status_string + status + "."
else:
status_string = status_string + status + ", "
if self == player:
current_states = "\nYou are {}".format(status_string)
else:
current_states = "\n{} is {}".format(self.first_name.capitalize(), status_string)
else:
# If the enemy is not afflicted, an empty string will be returned to be used
current_states = ""
# Applying buffs and debuffs to the values
temp_speed = self.calculate_stat_change(Stats.speed, self.speed)
temp_awareness = self.calculate_stat_change(Stats.awareness, self.awareness)
temp_strength = self.calculate_stat_change(Stats.strength, self.strength)
temp_intelligence = self.calculate_stat_change(Stats.intelligence, self.intelligence)
temp_dodge = self.calculate_stat_change(Stats.dodge, self.dodge)
temp_prot = self.calculate_stat_change(Stats.prot, self.prot)
temp_crit = self.calculate_stat_change(Stats.crit, self.crit)
temp_hp_regen = self.calculate_stat_change(Stats.hp_regen, self.hp_regen)
temp_mp_regen = self.calculate_stat_change(Stats.mp_regen, self.mp_regen)
temp_stamina_regen = self.calculate_stat_change(Stats.stamina_regen, self.stamina_regen)
temp_charisma = self.calculate_stat_change(Stats.charisma, self.charisma)
# Joining all the string together
# Different depending on if the target is the player or the enemy
if isinstance(target, Player):
if GameMaster.settings['nerd mode']:
# noinspection PyUnresolvedReferences
return ("Level: {}, xp: {}.\n"
"Hp: {}/{}, mp: {}/{}, stamina: {}/{}.\n"
"Hp regen: {}, mp regen: {}, stamina regen: {}.\n"
"Strength: {}, intelligence: {}, crit: {}%.\n"
"Prot: {}%, dodge: {}%, speed: {}, awareness: {}, charisma: {}."
"{}"
.format(self.level, self.xp, self.current_hp, self.max_hp, self.current_mp, self.max_mp,
self.current_stamina, self.max_stamina,
temp_hp_regen, temp_mp_regen, temp_stamina_regen,
temp_strength, temp_intelligence, temp_crit, temp_prot, temp_dodge,
temp_speed, temp_awareness, temp_charisma, current_states))
else:
# noinspection PyUnresolvedReferences
return ("You are level {} and you have {} xp. You have {}/{} hp, {}/{} mp and {}/{} stamina.\n"
"You will regain {} hp, {} mp and {} stamina at the start of your turn.\n"
"Your current strength is {}, your intelligence is {} and your attacks' damage are {} to be "
"doubled.\n"
"You are currently {} and {}.\nYou will block {} of incoming attacks and are {} to dodge them."
"{}"
.format(self.level, self.xp, self.current_hp, self.max_hp, self.current_mp, self.max_mp,
self.current_stamina, self.max_stamina, self.hp_regen, self.mp_regen,
self.stamina_regen, temp_strength, temp_intelligence, self.stat_level(Stats.crit),
self.stat_level(Stats.awareness),
self.stat_level(Stats.speed), self.stat_level(Stats.prot),
self.stat_level(Stats.dodge), current_states))
else:
if GameMaster.settings['nerd mode']:
if self.__class__.__name__[0] in GameMaster.vowels:
prefix = "An"
else:
prefix = "A"
# noinspection PyUnresolvedReferences
return("{} {}, Rank {}.\n"
"Hp: {}/{}, mp: {}/{}, stamina: {}/{}.\n"
"Hp regen: {}, mp regen: {}, stamina regen: {}.\n"
"Strength: {}, intelligence: {}, crit: {}%.\n"
"Prot: {}%, dodge: {}%, speed: {}, awareness: {}, charisma: {}."
"{}"
.format(prefix, self.__class__.__name__, self.rank, self.current_hp, self.max_hp,
self.current_mp, self.max_mp, self.current_stamina, self.max_stamina,
temp_hp_regen, temp_mp_regen, temp_stamina_regen,
temp_strength, temp_intelligence, temp_crit, temp_prot, temp_dodge,
temp_speed, temp_awareness, temp_charisma, current_states))
else:
return ("{}: {}.\n{} has {}/{} hp, {}/{} mp and {}/{} stamina.\n{} will regain {} hp, {} mp and {} "
"stamina at the start of {} turn.\n"
"{} strength is {}, {} intelligence is {} and {} attacks' damage are {} to be doubled.\n"
"{} is currently {} and {}.\n{} will block {} of your attacks and is {} to dodge them.{}"
.format
(self.name, self.description, self.name, self.current_hp, self.max_hp, self.current_mp,
self.max_mp, self.current_stamina, self.max_stamina, gender_pronoun_2.capitalize(),
self.hp_regen, self.mp_regen, self.stamina_regen, gender_pronoun_1,
gender_pronoun_1.capitalize(), temp_strength, gender_pronoun_1, temp_intelligence,
gender_pronoun_1.capitalize(), self.stat_level(Stats.crit),
gender_pronoun_2.capitalize(), self.stat_level(Stats.speed), self.stat_level(Stats.awareness),
gender_pronoun_2.capitalize(), self.stat_level(Stats.prot), self.stat_level(Stats.dodge),
current_states))
class Player(Character):
# noinspection PyMissingConstructor
def __init__(self, name, gender):
self.level = 1
self.xp = 0
super(Player, self).__init__(name, gender, random.randint(20, 80), random.randint(50, 80),
random.randint(5, 10), random.randint(0, 5), random.randint(1, 5),
random.randint(5, 10), random.randint(70, 100), random.randint(25, 30),
random.randint(10, 15), random.randint(10, 15),
1 if random.randint(0, 100) > 80 else 0,
random.randint(1, 3), random.randint(1, 3), random.randint(5, 10))
def loot_drop(self):
print('You successfully defeated {}!'.format(self.current_enemy.name))
dropped_items = {}
if Gold.rarity >= random.randint(0, 100):
dropped_items[Gold] = random.randint(self.current_enemy.rank * 25, self.current_enemy.rank * 100)
for drop in self.current_enemy.drops: # last
if drop.rarity >= random.randint(0, 100):
dropped_items[drop] = int(drop.rarity * (self.current_enemy.rank * 0.5))
def alive_check(self):
if self.current_hp <= 0:
if GameMaster.last_damage_player != "":
self.dead(GameMaster.last_damage_player)
else:
error_logger.error("Player Took Undocumented Damage")
self.dead()
@staticmethod
def dead(killer=None, custom_text: str = ''):
Console.clear()
if custom_text != '':
print(custom_text)
else:
if killer is not None:
print("You were killed by {}.".format(killer))
else:
print("You died")
time.sleep(5)
main_menu()
class Enemy(Character):
def alive_check(self):
if self.current_hp <= 0:
pass
# todo call to player xp add and loot drop
class Orc(Enemy):
def __init__(self, rank: int, name: str, gender, description: str, dodge: int, speed: int, intelligence: int,
prot: int, crit, awareness: int, *drops, injured: float = False):
self.rank = rank
max_hp = int(player.max_hp * (rank * 0.7)) + (random.randint(-int(player.max_hp * 0.2),
int(player.max_hp * 0.4)))
max_mp = int(player.max_mp * (rank * 0.3)) + (random.randint(-int(player.max_mp * 0.5),
int(player.max_mp * 0.2)))
max_stamina = int(player.max_stamina * (rank * 0.7)) + (random.randint(-int(player.max_stamina * 0.3),
int(player.max_stamina * 0.4)))
hp_regen = round(random.randint(-int(player.hp_regen * (rank * 0.8)),
int(player.hp_regen * (rank * 0.3))))
mp_regen = random.randint(-int(player.mp_regen * (rank * 0.7)),
int(player.mp_regen * (rank * 0.3)))
stamina_regen = random.randint(-int(player.stamina_regen * (rank * 0.5)),
int(player.stamina_regen * (rank * 0.3)))
if max_mp < 10:
max_mp = 10
if max_stamina < 10:
max_stamina = 10
if hp_regen < 0:
hp_regen = 0
if stamina_regen < 0:
stamina_regen = 0
if mp_regen < 0:
mp_regen = 0
strength = round(player.max_hp * (rank * 0.1)) + rank * 2
dodge += random.randint(-40, -20)
if dodge < 0:
dodge = 0
prot += random.randint(15, 50)
if prot > 80:
prot = 80
intelligence += random.randint(-70, -20)
if intelligence < 0:
intelligence = 0
charisma = 0
self.drops = []
for item in drops:
self.drops.append(item)
super(Orc, self).__init__(name, gender, dodge, speed, intelligence, prot, crit, charisma, awareness, max_hp,
max_stamina, max_mp, hp_regen, stamina_regen, mp_regen, strength,
description=description)
self.current_hp += random.randint(round(-3 * (self.rank * 0.5)), round(5 * (self.rank * 0.5)))
if self.current_hp > self.max_hp:
self.current_hp = self.max_hp
if injured:
self.current_hp = int(self.current_hp * injured)
resistances = {
Statuses.apply_bleed: 0,
}
class Animal(Enemy):
def __init__(self, rank: int, name: str, gender, description: str, dodge: int, speed: int, intelligence: int,
prot: int, crit, awareness: int, *drops, injured: float = False):
self.rank = rank
max_hp = int(player.current_hp * (rank * 0.4)) + (random.randint(-int(player.max_hp * 0.3),
int(player.max_hp * 0.25)))
max_mp = int(player.max_mp * (rank * 0.2)) + (random.randint(-int(player.max_mp * 0.5),
int(player.max_mp * 0.2)))
max_stamina = int(player.max_stamina * (rank * 0.7)) + (random.randint(-int(player.max_stamina * 0.3),
int(player.max_stamina * 0.4)))
hp_regen = round(random.randint(-int(player.hp_regen * (rank * 0.8)),
int(player.hp_regen * (rank * 0.3))))
mp_regen = random.randint(-int(player.mp_regen * (rank * 0.7)),
int(player.mp_regen * (rank * 0.3)))
stamina_regen = random.randint(-int(player.stamina_regen * (rank * 0.4)),
int(player.stamina_regen * (rank * 0.4)))
if max_mp < 10:
max_mp = 10
if max_stamina < 10:
max_stamina = 10
if hp_regen < 0:
hp_regen = 0
if stamina_regen < 0:
stamina_regen = 0
if mp_regen < 0:
mp_regen = 0
charisma = 0
strength = round(player.max_hp * (rank * 0.1)) + rank * 2
self.drops = []
for item in drops:
self.drops.append(item)
super(Animal, self).__init__(name, gender, dodge, speed, intelligence, prot, crit, charisma, awareness, max_hp,
max_stamina, max_mp, hp_regen, stamina_regen, mp_regen,
strength, description=description)
self.current_hp += random.randint(round(-3 * (self.rank * 0.5)), round(5 * (self.rank * 0.5)))
if self.current_hp > self.max_hp:
self.current_hp = self.max_hp
if injured:
self.current_hp = int(self.current_hp * injured)
resistances = {
Statuses.apply_bleed: 0,
}
class Human(Enemy):
def __init__(self, rank: int, name: str, gender, description: str, dodge: int, speed: int, intelligence: int,
prot: int, crit, awareness: int, *drops, injured: float = False):
self.rank = rank
max_hp = int(player.current_hp * (rank * 0.5)) + (random.randint(-int(player.max_hp * 0.3),
int(player.max_hp * 0.3)))
max_mp = int(player.max_mp * (rank * 0.7)) + (random.randint(-int(player.max_mp * 0.3),
int(player.max_mp * 0.5)))
max_stamina = int(player.max_stamina * (rank * 0.5)) + (random.randint(-int(player.max_stamina * 0.2),
int(player.max_stamina * 0.4)))
hp_regen = round(random.randint(-int(player.hp_regen * (rank * 0.8)),
int(player.hp_regen * (rank * 0.3))))
mp_regen = random.randint(-int(player.mp_regen * (rank * 0.2)),
int(player.mp_regen * (rank * 0.6)))
stamina_regen = random.randint(-int(player.stamina_regen * (rank * 0.5)),
int(player.stamina_regen * (rank * 0.4)))
if max_mp < 10:
max_mp = 10
if max_stamina < 0:
max_stamina = 0
if hp_regen < 0:
hp_regen = 0
if stamina_regen < 0:
stamina_regen = 0
if mp_regen < 0:
mp_regen = 0
strength = round(player.max_hp * (rank * 0.1)) + rank * 2
dodge += random.randint(-10, 25)
if dodge < 0:
dodge = 0
prot += random.randint(-10, 10)
if prot > 80:
prot = 80
intelligence += random.randint(round(rank * 5), round(rank * 20))
if intelligence < 0:
intelligence = 0
charisma = player.charisma + 10
if charisma > 100:
charisma = 100
self.drops = []
for item in drops:
self.drops.append(item)
super(Human, self).__init__(name, gender, dodge, speed, intelligence, prot, crit, charisma, awareness, max_hp,
max_stamina, max_mp, hp_regen, stamina_regen, mp_regen,
strength, description=description)
self.current_hp += random.randint(round(-3 * (self.rank * 0.5)), round(5 * (self.rank * 0.5)))
if self.current_hp > self.max_hp:
self.current_hp = self.max_hp
if injured:
self.current_hp = int(self.current_hp * injured)
resistances = {
Statuses.apply_bleed: 0,
}
class Skeleton(Enemy):
def __init__(self, rank: int, name: str, gender, description: str, dodge: int, speed: int, intelligence: int,
prot: int, crit, awareness: int, *drops, injured: float = False):
self.rank = rank
max_hp = int(player.max_hp * (rank * 0.3)) + (random.randint(-int(player.max_hp * 0.3),
int(player.max_hp * 0.15)))
max_mp = int(player.max_mp * (rank * 0.2)) + (random.randint(-int(player.max_mp * 0.5),
int(player.max_mp * 0.2)))
max_stamina = int(player.max_stamina * (rank * 1.5)) + (random.randint(-int(player.max_stamina * 0.3),
int(player.max_stamina * 0.6)))
hp_regen = round(random.randint(-int(player.hp_regen * (rank * 0.8)),
int(player.hp_regen * (rank * 0.3))))
mp_regen = random.randint(-int(player.mp_regen * (rank * 0.7)),
int(player.mp_regen * (rank * 0.3)))
stamina_regen = random.randint(-int(player.stamina_regen * (rank * 0.2)),
int(player.stamina_regen * (rank * 0.6)))
if max_mp < 10:
max_mp = 10
if max_stamina < 10:
max_stamina = 10
if hp_regen <= 0:
hp_regen = 0
if stamina_regen < 0:
stamina_regen = 0
if mp_regen < 0:
mp_regen = 0
strength = round(player.max_hp * (rank * 0.1)) + rank * 2
dodge += random.randint(10, 25)
if dodge < 0:
dodge = 0
intelligence -= random.randint(round(rank * 3), round(rank * 10))
prot += random.randint(-50, -10)
if prot > 80:
prot = 80
charisma = 0
self.drops = []
for item in drops:
self.drops.append(item)
super(Skeleton, self).__init__(name, gender, dodge, speed, intelligence, prot, crit, charisma, awareness,
max_hp, max_stamina, max_mp, hp_regen, stamina_regen, mp_regen,
strength, description=description)
self.current_hp += random.randint(round(-3 * (self.rank * 0.5)), round(5 * (self.rank * 0.5)))
if self.current_hp > self.max_hp:
self.current_hp = self.max_hp
if injured:
self.current_hp = int(self.current_hp * injured)
resistances = {
Statuses.apply_bleed: 100,
Statuses.stun: 100
}
def main_menu():
while True:
# The name which will be shown during the startup sequence and a calculation to ensure it will be centered
game_name = GameMaster.game_name
console_spaces_center_name = 60 - int(len(game_name) / 2)
# Dramatic sort of startup animation
# This is possible by having for loops counting down determine the game name's location and clearing the console
# in each new iteration
for i in range(5, -1, -1):
Console.clear()
print("\n" * i + " " * console_spaces_center_name + game_name)
time.sleep(0.2)
for i in range(console_spaces_center_name, -1, -1):
Console.clear()
print(" " * i + game_name)
time.sleep(0.03)
time.sleep(0.2)
Console.clear()
print("Welcome to {}".format(game_name))
time.sleep(2)
def combat(enemy): # todo location
# Sets both combatant's current enemy to the others
# This is used in a couple of places for example to determine the loot offered to the player
player.current_enemy, enemy.current_enemy = enemy, player
print("{} approaches!".format(enemy.name))
GameMaster.turn: int = 1
first_turn: bool = True
def player_turn():
player.current_hp += player.hp_regen
if player.current_hp > player.max_hp:
player.current_hp = player.max_hp
player.current_mp += player.mp_regen
if player.current_mp > player.max_mp:
player.current_mp = player.max_mp
player.current_stamina += player.stamina_regen
if player.current_stamina > player.max_stamina:
player.current_stamina = player.max_stamina
print("player")
for status in list(player.Statuses):
player.Statuses[status]['duration'] -= 1
if player.Statuses[status]['duration'] <= 0:
del player.Statuses[status]
for status in list(player.Statuses):
if status in supported_Statuses:
if supported_Statuses[status]['apply_type'] == "start_dot":
damage = status(player)
GameMaster.extend_action_log("{} {} damage.".format
(supported_Statuses[status]['on_apply_message_player'], damage))
GameMaster.last_damage_player = supported_Statuses[status]['type']
player.current_hp -= damage
player.alive_check()
def main_choice():
def execute_move(move_type):
available_moves = []
for ability in player.moves.unlocked_Moves:
if player.moves.unlocked_Moves[ability]['type'] == move_type:
available_moves.append(ability)
if len(available_moves) == 0:
Console.interactive_choice(["back"],
'You Do not Have any {} Moves Yet. Please Try Something Else'.
format(move_type), battle=True)
else:
pretty_moves = []
for move in available_moves:
pretty_string_split = move.__name__.split("_")
pretty_string_joined = " ".join(pretty_string_split)
pretty_moves.append(pretty_string_joined)
move: str = Console.interactive_choice(pretty_moves,
"Click on the move you want to use\nAvailable Moves:",
back_want=True, battle=True)
if move is not None:
move_result: str = available_moves[pretty_moves.index(move)]()
else:
move_result = None
if move_result is not None:
GameMaster.extend_action_log(move_result)
return True
Console.clear()
supported_head_moves = [ColoredString('{}defend{}'.format(colorama.Style.DIM, colorama.Style.NORMAL),
colored_chars=(len(colorama.Style.DIM) + len(colorama.Style.NORMAL))),
ColoredString('{}heal'.format(colorama.Fore.LIGHTYELLOW_EX),
colored_chars=len(colorama.Fore.LIGHTYELLOW_EX)),
ColoredString('{}attack'.format(colorama.Fore.LIGHTRED_EX),
colored_chars=len(colorama.Fore.LIGHTRED_EX)),
ColoredString('{}debuff'.format(colorama.Fore.YELLOW),
colored_chars=len(colorama.Fore.YELLOW)),
ColoredString('{}buff'.format(colorama.Fore.LIGHTCYAN_EX),
colored_chars=len(colorama.Fore.LIGHTCYAN_EX)),
'use item(upcoming)', 'inspect', 'help', 'view and edit your inventory', 'settings']
while True:
action = Console.interactive_choice(supported_head_moves,
ColoredString("{}What do you want to do?".format
(colorama.Style.BRIGHT),
colored_chars=(len(colorama.Style.BRIGHT))),
battle=True, enumerated=True)
# The equivalent of defend
if action == 0:
end_player_choice = execute_move("defend")
if end_player_choice:
break
# The equivalent of heal
elif action == 1:
end_player_choice = execute_move("heal")
if end_player_choice:
break
# The equivalent of attack
elif action == 2:
end_player_choice = execute_move("attack")
if end_player_choice:
break
# The equivalent of debuff
elif action == 3:
end_player_choice = execute_move("debuff")
if end_player_choice:
break
# The equivalent of buff
elif action == 4:
end_player_choice = execute_move("buff")
if end_player_choice:
break
# The equivalent of use item
elif action == 5:
end_player_choice = execute_move("use item")
if end_player_choice:
break
# The equivalent of inspect
elif action == 6:
while True:
inspectable_objects = ['yourself', '{}'.format(player.current_enemy.name)]
to_inspect = Console.interactive_choice(inspectable_objects, ('Which one of these do you '
'want to inspect?'),
battle=True, back_want=True)
if to_inspect == "yourself":
Console.interactive_choice(["I'm done"], player.inspect(player),
battle=True)
elif to_inspect == "{}".format(player.current_enemy.name):
Console.interactive_choice(["I'm done"], player.current_enemy.inspect(enemy),
battle=True)
# Back selected
elif to_inspect is None:
break
else:
error_logger.error("Unknown case at inspect: {}".format(to_inspect))
# The equivalent of help
elif action == 7:
# Different categories you have to traverse through to reach desired information
# This is to make sure there are never more than 30 options at the screen at once
# If more than 30 options are present at once, it would "break" the console layout
help_options = {
'moves': {
'healing moves': {
'calming heal': "Restores health equal to the total of 20% of the caster's current hp"
" and 10% of the caster's maximum hp\n"
"It also increases the caster's awareness by one level",
'intense heal': "Restores health equal to the total of 33% of the caster's current hp"
" and 25% of the caster's maximum hp"
},
'damaging moves': {
},
},
'statuses': {
'buffs': {
'': '',
},
'debuffs': {
'weak': "Halves the damage of the afflicted's incoming attacks",
'bleeding': "At the start of the afflicted's turn, deals damage equal to 15% of "
"the afflicted's maximum hp",
'frozen': "Halves the afflicted's speed",
'on fire': "At the start of the afflicted's turn, deals damage equal to 10% of"
"the afflicted's turn\nIt also makes the afflicted panic",
},
'curses': {
'': "",
}
},
'sword enchantments': {
},
'general glossary': {
},
}
# Creates a menu which makes it possible to view the different levels of the above dictionary
# The while loops create the functionality to only go back one level
while True:
help_with: str = Console.interactive_choice(list(help_options.keys()),
'What sort of thing do you want to know more '
'about?',
back_want=True, battle=True)
if help_with is None:
break
while True:
subcategory: str = Console.interactive_choice(list(help_options[help_with].keys()),
'Which one of these categories '
'do you want to know more about?',
back_want=True, battle=True)
if subcategory is None:
break
while True:
final_type: str = Console.interactive_choice(list(help_options[help_with]
[subcategory].keys()),
'Which one of these do you want to know '
'more about?',
battle=True, back_want=True)
if final_type is None:
break
Console.interactive_choice(["back"], help_options[help_with][subcategory]
[final_type], battle=True)
# The equivalent of view and edit your inventory
elif action == 8:
# The while loop hierarchy makes it so that you don't go back to the main menu from clicking back
while True:
# Asking the player for what part of their inventory they want to view
case_inventory = Console.interactive_choice(['Current equips', 'Items'],
'what part of your inventory do you want to view?',
back_want=True, battle=True)
# If the player selects back
if case_inventory is None:
break
elif case_inventory == "Current equips":
while True:
# Getting the current equips
case_list, head_string = player.inventory.view_equips()
# Asking what equip they want to view
# We will be using a text changing depending on the equip as the case
# Therefore, we will enumerate the cases
numbered_case = Console.interactive_choice(case_list, head_string,
back_want=True, battle=True,
enumerated=True)
# noinspection PyUnresolvedReferences
def handle_slot(slot: str, joke_text: str = ''):
armor_slots = ('head', 'chest', 'legs')
hand_slots = ('left hand', 'right hand')
if slot in armor_slots:
equipment_header_str = (player.inventory.current_equips
[slot].parent.get_set_part_description
(player.inventory.current_equips[slot], player))
elif slot in hand_slots:
equipment_header_str = (player.inventory.current_equips[slot].inspect())
else:
error_logger.error("Unknown slot type at handle_slot: {}".format(slot))
equipment_header_str = 'Sorry, something failed miserably and it has been noted'
slot_actions = []
if not player.inventory.current_equips[slot] in GameMaster.Bare_set:
slot_actions.append('Throw away')
if not player.inventory.current_equips[slot] == Fist:
slot_actions.append('Unequip')
decision = Console.interactive_choice(slot_actions,
equipment_header_str,
battle=True, back_want=True)
if decision == "Unequip":
if not player.inventory.current_equips[slot] in GameMaster.Bare_set:
player.inventory.unequip(slot)
else:
player.dead(None, custom_text=joke_text)
elif decision == "Throw away":
player.inventory.throw_away(slot)
# The equivalent of head
if numbered_case == 0:
if player.inventory.current_equips['head'] == Bare.Head:
handle_slot('head', joke_text='You dismember your own head and die immediately')
else:
handle_slot('head')
# The equivalent of chest
elif numbered_case == 1:
if player.inventory.current_equips['chest'] == Bare.Chest:
handle_slot('chest',
joke_text='How do you even manage to dismember your whole torso?!')
else:
handle_slot('chest')
# The equivalent of legs
elif numbered_case == 2:
if player.inventory.current_equips['legs'] == Bare.Legs:
handle_slot('legs',
joke_text='You dismember your legs and slowly die from blood loss')
else:
handle_slot('legs')
# The equivalent of left hand
elif numbered_case == 3:
handle_slot('left hand')
# The equivalent of right hand
elif numbered_case == 4:
handle_slot('right hand')
# Back
elif numbered_case is None:
break
elif case_inventory == "Items":
while True:
head_string, inventory_items = player.inventory.view()
raw_inventory_items = player.inventory.view_raw_names()
item_to_inspect: str = Console.interactive_choice(inventory_items, head_string,
battle=True, back_want=True)
# Going back if desired
if item_to_inspect is None:
break
# Removing integer amounts and whitespace from the string so that it can be used
try:
description = (raw_inventory_items
[inventory_items.index(item_to_inspect)]
.parent.inspect(raw_inventory_items
[inventory_items.index(item_to_inspect)]))
except (AttributeError, ValueError):
if item_to_inspect is not None:
if hasattr(raw_inventory_items[inventory_items.index(item_to_inspect)],
'parent'):
description = (raw_inventory_items[inventory_items.index(item_to_inspect)].
parent.inspect())
else:
description = (raw_inventory_items[inventory_items.index(item_to_inspect)].
inspect())
else:
description = "Something went wrong under Inventory -> items"
error_logger.error('item_to_inspect changed')
# Making a list of the possible ways to interact with the item
item_actions = ["throw away"]
# If the item is some sort of armor, you can equip it
if (hasattr(raw_inventory_items[inventory_items.index(item_to_inspect)], "parent") or
isinstance(raw_inventory_items[inventory_items.index(item_to_inspect)],
Weapon)):
item_actions.append("equip")
while True:
item_interaction = Console.interactive_choice(item_actions, description,
battle=True, back_want=True)
if item_interaction is None:
break
elif item_interaction == "throw away":
thrown_away = player.inventory.throw_away(raw_inventory_items
[inventory_items.index
(item_to_inspect)])
if thrown_away == "all":
break
elif item_interaction == "equip":
if isinstance(raw_inventory_items[inventory_items.index(item_to_inspect)],
Weapon):
hand = Console.interactive_choice(['left hand', 'right hand'],
'Which hand do you want to use this '
'item with?', back_want=True, battle=True)
if hand is None:
hand = ""
else:
hand = ""
result = player.inventory.equip(raw_inventory_items
[inventory_items.index
(item_to_inspect)], hand=hand)
if result == 1:
break
else:
error_logger.error("Unexpected result: {}".format(result))
else:
error_logger.error("Unknown item interaction: {}".format(item_interaction))
# The equivalent of settings
elif action == 9:
def save_settings():
project_path = os.path.abspath("")
with open("{}\\Saves\\Config\\Config.json".format(project_path), 'w') as f:
json.dump(GameMaster.settings, f)
while True:
setting_list = []
for key in GameMaster.settings:
if key == "ForceV2":
if GameMaster.settings[key] is None:
display_key = False
else:
display_key = True
else:
display_key = True
if display_key:
setting_list.append("{}: {}".format(key, '{}On{}'.format(colorama.Fore.GREEN,
colorama.Style.RESET_ALL)
if GameMaster.settings[key] else
'{}Off{}'.format(colorama.Fore.RED, colorama.Style.RESET_ALL)))
setting_to_be_changed = Console.interactive_choice(setting_list,
"Click on any of these to change them\n"
"Disabling Quickedit makes the game sort of "
"unplayable",
battle=True, back_want=True, enumerated=True)
# Going back case
if setting_to_be_changed is None:
break
# Nerd mode case
elif setting_to_be_changed == 0:
if not GameMaster.settings['nerd mode']:
GameMaster.settings['nerd mode'] = True
elif GameMaster.settings['nerd mode']:
GameMaster.settings['nerd mode'] = False
else:
(error_logger.error("Illegal case in settings nerd mode:{}"
.format(GameMaster.settings['nerd mode'])))
save_settings()
# Quickedit case
elif setting_to_be_changed == 1:
if not GameMaster.settings['Quickedit']:
new_setting = 1
safe_registry_edit = True
GameMaster.settings['Quickedit'] = new_setting
elif GameMaster.settings['Quickedit']:
new_setting = 0
safe_registry_edit = True
GameMaster.settings['Quickedit'] = new_setting
else:
(error_logger.error("Illegal case in settings nerd mode:{}"
.format(GameMaster.settings['Quickedit'])))
safe_registry_edit = False
new_setting = 0
if safe_registry_edit:
try:
path = "Console\\%SystemRoot%_py.exe"
key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, path, 0, winreg.KEY_WRITE)
winreg.SetValueEx(key, "Quickedit", 0, winreg.REG_DWORD, new_setting)
winreg.CloseKey(key)
except WindowsError:
path = "Console"
key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, path, 0, winreg.KEY_WRITE)
winreg.SetValueEx(key, "Quickedit", 0, winreg.REG_DWORD, new_setting)
winreg.CloseKey(key)
save_settings()
# ForceV2 case
elif setting_to_be_changed == 2:
if not GameMaster.settings['ForceV2']:
new_setting = 1
safe_registry_edit = True
GameMaster.settings['ForceV2'] = new_setting
elif GameMaster.settings['ForceV2']:
new_setting = 0
safe_registry_edit = True
GameMaster.settings['ForceV2'] = new_setting
else:
(error_logger.error("Illegal case in settings ForceV2:{}"
.format(GameMaster.settings['Quickedit'])))
safe_registry_edit = False
new_setting = 0
if safe_registry_edit:
path = "Console"
key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, path, 0, winreg.KEY_WRITE)
winreg.SetValueEx(key, "ForceV2", 0, winreg.REG_DWORD, new_setting)
winreg.CloseKey(key)
save_settings()
else:
error_logger.error("Unknown case in settings to edit: {}".format(setting_to_be_changed))
# Calls the player's main choice
# The call is way back here because the code it depends on need to be declared
main_choice()
def enemy_turn():
enemy.current_hp += enemy.hp_regen
if enemy.current_hp > enemy.max_hp:
enemy.current_hp = enemy.max_hp
enemy.current_mp += enemy.mp_regen
if enemy.current_mp > enemy.max_mp:
enemy.current_mp = enemy.max_mp
enemy.current_stamina += enemy.stamina_regen
if enemy.current_stamina > enemy.max_stamina:
enemy.current_stamina = enemy.max_stamina
enemy.deal_damage(enemy.strength)
result = enemy.moves.calming_heal()
if result is not None:
GameMaster.extend_action_log(result)
print("enemy")
print("\n")
time.sleep(1)
while True:
if first_turn:
if (player.awareness + random.randint(0, 100)) >= (
player.current_enemy.awareness + random.randint(0, 100)):
player_first = True
else:
player_first = False
first_turn = False
else:
GameMaster.turn += 1
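# After the first turn, each combatant draws a random initiative value weighted by its
# current (buffed or debuffed) speed; the higher draw acts first, with ties going to the player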
temp_player_speed = player.calculate_stat_change(Stats.speed, player.speed)
temp_enemy_speed = enemy.calculate_stat_change(Stats.speed, enemy.speed)
if random.randint(random.randint(int((temp_player_speed / 3)), (temp_player_speed - 10)),
temp_player_speed * 2) >= \
random.randint(random.randint(int((temp_enemy_speed / 3)), (temp_enemy_speed - 10)),
temp_enemy_speed * 2):
player_first = True
else:
player_first = False
if player_first:
if Statuses.stun in player.Statuses:
GameMaster.extend_action_log(supported_Statuses[Statuses.stun]['on_apply_message_player'])
player.Statuses[Statuses.stun]['duration'] -= 1
if player.Statuses[Statuses.stun]['duration'] <= 0:
del player.Statuses[Statuses.stun]
else:
player_turn()
if Statuses.stun in player.current_enemy.Statuses:
GameMaster.extend_action_log(supported_Statuses[Statuses.stun]
['on_apply_message_enemy'].format(enemy.name))
enemy.Statuses[Statuses.stun]['duration'] -= 1
if enemy.Statuses[Statuses.stun]['duration'] <= 0:
del enemy.Statuses[Statuses.stun]
else:
enemy_turn()
else:
if Statuses.stun in player.current_enemy.Statuses:
GameMaster.extend_action_log(supported_Statuses[Statuses.stun]
['on_apply_message_enemy'].format(enemy.name))
enemy.Statuses[Statuses.stun]['duration'] -= 1
if enemy.Statuses[Statuses.stun]['duration'] <= 0:
del enemy.Statuses[Statuses.stun]
else:
enemy_turn()
if Statuses.stun in player.Statuses:
GameMaster.extend_action_log(supported_Statuses[Statuses.stun]['on_apply_message_player'])
player.Statuses[Statuses.stun]['duration'] -= 1
if player.Statuses[Statuses.stun]['duration'] <= 0:
del player.Statuses[Statuses.stun]
else:
player_turn()
def on_start():
# Defining this projects path
project_path = os.path.dirname(sys.argv[0])
# Setting the name of the game
GameMaster.game_name = "Temporary placeholder for a game name, please change later"
Console.size_reset()
try:
os.mkdir("{}\\Saves".format(project_path))
except FileExistsError:
pass
try:
os.mkdir("{}\\Logs".format(project_path))
except FileExistsError:
pass
try:
os.mkdir("{}\\Saves\\Player saves".format(project_path))
except FileExistsError:
pass
try:
os.mkdir("{}\\Saves\\Config".format(project_path))
except FileExistsError:
pass
# A function for creating loggers
def setup_logger(name, file, level=logging.WARNING):
# Function to easily create loggers
handler = logging.FileHandler(file)
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(filename)s:%(lineno)d:%(message)s")
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(level)
logger.addHandler(handler)
return logger
# Info logger
info_log = setup_logger("Info logging", "{}\\Logs\\logging_info.log".format(project_path), level=logging.INFO)
# Error logger
error_log = setup_logger("Error logging", "{}\\Logs\\logging_errors.log".format(project_path))
# Debug logger
debug_log = setup_logger('Debug Logging', "{}\\Logs\\debug_log.log".format(project_path), level=logging.DEBUG)
# A json structure to be filled with info about things
setup = '''
{
"os": null,
"font_size_x": null,
"font_size_y": null
}
'''
setup = json.loads(setup)
# The versions i am supporting
# This is basically information about the fallback size of the console, used in interactive choices
accepted_operating_systems = ('Windows-8', 'Windows-10', 'Windows-8.1')
supported_os = True
os_version = platform.platform(terse=True)
if os_version in accepted_operating_systems:
setup['os'] = os_version
else:
setup['os'] = os_version
supported_os = False
error_log.error("Unsupported os: {}. Falling back to windows 10 settings".format(os))
# Getting the console's font size
# Done using the win32 api
ctypes.windll.kernel32.SetConsoleTitleW(GameMaster.game_name)
error = ctypes.windll.kernel32.GetLastError()
if error:
hwnd = win32gui.GetForegroundWindow()
win32gui.SetWindowText(hwnd, GameMaster.game_name)
class COORD(ctypes.Structure):
_fields_ = [("X", ctypes.c_short), ("Y", ctypes.c_short)]
# noinspection PyPep8Naming
class CONSOLE_FONT_INFO(ctypes.Structure):
_fields_ = [("nFont", ctypes.c_uint32),
("dwFontSize", COORD)]
font = CONSOLE_FONT_INFO()
# noinspection PyPep8Naming
STD_OUTPUT_HANDLE = -11
handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
ctypes.windll.kernel32.GetCurrentConsoleFont(
handle,
ctypes.c_long(False),
ctypes.pointer(font))
setup['font_size_x'] = font.dwFontSize.X
setup['font_size_y'] = font.dwFontSize.Y
if font.dwFontSize.X == 0 and font.dwFontSize.Y == 0:
error_log.error("Get font size error: msdn error {}".format(ctypes.windll.kernel32.GetLastError()))
# Asserting that all audio files exist
audio_path = "{}\\Audio\\".format(project_path)
audio_files = ("abc_123_a.ogg",)
missing_audio_files = []
for audio_name in audio_files:
if not os.path.isfile(audio_path + audio_name):
GameMaster.missing_audio.append(audio_name)
missing_audio_files.append(audio_path + audio_name)
if not len(missing_audio_files) == 0:
missing_files_str = ", ".join(missing_audio_files)
missing_files_str = "Missing auido:" + missing_files_str
error_log.error(missing_files_str)
# We only want to meddle with the registry if we know what we are dealing with
if supported_os:
# Reading the user's initial values set for the console in case they want to reverse it later
py_exe_installed = False
try:
registry_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Console\\%SystemRoot%_py.exe", 0,
winreg.KEY_READ)
quickedit_py_exe, __ = winreg.QueryValueEx(registry_key, "Quickedit")
winreg.CloseKey(registry_key)
py_exe_installed = True
except WindowsError:
pass
finally:
try:
registry_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Console", 0,
winreg.KEY_READ)
quickedit_cmd, __ = winreg.QueryValueEx(registry_key, "Quickedit")
winreg.CloseKey(registry_key)
except WindowsError:
quickedit_cmd = 0
try:
registry_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Console", 0,
winreg.KEY_READ)
legacy, __ = winreg.QueryValueEx(registry_key, "ForceV2")
winreg.CloseKey(registry_key)
except WindowsError:
legacy = None
# Setting registry values of the console for an optimized experience
# If the option to enable legacy console exists, we want do that
if legacy is not None:
try:
path = "Console"
key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, path, 0, winreg.KEY_WRITE)
winreg.SetValueEx(key, "ForceV2", 0, winreg.REG_DWORD, 0)
winreg.CloseKey(key)
except WindowsError:
pass
# Disabling quickedit
if py_exe_installed:
try:
path = "Console\\%SystemRoot%_py.exe"
key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, path, 0, winreg.KEY_WRITE)
winreg.SetValueEx(key, "Quickedit", 0, winreg.REG_DWORD, 0)
winreg.CloseKey(key)
except WindowsError:
pass
finally:
path = "Console"
key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, path, 0, winreg.KEY_WRITE)
winreg.SetValueEx(key, "Quickedit", 0, winreg.REG_DWORD, 0)
winreg.CloseKey(key)
else:
quickedit_cmd = None
quickedit_py_exe = None
legacy = None
try:
# noinspection PyUnboundLocalVariable
temp = quickedit_py_exe
del temp
except NameError:
quickedit_py_exe = 0
if quickedit_cmd or quickedit_py_exe:
os.system("start {}\\Scripts\\Restart_game.pyw".format(project_path))
raise SystemExit
# Dumping the json settings
try:
with open("{}\\Saves\\Config\\Config.json".format(project_path), 'x') as _:
pass
except FileExistsError:
pass
if os.stat("{}\\Saves\\Config\\Config.json".format(project_path)).st_size == 0:
with open("{}\\Saves\\Config\\Config.json".format(project_path), 'w') as f:
settings = '''
{
"nerd mode": false,
"Quickedit": null,
"ForceV2": null
}
'''
settings = json.loads(settings)
settings['Quickedit'] = quickedit_cmd
settings['ForceV2'] = legacy
json.dump(settings, f)
GameMaster.settings = settings
else:
with open("{}\\Saves\\Config\\Config.json".format(project_path), 'r') as f_1:
test_content = f_1.readlines()
if test_content[0] == "\n" and len(test_content) == 1:
with open("{}\\Saves\\Config\\Config.json".format(project_path), 'w') as f_2:
settings = '''
{
"nerd mode": false,
"Quickedit": null,
"ForceV2": null
}
'''
settings = json.loads(settings)
settings['Quickedit'] = quickedit_cmd
settings['ForceV2'] = legacy
json.dump(settings, f_2)
GameMaster.settings = settings
# Settings up some info depending on the windows version used
with open("{}\\Saves\\Config\\Config.json".format(project_path)) as f:
try:
GameMaster.settings = json.load(f)
except json.JSONDecodeError as json_error:
error_log.error("JsonDecodeError: {}".format(json_error))
settings = '''
{
"nerd mode": false,
"Quickedit": null,
"ForceV2": null
}
'''
settings = json.loads(settings)
settings['Quickedit'] = quickedit_cmd
settings['ForceV2'] = legacy
GameMaster.settings = settings
if len(GameMaster.settings) == 0:
settings = '''
{
"nerd mode": false,
"Quickedit": null,
"ForceV2": null
}
'''
settings = json.loads(settings)
settings['Quickedit'] = quickedit_cmd
settings['ForceV2'] = legacy
GameMaster.settings = settings
try:
with open("{}\\Saves\\Config\\Setup.json".format(project_path), 'x') as _:
pass
except FileExistsError:
pass
with open("{}\\Saves\\Config\\Setup.json".format(project_path), 'w') as f:
json.dump(setup, f)
# Settings up some info depending on the windows version used
with open("{}\\Saves\\Config\\Setup.json".format(project_path)) as f:
try:
config = json.load(f)
os_version = config['os']
except json.JSONDecodeError as json_error:
error_log.error("JsonDecodeError: {}".format(json_error))
os_version = "Windows-10"
config = setup
# Setting information about the font size and the width of the console border
if len(sys.argv) != 1:
if sys.argv[1] == "debug":
GameMaster.font_size_x = 7 if not config['font_size_x'] else config['font_size_x']
GameMaster.font_size_y = 12 if not config['font_size_y'] else config['font_size_y']
GameMaster.x_to_console = 9
GameMaster.y_to_console = 32
else:
if os_version == 'Windows-8.1' or os_version == 'Windows-8':
GameMaster.font_size_x = 8 if not config['font_size_x'] else config['font_size_x']
GameMaster.font_size_y = 12 if not config['font_size_y'] else config['font_size_y']
GameMaster.x_to_console = 9
GameMaster.y_to_console = 32
elif os_version == 'Windows-10':
GameMaster.font_size_x = 8 if not config['font_size_x'] else config['font_size_x']
GameMaster.font_size_y = 16 if not config['font_size_y'] else config['font_size_y']
GameMaster.x_to_console = 1
GameMaster.y_to_console = 30
else:
# If the user is using an os i'm not yet supporting
error_log.warning("Unsupported os:{}".format(os_version))
# Will default to windows 10 settings
GameMaster.font_size_x = 8
GameMaster.font_size_y = 16
GameMaster.x_to_console = 1
GameMaster.y_to_console = 30
return error_log, info_log, debug_log
if __name__ == '__main__':
# Initiating everything
player = Player('Tester', 'male')
player.inventory = player.Inventory(player)
player.moves = player.Moves(player)
GameMaster = GameMaster()
# Initiating colorama so that we can color console output
colorama.init()
# Setting some variables to be used during runtime
# Along with setting up some loggers
error_logger, info_logger, debug_logger = on_start()
# Debug
hen = Animal(1, 'Gullbert the hen', 'male', 'A hen', 15, 40, 2, 0, 3, 20)
hen.inventory = hen.Inventory(hen)
hen.moves = hen.Moves(hen)
player.moves.add_move(player.moves.calming_heal)
player.moves.add_move(player.moves.intense_heal)
hen.apply_status(Statuses.stun, 10)
hen.apply_status(Statuses.apply_bleed, 10)
player.apply_status(Statuses.apply_bleed, 10)
player.apply_status("crit", 9, 100)
player.inventory.add_item(Gold, 10)
combat(hen)
| 2.953125
| 3
|
zeTorch/data.py
|
piovere/zeTorch
| 0
|
12774955
|
"""Contains class for data files. Maybe should eventually sublass Spectrum?
"""
import numpy as np
class Data(object):
"""DOCSTRING
"""
def __init__(self, file=None):
"""DOCSTRING
"""
self._file = file
def load(self, file=None):
if file is None and self._file is None:
raise FileNotFoundError("You must provide a filename")
if file is None:
file = self._file
self._data = np.loadtxt(file, skiprows=14)
@property
def data(self):
return self._data
@property
def wavelengths(self):
return self._data[:,0]
@property
def intensity(self):
# Assumption: the source is truncated at this point; column 1 is taken to be the
# measured intensity, mirroring the wavelengths property (column 0) above.
return self._data[:, 1]
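# Minimal usage sketch (hypothetical file name, not part of the original module):
# d = Data("spectrum_001.txt")
# d.load()
# print(d.wavelengths[:5], d.intensity[:5])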
| 2.671875
| 3
|
Chapter8/src/discriminator.py
|
AI-Nerd/Generative-Adversarial-Networks-Cookbook
| 98
|
12774956
|
#!/usr/bin/env python3
import sys
import numpy as np
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, BatchNormalization
from keras.layers.convolutional import Conv3D, Deconv3D
from keras.layers.core import Activation
from keras.layers.advanced_activations import LeakyReLU
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.utils import plot_model
class Discriminator(object):
def __init__(self, side=16):
self.INPUT_SHAPE = (side,side,side,3)
self.OPTIMIZER = Adam(lr=0.000001, beta_1=0.5)
self.Discriminator = self.model()
self.Discriminator.compile(loss='binary_crossentropy', optimizer=self.OPTIMIZER, metrics=['accuracy'] )
# self.save_model()
self.summary()
def block(self,first_layer,filter_size=512,kernel_size=(3,3,3)):
x = Conv3D(filters=filter_size, kernel_size=kernel_size, kernel_initializer='glorot_normal',
bias_initializer='zeros', padding='same')(first_layer)
x = BatchNormalization()(x)
x = LeakyReLU(0.2)(x)
return x
def model(self):
input_layer = Input(shape=self.INPUT_SHAPE)
x = self.block(input_layer,filter_size=8)
x = self.block(x,filter_size=16,)
x = self.block(x,filter_size=32)
x = self.block(x,filter_size=64)
x = Conv3D(filters=1, kernel_size=(3,3,3),
strides=(1,1,1), kernel_initializer='glorot_normal',
bias_initializer='zeros', padding='valid')(x)
x = BatchNormalization()(x)
x = Flatten()(x)
output_layer = Dense(1, activation='sigmoid')(x)
model = Model(inputs=input_layer, outputs=output_layer)
return model
def summary(self):
return self.Discriminator.summary()
def save_model(self):
plot_model(self.Discriminator, to_file='/data/Discriminator_Model.png')
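# Minimal usage sketch (assumed shapes, not part of the original chapter code):
# d = Discriminator(side=16)
# fake_batch = np.random.rand(4, 16, 16, 16, 3).astype('float32')
# probs = d.Discriminator.predict(fake_batch)  # -> (4, 1) array of real/fake probabilities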
| 2.53125
| 3
|
pybenford/benford.py
|
pierrepo/pybenford
| 1
|
12774957
|
<gh_stars>1-10
"""Module to verify Benford's law on observed data."""
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import distributions, power_divergence
np.random.seed(2021) # Random seed
def get_theoretical_freq_benford(nb_digit=1, base=10):
"""Theoretical proportions of Benford's law.
Function to return the theoretical proportion of the first
significant digits.
Parameters
¯¯¯¯¯¯¯¯¯¯
nb_digit : int
Number of first digits to consider. Default is `1`.
base : int
Mathematical basis. Default is `10`.
Returns
¯¯¯¯¯¯¯
p_benford : array
Theoretical proportion of the first digits considered.
"""
digit = (base ** nb_digit) - (base ** (nb_digit - 1))
p_benford = np.zeros(digit, dtype=float)
for i in range(digit):
p_benford[i] = (math.log((1 + (1 / (i + (base ** (nb_digit - 1))))),
base))
return p_benford
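# Worked example (values follow directly from the formula above, shown for reference):
# for nb_digit=1 and base=10 the law gives P(d) = log10(1 + 1/d), so
# get_theoretical_freq_benford(1, 10)[0] is about 0.3010 (probability of a leading 1)
# and the nine proportions sum to 1.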
def count_first_digit(numbers, nb_digit=1):
"""Distribution of the first digits in base 10 of observed data.
Function to return the observed distribution of the first digits
in base 10 of an observed data set. This function removes numbers
less than 1.
Parameters
¯¯¯¯¯¯¯¯¯¯
numbers : array of numbers
Integer array.
nb_digit : int
Number of first significant digits.
Returns
¯¯¯¯¯¯¯
digit_distrib : array
Distribution of the first digits in base 10.
"""
size_array = (10 ** nb_digit) - (10 ** (nb_digit - 1))
# array size return
digit_distrib = np.zeros(size_array, dtype=int)
for number in numbers:
number = abs(number)
if type(number) == float:
if number <= 9e-5:
number = str(number)
i = 0
nb_string = ""
while number[i] != 'e':
nb_string += number[i]
i += 1
number = nb_string
number = str(number)
number = number.replace(".", "")
number = number.strip("0") # remove not-significant 0.
if int(number) >= (10 ** (nb_digit - 1)):
number = str(number)
first = int(number[0:nb_digit])
digit_distrib[first - (10 ** (nb_digit - 1))] += 1
# nb_delet = (1 - (sum(digit_distrib)/len(numbers))) * 100
# print(f" Warning : {nb_delet:.2f}% of numbers remove")
return digit_distrib
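# Example sketch (hypothetical data): count_first_digit([123, 45, 6789, 21], nb_digit=1)
# returns array([1, 1, 0, 1, 0, 1, 0, 0, 0]) because the leading digits are 1, 4, 6 and 2.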
def normalize_first_digit(array):
"""Normalize observed distribution of the first significant digits.
Function normalizing an array by the sum of the array values.
Parameters
¯¯¯¯¯¯¯¯¯¯
array: array of int
Array of observed data.
Returns
¯¯¯¯¯¯¯
array: array of float
Array of observed data normalized.
"""
array = array / sum(array)
return array
def build_hist_freq_ben(freq_obs, freq_theo, nb_digit, title="",
xlab="First digit", ylab="Proportion",
legend="", name_save="", size=(6, 4)):
"""Histogram of observed proportion and theoretical proportion.
Function realizing the histogram of observed proportions and adding
the theoretical proportion of Benford.
Parameters
¯¯¯¯¯¯¯¯¯¯
freq_obs : array
Array of observed frequency.
freq_theo : array
Array of theoritical frequency.
nb_digit : int
Number of first significant digits.
    title : string, optional
        Title of histogram.
    xlab: string, optional
Label of x-axis. Default is `"First digit"`.
ylab: string, optional
Label of y-axis. Default is `"Proportion"`.
legend: string, optional
        Legend label for the observed frequency.
name_save: string, optional
Name of the image to save in .png format,
if you want to save it.
size: tuple of 2 int, optional
Plot size. Default is `(6, 4)`.
Returns
¯¯¯¯¯¯¯
Histogram.
"""
plt.figure(figsize=size)
plt.plot(range(1, len(freq_theo)+1), freq_theo, marker="o",
color="red")
plt.bar(range(1, len(freq_obs)+1), freq_obs)
lab = []
for i in range((10 ** (nb_digit-1)), (10 ** nb_digit)):
lab.append(str(i))
plt.xticks(ticks=range(1, len(freq_theo)+1), labels=lab)
plt.title(label=title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.legend(labels=("Benford's law", legend))
if name_save != "":
plt.savefig(f"{name_save}.png", transparent=True)
def calculate_pom(data_obs):
"""Physical order of magnitude.
    Function that calculates the physical order of magnitude of a dataset.
    Parameters
    ¯¯¯¯¯¯¯¯¯¯
    data_obs: array of int
        Integer array of observed dataset.
Returns
¯¯¯¯¯¯¯
pom : float
Physical order of magnitude in data_obs.
Notes
¯¯¯¯¯
Benford’s Law Applications for Forensic Accounting, Auditing, and
    Fraud Detection. MARK J. NIGRINI, B.COM (HONS), MBA, PHD. 2012 by
    John Wiley & Sons, Inc. ISBN 978-1-118-15285-0
"""
pom = max(data_obs) / min(data_obs)
print(f"POM : {pom}")
return pom
def calculate_oom(data_obs):
"""Order of magnitude.
    Function that calculates the order of magnitude of a dataset.
    Parameters
    ¯¯¯¯¯¯¯¯¯¯
    data_obs: array of int
        Integer array of observed dataset.
    Returns
    ¯¯¯¯¯¯¯
    oom : float
        Order of magnitude in data_obs.
Notes
¯¯¯¯¯
Benford’s Law Applications for Forensic Accounting, Auditing, and
Fraud Detection. MARK J. NIGRINI, B.COM (HONS), MBA, PHD. 2012 by
John Wiley & Sons, Inc. ISBN 978-1-118-15285-0
"""
oom = math.log(calculate_pom(data_obs), 10)
print(f"OOM : {oom}")
return oom
def calculate_ssd(f_obs, f_theo):
"""Sum of squares deviation.
    Function that calculates the sum of squares deviation between an
    observed proportion and a theoretical proportion.
Parameters
¯¯¯¯¯¯¯¯¯¯
f_obs : array of float
Float array of observed proportion.
Proportion is between 0 and 1.
f_theo : array of float
Float array of theoretical proportion.
Proportion is between 0 and 1.
    Returns
    ¯¯¯¯¯¯¯
    sdd : float
        Sum of squares deviation.
Notes
-----
    The original formula uses percentages, so proportions are converted
    to percentages for the calculation.
Benford’s Law Applications for Forensic Accounting, Auditing, and
Fraud Detection. MARK J. NIGRINI, B.COM (HONS), MBA, PHD. 2012 by
John Wiley & Sons, Inc. ISBN 978-1-118-15285-0
"""
if len(f_theo) != len(f_obs):
return -1
sdd = sum((100*f_obs - 100*f_theo)**2)
print(f"SDD : {sdd}")
return sdd
def calculate_rmssd(f_obs, f_theo):
"""Root mean sum of squares deviation.
    Function that calculates the root mean sum of squares deviation between
    an observed proportion and a theoretical proportion.
Parameters
¯¯¯¯¯¯¯¯¯¯
f_obs : array of float
Float array of observed proportion.
f_theo : array of float
Float array of theoretical proportion.
    Returns
    ¯¯¯¯¯¯¯
    rmssd : float
        Root mean sum of squares deviation.
"""
if len(f_theo) != len(f_obs):
return -1
rmssd = math.sqrt(calculate_ssd(f_obs, f_theo) / len(f_theo))
print(f"RMSSD : {rmssd}")
return rmssd
def calculate_dist_hellinger(f_obs, f_theo):
"""Hellinger distance.
    Function that calculates the Hellinger distance between an observed
    proportion and a theoretical proportion.
Parameters
¯¯¯¯¯¯¯¯¯¯
f_obs : array of float
Float array of observed proportion.
f_theo : array of float
Float array of theoretical proportion.
    Returns
    ¯¯¯¯¯¯¯
    dist_h : float
        Hellinger distance.
Notes
¯¯¯¯¯
https://en.wikipedia.org/wiki/Hellinger_distance
Benford’s law and geographical information – the example of
OpenStreetMap. Mocnik, Franz-Benjamin. 2021/04/07, International
Journal of Geographical Information Science.
https://doi.org/10.1080/13658816.2020.1829627
"""
if len(f_theo) != len(f_obs):
return -1
    # Square the element-wise differences before summing (Hellinger definition).
    dist_h = math.sqrt(0.5 * sum((np.sqrt(f_obs) - np.sqrt(f_theo)) ** 2))
print(f"Hellinger distance : {dist_h}")
return dist_h
def calculate_dist_k_and_l(f_obs, f_theo):
"""Kullback & Leibler distance.
    Function that calculates the Kullback & Leibler distance between an
    observed proportion and a theoretical proportion.
Parameters
¯¯¯¯¯¯¯¯¯¯
f_obs : array of float
Float array of observed proportion.
f_theo : array of float
Float array of theoretical proportion.
    Returns
    ¯¯¯¯¯¯¯
    dist_kl : float
        Kullback & Leibler distance.
Notes
¯¯¯¯¯
https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
Benford’s law and geographical information – the example of
OpenStreetMap. Mocnik, Franz-Benjamin. 2021/04/07, International
Journal of Geographical Information Science.
https://doi.org/10.1080/13658816.2020.1829627
"""
if len(f_theo) != len(f_obs):
return -1
dist_kl = sum(f_obs * np.log10(f_obs/f_theo))
print(f"Kullback & Leibler distance : {dist_kl}")
return dist_kl
def chi2_test(data_obs, f_theo, nb_digit=1):
"""Chisquare test for Benford law.
    Function performing a chi-square test of compliance with Benford's law.
    Parameters
    ¯¯¯¯¯¯¯¯¯¯
    data_obs : array of int
        Integer array of observed dataset.
    f_theo : array of float
        Float array of theoretical frequency.
    nb_digit : int
        Number of first significant digits. Default is `1`.
    Returns
    ¯¯¯¯¯¯¯
    chi2 : float
        Statistic of the chi-square test.
    p_val : float
        p-value of the chi-square statistic.
"""
d_theo = np.array(f_theo * len(data_obs))
d_obs = count_first_digit(data_obs, nb_digit)
chi2, p_val = power_divergence(f_obs=d_obs, f_exp=d_theo, lambda_=1)
print(f"statistics : {chi2} ; p-value : {p_val}")
return chi2, p_val
def g_test(data_obs, f_theo, nb_digit=1):
"""G-test for Benford law.
    Function performing a G-test of compliance with Benford's law.
    Parameters
    ¯¯¯¯¯¯¯¯¯¯
    data_obs : array of int
        Integer array of observed dataset.
    f_theo : array of float
        Float array of theoretical frequency.
    nb_digit : int
        Number of first significant digits. Default is `1`.
    Returns
    ¯¯¯¯¯¯¯
    g_stat : float
        Statistic of the G-test.
    p_val : float
        p-value of the G statistic.
"""
d_theo = np.array(f_theo * len(data_obs))
d_obs = count_first_digit(data_obs, nb_digit)
print(d_obs)
print(d_theo)
g_stat, p_val = power_divergence(f_obs=d_obs, f_exp=d_theo, lambda_=0)
print(f"statistics : {g_stat} ; p-value : {p_val}")
return g_stat, p_val
def calculate_bootstrap_chi2(data_obs, f_theo, nb_digit, nb_val=1000,
nb_loop=1000, type_test=1):
"""Average of calculated chi2 and asociate p_value.
Function to calculate average chi2 in the function bootstrap_chi2.
parameters
¯¯¯¯¯¯¯¯¯¯
data_obs : array of int
Integer array of observed dataset.
f_theo : array of float-80.72309844128006
Float array of theoretical frequency.
nb_digit: int
Number of first significant digits. Default is `1`.
nb_val : int, optinal
Sample size. Default is `1000`.
nb_loop : int, optional
number of "bootstrap" procedure is performed.
Default is `1000`.
type_test: string or int, optional
statistical test type performed. Default is `1`.
String Value test type
"pearson" 1 Chisquare-test.
"log-likelihood" 0 G-test.
Returns
¯¯¯¯¯¯¯
mean_chi2: float
Chi2 average of "bootstrap".
p_val
p-value of mean_chi2.
nb_signif: int
number of significant statistical tests in the "bootstrap"
"""
sum_chi2 = np.zeros(nb_loop, dtype=float)
d_theo = np.array(f_theo * nb_val)
for i in range(nb_loop):
ech = np.random.choice(data_obs, size=nb_val, replace=False)
d_obs = count_first_digit(ech, nb_digit)
result = power_divergence(f_obs=d_obs, f_exp=d_theo,
lambda_=type_test)
sum_chi2[i] = result[0]
mean_chi2 = sum(sum_chi2) / nb_loop
    k = len(f_theo)  # number of digit classes; chi2 has k - 1 degrees of freedom
p_val = distributions.chi2.sf(mean_chi2, k - 1)
print(f"statistics : {mean_chi2} ; p-value : {p_val}")
return mean_chi2, p_val
if __name__ == "__main__":
print("\nThis is benford module. This module contains functions to"
" analyze a data set according to Benford's law.\n")
| 3.625
| 4
|
losses/l2/L2.py
|
harshikaninawe/Machine-Learning-concepts
| 10
|
12774958
|
import numpy as np
def L2Loss(y_predicted, y_ground_truth, reduction="None"):
"""returns l2 loss between two arrays
:param y_predicted: array of predicted values
:type y_predicted: ndarray
:param y_ground_truth: array of ground truth values
:type y_ground_truth: ndarray
    :param reduction: reduction mode ("sum", "mean" or "None"), defaults to "None"
:type reduction: str, optional
:return: l2-loss
:rtype: scalar if reduction is sum or mean, else ndarray
"""
# Calculate the difference array
difference = y_predicted - y_ground_truth
# Raise every difference value to the power of 2
squared_difference = np.multiply(difference, difference)
# L2 distance is the reduced form of the squared difference array
if reduction == "sum":
# Reduction can be done by summing up all the values in the difference array (this is known as "L2-Loss")
l2_distance = np.sum(squared_difference)
return l2_distance
elif reduction == "mean":
# Reduction can also be done by taking the mean (this is known as "Mean Squared Error")
mean_squared_error = np.mean(squared_difference)
return mean_squared_error
elif reduction == "None":
return squared_difference
    else:
        raise ValueError('reduction should be "sum" / "mean" / "None"')
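# Hedged usage sketch (not part of the original file): the arrays below are
# made-up values, used only to illustrate the three reduction modes.
def _example():
    y_hat = np.array([2.5, 0.0, 2.0])
    y_true = np.array([3.0, -0.5, 2.0])
    print(L2Loss(y_hat, y_true, reduction="None"))  # element-wise: [0.25 0.25 0.  ]
    print(L2Loss(y_hat, y_true, reduction="sum"))   # 0.5
    print(L2Loss(y_hat, y_true, reduction="mean"))  # ~0.1667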
def main():
print("Initializing predicted and ground truth arrays:\n")
print('(NOTE: Enter the values in a space-separated format. Ex: "5.36 1.02 2.03")')
y_predicted = [
float(item) for item in input("Enter the predicted values: ").split()
]
y_ground_truth = [
float(item)
for item in input("Enter the corresponding ground truth values: ").split()
]
assert len(y_predicted) == len(
y_ground_truth
), "Number of predicted values {} and ground truth {} values should match".format(
len(y_predicted), len(y_ground_truth)
)
y_predicted = np.array(y_predicted)
y_ground_truth = np.array(y_ground_truth)
reduction = str(input('Enter the reduction mode: "sum" / "mean" / "None": '))
loss = L2Loss(y_predicted, y_ground_truth, reduction=reduction)
print("L2-Loss with {}-reduction: {}".format(reduction, loss))
if __name__ == "__main__":
main()
| 4.15625
| 4
|
pyfos/utils/extension/gigabitethernet_speed_set.py
|
sandeepv451/Pyfostest
| 0
|
12774959
|
#!/usr/bin/env python3
# Copyright © 2018 Broadcom. All Rights Reserved. The term “Broadcom” refers to
# Broadcom Inc. and/or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`gigabitethernet_speed_set` - PyFOS util to set GE port speed.
*******************************************************************************
The :mod:`gigabitethernet_speed_set` Util is used to set the speed of a GE port.
This module is a stand-alone script that can be used to set the
switch GE port speed on an extension platform.
gigabitethernet_speed_set.py: Usage
* Infrastructure options:
* -i,--ipaddr=IPADDR: IP address of FOS switch.
* -L,--login=LOGIN: Login name.
* -P,--password=PASSWORD: Password.
* -f,--vfid=VFID: VFID to which the request is directed.
* -s,--secured=MODE: HTTPS mode "self" or "CA"[Optional].
* -v,--verbose: Verbose mode[Optional].
* Util scripts options:
* -n,--name=NAME: Set name.
* --speed=VALUE: Set speed.
* Outputs:
* Python dictionary content with RESTCONF response data.
.. function:: gigabitethernet_speed_set.set_port_speed(session,\
name, speed)
*Modify extension gigabitethernet speed*
Example usage of the method::
ret = gigabitethernet_speed_set.set_port_speed(session,
name, speed)
print (ret)
Details::
gigabitethernet = gigabitethernet()
gigabitethernet.set_name(name)
gigabitethernet.set_speed(speed)
result = gigabitethernet.patch(session)
* Inputs:
:param session: Session returned by login.
:param name: Gigabitethernet port name expressed as slot/port.
:param speed: Speed for the GE port to be set.
* Outputs:
:rtype: Dictionary of return status matching rest response.
*Use cases*
Modify extension gigabitethernet port speed to 1G or 10G.
"""
import pyfos.pyfos_auth as pyfos_auth
import pyfos.pyfos_util as pyfos_util
from pyfos.pyfos_brocade_gigabitethernet import gigabitethernet
import sys
import pyfos.utils.brcd_util as brcd_util
isHttps = "0"
def _set_port_speed(session, rest_obj):
result = rest_obj.patch(session)
return (result)
def set_port_speed(session, name, speed):
geObject = gigabitethernet()
geObject.set_name(name)
geObject.set_speed(speed)
result = _set_port_speed(session, geObject)
return (result)
def validate(geObject):
if geObject.peek_name() is None or \
geObject.peek_speed() is None:
return 1
return 0
def main(argv):
# myinputs = "-h -i 10.17.3.70 --name 4/17 --speed 10000000000"
# myinputs = "-h -i 10.17.3.70 --speed 1000000000 -n 4/17"
# myinputs = "--name 4/17 --speed 1000000000"
# myinputs = "-i 10.17.3.70 --name 4/17"
# argv = myinputs.split()
filters = ['name', 'speed']
inputs = brcd_util.parse(argv, gigabitethernet, filters,
validate)
session = brcd_util.getsession(inputs)
result = _set_port_speed(inputs['session'], inputs['utilobject'])
pyfos_util.response_print(result)
pyfos_auth.logout(session)
if __name__ == "__main__":
main(sys.argv[1:])
| 2.03125
| 2
|
src/testjson.py
|
stormfish-scientific/sensorstreamer-data-viz-quickstart
| 1
|
12774960
|
<filename>src/testjson.py<gh_stars>1-10
import json
import argparse
from pprint import pprint
parser = argparse.ArgumentParser('test json file')
parser.add_argument('json_file')
args = parser.parse_args()
with open(args.json_file, 'r') as jf:
filedata = jf.read()
data = json.loads(filedata)
pprint(data[0])
print(len(data))
| 3.15625
| 3
|
src/opendr/perception/object_detection_2d/nms/utils/nms_dataset.py
|
daoran/opendr
| 0
|
12774961
|
# Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from opendr.engine.datasets import Dataset
from opendr.engine.data import Image
from opendr.perception.object_detection_2d.datasets.transforms import BoundingBoxListToNumpyArray
from opendr.engine.constants import OPENDR_SERVER_URL
from pycocotools.coco import COCO
import os
from urllib.request import urlretrieve
import ssl
import time
from zipfile import ZipFile
import tarfile
import pickle
import numpy as np
import math
from tqdm import tqdm
import gc
class Dataset_NMS(Dataset):
def __init__(self, path=None, dataset_name=None, split=None, use_ssd=True, device='cuda'):
super().__init__()
available_dataset = ['COCO', 'PETS', 'TEST_MODULE']
self.dataset_sets = {'train': None,
'val': None,
'test': None}
if dataset_name not in available_dataset:
except_str = 'Unsupported dataset: ' + dataset_name + '. Currently available are:'
for j in range(len(available_dataset)):
except_str = except_str + ' \'' + available_dataset[j] + '\''
if j < len(available_dataset) - 1:
except_str = except_str + ','
except_str = except_str + '.'
raise ValueError(except_str)
ssl._create_default_https_context = ssl._create_unverified_context
self.dataset_name = dataset_name
self.split = split
# self.__prepare_dataset()
self.path = os.path.join(path, dataset_name)
self.src_data = []
if self.dataset_name == "PETS":
self.detector = 'JPD'
self.detector_type = 'default'
if use_ssd:
self.detector = 'SSD'
self.detector_type = 'custom'
self.dataset_sets['train'] = 'train'
self.dataset_sets['val'] = 'val'
self.dataset_sets['test'] = 'test'
if self.dataset_sets[self.split] is None:
raise ValueError(self.split + ' split is not available...')
if not os.path.exists(os.path.join(self.path, 'images/S1/L1')):
self.download(
'http://ftp.cs.rdg.ac.uk/pub/PETS2009/Crowd_PETS09_dataset/a_data/Crowd_PETS09/S1_L1.tar.bz2',
download_path=os.path.join(self.path, 'images'), file_format="tar.bz2", create_dir=True)
if not os.path.exists(os.path.join(self.path, 'images/S1/L2')):
self.download(
'http://ftp.cs.rdg.ac.uk/pub/PETS2009/Crowd_PETS09_dataset/a_data/Crowd_PETS09/S1_L2.tar.bz2',
download_path=os.path.join(self.path, 'images'), file_format="tar.bz2", create_dir=True)
if not os.path.exists(os.path.join(self.path, 'images/S2/L1')):
self.download(
'http://ftp.cs.rdg.ac.uk/pub/PETS2009/Crowd_PETS09_dataset/a_data/Crowd_PETS09/S2_L1.tar.bz2',
download_path=os.path.join(self.path, 'images'), file_format="tar.bz2", create_dir=True)
if not os.path.exists(os.path.join(self.path, 'images/S2/L2')):
self.download(
'http://ftp.cs.rdg.ac.uk/pub/PETS2009/Crowd_PETS09_dataset/a_data/Crowd_PETS09/S2_L2.tar.bz2',
download_path=os.path.join(self.path, 'images'), file_format="tar.bz2", create_dir=True)
if not os.path.exists(os.path.join(self.path, 'images/S2/L3')):
self.download(
'http://ftp.cs.rdg.ac.uk/pub/PETS2009/Crowd_PETS09_dataset/a_data/Crowd_PETS09/S2_L3.tar.bz2',
download_path=os.path.join(self.path, 'images'), file_format="tar.bz2", create_dir=True)
if not os.path.exists(os.path.join(self.path, 'images/S3/Multiple_Flow')):
self.download(
'http://ftp.cs.rdg.ac.uk/pub/PETS2009/Crowd_PETS09_dataset/a_data/Crowd_PETS09/S3_MF.tar.bz2',
download_path=os.path.join(self.path, 'images'), file_format="tar.bz2", create_dir=True)
if not os.path.exists(
os.path.join(self.path, 'annotations', 'pets_' + self.dataset_sets[self.split] + '.json')):
self.download('http://datasets.d2.mpi-inf.mpg.de/hosang17cvpr/PETS_annotations_json.zip',
download_path=os.path.join(self.path, 'annotations'), file_format="zip",
create_dir=True)
pkl_filename = os.path.join(self.path,
'data_' + self.detector + '_' + self.dataset_sets[self.split] + '_pets.pkl')
if not os.path.exists(pkl_filename):
ssd = None
if use_ssd:
from opendr.perception.object_detection_2d.ssd.ssd_learner import SingleShotDetectorLearner
ssd = SingleShotDetectorLearner(device=device)
ssd.download(".", mode="pretrained")
ssd.load("./ssd_default_person", verbose=True)
if not os.path.exists(
os.path.join(self.path, 'detections',
'PETS-' + self.dataset_sets[self.split] + '_siyudpm_dets.idl')):
self.download('http://datasets.d2.mpi-inf.mpg.de/hosang17cvpr/PETS_detections.zip',
download_path=os.path.join(self.path, 'detections'), file_format="zip",
create_dir=True)
if not os.path.exists(
os.path.join(self.path, 'annotations', 'PETS-' + self.dataset_sets[self.split] + '.idl')):
self.download('http://datasets.d2.mpi-inf.mpg.de/hosang17cvpr/PETS_annotations.zip',
download_path=os.path.join(self.path, 'annotations'), file_format="zip",
create_dir=True)
with open(os.path.join(self.path, 'annotations',
'PETS-' + self.dataset_sets[self.split] + '.idl')) as fp_gt:
fp_dt = None
if self.detector_type == 'default':
fp_dt = open(os.path.join(self.path, 'detections',
'PETS-' + self.dataset_sets[self.split] + '_siyudpm_dets.idl'))
print('Preparing PETS ' + self.dataset_sets[self.split] + ' set...')
current_id = 0
number_samples = 1696
if self.split == 'val':
current_id = 1696
number_samples = 240
elif self.split == 'test':
current_id = 1936
number_samples = 436
pbarDesc = "Overall progress"
pbar = tqdm(desc=pbarDesc, total=number_samples)
if self.detector_type == 'default':
line_dt = fp_dt.readline()
line_gt = fp_gt.readline()
while line_gt:
remove_strings = ['PETS09-', '\"', ':', '(', ')', ',', '', ';']
data_gt = line_gt.replace(':', ' ')
for j in range(len(remove_strings)):
data_gt = data_gt.replace(remove_strings[j], '')
data_gt = data_gt.split()
filename_gt = data_gt[0][0:2] + '/' + data_gt[0][2:]
if filename_gt[0:6] == 'S2/L1/':
filename_gt = filename_gt.replace('img/00', 'Time_12-34/View_001/frame_')
num = int(filename_gt[-8:-4]) - 1
filename_gt = filename_gt[:-8] + str(num).zfill(4) + '.jpg'
if filename_gt[0:6] == 'S2/L2/':
filename_gt = filename_gt.replace('img/00', 'Time_14-55/View_001/frame_')
num = int(filename_gt[-8:-4]) - 1
filename_gt = filename_gt[:-8] + str(num).zfill(4) + '.jpg'
if filename_gt[0:2] == 'S3':
filename_gt = filename_gt.replace('_MF', 'Multiple_Flow')
if self.detector_type == 'default':
data_dt = line_dt.replace(':', ' ')
for j in range(len(remove_strings)):
data_dt = data_dt.replace(remove_strings[j], '')
data_dt = data_dt.split()
filename_dt = data_dt[0][0:2] + '/' + data_dt[0][2:]
if filename_dt[0:6] == 'S2/L1/':
filename_dt = filename_dt.replace('img/00', 'Time_12-34/View_001/frame_')
num = int(filename_dt[-8:-4]) - 1
filename_dt = filename_dt[:-8] + str(num).zfill(4) + '.jpg'
if filename_dt[0:6] == 'S2/L2/':
filename_dt = filename_dt.replace('img/00', 'Time_14-55/View_001/frame_')
num = int(filename_dt[-8:-4]) - 1
filename_dt = filename_dt[:-8] + str(num).zfill(4) + '.jpg'
if filename_dt[0:2] == 'S3':
filename_dt = filename_dt.replace('_MF', 'Multiple_Flow')
if filename_gt != filename_dt:
raise ValueError('Errors in files...')
img = Image.open(os.path.join(self.path, 'images/', filename_gt))
dt_boxes = []
if self.detector_type == 'default':
for i in range(1, (len(data_dt)), 5):
dt_box = np.array((float(data_dt[i]), float(data_dt[i + 1]), float(data_dt[i + 2]),
float(data_dt[i + 3]), 1 / (1 + math.exp(- float(data_dt[i + 4])))))
dt_boxes.append(dt_box)
else:
bboxes_list = ssd.infer(img, threshold=0.0, custom_nms=None, nms_thresh=0.975,
nms_topk=6000, post_nms=6000)
bboxes_list = BoundingBoxListToNumpyArray()(bboxes_list)
bboxes_list = bboxes_list[bboxes_list[:, 4] > 0.015]
bboxes_list = bboxes_list[np.argsort(bboxes_list[:, 4]), :][::-1]
bboxes_list = bboxes_list[:5000, :]
for b in range(len(bboxes_list)):
dt_boxes.append(np.array([bboxes_list[b, 0], bboxes_list[b, 1], bboxes_list[b, 2],
bboxes_list[b, 3], bboxes_list[b, 4][0]]))
gt_boxes = []
for i in range(1, (len(data_gt)), 5):
gt_box = np.array((float(data_gt[i]), float(data_gt[i + 1]), float(data_gt[i + 2]),
float(data_gt[i + 3])))
gt_boxes.append(gt_box)
self.src_data.append({
'id': current_id,
'filename': os.path.join('images', filename_gt),
'resolution': img.opencv().shape[0:2][::-1],
'gt_boxes': [np.asarray([]), np.asarray(gt_boxes)],
'dt_boxes': [np.asarray([]), np.asarray(dt_boxes)]
})
current_id = current_id + 1
pbar.update(1)
if self.detector_type == 'default':
line_dt = fp_dt.readline()
line_gt = fp_gt.readline()
pbar.close()
if self.detector_type == 'default':
fp_dt.close()
elif self.detector == 'SSD':
del ssd
gc.collect()
with open(pkl_filename, 'wb') as handle:
pickle.dump(self.src_data, handle, protocol=pickle.DEFAULT_PROTOCOL)
else:
with open(pkl_filename, 'rb') as fp_pkl:
self.src_data = pickle.load(fp_pkl)
self.classes = ['background', 'human']
self.class_ids = [-1, 1]
self.annotation_file = 'pets_' + self.dataset_sets[self.split] + '.json'
elif self.dataset_name == "COCO":
self.dataset_sets['train'] = 'train'
self.dataset_sets['val'] = 'minival'
self.dataset_sets['test'] = 'valminusminival'
if self.dataset_sets[self.split] is None:
raise ValueError(self.split + ' split is not available...')
elif self.dataset_sets[self.split] == 'train':
imgs_split = 'train2014'
else:
imgs_split = 'val2014'
self.detector = 'FRCN'
self.detector_type = 'default'
ssd = None
if use_ssd:
self.detector = 'SSD'
self.detector_type = 'custom'
from opendr.perception.object_detection_2d.ssd.ssd_learner import SingleShotDetectorLearner
ssd = SingleShotDetectorLearner(device=device)
ssd.download(".", mode="pretrained")
ssd.load("./ssd_default_person", verbose=True)
if not os.path.exists(os.path.join(self.path, imgs_split)):
self.download('http://images.cocodataset.org/zips/' + imgs_split + '.zip',
download_path=os.path.join(self.path), file_format="zip",
create_dir=True)
pkl_filename = os.path.join(self.path, 'data_' + self.detector + '_' +
self.dataset_sets[self.split] + '_coco.pkl')
if not os.path.exists(pkl_filename):
if not os.path.exists(os.path.join(self.path, 'annotations', 'instances_' +
self.dataset_sets[self.split] +
'2014.json')):
if self.dataset_sets[self.split] == 'train':
ann_url = 'http://images.cocodataset.org/annotations/annotations_trainval2014.zip'
self.download(ann_url, download_path=os.path.join(self.path), file_format="zip",
create_dir=True)
else:
if self.dataset_sets[self.split] == 'minival':
ann_url = 'https://dl.dropboxusercontent.com/s/o43o90bna78omob/' \
'instances_minival2014.json.zip?dl=0'
else:
ann_url = 'https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/' \
'instances_valminusminival2014.json.zip?dl=0'
self.download(ann_url, download_path=os.path.join(self.path, 'annotations'), file_format="zip",
create_dir=True)
if not os.path.exists(os.path.join(self.path, 'detections', 'coco_2014_' +
self.dataset_sets[self.split] +
'_FRCN_train.pkl')):
self.download('http://datasets.d2.mpi-inf.mpg.de/hosang17cvpr/coco_2014_FRCN.tar.gz',
download_path=os.path.join(self.path, 'detections'), file_format='tar.gz',
create_dir=True)
with open(os.path.join(self.path, 'detections',
'coco_2014_' + self.dataset_sets[self.split] + '_FRCN_train.pkl'), 'rb') as f:
dets_default = pickle.load(f, encoding='latin1')
annots = COCO(annotation_file=os.path.join(self.path, 'annotations', 'instances_' +
self.dataset_sets[self.split] + '2014.json'))
pbarDesc = "Overall progress"
pbar = tqdm(desc=pbarDesc, total=len(dets_default[1]))
for i in range(len(dets_default[1])):
dt_boxes = []
img_info = annots.loadImgs([dets_default[1][i]])[0]
img = Image.open(os.path.join(self.path, imgs_split, img_info["file_name"]))
if self.detector_type == 'default':
dt_boxes = dets_default[0][1][i]
elif self.detector == 'SSD':
bboxes_list = ssd.infer(img, threshold=0.0, custom_nms=None, nms_thresh=0.975,
nms_topk=6000, post_nms=6000)
bboxes_list = BoundingBoxListToNumpyArray()(bboxes_list)
if bboxes_list.shape[0] > 0:
bboxes_list = bboxes_list[bboxes_list[:, 4] > 0.015]
if bboxes_list.shape[0] > 0:
bboxes_list = bboxes_list[np.argsort(bboxes_list[:, 4]), :][::-1]
bboxes_list = bboxes_list[:5000, :]
for b in range(len(bboxes_list)):
dt_boxes.append(np.array([bboxes_list[b, 0], bboxes_list[b, 1], bboxes_list[b, 2],
bboxes_list[b, 3], bboxes_list[b, 4][0]]))
dt_boxes = np.asarray(dt_boxes)
annots_in_frame = annots.loadAnns(
annots.getAnnIds(imgIds=[dets_default[1][i]], catIds=[1], iscrowd=False))
gt_boxes = []
for j in range(len(annots_in_frame)):
gt_boxes.append(annots_in_frame[j]['bbox'])
gt_boxes = np.asarray(np.asarray(gt_boxes))
if gt_boxes.shape[0] > 0:
gt_boxes[:, 2] = gt_boxes[:, 0] + gt_boxes[:, 2]
gt_boxes[:, 3] = gt_boxes[:, 1] + gt_boxes[:, 3]
self.src_data.append({
'id': dets_default[1][i],
'filename': os.path.join(imgs_split, img_info["file_name"]),
'resolution': [img_info['width'], img_info['height']],
'gt_boxes': [np.asarray([]), gt_boxes],
'dt_boxes': [np.asarray([]), dt_boxes]
})
pbar.update(1)
pbar.close()
if self.detector == 'SSD':
del ssd
gc.collect()
with open(pkl_filename, 'wb') as handle:
pickle.dump(self.src_data, handle, protocol=pickle.DEFAULT_PROTOCOL)
else:
with open(pkl_filename, 'rb') as fp_pkl:
self.src_data = pickle.load(fp_pkl)
self.classes = ['background', 'person']
self.class_ids = [-1, 1]
self.annotation_file = 'instances_' + self.dataset_sets[self.split] + '2014.json'
elif self.dataset_name == "TEST_MODULE":
self.dataset_sets['train'] = 'test'
self.dataset_sets['val'] = 'test'
self.dataset_sets['test'] = 'test'
if self.dataset_sets[self.split] is None:
raise ValueError(self.split + ' split is not available...')
pkl_filename = os.path.join(self.path, 'test_module.pkl')
if not os.path.exists(pkl_filename):
data_url = OPENDR_SERVER_URL + '/perception/object_detection_2d/nms/datasets/test_module.zip'
self.download(data_url, download_path=os.path.join(self.path).replace("TEST_MODULE", ""), file_format="zip",
create_dir=True)
with open(pkl_filename, 'rb') as fp_pkl:
self.src_data = pickle.load(fp_pkl)
self.classes = ['background', 'person']
self.class_ids = [-1, 1]
self.annotation_file = 'test_module_anns.json'
@staticmethod
def download(
url, download_path, dataset_sub_path=".", file_format="zip", create_dir=False):
if create_dir:
os.makedirs(download_path, exist_ok=True)
print("Downloading dataset from", url, "to", download_path)
start_time = 0
last_print = 0
def reporthook(count, block_size, total_size):
nonlocal start_time
nonlocal last_print
if count == 0:
start_time = time.time()
last_print = start_time
return
duration = time.time() - start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
if time.time() - last_print >= 1:
last_print = time.time()
print(
"\r%d MB, %d KB/s, %d seconds passed" %
(progress_size / (1024 * 1024), speed, duration),
end=''
)
if file_format == "zip":
zip_path = os.path.join(download_path, "dataset.zip")
urlretrieve(url, zip_path, reporthook=reporthook)
print()
print("Extracting data from zip file")
with ZipFile(zip_path, 'r') as zip_ref:
zip_ref.extractall(download_path)
os.remove(zip_path)
elif file_format == "tar.bz2" or file_format == "tar.gz":
tar_path = os.path.join(download_path, "dataset." + file_format)
urlretrieve(url, tar_path, reporthook=reporthook)
print()
def members(tf):
l = len("Crowd_PETS09/")
for member in tf.getmembers():
if member.path.startswith("Crowd_PETS09/"):
member.path = member.path[l:]
yield member
with tarfile.open(tar_path, "r:" + file_format.split('.')[1]) as tar:
if file_format == "tar.bz2":
tar.extractall(path=download_path, members=members(tar))
else:
tar.extractall(path=download_path)
tar.close()
os.remove(tar_path)
else:
raise ValueError("Unsupported file_format: " + file_format)
| 1.867188
| 2
|
mashumaro/serializer/json.py
|
dand-oss/mashumaro
| 0
|
12774962
|
import json
from types import MappingProxyType
from typing import Any, Dict, Mapping, Type, TypeVar, Union
from typing_extensions import Protocol
from mashumaro.serializer.base import DataClassDictMixin
DEFAULT_DICT_PARAMS = {
"use_bytes": False,
"use_enum": False,
"use_datetime": False,
}
EncodedData = Union[str, bytes, bytearray]
T = TypeVar("T", bound="DataClassJSONMixin")
class Encoder(Protocol):  # pragma: no cover
def __call__(self, obj, **kwargs) -> EncodedData:
...
class Decoder(Protocol):  # pragma: no cover
def __call__(self, s: EncodedData, **kwargs) -> Dict[Any, Any]:
...
class DataClassJSONMixin(DataClassDictMixin):
def to_json(
self: T,
encoder: Encoder = json.dumps,
dict_params: Mapping = MappingProxyType({}),
**encoder_kwargs,
) -> EncodedData:
return encoder(
self.to_dict(**dict(DEFAULT_DICT_PARAMS, **dict_params)),
**encoder_kwargs,
)
@classmethod
def from_json(
cls: Type[T],
data: EncodedData,
decoder: Decoder = json.loads,
dict_params: Mapping = MappingProxyType({}),
**decoder_kwargs,
) -> T:
return cls.from_dict(
decoder(data, **decoder_kwargs),
**dict(DEFAULT_DICT_PARAMS, **dict_params),
)
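# Hedged usage sketch (not part of the original module), exercising only the
# to_json/from_json API defined above on a throwaway dataclass.
if __name__ == "__main__":
    from dataclasses import dataclass

    @dataclass
    class Point(DataClassJSONMixin):
        x: int
        y: int

    encoded = Point(1, 2).to_json()
    print(encoded)                   # e.g. {"x": 1, "y": 2}
    print(Point.from_json(encoded))  # Point(x=1, y=2)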
| 2.28125
| 2
|
template.py
|
RemyG/python-scripts-template
| 0
|
12774963
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-V", "--version", help="show program version", action="store_true")
args = parser.parse_args()
if args.version:
print("Version 0.1")
| 2.875
| 3
|
odin/metrics/performance_summary.py
|
gsamarakoon/Odin
| 103
|
12774964
|
<reponame>gsamarakoon/Odin<filename>odin/metrics/performance_summary.py
import pandas as pd
from .compute_drawdowns import compute_drawdowns
from .compute_sharpe_ratio import compute_sharpe_ratio
def performance_summary(history, portfolio_id):
"""This function computes common performance metrics for a time-series of
portfolio equity states. For instance, the function will compute the Sharpe
ratio, the maximum drawdown, the drawdown duration, the annualized returns
and the average number of positions held at each moment in the time-series.
Parameters
----------
history: A portfolio history object.
The portfolio history object containing the equity and positional
information for a time-series corresponding to the period of performance
of a trading algorithm.
portfolio_id: String.
A unique identifier assigned to the portfolio.
"""
equity = history.equity
n = len(equity)
m = pd.DataFrame(index=[portfolio_id])
m.ix[portfolio_id, "total equity"] = equity.ix[-1]
m.ix[portfolio_id, "max equity"] = equity.max()
m.ix[portfolio_id, "max drawdown"], m.ix[portfolio_id, "max duration"] = (
compute_drawdowns(equity)
)
m.ix[portfolio_id, "sharpe ratio"] = (
compute_sharpe_ratio(history.returns)
)
m.ix[portfolio_id, "avg positions"] = history.n_positions.mean()
m.ix[portfolio_id, "annualized returns"] = (
(1. + history.returns).prod() ** (252. / n)
)
return m
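# Hedged usage sketch (not part of the original module): a minimal stand-in
# history object exposing the three attributes the summary reads. It assumes
# the sibling compute_drawdowns/compute_sharpe_ratio modules are importable,
# so it only runs within the package context (e.g. python -m ...).
if __name__ == "__main__":
    import numpy as np
    from types import SimpleNamespace

    daily_returns = pd.Series(np.random.normal(0.0005, 0.01, 252))
    history = SimpleNamespace(
        equity=(1.0 + daily_returns).cumprod() * 100000.0,
        returns=daily_returns,
        n_positions=pd.Series(np.full(252, 8)),
    )
    print(performance_summary(history, "demo_portfolio"))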
| 3.390625
| 3
|
cogkit/modules/provider-localscheduler/examples/ec2-cloud-provider/cloud.py
|
stefb965/swift-k
| 99
|
12774965
|
#!/usr/bin/env python
import os
import errno
import sys
import random
import logging
import pprint
import argparse
import datetime
import time
#from __future__ import print_function
import imp
try:
imp.find_module('libcloud')
except ImportError:
sys.stderr.write("Python: Apache libcloud module not available, cannot proceed\n")
exit(-1)
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.base import NodeSize, NodeImage
from libcloud.compute.types import NodeState
import libcloud.compute.types
NODESTATES = { NodeState.RUNNING : "RUNNING",
NodeState.REBOOTING : "REBOOTING",
NodeState.TERMINATED : "TERMINATED",
NodeState.STOPPED : "STOPPED",
NodeState.PENDING : "PENDING",
NodeState.UNKNOWN : "UNKNOWN" }
WORKER_USERDATA='''#!/bin/bash
export JAVA=/usr/local/bin/jdk1.7.0_51/bin
export SWIFT=/usr/local/bin/swift-trunk/bin
export PATH=$JAVA:$SWIFT:$PATH
export WORKER_LOGGING_LEVEL=TRACE
'''
NEW_LINE='''
'''
def aws_create_security_group(driver, configs):
""" Creates security group if not present.
Currently opens all tcp/udp ports in range 0, 65000 for all sources.
args : driver instance, configs dictionary
returns: Nothing
"""
group_name = configs["ec2securitygroup"]
current = driver.ex_list_security_groups()
if group_name not in current:
logging.debug("Security group absent, creating group" + str(configs["ec2securitygroup"]));
res = driver.ex_create_security_group(name=group_name,description="Open all ports")
if not driver.ex_authorize_security_group(group_name, 0, 65000, '0.0.0.0/0'):
sys.stderr.write("Authorizing ports for security group failed \n")
if not driver.ex_authorize_security_group(group_name, 0, 65000, '0.0.0.0/0', protocol='udp'):
sys.stderr.write("Authorizing ports for security group failed \n")
def check_keypair(driver, configs):
""" Checks if valid keypairs exist, if not creates them
args : driver instance, configs dictionary
returns: Nothing
"""
if "ec2keypairname" in configs and "ec2keypairfile" in configs:
all_pairs = driver.list_key_pairs()
for pair in all_pairs:
if pair.name == configs['ec2keypairname']:
return 0
key_pair = driver.create_key_pair(name=configs['ec2keypairname'])
f = open(configs['ec2keypairfile'], 'w')
f.write(str(key_pair.private_key))
f.close()
os.chmod(configs['ec2keypairfile'], 0600)
else:
sys.stderr.write("ec2keypairname and/or ec2keypairfile missing\n")
sys.stderr.write("Cannot proceed without ec2keypairname and ec2keypairfile\n")
exit(-1)
def _read_conf(config_file):
cfile = open(config_file, 'r').read()
config = {}
for line in cfile.split('\n'):
# Checking if empty line or comment
if line.startswith('#') or not line :
continue
temp = line.split('=')
config[temp[0]] = temp[1].strip('\r')
return config
def pretty_configs(configs):
printer = pprint.PrettyPrinter(indent=4)
printer.pprint(configs)
def read_configs(config_file):
config = _read_conf(config_file)
if 'ec2credentialsfile' in config :
config['ec2credentialsfile'] = os.path.expanduser(config['ec2credentialsfile'])
config['ec2credentialsfile'] = os.path.expandvars(config['ec2credentialsfile'])
cred_lines = open(config['ec2credentialsfile']).readlines()
cred_details = cred_lines[1].split(',')
credentials = { 'AWS_Username' : cred_details[0],
'AWSAccessKeyId' : cred_details[1],
'AWSSecretKey' : cred_details[2] }
config.update(credentials)
else:
print "ec2credentialsfile , Missing"
print "ERROR: Cannot proceed without access to ec2credentialsfile"
exit(-1)
return config
def node_status(driver, node_uuids):
nodes = driver.list_nodes()
for node in nodes:
if node.uuid in node_uuids :
if node.state == NodeState.RUNNING:
print node.uuid, "R"
elif node.state == NodeState.PENDING:
print node.uuid, "Q"
elif node.state == NodeState.TERMINATED:
print node.uuid, "C"
elif node.state == NodeState.STOPPED:
print node.uuid, "C"
elif node.state == NodeState.UNKNOWN:
print node.uuid, "Q" # This state could be wrong
else:
sys.stderr.write("Node state unknown/invalid " + str(NODESTATE[node.state]))
return -1
return 0
def node_start(driver, configs, WORKER_STRING):
cloudinit = ""
if "ec2cloudinit" in configs:
logging.info("ec2cloudinit from script : " + configs['ec2cloudinit'])
cloudinit = open(configs['ec2cloudinit'],'r').read()
userdata = WORKER_USERDATA + cloudinit + NEW_LINE + WORKER_STRING.lstrip('"').rstrip('"')
image = NodeImage(id=configs['ec2workerimage'], name=None, driver=driver)
sizes = driver.list_sizes()
size = [ s for s in sizes if s.id == configs['ec2workertype'] ]
if not size:
logging.info("ec2workerimage not legal/valid : %s", configs['ec2workertype'])
sys.stderr.write("ec2workerimage not legal/valid \n")
exit(-1);
node = driver.create_node(name="swift_worker",
image=image,
size=size[0],
ex_keyname=configs['ec2keypairname'],
ex_securitygroup=configs['ec2securitygroup'],
ex_userdata=userdata )
print 'jobid={0}'.format(node.uuid)
# node_names is a list
def node_terminate(driver, node_uuids):
nodes = driver.list_nodes()
deleted_flag = False
for node in nodes:
if node.uuid in node_uuids and node.state == NodeState.RUNNING :
logging.info("Terminating node : %s", str(node))
code = driver.destroy_node(node)
deleted_flag = True
return deleted_flag
def init_checks(driver, configs):
aws_create_security_group(driver, configs)
check_keypair(driver, configs)
def init(conf_file):
logging.debug("conf_file: " + str(conf_file))
configs = read_configs(conf_file)
# Setting defaults for optional configs
if 'ec2securitygroup' not in configs :
logging.info("ec2SecurityGroup not set: Defaulting to swift-security-group")
configs['ec2securitygroup'] = "swift-security-group"
if "ec2keypairname" not in configs:
logging.info("ec2KeypairName not set: Defaulting to swift-keypaid")
configs['ec2keypairname'] = "swift-keypair"
# If $HOME/.ssh is not accessible check_keypair will throw errors
if "ec2keypairfile" not in configs:
keyfile = os.path.expandvars("$HOME/.ssh/" + configs['ec2keypairname'] + ".pem")
logging.info("ec2keypairfile not set: Defaulting to " + keyfile)
configs['ec2keypairfile'] = keyfile
driver = get_driver(Provider.EC2_US_WEST_OREGON) # was EC2
ec2_driver = driver(configs['AWSAccessKeyId'], configs['AWSSecretKey'])
return configs,ec2_driver
if __name__ == '__main__' :
parser = argparse.ArgumentParser()
mu_group = parser.add_mutually_exclusive_group(required=True)
mu_group.add_argument("-s", "--submit", default=None , help='Takes a config file. Submits the CMD_STRING in the configs for execution on a cloud resource')
mu_group.add_argument("-t", "--status", default=None , help='gets the status of the CMD_STRING in the configs for execution on a cloud resource')
mu_group.add_argument("-c", "--cancel", default=None , help='cancels the jobs with jobids')
parser.add_argument("-v", "--verbose", help="set level of verbosity, DEBUG, INFO, WARN")
parser.add_argument("-l", "--logfile", help="set path to logfile, defaults to /dev/null")
parser.add_argument("-j", "--jobid", type=str, action='append')
args = parser.parse_args()
# Setting up logging
if args.logfile:
if not os.path.exists(os.path.dirname(args.logfile)):
os.makedirs(os.path.dirname(args.logfile))
logging.basicConfig(filename=args.logfile, level=logging.DEBUG)
else:
logging.basicConfig(filename='/dev/null', level=logging.DEBUG)
config_file = ( args.status or args.submit or args.cancel )
configs, driver = init(config_file)
if args.submit :
# Init checks confirm keypairs and security groups to allow for access to ports
init_checks(driver, configs)
node_start(driver, configs, configs['CMD_STRING'])
elif args.status :
node_status(driver, args.jobid )
elif args.cancel :
node_terminate(driver, args.jobid)
else:
sys.stderr.write("ERROR: Undefined args, cannot be handled")
sys.stderr.write("ERROR: Exiting...")
exit(-1)
exit(0)
| 2.046875
| 2
|
Singleton.py
|
RaynoldKim/MyTrade
| 0
|
12774966
|
class Singleton:
__instance = None
@classmethod
def __get_instance(cls):
return cls.__instance
@classmethod
def instance(cls, *args, **kargs):
cls.__instance = cls(*args, **kargs)
cls.instance = cls.__get_instance
return cls.__instance
"""""
class MyClass(BaseClass, Singleton):
pass
c = MyClass.instance()
"""""
| 3.421875
| 3
|
exercicios-Python/desaf095.py
|
marcelo-py/Exercicios-Python
| 0
|
12774967
|
dadosd = dict()
jogadores = list()
gols = list()
while True:
    dadosd['nome'] = str(input('Name: '))
    total = int(input('How many matches did {} play? '.format(dadosd['nome'])))
    for c in range(0, total):
        gols.append(int(input('How many goals in match {}? '.format(c+1))))
    dadosd['gols'] = gols[:]
    gols.clear()
    jogadores.append(dadosd.copy())
    dadosd.clear()
    sn = str(input('Continue? Y/N '))
    while sn not in 'YyNn':
        sn = str(input('Continue? Y or N '))
    if sn in 'Nn':
        break
print('No. | Name | Goals | Total')
for i, p in enumerate(jogadores):
    print('{} {} {} {}'.format(i, p['nome'], p['gols'], sum(p['gols'])))
print('=-'*30)
while True:
    num = int(input("Show which player's data? (999 to quit) "))
    if num == 999:
        break
    if num >= len(jogadores):
        print('Error!!! number is not in the list')
    if num <= len(jogadores)-1:
        print('Summary for player {}'.format(jogadores[num]['nome']))
        for i, v in enumerate(jogadores[num]['gols']):
            print('=> In match {} scored {} '.format(i+1, v))
| 3.546875
| 4
|
swexpert/d3/sw_5948.py
|
ruslanlvivsky/python-algorithm
| 3
|
12774968
|
test_cases = int(input())
for t in range(1, test_cases + 1):
nums = list(map(int, input().strip().split()))
result = []
for i in range(0, 5):
for j in range(i + 1, 6):
for k in range(j + 1, 7):
result.append(nums[i] + nums[j] + nums[k])
result = sorted(list(set(result)), reverse=True)
print('#{} {}'.format(t, result[4]))
| 2.96875
| 3
|
crypto_balancer/backtest_exchange.py
|
GRTTX/crypto_balancer
| 28
|
12774969
|
import glob
import json
import pandas as pd
from crypto_balancer.dummy_exchange import DummyExchange
LIMITS = {'BNB/BTC': {'amount': {'max': 90000000.0, 'min': 0.01},
'cost': {'max': None, 'min': 0.001},
'price': {'max': None, 'min': None}},
'BNB/ETH': {'amount': {'max': 90000000.0, 'min': 0.01},
'cost': {'max': None, 'min': 0.01},
'price': {'max': None, 'min': None}},
'BNB/USD': {'amount': {'max': 10000000.0, 'min': 0.01},
'cost': {'max': None, 'min': 10.0},
'price': {'max': None, 'min': None}},
'BTC/USD': {'amount': {'max': 10000000.0, 'min': 1e-06},
'cost': {'max': None, 'min': 10.0},
'price': {'max': None, 'min': None}},
'ETH/BTC': {'amount': {'max': 100000.0, 'min': 0.001},
'cost': {'max': None, 'min': 0.001},
'price': {'max': None, 'min': None}},
'ETH/USD': {'amount': {'max': 10000000.0, 'min': 1e-05},
'cost': {'max': None, 'min': 10.0},
'price': {'max': None, 'min': None}},
'XRP/BNB': {'amount': {'max': 90000000.0, 'min': 0.1},
'cost': {'max': None, 'min': 1.0},
'price': {'max': None, 'min': None}},
'XRP/BTC': {'amount': {'max': 90000000.0, 'min': 1.0},
'cost': {'max': None, 'min': 0.001},
'price': {'max': None, 'min': None}},
'XRP/ETH': {'amount': {'max': 90000000.0, 'min': 1.0},
'cost': {'max': None, 'min': 0.01},
'price': {'max': None, 'min': None}},
'XRP/USD': {'amount': {'max': 90000000.0, 'min': 0.1},
'cost': {'max': None, 'min': 1.0},
'price': {'max': None, 'min': None}},
'XLM/USD': {'amount': {'max': 90000000.0, 'min': 0.1},
'cost': {'max': None, 'min': 1.0},
'price': {'max': None, 'min': None}},
'XLM/XRP': {'amount': {'max': 90000000.0, 'min': 0.1},
'cost': {'max': None, 'min': 1.0},
'price': {'max': None, 'min': None}}}
class BacktestExchange(DummyExchange):
def __init__(self, filenames, balances, fee=0.001):
self.name = 'BacktestExchange'
        self._currencies = balances.keys()
        self.pairs = []  # collected below as pair files are loaded
        final_df = pd.DataFrame()
for path in glob.glob(filenames):
filename = path.split('/')[-1]
pair = filename.split('.')[0]
pair = pair.replace('-','/')
self.pairs.append(pair)
data = json.load(open(path, 'r'))#['Data']
df = pd.DataFrame(data)
df.set_index(pd.to_datetime(df['time'], unit='s'), inplace=True)
df = df[~df.index.duplicated()]
final_df[pair] = df['close']
# print('loaded', pair)
final_df.fillna(method='ffill', inplace=True)
# self._iter = final_df['2018-01-08':].iterrows()
# self._iter = final_df['2017-09-11':].iterrows()
# self._iter = final_df[:'2018-12-30'].iterrows()
# self._iter = final_df['2018-09-11':'2018-12-30'].iterrows()
self._iter = final_df.iterrows()
self._rates = {}
self._balances = balances
self._fee = fee
self.tick()
def tick(self):
self._rates = dict(next(self._iter)[1])
@property
def limits(self):
return LIMITS
| 2.3125
| 2
|
db.py
|
vorian77/udacity-into-to-programming-python-adventure-game
| 0
|
12774970
|
# atomic level
def get_idx(list, key):
for idx in range(len(list)):
if key == list[idx][0]:
return idx
def ins(list, key, val):
list.append([key, val])
return list
def ret(list, key):
idx = get_idx(list, key)
return list[idx][1]
def upd(list, key, val):
new_item = [key.lower(), val]
idx = get_idx(list, key)
list[idx] = new_item
return list
def delete(list, key):
idx = get_idx(list, key)
    list.pop(idx)  # remove by index, not by value
return list
# table level
def ins_tab(db, table_name):
return ins(db, table_name, [])
def ret_tab(db, table_name):
return ret(db, table_name)
def upd_tab(db, table_name, table):
return upd(db, table_name, table)
def del_tab(db, table_name):
return delete(db, table_name)
# record level
def is_member(record, kv, check_value):
if len(kv) == 0:
return True
else:
for item in record:
if item[0] == kv[0]:
if check_value:
if item[1] == kv[1]:
return True
else:
return True
return False
def kvs_in_rec(record, kv_list):
# all kv's of kv_list_search are members of record
for kv in kv_list:
if not is_member(record, kv, True):
return False
return True
def ins_rec(db, table_name, kv_list):
table = ret(db, table_name)
table.append(kv_list)
return upd(db, table_name, table)
def ret_recs(db, table_name, kv_list):
list = []
table = ret(db, table_name)
for record in table:
if kvs_in_rec(record, kv_list):
list.append(record)
return list
def ret_rec_idx(db, table_name, record_idx):
table = ret(db, table_name)
if len(table) >= record_idx:
return table[record_idx]
else:
return None
def upd_recs(db, table_name, kv_list_search, kv_list_upd):
# updates all records identified by kv_list_search
new_table = []
old_table = ret_tab(db, table_name)
for old_rec in old_table:
if kvs_in_rec(old_rec, kv_list_search):
# matching record
new_rec = old_rec
for kv in kv_list_upd:
# if kv is member of record, update value of kv,
# otherwise insert entire kv
key = kv[0]
val = kv[1]
if is_member(new_rec, kv, False):
new_rec = upd(new_rec, key, val)
else:
new_rec = ins(new_rec, key, val)
new_table.append(new_rec)
else:
new_table.append(old_rec)
return upd(db, table_name, new_table)
def del_recs(db, table_name, kv_list):
new_table = []
old_table = ret_tab(db, table_name)
for record in old_table:
if not kvs_in_rec(record, kv_list):
new_table.append(record)
return upd(db, table_name, new_table)
def del_all(db, table_name):
table = []
return upd(db, table_name, table)
# value level
def ret_val(db, table_name, record_id_key, record_id_value, data_key):
# assumes [record_id_key, record_id_value] identifies a single record
records = ret_recs(db, table_name, [[record_id_key, record_id_value]])
if len(records) == 0:
return None
else:
return ret(records[0], data_key)
def ret_val_idx(db, table_name, record_idx, data_key):
record = ret_rec_idx(db, table_name, record_idx)
if record:
return ret(record, data_key)
else:
return None
def upd_val(db, table_name, record_id_key, record_id_val, data_key, data_val):
# updates all records identified by [record_id_key, record_id_value]
return upd_recs(db,
table_name,
[[record_id_key, record_id_val]],
[[data_key, data_val]])
# summary
def rec_cnt(db, table_name, kv_list):
records = ret_recs(db, table_name, kv_list)
return len(records)
def rec_list(db, table_name, key):
list = []
table = ret_tab(db, table_name)
for record in table:
for item in record:
if item[0].lower() == key.lower():
if not item[1] in list:
list.append(item[1])
return list
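# Hedged usage sketch (not part of the original module): a tiny in-memory "db"
# (a nested list) exercised through the table/record/value helpers above.
if __name__ == "__main__":
    db = []
    ins_tab(db, "players")
    ins_rec(db, "players", [["name", "ada"], ["score", 3]])
    ins_rec(db, "players", [["name", "bob"], ["score", 5]])
    print(ret_recs(db, "players", [["name", "ada"]]))
    upd_val(db, "players", "name", "bob", "score", 7)
    print(ret_val(db, "players", "name", "bob", "score"))  # 7
    print(rec_cnt(db, "players", []))                      # 2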
| 2.796875
| 3
|
src/processors/user_accounts.py
|
carlashley/munkicon
| 15
|
12774971
|
import subprocess
from distutils.version import StrictVersion
from platform import mac_ver
try:
from munkicon import plist
from munkicon import worker
except ImportError:
from .munkicon import plist
from .munkicon import worker
# Keys: 'user_home_path'
# 'secure_token'
# 'volume_owners'
class UserAccounts(object):
def __init__(self):
self.conditions = self._process()
def _users(self):
"""Users."""
result = set()
_ignore_users = ['daemon',
'nobody',
'root']
_cmd = ['/usr/bin/dscl', '.', '-list', '/Users']
_p = subprocess.Popen(_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_r, _e = _p.communicate()
if _p.returncode == 0:
if isinstance(_r, bytes):
_r = _r.decode('utf-8').strip()
for _u in _r.splitlines():
if not _u.startswith('_'):
if _u not in _ignore_users:
result.add(_u)
return result
def _home_dirs(self):
"""Home Directories"""
result = {'user_home_path': list()}
_users = self._users()
_home_dirs = set()
if _users:
for _u in _users:
_cmd = ['/usr/bin/dscl', '-plist', '.', '-read', '/Users/{}'.format(_u), 'NFSHomeDirectory']
_p = subprocess.Popen(_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_r, _e = _p.communicate()
if _p.returncode == 0:
if isinstance(_r, bytes):
_r = _r.decode('utf-8').strip()
if _r:
_h = plist.readPlistFromString(_r)['dsAttrTypeStandard:NFSHomeDirectory']
if _h:
try:
_r = '{},{}'.format(_u, _h[0].strip())
except IndexError:
_r = '{},{}'.format(_u, _h.strip())
_home_dirs.add(_r)
result['user_home_path'] = list(_home_dirs)
return result
def _secure_tokens(self):
"""Determine SecureToken status for user."""
result = {'secure_token': list()}
_users = self._users()
if _users and StrictVersion(mac_ver()[0]) >= StrictVersion('10.14'):
for _u in _users:
_status = 'DISABLED'
_cmd = ['/usr/sbin/sysadminctl', '-secureTokenStatus', _u]
_p = subprocess.Popen(_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_r, _e = _p.communicate()
if _p.returncode == 0:
# Output is on stderr, not stdout
if isinstance(_e, bytes):
_e = _e.decode('utf-8').strip()
if 'ENABLED' in _e:
_status = 'ENABLED'
result['secure_token'].append('{},{}'.format(_u, _status))
else:
pass
return result
def _user_guids(self):
result = dict()
_users = self._users()
for _user in _users:
_cmd = ['/usr/bin/dscl', '.', '-read', f'/Users/{_user}', 'GeneratedUID']
_p = subprocess.run(_cmd, capture_output=True, encoding='utf-8')
if _p.returncode == 0 and _p.stdout:
_uid = _p.stdout.strip().replace('GeneratedUID: ', '')
result[_uid] = _user
return result
def _volume_owners(self):
"""Determine volume owners on APFS disks"""
result = {'volume_owners': list()}
_users_with_uid = self._user_guids()
_vol_own_users = set()
_cmd = ['/usr/sbin/diskutil', 'apfs', 'listUsers', '/', '-plist']
_p = subprocess.run(_cmd, capture_output=True)
if _p.returncode == 0 and _p.stdout:
try:
_users = plist.readPlistFromString(_p.stdout.strip())['Users']
for _user in _users:
_apfs_crypto_type = _user.get('APFSCryptoUserType')
_apfs_crypto_uuid = _user.get('APFSCryptoUserUUID')
_is_volume_owner = _user.get('VolumeOwner')
_hr_user_name = _users_with_uid.get(_apfs_crypto_uuid)
if _is_volume_owner and _apfs_crypto_type != 'PersonalRecovery':
_vol_own_users.add(_hr_user_name)
except Exception:
pass
result['volume_owners'] = sorted(list(_vol_own_users))
return result
def _process(self):
"""Process all conditions and generate the condition dictionary."""
result = dict()
result.update(self._home_dirs())
result.update(self._secure_tokens())
result.update(self._volume_owners())
return result
def runner(dest):
users = UserAccounts()
mc = worker.MunkiConWorker(conditions_file=dest, log_src=__file__)
mc.write(conditions=users.conditions)
| 1.84375
| 2
|
src/values.py
|
abhra2020-smart/FlowLang
| 0
|
12774972
|
<gh_stars>0
from dataclasses import dataclass
@dataclass
class Number:
value: any
def __repr__(self):
return f"{self.value}"
@dataclass
class Bool:
value: bool
def __repr__(self):
return f"{self.value}".lower()
| 2.734375
| 3
|
causalator.py
|
nickwbarber/hilt-scripts
| 1
|
12774973
|
#!/usr/bin/env python3
import os
from itertools import chain
from collections import Counter
import argparse
import gatenlphiltlab
relators = [
"because",
"cuz",
"since",
"after",
"when",
"whenever",
"once",
"therefore",
"so",
"if",
"soon",
"result",
"results",
"resulted",
"resulting",
"cause",
"causes",
"caused",
"causing",
"starts",
"start",
"starts",
"started",
"starting",
"make",
"makes",
"made",
"making",
"precipitate",
"precipitates",
"precipitated",
"precipitating",
"lead",
"leads",
"led",
"produce",
"produces",
"produced",
"producing",
"provoke",
"provokes",
"provoked",
"provoking",
"breeds",
"breeds",
"bred",
"breeding",
"induce",
"induces",
"induced",
"inducing",
"create",
"creates",
"created",
"creating",
"effect",
"effects",
"effected",
"effecting",
]
parser = argparse.ArgumentParser(
description="Annotates causal connectives within GATE annotation files"
)
parser.add_argument(
"-i",
"--annotation-file",
dest="annotation_files",
nargs="+",
required="true",
help="GATE annotation files"
)
args = parser.parse_args()
for annotation_file_path in args.annotation_files:
annotation_file = gatenlphiltlab.AnnotationFile(annotation_file_path)
EAU_heuristics_set = annotation_file.create_annotation_set("EAU_heuristics")
tokens = [
annotation
for annotation in annotation_file.annotations
if annotation.type.lower() == "token"
]
for token in tokens:
# if token.text.lower() in relators:
if token.text.lower() == "because":
EAU_heuristics_set.create_annotation(
annotation_type="possible_causal_connective",
start=token.start_node,
end=token.end_node,
)
annotation_file.save_changes()
| 2.4375
| 2
|
install/scripts/popoolationte2.py
|
shunhuahan/mcclintock
| 0
|
12774974
|
import sys
import os
sys.path.append(snakemake.config['paths']['mcc_path'])
import scripts.mccutils as mccutils
def main():
download_success = mccutils.download(snakemake.params.url, snakemake.output[0], md5=snakemake.params.md5, max_attempts=3)
if not download_success:
print("popoolationTE2 download failed... exiting...")
print("try running --install with --clean for clean installation")
sys.exit(1)
if __name__ == "__main__":
main()
| 2.0625
| 2
|
gradle-conda-plugin/examples/multi-project-example/example-lib/src/main/python/lib.py
|
logbee/gradle-plugins
| 3
|
12774975
|
<filename>gradle-conda-plugin/examples/multi-project-example/example-lib/src/main/python/lib.py
class Example:
def __init__(self):
self.name = ""
pass
def greet(self, name):
self.name = name
print("hello " + name)
| 1.820313
| 2
|
core/data/astrosource/astro_source.py
|
xcamilox/frastro
| 1
|
12774976
|
<reponame>xcamilox/frastro<gh_stars>1-10
import time
import json
class AstroSource(object):
__id=""
__catalogs=[] # list of catalogs source
__images=[] #list of image source
__spectra=[] #list of spectral
__sumary={} #usfully data like: magnitud, redshift, name, id..etc
    __date_request = 0  # timestamp (UTC) of the last request to this ra-dec
def __init__(self,coordinates):
ra=coordinates.ra.degree
dec=coordinates.dec.degree
position=coordinates.to_string('hmsdms').replace("h",":").replace("m",":").replace("d",":").replace("s","")
self.__sumary={
'id':str(str(ra)+"_"+str(dec)).replace(" ",""),
'ra':ra,
'dec':dec,
'pos':position
}
self.__id = str(str(ra)+"_"+str(dec)).replace(" ","")
self.__date_request = time.time()
self.__catalogs = []
self.__images = []
self.__spectra = []
def addImage(self,sourceImage):
self.__images.append(sourceImage)
def addCatalog(self,sourceCatalog):
self.__catalogs.append(sourceCatalog)
def addSpectra(self,sourceSpectra):
self.__spectra.append(sourceSpectra)
def addSummaryParams(self,key,value):
self.__sumary[key]=value
def getCatalogs(self):
return self.__catalogs
def getImages(self):
return self.__images
def getSpectra(self):
return self.__spectra
def getSummary(self):
return self.__sumary
def getId(self):
return self.__id
def __dict__(self):
astrosource = {"id":self.__id,"catalogs": [], "images": [], "spectra": [], "summary": self.__sumary,
"request_date": self.__date_request}
for cat in self.__catalogs:
astrosource["catalogs"].append(cat.__dict__())
for img in self.__images:
astrosource["images"].append(img.__dict__())
for spc in self.__spectra:
astrosource["spectra"].append(spc.__dict__())
return astrosource
def toJson(self):
return json.dumps(self.__dict__())
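# Minimal usage sketch. It assumes `coordinates` is an astropy SkyCoord, which is
# consistent with the .ra.degree / .to_string('hmsdms') calls in __init__ above;
# astropy is not imported by this module, and the values below are made up.
if __name__ == "__main__":
    from astropy.coordinates import SkyCoord
    import astropy.units as u

    coord = SkyCoord(ra=150.1 * u.degree, dec=2.2 * u.degree)
    source = AstroSource(coord)
    source.addSummaryParams("redshift", 0.35)
    print(source.getId())
    print(source.toJson())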
| 2.34375
| 2
|
calvin/runtime/south/plugins/storage/twistedimpl/securedht/tests/test_dht_server_evil.py
|
josrolgil/exjobbCalvin
| 1
|
12774977
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import os
import traceback
import twisted
import shutil
from calvin.utilities.calvin_callback import CalvinCB
from calvin.utilities import calvinlogger
from calvin.runtime.south.plugins.storage.twistedimpl.securedht.append_server import *
from calvin.runtime.south.plugins.storage.twistedimpl.securedht.dht_server import *
from calvin.runtime.south.plugins.storage.twistedimpl.securedht.service_discovery_ssdp import *
from calvin.runtime.south.plugins.storage.twistedimpl.securedht.dht_server_commons import *
from kademlia.node import Node
from calvin.runtime.south.plugins.async import threads
from calvin.utilities import calvinconfig
_conf = calvinconfig.get()
_conf.add_section("security")
_conf_file = os.path.join(os.getenv("HOME"), ".calvin/security/test/openssl.conf")
_conf.set("security", "certificate_conf", _conf_file)
_conf.set("security", "certificate_domain", "test")
_cert_conf = None
_log = calvinlogger.get_logger(__name__)
reactor.suggestThreadPoolSize(30)
@pytest.fixture(scope="session", autouse=True)
def cleanup(request):
def fin():
reactor.callFromThread(reactor.stop)
request.addfinalizer(fin)
@pytest.mark.interactive
@pytest.mark.slow
class TestDHT(object):
test_nodes = 2
_sucess_start = (True,)
@pytest.fixture(autouse=True, scope="class")
def setup(self, request):
global _cert_conf
_cert_conf = certificate.Config(_conf_file, "test").configuration
@pytest.inlineCallbacks
def test_dht_multi(self, monkeypatch):
iface = "0.0.0.0"
a = None
b = None
q = Queue.Queue()
def server_started(aa, *args):
for b in args:
if isinstance(b, twisted.python.failure.Failure):
b.printTraceback()
else:
_log.debug("** %s" % b)
q.put([aa,args])
try:
amount_of_servers = 1
# Twisted is using 20 threads so having > 20 server
# causes threadlocks really easily.
servers = []
callbacks = []
for servno in range(0, amount_of_servers):
a = evilAutoDHTServer()
servers.append(a)
callback = CalvinCB(server_started, str(servno))
servers[servno].start(iface, network="Niklas", cb=callback, type="poison", name="evil")
# servers[servno].start(iface, network="Hej", cb=callback, type="eclipse", name="evil")
# servers[servno].start(iface, network="Hej", cb=callback, type="sybil")
# servers[servno].start(iface, network="Hej", cb=callback, type="insert")
callbacks.append(callback)
print("Starting {}".format(servers[servno].dht_server.port.getHost().port))
# Wait for start
started = []
while len(started) < amount_of_servers:
try:
server = yield threads.defer_to_thread(q.get)
except Queue.Empty:
_log.debug("Queue empty!")
#raise
if server not in started:
started.append(server)
_log.debug("DHT Servers added: {}".format(started))
callbacks[int(server[0][0])].func = lambda *args, **kvargs:None
else:
_log.debug("Server: {} already started." \
" {} out of {}".format(started,
len(started),
amount_of_servers))
_log.debug("All {} out of {} started".format(started,
len(started),
amount_of_servers))
for servno in range(0, amount_of_servers):
assert [str(servno), self._sucess_start] in started
yield threads.defer_to_thread(q.queue.clear)
yield threads.defer_to_thread(time.sleep, 15)
evilPort = servers[0].dht_server.port.getHost().port
drawNetworkState("start_graph.png", servers, amount_of_servers)
servers[0].dht_server.kserver.protocol.turn_evil(evilPort)
print("Node with port {} turned evil".format(servers[servno].dht_server.port.getHost().port))
yield threads.defer_to_thread(time.sleep, 12)
# assert get_value[0] == "banan"
# assert get_value[1] == "bambu"
# assert get_value[2] == "morot"
yield threads.defer_to_thread(time.sleep, 5)
print("Attacking node exiting")
except AssertionError as e:
print("Server {} with port {} got wrong value".format(servno, servers[servno].dht_server.port.getHost().port))
pytest.fail(traceback.format_exc())
except Exception as e:
traceback.print_exc()
pytest.fail(traceback.format_exc())
finally:
for server in servers:
name_dir = os.path.join(_cert_conf["CA_default"]["runtimes_dir"], "evil")
shutil.rmtree(os.path.join(name_dir, "others"), ignore_errors=True)
os.mkdir(os.path.join(name_dir, "others"))
server.stop()
yield threads.defer_to_thread(time.sleep, 5)
| 1.59375
| 2
|
src/backend/common/decorators.py
|
guineawheek/ftc-data-take-2
| 0
|
12774978
|
<gh_stars>0
from functools import partial, wraps
from flask import make_response, request, Response
def cached_public(func=None, timeout: int = 61):
if func is None: # Handle no-argument decorator
return partial(cached_public, timeout=timeout)
@wraps(func)
def decorated_function(*args, **kwargs):
resp = make_response(func(*args, **kwargs))
if resp.status_code == 200: # Only cache OK responses
# TODO: hook into Redis
resp.headers["Cache-Control"] = "public, max-age={0}, s-maxage={0}".format(
max(
timeout, 61
) # needs to be at least 61 seconds to work with Google Frontend cache
)
resp.add_etag()
# Check for ETag caching
if resp.headers.get("ETag", None) in str(request.if_none_match):
resp = Response(status=304)
return resp
return decorated_function
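# Minimal usage sketch (the Flask app and the /status route below are illustrative,
# not part of this module):
if __name__ == "__main__":
    from flask import Flask

    app = Flask(__name__)

    @app.route("/status")
    @cached_public(timeout=300)
    def status():
        return "ok"

    app.run()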
| 2.5625
| 3
|
actionrules/reduction/__init__.py
|
KIZI/actionrules
| 8
|
12774979
|
from .reduction import *
| 1.117188
| 1
|
CURSO PYTHON UDEMY/Curso Udemy/Mundo 4 (POO)/AGRAGACAO108.py
|
nihilboy1455/CURSO-PYTHON-UDEMY
| 0
|
12774980
|
<reponame>nihilboy1455/CURSO-PYTHON-UDEMY
class Carrinho_de_compras:
def __init__(self):
self.produtos = []
def inserir_produto(self, produto):
self.produtos.append(produto)
def listar_produtos(self):
for produto in self.produtos:
print(produto.nome, produto.valor)
def soma_total(self):
total = 0
for x in self.produtos:
total = total + x.valor
return total
'''
I can't unpack a list into two variables in a for loop here inside the classes;
I have to read everything into a single variable and then, when handling it,
specify which method I want to deal with.
'''
class Produto:
def __init__(self, nome, valor):
self.nome = nome
self.valor = valor
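# Minimal usage sketch (the products and prices below are made up):
if __name__ == "__main__":
    carrinho = Carrinho_de_compras()
    carrinho.inserir_produto(Produto("Caderno", 15.0))
    carrinho.inserir_produto(Produto("Caneta", 3.5))
    carrinho.listar_produtos()
    print(carrinho.soma_total())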
| 4.125
| 4
|
server.py
|
Mushrifah/Stress-detection
| 1
|
12774981
|
import flask
import random
import sys
import os
import glob
import re
from pathlib import Path
import pickle
import numpy as np
# Import fast.ai Library
from fastai import *
from fastai.vision import *
# Flask utils
from flask import Flask, redirect, url_for, request, render_template,jsonify
from werkzeug.utils import secure_filename
app = flask.Flask(__name__)
UPLOAD_FOLDER = './UPLOAD_FOLDER/'
path=Path("path")
classes = ['stress', 'non-stress']
learn=load_learner(path,'a.pkl')
with open('classifier_pickle','rb') as f:
cls=pickle.load(f)
label_dictionary = {0: 'Healthy Plant', 1: 'Stress but recoverable',2:'Cannot Recover'}
def model_predict(img_path):
    """Open the image at img_path, run the fastai learner on it and
    return the predicted class.
    """
img = open_image(img_path)
pred_class,pred_idx,outputs = learn.predict(img)
return pred_class
@app.route('/upload', methods = ['GET', 'POST'])
def handle_request():
print("hello");
imagefile = flask.request.files['image']
print("hello", flask.request);
filename = UPLOAD_FOLDER + str(random.randint(0, 5000)) + '.png'
#filename = werkzeug.utils.secure_filename(imagefile.filename)
#filename= "photo.jpg";
print("\nReceived image File name : " + imagefile.filename)
imagefile.save(filename)
preds=model_predict(filename)
print(type(preds))
return str(preds)
@app.route('/calculate', methods = ['GET', 'POST'])
def handle_response():
print("Hello");
# getting the data from a separate json file.
json = request.get_json()
# the keys that should be included in the json file.
transaction_keys = ['tdry' , 'twet', 'tcanopy', 'timeDay']
    # return an error message if a key is not included in the file.
#stringValues= flask.request.values.get['dry', 'wet', 'canopy', 'time']
#print("Hello", flask.request);
a=json[transaction_keys[0]]
print(a)
b=json[transaction_keys[1]]
print(b)
c=json[transaction_keys[2]]
print(c)
d=json[transaction_keys[3]]
print(d)
pred=np.array([[a,b,c,d]])
pr=cls.predict(pred)
print(pr)
return jsonify(label_dictionary[int(pr)])
#ans=label_dictionary[int(pr)]
#print(ans)
#return ans
app.run(host="127.0.0.1",port=5000, debug=True)
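# Example client call (a sketch; it assumes the server above is running locally and
# 'leaf.png' is a hypothetical image file):
#   import requests
#   with open('leaf.png', 'rb') as fh:
#       print(requests.post('http://127.0.0.1:5000/upload', files={'image': fh}).text)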
| 2.46875
| 2
|
ninja_apikey/tests.py
|
mawassk/django-ninja-apikey
| 13
|
12774982
|
<reponame>mawassk/django-ninja-apikey
# flake8: noqa
from datetime import timedelta
import pytest
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.hashers import check_password
from django.contrib.auth.models import User
from django.utils import timezone
from django.utils.crypto import get_random_string
from .admin import APIKeyAdmin
from .models import APIKey
from .security import check_apikey, generate_key
def test_apikey_validation():
key = APIKey()
assert key
assert key.is_valid
key.revoked = True
assert not key.is_valid
key.revoked = False
assert key.is_valid
key.expires_at = timezone.now() - timedelta(minutes=1)
assert not key.is_valid
key.expires_at = timezone.now() + timedelta(minutes=1)
assert key.is_valid
key.expires_at = None
assert key.is_valid
def test_key_generation():
data = generate_key()
assert data
assert data.prefix
assert data.key
assert data.hashed_key
assert check_password(data.key, data.hashed_key)
@pytest.mark.django_db
def test_apikey_check():
assert not check_apikey(None)
user = User()
    user.username = get_random_string(10)
user.password = get_random_string(10)
user.save()
assert user
key = APIKey()
key.user = user
key_data = generate_key()
key.prefix = key_data.prefix
key.hashed_key = key_data.hashed_key
key.save()
assert key
assert user.username in str(key)
assert not check_apikey(key_data.key)
assert not check_apikey(key.prefix)
assert not check_apikey(f"{key_data.prefix}.{get_random_string(10)}")
assert check_apikey(f"{key_data.prefix}.{key_data.key}")
user.is_active = False
user.save()
assert not check_apikey(f"{key_data.prefix}.{key_data.key}")
user.delete()
assert not check_apikey(f"{key_data.prefix}.{key_data.key}")
@pytest.mark.django_db
def test_admin_save():
admin_site = AdminSite()
apikey_admin = APIKeyAdmin(APIKey, admin_site=admin_site)
assert admin_site
assert apikey_admin
user = User()
    user.username = get_random_string(10)
    user.password = get_random_string(10)
user.save()
assert user
key = APIKey()
key.user = user
key = apikey_admin.save_model(request=None, obj=key, form=None, change=None)
assert key
assert key.prefix
assert key.hashed_key
assert key.user == user
| 2.171875
| 2
|
config.py
|
Zegers/fernwehTrips
| 0
|
12774983
|
<reponame>Zegers/fernwehTrips
DEBUG = True
HOST = '127.0.0.1'
PORT = 8000
| 1.023438
| 1
|
gripql/python/gripql/connection.py
|
jordan2lee/grip
| 0
|
12774984
|
from __future__ import absolute_import, print_function, unicode_literals
from gripql.graph import Graph
from gripql.util import BaseConnection, raise_for_status
class Connection(BaseConnection):
def __init__(self, url, user=None, password=None, token=None, credential_file=None):
super(Connection, self).__init__(url, user, password, token, credential_file)
self.url = self.base_url + "/v1/graph"
def listGraphs(self):
"""
List graphs.
"""
response = self.session.get(
self.url
)
raise_for_status(response)
return response.json()['graphs']
def addGraph(self, name):
"""
Create a new graph.
"""
response = self.session.post(
self.url + "/" + name,
{}
)
raise_for_status(response)
return response.json()
def deleteGraph(self, name):
"""
Delete graph.
"""
response = self.session.delete(
self.url + "/" + name
)
raise_for_status(response)
return response.json()
def getSchema(self, name):
"""
Get a graph schema.
"""
response = self.session.get(
self.url + "/" + name + "/schema"
)
raise_for_status(response)
return response.json()
def graph(self, name):
"""
Get a graph handle.
"""
return Graph(self.base_url, name, self.user, self.password, self.token, self.credential_file)
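# Minimal usage sketch (the server URL and graph name are hypothetical):
if __name__ == "__main__":
    conn = Connection("http://localhost:8201")
    print(conn.listGraphs())
    g = conn.graph("example-graph")  # handle for further queries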
| 2.515625
| 3
|
CONTENT/DS-n-Algos/ALGO/_LEETCODE/032_longest_valid_parentheses/longest_valid_parentheses_TE.py
|
impastasyndrome/DS-ALGO-OFFICIAL
| 13
|
12774985
|
class Solution(object):
def longestValidParentheses(self, s):
"""
:type s: str
:rtype: int
"""
i = 0
maxlen = 0
self.longest = {}
while i < len(s):
if s[i] == ")":
i = i + 1
continue
else:
l = self.find_longest(i, s)
if l - i + 1 > maxlen:
maxlen = l - i + 1
i = l + 1
return maxlen
def find_longest(self, i, s):
if i in self.longest:
return self.longest[i]
start = balance = i
left = 0
right = 0
while True:
if i >= len(s):
self.longest[start] = balance
return balance
if s[i] == "(":
left += 1
else:
right += 1
if left < right:
self.longest[start] = balance
return balance
if left == right:
balance = i
i = i + 1
| 3.421875
| 3
|
api/migrations/0005_auto_20201231_0034.py
|
jjkivai/SolutionsWeb
| 0
|
12774986
|
<reponame>jjkivai/SolutionsWeb
# Generated by Django 3.1.4 on 2020-12-31 00:34
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0004_licenses_name'),
]
operations = [
migrations.RenameModel(
old_name='Licenses',
new_name='License',
),
]
| 1.71875
| 2
|
models/engine/file_storage.py
|
cbarros7/AirBnB_clone_v2
| 0
|
12774987
|
#!/usr/bin/python3
"""This module defines a class to manage file storage for hbnb clone"""
import json
class FileStorage:
"""This class manages storage of hbnb models in JSON format"""
__file_path = 'file.json'
__objects = {}
def all(self, cls=None):
"""Returns a dictionary of models currently in storage"""
if cls is not None:
if type(cls) == str:
cls = eval(cls)
new_dict = {}
for key, value in self.__objects.items():
# if self.__class__.__name__ == cls:
if type(value) == cls:
new_dict[key] = value
return new_dict
else:
return self.__objects
def new(self, obj):
"""Adds new object to storage dictionary"""
if obj:
key = "{}.{}".format(type(obj).__name__, obj.id)
self.__objects[key] = obj
def save(self):
"""Saves storage dictionary to file"""
with open(FileStorage.__file_path, 'w') as f:
temp = {}
temp.update(FileStorage.__objects)
for key, val in temp.items():
temp[key] = val.to_dict()
json.dump(temp, f)
def reload(self):
"""Loads storage dictionary from file"""
from models.base_model import BaseModel
from models.user import User
from models.place import Place
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.review import Review
classes = {
'BaseModel': BaseModel, 'User': User, 'Place': Place,
'State': State, 'City': City, 'Amenity': Amenity,
'Review': Review
}
try:
temp = {}
with open(FileStorage.__file_path, 'r') as f:
temp = json.load(f)
for key, val in temp.items():
self.all()[key] = classes[val['__class__']](**val)
except FileNotFoundError:
pass
def delete(self, obj=None):
"""Delete objects"""
if obj:
key = "{}.{}".format(type(obj).__name__, obj.id)
if self.__objects[key]:
del FileStorage.__objects[key]
self.save()
def close(self):
"""Method for deserializing the JSON file to objects"""
self.reload()
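# Minimal usage sketch (assumes this project's models package is importable):
if __name__ == "__main__":
    from models.base_model import BaseModel

    storage = FileStorage()
    storage.reload()
    obj = BaseModel()
    storage.new(obj)
    storage.save()
    print(len(storage.all()))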
| 3.09375
| 3
|
grafana_client/__init__.py
|
peekjef72/grafana-client
| 11
|
12774988
|
<filename>grafana_client/__init__.py
from .api import GrafanaApi
| 1.015625
| 1
|
ac.py
|
freenoth/nltktask
| 0
|
12774989
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module represent logic of detection and conversion of cheats.
"""
# py_ver : [3.5.2]
# date : [02.11.2016]
# author : [<NAME>]
# email : [<EMAIL>]
class AntiCheater(object):
""" Help to detect and convert text cheats.
:method check_word: convert word if needed and indicate cheating
"""
def __init__(self):
super().__init__()
self.LRUS = set('АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдеёжзийклмнопрстуфхцчшщъыьэюя')
self.LENG = set('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz')
self.TR_URUS = list('АВЕКМНОРСТУХЬ')
self.TR_UENG = list('ABEKMHOPCTYXb')
self.TR_LRUS = list('аеикорсухь')
self.TR_LENG = list('aeukopcyxb')
    def check_word(self, word):
        """ Check word for cheats - Russian letters replaced by similar English ones and vice versa.
        Detects cheats and converts cheat-letters back to the source language.
        :param word: a word to check
        :return: a tuple like (result_word, is_cheat)
        is_cheat = 0 - there are no cheats or 'word' is not alphabetic
        result_word = source word
        is_cheat = 1 - cheat found
        result_word = source word with cheat correction
        is_cheat = -1 - cheats maybe found, every letter could belong to both rus and eng
        if the number of unique RUS letters is less than ENG, then
        result_word = source word translated to ENG
        else
        result_word = source word translated to RUS
        is_cheat = -2 - there are both rus and eng letters, but the word cannot be converted to a single language
        result_word = source word
"""
is_cheat = 0
result_word = word
if not word.isalpha():
return result_word, is_cheat
# get set of letters by language
rus_parts = {x for x in word if (x in self.LRUS)}
eng_parts = {x for x in word if (x in self.LENG)}
if not rus_parts or not eng_parts:
# if only one language detected - there is no cheats
return result_word, is_cheat
# check letters can be converted to another language
check_rus = rus_parts.issubset(set(self.TR_LRUS + self.TR_URUS))
check_eng = eng_parts.issubset(set(self.TR_LENG + self.TR_UENG))
if not check_rus and not check_eng:
# strange word, but..
is_cheat = -2
elif check_rus and not check_eng:
# translate to english
is_cheat = 1
result_word = word.translate(str.maketrans(''.join(self.TR_LRUS + self.TR_URUS),
''.join(self.TR_LENG + self.TR_UENG)))
elif not check_rus and check_eng:
# translate to russian
is_cheat = 1
result_word = word.translate(str.maketrans(''.join(self.TR_LENG + self.TR_UENG),
''.join(self.TR_LRUS + self.TR_URUS)))
else:
is_cheat = -1
if len(rus_parts) >= len(eng_parts):
# translate to russian
result_word = word.translate(str.maketrans(''.join(self.TR_LENG + self.TR_UENG),
''.join(self.TR_LRUS + self.TR_URUS)))
else:
# translate to english
result_word = word.translate(str.maketrans(''.join(self.TR_LRUS + self.TR_URUS),
''.join(self.TR_LENG + self.TR_UENG)))
return result_word, is_cheat
def _main():
police = AntiCheater()
print(police.check_word('мoлoкo'))
print(police.check_word('milk'))
print(police.check_word('ёжuк'))
print(police.check_word('cмecb'))
print(police.check_word('вecтник'))
print(police.check_word('КНИГА'))
print(police.check_word('кyкушка'))
print(police.check_word('kykaреку'))
print(police.check_word('АВТOMOБИЛb'))
print(police.check_word('superДОМ'))
if __name__ == '__main__':
_main()
| 3.625
| 4
|
statsmodels/stats/moment_helpers.py
|
ginggs/statsmodels
| 6
|
12774990
|
<filename>statsmodels/stats/moment_helpers.py<gh_stars>1-10
"""helper functions conversion between moments
contains:
* conversion between central and non-central moments, skew, kurtosis and
cumulants
* cov2corr : convert covariance matrix to correlation matrix
Author: <NAME>
License: BSD-3
"""
import numpy as np
from scipy.special import comb
def _convert_to_multidim(x):
if any([isinstance(x, list), isinstance(x, tuple)]):
return np.array(x)
elif isinstance(x, np.ndarray):
return x
else:
        # something strange was passed and the function will probably
        # fail; maybe raise an exception here?
return x
def _convert_from_multidim(x, totype=list):
if len(x.shape) < 2:
return totype(x)
return x.T
def mc2mnc(mc):
"""convert central to non-central moments, uses recursive formula
optionally adjusts first moment to return mean
"""
x = _convert_to_multidim(mc)
def _local_counts(mc):
mean = mc[0]
mc = [1] + list(mc) # add zero moment = 1
mc[1] = 0 # define central mean as zero for formula
mnc = [1, mean] # zero and first raw moments
for nn, m in enumerate(mc[2:]):
n = nn + 2
mnc.append(0)
for k in range(n + 1):
mnc[n] += comb(n, k, exact=True) * mc[k] * mean ** (n - k)
return mnc[1:]
res = np.apply_along_axis(_local_counts, 0, x)
# for backward compatibility convert 1-dim output to list/tuple
return _convert_from_multidim(res)
def mnc2mc(mnc, wmean=True):
"""convert non-central to central moments, uses recursive formula
optionally adjusts first moment to return mean
"""
X = _convert_to_multidim(mnc)
def _local_counts(mnc):
mean = mnc[0]
mnc = [1] + list(mnc) # add zero moment = 1
mu = []
for n, m in enumerate(mnc):
mu.append(0)
for k in range(n + 1):
sgn_comb = (-1) ** (n - k) * comb(n, k, exact=True)
mu[n] += sgn_comb * mnc[k] * mean ** (n - k)
if wmean:
mu[1] = mean
return mu[1:]
res = np.apply_along_axis(_local_counts, 0, X)
# for backward compatibility convert 1-dim output to list/tuple
return _convert_from_multidim(res)
def cum2mc(kappa):
    """convert cumulants to central moments
    recursive formula produces as many central moments as cumulants
References
----------
<NAME>: Numerical Analysis for Statisticians, page 40
"""
X = _convert_to_multidim(kappa)
def _local_counts(kappa):
mc = [1, 0.0] # _kappa[0]] #insert 0-moment and mean
kappa0 = kappa[0]
kappa = [1] + list(kappa)
for nn, m in enumerate(kappa[2:]):
n = nn + 2
mc.append(0)
for k in range(n - 1):
mc[n] += comb(n - 1, k, exact=True) * kappa[n - k] * mc[k]
mc[1] = kappa0 # insert mean as first moments by convention
return mc[1:]
res = np.apply_along_axis(_local_counts, 0, X)
# for backward compatibility convert 1-dim output to list/tuple
return _convert_from_multidim(res)
def mnc2cum(mnc):
"""convert non-central moments to cumulants
recursive formula produces as many cumulants as moments
https://en.wikipedia.org/wiki/Cumulant#Cumulants_and_moments
"""
X = _convert_to_multidim(mnc)
def _local_counts(mnc):
mnc = [1] + list(mnc)
kappa = [1]
for nn, m in enumerate(mnc[1:]):
n = nn + 1
kappa.append(m)
for k in range(1, n):
num_ways = comb(n - 1, k - 1, exact=True)
kappa[n] -= num_ways * kappa[k] * mnc[n - k]
return kappa[1:]
res = np.apply_along_axis(_local_counts, 0, X)
# for backward compatibility convert 1-dim output to list/tuple
return _convert_from_multidim(res)
def mc2cum(mc):
"""
just chained because I have still the test case
"""
first_step = mc2mnc(mc)
if isinstance(first_step, np.ndarray):
first_step = first_step.T
return mnc2cum(first_step)
# return np.apply_along_axis(lambda x: mnc2cum(mc2mnc(x)), 0, mc)
def mvsk2mc(args):
"""convert mean, variance, skew, kurtosis to central moments"""
X = _convert_to_multidim(args)
def _local_counts(args):
mu, sig2, sk, kur = args
cnt = [None] * 4
cnt[0] = mu
cnt[1] = sig2
cnt[2] = sk * sig2 ** 1.5
cnt[3] = (kur + 3.0) * sig2 ** 2.0
return tuple(cnt)
res = np.apply_along_axis(_local_counts, 0, X)
# for backward compatibility convert 1-dim output to list/tuple
return _convert_from_multidim(res, tuple)
def mvsk2mnc(args):
"""convert mean, variance, skew, kurtosis to non-central moments"""
X = _convert_to_multidim(args)
def _local_counts(args):
mc, mc2, skew, kurt = args
mnc = mc
mnc2 = mc2 + mc * mc
mc3 = skew * (mc2 ** 1.5) # 3rd central moment
mnc3 = mc3 + 3 * mc * mc2 + mc ** 3 # 3rd non-central moment
mc4 = (kurt + 3.0) * (mc2 ** 2.0) # 4th central moment
mnc4 = mc4 + 4 * mc * mc3 + 6 * mc * mc * mc2 + mc ** 4
return (mnc, mnc2, mnc3, mnc4)
res = np.apply_along_axis(_local_counts, 0, X)
# for backward compatibility convert 1-dim output to list/tuple
return _convert_from_multidim(res, tuple)
def mc2mvsk(args):
"""convert central moments to mean, variance, skew, kurtosis"""
X = _convert_to_multidim(args)
def _local_counts(args):
mc, mc2, mc3, mc4 = args
skew = np.divide(mc3, mc2 ** 1.5)
kurt = np.divide(mc4, mc2 ** 2.0) - 3.0
return (mc, mc2, skew, kurt)
res = np.apply_along_axis(_local_counts, 0, X)
# for backward compatibility convert 1-dim output to list/tuple
return _convert_from_multidim(res, tuple)
def mnc2mvsk(args):
    """convert non-central moments to mean, variance, skew, kurtosis
"""
X = _convert_to_multidim(args)
def _local_counts(args):
# convert four non-central moments to central moments
mnc, mnc2, mnc3, mnc4 = args
mc = mnc
mc2 = mnc2 - mnc * mnc
mc3 = mnc3 - (3 * mc * mc2 + mc ** 3) # 3rd central moment
mc4 = mnc4 - (4 * mc * mc3 + 6 * mc * mc * mc2 + mc ** 4)
return mc2mvsk((mc, mc2, mc3, mc4))
res = np.apply_along_axis(_local_counts, 0, X)
# for backward compatibility convert 1-dim output to list/tuple
return _convert_from_multidim(res, tuple)
# def mnc2mc(args):
# """convert four non-central moments to central moments
# """
# mnc, mnc2, mnc3, mnc4 = args
# mc = mnc
# mc2 = mnc2 - mnc*mnc
# mc3 = mnc3 - (3*mc*mc2+mc**3) # 3rd central moment
# mc4 = mnc4 - (4*mc*mc3+6*mc*mc*mc2+mc**4)
# return mc, mc2, mc
# TODO: no return, did it get lost in cut-paste?
def cov2corr(cov, return_std=False):
"""
convert covariance matrix to correlation matrix
    Parameters
    ----------
    cov : array_like, 2d
        covariance matrix, see Notes
    return_std : bool
        If this is true then the standard deviation is also returned.
        By default only the correlation matrix is returned.
    Returns
    -------
    corr : ndarray (subclass)
        correlation matrix
Notes
-----
This function does not convert subclasses of ndarrays. This requires that
division is defined elementwise. np.ma.array and np.matrix are allowed.
"""
cov = np.asanyarray(cov)
std_ = np.sqrt(np.diag(cov))
corr = cov / np.outer(std_, std_)
if return_std:
return corr, std_
else:
return corr
def corr2cov(corr, std):
"""
convert correlation matrix to covariance matrix given standard deviation
Parameters
----------
corr : array_like, 2d
correlation matrix, see Notes
std : array_like, 1d
standard deviation
Returns
-------
cov : ndarray (subclass)
covariance matrix
Notes
-----
This function does not convert subclasses of ndarrays. This requires
that multiplication is defined elementwise. np.ma.array are allowed, but
not matrices.
"""
corr = np.asanyarray(corr)
std_ = np.asanyarray(std)
cov = corr * np.outer(std_, std_)
return cov
def se_cov(cov):
"""
get standard deviation from covariance matrix
just a shorthand function np.sqrt(np.diag(cov))
Parameters
----------
cov : array_like, square
covariance matrix
Returns
-------
std : ndarray
standard deviation from diagonal of cov
"""
return np.sqrt(np.diag(cov))
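# Minimal usage sketch: round-trip mean/variance/skew/kurtosis through central
# moments (the numbers below are made up).
if __name__ == "__main__":
    mvsk = (1.0, 4.0, 0.5, 1.2)
    central = mvsk2mc(mvsk)
    print(central)           # (mean, 2nd, 3rd, 4th central moment)
    print(mc2mvsk(central))  # recovers the original mvsk up to floating point error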
| 2.8125
| 3
|
app/controller/report/__init__.py
|
bhzunami/reanalytics
| 0
|
12774991
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from flask import Blueprint
report = Blueprint('report', __name__)
from . import views
| 1.21875
| 1
|
fte/plots_ports.py
|
jason-r-becker/financial-transfer-entropy
| 6
|
12774992
|
import time
from collections import defaultdict
from datetime import timedelta
import cvxpy as cp
import empiricalutilities as eu
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
from transfer_entropy import TransferEntropy
plt.style.use('fivethirtyeight')
# %%
eqs = 'SPY DIA XLK XLV XLF IYZ XLY XLP XLI XLE XLU XME IYR XLB XPH IWM PHO ' \
'SOXX WOOD FDN GNR IBB ILF ITA IYT KIE PBW ' \
'AFK EZA ECH EWW EWC EWZ EEM EIDO EPOL EPP EWA EWD EWG EWH EWJ EWI EWK ' \
'EWL EWM EWP EWQ EWS EWT EWU EWY GXC HAO EZU RSX TUR'.split()
fi = 'AGG SHY IEI IEF TLT TIP LQD HYG MBB'.split()
cmdtys = 'GLD SLV DBA DBC USO UNG'.split()
fx = 'FXA FXB FXC FXE FXF FXY'.split()
assets = eqs + fi + cmdtys + fx
def cum_rets(rets):
cum_rets = []
cum_rets.append(1)
for i, ret in enumerate(rets):
cum_rets.append(cum_rets[i]*(1+ret))
return cum_rets
# %%
ete_mats = {}
mod = TransferEntropy(assets=assets)
period = 'Q'
months = mod.prices.index.to_period(period).unique().to_timestamp()
iters = len(months)-24
with tqdm(total=iters) as pbar:
for start, end in zip(months[:-1], months[1:]):
end -= timedelta(1)
mod.set_timeperiod(start, end)
mod.compute_effective_transfer_entropy(sims=30, bins=6,
std_threshold=1)
ete = mod.ete.copy()
ete_mats[start] = ete
pbar.update(1)
ete_df = pd.concat(ete_mats)
ete_df.to_csv(f'../ete_{period}.csv')
# %%
q = 4
res = defaultdict(dict)
mod = TransferEntropy(assets=assets)
iters = len(months)-1
for start, end in zip(months[:-1], months[1:]):
ete = ete_mats[start]
ete_out = ete.sum(axis=0)
ete_in = ete.sum(axis=1)
end -= timedelta(1)
mod.set_timeperiod(start, end)
returns = mod.prices.iloc[-1]/mod.prices.iloc[0]-1
vols = mod.data.std()
names = 'eteout etein etenetout etetotal'.split()
for name, ETE in zip(names, [ete_out, ete_in,
ete_out-ete_in, ete_in+ete_out]):
df = pd.DataFrame({'returns': returns, 'vol': vols, name: ETE})
df['q'] = pd.qcut(ETE, q=q, labels=False)
res[name][start] = df.groupby('q').agg('mean').reset_index().copy()
# %%
q_rets = {}
for name in names:
resdf = res[name]
resdf = pd.concat(resdf)
resdf.index = resdf.index.droplevel(1)
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
cmap = sns.color_palette('Blues_r', n_colors=4)
for c, qtile in zip(cmap, range(q)):
q_rets[qtile] = resdf[resdf['q']==qtile]['returns'].values
ax.plot(months, cum_rets(q_rets[qtile]), c=c,
lw=2, alpha=1, label=f'Quartile {qtile+1}')
fig.autofmt_xdate()
plt.ylabel('Cumulative Return')
plt.xlabel('Time')
plt.legend()
plt.tight_layout()
plt.savefig(f'../plots/{name}_quartile_returns.png', dpi=300)
eu.latex_figure(f'../data/plots/{name}_quartile_returns.png')
# %%
for name in names:
table = defaultdict(dict)
resdf = res[name]
resdf = pd.concat(resdf)
resdf.index = resdf.index.droplevel(1)
for qtile in range(q):
table[qtile]['r'] = resdf[resdf['q']==qtile]['returns'].mean()*12
table[qtile]['v'] = resdf[resdf['q']==qtile]['returns'].std()*np.sqrt(12)
table[qtile][name] = resdf[resdf['q']==qtile][name].mean()
table = pd.DataFrame.from_dict(table, orient='index')
table['sr'] = table['r']/table['v']
table = table.reset_index()
table = table[['index', 'r', 'v', 'sr', name]]
cols = 'Quartile Return Volatility Sharpe'.split()
cols += [name]
table.columns = cols
table['Quartile'] += 1
table[['Return', 'Volatility']] *= 100
eu.latex_print(table, prec=2, hide_index=True)
# %%
def get_CAPM_weights(er, cov, gamma):
n = cov.shape[0]
w = cp.Variable((n, 1))
gamma = cp.Parameter(nonneg=True, value=gamma)
ret = w.T @ er
risk = cp.quad_form(w, cov)
constraints = [
cp.sum(w) == 1,
w <= 0.1,
w >= 0,
ret >= 0.02,
]
obj = cp.Maximize(ret - gamma*risk)
prob = cp.Problem(obj, constraints)
prob.solve()
return w.value
def get_MV_weights(er, cov):
n = cov.shape[0]
w = cp.Variable((n, 1))
ret = w.T @ er
risk = cp.quad_form(w, cov)
constraints = [
cp.sum(w) == 1,
w <= 0.1,
w >= 0,
ret >= 0.02,
]
obj = cp.Minimize(risk)
prob = cp.Problem(obj, constraints)
prob.solve()
return w.value
def get_weights(er, start, ete):
n = len(ete)
w = cp.Variable((n, 1))
ret = w.T @ er
obj = cp.Minimize(w.T @ (ete))
constraints = [
cp.sum(w) == 1,
w <= 0.1,
w >= 0,
ret >= 0.02,
]
prob = cp.Problem(obj, constraints)
prob.solve()
return w.value
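# Sketch of how the optimizers above are called (the inputs are illustrative
# placeholders; n must be large enough that the w <= 0.1 cap and sum(w) == 1
# constraints stay feasible):
#   n = 20
#   er = np.full((n, 1), 0.02)
#   cov = np.eye(n) * 0.04
#   w_mv = get_MV_weights(er, cov)
#   w_capm = get_CAPM_weights(er, cov, gamma=1.0)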
# %%
ete_mats = pd.read_csv('../ete_Q.csv', index_col=[0, 1], parse_dates=True,
infer_datetime_format=True)
ete_mats = ete_mats[assets].copy()
mod = TransferEntropy(assets=assets)
mo_df = mod.prices.resample('Q').last()
mo_ret_df = (mo_df/mo_df.shift(1).values-1).dropna()
EXP_RETS = mo_ret_df.mean().values.reshape(-1, 1)
e_perf = []
e_perf_ete = []
mv_perf = []
mv_perf_ete = []
capm = defaultdict(list)
capm_ete = defaultdict(list)
gammas = [0.1, 1, 10]
with tqdm(total=iters) as pbar:
for start, end in zip(months[:-1], months[1:]):
end -= timedelta(1)
mod.set_timeperiod(start, end)
# get month's returns, cov, and ete matrices
cov = np.cov(mod.data.values, rowvar=False)
ete_mat = ete_mats.loc[start]
ete_mat = ete_mat.T[assets].T.values.copy()
ete_out = ete_mat.sum(axis=0).reshape(-1, 1)
ete_in = ete_mat.sum(axis=1).reshape(-1, 1)
net_out = ete_out - ete_in
r = (mod.prices.iloc[-1]/mod.prices.iloc[0]-1).values
# get strategy weights
we = get_weights(EXP_RETS, start, net_out)
wmv = get_MV_weights(EXP_RETS, cov)
e_perf.append(np.squeeze(we.T @ r))
e_perf_ete.append(np.squeeze(we.T @ net_out))
mv_perf.append(np.squeeze(wmv.T @ r))
mv_perf_ete.append(np.squeeze(wmv.T @ net_out))
for gamma in gammas:
w_capm = get_CAPM_weights(EXP_RETS, cov, gamma)
capm[gamma].append(np.squeeze(w_capm.T @ r))
capm_ete[gamma].append(np.squeeze(w_capm.T @ net_out))
pbar.update(1)
# %%
alpha=0.75
lw=2
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
cmap2 = sns.color_palette('Reds_r', n_colors=len(gammas)*2)
ax.plot(months, cum_rets(e_perf), alpha=alpha,
label='ETE', lw=lw, c='steelblue')
ax.plot(months, cum_rets(mv_perf), alpha=alpha,
label='MV', lw=lw, c='forestgreen')
for i, gamma in enumerate(reversed(gammas[1:])):
ax.plot(months, cum_rets(capm[gamma]), alpha=alpha,
label=f'CAPM $\\gamma={gamma}$', lw=lw, c=cmap2[i])
fig.autofmt_xdate()
plt.ylabel('Cumulative Return')
plt.xlabel('Time')
plt.legend()
plt.tight_layout()
eu.save_fig(f'../plots/portfolio_comparison', dpi=300)
plt.show()
eu.latex_figure(f'../plots/portfolio_comparison')
# %%
tbl = pd.DataFrame({
'ETE': e_perf,
'MV': mv_perf,
'CAPM 1': capm[1],
'CAPM 10': capm[10],
}, index=months[1:])
tbl = (tbl.mean()*4).to_frame().join((tbl.std()*np.sqrt(4)).to_frame(),
rsuffix='vol')
tbl.columns = 'Return Volatility'.split()
tbl['Sharpe'] = tbl['Return']/tbl['Volatility']
tbl['Return'] *= 100
tbl['Volatility'] *= 100
tbl2 = pd.DataFrame({
'ETE': e_perf_ete,
'MV': mv_perf_ete,
'CAPM 1': capm_ete[1],
'CAPM 10': capm_ete[10],
}, index=months[1:])
tbl = tbl.join(tbl2.mean().to_frame())
tbl.columns = 'Return Volatility Sharpe ETE'.split()
eu.latex_print(tbl, prec=2)
| 1.96875
| 2
|
web/controllers/home/user/Profile.py
|
apanly/python_learn_master
| 5
|
12774993
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from flask import Blueprint,request
from application import db
from common.components.helper.UtilHelper import UtilHelper
from common.components.helper.ValidateHelper import ValidateHelper
from common.models.notice.UserNews import UserNews
from common.services.CommonConstant import CommonConstant
from common.services.CurrentUserService import CurrentUserService
from common.models.rbac.User import ( User )
route_home_profile = Blueprint('home_profile_page', __name__)
@route_home_profile.route("/")
@route_home_profile.route("/index")
def home_index():
return UtilHelper.renderView( "home/user/profile/index.html",{ "info":CurrentUserService.getInfo() } )
@route_home_profile.route("/set_info",methods=[ "GET","POST" ])
def set_info():
if UtilHelper.isGet():
return UtilHelper.renderPopView("home/user/profile/set_info.html", {"info": CurrentUserService.getInfo() })
req = request.values
name = req.get("name", "").strip()
email = req.get("email", "").strip()
if not ValidateHelper.validLength(name, 1, 10):
return UtilHelper.renderErrJSON("请输入符合规范的姓名~~")
if not ValidateHelper.validEMail(email):
return UtilHelper.renderErrJSON("请输入符合规范的邮箱~~")
info = CurrentUserService.getInfo()
if not info:
return UtilHelper.renderErrJSON( CommonConstant.SYSTEM_DEFAULT_ERROR )
has_in = User.query.filter(User.email == email, User.id != info.id ).first()
if has_in:
return UtilHelper.renderErrJSON("该邮箱已存在,请换一个再试~~")
info.name = name
info.email = email
db.session.add(info)
db.session.commit()
return UtilHelper.renderSucJSON()
@route_home_profile.route("/news")
def home_news():
req = request.values
kw = req.get("kw", "").strip()
status = int( req.get("status",CommonConstant.default_status_neg_99) )
page = int(req.get("p", 1))
query = UserNews.query.filter_by( uid = CurrentUserService.getUid() )
if kw:
query = query.filter(UserNews.title.ilike('%{}%'.format(kw)))
if status > CommonConstant.default_status_neg_99:
query = query.filter_by( status = status )
page_params = {
"total": query.count(),
"page_size": CommonConstant.PAGE_SIZE,
"page": page,
"display": CommonConstant.PAGE_DISPLAY
}
pages = UtilHelper.iPagination(page_params)
offset = (page - 1) * CommonConstant.PAGE_SIZE
limit = CommonConstant.PAGE_SIZE * page
list = query.order_by(UserNews.id.desc())[offset:limit]
sc = {
'kw': kw,
'status': status
}
return UtilHelper.renderView("home/user/profile/news.html",{"list": list,"pages":pages,"sc":sc })
@route_home_profile.route("/news/ops",methods=[ "POST" ])
def news_ops():
req = request.values
id = int(req['id']) if ('id' in req and req['id']) else 0
if not id:
return UtilHelper.renderErrJSON( CommonConstant.SYSTEM_DEFAULT_ERROR )
info = UserNews.query.filter_by( id = id,uid = CurrentUserService.getUid() ).first()
if not info:
return UtilHelper.renderErrJSON( "指定站内信不存在" )
info.status = CommonConstant.default_status_true
db.session.add( info )
db.session.commit()
return UtilHelper.renderSucJSON()
@route_home_profile.route("/news/batch_ops",methods=[ "POST" ])
def news_batch_ops():
UserNews.query.filter_by( uid = CurrentUserService.getUid() ).update({"status": CommonConstant.default_status_true })
db.session.commit()
return UtilHelper.renderSucJSON()
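# Sketch of registering this blueprint on the application (the app object and URL
# prefix below are assumptions, not part of this file):
#   app.register_blueprint(route_home_profile, url_prefix="/user/profile")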
| 2.125
| 2
|
telltime/main.py
|
wjimenez5271/telltime
| 1
|
12774994
|
from datetime import datetime
from time import time
from argparse import ArgumentParser
def epoch_to_datetime(epoch):
return datetime.fromtimestamp(epoch)
def get_epoch_time():
return time()
def main():
parser = ArgumentParser()
parser.add_argument('epoch_time', type=int, default=-1, nargs='?')
parser.add_argument('--verbose_format', action='store_true')
args = parser.parse_args()
dt = epoch_to_datetime(args.epoch_time)
if args.epoch_time == -1:
print "Current epoch time: {}".format(get_epoch_time())
elif args.verbose_format:
print dt.strftime("%A %d. %B %Y %I:%M:%S %p" )
else:
print dt.isoformat()
if __name__ == '__main__':
main()
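# Example invocations (a sketch):
#   python main.py                              # prints the current epoch time
#   python main.py 1609459200                   # ISO-format datetime for that epoch
#   python main.py 1609459200 --verbose_format  # verbose date format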
| 3.46875
| 3
|
polus-cell-nuclei-segmentation/src/dsb2018_topcoders/albu/src/pytorch_zoo/inplace_abn/models/__init__.py
|
nishaq503/polus-plugins-dl
| 0
|
12774995
|
from .wider_resnet import *
| 1.140625
| 1
|
some-ml-examples/PyDataSeattle-master/check_environment.py
|
kryvokhyzha/examples-and-courses
| 1
|
12774996
|
<gh_stars>1-10
import importlib
packages = ['pandas', 'IPython', 'statsmodels', 'sklearn', 'seaborn',
'toolz', 'bs4', 'requests', 'scipy', 'tables']
bad = []
for package in packages:
try:
importlib.import_module(package)
except ImportError:
bad.append("Can't import %s" % package)
if len(bad) > 0:
    print('\n'.join(bad))
else:
    from sklearn.datasets import california_housing
    print("Caching california_housing")
    data = california_housing.fetch_california_housing()
    print("All good. Enjoy the tutorial!")
| 2.609375
| 3
|
preprocessing.py
|
Draeius/SVM_TxtCat
| 0
|
12774997
|
import re
from typing import List
from collections import Counter
from data import Article
class Process:
def process(self, words: List[str]) -> List[str]:
pass
class Strategy:
_lastLetters = List[str]
_measure = List[int]
_containsVowel = False
_doubleConsonant = False
_endsWithPattern = False
_pattern = ".*[b-df-hj-np-tv-z][aiueo][b-df-hj-np-tvz].{{{suffixlen}}}$"
_applyRegex = ""
_suffixLen = 0
_replacement = ""
_suffix = ""
def apply(self, word: str) -> str:
"""
        Applies this strategy to the given word regardless of whether it is applicable or not.
"""
return re.sub(self._applyRegex, self._replacement, word, 0)
def isApplicable(self, word: str, wordMeasure: int) -> str:
"""
Checks if this strategy is applicable or not
"""
if len(word) <= self._suffixLen:
return False
#word does not end with the suffix of this strategy
if not word.endswith(self._suffix):
return False
#measure is important, if it is not empty
#wordMeasure has to be in measure for this strategy to be applicable
if self._measure and wordMeasure not in self._measure:
return False
#last letter is important
if self._lastLetters:
#test all letters that matter
found = False
for letter in self._lastLetters:
#check letter
if self.lastLetterEquals(word, letter):
found = True
#did not find any
if not found:
return False
#vowels matter
if self._containsVowel:
#check for vowels
vowel = self.containsVowel(word)
#did not find any
if not vowel:
return False
#needs a double consonant before the suffix
if self._doubleConsonant:
consonant = self.doubleConsonant(word)
#did not find a consonant
if not consonant:
return False
#needs a specific pattern before the suffix
if self._endsWithPattern:
pattern = self.endsWithPattern(word)
#did not find it
if not pattern:
return False
#all conditions met
return True
def lastLetterEquals(self, text: str, letter: str) -> bool:
"""
Checks if the last letter before the suffix is the given letter
"""
#get last letter of the word stem in lower case
return text[-1 * self._suffixLen - 1].lower() == letter
def containsVowel(self, text: str) -> bool:
"""
Checks for vowels in the given word
"""
return any(char in ["a", "i", "u", "e", "o", "A", "I", "U", "E", "O"]
for char in text[:-1 * self._suffixLen])
def doubleConsonant(self, text: str) -> bool:
"""
checks if the letter before the suffix is a double consonant
"""
if (self._suffixLen + 1) >= len(text):
return False
#last stem letter is a consonant
if (text[-1 * self._suffixLen -
1].lower() not in ["a", "i", "u", "e", "o"]):
#check for double consonant
return text[-1 * self._suffixLen -
1].lower() == text[-1 * self._suffixLen - 2].lower()
return False
def endsWithPattern(self, text: str) -> bool:
"""
        Checks if the word before the suffix ends with a given pattern
"""
if self._invertPattern:
return not re.search(self._pattern, text)
return re.search(self._pattern, text)
def __init__(self,
suffix: str,
replacement: str,
lastLetters: List[str],
measure: List[int],
containsVowel: bool,
doubleConsonant: bool,
endsWithPattern: bool,
invertPattern=False):
self._suffixLen = len(suffix)
self._pattern = re.compile(
self._pattern.format(suffixlen=self._suffixLen), re.IGNORECASE)
self._measure = measure
self._lastLetters = lastLetters
self._containsVowel = containsVowel
self._doubleConsonant = doubleConsonant
self._endsWithPattern = endsWithPattern
self._applyRegex = re.compile(suffix + "$", re.IGNORECASE)
self._replacement = replacement
self._suffix = suffix
self._invertPattern = invertPattern
class Tokenizer:
def __init__(self, keepPunctuation: bool, keepCaps: bool):
self._keepPunctuation = keepPunctuation
self._keepCaps = keepCaps
def erasePunctuation(self, text: str) -> str:
replacements = {
",": " ",
".": " ",
";": " ",
":": " ",
"/": " ",
"(": " ",
")": " ",
"{": " ",
"}": " ",
"+": " ",
"-": " ",
"<": " ",
">": " ",
'"': " ",
"'": " ",
"*": " ",
"!": " ",
"?": " ",
"^": " ",
"\u007f": ""
}
return "".join([replacements.get(c, c) for c in text])
def tokenize(self, text: str) -> List[str]:
if not self._keepPunctuation:
text = self.erasePunctuation(text)
if not self._keepCaps:
text = text.lower()
return text.split()
class SingleLetterStrategy(Strategy):
def apply(self, word: str) -> str:
return word[:-1]
def isApplicable(self, word: str, wordMeasure: int) -> bool:
return self.doubleConsonant(word) and not (self.lastLetterEquals(
word, "l") or self.lastLetterEquals(
word, "s") or self.lastLetterEquals(word, "z"))
class Stemmer(Process):
_measureRegex = "^[b-df-hj-np-tv-z]*([aiueo]+[b-df-hj-np-tv-z]+){{{}}}[aiueo]*$"
#-----------------------------------------------------------------------------------
#a whole lot of Porter's stemming rules...
_step1a = [
Strategy("sses", "ss", [], [], False, False, False),
Strategy("ies", "i", [], [], False, False, False),
Strategy("ss", "ss", [], [], False, False, False),
Strategy("s", "", [], [], False, False, False)
]
_step1b = [
Strategy("eed", "ee", [], range(1, 100), False, False, False),
Strategy("ed", "", [], [], True, False, False),
Strategy("ing", "", [], [], True, False, False)
]
_step1bnext = [
Strategy("at", "ate", [], [], False, False, False),
Strategy("bl", "ble", [], [], False, False, False),
Strategy("iz", "ize", [], [], False, False, False),
SingleLetterStrategy("", "", [
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "m", "n",
"o", "p", "q", "r", "t", "u", "v", "w", "x", "y"
], [], False, True, False),
Strategy("", "e", [], [1], False, False, True)
]
_step1c = [Strategy("y", "i", [], [], True, False, False)]
_step2 = [
Strategy("ational", "ate", [], range(1, 100), False, False, False),
Strategy("tional", "tion", [], range(1, 100), False, False, False),
Strategy("enci", "ence", [], range(1, 100), False, False, False),
Strategy("anci", "ance", [], range(1, 100), False, False, False),
Strategy("izer", "ize", [], range(1, 100), False, False, False),
Strategy("abli", "able", [], range(1, 100), False, False, False),
Strategy("alli", "al", [], range(1, 100), False, False, False),
Strategy("entli", "ent", [], range(1, 100), False, False, False),
Strategy("eli", "e", [], range(1, 100), False, False, False),
Strategy("ousli", "ous", [], range(1, 100), False, False, False),
Strategy("ization", "ize", [], range(1, 100), False, False, False),
Strategy("ation", "ate", [], range(1, 100), False, False, False),
Strategy("ator", "ate", [], range(1, 100), False, False, False),
Strategy("alism", "al", [], range(1, 100), False, False, False),
Strategy("iveness", "ive", [], range(1, 100), False, False, False),
Strategy("fulness", "ful", [], range(1, 100), False, False, False),
Strategy("ousness", "ous", [], range(1, 100), False, False, False),
Strategy("aliti", "al", [], range(1, 100), False, False, False),
Strategy("iviti", "ive", [], range(1, 100), False, False, False),
Strategy("biliti", "ble", [], range(1, 100), False, False, False)
]
_step3 = [
Strategy("icate", "ic", [], range(1, 100), False, False, False),
Strategy("ative", "", [], range(1, 100), False, False, False),
Strategy("alize", "al", [], range(1, 100), False, False, False),
Strategy("icite", "ic", [], range(1, 100), False, False, False),
Strategy("ical", "ic", [], range(1, 100), False, False, False),
Strategy("ful", "", [], range(1, 100), False, False, False),
Strategy("ness", "", [], range(1, 100), False, False, False)
]
_step4 = [
Strategy("al", "", [], range(2, 100), False, False, False),
Strategy("ance", "", [], range(2, 100), False, False, False),
Strategy("ence", "", [], range(2, 100), False, False, False),
Strategy("er", "", [], range(2, 100), False, False, False),
Strategy("ic", "", [], range(2, 100), False, False, False),
Strategy("able", "", [], range(2, 100), False, False, False),
Strategy("ible", "", [], range(2, 100), False, False, False),
Strategy("ant", "", [], range(2, 100), False, False, False),
Strategy("ement", "", [], range(2, 100), False, False, False),
Strategy("ment", "", [], range(2, 100), False, False, False),
Strategy("ent", "", [], range(2, 100), False, False, False),
Strategy("ion", "", ["s", "t"], range(2, 100), False, False, False),
Strategy("ou", "", [], range(2, 100), False, False, False),
Strategy("ism", "", [], range(2, 100), False, False, False),
Strategy("ate", "", [], range(2, 100), False, False, False),
Strategy("iti", "", [], range(2, 100), False, False, False),
Strategy("ous", "", [], range(2, 100), False, False, False),
Strategy("ive", "", [], range(2, 100), False, False, False),
Strategy("ize", "", [], range(2, 100), False, False, False)
]
_step5a = [
Strategy("e", "", [], range(2, 100), False, False, False),
Strategy("e", "", [], [1], False, False, False, True)
]
_step5b = [
SingleLetterStrategy("e", "", ["l"], range(2, 100), False, True, False)
]
#-----------------------------------------------------------------------------------
def process(self, words: List[str]) -> List[str]:
indices = range(len(words))
for index in indices:
wordMeasure = self.getMeasure(words[index])
# Step 1 ----------------------------------------------------------
words[index] = self.applyList(self._step1a, words[index],
wordMeasure)[0]
step1b = self.applyList(self._step1b, words[index], wordMeasure)
words[index] = step1b[0]
if step1b[1] == 2 or step1b[1] == 3:
words[index] = self.applyList(self._step1bnext, words[index],
wordMeasure)[0]
words[index] = self.applyList(self._step1c, words[index],
wordMeasure)[0]
# Step 2 ----------------------------------------------------------
words[index] = self.applyList(self._step2, words[index],
wordMeasure)[0]
# Step 3 ----------------------------------------------------------
words[index] = self.applyList(self._step3, words[index],
wordMeasure)[0]
# Step 4 ----------------------------------------------------------
words[index] = self.applyList(self._step4, words[index],
wordMeasure)[0]
# Step 5 ----------------------------------------------------------
words[index] = self.applyList(self._step5a, words[index],
wordMeasure)[0]
words[index] = self.applyList(self._step5b, words[index],
wordMeasure)[0]
return words
def applyList(self, strategies, word: str, wordMeasure: int) -> str:
#apply porter strategies
counter = 0
for strat in strategies:
counter += 1
if strat.isApplicable(word, wordMeasure):
return [strat.apply(word), counter]
return [word, counter]
def getMeasure(self, word: str) -> int:
#get porters word measure
for index in range(100):
if re.search(self._measureRegex.format(index), word,
re.IGNORECASE):
return index
return -1
class StopwordEraser(Process):
stopwords = [
"mln", "dlr", "reuters", "\x03", 'a', 'about', 'above', 'after', 'again', 'against', 'all', 'am', 'an', 'and', 'any', 'are', "aren't", 'as', 'at', 'be',
'because', 'been', 'before', 'being', 'below', 'between', 'both', 'but', 'by', "can't", 'cannot', 'could', "couldn't", 'did', "didn't", 'do', 'does', "doesn't",
'doing', "don't", 'down', 'during', 'each', 'few', 'for', 'from', 'further', 'had', "hadn't", 'has', "hasn't", 'have', "haven't", 'having', 'he', "he'd",
"he'll", "he's", 'her', 'here', "here's", 'hers', "herself'", 'him', 'himself', 'his', 'how', "how's", 'i', "i'd", "i'll", "i'm", "i've", "if'", 'in', 'into', 'is',
"isn't", 'it', "it's", 'its', 'itself', "let's", 'me', 'more', 'most', "mustn't", 'my', 'myself', 'no', 'nor', 'not', 'of', 'off', 'on', 'once', 'only', 'or', 'other',
'ought', 'our', 'ours', 'ourselves', 'out', 'over', 'own', 'same', "shan't", 'she', "she'd", "she'll", "she's", 'should', "shouldn't", 'so', 'some', 'such', 'than',
'that', "that's", 'the', "their'", 'theirs', 'them', 'themselves', 'then', 'there', "there's", 'these', 'they', "they'd", "they'll", "they're", "they've", 'this',
'those', 'through', 'to', 'too', 'under', 'until', 'up', 'very', 'was', "wasn't", 'we', "we'd", "we'll", "we're", "we've", 'were', "weren't", 'what', "what's", 'when',
"when's", 'where', "where's", 'which', 'while', 'who', "who's", 'whom', 'why', "why's", 'with', "won't", 'would', "wouldn't", 'you', "you'd", "you'll", "you're", "you've",
'your', 'yours', 'yourself', 'yourselves'
]
def process(self, words: List[str]) -> List[str]:
#only return words that are not in the stopword array
return [word for word in words if not word in self.stopwords]
class NumberEraser(Process):
def process(self, words: List[str]) -> List[str]:
#find all numbers and replace with /number/
regex = re.compile(r".*\d")
for index in range(len(words)):
if re.match(regex, words[index]):
words[index] = "/number/"
return words
class GarbageEraser(Process):
_blaRegex = r"^b+l+a+$"
_noVocalRegex = r"^[b-df-hj-np-tv-z]*$"
def process(self, words: List[str]) -> List[str]:
blaRegex = re.compile(self._blaRegex)
noVocalRegex = re.compile(self._noVocalRegex)
        # there are seriously a whole lot of "bla"s in these articles .... -.-
return [
word for word in words
if (not re.match(blaRegex, word)) and len(word) > 1 and (
not re.match(noVocalRegex, word))
]
class IllicitWordEraser(Process):
def __init__(self, allowedWords: List[str]):
self._allowedWords = allowedWords
def process(self, words: List[str]) -> List[str]:
#return only words that are in allowedWords list
return [word for word in words if word in self._allowedWords]
class Preprocessor:
#regex template to test for words
#_regexTemplate = "^[b-df-hj-np-tv-z]*([aiueo]+[b-df-hj-np-tv-z]+){{{}}}[aiueo]*$"
def addProcessor(self, process: Process) -> None:
self._processors.append(process)
def process(self, article: Article) -> Article:
#process the given article
#first: tokenization
words = self._tokenizer.tokenize(article.text)
#all the other preprocessors
for proc in self._processors:
words = proc.process(words)
#count the words
self._counter.update(words)
#pass a copy to the article.
#copy is necessary to not count all words in all articles
article.preprocessed = self._counter.copy()
#reset counter
self.resetCounter()
return article
def resetCounter(self) -> None:
for index in self._counter:
self._counter[index] = 0
def __init__(self, allowedWords):
self._processors = []
self._tokenizer = Tokenizer(False, False)
self._counter = Counter(allowedWords)
class PreprocessorFactory:
instance = None
@staticmethod
def FACTORY(allowedWords: List[str]) -> Preprocessor:
# create standard Preprocessor
if PreprocessorFactory.instance == None:
preprocessor = Preprocessor(allowedWords)
preprocessor.addProcessor(StopwordEraser())
preprocessor.addProcessor(NumberEraser())
preprocessor.addProcessor(GarbageEraser())
preprocessor.addProcessor(Stemmer())
preprocessor.addProcessor(IllicitWordEraser(allowedWords))
PreprocessorFactory.instance = preprocessor
return preprocessor
return PreprocessorFactory.instance
@staticmethod
def CACHE_FACTORY() -> Preprocessor:
#create a preprocessor to build the cache
preprocessor = Preprocessor([])
preprocessor.addProcessor(StopwordEraser())
preprocessor.addProcessor(NumberEraser())
preprocessor.addProcessor(GarbageEraser())
preprocessor.addProcessor(Stemmer())
return preprocessor
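# Minimal usage sketch of the tokenizer and Porter-style stemmer defined above
# (running this module directly still requires the project's data.py for the
# Article import at the top of the file).
if __name__ == "__main__":
    words = Tokenizer(False, False).tokenize("Hopping ponies, 42 caresses!")
    print(Stemmer().process(words))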
| 3.3125
| 3
|
applications/CoSimulationApplication/python_scripts/helpers/dummy_solver_wrapper.py
|
lcirrott/Kratos
| 2
|
12774998
|
<reponame>lcirrott/Kratos
from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# Importing the Kratos Library
import KratosMultiphysics as KM
# Importing the base class
from KratosMultiphysics.CoSimulationApplication.base_classes.co_simulation_solver_wrapper import CoSimulationSolverWrapper
# Other imports
import KratosMultiphysics.CoSimulationApplication.co_simulation_tools as cs_tools
def Create(settings, solver_name):
return DummySolverWrapper(settings, solver_name)
class DummySolverWrapper(CoSimulationSolverWrapper):
"""This class serves as dummy for testing, it does not solve anything
It only imports a mesh that can be used in the testing workflow
Note that this is only an example, other configurations are of course also possible
"""
def __init__(self, settings, solver_name):
super(DummySolverWrapper, self).__init__(settings, solver_name)
self.time_step = self.settings["solver_wrapper_settings"]["time_step"].GetDouble()
self.model_part = self.model.CreateModelPart(self.settings["solver_wrapper_settings"]["main_model_part_name"].GetString())
self.model_part.ProcessInfo[KM.DOMAIN_SIZE] = self.settings["solver_wrapper_settings"]["domain_size"].GetInt()
cs_tools.AllocateHistoricalVariablesFromCouplingData(self.data_dict.values(), self.model, self.name)
def Initialize(self):
severity = KM.Logger.GetDefaultOutput().GetSeverity()
KM.Logger.GetDefaultOutput().SetSeverity(KM.Logger.Severity.WARNING) # mute MP-IO
model_part_io = KM.ModelPartIO(self.settings["solver_wrapper_settings"]["mdpa_file_name"].GetString())
model_part_io.ReadModelPart(self.model_part)
KM.Logger.GetDefaultOutput().SetSeverity(severity)
super(DummySolverWrapper, self).Initialize()
def AdvanceInTime(self, current_time):
return current_time + self.time_step
def PrintInfo(self):
cs_tools.cs_print_info("DummySolver", self._ClassName())
## TODO print additional stuff with higher echo-level
| 2.09375
| 2
|
external_packages/matlab/non_default_packages/Gaussian_Process/deck/+dk/+mapred/python/mapred_build.py
|
ThomasYeoLab/Standalone_He2022_MM
| 0
|
12774999
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import argparse
import string
import json
import mapred_utils as util
# ------------------------------ ========== ------------------------------
# ------------------------------ ========== ------------------------------
# Template strings to be formatted and saved as bash scripts.
'''
Line to appear in the task file, running a single worker.
Matlab is run with singleCompThread, but multi-threaded mex can still be executed.
'''
TPL_MAP = string.Template("""matlab -singleCompThread -nodisplay -r "cd '${startdir}'; startup; cd '${workdir}'; obj = ${classname}(); obj.run_worker('${savedir}',${workerid}); exit(0);" """)
'''
The reduce script basically aggregates all the worker's results into a single MAT file.
'''
TPL_REDUCE = string.Template("""#!/bin/bash
matlab -singleCompThread -nodisplay -r "cd '${startdir}'; startup; cd '${workdir}'; obj = ${classname}(); obj.run_reduce('${savedir}'); exit(0);" """)
'''
The submission scripts submits a job-array defined in the task file (map-phase),
and a reduce job waiting for completion of the job array.
'''
TPL_SUBMIT = string.Template("""#!/bin/bash
# remove info in all job subfolders
for folder in job_*; do
[ -f $${folder}/info.json ] && rm -f $${folder}/info.json
done
# submit map/reduce job to the cluster
mid=$$(fsl_sub -q ${queue} -M ${email} -m ${mailopt} ${threads} -N ${jobname} -l "${logdir}" -t "${mapscript}")
rid=$$(fsl_sub -j $${mid} -q ${queue} -M ${email} -m ${mailopt} -N ${jobname} -l "${logdir}" ./"${redscript}")
# Show IDs
echo "Submitted map with ID $${mid} and reduce with ID $${rid}. Use qstat and mapred_status to monitor the progress."
""")
'''
Runworker can be used ad hoc to run the desired worker locally with nohup.
'''
TPL_RUNWORKER = string.Template("""#!/bin/bash
if [ $$# -lt 1 ]; then
echo "Usage: runworker <WorkerID>"
fi
nohup nice \\
matlab -singleCompThread -nodisplay \\
-r "cd '${startdir}'; startup; cd '${workdir}'; obj = ${classname}(); obj.run_worker('${savedir}',$$1); exit;" \\
>| "${logdir}/runworker_$${1}.log" 2>&1 &
echo "Running with pid $$!."
""")
'''
Message to be displayed if an existing configuration is found in the target folder.
'''
MSG_WARN = """WARNING:
Another configuration was found in folder '%s', and it looks compatible with the current one.
Going through with this build might result in OVERWRITING existing results.
The options in the current configuration are:\n%s
The options in the existing configuration are:\n%s
Do you wish to proceed with the build?"""
# ------------------------------ ========== ------------------------------
# ------------------------------ ========== ------------------------------
def check_existing(cfg):
folder = cfg['folders']['save']
if os.path.isdir(folder):
# If the reduced file already exists
redfile = os.path.join( folder, cfg['files']['reduce'] )
assert not os.path.isfile(redfile), \
'Reduced file "%s" already exists, either back it up or change "files.reduce" field.' % (redfile)
# If any of the workers outputs already exists
nworkers = len(cfg['exec']['workers'])
for i in xrange(nworkers):
workerfile = os.path.join( folder, cfg['files']['worker'] % (i+1) )
assert not os.path.isfile(workerfile), \
'Worker file "%s" already exists, either back it up or change "files.worker" field.' % (workerfile)
# If there is an existing config ..
cfgfile = os.path.join( folder, 'config/config.json' )
if os.path.isfile(cfgfile):
# .. make sure it is compatible with the current one
other = util.read_json(cfgfile)
assert other['id'] == cfg['id'], \
'Id mismatch with existing configuration "%s".' % (cfgfile)
assert len(other['exec']['jobs']) == len(cfg['exec']['jobs']), \
'Number of jobs mismatch with existing configuration "%s".' % (cfgfile)
# format options as strings for comparison
opt_new = json.dumps( cfg['exec']['options'], indent=4 )
opt_old = json.dumps( other['exec']['options'], indent=4 )
# Return true if the folder already exists
return util.query_yes_no( MSG_WARN % ( folder, opt_new, opt_old ), "no" )
return True
# Write new config to save folder
def make_config( cfg, folder ):
    # create the config folder if it doesn't exist
cfg_folder = os.path.join( folder, 'config' )
if not os.path.isdir( cfg_folder ):
os.makedirs( cfg_folder )
print 'Created folder "%s".' % (cfg_folder)
# link and filename
cfg_name = 'config_%s.json' % (util.sortable_timestamp())
link_file = os.path.join( cfg_folder, 'config.json' )
cfg_file = os.path.join( cfg_folder, cfg_name )
util.write_json( cfg_file, cfg )
util.relink( link_file, cfg_name )
# Write scripts according to current config
def make_scripts( cfg, folder ):
# default configuration
workdir = cfg['folders']['work']
if not workdir:
workdir = cfg['folders']['start']
# substitution values from config
sub = dict(cfg['cluster'])
sub.update({
'savedir': cfg['folders']['save'],
'startdir': cfg['folders']['start'],
'workdir': workdir,
'classname': cfg['exec']['class'],
'logdir': 'logs',
'mapscript': 'map.task',
'redscript': 'reduce'
})
# multithreading
if 'threads' in cfg['cluster'] and cfg['cluster']['threads'] > 1:
sub['threads'] = '-s openmp,%d' % (cfg['cluster']['threads'])
else:
sub['threads'] = ''
# put the scripts together
nworkers = len(cfg['exec']['workers'])
scripts = {
'map.task': "\n".join([ TPL_MAP.substitute(sub,workerid=(i+1)) for i in xrange(nworkers) ]) + "\n",
'reduce': TPL_REDUCE.substitute(sub) + "\n",
'runworker': TPL_RUNWORKER.substitute(sub),
'submit': TPL_SUBMIT.substitute(sub)
}
# create log folder
logdir = os.path.join( folder, 'logs' )
if not os.path.isdir(logdir):
os.mkdir(logdir)
# create scripts and make executable
for name,text in scripts.iteritems():
sname = os.path.join(folder,name)
with open( sname, 'w' ) as f:
f.write(text)
util.make_executable(sname)
# Success message
msg_success = """
Successful build (%d jobs across %d workers). To submit to the cluster, run:
cd %s
./submit
"""
def main(args):
# Try different extensions in case it's missing
config = args.config
if os.path.isfile(config + '.json'):
config = config + '.json'
elif os.path.isfile(config + 'apred.json'):
config = config + 'apred.json'
elif os.path.isfile(config + '.mapred.json'):
config = config + '.mapred.json'
else:
assert os.path.isfile(config), 'File "%s" not found.' % (config)
# Load config and validate it
config = util.parse_config(config)
# Save folder
folder = args.savedir
if not folder:
folder = config['folders']['save']
else:
print 'Overriding configured savedir "%s" to "%s".' % (config['folders']['save'],folder)
config['folders']['save'] = folder
# Process config
if check_existing(config):
# Create save folder
if not os.path.isdir( folder ):
os.makedirs( folder )
print 'Created savedir "%s".' % (folder)
# Create config and scripts
make_config( config, folder )
make_scripts( config, folder )
# Success message
njobs = len(config['exec']['jobs'])
nworkers = len(config['exec']['workers'])
print msg_success % ( njobs, nworkers, folder )
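# --- Illustrative sketch (not part of the original script): the rough shape of a
# configuration, inferred only from the fields this script reads. The authoritative
# schema is defined by mapred_utils.parse_config (not shown here); every value below
# is a placeholder.
EXAMPLE_CONFIG = {
    'id': 'my-analysis',
    'folders': {
        'start': '/home/user/project',      # folder containing startup.m
        'work': '',                         # empty means: reuse the start folder
        'save': '/home/user/project/out',   # where worker and reduce outputs are written
    },
    'files': {
        'worker': 'worker_%d.mat',          # one output file per worker
        'reduce': 'reduced.mat',            # aggregated output of the reduce step
    },
    'exec': {
        'class': 'my.mapred.Task',          # MATLAB class instantiated by the generated scripts
        'workers': [[1, 2], [3, 4]],        # per-worker job assignment (placeholder)
        'jobs': [1, 2, 3, 4],
        'options': {},
    },
    'cluster': {
        'queue': 'short.q',
        'email': 'user@example.org',
        'mailopt': 'a',
        'jobname': 'mapred',
        'threads': 1,
    },
}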
if __name__ == '__main__':
parser = argparse.ArgumentParser( prog='mapred_build' )
parser.add_argument('config', help='Configuration file to be built')
parser.add_argument('--savedir', default='', help='Override save folder in config')
main(parser.parse_args())
| 2.46875
| 2
|
appstore_tools/actions/publish.py
|
luke14free/appstore-tools
| 0
|
12775000
|
import os
import hashlib
import colorama
import requests
from typing import Union, Sequence
from appstore_tools import appstore
from appstore_tools.print_util import print_clr, clr, json_term
from appstore_tools.appstore.auth import AccessToken
from .util import (
read_txt_file,
print_locale_status,
print_media_set_status,
print_media_status,
)
def media_checksum_ok(media, media_asset_dir: str) -> bool:
"""Checks if the appstore checksum matches the asset checksum."""
file_name = media["attributes"]["fileName"]
file_path = os.path.join(media_asset_dir, file_name)
appstore_checksum = media["attributes"]["sourceFileChecksum"]
if appstore_checksum is None:
print_media_status(
file_name,
colorama.Fore.CYAN,
"checksum missing (in processing)",
)
return False
if not os.path.isfile(file_path):
print_media_status(
file_name,
colorama.Fore.RED,
"no source file",
)
return False
with open(file_path, "rb") as file:
asset_checksum = hashlib.md5(file.read()).hexdigest()
if asset_checksum == appstore_checksum:
print_media_status(
file_name,
colorama.Fore.CYAN + colorama.Style.DIM,
clr(
f"checksum matched: ",
f"{colorama.Style.DIM}{asset_checksum}",
),
)
else:
print_media_status(
file_name,
colorama.Fore.CYAN,
clr(
f"checksum changed: ",
f"{colorama.Style.DIM}{appstore_checksum} -> {asset_checksum}",
),
)
return asset_checksum == appstore_checksum
def upload_media(media, media_asset_path: str) -> str:
"""Upload media asset (screenshot or preview) to the appstore.
Returns:
str: checksum
"""
file_hash = hashlib.md5()
upload_operations = media["attributes"]["uploadOperations"]
for op in upload_operations:
method: str = op["method"]
url: str = op["url"]
headers: dict = {}
for h in op["requestHeaders"]:
headers[h["name"]] = h["value"]
length: int = op["length"]
offset: int = op["offset"]
with open(media_asset_path, "rb") as file:
file.seek(offset)
file_chunk = file.read(length)
file_hash.update(file_chunk)
print_media_status(
media_asset_path,
colorama.Fore.CYAN,
f"uploading chunk (offset: {offset}, length: {length})",
)
requests.request(method=method, url=url, headers=headers, data=file_chunk)
return file_hash.hexdigest()
def get_media_file_names(media: Sequence[dict]):
return [x["attributes"]["fileName"] for x in media]
def get_asset_file_names(asset_dir: str):
return [
x for x in os.listdir(asset_dir) if os.path.isfile(os.path.join(asset_dir, x))
]
def get_new_file_paths(media: Sequence[dict], asset_dir: str):
media_file_names = get_media_file_names(media)
asset_file_names = get_asset_file_names(asset_dir)
return [
os.path.join(asset_dir, x)
for x in asset_file_names
if x not in media_file_names
]
def publish_screenshot(
access_token: AccessToken,
screenshot_path: str,
screenshot_set_id: str,
):
if not os.path.isfile(screenshot_path):
raise FileNotFoundError(f"Screenshot path does not exist: {screenshot_path}")
_, file_name = os.path.split(screenshot_path)
file_stat = os.stat(screenshot_path)
# Create
print_media_status(
file_name,
colorama.Fore.CYAN,
"reserving asset",
)
screenshot = appstore.create_screenshot(
screenshot_set_id=screenshot_set_id,
file_name=file_name,
file_size=file_stat.st_size,
access_token=access_token,
)
# Upload
checksum = upload_media(media=screenshot, media_asset_path=screenshot_path)
# Commit
print_media_status(
file_name,
colorama.Fore.CYAN,
"commiting upload",
)
screenshot = appstore.update_screenshot(
screenshot_id=screenshot["id"],
uploaded=True,
sourceFileChecksum=checksum,
access_token=access_token,
)
def publish_screenshots(
access_token: AccessToken,
screenshot_set_dir: str,
screenshot_set_id: str,
display_type: str,
):
print_media_set_status(display_type, colorama.Fore.CYAN, "checking for changes")
# Delete outdated screenshots
screenshots = appstore.get_screenshots(
screenshot_set_id=screenshot_set_id, access_token=access_token
)
for screenshot in screenshots:
if not media_checksum_ok(media=screenshot, media_asset_dir=screenshot_set_dir):
appstore.delete_screenshot(
screenshot_id=screenshot["id"], access_token=access_token
)
# Create new screenshots
screenshots = appstore.get_screenshots(
screenshot_set_id=screenshot_set_id, access_token=access_token
)
# Publish
new_file_paths = get_new_file_paths(screenshots, screenshot_set_dir)
for file_path in new_file_paths:
publish_screenshot(
access_token=access_token,
screenshot_path=file_path,
screenshot_set_id=screenshot_set_id,
)
# Reorder the screenshots
print_media_set_status(display_type, colorama.Fore.CYAN, "sorting screenshots")
screenshots = appstore.get_screenshots(
screenshot_set_id=screenshot_set_id, access_token=access_token
)
screenshots.sort(key=lambda x: x["attributes"]["fileName"])
screenshot_ids = [x["id"] for x in screenshots]
appstore.update_screenshot_order(
screenshot_set_id=screenshot_set_id,
screenshot_ids=screenshot_ids,
access_token=access_token,
)
def publish_screenshot_sets(
access_token: AccessToken,
localization_dir: str,
localization_id: str,
):
"""Publish the screenshot sets from assets on disk."""
screenshots_dir = os.path.join(localization_dir, "screenshots")
if not os.path.isdir(screenshots_dir):
print_clr(
f" No screenshots: directory {colorama.Fore.CYAN}{screenshots_dir}{colorama.Fore.RESET} not found.",
)
return
screenshot_sets = appstore.get_screenshot_sets(
localization_id=localization_id, access_token=access_token
)
asset_display_types = [
x
for x in os.listdir(screenshots_dir)
if os.path.isdir(os.path.join(screenshots_dir, x))
]
# Create new display types
loc_display_types = [
x["attributes"]["screenshotDisplayType"] for x in screenshot_sets
]
new_display_types = [x for x in asset_display_types if x not in loc_display_types]
for display_type in new_display_types:
print_media_set_status(
display_type, colorama.Fore.YELLOW, "creating display type"
)
screenshot_set = appstore.create_screenshot_set(
localization_id=localization_id,
display_type=display_type,
access_token=access_token,
)
screenshot_sets.append(screenshot_set)
for screenshot_set in screenshot_sets:
screenshot_set_id = screenshot_set["id"]
display_type = screenshot_set["attributes"]["screenshotDisplayType"]
screenshot_set_dir = os.path.join(screenshots_dir, display_type)
# Delete removed display types
if not os.path.isdir(screenshot_set_dir):
print_media_set_status(
display_type, colorama.Fore.RED, "deleting display type"
)
appstore.delete_screenshot_set(
screenshot_set_id=screenshot_set_id, access_token=access_token
)
continue
# Publish
publish_screenshots(
access_token=access_token,
screenshot_set_dir=screenshot_set_dir,
screenshot_set_id=screenshot_set_id,
display_type=display_type,
)
def publish_preview(
access_token: AccessToken,
preview_path: str,
preview_set_id: str,
):
if not os.path.isfile(preview_path):
raise FileNotFoundError(f"Preview path does not exist: {preview_path}")
_, file_name = os.path.split(preview_path)
file_stat = os.stat(preview_path)
# Create
print_media_status(
file_name,
colorama.Fore.CYAN,
"reserving asset",
)
preview = appstore.create_preview(
preview_set_id=preview_set_id,
file_name=file_name,
file_size=file_stat.st_size,
access_token=access_token,
)
# Upload
checksum = upload_media(media=preview, media_asset_path=preview_path)
# Commit
print_media_status(
file_name,
colorama.Fore.CYAN,
"commiting upload",
)
preview = appstore.update_preview(
preview_id=preview["id"],
uploaded=True,
sourceFileChecksum=checksum,
access_token=access_token,
)
def publish_previews(
access_token: AccessToken,
preview_set_dir: str,
preview_set_id: str,
display_type: str,
):
print_media_set_status(display_type, colorama.Fore.CYAN, "checking for changes")
# Delete outdated previews
previews = appstore.get_previews(
preview_set_id=preview_set_id, access_token=access_token
)
for preview in previews:
if not media_checksum_ok(media=preview, media_asset_dir=preview_set_dir):
appstore.delete_preview(preview_id=preview["id"], access_token=access_token)
# Create new previews
previews = appstore.get_previews(
preview_set_id=preview_set_id, access_token=access_token
)
new_file_paths = get_new_file_paths(previews, preview_set_dir)
# Publish
for file_path in new_file_paths:
publish_preview(
access_token=access_token,
preview_path=file_path,
preview_set_id=preview_set_id,
)
# Reorder the previews
print_media_set_status(display_type, colorama.Fore.CYAN, "sorting previews")
previews = appstore.get_previews(
preview_set_id=preview_set_id, access_token=access_token
)
previews.sort(key=lambda x: x["attributes"]["fileName"])
preview_ids = [x["id"] for x in previews]
appstore.update_preview_order(
preview_set_id=preview_set_id,
preview_ids=preview_ids,
access_token=access_token,
)
def publish_preview_sets(
access_token: AccessToken,
localization_dir: str,
localization_id: str,
):
"""Publish the previews sets from assets on disk."""
previews_dir = os.path.join(localization_dir, "previews")
if not os.path.isdir(previews_dir):
print_clr(
f" No previews: directory {colorama.Fore.CYAN}{previews_dir}{colorama.Fore.RESET} not found.",
)
return
preview_sets = appstore.get_preview_sets(
localization_id=localization_id, access_token=access_token
)
asset_display_types = [
x
for x in os.listdir(previews_dir)
if os.path.isdir(os.path.join(previews_dir, x))
]
# Create new display types
loc_preview_types = [x["attributes"]["previewType"] for x in preview_sets]
new_preview_types = [x for x in asset_display_types if x not in loc_preview_types]
for preview_type in new_preview_types:
print_media_set_status(
preview_type, colorama.Fore.YELLOW, "creating preview type"
)
preview_set = appstore.create_preview_set(
localization_id=localization_id,
preview_type=preview_type,
access_token=access_token,
)
preview_sets.append(preview_set)
for preview_set in preview_sets:
preview_set_id = preview_set["id"]
preview_type = preview_set["attributes"]["previewType"]
preview_set_dir = os.path.join(previews_dir, preview_type)
# Delete removed display types
if not os.path.isdir(preview_set_dir):
print_media_set_status(
preview_type, colorama.Fore.RED, "deleting preview type"
)
appstore.delete_preview_set(
preview_set_id=preview_set_id, access_token=access_token
)
continue
# Publish
publish_previews(
access_token=access_token,
preview_set_dir=preview_set_dir,
preview_set_id=preview_set_id,
display_type=preview_type,
)
def publish_version_localizations(
access_token: AccessToken,
app_dir: str,
version_id: str,
allow_create_locale: bool = True,
allow_delete_locale: bool = True,
):
localizations = appstore.get_version_localizations(
version_id=version_id, access_token=access_token
)
asset_locales = [
x for x in os.listdir(app_dir) if os.path.isdir(os.path.join(app_dir, x))
]
# create new localizations
version_locales = [loc["attributes"]["locale"] for loc in localizations]
new_locales = [x for x in asset_locales if x not in version_locales]
if allow_create_locale:
for locale in new_locales:
print_locale_status(locale, colorama.Fore.YELLOW, "creating locale")
loc = appstore.create_version_localization(
version_id=version_id,
locale=locale,
localization_attributes={},
access_token=access_token,
)
localizations.append(loc)
else:
for locale in new_locales:
print_locale_status(
locale, colorama.Fore.LIGHTBLACK_EX, "locale creation not allowed"
)
# publish localizations
for loc in localizations:
loc_id = loc["id"]
loc_attr = loc["attributes"]
locale = loc_attr["locale"]
loc_dir = os.path.join(app_dir, locale)
# Delete removed locales
if not os.path.isdir(loc_dir):
if allow_delete_locale:
print_locale_status(locale, colorama.Fore.RED, "deleting locale")
appstore.delete_version_localization(
localization_id=loc_id, access_token=access_token
)
else:
print_locale_status(
locale, colorama.Fore.LIGHTBLACK_EX, "locale deletion not allowed"
)
continue
# Normalize all attribute values to strings
for key in appstore.VersionLocalizationAttributes.__annotations__.keys():
if loc_attr[key] is None:
loc_attr[key] = ""
# Load local data from disk
asset_loc_data: appstore.VersionLocalizationAttributes = {}
for key in appstore.VersionLocalizationAttributes.__annotations__.keys():
path = os.path.join(loc_dir, key + ".txt")
content = read_txt_file(path)
if content is not None:
asset_loc_data[key] = content # type: ignore
# Only need to update if there are differences
loc_diff_keys = [
key
for key, value in asset_loc_data.items()
if value is not None and value != loc_attr[key]
]
if len(loc_diff_keys) > 0:
print_locale_status(
locale,
colorama.Fore.CYAN,
f"updating locale {colorama.Fore.CYAN}{colorama.Style.DIM}{loc_diff_keys}",
)
appstore.update_version_localization(
localization_id=loc_id,
localization_attributes=asset_loc_data,
access_token=access_token,
)
else:
print_locale_status(
locale, colorama.Fore.CYAN, "no changes in version settings"
)
# Screenshots
publish_screenshot_sets(
access_token=access_token,
localization_dir=loc_dir,
localization_id=loc_id,
)
# Previews
publish_preview_sets(
access_token=access_token,
localization_dir=loc_dir,
localization_id=loc_id,
)
def publish_version(
access_token: AccessToken,
app_dir: str,
app_id: str,
bundle_id: str,
platform: Union[appstore.Platform, str], # pylint: disable=unsubscriptable-object
version_string: str,
update_version_string: bool,
allow_create_version: bool = True,
allow_create_locale: bool = True,
allow_delete_locale: bool = True,
):
# Get Versions
versions = appstore.get_versions(
app_id=app_id,
access_token=access_token,
platforms=[platform],
states=appstore.editable_version_states,
)
print_clr(
f"Found {colorama.Fore.CYAN}{len(versions)}{colorama.Fore.RESET} editable app versions ",
f"for {colorama.Fore.CYAN}{platform}{colorama.Fore.RESET}.",
)
if len(versions) == 0 and allow_create_version:
print(
f"Creating new version: {colorama.Fore.BLUE}{version_string}{colorama.Fore.RESET}"
)
created_version = appstore.create_version(
app_id=app_id,
platform=platform,
version_string=version_string,
access_token=access_token,
)
versions.append(created_version)
elif update_version_string:
for v in versions:
version_id = v["id"]
version_state = v["attributes"]["appStoreState"]
version_attributes: appstore.VersionAttributes = {
"versionString": version_string,
}
print_clr(
f"{colorama.Fore.GREEN}Version ",
f"{colorama.Fore.BLUE}{version_state} ",
f": updating version ",
f"{colorama.Fore.CYAN}{version_attributes}",
)
appstore.update_version(
version_id=version_id,
version_attributes=version_attributes,
access_token=access_token,
)
for v in versions:
version_id = v["id"]
version_state = v["attributes"]["appStoreState"]
print_clr(
f"{colorama.Fore.GREEN}Version ",
f"{colorama.Fore.BLUE}{version_id} ",
f"{colorama.Fore.CYAN}{version_state} ",
)
publish_version_localizations(
            access_token=access_token,
app_dir=app_dir,
version_id=version_id,
allow_create_locale=allow_create_locale,
allow_delete_locale=allow_delete_locale,
)
def publish_info(
access_token: AccessToken,
app_dir: str,
app_id: str,
bundle_id: str,
platform: Union[appstore.Platform, str], # pylint: disable=unsubscriptable-object
):
# Get Infos
infos = appstore.get_infos(
app_id=app_id,
access_token=access_token,
states=appstore.editable_version_states,
)
print_clr(
f"Found {colorama.Fore.CYAN}{len(infos)}{colorama.Fore.RESET} editable app infos."
)
asset_locales = [
x for x in os.listdir(app_dir) if os.path.isdir(os.path.join(app_dir, x))
]
for info in infos:
info_id = info["id"]
version_state = info["attributes"]["appStoreState"]
print_clr(
colorama.Fore.GREEN + "AppInfo ",
colorama.Fore.BLUE + f"{info_id} ",
colorama.Fore.CYAN + f"{version_state}",
)
localizations = appstore.get_info_localizations(
info_id=info_id, access_token=access_token
)
# create new localizations
info_locales = [loc["attributes"]["locale"] for loc in localizations]
new_locales = [x for x in asset_locales if x not in info_locales]
for locale in new_locales:
print_locale_status(
locale, colorama.Fore.LIGHTBLACK_EX, "locale creation not allowed"
)
for loc in localizations:
loc_id = loc["id"]
loc_attr = loc["attributes"]
locale = loc_attr["locale"]
loc_dir = os.path.join(app_dir, locale)
# Delete removed locales
if not os.path.isdir(loc_dir):
print_locale_status(
locale, colorama.Fore.LIGHTBLACK_EX, "locale deletion not allowed"
)
continue
# Normalize all attribute values to strings
for key in appstore.InfoLocalizationAttributes.__annotations__.keys():
if loc_attr[key] is None:
loc_attr[key] = ""
# Load local data from disk
asset_loc_data: appstore.InfoLocalizationAttributes = {}
for key in appstore.InfoLocalizationAttributes.__annotations__.keys():
path = os.path.join(loc_dir, key + ".txt")
content = read_txt_file(path)
if content is not None:
asset_loc_data[key] = content # type: ignore
# Only need to update if there are differences
loc_diff_keys = [
key
for key, value in asset_loc_data.items()
if value is not None and value != loc_attr[key]
]
if len(loc_diff_keys) > 0:
print_locale_status(
locale,
colorama.Fore.CYAN,
f"updating app info {colorama.Fore.CYAN}{colorama.Style.DIM}{loc_diff_keys}",
)
appstore.update_info_localization(
info_localization_id=loc_id,
info_localization_attributes=asset_loc_data,
access_token=access_token,
)
else:
print_locale_status(
locale, colorama.Fore.CYAN, "no changes in app settings"
)
def publish(
access_token: AccessToken,
asset_dir: str,
app_id: str,
bundle_id: str,
platform: Union[appstore.Platform, str], # pylint: disable=unsubscriptable-object
version_string: str,
update_version_string: bool,
allow_create_version: bool = True,
allow_create_locale: bool = True,
allow_delete_locale: bool = True,
):
"""Publish all the app meta data app store, using any editable app versions found.
If none are found, a new version can be created for the specified target platform."""
print_clr("Publishing assets from directory: ", colorama.Fore.CYAN + asset_dir)
# Application directory
app_dir = os.path.join(asset_dir, bundle_id)
if not os.path.isdir(app_dir):
raise FileNotFoundError(
f"App directory {colorama.Fore.CYAN}{app_dir}{colorama.Fore.RESET} not found. "
)
publish_version(
access_token=access_token,
app_dir=app_dir,
app_id=app_id,
bundle_id=bundle_id,
platform=platform,
version_string=version_string,
update_version_string=update_version_string,
allow_create_version=allow_create_version,
allow_create_locale=allow_create_locale,
allow_delete_locale=allow_delete_locale,
)
publish_info(
access_token=access_token,
app_dir=app_dir,
app_id=app_id,
bundle_id=bundle_id,
platform=platform,
)
print_clr(colorama.Fore.GREEN + "Publish complete")
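# --- Illustrative usage sketch (not part of the original module). It assumes an
# AccessToken has already been created elsewhere (its construction is not shown in this
# file); the app id, bundle id, directory and platform value below are placeholders.
def example_publish(access_token: AccessToken) -> None:
    publish(
        access_token=access_token,
        asset_dir="appstore-assets",      # folder containing <bundle_id>/<locale>/... assets
        app_id="1234567890",              # hypothetical App Store app id
        bundle_id="com.example.app",      # hypothetical bundle id
        platform="IOS",                   # placeholder; a member of appstore.Platform could be passed instead
        version_string="1.2.3",
        update_version_string=False,
    )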
| 2.421875
| 2
|
le-namesilo/namesilo.py
|
vs49688/scripts
| 1
|
12775001
|
#!/usr/bin/env python3
import os
import urllib.parse
import urllib.request
from collections import OrderedDict
import xml.etree.ElementTree
class NameSilo(object):
def __init__(self, apikey):
self._apikey = apikey
def _make_url(self, op, **args):
x = OrderedDict(
version=1,
type='xml',
key=self._apikey
)
x.update(args)
urltuple = (
'https',
'www.namesilo.com',
'/api/{0}'.format(op),
'',
urllib.parse.urlencode(x),
''
)
uu = urllib.parse.urlunparse(urltuple)
#print(uu)
return uu
def _get_url(self, op, **args):
url = self._make_url(op, **args)
req = urllib.request.Request(
url=url,
headers={'User-Agent': 'Mozilla/5.0'}
)
data = urllib.request.urlopen(req).read().decode('utf-8')
#print(data)
root = xml.etree.ElementTree.fromstring(data)
code = int(root.find('./reply/code').text)
if code != 300:
raise Exception('{0}: {1}'.format(op, root.find('./reply/detail').text))
return root.find('./reply')
def add_record(self, domain, host, value, ttl):
reply = self._get_url('dnsAddRecord', domain=domain, rrtype='TXT', rrhost=host, rrvalue=value, rrttl=ttl)
return reply.find('./record_id').text
def del_record(self, domain, id):
self._get_url('dnsDeleteRecord', domain=domain, rrid=id)
def list_records(self, domain):
reply = self._get_url('dnsListRecords', domain=domain)
return [{e.tag:e.text for e in r} for r in reply.findall('./resource_record')]
def amce_build_host(domain):
parts = domain.split('.')
if len(parts) > 2:
host = '_acme-challenge.{0}'.format('.'.join(parts[:-2]))
cdomain = '.'.join(parts[-2:])
else:
host = '_acme-challenge'
cdomain = domain
fullhost = '_acme-challenge.{0}'.format(domain)
return (host, cdomain, fullhost)
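# --- Illustrative usage sketch (not part of the original script). The API key, domain
# and TXT value are placeholders; only the methods defined above are used.
if __name__ == '__main__':
    ns = NameSilo('YOUR-API-KEY')
    host, cdomain, fullhost = amce_build_host('sub.example.com')
    record_id = ns.add_record(cdomain, host, 'acme-challenge-token', 3600)
    print(ns.list_records(cdomain))
    ns.del_record(cdomain, record_id)   # remove the TXT record again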
| 2.5625
| 3
|
capiq/tests/unit/test_capiq_client_gdsp.py
|
vy-labs/capiq-python
| 29
|
12775002
|
import unittest
from mock import mock
from capiq.capiq_client import CapIQClient
def mocked_gdsp_data_requests_post(*args, **kwargs):
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
if args[0] is not None:
return MockResponse({"GDSSDKResponse": [{
"Headers": ["IQ_CLOSEPRICE"],
"Rows": [{"Row": ["46.80"]}],
"NumCols": 1,
"Seniority": "",
"Mnemonic": "IQ_CLOSEPRICE",
"Function": "GDSP",
"ErrMsg": None,
"Properties": {},
"StartDate": "",
"NumRows": 1,
"CacheExpiryTime": "0",
"SnapType": "",
"Frequency": "",
"Identifier": "TRIP:",
"Limit": ""
}]}, 200)
def mocked_gdsp_no_data_requests_post(*args, **kwargs):
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
"""
if args[0] == 'http://someurl.com/test.json':
return MockResponse({"key1": "value1"}, 200)
elif args[0] == 'http://someotherurl.com/anothertest.json':
"""
if args[0] is not None:
return MockResponse(
{
"GDSSDKResponse":
[
{
"Headers": ["IQ_CLOSEPRICE"],
"Rows": [{"Row": ["46.80"]}],
"NumCols": 1,
"Seniority": "",
"Mnemonic": "IQ_CLOSEPRICE",
"Function": "GDSP",
"ErrMsg": "SOME ERROR",
"Properties": {},
"StartDate": "",
"NumRows": 1,
"CacheExpiryTime": "0",
"SnapType": "",
"Frequency": "",
"Identifier": "TRIP:",
"Limit": ""
}
]
}, 200)
class TestCapiqClientGdsp(unittest.TestCase):
@mock.patch('capiq.capiq_client.requests.post', side_effect=mocked_gdsp_data_requests_post)
def test_gdsp_data(self, mocked_post):
ciq_client = CapIQClient("username", "password")
return_value = ciq_client.gdsp(["TRIP"], ["IQ_CLOSEPRICE"], ["close_price"], [{}])
self.assertEqual(return_value, {'TRIP:': {'close_price': '46.80'}})
@mock.patch('capiq.capiq_client.requests.post', side_effect=mocked_gdsp_no_data_requests_post)
def test_gdsp_no_data(self, mocked_post):
ciq_client = CapIQClient("username", "password")
return_value = ciq_client.gdsp(["TRIP"], ["IQ_CLOSEPRICE"], ["close_price"], [{}])
self.assertEqual(return_value, {'TRIP:': {'close_price': None}})
| 2.609375
| 3
|
generate_configs.py
|
ryanclanigan/messaging-bridge
| 0
|
12775003
|
from shutil import copyfile
import glob
import os
if not os.path.exists("config"):
os.mkdir("config")
for file in glob.glob("example-config/*.json"):
copyfile(file, os.path.join("config", file.split("example-")[-1]))
| 2.765625
| 3
|
example_model.py
|
amipy/numerous
| 0
|
12775004
|
from enum import Enum
from numerous.engine.model import Model
from numerous.engine.simulation import Simulation
from numerous.engine.system import Subsystem, Item
from tests.test_equations import TestEq_ground, Test_Eq, TestEq_input
class SolverType(Enum):
SOLVER_IVP = 0
NUMEROUS = 1
solver_types = [SolverType.NUMEROUS, SolverType.SOLVER_IVP]
def ms2():
class I(Item):
def __init__(self, tag, P, T, R):
super().__init__(tag)
t1 = self.create_namespace('t1')
t1.add_equations([TestEq_input(P=P, T=T, R=R)])
class T(Item):
def __init__(self, tag, T, R):
super().__init__(tag)
t1 = self.create_namespace('t1')
t1.add_equations([Test_Eq(T=T, R=R)])
class G(Item):
def __init__(self, tag, TG, RG):
super().__init__(tag)
t1 = self.create_namespace('t1')
t1.add_equations([TestEq_ground(TG=TG, RG=RG)])
class S2(Subsystem):
def __init__(self, tag):
super().__init__(tag)
input = I('1', P=100, T=0, R=10)
item1 = T('2', T=0, R=5)
item2 = T('3', T=0, R=3)
item3 = T('4', T=0, R=2)
            ## RG is redundant; we use item3.R as the last value of R in the chain
ground = G('5', TG=10, RG=2)
input.t1.T_o.add_mapping(item1.t1.T)
# item1.bind(input=input, output=item2)
item1.t1.R_i.add_mapping(input.t1.R)
item1.t1.T_i.add_mapping(input.t1.T)
item1.t1.T_o.add_mapping(item2.t1.T)
# t_0 = item1.t1.T_o
# item1.t1.T_o = item2.t1.T
item2.t1.R_i.add_mapping(item1.t1.R)
item2.t1.T_i.add_mapping(item1.t1.T)
item2.t1.T_o.add_mapping(item3.t1.T)
item3.t1.R_i.add_mapping(item2.t1.R)
item3.t1.T_i.add_mapping(item2.t1.T)
item3.t1.T_o.add_mapping(ground.t1.T)
self.register_items([input, item1, item2, item3, ground])
return S2('S2')
def run_model(ms, solver, use_llvm):
# print(type(ms2))
m1 = Model(system=ms, use_llvm=use_llvm)
s1 = Simulation(m1, t_start=0, t_stop=1000, num=10, solver_type=solver)
s1.solve()
return s1
def ms2N(n):
class I(Item):
def __init__(self, tag, P, T, R):
super().__init__(tag)
t1 = self.create_namespace('t1')
t1.add_equations([TestEq_input(P=P, T=T, R=R)])
class T(Item):
def __init__(self, tag, T, R):
super().__init__(tag)
t1 = self.create_namespace('t1')
t1.add_equations([Test_Eq(T=T, R=R)])
class G(Item):
def __init__(self, tag, TG, RG):
super().__init__(tag)
t1 = self.create_namespace('t1')
t1.add_equations([TestEq_ground(TG=TG, RG=RG)])
class S2N(Subsystem):
def __init__(self, tag):
super().__init__(tag)
items = []
input = I('1', P=100, T=0, R=10)
for i in range(n):
items.append(T(str(i+2), T=1, R=5))
#print(items)
#print(f'GNDID{n + 2}')
ground = G(str(n + 2), TG=10, RG=2)
input.t1.T_o.add_mapping(items[0].t1.T)
# item1.bind(input=input, output=item2)
for item in range(n):
if item == 0:
items[item].t1.R_i.add_mapping(input.t1.R)
#items[item].t1.T_i.add_mapping(input.t1.T)
items[item].t1.T_o.add_mapping(items[item + 1].t1.T)
elif item == n-1:
items[item].t1.R_i.add_mapping(items[item - 1].t1.R)
items[item].t1.T_i.add_mapping(items[item - 1].t1.T)
items[item].t1.T_o.add_mapping(ground.t1.T)
else:
items[item].t1.R_i.add_mapping(items[item - 1].t1.R)
items[item].t1.T_i.add_mapping(items[item - 1].t1.T)
items[item].t1.T_o.add_mapping(items[item + 1].t1.T)
r_items = [input]
for i in items:
r_items.append(i)
r_items.append(ground)
print(r_items)
#print(type(items[0]))
self.register_items(r_items)
return S2N('S2')
#s1=run_model(ms2(), solver=SolverType.NUMEROUS, use_llvm=[True, False])
#s1_result=s1.model.historian_df
s1n=run_model(ms2N(8), solver=SolverType.NUMEROUS, use_llvm=[True, False])
s1n_result=s1n.model.historian_df
| 2.40625
| 2
|
src/filter.py
|
Boyploy/IMF
| 108
|
12775005
|
<filename>src/filter.py
# Copyright (c) 2017 <NAME> and <NAME> at SoftSec, KAIST
#
# See the file LICENCE for copying permission.
import os
import utils
import sys
def parse_name(data):
return data.split('\'')[1]
def parse_selector(data):
if 'selector' in data:
ret = data.split('selector')[1].split('\'value\':')[1].split(',')[0]
ret = int(ret.strip()[2:], 16)
return ret
return None
def merge(name, selector):
ret = name
if selector != None:
ret = '%s, %d'%(name, selector)
return ret.__hash__()
def loader(path):
ret = []
with open(path, 'rb') as f:
data = f.read().split('\n')[:-1]
idx = 0
while idx < len(data):
name = parse_name(data[idx])
selector = parse_selector(data[idx])
hval = merge(name, selector)
ret.append(hval)
idx += 2
return path, ret
def list_dir(path):
files = []
for fn in os.listdir(path):
files.append(os.path.join(path, fn))
return files
def get(l, idx):
if len(l) >idx:
return l[idx]
return None
def categorize(groups, idx):
ret = []
for group in groups:
tmp = {}
for fn, hvals in group:
hval = get(hvals, idx)
if hval not in tmp:
tmp[hval] = []
tmp[hval].append((fn, hvals))
for hval in tmp:
if hval != None :
ret.append(tmp[hval])
return ret
def pick_best(groups, n):
for group in groups:
if len(group) >= n:
return group[:n]
return None
def find_best(groups, n):
before = None
idx = 0
while len(groups) != 0:
before = groups
groups = categorize(groups, idx)
if pick_best(groups, n) == None:
return pick_best(before, n), idx
idx += 1
utils.error('find_best error')
def save_best(path, best_group, idx):
for fn, _ in best_group:
name = fn.split('/')[-1]
with open(fn, 'rb') as f:
data = f.read().split('\n')[:-1]
with open(os.path.join(path, name), 'wb') as f:
for x in data[:idx*2]:
f.write(x+'\n')
def do_filter(log_path, out_path, n, core):
log_names = list_dir(log_path)
logs = utils.multiproc(loader, log_names, core)
best_group, idx = find_best([logs], n)
save_best(out_path, best_group, idx)
def show_help():
print './filter-log [log dir] [output dir] [# of output log] [# of core]'
if __name__ == '__main__':
if len(sys.argv) !=5:
show_help()
sys.exit(-1)
n = int(sys.argv[3])
core = int(sys.argv[4])
do_filter(sys.argv[1], sys.argv[2], n, core)
| 2.46875
| 2
|
src/MicroPython/main.py
|
mnkagarwal0/IoTMQTTSample
| 46
|
12775006
|
<filename>src/MicroPython/main.py
## This file needs to be renamed to main.py for it to work on the ESP32 board
import utime
from util import create_mqtt_client, get_telemetry_topic, get_c2d_topic, parse_connection
HOST_NAME = "HostName"
SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName"
SHARED_ACCESS_KEY = "SharedAccessKey"
SHARED_ACCESS_SIGNATURE = "SharedAccessSignature"
DEVICE_ID = "DeviceId"
MODULE_ID = "ModuleId"
GATEWAY_HOST_NAME = "GatewayHostName"
## Parse the connection string into constituent parts
dict_keys = parse_connection("<YOUR CONNECTION STRING>")
shared_access_key = dict_keys.get(SHARED_ACCESS_KEY)
shared_access_key_name = dict_keys.get(SHARED_ACCESS_KEY_NAME)
gateway_hostname = dict_keys.get(GATEWAY_HOST_NAME)
hostname = dict_keys.get(HOST_NAME)
device_id = dict_keys.get(DEVICE_ID)
module_id = dict_keys.get(MODULE_ID)
## Create your own shared access signature from the connection string that you have
## Azure IoT Explorer can be used for this purpose.
sas_token_str = "<YOUR SAS TOKEN STRING>"
## Create username following the below format '<HOSTNAME>/<DEVICE_ID>'
username = hostname + '/' + device_id
## Create UMQTT ROBUST or UMQTT SIMPLE CLIENT
mqtt_client = create_mqtt_client(client_id=device_id, hostname=hostname, username=username, password=sas_token_str, port=8883, keepalive=120, ssl=True)
print("connecting")
mqtt_client.reconnect()
def callback_handler(topic, message_receive):
print("Received message")
print(message_receive)
subscribe_topic = get_c2d_topic(device_id)
mqtt_client.set_callback(callback_handler)
mqtt_client.subscribe(topic=subscribe_topic)
print("Publishing")
topic = get_telemetry_topic(device_id)
## Send telemetry
messages = ["Accio", "Aguamenti", "Alarte Ascendare", "Expecto Patronum", "Hom<NAME>", "Priori Incantato", "Revelio", "Rictusempra", "Nox" , "Stupefy", "<NAME>"]
for i in range(0, len(messages)):
print("Sending message " + str(i))
mqtt_client.publish(topic=topic, msg=messages[i])
utime.sleep(2)
## Send a C2D message and wait for it to arrive at the device
print("waiting for message")
mqtt_client.wait_msg()
| 2.640625
| 3
|
lisc/tests/plts/test_words.py
|
koudyk/lisc
| 0
|
12775007
|
<reponame>koudyk/lisc<filename>lisc/tests/plts/test_words.py
"""Tests for lisc.plts.words."""
from collections import Counter
from lisc.tests.tutils import plot_test, optional_test
from lisc.plts.words import *
###################################################################################################
###################################################################################################
@optional_test('wordcloud')
@plot_test
def test_plot_wordcloud():
freq_dist = Counter(['lots', 'of', 'words', 'words'])
plot_wordcloud(freq_dist, 5)
@optional_test('matplotlib')
@plot_test
def test_plot_years():
years = Counter([2000, 2000, 2015, 2016])
plot_years(years, year_range=[1999, 2017])
| 2.53125
| 3
|
2015/3/directions_2.py
|
lvaughn/advent
| 0
|
12775008
|
<reponame>lvaughn/advent
#!/usr/bin/env python3
visited = set()
visited.add((0, 0))
turn = 0 # Santa = 0, Robo-Santa = 1
locations = [[0, 0], [0, 0]]
with open('input.txt', 'r') as f:
for line in f:
for ch in line:
pos = locations[turn]
turn = (turn + 1) % 2
if ch == '^':
pos[0] += 1
elif ch == 'v':
pos[0] -= 1
elif ch == '<':
pos[1] -= 1
elif ch == '>':
pos[1] += 1
visited.add(tuple(pos))
print("Total houses", len(visited))
| 3.296875
| 3
|
tests/ui/test_functional.py
|
REFEDS/met
| 0
|
12775009
|
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from pyvirtualdisplay import Display
from tests.testing_utilities import populate_test_db
class FunctionalTest(StaticLiveServerTestCase):
def setUp(self):
display = Display(visible=0, size=(800, 600))
display.start()
self.selenium = webdriver.Firefox()
self.selenium.implicitly_wait(3)
populate_test_db()
def tearDown(self):
self.selenium.quit()
# Auxiliary function to add view subdir to URL
def _get_full_url(self, namespace):
return self.live_server_url + namespace
def _is_text_present(self, text):
try:
body = self.selenium.find_element_by_tag_name('body')
except NoSuchElementException:
return False
return text in body.text # check if the text is in body's text
def test_home_title(self):
"""
Tests that Home is loading properly
"""
self.selenium.get(self._get_full_url('/'))
self.assertIn('Metadata Explorer Tool', self.selenium.title)
def test_home_sections(self):
"""
Tests that Home is showing the right sections
"""
self.selenium.get(self._get_full_url('/'))
self.assertTrue(self._is_text_present('Entities summary'))
| 2.375
| 2
|
src/dask_remote/cluster_base.py
|
octoenergy/dask-remote
| 4
|
12775010
|
<gh_stars>1-10
import asyncio
from distributed.core import rpc
from distributed.deploy.cluster import Cluster
from distributed.security import Security
from distributed.utils import LoopRunner
class NoOpAwaitable(object):
"""An awaitable object that always returns None.
Useful to return from a method that can be called in both asynchronous and
synchronous contexts.
From `distributed.deploy.spec`.
"""
def __await__(self):
async def f():
return None
return f().__await__()
class ContextCluster(Cluster):
def __init__(self, asynchronous=False, loop=None):
self._loop_runner = LoopRunner(loop=loop, asynchronous=asynchronous)
self.loop = self._loop_runner.loop
super().__init__(asynchronous=asynchronous)
if not self.asynchronous:
self._loop_runner.start()
self.sync(self._start)
def __enter__(self):
if self.status != "running":
raise ValueError(f"Expected status 'running', found '{self.status}'")
return self
def __exit__(self, typ, value, traceback):
self.close()
self._loop_runner.stop()
class AsyncContextCluster(ContextCluster):
def __await__(self):
async def _():
if self.status == "created":
await self._start()
return self
return _().__await__()
async def __aenter__(self):
await self
return self
async def __aexit__(self, *args, **kwargs):
await self.close()
class RemoteSchedulerCluster(AsyncContextCluster):
def __init__(self, asynchronous=False, loop=None, security=None):
self.security = security or Security()
super().__init__(asynchronous=asynchronous, loop=loop)
async def _start(self):
self.status: str
while self.status == "starting":
await asyncio.sleep(0.01)
if self.status == "running":
return
if self.status == "closed":
raise ValueError("Unexpected 'closed' status")
self.status = "starting"
self.scheduler_comm = rpc(
self.scheduler_address, connection_args=self.security.get_connection_args("client")
)
await super()._start()
| 2.46875
| 2
|
packages/cdk-s3-deployment/lib/lambda/handler.py
|
DaySmart/daysmart-cdk-constructs
| 1
|
12775011
|
<reponame>DaySmart/daysmart-cdk-constructs<gh_stars>1-10
import subprocess
import os
import tempfile
import json
import traceback
import logging
import shutil
import boto3
import contextlib
from datetime import datetime
from uuid import uuid4
from urllib.request import Request, urlopen
from zipfile import ZipFile
logger = logging.getLogger()
logger.setLevel(logging.INFO)
cloudfront = boto3.client('cloudfront')
CFN_SUCCESS = "SUCCESS"
CFN_FAILED = "FAILED"
def handler(event, context):
def cfn_error(message=None):
logger.error("| cfn_error: %s" % message)
cfn_send(event, context, CFN_FAILED, reason=message)
try:
logger.info(event)
# cloudformation request type (create/update/delete)
request_type = event['RequestType']
# extract resource properties
props = event['ResourceProperties']
old_props = event.get('OldResourceProperties', {})
physical_id = event.get('PhysicalResourceId', None)
try:
source_bucket_names = props['SourceBucketNames']
source_object_keys = props['SourceObjectKeys']
dest_bucket_name = props['DestinationBucketName']
dest_bucket_prefix = props.get('DestinationBucketKeyPrefix', '')
distribution_id = props.get('DistributionId', '')
environment = props.get('Environment', '')
sns_topic_arn = props.get('SnsTopicArn', '')
default_distribution_path = dest_bucket_prefix
if not default_distribution_path.endswith("/"):
default_distribution_path += "/"
if not default_distribution_path.startswith("/"):
default_distribution_path = "/" + default_distribution_path
default_distribution_path += "*"
distribution_paths = props.get(
'DistributionPaths', [default_distribution_path])
except KeyError as e:
cfn_error("missing request resource property %s. props: %s" %
(str(e), props))
return
# treat "/" as if no prefix was specified
if dest_bucket_prefix == "/":
dest_bucket_prefix = ""
s3_source_zips = map(lambda name, key: "s3://%s/%s" %
(name, key), source_bucket_names, source_object_keys)
s3_dest = "s3://%s/%s" % (dest_bucket_name, dest_bucket_prefix)
old_s3_dest = "s3://%s/%s" % (old_props.get("DestinationBucketName", ""),
old_props.get("DestinationBucketKeyPrefix", ""))
        # an all-empty old destination ("s3:///") means there was no previous deployment
if old_s3_dest == "s3:///":
old_s3_dest = None
logger.info("| s3_dest: %s" % s3_dest)
logger.info("| old_s3_dest: %s" % old_s3_dest)
# if we are creating a new resource, allocate a physical id for it
# otherwise, we expect physical id to be relayed by cloudformation
if request_type == "Create":
physical_id = "aws.cdk.s3deployment.%s" % str(uuid4())
else:
if not physical_id:
cfn_error(
"invalid request: request type is '%s' but 'PhysicalResourceId' is not defined" % request_type)
return
if request_type == "Update" or request_type == "Create":
s3_deploy(s3_source_zips, s3_dest)
if distribution_id:
failed_invalidation_list = cloudfront_invalidate(
distribution_paths, dest_bucket_name)
logger.info("failed invalidation list => %s" %
failed_invalidation_list)
if (len(failed_invalidation_list) > 0):
invalidation_failed(environment,
sns_topic_arn, failed_invalidation_list)
cfn_send(event, context, CFN_SUCCESS, physicalResourceId=physical_id)
except KeyError as e:
cfn_error("invalid request. Missing key %s" % str(e))
except Exception as e:
logger.exception(e)
cfn_error(str(e))
# ---------------------------------------------------------------------------------------------------
# populate all files from s3_source_zips to a destination bucket
def s3_deploy(s3_source_zips, s3_dest):
# create a temporary working directory
workdir = tempfile.mkdtemp()
logger.info("| workdir: %s" % workdir)
# create a directory into which we extract the contents of the zip file
contents_dir = os.path.join(workdir, 'contents')
os.mkdir(contents_dir)
# download the archive from the source and extract to "contents"
for s3_source_zip in s3_source_zips:
archive = os.path.join(workdir, str(uuid4()))
logger.info("archive: %s" % archive)
aws_command("s3", "cp", s3_source_zip, archive)
logger.info("| extracting archive to: %s\n" % contents_dir)
with ZipFile(archive, "r") as zip:
zip.extractall(contents_dir)
# cp from "contents" to destination
s3_command = ["s3", "cp"]
s3_command.append("--recursive")
s3_command.extend([contents_dir, s3_dest])
aws_command(*s3_command)
shutil.rmtree(workdir)
# ---------------------------------------------------------------------------------------------------
# invalidate files in the CloudFront distribution edge caches
def cloudfront_invalidate(distribution_paths, dest_bucket_name):
invalidate_distribution_list = []
failed_invalidation_list = []
aws_distribution_list = []
cf_list_response = cloudfront.list_distributions()
aws_distribution_list.extend(cf_list_response['DistributionList']['Items'])
if(cf_list_response['DistributionList']['IsTruncated']):
aws_distribution_list.extend(paginate_distributions(cf_list_response['DistributionList']['NextMarker']))
logger.info("Distributions => %s" % aws_distribution_list)
for distribution in aws_distribution_list:
origin_list = distribution['Origins']['Items']
for origin in origin_list:
if (origin['DomainName'] and origin['DomainName'].find(dest_bucket_name) != -1):
invalidate_distribution_list.append(distribution['Id'])
logger.info("List of Distribution Ids to be Invalidated => %s" %
invalidate_distribution_list)
for distribution_id in invalidate_distribution_list:
try:
invalidation_resp = cloudfront.create_invalidation(
DistributionId=distribution_id,
InvalidationBatch={
'Paths': {
'Quantity': len(distribution_paths),
'Items': distribution_paths
},
'CallerReference': str(uuid4()),
})
# by default, will wait up to 10 minutes
cloudfront.get_waiter('invalidation_completed').wait(
DistributionId=distribution_id,
Id=invalidation_resp['Invalidation']['Id'])
except Exception as e:
failed_invalidation_list.append(distribution_id)
pass
return failed_invalidation_list
# ---------------------------------------------------------------------------------------------------
# recursive method for paginating aws cf distribution list
def paginate_distributions(marker=None):
aws_distribution_list = []
if(marker is None):
return None
else:
cf_list_response = cloudfront.list_distributions(
Marker=marker
)
aws_distribution_list.extend(cf_list_response['DistributionList']['Items'])
if(cf_list_response['DistributionList']['IsTruncated']):
aws_distribution_list.extend(paginate_distributions(cf_list_response['DistributionList']['NextMarker']))
return aws_distribution_list
# ---------------------------------------------------------------------------------------------------
# executes an "aws" cli command
def aws_command(*args):
aws = "/opt/awscli/aws" # from AwsCliLayer
logger.info("| aws %s" % ' '.join(args))
subprocess.check_call([aws] + list(args))
# ---------------------------------------------------------------------------------------------------
# sends a response to cloudformation
def cfn_send(event, context, responseStatus, responseData={}, physicalResourceId=None, noEcho=False, reason=None):
responseUrl = event['ResponseURL']
logger.info(responseUrl)
responseBody = {}
responseBody['Status'] = responseStatus
responseBody['Reason'] = reason or (
'See the details in CloudWatch Log Stream: ' + context.log_stream_name)
responseBody['PhysicalResourceId'] = physicalResourceId or context.log_stream_name
responseBody['StackId'] = event['StackId']
responseBody['RequestId'] = event['RequestId']
responseBody['LogicalResourceId'] = event['LogicalResourceId']
responseBody['NoEcho'] = noEcho
responseBody['Data'] = responseData
body = json.dumps(responseBody)
logger.info("| response body:\n" + body)
headers = {
'content-type': '',
'content-length': str(len(body))
}
try:
request = Request(responseUrl, method='PUT', data=bytes(
body.encode('utf-8')), headers=headers)
with contextlib.closing(urlopen(request)) as response:
logger.info("| status code: " + response.reason)
except Exception as e:
logger.error("| unable to send response to CloudFormation")
logger.exception(e)
def invalidation_failed(environment, sns_topic_arn, failed_invalidation_list):
failed_ids_text = ""
for id in failed_invalidation_list:
failed_ids_text = failed_ids_text + id + ", "
failed_ids_message_blocks = [
{
"type": "section",
"content": {
"text": "CloudFront Invalidations failed for: %s (%s)" % (failed_ids_text, environment)
}
}
]
for id in failed_invalidation_list:
failed_ids_message_blocks.append(
{
"type": "action",
"content": {
"text": "AWS Console %s" % id,
"url": "https://console.aws.amazon.com/cloudfront/home?region=us-east-1#distribution-settings:%s" % id
}
}
)
sns_client = boto3.client('sns')
sns_json = {
"blocks": failed_ids_message_blocks,
"environment": environment
}
sns_message = json.dumps(sns_json)
logger.info("Sns Message => %s" % sns_message)
sns_client.publish(
TopicArn=sns_topic_arn,
Message=sns_message,
Subject="Failed CloudFront Invalidations!"
)
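# ---------------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original handler): the shape of a custom resource
# event as consumed by handler() above, listing only the keys this module reads. Every
# id, name and URL is a placeholder.
EXAMPLE_EVENT = {
    "RequestType": "Create",              # or "Update" / "Delete"
    "ResponseURL": "https://cloudformation-custom-resource-response.example/placeholder",
    "StackId": "arn:aws:cloudformation:us-east-1:123456789012:stack/example/placeholder",
    "RequestId": "00000000-0000-0000-0000-000000000000",
    "LogicalResourceId": "ExampleDeployment",
    "ResourceProperties": {
        "SourceBucketNames": ["example-assets-bucket"],
        "SourceObjectKeys": ["build/artifact.zip"],
        "DestinationBucketName": "example-site-bucket",
        "DestinationBucketKeyPrefix": "app/",
        "DistributionId": "E1234567890ABC",
        "Environment": "dev",
        "SnsTopicArn": "arn:aws:sns:us-east-1:123456789012:example-topic",
    },
}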
| 2.015625
| 2
|
src/negotiating_agent/venv/lib/python3.8/site-packages/geniusweb/connection/Connectable.py
|
HahaBill/CollaborativeAI
| 1
|
12775012
|
from abc import ABC, abstractmethod
from typing import TypeVar, Generic, List
from geniusweb.connection.ConnectionEnd import ConnectionEnd
INTYPE = TypeVar('INTYPE')
OUTTYPE = TypeVar('OUTTYPE')
class Connectable(ABC, Generic[INTYPE,OUTTYPE]):
'''
A Connectable is an object that can connect on request with a provided
{@link ConnectionEnd} and then respond to incoming and outgong signals.
@param <INTYPE> the type of incoming messages
@param <OUTTYPE> the type of outgoing messages
'''
@abstractmethod
def connect(self,connection:ConnectionEnd[INTYPE, OUTTYPE] ):
'''
creates the connection. Only called if not yet connected.
@param connection the new connection
'''
@abstractmethod
def disconnect(self):
'''
Removes the connection from a connectable; the previously given
connection can not be used anymore after this call. Only called if
previously connected.
'''
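# --- Illustrative sketch (not part of the original module): a minimal, hypothetical
# concrete Connectable that only stores the ConnectionEnd it is given.
class EchoConnectable(Connectable[str, str]):
    def __init__(self):
        self._connection = None

    def connect(self, connection: ConnectionEnd[str, str]):
        # keep a reference so the object can later interact with the connection
        self._connection = connection

    def disconnect(self):
        # the previously given connection must not be used anymore after this call
        self._connection = None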
| 3
| 3
|
musicxml/xmlelement/containers.py
|
alexgorji/musicxml
| 0
|
12775013
|
<gh_stars>0
from musicxml.xmlelement.xmlchildcontainer import XMLChildContainerFactory
from musicxml.xsd.xsdcomplextype import *
from musicxml.xsd.xsdcomplextype import __all__
containers = {}
for ct in __all__[1:]:
cls = eval(ct)
if cls.get_xsd_indicator():
containers[ct] = XMLChildContainerFactory(complex_type=cls).get_child_container()
| 1.742188
| 2
|
fastwork/merge.py
|
xugongli/fastwork
| 0
|
12775014
|
import os
import pandas as pd
class MergeExcel(object):
def __init__(self, excel_filepath=None, folder_path=None, sheetname_lst=None):
"""
        : df_dict: dictionary of DataFrames, keyed by sheet name
        : sheetname_lst: list of sheet names to merge; defaults to None, i.e. all sheets are merged if not specified
"""
self.excel_filepath = excel_filepath
self.folder_path = folder_path
self.sheetname_lst = sheetname_lst
if self.excel_filepath is not None:
new_filename = "%s_处理完成.xlsx" % os.path.basename(excel_filepath)
abs_filepath = os.path.abspath(excel_filepath)
self.new_filepath = os.path.join(os.path.dirname(abs_filepath), new_filename)
else:
self.new_filepath = os.path.join(os.path.dirname(self.folder_path),
"%s_合并结果.xlsx" % os.path.basename(self.folder_path))
def read_excel(self, excel_filepath, sheet_name=None):
"""
        Read a local Excel file and return structured pandas.DataFrame data.
        :param excel_filepath: path of the file to read
        :sheet_name: which worksheets to read; defaults to None, i.e. read all sheets
        :return: dict of pandas.DataFrame objects, key: sheet_name, values: pandas.df
"""
try:
df_dict = pd.read_excel(excel_filepath, sheet_name=sheet_name)
return df_dict
except Exception as err:
print(err)
@classmethod
def df_concat(self, df_sheet_dict):
# try:
df = pd.concat(df_sheet_dict)
print(df.index.values)
df.index = [x[0] for x in df.index]
print(df.index)
df.columns = [x for x in df.columns]
df.index.name = '工作表名称'
return df
def merge_worksheet(self):
if type(self.sheetname_lst) in [str, list] or self.sheetname_lst is None:
df_dict = self.read_excel(excel_filepath=self.excel_filepath,
sheet_name=self.sheetname_lst)
print("【注意】当前共有%s个工作表需要合并!" % len(df_dict))
for sheet_name, df in df_dict.items():
print("工作表名称【%s】: 共%s行" % (sheet_name, df.shape[0]))
df_merge = pd.concat(df_dict)
df_merge.index = [x[0] for x in df_merge.index]
df_merge.index.name = '工作表名称'
else:
print("当前指定的参数有误!,请检查后重新输入!")
df_merge = None
if df_merge is not None:
return df_merge
else:
return None
def merge_workbooks(self):
'''
        :param folder_path: path of the folder to scan
        :param end_str_lst: list of file extensions to read
:return: pd.DataFrame
'''
folder_path = self.folder_path
end_str_lst = ['.xlsx', '.xls']
end_str_tuble = tuple(end_str_lst)
        # if the path is not absolute, convert it to an absolute path
if not os.path.isabs(folder_path):
folder_path = os.path.abspath(folder_path)
df_all_lst = []
for root, dirs, files in os.walk(folder_path, topdown=True):
'''
            root: the folder currently being traversed
            dirs: list, names of all folders directly in root (excluding their sub-directories)
            files: list, names of all files directly in root (excluding sub-directories)
'''
excel_files = [file for file in files if file.endswith(end_str_tuble) and not file.startswith(("~$"))]
print(root)
print(dirs)
print(files)
print(excel_files)
            # if the excel_files list is not empty
if excel_files:
for excel_file in excel_files:
df_dict = pd.read_excel(os.path.join(root, excel_file), sheet_name=None)
if self.sheetname_lst is not None:
sheetname_lst = list(df_dict.keys())
keep_key = sheetname_lst[0]
df_dict = {keep_key: df_dict[keep_key]}
df_merge = pd.concat(df_dict)
df_merge.index = [x[0] for x in df_merge.index]
df_merge.index.name = '工作表名称'
col_name = list(df_merge.columns)
df_merge['excel文件名称'] = excel_file
df_merge['工作表名称'] = df_merge.index
df_merge = pd.DataFrame(df_merge, columns=['excel文件名称', '工作表名称'] + col_name)
df_all_lst.append(df_merge)
df_all = pd.concat(df_all_lst)
return df_all
def to_excel(self, df, sheet_name="sheet1"):
writer = pd.ExcelWriter('%s' % self.new_filepath, engine='xlsxwriter')
df.to_excel(writer, sheet_name=sheet_name,
startrow=0,
index=False)
workbook = writer.book
        # cell style for the content area
txt_cell_format = workbook.add_format({'bold': False, 'italic': False, 'font_size': 10, 'border': 1})
txt_cell_format.set_align('left')
worksheet = writer.sheets[sheet_name]
        # worksheet.set_column(start column, end column, column width, format)
worksheet.set_column(0, df.shape[1], 15, txt_cell_format)
writer.save()
if __name__ == "__main__":
folder_path = r"C:\Users\soari\Desktop\excel"
merge = MergeExcel(folder_path=folder_path, sheetname_lst=None)
df_all = merge.merge_workbooks()
print(df_all)
merge.to_excel(df_all)
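    # --- Illustrative sketch (not part of the original script): merge_worksheet() handles the
    # sheets of a single workbook in the same way; the path below is a placeholder.
    single = MergeExcel(excel_filepath=r"C:\Users\soari\Desktop\excel\workbook.xlsx")
    df_sheets = single.merge_worksheet()
    if df_sheets is not None:
        # reset_index() keeps the sheet-name index as a column, since to_excel() writes with index=False
        single.to_excel(df_sheets.reset_index())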
| 3.03125
| 3
|
configs/experiments.py
|
YuXie96/time
| 0
|
12775015
|
"""Experiments and corresponding analysis.
format adapted from https://github.com/gyyang/olfaction_evolution
Each experiment is described by a function that returns a list of configurations
function name is the experiment name
combinatorial mode:
config_ranges should not have repetitive values
sequential mode:
config_ranges values should have equal length,
otherwise this will only loop through the shortest one
control mode:
base_config must contain keys in config_ranges
"""
import os
import copy
from collections import OrderedDict
import logging
import numpy as np
from configs.config_global import ROOT_DIR, LOG_LEVEL
from configs.configs import BaseConfig
from utils.config_utils import vary_config
from analysis.train_analysis import plot_train_log
import evaluate
from analysis import plots
def init_analysis(configs_):
logging.basicConfig(level=LOG_LEVEL)
exp_name = configs_[0].experiment_name
print('Analyzing ' + exp_name)
exp_path = os.path.join(ROOT_DIR, 'experiments', exp_name) + os.sep
plot_train_log([exp_path], exp_name=exp_name)
# -----------------------------------------------------
# experiments
# -----------------------------------------------------
def timescale():
config = BaseConfig()
config.experiment_name = 'timescale'
config.rnn_type = 'plainRNN'
config.t_scale = 1.0
config.augment = None
config.use_velocity = False
config.context = None
config.context_w = 10
config.hidden_size = 64
config.num_ep = 40
config_ranges = OrderedDict()
config_ranges['rnn_type'] = ['plainRNN',
'CTRNN',
'LSTM',
'GRU',
'RNNSTSP']
configs = vary_config(config, config_ranges, mode='combinatorial')
return configs
def timescale_aug():
config = BaseConfig()
config.experiment_name = 'timescale_aug'
config.rnn_type = 'plainRNN'
config.t_scale = 1.0
config.augment = (0.5, 1.5)
config.use_velocity = False
config.context = None
config.context_w = 10
config.hidden_size = 64
config.num_ep = 40
config_ranges = OrderedDict()
config_ranges['rnn_type'] = ['plainRNN',
'CTRNN',
'LSTM',
'GRU',
'RNNSTSP']
configs = vary_config(config, config_ranges, mode='combinatorial')
return configs
def timecode():
config = BaseConfig()
config.experiment_name = 'timecode'
config.rnn_type = 'plainRNN'
config.t_scale = 1.0
config.augment = None
config.use_velocity = False
config.context = 'zero'
config.context_w = 10
config.hidden_size = 64
config.num_ep = 40
config_ranges = OrderedDict()
config_ranges['context'] = ['zero', 'noise', 'scalar', 'ramping',
'clock', 'stairs_end', 'stairs_start']
configs = vary_config(config, config_ranges, mode='combinatorial')
return configs
def timecode_aug():
config = BaseConfig()
config.experiment_name = 'timecode_aug'
config.rnn_type = 'plainRNN'
config.t_scale = 1.0
config.augment = (0.5, 1.5)
config.use_velocity = False
config.context = 'zero'
config.context_w = 10
config.hidden_size = 64
config.num_ep = 40
config_ranges = OrderedDict()
config_ranges['context'] = ['zero', 'noise', 'scalar', 'ramping',
'clock', 'stairs_end', 'stairs_start']
configs = vary_config(config, config_ranges, mode='combinatorial')
return configs
# -----------------------------------------------------
# analysis
# -----------------------------------------------------
def timescale_analysis():
configs = timescale()
init_analysis(configs)
t_scale_list = np.arange(0.1, 2, 0.1)
acc_list = np.zeros_like(t_scale_list)
for cfg in configs:
for i_s, t_scale in enumerate(t_scale_list):
new_cfg = copy.deepcopy(cfg)
new_cfg.t_scale = t_scale
acc_list[i_s] = evaluate.eval_total_acc(new_cfg)
np.save(os.path.join(cfg.save_path, 'tscalelist.npy'), t_scale_list)
np.save(os.path.join(cfg.save_path, 'acclist.npy'), acc_list)
plots.plot_gen(t_scale_list, acc_list, cfg.rnn_type)
plots.plot_group_gen(configs, configs[0].experiment_name, mode='rnn_type')
def timescale_aug_analysis():
configs = timescale_aug()
init_analysis(configs)
t_scale_list = np.arange(0.1, 2, 0.1)
acc_list = np.zeros_like(t_scale_list)
for cfg in configs:
for i_s, t_scale in enumerate(t_scale_list):
new_cfg = copy.deepcopy(cfg)
new_cfg.t_scale = t_scale
acc_list[i_s] = evaluate.eval_total_acc(new_cfg)
np.save(os.path.join(cfg.save_path, 'tscalelist.npy'), t_scale_list)
np.save(os.path.join(cfg.save_path, 'acclist.npy'), acc_list)
plots.plot_gen(t_scale_list, acc_list, cfg.rnn_type)
plots.plot_group_gen(configs, configs[0].experiment_name, mode='rnn_type')
def timecode_analysis():
configs = timecode()
init_analysis(configs)
t_scale_list = np.arange(0.1, 2, 0.1)
acc_list = np.zeros_like(t_scale_list)
for cfg in configs:
for i_s, t_scale in enumerate(t_scale_list):
new_cfg = copy.deepcopy(cfg)
new_cfg.t_scale = t_scale
acc_list[i_s] = evaluate.eval_total_acc(new_cfg)
np.save(os.path.join(cfg.save_path, 'tscalelist.npy'), t_scale_list)
np.save(os.path.join(cfg.save_path, 'acclist.npy'), acc_list)
plots.plot_gen(t_scale_list, acc_list, cfg.context)
plots.plot_group_gen(configs, configs[0].experiment_name, mode='context')
def timecode_aug_analysis():
configs = timecode_aug()
init_analysis(configs)
t_scale_list = np.arange(0.1, 2, 0.1)
acc_list = np.zeros_like(t_scale_list)
for cfg in configs:
for i_s, t_scale in enumerate(t_scale_list):
new_cfg = copy.deepcopy(cfg)
new_cfg.t_scale = t_scale
acc_list[i_s] = evaluate.eval_total_acc(new_cfg)
np.save(os.path.join(cfg.save_path, 'tscalelist.npy'), t_scale_list)
np.save(os.path.join(cfg.save_path, 'acclist.npy'), acc_list)
plots.plot_gen(t_scale_list, acc_list, cfg.context)
plots.plot_group_gen(configs, configs[0].experiment_name, mode='context')
| 2.6875
| 3
|
onnx_tf/handlers/backend/conv_transpose.py
|
malisit/onnx-tensorflow
| 1,110
|
12775016
|
from onnx_tf.handlers.backend_handler import BackendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import partial_support
from onnx_tf.handlers.handler import ps_description
from .conv_mixin import ConvMixin
@onnx_op("ConvTranspose")
@partial_support(True)
@ps_description("ConvTranspose with dilations != 1, or " +
"transposed convolution for 4D or higher " +
"are not supported in Tensorflow.")
class ConvTranspose(ConvMixin, BackendHandler):
@classmethod
def version_1(cls, node, **kwargs):
return cls.conv(node, kwargs["tensor_dict"], transpose=True)
@classmethod
def version_11(cls, node, **kwargs):
return cls.conv(node, kwargs["tensor_dict"], transpose=True)
| 2.0625
| 2
|
binding/__init__.py
|
zauberzeug/binding
| 1
|
12775017
|
from binding.binding import BindableProperty, update, reset
| 1.289063
| 1
|
Clase12/iris_seaborn.py
|
qagustina/python-exercises
| 0
|
12775018
|
# 12.10
import pandas as pd
import seaborn as sns
from sklearn.datasets import load_iris
iris_dataset = load_iris()
# create a DataFrame from the flower data
# label the columns using the strings in iris_dataset.feature_names
iris_dataframe = pd.DataFrame(iris_dataset['data'], columns = iris_dataset.feature_names)
# add the target attribute to the DataFrame
iris_dataframe['target'] = iris_dataset['target']
# replace numeric target values with species names
iris_dataframe['target'].replace([0], 'Setosa', inplace=True)
iris_dataframe['target'].replace([1], 'Versicolor', inplace=True)
iris_dataframe['target'].replace([2], 'Virginica', inplace=True)
sns.pairplot(iris_dataframe, hue="target")
| 3.015625
| 3
|
examples/__init__.py
|
tehdragonfly/pyramid_services_viewmapper
| 0
|
12775019
|
from __future__ import annotations
from pyramid.config import Configurator
from wsgiref.simple_server import make_server
from zope.interface import Interface, implementer
from pyramid_services_viewmapper import ServiceInjector as SI, ServiceViewMapper
class IExampleService(Interface):
def example(self):
pass
@implementer(IExampleService)
class ExampleService:
def example(self):
return "example"
def function_view(request, example_service: IExampleService):
return {"function": example_service.example()}
class ClassView:
def __init__(self, request, example_service: IExampleService):
self.request = request
self.example_service = example_service
def __call__(self):
return {"class": self.example_service.example()}
class NamedService:
def named(self):
return "named"
def named_view(request, named_service: SI(name="named_service")):
return {"function": named_service.named()}
def implicit_named_view(request, named_service: SI):
return {"function": named_service.named()}
if __name__ == '__main__':
config = Configurator()
config.include("pyramid_services")
config.register_service(ExampleService(), IExampleService)
config.register_service(NamedService(), name="named_service")
config.include("pyramid_services_viewmapper")
config.add_route("function", "/function")
config.add_view(function_view, route_name="function", renderer="json")
config.add_route("class", "/class")
config.add_view(ClassView, route_name="class", renderer="json")
config.add_route("named", "/named")
config.add_view(named_view, route_name="named", renderer="json")
config.add_route("implicit_named", "/implicit_named")
config.add_view(implicit_named_view, route_name="implicit_named", renderer="json")
server = make_server('0.0.0.0', 8080, config.make_wsgi_app())
server.serve_forever()
| 2.34375
| 2
|
motivating-examples/maze-solver/maze.py
|
dgrafov/redi-python-intro
| 8
|
12775020
|
<reponame>dgrafov/redi-python-intro
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import random
import os
import time
MAZE_WIDTH = 20
MAZE_HEIGHT = 20
MAX_STEPS = 5000
PAUSE_BETWEEN_STEPS = 0.05
class Maze:
cur_x = 0
cur_y = 0
entrance = (0, 0)
def __init__(self, height, width):
self.height = height
self.width = width
self.exit = (height, width)
self.walls = [[False for i in range(width)] for j in range(height)]
self.visited = [[False for i in range(width)] for j in range(height)]
self.visited_count = [[0 for i in range(width)] for j in range(height)]
def cur_position(self):
return self.cur_y, self.cur_x
def display(self):
symbols = {"CUR_POSITION": u'ME', "VISITED": u'\u2591\u2591', "NOT_VISITED": ' ', "WALL": u'\u2588\u2588',
"H_BOUND": u'\u2501\u2501', "V_BOUND": u'\u2503',
"UP_LEFT": u'\u250F', "UP_RIGHT": u'\u2513', "BOTTOM_LEFT": u'\u2517', "BOTTOM_RIGHT": u'\u251B'}
print("Maze size: {}*{}\nEntrance: {}, Exit: {}".format(self.height, self.width, self.entrance, self.exit))
str = symbols.get("UP_LEFT")
for j in range(self.width):
if (0, j) != self.entrance and (0, j) != self.exit:
str = str + symbols.get("H_BOUND")
else:
str = str + ' '
print(str + symbols.get("UP_RIGHT"))
for i in range(self.height):
if (i, 0) != self.entrance and (i, 0) != self.exit:
str = symbols.get("V_BOUND")
else:
str = ' '
for j in range(self.width):
if (i, j) == self.cur_position():
str = str + symbols.get("CUR_POSITION")
else:
if self.walls[i][j]:
str = str + symbols.get("WALL")
elif self.visited[i][j]:
str = str + symbols.get("VISITED")
else:
str = str + symbols.get("NOT_VISITED")
if (i, j) != self.entrance and (i, j) != self.exit:
str = str + symbols.get("V_BOUND")
print(str)
str = symbols.get("BOTTOM_LEFT")
for j in range(self.width):
if (self.height - 1, j) != self.entrance and (self.height - 1, j) != self.exit:
str = str + symbols.get("H_BOUND")
else:
str = str + ' '
print(str + symbols.get("BOTTOM_RIGHT"))
def clear(self):
self.cur_y = self.entrance[0]
self.cur_x = self.entrance[1]
self.visited = [[False for i in range(self.width)] for j in range(self.height)]
self.visited_count = [[0 for i in range(self.width)] for j in range(self.height)]
def randomize(self, walls_density=0.25):
self.entrance = self.get_random_position_on_maze_boundary()
self.exit = self.get_random_position_on_maze_boundary()
self.cur_y = self.entrance[0]
self.cur_x = self.entrance[1]
for i in range(self.height):
for j in range(self.width):
if random.random() < walls_density and (i, j) != self.entrance and (i, j) != self.exit:
self.walls[i][j] = True
def get_random_position_on_maze_boundary(self):
boundaries = ['UP', 'LEFT', 'BOTTOM', 'RIGHT']
wall = random.choice(boundaries)
if wall == 'UP':
return (0, random.choice(range(self.width)))
elif wall == 'BOTTOM':
return (self.height - 1, random.choice(range(self.width)))
elif wall == 'LEFT':
return (random.choice(range(self.height)), 0)
elif wall == 'RIGHT':
return (random.choice(range(self.height)), self.width - 1)
def move(self, direction):
if direction == 'UP':
if self.cur_y > 0 and not self.walls[self.cur_y - 1][self.cur_x]:
self.cur_y = self.cur_y - 1
else:
return False
elif direction == 'DOWN':
if self.cur_y < self.height - 1 and not self.walls[self.cur_y + 1][self.cur_x]:
self.cur_y = self.cur_y + 1
else:
return False
elif direction == 'LEFT':
if self.cur_x > 0 and not self.walls[self.cur_y][self.cur_x - 1]:
self.cur_x = self.cur_x - 1
else:
return False
elif direction == 'RIGHT':
if self.cur_x < self.width - 1 and not self.walls[self.cur_y][self.cur_x + 1]:
self.cur_x = self.cur_x + 1
else:
return False
self.visited[self.cur_y][self.cur_x] = True
self.visited_count[self.cur_y][self.cur_x] += 1
return True
def chaotic_run(self):
for i in range(MAX_STEPS):
time.sleep(PAUSE_BETWEEN_STEPS)
os.system('clear')
self.move(random.choice(('UP', 'DOWN', 'LEFT', 'RIGHT')))
self.display()
if self.cur_position() == self.exit:
print("Successfully reached exit. {} steps were needed".format(i + 1))
return True
print("This is CHAOTIC RUN\nTrying to get out of this maze... {} steps done".format(i + 1))
print("Maximum of {} steps reached, exit not found...".format(i + 1))
return False
def smart_run(self):
for i in range(MAX_STEPS):
time.sleep(PAUSE_BETWEEN_STEPS)
os.system('clear')
self.move(self.get_best_move())
self.display()
if self.cur_position() == self.exit:
print("Sucessfully reached exit. {} steps were needed".format(i + 1))
return True
print("This is SMART RUN\nTrying to get out of this maze... {} steps done".format(i + 1))
print("Maximum of {} steps reached, exit not found...".format(i + 1))
return False
def get_best_move(self):
moves = ['UP', 'DOWN', 'LEFT', 'RIGHT']
options = {}
new_position = (self.cur_y, self.cur_x)
for move in moves:
if move == 'UP':
new_position = (self.cur_y - 1, self.cur_x)
elif move == 'DOWN':
new_position = (self.cur_y + 1, self.cur_x)
elif move == 'LEFT':
new_position = (self.cur_y, self.cur_x - 1)
elif move == 'RIGHT':
new_position = (self.cur_y, self.cur_x + 1)
if new_position[0] >= 0 \
and new_position[0] < self.height \
and new_position[1] >= 0 \
and new_position[1] < self.width \
and not self.walls[new_position[0]][new_position[1]]:
options[move] = self.get_distance(self.exit, new_position) \
* (self.visited_count[new_position[0]][new_position[1]] + 1)
return min(options, key=options.get)
def get_distance(self, point1, point2):
return abs(point1[0] - point2[0]) + abs(point1[1] - point2[1])
os.environ['TERM'] = 'xterm'
print('Maze generated:')
m1 = Maze(MAZE_WIDTH, MAZE_HEIGHT)
m1.randomize(0.25)
m1.display()
while True:
command = input('Press:\ns - for smart run\nc - for chaotic run\nother key - to exit\n')
if command == 's':
m1.smart_run()
elif command == 'c':
m1.chaotic_run()
else:
break
command = input('Press:\nc - to generate a new maze and continue\nr - to continue with the same maze'
'\nother key - to exit\n')
if command == 'c':
print('Maze generated:')
m1 = Maze(MAZE_WIDTH, MAZE_HEIGHT)
m1.randomize(0.25)
m1.display()
elif command == 'r':
m1.clear()
m1.display()
else:
break
| 3.921875
| 4
|
tests/optimization_test.py
|
NREL/flasc
| 3
|
12775021
|
<reponame>NREL/flasc
import numpy as np
import pandas as pd
from pandas.core.base import DataError
import unittest
from flasc.optimization import (
find_timeshift_between_dfs,
match_y_curves_by_offset
)
def generate_dataframes():
# Define a reference signal
t = pd.date_range(
"2019-01-10 12:15:01",
"2019-01-10 16:15:01",
freq='1s'
)
t1 = pd.to_datetime("2000-01-01 00:00:00")
y1 = 180 + 180 * np.sin(0.001 * (t - t1) / np.timedelta64(1, 's'))
df1 = pd.DataFrame({"time": t, "wd_000": y1})
# Now define similar signal but shifted by 5 minutes
t2 = pd.to_datetime("2000-01-01 00:5:00")
y2 = 180 + 180 * np.sin(0.001 * (t - t2) / np.timedelta64(1, 's'))
df2 = pd.DataFrame({"time": t, "wd_000": y2})
return df1, df2
class TestOptimization(unittest.TestCase):
def test_estimation_dy_offset(self):
# Define a reference signal
t = np.linspace(0, 4*np.pi, 1000)
yref = 180 + 180 * np.sin(0.01 * t)
# Define a shifted signal with 360 deg angle wrapping
ytest = 44.0 + yref
ytest = ytest + 5 * np.random.randn(1000)
ytest[ytest >= 360.0] += -360.0
# Estimate shift in curves between the two
dy, _ = match_y_curves_by_offset(
yref,
ytest,
dy_eval=np.arange(-180.0, 180.0, 1.0),
angle_wrapping=True,
)
self.assertAlmostEqual(dy, 44.0)
def test_exceptions(self):
df1, df2 = generate_dataframes()
df1_man = df1.copy()
df1_man.loc[0, "time"] = df1_man.loc[10, "time"]
df2_man = df2.copy()
df2_man.loc[0, "time"] = df2_man.loc[10, "time"]
self.assertRaises(
DataError,
find_timeshift_between_dfs,
df1_man, df2, cols_df1=["wd_000"], cols_df2=["wd_000"],
)
self.assertRaises(
DataError,
find_timeshift_between_dfs,
df1, df2_man, cols_df1=["wd_000"], cols_df2=["wd_000"],
)
self.assertRaises(
NotImplementedError,
find_timeshift_between_dfs,
df1, df2, cols_df1=["wd_000"], cols_df2=["wd_000"],
correct_y_shift=True, use_circular_statistics=False,
)
def test_estimation_df_timeshift(self):
df1, df2 = generate_dataframes()
out = find_timeshift_between_dfs(
df1=df1,
df2=df2,
cols_df1=["wd_000"],
cols_df2=["wd_000"],
use_circular_statistics=True,
correct_y_shift=False,
opt_bounds=[np.timedelta64(-60, 'm'), np.timedelta64(60, 'm')],
opt_Ns=13,
)
x = np.timedelta64(out[0]["x_opt"]) / np.timedelta64(1, 's')
self.assertAlmostEqual(x, -300.0) # Should be equal to 5 minute shift
# Try the same code with correct_y_shift=True
out = find_timeshift_between_dfs(
df1=df1,
df2=df2,
cols_df1=["wd_000"],
cols_df2=["wd_000"],
use_circular_statistics=True,
correct_y_shift=True,
opt_bounds=[np.timedelta64(-60, 'm'), np.timedelta64(60, 'm')],
opt_Ns=13,
)
x = np.timedelta64(out[0]["x_opt"]) / np.timedelta64(1, 's')
self.assertAlmostEqual(x, -300.0) # Should be equal to 5 minute shift
# No angle wrapping so should even work with use_circular_stats=False
out = find_timeshift_between_dfs(
df1=df1,
df2=df2,
cols_df1=["wd_000"],
cols_df2=["wd_000"],
use_circular_statistics=False,
correct_y_shift=False,
opt_bounds=[np.timedelta64(-60, 'm'), np.timedelta64(60, 'm')],
)
x = np.timedelta64(out[0]["x_opt"]) / np.timedelta64(1, 's')
self.assertAlmostEqual(x, -300.0, places=2) # Should be almost equal to 5 minute shift
| 2.640625
| 3
|
test_work/tree_views/core/views.py
|
Netromnik/python
| 0
|
12775022
|
from django.views.generic import TemplateView
class Slide(TemplateView):
pass
| 1.007813
| 1
|
pelops/features/feature_producer.py
|
dave-lab41/pelops
| 48
|
12775023
|
<filename>pelops/features/feature_producer.py<gh_stars>10-100
import numpy as np
from PIL import Image
from pelops.datasets.chipper import Chipper
from pelops.datasets.featuredataset import FeatureDataset
class FeatureProducer(object):
def __init__(self, chip_producer):
self.chip_producer = chip_producer
self.set_variables()
def return_features(self):
if isinstance(self.chip_producer, Chipper):
chips = []
chip_keys = []
for chip_list in self.chip_producer:
chips.extend(chip_list)
for i, chip in enumerate(chip_list):
chip_keys.append('{}_{}'.format(chip.frame_number, i))
else:
chips = []
chip_keys = []
for chip_key, chip in self.chip_producer.chips.items():
chips.append(chip)
chip_keys.append(chip_key)
feats = np.zeros((len(chips), self.feat_size), dtype=np.float32)
for i, chip in enumerate(chips):
feats[i] = self.produce_features(chip)
return chip_keys, chips, feats
@staticmethod
def get_image(chip):
if hasattr(chip, 'img_data'):
img = Image.fromarray(chip.img_data)
return img.convert('RGB')
else:
return Image.open(chip.filepath)
def produce_features(self, chip):
"""Takes a chip object and returns a feature vector of size
self.feat_size. """
raise NotImplementedError("produce_features() not implemented")
def save_features(self, output_filename):
"""
Calculate features and save as a "FeatureDataset"
Args:
filename:
Returns:
"""
# TODO: See if this function should save the features in memory
if isinstance(self.chip_producer, Chipper):
raise NotImplementedError("Only ChipDatasets are supported at this time")
chip_keys, chips, features = self.return_features()
FeatureDataset.save(output_filename, chip_keys, chips, features)
def set_variables(self):
"""Child classes should use this to set self.feat_size, and any other
needed variables. """
self.feat_size = None # Set this in your inherited class
raise NotImplementedError("set_variables() is not implemented")
| 2.75
| 3
|
PWGJE/EMCALJetTasks/Tracks/analysis/test/PlotScaledTriggered.py
|
maroozm/AliPhysics
| 114
|
12775024
|
'''
Created on 22.09.2014
@author: markusfasel
'''
from PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics import SinglePanelPlot, GraphicsObject, Style, Frame
from PWGJE.EMCALJetTasks.Tracks.analysis.correction.TriggeredSpectrumScaler import TriggeredSpectrumScaler
from PWGJE.EMCALJetTasks.Tracks.analysis.correction.SpectrumCombiner import SpectrumCombiner
from ROOT import kRed, kBlack, kBlue
class PlotScaledTriggeredToMinBias(SinglePanelPlot):
    '''
    Plot comparing the min-bias spectrum to the re-scaled triggered spectrum
    and to the combined spectrum built from both.
    '''
def __init__(self, minbiasspectrum, triggeredSpectrum):
'''
Constructor
'''
SinglePanelPlot.__init__(self)
self.__minbiasSpectrum = GraphicsObject(minbiasspectrum, Style(kRed,25))
triggeredSpectrumMaker = TriggeredSpectrumScaler(minbiasspectrum, triggeredSpectrum)
self.__triggeredSpectrum = GraphicsObject(triggeredSpectrumMaker.GetScaledTriggeredSpectrum(), Style(kBlue, 24))
combinedSpectrumMaker = SpectrumCombiner(minbiasspectrum, self.__triggeredSpectrum.GetData())
self.__combinedSpectrum = GraphicsObject(combinedSpectrumMaker.MakeCombinedSpectrum(50.), Style(kBlack, 20))
self.__labeltext = None
def SetLabel(self, label):
self.__labeltext = label
def Create(self):
self._OpenCanvas("triggerSpectrumScalerPlot", "Compare scaled trigger to minbias")
pad = self._GetFramedPad()
#pad.GetPad().SetLogx()
pad.GetPad().SetLogy()
frame = Frame("framecomp", 0.1, 100, 1e-10, 2)
frame.SetXtitle("p_{t} (GeV/c)")
frame.SetYtitle("1/N_{ev} dN/dp_{t} ((GeV/c)^{-1})")
pad.DrawFrame(frame)
pad.DrawGraphicsObject(self.__combinedSpectrum, True, "Combined")
pad.DrawGraphicsObject(self.__minbiasSpectrum, True, "MinBias")
pad.DrawGraphicsObject(self.__triggeredSpectrum, True, "Triggered")
pad.CreateLegend(0.55, 0.75, 0.89, 0.89)
if self.__labeltext:
pad.CreateLabel(0.15, 0.15, 0.45, 0.2, self.__labeltext)
| 1.570313
| 2
|
HW7/Mengyuan_HW7.py
|
MengyuanZoe/HomeIn
| 5
|
12775025
|
"""
Module Functions:
Plot King County House Rate data,
in the form of sale listing cluster map and density heat map.
"""
import os
import webbrowser
import pandas as pd
import folium
from folium import plugins
# Set global settings and macros.
MAX_SHOW = 1000
HOUSE_URL = 'houses.html'
HOUSE_HEAT_URL = "househeatmap.html"
CRIME_HEAT_URL = "crimeheatmap.html"
pd.set_option('display.max_columns', None) # To display all columns
# Read in king county house data.
data = pd.read_csv("../../Data/kc_house_data.csv", parse_dates=['date'])
data['zipcode'] = data['zipcode'].astype(str)
## Create one map showing each listing in the house dataset and show it in the browser.
# Use folium Map function to plot underlying basic map.
house_map = folium.Map(location=[data['lat'].mean(), data['long'].mean()], zoom_start=10)
# Define clusters to show house clusters to add to the underlying houses_map.
marker_cluster = folium.MarkerCluster().add_to(house_map)
# Iteratively add interactive clusters to the basic map.
# When mouse-over the cluster, show house listing information:
# sqft, price.
for iters, row in data[0:MAX_SHOW].iterrows():
folium.Marker([row["lat"], row["long"]],
popup="{0} sqft: Sold for $ {1}"\
.format(row["sqft_living"], row["price"])).add_to(marker_cluster)
# Save the house cluster map to an html file and open it in the browser.
house_map.save(HOUSE_URL)
webbrowser.open('file://' + os.path.realpath(HOUSE_URL), new=2)
## Create one map showing the frequency of house sales and show it in the browser.
# Use folium Map function to plot underlying basic map
houses_heatmap = folium.Map(location=[data['lat'].mean(), data['long'].mean()], zoom_start=10)
# Add heatmap on top of the basic map.
houses_heatmap.add_children(
plugins.HeatMap([[row["lat"], row["long"], row["price"]]
for iters, row in data.iterrows()],
min_opacity=0.5, max_zoom=18, radius=8)) #[0:MAX_SHOW]
# Save the house sale frequency heat map to an html file and open it in the browser.
houses_heatmap.save(HOUSE_HEAT_URL)
webbrowser.open('file://' + os.path.realpath(HOUSE_HEAT_URL), new=2)
| 3.78125
| 4
|
yangTools/scripts/ytPlugin.py
|
mightyang/yangTools
| 1
|
12775026
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : scriptsytPlugin.py
# Author : yang <<EMAIL>>
# Date : 04.03.2019
# Last Modified Date: 13.03.2019
# Last Modified By : yang <<EMAIL>>
from ytLoggingSettings import yl
import ytVariables
import ytVersion
import platform
import os
# set environment path separator
envSeparator = ':'
if platform.system() == 'Windows':
    envSeparator = ';'
class ytIcon():
    def __init__(self, icon='play.ico'):
        # use a per-instance icon dict so ytIcon instances do not share state
        self.icon = {}
        self.status = ytVariables.ytIcon.ytIcon_status_stopped
        self.icon[self.status] = icon
        self.icon[ytVariables.ytIcon.ytIcon_status_running] = 'stop.ico'
def getIcon(self):
if self.status in self.icon:
return self.icon[self.status]
return self.icon[ytVariables.ytIcon.ytIcon_status_stopped]
def setIcon(self, icon, status):
if status in ytVariables.ytIcon.__dict__.values():
i = self.findIcon(icon)
if i:
self.icon[status] = i
else:
yl.error('can not find icon: %s, use default icon' % icon)
else:
yl.error('TypeError: status need ytVariables.ytIcon.status')
def setStatus(self, status):
if status in ytVariables.ytIcon.__dict__.values():
self.status = status
else:
yl.error('TypeError: status need ytVariables.ytIcon.status')
def findIcon(self, icon):
        # return the path directly if it already exists
        if os.path.isfile(icon):
            return icon
        # otherwise search for the icon in YT_ICON_PATH
        paths = os.environ['YT_ICON_PATH'].split(envSeparator)
for p in paths:
ip = os.path.join(p, icon)
if os.path.isfile(ip):
return ip
class ytPlugin():
def __init__(self):
self.name = 'ytPlugin'
self.version = ytVersion.ytVersion()
self.help = ''
self.icon = ytIcon()
def ytStart(self):
pass
def ytStop(self):
pass
def isRunning(self):
pass
class ytRegeditPlugin(object):
def __new__(cls, plugin):
if isinstance(plugin, ytPlugin):
            return object.__new__(cls)
else:
yl.error('TypeError: plugin need ytPlugin, but: %s' % str(type(plugin)))
def __init__(self, plugin):
self.plugin = plugin
self.startCallbackList = []
self.startedCallbackList = []
self.stopCallbackList = []
self.stoppedCallbackList = []
def go(self):
if not self.plugin.isRunning():
try:
self.startCallback()
self.plugin.ytStart()
self.startedCallback()
            except Exception as e:
yl.error(e.message)
else:
try:
self.stopCallback()
self.plugin.ytStop()
self.stoppedCallback()
            except Exception as e:
yl.error(e.message)
def stop(self):
try:
self.stopCallback()
self.plugin.ytStop()
self.stoppedCallback()
        except Exception as e:
yl.error(e.message)
def getName(self):
return self.plugin.name
def getTooltip(self):
return self.plugin.help
def getIcon(self):
return self.plugin.icon.getIcon()
def startCallback(self):
if len(self.startCallbackList) > 0:
yl.debug('startCallback of plugin: %s ' % self.plugin.name)
try:
for c in self.startCallbackList:
c[0](self, *c[1])
except Exception as e:
yl.error(e.message)
def startedCallback(self):
if len(self.startedCallbackList) > 0:
yl.debug('startedCallback of plugin: %s ' % self.plugin.name)
try:
for c in self.startedCallbackList:
c[0](self, *c[1])
except Exception as e:
yl.error(e.message)
def stopCallback(self):
if len(self.stopCallbackList) > 0:
yl.debug('stopCallback of plugin: %s ' % self.plugin.name)
try:
for c in self.stopCallbackList:
c[0](self, *c[1])
except Exception as e:
yl.error(e.message)
def stoppedCallback(self):
if len(self.stoppedCallbackList) > 0:
yl.debug('stoppedCallback of plugin: %s ' % self.plugin.name)
try:
for c in self.stoppedCallbackList:
c[0](self, *c[1])
except Exception as e:
yl.error(e.message)
    def addStartCallback(self, callback):
        '''callback must be a (func, args) tuple; func is called as func(regedit_plugin, *args)'''
        self.startCallbackList.append(callback)
    def addStartedCallback(self, callback):
        '''callback must be a (func, args) tuple; func is called as func(regedit_plugin, *args)'''
        self.startedCallbackList.append(callback)
    def addStopCallback(self, callback):
        '''callback must be a (func, args) tuple; func is called as func(regedit_plugin, *args)'''
        self.stopCallbackList.append(callback)
    def addStoppedCallback(self, callback):
        '''callback must be a (func, args) tuple; func is called as func(regedit_plugin, *args)'''
        self.stoppedCallbackList.append(callback)
def removeStartCallback(self, func):
i = [f[0] for f in self.startCallbackList].index(func)
if i:
self.startCallbackList.pop(i)
def removeStartedCallback(self, func):
i = [f[0] for f in self.startedCallbackList].index(func)
if i:
self.startedCallbackList.pop(i)
def removeStopCallback(self, func):
i = [f[0] for f in self.stopCallbackList].index(func)
if i:
self.stopCallbackList.pop(i)
def removeStoppedCallback(self, func):
i = [f[0] for f in self.stoppedCallbackList].index(func)
if i:
self.stoppedCallbackList.pop(i)
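# Illustrative usage sketch, not part of the original module. It assumes a concrete
# ytPlugin subclass named MyPlugin exists (hypothetical); callbacks receive the wrapping
# ytRegeditPlugin instance as their first argument, followed by the registered args tuple.
#
#   def on_started(regedit_plugin, label):
#       yl.debug('%s started (%s)' % (regedit_plugin.getName(), label))
#
#   wrapped = ytRegeditPlugin(MyPlugin())
#   wrapped.addStartedCallback((on_started, ('demo',)))
#   wrapped.go()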
| 2.40625
| 2
|
tests/unit/test_domain.py
|
be-rock/coin-flipper
| 1
|
12775027
|
from collections import defaultdict
def test_can_obtain_coinflip_results(coin_fixture):
number_of_flips = 10
results = coin_fixture.flip(number_of_flips=number_of_flips)
assert isinstance(results, defaultdict)
assert results["heads"] + results["tails"] == number_of_flips
| 2.515625
| 3
|
stats/merging_hierarchy_mapping/process_merging_tmp.py
|
beneisner/partnet_seg_exps
| 70
|
12775028
|
import os
import sys
in_fn = sys.argv[1]
out_fn = sys.argv[2]
fin = open(in_fn, 'r')
fout = open(out_fn, 'w')
for item in fin.readlines():
data = item.rstrip().split()
if len(data[-1]) == 0: data = data[:-1]
if len(data) == 4:
fout.write('%s %s\n' % (data[-1], data[-1]))
else:
fout.write('%s %s\n' % (data[-2], data[-1]))
fin.close()
fout.close()
| 2.640625
| 3
|
hsi_to_rgb.py
|
wkiino/Hyperspectral_to_rgb_image
| 3
|
12775029
|
from pathlib import Path
import numpy as np
from PIL import Image
def load_light_distribution(name="lamp_spectrum.csv"):
sd_light_source = np.loadtxt(name, skiprows=1, dtype="float")
sd_light_source = sd_light_source[np.where(sd_light_source[:, 0] >= 400)]
# rindx = np.where(sd_light_source[:, 0] >= 400) and np.where(sd_light_source[:, 0] <= 600)
sd_light_source = sd_light_source[:, 1:2]
# print("sum", np.sum(sd_light_source))
sd_light_source = sd_light_source[::20]
sd_light_source = sd_light_source[:44]
# print(sd_light_source.shape)
return sd_light_source
def load_illuminantA(name="A.csv"):
sd_light_source = np.loadtxt(name, skiprows=1, dtype="float")
sd_light_source = sd_light_source[np.where(sd_light_source[:, 0] >= 400)]
sd_light_source = sd_light_source[:, 1:2]
# print("sum",np.sum(sd_light_source))
# sd_light_source = sd_light_source / np.max(sd_light_source)
sd_light_source = sd_light_source[::2]
sd_light_source = sd_light_source[:44]
# print(sd_light_source)
return sd_light_source
def hsi_to_ci31931_rgb(himg, dist_name):
pass
def hsi_to_rgb(himg, dist_name):
"""
input: ハイパースペクトル画像 HSI(numpy型)
return: RGB画像(Image objedct)
"""
# 計測時にノイズとして負の値になった値を0にする
np.where(himg < 0, 0, himg)
cmf = np.loadtxt("./csvs/CIE1931-2deg-XYZ.csv", delimiter=",")
    # the HSI starts at 400 nm, so use only the >= 400 nm part of the color matching functions
cmf = cmf[np.where(cmf[:, 0] >= 400)]
    # resample from 5 nm steps to the same 10 nm steps as the HSI
cmf = cmf[::2]
cmf = cmf[:44, :]
stem = Path(dist_name).stem
if stem in ["A"]:
        # standard illuminant A treats wavelengths up to 780 nm as visible light, so limit the HSI and CMF to <= 780 nm as well
nhimg = himg[:, :, :39]
cmf = cmf[:39, :]
sd_light_source = load_illuminantA(name=dist_name)
elif stem in ["D65"]:
nhimg = himg[:, :, :44]
cmf = cmf[:44, :]
sd_light_source = load_illuminantA(name=dist_name)
else:
nhimg = himg[:, :, :44]
sd_light_source = load_light_distribution(name=dist_name)
flag_const_100 = False
ncmf = cmf[:, 1:]
nmf_multi_ld = ncmf * sd_light_source
x = nmf_multi_ld[:, 0]
y = nmf_multi_ld[:, 1]
z = nmf_multi_ld[:, 2]
if flag_const_100:
k = 100 / np.sum(y)
# print(np.sum(y))
else:
k = 1 / np.sum(y)
# print(np.sum(y))
X = np.sum(x * nhimg, axis=2)
Y = np.sum(y * nhimg, axis=2)
Z = np.sum(z * nhimg, axis=2)
XYZ = np.stack([X, Y, Z], 2)
# print(np.max(XYZ), np.min(XYZ))
# print(np.max(Y*k), np.min(Y*k))
XYZ = XYZ * k
XYZ.shape
xyz_to_r = np.array([3.2406255, -1.537208, -0.4986286])
r = np.dot(XYZ, xyz_to_r)
xyz_to_g = np.array([-0.9689307, 1.8757561, 0.0415175])
g = np.dot(XYZ, xyz_to_g)
xyz_to_b = np.array([0.0557101, -0.2040211, 1.0569959])
b = np.dot(XYZ, xyz_to_b)
rgb_img2 = np.stack([r, g, b], axis=2)
rgb_img2 = np.where(rgb_img2 < 0, 0, rgb_img2)
if flag_const_100:
        # apply the same gamma correction (gamma = 0.6) as the HSI dataset provider
# print(np.max(rgb_img2))
rgb_img2 = np.power(rgb_img2/255, 0.6)
else:
        # gamma correction for rendering XYZ to sRGB
# print(np.max(255*rgb_img2))
rgb_img2 = np.where(rgb_img2 <= 0.0031308, 12.92 * rgb_img2, 1.055 * np.power(rgb_img2, 1/2.4) - 0.055)
rgb_img2 = np.clip(rgb_img2, 0, 1)
    img = Image.fromarray(np.uint8(255 * rgb_img2))
return img
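# Illustrative usage sketch, not part of the original module. It assumes an HSI cube of
# shape (height, width, bands) sampled in 10 nm steps from 400 nm, and that the file
# names below exist (both file names are hypothetical):
#
#   hsi = np.load("sample_hsi.npy")
#   rgb = hsi_to_rgb(hsi, "./csvs/D65.csv")
#   rgb.save("sample_rgb.png")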
| 2.671875
| 3
|
backBin/scrapers/washu/scrape_washu_edu.py
|
rishabhranawat/CrowdPlatform
| 1
|
12775030
|
<reponame>rishabhranawat/CrowdPlatform<gh_stars>1-10
import requests
from bs4 import BeautifulSoup
import re
import urlparse
import json
def is_abs(url):
return bool(urlparse.urlparse(url).netloc)
def get_relative_path(host, rel):
return urlparse.urljoin(host, rel)
def get_washu_courses_page():
COURSES_CS_WASHU = "https://www.cs.washington.edu/education/courses"
content = requests.get(COURSES_CS_WASHU).content
courses_washu = BeautifulSoup(content, 'html.parser')
spans = courses_washu.findAll('span',
attrs={'class': 'course-listing-title'})
courses = set()
for span in spans:
listings = span.findAll('a')
url = listings[0].get('href')
courses.add(url)
return courses
def get_all_courses_links(courses):
course_past_courses = {}
for COURSES_CS_WASHU_COURSE_INDEX in courses:
try:
content = requests.get(COURSES_CS_WASHU_COURSE_INDEX).content
course_past_courses[COURSES_CS_WASHU_COURSE_INDEX] = []
course_index = BeautifulSoup(content, 'html.parser')
past_course_index = course_index.find('div',
attrs={'id':'block-menu-menu-news-and-events'})
past_course_links = past_course_index.findAll('a')
for past_course_link in past_course_links:
pcl = past_course_link.get('href')
if(not is_abs(pcl)):
course_past_courses[COURSES_CS_WASHU_COURSE_INDEX].append(\
get_relative_path(
COURSES_CS_WASHU_COURSE_INDEX, pcl))
except:
print(COURSES_CS_WASHU_COURSE_INDEX)
return course_past_courses
with open("wash_links.json", "w") as f:
data = get_all_courses_links(get_washu_courses_page())
json.dump(data, f)
| 2.96875
| 3
|
server.py
|
zemogle/unicorn_sounds
| 0
|
12775031
|
import pyaudio
import numpy as np
import sys
import time
import asyncio
from aiohttp import web, WSMsgType
import json
import os
import struct
import websocket
HOST = os.getenv('HOST', '0.0.0.0')
PORT = int(os.getenv('PORT', 8080))
SAMPLE_RATE = 44100
CHUNK_SIZE = 4096
AUDIO_FORMAT = pyaudio.paInt16
FORMAT = np.int16
def calculate_levels(data, chunk,sample_rate):
# Apply FFT - real data so rfft used
fourier=np.fft.rfft(data)
# Remove last element in array to make it the same size as chunk
fourier=np.delete(fourier,len(fourier)-1)
#fourier = fourier[0:256]
# Find amplitude
power = np.log10(np.abs(fourier))**2
# Arrange array into 256 rows for the Unicorn HAT HD
power = np.reshape(power,(256,8))
matrix= np.average(power,axis=1)
return list(matrix.astype(int).astype(float))
def calculate_spect(data, chunk):
data_int = struct.unpack(str(2 * chunk) + 'B', data)
yf = np.fft.rfft(data_int)
spect = np.abs(yf[256:512]) / (128 * chunk)
max_v = np.max(spect)
# hist = np.histogram(spect, 256)
return list(spect.astype(float)), max_v.astype(float)
# return list(hist[0].astype(float)), max_v.astype(float)
def audio_analyse(stream):
signal = np.frombuffer(stream.read(CHUNK_SIZE, exception_on_overflow = False), FORMAT)
# levels = calculate_levels(signal, CHUNK_SIZE, SAMPLE_RATE)
levels, max_v = calculate_spect(signal, CHUNK_SIZE)
return json.dumps({'data':levels,'max':max_v})
async def connection_test(request):
return web.Response(text='Connection test')
async def websocket_handler(request):
print('Websocket connection starting')
ws = web.WebSocketResponse()
await ws.prepare(request)
print('Websocket connection ready')
# rgb = audio_analyse(stream)
async for msg in ws:
levels = audio_analyse(stream)
if msg.type == WSMsgType.TEXT:
if msg.data == 'close':
await ws.close()
else:
await ws.send_str(levels)
print('Websocket connection closed')
return ws
def main():
loop = asyncio.get_event_loop()
app = web.Application(loop=loop)
app.router.add_route('GET', '/', connection_test)
app.router.add_route('GET', '/ws', websocket_handler)
web.run_app(app, host=HOST, port=PORT)
if __name__ == '__main__':
p = pyaudio.PyAudio()
stream = p.open(format=AUDIO_FORMAT, channels=1, rate=SAMPLE_RATE, input=True, frames_per_buffer=CHUNK_SIZE)
main()
| 2.390625
| 2
|
common/query_settings.py
|
clearspending/api.clearspending.ru
| 1
|
12775032
|
# -*- coding: utf-8 -*-
APIdict = {}
from api.snippets import booleaniator, dbid, yearfilter, asIs, dateRange, floatRange, toListAnd, toListOr, toList, \
is_guid, placingtype, mongo_id, unicode_whitespace, okdp_okpd, less_int
# used for filtering incoming parameters
notUseParameterVal = {u'None', u'all', None, 'None', 'all', "", u""}
operatorsDontModify = {u"$all", u"$in", u"$or", u"$gte", u"$gt", u"$lte", u"$lt"}
share_parameters = {u"format", u"get_report"}
# dictionary of functions that convert parameters coming in from the API
typeFunctions = {
"mongo_id": mongo_id,
"unicode": unicode,
"unicode_whitespace": unicode_whitespace,
"integer": int,
"float": float,
"string": str,
"boolean": booleaniator,
"dbid": dbid,
"yearfilter": yearfilter,
"asIs": asIs,
"daterange": dateRange,
"floatrange": floatRange,
"listand": toListAnd,
"listor": toListOr,
"list": toList,
"placingtype": placingtype,
"guid": is_guid,
"okdp_okpd": okdp_okpd,
"less_int": less_int
}
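# Illustrative sketch, not part of the original module: assuming the API layer looks up
# each parameter's "type" in typeFunctions and applies it to the raw query value, a
# "floatrange" parameter would be converted roughly like this (the input value is hypothetical):
#
#   spec = {"field": u"price", "type": "floatrange", "default": None}
#   value = typeFunctions[spec["type"]]("1000-5000")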
from api.db_selectors import underConstruction, sphnxSelect, selectData, get_data, selectDict, select_budget_dict
from api.response_modifiers import modifier_select_rsp_dictionaries, modifier_select_rsp_suppliers, \
modifier_select_rsp_customers, modifier_top_rsp_contracts, modifier_top_rsp_organizations, \
modifier_get_rsp, modifier_select_rsp_contracts, modifier_select_rsp_invalidcontracts, \
modifier_get_grants_rsp, modifier_select_rsp_grants, modifier_top_rsp_grants, modifier_top_rsp_farma, \
modifier_get_notifications_rsp, modifier_select_rsp_notifications, modifier_top_rsp_notifications
APIdict[u"search"] = {
u"contracts": {"function": underConstruction},
u"suppliers": {"function": underConstruction},
u"customers": {"function": underConstruction}
}
APIdict[u"request"] = {
"stats": {"function": underConstruction}
}
APIdict[u"search"] = {
u"notifications": {
"function": sphnxSelect,
"modifier": modifier_select_rsp_notifications,
"description": {"DB_Name": "Notifications", "DB_collectionName": "Notifications"},
"parameters": {
"productsearch": {"field": u"sphnxsearch", "type": "unicode", "default": None,
"sphinx_field": "productsearch"},
"placing": {"field": u"sphnxsearchlist", "type": "list", "default": None, "sphinx_field": "placingway"},
"number": {"field": u"number", "type": "unicode", "default": None},
"pricerange": {"field": u"lot.maxPrice", "type": "floatrange",
"default": None},
"publish_daterange": {"field": u"publishDate", "type": "daterange", "default": None},
"participate_daterange": {"field": u"collectingDate.procedureInfo.collecting.endDate", "type": "daterange",
"default": None},
"regioncode": {"field": u"regionCode", "type": "unicode", "default": None},
},
"sort": {
"lot.maxPrice": [1, -1],
"publishDate": [1, -1],
"collectingDate.procedureInfo.collecting.endDate": [1, -1]
}
},
u"grants": {
"function": sphnxSelect,
"modifier": modifier_select_rsp_grants,
"description": {"DB_Name": "Grants", "DB_collectionName": "grants"},
"parameters": {
"productsearch": {"field": u"sphnxsearch", "type": "unicode", "default": None,
"sphinx_field": "description"},
"name_organization_search": {"field": u"sphnxsearch", "type": "unicode", "default": None,
"sphinx_field": "name_organization_search"},
"address_search": {"field": u"sphnxsearch", "type": "unicode", "default": None,
"sphinx_field": "address_search"},
"operator": {"field": u"operator", "type": "unicode_whitespace", "default": None},
"daterange": {"field": u"filing_date", "type": "daterange", "default": None},
"OGRN": {"field": u"OGRN", "type": "unicode", "default": None},
"price": {"field": u"price", "type": "floatrange", "default": None},
"grant_status": {"field": u"grant_status", "type": "unicode", "default": None},
},
},
u"customers": {
"function": sphnxSelect,
"modifier": modifier_select_rsp_customers,
"description": {"DB_Name": "Organizations", "DB_collectionName": "Customers_v3"},
"parameters": {
"namesearch": {"field": u"sphnxsearch", "type": "unicode", "default": None, "sphinx_field": "names"},
"address": {"field": u"sphnxsearch", "type": "unicode", "default": None, "sphinx_field": "address"},
"namesearchlist": {"field": u"sphnxsearchlist", "type": "list", "default": None, "sphinx_field": "names"},
"spzregnum": {"field": u"regNumber", "type": "unicode", "default": None},
"okpo": {"field": u"OKPO", "type": "unicode", "default": None},
"okved": {"field": u"OKVED", "type": "unicode", "default": None},
"name": {"field": u"fullName", "type": "unicode", "default": None},
"inn": {"field": u"inn", "type": "unicode", "default": None},
"kpp": {"field": u"kpp", "type": "unicode", "default": None},
"ogrn": {"field": u"ogrn", "type": "unicode", "default": None},
"okogu": {"field": u"okogu.code", "type": "unicode", "default": None},
"okato": {"field": u"factualAddress.OKATO", "type": "unicode", "default": None},
"subordination": {"field": u"subordinationType.id", "type": "unicode", "default": None},
"orgtype": {"field": u"organizationType.id", "type": "unicode", "default": None},
"kladregion": {"field": u"region.kladrCode", "type": "unicode", "default": None},
"fz": {"field": u"fz", "type": "unicode", "default": None},
"regioncode": {"field": u"regionCode", "type": "unicode", "default": None},
"orgclass": {"field": u"_orgClass", "type": "unicode", "default": None}
},
"sort": {
"contractsCount": [1, -1],
"contractsSum": [1, -1]
}
},
u"suppliers": {
"function": sphnxSelect,
"modifier": modifier_select_rsp_suppliers,
"description": {"DB_Name": "Organizations", "DB_collectionName": "Suppliers_v3"},
"parameters": {
"namesearch": {"field": u"sphnxsearch", "type": "unicode", "default": None, "sphinx_field": "names"},
"address": {"field": u"sphnxsearch", "type": "unicode", "default": None, "sphinx_field": "address"},
"inn": {"field": u"inn", "type": "unicode", "default": None},
"kpp": {"field": u"kpp", "type": "unicode", "default": None},
"regioncode": {"field": u"regionCode", "type": "unicode", "default": None},
"orgform": {"field": u"organizationForm", "type": "unicode", "default": None},
"orgclass": {"field": u"_orgClass", "type": "unicode", "default": None},
"inblacklist": {"field": u"xRNP.inRNP", "type": "boolean", "default": None}
},
"sort": {
"contractsCount": [1, -1],
"contractsSum": [1, -1]
}
},
u"contracts": {
"function": sphnxSelect,
"modifier": modifier_select_rsp_contracts,
"description": {"DB_Name": "Contracts", "DB_collectionName": "Contracts4API_v3"},
"parameters": {
"productsearch": {"field": u"sphnxsearch", "type": "unicode", "default": None, "sphinx_field": "products"},
"address": {"field": u"sphnxsearch", "type": "unicode", "default": None, "sphinx_field": "address"},
"misuses": {"field": u"sphnxsearch", "type": "unicode", "default": None, "sphinx_field": "misuses"},
"placing": {"field": u"sphnxsearchlist", "type": "list", "default": None, "sphinx_field": "placingway"},
"productsearchlist": {"field": u"sphnxsearchlist", "type": "list", "default": None,
"sphinx_field": "products"},
"regnum": {"field": u"regNum", "type": "unicode", "default": None},
"customerinn": {"field": u"customer.inn", "type": "unicode", "default": None},
"customerkpp": {"field": u"customer.kpp", "type": "unicode", "default": None},
"supplierinn": {"field": u"suppliers.inn", "type": "unicode", "default": None,
"sphinx_field": "supplierinn_list"},
"supplierkpp": {"field": u"suppliers.kpp", "type": "unicode", "default": None,
"sphinx_field": "supplierkpp_list"},
"okdp_okpd": {"field": u"okdp_okpd", "type": "okdp_okpd", "default": None,
"sphinx_field": "okdp_okpd_list"},
"budgetlevel": {"field": u"finances.budgetLevel.code", "type": "unicode", "default": None},
"grbs": {"field": u"finances.budgetary.KBK", "type": "unicode", "default": None},
"fkr": {"field": u"finances.budgetary.KBK", "type": "unicode", "default": None},
"sub-fkr": {"field": u"sub_fkr", "type": "unicode", "default": None},
"csr": {"field": u"finances.budgetary.KBK", "type": "unicode", "default": None},
"kvr": {"field": u"finances.budgetary.KBK", "type": "unicode", "default": None},
"customerregion": {"field": u"regionCode", "type": "unicode", "default": None},
"currentstage": {"field": u"currentContractStage", "type": "unicode", "default": None},
"daterange": {"field": u"signDate", "type": "daterange", "default": None},
"pricerange": {"field": u"price", "type": "floatrange", "default": None},
"fz": {"field": u"fz", "type": "unicode", "default": None}
},
"sort": {
"price": [1, -1],
"signDate": [1, -1]
}
}
}
APIdict[u"top"] = {
u"notifications": {
"function": selectData,
"modifier": modifier_top_rsp_notifications,
"description": {"DB_Name": "Statistics", "DB_collectionName": "Statistics_v3"},
"parameters": {
"statName": {"field": u"statName", "type": "unicode", "default": u'topNotifications'},
'fz': {'field': u'fz', "type": "unicode", "default": None}
},
"sort": {
"price": [1, -1],
}
},
u"grants": {
"function": selectData,
"modifier": modifier_top_rsp_grants,
"description": {"DB_Name": "Statistics", "DB_collectionName": "Statistics_v3"},
        # TODO: the parameter cannot be omitted; selectData returns None if it is
"parameters": {
"statName": {"field": u"statName", "type": "unicode", "default": u'topGrants'},
'grant_status': {'field': u'grant_status', "type": "unicode", "default": None}
},
"sort": {
"price": [1, -1],
}
},
u"contracts": {
"function": selectData,
"modifier": modifier_top_rsp_contracts,
"description": {"DB_Name": "Statistics", "DB_collectionName": "Statistics_v3"},
"parameters": {
"statname": {"field": u"statName", "type": "unicode", "default": u"topContracts"},
"year": {"field": u"year", "type": "unicode", "default": None}
},
"sort": {
"price": [1, -1],
"signDate": [1, -1]
}
},
u"suppliers": {
"function": selectData,
"modifier": modifier_top_rsp_organizations,
"description": {"DB_Name": "Statistics", "DB_collectionName": "Statistics_v3"},
"parameters": {
"statname": {"field": u"statName", "type": "unicode", "default": u"topSuppliers"},
"stattype": {"field": u"statType", "type": "unicode", "default": None}
},
"sort": {
"contractsCount": [1, -1],
"contractsSum": [1, -1]
}
},
u"customers": {
"function": selectData,
"modifier": modifier_top_rsp_organizations,
"description": {"DB_Name": "Statistics", "DB_collectionName": "Statistics_v3"},
"parameters": {
"statname": {"field": u"statName", "type": "unicode", "default": u"topCustomers"},
"stattype": {"field": u"statType", "type": "unicode", "default": None}
},
"sort": {
"contractsCount": [1, -1],
"contractsSum": [1, -1]
}
},
u"npo": {
"function": selectData,
"modifier": modifier_top_rsp_organizations,
"description": {"DB_Name": "Statistics", "DB_collectionName": "Statistics_v3"},
"parameters": {
"statname": {"field": u"statName", "type": "unicode", "default": u"topNPO"},
"stattype": {"field": u"statType", "type": "unicode", "default": None}
},
"sort": {
"contractsCount": [1, -1],
"contractsSum": [1, -1]
}
},
u"farma": {
"function": selectData,
"modifier": modifier_top_rsp_farma,
"description": {"DB_Name": "Statistics", "DB_collectionName": "Statistics_v3"},
"parameters": {
"statname": {"field": u"statName", "type": "unicode", "default": u"topFarma"},
"stattype": {"field": u"statType", "type": "unicode", "default": None}
}
},
u"univers": {
"function": selectData,
"modifier": modifier_top_rsp_organizations,
"description": {"DB_Name": "Statistics", "DB_collectionName": "Statistics_v3"},
"parameters": {
"statname": {"field": u"statName", "type": "unicode", "default": u"topUniversities"},
"stattype": {"field": u"statType", "type": "unicode", "default": None}
},
"sort": {
"contractsCount": [1, -1],
"contractsSum": [1, -1]
}
}
}
APIdict[u"get"] = {
u"notifications": {
"function": get_data,
"modifier": modifier_get_notifications_rsp,
"description": {"DB_Name": "Notifications", "DB_collectionName": "Notifications"},
"parameters": {
"number": {"field": u"number", "type": "unicode", "default": None},
"id": {"field": u"id", "type": "unicode", "default": None}
}
},
u"grants": {
"function": get_data,
"modifier": modifier_get_grants_rsp,
"description": {"DB_Name": "Grants", "DB_collectionName": "grants"},
"parameters": {
"id": {"field": u"id", "type": "integer", "default": None}
}
},
u"contracts": {
"function": get_data,
"modifier": modifier_get_rsp,
"description": {"DB_Name": "Contracts", "DB_collectionName": "Contracts4API_v3"},
"parameters": {
# "newestver": {"field": u"_newestVersion", "type": "boolean", "default": True},
"regnum": {"field": u"regNum", "type": "unicode", "default": None},
"id": {"field": u"id", "type": "guid", "default": None}
}
},
u"suppliers": {
"function": get_data,
"modifier": modifier_get_rsp,
"description": {"DB_Name": "Organizations", "DB_collectionName": "Suppliers_v3"},
"parameters": {
"id": {"field": u"_id", "type": "dbid", "default": None},
"inn": {"field": u"inn", "type": "unicode", "default": None},
"kpp": {"field": u"kpp", "type": "unicode", "default": None}
}
},
u"customers": {
"function": get_data,
"modifier": modifier_get_rsp,
"description": {"DB_Name": "Organizations", "DB_collectionName": "Customers_v3"},
"parameters": {
"id": {"field": u"_id", "type": "dbid", "default": None},
"spzregnum": {"field": u"regNumber", "type": "unicode", "default": None}
}
},
u"dicts": {
"function": underConstruction
},
u"regions": {
"function": get_data,
"modifier": modifier_get_rsp,
"description": {"DB_Name": "Dictionaries", "DB_collectionName": "Regions"},
"parameters": {
"regioncode": {"field": u"subjectCode", "type": "integer", "default": None},
"okato": {"field": u"codeOKATO", "type": "integer", "default": None},
"kladr": {"field": u"codeKLADR", "type": "integer", "default": None}
}
},
u"budgetlevels": {
"function": get_data,
"modifier": modifier_get_rsp,
"description": {"DB_Name": "Dictionaries", "DB_collectionName": "BudgetLevels"},
"parameters": {
"level": {"field": u"budgetLevelCode", "type": "unicode", "default": None}
}
},
u"opf": {
"function": get_data,
"modifier": modifier_get_rsp,
"description": {"DB_Name": "Dictionaries", "DB_collectionName": "OPF"},
"parameters": {
"opf": {"field": u"opf", "type": "unicode", "default": None}
}
},
u"kbk": {
"function": get_data,
"modifier": modifier_get_rsp,
"description": {"DB_Name": "Dictionaries", "DB_collectionName": "kbk"},
"parameters": {
"actual": {"field": u"actual", "type": "unucode", "default": u"true"},
"kbk": {"field": u"kbk", "type": "unicode", "default": None},
"budget": {"field": u"budget", "type": "unicode", "default": None}
}
},
u"kosgu": {
"function": get_data,
"modifier": modifier_get_rsp,
"description": {"DB_Name": "Dictionaries", "DB_collectionName": "kosgu"},
"parameters": {
"actual": {"field": u"actual", "type": "unucode", "default": u"true"},
"kosgu": {"field": u"kbk", "type": "unicode", "default": None}
}
},
u"invalidreasons": {
"function": get_data,
"modifier": modifier_get_rsp,
"description": {"DB_Name": "Dictionaries", "DB_collectionName": "InvalidReasons"},
"parameters": {
"code": {"field": u"code", "type": "unicode", "default": None}
}
},
u"placing": {
"function": get_data,
"modifier": modifier_get_rsp,
"description": {"DB_Name": "Dictionaries", "DB_collectionName": "Placing"},
"parameters": {
"code": {"field": u"code", "type": "unicode", "default": None}
}
},
u"okato": {
"function": get_data,
"modifier": modifier_get_rsp,
"description": {"DB_Name": "Dictionaries", "DB_collectionName": "okato"},
"parameters": {
"code": {"field": u"code", "type": "unicode", "default": None},
}
}
}
APIdict[u"select"] = {
u"notifications": {
"function": sphnxSelect,
"modifier": modifier_select_rsp_notifications,
"description": {"DB_Name": "Notifications", "DB_collectionName": "Notifications"},
"parameters": {
"productsearch": {"field": u"sphnxsearch", "type": "unicode", "default": None, "sphinx_field": "product"},
"placing": {"field": u"sphnxsearchlist", "type": "list", "default": None, "sphinx_field": "placingway"},
"number": {"field": u"number", "type": "unicode", "default": None},
"pricerange": {"field": u"lots.lot.customerRequirements.customerRequirement.maxPrice", "type": "floatrange",
"default": None},
"publish_daterange": {"field": u"publishDate", "type": "daterange", "default": None},
"participate_daterange": {"field": u"notificationCommission.p1Date", "type": "daterange", "default": None},
"regioncode": {"field": u"regionCode", "type": "unicode", "default": None},
"fz": {"field": u"fz", "type": "unicode", "default": None},
},
"sort": {
"publish_daterange": [1, -1]
}
},
u"grants": {
"function": selectData,
"modifier": modifier_select_rsp_grants,
"description": {"DB_Name": "Grants", "DB_collectionName": "grants"},
"parameters": {
"year": {"field": u"year", "type": "unicode", "default": None},
"status": {"field": u"grant_status", "type": "unicode", "default": None},
"grant": {"field": u"grant", "type": "unicode", "default": None},
"price": {"field": u"price", "type": "floatrange", "default": None},
"daterange": {"field": u"filing_date", "type": "daterange", "default": None},
},
},
u"contracts": {
"function": selectData,
"modifier": modifier_select_rsp_contracts,
"description": {"DB_Name": "Contracts", "DB_collectionName": "Contracts4API_v3"},
"parameters": {
"regnum": {"field": u"regNum", "type": "unicode", "default": None},
"customerinn": {"field": u"customer.inn", "type": "unicode", "default": None},
"customerkpp": {"field": u"customer.kpp", "type": "unicode", "default": None},
"supplierinn": {"field": u"suppliers.inn", "type": "unicode", "default": None},
"supplierkpp": {"field": u"suppliers.kpp", "type": "unicode", "default": None},
"okpd": {"field": u"products.OKPD.code", "type": "unicode", "default": None},
"okdp": {"field": u"products.OKDP.code", "type": "unicode", "default": None},
"budgetlevel": {"field": u"finances.budgetLevel.code", "type": "unicode", "default": None},
"customerregion": {"field": u"regionCode", "type": "unicode", "default": None},
"industrial": {"field": u"economic_sectors.code", "type": "unicode", "default": None},
"currentstage": {"field": u"currentContractStage", "type": "unicode", "default": None},
"daterange": {"field": u"signDate", "type": "daterange", "default": None},
"placing": {"field": u"placingWayCode", "type": "placingtype", "default": None},
"pricerange": {"field": u"price", "type": "floatrange", "default": None},
"fz": {"field": u"fz", "type": "unicode", "default": None},
},
"sort": {
"price": [1, -1],
"signDate": [1, -1]
}
},
u"invalidcontracts": {
"function": selectData,
"modifier": modifier_select_rsp_invalidcontracts,
"description": {"DB_Name": "Contracts", "DB_collectionName": "ContractsInnKppAnalytics_v3"},
"parameters": {
"valid": {"field": u"_valid", "type": "boolean", "default": False},
"regnum": {"field": u"regNum", "type": "unicode", "default": None},
"customerinn": {"field": u"customer.inn", "type": "unicode", "default": None},
"customerkpp": {"field": u"customer.kpp", "type": "unicode", "default": None},
"supplierinn": {"field": u"suppliers.inn", "type": "unicode", "default": None},
"supplierkpp": {"field": u"suppliers.kpp", "type": "unicode", "default": None},
"customerregion": {"field": u"regionCode", "type": "unicode", "default": None},
"reasonslistand": {"field": u"_invalidReasonList", "type": "listand", "default": None},
"reasonslistor": {"field": u"_invalidReasonList", "type": "listor", "default": None}
},
"sort": {
"price": [1, -1],
"signDate": [1, -1]
}
},
u"suppliers": {
"function": selectData,
"modifier": modifier_select_rsp_suppliers,
"description": {"DB_Name": "Organizations", "DB_collectionName": "Suppliers_v3"},
"parameters": {
"inn": {"field": u"inn", "type": "unicode", "default": None},
"kpp": {"field": u"kpp", "type": "unicode", "default": None},
"regioncode": {"field": u"regionCode", "type": "unicode", "default": None},
"orgform": {"field": u"organizationForm", "type": "unicode", "default": None},
"orgclass": {"field": u"_orgClass", "type": "unicode", "default": None},
"inblacklist": {"field": u"xRNP.inRNP", "type": "boolean", "default": None}
},
"sort": {
"contractsCount": [1, -1],
"contractsSum": [1, -1]
}
},
u"customers": {
"function": selectData,
"modifier": modifier_select_rsp_customers,
"description": {"DB_Name": "Organizations", "DB_collectionName": "Customers_v3"},
"parameters": {
"spzregnum": {"field": u"regNumber", "type": "unicode", "default": None},
"okpo": {"field": u"OKPO", "type": "unicode", "default": None},
"okved": {"field": u"OKVED", "type": "unicode", "default": None},
"name": {"field": u"fullName", "type": "unicode", "default": None},
"inn": {"field": u"inn", "type": "unicode", "default": None},
"kpp": {"field": u"kpp", "type": "unicode", "default": None},
"ogrn": {"field": u"ogrn", "type": "unicode", "default": None},
"okogu": {"field": u"okogu.code", "type": "unicode", "default": None},
"okato": {"field": u"factualAddress.OKATO", "type": "unicode", "default": None},
"subordination": {"field": u"subordinationType.id", "type": "unicode", "default": None},
"orgtype": {"field": u"organizationType.id", "type": "unicode", "default": None},
"kladregion": {"field": u"factualAddress.region.kladrCode", "type": "unicode", "default": None},
"regioncode": {"field": u"regionCode", "type": "unicode", "default": None},
"orgclass": {"field": u"_orgClass", "type": "unicode", "default": None}
},
"sort": {
"contractsCount": [1, -1],
"contractsSum": [1, -1]
}
},
u"dicts": {"function": underConstruction},
u"regions": {
"function": selectDict,
"modifier": modifier_select_rsp_dictionaries,
"description": {"DB_Name": "Dictionaries", "DB_collectionName": "Regions"},
"parameters": {
# "name": {"field": u"name", "type": "unicode", "default": None},
"regioncode": {"field": u"subjectCode", "type": "integer", "default": None},
"okato": {"field": u"codeOKATO", "type": "integer", "default": None},
"kladr": {"field": u"codeKLADR", "type": "integer", "default": None}
}
},
u"budgetlevels": {
"function": selectDict,
"modifier": modifier_select_rsp_dictionaries,
"description": {"DB_Name": "Dictionaries", "DB_collectionName": "BudgetLevels"},
"parameters": {
"level": {"field": u"budgetLevelCode", "type": "unicode", "default": None}
}
},
u"opf": {
"function": selectDict,
"modifier": modifier_select_rsp_dictionaries,
"description": {"DB_Name": "Dictionaries", "DB_collectionName": "OPF"},
"parameters": {
"opf": {"field": u"opf", "type": "unicode", "default": None}
}
},
u"kbk": {
"function": selectDict,
"modifier": modifier_select_rsp_dictionaries,
"description": {"DB_Name": "Dictionaries", "DB_collectionName": "kbk"},
"parameters": {
"actual": {"field": u"actual", "type": "unucode", "default": u"true"},
"kbk": {"field": u"kbk", "type": "unicode", "default": None},
"budget": {"field": u"budget", "type": "unicode", "default": None}
}
},
u"kosgu": {
"function": selectDict,
"modifier": modifier_select_rsp_dictionaries,
"description": {"DB_Name": "Dictionaries", "DB_collectionName": "kosgu"},
"parameters": {
"actual": {"field": u"actual", "type": "unucode", "default": u"true"},
"kosgu": {"field": u"code", "type": "unicode", "default": None}
}
},
u"invalidreasons": {
"function": selectDict,
"modifier": modifier_select_rsp_dictionaries,
"description": {"DB_Name": "Dictionaries", "DB_collectionName": "InvalidReasons"},
"parameters": {
"code": {"field": u"code", "type": "unicode", "default": None}
}
},
u"orgtype": {
"function": selectDict,
"modifier": modifier_select_rsp_dictionaries,
"description": {"DB_Name": "Dictionaries", "DB_collectionName": "OrgType"},
"parameters": {
"code": {"field": u"code", "type": "unicode", "default": None}
}
},
u"okato": {
"function": selectDict,
"modifier": modifier_select_rsp_dictionaries,
"description": {"DB_Name": "Dictionaries", "DB_collectionName": "okato"},
"parameters": {
"code": {"field": u"code", "type": "unicode", "default": None},
"parentcode": {"field": u"parent", "type": "unicode", "default": None},
"level": {"field": u"level", "type": "integer", "default": None}
}
}
}
APIdict[u"dictionaries"] = {
u"budget": {
"function": select_budget_dict,
"modifier": modifier_select_rsp_dictionaries,
"description": {"DB_Name": "Dictionaries", "DB_collectionName": "Budget"},
"parameters": {
"grbs": {"field": u"chief_steward", "type": "unicode", "default": None},
"fkr": {"field": u"section", "type": "unicode", "default": None},
"sub-fkr": {"field": u"subsection", "type": "unicode", "default": None},
"csr": {"field": u"target_article", "type": "unicode", "default": None},
"kvr": {"field": u"type_expenditure", "type": "unicode", "default": None},
"level": {"field": u"level", "type": "less_int", "default": None}
}
},
}
APIdict[u"statistics"] = {
u"regionspending": {
"function": selectDict,
"modifier": modifier_select_rsp_dictionaries,
"description": {"DB_Name": "Statistics", "DB_collectionName": "RegionSpending_v3"},
"parameters": {
# "name": {"field": u"name", "type": "unicode", "default": None},
"regioncode": {"field": u"regionCode", "type": "unicode", "default": None},
"year": {"field": u"year", "type": "unicode", "default": None}
}
},
u"db_info": {
"function": selectDict,
"modifier": modifier_select_rsp_dictionaries,
"description": {"DB_Name": "Statistics", "DB_collectionName": "db_statistics"},
"parameters": {
"info": {"field": u"info", "type": "unicode", "default": None},
}
}
}
try:
from common.local_query_settings import *
except ImportError:
pass
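

# --- Illustrative helper (not part of the original settings; a minimal sketch) ---
# Shows how the parameter specifications defined above could be introspected,
# e.g. to document which query parameters a method accepts. Only keys visible
# in this module (such as u"statistics" / u"regionspending") are assumed to exist.
def _describe_api_method(section, method):
    spec = APIdict[section][method]
    return {name: param["type"] for name, param in spec.get("parameters", {}).items()}

# Example (hypothetical):
#   _describe_api_method(u"statistics", u"regionspending")
#   -> {"regioncode": "unicode", "year": "unicode"}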
| 1.796875
| 2
|
tests/unit/test_std_stream_replacer.py
|
matthewgdv/miscutils
| 0
|
12775033
|
# import pytest
class TestBaseReplacerMixin:
def test_target(self): # synced
assert True
def test_write(self): # synced
assert True
def test_flush(self): # synced
assert True
def test_close(self): # synced
assert True
class TestStdOutReplacerMixin:
def test_target(self): # synced
assert True
class TestStdErrReplacerMixin:
def test_target(self): # synced
assert True
class TestStdOutFileRedirector:
def test___str__(self): # synced
assert True
def test_write(self): # synced
assert True
class TestBaseStreamRedirector:
def test___str__(self): # synced
assert True
def test_write(self): # synced
assert True
def test_flush(self): # synced
assert True
def test_close(self): # synced
assert True
class TestStdOutStreamRedirector:
pass
class TestStdErrStreamRedirector:
pass
class TestSupressor:
def test_write(self): # synced
assert True
| 2.125
| 2
|
api/admin.py
|
josuelopes512/mastercode_films_api
| 0
|
12775034
|
from django.contrib import admin
# Register your models here.
from .models import Movie
class MovieAdmin(admin.ModelAdmin):
list_display = ['id', 'movie_id', 'title', 'slug']
prepopulated_fields = {
"slug": ("title",)
}
admin.site.register(Movie, MovieAdmin)
| 1.992188
| 2
|
dynamodb-streams-lambda-filter/src/update_view_counters.py
|
MauriceBrg/snippets
| 2
|
12775035
|
import os
import boto3
TABLE_NAME = os.environ.get("TABLE_NAME", "filter-demo-data")
TABLE = boto3.resource("dynamodb").Table(TABLE_NAME)
CLIENT = boto3.client("dynamodb")
def lambda_handler(event, context):
# We can work on the assumption that we only get items
# in NewImage with a type of "VIEW", that means we can
# rely on userId, videoId, and duration being present.
# We can also assume we get a single record.
item = event["Records"][0]["dynamodb"]["NewImage"]
event_name = event["Records"][0]["eventName"] # INSERT or REMOVE
user_id = item["userId"]["S"]
video_id = item["videoId"]["S"]
duration = item["duration"]["N"]
print(f"Type: {event_name} User: {user_id} Video: {video_id} Duration: {duration}")
# We use a transaction so either both writes succeed, or both fail.
CLIENT.transact_write_items(
TransactItems=[
{
"Update": {
"TableName": TABLE_NAME,
"Key": {
"PK": {"S": f"USER#{user_id}"},
"SK": {"S": "SUMMARY"}
},
"UpdateExpression": "ADD #views :view_increment, "\
"#duration :duration_increment "\
"SET #type = :type, "\
"#userId = :userId",
"ExpressionAttributeNames": {
"#views": "views",
"#duration": "duration",
"#type": "type",
"#userId": "userId",
},
"ExpressionAttributeValues": {
":view_increment": {"N": str(1)},
":duration_increment": {"N": str(duration)},
":type": {"S": "USER_SUMMARY"},
":userId": {"S": str(user_id)},
},
},
},
{
"Update": {
"TableName": TABLE_NAME,
"Key": {
"PK": {"S": f"VIDEO#{video_id}"},
"SK": {"S": "SUMMARY"}
},
"UpdateExpression": "ADD #views :view_increment, "\
"#duration :duration_increment "\
"SET #type = :type, "\
"#videoId = :videoId, "\
"#gsi1pk = :gsi1pk, "\
"#gsi1sk = :gsi1sk",
"ExpressionAttributeNames": {
"#views": "views",
"#duration": "duration",
"#type": "type",
"#videoId": "videoId",
"#gsi1pk": "GSI1PK",
"#gsi1sk": "GSI1SK",
},
"ExpressionAttributeValues": {
":view_increment": {"N": str(1)},
":duration_increment": {"N": str(duration)},
":type": {"S": "VIDEO_SUMMARY"},
":videoId": {"S": str(video_id)},
":gsi1pk": {"S": f"VIDEO#{video_id}"},
":gsi1sk": {"S": "SUMMARY"},
},
}
},
]
)
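

# --- Illustrative local test harness (not part of the original handler) ---
# A minimal sketch of what a single stream record could look like; the event
# shape below is an assumption for local experimentation, and running it needs
# valid AWS credentials plus an existing table named by TABLE_NAME.
if __name__ == "__main__":
    sample_event = {
        "Records": [
            {
                "eventName": "INSERT",
                "dynamodb": {
                    "NewImage": {
                        "userId": {"S": "user-123"},
                        "videoId": {"S": "video-456"},
                        "duration": {"N": "42"},
                    }
                },
            }
        ]
    }
    lambda_handler(sample_event, None)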
| 2.015625
| 2
|
conanfile.py
|
vuo/conan-libusb
| 0
|
12775036
|
<filename>conanfile.py<gh_stars>0
from conans import ConanFile, tools, AutoToolsBuildEnvironment
import shutil
import os
import platform
class LibusbConan(ConanFile):
name = 'libusb'
source_version = '1.0.23'
package_version = '0'
version = '%s-%s' % (source_version, package_version)
build_requires = (
'llvm/5.0.2-1@vuo/stable',
'macos-sdk/11.0-0@vuo/stable',
)
settings = 'os', 'compiler', 'build_type', 'arch'
url = 'https://github.com/libusb/libusb'
license = 'https://github.com/libusb/libusb/blob/master/COPYING'
description = 'A library for USB device access'
source_dir = 'libusb-%s' % source_version
build_x86_dir = '_build_x86'
build_arm_dir = '_build_arm'
install_x86_dir = '_install_x86'
install_arm_dir = '_install_arm'
install_universal_dir = '_install_universal_dir'
def requirements(self):
if platform.system() == 'Linux':
self.requires('patchelf/0.10pre-1@vuo/stable')
elif platform.system() != 'Darwin':
raise Exception('Unknown platform "%s"' % platform.system())
def source(self):
tools.get('https://github.com/libusb/libusb/releases/download/v%s/libusb-%s.tar.bz2' % (self.source_version, self.source_version),
sha256='db11c06e958a82dac52cf3c65cb4dd2c3f339c8a988665110e0d24d19312ad8d')
self.run('mv %s/COPYING %s/%s.txt' % (self.source_dir, self.source_dir, self.name))
def build(self):
autotools = AutoToolsBuildEnvironment(self)
# The LLVM/Clang libs get automatically added by the `requires` line,
# but this package doesn't need to link with them.
autotools.libs = []
autotools.flags.append('-Oz')
if platform.system() == 'Darwin':
autotools.flags.append('-isysroot %s' % self.deps_cpp_info['macos-sdk'].rootpath)
autotools.flags.append('-mmacosx-version-min=10.11')
autotools.link_flags.append('-Wl,-install_name,@rpath/libusb.dylib')
common_configure_args = [
'--quiet',
'--disable-dependency-tracking',
'--disable-static',
'--enable-shared',
]
env_vars = {
'CC' : self.deps_cpp_info['llvm'].rootpath + '/bin/clang',
'CXX': self.deps_cpp_info['llvm'].rootpath + '/bin/clang++',
}
with tools.environment_append(env_vars):
build_root = os.getcwd()
self.output.info("=== Build for x86_64 ===")
tools.mkdir(self.build_x86_dir)
with tools.chdir(self.build_x86_dir):
autotools.flags.append('-arch x86_64')
autotools.link_flags.append('-arch x86_64')
autotools.configure(configure_dir='../%s' % self.source_dir,
build=False,
host=False,
args=common_configure_args + [
'--prefix=%s/%s' % (build_root, self.install_x86_dir),
])
autotools.make(args=['--quiet'])
autotools.make(target='install', args=['--quiet'])
self.output.info("=== Build for arm64 ===")
tools.mkdir(self.build_arm_dir)
with tools.chdir(self.build_arm_dir):
autotools.flags.remove('-arch x86_64')
autotools.flags.append('-arch arm64')
autotools.link_flags.remove('-arch x86_64')
autotools.link_flags.append('-arch arm64')
autotools.configure(configure_dir='../%s' % self.source_dir,
build=False,
host=False,
args=common_configure_args + [
'--prefix=%s/%s' % (build_root, self.install_arm_dir),
'--host=x86_64-apple-darwin15.0.0',
])
autotools.make(args=['--quiet'])
autotools.make(target='install', args=['--quiet'])
def package(self):
if platform.system() == 'Darwin':
libext = 'dylib'
elif platform.system() == 'Linux':
libext = 'so'
else:
raise Exception('Unknown platform "%s"' % platform.system())
tools.mkdir(self.install_universal_dir)
with tools.chdir(self.install_universal_dir):
self.run('lipo -create ../%s/lib/libusb-1.0.%s ../%s/lib/libusb-1.0.%s -output libusb.%s' % (self.install_x86_dir, libext, self.install_arm_dir, libext, libext))
self.copy('*.h', src='%s/include' % self.install_x86_dir, dst='include')
self.copy('libusb.%s' % libext, src=self.install_universal_dir, dst='lib')
self.copy('%s.txt' % self.name, src=self.source_dir, dst='license')
def package_info(self):
self.cpp_info.libs = ['usb']
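
# Typical local packaging invocation (illustrative; the user/channel below is
# an assumption, not taken from this repository):
#   conan create . vuo/stable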
| 2.1875
| 2
|
tests/qa/test_qa_wl6351.py
|
timgates42/mysql-connector-python
| 0
|
12775037
|
# Copyright (c) 2013, 2021, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import mysql.connector
import tests
class WL6351Tests(tests.MySQLConnectorTests):
"""Test to check for Return error codes."""
def test_host(self):
"""Try to open a database connection with wrong ip should throw
an error.
"""
config = self.get_clean_mysql_config()
config["host"] = "1.3.5.1"
config["connect_timeout"] = 1
for cls in self.all_cnx_classes:
self.assertRaises(
(
mysql.connector.errors.InterfaceError,
mysql.connector.errors.DatabaseError,
),
cls,
**config
)
@tests.foreach_cnx()
def test_db(self):
"""Try to open a database connection and use non existing database."""
with self.cnx.cursor() as cur:
with self.assertRaises(
mysql.connector.errors.ProgrammingError
) as context:
cur.execute("use unknowndb")
self.assertEqual(context.exception.errno, 1049)
@tests.foreach_cnx()
def test_table(self):
"""Execute the SQL query using execute() method."""
with self.cnx.cursor() as cur:
with self.assertRaises(
mysql.connector.errors.ProgrammingError
) as context:
cur.execute("SELECT * FROM unknowntable")
self.assertEqual(context.exception.errno, 1146)
| 1.671875
| 2
|
src/settings/ConfigurationHandler.py
|
jenkins-head/jenkins-head-controller
| 1
|
12775038
|
import logging
import yaml
import pprint
from settings.HeadConfiguration import HeadConfiguration
from settings.ConfigurationBaseClass import ConfigurationBaseClass
class ConfigurationHandler(ConfigurationBaseClass):
"""
    This class handles the configuration file. After loading the content, this class
    provides getter methods for it.
"""
__mServerSectionKey = 'servers'
__mHeadsSectionKey = 'heads'
def __init__(self, configurationFilePath: str):
self.__mFilePath = configurationFilePath
with open(configurationFilePath, 'r') as fileHandle:
configurationContent = yaml.safe_load(fileHandle)
self.__checkConfigurationFileContent(configurationContent)
serverList = configurationContent[self.__mServerSectionKey]
self.__checkServerListContent(serverList)
self.__mServers = serverList
self.__mHeads = configurationContent[self.__mHeadsSectionKey]
listOfHeads = self.__convertHeadsConfig2ListOfHeads()
self.__checkListOfHeads(listOfHeads)
self.__mListOfHeads = listOfHeads
    def getListOfHeadConfigurationObjects(self) -> list:
return self.__mListOfHeads
def __convertHeadsConfig2ListOfHeads(self):
listOfHeadConfigurationsObjects = []
for headConfiguration in self.__mHeads:
listOfHeadConfigurationsObjects.append(HeadConfiguration(headConfiguration, self.__mServers))
return listOfHeadConfigurationsObjects
def __checkConfigurationFileContent(self, configurationContent):
keysToCheck = [
self.__mServerSectionKey,
self.__mHeadsSectionKey
]
try:
self._checkConfigurationStructure(configurationContent, keysToCheck)
except Exception as error:
logging.error('Wrong structure in file: ' + self.__mFilePath)
raise error
        logging.debug('All sections are found in file: ' + self.__mFilePath)
def __checkServerListContent(self, serverList):
for serverKey, server in serverList.items():
self._checkServerContent(server)
def __checkListOfHeads(self, listOfHeads):
occurrence = {}
for head in listOfHeads:
self.__countKey(occurrence, head.getName())
self.__countKey(occurrence, head.getBleMac())
for key, value in occurrence.items():
if value > 1:
                raise Exception('Multiple occurrences of heads found: ' + pprint.pformat(occurrence))
def __countKey(self, occurrenceDict, key):
if key in occurrenceDict:
occurrenceDict[key] += 1
else:
occurrenceDict[key] = 1
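

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch assuming a YAML file with 'servers' and 'heads' sections;
# the exact field names of a head entry are defined by HeadConfiguration and
# are not repeated here.
if __name__ == '__main__':
    import sys
    configFile = sys.argv[1] if len(sys.argv) > 1 else 'configuration.yaml'
    handler = ConfigurationHandler(configFile)
    for head in handler.getListOfHeadConfigurationObjects():
        print(head.getName(), head.getBleMac())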
| 2.53125
| 3
|
test/test_datastream_creation.py
|
scramjetorg/framework-python
| 16
|
12775039
|
<reponame>scramjetorg/framework-python<filename>test/test_datastream_creation.py<gh_stars>10-100
from scramjet.streams import Stream, UnsupportedOperation
import asyncio
from scramjet.ansi_color_codes import *
import pytest
# test cases
@pytest.mark.asyncio
async def test_creating_stream_using_constructor():
stream = Stream()
assert isinstance(stream, Stream)
@pytest.mark.asyncio
async def test_creating_stream_from_list():
stream = Stream.from_iterable([1, 2, 3, 4])
assert [1, 2, 3, 4] == await stream.to_list()
@pytest.mark.asyncio
async def test_creating_stream_from_empty_list():
stream = Stream.from_iterable([])
assert [] == await stream.to_list()
@pytest.mark.asyncio
async def test_creating_stream_from_set():
stream = Stream.from_iterable({1, 2, 3, 4})
assert [1, 2, 3, 4] == await stream.to_list()
@pytest.mark.asyncio
async def test_creating_stream_from_string():
stream = Stream.from_iterable('abcd')
assert ['a', 'b', 'c', 'd'] == await stream.to_list()
@pytest.mark.asyncio
async def test_creating_stream_from_dict_keys():
test_input = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
stream = Stream.from_iterable(test_input)
assert ['a', 'b', 'c', 'd'] == await stream.to_list()
@pytest.mark.asyncio
async def test_creating_stream_from_dict_items():
test_input = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
stream = Stream.from_iterable(test_input.items())
assert test_input == dict(await stream.to_list())
@pytest.mark.asyncio
async def test_creating_stream_from_generator():
stream = Stream.from_iterable(range(4))
assert [0, 1, 2, 3] == await stream.to_list()
@pytest.mark.asyncio
async def test_creating_stream_from_file_object():
with open("test/sample_text_1.txt") as f:
stream = Stream.from_iterable(f)
assert ['foo\n', 'bar baz\n', 'qux'] == await stream.to_list()
@pytest.mark.asyncio
async def test_specifying_chunk_size_on_plain_iterable():
with pytest.raises(UnsupportedOperation):
result = Stream.read_from([1, 2, 3, 4], chunk_size=2)
@pytest.mark.asyncio
async def test_non_iterable_source_without_chunk_size():
class Foo():
def read(self, how_many):
return "" + "foo"*how_many
with pytest.raises(UnsupportedOperation):
Stream.read_from(Foo())
class AsyncCountUntil():
def __init__(self, max) -> None:
self.limit = max
async def __aiter__(self):
for i in range(self.limit):
await asyncio.sleep(0.01)
yield i+1
@pytest.mark.asyncio
async def test_creating_stream_from_async_iterable():
stream = Stream.read_from(AsyncCountUntil(8))
assert [1, 2, 3, 4, 5, 6, 7, 8] == await stream.to_list()
@pytest.mark.asyncio
async def test_creating_stream_from_another_stream():
s1 = Stream.read_from(range(8))
s2 = Stream.read_from(s1).map(lambda x: x*2)
s3 = Stream.read_from(s2)
assert [0, 2, 4, 6, 8, 10, 12, 14] == await s3.to_list()
@pytest.mark.asyncio
async def test_iterating_over_a_stream():
stream = Stream.read_from(range(8))
result = [chunk async for chunk in stream]
assert [0, 1, 2, 3, 4, 5, 6, 7] == result
| 2.421875
| 2
|
app/recipe/tests/test_recipe_api.py
|
goldbossstatus/recipe-app-api
| 0
|
12775040
|
<reponame>goldbossstatus/recipe-app-api
# python function that allows you to generate temp files
import tempfile
import os
# pillow requirements importing our image class which will then let us create
# test images which we can then upload to our API
from PIL import Image
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
# /api/recipe/recipes
RECIPES_URL = reverse('recipe:recipe-list')
def image_upload_url(recipe_id):
'''
Return URL for recipe image upload
'''
# the name we will give our custom URL for our endpoint
return reverse('recipe:recipe-upload-image', args=[recipe_id])
# /api/recipe/recipes/2 (example)
def detail_recipe_url(recipe_id):
'''
Return recipe detail URL
'''
# you specify arguments with the reverse function by passing in args
# and a list of the arguments you want to add.
return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name='Main Course'):
'''
Create and return a sample tag
'''
return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='chocolate'):
'''
Create and return a sample ingredient
'''
return Ingredient.objects.create(user=user, name=name)
def create_user(**params):
return get_user_model().objects.create_user(**params)
def sample_recipe(user, **params):
'''
Create and return a sample recipe
'''
# set of default values with required recipe fields
    # any parameters that are passed in after 'user' will override any of
    # the default values we set here.
defaults = {
'title': 'Sample Recipe',
'time_minutes': 10,
'price': 10.00,
}
# update function from python library will allow us to customize
defaults.update(params)
    # **defaults will unpack our dictionary into keyword arguments
return Recipe.objects.create(user=user, **defaults)
class PublicRecipeApiTests(TestCase):
'''
    Test that unauthenticated users have no recipe API access
'''
def setUp(self):
self.client = APIClient()
def test_login_required(self):
'''
test that login is required to access the endpoint
'''
# make unauthenticated request
response = self.client.get(RECIPES_URL)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
'''
Test authenticated Recipe API Access
'''
def setUp(self):
self.client = APIClient()
self.user = create_user(
email='<EMAIL>',
password='<PASSWORD>',
name='<NAME>'
)
self.client.force_authenticate(self.user)
def test_retrieve_recipes(self):
'''
test retrieving a list of recipes
'''
# create two recipe objects using our sample_recipe helper function.
        # do not need to assign them to a variable because we don't need to
# access them in this test
sample_recipe(user=self.user)
sample_recipe(user=self.user)
# simulate get request
response = self.client.get(RECIPES_URL)
# now retrieve recipes from db
recipes = Recipe.objects.all().order_by('-id')
# pass our recipe into a serializer, return as a list(many=true)
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# assert that the data equals the serializer that we created
self.assertEqual(response.data, serializer.data)
def test_recipes_limited_to_user(self):
'''
Test retrieving recipes to authenticated user
'''
user2 = create_user(
email='<EMAIL>',
password='<PASSWORD>',
name='<NAME>'
)
        # create a recipe object for each user, first the unauthenticated user
sample_recipe(user=user2)
# second make recipe object for authenticated user
sample_recipe(user=self.user)
# simulate get http
response = self.client.get(RECIPES_URL)
# now filter recipes by authenticated user
recipes = Recipe.objects.filter(user=self.user)
# pass in returned queryset to serializer
serializer = RecipeSerializer(recipes, many=True)
# make assertions
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data, serializer.data)
def test_view_recipe_detail(self):
'''
test viewing a recipe detail
'''
# create a sample recipe
recipe = sample_recipe(user=self.user)
# add a tag
recipe.tags.add(sample_tag(user=self.user))
# add an ingredient
recipe.ingredients.add(sample_ingredient(user=self.user))
# generate a url that we will call
url = detail_recipe_url(recipe.id)
# generate http get for the url
response = self.client.get(url)
# now we expect the response to be serialized
serializer = RecipeDetailSerializer(recipe)
self.assertEqual(response.data, serializer.data)
def test_create_basic_recipe(self):
'''
Test creating recipe for auth'd user
'''
# create payload with minimum required fields for creating new recipe
payload = {
'title': 'Lava Cake',
'time_minutes': 25,
'price': 7.00
}
# post the payload dictionary to RECIPES_URL
response = self.client.post(RECIPES_URL, payload)
        # standard http response code for creating objects in an api
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # when you create an object using django rest framework, the default
        # behavior is it will return a dictionary containing the created object!
# retrieve created recipe from our models
recipe = Recipe.objects.get(id=response.data['id'])
# loop through each keys and check that the correct value is assigned
# to recipe model
for key in payload.keys():
# getattr is a python helper function that allows you to retrieve
# an attribute from an object by passing in a variable.
self.assertEqual(payload[key], getattr(recipe, key))
def test_create_recipe_with_tags(self):
'''
Test creating recipe with tags
'''
# create 2 sample tags to test
tag1 = sample_tag(user=self.user, name='Mexican')
tag2 = sample_tag(user=self.user, name='Appetizer')
# now create a recipe and assign the tags to the recipe
payload = {
'title': 'Avocado Dip',
'tags': [tag1.id, tag2.id],
'time_minutes': 30,
'price': 20.00,
}
# make http post request
response = self.client.post(RECIPES_URL, payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
# retrieve recipe that was created
recipe = Recipe.objects.get(id=response.data['id'])
        # retrieve tags that were created with the recipe
# because we have a manytomany field assigned to our tags,
# this will return all of the tags that were assigned to our recipe
# as a queryset and store them in a tags variable.
tags = recipe.tags.all()
# assert that our TWO tags were returned
self.assertEqual(tags.count(), 2)
# now assert that tags we created as our sample tags are the same
# that are in our query set.
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_create_recipe_with_ingredients(self):
'''
Test creating recipe with ingredients
'''
# create sample ingredients
ingredient1 = sample_ingredient(user=self.user, name='Avocado')
ingredient2 = sample_ingredient(user=self.user, name='tomato')
# now create a recipe and assign the ingredients to the recipe
payload = {
'title': 'Avocado Dip',
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 30,
'price': 20.00
}
# make http post request
response = self.client.post(RECIPES_URL, payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
# retrieve recipe that was created
recipe = Recipe.objects.get(id=response.data['id'])
# retrieve ingredients that were created with recipe
ingredients = recipe.ingredients.all()
# assert that our TWO ingredients were returned
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
def test_partial_updated_recipe(self):
'''
Test updating a recipe with patch
'''
# create sample recipe
recipe = sample_recipe(user=self.user)
# add a tag 1
recipe.tags.add(sample_tag(user=self.user))
# add a new tag we will replace tag 1 with
new_tag = sample_tag(user=self.user, name='Dessert')
# create payload with a different title to update old title
payload = {
'title': 'Fondant Tart',
'tags': [new_tag.id]
}
        # create the detail url for our recipe
url = detail_recipe_url(recipe.id)
# make http patch request
self.client.patch(url, payload)
# retrieve and update to the recipe from the database
# the details will not change in db unless you call refresh_from_db
recipe.refresh_from_db()
# assert that the title is equal to the new title, Fondant Tart
self.assertEqual(recipe.title, payload['title'])
# retrieve all of the tags that are assigned to this recipe
tags = recipe.tags.all()
# assert that the length of the tags is 1, because we only patched 1
self.assertEqual(len(tags), 1)
# assert that the new tag is in the tags that we retrieved
self.assertIn(new_tag, tags)
def test_full_update_recipe(self):
'''
test updating a recipe with put
'''
# create sample recipe
recipe = sample_recipe(user=self.user)
# create a tag for the sample recipe
recipe.tags.add(sample_tag(user=self.user))
# create payload for put update
payload = {
'title': 'Chiken Pasta Fettuccine',
'time_minutes': 35,
'price': 45,
}
# now create the url
url = detail_recipe_url(recipe.id)
# make http put request
self.client.put(url, payload)
        # make sure that values refresh from the db and have changed
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
# check the minutes have changed
self.assertEqual(recipe.time_minutes, payload['time_minutes'])
        # check the price has changed
self.assertEqual(recipe.price, payload['price'])
# check that the tags assigned are 0 BECAUSE
# when we omit a field in a PUT request that clears the value
# of that field.
# retrieve the tags (which there will be none)
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
class RecipeImageUploadTests(TestCase):
def setUp(self):
# create client
self.client = APIClient()
# create user
self.user = get_user_model().objects.create_user(
'<EMAIL>',
'<PASSWORD>'
)
# authenticate user
self.client.force_authenticate(self.user)
# create recipe
self.recipe = sample_recipe(user=self.user)
def tearDown(self):
# We want to make sure that our filesystem is kept clean after our
# tests, and this means REMOVING all of the test files that we create.
self.recipe.image.delete()
def test_upload_image_to_recipe(self):
'''
Test uploading an image to recipe
'''
# create the url using the sample recipe in the setUp
url = image_upload_url(self.recipe.id)
        # creates a named temporary file in the system at a random location,
        # (usually in the /temp folder)
        # this creates a temporary file in the system that we can then write
        # to, and after you exit the context manager (outside the
# with statement) it will automatically remove the file
with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            # creates an image with the Image class that we imported from
            # the PIL library: a black square that is 10 pixels by 10 pixels
image = Image.new('RGB', (10, 10))
# save the image to our file, and then write the full map that
# you want to save it as.
image.save(ntf, format='JPEG')
            # this is the way python reads files, so we use the seek function
            # to set the pointer back to the beginning of the file, as if we
            # just opened it
ntf.seek(0)
# request with image payload and add the format option to our post
# to tell django that we want to make a multipart form request.
            # A FORM THAT CONSISTS OF DATA. By default it would be a form that
            # just consists of a json object, but we want to post data
res = self.client.post(url, {'image': ntf}, format='multipart')
# now run assertions
self.recipe.refresh_from_db()
self.assertEqual(res.status_code, status.HTTP_200_OK)
# now check that image from payload is in the response
self.assertIn('image', res.data)
# check that path exists for image that is saved to model
self.assertTrue(os.path.exists(self.recipe.image.path))
def test_upload_image_bad_request(self):
'''
Test uploading an invalid image
'''
# create url
url = image_upload_url(self.recipe.id)
# create post request with invalid payload
res = self.client.post(url, {'image': 'not image'}, format='multipart')
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_filter_recipes_by_tags(self):
'''
Test returning recipes with specific tags
'''
# create three recipes, two of them will have tags assigned, and one
# of them will not have the tag assigned.
# make the request with the filter parameters for the tags and ensure
# that the results returned match the ones with the tags and exclude
# the one without the tags
recipe1 = sample_recipe(user=self.user, title='Chicken Lime Soup')
recipe2 = sample_recipe(user=self.user, title='Salmon with lemon')
tag1 = sample_tag(user=self.user, name='soup')
tag2 = sample_tag(user=self.user, name='fish')
recipe1.tags.add(tag1)
recipe2.tags.add(tag2)
recipe3 = sample_recipe(user=self.user, title='Tenderloin')
# so now make request for soup and fish options in our database
response = self.client.get(
RECIPES_URL,
# pass get parameters to a get request using the test client by
# just passing in a dictionary of the values you wish to add as
# get parameters.
            # the way the filter feature is being designed is that if you want
            # to filter by tags, you simply pass a get parameter with a comma-
            # separated list of the tag ids you wish to filter by
{'tags': f'{tag1.id},{tag2.id}'}
)
# serialize the recipes, and see if they exist in the responses return
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, response.data)
self.assertIn(serializer2.data, response.data)
self.assertNotIn(serializer3.data, response.data)
def test_filter_recipes_by_ingredient(self):
'''
Test returning recipes with specific ingredients
'''
# create sample recipes
recipe1 = sample_recipe(user=self.user, title='French Dip')
recipe2 = sample_recipe(user=self.user, title='Mac n cheese Bake')
# create sample ingredients
ingredient1 = sample_ingredient(user=self.user, name='Au Jus')
ingredient2 = sample_ingredient(user=self.user, name='Sharp Cheddar')
# add ingredients to recipe
recipe1.ingredients.add(ingredient1)
recipe2.ingredients.add(ingredient2)
# now make recipe without any ingredients added to it
recipe3 = sample_recipe(user=self.user, title='Chicken Parmesean')
response = self.client.get(
RECIPES_URL,
{'ingredients': f'{ingredient1.id},{ingredient2.id}'},
)
# serialize the objects
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
# make assertions
self.assertIn(serializer1.data, response.data)
self.assertIn(serializer2.data, response.data)
self.assertNotIn(serializer3.data, response.data)
| 2.734375
| 3
|
example_data/test_eval.py
|
pguridi/ageofempyres
| 0
|
12775041
|
<reponame>pguridi/ageofempyres
import sys
from turnboxed.utils import evaluate_in_sandbox
code = """from basebot import BaseBot
from basebot import BaseBot
class Bot(BaseBot):
def on_turn(self, data_dict):
return None
"""
def main():
evaluate_in_sandbox(code)
sys.exit(0)
if __name__ == "__main__":
main()
| 2.09375
| 2
|
PyBank/print_analysis.py
|
gshreve01/python-challenge
| 0
|
12775042
|
<reponame>gshreve01/python-challenge
# prints out the analysis to screen and to a file
import errno
import os
import sys
# after a lot of pain, eventually settled on this hack to import from a sibling directory
sys.path.insert(1, '../Common')
import common
# taken from stack overflow - https://stackoverflow.com/questions/21208376/converting-float-to-dollars-and-cents
def Format_Currency(amount):
amount = float(amount)
if amount >= 0:
return '${:,.2f}'.format(amount)
else:
return '-${:,.2f}'.format(-amount)
# taken as example to force generation of output directory from stack overflow
# https://stackoverflow.com/questions/12517451/automatically-creating-directories-with-file-output
def GetOutputFileName():
filename = os.path.join("..", "output", "AnalysisResults.txt")
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
return filename
# Define method to print out analysis
def Print_Analysis(analysis):
with open(GetOutputFileName(), mode='w', encoding='utf-8') as output:
common.SendOutput(output, "Financial Analysis")
common.SendOutput(output, "----------------------------")
common.SendOutput(output, f"Total Months: {analysis['total_months']}")
common.SendOutput(output, f"Total: {Format_Currency(analysis['total_profile_loss'])}")
common.SendOutput(output, f"Average Change: {Format_Currency(analysis['average_profit_loss_change'])}")
common.SendOutput(output, f"Greatest Increase in Profits: {analysis['greatest_increase']['month']} - " + \
f"{analysis['greatest_increase']['year']}" + \
f" ({Format_Currency(analysis['greatest_increase']['change_from_previous_month'])})")
common.SendOutput(output, f"Greatest Decrease in Profits: {analysis['greatest_decrease']['month']} - " + \
f"{analysis['greatest_decrease']['year']}" + \
f" ({Format_Currency(analysis['greatest_decrease']['change_from_previous_month'])})")
| 3.421875
| 3
|
yt/frontends/gdf/api.py
|
danielgrassinger/yt_new_frontend
| 0
|
12775043
|
<reponame>danielgrassinger/yt_new_frontend
"""
API for yt.frontends.gdf
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from .data_structures import \
GDFGrid, \
GDFHierarchy, \
GDFDataset
from .fields import \
GDFFieldInfo
add_gdf_field = GDFFieldInfo.add_field
from .io import \
IOHandlerGDFHDF5
| 0.996094
| 1
|
agents/nets.py
|
ishaanchandratreya/phyre-fwd
| 9
|
12775044
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from functools import partial
import numpy as np
import torch
import torch.nn as nn
import torchvision
from omegaconf import OmegaConf
import hydra
import phyre
from phyre_simulator import PhyreSimulator # pylint: disable=unused-import
from losses import * # pylint: disable=wildcard-import,unused-wildcard-import
from preproc import * # pylint: disable=wildcard-import,unused-wildcard-import
USE_CUDA = torch.cuda.is_available()
DEVICE = torch.device('cuda:0' if USE_CUDA else 'cpu')
np.random.seed(42)
class ActionNetwork(nn.Module):
def __init__(self, action_size, output_size, hidden_size=256,
num_layers=1):
super().__init__()
self.layers = nn.ModuleList([nn.Linear(action_size, hidden_size)])
for _ in range(1, num_layers):
self.layers.append(nn.Linear(hidden_size, hidden_size))
self.output = nn.Linear(hidden_size, output_size)
def forward(self, tensor):
for layer in self.layers:
tensor = nn.functional.relu(layer(tensor), inplace=True)
return self.output(tensor)
class FilmActionNetwork(nn.Module):
def __init__(self, action_size, output_size, **kwargs):
super().__init__()
self.net = ActionNetwork(action_size, output_size * 2, **kwargs)
def forward(self, actions, image):
beta, gamma = torch.chunk(
self.net(actions).unsqueeze(-1).unsqueeze(-1), chunks=2, dim=1)
return image * beta + gamma
class SimpleNetWithAction(nn.Module):
def __init__(self, action_size, action_network_kwargs=None):
super().__init__()
action_network_kwargs = action_network_kwargs or {}
self.stem = nn.Sequential(
nn.Conv2d(phyre.NUM_COLORS, 3, kernel_size=1, bias=False),
nn.BatchNorm2d(3),
nn.ReLU(inplace=True),
nn.Conv2d(3, 64, kernel_size=7, stride=4, padding=3, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=5, stride=2, padding=2, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=5, stride=2, padding=2, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=5, stride=2, padding=2, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=5, stride=2, padding=2,
bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=5, stride=2, padding=2,
bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
)
self.action_net = ActionNetwork(action_size, 128,
**action_network_kwargs)
@property
def device(self):
if hasattr(self, 'parameters') and next(self.parameters()).is_cuda:
return 'cuda'
else:
return 'cpu'
def preprocess(self, observations):
device = self.device
image = _image_colors_to_onehot(
observations.to(dtype=torch.long, device=device))
return dict(features=self.stem(image).squeeze(-1).squeeze(-1))
def forward(self, observations, actions, preprocessed=None):
if preprocessed is None:
preprocessed = self.preprocess(observations)
return self._forward(actions, **preprocessed)
def _forward(self, actions, features):
actions = self.action_net(actions.to(features.device))
return (actions * features).sum(-1) / (actions.shape[-1]**0.5)
def ce_loss(self, decisions, targets):
targets = torch.ByteTensor(targets).float().to(decisions.device)
return nn.functional.binary_cross_entropy_with_logits(
decisions, targets)
def _get_fusution_points(fusion_place_spec, max_points):
if fusion_place_spec == 'all':
return tuple(range(max_points))
elif fusion_place_spec == 'none':
return tuple()
else:
        return (int(fusion_place_spec), )
class ResNet18FilmAction(nn.Module):
def __init__(self,
action_size,
action_layers=1,
action_hidden_size=256,
fusion_place='last'):
super().__init__()
net = torchvision.models.resnet18(pretrained=False)
conv1 = nn.Conv2d(phyre.NUM_COLORS,
64,
kernel_size=7,
stride=2,
padding=3,
bias=False)
self.register_buffer('embed_weights', torch.eye(phyre.NUM_COLORS))
self.stem = nn.Sequential(conv1, net.bn1, net.relu, net.maxpool)
self.stages = nn.ModuleList(
[net.layer1, net.layer2, net.layer3, net.layer4])
def build_film(output_size):
return FilmActionNetwork(action_size,
output_size,
hidden_size=action_hidden_size,
num_layers=action_layers)
assert fusion_place in ('first', 'last', 'all', 'none', 'last_single')
self.last_network = None
if fusion_place == 'all':
self.action_networks = nn.ModuleList(
[build_film(size) for size in (64, 64, 128, 256)])
elif fusion_place == 'last':
# Save module as attribute.
self._action_network = build_film(256)
self.action_networks = [None, None, None, self._action_network]
elif fusion_place == 'first':
# Save module as attribute.
self._action_network = build_film(64)
self.action_networks = [self._action_network, None, None, None]
elif fusion_place == 'last_single':
# Save module as attribute.
self.last_network = build_film(512)
self.action_networks = [None, None, None, None]
elif fusion_place == 'none':
self.action_networks = [None, None, None, None]
else:
raise Exception('Unknown fusion place: %s' % fusion_place)
self.reason = nn.Linear(512, 1)
@property
def device(self):
if hasattr(self, 'parameters') and next(self.parameters()).is_cuda:
return 'cuda'
else:
return 'cpu'
def preprocess(self, observations):
image = self._image_colors_to_onehot(observations)
features = self.stem(image)
for stage, act_layer in zip(self.stages, self.action_networks):
if act_layer is not None:
break
features = stage(features)
else:
features = nn.functional.adaptive_max_pool2d(features, 1)
return dict(features=features)
def forward(self, observations, actions, preprocessed=None):
if preprocessed is None:
preprocessed = self.preprocess(observations)
return self._forward(actions, **preprocessed)
def _forward(self, actions, features):
actions = actions.to(features.device)
skip_compute = True
for stage, film_layer in zip(self.stages, self.action_networks):
if film_layer is not None:
skip_compute = False
features = film_layer(actions, features)
if skip_compute:
continue
features = stage(features)
if not skip_compute:
features = nn.functional.adaptive_max_pool2d(features, 1)
if self.last_network is not None:
features = self.last_network(actions, features)
features = features.flatten(1)
if features.shape[0] == 1 and actions.shape[0] != 1:
# Haven't had a chance to use actions. So will match batch size as
# in actions manually.
features = features.expand(actions.shape[0], -1)
return self.reason(features).squeeze(-1)
def ce_loss(self, decisions, targets):
targets = targets.to(dtype=torch.float, device=decisions.device)
return nn.functional.binary_cross_entropy_with_logits(
decisions, targets)
def _image_colors_to_onehot(self, indices):
onehot = torch.nn.functional.embedding(
indices.to(dtype=torch.long, device=self.embed_weights.device),
self.embed_weights)
onehot = onehot.permute(0, 3, 1, 2).contiguous()
return onehot
def _image_colors_to_onehot(indices):
onehot = torch.nn.functional.embedding(
indices, torch.eye(phyre.NUM_COLORS, device=indices.device))
onehot = onehot.permute(0, 3, 1, 2).contiguous()
return onehot
def gen_dyn_conv(dim_in, dim_out):
# Switched to 1x1 kernels since I might be running it on 1x1 features too.
# Using vector features when using object representation
conv = nn.Conv2d(dim_in,
dim_out,
kernel_size=1,
stride=1,
padding=0,
bias=False)
return conv
class DynConcat(nn.Module):
"""Simple dynamics model, that concats the features and 2 layer MLP."""
def __init__(self, encoder, dim, n, nobj):
super().__init__()
del encoder # This one doesn't need it
self.dyn = nn.Sequential(gen_dyn_conv(dim * n * nobj, dim * nobj),
nn.ReLU(inplace=True),
gen_dyn_conv(dim * nobj, dim * nobj),
nn.ReLU(inplace=True),
gen_dyn_conv(dim * nobj, dim * nobj))
def forward(self, features, pixels):
"""
This dyn model does not use pixels, so will just return the last history
frame
Args:
features: (B, T, Nobj, D, H', W')
pixels: (B, T, Nobj, C, H, W)
Returns:
pred: (B, Nobj, D, H', W')
pixels: (B, Nobj, C, H, W)
addl_losses: {}
"""
cat_feats = torch.reshape(features, (features.shape[0], -1) +
features.shape[-2:])
future_feat = torch.reshape(self.dyn(cat_feats),
features.shape[:1] + features.shape[2:])
# Skip connection, add the last frames features, so it stops
# deleting things
pred = features[:, -1, ...] + future_feat
return pred, pixels[:, -1, ...], {}
class MultiSTN(nn.Module):
"""Multi spatial transformer network: predicts multiple transformations
and applies to parts of the input feature, split on the channel dim."""
def __init__(self,
input_dim,
num_tx,
dof='affine',
inp_type='pix',
affine_tx_mode='bilinear',
kernel_size=3,
stochastic=False):
"""
Args:
input_dim (int): Dimension of the features used to predict the STN
parameters
num_tx (int): Number of transformations to predict, will apply to
the tensor, split along some dimension
dof (str): Controls how generic of a affine matrix to predict.
If 'affine', will predict a generic 3x2 matrix
If 'rot-trans-only', it will only predict theta, x, y,
and use those to construct the affine matrix. So it will force
the matrix to not do any shear, scale etc.
Similarly for 'rot-only' and 'trans-only'
inp_type (str): Defines the type of the input. 'pix' is the default,
to directly transform the grid and move the pixels. 'pt' is the
PointNet style format, where the first 2 dimensions of each
split of the channels must correspond to the X, Y location, and
the transforms will just modify those dimensions, and not
touch the pixel values at all.
affine_tx_mode (str): The mode to use for grid_sample
kernel_size (int)
stochastic (bool): If true, predict a distribution over the affine
matrix, instead of deterministically.
"""
super().__init__()
self.num_tx = num_tx
self.dof = dof
self.inp_type = inp_type
self.affine_tx_mode = affine_tx_mode
# Spatial transformer localization-network
self.localization = nn.Sequential(
nn.Conv2d(input_dim,
8 * num_tx,
kernel_size=kernel_size,
padding=kernel_size // 2), nn.ReLU(True),
nn.Conv2d(8 * num_tx,
10 * num_tx,
kernel_size=kernel_size,
padding=kernel_size // 2), nn.ReLU(True))
# Regressor for the affine matrices
# Predicting 3x2 parameters that should be enough for any generic
# affine transformation, though will subselect in case only few
# parameters are needed
self.stochastic = stochastic
if self.stochastic:
self.fc_loc_mean = nn.Linear(10 * num_tx, 10 * num_tx)
self.fc_loc_logvar = nn.Linear(10 * num_tx, 10 * num_tx)
self.fc_loc = nn.Sequential(nn.Linear(10 * num_tx, 32 * num_tx),
nn.ReLU(True),
nn.Linear(32 * num_tx, num_tx * 3 * 2))
# Initialize the weights/bias with identity transformation
self.fc_loc[2].weight.data.zero_()
if self.dof != 'affine': # The paramters would be used for rot/trans
self.fc_loc[2].bias.data.zero_() # 0 rot/translation by default
else:
self.fc_loc[2].bias.data.copy_(
torch.from_numpy(
                    np.array([1, 0, 0, 0, 1, 0] * num_tx, dtype=np.float64)))
def transform_pix(self, feat, theta, mode='bilinear'):
"""Transform the features using theta."""
grid = nn.functional.affine_grid(theta,
feat.size(),
align_corners=True)
return nn.functional.grid_sample(feat,
grid,
mode=mode,
align_corners=True)
def transform_pt(self, feat, theta):
"""Transform pt-net style feature using theta.
Here, it assumes the first 2 dimensions of the feature are loc.
Args:
feat (B, C, H, W), C >= 2
Returns:
tx feat (B, C, H, W)
"""
assert feat.shape[1] >= 2
feat_pos = feat[:, :2, ...]
feat_pos_ones = torch.ones_like(feat[:, :1, ...])
feat_pos_aug = torch.cat([feat_pos, feat_pos_ones], dim=1)
feat_pos_aug = feat_pos_aug.view(feat.shape[:1] + (3, -1))
feat_pos_aug_end = feat_pos_aug.transpose(1, 2).unsqueeze(-1)
txed = torch.matmul(theta.unsqueeze(1), feat_pos_aug_end)
tx_feat_pos = txed.squeeze(-1).transpose(1, 2).view(feat_pos.shape)
# Attach the features to it
tx_feat = torch.cat([tx_feat_pos, feat[:, 2:, ...]], dim=1)
return tx_feat
def _compute_loc_stochastic(self, feat_hist):
# from https://github.com/pytorch/examples/blob/master/vae/main.py#L53
mean = self.fc_loc_mean(feat_hist)
logvar = self.fc_loc_logvar(feat_hist)
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
latent_var_z = mean + eps * std
kl_loss = -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp())
return self.fc_loc(latent_var_z), kl_loss
def forward(self, feat_for_tx, feat_to_tx, split_dim=1):
"""
Args:
feat_for_tx (B, D, H, W): The features to use to compute the
transformation
feat_to_tx (B, D', H, W): Features to apply the tx onto
split_dim (int): Dimension to split on
"""
feat_hist_embed = self.localization(feat_for_tx)
# Average out the spatial dimension
feat_hist_embed = torch.mean(feat_hist_embed, dim=[-2, -1])
addl_losses = {}
if self.stochastic:
pred, kl_loss = self._compute_loc_stochastic(feat_hist_embed)
addl_losses['kl'] = kl_loss
else:
pred = self.fc_loc(feat_hist_embed)
if self.dof != 'affine':
pred = pred.view(-1, self.num_tx, 3 * 2)
# Say the first number is actual angle, and next 2 are x, y
angle = pred[..., :1]
pos_x = pred[..., 1:2]
pos_y = pred[..., 2:3]
if self.dof == 'rot-only':
pos_x = torch.zeros_like(pos_x)
pos_y = torch.zeros_like(pos_y)
elif self.dof == 'trans-only':
angle = torch.zeros_like(angle)
else:
assert self.dof == 'rot-trans-only', 'The only other option'
cos_angle = torch.cos(angle)
sin_angle = torch.sin(angle)
# create the 2x3 matrix out of this
theta = torch.cat(
[cos_angle, sin_angle, pos_x, -sin_angle, cos_angle, pos_y],
dim=-1)
theta = theta.view(theta.shape[:-1] + (2, 3))
elif self.dof == 'affine':
theta = pred.view(-1, self.num_tx, 2, 3)
else:
raise NotImplementedError('Unknown {}'.format(self.dof))
# Split the channels of feat_to_tx into num_tx groups, and apply the
# transformations to each of those groups
assert feat_to_tx.shape[split_dim] % self.num_tx == 0, (
'Must be divisible to ensure equal sized chunks')
# Chunk it
feat_to_tx_parts = torch.chunk(feat_to_tx, self.num_tx, split_dim)
# Apply the corresponding transformation to each part
if self.inp_type == 'pix':
tx_fn = partial(self.transform_pix, mode=self.affine_tx_mode)
elif self.inp_type == 'pt':
tx_fn = self.transform_pt
else:
raise NotImplementedError('Unknown type {}'.format(self.inp_type))
feat_to_tx_parts_txed = [
tx_fn(el, theta[:, i, ...])
for i, el in enumerate(feat_to_tx_parts)
]
return torch.cat(feat_to_tx_parts_txed, dim=split_dim), addl_losses
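
# --- Illustrative shape check for MultiSTN (not part of the original module) ---
# A minimal sketch, assuming 2 transformations over a 16-channel feature map;
# all sizes below are made up for demonstration. With the identity
# initialization above, the output initially equals the input features.
if __name__ == '__main__':
    _stn = MultiSTN(input_dim=8, num_tx=2)
    _feat_for_tx = torch.zeros(1, 8, 16, 16)   # features used to predict the transforms
    _feat_to_tx = torch.zeros(1, 16, 16, 16)   # split into 2 groups of 8 channels
    _out, _addl_losses = _stn(_feat_for_tx, _feat_to_tx)
    print(_out.shape, _addl_losses)            # torch.Size([1, 16, 16, 16]) {}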
class DynSTN(nn.Module):
"""Spatial Transformer based dynamics model."""
def __init__(self, encoder, dim, n, nobj, num_tx, base_stn):
super().__init__()
del encoder # This one doesn't need it
assert nobj == 1 or nobj == num_tx, (
'Either split the 1 object features and tx, or tx each obj sep')
self.dyn = hydra.utils.instantiate(base_stn, dim * n * nobj, num_tx)
def forward(self, features, pixels):
"""
This dyn model does not use pixels, so will just return the last history
frame
Args:
features: (B, T, Nobj, D, H', W')
pixels: (B, T, Nobj, C, H, W)
Returns:
pred: (B, Nobj, D, H', W')
pix
addl_losses
"""
cat_feats = torch.reshape(features, (features.shape[0], -1) +
features.shape[-2:])
# For > 1 objs, just flatten Nobj and D channels, and the STN class
# will split it back to do the transformations
feat_obj_flat = torch.flatten(features, 2, 3)
new_feat, addl_loses = self.dyn(cat_feats, feat_obj_flat[:, -1, ...])
future_feat = torch.reshape(new_feat,
features.shape[:1] + features.shape[2:])
return future_feat, pixels[:, -1, ...], addl_loses
class DynSTNPixels_DEPRECATED(nn.Module):
"""Spatial Transformer based dynamics model, applied on pixels.
Use DynSTNPixelChannelsDetBg"""
def __init__(self, encoder, dim, n, nobj, num_tx, base_stn):
super().__init__()
self.enc = encoder
self.dyn = hydra.utils.instantiate(base_stn, dim * n * nobj, num_tx)
self.num_tx = num_tx
# A network to predict num_tx attention maps
self.attention = nn.Sequential(
gen_deconv(dim * n * nobj, num_tx),
*([gen_deconv(num_tx, num_tx, upsample_factor=4)] * 2),
nn.Conv2d(num_tx, num_tx, kernel_size=1, padding=0, bias=False),
nn.Softmax(dim=1))
def forward(self, features, pixels):
"""
Args:
features: (B, T, Nobj, D, H', W')
pixels: (B, T, C, H, W)
Returns:
pred: (B, Nobj, D, H', W')
"""
raise NotImplementedError('Deal with objectified pixel input. '
'Also deal with addl losses. ')
cat_feats = torch.reshape(features, (features.shape[0], -1) +
features.shape[-2:])
assert features.shape[2] == 1, 'Not implemented yet for >1 objs'
# Repmat the image channels num_tx times, so STN can predict those many
# transformations
pixels_tiled = pixels.repeat(1, 1, self.num_tx, 1, 1)
future_pixels_tiled = self.dyn(cat_feats, pixels_tiled[:, -1, ...])
# Compute attention maps for compositing
attention_maps = self.attention(cat_feats)
# Do a weighted sum of the channels using the attention maps
attention_maps_split = torch.chunk(attention_maps, self.num_tx, 1)
future_pixels_split = torch.chunk(future_pixels_tiled, self.num_tx, 1)
weighted = [
att * pix
for att, pix in zip(attention_maps_split, future_pixels_split)
]
future_pixels = torch.mean(torch.stack(weighted), dim=0)
# Since this is a new image being generated, need to pass through the
# encoder to get the features for this image
future_feat = self.enc(future_pixels.unsqueeze(1))[:, 0, ...]
return future_feat, future_pixels
class DynSTNPixelChannels_DEPRECATED(nn.Module):
"""Spatial Transformer based dynamics model, applied on channels of img.
Use DynSTNPixelChannelsDetBg"""
def __init__(self, encoder, dim, n, nobj, base_stn):
super().__init__()
self.enc = encoder
self.num_tx = phyre.NUM_COLORS # One tx per color
self.dyn = hydra.utils.instantiate(base_stn, dim * n * nobj,
self.num_tx)
def forward(self, features, pixels):
"""
Args:
features: (B, T, Nobj, D, H', W')
pixels: (B, T, C, H, W)
Returns:
pred: (B, Nobj, D, H', W')
"""
raise NotImplementedError('Deal with objectified pixel input. '
'Also deal with addl losses. ')
assert (pixels.shape[2] == self.num_tx or
pixels.shape[2] == self.num_tx * 3), 'In pix or pt mode so far'
cat_feats = torch.reshape(features, (features.shape[0], -1) +
features.shape[-2:])
assert features.shape[2] == 1, 'Not implemented yet for >1 objs'
future_pixels = self.dyn(cat_feats, pixels[:, -1, ...])
# Since this is a new image being generated, need to pass through the
# encoder to get the features for this image
future_feat = self.enc(future_pixels.unsqueeze(1))[:, 0, ...]
return future_feat, future_pixels
class DynSTNPixelChannelsGenBg_DEPRECATED(nn.Module):
"""Spatial Transformer based dynamics model, applied on channels of img.
Generates the background.
Use DynSTNPixelChannelsDetBg
"""
def __init__(self, encoder, dim, n, nobj, base_stn):
super().__init__()
self.enc = encoder
# One tx per color, except background that is generated since it's not
# an object that can be moved like others. Just a 1x1 convolution on
# the predicted image to gen the last channel
self.num_tx = phyre.NUM_COLORS - 1
self.dyn = hydra.utils.instantiate(base_stn, dim * n * nobj,
self.num_tx)
# Just a couple layer should suffice, over the last frame, and new frame
# feature
self.bg_dec = nn.Sequential(
nn.Conv2d(2 * phyre.NUM_COLORS - 1,
8,
kernel_size=1,
stride=1,
padding=0,
bias=False), nn.ReLU(inplace=True),
nn.Conv2d(8, 1, kernel_size=1, stride=1, padding=0, bias=False))
def forward(self, features, pixels):
"""
Args:
features: (B, T, Nobj, D, H', W')
pixels: (B, T, C, H, W)
Returns:
pred: (B, Nobj, D, H', W')
"""
raise NotImplementedError('Deal with objectified pixel input. '
'Also deal with addl losses. ')
assert (pixels.shape[2] - 1) == self.num_tx
cat_feats = torch.reshape(features, (features.shape[0], -1) +
features.shape[-2:])
assert features.shape[2] == 1, 'Not implemented yet for >1 objs'
future_pixels_obj = self.dyn(cat_feats, pixels[:, -1, 1:, ...])
future_pixels_bg = self.bg_dec(
torch.cat([pixels[:, -1, ...], future_pixels_obj], dim=1))
future_pixels = torch.cat([future_pixels_bg, future_pixels_obj], dim=1)
# Since this is a new image being generated, need to pass through the
# encoder to get the features for this image
future_feat = self.enc(future_pixels.unsqueeze(1))[:, 0, ...]
return future_feat, future_pixels
class DynSTNPixelChannelsDetBg(nn.Module):
"""Spatial Transformer based dynamics model, applied on channels of img.
Generates the background deterministically, using the change.
"""
def __init__(self,
encoder,
dim,
n,
nobj,
base_stn,
movable_ch,
movable_only=False):
super().__init__()
self.enc = encoder
self.movable_only = movable_only
# One tx per color (or only the movable colors, if that is set),
# except the background, which is generated since it's not
# an object that can be moved like the others.
if self.movable_only:
self.movable_channels = torch.LongTensor(movable_ch)
else:
self.movable_channels = torch.arange(1, phyre.NUM_COLORS)
self.num_tx = len(self.movable_channels)
self.nobj = nobj
self.dyn = hydra.utils.instantiate(base_stn, dim * n * nobj,
self.num_tx * nobj)
def forward(self, features, pixels):
"""
Args:
features: (B, T, Nobj, D, H', W')
pixels: (B, T, Nobj, C, H, W)
Returns:
pred: (B, Nobj, D, H', W')
pix
addl_losses
"""
assert pixels.shape[3] >= self.num_tx
cat_feats = torch.reshape(features, (features.shape[0], -1) +
features.shape[-2:])
pixels_movable = pixels[:, -1, :, self.movable_channels, ...]
# combine all channels of objects and transform
pixels_movable_flat = torch.flatten(pixels_movable, 1, 2)
future_pixels_flat_movable, addl_losses = self.dyn(
cat_feats, pixels_movable_flat)
future_pixels_movable = future_pixels_flat_movable.view(
pixels_movable.shape)
future_pixels = pixels[:, -1, ...] # Copy most of the channels
future_pixels[:, :, self.movable_channels, ...] = future_pixels_movable
# Compute the background deterministically: wherever all other channels
# are 0, it has to be 1. So make the channels sum to 1.
future_pixels_bg = 1.0 - torch.sum(
future_pixels[:, :, 1:, ...], dim=2, keepdims=True)
future_pixels[:, :, :1, ...] = future_pixels_bg
# Since this is a new image being generated, need to pass through the
# encoder to get the features for this image
future_feat = self.enc(future_pixels.unsqueeze(1))[:, 0, ...]
return future_feat, future_pixels, addl_losses
def gen_deconv(in_dim,
out_dim,
stride=1,
kernel_size=3,
padding=1,
upsample_factor=2,
inst_norm=False,
activation=nn.ReLU(inplace=True)):
return nn.Sequential(
nn.ConvTranspose2d(in_dim,
out_dim,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=False),
# nn.Sequential() simulates identity, if no instance norm to be added
nn.InstanceNorm2d(out_dim, affine=True)
if inst_norm else nn.Sequential(),
activation,
nn.Upsample(scale_factor=upsample_factor,
mode='bilinear',
align_corners=True))
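# --- Added sketch (not part of the original file) ---
# A minimal usage sketch for `gen_deconv` with assumed, illustrative sizes:
# the stride-1 transposed conv keeps the spatial size and the bilinear
# upsample then doubles it. Wrapped in a helper so nothing runs at import.
def _gen_deconv_shape_sketch():
    block = gen_deconv(128, 64, upsample_factor=2)
    x = torch.zeros(2, 128, 8, 8)
    out = block(x)
    # ConvTranspose2d(k=3, s=1, p=1) keeps 8x8; the upsample doubles it,
    # so `out` has shape (2, 64, 16, 16).
    return out.shape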
class BasicDecoder(nn.Module):
"""Simple decoder, goes from features to frame representation."""
def __init__(self, in_dim, out_dim, nlayers, kernel_size, padding,
upsample_factor, decode_from, backprop_feat_ext, inst_norm,
activation):
super().__init__()
decoder_dim = 256
self.backprop_feat_ext = backprop_feat_ext
self.decode_from = decode_from
assert self.decode_from in ['pixels', 'features']
if self.decode_from == 'pixels':
in_dim = phyre.NUM_COLORS
decoder_dim = 16
activation = hydra.utils.instantiate(activation)
logging.warning('Using %s activation for decoders', activation)
inter_layers = [
gen_deconv(decoder_dim,
decoder_dim,
1,
kernel_size,
padding,
upsample_factor,
inst_norm,
activation=activation) for _ in range(nlayers)
]
self.deconv_net = nn.Sequential(
gen_deconv(in_dim,
decoder_dim,
1,
kernel_size,
padding,
upsample_factor,
activation=activation), *inter_layers,
gen_deconv(
decoder_dim,
out_dim,
1,
kernel_size,
padding,
upsample_factor,
activation=nn.Sequential())) # No activation on the last
def forward(self, features, pixels):
"""
Args:
features (BxNobjxDxH'xW'): Features to be decoded
pixels (BxNobjxCxHxW): Pixels generated by the dynamics model
Returns:
imgs (BxNobjxD_outxHxW): Output frames (per obj, aggregation is
done later in the Fwd class)
"""
if self.decode_from == 'pixels':
decode_feature = pixels
else:
decode_feature = features
if not self.backprop_feat_ext:
# Means train the decoder separately from the rest of the network,
# don't backprop gradients to the feature extractor
decode_feature = decode_feature.detach()
# Sum the features over all the objects and do a single decode.
# Separate decodes take way too much time, so we need to do it once
decode_feature = torch.sum(decode_feature, dim=1, keepdims=True)
features_flatten_obj = torch.flatten(decode_feature, 0, 1)
images = self.deconv_net(features_flatten_obj)
# Reshape back into object level
out = torch.reshape(images,
decode_feature.shape[:2] + images.shape[1:])
return out
class TrivialDecoder(nn.Module):
"""Trivial decoder, simply outputs the frames from the dynamics model."""
def __init__(self, in_dim, out_dim):
super().__init__()
del in_dim, out_dim
def forward(self, features, pixels):
"""
Args:
features (BxNobjxDxH'xW'): Features to be decoded
pixels (BxNobjxCxHxW): Pixels generated by the dynamics model
Returns:
imgs (BxNobjxCxHxW): Output frames
"""
del features # assumes the dynamics model will do all decoding
return pixels
def average_losses(all_losses):
"""Average the losses into one dict of losses.
Args:
all_losses: List of dictionary of losses.
Returns:
combined: A dictionary with same keys as individual dicts, with
all losses combined.
"""
if len(all_losses) == 0:
return {}
combined = {}
for key, val in all_losses[0].items():
if not isinstance(val, torch.Tensor):
# If it's None or similar, e.g. some loss was not active
combined[key] = val
else:
# Average all the values
stkd = torch.stack([el[key] for el in all_losses])
# Average the losses that are positive, since I set undefined
# losses to -1 (where not enough GT is available, etc)
combined[key] = torch.mean(stkd * (stkd >= 0), dim=0)
return combined
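# --- Added sketch (not part of the original file) ---
# Illustrates, with made-up values, how `average_losses` merges per-step loss
# dicts: entries marked undefined with -1 are zeroed by the (stkd >= 0) mask
# before the mean. Wrapped in a helper so nothing runs at import time.
def _average_losses_sketch():
    step1 = {'pix': torch.tensor([0.4]), 'nce': torch.tensor([-1.0])}
    step2 = {'pix': torch.tensor([0.2]), 'nce': torch.tensor([0.6])}
    combined = average_losses([step1, step2])
    # combined['pix'] == tensor([0.3000]); combined['nce'] == tensor([0.3000]),
    # since the -1 placeholder is masked to 0 before averaging.
    return combined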
class BasicObjEncoder(nn.Module):
"""Takes objectified representation, and puts it through more layers."""
def __init__(self,
in_dim,
out_dim,
nlayers,
kernel_size=3,
stride=1,
padding=1,
spatial_mean=True):
super().__init__()
if nlayers > 0:
self.out_dim = out_dim
else:
logging.warning('Ignoring the out_dim (%d) for ObjEncoder',
out_dim)
self.out_dim = in_dim
layers_lst = [[
nn.Conv2d(in_dim if i == 0 else out_dim,
out_dim,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=False),
nn.ReLU(inplace=True)
] for i in range(nlayers)]
layers_lst_flat = [item for sublist in layers_lst for item in sublist]
if len(layers_lst_flat) > 0:
layers_lst_flat = layers_lst_flat[:-1] # Remove the last relu
self.encoder = nn.Sequential(*layers_lst_flat)
else:
self.encoder = None
self.spatial_mean = spatial_mean
def forward(self, feat):
"""
Args:
feat: (B, T, Nobj, D, H', W')
"""
if self.encoder:
feat_flat = torch.flatten(feat, 0, 2)
obj_embed_flat = self.encoder(feat_flat)
obj_embed = torch.reshape(
obj_embed_flat, feat.shape[:3] + obj_embed_flat.shape[1:])
else:
obj_embed = feat
if self.spatial_mean:
obj_embed = torch.mean(obj_embed, dim=[-1, -2], keepdims=True)
return obj_embed
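# --- Added sketch (not part of the original file) ---
# Shape walk-through for `BasicObjEncoder` with assumed sizes: one conv layer
# maps D=16 -> out_dim=8 per (frame, object), and spatial_mean pools H'xW'
# down to 1x1. Wrapped in a helper so nothing runs at import time.
def _basic_obj_encoder_shape_sketch():
    enc = BasicObjEncoder(in_dim=16, out_dim=8, nlayers=1, spatial_mean=True)
    feat = torch.zeros(2, 3, 1, 16, 4, 4)  # (B, T, Nobj, D, H', W')
    out = enc(feat)
    # out has shape (2, 3, 1, 8, 1, 1)
    return out.shape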
class ContextGatingObjectifier(nn.Module):
"""Takes intermediate representation and converts into object-level rep."""
def __init__(self, dim, obj_encoder, nobj=1):
super().__init__()
self.obj_mapper = nn.Sequential(
nn.Conv2d(dim, dim, kernel_size=1, stride=1, padding=0,
bias=False), nn.ReLU(inplace=True),
nn.Conv2d(dim,
nobj,
kernel_size=1,
stride=1,
padding=0,
bias=False))
self.obj_encoder = hydra.utils.instantiate(obj_encoder, dim)
self.out_dim = self.obj_encoder.out_dim
def forward(self, vid_feat):
"""
Decompose the video features into object level representation.
Args:
vid_feat: (BxTxDxH'xW')
nobj (int): Max number of objects in the scene. The hope is that the
extra channels will just have some degenerate information
Returns:
BxTxNobjxDxH''xW''
"""
raise NotImplementedError('The inp is now objfied, TODO deal with it')
batch_size = vid_feat.shape[0]
# Use context gating: generate a heatmap for each object at each time
# step, and weight using that heatmap to get an object representation
flatten_feat = torch.flatten(vid_feat, 0, 1)
# Unsqueeze to add a channel dimension to the attention maps
obj_map = self.obj_mapper(flatten_feat).unsqueeze(2)
# Add a 1-D object dimension
flatten_feat = flatten_feat.unsqueeze(1)
# Weight the feats with the attention maps to get the object-features
mapped_feat = flatten_feat * obj_map
# Reshape to add the time dimension back
mapped_feat = torch.reshape(mapped_feat,
(batch_size, -1) + mapped_feat.shape[1:])
final_feat = self.obj_encoder(mapped_feat)
return final_feat
class ChannelSplitObjectifier(nn.Module):
"""Splits the channel of image representation to get obj rep."""
def __init__(self, dim, obj_encoder, nobj=1):
super().__init__()
self.nobj = nobj
self.obj_encoder = hydra.utils.instantiate(obj_encoder, dim // nobj)
self.out_dim = self.obj_encoder.out_dim
def forward(self, vid_feat):
"""
Decompose the video features into object level representation.
Args:
vid_feat: (BxTxNobjxDxH'xW')
Returns:
BxTxNobjx(D/Nobj)xH'xW'
"""
assert vid_feat.shape[2] == 1, (
'Channel split can not deal with pre objectified {} input'.format(
vid_feat.shape[2]))
assert vid_feat.shape[3] % self.nobj == 0, 'Must be divisible'
# Reshape the channel dimension to split into an object dimension
objed = vid_feat.view(vid_feat.shape[:2] + (self.nobj, -1) +
vid_feat.shape[-2:])
assert objed.shape[2] == self.nobj
assert objed.shape[3] == vid_feat.shape[3] / self.nobj
# Apply a little network to get a flat feature
obj_encoded = self.obj_encoder(objed)
return obj_encoded
class TrivialObjectifier(nn.Module):
"""Simply returns the feature.
An earlier version would unsqueeze, but since the component splitting the
input already yields at least 1 obj, there is no need to unsqueeze further.
"""
def __init__(self, dim, obj_encoder, nobj=1):
super().__init__()
del obj_encoder
self.nobj = nobj
self.out_dim = dim
def forward(self, vid_feat):
assert vid_feat.shape[2] == self.nobj, ('{} != {}'.format(
vid_feat.shape[2], self.nobj))
return vid_feat
class SimpleBaseEncoder(nn.Module):
"""Simple network, simplified from Anton's version."""
def __init__(self, in_dim, width_scale_factor):
"""Simple encoder weights.
For a 256x256 input, it'll give a 4x4 output."""
super().__init__()
self.width_scale_factor = width_scale_factor
_s = self._scale_int
self.stem = nn.Sequential(
nn.Conv2d(in_dim, 3, kernel_size=1, bias=False),
nn.BatchNorm2d(3),
nn.ReLU(inplace=True),
nn.Conv2d(3,
_s(64),
kernel_size=7,
stride=2,
padding=3,
bias=False),
nn.BatchNorm2d(_s(64)),
nn.ReLU(inplace=True),
nn.Conv2d(_s(64),
_s(64),
kernel_size=5,
stride=2,
padding=2,
bias=False),
nn.BatchNorm2d(_s(64)),
nn.ReLU(inplace=True),
nn.Conv2d(_s(64),
_s(64),
kernel_size=5,
stride=2,
padding=2,
bias=False),
nn.BatchNorm2d(_s(64)),
nn.ReLU(inplace=True),
nn.Conv2d(_s(64),
_s(64),
kernel_size=5,
stride=2,
padding=2,
bias=False),
nn.BatchNorm2d(_s(64)),
nn.ReLU(inplace=True),
nn.Conv2d(_s(64),
_s(128),
kernel_size=5,
stride=2,
padding=2,
bias=False),
nn.BatchNorm2d(_s(128)),
nn.ReLU(inplace=True),
)
self.out_dim = _s(128)
def _scale_int(self, n):
"""Scale the number by a factor. To control width of this network."""
return int(self.width_scale_factor * n)
def forward(self, image):
return self.stem(image)
class ResNetBaseEncoder(nn.Module):
"""ResNet based feature extractor."""
def __init__(self, in_dim, base_model, nlayers):
super().__init__()
net = hydra.utils.instantiate(base_model)
conv1 = nn.Conv2d(in_dim,
64,
kernel_size=7,
stride=2,
padding=3,
bias=False)
self.stem = nn.Sequential(conv1, net.bn1, net.relu, net.maxpool)
self.stages = nn.ModuleList(
[getattr(net, 'layer%d' % (i + 1)) for i in range(nlayers)])
last_stage = self.stages[-1][-1]
if hasattr(last_stage, 'bn3'):
self.out_dim = last_stage.bn3.num_features
elif hasattr(last_stage, 'bn2'):
self.out_dim = last_stage.bn2.num_features
else:
raise ValueError('This should not happen')
def forward(self, image):
features = self.stem(image)
for stage in self.stages:
features = stage(features)
return features
class BasicEncoder(nn.Module):
"""Encode pixels to features."""
def __init__(self, in_dim, nobj, feat_ext, objectifier, obj_encoder,
spatial_mean, feat_ext_eval_mode, process_objs_together):
"""
Args:
obj_before_enc: If true, do the objectify in the input (pixel) space
before running the encode (so each object is encoded separately)
spatial_mean: Avg pool the features to 1x1
feat_ext_eval_mode: Set the feature extractor to eval mode for BN,
dropout etc
process_objs_together: If true, it will concatenate all objs on the
channel dimension, extract features, and split the features
in channel dimensions to get features for each obj
"""
super().__init__()
self.nobj = nobj
self.process_objs_together = process_objs_together
# The image embedding model
self.feat_ext = hydra.utils.instantiate(
feat_ext, in_dim * nobj if self.process_objs_together else in_dim)
initial_dim = self.feat_ext.out_dim
# The objects model
self.objectifier = hydra.utils.instantiate(objectifier, initial_dim,
obj_encoder)
self.out_dim = self.objectifier.out_dim
if self.process_objs_together:
assert self.out_dim % nobj == 0
self.out_dim //= nobj
self.spatial_mean = spatial_mean
self.feat_ext_eval_mode = feat_ext_eval_mode
def _forward_vid(self, batch_vid_obs, l2_norm_feats=False):
"""
Convert a video into images to run the forward model.
Args:
batch_vid_obs: BxTxCxHxW or BxTxNobjxCxHxW
Returns:
features: BxTxDxH'xW' or BxTxNobjxDxH'xW'
"""
# Add an object dimension, so the rest of the code doesn't have to
# deal with edge cases
added_obj_dim = False
if len(batch_vid_obs.shape) == 4:
added_obj_dim = True
batch_vid_obs = batch_vid_obs.unsqueeze(2) # BxTxNobjxCxHxW
# Flatten videos into frames to extract out the features
# resulting shape B'xC'xHxW
if self.process_objs_together:
# resulting shape B' = B * T, C' = Nobj * C
flat_obs = batch_vid_obs.reshape((-1, ) + batch_vid_obs.shape[-4:])
flat_obs = torch.flatten(flat_obs, 1, 2)
else:
# resulting shape B' = B * T * Nobj, C' = C
flat_obs = batch_vid_obs.reshape((-1, ) + batch_vid_obs.shape[-3:])
# Extract features
if self.feat_ext_eval_mode:
self.feat_ext.eval()
features = self.feat_ext(flat_obs)
if self.spatial_mean:
# Mean over spatial dimensions
features = torch.mean(features, dim=[-2, -1], keepdims=True)
if l2_norm_feats:
# L2 normalize the features -- MemoryBank, MoCo and PIRL do that
features = nn.functional.normalize(features, p=2, dim=-1)
# Reshape back to original batch dimension
if self.process_objs_together:
features_batched = features.reshape(batch_vid_obs.shape[:2] +
(self.nobj, -1) +
features.shape[-2:])
else:
features_batched = features.reshape(batch_vid_obs.shape[:-3] +
features.shape[1:])
if added_obj_dim:
features_batched = features_batched.squeeze(2)
assert features_batched.shape[-3] == self.out_dim
return features_batched
def forward(self, vid):
"""
Args:
vid (B, T, Nobj, C, H, W): Input video, in preprocessed form; i.e.
one-hot
Returns:
obj_feat (B, T, Nobj', D, H', W'): Features with objects, if needed
"""
vid_feat = self._forward_vid(vid)
vid_feat = self.objectifier(vid_feat)
return vid_feat
def combine_obj_pixels(obj_pix, obj_dim):
"""Combine obj-split pixels into a single image.
Args:
obj_pix: B, ..., Nobj, ..., C, H, W
obj_dim: The dimension to reduce over -- which corresponds to objs
Returns
B, ..., ..., C, H, W
"""
if obj_pix is None:
return None
return torch.max(obj_pix, dim=obj_dim)[0]
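# --- Added sketch (not part of the original file) ---
# `combine_obj_pixels` collapses the object axis with an element-wise max,
# compositing the per-object one-hot frames into a single frame. Sizes below
# are assumptions for illustration; nothing runs at import time.
def _combine_obj_pixels_sketch():
    obj_pix = torch.zeros(2, 3, 7, 64, 64)  # (B, Nobj, C, H, W)
    combined = combine_obj_pixels(obj_pix, 1)
    # combined has shape (2, 7, 64, 64): the Nobj axis at dim 1 is reduced.
    return combined.shape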
class MLPClassifier(nn.Module):
"""Simple classifier on top of the intermediate features."""
def __init__(self, in_dim, nlayers, match_inp_sz_layer=False):
super().__init__()
self.nlayers = nlayers
if nlayers == 0:
return
# First linear layer, to project to the in_dim dimension, if not already there
self.match_inp_sz_layer = match_inp_sz_layer
if self.match_inp_sz_layer:
raise NotImplementedError('Does not work with multi-GPU yet.')
self.register_parameter('init_linear_wt', None)
self.in_dim = in_dim
layers = [[nn.Linear(in_dim, in_dim),
nn.ReLU(inplace=True)] for _ in range(nlayers - 1)]
layers_flat = [item for sublist in layers for item in sublist]
self.cls = nn.Sequential(*(layers_flat[:-1] + [nn.Linear(in_dim, 1)]))
def reset_parameters(self, inp, in_dim, out_dim):
self.init_linear_wt = nn.Parameter(
inp.new(in_dim, out_dim).normal_(0, 1))
def forward(self, preds, pixs, process_all_frames=False):
"""
Run the classifier on the predictions.
Args:
preds: (BxTx1xDxH'xW')
pixs: (BxTx1xDxHxW)
process_all_frames: Set true when used by other classifiers for
intermediate feature extraction, to get features for each
frame.
Returns:
solved: (BxT)
"""
del pixs # This does not use it
if self.nlayers == 0:
return preds
# Since this classifier doesn't take context into account and the final
# _cls is only going to look at the last frame, we might as well process
# only that last frame
if not process_all_frames:
preds = preds[:, -1:, ...]
mean_feat = torch.mean(preds, axis=[2, -1, -2])
if self.match_inp_sz_layer:
if self.init_linear_wt is None:
logging.warning(
'Creating a linear layer to map the input '
'dims (%d) to MLP input dim (%d)', mean_feat.shape[-1],
self.in_dim)
self.reset_parameters(preds, self.in_dim,
preds.shape[1] * preds.shape[3])
mean_feat = nn.functional.linear(mean_feat, self.init_linear_wt)
mean_feat = nn.ReLU(inplace=True)(mean_feat)
return self.cls(mean_feat).squeeze(-1)
class ConvNetClassifier(nn.Module):
"""ConvNet classifier on top of the intermediate features."""
def __init__(self, feat_in_dim, num_conv_blocks, num_fc_layers):
super().__init__()
del feat_in_dim
nobj = 1
self.enc = BasicEncoder(
phyre.NUM_COLORS,
nobj,
OmegaConf.create({
'class': 'nets.ResNetBaseEncoder',
'params': {
'base_model': {
'class': 'torchvision.models.resnet18',
'params': {
'pretrained': False,
}
},
'nlayers': num_conv_blocks,
}
}),
OmegaConf.create({
'class': 'nets.TrivialObjectifier',
'params': {
'nobj': nobj, # will sum into 1 obj
}
}),
OmegaConf.create({
'class': 'nets.BasicObjEncoder',
'params': {
'out_dim': 16,
'nlayers': 0,
'spatial_mean': True,
}
}),
spatial_mean=False,
feat_ext_eval_mode=False,
process_objs_together=False, # Doesn't matter, 1 obj
)
self.cls = MLPClassifier(self.enc.out_dim, num_fc_layers)
def forward(self, preds, pixs, process_all_frames=False):
"""
Run the classifier on the predictions.
Args:
preds: (BxTx1xDxH'xW')
pixs: (BxTx1xDxHxW)
process_all_frames: Set true when used by other classifiers for
intermediate feature extraction, so to get features for each
frame.
Returns:
solved: (BxT)
"""
# Not enforcing the assert here if pred is None, since this module
# is usually used by other modules as a way to extract features,
# and it might pass in None for preds. But rest assured, this check
# would have been done on the caller side.
assert preds is None or preds.shape[1] == pixs.shape[1], (
'Must pass in run_decode=True if using a pixel-based classifier!!')
del preds # This does not use it
# Since this classifier doesn't take context into account and the final
# _cls is only going to look at the last frame, we might as well process
# only that last frame
if not process_all_frames:
pixs = pixs[:, -1:, ...]
obj_feats = self.enc(pixs)
return self.cls(obj_feats, None, process_all_frames=process_all_frames)
class TxClassifier(nn.Module):
"""Transformer on top of the intermediate features over time."""
def __init__(self, in_dim, nheads, nlayers):
super().__init__()
self.tx_enc = TxEncoder(in_dim, nheads, nlayers)
self.cls = nn.Linear(self.tx_enc.out_dim, 1)
def forward(self, preds, pixs):
"""
Run the classifier on the predictions.
Args:
preds: (BxTx1xDxH'xW')
pixs: (BxTx1xDxHxW)
Returns:
solved: (Bx1)
"""
del pixs # This does not use it
# Spatial mean the features
stacked_mean_feat = torch.flatten(torch.mean(preds, axis=[-1, -2]), 1,
2)
feat_enc_time = self.cls(self.tx_enc(stacked_mean_feat))
# Max pool over time to get the final prediction
# Keepdims since the output format expects a time dimension and does
# a max pool over it at the end
cls_pred = torch.max(feat_enc_time, dim=1,
keepdims=True)[0].squeeze(-1)
return cls_pred
class ConvTxClassifier(nn.Module):
"""Transformer on top of the Conv features learned over time."""
def __init__(self, in_dim, nconvblocks, nheads, nlayers):
super().__init__()
self.conv_feat = ConvNetClassifier(in_dim, nconvblocks, 0)
self.tx_cls = TxClassifier(self.conv_feat.enc.out_dim, nheads, nlayers)
def forward(self, preds, pixs):
"""
Run the classifier on the predictions.
Args:
preds: (BxTx1xDxH'xW')
pixs: (BxTx1xDxHxW)
Returns:
solved: (Bx1)
"""
assert preds.shape[1] == pixs.shape[1], (
'Must pass in run_decode=True if using a pixel-based classifier!!')
del preds
feats = self.conv_feat(None, pixs, process_all_frames=True)
preds = self.tx_cls(feats, None)
return preds
class Conv3dClassifier(nn.Module):
"""3D conv over features learned over time."""
def __init__(self, in_dim, num_3d_layers):
super().__init__()
layers = [[
nn.Conv3d(in_dim, in_dim, 3, stride=2, padding=1, bias=False),
nn.ReLU(inplace=True)
] for _ in range(num_3d_layers - 1)]
layers_flat = [item for sublist in layers for item in sublist]
self.enc = nn.Sequential(*(layers_flat[:-1]))
self.cls = nn.Linear(in_dim, 1)
def forward(self, preds, pixs):
"""
Run the classifier on the predictions.
Args:
preds: (BxTx1xDxH'xW')
pixs: (BxTx1xDxHxW)
Returns:
solved: (Bx1)
"""
del pixs
enc_preds = self.enc(preds.squeeze(2).transpose(1, 2))
cls_preds = self.cls(torch.mean(enc_preds, [-1, -2, -3]))
# It has 1 extra dim in the end from the fc layer which should be
# removed, but since I need to add a time dimension anyway, just leave
# this there (will end up the same)
return cls_preds
class ConvConv3dClassifier(nn.Module):
"""Conv3D on top of the Conv features learned over time."""
def __init__(self, in_dim, nconvblocks, n3dlayers):
super().__init__()
self.conv_feat = ConvNetClassifier(in_dim, nconvblocks, 0)
self.td_cls = Conv3dClassifier(self.conv_feat.enc.out_dim, n3dlayers)
def forward(self, preds, pixs):
"""
Run the classifier on the predictions.
Args:
preds: (BxTx1xDxH'xW')
pixs: (BxTx1xDxHxW)
Returns:
solved: (Bx1)
"""
assert preds.shape[1] == pixs.shape[1], (
'Must pass in run_decode=True if using a pixel-based classifier!!')
del preds
feats = self.conv_feat(None, pixs, process_all_frames=True)
preds = self.td_cls(feats, None)
return preds
class ConcatClassifier(nn.Module):
"""Concat the features and classify."""
def __init__(self, in_dim, nlayers):
super().__init__()
self.cls = MLPClassifier(in_dim, nlayers, match_inp_sz_layer=True)
def forward(self, preds, pixs):
"""
Run the classifier on the predictions.
Args:
preds: (BxTx1xDxH'xW')
pixs: (BxTx1xDxHxW)
Returns:
solved: (Bx1)
"""
del pixs
# Concatenate over the time dimension
preds_flat = preds.view(preds.shape[0], 1, 1, -1, preds.shape[-2],
preds.shape[-1])
return self.cls(preds_flat, None, process_all_frames=True)
class ConvConcatClassifier(nn.Module):
"""Concat the Conv features and classify."""
def __init__(self, in_dim, nconvblocks, nclslayers):
super().__init__()
self.conv_feat = ConvNetClassifier(in_dim, nconvblocks, 0)
self.concat_cls = ConcatClassifier(self.conv_feat.enc.out_dim,
nclslayers)
def forward(self, preds, pixs):
"""
Run the classifier on the predictions.
Args:
preds: (BxTx1xDxH'xW')
pixs: (BxTx1xDxHxW)
Returns:
solved: (Bx1)
"""
assert preds.shape[1] == pixs.shape[1], (
'Must pass in run_decode=True if using a pixel-based classifier!!')
del preds
feats = self.conv_feat(None, pixs, process_all_frames=True)
preds = self.concat_cls(feats, None)
return preds
class TrivialInteractor(nn.Module):
"""Model interactions btw objects: do nothing."""
def __init__(self, in_dim):
super().__init__()
del in_dim
@classmethod
def forward(cls, feat):
"""
Args:
feat: (B, T, Nobj, C, H', W')
Returns:
feat as is
"""
return feat
class TxEncoder(nn.Module):
"""Transformer based encoder, generates a feature combining the context."""
def __init__(self, in_dim, nheads, nlayers, maintain_dim=False):
"""
Args:
maintain_dim (bool): If true, it maps the final output to the same
dimensionality as the input
"""
super().__init__()
# Very basic position encoding
self.loc_embed = nn.Sequential(nn.Linear(1, 4), nn.ReLU(inplace=True),
nn.Linear(4, 8))
self.nheads = nheads
self.nlayers = nlayers
in_dim_loc = in_dim + 8 * nheads
self.loc_mixer = nn.Linear(in_dim_loc, in_dim_loc)
layer = nn.TransformerEncoderLayer(in_dim_loc, nheads)
self.encoder = nn.TransformerEncoder(layer, nlayers)
if maintain_dim:
self.back_to_orig_dim = nn.Linear(in_dim_loc, in_dim)
self.out_dim = in_dim
else:
self.back_to_orig_dim = lambda x: x # Identity
self.out_dim = in_dim_loc
def forward(self, feat):
"""
Args:
feat: (B, T, C)
Returns:
Same shape as input
"""
# Add a location embedding (over time), since time axis will flatten
loc_embedding = self.loc_embed(
torch.arange(feat.shape[1],
device=feat.device).unsqueeze(-1).float())
# Make into the shape of the feature
loc_embedding = loc_embedding.unsqueeze(0).repeat(
feat.shape[0], 1, self.nheads)
feat = torch.cat([feat, loc_embedding], dim=-1)
# Mix up the location information throughout the features so each head
# would have it
mixed_feat = self.loc_mixer(feat)
# Transformer encoder expects the time dimension as the 0th! So gotta
# permute things around
return self.back_to_orig_dim(
self.encoder(mixed_feat.permute(1, 0, 2)).permute(1, 0, 2))
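# --- Added sketch (not part of the original file) ---
# TxEncoder concatenates an 8-dim time/location embedding per head, so with
# maintain_dim=False the channel dim grows from in_dim to in_dim + 8 * nheads.
# Sizes below are assumptions; wrapped in a helper so nothing runs at import.
def _tx_encoder_shape_sketch():
    enc = TxEncoder(in_dim=32, nheads=2, nlayers=1)
    feat = torch.zeros(4, 5, 32)  # (B, T, C)
    out = enc(feat)
    # out has shape (4, 5, 48), since 32 + 8 * 2 = 48 and maintain_dim=False.
    return out.shape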
class TxInteractor(nn.Module):
"""Model interactions btw objects: using Transformer."""
def __init__(self, in_dim, nheads, nlayers):
super().__init__()
self.in_dim = in_dim
self.tx_enc = TxEncoder(in_dim, nheads, nlayers, maintain_dim=True)
def forward(self, feat):
"""
Args:
feat: (B, T, Nobj, C, H', W')
Returns:
Same shape as input
"""
# Mean reduce the spatial dimensions for tx, then add it back to the
# original feature as a residual connection
feat_spat_mean = torch.mean(feat, dim=[-1, -2])
feat_flat = feat_spat_mean.flatten(1, 2)
tx_feat = self.tx_enc(feat_flat)
tx_feat = tx_feat.view(
feat_spat_mean.shape).unsqueeze(-1).unsqueeze(-1)
return feat + tx_feat
class TrivialSpatialAttention(nn.Module):
def __init__(self, in_dim):
super().__init__()
del in_dim
def forward(self, feat):
return feat
class TxSpatialAttention(nn.Module):
def __init__(self, in_dim, nheads, nlayers):
super().__init__()
self.tx_enc = TxEncoder(in_dim, nheads, nlayers, maintain_dim=True)
def forward(self, feat):
"""
Args:
feats (B, T, Nobj, D, H', W')
"""
feat_flat = torch.flatten(torch.flatten(feat, 0, 2), -2, -1)
feat_att = self.tx_enc(feat_flat.transpose(1, 2)).transpose(1, 2)
return feat_att.view(feat.shape)
class Fwd(nn.Module):
"""The master class with Forward model."""
def __init__(self, agent_cfg):
"""
Args:
dyn_type: The type of dynamics model to use.
dyn_n: Number of previous features used for prediction.
"""
super().__init__()
# The image embedding model
self.preproc = VideoPreprocessor(agent_cfg)
self.enc = hydra.utils.instantiate(agent_cfg.encoder,
self.preproc.out_dim,
agent_cfg.nobj)
dim = self.enc.out_dim
self.interactor = hydra.utils.instantiate(agent_cfg.interactor, dim)
# The dynamics model
self.dyn = hydra.utils.instantiate(agent_cfg.dyn, self.enc, dim)
# Classifier model
self.nframes_to_cls = agent_cfg.nframes_to_cls
# An attention over the latent features before passing them through the
# classifier.
self.spat_att = hydra.utils.instantiate(agent_cfg.spat_att, dim)
self.cls = hydra.utils.instantiate(agent_cfg.cls, dim)
# Decoder model
self.dec = hydra.utils.instantiate(agent_cfg.decoder, dim,
phyre.NUM_COLORS)
# Other loss functions
self.pix_loss = hydra.utils.instantiate(agent_cfg.loss_fn.pix)
self.nce_loss = hydra.utils.instantiate(agent_cfg.loss_fn.nce, dim)
@property
def device(self):
if hasattr(self, 'parameters') and next(self.parameters()).is_cuda:
return 'cuda'
else:
return 'cpu'
def _forward_dyn(self, feats, vids, n_fwd_times, need_intermediate=False):
"""
Args:
feats: (BxT_histxNobjxDxH'xW')
vids: (BxT_histxCxHxW) The video corresponding to the feats, some
dyn models might use them.
n_fwd_times: Number of times to run the fwd model on the last frames
need_intermediate: If true, give all the intermediate features
Returns:
all_preds: The predictions at each time step, in n_fwd_times
all_pixs: The predictions in pixels. Note that not all dynamics models
use pixels, so this might just give the last frame as output
all_solved: The classification at each time step, for n_fwd_times
"""
all_preds = []
all_pixs = []
all_addl_losses = []
if n_fwd_times == 0:
return [all_preds, all_pixs, all_addl_losses]
def run_fwd_append(feats, pixs):
pred, pred_pix, addl_losses = self.dyn(feats, pixs)
all_preds.append(pred)
all_pixs.append(pred_pix)
all_addl_losses.append(addl_losses)
run_fwd_append(feats, vids)
n_fwd_times_copy = n_fwd_times
while n_fwd_times - 1 > 0:
feats = torch.cat(
[feats[:, 1:, ...],
torch.unsqueeze(all_preds[-1], axis=1)],
dim=1)
vids = torch.cat(
[vids[:, 1:, ...],
torch.unsqueeze(all_pixs[-1], axis=1)],
dim=1)
run_fwd_append(feats, vids)
n_fwd_times -= 1
assert len(all_preds) == n_fwd_times_copy, (
'%d %d' % (len(all_preds), n_fwd_times_copy))
if not need_intermediate:
all_preds = [all_preds[-1]]
all_pixs = [all_pixs[-1]]
all_addl_losses = [all_addl_losses[-1]]
# Will compute solved or not later, after decode, in case the classifier
# needs that information
return all_preds, all_pixs, all_addl_losses
def _slice_for_dyn(self, features_batched, n_hist_frames, nslices=-1):
"""
Args:
features_batched: BxTx.... can deal with any following
dimensions, typically it is (BxTxNobjxDxH'xW')
n_hist_frames (int): Number of frames to use as history
nslices (int): If -1, make as many slices of the training data
as possible. If 1, keep only the first one. (1 used when
training classifier on top, which should always see videos
from the start)
Returns:
B'x n_hist_frames x ... (B'x n_hist_frames x Nobj x D x H' x W')
"""
clip_hist = []
assert features_batched.shape[1] >= n_hist_frames
for i in range((features_batched.shape[1] - n_hist_frames + 1)):
if nslices > 0 and i >= nslices:
break
clip_hist.append(features_batched[:, i:i + n_hist_frames, ...])
clip_hist = torch.cat(clip_hist, dim=0)
return clip_hist
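# Added note (worked example, not from the original file): with a 10-frame
# clip, n_hist_frames=3 and nslices=-1, _slice_for_dyn builds
# 10 - 3 + 1 = 8 overlapping windows (frames 0-2, 1-3, ..., 7-9) and
# concatenates them along the batch axis, so B' = 8 * B.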
def _forward_dec(self, feats, pixels):
"""
Args:
feats: List of features (BxD) from the dynamics prediction stage,
one for each time step predicted.
pixels: List of corresponding pixels from the dynamics model. The
dyn model may or may not actually generate new pixels.
"""
return [self.dec(feat, pix) for feat, pix in zip(feats, pixels)]
# Loss functions ###########################################################
def cswm_loss(self, pred, gt, hinge=1.0):
"""
The energy based contrastive loss.
Args:
pred (BxNobjxDxH'xW')
gt (BxNobjxDxH'xW')
From https://github.com/tkipf/c-swm/blob/master/modules.py#L94
"""
pred = pred.view(pred.shape[:2] + (-1, ))
gt = gt.view(gt.shape[:2] + (-1, ))
batch_size = gt.size(0)
perm = np.random.permutation(batch_size)
neg = gt[perm]
def energy(pred, gt, sigma=0.5):
"""Energy function based on normalized squared L2 norm.
Args:
pred (B, Nobj, D')
gt (B, Nobj, D')
"""
norm = 0.5 / (sigma**2)
diff = pred - gt
return norm * diff.pow(2).sum(2).mean(1)
pos_loss = energy(pred, gt)
zeros = torch.zeros_like(pos_loss)
pos_loss = pos_loss.mean()
neg_loss = torch.max(zeros, hinge - energy(pred, neg)).mean()
return pos_loss + neg_loss
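# Added note (worked example, not from the original file): with the default
# sigma=0.5, norm = 0.5 / 0.5**2 = 2.0, so energy(pred, gt) is twice the
# squared L2 distance, summed over the feature dim and averaged over objects.
# pos_loss pulls predictions toward their targets, while the hinge term keeps
# energy(pred, neg) above `hinge` for the randomly permuted negatives.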
def ce_loss(self, decisions, targets):
targets = targets.to(dtype=torch.float, device=decisions.device)
return torch.nn.functional.binary_cross_entropy_with_logits(
decisions, targets)
def autoencoder_loss(self, pix, latent, autoenc_loss_ratio):
"""
Runs a random subset of the actual frames through the decoder to incur a
loss that encourages the intermediate representation to also work as a
good autoencoder. A random fraction is used only for compute reasons;
ideally it would run on every frame (ratio = 1).
Args:
pix (B, T, H, W): Actual pixels of the input frames
latent (B, T, Nobj, D, H', W'): Latent representation of the input
frames
autoenc_loss_ratio (float): What percentage of the input frames to
run it on. Only for compute reasons, ideally run it on all.
Returns:
loss {'autoenc': (1,) <float>} for the loss
"""
# Flatten the Batch and time dimension to get all the frames
pix_flat = torch.flatten(pix, 0, 1)
latent_flat = torch.flatten(latent, 0, 1)
# Select a subset of the frames to run the loss on
assert pix_flat.shape[0] == latent_flat.shape[0]
idx = np.arange(pix_flat.shape[0])
np.random.shuffle(idx)
sel_cnt = int(autoenc_loss_ratio * len(idx))
idx_sel = np.sort(idx[:sel_cnt])
pix_flat_sel = pix_flat[idx_sel, ...]
latent_flat_sel = latent_flat[idx_sel, ...]
# Generate the pixels for the latent, and incur loss
pred_flat_sel = combine_obj_pixels(self.dec(latent_flat_sel, None), 1)
loss = self.pix_loss(pred_flat_sel, pix_flat_sel).unsqueeze(0)
return {'autoenc_pix': loss}
def solved_or_not_loss(self, clip_preds_solved, vid_is_solved):
"""
Repeat is_solved as many times as the batch was repeated, to get
the class label at each forward prediction
Args:
clip_preds_solved (B',)
vid_is_solved (B,)
B and B' might be different but B' must be a multiple of B, since
it happens when num_slices > 1
Returns:
loss {'ce': (1,) <float>} for the loss
"""
assert clip_preds_solved.shape[0] % vid_is_solved.shape[0] == 0
return {
'ce':
self.ce_loss(
clip_preds_solved,
vid_is_solved.repeat((clip_preds_solved.shape[0] //
vid_is_solved.shape[0], ))).unsqueeze(0)
}
############################################################################
def _compute_losses(self, clip_pred, clip_pred_pix, vid_feat, vid,
n_hist_frames, n_fwd_times):
"""
Compute all losses possible.
"""
dummy_loss = torch.Tensor([-1]).to(clip_pred.device)
losses = {}
# NCE and pixel loss
# find the GT for each clip, note that all predictions may not have a GT
# since the last n_hist_frames for a video will make a prediction that
# goes out of the list of frames that were extracted for that video.
feat_preds = []
feat_gt = []
pix_preds = []
pix_gt = []
batch_size = vid_feat.shape[0]
gt_max_time = vid_feat.shape[1]
# Max slices that could have been made of the data, to use all of the
# training clip
max_slices_with_gt = gt_max_time - n_hist_frames - n_fwd_times + 1
num_slices = clip_pred.shape[0] // batch_size
for i in range(min(max_slices_with_gt, num_slices)):
corr_pred = clip_pred[i * batch_size:(i + 1) * batch_size, ...]
# Get the corresponding GT predictions for this pred
corr_gt = vid_feat[:, i + n_hist_frames + n_fwd_times - 1]
assert corr_gt.shape == corr_pred.shape
feat_preds.append(corr_pred)
feat_gt.append(corr_gt)
# Same thing for pix
if clip_pred_pix is not None:
corr_pix_pred = clip_pred_pix[i * vid_feat.shape[0]:(i + 1) *
vid_feat.shape[0], ...]
corr_pix_gt = vid[:, i + n_hist_frames + n_fwd_times - 1]
pix_preds.append(corr_pix_pred)
pix_gt.append(corr_pix_gt)
if len(feat_gt) > 0:
# Keep a batch dimension to the loss, since it will be run over
# multiple GPUs
feat_preds = torch.cat(feat_preds)
feat_gt = torch.cat(feat_gt)
losses['nce'] = self.nce_loss(feat_preds, feat_gt).unsqueeze(0)
losses['cswm'] = self.cswm_loss(feat_preds, feat_gt).unsqueeze(0)
else:
losses['nce'] = dummy_loss
losses['cswm'] = dummy_loss
# Reconstruction loss
if len(pix_gt) > 0:
losses['pix'] = self.pix_loss(torch.cat(pix_preds),
torch.cat(pix_gt)).unsqueeze(0)
else:
losses['pix'] = dummy_loss
return losses
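# Added note (worked example, not from the original file): with
# n_hist_frames=3, n_fwd_times=2 and gt_max_time=10, at most
# 10 - 3 - 2 + 1 = 6 slices have ground truth, and slice i is compared
# against frame i + 3 + 2 - 1 = i + 4 of the original clip.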
def _cls(self, feat_hist, pix_hist, feat_preds, pix_preds):
"""
Wrapper around the classifier, collates all the input frames/features
and predicted future frames/features.
The images, features are already summed over the objects
Args:
feat_hist: (B, T, C, H', W')
pix_hist: (B, T, 7, H, W)
feat_preds [list of (B, C, H', W')] -- len = num predictions
pix_preds [list of (B, 7, H, W)] -- len = num predictions
The elements could be None, since not all models predict pixels
Returns:
(B,) predicted scores for the clips
"""
feats_combined = feat_hist
if feat_preds is not None and len(feat_preds) > 0:
feats_combined = torch.cat([feat_hist] +
[el.unsqueeze(1) for el in feat_preds],
dim=1)
pix_combined = pix_hist
if (pix_preds is not None and len(pix_preds) > 0
and pix_preds[0] is not None):
pix_combined = torch.cat([pix_combined] +
[el.unsqueeze(1) for el in pix_preds],
dim=1)
# Sum over objs -- we want the classifier model to see everything
# at the same time.
# They are already summed over objects, but the obj dimension is still
# needed
pix_combined = pix_combined.unsqueeze(2)
feats_combined = feats_combined.unsqueeze(2)
# If need to keep only a subset of the frames
if self.nframes_to_cls > 0:
pix_combined = pix_combined[:, :self.nframes_to_cls, ...]
feats_combined = feats_combined[:, :self.nframes_to_cls, ...]
feats_combined = self.spat_att(feats_combined)
# Keep the last prediction, as that should ideally be the best
# prediction of whether it was solved or not
# torch.max was hard to optimize through
return self.cls(feats_combined, pix_combined)[:, -1]
def forward(self,
vid,
vid_is_solved,
n_hist_frames=3,
n_fwd_times=1,
n_fwd_times_incur_loss=999999,
run_decode=False,
compute_losses=False,
need_intermediate=False,
autoenc_loss_ratio=0.0,
nslices=-1):
"""
Args:
vid: (BxTxNobjxHxW) The input video
vid_is_solved: (Bx1) Whether the video is solved in the end or not.
Could be None at test time.
n_hist_frames: (int) Number of frames to use as history for
prediction
n_fwd_times: (int) How many times to run the forward dynamics model
n_fwd_times_incur_loss (int): Up to how many of these forward steps to
incur loss on.
run_decode: (bool) Decode the features into pixel output
compute_losses: Should be set at train time. Will compute losses,
whatever it can given the data (eg, if vid_is_solved is not
passed to the function, it will not compute the CE loss).
need_intermediate (bool): Set true if you want to run the dynamics
model and need all the intermediate results. Else, will return
a list with only 1 element, the final output.
autoenc_loss_ratio (float btw 0-1): Set to 1 to run auto-encoder
style loss on all frames when run_decode is set.
nslices (int): See the _slice_for_dyn fn
Returns:
clip_feat: BxTxD
"""
vid_preproc = self.preproc.preprocess_vid(vid)
obj_feat = self.enc(vid_preproc)
clip_hist = self._slice_for_dyn(obj_feat,
n_hist_frames,
nslices=nslices)
vid_hist = self._slice_for_dyn(vid_preproc,
n_hist_frames,
nslices=nslices)
assert clip_hist.shape[1] == n_hist_frames
clip_hist = self.interactor(clip_hist)
clip_preds, clip_preds_pix, clip_preds_addl_losses = self._forward_dyn(
clip_hist, vid_hist, n_fwd_times, need_intermediate)
if run_decode:
clip_preds_pix = self._forward_dec(clip_preds, clip_preds_pix)
else:
clip_preds_pix = [None] * len(clip_preds)
# Compute the solved or not, will only do for the ones asked for
clip_preds_solved = self._cls(
combine_obj_pixels(clip_hist, 2), combine_obj_pixels(vid_hist, 2),
[combine_obj_pixels(el, 1) for el in clip_preds],
[combine_obj_pixels(el, 1) for el in clip_preds_pix])
all_losses = []
clip_preds_pix_unpreproc_for_loss = [
self.preproc.unpreprocess_frame_for_loss(el)
for el in clip_preds_pix
]
if compute_losses:
for i in range(min(len(clip_preds), n_fwd_times_incur_loss)):
# Compute losses at each prediction step, if need_intermediate
# is set. Else, it will only return a single output
# (at the last prediction), and then we can only incur loss at
# that point.
if not need_intermediate:
assert len(clip_preds) == 1
pred_id = -1
# Only loss on predicting the final rolled out obs
this_fwd_times = n_fwd_times
else:
assert len(clip_preds) == n_fwd_times
pred_id = i
this_fwd_times = i + 1
all_losses.append(
self._compute_losses(
# For the loss, using only the last prediction (for now)
clip_preds[pred_id],
combine_obj_pixels(
clip_preds_pix_unpreproc_for_loss[pred_id], 1),
obj_feat,
combine_obj_pixels(vid, 2),
n_hist_frames,
this_fwd_times))
all_losses = average_losses(all_losses)
all_losses.update(average_losses(clip_preds_addl_losses))
all_losses.update(
self.solved_or_not_loss(clip_preds_solved, vid_is_solved))
# Add losses on the provided frames if requested
if run_decode and autoenc_loss_ratio > 0:
all_losses.update(
self.autoencoder_loss(combine_obj_pixels(vid, 2), obj_feat,
autoenc_loss_ratio))
clip_preds_pix_unpreproc = [
combine_obj_pixels(self.preproc.unpreprocess_frame_after_loss(el),
1) for el in clip_preds_pix_unpreproc_for_loss
]
all_preds = {
'feats': clip_preds,
'is_solved': clip_preds_solved,
'pixels': clip_preds_pix_unpreproc,
}
return all_preds, all_losses
| 2.171875
| 2
|
ephios/plugins/pages/views.py
|
garinm90/ephios
| 0
|
12775045
|
from django.contrib.auth.views import redirect_to_login
from django.views.generic import DetailView, ListView
from ephios.core.views.settings import SettingsViewMixin
from ephios.extra.mixins import StaffRequiredMixin
from ephios.plugins.pages.models import Page
class PageListView(StaffRequiredMixin, SettingsViewMixin, ListView):
model = Page
class PageView(DetailView):
model = Page
def setup(self, request, *args, **kwargs):
super().setup(request, *args, **kwargs)
self.object = self.get_object()
def dispatch(self, request, *args, **kwargs):
if not request.user.is_authenticated and not self.object.publicly_visible:
return redirect_to_login(self.request.get_full_path())
return super().dispatch(request, *args, **kwargs)
| 2.046875
| 2
|
Back-End/util/create_world.py
|
Zealll/maze
| 0
|
12775046
|
<gh_stars>0
from django.contrib.auth.models import User
from adventure.models import Player, Room
from util.sample_generator import World
world = World()
world.generate_rooms(23, 23, 529)
cache = {}
for i in world.grid:
for j in i:
room = Room(title = j.name, description = f'This room is called {j.name} its ID is {j.id}', x = j.x, y = j.y)
room.save()
cache[(j.x, j.y)] = room
if j.e_to != None:
if (j.e_to.x, j.e_to.y) in cache:
room.connectRooms(cache[(j.e_to.x, j.e_to.y)], 'e')
cache[(j.e_to.x, j.e_to.y)].connectRooms(room, 'w')
if j.w_to != None:
if (j.w_to.x, j.w_to.y) in cache:
room.connectRooms(cache[(j.w_to.x, j.w_to.y)], 'w')
cache[(j.w_to.x, j.w_to.y)].connectRooms(room, 'e')
if j.s_to != None:
if (j.s_to.x, j.s_to.y) in cache:
room.connectRooms(cache[(j.s_to.x, j.s_to.y)], 's')
cache[(j.s_to.x, j.s_to.y)].connectRooms(room, 'n')
if j.n_to != None:
if (j.n_to.x, j.n_to.y) in cache:
room.connectRooms(cache[(j.n_to.x, j.n_to.y)], 'n')
cache[(j.n_to.x, j.n_to.y)].connectRooms(room, 's')
players = Player.objects.all()
first_room = world.grid[0][0]
for p in players:
p.currentRoom = first_room.id
p.save()
# world.print_rooms()
# print(first_layer)
# n = open('./util/names.txt', 'r', encoding='utf-8')
# names = n.read().split("\n")
# n.close()
# Room.objects.all().delete()
# r_outside = Room(title="Outside Cave Entrance",
# description="North of you, the cave mount beckons")
# r_foyer = Room(title="Foyer", description="""Dim light filters in from the south. Dusty
# passages run north and east.""")
# r_overlook = Room(title="Grand Overlook", description="""A steep cliff appears before you, falling
# into the darkness. Ahead to the north, a light flickers in
# the distance, but there is no way across the chasm.""")
# r_narrow = Room(title="Narrow Passage", description="""The narrow passage bends here from west
# to north. The smell of gold permeates the air.""")
# r_treasure = Room(title="Treasure Chamber", description="""You've found the long-lost treasure
# chamber! Sadly, it has already been completely emptied by
# earlier adventurers. The only exit is to the south.""")
# r_outside.save()
# r_foyer.save()
# r_overlook.save()
# r_narrow.save()
# r_treasure.save()
# # Link rooms together
# r_outside.connectRooms(r_foyer, "n")
# r_foyer.connectRooms(r_outside, "s")
# r_foyer.connectRooms(r_overlook, "n")
# r_overlook.connectRooms(r_foyer, "s")
# r_foyer.connectRooms(r_narrow, "e")
# r_narrow.connectRooms(r_foyer, "w")
# r_narrow.connectRooms(r_treasure, "n")
# r_treasure.connectRooms(r_narrow, "s")
# players= Player.objects.all()
# for p in players:
# p.currentRoom = r_outside.id
# p.save()
| 2.28125
| 2
|
tests/django_django_marshmallow/main/api.py
|
filwaitman/whatever-rest-framework
| 1
|
12775047
|
<gh_stars>1-10
from functools import partial
from django.views.generic import View
from wrf.api.base import BaseAPI, api_view
from wrf.base import APIError
from wrf.framework.django import DjangoFrameworkComponent
from wrf.orm.django import DjangoORMComponent
from wrf.pagination.base import NoPaginationComponent, PagePaginationComponent
from wrf.permission.base import AllowAllPermissionComponent, AllowAuthenticatedPermissionComponent, ReadOnlyPermissionComponent
from wrf.schema.marshmallow import MarshmallowSchemaComponent
from .models import User
from .schemas import UserSchema
class MyBaseAPI(BaseAPI):
orm_component_class = DjangoORMComponent
schema_component_class = MarshmallowSchemaComponent
framework_component_class = DjangoFrameworkComponent
pagination_component_class = PagePaginationComponent
permission_component_class = AllowAuthenticatedPermissionComponent
def get_current_user(self):
# `bool(request.user)` is `True` for unauthenticated users. Shame on django.
return self.request.user if self.request.user.is_authenticated else None
class UserAPI(MyBaseAPI):
model_class = User
schema_class = UserSchema
def get_queryset(self):
return User.objects.all()
def _doublename(self, pk):
instance = self.orm_component.get_object(self.get_queryset(), pk)
self.check_permissions(instance)
data = {'doubled': instance.first_name * 2}
return self.framework_component.create_response(data, 200, headers={'header-passed-in': '1'})
@api_view()
def doublename(self, pk):
return self._doublename(pk)
@api_view()
def handled_exception(self):
raise APIError(499, {'detail': 'Now this is a weird HTTP code'})
@api_view()
def unhandled_exception(self):
return 1 / 0
@api_view(permission_component_class=AllowAllPermissionComponent)
def doublename_open(self, pk):
return self._doublename(pk)
@api_view(permission_component_class=ReadOnlyPermissionComponent)
def list_readonly(self):
return self._list()
@api_view(permission_component_class=ReadOnlyPermissionComponent)
def create_readonly(self):
return self._create()
@api_view(framework_component_class=partial(DjangoFrameworkComponent, receive_data_as_json=False))
def create_formdata(self):
return self._create()
@api_view(pagination_component_class=NoPaginationComponent)
def list_nopagination(self):
return self._list()
def get_pagination_component_class(self, api_method_name):
if self.request.GET.get('paginate') == 'f':
return NoPaginationComponent
return super(UserAPI, self).get_pagination_component_class(api_method_name)
class CustomDispatchMixin(object):
def dispatch(self, request, *args, **kwargs):
custom = '{}_{}'.format(request.method.lower(), kwargs['action'])
custom_method = getattr(self, custom, None)
if custom_method:
return custom_method(request, *args, **kwargs)
return super(CustomDispatchMixin, self).dispatch(request, *args, **kwargs)
class UserListAPI(View):
def get(self, request, *args, **kwargs):
return UserAPI(request).list()
def post(self, request, *args, **kwargs):
return UserAPI(request).create()
class UserListCustomAPI(CustomDispatchMixin, View):
def get_read_only(self, request, *args, **kwargs):
return UserAPI(request).list_readonly()
def post_read_only(self, request, *args, **kwargs):
return UserAPI(request).create_readonly()
def get_no_pagination(self, request, *args, **kwargs):
return UserAPI(request).list_nopagination()
def get_exception_handled(self, request, *args, **kwargs):
return UserAPI(request).handled_exception()
def get_exception_unhandled(self, request, *args, **kwargs):
return UserAPI(request).unhandled_exception()
def post_form_data(self, request, *args, **kwargs):
return UserAPI(request).create_formdata()
class UserDetailAPI(View):
def get(self, request, pk, *args, **kwargs):
return UserAPI(request).retrieve(pk)
def patch(self, request, pk, *args, **kwargs):
return UserAPI(request).update(pk)
def delete(self, request, pk, *args, **kwargs):
return UserAPI(request).delete(pk)
class UserDetailCustomAPI(CustomDispatchMixin, View):
def get_doublename(self, request, pk, *args, **kwargs):
return UserAPI(request).doublename(pk)
def get_doublename_open(self, request, pk, *args, **kwargs):
return UserAPI(request).doublename_open(pk)
| 1.992188
| 2
|
applications/ex/models/db1.py
|
Gorang-Maniar/DGD
| 0
|
12775048
|
# coding: utf8
db.define_table('post',
Field('Email',requires=IS_EMAIL()),
Field('filen','upload'),
auth.signature)
| 1.21875
| 1
|
main.py
|
HealYouDown/florensia-inventory-database
| 1
|
12775049
|
<filename>main.py
import ctypes
import datetime
import os
import sys
from typing import Union
import xlsxwriter as xlsx
from pandas import read_excel
from logger import setup_logger
from pywinbot import Address, MemoryReader
from strings import get_strings
import traceback
INVENTORY_BASE_ADDRESS = Address("007BA638")
PLAYER_BASE_ADDRESS = Address("00D511D8")
CREDENTIALS_BASE_ADDRESS = Address("007BA650")
IS_ADMIN = ctypes.windll.shell32.IsUserAnAdmin() != 0
logger = setup_logger()
def show_exception_and_exit(exc_type, exc_value, tb):
logger.error(traceback.format_exception(exc_type, exc_value, tb))
sys.exit(-1)
sys.excepthook = show_exception_and_exit
def get_character_name() -> str:
length_pointer = mr.get_final_pointer(PLAYER_BASE_ADDRESS,
["240", "84", "8", "-4"])
name_pointer = mr.get_final_pointer(PLAYER_BASE_ADDRESS,
["240", "84", "8", "0"])
length: int = mr.read(length_pointer, "b", 1)
return mr.read(name_pointer, "str", length)
def get_account_id() -> str:
length_pointer = mr.get_final_pointer(CREDENTIALS_BASE_ADDRESS,
["34"])
id_pointer = mr.get_final_pointer(CREDENTIALS_BASE_ADDRESS,
["24"])
length: int = mr.read(length_pointer, "b", 1)
return mr.read(id_pointer, "str", length)
class InventarSlot:
def __init__(self, slot_id: int):
self.slot_id: int = slot_id
# Calculate offset
offset_1 = Address("20") + Address("4") * slot_id
# Calculate pointers
self.quantity_pointer = mr.get_final_pointer(INVENTORY_BASE_ADDRESS,
[offset_1.address_string,
"56"])
self.table_pointer = mr.get_final_pointer(INVENTORY_BASE_ADDRESS,
[offset_1.address_string,
"5C"])
self.row_pointer = mr.get_final_pointer(INVENTORY_BASE_ADDRESS,
[offset_1.address_string,
"60"])
def __repr__(self) -> str:
if not self.is_empty:
return f"<InventarSlot{self.slot_id} Quantity={self.quantity} Code=<i|{self.table},{self.row}|> >"
else:
return f"<InventarSlot{self.slot_id} Empty>"
@property
def quantity(self) -> Union[int, None]:
return mr.read(self.quantity_pointer, "h", 2)
@property
def table(self) -> Union[int, None]:
return mr.read(self.table_pointer, "h", 2)
@property
def row(self) -> Union[int, None]:
return mr.read(self.row_pointer, "h", 2)
@property
def is_empty(self) -> bool:
return self.quantity is None
if __name__ == "__main__":
if not IS_ADMIN:
logger.warning("Script has to run as Administrator.")
sys.exit(-1)
mr = MemoryReader(process_name="FlorensiaEN.bin",
window_class="Florensia")
logger.info("Starting Inventory Database Tool...")
character_name = get_character_name()
account_id = get_account_id()
logger.info(f"Creating database for {account_id} - {character_name}")
logger.info("Getting item strings")
mapper = get_strings()
logger.info("Got 'em!")
slots = [InventarSlot(i+1) for i in range(0, 120)]
headers = ["Account ID", "Character", "Type",
"Name", "Quantity", "Slot", "Updated"]
rows = []
logger.info("Getting inventory slots")
for slot in slots:
if not slot.is_empty:
item: dict = mapper[str(slot.table)][str(slot.row)]
rows.append([
account_id,
character_name,
item["type"],
item["name"],
slot.quantity,
slot.slot_id,
str(datetime.datetime.now())
])
logger.info("Got 'em!")
# Check if file exists -> add data from file to rows list
if os.path.exists("InventoryDatabase.xlsx"):
logger.info("Found an existing file.")
data = read_excel("InventoryDatabase.xlsx").to_dict("split")["data"]
for row in data:
# just add data that is not from this char and account
# because this is already present in rows list
if not (row[0] == account_id and row[1] == character_name):
rows.append(row)
logger.info("Creating excel file")
wb = xlsx.Workbook(filename="InventoryDatabase.xlsx")
bold = wb.add_format({"bold": True})
sheet = wb.add_worksheet()
# Write data
sheet.write_row(0, 0, headers, bold)
for index, row in enumerate(rows):
sheet.write_row(index+1, 0, row)
sheet.autofilter(0, 0, len(rows), len(headers)-2)
wb.close()
logger.info("Finished\n")
| 2.328125
| 2
|
multiscale/tests/test_bulk_img_processing.py
|
uw-loci/multiscale_imaging
| 1
|
12775050
|
# -*- coding: utf-8 -*-
import multiscale.bulk_img_processing as blk
from pathlib import Path
import unittest
class get_core_file_name_TestSuite(unittest.TestCase):
"""Basic test cases."""
def test_multiple_underscores(self):
testStr = 'This_name_has_multiple_underscores.extension'
self.assertEqual(blk.get_core_file_name(testStr),'This')
def test_no_underscores(self):
testStr = 'TestStr.extension'
self.assertEqual(blk.get_core_file_name(testStr),'TestStr')
def test_single_underscore(self):
testStr = 'Test_str.extension'
self.assertEqual(blk.get_core_file_name(testStr),'Test')
#class dataframe_generator_excel_TestSuite(unittest.TestCast):
# return
#
class file_name_parts_TestSuite(unittest.TestCase):
def test_multiple_underscores(self):
testStr = 'This_name_has_multiple_underscores.extension'
parts = ['This', 'name', 'has', 'multiple', 'underscores']
self.assertEqual(blk.file_name_parts(testStr),parts)
def test_no_underscores(self):
testStr = 'TestStr.extension'
self.assertEqual(blk.file_name_parts(testStr),['TestStr'])
def test_single_underscore(self):
testStr = 'Test_str.extension'
parts = ['Test', 'str']
self.assertEqual(blk.file_name_parts(testStr),parts)
class create_new_image_path_TestSuite(unittest.TestCase):
def test_suffix_provided(self):
path = Path('C:/Test/Folder/Test.tif')
output_dir = Path('C:/Output')
suffix = 'Suffix'
expected = Path('C:/Output/Test_Suffix.tif')
new_path = blk.create_new_image_path(path, output_dir,
output_suffix = suffix)
self.assertEqual(new_path, expected)
def test_empty_suffix(self):
path = Path('C:/Test/Folder/Test.tif')
output_dir = Path('C:/Output')
expected = Path('C:/Output/Test.tif')
new_path = blk.create_new_image_path(path, output_dir)
# Modify to either accept a None suffix, or to throw an error for a bad suffix
self.assertEqual(new_path, expected)
def test_new_extension(self):
path = Path('C:/Test/Folder/Test.tif')
output_dir = Path('C:/Output')
extension = '.test'
expected = Path('C:/Output/Test.test')
new_path = blk.create_new_image_path(path, output_dir,
extension = extension)
self.assertEqual(new_path, expected)
if __name__ == '__main__':
unittest.main(verbosity=2)
| 2.46875
| 2
|