import session_log
import introduction
import image_processing
import keyboard
import flop
import current_stack
import error_log
import pot_odds
import db_query
def check_is_turn(screen_area, deck, stack_collection, db):
element_area = introduction.save_element(screen_area, 'turn_area', db)
if image_processing.search_element(element_area, ['turn'], 'green_board/', db) is False:
if len(session_log.get_actual_hand(screen_area, db)) == 10:
turn = image_processing.search_cards(element_area, deck, 2, db)
session_log.update_hand_after_turn(str(screen_area), turn, db)
last_row = session_log.get_last_row_from_log_session(screen_area, db)
hand = last_row[0][0]
stack = current_stack.get_actual_game_data(screen_area, stack_collection, db)
if make_turn_decision(screen_area, hand, stack, stack_collection, db):
return True
return False
def make_turn_decision(screen_area, hand, stack, stack_collection, db):
opponent_reaction = image_processing.search_last_opponent_action(screen_area, db)
if not isinstance(opponent_reaction, str):
opponent_reaction = opponent_reaction['alias']
hand_value = flop.get_hand_value(hand, screen_area, db)
combination_value = db_query.get_combination_value('turn', hand_value, db)
if turn_action(screen_area, hand_value, combination_value, stack, opponent_reaction, hand, stack_collection, db):
return True
def action_after_cbet(x_coordinate, y_coordinate, width, height, image_path, screen_area, deck, stack_collection, db):
try:
if introduction.check_is_fold(screen_area, x_coordinate, y_coordinate, width, height, image_path, db): return
if check_is_turn(screen_area, deck, stack_collection, db): return
current_stack.get_actual_game_data(screen_area, stack_collection, db)
if check_is_raise_cbet(screen_area, stack_collection, db): return
except Exception as e:
error_log.error_log('action_after_cbet', str(e))
print(e)
def action_after_turn_cbet(x_coordinate, y_coordinate, width, height, image_path, screen_area, deck, stack_collection,
db):
if introduction.check_is_fold(screen_area, x_coordinate, y_coordinate, width, height, image_path, db): return
if check_is_river(screen_area, deck, stack_collection, db): return
if check_is_raise_cbet(screen_area, stack_collection, db): return
def check_is_river(screen_area, deck, stack_collection, db):
element_area = introduction.save_element(screen_area, 'river_area', db)
if image_processing.search_element(element_area, ['river'], 'green_board/', db) is False:
if len(session_log.get_actual_hand(screen_area, db)) == 12:
river = image_processing.search_cards(element_area, deck, 2, db)
session_log.update_hand_after_turn(str(screen_area), river, db)
last_row = session_log.get_last_row_from_log_session(screen_area, db)
hand = last_row[0][0]
action = last_row[0][3]
stack = current_stack.get_actual_game_data(screen_area, stack_collection, db)
if make_river_decision(screen_area, hand, stack, action, stack_collection, db):
return True
return False
def make_river_decision(screen_area, hand, stack, action, stack_collection, db):
opponent_reaction = image_processing.search_last_opponent_action(screen_area, db)
hand_value = flop.get_hand_value(hand, screen_area, db)
combination_value = db_query.get_combination_value('river', hand_value, db)
if not isinstance(opponent_reaction, str):
opponent_reaction = opponent_reaction['alias']
if river_action(screen_area, hand_value, combination_value, stack, action, opponent_reaction, hand, stack_collection, db):
return True
def check_is_raise_cbet(screen_area, stack_collection, db):
hand_value = session_log.get_hand_value(screen_area, db)
combination_value = db_query.get_combination_value('flop', hand_value, db)
opponent_reaction = image_processing.search_last_opponent_action(screen_area, db)
stack = session_log.get_last_row_from_log_session(screen_area, db)[0]['current_stack']
if not isinstance(opponent_reaction, str):
opponent_reaction = opponent_reaction['alias']
if raise_cbet_action(screen_area, hand_value, combination_value, opponent_reaction, stack, stack_collection, db):
return True
def action_after_cc_postflop(screen_area, deck, x_coordinate, y_coordinate, width, height, image_path, stack_collection,
db):
try:
if check_is_river(screen_area, deck, stack_collection, db): return
if check_is_turn(screen_area, deck, stack_collection, db): return
if introduction.check_is_fold(screen_area, x_coordinate, y_coordinate, width, height, image_path, db): return
if get_opponent_flop_reaction(screen_area, stack_collection, db): return
except Exception as e:
error_log.error_log('action_after_cc_postflop', str(e))
print(e)
def get_opponent_flop_reaction(screen_area, stack_collection, db):
hand_value = session_log.get_hand_value(screen_area, db)
combination_value = db_query.get_combination_value('flop', hand_value, db)
stack = session_log.get_last_row_from_log_session(screen_area, db)[0][1]
if hand_value is None:
return False
opponent_reaction = image_processing.search_last_opponent_action(screen_area, db)
if not isinstance(opponent_reaction, str):
opponent_reaction = opponent_reaction['alias']
if opponent_reaction in ('1', '2', '3') and combination_value in ('draw', 'other', 'composite') \
and int(stack) <= 13 and current_stack.search_current_stack(screen_area, stack_collection, db) <= 13:
keyboard.press('q')
session_log.update_action_log_session('push', str(screen_area), db)
elif combination_value == 'draw' and pot_odds.check_is_call_valid(screen_area, hand_value, 'turn', stack_collection,
db):
keyboard.press('c')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
elif opponent_reaction in ('1', '2') and hand_value not in (
'trash', 'gutshot', 'bottom_pair', 'over_cards') and combination_value != 'draw':
keyboard.press('c')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
elif opponent_reaction == 'check':
keyboard.press('h')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
else:
keyboard.press('f')
session_log.update_action_log_session('fold', str(screen_area), db)
return True
def check_is_raise_river_value_bet(screen_area, db):
opponent_reaction = image_processing.search_last_opponent_action(screen_area, db)
if opponent_reaction in ('1', '2'):
keyboard.press('c')
session_log.update_action_log_session('end', str(screen_area), db)
else:
keyboard.press('f')
session_log.update_action_log_session('fold', str(screen_area), db)
return True
def action_after_value_bet(screen_area, x_coordinate, y_coordinate, width, height, image_path, db):
if introduction.check_is_fold(screen_area, x_coordinate, y_coordinate, width, height, image_path, db): return
if check_is_raise_river_value_bet(screen_area, db): return
def check_is_board_danger(hand):
if check_is_four_flush_board(hand): return True
if check_is_four_straight_board(hand): return True
return False
def check_is_four_flush_board(hand):
if len(hand) == 12:
flush_hand = hand[5] + hand[7] + hand[9] + hand[11]
if len(set(flush_hand)) == 1:
return True
elif len(hand) == 14:
flush_hand = hand[5] + hand[7] + hand[9] + hand[11] + hand[13]
counter = {}
for item in flush_hand:
counter[item] = counter.get(item, 0) + 1
doubles = {element: count for element, count in counter.items() if count > 3}
if len(doubles) > 0:
return True
return False
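# Quick illustrative checks for the flush detector above (assuming the hand
# encoding implied by the index arithmetic: rank/suit characters alternate,
# hole cards occupy chars 0-3, board cards start at char 4):
#   check_is_four_flush_board('AhKd2h7h9hTh')  -> True   (four hearts on board)
#   check_is_four_flush_board('AhKd2h7c9hTd')  -> False  (mixed board suits)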
def check_is_four_straight_board(hand):
if len(hand) == 12:
straight_hand = hand[4] + hand[6] + hand[8] + hand[10]
straight_hand = flop.straight_collection(straight_hand)
if list(map(int, straight_hand)) == list(range(min(straight_hand), max(straight_hand) + 1)) \
and len(straight_hand) == 4:
return True
elif len(hand) == 14:
straight_hand = hand[4] + hand[6] + hand[8] + hand[10] + hand[12]
straight_hand = flop.straight_collection(straight_hand)
first_straight_hand = straight_hand[:-1]
second_straight_hand = straight_hand[1:]
if first_straight_hand == list(range(min(first_straight_hand), max(first_straight_hand) + 1)) \
and len(first_straight_hand) == 4:
return True
elif second_straight_hand == list(range(min(second_straight_hand), max(second_straight_hand) + 1)) \
and len(second_straight_hand) == 4:
return True
return False
def turn_action(screen_area, hand_value, combination_value, stack, opponent_reaction, hand, stack_collection, db):
if hand_value in ('top_pair', 'two_pairs', 'set', 'weak_top_pair') and check_is_board_danger(hand):
if image_processing.check_is_cbet_available(screen_area, db):
keyboard.press('h')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
elif opponent_reaction in ('1', '2', '3'):
keyboard.press('c')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
elif stack <= 10 and current_stack.search_bank_stack(screen_area, db) > 10:
keyboard.press('q')
session_log.update_action_log_session('push', str(screen_area), db)
else:
keyboard.press('f')
session_log.update_action_log_session('fold', str(screen_area), db)
elif hand_value == 'straight' and check_is_four_flush_board(hand):
if image_processing.check_is_cbet_available(screen_area, db):
keyboard.press('h')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
elif opponent_reaction in ('1', '2'):
keyboard.press('c')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
else:
keyboard.press('f')
session_log.update_action_log_session('fold', str(screen_area), db)
elif image_processing.check_is_cbet_available(screen_area, db):
if combination_value == 'premium':
keyboard.press('v')
session_log.update_action_log_session('turn_cbet', str(screen_area), db)
elif int(stack) <= 10 and combination_value in ('draw', 'other', 'composite') \
and current_stack.search_current_stack(screen_area, stack_collection, db) <= 10:
keyboard.press('q')
session_log.update_action_log_session('push', str(screen_area), db)
elif hand_value == 'weak_top_pair':
keyboard.press('q')
session_log.update_action_log_session('push', str(screen_area), db)
else:
keyboard.press('h')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
else:
if combination_value == 'premium':
keyboard.press('v')
session_log.update_action_log_session('turn_cbet', str(screen_area), db)
elif int(stack) <= 10 and (combination_value in ('draw', 'other', 'composite') or hand_value == 'weak_flush')\
and current_stack.search_current_stack(screen_area, stack_collection, db) <= 10:
keyboard.press('q')
session_log.update_action_log_session('push', str(screen_area), db)
elif combination_value == 'draw' and pot_odds.check_is_call_valid(screen_area, hand_value, 'turn',
stack_collection, db):
keyboard.press('c')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
elif hand_value == 'weak_top_pair':
keyboard.press('q')
session_log.update_action_log_session('push', str(screen_area), db)
elif opponent_reaction in ('1', '2', '3') and (
combination_value in ('other', 'composite') or hand_value == 'weak_flush'):
keyboard.press('c')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
else:
keyboard.press('f')
session_log.update_action_log_session('fold', str(screen_area), db)
return True
def river_action(screen_area, hand_value, combination_value, stack, action, opponent_reaction, hand, stack_collection,
db):
if action in ('turn_cbet', 'river_cbet'):
is_call_river_agression = pot_odds.check_is_call_after_opponent_river_agression(screen_area, hand_value, stack_collection, action, db)
if combination_value != 'premium' and int(stack) >= 13:
keyboard.press('h')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
elif combination_value != 'premium' and int(stack) < 13:
keyboard.press('v')
session_log.update_action_log_session('river_cbet', str(screen_area), db)
elif action == 'river_cbet' and is_call_river_agression is True:
keyboard.press('c')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
elif action == 'river_cbet' and is_call_river_agression is False:
keyboard.press('f')
session_log.update_action_log_session('fold', str(screen_area), db)
elif check_is_board_danger(hand) is False or hand_value == 'flush':
keyboard.press('v')
session_log.update_action_log_session('river_cbet', str(screen_area), db)
elif hand_value == 'straight' and check_is_four_flush_board(hand) is False:
keyboard.press('v')
session_log.update_action_log_session('river_cbet', str(screen_area), db)
elif image_processing.check_is_cbet_available(screen_area, db):
keyboard.press('h')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
else:
if opponent_reaction in ('1', '2', '3'):
keyboard.press('c')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
else:
keyboard.press('f')
session_log.update_action_log_session('fold', str(screen_area), db)
return True
if check_is_board_danger(hand) and hand_value in \
('top_pair', 'two_pairs', 'set', 'weak_top_pair', 'weak_flush'):
if image_processing.check_is_cbet_available(screen_area, db):
keyboard.press('h')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
elif opponent_reaction in ('1', '2', '3'):
keyboard.press('c')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
else:
keyboard.press('f')
session_log.update_action_log_session('fold', str(screen_area), db)
elif hand_value == 'straight' and check_is_four_flush_board(hand):
if image_processing.check_is_cbet_available(screen_area, db):
keyboard.press('h')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
elif opponent_reaction in ('1', '2'):
keyboard.press('c')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
else:
keyboard.press('f')
session_log.update_action_log_session('fold', str(screen_area), db)
elif image_processing.check_is_cbet_available(screen_area, db):
if combination_value == 'premium' or hand_value == 'weak_top_pair':
keyboard.press('v')
session_log.update_action_log_session('river_cbet', str(screen_area), db)
elif combination_value == 'value' and check_is_board_danger(hand) is False:
if current_stack.search_bank_stack(screen_area, db) <= 3:
keyboard.press('j')
else:
keyboard.press('k')
session_log.update_action_log_session('value_bet', str(screen_area), db)
else:
keyboard.press('h')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
else:
if combination_value == 'premium' or hand_value == 'weak_top_pair':
keyboard.press('v')
session_log.update_action_log_session('river_cbet', str(screen_area), db)
elif (combination_value == 'value' or hand_value == 'weak_flush') and check_is_board_danger(
hand) is False and opponent_reaction in ('1', '2', '3'):
keyboard.press('c')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
        elif opponent_reaction in ('1', '2') and ('middle_pair' in hand_value or hand_value == 'low_two_pairs'):
keyboard.press('c')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
else:
keyboard.press('f')
session_log.update_action_log_session('fold', str(screen_area), db)
return True
def raise_cbet_action(screen_area, hand_value, combination_value, opponent_reaction, stack, stack_collection, db):
if combination_value == 'premium' or hand_value == 'weak_top_pair':
keyboard.press('q')
session_log.update_action_log_session('push', str(screen_area), db)
elif int(stack) <= 10 and hand_value != 'trash' and current_stack.search_current_stack(screen_area, stack_collection, db) <= 10:
keyboard.press('q')
session_log.update_action_log_session('push', str(screen_area), db)
elif combination_value == 'draw' and pot_odds.check_is_call_valid(
screen_area, hand_value, 'turn', stack_collection, db):
keyboard.press('c')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
elif combination_value in ('other', 'composite') and opponent_reaction in ('1', '2', '3'):
keyboard.press('c')
session_log.update_action_log_session('cc_postflop', str(screen_area), db)
else:
keyboard.press('f')
session_log.update_action_log_session('fold', str(screen_area), db)
return True
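# Inferred hotkey mapping, judging from the action strings logged alongside each
# keyboard.press() call above (an assumption about the client's macro setup, not
# something this file states): 'f' = fold, 'c' = call, 'h' = check,
# 'q' = push/all-in, 'v' = (c)bet, 'j'/'k' = small/large value-bet sizing.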
|
"""
World
Corporation
DERs
User
Role
"""
import names
import random
import pprint
random.seed(0) # Always generate the same dataset
n_utilities = 5
n_providers = 6
n_sp = 5
admins = 4
sec_auditors = 2
n_der = n_utilities * n_providers * 50
total_accounts = n_utilities + n_providers + n_sp + admins + sec_auditors + n_der + n_der
der_per_util = int(n_der/n_utilities)
der_per_sp = int(n_der/n_providers)
der_list = []
for d in range(1, n_der+1):
der_list.append('DER %d' % d)
# create utilities
rbac = {}
for corporation in range(n_utilities):
utilities_people = random.randint(20,60)
total_accounts = total_accounts + utilities_people
rbac['Utility %d' % (corporation + 1)] = {}
    util_slice = [corporation * der_per_util, (corporation + 1) * der_per_util]  # end index is exclusive in the slice below
# DER under the utility's control
rbac['Utility %d' % (corporation + 1)]['DER'] = der_list[util_slice[0]: util_slice[1]]
for people in range(utilities_people):
for role in ['Utility DERMS Team', 'Utility Software', 'Utility Billing', 'Utility Auditing']:
rbac['Utility %d' % (corporation + 1)][names.get_full_name()] = role
# create service providers
for corporation in range(n_sp):
sp_people = random.randint(10,30)
total_accounts = total_accounts + sp_people
rbac['Service Provider %d' % (corporation + 1)] = {}
    sp_slice = [corporation * der_per_sp, (corporation + 1) * der_per_sp]  # end index is exclusive in the slice below
# DER under the service provider's control
random.shuffle(der_list) # randomize the list so different SPs and utilities want access to the DER
rbac['Service Provider %d' % (corporation + 1)]['DER'] = der_list[sp_slice[0]: sp_slice[1]]
for people in range(sp_people):
for role in ['DER Installers', 'Aggregation/VPP Team', 'Firmware/Patching', 'Billing', 'Utility Auditing']:
rbac['Service Provider %d' % (corporation + 1)][names.get_full_name()] = role
# DER owners
rbac['DER Owner'] = {}
rbac['DER Device'] = {}
for people in range(n_der):
for role in ['DER Owner']:
name = names.get_full_name()
rbac['DER Owner'][name] = role
# Fixed: Create a dictionary with all the DER Owners and their DER Devices that they own
rbac['DER Device'][name] = 'DER %d' % (people + 1)
# RBAC Admins
rbac['Security Administrator'] = {}
for people in range(admins):
for role in ['Security Administrator']:
rbac['Security Administrator'][names.get_full_name()] = role
# RBAC Auditor
rbac['Security Auditor'] = {}
for people in range(sec_auditors):
for role in ['Security Auditor']:
rbac['Security Auditor'][names.get_full_name()] = role
#pprint.pprint(rbac)
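# A minimal inspection sketch of the structure built above (the prints are
# illustrative only; key names are exactly as generated):
if __name__ == '__main__':
    print('Total accounts: %d' % total_accounts)
    print('Utility 1 controls %d DERs' % len(rbac['Utility 1']['DER']))
    pprint.pprint(rbac['Security Auditor'])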
|
import requests
import sys
import fundamentus
def download_all(stocks, session_id):
s = requests.Session()
for stock in stocks:
referer_url = "{}balancos.php?papel={}&tipo=1".format(
fundamentus.get_base_url(),stock)
s.get(referer_url)
s.headers.update({'Referer': referer_url})
s.headers.update({'HOST': fundamentus.get_base_url()})
s.headers.update({'User-Agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"})
s.headers.update({'Cookie': "__utmt=1; PHPSESSID={}; __utma=138951332.685737504.1494551880.1494996032.1495510253.5; __utmb=138951332.4.10.1495510253; __utmc=138951332; __utmz=138951332.1494551880.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)".format(session_id)})
download_link = "{}planilhas.php?SID={}".format(
fundamentus.get_base_url(), session_id)
stock_file = s.get(download_link)
with open("./balancos/{}.zip".format(stock), 'wb') as f:
print("Downloading {} ...".format(stock))
for chunk in stock_file.iter_content(chunk_size=128):
f.write(chunk)
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage example: python fundamentus_downloader.py e81tphr617q54")
else:
session_id = sys.argv[1]
stocks = fundamentus.get_stocks()
download_all(stocks, session_id)
|
# coding=utf-8
from rest_framework import serializers
class PublicServantSerializer(serializers.Serializer):
first_name = serializers.CharField(label='Prenume', max_length=255)
last_name = serializers.CharField(label='Nume', max_length=255)
position = serializers.CharField(label='Funcție', max_length=255)
position_location = serializers.CharField(label='Locul funcției',
max_length=255)
# I. Bunuri imobile
class LandSerializer(serializers.Serializer):
CATEGORY_CHOICES = (
(1, 'agricol'),
(2, 'forestier'),
(3, 'intravilan'),
(4, 'luciu apă'),
(5, 'alte categorii de terenuri extravilane, dacă se află în '
'circuitul civil'),
)
location = serializers.CharField(label='Adresa sau zona', max_length=255)
category = serializers.ChoiceField(label='Categoria',
choices=CATEGORY_CHOICES)
year_acquired = serializers.DateField(label='Anul dobândirii',
input_formats=['%Y'])
area = serializers.IntegerField(label='Suprafața (mp)')
share = serializers.IntegerField(label='Cota-parte (%)', min_value=0,
max_value=100)
method_acquired = serializers.CharField(label='Modul de dobândire',
max_length=255)
owner = serializers.CharField(label='Titularul', max_length=255)
class BuildingSerializer(serializers.Serializer):
CATEGORY_CHOICES = (
(1, 'apartament'),
(2, 'casă de locuit'),
(3, 'casă de vacanță'),
(4, 'spații comerciale/de producție'),
)
location = serializers.CharField(label='Adresa sau zona', max_length=255)
category = serializers.ChoiceField(label='Categoria',
choices=CATEGORY_CHOICES)
year_acquired = serializers.DateField(label='Anul dobândirii',
input_formats=['%Y'])
    area = serializers.IntegerField(label='Suprafața (mp)')
share = serializers.IntegerField(label='Cota-parte (%)', min_value=0,
max_value=100)
method_acquired = serializers.CharField(label='Modul de dobândire',
max_length=255)
owner = serializers.CharField(label='Titularul', max_length=255)
# II. Bunuri mobile
class VehicleSerializer(serializers.Serializer):
kind = serializers.CharField(label='Natura', max_length=255)
brand = serializers.CharField(label='Marca', max_length=255)
count = serializers.IntegerField(label='Nr. de bucăți', min_value=0)
year_made = serializers.DateField(label='Anul fabricației',
input_formats=['%Y'])
method_acquired = serializers.CharField(label='Modul de dobândire',
max_length=255)
class PreciousItemSerializer(serializers.Serializer):
description = serializers.CharField(label='Descriere sumară',
max_length=255)
year_acquired = serializers.DateField(label='Anul dobândirii',
input_formats=['%Y'])
estimated_value = serializers.IntegerField(label='Valoarea estimată',
min_value=0)
# III. Bunuri înstrăinate
class EstrangedGoodSerializer(serializers.Serializer):
kind = serializers.CharField(label='Natura bunului înstrăinat',
max_length=255)
date_estranged = serializers.DateField(label='Data înstrăinării')
estranged_to = serializers.CharField(label='Persoana către care s-a '
'înstrăinat',
max_length=255)
procedure = serializers.CharField(label='Forma înstrăinării',
max_length=255)
value = serializers.IntegerField(label='Valoarea', min_value=0)
# IV. Active financiare
class BankAccountSerializer(serializers.Serializer):
KIND_CHOICES = (
(1, 'cont curent sau echivalente (inclusiv card)'),
(2, 'depozit bancar sau echivalente'),
(3, 'fonduri de investiții sau echivalente, inclusiv fonduri private '
'de pensii sau alte sisteme de acumulare')
)
institution = serializers.CharField(label='Instituția care administrează '
'și adresa acesteia',
max_length=255)
kind = serializers.ChoiceField(label='Tipul', choices=KIND_CHOICES)
currency = serializers.CharField(label='Valuta', max_length=3)
year_opened = serializers.DateField(label='Deschis în anul',
input_formats=['%Y'])
value = serializers.IntegerField(label='Sold/valoare la zi', min_value=0)
class PlacementSerializer(serializers.Serializer):
KIND_CHOICES = (
        (1, 'hârtii de valoare deținute (titluri de stat, certificate, '
            'obligațiuni)'),
(2, 'acțiuni sau părți sociale în societăți comerciale'),
(3, 'împrumuturi acordate în nume personal')
)
target = serializers.CharField(label='Emitent titlu/societatea în care '
'persoana e acționar/beneficiar de '
'împrumut', max_length=255)
kind = serializers.ChoiceField(label='Tipul', choices=KIND_CHOICES)
# V. Datorii
class DebtSerializer(serializers.Serializer):
creditor = serializers.CharField(label='Creditor', max_length=255)
year_contracted = serializers.DateField(label='Contractat în anul',
input_formats=['%Y'])
due = serializers.DateField(label='Scadent la')
value = serializers.IntegerField(label='Valoare', min_value=0)
# VI. Cadouri & VII. Venituri
class PresentIncomeSerializer(serializers.Serializer):
EARNER_ROLE_CHOICES = (
(1, 'Titular'),
(2, 'Soț/soție'),
(3, 'Copil'),
)
INCOME_CURRENCY_CHOICES = (
(1, 'RON'),
(2, 'EUR'),
(3, 'USD'),
)
earner = serializers.CharField(label='Cine a realizat venitul')
earner_role = serializers.ChoiceField(choices=EARNER_ROLE_CHOICES)
giver = serializers.CharField(label='Sursa venitului: nume, adresa',
max_length=255)
service = serializers.CharField(label='Serviciul prestat/Obiectul '
'generator de venit',
max_length=255)
anual_income = serializers.IntegerField(label='Venitul anual încasat',
min_value=0)
income_currency = serializers.ChoiceField(label='Valuta',
choices=INCOME_CURRENCY_CHOICES)
class WealthStatementSerializer(serializers.Serializer):
date = serializers.DateField(label='Data declarației',
input_formats=['%d-%m-%Y'])
public_servant = PublicServantSerializer()
lands = LandSerializer(required=False, many=True)
buildings = BuildingSerializer(required=False, many=True)
vehicles = VehicleSerializer(required=False, many=True)
precious_items = PreciousItemSerializer(required=False, many=True)
estranged_goods = EstrangedGoodSerializer(required=False, many=True)
bank_accounts = BankAccountSerializer(required=False, many=True)
placements = PlacementSerializer(required=False, many=True)
debts = DebtSerializer(required=False, many=True)
presents = PresentIncomeSerializer(required=False, many=True)
sources_of_income = PresentIncomeSerializer(required=False, many=True)
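# Minimal usage sketch (the payload below is hypothetical sample data, not part
# of this module):
#
#   serializer = WealthStatementSerializer(data={
#       'date': '01-02-2019',
#       'public_servant': {'first_name': 'Ion', 'last_name': 'Popescu',
#                          'position': 'Consilier', 'position_location': 'Cluj'},
#   })
#   serializer.is_valid()      # -> True; all list sections are optional
#   serializer.validated_data  # parsed fields, e.g. 'date' as datetime.date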
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.constants import Direction
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions import setup_ranger_plugin_xml
from resource_management.libraries.functions import ranger_functions_v2
def setup_ranger_hdfs(upgrade_type=None):
import params
if params.enable_ranger_hdfs:
stack_version = None
if upgrade_type is not None:
stack_version = params.version
    if params.retryAble:
      Logger.info("HDFS: Setup ranger: command retry enabled, thus retrying if Ranger admin is down!")
    else:
      Logger.info("HDFS: Setup ranger: command retry not enabled, thus skipping if Ranger admin is down!")
if params.is_hdfs_federation_enabled and params.is_namenode_host:
if params.namenode_nameservice is not None and params.fs_default_name == format("hdfs://{namenode_nameservice}"):
update_ranger_hdfs_service_name()
api_version = 'v2'
setup_ranger_plugin_xml.setup_ranger_plugin('hadoop-client', 'hdfs', params.previous_jdbc_jar,
params.downloaded_custom_connector, params.driver_curl_source,
params.driver_curl_target, params.java_home,
params.repo_name, params.hdfs_ranger_plugin_repo,
params.ranger_env, params.ranger_plugin_properties,
params.policy_user, params.policymgr_mgr_url,
params.enable_ranger_hdfs, conf_dict = params.hadoop_conf_dir,
component_user = params.hdfs_user, component_group = params.user_group, cache_service_list = ['hdfs'],
plugin_audit_properties = params.config['configurations']['ranger-hdfs-audit'], plugin_audit_attributes = params.config['configurationAttributes']['ranger-hdfs-audit'],
plugin_security_properties = params.config['configurations']['ranger-hdfs-security'], plugin_security_attributes = params.config['configurationAttributes']['ranger-hdfs-security'],
plugin_policymgr_ssl_properties = params.config['configurations']['ranger-hdfs-policymgr-ssl'], plugin_policymgr_ssl_attributes = params.config['configurationAttributes']['ranger-hdfs-policymgr-ssl'],
component_list = ['hadoop-client'], audit_db_is_enabled = params.xa_audit_db_is_enabled,
credential_file = params.credential_file, xa_audit_db_password = params.xa_audit_db_password,
ssl_truststore_password = params.ssl_truststore_password, ssl_keystore_password = params.ssl_keystore_password,
api_version = api_version ,stack_version_override = stack_version, skip_if_rangeradmin_down = not params.retryAble,
is_security_enabled = params.security_enabled,
is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
component_user_principal = params.nn_principal_name if params.security_enabled else None,
component_user_keytab = params.nn_keytab if params.security_enabled else None)
else:
Logger.info('Ranger Hdfs plugin is not enabled')
def create_ranger_audit_hdfs_directories():
import params
if params.enable_ranger_hdfs and params.xml_configurations_supported and params.xa_audit_hdfs_is_enabled:
params.HdfsResource("/ranger/audit",
type="directory",
action="create_on_execute",
owner=params.hdfs_user,
group=params.hdfs_user,
mode=0755,
recursive_chmod=True,
)
params.HdfsResource("/ranger/audit/hdfs",
type="directory",
action="create_on_execute",
owner=params.hdfs_user,
group=params.hdfs_user,
mode=0700,
recursive_chmod=True,
)
params.HdfsResource(None, action="execute")
else:
Logger.info('Skipping creation of audit directory for Ranger Hdfs Plugin.')
def update_ranger_hdfs_service_name():
"""
This is used for renaming and updating the default service created on Ranger Admin for NN Federation enabled cluster
"""
import params
service_name_exist = setup_ranger_plugin_xml.get_policycache_service_name(service_name = "hdfs", repo_name = params.repo_name, cache_service_list = ['hdfs'])
if not service_name_exist:
get_repo_name = None
ranger_admin_v2_obj = ranger_functions_v2.RangeradminV2(url = params.policymgr_mgr_url, skip_if_rangeradmin_down = not params.retryAble)
user_create_response = ranger_admin_v2_obj.create_ambari_admin_user(ambari_admin_username = params.ranger_env['ranger_admin_username'], ambari_admin_password = params.ranger_env['ranger_admin_password'], usernamepassword = params.ranger_env['admin_username'] + ":" + params.ranger_env['admin_password'])
if user_create_response is not None and user_create_response == 200:
get_repo_name = ranger_admin_v2_obj.get_repository_by_name_urllib2(name = params.repo_name_default, component = "hdfs", status = "true", usernamepassword = params.ranger_env['ranger_admin_username'] + ":" + params.ranger_env['ranger_admin_password'])
if get_repo_name is not None and get_repo_name['name'] == params.repo_name_default:
update_repo_name = ranger_admin_v2_obj.update_repository_urllib2(component = "hdfs", repo_name = params.repo_name_default, repo_properties = params.hdfs_ranger_plugin_repo,
admin_user = params.ranger_env['ranger_admin_username'], admin_password = params.ranger_env['ranger_admin_password'], force_rename = True)
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import ugettext_lazy as _
from .managers import CustomUserManager
class CustomUser(AbstractUser):
username = None
email = models.EmailField(_('email address'), unique=True)
klass = models.PositiveSmallIntegerField(default=0, verbose_name = 'Номер Класса')
imya = models.TextField(max_length=50, default='-', verbose_name = 'Имя')
familiya = models.TextField(max_length=50, default='-', verbose_name = 'Фамилия')
otchestvo = models.TextField(max_length=50, default='-', verbose_name = 'Отчество')
telefon = models.TextField(max_length=50, default='-', verbose_name = 'Телефон')
grupa = models.TextField(max_length=50, default='-', verbose_name = 'Группа')
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = CustomUserManager()
def __str__(self):
return self.email
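# For reference, a minimal sketch of what the imported CustomUserManager usually
# looks like for an email-only user model (an assumption; the real implementation
# lives in .managers):
#
#   from django.contrib.auth.base_user import BaseUserManager
#
#   class CustomUserManager(BaseUserManager):
#       def create_user(self, email, password, **extra_fields):
#           if not email:
#               raise ValueError('The email must be set')
#           user = self.model(email=self.normalize_email(email), **extra_fields)
#           user.set_password(password)
#           user.save()
#           return user
#
#       def create_superuser(self, email, password, **extra_fields):
#           extra_fields.setdefault('is_staff', True)
#           extra_fields.setdefault('is_superuser', True)
#           return self.create_user(email, password, **extra_fields)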
|
import json
import random
import re
from datetime import date
from operator import attrgetter
from typing import List, Optional
from MainSettings import MainSettings
from medalcalc.models.Hero import Hero
from medalcalc.models.HeroHistory import HeroHistory
from common.utils.DateUtils import DateUtils
from medalcalc.models.MedalType import MedalType
from medalcalc.utils.DungeonUtils import DungeonUtils
from medalcalc.utils.HeroUtils import HeroUtils, Element
class MedalCalculator:
TIMEOUT_DAYS: int = 5000
heroUtils: HeroUtils
    dungeonUtils: DungeonUtils
heroes: dict
elementX2: Element
dailyTickets: int
rebuys: int
showHistory: bool
heroFilter: List[str] = []
calcElements: List[str] = []
result: List[Hero] = []
def __init__(self):
HeroUtils()
self.dungeonUtils = DungeonUtils()
self.heroUtils = HeroUtils()
# settings
        self.showHistory = MainSettings.config.getboolean("calculator", "showHistory")
for value in MainSettings.config.get("calculator", "heroFilter").split(","):
if value != '':
self.heroFilter.append(value.strip())
farmElements = re.split(' +', MainSettings.config.get("calculator", "elements"))
for element in farmElements:
self.calcElements.append(element)
def calculateMedals(self):
# dates
DateUtils.init(date.today())
# dungeons
self.dailyTickets = self.dungeonUtils.ticketsDaily
# x2
extraTicketsConfig = self.dungeonUtils.ticketsExtraX2
extraTickets = extraTicketsConfig
self.rebuys = self.dungeonUtils.rebuysX2
# initially skip the current day
DateUtils.nextDay()
while True:
isX2 = self.dungeonUtils.isDoubleDungeon(DateUtils.currentDate)
element = self.dungeonUtils.elementX2 if isX2 else self.dungeonUtils.getDungeon()
if element:
extraTickets = extraTickets if isX2 else 0
self.heroes = self.heroUtils.heroes[element]
if len(self.heroes) > 0:
self.simulateTickets(
isX2=isX2,
extraTickets=extraTickets
)
if not isX2:
extraTickets = extraTicketsConfig
##################
# cc
self.heroUtils.crusherChest()
self.heroUtils.checkMax(self.result, self.heroes, self.dungeonUtils)
# guild shop
self.heroUtils.guildShop()
self.heroUtils.checkMax(self.result, self.heroes, self.dungeonUtils)
self.heroUtils.crusade()
self.heroUtils.checkMax(self.result, self.heroes, self.dungeonUtils)
# request
self.heroUtils.requestHero(self.heroes, DateUtils.currentDate)
self.heroUtils.checkMax(self.result, self.heroes, self.dungeonUtils)
# mm
self.heroUtils.incrementMagicMedal()
self.heroUtils.useMagicMedals(self.heroes)
self.heroUtils.checkMax(self.result, self.heroes, self.dungeonUtils)
# check finished
if self.isFinished():
return
###########
# next day
DateUtils.nextDay()
# timeout
if DateUtils.days > self.TIMEOUT_DAYS:
raise Exception(f"Timeout after {self.TIMEOUT_DAYS} days: {DateUtils.days}")
###############
#########################
def simulateTickets(
self,
isX2: bool = False,
extraTickets: int = 0):
totalTickets = self.dailyTickets + extraTickets
medalType = MedalType.DUNGEON
if isX2:
totalTickets += (self.rebuys * 10)
medalType = MedalType.DUNGEON_X2
medalIncrement = 2 if isX2 else 1
for ticket in range(totalTickets):
if not self.heroes:
return
# draw hero
hero = self.randomHero(self.heroes)
if hero:
hero.increment(medalIncrement, type=medalType)
self.heroUtils.checkMax(self.result, self.heroes, self.dungeonUtils)
# draw common
commonHero = self.randomHero(self.heroes, common=True)
if commonHero:
commonHero.increment(medalIncrement, type=medalType)
if commonHero is not hero:
self.heroUtils.checkMax(self.result, self.heroes, self.dungeonUtils)
def randomHero(self, heroMap, common: bool = False) -> Optional[Hero]:
heroes = list(heroMap)
if common:
commonFilter = filter(lambda commonHero: commonHero.isCommon(), heroMap.values())
heroes = list(map(lambda commonHero: commonHero.name, commonFilter))
if not heroes:
return None
heroName = random.choice(heroes)
return heroMap[heroName]
def isFinished(self) -> bool:
for calcElement in self.calcElements:
element = Element(calcElement)
if self.heroUtils.heroes[element]:
return False
return True
def print(self, sort: bool):
for element in self.calcElements:
self.__printElement(element=Element(element), sort=sort, showHistory=self.showHistory)
def __printElement(self, element: Element, sort=False, showHistory=False):
HeroHistory.printHeader(element.value)
if sort:
histories: List[HeroHistory] = []
for hero in self.result:
if hero.element == element and (not self.heroFilter or hero.name in self.heroFilter):
for hist in hero.history:
if showHistory or hist.isMax():
histories.append(hist)
histories = sorted(histories, key=attrgetter('days'))
for hist in histories:
hist.print()
if not sort:
for hero in self.result:
hero.print(history=showHistory)
HeroHistory.printFooter()
def printSettings(self):
print(f"Settings")
HeroHistory.printFooter()
length = 25
print(f"Request (common):".ljust(length) + str(self.heroUtils.request))
print(f"Request (sunday):".ljust(length) + str(self.heroUtils.requestSunday))
print(f"MM Hero:".ljust(length) + str(self.heroUtils.magicMedalHero))
print(f"Element x2:".ljust(length) + self.dungeonUtils.elementX2.value)
print(f"Dungeon daily tickets:".ljust(length) + str(self.dungeonUtils.ticketsDaily))
print(f"Dungeon x2 extra tickets:".ljust(length) + str(self.dungeonUtils.ticketsExtraX2))
print(f"Dungeon x2 rebuys:".ljust(length) + str(self.dungeonUtils.rebuysX2))
print(f"Weekly dungeons:".ljust(length) + json.dumps(self.dungeonUtils.dailyDungeons))
|
from izigraph.importers import NgvImporter
import izigraph
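# Inferred NGV file layout, judging from the fixtures below (not a spec, just
# what the tests imply): line 1 = node count, line 2 = link count, then one
# position line per node such as "1,2;2,3" (comma is the decimal separator,
# semicolon separates x from y), then one "source;target" line per link.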
def test_importer_can_import_graph(tmpdir):
importer = NgvImporter()
content = "0\n0\n"
p = tmpdir.mkdir("sub").join("graph.csv")
p.write(content)
g = importer.import_graph(str(p))
assert isinstance(g, izigraph.Graph)
def test_importer_can_import_graph_with_nodes(tmpdir):
importer = NgvImporter()
content = "2\n0\n0,0;0,0\n0,0;0,0\n"
p = tmpdir.mkdir("sub").join("graph.csv")
p.write(content)
g = importer.import_graph(str(p))
assert g.nodes_count() == 2
def test_importer_can_import_graph_nodes_positions(tmpdir):
importer = NgvImporter()
content = "2\n0\n1,2;2,3\n3,4;4,5\n"
p = tmpdir.mkdir("sub").join("graph.csv")
p.write(content)
g = importer.import_graph(str(p))
n0 = g.find_node_by_label('0')
n1 = g.find_node_by_label('1')
assert n0.position() == (1.2, 2.3)
assert n1.position() == (3.4, 4.5)
def test_importer_can_import_graph_with_links(tmpdir):
importer = NgvImporter()
content = "2\n1\n1,2;2,3\n3,4;4,5\n0;1\n"
p = tmpdir.mkdir("sub").join("graph.csv")
p.write(content)
g = importer.import_graph(str(p))
assert g.links_count() == 1
assert ('0', '1') in g.links_dict()
assert ('1', '0') in g.links_dict()
|
# Generated by Django 2.1.7 on 2019-04-30 09:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='catalog',
name='image',
field=models.ImageField(blank=True, upload_to='', verbose_name='Preview'),
),
]
|
import requests
import math
import geocoder
def get_mid_of_2pts(coords1,coords2):
x1,y1 = coords1
x2,y2 = coords2
x_mid = (x1 + x2)/2.0
y_mid = (y1 + y2)/2.0
return (x_mid,y_mid)
def address_to_coordinates(user_address):
the_location = geocoder.osm(user_address)
lat = the_location.osm.get('y',None)
lon = the_location.osm.get('x', None)
return (lat,lon)
def radius_1mile(coords):
    # 0.015060 degrees of latitude is roughly one mile (~1.7 km); the box below
    # extends two such units on every side
lat,lng = coords
lat_down = lat - (0.015060)*2
lat_up = lat + (0.015060)*2
lng_left = lng - (0.015060)*2
lng_right = lng + (0.015060)*2
return (min(lat_down, lat_up), max(lat_down, lat_up), min(lng_left, lng_right), max(lng_left, lng_right))
#return (lat_up, lat_down, lng_left, lng_right)
def distance_between_2pts(coords1,coords2):
x1,y1 = coords1
x2,y2 = coords2
x = (x2-x1)**2
y = (y2-y1)**2
z = x+y
return math.sqrt(z)
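# The helpers above do planar math directly on (lat, lon) degrees, which is only
# a rough approximation. For comparison, a haversine sketch for great-circle
# distance in kilometres (an addition; nothing else in this module uses it):
def haversine_km(coords1, coords2):
    lat1, lon1 = map(math.radians, coords1)
    lat2, lon2 = map(math.radians, coords2)
    dlat = lat2 - lat1
    dlon = lon2 - lon1
    a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2
    return 2 * 6371 * math.asin(math.sqrt(a))  # 6371 km = mean Earth radius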
|
import common
import re
def help():
return {'authors': ['kqr', 'nycz'],
'years': ['2012', '2013'],
'version': '1.2',
            'description': 'Interface to Google via some kind of ajax JSON API.',
            'argument': '<google search>'}
def run(nick, message, cmd_prefix):
arg = common.get_plugin_argument(message, cmd_prefix, 'g')
if arg is None:
return
return "{0}: {1}".format(nick, search(arg))
def search(args):
hits = common.read_json(common.read_url("http://ajax.googleapis.com/ajax/services/search/web?v=1.0&safe=off&q=", args))['responseData']['results']
if hits:
        # strip HTML tags and collapse runs of spaces
        striphtml = lambda s: re.sub(r'<.+?>', '', re.sub(r' +', ' ', s))
url = striphtml(hits[0]['url'])
title = striphtml(hits[0]['title'])
content = striphtml(hits[0]['content'])
result = "{1}: {2} -- {0}".format(url, title, content)
else:
result = "No hits."
return result
|
from django.contrib.staticfiles.management.commands.collectstatic import Command as BaseCommand
from js_routing.functions import build_js_file
class Command(BaseCommand):
"""
Command that collects static and generates the routes js from the templates.
"""
help = "Collect static files from apps and other locations in a single location."
def handle_noargs(self, **options):
build_js_file()
super(Command, self).handle_noargs(**options)
|
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D
import os
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
num_classes = 3
img_rows, img_cols = 48,48
batch_size = 128
train_data_dir = 'NHS_DataSet/train'
validation_data_dir = 'NHS_DataSet/validation'
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=30,
zoom_range=0.3,
width_shift_range=0.4,
height_shift_range=0.4,
horizontal_flip=True,
fill_mode='nearest')
validation_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
color_mode='grayscale',
target_size=(img_rows,img_cols),
batch_size=batch_size,
class_mode='categorical',
shuffle=True)
validation_generator = validation_datagen.flow_from_directory(
validation_data_dir,
color_mode='grayscale',
target_size=(img_rows,img_cols),
batch_size=batch_size,
class_mode='categorical',
shuffle=True)
model = Sequential()
model.add(Conv2D(32,(3,3),padding='same', kernel_initializer='he_normal', activation='elu', input_shape=(img_rows,img_cols,1)))
model.add(BatchNormalization())
model.add(Conv2D(32,(3,3),padding='same', kernel_initializer='he_normal', activation='elu', input_shape=(img_rows,img_cols,1)))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.1))
model.add(Conv2D(64,(3,3),padding='same', activation='elu', kernel_initializer='he_normal'))
model.add(BatchNormalization())
model.add(Conv2D(64,(3,3),padding='same', activation='elu', kernel_initializer='he_normal'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.1))
model.add(Conv2D(128,(3,3),padding='same', activation='elu', kernel_initializer='he_normal'))
model.add(BatchNormalization())
model.add(Conv2D(128,(3,3),padding='same', activation='elu', kernel_initializer='he_normal'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.1))
model.add(Conv2D(256,(3,3),padding='same', activation='elu', kernel_initializer='he_normal'))
model.add(BatchNormalization())
model.add(Conv2D(256,(3,3),padding='same', activation='elu', kernel_initializer='he_normal'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(64,activation='elu',kernel_initializer='he_normal'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Dense(64,activation='elu',kernel_initializer='he_normal'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Dense(num_classes, kernel_initializer='he_normal', activation='softmax'))
model.summary()
checkpoint = ModelCheckpoint(
'NHS_vgg.h5',
monitor='val_accuracy',
mode='max',
save_best_only=True,
verbose=1)
reduce_lr = ReduceLROnPlateau(
monitor='val_loss',
factor=0.2,
patience=3,
verbose=1,
min_delta=0.0001)
callbacks = [checkpoint, reduce_lr]
model.compile(loss='categorical_crossentropy',
optimizer = Adam(lr=0.003),
metrics=['accuracy'])
nb_train_samples = 15421
nb_validation_samples = 1921
epochs = 10
history = model.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples//batch_size,
epochs = epochs,
callbacks = callbacks,
validation_data = validation_generator,
validation_steps = nb_validation_samples//batch_size)
import matplotlib.pyplot as plt
# %matplotlib inline  (Jupyter-only magic; not valid syntax in a plain script)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
|
print("Started with GitHub")
|
N = input()
cruzamentos = 0
x = 1  # x is just a temporary helper variable
pregos = [int(i) for i in input().split()]
for i in range(len(pregos)-1,0,-1):
for j in range(len(pregos)-1-x,-1,-1):
if pregos[j] > pregos[i]:
cruzamentos +=1
x += 1
print(cruzamentos)
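# Worked example: for nails "3 2 1", the pairs (3,2), (3,1) and (2,1) are all
# out of order, so the program prints 3 (the number of crossings/inversions).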
|
#import the get_basic_image file as an easier to read name
#no need to import serpapi since it is imported in the get_basic_image file
import get_basic_images as gbi
#An array of all queries that need to be run
#This is a google search query so search syntax can be used link below for examples
#https://support.google.com/websearch/answer/2466433?hl=en
queries = ["Villages", "Roads", "Cities", "Rivers"]
#Loop through the queries array: run each query and save the response array to results
#Use results to extract image links and download_image_from_url to download them
for query in queries:
    results = gbi.search_request(query)
    links = gbi.get_image_links_from_response(results)
    for index, link in enumerate(links):
        #save the images to the images directory, in separate folders per query
        gbi.download_image_from_url(link, "images2/" + query + "/" + query + str(index) + ".png")
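#A rough sketch of what gbi.download_image_from_url presumably does (hypothetical;
#the real helper lives in get_basic_images): fetch the URL with requests and write
#the bytes to the target path.
#
#   import os, requests
#   def download_image_from_url(url, path):
#       os.makedirs(os.path.dirname(path), exist_ok=True)
#       response = requests.get(url, timeout=30)
#       response.raise_for_status()
#       with open(path, 'wb') as f:
#           f.write(response.content)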
|
from django.shortcuts import render
# Create your views here.
def home(request):
template = "index.html"
    return render(request, template)
|
from collections import deque
from typing import List
from leetcode import TreeNode, new_tree, test
def level_order_bottom(root: TreeNode) -> List[List[int]]:
if not root:
return []
queue, result = deque([root]), []
while queue:
queue_len = len(queue)
level = []
for _ in range(queue_len):
node = queue.popleft()
level.append(node.val)
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
result.append(level)
result.reverse()
return result
test(
level_order_bottom,
[
(new_tree(3, 9, 20, None, None, 15, 7), [[15, 7], [9, 20], [3]]),
],
)
|
import requests
import Utils
from bs4 import BeautifulSoup
TRULIA_REQUEST_KEY = 'trulia'
HOMEFINDER_REQUEST_KEY = 'homefinder'
REMAX_REQUEST_KEY = 'remax'
ZILLOW_REQUEST_KEY = 'zillow'
def get_next_homefinder_url(raw_html):
soup_file = BeautifulSoup(raw_html, 'lxml')
for link in soup_file.findAll("link", attrs={'rel': True}):
if link['rel'][0] == 'next':
retrieved_link = link['href']
return retrieved_link
# def get_next_trulia_url(base_link, pg):
# return base_link + str(pg) + '_p/'
def get_next_trulia_url(raw_html):
soup_file = BeautifulSoup(raw_html, 'lxml')
for link in soup_file.findAll("link", attrs={'rel': True}):
if link['rel'][0] == 'next':
retrieved_link = link['href']
if retrieved_link is not None:
retrieved_link = str(retrieved_link).replace('//', 'http://')
else:
retrieved_link = ""
return retrieved_link
def get_next_remax_url(base_link, pg):
return base_link
def get_next_link(site_key, raw):
next_link = ""
if site_key == TRULIA_REQUEST_KEY:
next_link = get_next_trulia_url(raw)
elif site_key == HOMEFINDER_REQUEST_KEY:
next_link = get_next_homefinder_url(raw)
elif site_key == REMAX_REQUEST_KEY:
print('Working on getting next Remax pages')
return next_link
def get_and_write_number_of_html_files(site_key, html_dir, num_to_get, base_link, today):
link_to_get = base_link
requested_files = []
if num_to_get > 0:
pg_num = 1
for _ in range(num_to_get):
if link_to_get != "":
response = requests.get(link_to_get)
raw_html = response.content
new_file_name = today + '-' + str(pg_num) + '.' + site_key + '.html'
Utils.write_html_to_file(html_dir, new_file_name, raw_html)
requested_files.append(new_file_name)
pg_num += 1
link_to_get = get_next_link(site_key, raw_html)
else:
print("Error getting next link")
return requested_files
def get_and_write_all_html_files(site_key, html_dir, base_link, today):
link_to_get = base_link
requested_files = []
pg_num = 1
while link_to_get != "" and link_to_get is not None:
response = requests.get(link_to_get)
raw_html = response.content
new_file_name = today + '-' + str(pg_num) + '.' + site_key + '.html'
Utils.write_html_to_file(html_dir, new_file_name, raw_html)
requested_files.append(new_file_name)
pg_num += 1
link_to_get = get_next_link(site_key, raw_html)
return requested_files
|
import os
import git
import yaml
import csv
import re
import time
import gspread
from pydrive.auth import ServiceAccountCredentials
# import pdb; pdb.set_trace()
class Env:
def __init__(self, name):
self.name = name
self.projects = []
self.sum_metrics = None
def get_project(self, name):
for prj in self.projects:
if prj.name == name:
return prj
prj = Project(name)
self.projects.append(prj)
return prj
def calc(self):
sum_min_CPU = 0
sum_max_CPU = 0
sum_min_MEM = 0
sum_max_MEM = 0
int_sum_min_CPU = 0
int_sum_max_CPU = 0
int_sum_min_MEM = 0
int_sum_max_MEM = 0
for prj in self.projects:
sum_min_CPU += prj.sum_metrics.min_CPU
sum_max_CPU += prj.sum_metrics.max_CPU
sum_min_MEM += prj.sum_metrics.min_MEM
sum_max_MEM += prj.sum_metrics.max_MEM
self.sum_metrics = Calculator(sum_min_CPU, sum_max_CPU, sum_min_MEM, sum_max_MEM)
for prj in self.projects:
int_sum_min_CPU += prj.sum_metrics_int.min_CPU
int_sum_max_CPU += prj.sum_metrics_int.max_CPU
int_sum_min_MEM += prj.sum_metrics_int.min_MEM
int_sum_max_MEM += prj.sum_metrics_int.max_MEM
self.sum_metrics_int = Calculator(int_sum_min_CPU, int_sum_max_CPU, int_sum_min_MEM, int_sum_max_MEM)
def __repr__(self):
return "Env: {0}\n Projects: {1}".format(self.name, self.projects)
class Project:
def __init__(self, name):
self.name = name
self.services = []
self.sum_metrics = None
def get_service(self, name):
for srv in self.services:
if srv.name == name:
return srv
srv = Service(name)
self.services.append(srv)
return srv
def calc(self):
sum_min_CPU = 0
sum_max_CPU = 0
sum_min_MEM = 0
sum_max_MEM = 0
int_sum_min_CPU = 0
int_sum_max_CPU = 0
int_sum_min_MEM = 0
int_sum_max_MEM = 0
for srv in self.services:
sum_min_CPU += srv.sum_metrics.min_CPU
sum_max_CPU += srv.sum_metrics.max_CPU
sum_min_MEM += srv.sum_metrics.min_MEM
sum_max_MEM += srv.sum_metrics.max_MEM
self.sum_metrics = Calculator(sum_min_CPU, sum_max_CPU, sum_min_MEM, sum_max_MEM)
for srv in self.services:
int_sum_min_CPU += srv.sum_metrics_int.min_CPU
int_sum_max_CPU += srv.sum_metrics_int.max_CPU
int_sum_min_MEM += srv.sum_metrics_int.min_MEM
int_sum_max_MEM += srv.sum_metrics_int.max_MEM
self.sum_metrics_int = Calculator(int_sum_min_CPU, int_sum_max_CPU, int_sum_min_MEM, int_sum_max_MEM)
def __repr__(self):
return "\nProject: {0}\nServices: {1}".format(self.name, self.services)
class Service:
def __init__(self, name):
self.name = name
self.pods = []
self.sum_metrics = None
self.sum_metrics_int = None
def get_pod(self, name):
for pod in self.pods:
if pod.name == name:
return pod
pod = Pod(name)
self.pods.append(pod)
return pod
def calc(self):
sum_min_CPU = 0
sum_max_CPU = 0
sum_min_MEM = 0
sum_max_MEM = 0
int_sum_min_CPU = 0
int_sum_max_CPU = 0
int_sum_min_MEM = 0
int_sum_max_MEM = 0
for pod in self.pods:
sum_min_CPU += pod.sum_metrics.min_CPU
sum_max_CPU += pod.sum_metrics.max_CPU
sum_min_MEM += pod.sum_metrics.min_MEM
sum_max_MEM += pod.sum_metrics.max_MEM
for pod in self.pods:
int_sum_min_CPU += pod.sum_metrics.min_CPU / pod.replicas
int_sum_max_CPU += pod.sum_metrics.max_CPU / pod.replicas
int_sum_min_MEM += pod.sum_metrics.min_MEM / pod.replicas
int_sum_max_MEM += pod.sum_metrics.max_MEM / pod.replicas
self.sum_metrics = Calculator(sum_min_CPU, sum_max_CPU, sum_min_MEM, sum_max_MEM)
self.sum_metrics_int = Calculator(int_sum_min_CPU, int_sum_max_CPU, int_sum_min_MEM, int_sum_max_MEM)
def __repr__(self):
return "\nService: {0}\nPods: {1}".format(self.name, self.pods)
class Pod:
def __init__(self, name):
self.name = name
self.containers = []
self.replicas = None
self.sum_metrics = None
def add_container(self, cnt):
# for cnt in self.containers:
# if cnt.name == name:
# return cnt
self.containers.append(cnt)
return cnt
def calc(self):
sum_min_CPU = 0
sum_max_CPU = 0
sum_min_MEM = 0
sum_max_MEM = 0
for cnt in self.containers:
sum_min_CPU += self.none_checker(cnt.metrics.min_CPU) * self.replicas
sum_max_CPU += self.none_checker(cnt.metrics.max_CPU) * self.replicas
sum_min_MEM += self.none_checker(cnt.metrics.min_MEM) * self.replicas
sum_max_MEM += self.none_checker(cnt.metrics.max_MEM) * self.replicas
self.sum_metrics = Calculator(sum_min_CPU, sum_max_CPU, sum_min_MEM, sum_max_MEM)
@staticmethod
def none_checker(mtr):
if mtr is None:
return 0
else:
return mtr
def __repr__(self):
return "\nPod: {0} |Replicas: {2}\nContainers: {1}".format(self.name, self.containers, self.replicas)
class Container:
def __init__(self, name, metrics):
self.name = name
self.metrics = metrics
def __repr__(self):
return "\nContainer: {0}\n \tMetrics: {1}".format(self.name, self.metrics)
class Metrics:
def __init__(self):
self.min_CPU = None
self.max_CPU = None
self.min_MEM = None
self.max_MEM = None
def __repr__(self):
return "\n\t\tmin_CPU: {0} \n\t\tmax_CPU: {1} \n\t\tmin_MEM: {2} \n\t\tmax_MEM: {3}".format(self.min_CPU, self.max_CPU,
self.min_MEM, self.max_MEM)
class Calculator:
def __init__(self, min_CPU, max_CPU, min_MEM, max_MEM):
self.min_CPU = min_CPU
self.max_CPU = max_CPU
self.min_MEM = min_MEM
self.max_MEM = max_MEM
class Summarizer:
def __init__(self, envs):
self.envs = envs
def sum(self):
for env in self.envs:
for prj in env.projects:
for srv in prj.services:
for pod in srv.pods:
pod.calc()
srv.calc()
prj.calc()
env.calc()
class Repository:
def __init__(self, git_url):
self.local_path = os.getcwd() + '/repo'
self.git_url = git_url
print("Cloning repo in {0}".format(self.local_path))
try:
git.Repo.clone_from(self.git_url, self.local_path)
except git.exc.GitCommandError as err:
print(err)
self.repo = git.Repo(self.local_path)
print("Active branch: {0}".format(self.repo.active_branch))
print("- : {0}".format(self.repo))
class Parser:
"""
    Walks the given repository folders and fills the Env/Project/Service/Pod
    structure with resource metrics parsed from Deployment YAML files.
    :param repo: local path to the cloned repository
    :param to_check_folders: folders to scan for YAML files
    :param envs: list of Env objects to populate
"""
def __init__(self, repo, to_check_folders, envs):
self.repo_path = repo # Local system path to repository
self.required_files_path = [] # Array with all files from folders to check
self.to_check_folders = to_check_folders
self.envs = envs
# todo git webhooks after commit
def searcher(self, path):
"""
        Recursively collects every file under the given folder.
        :param path: folder path
        :return: full paths of all files found under the folder
"""
for file in os.listdir(path):
file_path = str(path + '/' + file)
if os.path.isdir(file_path):
self.searcher(file_path)
else:
self.required_files_path.append(file_path)
return self.required_files_path
def search_required_folders(self, folders):
"""
:param folders: folders required to check
:return: all files full paths
"""
for folder in folders:
folder = self.repo_path + "/" + folder
try:
for file in os.listdir(folder):
file_path = str(folder + '/' + file)
if os.path.isdir(file_path):
self.searcher(file_path)
else:
self.required_files_path.append(file_path)
except FileNotFoundError as err:
print("Directory not found: {0}".format(err))
return self.required_files_path
def parse(self):
required_files = self.search_required_folders(self.to_check_folders)
self.fill_the_structure(required_files) # Getting files from to_check_folders to parse
def fill_the_structure(self, files):
"""
:param files to parse
        :return: the list of Env objects, populated in place
"""
files_parsed = 0
files_failed_to_parse = 0
for file in files:
data = self.check_file_kind(file)
if data:
print("Checking file: {0}".format(file))
# print("Data: {0}".format(data))
self.parse_yaml(file, data)
files_parsed+=1
print("Files parsed: {0}".format(files_parsed))
return self.envs
def check_file_kind(self, file):
if file.lower().endswith('.yaml'):
with open(file, 'r') as stream:
try:
                    for data in yaml.load_all(stream, Loader=yaml.SafeLoader):
                        # Only Deployment manifests carry the metrics we need;
                        # keep scanning the remaining documents in the file.
                        if data and data.get('kind', None) == 'Deployment':
                            return data
                    return False
except AttributeError as err:
print("=> Error parsing yaml file: {0} \n Error: {1}".format(file, err))
return False
except yaml.YAMLError as err:
print("=> Error parsing yaml file: {0} \n Error: {1}".format(file, err))
return False
else:
return False
def get_env(self, name):
for env in self.envs:
if env.name == name:
return env
env = Env(name)
self.envs.append(env)
return env
def parse_yaml(self, file, data):
env = self.get_env(file.split('/')[-2])
# prj = env.get_project(file[(file.index('/deployment/') + len('/deployment/')):file.index("/" + env.name)])
prj = env.get_project(file.split('/')[file.split('/').index('deployment') + 1])
if data.get('metadata', {}).get('name', None):
srv = prj.get_service(data['metadata']['name'])
pod_replicas = data.get('spec', {}).get('replicas', None)
if data.get('spec', {}).get('template', {}).get('metadata', {}).get('labels', {}).get('app', None):
pod = srv.get_pod(data['spec']['template']['metadata']['labels']['app'])
pod.replicas = pod_replicas
else:
pod = srv.get_pod(srv.name)
pod.replicas = pod_replicas
if data.get('spec', {}).get('template', {}).get('spec', {}).get('containers', None):
for container in data['spec']['template']['spec']['containers']:
metrics = Metrics()
if container.get('resources', None):
if container['resources'].get('requests', None):
metrics.min_CPU = container['resources']['requests'].get('cpu', 0)
metrics.min_MEM = container['resources']['requests'].get('memory', 0)
if container['resources'].get('limits', None):
metrics.max_CPU = container['resources']['limits'].get('cpu', 0)
metrics.max_MEM = container['resources']['limits'].get('memory', 0)
metrics.min_CPU = self.metric_recalculation(metrics.min_CPU)
metrics.max_CPU = self.metric_recalculation(metrics.max_CPU)
metrics.max_MEM = self.metric_recalculation(metrics.max_MEM)
metrics.min_MEM = self.metric_recalculation(metrics.min_MEM)
cnt = Container(container.get('name', None), metrics)
pod.add_container(cnt)
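    # For reference, a minimal sketch of the Deployment manifest shape that
    # parse_yaml reads (hypothetical names; only the fields used above are shown):
    #
    # kind: Deployment
    # metadata:
    #   name: my-service
    # spec:
    #   replicas: 2
    #   template:
    #     metadata:
    #       labels:
    #         app: my-pod
    #     spec:
    #       containers:
    #       - name: my-container
    #         resources:
    #           requests: {cpu: 500m, memory: 256Mi}
    #           limits: {cpu: 1000m, memory: 1Gi}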
    def metric_recalculation(self, metric):
        # Normalise CPU values to cores and memory values to Mi-equivalents.
        if metric is None:
            return 0
        if type(metric) is int:
            return metric
        if type(metric) is str:
            if 'm' in metric:
                return int(re.findall(r'\d+', metric)[0]) / 1000
            elif 'Gi' in metric:
                return int(re.findall(r'\d+', metric)[0]) * 1000
            elif 'Mi' in metric:
                return int(re.findall(r'\d+', metric)[0])
        return 0
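    # A minimal sketch of the conversions above (hypothetical raw values):
    #   metric_recalculation('500m')  -> 0.5    (millicores to cores)
    #   metric_recalculation('2Gi')   -> 2000   (Gi to Mi-equivalents)
    #   metric_recalculation('256Mi') -> 256
    #   metric_recalculation(None)    -> 0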
class Printer:
def __init__(self, envs):
self.envs = envs
def print(self):
        for env in self.envs:
print("Env: " + env.name)
for prj in env.projects:
print("- Project: " + prj.name)
for srv in prj.services:
print(" - Service: " + srv.name)
for pod in srv.pods:
print(" - Pod: " + pod.name)
print(" Replicas: {0}".format(pod.replicas))
for cnt in pod.containers:
print(" - Container: {0}".format(cnt.name))
print(" {0}".format(cnt.metrics))
class CSVPrinter(Printer):
def __init__(self, envs):
self.headers = ["Project", "Service", "Pod", "Summary", "Replicas", "Container", "min_CPU", "max_CPU",
"min_MEM", "max_MEM"]
super(CSVPrinter, self).__init__(envs)
def write_row(self, *arg):
self.csv_env.append([*arg])
def print_to_files(self):
csv_files_dir = os.getcwd() + '/envs/'
print("Printing to files to {0}".format(csv_files_dir))
if not os.path.exists(csv_files_dir):
os.makedirs(csv_files_dir)
files = []
env_names = []
for env in self.envs:
with open(csv_files_dir + env.name, "w") as file:
writer = csv.DictWriter(file, self.headers)
writer.writeheader()
for row in self.print_env(env):
writer.writerow(dict(zip(self.headers, row)))
files.append(csv_files_dir + env.name)
env_names.append(env.name)
return env_names, files
def print_env(self, env):
self.csv_env = []
for prj in env.projects:
for srv in prj.services:
for pod in srv.pods:
for cnt in pod.containers:
self.write_row(prj.name, srv.name, pod.name, None, None, cnt.name,
cnt.metrics.min_CPU, cnt.metrics.max_CPU, cnt.metrics.min_MEM, cnt.metrics.max_MEM)
self.write_row(prj.name, srv.name, pod.name, "by pod", pod.replicas, None,
pod.sum_metrics.min_CPU, pod.sum_metrics.max_CPU, pod.sum_metrics.min_MEM, pod.sum_metrics.max_MEM)
self.write_row(None, None, None, None, None, None, None, None, None, None)
self.write_row(prj.name, srv.name, None, "by service", None, None,
srv.sum_metrics.min_CPU, srv.sum_metrics.max_CPU, srv.sum_metrics.min_MEM, srv.sum_metrics.max_MEM)
self.write_row(prj.name, srv.name, None, "by service(integration)", None, None,
srv.sum_metrics_int.min_CPU, srv.sum_metrics_int.max_CPU, srv.sum_metrics_int.min_MEM, srv.sum_metrics_int.max_MEM)
self.write_row(None, None, None, None, None, None, None, None, None, None)
self.write_row(prj.name, None, None, "by project", None, None,
prj.sum_metrics.min_CPU, prj.sum_metrics.max_CPU, prj.sum_metrics.min_MEM, prj.sum_metrics.max_MEM)
self.write_row(prj.name, None, None, "by project(integration)", None, None,
prj.sum_metrics_int.min_CPU, prj.sum_metrics_int.max_CPU, prj.sum_metrics_int.min_MEM, prj.sum_metrics_int.max_MEM)
self.write_row(None, None, None, None, None, None, None, None, None, None)
self.write_row(None, None, None, None, None, None, None, None, None, None)
self.write_row(None, None, None, "Total", None, None,
env.sum_metrics.min_CPU, env.sum_metrics.max_CPU, env.sum_metrics.min_MEM, env.sum_metrics.max_MEM)
self.write_row(None, None, None, "Total(integration)", None, None,
env.sum_metrics_int.min_CPU, env.sum_metrics_int.max_CPU, env.sum_metrics_int.min_MEM, env.sum_metrics_int.max_MEM)
return self.csv_env
def print(self):
        for env in self.envs:
return env.name, self.print_env(env)
def print_by_env(self, env):
return env.name, self.print_env(env)
class CSVImporter:
def __init__(self, credentials):
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
self.credentials = ServiceAccountCredentials.from_json_keyfile_name(credentials, scope)
self.gc = gspread.authorize(self.credentials)
def create_sheet(self, sh_name, mail):
self.sh = self.gc.create(sh_name)
        self.sh.share(mail, perm_type='user', role='writer')
return self.sh
def calculate_env_size(self, metrics):
rows = len(metrics)
cols = len(metrics[0])
print("rows: {0} \ncolumns: {1}".format(rows, cols))
return rows, cols
def import_env(self, sheet_key, env_name, metrics, mail):
print("Updating env: {0}".format(env_name))
try:
sheet = self.gc.open_by_key(sheet_key)
print("Opening sheet: {0}".format(sheet.title))
        except Exception:
            print("Creating new sheet: {0}".format(sheet_key))
            sheet = self.create_sheet(sheet_key, mail)
rows, cols = self.calculate_env_size(metrics)
try:
sheet.del_worksheet(sheet.worksheet(env_name))
except gspread.exceptions.WorksheetNotFound:
print("Creating new worksheet: {0}".format(env_name))
wsh = sheet.add_worksheet(title=env_name, rows=rows, cols=cols)
self.update_by_cell(wsh, metrics)
def update_by_cell(self, wsh, metrics):
if self.credentials.access_token_expired:
print("Refreshing token")
self.gc.login()
for row_i, row in enumerate(metrics):
print(row_i, row)
for col_i, cell in enumerate(row):
print(row_i + 1, col_i + 1, cell)
wsh.update_cell(row_i + 1, col_i + 1, cell)
time.sleep(1)
def update_wsh(self, wsh, metrics):
cell_list = []
for row_i, row in enumerate(metrics):
for col_i, value in enumerate(row):
print(row_i + 1, col_i + 1, value)
cellToUpdate = wsh.cell(row_i + 1, col_i + 1)
cellToUpdate.value = value
cell_list.append(cellToUpdate)
# print("cell_list:")
# print(cell_list)
wsh.update_cells(cell_list)
if __name__ == '__main__':
# todo argparse
# Execution parameters
repo_url = 'git@github.com:amaslakou/saas-app-deployment.git'
to_check_folders = ["folder1", "folder2", "folder3"]
google_sheet_key = '1b34X*****************'
credentials_importer = os.getcwd() + '/service_account.json'
mail = 'YOUR_MAIL_ADDR@gmail.com'
# Downloading repository
repo = Repository(repo_url)
repository = repo.local_path
# Parse required folders, create envs structure
envs = []
parser = Parser(repo.local_path, to_check_folders, envs)
parser.parse()
if not envs:
print("No metrics found")
exit()
else:
printer = Printer(envs)
printer.print()
# Add sum values in envs structure
summarizer = Summarizer(envs)
summarizer.sum()
csv_printer = CSVPrinter(envs)
# Prints CSV to local files
csv_printer.print_to_files()
# Authenticate in Google
print(credentials_importer)
# Writing envs to Google Sheets
for env in envs:
env_name, env_metrics = csv_printer.print_by_env(env)
csv_importer = CSVImporter(credentials_importer)
csv_importer.import_env(google_sheet_key, env_name, env_metrics, mail)
|
import math
import sys
if len(sys.argv) == 1:
print('Input filename:')
f=str(sys.stdin.readline()).strip()
else: f = sys.argv[1]
with open(f, 'r') as fp:
l, mreset, string = fp.readline(), [], ''
while l:
string += str(l).strip()
l = fp.readline()
mreset = [int(x) for x in string.split(',')]
def run(m, id: int):
p, lenm = 0, len(m)
while p < lenm:
if lenm-p < 5: instr = m + ([0] * 5)
else: instr = m[:p+5]
n = m[p]
op = n % 100
m1 = n // 100 % 10
m2 = n // 1000 % 10
m3 = n // 10000 % 10
p1, p2, p3 = instr[p+1], instr[p+2], instr[p+3]
v1 = m[p1] if m1 == 0 and op != 99 else p1
v2 = m[p2] if m2 == 0 and op not in {99,3,4} else p2
v3 = m[p3] if m3 == 0 and op in {1,2,7,8} else p3
# 99: end
if op == 99:
print('END')
break
# 1: sum
elif op == 1:
r = v1 + v2
if m3 == 0: m[p3] = r
else: m[p+3] = r
p += 4
# 2: multiply
elif op == 2:
r = v1 * v2
if m3 == 0: m[p3] = r
else: m[p+3] = r
p += 4
# 3: save to address
elif op == 3:
m[p1] = id
p += 2
# 4: output
elif op == 4:
print('OUTPUT: ', v1, p1)
p += 2
# 5: jump-if-true
elif op == 5:
if v1 != 0: p = v2
else: p += 3
# 6: jump-if-false
elif op == 6:
if v1 == 0: p = v2
else: p += 3
# 7: less than
elif op == 7:
r = 1 if v1 < v2 else 0
if m3 == 0: m[p3] = r
else: m[p+3] = r
p += 4
# 8: equal
elif op == 8:
r = 1 if v1 == v2 else 0
if m3 == 0: m[p3] = r
else: m[p+3] = r
p += 4
else:
            print('ERROR: unknown opcode', p, op, m[p:p+4])
break
return m
print(' ----------- RUN -------- ')
print('Provide System ID:')
id=int(sys.stdin.readline())
result = run(mreset[:], id)
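# A quick hand-checked sketch: run([1002, 4, 3, 4, 33], 1) multiplies m[4] (33)
# by the immediate value 3, stores 99 at position 4, and then halts cleanly on
# the resulting 99 opcode.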
|
from django.contrib import admin
# Register your models here.
from student.models import Student
class StudentAdmin(admin.ModelAdmin):
list_display = ('name', 'roll_no',)
admin.site.register(Student, StudentAdmin)
|
from time import time
from datetime import datetime
from PIL import Image
from simpleai.search import SearchProblem, astar
from .modules.brute_force_mazeSolver import runSolver #pylint: disable=relative-beyond-top-level
from .modules.a_star_mazeSolver import MazeSolver #pylint: disable=relative-beyond-top-level
from .modules.mazeToGif import makeGIF #pylint: disable=relative-beyond-top-level
def get_start_end(mazeMatrix:list):
start = (1,1)
end = (len(mazeMatrix)-2,len(mazeMatrix[0])-2)
if mazeMatrix[start[0]][start[1]] == 1:
s1 = (start[0],start[1]+1)
s2 = (start[0]+1,start[1])
s3 = (start[0]+1,start[1]+1)
start = s1 if mazeMatrix[s1[0]][s1[1]] != 1 else s2 \
if mazeMatrix[s2[0]][s2[1]] != 1 else s3
if mazeMatrix[end[0]][end[1]] == 1:
e1 = (end[0],end[1]-1)
e2 = (end[0]-1,end[1])
e3 = (end[0]-1,end[1]-1)
end = e1 if mazeMatrix[e1[0]][e1[1]] != 1 else e2 \
if mazeMatrix[e2[0]][e2[1]] != 1 else e3
return start, end
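# A minimal hand-checked sketch (hypothetical 5x5 maze, 1 = wall, 0 = open):
# maze = [[1, 1, 1, 1, 1],
#         [1, 0, 0, 0, 1],
#         [1, 0, 1, 0, 1],
#         [1, 0, 0, 0, 1],
#         [1, 1, 1, 1, 1]]
# get_start_end(maze)  # -> ((1, 1), (3, 3)): both default corners are open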
def get_file_name(nameSearch:str):
now = "_".join(str(datetime.now()).split(" "))
now = ".".join(now.split(":"))
file_name = None
if nameSearch == "brute":
file_name = "./static/mazeImg/"+"brute_"+now+".gif"
else:
file_name = "./static/mazeImg/"+"astar_"+now+".gif"
return file_name
def brute_force(mazeMatrix:list):
start,end = get_start_end(mazeMatrix)
fileName = get_file_name("brute")
timeStart = time()
runSolver(mazeMatrix,start,end,fileName)
timeEnd = time()
timeElap = round((timeEnd - timeStart),4)
return fileName, timeElap
def mazeMatrix_for_astar(mazeMatrix:list):
start,end = get_start_end(mazeMatrix)
mazeConverted = []
for y in range(len(mazeMatrix)):
tempRow = []
for x in range(len(mazeMatrix[y])):
if (y,x) == start:
tempRow.append("o")
elif (y,x) == end:
tempRow.append("x")
elif x == 0 or x == len(mazeMatrix[0])-1 or \
y == 0 or y == len(mazeMatrix)-1 or mazeMatrix[y][x] == 1:
tempRow.append("#")
elif mazeMatrix[y][x] == 0:
tempRow.append(" ")
mazeConverted.append(tempRow)
return mazeConverted
def a_star(mazeMatrix:list):
images = []
fileName = get_file_name("astar")
MAP = mazeMatrix_for_astar(mazeMatrix)
cost_regular = 1.0
# Create the cost dictionary
COSTS = {
"up": cost_regular,
"down": cost_regular,
"left": cost_regular,
"right": cost_regular
}
# Create maze solver object
timeStart = time()
problem = MazeSolver(MAP,COSTS)
# Run the solver
result = astar(problem, graph_search=True)
timeEnd = time()
timeElap = round((timeEnd - timeStart),4)
# Extract the path
path = [x[1] for x in result.path()]
# Print the result
print()
start = (0,0)
end = (0,0)
maze = []
for y in range(len(MAP)):
tempRow = []
for x in range(len(MAP[y])):
if (x, y) == problem.initial:
start = problem.initial[::-1]
elif (x, y) == problem.goal:
end = problem.goal[::-1]
if MAP[y][x] == '#':
tempRow.append(1)
elif MAP[y][x] == ' ' or MAP[y][x] == 'o' or MAP[y][x] == 'x':
tempRow.append(0)
maze.append(tempRow)
path = [item[::-1] for item in path]
for i in range(len(path)):
tempPath = path[:i+1]
mazeWithSteps = []
for y in range(len(maze)):
tempRow = []
for x in range(len(maze[y])):
if (y,x) in tempPath:
tempRow.append(tempPath.index((y,x)))
else:
tempRow.append(0)
mazeWithSteps.append(tempRow)
images = makeGIF(start,end,maze,mazeWithSteps,[], images)
path = path[::-1]
for i in range(10):
if i % 2 == 0:
images = makeGIF(start,end,maze,mazeWithSteps,path,images)
else:
images = makeGIF(start,end,maze,mazeWithSteps,[],images)
images[0].save(fileName,
save_all=True, append_images=images[1:],
optimize=False, duration=0.5, loop=0)
return fileName, timeElap
# def main():
# maze = generator(30,10)
# a_star(maze)
# print("Done A*")
# brute_force(maze)
# print("DONE Brute_Force")
# main()
|
import mxnet as mx
import numpy as np
from collections import namedtuple
x = mx.nd.ones((100,100))
y = mx.nd.ones((100,100))
data = mx.sym.var('data')
fc1 = mx.symbol.FullyConnected(data, name='fc1', num_hidden=128)
act1 = mx.symbol.Activation(fc1, name='relu1', act_type='relu')
fc2 = mx.symbol.FullyConnected(act1, name='fc2', num_hidden=128)
act2 = mx.symbol.Activation(fc2, name='relu2', act_type='relu')
fc3 = mx.symbol.FullyConnected(act2, name='fc3', num_hidden=100) # TODO: output shape
sym = fc3
mod = mx.mod.Module(symbol=fc3, label_names=None)
mod.bind(data_shapes=[('data', x[0].shape)]) # inputs_need_grad=True # TODO: data shape
initializer = mx.init.Xavier(rnd_type='gaussian', factor_type='in', magnitude=2)
lr = 0.5
mom = 0.5
wd = 0.0
optimizer_params = {
'learning_rate' : lr,
'momentum' : mom,
'wd' : wd,
}
mod.init_params(initializer)
mod.init_optimizer(optimizer_params=optimizer_params, force_init=True)
pre_err = []
while True:
err = 0
for i in range(x.shape[0]):
Batch = namedtuple('Batch', ['data'])
mod.forward(Batch(mx.nd.array(x[i])))
output = mod.get_outputs()[0]
grad = 2 * (output - y[i])
mod.backward([grad])
mod.update()
err += mx.nd.norm(output - y[i]).asscalar()
if len(pre_err) != 0:
if err > pre_err[-1]:
            print('changing learning rate')
lr /= 2.0
mom /= 2.0
optimizer_params = {
'learning_rate' : lr,
'momentum' : mom,
'wd' : wd
}
mod.init_optimizer(optimizer_params=optimizer_params, force_init=True)
    if len(pre_err) == 10:
        if all(e == err for e in pre_err[-10:]):
            break
        pre_err = pre_err[1:]
    pre_err.append(err)
    print(err)
|
#!/usr/bin/python
## gfal 2.0 cat tool
## @author Adrien Devresse <adevress@cern.ch> CERN
## @license GPLv3
##
import gfal2_utils
import sys
if __name__ == "__main__":
sys.exit(gfal2_utils.gfal_cat_main())
|
from __future__ import print_function
def main():
infile = open('data/adult.data', 'r')
data = []
for line in infile:
data.append(line.split(', '))
#q = Question(9, "Female")
data.pop()
#true_rows, false_rows = partition(data, q)
#gini_data = [data[0],data[1],data[2],data[3],data[4]]
#print(gini(gini_data))
#print(gini(data))
#print(info_gain(true_rows, false_rows, gini(data)))
#best_gain, best_question = find_best_split(gini_data)
#print(best_question)
#my_tree = build_tree(gini_data)
#print_tree(my_tree)
#print(classify(data[0], my_tree))
#print(print_leaf(classify(data[0], my_tree)))
my_tree = build_tree(data)
print_tree(my_tree)
infiletest = open('data/adult.test', 'r')
test_data = []
for line in infiletest:
test_data.append(line.split(', '))
#q = Question(9, "Female")
test_data.pop(0)
for row in test_data:
print("Actual: %s. Predicted: %s" % (row[-1], print_lead(classify(row, my_tree))))
header = ["age", "workclass", "fnlwgt", "education", "education-num", "marital-status", "occupation", "relationship", "race", "sex",
"capital-gain", "capital-loss", "hours-per-week", "native-country", "income"]
def unique_vals(rows, col):
return set([row[col] for row in rows])
def class_counts(rows):
counts = {}
for row in rows:
label = row[-1]
if label not in counts:
counts[label] = 0
counts[label] += 1
return counts
def is_numeric(value):
return isinstance(value, int) or isinstance(value, float)
class Question:
def __init__(self, column, value):
self.column = column
self.value = value
def match(self, example):
val = example[self.column]
if is_numeric(val):
return val >= self.value
else:
return val == self.value
def __repr__(self):
condition = "=="
if is_numeric(self.value):
condition = ">="
return "Is %s %s %s?" % (header[self.column], condition, str(self.value))
def partition(rows, question):
true_rows, false_rows = [], []
for row in rows:
if question.match(row):
true_rows.append(row)
else:
false_rows.append(row)
return true_rows, false_rows
def gini(rows):
counts = class_counts(rows)
impurity = 1
for lbl in counts:
prob_of_lbl = counts[lbl] / float(len(rows))
impurity -= prob_of_lbl**2
return impurity
def info_gain(left, right, current_uncertainty):
p = float(len(left)) / (len(left) + len(right))
return current_uncertainty - p * gini(left) - (1 - p) * gini(right)
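# A small hand-checked sketch of the two measures above: for four rows with
# labels ['>50K', '>50K', '<=50K', '<=50K'], gini = 1 - (0.5**2 + 0.5**2) = 0.5,
# and a question that splits them into two pure halves gives
# info_gain = 0.5 - 0.5*gini(left) - 0.5*gini(right) = 0.5 - 0 - 0 = 0.5.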
def find_best_split(rows):
best_gain = 0
best_question = None
current_uncertainty = gini(rows)
n_features = len(rows[0]) - 1
for col in range(n_features):
values = set([row[col] for row in rows])
for val in values:
question = Question(col, val)
true_rows, false_rows = partition(rows, question)
if len(true_rows) == 0 or len(false_rows) == 0:
continue
gain = info_gain(true_rows, false_rows, current_uncertainty)
if gain >= best_gain:
best_gain, best_question = gain, question
return best_gain, best_question
class Leaf:
def __init__(self, rows):
self.predictions = class_counts(rows)
class Decision_Node:
def __init__ (self, question, true_branch, false_branch):
self.question = question
self.true_branch = true_branch
self.false_branch = false_branch
def build_tree(rows):
gain, question = find_best_split(rows)
if gain == 0:
return Leaf(rows)
true_rows, false_rows = partition(rows, question)
true_branch = build_tree(true_rows)
false_branch = build_tree(false_rows)
return Decision_Node(question, true_branch, false_branch)
def print_tree(node, spacing=" "):
if isinstance(node, Leaf):
print(spacing + "Predict", node.predictions)
return
print(spacing + str(node.question))
print(spacing + '---> True:')
print_tree(node.true_branch, spacing + " ")
print(spacing + '---> False:')
print_tree(node.false_branch, spacing + " ")
def classify(row, node):
if isinstance(node, Leaf):
return node.predictions
if node.question.match(row):
return (classify(row, node.true_branch))
else:
return classify(row, node.false_branch)
def print_leaf(counts):
total = sum(counts.values()) * 1.0
probs = {}
for lbl in counts.keys():
probs[lbl] = str(int(counts[lbl] / total * 100)) + "%"
return probs
if __name__ == "__main__":
main()
|
import socket
import select
def parseData(data):
print("Data: ", data)
BUFFER = 2048
clientList=dict({})
host = ''
port = 12345
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientList[server]={'ip':'', 'port':12345}
server.bind((host, port))
server.listen(5)
print("Server is listening for connections!")
while clientList:
readable,_,_ = select.select(clientList,[],[],0)
for sock in readable:
if sock is server:
conn, addr = server.accept()
clientList[conn] = {'ip':addr[0], 'port':addr[1]}
print("New Connection: ", clientList[conn])
else:
data = sock.recv(BUFFER)
if not data:
print("Connection closed by: ", sock.getpeername())
del clientList[sock]
else:
parseData(data.decode('ascii'))
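# A minimal client sketch for manual testing (hypothetical message):
# import socket
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# s.connect(('127.0.0.1', 12345))
# s.send('hello'.encode('ascii'))   # the server prints: Data:  hello
# s.close()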
|
"""
astroHOG Statistical tests
"""
import numpy as np
# ------------------------------------------------------------------------------------------------------------------------
def HOG_PRS(phi):
# Calculates the projected Rayleigh statistic of the distributions of angles phi.
#
# INPUTS
# phi - angles between -pi/2 and pi/2
#
# OUTPUTS
# Zx - value of the projected Rayleigh statistic
# s_Zx -
# meanPhi -
angles=phi #2.*phi
Zx=np.sum(np.cos(angles))/np.sqrt(np.size(angles)/2.)
temp=np.sum(np.cos(angles)*np.cos(angles))
s_Zx=np.sqrt((2.*temp-Zx*Zx)/np.size(angles))
    Zy=np.sum(np.sin(angles))/np.sqrt(np.size(angles)/2.)
    temp=np.sum(np.sin(angles)*np.sin(angles))
    s_Zy=np.sqrt((2.*temp-Zy*Zy)/np.size(angles))  # kept for reference; only s_Zx is returned
meanPhi=0.5*np.arctan2(Zy, Zx)
return Zx, s_Zx, meanPhi
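# A quick sanity-check sketch (hypothetical input): for n perfectly aligned
# angles phi = 0, Zx reduces to n/sqrt(n/2) = sqrt(2n) and meanPhi to 0, e.g.
# HOG_PRS(np.zeros(100)) gives Zx ~ 14.14.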
# ------------------------------------------------------------------------------------------------------------------------------
def HOG_AM(phi):
# Calculate the alignment measure.
#
# INPUTS
# phi - angles between -pi/2 and pi/2
#
# OUTPUTS
#AM - value of the alignment measure.
    ami=2.*np.cos(phi)-1.
    am=np.mean(ami)
return am
|
from dataloader import tilegenerator
import os
import glob
from multiprocessing import Pool
from functools import partial
def tile_all_process(obj, use_tiss_mask):
if use_tiss_mask:
obj.load_ds_wsi()
obj.stain_entropy_otsu()
obj.morphology()
obj.generate_tiles()
obj.slide_thumbnail()
obj.param()
def single_file_run(file_name, output_dir, input_dir, tile_objective_value, tile_read_size_w, tile_read_size_h, nr_tiles, use_tiss_mask, tiss_level, tiss_cutoff):
print('Extracting patches from ' + os.path.basename(file_name), flush=True)
_, file_type = os.path.splitext(file_name)
    if file_type in ('.svs', '.ndpi', '.mrxs', '.tif', '.tiff'):
tile_obj = tilegenerator.TileGenerator(input_dir=input_dir,
file_name=file_name,
output_dir=output_dir,
tile_objective_value=tile_objective_value,
tile_read_size_w=tile_read_size_w,
tile_read_size_h=tile_read_size_h,
nr_tiles=nr_tiles,
use_tiss_mask=use_tiss_mask,
tiss_level=tiss_level,
tiss_cutoff=tiss_cutoff)
        tile_all_process(obj=tile_obj, use_tiss_mask=use_tiss_mask)
def run(opts_in,
file_name_pattern, num_cpu, tile_objective_value, tile_read_size_w, tile_read_size_h, nr_tiles, use_tiss_mask, tiss_level, tiss_cutoff):
output_dir = opts_in['output_dir']
wsi_input = opts_in['wsi_input']
if not os.path.isdir(output_dir):
os.makedirs(output_dir, exist_ok=True)
if os.path.isdir(wsi_input):
files_all = sorted(glob.glob(os.path.join(wsi_input, file_name_pattern)))
with Pool(num_cpu) as p:
p.map(partial(single_file_run,
output_dir=output_dir,
input_dir=wsi_input,
tile_objective_value=tile_objective_value,
tile_read_size_w=tile_read_size_w,
tile_read_size_h=tile_read_size_h,
nr_tiles=nr_tiles,
use_tiss_mask=use_tiss_mask,
tiss_level=tiss_level,
tiss_cutoff =tiss_cutoff),
files_all)
if os.path.isfile(wsi_input):
input_dir, file_name = os.path.split(wsi_input)
single_file_run(file_name=file_name,
output_dir=output_dir,
input_dir=input_dir,
tile_objective_value=tile_objective_value,
tile_read_size_w=tile_read_size_w,
tile_read_size_h=tile_read_size_h,
nr_tiles=nr_tiles,
use_tiss_mask=use_tiss_mask,
tiss_level=tiss_level,
tiss_cutoff=tiss_cutoff)
|
from analysis import *
fig = plt.figure(figsize =(figWidth,figHeight))
ax = fig.add_axes([0,0,1,1])
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_linewidth(linW1)
ax.spines[axis].set_color(colK1)
plt.style.use('seaborn-paper')
rc('font',**{'family':'sans-serif','sans-serif':['DejaVu Sans']})
ax.grid(True,linewidth=linW0,alpha=al2)
formatter=ticker.ScalarFormatter(useMathText=True)
formatter.set_scientific(True)
formatter.set_powerlimits((-1,1))
title_pad=-2
ax.plot(spectrum_sorted[h_star:,0],spectrum_sorted[h_star:,1], linewidth=linW1, color=colR2, marker='o',markersize=1.5*marS4 )
ax.plot(h_refined,f_h_spl(h_refined), linewidth=linW2, color=colR3)
#ax.plot(0.333,3.0,'rx',markersize=3*marS4,alpha=0.8)
ax.tick_params(axis='both',which='major',direction='in',colors=colK1,labelsize=fsTk2,length=tkL2,width=tkW2,pad=0.8)
ax.set_xlabel(r' $\mathsf{ h }$',labelpad=0,color=colK1,fontsize=fsLb3)
ax.set_ylabel(r' $\mathsf{ D(h)}$',labelpad=0,color=colK1,fontsize=fsLb3,rotation=0)
#ax.xaxis.set_major_locator(MultipleLocator(0.2))
#ax.yaxis.set_major_locator(MultipleLocator(0.5))
plt.savefig(save_dir+'spectrum.pdf',dpi=dpi2,transparent=True,bbox_inches='tight')
|
import serial
import time
import datetime
import threading
import math
import os
import pytz
import numpy as np
import matplotlib.pyplot as plt
class modemController():
def __init__(self, baud=57600, timeout=0.05):
self.crc = None
self.cmdinf = '01'
self.ser=serial.Serial(
port='/dev/pactor',
baudrate=baud,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=timeout,
xonxoff=False
)
self.hostmode_quit()
self.write_and_get_response('')
self.restart()
self.response = None
self.interruptFlag = False
self.hostmode = False
self.good_chunks = 0
self.init_commands = ['VER', 'RESTART', 'SERB', 'TRX RTS 0', 'AM', 'TR 0', 'PT', 'QRTC 4', 'ESC 27',
'PTCH 31', 'MAXE 35', 'LF 0', 'CM 0', 'REM 0', 'BOX 0', 'MAIL 0', 'CHOB 0', 'UML 0', 'TRX S 0', 'TRX DU 0',
'U *1', 'CHO 25', 'BK 24', 'FSKA 250', 'PSKA 330', 'PAC PRB 0', 'PAC CM 0', 'PAC CB 0', 'PAC M 0',
'PAC US 10', 'FR MO 0', 'SYS SERN', 'MY *SCSPTC*', 'LICENSE', 'TONES 2', 'MARK 1600', 'SPACE 1400',
'TXD 4', 'CSD 5', 'MYcall VJN4455', 'ARX 0', 'L 0', 'CWID 0', 'CONType 3', 'MYcall VJN4455']
# Excluded: 'TIME 18:00:00', 'DATE 09.11.14', 'ST 2', 'TERM 4', 'MYLEV 2',
def runInitCommands(self):
for c in self.init_commands:
if c == 'RESTART':
self.restart()
else:
self.write_and_get_response(c, printOut=True)
time.sleep(0.2)
return None
def setTimeout(self, timeout):
self.ser.timeout = timeout
return self.ser.timeout
def read(self, chunk_size=1024, retries=0, printOut=False):
r = self.ser.read(chunk_size)
counter = 0
while (counter < retries):
nr = self.ser.read(chunk_size)
if nr:
r += nr
else:
counter += 1
if self.crc:
r = self.crc.decode(r)
if printOut:
print(r.replace('\r','\n'))
return r
def write(self, cmd='', newline='\r', channel=None):
if channel is None:
self.ser.write('%s%s'%(cmd, newline))
else:
self.write_channel(cmd, channel=channel)
return None
def write_and_get_response(self, command, newline='\r', channel=None, chunk_size=1024, retries=0, printOut=False):
self.write(command, newline=newline, channel=channel)
return self.read(chunk_size=chunk_size, retries=retries, printOut=printOut)
def encode(self, message, channel):
c = self.int2hex(channel)
l = self.int2hex(len(message)-1)
m = message.encode('hex')
s = '%s %s %s %s'%(c, self.cmdinf, l, m)
if self.crc:
s = self.crc.encode(s)
return s
def write_channel(self, message, channel=255):
self.writeHexString(self.encode(message, channel))
self.incrementCounter()
return None
    def incrementCounter(self):
if self.crc:
self.cmdinf = '81' if self.cmdinf == '01' else '01'
return None
def restart(self):
t = self.ser.timeout
self.setTimeout(1.0)
self.write_and_get_response('RESTART', printOut=True)
self.setTimeout(t)
return None
def close(self):
return self.ser.close()
def interrupt(self):
self.interruptFlag = True
def hostmode_start(self, crc=False):
if crc:
self.write('JHOST5')
self.crc = crcCheck()
else:
self.write('JHOST1')
self.crc = None
self.hostmode = True
return self.read(printOut=True)
def hostmode_quit(self):
self.write_and_get_response('JHOST0', channel=0)
self.hostmode = False
return self.write_and_get_response('',printOut=True)
def getChannelsWithOutput(self):
# Poll Channel 255 to find what channels have output
s = bytearray(self.write_and_get_response('G', channel=255))
channels = []
for c in s[2:]:
if (c==255) or (c==0):
continue
else:
channels.append( c-1 )
return channels
def checkResponse(self, r, c):
# Do some checks on the returned data
if len(r) < 3:
return (None, 0)
h = bytearray(r[:3])
d = bytearray(r[3:])
l = h[2] + 1
dl = len(d)
if not h[0] == c:
print('WARNING: Returned data does not match polled channel')
return (None, 0)
if not l == dl:
print('WARNING: Data length (%i) does not match stated amount (%i). After %i good chunks.'%(dl, l, self.good_chunks))
self.good_chunks = 0
if dl < l:
d.extend('\x00'*(l-dl))
else:
d = d[:l]
else:
self.good_chunks += 1
# print('Good chunk')
return (d, l)
def getChannelOutput(self, c, max_data_length=1024, max_retries=1, timeout=1, gpoll=False, chunk_size=1024, report_increment=50000):
self.interruptFlag = False
timeout = int(timeout)
self.response = bytearray(max_data_length)
start_byte = 0; stop_byte = 0; ab = 0
start_time = time.time()
stop_time = start_time + timeout
counter = 0 if max_retries else -1
while (counter < max_retries) and (stop_byte < max_data_length) and (time.time() < stop_time) and (not self.interruptFlag):
# Check to see if there is any data on this channel
if gpoll:
channels = self.getChannelsWithOutput()
if not c in channels:
#print('No data on polled channel %i'%(c))
if max_retries:
counter += 1
continue
# Get the data
r = self.write_and_get_response('G', channel=c, chunk_size=chunk_size)
(d, l) = self.checkResponse(r, c)
if not d:
#print('No data on channel %i'%(c))
if max_retries:
counter += 1
continue
# Get the place to insert in data buffer
stop_byte = start_byte + l
if stop_byte > max_data_length:
stop_byte = max_data_length
# Add the data to the buffer
self.response[start_byte:stop_byte] = d
# Report on progress (print out)
if report_increment:
bb = int(stop_byte/report_increment)
if not ab == bb:
t = time.time() - start_time
print('Read %i kB in %f seconds [%i b/s]'%(stop_byte/1000, t, int(stop_byte/t)))
                ab = bb
            # Increment timeouts and counters
            stop_time = time.time() + timeout
            start_byte = stop_byte
# Report the final data amount/time
t = time.time() - start_time
print('Read %i kB in %f seconds [%i b/s]'%(stop_byte/1000, t, int(stop_byte/t)))
self.response = self.response[:stop_byte]
return self.response
def int2hex(self, channel):
c = '%2s'%(hex(channel)[2:])
return c.replace(' ', '0')
def writeHexString(self, s):
bs = s.replace(' ','').decode('hex')
#print('%s [%s]'%(bs, bs.encode('hex')))
self.ser.write(bs)
return None
#modem = modemController()
######################
# Modem Socket class for compatability with Sailmail Telnet sockets
######################
class modem_socket():
def __init__(self, mycall):
self._mycall = mycall
self.recv_buffer = bytearray()
print('Initialising modem')
self.modem = modemController()
print('Running init commands')
commands = ['MYcall %s'%(self._mycall), 'PTCH 31', 'MAXE 35',
'LF 0', 'CM 0', 'REM 0', 'BOX 0', 'MAIL 0', 'CHOB 0', 'UML 0',
'TONES 4', 'MARK 1600', 'SPACE 1400', 'CWID 0', 'CONType 3']
for c in commands:
self.modem.write_and_get_response(c, printOut=True)
self.modem.hostmode_start()
return None
    @staticmethod
    def readto(ser, end="cmd:", line=''):
        if not ser.inWaiting():
            return None
        while not line.endswith(end):
            line += ser.read(1)
        lines = line.split('\r')
        return lines
def breakin(self):
return self.send('%I')
def hostmode_init(self):
commands = ['#TONES 4', '#MYLEV 3', '#CWID 0', '#CONType 3', 'I %s'%(self._mycall)]
for c in commands:
self.modem.write_and_get_response(c, channel=31)
def connect(self, targetcall):
self.modem.write_channel('C %s'%(targetcall), channel=31)
def disconnect(self):
self.modem.write_channel('D', channel=31)
return None
def force_disconnect(self):
self.modem.write_channel('DD', channel=31)
def send(self, msg):
return self.modem.write_channel(msg, channel=31)
def recv(self, size):
c = 31
chunk_size=10240
output = bytearray()
# Check and get whatever is in the receive buffer
channels = self.modem.getChannelsWithOutput()
if c in channels:
r = self.modem.write_and_get_response('G', channel=c, chunk_size=chunk_size)
(d, l) = self.modem.checkResponse(r, c)
if d:
self.recv_buffer += d
print(str(d))
# Get the number of bytes requested from the recv_buffer
for i in range(size):
if self.recv_buffer:
output += bytearray([self.recv_buffer.pop(0)])
else:
break
# Return bytes as a string
return str(output)
def close(self):
self.modem.hostmode_quit()
self.modem.close()
return None
#####################
# Fax Controller
#####################
class Fax():
def __init__(self, baud=57600, timeout=0.05, modem=None):
self.modem = modemController(baud=baud, timeout=timeout) if not modem else modem
self.baud = self.getBaudrate()
self.ptch = 31
self.data = None
self.xres = None
self.data_rate = None
self.max_data_length=(1024*4000)
self.timeout = 10
self.chunk = ''
self.chunk_lock = threading.Lock()
self.apt_lock = threading.Lock()
self.record_lock = threading.Lock()
self.record_flag = False
self.apt_flag = False
self.receive_flag = False
self.gui_callback = None
self.runInitCommands()
def runInitCommands(self):
self.modem.init_commands = ['VER', 'RESTART', 'SERB', 'TRX RTS 1', 'AM', 'TR 0', 'PT', 'QRTC 4', 'ESC 27', 'PTCH %i'%(self.ptch), 'MAXE 35', 'LF 0', 'CM 0', 'REM 0', 'BOX 0', 'MAIL 0', 'CHOB 0', 'UML 0', 'TRX S 0', 'TRX DU 0', 'U *1', 'CHO 25', 'BK 24', 'ST 2', 'PAC PRB 0', 'PAC CM 0', 'PAC CB 0', 'PAC M 0', 'PAC US 10', 'FR MO 0', 'SYS SERN', 'MY *SCSPTC*', 'LICENSE']
#Excluded: 'TERM 4', 'TIME 17:35:46', 'DATE 04.04.16'
return self.modem.runInitCommands()
def start(self, rate=16, lines_per_minute=120, hostmode=True):
if hostmode:
self.modem.hostmode_start()
time.sleep(0.5)
# Now start up the fax stream
if self.modem.hostmode:
self.data_rate = self.getBaudrate()/rate
self.xres = int(self.data_rate*(float(60)/lines_per_minute))
print('Starting modem hostmode fax streaming at baudrate/%i = %i bytes/s'%(rate, self.data_rate))
start_code='1' if rate==32 else '17'
for p in ['%M1', '#FAX Dev 500', '#FAX FR 3', '#FAX MB %s'%(int(self.baud))]:
print('Writing hostmode command: %s'%(p))
self.modem.write_and_get_response(p, channel=self.ptch)
time.sleep(0.5)
self.modem.write_and_get_response('@F%s'%(start_code), channel=self.ptch)
else:
self.data_rate = int(38400.0/10.0)
self.xres = int(self.data_rate*(float(60)/lines_per_minute))
self.modem.write('FAX Mbaud 38400')
self.modem.write('FAX Fmfax')
# Start reading in data chunks and
# monitoring for apt start/stop signals
time.sleep(0.5)
#self.receive_start()
#self.apt_start()
def quit(self):
if self.modem.hostmode:
print('Closing hostmode fax')
self.apt_stop()
time.sleep(0.6)
self.receive_stop()
time.sleep(0.1)
self.modem.write_and_get_response('@F0', channel=self.ptch)
self.modem.hostmode_quit()
else:
print('Sending term signal 255 to modem')
self.modem.write('\xff', newline='')
time.sleep(0.5)
self.modem.ser.readall()
def close(self):
self.modem.close()
def getBaudrate(self):
return self.modem.ser.baudrate
def clear_buffer(self):
if self.modem.hostmode:
print('Clearing Fax Data Buffer')
self.modem.write_and_get_response('@F', channel=self.ptch, printOut=True)
return None
def record_start(self):
print('Starting Recording of Fax Stream')
self.record_thread = threading.Thread(target=self.do_record)
self.record_thread.daemon = True
self.record_thread.start()
def do_record(self, report_increment=50000):
print('Recording up to %i kB of fax data from channel 252'%(self.max_data_length/1000))
self.record_flag = True
if self.gui_callback:
self.gui_callback()
stop_time = time.time() + self.timeout
self.data = bytearray(self.max_data_length)
start_byte = 0; stop_byte = 0; ab = 0
start_time = time.time()
self.record_lock.acquire()
while (stop_byte < self.max_data_length) and (time.time() < stop_time) and self.record_flag:
# Get the latest data chunk
self.record_lock.acquire()
d = self.chunk
# Get the place to insert in data buffer
stop_byte = self.max_data_length if stop_byte > self.max_data_length else start_byte + 256
# Add the data to the buffer
self.data[start_byte:stop_byte] = d
# Report on progress (print out)
if report_increment:
bb = int(stop_byte/report_increment)
if not ab == bb:
t = time.time() - start_time
print('Read %i kB in %f seconds [%i b/s]'%(stop_byte/1000, t, int(stop_byte/t)))
                ab = bb
            # Increment timeouts and counters
            stop_time = time.time() + self.timeout
            start_byte = stop_byte
if self.record_lock.locked():
self.record_lock.release()
self.record_flag = False
t = time.time() - start_time
print('Read %i kB in %f seconds [%i b/s]'%(stop_byte/1000, t, int(stop_byte/t)))
self.data = self.data[:stop_byte]
self.plot(align_data=True)
print('Stopped recording fax')
if self.gui_callback:
self.gui_callback()
def record_stop(self):
self.record_flag = False
def receive_start(self):
if self.modem.hostmode:
channels = self.modem.getChannelsWithOutput()
if not 252 in channels:
print('Channel 252 not streaming fax output')
return None
print('Starting receiving fax data on channel 252')
self.monitor_thread = threading.Thread(target=self.get_chunk)
self.monitor_thread.daemon = True
self.monitor_thread.start()
return None
def receive_stop(self):
self.receive_flag = False
def get_chunk(self, max_retries=10, report_increment=0):
#print("Timeout = %f"%(self.modem.ser.timeout))
#self.clear_buffer()
self.receive_flag = True
start_time = time.time()
stop_time = start_time + self.timeout
retries = 0
chunk_counter = 0
ab = 0
while (time.time() < stop_time) and (retries < max_retries) and self.receive_flag:
# Get a chunk (depending on if we are in hostmode or not)
if self.modem.hostmode:
c = 252
r = self.modem.write_and_get_response('G', channel=c, chunk_size=259)
(d, l) = self.modem.checkResponse(r, c)
else:
l = 256
d = self.modem.read(chunk_size=l)
# If we have a valid chunk then process it otherwise increment the retry counter
if (l == 256):
retries = 0
stop_time = time.time() + self.timeout
self.chunk = d
chunk_counter += 1
# Tell the record and apt loops that it can proceed with new chunk
if self.record_lock.locked():
self.record_lock.release()
if self.apt_lock.locked():
self.apt_lock.release()
else:
if retries > 0:
print('Retrying (Length = %i, Chunks = %i)'%(l, chunk_counter))
#self.clear_buffer()
retries += 1
# Do some reporting if asked for it
if report_increment:
stop_byte = chunk_counter*256
bb = int(stop_byte/report_increment)
if not ab == bb:
t = time.time() - start_time
print('Received %i kB in %f seconds [%i b/s]'%(stop_byte/1000, t, int(stop_byte/t)))
ab = bb
self.receive_flag = False
print('Stopped receiving fax data')
return None
def apt_start(self):
print('Starting APT monitoring')
self.apt_flag = True
self.apt_thread = threading.Thread(target=self.apt_monitor)
self.apt_thread.daemon = True
self.apt_thread.start()
def apt_stop(self):
self.apt_flag = False
def apt_monitor(self, frequency=0.25, retries=10):
# Analyse data packets every <frequency> seconds,
# Once a signal is found then try to get <retries> continuous signals before
stop_time = time.time() + self.timeout
counter = 0
self.apt_lock.acquire()
while (time.time() < stop_time) and self.apt_flag:
self.apt_lock.acquire()
            d = self.chunk
if not self.record_flag:
if self.is_apt_signal(d, frequency=12):
print('Found APT Start Signal #%i'%(counter))
counter += 1
if counter > retries:
print('Starting Fax Recording')
self.record_start()
continue
else:
if self.is_apt_signal(d, frequency=8):
print('Found APT Stop Signal #%i'%(counter))
counter += 1
if counter > retries:
print('Stopping Fax Recording')
self.record_stop()
continue
time.sleep(frequency)
counter = 0
stop_time = time.time() + self.timeout
if self.apt_lock.locked():
self.apt_lock.release()
self.apt_flag = False
print('Stopping APT Monitor')
return None
def get_fax_stream(self):
print('Getting up to %i kB of fax data on Channel 252'%(self.max_data_length/1000))
self.data = self.modem.getChannelOutput(252, max_data_length=self.max_data_length, max_retries=0, timeout=10, chunk_size=259, gpoll=False)
self.plot()
return None
def plot(self, fname=None, xres=None, show_image=False, save_image=True, save_data=True, align_data=False):
if xres is None:
xres = self.xres
if not self.data:
print('No fax data aquired yet')
return None
if align_data:
self.align_data()
a = np.array(bytearray(self.data))
rows = len(a)/xres
cropsize = xres*rows
a = a[:cropsize]
a = a.reshape((rows,xres))
path = 'fax_images'
        if not os.path.exists(path):
            os.makedirs(path)
timezone = pytz.timezone('utc')
utctime = datetime.datetime.now(tz=timezone)
if not fname:
fname = '%s/%s.png'%(path, utctime.strftime('%Y-%m-%d_%H%M%S'))
if save_image:
plt.imsave(fname, a, vmin=0, vmax=255, cmap='gray')
print('Saved fax image to %s'%(fname))
if save_data:
bin_fname = fname.replace('.png', '.bin')
self.save_data(bin_fname)
print('Saved binary fax data to %s'%(bin_fname))
if show_image:
plt.imshow(a, vmin=0, vmax=255, cmap='gray')
plt.show()
return None
    def fax_data_compressy(self, axis=0):
        # Keep every second sample of the fax data.
        d = np.array(bytearray(self.data))
        return d.compress([divmod(i, 2)[1] for i in range(np.shape(d)[0])], axis=axis)
def save_data(self, fname):
f = open(fname, 'wb')
f.write(self.data)
f.close()
return None
def load_data(self, fname):
f = open(fname, 'rb')
self.data = f.read()
f.close()
self.xres = 1800
def is_apt_signal(self, data, frequency=12, width=1):
# frequency=12 (start), frequency=8 (stop)
signal_pixel_width = frequency*self.xres/1800
l = len(data)/2
# Crop the spectrum info and take only the real part
s = np.abs(np.fft.fft(np.array(bytearray(data))))
s = s.real[1:l]
band_centre = (2*l/signal_pixel_width)
spectrum_average = np.average(np.abs(s))
band_average = np.average(np.abs(s[band_centre-width:band_centre+width]))
if band_average > spectrum_average*5:
return True
return False
def align_data(self, lines=[20,30,40]):
n = len(lines)
offset = 0.0
counter = 0
for line in lines:
t = self.get_image_offset(line)
if counter:
if abs(t-offset) > 30:
print("Alignment data could not be ascertained")
return None
# Keep a running average of the offset
counter += 1
offset = (offset*(counter-1) + t)/counter
offset = int(offset)
print('Aligning data with offset of %i'%(offset))
self.data = self.data[offset:]
return None
def get_image_offset(self, line, signal_width=90):
h = signal_width / 2
d = self.data[line*self.xres:(line+1)*self.xres]
l = np.array(bytearray(d[-1*h:] + d + d[:h]))
a = np.zeros(self.xres)
for i in range(h, self.xres + h):
a[i-h] = np.average(l[i-h:i+h])
return a.argmax()
class crcCheck():
def __init__(self):
self.c = int('8408', base=16)
self.makeCrcTable()
def encode(self, s):
return 'AA AA %s %s'%(self.stuff(s), self.do_calc(s))
def decode(self, s, check_crc=True):
if not len(s) > 4:
print('WARNING: Response not long enough for CRC check')
return s
crc = s[-2:]
body = self.destuff(s[2:-2])
if check_crc:
if not self.do_calc(body.encode('hex')).decode('hex') == crc:
print('WARNING: CRC check failed')
return body
def destuff(self, s):
return ''.join([s[i] for i in range(len(s)) if (i==0) or (s[i-1] != '\xaa')])
def stuff(self, s):
return s.upper().replace('AA', 'AA 00')
def isOdd(self, num):
return bool(divmod(num, 2)[1])
def innerLoop(self, Data, accu=0):
for j in range(8):
if self.isOdd( Data^accu ):
accu = (accu >> 1)^self.c
else:
accu = (accu >> 1)
Data = (Data >> 1)
return accu
def makeCrcTable(self):
self.crc_table = [0 for i in range(256)]
for index in range(256):
self.crc_table[index] = self.innerLoop(index)
return self.crc_table
def calc_crc_ccitt(self, b):
crc = self.crc
self.crc = ((crc >> 8) & 255)^(self.crc_table[(crc^b) & 255])
def invert_crc(self):
print('Before inverting: %i, %s'%(self.crc, hex(self.crc)))
self.crc = self.crc^int('ffff', base=16)
(i, r) = divmod(self.crc, 256)
self.crc = 256*r + i
print('After inverting: %i, %s'%(self.crc, hex(self.crc)))
return self.crc
def do_calc(self, src):
# '\xFF\x01\x00\x47' (Standard G-poll on channel 255) Result should be 6b 55 = 27477
# '\xFF\x01\x00' (Response to G-poll on channel 255) Result should be E7 19 = 59161
# '\x1F\x00\x1E\x19' (Response on channel 31) Result is 1E 19 = 7705
self.crc = int('ffff', base=16)
ba = bytearray(src.replace(' ','').decode('hex'))
for b in ba:
self.calc_crc_ccitt(b)
# Return the inverted results
s = '%04s'%(hex(self.invert_crc())[2:])
s = s.replace(' ','0')
return s
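# A minimal self-check sketch based on the expected values noted in do_calc
# above (Python 2 only, since the class relies on str.decode('hex')):
# checker = crcCheck()
# assert checker.do_calc('FF 01 00 47') == '6b55'  # G-poll on channel 255
# assert checker.do_calc('FF 01 00') == 'e719'     # response to the G-poll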
|
from typing import List, Tuple
def split_name(prediction: str) -> Tuple[str, int]:
if len(prediction.split("_")) > 1:
return prediction.split("_")[0], int(prediction.split("_")[1])
else:
return prediction, 0
def is_correctly_retrieved(predictions: List[str], ground_truth: List[str]) -> bool:
top_1_predicted, deg = split_name(predictions[0])
if top_1_predicted in ground_truth:
#print(top_1_predicted,":",deg)
return True
return False
def is_category_correctly_retrieved(predictions: str, ground_truth: str) -> bool:
if predictions == ground_truth:
return True
return False
def is_correctly_ranked(prediction: str, rank, ground_truth: List[str]) -> bool:
if prediction == ground_truth[rank]:
return True
return False
def count_correctly_ranked_predictions(predictions: List[str], ground_truth: List[str]) -> int:
num_ranked_models = len(ground_truth) # 3 or less
top_n_predicted = predictions[:num_ranked_models]
correctly_ranked = 0
total = num_ranked_models
for rank, prediction in enumerate(top_n_predicted): # TODO, fix: this metric is not reasonable, when you consider the rotation
prediction, _ = split_name(prediction)
if is_correctly_ranked(prediction, rank, ground_truth):
correctly_ranked += 1
return correctly_ranked
def get_category(name: str) -> str:
return name.split("_")[-2]
def get_category_from_list(category: str, selected_categories: List[str], others_name: str = "other") -> str:
if category not in selected_categories:
return others_name
else:
return category
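# A minimal hedged example (hypothetical predictions of the form "name_degrees"):
# preds = ["chair_90", "table_0", "lamp_180"]
# truth = ["chair", "lamp", "table"]
# is_correctly_retrieved(preds, truth)              # True: top-1 "chair" is in truth
# count_correctly_ranked_predictions(preds, truth)  # 1: only "chair" matches its rank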
|
#!/usr/bin/env python3.5
print("Hello Poland")
|
from data_structure.linkedlist.Node import Node
# Class Linked List
class LinkedList:
# Initialize
def __init__(self):
self.first = None
self.last = None
self.__size = 0
# Add node on first
def add_first(self, data):
new_node = Node(data)
if self.__is_empty():
self.first = self.last = new_node
else:
current_node = self.first
self.first = new_node
new_node.next = current_node
self.__size += 1
# Add node on last
def add_last(self, data):
new_node = Node(data)
if self.__is_empty():
self.first = new_node
self.last = new_node
else:
self.last.next = new_node
self.last = new_node
self.__size += 1
def index_of(self, item):
index = 0
current_node = self.first
while current_node is not None:
if current_node.data == item:
return index
current_node = current_node.next
index = index + 1
return -1
def contains(self, item):
return self.index_of(item) != -1
    def remove_first(self):
        if self.__is_empty():
            print("The list is empty, nothing to delete")
            exit()
        if self.first == self.last:
            # Single node: the list becomes empty.
            self.first = self.last = None
        else:
            self.first = self.first.next
        self.__size -= 1
    def remove_last(self):
        if self.first == self.last:
            # Zero or one node: remove_first already handles these cases.
            self.remove_first()
            return
        prev = self.get_previous(self.last)
        prev.next = None
        self.last = prev
        self.__size -= 1
def to_array(self):
current_node = self.first
data = []
while current_node is not None:
data.append(current_node.data)
current_node = current_node.next
return data
def reverse(self):
if self.__is_empty():
return
previous = self.first
current = previous.next
while current is not None:
next_node = current.next
current.next = previous
previous = current
current = next_node
self.last = self.first
self.last.next = None
self.first = previous
    def get_kth_node_from_end(self, k):
        # Two-pointer walk: lead runs k-1 nodes ahead, so when it reaches the
        # tail, trail sits on the kth node from the end (k is 1-based).
        if self.__is_empty():
            return -1
        lead = self.first
        for _ in range(k - 1):
            if lead.next is None:
                return -1
            lead = lead.next
        trail = self.first
        while lead.next is not None:
            lead = lead.next
            trail = trail.next
        return trail.data
def get_previous(self, node):
if self.__is_empty():
print("The list is empty, nothing to delete")
exit()
current_node = self.first
while current_node is not None:
if current_node.next == node:
return current_node
current_node = current_node.next
return None
def sort(self):
if self.first is None:
return False
current = self.first
while current is not None:
target = current.next
while target is not None:
if current.data > target.data:
temp = current.data
current.data = target.data
target.data = temp
target = target.next
current = current.next
def size(self):
return self.__size
def __is_empty(self):
return self.first is None
# Print the list
def print(self):
if self.first is None:
print("The list is empty.")
else:
current_node = self.first
while current_node is not None:
print(current_node.data)
current_node = current_node.next
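# A minimal usage sketch (hypothetical values; requires the Node import above):
# lst = LinkedList()
# lst.add_last(3); lst.add_last(1); lst.add_first(2)  # 2 -> 3 -> 1
# lst.sort()                                          # 1 -> 2 -> 3
# lst.reverse()                                       # 3 -> 2 -> 1
# print(lst.to_array())                               # [3, 2, 1]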
|
import mimetypes
from django.conf import settings
from django.db import models as dbmodels
from PyPDF2 import PdfFileReader
from PyPDF2.utils import PdfReadError
from ..utils.auth import get_group_model
from ..utils.rest_api import (
FileField,
IdPrimaryKeyRelatedField,
ModelSerializer,
SerializerMethodField,
ValidationError,
)
from .models import Mediafile
class AngularCompatibleFileField(FileField):
def to_internal_value(self, data):
if data == "":
return None
return super(AngularCompatibleFileField, self).to_internal_value(data)
def to_representation(self, value):
if value is None or value.name is None:
return None
filetype = mimetypes.guess_type(value.name)[0]
result = {"name": value.name, "type": filetype}
if filetype == "application/pdf":
try:
if (
settings.DEFAULT_FILE_STORAGE
== "storages.backends.sftpstorage.SFTPStorage"
):
remote_path = value.storage._remote_path(value.name)
file_handle = value.storage.sftp.open(remote_path, mode="rb")
else:
file_handle = open(value.path, "rb")
result["pages"] = PdfFileReader(file_handle).getNumPages()
except FileNotFoundError:
# File was deleted from server. Set 'pages' to 0.
result["pages"] = 0
except PdfReadError:
# File could be encrypted but not be detected by PyPDF.
result["pages"] = 0
result["encrypted"] = True
return result
class MediafileSerializer(ModelSerializer):
"""
Serializer for mediafile.models.Mediafile objects.
"""
media_url_prefix = SerializerMethodField()
filesize = SerializerMethodField()
access_groups = IdPrimaryKeyRelatedField(
many=True, required=False, queryset=get_group_model().objects.all()
)
def __init__(self, *args, **kwargs):
"""
        This constructor overwrites the FileField serializer to return the file
        metadata in the format that the angularjs upload module expects.
"""
super(MediafileSerializer, self).__init__(*args, **kwargs)
self.serializer_field_mapping[dbmodels.FileField] = AngularCompatibleFileField
        # Make some fields read-only for updates (not creation)
if self.instance is not None:
self.fields["mediafile"].read_only = True
class Meta:
model = Mediafile
fields = (
"id",
"title",
"mediafile",
"media_url_prefix",
"filesize",
"access_groups",
"create_timestamp",
"is_directory",
"path",
"parent",
"list_of_speakers_id",
"inherited_access_groups_id",
)
read_only_fields = ("path",)
def validate(self, data):
title = data.get("title")
if title is not None and not title:
raise ValidationError({"detail": "The title must not be empty"})
parent = data.get("parent")
if parent and not parent.is_directory:
raise ValidationError({"detail": "parent must be a directory."})
if data.get("is_directory") and "/" in data.get("title", ""):
raise ValidationError(
{"detail": 'The name contains invalid characters: "/"'}
)
return super().validate(data)
def create(self, validated_data):
access_groups = validated_data.pop("access_groups", [])
mediafile = super().create(validated_data)
mediafile.access_groups.set(access_groups)
mediafile.save()
return mediafile
def update(self, instance, validated_data):
# remove is_directory, create_timestamp and parent from validated_data
# to prevent updating them (mediafile is ensured in the constructor)
validated_data.pop("is_directory", None)
validated_data.pop("create_timestamp", None)
validated_data.pop("parent", None)
return super().update(instance, validated_data)
def get_filesize(self, mediafile):
return mediafile.get_filesize()
def get_media_url_prefix(self, mediafile):
return settings.MEDIA_URL
|
import os
from hca.dss import DSSClient
from hca.util.exceptions import SwaggerAPIException
from tests.utils import Progress
class DataStoreAgent:
DSS_SWAGGER_URL_TEMPLATE = "https://dss.{deployment}.data.humancellatlas.org/v1/swagger.json"
DSS_PROD_SWAGGER_URL = "https://dss.data.humancellatlas.org/v1/swagger.json"
def __init__(self, deployment):
self.deployment = deployment
if self.deployment == "prod":
swagger_url = self.DSS_PROD_SWAGGER_URL
else:
swagger_url = self.DSS_SWAGGER_URL_TEMPLATE.format(deployment=deployment)
self.client = DSSClient(swagger_url=swagger_url)
def search(self, query, replica='aws'):
try:
response = self.client.post_search(replica=replica, es_query=query)
return response['results']
except SwaggerAPIException:
return []
def search_iterate(self, query, replica='aws'):
for hit in self.client.post_search.iterate(replica=replica, es_query=query):
yield hit
def download_bundle(self, bundle_uuid, target_folder):
Progress.report(f"Downloading bundle {bundle_uuid}:\n")
manifest = self.bundle_manifest(bundle_uuid)
bundle_folder = os.path.join(target_folder, bundle_uuid)
try:
os.makedirs(bundle_folder)
except FileExistsError:
pass
for f in manifest['bundle']['files']:
self.download_file(f['uuid'], save_as=os.path.join(bundle_folder, f['name']))
return bundle_folder
def bundle_manifest(self, bundle_uuid, replica='aws'):
return self.client.get_bundle(replica=replica, uuid=bundle_uuid)
def download_file(self, file_uuid, save_as, replica='aws'):
Progress.report(f"Downloading file {file_uuid} to {save_as}\n")
with self.client.get_file.stream(replica=replica, uuid=file_uuid) as fh:
with open(save_as, "wb") as f:
while True:
chunk = fh.raw.read(1024)
if chunk:
f.write(chunk)
else:
break
def tombstone_bundle(self, bundle_uuid, replica='aws'):
self.client.delete_bundle(replica=replica, uuid=bundle_uuid, reason="DCP-wide integration test")
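# A minimal usage sketch (hypothetical deployment name and bundle UUID):
# agent = DataStoreAgent("staging")
# hits = agent.search({"query": {"match_all": {}}})
# agent.download_bundle("11111111-2222-3333-4444-555555555555", "/tmp/bundles")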
|
T = int(input())
for _ in range(T):
n = int(input())
a = list(map(int,input().split()))
m = a[-1]
c = [a[-1]]
for i in range(n-2,-1,-1):
if m <= a[i]:
m = a[i]
c.append(a[i])
for i in range(len(c)-1,-1,-1):
print(c[i],end=" ")
print()
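# A hand-checked sketch: for n = 6 and a = [16, 17, 4, 3, 5, 2], the right-to-left
# scan keeps every element >= the running maximum, so the output is "17 5 2"
# (the suffix maxima, printed back in their original order).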
|
import datetime
import time
import streamlit as st
from threading import Thread
def run(alarmH, alarmM):
    while True:
        now = datetime.datetime.now()
        if alarmH == now.hour and alarmM == now.minute:
            st.write("Time to wake up")
            audio_file = open("song.mp3", "rb")
            st.audio(audio_file, format='audio/mp3')
            break
        time.sleep(1)  # poll once a second instead of busy-waiting
def activate_alarm(alarmH,alarmM):
process = Thread(target=run,args=[alarmH,alarmM])
process.start()
return True
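# A minimal usage sketch (hypothetical wake-up time of 07:30):
# activate_alarm(7, 30)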
|
#A file for testing the python apt module
import apt
import sys
pkg_name = "firefx"
cache = apt.cache.Cache()
#cache.update()
if pkg_name in cache:
pkg = cache[pkg_name]
print(pkg.versions[0].description)
else:
print("Package %s not found" % pkg_name)
|
for abc in ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","u","v","w","x","y","z"]:
salida = open('english-'+abc+'.txt','w')
fichero = open('english.txt', 'r')
for linea in fichero:
        # 8-letter words: len() includes the trailing newline
        if len(linea) == 9 and linea[0] == abc:
salida.write(linea)
salida.close()
fichero.close()
|
# -*- coding: utf-8 -*-
import connection
import groups
import users
import logging
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
import sys
name = sys.argv[1]
usrs = sys.argv[2:]
    logging.info('creating office\nname: {}\nusers: {}'.format(name, usrs))
con = connection.getConnection()
try:
o = groups.Office()
o.name = name
o.users = []
for u in usrs:
user = users.UserDAO.findByDni(con, u)
if user is None:
                logging.warning('nonexistent user: {}'.format(u))
continue
(uid, version) = user
o.users.append(uid)
oid = groups.OfficeDAO.persist(con, o)
office = groups.OfficeDAO.findById(con, oid)
logging.info(office.__dict__)
con.commit()
finally:
connection.closeConnection(con)
|
# -*- coding:utf-8 -*-
# class fraction operations
def incor_exit(message):
    # Abort on invalid input; simply returning would leave a half-built object.
    raise ValueError(message)
class Fraction(object):
def __init__(self, numerator, denominator=1): # a / b
try:
numerator / denominator
except (TypeError, ZeroDivisionError):
incor_exit('Error!')
if isinstance(numerator, int) and isinstance(denominator, int):
self.numerator, self.denominator = numerator, denominator
else:
incor_exit('Error!')
def reduce(self):
a, b = self.numerator, self.denominator
while b:
a, b = b, a % b
self.numerator //= a
self.denominator //= a
return self
def __str__(self):
self.reduce()
c = self.numerator // self.denominator
a = self.numerator % self.denominator
if self.denominator == 1:
return '%d' % c
if c:
return '%d %d/%d' % (c, a, self.denominator)
else:
return '%d/%d' % (a, self.denominator)
def __add__(self, other):
if isinstance(other, int):
other = Fraction(other, 1)
return Fraction(
self.numerator * other.denominator +
self.denominator * other.numerator,
self.denominator * other.denominator
).reduce()
def __radd__(self, other):
return self + other
def __eq__(self, other):
if isinstance(other, int):
other = Fraction(other, 1)
self.reduce()
other.reduce()
return (
self.numerator == other.numerator and
self.denominator == other.denominator
)
def __lt__(self, other):
self.reduce()
other.reduce()
return (
self.numerator * other.denominator -
self.denominator * other.numerator < 0
)
def __neg__(self):
return Fraction(-self.numerator, self.denominator)
def __truediv__(self, other):
return Fraction(
self.numerator * other.denominator,
self.denominator * other.numerator
)
def __float__(self):
        return self.numerator / self.denominator
if __name__ == '__main__':
f = Fraction(2)
print(f)
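    # A few extra illustrative checks (added; expected values follow from the
    # class's own arithmetic):
    print(Fraction(1, 2) + Fraction(1, 3))  # 5/6
    print(Fraction(3, 6))                   # printed reduced, as 1/2
    print(Fraction(4, 2) == 2)              # True; ints are promoted to Fractions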
|
#!/usr/bin/python
#coding: utf8
import numpy as np
class anthill:
def __init__(self):
self.nb_ants = None
self.start = None
self.end = None
self.room = []
self.link = []
self.move = []
self.nb_move = 0
class Ant:
def __init__(self, number, node_path=None,
journey=None, color='g.'):
self.number = number
self.node_on_path = node_path
self.journey = journey
self.color = color
def parse_path(self, farm):
self.node_on_path = []
started = False
self.node_on_path.append(farm.start)
for turn in farm.move:
for move in turn:
if move[0] == str(self.number):
self.node_on_path.append(move[1])
started = True
if not started:
self.node_on_path.append(farm.start)
def calc_journey(self, pos, num_steps, farm):
location_list = []
for i in range(len(self.node_on_path)):
            if i != 0:
                # take 2 nodes and interpolate x/y points between those nodes
                start_pos = pos[self.node_on_path[i - 1]]
                end_pos = pos[self.node_on_path[i]]
                x = np.linspace(start_pos[0], end_pos[0], num=num_steps)
                y = np.linspace(start_pos[1], end_pos[1], num=num_steps)
                location_list.extend(list(zip(x, y)))
self.journey = location_list
|
# Generated by Django 2.2.7 on 2019-11-24 20:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('accounts', '0002_auto_20191124_2036'),
]
operations = [
migrations.CreateModel(
name='Transaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('last_updated_at', models.DateTimeField(auto_now=True)),
('amount', models.DecimalField(decimal_places=2, max_digits=30)),
('currency', models.CharField(choices=[('USD', 'United States Dollars'), ('EUR', 'Euros')], default='EUR', max_length=3)),
('emitter', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='emitted_transactions', to='accounts.Account')),
('receiver', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='received_transactions', to='accounts.Account')),
],
options={
'abstract': False,
},
),
]
|
x = int(input("Enter x :"))
y = int(input("Enter y :"))
x = x*0+y
y = y*0+x
print(x)
print(y)
|
from sympy.ntheory import primefactors
limit = 1000000
# find the first of four consecutive integers that each have four distinct prime factors
for n in range(limit):
    ok = True
    for i in range(4):
        if len(primefactors(n + i)) != 4:
            ok = False
            break
    if ok:
        print(n)
        break
|
# coding=utf-8
import sympy as sp
import numpy as np
from math import gcd  # fractions.gcd was removed in Python 3.9
import bitwise_operators_strings as bw
def shift_register(iniCond,poly):
"""Generates a maximum length sequence in a shift register from the next arguments:
- iniCond: the initial condition of the shift register
- poly: primitive polynomial in binary format
both arguments are strings in binary format, i.e '1011'."""
if len(iniCond) != len(poly):
raise Exception('Argument length mismatch')
regSize = len(poly)
Rg = int(iniCond,2)
g = int(poly,2)
N = 2**regSize - 1
    seq = []
    for i in range(0, N + 1):
        seq.append(Rg & 1)  # output bit: LSB of the register
        # assumed Fibonacci LFSR: the feedback bit is the parity of the
        # register bits selected by the polynomial taps
        feedback = bin(Rg & g).count('1') % 2
        Rg = (Rg >> 1) | (feedback << (regSize - 1))
    return seq
def gold_sequence(iniCond,poly):
"""Generates a Gold code sequence from the next parameters:
- iniCond: the initial condition of the shift register
- poly: primitive polynomial in binary format
both arguments are strings in binary format, i.e '1011'."""
if len(iniCond) != len(poly):
raise Exception('Argument length mismatch')
regSize = len(poly)
N = 2**regSize -1
if regSize%4 != 0:
if regSize%4 == 2:
e,k = 2,1
while gcd(regSize,k) != 2:
k+=1
f = 2**k+1
else:
e,k = 1,1
            while gcd(regSize,k) != 1:
k+=1
f= 2**k + 1
else:
raise Exception('No available sequences for the given arguments')
SR1 = shift_register(iniCond,poly)
SR2 = []
for i in range(0,N):
k = (i*f % N) + 1
SR2.append(SR1[k])
    # elementwise XOR of the two sequences (numpy bitwise_xor assumed here)
    G = -2 * np.bitwise_xor(np.array(SR1[:N]), np.array(SR2)) + 1
print(G)
Gs = bw.toArray(G)
np.corrcoef(Gs,Gs)
return Gs
gold_sequence('0001','1011')
|
import numpy as np
import tensorflow as tf
from tensorflow.keras.utils import Sequence as tf_Sequence
from torch.nn import Module
from functools import partial
from graphgallery import functional as gf
class Sequence(tf_Sequence):
def __init__(self, *args, **kwargs):
device = kwargs.pop('device', 'cpu')
escape = kwargs.pop('escape', None)
super().__init__(*args, **kwargs)
self.astensor = partial(gf.astensor, device=device, escape=escape)
self.astensors = partial(gf.astensors, device=device, escape=escape)
self.device = device
def __len__(self):
raise NotImplementedError
def __getitem__(self, index):
raise NotImplementedError
def on_epoch_begin(self):
...
def on_epoch_end(self):
...
def _shuffle_batches(self):
...
|
import dash_bootstrap_components as dbc
from dash import Dash, Input, Output, html
# create the app the callback below registers against (assumed minimal setup)
app = Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
dropdown = html.Div(
[
dbc.DropdownMenu(
[
dbc.DropdownMenuItem(
"A button", id="dropdown-button", n_clicks=0
),
dbc.DropdownMenuItem(
"Internal link", href="/docs/components/dropdown_menu"
),
dbc.DropdownMenuItem(
"External Link", href="https://github.com"
),
dbc.DropdownMenuItem(
"External relative",
href="/docs/components/dropdown_menu",
external_link=True,
),
],
label="Menu",
),
html.P(id="item-clicks", className="mt-3"),
]
)
@app.callback(
Output("item-clicks", "children"), [Input("dropdown-button", "n_clicks")]
)
def count_clicks(n):
if n:
return f"Button clicked {n} times."
return "Button not clicked yet."
|
# based on https://inventwithpython.com/chapter10.html for practice
# tic tac toe game
import random
import time
def get_letter():
while True:
letter = input("Choose your letter ('X' or 'O') >> ")
if letter.upper() == 'X':
return['X', 'O']
elif letter.upper() == 'O':
return['O', 'X']
else:
print("You must enter 'X' or 'O'")
def flip_coin():
print("Flipping coin to choose first player...")
time.sleep(3)
outcome = random.randint(0, 1)
if outcome == 0:
return "player"
else:
return "computer"
def generate_board(board):
print("")
print(" --- --- --- ")
print("| " + board[1] + " | " + board[2] + " | " + board[3] + " |")
print(" --- --- --- ")
print("| " + board[4] + " | " + board[5] + " | " + board[6] + " |")
print(" --- --- --- ")
print("| " + board[7] + " | " + board[8] + " | " + board[9] + " |")
print(" --- --- --- ")
print("")
def get_move(board):
while True:
move = int(input("Enter your move (1 - 9) >> "))
if space_free(board, move):
return move
else:
print("You cannot play this square")
def make_move(board, move, letter):
board[move] = letter
def winning_position(board, letter):
if (board[1] == letter and board[2] == letter and board[3] == letter)\
or (board[4] == letter and board[5] == letter and board[6] == letter)\
or (board[7] == letter and board[8] == letter and board[9] == letter)\
or (board[1] == letter and board[4] == letter and board[7] == letter)\
or (board[2] == letter and board[5] == letter and board[8] == letter)\
or (board[3] == letter and board[6] == letter and board[9] == letter)\
or (board[1] == letter and board[5] == letter and board[9] == letter)\
or (board[3] == letter and board[5] == letter and board[7] == letter):
return True
else:
return False
def get_duplicate_board(board):
duplicate_board = []
for i in board:
duplicate_board.append(i)
return duplicate_board
def space_free(board, move):
if board[move] is not " ":
return False
else:
return True
def random_move(board, spaces):
moves = []
for i in spaces:
if space_free(board, i):
moves.append(i)
    if len(moves) != 0:
return random.choice(moves)
else:
return None
def get_computer_move(board, computer_letter, player_letter):
for i in range(1, 10):
board_copy = get_duplicate_board(board)
if space_free(board_copy, i):
make_move(board_copy, i, computer_letter)
if winning_position(board_copy, computer_letter):
return i
for i in range(1, 10):
board_copy = get_duplicate_board(board)
if space_free(board_copy, i):
make_move(board_copy, i, player_letter)
if winning_position(board_copy, player_letter):
return i
move = random_move(board, [1, 3, 7, 9])
if move is not None:
return move
if space_free(board, 5):
return 5
return random_move(board, [2, 4, 6, 8])
def board_full(board):
for i in range(1, 10):
if space_free(board, i):
return False
return True
def play_again():
answer = input("Would you like to play again? >> ")
return answer.lower() == "yes"
print("Welcome to tic tac toe!")
while True:
board = [' '] * 10
player_letter, computer_letter = get_letter()
turn = flip_coin()
print("The " + turn + " will start")
game_in_progress = True
while game_in_progress:
if turn == "player":
generate_board(board)
player_move = get_move(board)
make_move(board, player_move, player_letter)
if winning_position(board, player_letter):
print("You have won the game")
generate_board(board)
game_in_progress = False
elif board_full(board):
generate_board(board)
print("Tie")
break
else:
turn = "computer"
elif turn == "computer":
computer_move = get_computer_move(board, computer_letter, player_letter)
make_move(board, computer_move, computer_letter)
if winning_position(board, computer_letter):
generate_board(board)
print("You have lost")
game_in_progress = False
elif board_full(board):
generate_board(board)
print("The game is a tie")
break
else:
turn = 'player'
if not play_again():
break
|
import torch.utils.data as data
import cv2
import sys
import random
from os import listdir
from os.path import join
import os
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from inference.Compiler import *
# Model Imports
import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn.utils.rnn import pack_padded_sequence
from torch.autograd import Variable
#torch.nn.Module.dump_patches = True
def resize_img(png_file_path):
img_rgb = cv2.imread(png_file_path)
img_grey = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
img_adapted = cv2.adaptiveThreshold(img_grey, 255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY, 101, 9)
img_stacked = np.repeat(img_adapted[...,None],3,axis=2)
resized = cv2.resize(img_stacked, (224,224), interpolation=cv2.INTER_AREA)
bg_img = 255 * np.ones(shape=(224,224,3))
bg_img[0:224, 0:224,:] = resized
bg_img /= 255
bg_img = np.rollaxis(bg_img, 2, 0)
return bg_img
def load_doc(filename):
file = open(filename, 'r')
text = file.read()
file.close()
return text
#Models
class EncoderCNN(nn.Module):
def __init__(self, embed_size):
"""Load the pretrained ResNet-152 and replace top fc layer."""
super(EncoderCNN, self).__init__()
resnet = models.resnet34(pretrained=True)
modules = list(resnet.children())[:-1] # delete the last fc layer.
self.resnet = nn.Sequential(*modules)
self.linear = nn.Linear(resnet.fc.in_features, embed_size)
self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)
self.init_weights()
def init_weights(self):
"""Initialize the weights."""
self.linear.weight.data.normal_(0.0, 0.02)
self.linear.bias.data.fill_(0)
def forward(self, images):
"""Extract the image feature vectors."""
features = self.resnet(images)
features = Variable(features.data)
features = features.view(features.size(0), -1)
if images.shape[0] < 2:
features = self.linear(features)
return features
features = self.bn(self.linear(features))
return features
class DecoderRNN(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers):
"""Set the hyper-parameters and build the layers."""
super(DecoderRNN, self).__init__()
self.n_layers = num_layers
self.hidden_size = hidden_size
self.embed = nn.Embedding(vocab_size, embed_size)
self.lstm = nn.GRU(embed_size*2, hidden_size, num_layers, batch_first=True)
self.linear = nn.Linear(hidden_size, vocab_size)
self.init_weights()
def init_weights(self):
"""Initialize weights."""
self.embed.weight.data.uniform_(-0.1, 0.1)
self.linear.weight.data.uniform_(-0.1, 0.1)
self.linear.bias.data.fill_(0)
def forward(self, features, captions, hidden):
"""Decode image feature vectors and generates captions."""
embeddings = self.embed(captions)
embeddings = torch.cat((features.unsqueeze(1).repeat(1,embeddings.shape[1],1), embeddings), 2)
#packed = pack_padded_sequence(embeddings, 48, batch_first=True)
output, hidden = self.lstm(embeddings, hidden)
outputs = self.linear(output)
return outputs, hidden
def init_hidden(self):
return Variable(torch.zeros(self.n_layers, 1, self.hidden_size))
def load_best(model):
base = 'trained/'
if model == 1:
base = 'trained2/'
lst = listdir(base)
le = 999
ld = 999
prev_e = ''
prev_d = ''
for i in lst:
num = i.split('(')[1]
num = num.split(')')[0]
num = float(num)
if i.startswith('encoder'):
if le > num:
le = num
if prev_e != '':
os.remove(base + prev_e)
prev_e = i
elif le < num:
os.remove(base + i)
elif i.startswith('decoder'):
if ld > num:
ld = num
if prev_d != '':
os.remove(base + prev_d)
prev_d = i
elif ld < num:
os.remove(base + i)
return base + prev_e, base + prev_d
def word_for_id(integer, tokenizer):
for word, index in tokenizer.word_index.items():
if index == integer:
return word
return None
def load_val_images(data_dir):
image_filenames =[]
images = []
all_filenames = listdir(data_dir)
all_filenames.sort()
for filename in (all_filenames):
if filename[-3:] == "png":
image_filenames.append(filename)
for name in image_filenames:
image = resize_img(data_dir+name)
images.append(image)
return images
def load_n_run(image_file=None, file=True, model=0):
#encoder = torch.load(os.path.abspath("model_weights/encoder_resnet34_0.061650436371564865.pt"))
#decoder = torch.load(os.path.abspath("model_weights/decoder_resnet34_0.061650436371564865.pt"))
me, md = load_best(model)
print(me, md)
encoder = torch.load(me)
decoder = torch.load(md)
#encoder = torch.load(os.path.abspath("encoder_resnet34_tensor(0.0435).pt"))
#decoder = torch.load(os.path.abspath("decoder_resnet34_tensor(0.0435).pt"))
# Initialize the function to create the vocabulary
tokenizer = Tokenizer(filters='', split=" ", lower=False)
# Create the vocabulary
tokenizer.fit_on_texts([load_doc('vocabulary.vocab')])
decoded_words = []
star_text = '<START> '
hidden = decoder.init_hidden()
#image = load_val_images('val/')[0]
if image_file:
print('image_file:',image_file)
image = resize_img(image_file)
else:
image = load_val_images('test/')[0]
#image = Variable(torch.FloatTensor([image]))
image = Variable(torch.FloatTensor([image]))
predicted = '<START> '
for di in range(9999):
sequence = tokenizer.texts_to_sequences([star_text])[0]
decoder_input = Variable(torch.LongTensor(sequence)).view(1,-1)
features = encoder(image)
#print(decoder_input)
outputs,hidden = decoder(features, decoder_input,hidden)
topv, topi = outputs.data.topk(1)
ni = topi[0][0][0]
word = word_for_id(ni, tokenizer)
if word is None:
continue
predicted += word + ' '
star_text = word
#print(predicted)
if word == '<END>':
break
print(predicted)
#select html tag collection for replacement
compiler = Compiler('default')
#generate html content
compiled_website = compiler.compile(predicted.split())
if file:
with open('output.html', 'w') as f:
f.write(compiled_website)
else:
print(compiled_website)
if __name__ == "__main__":
# train on 20 images
#training(100)
load_n_run(image_file='./test/test.png', file=True)
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
#from bayes_opt import BayesianOptimization
from bayesian_opt.bayesian_optimization import BayesianOptimization
def target(x):
y = np.exp(-(x-2)**2)+np.exp(-(x-6)**2/5)+1/(x**2+1)+0.1*np.sin(5*x)-0.5
return y
def posterior(bo, x):
bo.gp.fit(bo.X, bo.Y)
mu, sigma = bo.gp.predict(x, return_std=True)
return mu, sigma
def plt_gp(bo, x, y):
    fig = plt.figure(figsize=(16,10))
gs = gridspec.GridSpec(2,1, height_ratios=[3,1])
axis = plt.subplot(gs[0])
acq = plt.subplot(gs[1])
bo.gp.fit(bo.X, bo.Y)
mu, sigma = bo.gp.predict(x, return_std=True)
axis.plot(x, y, linewidth=3, label='Target')
axis.plot(bo.X.flatten(), bo.Y, 'D', markersize=8, color='r', label='Observation')
axis.plot(x, mu, '--', color='k', label='Prediction')
axis.plot(x, np.zeros(x.shape[0]), linewidth=3, color='r', label='Prediction')
axis.fill(np.concatenate([x, x[::-1]]), np.concatenate([mu-1.96*sigma, (mu+1.96*sigma)[::-1]]), alpha=0.6, fc='c', ec='None')
utility = bo.util.acqf(x, bo.gp, 0)
acq.plot(x, utility, label='Utility Function')
plt.show()
def main():
bo = BayesianOptimization(target, {'x':(-5,10)})
bo.maximize(init_points=2, n_iter=10, acq='ucb', kappa=5)
x = np.linspace(-5, 10, 200).reshape(-1,1)
y = target(x)
plt_gp(bo, x, y)
if __name__=="__main__":
main()
|
#!/usr/bin/python3
'''after party module'''
def append_after(filename="", search_string="", new_string=""):
    '''function that inserts a line of text to a file, after
    each line containing a specific string'''
    with open(filename, mode='r') as f:
        input_file = f.readlines()
    with open(filename, mode="w") as f:
        for line in input_file:
            f.write(line)
            if search_string in line:
                f.write(new_string)
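# Illustrative usage (added; the file name and strings are hypothetical):
if __name__ == "__main__":
    with open("example.txt", "w") as demo:
        demo.write("Python is fun\njust text\n")
    append_after("example.txt", "Python", "-> found it\n")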
|
import unittest
from katas.beta.count_vowels_in_a_string import count_vowels
class CountVowelsTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(count_vowels('abcdefg'), 2)
def test_equals_2(self):
self.assertEqual(count_vowels('asdfdsafdsafds'), 3)
|
class Player:  # superclass
def __init__(self, name, age):
self.name = name
self.age = age
class SoccerPlayer(Player):  # subclass
def Goal(self, goal):
self.goal = goal
class Midfielder(SoccerPlayer):  # subclass of a subclass
def assist(self, ass):
self.ass = ass
class GamePlayer(Player):  # subclass
def KDA(self, K, D, A):
self.K = K
self.D = D
self.A = A
son = SoccerPlayer("손흥민", 29)
son.Goal(3)
print(son.name, son.age, son.goal)
|
#!/usr/bin/env python3
import imaplib
import email
import os
import sys
import mimetypes
import time
# log in information
# emailAddress = os.environ.get("python_email")
# password = os.environ.get("python_password")
# instagramAddress = os.environ.get('instagram_email')
# instagramPW = os.environ.get('instagram_password')
def downloadEmails(emailAddress, emailPw, emailSearchFrom):
SMTP_SERVER = "imap.gmail.com"
mail = imaplib.IMAP4_SSL(SMTP_SERVER)
try:
mail.login(emailAddress, emailPw)
time.sleep(1)
    except Exception as e:
        print('Error logging in:', e)
        return
else:
# select inbox
mail.select("INBOX")
# finding emails sent from myself
result, messageFromMe = mail.search(
None, 'FROM', 'therealnicola@gmail.com')
mailList = messageFromMe[0].split()
if len(mailList) == 0:
print('No emails found')
return
# reading each item
try:
for item in mailList:
result, data = mail.fetch(item, '(RFC822)')
rawEmail = data[0][1].decode("utf-8")
emailMessage = email.message_from_string(rawEmail)
subject_ = emailMessage['Subject']
counter = 0
for part in emailMessage.walk():
if part.get_content_maintype() == "multipart":
continue
filename = part.get_filename()
contentType = part.get_content_type()
if not filename:
ext = mimetypes.guess_extension(contentType)
if not ext:
ext = '.bin'
filename = 'msg-part-%08d%s' % (counter, ext)
counter += 1
# saving the file
savePath = os.path.join(os.getcwd(), "emails/uploads", )
if not os.path.exists(savePath):
                        # make the path if it doesn't exist
os.makedirs(savePath)
with open(os.path.join(savePath, filename), 'wb') as fp:
fp.write(part.get_payload(decode=True))
# saving the subject as a text file
with open(os.path.join(savePath, 'text.txt'), 'w') as fp:
fp.write(subject_)
# Change the flag to deleted and delete
typ, response = mail.store(item, '+FLAGS', r'(\Deleted)')
mail.expunge()
        except Exception as e:
            print('Error downloading:', e)
|
from django.contrib import admin
from .models import User
class UserAdmin(admin.ModelAdmin):
    filter_horizontal = ['groups']
admin.site.register(User, UserAdmin)
|
from flask import Flask, render_template
from youtube_api import YoutubeDataApi
from youtube import getVideoData
app = Flask(__name__)
@app.route('/')
def sayHello():
return render_template("index.html")
# @app.route('/about')
# def about():
# return render_template("about.html")
@app.route('/suggestions')
def suggestions():
    return render_template("suggestions.html")
@app.route('/stoicism')
def stoicsuggestions():
    searchData = getVideoData('stoicism')
    return render_template("videoLayout.html", results=searchData, title="Stoicism")
@app.route('/philosophy')
def philosuggestions():
searchData = getVideoData('philosophy')
return render_template("videoLayout.html", results=searchData, title="Philosophy")
@app.route('/motivation')
def motivsuggestions():
searchData = getVideoData('motivation')
return render_template("videoLayout.html", results=searchData, title="Motivation")
@app.route('/inspiration')
def insipsuggestions():
searchData = getVideoData('inspirational')
return render_template("videoLayout.html", results=searchData, title="Inspirational")
@app.route('/productivity')
def producsuggestions():
searchData = getVideoData('productivity')
return render_template("videoLayout.html", results=searchData, title="Productivity")
@app.route('/business')
def businesssuggestions():
searchData = getVideoData('business')
return render_template("videoLayout.html", results=searchData, title="Business")
if __name__ == "__main__":
app.run(debug=True)
|
import socket
import select
HEADER_LENGTH = 3 #Header will be used to specify the length of the message received
IP = socket.gethostname()
PORT = 2000
email_list = ["trinity"]
password_list = ["college"]
coef_authorization = 0
#This function returns a message received in the format: header, message_body- will be called further down.
def receive_message(client_socket):
try:
message_header = client_socket.recv(HEADER_LENGTH)
message_length = int(message_header.decode("utf-8"))
return {"header": message_header,
"data": client_socket.recv(message_length).decode("utf-8")}
except:
return False
def set_up():
# Welcome & Setup
print("\nWelcome to Group 7 Quarantine Monitor!\n")
set_up = input("Do you want to set up a new patient (Y/N): ")
while set_up != 'Y' and set_up != 'N':
print("Invalid command!")
set_up = input("Do you want to set up a new patient (Y/N): ")
while set_up == 'Y':
new_email = input('Enter a new Email-adress: ')
new_password = input('Enter the password: ')
if new_email in email_list:
print('Email already registered. ')
del new_email, new_password
else:
email_list.append(new_email)
password_list.append(new_password)
print("New user", new_email, "setup successful! ")
del new_email, new_password
set_up = input("Do you want to set up a new patient (Y/N): ")
while set_up != 'Y' and set_up != 'N':
print("Invalid command!")
set_up = input("Do you want to set up a new patient (Y/N): ")
set_up()
def authorize():
# Check log in / give access to user
print("Checking Authorization... ")
trur_message = 'TRUE'.encode("utf-8")
false_message = 'FALSE'.encode("utf-8")
while True:
try:
UP_header = client_socket.recv(HEADER_LENGTH).decode("utf-8")
except:
continue
break
email_pass_packet = client_socket.recv(int(UP_header)).decode("utf-8")
email_pass = email_pass_packet.split(',') #[0] will be the username, [1] will be the password
print(email_pass[0], email_pass[1])
# Authorization
while True:
if email_pass[0] in email_list:
index = email_list.index(email_pass[0])
if email_pass[1] == password_list[index]:
print('Passed Authorization. Currently connected with ->', email_pass[0])
client_socket.send(f"{len(trur_message):<{HEADER_LENGTH}}".encode("utf-8")+ 'TRUE'.encode("utf-8"))
break
else:
client_socket.send(f"{len(false_message):<{HEADER_LENGTH}}".encode("utf-8")+ 'FALSE'.encode("utf-8"))
while True:
try:
UP_header = client_socket.recv(HEADER_LENGTH)
except:
continue
break
email_pass_packet = client_socket.recv(int(UP_header.decode("utf-8"))).decode("utf-8")
email_pass = email_pass_packet.split(',') #[0] will be the username, [1] will be the password
print(email_pass[0], email_pass[1])
else:
client_socket.send(f"{len(false_message):<{HEADER_LENGTH}}".encode("utf-8") + 'FALSE'.encode("utf-8"))
while True:
try:
UP_header = client_socket.recv(HEADER_LENGTH)
except:
continue
break
email_pass_packet = client_socket.recv(int(UP_header.decode("utf-8"))).decode("utf-8")
email_pass = email_pass_packet.split(',') #[0] will be the username, [1] will be the password
print(email_pass[0], email_pass[1])
#Set up the server socket details:
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setblocking(False)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # allow rebinding the port right after a restart, while the old socket sits in TIME_WAIT
server_socket.bind((IP, PORT))
server_socket.listen() #Might be able to limit the number of clients by specifying a number here
# Sockets from which we expect to read
sockets_list = [server_socket]
# Sockets to which we expect to write
outputs = [ ]
#This is a clients dictionary (key/value pair)
clients = {}
timeout=20 #this is the time for which the server waits for new connection
print("Waiting for incoming connections...")
while True:
read_sockets, write_sockets, exception_sockets = select.select(sockets_list, outputs, sockets_list,timeout)
for notified_socket in read_sockets:
if notified_socket == server_socket:
client_socket, client_address = server_socket.accept()
user = receive_message(client_socket)
print(f"In coming connection with username: {user['data']}")
if coef_authorization == 0:
authorize() # This is a loop, can be infinity
coef_authorization = coef_authorization + 1
user_id = user['data']#The patient id
sockets_list.append(client_socket)
clients[client_socket] = user
print(f"Accepted new connection from {client_address[0]}:{client_address[1]} username:{user['data']}")
welcome_message = ("You are connected - stand by for advice.").encode("utf-8")
welcome_header = f"{len(welcome_message):<{HEADER_LENGTH}}".encode("utf-8")
client_socket.send(welcome_header + welcome_message)
else:
try:
message = receive_message(notified_socket)
user = clients[notified_socket]
print(f"Received from {user['data']}: {message['data']}")
except:
print(f"Closed connection from {clients[notified_socket]['data']}")
sockets_list.remove(notified_socket)
del clients[notified_socket]
#continue
#Now print out a list of connected patients:
print("The clients currently connected are:")
for eachClient in clients:
        print(clients[eachClient]['data'])
#See what the doctor wants to do next:
options = input(f"Enter a patient name to send advice or enter 's' to stand-by for more connections: > ")
while (options != 's'):
if options == 'p':
print("The clients currently connected are:")
for eachClient in clients:
                print(clients[eachClient]['data'])
else:
patientFound = False
for eachClient in clients:
if clients[eachClient]['data'] == options:
patientFound = True
adviceMessage = input(f"Enter advice for {clients[eachClient]['data']}: > ")
adviceMessage = adviceMessage.encode("utf-8")
adviceMessage_header = f"{len(adviceMessage):<{HEADER_LENGTH}}".encode("utf-8")
try:
eachClient.send(adviceMessage_header + adviceMessage)
except:
print("Error sending advice to the patient.")
if not patientFound:
print("Unable to find patient. Please try again.")
options = input(f"Enter patient name, 's' to stand-by for more connections or 'p' to print the list again: > ")
|
from youtube_api import YoutubeDataApi
from secret import youtube_api
yt = YoutubeDataApi(youtube_api)
def getVideoData(search):
searches = yt.search(q=search, max_results=10)
print(searches[0])
    searches = [{'title': s['video_title'],
                 'date': s['video_publish_date'],
                 'desc': s['video_description'],
                 'channel': s['channel_title'],
                 'image': s['video_thumbnail'],
                 'video_link': "https://www.youtube.com/watch?v=" + s['video_id']}
                for s in searches]
return searches
def getChannelData(channel):
pass
|
from flask import Blueprint
from flask import request, jsonify
from flask import render_template
from flask_login import login_required
from .controller import HostMonitor, Performance
bp = Blueprint('performance', __name__)
@bp.route('/performance/host')
@login_required
def host():
all_list = HostMonitor.host_all_list()
return render_template('/performance/host.html', data=all_list)
@bp.route('/performance/host/<hostname>', methods=['GET', 'POST'])
@login_required
def performance_host(hostname):
title_list = HostMonitor.get_title_list()
return render_template('/performance/host_monitor_display.html', hostname=hostname,
title_data=title_list)
@bp.route('/performance/host/json_data', methods=['GET', 'POST'])
@login_required
def json_data():
if request.method == 'POST':
starttime = request.form['start']
endtime = request.form['end']
hostname = request.form['hostname']
data_list = HostMonitor.get_monitor_json_data(hostname, starttime, endtime)
return jsonify({'data': data_list})
@bp.route('/performance/index/<opt>')
@login_required
def index(opt):
wait_class_data = Performance.wait_class(opt)
return render_template('/performance/index.html', wait_class_data=wait_class_data)
|
name = input()
message = "Hello, "
if name == "Johnny":
message += "my love!"
else:
message += f"{name}!"
print(message)
|
#!/usr/bin/python
from statistics import median
num=input("Enter number to find median:")
con=[int(x) for x in str(num)]
res=median(con)
print(res)
|
import sys
import numpy as np
from Perceptron import Perceptron
from io_handling import FileOutputter
def main():
p = Perceptron(FileOutputter(sys.argv[2]))
raw_data = np.loadtxt(sys.argv[1], delimiter=',')
data = raw_data[:, [0, 1]]
rows = raw_data.shape[0]
bias_column = np.ones(rows)
bias_column.shape = (rows, 1)
data = np.hstack((bias_column, data))
labels = raw_data[:, [2]].flatten()
while True:
p.run(data, labels.T)
r,w = p.predict_all(data, labels.T)
if w == 0:
break
return 0
if __name__ == "__main__":
main()
# # RESOURCES
# - https://www.tutorialspoint.com/python/python_files_io.htm (file handling)
# - https://en.wikipedia.org/wiki/Perceptron (WP article on perceptron)
# - http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Perceptron.html#sklearn.linear_model.Perceptron (basic perceptron learning algorithm)
# - https://docs.scipy.org/doc/numpy/reference/generated/numpy.genfromtxt.html (loading text from csv into numpy array)
|
import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle
app = Flask(__name__)
model = pickle.load(open('model.pkl', 'rb'))
@app.route('/')
def home():
return render_template('index.html')
@app.route('/predict',methods=['POST'])
def predict():
'''
For rendering results on HTML GUI
'''
features = ['Mat_t20', 'Inns_t20', 'NO_t20', 'HS_t20', 'Ave_t20', 'SR_t20',
'4s_t20', '6s_t20', 'Runs_t20', '100s_t20', '50s_t20', '0s_t20',
'Mat_odi', 'Inns_odi', 'NO_odi', 'HS_odi', 'Ave_odi', 'SR_odi',
'4s_odi', '6s_odi', 'Runs_odi', '100s_odi', '50s_odi', '0s_odi',
'year']
req_features = ['Mat_t20','Ave_t20', 'SR_t20','Runs_t20', '100s_t20', '50s_t20','year']
int_features = [int(x) for x in request.form.values()]
int_dic={}
for feature,value in zip(req_features,int_features):
int_dic[feature]=value
    row = []
    for feature in features:
        if feature in int_dic:
            if feature != 'year':
                row.append(int_dic[feature])
            else:
                row.append(int_dic[feature] - 2016)
        elif feature == 'Inns_t20':
            row.append(int_dic['Mat_t20'])
        else:
            row.append(0)
    final_features = [np.array(row)]
prediction = model.predict(final_features)
output = round(prediction[0]/1000, 2)
return render_template('index.html', prediction_text='Player cost should be $ {} crore'.format(output))
if __name__ == "__main__":
app.run(debug=True)
|
import cv2
import numpy as np
folders = ['Adirondack-perfect', 'Backpack-perfect',
'Couch-perfect', 'Sword2-perfect']
# for different sequences, find depth image
for folder in folders:
# path of images
path = 'D:\\Vision\\HW07_dataset\\Question2\\{}\\{}\\'.format(folder, folder)
# left image
path_img0 = path + 'im0.png'
# right image
path_img1 = path + 'im1.png'
img_l = cv2.imread(path_img0)
img_r = cv2.imread(path_img1)
# SGBM Parameters
window_size = 3
left_matcher = cv2.StereoSGBM_create(
minDisparity=0,
numDisparities=160,
blockSize=5,
P1=8 * 3 * window_size ** 2,
P2=32 * 3 * window_size ** 2,
disp12MaxDiff=1,
uniquenessRatio=15,
speckleWindowSize=0,
speckleRange=2,
preFilterCap=63,
mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY
)
# Convenience method to set up the matcher for computing the
# right-view disparity map that is required in case of filtering with confidence.
right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)
# filter parameters
lambda_param = 80000
sigma_param = 1.2
visual_multiplier = 1.0
# Convenience factory method that creates an instance of DisparityWLSFilter and sets up all
# the relevant filter parameters automatically based on the matcher instance. Currently supports
# only StereoBM and StereoSGBM.
wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)
wls_filter.setLambda(lambda_param)
wls_filter.setSigmaColor(sigma_param)
displ = left_matcher.compute(img_l, img_r)
dispr = right_matcher.compute(img_r, img_l)
displ = np.int16(displ)
dispr = np.int16(dispr)
filteredImg = wls_filter.filter(displ, img_l, None, dispr)
    filteredImg = cv2.normalize(src=filteredImg, dst=filteredImg, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX)
filteredImg = np.uint8(filteredImg)
heat_img = cv2.applyColorMap(filteredImg, cv2.COLORMAP_JET)
cv2.imshow('Disparity Map {}'.format(folder), cv2.resize(heat_img, (heat_img.shape[1]//4, heat_img.shape[0]//4)))
# write to file
cv2.imwrite(filename='.\\problem-1-2\\heatmap-{}.png'.format(folder), img=heat_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
"""
# 二叉树
"""
import os
import logging
logger = logging.getLogger(__name__)
class TreeNode(object):
def __init__(self, value):
self.val = value
self.left = None
self.right = None
def pre_order(root):
    """Pre-order traversal, implemented with generators + recursion"""
    if root:
        yield root.val                    # visit the root
        yield from pre_order(root.left)   # visit the left subtree
        yield from pre_order(root.right)  # visit the right subtree
def in_order(root):
    """In-order traversal, implemented with generators + recursion"""
    if root:
        yield from in_order(root.left)    # visit the left subtree
        yield root.val                    # visit the root
        yield from in_order(root.right)   # visit the right subtree
def post_order(root):
    """Post-order traversal, implemented with generators + recursion"""
    if root:
        yield from post_order(root.left)   # visit the left subtree
        yield from post_order(root.right)  # visit the right subtree
        yield root.val                     # visit the root
if __name__ == '__main__':
logging.basicConfig(format="[%(asctime)s %(filename)s: %(lineno)s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
filename=None,
filemode="a")
logger.info("申请N个节点")
singer = TreeNode("Taylor Swift")
genre_country = TreeNode("Country")
genre_pop = TreeNode("Pop")
album_fearless = TreeNode("Fearless")
album_red = TreeNode("Red")
album_1989 = TreeNode("1989")
album_reputation = TreeNode("Reputation")
song_ls = TreeNode("Love Story")
song_wh = TreeNode("White Horse")
song_wanegbt = TreeNode("We Are Never Ever Getting Back Together")
song_ikywt = TreeNode("I Knew You Were Trouble")
song_sio = TreeNode("Shake It Off")
song_bb = TreeNode("Bad Blood")
song_lwymmd = TreeNode("Look What You Made Me Do")
song_g = TreeNode("Gorgeous")
logger.info("设置树节点之间的关系")
singer.left, singer.right = genre_country, genre_pop
genre_country.left, genre_country.right = album_fearless, album_red
genre_pop.left, genre_pop.right = album_1989, album_reputation
album_fearless.left, album_fearless.right = song_ls, song_wh
album_red.left, album_red.right = song_wanegbt, song_ikywt
album_1989.left, album_1989.right = song_sio, song_bb
album_reputation.left, album_reputation.right = song_lwymmd, song_g
logger.info("前序遍历: {}".format(list(pre_order(singer))))
logger.info("中序遍历: {}".format(list(in_order(singer))))
logger.info("后序遍历: {}".format(list(post_order(singer))))
|
#! /usr/bin/env python
'''
Submission script adapted for Comet and Bridges HPCs, including
multiple options such as accounts, partitions, n of processors,
memory, etc.
Author: Juan V. Alegre Requena, please report any bugs
or suggestions to juanvi89@hotmail.com
'''
import os
from argparse import ArgumentParser
PARTITION_LIST_COMET = ['shared','compute','debug']
PARTITION_LIST_BRIDGES = ['RM-shared','RM','RM-small']
def prepare_sh(inp_file,nproc,mem,args):
    # this prepares a queue submitting file with slurm options that will be run later
if args.cluster == "comet":
scratch="/expanse/lustre/scratch/$USER/temp_project/job_$SLURM_JOB_ID"
sh_s="#!/bin/bash\n\n#SBATCH -t "+str(args.t)+"\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1"
sh_s=sh_s+"\n#SBATCH --cpus-per-task="+str(nproc)
partition = args.pcomet
if nproc == 128 and partition == 'shared':
partition = 'compute'
sh_s=sh_s+"\n#SBATCH -p "+partition
sh_s=sh_s+"\n#SBATCH --job-name="+inp_file
sh_s=sh_s+'\n#SBATCH --account='+ args.acomet
# to avoid problems with calcs that require large amounts of memory
if int(mem) > 48:
mem = int(mem)+6
sh_s=sh_s+'\n#SBATCH --mem='+ str(mem) +'GB'
sh_s=sh_s+"\nfile="+inp_file+"\ninpfile=${file}.com\noutfile=${file}.log\n\n"
sh_s=sh_s+"export MODULEPATH=/share/apps/compute/modulefiles/applications:$MODULEPATH\n"
sh_s=sh_s+"export GAUSS_SCRDIR="+scratch+"\nset OMP_NUM_THREADS $SLURM_CPUS_ON_NODE\n"
sh_s=sh_s+"module load gaussian/16.C.01\n"
sh_s=sh_s+"Homepath=$(pwd)\n"
sh_s=sh_s+"mkdir $GAUSS_SCRDIR\n\n"
sh_s=sh_s+"touch $Homepath/$outfile\n"
sh_s=sh_s+"cp $inpfile $GAUSS_SCRDIR\n"
sh_s=sh_s+"cd $GAUSS_SCRDIR\n"
sh_s=sh_s+"\ng16 < $GAUSS_SCRDIR/$inpfile > $Homepath/$outfile\n\n"
sh_s=sh_s+" echo 'Gaussian Job finished or failed (Good luck!!)'\n"
        if args.chk:
            sh_s=sh_s+"cp $GAUSS_SCRDIR/*.chk $Homepath\n"
        if args.wfn:
            sh_s=sh_s+"cp $GAUSS_SCRDIR/*.wfn $Homepath\n"
sh_file=open(inp_file+".sh","w")
sh_file.write(sh_s)
sh_file.close()
elif args.cluster == "bridges":
sh_s="#!/bin/csh\n#SBATCH -t "+str(args.t)+"\n#SBATCH -N 1"
partition = args.pbridges
if nproc == 128 and partition == 'RM-shared':
partition = 'RM'
sh_s=sh_s+"\n#SBATCH -p "+partition
# sh_s=sh_s+'\n#SBATCH --mem-per-cpu='+ str((int(mem)+6)/(nproc*1000)) +'MB'
sh_s=sh_s+"\n#SBATCH --job-name="+inp_file+"\n#SBATCH --ntasks-per-node=1"
sh_s=sh_s+"\n#SBATCH --cpus-per-task="+str(nproc)+'\n#SBATCH --account='+ args.abridges +'\n\n'
sh_s=sh_s+"module load gaussian\n"
sh_s=sh_s+"source $g16root/g16/bsd/g16.login\n"
sh_s=sh_s+"set echo\n"
# this sentence also sets the scratch directory as the predefined $LOCAL in Bridges2
sh_s=sh_s+"setenv GAUSS_SCRDIR $LOCAL\n"
sh_s=sh_s+"setenv OMP_NUM_THREADS $SLURM_CPUS_ON_NODE\n"
sh_s=sh_s+"cd $SLURM_SUBMIT_DIR\n"
sh_s=sh_s+"set JOBNAME="+inp_file+"\n"
sh_s=sh_s+"set INPUT=${JOBNAME}.com\n\n"
sh_s=sh_s+"srun g16 < $INPUT > $JOBNAME.log\n\n"
sh_s=sh_s+"echo 'Gaussian Job finished or failed (Good luck!!)'\n"
        if args.chk:
            # the csh script never defines $Homepath, so copy back to the submit directory
            sh_s=sh_s+"cp $GAUSS_SCRDIR/*.chk $SLURM_SUBMIT_DIR\n"
        if args.wfn:
            sh_s=sh_s+"cp $GAUSS_SCRDIR/*.wfn $SLURM_SUBMIT_DIR\n"
sh_file=open(inp_file+".sh","w")
sh_file.write(sh_s)
sh_file.close()
else:
print('Specify your cluster with --cluster (i.e. --cluster comet or --cluster bridges)')
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument(nargs = '*', dest = 'input_file')
parser.add_argument('-t', type=str, default="47:59:59", help='Walltime for job in format hh:mm:ss')
parser.add_argument('--cluster', type=str, default='', help='Cluster used (comet or bridges)')
parser.add_argument('--chk', action="store_true", default=False, help='Copy .chk files into your home directory after jobs are done')
parser.add_argument('--wfn', action="store_true", default=False, help='Copy .wfn files into your home directory after jobs are done')
parser.add_argument('--acomet', type=str, default='cst129',
help='Account to use for job in Comet')
parser.add_argument('--abridges', type=str, default='che180035p',
help='Account to use for job in Bridges')
parser.add_argument('--pcomet', choices=PARTITION_LIST_COMET, default=PARTITION_LIST_COMET[0],
help='Partition used in Comet')
parser.add_argument('--pbridges', choices=PARTITION_LIST_BRIDGES, default=PARTITION_LIST_BRIDGES[0],
help='Partition used in Bridges')
args = parser.parse_args()
for file in args.input_file:
# This part recognizes the amount of CPUs for the job specified in the input files
nproc = 1
mem = 60
f=open(file,"r")
for line in f.readlines()[:15]:
if line.lower().find('%nproc') > -1:
nproc = int(line.split('=')[1])
if line.lower().find('%mem') > -1:
mem = line.lower().split('=')[1]
mem = int(mem.split('gb')[0])
f.close()
prepare_sh(file.split(".com")[0],nproc,mem,args)
if nproc == 1:
print("submitting job: "+file.split(".com")[0]+" with 1 processor.")
elif nproc > 1:
print("submitting job: "+file.split(".com")[0]+" with "+str(nproc)+" processors.")
else:
print("0 or negative number of processors specified in the input file.")
os.system("sbatch "+file.split(".com")[0]+".sh")
|
from threading import Lock
from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_socketio import SocketIO
import redis
async_mode = None
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
redis_store=redis.from_url(app.config.get('REDIS_URL'))
migrate = Migrate(app, db)
socketio = SocketIO(app, async_mode=async_mode)
thread = None
thread_lock = Lock()
from app import routes, models, socket
|
t = int(input())
while t > 0:
s = str(input())
if (min(s.count('0'),s.count('1'))) % 2 == 1:
print("DA")
else:
print("NET")
t = t-1
|
import pytest
"""
配合 -m 标记名 进行筛选执行用例
例如:pytest cases\test_标记.py -m finished
"""
@pytest.mark.smoke
@pytest.mark.finished
def test_func1():
assert 1 == 1
@pytest.mark.unfinished
def test_func2():
assert 1 != 1
@pytest.mark.wait
def test_func3():
assert 1 != 1
|
import unittest
from katas.kyu_6.reversed_words import reverseWords
class ReverseWordsTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(reverseWords(
'The greatest victory is that which requires no battle'),
'battle no requires which that is victory greatest The')
def test_equals_2(self):
self.assertEqual(reverseWords('hello world!'), 'world! hello')
|
#I pledge my honor that I have abided by the Stevens Honor System
def squareValues(values):
squared_values= int(values)**2
return squared_values
def main():
values= input("Enter a list of values separated by a comma:")
new_values=values.split(',')
for num in new_values:
print(squareValues(num))
main()
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class ExposuresBundleRoadway(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, name: str=None, roadtype: str=None, distance: float=None, aadt: float=None, speed: float=None, through_lanes: float=None): # noqa: E501
"""ExposuresBundleRoadway - a model defined in Swagger
:param name: The name of this ExposuresBundleRoadway. # noqa: E501
:type name: str
:param roadtype: The roadtype of this ExposuresBundleRoadway. # noqa: E501
:type roadtype: str
:param distance: The distance of this ExposuresBundleRoadway. # noqa: E501
:type distance: float
:param aadt: The aadt of this ExposuresBundleRoadway. # noqa: E501
:type aadt: float
:param speed: The speed of this ExposuresBundleRoadway. # noqa: E501
:type speed: float
:param through_lanes: The through_lanes of this ExposuresBundleRoadway. # noqa: E501
:type through_lanes: float
"""
self.swagger_types = {
'name': str,
'roadtype': str,
'distance': float,
'aadt': float,
'speed': float,
'through_lanes': float
}
self.attribute_map = {
'name': 'name',
'roadtype': 'roadtype',
'distance': 'distance',
'aadt': 'aadt',
'speed': 'speed',
'through_lanes': 'through_lanes'
}
self._name = name
self._roadtype = roadtype
self._distance = distance
self._aadt = aadt
self._speed = speed
self._through_lanes = through_lanes
@classmethod
def from_dict(cls, dikt) -> 'ExposuresBundleRoadway':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The ExposuresBundle_roadway of this ExposuresBundleRoadway. # noqa: E501
:rtype: ExposuresBundleRoadway
"""
return util.deserialize_model(dikt, cls)
@property
def name(self) -> str:
"""Gets the name of this ExposuresBundleRoadway.
:return: The name of this ExposuresBundleRoadway.
:rtype: str
"""
return self._name
@name.setter
def name(self, name: str):
"""Sets the name of this ExposuresBundleRoadway.
:param name: The name of this ExposuresBundleRoadway.
:type name: str
"""
self._name = name
@property
def roadtype(self) -> str:
"""Gets the roadtype of this ExposuresBundleRoadway.
:return: The roadtype of this ExposuresBundleRoadway.
:rtype: str
"""
return self._roadtype
@roadtype.setter
def roadtype(self, roadtype: str):
"""Sets the roadtype of this ExposuresBundleRoadway.
:param roadtype: The roadtype of this ExposuresBundleRoadway.
:type roadtype: str
"""
self._roadtype = roadtype
@property
def distance(self) -> float:
"""Gets the distance of this ExposuresBundleRoadway.
:return: The distance of this ExposuresBundleRoadway.
:rtype: float
"""
return self._distance
@distance.setter
def distance(self, distance: float):
"""Sets the distance of this ExposuresBundleRoadway.
:param distance: The distance of this ExposuresBundleRoadway.
:type distance: float
"""
self._distance = distance
@property
def aadt(self) -> float:
"""Gets the aadt of this ExposuresBundleRoadway.
:return: The aadt of this ExposuresBundleRoadway.
:rtype: float
"""
return self._aadt
@aadt.setter
def aadt(self, aadt: float):
"""Sets the aadt of this ExposuresBundleRoadway.
:param aadt: The aadt of this ExposuresBundleRoadway.
:type aadt: float
"""
self._aadt = aadt
@property
def speed(self) -> float:
"""Gets the speed of this ExposuresBundleRoadway.
:return: The speed of this ExposuresBundleRoadway.
:rtype: float
"""
return self._speed
@speed.setter
def speed(self, speed: float):
"""Sets the speed of this ExposuresBundleRoadway.
:param speed: The speed of this ExposuresBundleRoadway.
:type speed: float
"""
self._speed = speed
@property
def through_lanes(self) -> float:
"""Gets the through_lanes of this ExposuresBundleRoadway.
:return: The through_lanes of this ExposuresBundleRoadway.
:rtype: float
"""
return self._through_lanes
@through_lanes.setter
def through_lanes(self, through_lanes: float):
"""Sets the through_lanes of this ExposuresBundleRoadway.
:param through_lanes: The through_lanes of this ExposuresBundleRoadway.
:type through_lanes: float
"""
self._through_lanes = through_lanes
|
from flask import current_app
class Holder:
def __str__(self):
return '{}'
def __call__(self, name):
return self
class CacheKey:
HOLDER = Holder()
def __init__(self, *args):
self.holder_count = len(list(filter(
lambda a: isinstance(a, Holder), args)))
self.args = [str(a) for a in args]
def format(self, *args):
if self.holder_count != len(args):
            raise ValueError('number of holders does not match number of arguments')
name = current_app.config.get('APP_NAME', None)
return ':'.join([name, *self.args] if name else self.args).format(*args)
def __str__(self):
return self.format()
def __repr__(self):
return ':'.join(self.args)
|
from pandas.core.frame import DataFrame
import pandas as pd
import numpy as np
import re
import calendar
import datetime
import time
SKIP_LINE_SET = {"*** USER INFORMATION MESSAGE", "A ZERO FREQUENCY"}
p_header = re.compile(r"(?P<label>.+(?=SUBCASE))(?P<subcase>SUBCASE\s\d+)")
re_date = re.compile(r'(?P<month>\w+)\s+(?P<day>\d{1,2}),\s+(?P<year>\d{4})')
re_version = re.compile(r'(?P<vname>[^\d]*)(?P<vdate>\d{1,2}\/\d{1,2}\/\d{1,2})')
re_page = re.compile(r'PAGE\s+(?P<page>\d+)$')
class F06Page:
def __init__(self, raw_lines=None, meta=None):
self.raw_lines = raw_lines
        self.meta = {} if meta is None else meta
self.parse_page_metadata_header()
def parse_page_metadata_header(self):
first_line = self.raw_lines[0]
date = re_date.search(first_line)
self.meta['run-date'] = datetime.date(
int(date.group('year').strip()),
list(calendar.month_name).index(date.group('month').strip().title()),
int(date.group('day').strip())
)
version = re_version.search(first_line)
self.meta['run-version-name'] = version.group('vname').strip()
self.meta['run-version-date'] = datetime.date(
*time.strptime(
version.group('vdate').strip(),
'%m/%d/%y')[:3]
)
self.meta['page'] = int(re_page.search(first_line).group('page'))
def find_tabular_line_range(lines, shift):
k = len(lines)
    j = shift  # line just after the data labels
    while j < k:  # the first char on the final line of a page is 1
l = lines[j]
if _check_skip_lines(l) or l.strip() == '':
break
j += 1
return (shift, j)
def extract_tabulated_data(lines):
data = []
for line in lines:
entries = line.split()
inner_data = []
for entry in entries:
try:
e = float(entry)
except ValueError:
e = np.nan
finally:
inner_data.append(e)
data.append(inner_data)
return data
def parse_label_subcase(line):
res = p_header.search(line[1:])
label = res.group('label').strip()
subcase = res.group('subcase').replace('SUBCASE', '').strip()
return (label, int(subcase))
def parse_text_value(value):
try:
return int(value)
except ValueError:
try:
return float(value)
except ValueError:
return value
def _check_skip_lines(line):
return any(map(lambda k: k in line, SKIP_LINE_SET))
# def read_and_concat_f06s(case_files, labels, label_name="THETA"):
#
# if len(labels) != len(case_files):
# raise Exception("Collections should be of same size.")
#
# df_results = []
#
# for i, fn in enumerate(case_files):
# print("Reading... {}".format(fn))
# df_data = read_f06(fn)
# df_results.append(
# pd.concat(
# { labels[i]: df_data },
# names=[label_name]
# )
# )
#
# return pd.concat(df_results)
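# Quick illustrative check of the tabular helpers (added; the sample line is made up):
if __name__ == '__main__':
    sample = ["  1  2.5  3.0E+01  N/A"]
    print(extract_tabulated_data(sample))  # [[1.0, 2.5, 30.0, nan]]
    print(parse_text_value("42"), parse_text_value("3.14"), parse_text_value("abc"))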
|
import SimpleXML
import base64
import socket
host = "192.168.10.93"
# host="184.183.150.164"
port = 5901
bold_xml = SimpleXML.SimpleXML()
bold_xml.device_id = 909090
bold_xml.event = "*BA"
clip = open("test.mpg", "rb").read()
bold_xml.clip = base64.b64encode(clip).decode("ascii")
bold_xml.bin_size = len(clip)
# print(bold_xml.to_bold())
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
sock.send(bold_xml.to_bold())
print(sock.recv(1024))
sock.close()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def to_range(images, min_value=0.0, max_value=1.0, dtype=None):
"""Transform images from [-1.0, 1.0] to [min_value, max_value] of dtype."""
assert np.min(images) >= -1.0 - 1e-5 and np.max(images) <= 1.0 + 1e-5 \
and (images.dtype == np.float32 or images.dtype == np.float64), \
('The input images should be float64(32) '
'and in the range of [-1.0, 1.0]!')
if dtype is None:
dtype = images.dtype
return ((images + 1.) / 2. * (max_value - min_value) +
min_value).astype(dtype)
def uint2im(images):
"""Transform images from uint8 to [-1.0, 1.0] of float64."""
assert images.dtype == np.uint8, 'The input images type should be uint8!'
return images / 127.5 - 1.0
def float2im(images):
"""Transform images from [0, 1.0] to [-1.0, 1.0]."""
assert np.min(images) >= 0.0 - 1e-5 and np.max(images) <= 1.0 + 1e-5 \
and (images.dtype == np.float32 or images.dtype == np.float64), \
'The input images should be float64(32) and in the range of [0.0, 1.0]!'
return images * 2 - 1.0
def im2uint(images):
"""Transform images from [-1.0, 1.0] to uint8."""
return to_range(images, 0, 255, np.uint8)
def im2float(images):
"""Transform images from [-1.0, 1.0] to [0.0, 1.0]."""
return to_range(images, 0.0, 1.0)
def float2uint(images):
"""Transform images from [0, 1.0] to uint8."""
assert np.min(images) >= 0.0 - 1e-5 and np.max(images) <= 1.0 + 1e-5 \
and (images.dtype == np.float32 or images.dtype == np.float64), \
'The input images should be float64(32) and in the range of [0.0, 1.0]!'
return (images * 255).astype(np.uint8)
def uint2float(images):
"""Transform images from uint8 to [0.0, 1.0] of float64."""
assert images.dtype == np.uint8, 'The input images type should be uint8!'
return images / 255.0
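# Round-trip sanity check (added, illustrative):
if __name__ == '__main__':
    img = np.array([[0, 128, 255]], dtype=np.uint8)
    f = uint2im(img)   # floats in [-1.0, 1.0]
    print(f)           # [[-1.  0.00392157  1.]]
    print(im2uint(f))  # back to uint8, up to float rounding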
|
class MaxHeap:
def __init__(self):
self.arr = [None]
def push(self, val):
def need_swap(ind):
parent = ind // 2
if self.arr[ind] > self.arr[parent]:
return True
else:
return False
self.arr.append(val)
curr = len(self.arr) - 1
while curr > 1:
if need_swap(curr):
parent = curr // 2
self.arr[curr], self.arr[parent] = self.arr[parent], self.arr[curr]
curr = parent
else:
break
def pop(self):
def need_swap(ind, child):
if self.arr[child] > self.arr[ind]:
return True
else:
return False
def swap(ind0, ind1):
self.arr[ind0], self.arr[ind1] = self.arr[ind1], self.arr[ind0]
if self.is_empty():
return None
val = self.arr[1]
self.arr[1] = self.arr[-1]
del self.arr[-1]
curr = 1
while curr < len(self.arr):
left = curr * 2
right = curr * 2 + 1
            if left < len(self.arr) and right < len(self.arr):  # the left bound check could be dropped: in a complete binary tree, a right child implies a left child
if self.arr[left] > self.arr[right]:
if need_swap(curr, left):
swap(curr, left)
curr = left
else:
break
else:
if need_swap(curr, right):
swap(curr, right)
curr = right
else:
break
            elif left < len(self.arr):  # could simply break here; kept explicit for readability
if need_swap(curr, left):
swap(curr, left)
curr = left
else:
break
else:
break
return val
def peek(self):
return self.arr[1] if not self.is_empty() else None
def is_empty(self):
return len(self.arr) == 1
heap = MaxHeap()
data = [1,6,32,14,60,2,5,66]
for elem in data:
heap.push(elem)
while not heap.is_empty():
print(heap.pop())
|
import numpy as np
'''
Inputs
------
* mu: The mean of the gaussian fit
* sigma: The covariance of the gaussian fit
Outputs
-------
* mu: The center of the ellipse
* a: The semi-major axis length
* b: The semi-minor axis length
* theta: The ellipse orientation
'''
def cov2ell(mu, sigma):
xy = mu
vals, vecs = np.linalg.eigh(sigma)
x, y = vecs[:, 0]
angle = np.degrees(np.arctan2(y, x))
width, height = 2 * np.sqrt(vals)
return xy, width, height, angle
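# Illustrative usage with a made-up 2x2 Gaussian fit (added):
if __name__ == '__main__':
    mu = np.array([0.0, 0.0])
    sigma = np.array([[3.0, 1.0], [1.0, 2.0]])
    center, width, height, angle = cov2ell(mu, sigma)
    print(center, width, height, angle)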
|
"""
This is an example of a block comment.
Python supports both block comments and line comments.
Line comments begin with #
"""
# Set a name variable
myName = 'Andy Fischoff'
print(myName) # will print Andy Fischoff
|
#!/usr/bin/python
# -*- coding: cp936 -*-
import sqlite3
""" UpdateLeftAccount.py
对于 leftaccount 表格增删查改的操作
"""
class updateLeftAccount:
def update(leftAccounts):
# 插入leftaccount, 存在的就replace
with sqlite3.connect('C:\sqlite\db\hxdata.db') as db:
insert_template = "INSERT OR REPLACE INTO leftaccount " \
"(khcode, khdate, usrnameshort, usrname, khusrmobile, lddepid,\
lddepname, marketperid, marketpername, marketpertype, marketpermobile, marketdepname, marketdepid ) " \
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);"
select_template = "SELECT khcode, khdate, usrnameshort, usrname, khusrmobile, lddepid,\
lddepname, marketperid, marketpername, marketpertype, marketpermobile, marketdepname, marketdepid FROM account WHERE khcode = ?;"
print('update', leftAccounts)
for leftaccount in leftAccounts:
# print(len(leftper))
rows = db.execute(select_template, [str(leftaccount).strip(), ])
# consume sql result to avoid ERROR: Cursor needed to be reset because of commit/rollback and can no longer be fetched from
rows = list(rows)
for khcode, khdate, usrnameshort, usrname, khusrmobile, lddepid,\
lddepname, marketperid, marketpername, marketpertype, marketpermobile, marketdepname, marketdepid in rows:
try:
                    print('Inserting leftaccount row')
db.execute(insert_template, [khcode, khdate, usrnameshort, usrname, khusrmobile, lddepid,\
lddepname, marketperid, marketpername, marketpertype, marketpermobile, marketdepname, marketdepid])
                except sqlite3.Error as e:
                    print('Insert failed:', e)
db.rollback()
else:
db.commit()
#updateLeftAccount.update([316000032159, 326000020573, 326000020589, 323000031986, 378000011703])
|
'''
This file contains my implementation of a solution to the second problem: Files and Directories.
The functions os.listdir() and os.walk() were not used in this solution due to
the requirement that programs such as ls and similar may not be used.
@author: Naveen Neelakandan
'''
#!/usr/bin/python3
import datetime
import glob
import os
import sys
class File:
"""
Simple class that represents a file on the system.
Used to gather needed information about a file.
Attributes:
        file_name: The name of the file (without the full path)
file_size: An integer count of the size of the file in bytes
file_last_modified: A datetime object representing the time the file was last modified
"""
def __init__(self, name, size, time_modified):
self.file_name = name
self.file_size = size
self.file_last_modified = time_modified
def __eq__(self, other):
return (self.file_size == other.file_size)
def __lt__(self, other):
return (self.file_size < other.file_size)
def __str__(self):
return "\t%d\t%s\t%s" % (self.file_size, str(self.file_last_modified), self.file_name)
def printFilesRecursively(directory_path):
"""
Prints the files in the given top-level directory and all sub-directories
Arguments:
        directory_path: The relative path to the top-level directory.
Returns:
None
Raises:
None
"""
print "\nDirectory <%s>\n" % directory_path
# Glob for all sub-directories and files
subdirectories = []
files = []
globPattern = directory_path + '/*'
for path in glob.glob(globPattern):
if os.path.isdir(path):
subdirectories.append(path)
else:
file_name = os.path.basename(path)
file_size = os.path.getsize(path)
file_last_modified = datetime.datetime.fromtimestamp(os.path.getmtime(path))
file = File(file_name, file_size, file_last_modified)
files.append(file)
# Print all files in current directory in descending order of size
for file in sorted(files, reverse=True):
print file
# Recursive call on all sub-directories
for subdirectory in subdirectories:
printFilesRecursively(subdirectory)
if __name__ == "__main__":
# Validate correct number of arguments
if len(sys.argv) != 2:
print "Incorrect number of arguments"
sys.exit(1)
    # Try with higher/lower level dirs
directory_path = sys.argv[1]
# Due to security issues, validate that the directory path is not at a higher level
if directory_path.startswith('../'):
print "Accessing a file at a higher directory is not allowed"
sys.exit(1)
# Check if given directory path input is valid
if not os.path.isdir(directory_path):
print "The given directory path is invalid"
sys.exit(1)
printFilesRecursively(directory_path)
|
import hashlib
from tkinter import *
from array import *
znaki = array("u")
formated = array("u")
indx = 0
cindx = 0
strs = ["" for x in range(11)]
def fastH(string, algo):
    # return the hex digest of `string` under the requested algorithm
    # (parameter renamed from `hash`, which shadowed the builtin)
    if algo == "sha256":
        return hashlib.sha256(string.encode()).hexdigest()
    if algo == "md5":
        return hashlib.md5(string.encode()).hexdigest()
h = fastH("Pain", "sha256")
for i in range(len(h)):
znaki.append(h[i])
for y in range(11):
for x in range(6):
        if 0 <= indx <= 63:
strs[y] += znaki[indx]
indx+=1
else:
strs[y] += "0"
size = 100
window = Tk()
content = Canvas(window, width=size*11, height= size*6)
content.pack()
for i in range(11):
    c = "#" + strs[cindx]  # each 6-hex-digit chunk of the digest becomes a color
    content.create_rectangle(i*size, 0, (i + 1)*size, size*6, fill=c)  # column spanning the canvas height
    cindx += 1
mainloop()
|
#!/usr/bin/python3
import configparser
import os
import datetime
from git import Repo
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
print(bcolors.OKBLUE + '''
%#///*#
%,,,,,,,,&
&%,,,,,,,,%
&#,,,,,.(
%,,,,,,,
%,,,,,,,,,(% *(/*,,,,,,,,,,,*/
%,,,,,,,,,,# #(/,,,,,,,,,,,,,,,,,,,,,.*#
&*,,,,,,,,,% %(,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
/&%(/////(,,,,,,,,, (.,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,(&
%*.,,,,,,,,,,,,,,,,,,,,,( (,,,,,,,,,,,,,,**%% %,*,,,,,,,,,,,,,.,
&(,,,,,,,,,,,,,,,,,,,,,,,./ (,,,,,,,,,,,%( ,,,,,,,,,,,,.*
#.,,,,,,,,,,,,,,,,,,,,,,,,,,,,&(,,,,,,,/( %,,,,,,,,,,*#
%(,,,,,,,,,,,,,,&(*,,,,,,,,,,,,,,.* ,,, (*,,,,,,,,,*(
%,,,,,,,,,,,%% &(,,,,,,,,,,,,,,,,,,/ # ,,,,,,,,,*(
%,,,,,,,,,,(& #,,,,,,,,,,,,,,,,,,,,% %*,,,,,,,,,&
#*,,,,,,,,*& ,.,,,,,,,,,,,,,,,,,,,,,.. ,.,,,,,,,,,./
(,,,,,,,,,#& , ,,,,,,,,,(&,.,,,,,,,,,,,,# #/,,,,,,,,,,.
(,,,,,,,,,,& (.,,,,,,,,,*( %,,,,,,,,,,,,, ,( &* .,,,,,,,,,,,,(%
(*,,,,,,,,,,,,,,,,,,,,,,,, , &,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,%
%.,,,,,,,,,,,,,,,,,,,,,,% &(,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,.(
*,,,,,,,,,,,,,,,,,,,*% , ,,,,,,,,,,,,,,,,,,,,,,,,,,&*
&/ ,,,,,,,,,,,./# &.#,,,,,,,,,,,,,,,,,.%#&
&/**/ *%#%######(
''')
config = configparser.ConfigParser()
root_dir = os.getcwd() + '/..'
now = datetime.datetime.now()
timeformat = "%m/%d/%Y, %H:%M:%S"
if not os.path.exists(f"{root_dir}/.user.conf"):
with open(f"{root_dir}/.user.conf", 'w') as user_conf:
user_conf.write('[USER]\n')
user_conf.write('auto_update = True\n')
user_conf.write('update_every_x_days = 1')
if not os.path.exists(f"{root_dir}/.gitignore"):
with open(f"{root_dir}/.gitignore", 'w') as gitignore:
gitignore.write('.user.conf')
config.read([f"{root_dir}/.global.conf", f"{root_dir}/.user.conf"])
last_update = config.get('USER', 'last_update', fallback=None)
update_every_x_days = config.get('USER', 'update_every_x_days', fallback=1)
# getboolean is needed here: config.get returns the string 'False', which is truthy.
# Also guard against a missing last_update (first run), which used to crash strptime.
if not config.getboolean('USER', 'auto_update', fallback=False) or \
        (last_update and
         datetime.datetime.strptime(last_update, timeformat) > now - datetime.timedelta(days=int(update_every_x_days))):
    exit()
print(bcolors.WARNING + "Scripts Updating")
repo = Repo(root_dir)
master_branch = config.get('GLOBAL', 'master_branch', fallback='master')
current_branch = repo.active_branch
on_master = current_branch.name == master_branch
head = repo.head.commit.tree
save_stash = bool(repo.git.diff(head))  # a non-empty diff means there is uncommitted work to stash
if save_stash:
repo.git.stash('save')
if not on_master:
repo.git.checkout(master_branch)
repo.remotes.origin.pull()
if not on_master:
repo.git.checkout(current_branch.name)
repo.git.merge(master_branch)
if save_stash:
repo.git.stash('pop')
config.set('USER', 'last_update', now.strftime(timeformat))
with open(f"{root_dir}/.user.conf", 'w') as config_file:
config.write(config_file)
print(bcolors.OKGREEN + "Scripts Updated")
|
from google.appengine.api import users
from google.appengine.api import mail
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from app.model.accounts import Accounts
from app.model.invite import Invite
from app.model.invites import Invites
from app.forms.invite import InviteForm
from app.system.email import Mailer
from app.webapp.context import Context
# Called when someone requests an invite
class InviteRequestHandler(webapp.RequestHandler):
def post(self, key=""):
context = Context(self.request)
form = InviteForm(data=self.request.POST)
if form.is_valid():
invite = form.save()
Mailer.sendInvitationRequestConfirmation(invite.email, invite.name)
msg = """
Hi %s,<br />
<br />
            Thank you for the My-Trips invite request. We've sent you an email with the details.<br />
<br />
Thanks,<br />
<br />
The My-Trips team
""" % (invite.name)
self.response.out.write(msg)
else:
self.response.set_status(500)
            errors = form.errors
            self.response.out.write(' - '.join(errors[field][0] for field in errors))
# Called when you invite a friend
class InviteInviteHandler(webapp.RequestHandler):
def post(self, key=""):
context = Context(self.request)
# 1. Get name and email
# 2. Create Invite record
# 3. Store who initiated the invite
# 4. Send out email telling the 'friend' he/she is invited by ...
# 5. Email has normal invite code to verify
form = InviteForm(data=self.request.POST)
if form.is_valid():
invite = form.save(commit=False)
invite.status = 2
invite.by = context.account
invite.put()
url = self.request.application_url + "/invite/accept/" + str(invite.key())
Mailer.sendInvitationByFriend(invite.email, invite.name, url, context.account.name)
self.response.out.write('Invite someone else')
else:
self.response.set_status(500)
            errors = form.errors
            self.response.out.write(' - '.join(errors[field][0] for field in errors))
# Called when the admin decides to send out an invite
class InviteSendHandler(webapp.RequestHandler):
def get(self, key=""):
context = Context(self.request)
if not context.isAuthenticated:
return context.authenticate(self.response)
# get the invitation
invite = Invites.loadOne(key)
if invite:
# Prepare and email with url to accept the invitation
url = self.request.application_url + "/invite/accept/" + str(invite.key())
Mailer.sendInvitation(invite.email, invite.name, url)
invite.status = 2
invite.put()
# redirect to Thank you..
self.redirect('/management/invites')
# Called when someone accepts the invite and becomes a member
class InviteAcceptHandler(webapp.RequestHandler):
def get(self, key=""):
context = Context(self.request)
        # get the user and the invitation
user = users.get_current_user()
invite = Invites.loadOne(key)
# Must handle errors if the invite is not found or is not valid
if invite and user:
# check for valid invite
if invite.status == 2: # Invited
account = Accounts.loadByID(user.user_id(), True)
if account:
invite.status = 9
invite.put()
self.redirect('/settings')
template_values = {
'context': context,
}
self.response.out.write(template.render(context.template('core-invite-error.html'), template_values))
# Called when the admin decides to delete an invite
class InviteDeleteHandler(webapp.RequestHandler):
def get(self, key=""):
context = Context(self.request)
if not context.isAuthenticated:
return context.authenticate(self.response)
# get the invitation
invite = Invites.loadOne(key)
if invite:
invite.delete()
# redirect to Thank you..
self.redirect('/management/invites')
|
#!/usr/bin/env python3
import sys, os
import hmac, hashlib
sys.path += [ os.path.join(os.path.split(__file__)[0], 'libs') ]
from intelhex import IntelHex16bit
# Plain Unsafe ops (op = bytes = word {little endian})
# ----------------------------------------------------
# ret = 0x08 0x95 = 0x9508
# reti = 0x18 0x95 = 0x9518
# ijmp = 0x09 0x94 = 0x9409
# icall = 0x09 0x95 = 0x9509
# elpm = 0xD8 0x95 = 0x95D8
# Unsafe ops with arguments
# -------------------------
# elpm Rd,Z = op & (0xFE0F) == 0x9006
# elpm Rd,Z+ = op & (0xFE0F) == 0x9007
# Conditionally unsafe ops (depending on relative target address)
# ---------------------------------------------------------------
# brbs s,k = op & (0xFC00) == 0xF000
# brbc s,k = op & (0xFC00) == 0xF400
# rjmp k = op & (0xF000) == 0xC000
# rcall k = op & (0xF000) == 0xD000
# Two word ops
# ---------------------------
# jmp = 0x0C 0x94 = 0x940C
# call = 0x0E 0x94 = 0x940E
# Opcodes are represented little-endian everywhere (toolchain/datasheet/etc)
# Unsafe ops (should never appear anywhere except as 2nd word in long calls)
unsafe_ops = [int("0x9508", 16), int("0x9518", 16), int("0x9409", 16),
int("0x9509", 16), int("0x95D8",16)]
elpm_mask = int("0xFE0F", 16)
elpm_ops = [int("0x9006", 16), int("0x9007", 16)]
# Conditionally unsafe ops (just log when they appear as 2nd word in long call)
branch_ops_mask = int("0xFC00", 16)
branch_ops = [int("0xF000", 16), int("0xF400", 16)]
rel_ops_mask = int("0xF000", 16)
rel_ops = [int("0xC000", 16), int("0xD000", 16)]
# Two word ops
two_word_ops = [int("0x940C", 16), int("0x940E", 16)]
# Last page of app is reserved for metadata. One page is 256 bytes.
# !! Caution !! This is a word address (16bit) and is used with IntelHex16bit
# class
#metadata_offset = int("0xFD00", 16)//2 # 1Kb bootloader
#metadata_offset = int("0xFB00", 16)//2 # 2Kb bootloader
metadata_offset = int("0xF700", 16)//2 # 4Kb bootloader
#metadata_offset = int("0xEF00", 16)//2 # 8Kb bootloader
# 160 bit key
key = b'\x6e\x26\x88\x6e\x4e\x07\x07\xe1\xb3\x0f\x24\x16\x0e\x99\xb9\x12\xe4\x61\xc4\x24'
def main(argv):
if len(argv) != 3:
print('hex_patch_metadata.py <ihexfile> <datastart> <dataend>')
sys.exit(2)
# Check if hexfile exists
hexfile = argv[0]
if not os.path.isfile(hexfile):
print("ERROR: File not found:", hexfile)
sys.exit(2)
# Parse data offset
try:
datastart = int(argv[1], 16) # in hex, first byte of .data. == size .text
dataend = int(argv[2], 16) # in hex, end of .data last byte not included. == total size .text + .data
    except ValueError:
print("ERROR: Offsets not valid:", argv[1], argv[2])
sys.exit(2)
# Start parsing ihex
ih = IntelHex16bit(hexfile)
# Look for opcodes that __can__ be unsafe as 2nd word and log them
unsafe_2ndword = list()
prev_op_long = 0
for addr in range(datastart//2):
if ih[addr] in two_word_ops and not prev_op_long:
prev_op_long = 1
elif prev_op_long:
prev_op_long = 0
if ( (ih[addr] & branch_ops_mask) in branch_ops or
(ih[addr] & rel_ops_mask) in rel_ops or
(ih[addr] & elpm_mask) in elpm_ops or
ih[addr] in unsafe_ops or
ih[addr] in two_word_ops ):
print("Unsafe 2nd word, adding to list!")
print(ih[addr])
print(addr)
unsafe_2ndword.append(addr)
# Start patching hex
ih[metadata_offset] = dataend # total .text + .data size
ih[metadata_offset + 1] = datastart # .data start address == .text size
ih[metadata_offset + 2] = len(unsafe_2ndword)
for idx, addr in enumerate(unsafe_2ndword):
ih[metadata_offset+3+idx] = addr
# Calculate word size of metadata section
meta_size = 3 # base size
meta_size += len(unsafe_2ndword)
# Calculate hmac
hmac_gen = hmac.new(key, None, hashlib.sha1)
hmac_gen.update(ih.tobinstr(0,dataend-1)) # tobinstr uses byteaddr, even on an IntelHex16 object
hmac_gen.update(ih.tobinstr(metadata_offset*2, (metadata_offset + meta_size)*2 - 1))
    print(hmac_gen.hexdigest())
# Add hmac after regular metadata (frombytes uses byteaddr, even on IntelHex16 objects)
ih.frombytes(hmac_gen.digest(), (metadata_offset + meta_size)*2)
# Write out file
ih.write_hex_file(hexfile)
if __name__ == "__main__":
main(sys.argv[1:])
|
from .rng import app
|
from colors import *
# app config
FPS = 60
WIDTH = 600
HEIGHT = 600
SIZE = (WIDTH, HEIGHT)
BG_COLOR = BLACK
# consts
INFINITY = 10e10
COEFF = 50 # 3d
# player config
START_POS = (0, 0)
START_ANGLE = 0
ANGLE_SPEED = 1
MOVEMENT_SPEED = 2
# object settings
NLINES = 10
NPOINTS = 0
NCIRCLES = 5
RPOINT = 3
RCIRCLE = 50
# other
MAX_LENGTH = (SIZE[0] ** 2 + SIZE[1] ** 2) ** 0.5
FOV = 60
NRAYS = 120
DANGLE = FOV / NRAYS
HALF_HEIGHT = HEIGHT // 2 # 3d
K = WIDTH // NRAYS # 3d
WALLS_COLOR = RED # 3d
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from fpgen import HTML
class NonHTML(HTML): #{
def __init__(self, ifile, ofile, d, letter):
HTML.__init__(self, ifile, ofile, d, letter)
# No page numbers on any non-html
def getPageNumberCSS(self):
return [
"[105] .pageno { display:none; }"
]
# No page numbers shown, but still need to have the link target
def showPageNumber(self, pn, displayPN):
return f"<a name='Page_{pn}' id='Page_{pn}'></a>"
# No margins on any non-html
def getMargins(self):
return "0", "0"
class Kindle(NonHTML): #{
def __init__(self, ifile, ofile, d):
NonHTML.__init__(self, ifile, ofile, d, 'k')
# On Kindle, leaders, at least the way we do them, don't work, so
# never do them.
def getLeaderName(self, col):
return None
# kindle&epub ragged right
def getTextAlignment(self):
return "left"
# Kindle doesn't appear to pay attention to the text-align on the <td>
# So we stick in an extra <div>, with the text-align on that.
def tripleAlign(self, style, id, left, center, right):
# This works on the old kindle previewer, but not the new.
#return """
# <div class='center' {} {}>
# <table border="0" cellpadding="4" cellspacing="0" summary="triple" width="100%">
# <tr>
# <td><div style='text-align:left;'>{}</div></td>
# <td><div style='text-align:center;'>{}</div></td>
# <td><div style='text-align:right;'>{}</div></td>
# </tr>
# </table>
# </div>
#""".format(style, id, left, center, right)
# This seems to work everywhere. Wish we could replace the 1.3em with
# the exact value, which is font dependent. Note if you were using for
# example superscripts and font size changes, this will not adjust
# line heights!
return f"""
<div {id} {style}>
<div style='height:1.3em; margin-top:0; margin-bottom:0; visibility:hidden;'>
<p style='text-align:left; text-indent:0; margin-top:0; margin-bottom:0'>
x
</p>
</div>
<div style='height:1.3em; margin-top:-1.3em;margin-bottom:0'>
<p style='text-align:left; margin-top:0;margin-bottom:0'>
{left}
</p>
</div>
<div style='height:1.3em; margin-top:-1.3em;margin-bottom:0'>
<p style='text-align:right; margin-top:0;margin-bottom:0'>
{right}
</p>
</div>
<div style='height:1.3em; margin-top:-1.3em;margin-bottom:0;'>
<p style='text-align:center; margin-top:0;margin-bottom:0'>
{center}
</p>
</div>
</div>
""";
# Floating dropcaps aren't particularly well aligned on kindles, so don't
# do anything special with them.
def getDropcapCSS(self):
return "[3333] .dropcap { }"
#}
class EPub(NonHTML): #{
def __init__(self, ifile, ofile, d):
NonHTML.__init__(self, ifile, ofile, d, 'e')
# epub ragged right
def getTextAlignment(self):
return "left"
#}
class PDF(NonHTML): #{
def __init__(self, ifile, ofile, d):
NonHTML.__init__(self, ifile, ofile, d, 'p')
#}
|
#!/usr/bin/python3
import sys
import numpy as np
import pandas as pd
import pathlib
from vulkan import *
import PyQt5
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QVBoxLayout, QHBoxLayout, QLabel
from PyQt5.QtCore import QAbstractTableModel, Qt
# %%
_code_git_version="e5d26e3ab14d31f021d2d9bc385c481d05bd5acf"
_code_repository="https://github.com/plops/cl-py-generator/tree/master/example/21_vulkan_qt/source/run_00_show.py"
_code_generation_time="23:53:07 of Monday, 2020-06-15 (GMT+1)"
validation_layers=["VK_LAYER_KHRONOS_validation"]
enable_validation_layers=True
class InstanceProcAddr(object):
def __init__(self, func):
self.__func=func
def __call__(self, *args, **kwargs):
func_name=self.__func.__name__
func=vkGetInstanceProcAddr(args[0], func_name)
if ( func ):
return func(*args, **kwargs)
else:
return VK_ERROR_EXTENSION_NOT_PRESENT
@InstanceProcAddr
def vkCreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator):
pass
@InstanceProcAddr
def vkDestroyDebugReportCallbackEXT(instance, pCreateInfo, pAllocator):
pass
def debug_callback(*args):
print("debug: {} {}".format(args[5], args[6]))
return 0
app=QApplication([""])
win=QWidget()
appinfo=VkApplicationInfo(pApplicationName="python vk", applicationVersion=VK_MAKE_VERSION(1, 0, 0), pEngineName="pyvulkan", engineVersion=VK_MAKE_VERSION(1, 0, 0), apiVersion=VK_API_VERSION)
extensions=[e.extensionName for e in vkEnumerateInstanceExtensionProperties(None)]
# the enumerated list is immediately overridden with the minimal set this demo requests
extensions=["VK_KHR_get_physical_device_properties2", "VK_KHR_get_surface_capabilities2", "VK_KHR_surface"]
instanceinfo=VkInstanceCreateInfo(pApplicationInfo=appinfo, enabledLayerCount=len(validation_layers) if enable_validation_layers else 0, ppEnabledLayerNames=validation_layers if enable_validation_layers else None, enabledExtensionCount=len(extensions), ppEnabledExtensionNames=extensions)
instance=vkCreateInstance(instanceinfo, None)
# setup debug callback
callback=None
win.show()
def cleanup():
global win, instance, callback
if ( callback ):
vkDestroyDebugReportCallbackEXT(instance, callback, None)
vkDestroyInstance(instance, None)
    del win
app.aboutToQuit.connect(cleanup)
def run():
sys.exit(app.exec_())
run()
|
'''
Original use of functions by Adam Bolton, 2009.
http://www.physics.utah.edu/~bolton/python_lens_demo/
'''
# Given a background source, this lenses it and plots both as output.
# The parameters for the lens can be changed, but for now it's a point source.
def lensed(gamp,gsig,gx,gy,gax,gpa,name,lamp=1.5,lsig=0.05,lx=0.,ly=0.,lax=1.,lpa=0.):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import cm
import lensdemo_funcs as ldf
import fitting_ellipse as fe
import matplotlib.patheffects as PathEffects
# Package some image display preferences in a dictionary object, for use below:
myargs = {'interpolation': 'nearest', 'origin': 'lower', 'cmap': cm.viridis}
lensargs = {'interpolation': 'nearest', 'origin': 'lower', 'cmap': cm.viridis}
# Make some x and y coordinate images:
    nx, ny = 501, 501
xhilo,yhilo = [-2.5, 2.5],[-2.5, 2.5]
x = (xhilo[1] - xhilo[0]) * np.outer(np.ones(ny), np.arange(nx)) / float(nx-1) + xhilo[0]
y = (yhilo[1] - yhilo[0]) * np.outer(np.arange(ny), np.ones(nx)) / float(ny-1) + yhilo[0]
# Set some Gaussian blob image parameters and pack them into an array:
g_amp = gamp # peak brightness value
g_sig = gsig # Gaussian "sigma" (i.e., size)
g_xcen = gx # x position of center
g_ycen = gy # y position of center
g_axrat = gax # minor-to-major axis ratio
g_pa = gpa # major-axis position angle (degrees) c.c.w. from x axis
gpar = np.asarray([g_amp, g_sig, g_xcen, g_ycen, g_axrat, g_pa])
def lens_object(lax,lamp=1.5,lsig=0.05,lx=0.,ly=0.,lpa=0.):
# Set some SIE lens-model parameters and pack them into an array:
l_amp = lamp # Einstein radius
l_xcen = lx # x position of center
l_ycen = ly # y position of center
l_axrat = lax # minor-to-major axis ratio
l_pa = lpa # major-axis position angle (degrees) c.c.w. from x axis
lpar = np.asarray([l_amp, l_xcen, l_ycen, l_axrat, l_pa])
lpar2 = np.asarray([l_amp, l_xcen, l_ycen, 2., l_pa]) # rax of 2.
lenspar = np.asarray([l_amp, lsig, l_xcen, l_ycen, l_axrat, l_pa])
# The following lines will plot the un-lensed and lensed images side by side:
(xg, yg) = ldf.sie_grad(x, y, lpar)
g_lensimage = ldf.gauss_2d(x-xg, y-yg, gpar)
lens_source = ldf.gauss_2d(x, y, lenspar)
lens_source[lens_source < 0.6] = np.nan
return g_lensimage,lens_source
# defining bkgd source and lensed source (for both rax)
g_image = ldf.gauss_2d(x, y, gpar)
glens1,lens1 = lens_object(1.)
glens2,lens2 = lens_object(2.)
plt.figure(figsize=(15,6))
gs1 = gridspec.GridSpec(1,3)
gs1.update(wspace=0.03)
cmap = plt.cm.viridis
# background source (no lens)
ax1 = plt.subplot(gs1[0])
im = ax1.imshow(g_image,**myargs,clim=(0.5,2.2)) # set to make the lens the same always
vmin, vmax = im.get_clim() # set to make the lens the same always
ax1.set_yticklabels([]); ax1.set_xticklabels([])
ax1.set_yticks([]); ax1.set_xticks([])
txt = ax1.text(20,457,'Background Source', size=15, color='w')
txt.set_path_effects([PathEffects.withStroke(linewidth=5, foreground='k')])
# background source lensed by point-like lens
ax2 = plt.subplot(gs1[1])
ax2.imshow(glens1,**myargs,clim=(vmin,vmax))
ax2.imshow(lens1,**lensargs,clim=(vmin,vmax))
cir = plt.Circle((250,250),150,fill=False,ls='--',color='C0')
ax2.add_artist(cir)
ax2.set_yticklabels([]); ax2.set_xticklabels([])
ax2.set_yticks([]); ax2.set_xticks([])
txt = ax2.text(20,457,'Point-like Lens', size=15, color='w')
txt.set_path_effects([PathEffects.withStroke(linewidth=5, foreground='k')])
# Listing parameters on ax1
txt = ax1.text(20,20,'amp:%s, sig:%s,\ncenter:(%s,%s),\naxrat:%s, pa:%s'\
%(gpar[0],gpar[1],gpar[2],gpar[3],gpar[4],gpar[5]), size=13, color='w')
txt.set_path_effects([PathEffects.withStroke(linewidth=5, foreground='k')])
# background source lensed by extended lens
ax3 = plt.subplot(gs1[2])
ax3.imshow(glens2,**myargs,clim=(vmin,vmax))
ax3.imshow(lens2,**lensargs,clim=(vmin,vmax))
cir = plt.Circle((250,250),150,fill=False,ls='--',color='C0')
ax3.add_artist(cir)
ax3.set_yticklabels([]); ax3.set_xticklabels([])
ax3.set_yticks([]); ax3.set_xticks([])
txt = ax3.text(20,457,'Extended Lens', size=15, color='w')
txt.set_path_effects([PathEffects.withStroke(linewidth=5, foreground='k')])
#plt.savefig('test.png',dpi=200) # useful for troubleshooting
plt.savefig('lens2-still%s.pdf'%(name),dpi=200)
plt.close('all')
if __name__ == "__main__":
import sys
lensed(float(sys.argv[1]),float(sys.argv[2]),float(sys.argv[3]),float(sys.argv[4]),\
float(sys.argv[5]),float(sys.argv[6]),str(sys.argv[7]))
|
from flask import Flask, redirect, url_for, render_template, request
app = Flask(__name__)
# Homepage
@app.route("/")
def home():
return render_template('index.html')
# Homepage re-route
@app.route("/index.html/")
def home_reroute():
return redirect(url_for("home"))
#Wheelbase page
@app.route("/steeringwheels.html")
def steer():
return render_template('steeringwheels.html')
#monitors
@app.route("/monitors.html")
def monitors():
return render_template('comingsoon.html')
#pc specs
@app.route("/PC.html")
def pc():
return render_template('PC.html')
#rigs
@app.route("/rig.html")
def rigs():
return render_template('comingsoon.html')
#vr
@app.route("/vr.html")
def vr():
return render_template('comingsoon.html')
#contact me
@app.route('/contact.html', methods=['post', 'get'])
def contact():
    message = ''
    if request.method == 'POST':
        name = request.form.get('Name')  # access the data inside
        email = request.form.get('Email')
        phone = request.form.get('Phone No.')
        msg = request.form.get('Message')
        print(msg)
        message = 'Sent'
    # The original fell through and returned None, which Flask rejects;
    # 'contact.html' is an assumed template name matching the route.
    return render_template('contact.html', message=message)
# # Sign In and Sign Up
# @app.route('/signup.html', methods=['GET', 'POST'])
# def signup():
# if request.method == 'POST':
# userdata = [request.form.get('email'), request.form.get('password'), request.form.get('fname'), request.form.get('lname'),request.form.get('passwordconfirm')]
# if userdata[1]==userdata[4]:
# with open('users.csv', 'a') as file:
# writer=csv.writer(file)
# writer.writerow(userdata[:4])
# print(userdata)
# return redirect(url_for("login"))
# return render_template('signup.html')
#
#
#
# @app.route('/signin.html', methods=['GET', 'POST'])
# def login():
# if request.method == 'POST':
# userdata = [request.form.get('username'), request.form.get('password')]
# print(userdata)
# return 'Submitted Form'
# return render_template('signin.html')
#about page
# @app.route("/about.html")
# def about():
# return render_template('about.html')
#
# #lineup generator
# @app.route("/lineup-generator.html")
# def lineup():
# return render_template('lineup-generator.html')
# #lineup re-route
# @app.route("/lineup/")
# def lineup_reroute():
# return redirect(url_for("lineup"))
#
#
#
#
#
#
# #use this to route to specific drivers
# @app.route("/<name>/")
# def user(name):
# return f"Hello {name}!"
#
# #admin page
# @app.route("/admin/")
# def admin():
# return redirect(url_for("name"))
if __name__=="__main__":
app.run()
|
from indicator import Indicator
import states
class RSI(Indicator):
def __init__(self, utils, config, logger, timeframe):
Indicator.__init__(self, utils, config, logger)
self.timeframe = timeframe
self.distance = self.cfg.DATA_POINTS
self.period = self.cfg.PERIOD
self.sell = self.cfg.SELL
self.buy = self.cfg.BUY
def calc_rsi(self, prices: list) -> float:
period = self.period
max_len = period if period < len(prices) else len(prices)
losses = gains = 0
for i in range(1, max_len):
try:
change = prices[i] - prices[i-1]
if change < 0:
losses += abs(change)
elif change > 0:
                    gains += abs(change)  # bug fix: was abs(gains), which never accumulated gains
except TypeError as e:
print(e, prices)
avg_loss = losses / period
avg_gain = gains / period
for i in range(period, len(prices)):
change = prices[i] - prices[i - 1]
loss = gain = 0
if change < 0:
loss = abs(change)
else:
gain = change
avg_gain = (avg_gain * (period - 1) + gain) / period
avg_loss = (avg_loss * (period - 1) + loss) / period
if avg_loss == 0:
return 100
elif avg_gain == 0:
return 0
        rs = avg_gain / avg_loss
        rsi = 100 - 100 / (1 + rs)
        return round(rsi, 2)
async def acalc_rsi(self, symbol: str):
data = await self.utils.get_historical_data(symbol,
length=self.distance * self.timeframe)
rsi = self.calc_rsi(data)
self.logger.debug(rsi)
return rsi
async def analyze(self, symbol: str) -> str:
rsi = await self.acalc_rsi(symbol)
if rsi >= self.sell:
return symbol, rsi, states.SELL
elif rsi <= self.buy:
return symbol, rsi, states.BUY
return symbol, rsi, states.HOLD
def __str__(self):
return "RSI"
__repr__ = __str__
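# Hedged usage sketch (added for illustration): calc_rsi only reads
# self.period, so a minimal check can bypass the Indicator plumbing; the
# synthetic prices and period below are placeholders, not the real config.
if __name__ == "__main__":
    rsi_ind = RSI.__new__(RSI)  # skip __init__, which needs utils/config/logger
    rsi_ind.period = 14
    closes = [44 + (i % 5) * 0.7 for i in range(30)]  # synthetic closing prices
    print(rsi_ind.calc_rsi(closes))  # a value between 0 and 100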
|
K=int(input("K= "))
N=int(input("N= "))
if(N>0):
for i in range(0,N):
print(K)
|
from mod_base import *
class UserInfo(Command):
"""View info about you or another nick."""
def run(self,win,user,data,caller=None):
args = Args(data)
u = user
if len(args)>0:
result = self.bot.FindUser(args[0])
            if result is not False:
u = result
else:
win.Send("user not found")
return False
ui = u.GetNick()+" "+u.hostname
if u.IsOnline():
ui += ", [online]"
else:
ui += ", [offline]"
ui += ", active: "+time_stamp_numeric(u.last_active)
if u.IsAuthed():
ui += ", authed as "+u.account["name"]
ui += "\n"
chans = []
for chan in u.Channels():
pre = ""
if chan.UserHasMode(u,IRC_MODE_VOICE):
pre = "+"
if chan.UserHasMode(u,IRC_MODE_OP):
pre = "@"
chans.append(pre+chan.GetName())
user.Send(ui)
if chans:
user.Send( "at: "+(" ".join(chans)) )
module = {
"class": UserInfo,
"type": MOD_COMMAND,
"level": 1,
"zone":IRC_ZONE_BOTH,
}
|
#!/usr/bin/env python3
import torch
import torch.nn as nn
import torch.nn.functional as F
from vr.models.baselines import LstmEncoder
from vr.models.baselines import build_mlp
class HyperVQA(nn.Module):
"""A model that uses a HyperNetwork to produce the weights for a CNN"""
def __init__(self, vocab, conv_layers=(8, 16, 32), conv_kernels=(3, 3, 3), rnn_wordvec_dim=128, rnn_dim=256,
rnn_num_layers=2, rnn_dropout=0, fc_dims=(1024,), fc_use_batchnorm=False, fc_dropout=0):
super().__init__()
assert len(conv_layers) == len(conv_kernels)
total_output_weights = 0
prev_I = 3
self.conv_shapes = []
for i in range(len(conv_layers)):
I = conv_layers[i]
K = conv_kernels[i]
self.conv_shapes.append((I, K))
total_output_weights += I * prev_I * (K ** 2) + I
prev_I = I
rnn_kwargs = {
'token_to_idx': vocab['question_token_to_idx'],
'wordvec_dim': rnn_wordvec_dim,
'rnn_dim': rnn_dim,
'rnn_num_layers': rnn_num_layers,
'rnn_dropout': rnn_dropout
}
self.rnn = LstmEncoder(**rnn_kwargs)
self.hypernet = nn.Sequential(
nn.Linear(rnn_dim, rnn_dim * 4),
nn.LeakyReLU(negative_slope=1/5.5),
nn.Linear(rnn_dim * 4, rnn_dim * 16),
nn.LeakyReLU(negative_slope=1/5.5),
nn.Linear(rnn_dim * 16, total_output_weights)
)
classifier_kwargs = {
'input_dim': 2048, # self.conv_shapes[-1][0] * (64 // (2 ** (len(conv_layers)+1))) + rnn_dim,
'hidden_dims': fc_dims,
'output_dim': len(vocab['answer_token_to_idx']),
'use_batchnorm': fc_use_batchnorm,
'dropout': fc_dropout,
}
self.linear = build_mlp(**classifier_kwargs)
def conv_batch(self, x, weights, biases):
"""Apply the given weights and biases to the elements, one per batch"""
# Main idea: combining batch dimension and channel dimension
n_items = x.shape[0]
original_out_channels = weights.shape[1]
out_channels = n_items*original_out_channels
all_output = F.conv2d(x.view(1, n_items*x.shape[1], x.shape[2], x.shape[3]), weights.reshape(out_channels, weights.shape[2], weights.shape[3], weights.shape[4]), biases.reshape(-1), padding=1, groups=n_items)
return all_output.reshape(n_items, original_out_channels, all_output.shape[2], all_output.shape[3])
def forward(self, questions, feats):
"""Make a pass with the HyperVQA model"""
# encode the questions and get corresponding weights
q_feats = self.rnn(questions)
all_conv_weights = self.hypernet(q_feats)
batch_size = all_conv_weights.shape[0]
x = feats
prev_I = 3
# apply convolutional layers
for conv_layer in self.conv_shapes:
I, K = conv_layer
layer_num_weights = prev_I * I * (K ** 2)
conv_weights, conv_biases = all_conv_weights[..., :layer_num_weights],\
all_conv_weights[..., layer_num_weights:layer_num_weights + I]
conv_weights = conv_weights.view(batch_size, I, prev_I, K, K)
x = self.conv_batch(x, conv_weights, conv_biases)
x = F.max_pool2d(x, kernel_size=2, stride=2)
prev_I = I
all_conv_weights = all_conv_weights[..., layer_num_weights + I:]
x = x.flatten(1)
x = self.linear(x)
# return logits
return x
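# Hedged standalone check (added for illustration, not part of the original
# model): conv_batch relies on grouped convolution giving each sample its own
# filter bank. Folding the batch into the channel axis with groups=batch is
# equivalent to looping over per-sample F.conv2d calls:
if __name__ == "__main__":
    n, c_in, c_out, k = 4, 3, 8, 3
    x = torch.randn(n, c_in, 16, 16)
    w = torch.randn(n, c_out, c_in, k, k)  # a separate weight tensor per sample
    b = torch.randn(n, c_out)
    grouped = F.conv2d(x.view(1, n * c_in, 16, 16),
                       w.reshape(n * c_out, c_in, k, k),
                       b.reshape(-1), padding=1, groups=n)
    grouped = grouped.reshape(n, c_out, 16, 16)
    looped = torch.stack([F.conv2d(x[i:i + 1], w[i], b[i], padding=1)[0]
                          for i in range(n)])
    print(torch.allclose(grouped, looped, atol=1e-5))  # expect True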
|
class jumpingTable():
    jumpingTable = {}  # note: a class attribute, shared by every instance
def set(self, n, i):
self.jumpingTable[n] = i
def going(self, index):
if index in self.jumpingTable:
self.jumpingTable[index]()
elif 'default' in self.jumpingTable:
self.jumpingTable['default']()
else:
raise RuntimeError('undefined jumping: {}'.format(index))
def main():
    j = jumpingTable()
j.set('one', one)
j.set('two', two)
j.set('three', three)
j.set('default', default)
try:
j.going('seven')
except RuntimeError as e:
print(e)
def one():
print('This is the "one" function.')
def two():
print('This is the "two" function.')
def three():
print('This is the "three" function.')
def default():
print('this is the default function.')
if __name__ == "__main__": main()
|
import pandas
from sklearn import linear_model, datasets
from sklearn.metrics import accuracy_score
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score
print('reading in data...')
"""Read in dataset"""
set_sizes = [100,500,1000,5000,10000,50000,100000,500000,1000000,5000000,10000000,50000000,100000000]
nrows2 = set_sizes[11]
column_names = ["Instance","Feature 1","Feature 2", "Feature 3","Feature 4","Feature 5","Feature 6","Feature 7",
"Feature 8","Feature 9","Feature 10","Target","TargetClass"]
dataframe = pandas.read_csv("C:\\Users\\bboyd\\Documents\\college - 4th year\\Machine Learning\\machine_learning_group_project\\team_13\\datasets\\sum_with_noise.csv",
sep=';',header=0,names=column_names,index_col=0,usecols=[0,1,2,3,4,6,7,8,9,10,12],
nrows = nrows2)
dataframe2 = pandas.read_csv("C:\\Users\\bboyd\\Documents\\college - 4th year\\Machine Learning\\machine_learning_group_project\\team_13\\datasets\\sum_with_noise.csv",
sep=';',header=0,names=column_names,index_col=0,usecols=[0,1,2,3,4,6,7,8,9,10,11],
nrows = nrows2)
array = dataframe2.values
X = array[:,0:9]
Y = array[:,9]
print(nrows2)
print(int(nrows2 * .7))
print("fitting model...")
X_train = dataframe.head(int(nrows2 * .7))
Y_train = X_train.TargetClass
X_train = X_train[["Feature 1","Feature 2", "Feature 3","Feature 4","Feature 6","Feature 7", "Feature 8","Feature 9","Feature 10"]]
X_test = dataframe.tail(int(nrows2 * .3))
Y_test = X_test.TargetClass
X_test = X_test[["Feature 1","Feature 2", "Feature 3","Feature 4","Feature 6","Feature 7", "Feature 8","Feature 9","Feature 10"]]
print(type(X_train))
h = .02  # step size in the mesh (unused below; leftover from the sklearn example)
logreg = linear_model.LogisticRegression(C=1e5)
logreg.fit(X_train, Y_train)
x_min, x_max = X_train.min() - .5, X_train.max() + .5
y_min, y_max = Y_train.min() , Y_train.max()
print("MAX TEST Y VAL: ", Y_test.max())
fl = x_min.astype('float64', errors='ignore')
xs_as_array = fl.to_numpy()  # as_matrix() was removed from pandas; to_numpy() is its replacement
x_min = xs_as_array.min()
print("x_min", x_min)
xs2_as_array = x_max.to_numpy()
x_max = xs2_as_array.max()
print("xmax",x_max)
print("type of min ",type(x_min))
print("ymin",y_min)
print("ymax",y_max)
## metrics ##
print(type(logreg))
y_values_predicted = logreg.predict(X_test)
print("metrics", y_values_predicted)
print("accuaracy",accuracy_score(Y_test, y_values_predicted))
precision_scores = precision_score(Y_test, y_values_predicted, average='weighted')
print("precision scores" , precision_scores)
|
import maya.cmds as cmds
import maya.mel
#for selecting all objects
cmds.select(all=True,visible=True)
#for deselecting circle
cmds.select('nurbsCircle1', d=True)
#for deselecting plane and deselecting other objects
cmds.select('nurbsPlane1', d=True)
#for center pivot
cmds.xform(cp=True)
cmds.xform(a=True)
cmds.xform()
#for getting dimensions of objects
bbox = cmds.exactWorldBoundingBox( 'sphere1')
print 'Bounding box ranges from: %f' % bbox[0], ', %f' % bbox[1], ', %f' % bbox[2],
print ' to %f' % bbox[3], ', %f' % bbox[4], ', %f' % bbox[5]
|