content stringlengths 5 1.05M |
|---|
#!/usr/bin/env python3
import hashlib
import os
import re
import shutil
import numpy as np
import h5py
import tensorflow as tf
import torch
def main():
    """Convert torchvision ResNet checkpoints into Keras-style h5 weight files.

    For each ResNet variant, downloads the PyTorch checkpoint and a Keras
    ResNet50 h5 file to use as a structural template, then rewrites the h5
    with the PyTorch weights (see convert()).  Output files land in the
    standard Keras model cache so tf.keras can pick them up by name.
    """
    pytorch_root = 'https://download.pytorch.org/models'
    tf_root = 'https://storage.googleapis.com/tensorflow/keras-applications/resnet'
    # expanduser('~') works even when $HOME is unset (e.g. on Windows), unlike
    # os.environ['HOME'], which raises KeyError there.
    model_dir = os.path.join(os.path.expanduser('~'), '.keras', 'models')
    # ResNets with no top layer (no fully-connected classifier at the end)
    convert(
        url_pytorch=f'{pytorch_root}/resnet18-5c106cde.pth',
        url_tf=f'{tf_root}/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
        out_path=f'{model_dir}/resnet18_weights_tf_dim_ordering_tf_kernels_notop.h5',
        hash_pytorch='5c106cde386e87d4033832f2996f5493238eda96ccf559d1d62760c4de0613f8',
        hash_tf_in='4d473c1dd8becc155b73f8504c6f6626',
        hash_tf_out_expected='cd9aca5b625298765956a04230be071a')
    convert(
        url_pytorch=f'{pytorch_root}/resnet34-333f7ec4.pth',
        url_tf=f'{tf_root}/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
        out_path=f'{model_dir}/resnet34_weights_tf_dim_ordering_tf_kernels_notop.h5',
        hash_pytorch='333f7ec4c6338da2cbed37f1fc0445f9624f1355633fa1d7eab79a91084c6cef',
        hash_tf_in='4d473c1dd8becc155b73f8504c6f6626',
        hash_tf_out_expected='5d0432fa0b4d5bf5fd88f04151f590a4')
    convert(
        url_pytorch=f'{pytorch_root}/resnet50-0676ba61.pth',
        url_tf=f'{tf_root}/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
        out_path=f'{model_dir}/resnet50v1_5_weights_tf_dim_ordering_tf_kernels_notop.h5',
        hash_pytorch='0676ba61b6795bbe1773cffd859882e5e297624d384b6993f7c9e683e722fb8a',
        hash_tf_in='4d473c1dd8becc155b73f8504c6f6626',
        hash_tf_out_expected='315b92000a86ce737f460441071d7579')
    convert(
        url_pytorch=f'{pytorch_root}/resnet101-63fe2227.pth',
        url_tf=f'{tf_root}/resnet101_weights_tf_dim_ordering_tf_kernels_notop.h5',
        out_path=f'{model_dir}/resnet101v1_5_weights_tf_dim_ordering_tf_kernels_notop.h5',
        hash_pytorch='63fe2227b86e8f1f2063f43a75c84d195911b6a0eace650907dd3dc62dd49a0a',
        hash_tf_in='88cf7a10940856eca736dc7b7e228a21',
        hash_tf_out_expected='0b87f84107ae1a0616f76d028781b6a6')
    convert(
        url_pytorch=f'{pytorch_root}/resnet152-394f9c45.pth',
        url_tf=f'{tf_root}/resnet152_weights_tf_dim_ordering_tf_kernels_notop.h5',
        out_path=f'{model_dir}/resnet152v1_5_weights_tf_dim_ordering_tf_kernels_notop.h5',
        hash_pytorch='394f9c45966e3651a89bbb78a48410a6755854ce4a5ab64927cf1c7247f85e58',
        hash_tf_in='ee4c566cf9a93f14d82f913c2dc6dd0c',
        hash_tf_out_expected='471a7a36f82f50879a64731f1615f2df')
    # ResNets with the top layer (fully-connected classifier at the end)
    convert(
        url_pytorch=f'{pytorch_root}/resnet18-5c106cde.pth',
        url_tf=f'{tf_root}/resnet50_weights_tf_dim_ordering_tf_kernels.h5',
        out_path=f'{model_dir}/resnet18_weights_tf_dim_ordering_tf_kernels.h5',
        hash_pytorch='5c106cde386e87d4033832f2996f5493238eda96ccf559d1d62760c4de0613f8',
        hash_tf_in='2cb95161c43110f7111970584f804107',
        hash_tf_out_expected='a04f614a6c28f19f9e766a22a65d87d7')
    convert(
        url_pytorch=f'{pytorch_root}/resnet34-333f7ec4.pth',
        url_tf=f'{tf_root}/resnet50_weights_tf_dim_ordering_tf_kernels.h5',
        out_path=f'{model_dir}/resnet34_weights_tf_dim_ordering_tf_kernels.h5',
        hash_pytorch='333f7ec4c6338da2cbed37f1fc0445f9624f1355633fa1d7eab79a91084c6cef',
        hash_tf_in='2cb95161c43110f7111970584f804107',
        hash_tf_out_expected='25351c4102513ba73866398dfda04546')
    convert(
        url_pytorch=f'{pytorch_root}/resnet50-0676ba61.pth',
        url_tf=f'{tf_root}/resnet50_weights_tf_dim_ordering_tf_kernels.h5',
        out_path=f'{model_dir}/resnet50v1_5_weights_tf_dim_ordering_tf_kernels.h5',
        hash_pytorch='0676ba61b6795bbe1773cffd859882e5e297624d384b6993f7c9e683e722fb8a',
        hash_tf_in='2cb95161c43110f7111970584f804107',
        hash_tf_out_expected='595763ceca1995bf6e34ccd730b81741')
    convert(
        url_pytorch=f'{pytorch_root}/resnet101-63fe2227.pth',
        url_tf=f'{tf_root}/resnet101_weights_tf_dim_ordering_tf_kernels.h5',
        out_path=f'{model_dir}/resnet101v1_5_weights_tf_dim_ordering_tf_kernels.h5',
        hash_pytorch='63fe2227b86e8f1f2063f43a75c84d195911b6a0eace650907dd3dc62dd49a0a',
        hash_tf_in='f1aeb4b969a6efcfb50fad2f0c20cfc5',
        hash_tf_out_expected='b16e80439827b6abfb2c378ac434fd45')
    convert(
        url_pytorch=f'{pytorch_root}/resnet152-394f9c45.pth',
        url_tf=f'{tf_root}/resnet152_weights_tf_dim_ordering_tf_kernels.h5',
        out_path=f'{model_dir}/resnet152v1_5_weights_tf_dim_ordering_tf_kernels.h5',
        hash_pytorch='394f9c45966e3651a89bbb78a48410a6755854ce4a5ab64927cf1c7247f85e58',
        hash_tf_in='100835be76be38e30d865e96f2aaae62',
        hash_tf_out_expected='2e445ecb46e5d72aa0004b51f668623c')
def convert(url_pytorch, url_tf, out_path, hash_pytorch, hash_tf_in,
            hash_tf_out_expected=None):
    """Rewrite a Keras h5 ResNet checkpoint with weights from a PyTorch checkpoint.

    Downloads both files (with hash verification), copies the h5 template to
    `out_path`, then overwrites each dataset with the corresponding PyTorch
    tensor (transposed to TF layout).  For ResNet18/34 the template (a
    ResNet50 file) has layers the target net lacks, so unneeded layers are
    deleted and the h5 layer-name metadata is rewritten.

    :param url_pytorch: URL of the .pth PyTorch state dict
    :param url_tf: URL of the Keras h5 file used as a structural template
    :param out_path: where to write the converted h5 file
    :param hash_pytorch: sha256 of the .pth download
    :param hash_tf_in: md5 of the h5 template download
    :param hash_tf_out_expected: optional md5 the output should have; only
        checked and reported, never enforced
    """
    tf_path_in = tf.keras.utils.get_file(origin=url_tf, file_hash=hash_tf_in, cache_dir='/tmp')
    shutil.copy2(tf_path_in, out_path)
    torch_path = tf.keras.utils.get_file(
        origin=url_pytorch, file_hash=hash_pytorch, cache_dir='/tmp')
    ckpt_torch = torch.load(torch_path)
    with h5py.File(out_path, mode='r+') as out_h5:
        # Prefixes of conv blocks that actually received weights; everything
        # else inherited from the template will be pruned for ResNet18/34.
        needed_conv_prefixes = set()
        for name, value in ckpt_torch.items():
            # The 'notop' variants have no classifier, so skip fc.* weights.
            if name.startswith('fc.') and 'notop' in url_tf:
                continue
            value = value.detach().numpy()
            h5_name = name_pytorch_to_h5(name)
            if h5_name.endswith('kernel:0'):
                if h5_name.startswith('probs'):
                    value = value.transpose([1, 0])  # c_out c_in -> c_in c_out
                else:
                    value = value.transpose([2, 3, 1, 0])  # c_out c_in w h -> w h c_in c_out
            set_h5_value(out_h5, h5_name, value)
            if h5_name.startswith('conv'):
                # Record the layer prefix (name minus the trailing _conv/_bn part).
                main_part = h5_name.split('/')[0]
                subparts = main_part.split('_')
                layer_prefix = '_'.join(subparts[:-1])
                needed_conv_prefixes.add(layer_prefix)
                if len(subparts) > 2:
                    # Also record the shorter prefix (drops a numeric sub-index),
                    # so e.g. the block's relu layers are kept too.
                    if subparts[-2].isdigit():
                        other_prefix = '_'.join(subparts[:-2])
                        needed_conv_prefixes.add(other_prefix)
        if 'resnet34' in url_pytorch or 'resnet18' in url_pytorch:
            # Delete unneeded layers (the ResNet50 template has 3-conv
            # bottleneck blocks; ResNet18/34 use 2-conv basic blocks).
            layer_names_old = [x.decode('ascii') for x in out_h5.attrs['layer_names']]
            layer_names_new = [n for n in layer_names_old if
                               is_layer_needed(n, needed_conv_prefixes)
                               and not n.endswith('_2_relu')]
            for unneeded_layer in sorted(set(layer_names_old) - set(layer_names_new)):
                del out_h5[unneeded_layer]
            # Rewrite layer_names attribute
            del out_h5.attrs['layer_names']
            layer_names_new = resort_layer_names(layer_names_new)
            out_h5.attrs['layer_names'] = np.array([x.encode('ascii') for x in layer_names_new])
            # Delete bias values (torchvision basic-block convs have no bias)
            for layer_name in layer_names_new:
                if layer_name.startswith('conv'):
                    # Rewrite weight_names attribute
                    out_h5[layer_name].attrs['weight_names'] = [
                        w for w in out_h5[layer_name].attrs['weight_names']
                        if not w.endswith(b'bias:0')]
                    bias_name = f'{layer_name}/{layer_name}/bias:0'
                    if bias_name in out_h5:
                        del out_h5[bias_name]
    hash_tf_out = get_md5(out_path)
    if hash_tf_out_expected is not None:
        print('Hash OK' if hash_tf_out == hash_tf_out_expected else 'Hash error')
    print(out_path, hash_tf_out)
def get_prefix(name):
    """Return an h5 layer name's prefix: the path head minus its last '_'-part."""
    head = name.split('/')[0]
    pieces = head.split('_')
    return '_'.join(pieces[:-1])
def is_layer_needed(layer_name, needed_conv_prefixes):
    """A layer survives pruning unless it is a conv* layer with an unneeded prefix."""
    if not layer_name.startswith('conv'):
        return True
    # Inline of get_prefix: path head minus the trailing '_'-separated part.
    prefix = '_'.join(layer_name.split('/')[0].split('_')[:-1])
    return prefix in needed_conv_prefixes
def resort_layer_names(layer_names):
    """Normalize the order of h5 layer names so Keras can load the checkpoint.

    Keras' h5 format ties weights to layer order.  Any consecutive run of four
    names that share a prefix and end in 0/2 + conv/bn is rewritten in the
    canonical order 0_conv, 2_conv, 0_bn, 2_bn.  The final three names (the
    non-conv tail) are passed through unchanged.
    """
    pattern = re.compile(r'^(?P<prefix>.+)_(?P<suffix>(0|2)_(conv|bn))$')
    canonical = ('0_conv', '2_conv', '0_bn', '2_bn')
    reordered = []
    idx = 0
    while idx <= len(layer_names) - 4:
        window = layer_names[idx:idx + 4]
        hits = [pattern.match(n) for n in window]
        if all(hits) and len({m['prefix'] for m in hits}) == 1:
            shared = hits[0]['prefix']
            reordered.extend(f'{shared}_{sfx}' for sfx in canonical)
            idx += 4
        else:
            reordered.append(layer_names[idx])
            idx += 1
    reordered.extend(layer_names[-3:])
    return reordered
def set_h5_value(out_h5, h5_name, value):
    """Store `value` under `h5_name`, reusing the existing dataset when shapes match.

    Shape-matching datasets are overwritten in place; otherwise any stale
    dataset is dropped and a fresh one created (track_times=False keeps the
    output byte-reproducible).
    """
    can_reuse = h5_name in out_h5 and out_h5[h5_name][:].shape == value.shape
    if can_reuse:
        out_h5[h5_name][:] = value
        return
    if h5_name in out_h5:
        del out_h5[h5_name]
    out_h5.create_dataset(h5_name, data=value, track_times=False)
def name_pytorch_to_h5(name):
    """Translate a torchvision ResNet parameter name into its Keras h5 path.

    Examples:
        'fc.weight'             -> 'probs/probs/kernel:0'
        'conv1.weight'          -> 'conv1_conv/conv1_conv/kernel:0'
        'layer1.0.conv2.weight' -> 'conv2_block1_2_conv/conv2_block1_2_conv/kernel:0'

    torchvision counts stages from layer1 and blocks from 0; Keras counts
    stages from conv2 and blocks from 1, hence the +1 offsets below.
    """
    bn_map = dict(weight='gamma', bias='beta', running_mean='moving_mean',
                  running_var='moving_variance')
    conv_map = dict(weight='kernel', bias='bias')
    parts = name.split('.')
    first = parts.pop(0)
    if first == 'fc':
        pieces, mapping = ['probs'], conv_map
    elif first == 'conv1':
        pieces, mapping = ['conv1', 'conv'], conv_map
    elif first == 'bn1':
        pieces, mapping = ['conv1', 'bn'], bn_map
    else:
        stage = int(first[5]) + 1       # 'layerN' -> convN+1
        block = int(parts.pop(0)) + 1   # 0-based -> 1-based
        pieces = [f'conv{stage}', f'block{block}']
        inner = parts.pop(0)
        if inner == 'downsample':
            # The projection shortcut is Keras' '..._0_conv' / '..._0_bn'.
            pieces.append('0')
            if parts.pop(0) == '0':
                pieces.append('conv')
                mapping = conv_map
            else:
                pieces.append('bn')
                mapping = bn_map
        else:
            pieces.append(str(int(inner[-1])))
            if inner.startswith('bn'):
                pieces.append('bn')
                mapping = bn_map
            else:
                pieces.append('conv')
                mapping = conv_map
    folder = '_'.join(pieces)
    leaf = mapping[parts.pop(0)]
    return f'{folder}/{folder}/{leaf}:0'
def get_md5(filepath):
    """Return the hex MD5 digest of the file at `filepath`, read in 8 KiB chunks."""
    digest = hashlib.md5()
    with open(filepath, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b''):
            digest.update(chunk)
    return digest.hexdigest()
# Script entry point: run all checkpoint conversions when invoked directly.
if __name__ == '__main__':
    main()
|
# Expected parsed-output fixture (apparently for a 'show tenants'-style CLI
# parser test).
# NOTE(review): every tenant carries identical tenant_id/tenant_vpnid
# (20/1037) — looks copy-pasted; verify against the captured device output.
expected_output={'active_tenants_num': 20,
    'max_tenants': 24,
    'tenant_name': {'Tenanttest12': {'tenant_id': 20, 'tenant_vpnid': 1037},
                    'Tenanttest13': {'tenant_id': 20, 'tenant_vpnid': 1037},
                    'apple': {'tenant_id': 20, 'tenant_vpnid': 1037},
                    'ciscov-DEEP1': {'tenant_id': 20, 'tenant_vpnid': 1037},
                    'ciscov-DEEP1234': {'tenant_id': 20, 'tenant_vpnid': 1037},
                    'ciscov-DEEP2': {'tenant_id': 20, 'tenant_vpnid': 1037},
                    'ciscov-DEEP3': {'tenant_id': 20, 'tenant_vpnid': 1037},
                    'ciscov-DEEP4': {'tenant_id': 20, 'tenant_vpnid': 1037},
                    'ciscov-bgl-new1': {'tenant_id': 20, 'tenant_vpnid': 1037},
                    'ciscov-bgl-new10': {'tenant_id': 20, 'tenant_vpnid': 1037},
                    'ciscov-bgl-new6': {'tenant_id': 20, 'tenant_vpnid': 1037},
                    'ciscov-test': {'tenant_id': 20, 'tenant_vpnid': 1037},
                    'ciscov-testdeep1': {'tenant_id': 20, 'tenant_vpnid': 1037},
                    'ciscov-testdeep2': {'tenant_id': 20, 'tenant_vpnid': 1037},
                    'grapes': {'tenant_id': 20, 'tenant_vpnid': 1037},
                    'orange': {'tenant_id': 20, 'tenant_vpnid': 1037},
                    'pineapple': {'tenant_id': 20, 'tenant_vpnid': 1037},
                    'temp1': {'tenant_id': 20, 'tenant_vpnid': 1037},
                    'tenanttemp4': {'tenant_id': 20, 'tenant_vpnid': 1037},
                    'tenanttemp8': {'tenant_id': 20, 'tenant_vpnid': 1037}}}
# Make a program that plays Odds or Evens with the computer. The game will only be stopped when the player loses,
# showing the total number of consecutive victories he has won at the end of the game.
from random import randint

print('=-' * 12)
print('LET`S PLAY ODDS OR EVENS')
print('=-' * 12)
vic = 0  # consecutive victories
while True:
    # [:1] instead of [0]: empty input yields '' and re-prompts instead of
    # crashing with IndexError.  Tuple membership instead of `in 'OE'`:
    # '' in 'OE' is True, which let empty input slip through the old check.
    choice = input('Odd or Even? [O/E] ').strip().upper()[:1]
    while choice not in ('O', 'E'):
        print('\033[1:33mINVALID CHOICE\033[m')
        choice = input('Odd or Even? [O/E] ').strip().upper()[:1]
    # Keep asking until a valid integer in [0, 10]; non-numeric input used to
    # crash with ValueError.
    while True:
        try:
            player = int(input('Say a value: [0-10] '))
        except ValueError:
            player = -1  # force the invalid-choice path below
        if 0 <= player <= 10:
            break
        print('\033[1:33mINVALID CHOICE\033[m')
    computer = randint(0, 10)
    total = player + computer
    print('--' * 12)
    print(f'You played {player} and the computer {computer}. Total {total} is', end=' ')
    print('EVEN' if total % 2 == 0 else 'ODD')
    print('--' * 12)
    # Single win test replaces the two duplicated O/E branches: the player
    # wins when the total's parity matches the chosen side.
    won = (total % 2 == 0) == (choice == 'E')
    if won:
        print('You WON!')
        vic += 1
    else:
        print('You LOST!')
        print('=-' * 12)
        break
    print('Let`s play again...')
    print('=-' * 12)
print(f'\033[1:31mGAME OVER!\033[m \nYou won {vic} times.')
|
from humplib import __version__, hump2underline, json_hump2underline
from humplib import underline2hump
def test_version():
    """The packaged version string matches the release under test."""
    assert __version__ == '0.1.1'


def test_underline2hump():
    """snake_case converts to lowerCamelCase."""
    assert underline2hump("hello_word") == "helloWord"


def test_hump2underline():
    """lowerCamelCase converts back to snake_case."""
    assert hump2underline("helloWord") == "hello_word"


def test_json_hump2underline():
    """Keys inside a JSON string are converted to snake_case.

    NOTE(review): the expected output has a space before the colon
    ('"user_name" :') — presumably an artifact of humplib's regex
    replacement; confirm this is intended library behavior.
    """
    json_str = """{"userName":"hi"}"""
    assert json_hump2underline(json_str) == '{"user_name" :"hi"}'
|
"""
A CLI command to initialize the database if it isn't already initialized.
Usage:
initdb conf/development.ini
"""
import sys
from pyramid.paster import bootstrap
def initdb():
    """Initialize the database by bootstrapping the Pyramid environment.

    Bootstrapping alone is enough to trigger the initialisation code in
    `lms.db.__init__.py`, which sets up the DB for us.

    Raises SystemExit with a usage message when no config file argument is
    given (previously this crashed with a raw IndexError).
    """
    if len(sys.argv) < 2:
        raise SystemExit('Usage: initdb <config_file>  (e.g. initdb conf/development.ini)')
    bootstrap(sys.argv[1])
|
import os
import sys
from django.apps import apps
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.utils.translation import activate as activate_translation
from parler.models import TranslatableModel
from polib import pofile
class Command(BaseCommand):
    """Management command reporting missing gettext and django-parler translations.

    Exits with status 1 when any configured (non-excluded) language is missing
    a translation, so it can gate CI.
    """
    help = 'Check project for missing translations'

    def add_arguments(self, parser):
        """Register CLI flags.

        NOTE(review): the --no-gettext / --no-gettext-update / --no-parler
        flags use action='store_false' without an explicit dest, so the parsed
        option keys are 'no_gettext' etc. with default True — i.e.
        options['no_gettext'] is True means "do run the gettext check".
        """
        parser.add_argument('--exclude',
                            action='append',
                            metavar='LANG',
                            default=[],
                            help='Exclude language from check')
        gettext_group = parser.add_argument_group('gettext options')
        gettext_group.add_argument('--gettext-check-all',
                                   action='store_true',
                                   help='Check all translatable strings')
        gettext_group.add_argument('--gettext-source-has-language',
                                   action='store',
                                   metavar='LANG',
                                   help='Use source code strings for language LANG')
        gettext_group.add_argument('--no-gettext',
                                   action='store_false',
                                   help='Do not check gettext translations')
        gettext_group.add_argument('--no-gettext-update',
                                   action='store_false',
                                   help='Do not update gettext translation files')
        parler_group = parser.add_argument_group('parler options')
        parler_group.add_argument('--no-parler',
                                  action='store_false',
                                  help='Do not check parler translations')

    def handle(self, *args, **options):
        """Run the configured checks and exit(1) if anything is missing."""
        # Languages to check = configured languages minus --exclude'd ones.
        langs = [lang[0] for lang in settings.LANGUAGES if lang[0] not in options['exclude']]
        activate_translation(settings.LANGUAGE_CODE)
        self.stdout.write('')
        self._show_configured_languages(langs)
        self.stdout.write('')
        missing_translations = False
        if options['no_gettext']:  # True unless --no-gettext was passed
            if options['no_gettext_update']:  # True unless --no-gettext-update
                self._update_gettext_translation_files(langs)
                self.stdout.write('')
            missing_translations |= self._check_gettext_translations(langs,
                                                                     options['gettext_check_all'],
                                                                     options['gettext_source_has_language'])
            self.stdout.write('')
        if options['no_parler']:  # True unless --no-parler was passed
            missing_translations |= self._check_parler_translations(langs)
            self.stdout.write('')
        if missing_translations:
            sys.exit(1)

    def _show_configured_languages(self, languages):
        """Print all configured languages, marking excluded ones."""
        self.stdout.write('Configured languages:')
        for lang in settings.LANGUAGES:
            if lang[0] in languages:
                self.stdout.write('  {} - {}'.format(
                    self.style.SUCCESS(lang[0]),
                    lang[1]))
            else:
                self.stdout.write('  {} - {} {}'.format(
                    self.style.NOTICE(lang[0]),
                    lang[1],
                    self.style.WARNING('(excluded)')))

    def _update_gettext_translation_files(self, languages):
        """Regenerate .po files via Django's makemessages for the given languages."""
        self.stdout.write('Update gettext translations:')
        call_command('makemessages', locale=languages)

    def _check_gettext_translations(self, languages, check_all=False, source_language=None):
        """Report msgids not translated in every language; return True if any.

        :param check_all: also consider untranslated entries of every language
        :param source_language: language whose source strings count as translated
        """
        def _load_po(language):
            # Only the first LOCALE_PATHS entry is searched.
            filepath = os.path.join(settings.LOCALE_PATHS[0],
                                    language,
                                    'LC_MESSAGES',
                                    'django.po')
            if os.path.isfile(filepath):
                return pofile(filepath)
            # Falls through to None; missing files are warned about but the
            # language still participates (all its entries count as missing).
            self.stdout.write('{} {}'.format(
                self.style.WARNING('Could not find language file for'),
                self.style.ERROR(language)))
        langfiles = {lang: _load_po(lang) for lang in languages}
        # translations maps msgid -> list of languages that have it translated.
        translations = {}
        for lang, po in langfiles.items():
            if check_all or lang == source_language:
                for entry in po.untranslated_entries() if po else []:
                    if lang == source_language:
                        # Source-language strings are their own translation.
                        translations.setdefault(entry.msgid, []).append(lang)
                    else:
                        translations.setdefault(entry.msgid, [])
            for entry in po.translated_entries() if po else []:
                translations.setdefault(entry.msgid, []).append(lang)
        missing = {translation: set(languages) - set(langs)
                   for translation, langs in translations.items()
                   if set(languages) - set(langs)}
        if missing:
            self.stdout.write('Missing gettext translations:')
            # One row per msgid; a language code appears where it is missing.
            self.stdout.writelines(['  {} - {}'.format(
                self.style.ERROR(' '.join([lang if lang in missing[key] else ' ' for lang in languages])), key)
                for key in sorted(missing.keys())])
            return True
        return False

    def _check_parler_translations(self, languages):
        """Report parler model fields lacking a translation in some language.

        NOTE(review): iterates every row of every TranslatableModel — can be
        slow and memory-heavy on large tables.
        """
        missing = {}
        for model in apps.get_models():
            if isinstance(model(), TranslatableModel):
                translations = {}
                for item in model.objects.all():
                    for lang in item.get_available_languages():
                        translated_item = item.get_translation(lang)
                        for field in translated_item.get_translated_fields():
                            # Only non-empty field values count as translated.
                            if getattr(translated_item, field):
                                translation = '<{}: {}>.{}'.format(
                                    self.style.WARNING(model().__class__.__name__),
                                    item, self.style.WARNING(field)
                                )
                                translations.setdefault(translation, []).append(lang)
                missing.update({translation: set(languages) - set(langs)
                                for translation, langs in translations.items()
                                if set(languages) - set(langs)})
        if missing:
            self.stdout.write('Missing parler translations:')
            self.stdout.writelines(['  {} - {}'.format(
                self.style.ERROR(' '.join([lang if lang in missing[key] else ' ' for lang in languages])), key)
                for key in sorted(missing.keys())])
            return True
        return False
|
class Node:
    """One element of a singly linked list: a payload plus a forward link."""

    def __init__(self, data):
        self.data = data
        self.next = None  # successor node; assigned by the list on append
class LinkedList:
    """Minimal singly linked list supporting append and traversal."""

    def __init__(self):
        # Empty list: no head node yet.
        self.head = None

    def add(self, data):
        """Append `data` at the tail (walks from the head, O(n))."""
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
            return
        tail = self.head
        while tail.next is not None:
            tail = tail.next
        tail.next = new_node

    def display(self):
        """Return the stored values front-to-back as a plain list."""
        values = []
        node = self.head
        while node is not None:
            values.append(node.data)
            node = node.next
        return values
|
# *****************************************************************
# Copyright 2011 MIT Lincoln Laboratory
# Project: SPAR
# Authors: OMD
# Description: unit tests for RandomInt
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 11 Nov 2011 omd Original Version
# *****************************************************************
from __future__ import division
import spar_random
import unittest
import time
import collections
# TODO: This should really test that this works with numpy and without by
# forcing the import of numpy to fail but I can't figure out how to cause the
# import to fail.
class SparRandomTest(unittest.TestCase):
    """Statistical smoke tests for the spar_random wrapper.

    Python 2 code (uses xrange).  Each test seeds from the wall clock and
    embeds the seed in failure messages so failing runs can be replayed.
    """
    def setUp(self):
        # Remember the seed so a statistical failure is reproducible.
        self.seed = int(time.time())
        self.seed_msg = "Random seed used for this test: %s" % self.seed
        self.longMessage = True
        spar_random.seed(self.seed)
    def test_seed(self):
        """Re-seeding with the same value must reproduce the same sequence."""
        list_len = 200
        first_list = [spar_random.randint(1,10) for _ in xrange(list_len)]
        # Now, re-seed. Do we get the same list again?
        spar_random.seed(self.seed)
        second_list = [spar_random.randint(1,10) for _ in xrange(list_len)]
        self.assertListEqual(first_list, second_list, self.seed_msg)
    def test_randint(self):
        """This tests that the randint function does indeed generate uniformly
        distributed random numbers."""
        observed = {x: 0 for x in xrange(2, 13)}
        # Generate 10000 random numbers in [2, 12]
        for i in xrange(10000):
            num = spar_random.randint(2, 12)
            observed[num] += 1
        # Each of the 11 values should appear roughly 10000/11 ~= 909 times.
        # BUG FIX: the original asserted observed[num] (the last draw only) on
        # every iteration, so 10 of the 11 buckets were never actually checked.
        for n in xrange(2, 13):
            self.assertGreater(observed[n], 800, self.seed_msg)
            self.assertLess(observed[n], 1200, self.seed_msg)
    def test_choice(self):
        """
        This tests that the choice() function is roughly correct.
        """
        seq = [1,2,3]
        observed = {}
        for i in seq:
            observed[i] = 0
        for i in xrange(1000):
            x = spar_random.choice(seq)
            observed[x] += 1
        # Each of the 3 options should appear ~333 times out of 1000.
        for i in seq:
            self.assertGreater(observed[i], 250, self.seed_msg)
            self.assertLess(observed[i], 400, self.seed_msg)
    def test_gauss(self):
        """Sanity-check gauss(): roughly half the mass on each side of mu and
        roughly 68% within one standard deviation."""
        mu = 10
        sigma = 5
        num_above = 0
        num_below = 0
        in_std_dev = 0
        outside_std_dev = 0
        iterations = 1000
        for _ in xrange(iterations):
            x = spar_random.gauss(mu, sigma)
            # test mu
            if x >= mu:
                num_above += 1
            else:
                num_below += 1
            # test sigma
            if abs(x - mu) <= sigma:
                in_std_dev += 1
            else:
                outside_std_dev += 1
        ratio1 = float(num_above) / float(iterations)
        ratio2 = float(num_below) / float(iterations)
        self.assertGreater(ratio1, 0.45, self.seed_msg)
        self.assertGreater(ratio2, 0.45, self.seed_msg)
        self.assertLess(ratio1, 0.55, self.seed_msg)
        self.assertLess(ratio2, 0.55, self.seed_msg)
        ratio3 = float(in_std_dev) / float(iterations)
        ratio4 = float(outside_std_dev) / float(iterations)
        # ~68.3% of a Gaussian lies within one sigma of the mean.
        self.assertGreater(ratio3, 0.63, self.seed_msg)
        self.assertGreater(ratio4, 0.27, self.seed_msg)
    def test_sample(self):
        """sample() must return size-2 ordered samples without replacement,
        cover all possibilities, and be roughly uniform over them."""
        pop_size = 10
        sample_size = 2
        num_possible_samples = 10 * (10 - 1)
        population = range(pop_size)
        counts = collections.Counter()
        num_iterations = num_possible_samples * 1000
        for _ in xrange(num_iterations):
            sample = spar_random.sample(population, sample_size)
            self.assertEqual(len(sample), sample_size)
            sample_set = set(sample)
            # No repeated elements within one sample.
            self.assertEqual(len(sample_set), sample_size)
            for s in sample:
                self.assertIn(s, population)
            counts[tuple(sample)] += 1
        # Every ordered pair (i, j), i != j, should have been drawn.
        self.assertEqual(len(counts), num_possible_samples)
        x = 0
        for i in range(pop_size):
            for j in range(pop_size):
                if i != j:
                    self.assertIn( (i,j), counts)
                    x += 1
        self.assertEqual(x, num_possible_samples)
        for sample in counts:
            # NOTE(review): this works out to 0.01 while the true uniform
            # proportion is 1/90 ~= 0.0111; the wide 0.5x-1.5x bounds still
            # contain it, so the check passes — presumably intentional slack.
            expected_proportion = num_possible_samples * 10 / num_iterations
            proportion = counts[sample] / num_iterations
            self.assertGreater(proportion, expected_proportion * 0.5)
            self.assertLess(proportion, expected_proportion * 1.5)
    def test_randbit(self):
        """randbit() must return only 0/1 with roughly equal frequency."""
        iterations = 1000
        num_1s = 0
        num_0s = 0
        for _ in xrange(iterations):
            b = spar_random.randbit()
            self.assertIn(b, [0,1], self.seed_msg)
            if b:
                num_1s += 1
            else:
                num_0s += 1
        ratio1 = float(num_1s) / float(iterations)
        ratio0 = float(num_0s) / float(iterations)
        self.assertGreater(ratio1, 0.4, self.seed_msg)
        self.assertGreater(ratio0, 0.4, self.seed_msg)
|
import tensorflow as tf
import argparse
from itertools import chain
import json
import pickle
import os
import logging
from scipy.stats import norm
import numpy as np
################# Compute Unpopularity Matching #################
def unpop_matching_score(user_unpop, pos_unpop, neg_unpop, pos_unpop_norm, neg_unpop_norm,
                         alpha=5, beta=5, std_scale=2.5, norm_power=3):
    """
    Compute unpopularity-matching scores between a user and a (pos, neg) item pair.

        score = weight * unpop_norm**norm_power * N(item_unpop; user_mean, std_scale*user_std)

    :param user_unpop: (mean, std) of the user's unpopularity distribution
    :param pos_unpop: raw unpopularity of the positive item
    :param neg_unpop: raw unpopularity of the negative item
    :param pos_unpop_norm: normalized unpopularity of the positive item
    :param neg_unpop_norm: normalized unpopularity of the negative item
    :param alpha: weight on the positive score (previously hard-coded to 5)
    :param beta: weight on the negative score (previously hard-coded to 5)
    :param std_scale: Gaussian width multiplier; dataset-dependent
        (citeulike: 1.5 ; pinterest: 2.5 ; ml-1m: 2.5)
    :param norm_power: exponent on the normalized unpopularity; dataset-dependent
        (citeulike: 7/9 ; pinterest & ml-1m: 2/3/4)
    :returns: (pos_unpop_mtc, neg_unpop_mtc)
    """
    user_mean, user_std = user_unpop
    # pos_item
    pos_match = norm.pdf(pos_unpop, loc=user_mean, scale=std_scale * user_std)
    pos_unpop_mtc = alpha * np.power(pos_unpop_norm, norm_power) * pos_match
    # neg_item
    neg_match = norm.pdf(neg_unpop, loc=user_mean, scale=std_scale * user_std)
    neg_unpop_mtc = beta * np.power(neg_unpop_norm, norm_power) * neg_match
    return pos_unpop_mtc, neg_unpop_mtc
################# Compute Unpopularity Matching #################
def add_to_collection(names, values):
    """
    Adds multiple elements to a given collection(s)
    :param names: str or list of collections
    :param values: tensor or list of tensors to add to collection
    """
    if isinstance(names, str):
        names = [names]
    # BUG FIX: the docstring promises a single tensor may be passed, but the
    # old check only wrapped `str` values, so a lone tensor would be iterated
    # element-wise instead of added as one value.
    if not isinstance(values, (list, tuple)):
        values = [values]
    for name in names:
        for value in values:
            tf.add_to_collection(name, value)
class GraphKeys(object):
    """
    Custom GraphKeys; primarily to be backwards compatable incase tensorflow
    changes it. Also to add my own names
    https://github.com/tensorflow/tensorflow/blob/r1.1/tensorflow/python/framework/ops.py#L3921
    """
    TRAINABLE_VARIABLES = "trainable_variables"
    PLACEHOLDER = 'placeholder'
    PREDICTION = 'prediction'
    # NOTE(review): name is misspelled ('ATTTENTION'); renaming would break
    # any external code that looks it up, so it is only flagged here.
    ATTTENTION = 'attention'
    TRAIN_OP = 'train_op'
    EVAL_STEP = 'eval_step'
    LOSSES = 'losses'
    WEIGHTS = 'weights'
    BIASES = 'biases'
    REG_WEIGHTS = 'reg_weights'
    USER_WEIGHTS = 'user_weights'
    ITEM_WEIGHTS = 'item_weights'
    GRADIENTS = 'gradients'
    ########## These are newly added ###########
    # Regularization l1/l2 Penalty that would be added
    LOSS_REG = 'regularization_losses'
    # Loss Value without Penalty
    LOSS_NO_REG = 'loss'
    # Keys for the activation of a layer
    ACTIVATIONS = 'activations'
    # Keys for prior to applying the activation function of a layer
    PRE_ACTIVATIONS = 'pre_activations'
    SUMMARIES = 'summaries'
    METRIC_UPDATE = 'metric_update'
    METRIC = 'metric'
    # Same collection key as TRAIN_OP above.
    TRAIN = 'train_op'
# List of optimizer classes mappings
# List of optimizer classes mappings (TF1 APIs; tf.contrib is TF1-only and
# was removed in TF2).
OPTIMIZER = {
    # learning_rate=0.001, beta1=0.9, beta2=0.999
    'adam': tf.train.AdamOptimizer,
    # Lazy Adam only updates momentum estimators on values used; it may cause
    # different results than adam
    'lazyadam': tf.contrib.opt.LazyAdamOptimizer,
    # learning_rate, initial_accumulator_value=0.1
    'adagrad': tf.train.AdagradOptimizer,
    # learning_rate, decay=0.9, momentum=0.0
    'rmsprop': tf.train.RMSPropOptimizer,
    # learning_rate, momentum, use_nesterov=False
    'momentum': tf.train.MomentumOptimizer,
    # learning_rate=0.001, rho=0.95, epsilon=1e-08
    'adadelta': tf.train.AdadeltaOptimizer,
    'sgd': tf.train.GradientDescentOptimizer,
}
# Hyperparameters for various optimizers
# learning_rate is for all
# Hyperparameters for various optimizers
# learning_rate is for all
# Only optimizers with extra hyperparameters beyond learning_rate are listed;
# 'sgd' and 'adagrad' (valid --optimizer choices) intentionally have no entry.
_optimizer_args = {
    'adam': ['beta1', 'beta2', 'epsilon'],
    'lazyadam': ['beta1', 'beta2', 'epsilon'],
    'momentum': ['momentum', 'use_nesterov'],
    'rmsprop': ['momentum', 'decay'],  ### Choose this in train.py
    'adadelta': ['rho']
}
def get_optimizer_argparse():
    """
    Build a (non-helping) parser exposing every optimizer hyperparameter flag.
    Intended to be composed into a main parser via `parents=[...]`.
    """
    parser = argparse.ArgumentParser(add_help=False)
    group = parser.add_argument_group('OPTIMIZATION', description='Hyperparameters')
    group.add_argument('--optimizer', default='adam', help='SGD optimizer',
                       choices=OPTIMIZER.keys())
    # (flag, default, help) triples for the float-valued hyperparameters.
    # Split into two runs so --use_nesterov (the lone boolean switch) keeps
    # its position in the --help output.
    head_flags = [
        ('--learning_rate', 0.001, 'learning rate [All]'),
        ('--momentum', 0.9, 'Momentum value [Momentum/RMSProp]'),
    ]
    tail_flags = [
        ('--beta1', 0.9, 'beta 1 hyperparameter [Adam]'),
        ('--beta2', 0.999, 'beta 1 hyperparameter [Adam]'),
        ('--epsilon', 1e-08, 'Epsilon for numerical stability [Adam]'),
        ('--decay', 0.9, 'decay rate hyperparameter [RMSProp]'),
        ('--rho', 0.95, 'rho hyperparameter [Adadelta]'),
    ]
    for flag, default, help_text in head_flags:
        group.add_argument(flag, default=default, type=float, help=help_text)
    group.add_argument('--use_nesterov', default=False, action='store_true',
                       help='Use nesterov momentum [Momentum]')
    for flag, default, help_text in tail_flags:
        group.add_argument(flag, default=default, type=float, help=help_text)
    return parser
def _preprocess_args(FLAG, remove_attrs, keep_attrs, keyname):
"""
Note modifies inplace. Removes the attributes from a given class object and
consolidates list of keep_attrs to a single dictionary and sets the
attribute in the object with keyname.
for FLAGS = parser.parse_args()
parsed_obj=FLAGS ==> Add attr and DEL attr in FLAG
:param parsed_obj: object to access via attributes
:param remove_attrs: iterable of keys of attributes to remove
:param keep_attrs: iterable of keys to add to a dict and add keyname in
namespace
:param keyname: str, name of key to add keep_attrs to as a dict
"""
# getattr(obj, attr) == obj.attr
args = {attr: getattr(FLAG, attr) for attr in keep_attrs}
# setattr(obj, name, value)
setattr(FLAG, keyname, args)
for attr in remove_attrs:
delattr(FLAG, attr)
def preprocess_args(FLAGS):
    """Fold the optimizer-specific CLI flags into FLAGS.optimizer_params.

    Keeps only the hyperparameters relevant to FLAGS.optimizer and strips all
    optimizer flags from the namespace.
    """
    # BUG FIX: use .get with a default — 'sgd' and 'adagrad' are valid
    # --optimizer choices but have no entry in _optimizer_args, so direct
    # indexing raised KeyError for them.
    _preprocess_args(FLAG=FLAGS,
                     remove_attrs=set(chain.from_iterable(_optimizer_args.values())),
                     keep_attrs=_optimizer_args.get(FLAGS.optimizer, []),
                     keyname='optimizer_params')
class BaseConfig(object):
    """Lightweight experiment-config container persisted as JSON + pickle.

    Any keyword arguments passed to the constructor become attributes.  The
    pickle file preserves original Python values; the JSON file stringifies
    anything that is not an int/float and exists for human inspection and as
    a load fallback.
    """
    # Directory the config files are written to; set per instance.
    save_directory = None
    # Public attribute names that are machinery, not config fields.
    _IGNORE = ['fields', 'save', 'load']

    # Set Custom Parameters by name with init
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    @property
    def fields(self):
        """
        Get all fields/properties stored in this config class
        """
        return [m for m in dir(self)
                if not m.startswith('_') and m not in self._IGNORE]

    def save(self):
        """
        Config is dumped as a json file (readable) and a pickle file (exact).
        """
        # FIX: use context managers — the old code never closed the files.
        with open('%s/config.json' % self.save_directory, 'w') as json_file:
            json.dump(self._get_dict(), json_file, sort_keys=True, indent=2)
        with open('%s/config.pkl' % self.save_directory, 'wb') as pkl_file:
            pickle.dump({key: getattr(self, key) for key in self.fields},
                        pkl_file, pickle.HIGHEST_PROTOCOL)

    def load(self):
        """
        Load config, preferring the pickle file and falling back to JSON.
        """
        try:
            # BUG FIX: open in binary mode ('rb') — the old text-mode open
            # made pickle.load always fail, silently forcing the JSON
            # fallback (which loses non-numeric value types).
            with open('%s/config.pkl' % self.save_directory, 'rb') as pkl_file:
                d = pickle.load(pkl_file)
        except Exception:
            with open('%s/config.json' % self.save_directory) as json_file:
                d = json.load(json_file)
        self.__dict__.update(d)

    def _get_dict(self):
        # ints/floats survive as-is; everything else is stringified for JSON.
        return {key: getattr(self, key) if isinstance(getattr(self, key), (int, float))
                else str(getattr(self, key)) for key in self.fields}

    def __repr__(self):
        return json.dumps(self._get_dict(), sort_keys=True, indent=2)

    def __str__(self):
        return json.dumps(self._get_dict(), sort_keys=True, indent=2)
def create_exp_directory(cwd=''):
    '''
    Creates a new directory to store experiment to save data
    Folders: XXX, creates directory sequentially (001, 002, ...)

    Parameters
    ----------
    cwd : str
        Base directory under which the numbered folder is created

    Returns
    -------
    exp_dir : str
        The newly created experiment directory (with trailing '/');
        exits the process if all 9999 slots are taken
    '''
    for i in range(1, 10000):
        path = os.path.join(cwd, str(i).zfill(3))
        try:
            # EAFP: attempt the mkdir directly, which removes the
            # exists()-then-mkdir race of the original check-then-create.
            os.mkdir(path)
        except FileExistsError:
            continue
        return path + '/'
    print('Could not create directory for experiments')
    exit(-1)
def get_logging_config(save_directory):
    """Build a logging.config.dictConfig-style dict.

    Console gets INFO+ with a short format; `<save_directory>/log` gets a
    rotating DEBUG+ file (10 MB x 5 backups) with a detailed format.  Only
    the 'tensorflow' logger is wired up explicitly; existing loggers are
    left enabled.
    """
    detailed_fmt = "[%(asctime)s - %(levelname)s:%(name)s]<%(funcName)s>:%(lineno)d: %(message)s"
    console_fmt = "[%(levelname)s:%(name)s]<%(funcName)s>:%(lineno)d: %(message)s"
    console_handler = {
        'class': 'logging.StreamHandler',
        'level': logging.INFO,
        'formatter': 'console',
    }
    file_handler = {
        'class': 'logging.handlers.RotatingFileHandler',
        'level': logging.DEBUG,
        'formatter': 'detailed',
        'filename': "{}/log".format(save_directory),
        'mode': 'a',
        'maxBytes': 10485760,  # 10 MB
        'backupCount': 5,
    }
    return dict(
        version=1,
        formatters={
            'detailed': {'format': detailed_fmt},  # for files
            'console': {'format': console_fmt},    # for the console
        },
        handlers={
            'console': console_handler,
            'file': file_handler,
        },
        loggers={
            'tensorflow': {
                'level': logging.INFO,
                'handlers': ['console', 'file'],
            }
        },
        disable_existing_loggers=False,
    )
import numpy as np
from cma import CMAOptions
from cma import optimization_tools as ot
from cma.evolution_strategy import cma_default_options, CMAEvolutionStrategy
from cma.utilities import utils
from cma.utilities.math import Mh
import time
all_stoppings = [] # accessable via cma.evolution_strategy.all_stoppings, bound to change
def my_fmin(
        x0,
        sigma0,
        options=None,
        args=(),
        gradf=None,
        restarts=0,
        restart_from_best='False',
        incpopsize=2,
        eval_initial_x=False,
        noise_handler=None,
        parallelize=True,
        noise_change_sigma_exponent=1,
        noise_kappa_exponent=0,  # TODO: add max kappa value as parameter
        bipop=False,
        callback=None):
    """Generator variant of ``cma.fmin``: instead of calling an objective
    function directly, candidate solutions are handed to the caller via
    ``yield`` and their f-values are received back through
    ``generator.send``.

    The restart/BIPOP bookkeeping mirrors ``cma.evolution_strategy.fmin``;
    see the cma package documentation for the meaning of the parameters.

    Returns ``es.result + (es.stop(), es, logger)`` once the restart loop
    terminates.
    """
    if 1 < 3:  # try: # pass on KeyboardInterrupt
        fmin_options = locals().copy()  # archive original options
        del fmin_options['x0']
        del fmin_options['sigma0']
        del fmin_options['options']
        del fmin_options['args']
        if options is None:
            options = cma_default_options
        CMAOptions().check_attributes(options)  # might modify options
        # checked that no options.ftarget =
        opts = CMAOptions(options.copy()).complement()

        # normalize `callback` to a list of callables
        if callback is None:
            callback = []
        elif callable(callback):
            callback = [callback]

        # BIPOP-related variables:
        runs_with_small = 0
        small_i = []
        large_i = []
        popsize0 = None  # to be evaluated after the first iteration
        maxiter0 = None  # to be evaluated after the first iteration
        base_evals = 0

        irun = 0
        best = ot.BestSolution()
        # NOTE(review): this local shadows the module-level all_stoppings,
        # so the global list is never filled here -- confirm intended.
        all_stoppings = []
        while True:  # restart loop
            sigma_factor = 1

            # Adjust the population according to BIPOP after a restart.
            if not bipop:
                # BIPOP not in use, simply double the previous population
                # on restart.
                if irun > 0:
                    popsize_multiplier = fmin_options['incpopsize']**(irun - runs_with_small)
                    opts['popsize'] = popsize0 * popsize_multiplier

            elif irun == 0:
                # Initial run is with "normal" population size; it is
                # the large population before first doubling, but its
                # budget accounting is the same as in case of small
                # population.
                poptype = 'small'

            elif sum(small_i) < sum(large_i):
                # An interweaved run with small population size
                poptype = 'small'
                if 11 < 3:  # not needed when compared to irun - runs_with_small
                    restarts += 1  # A small restart doesn't count in the total
                runs_with_small += 1  # _Before_ it's used in popsize_lastlarge

                sigma_factor = 0.01**np.random.uniform()  # Local search
                popsize_multiplier = fmin_options['incpopsize']**(irun - runs_with_small)
                opts['popsize'] = np.floor(popsize0 * popsize_multiplier**(np.random.uniform()**2))
                opts['maxiter'] = min(maxiter0, 0.5 * sum(large_i) / opts['popsize'])
                # print('small basemul %s --> %s; maxiter %s' % (popsize_multiplier, opts['popsize'], opts['maxiter']))

            else:
                # A run with large population size; the population
                # doubling is implicit with incpopsize.
                poptype = 'large'

                popsize_multiplier = fmin_options['incpopsize']**(irun - runs_with_small)
                opts['popsize'] = popsize0 * popsize_multiplier
                opts['maxiter'] = maxiter0
                # print('large basemul %s --> %s; maxiter %s' % (popsize_multiplier, opts['popsize'], opts['maxiter']))

            # recover from a CMA object
            if irun == 0 and isinstance(x0, CMAEvolutionStrategy):
                es = x0
                x0 = es.inputargs['x0']  # for the next restarts
                if np.isscalar(sigma0) and np.isfinite(sigma0) and sigma0 > 0:
                    es.sigma = sigma0
                # debatable whether this makes sense:
                sigma0 = es.inputargs['sigma0']  # for the next restarts
                if options is not None:
                    es.opts.set(options)
                # ignore further input args and keep original options
            else:  # default case
                if irun and eval(str(fmin_options['restart_from_best'])):
                    utils.print_warning('CAVE: restart_from_best is often not useful',
                                        verbose=opts['verbose'])
                    es = MyCMAEvolutionStrategy(best.x, sigma_factor * sigma0, opts)
                else:
                    es = MyCMAEvolutionStrategy(x0, sigma_factor * sigma0, opts)
            # return opts, es
            if (eval_initial_x
                    or es.opts['CMA_elitist'] == 'initial'
                    or (es.opts['CMA_elitist'] and
                        eval_initial_x is None)):
                x = es.gp.pheno(es.mean,
                                into_bounds=es.boundary_handler.repair,
                                archive=es.sent_solutions)
                # caller supplies f(x) via generator.send
                es.f0 = yield x
                es.best.update([x], es.sent_solutions,
                               [es.f0], 1)
                es.countevals += 1
            opts = es.opts  # processed options, unambiguous
            # a hack:
            fmin_opts = CMAOptions("unchecked", **fmin_options.copy())
            for k in fmin_opts:
                # locals() cannot be modified directly, exec won't work
                # in 3.x, therefore
                fmin_opts.eval(k, loc={'N': es.N,
                                       'popsize': opts['popsize']},
                               correct_key=False)

            es.logger.append = opts['verb_append'] or es.countiter > 0 or irun > 0
            # es.logger is "the same" logger, because the "identity"
            # is only determined by the `verb_filenameprefix` option
            logger = es.logger  # shortcut

            if 11 < 3:  # dead branch, kept for reference
                if es.countiter == 0 and es.opts['verb_log'] > 0 and \
                        not es.opts['verb_append']:
                    logger = CMADataLogger(es.opts['verb_filenameprefix']
                                           ).register(es)
                    logger.add()
                es.writeOutput()  # initial values for sigma etc

            if noise_handler:
                if isinstance(noise_handler, type):
                    noisehandler = noise_handler(es.N)
                else:
                    noisehandler = noise_handler
                noise_handling = True
                if fmin_opts['noise_change_sigma_exponent'] > 0:
                    # NOTE(review): `inf` is not imported in this module
                    # (np.inf? math.inf?) -- confirm before enabling noise handling.
                    es.opts['tolfacupx'] = inf
            else:
                noisehandler = ot.NoiseHandler(es.N, 0)  # switched off
                noise_handling = False
            es.noise_handler = noisehandler

            # the problem: this assumes that good solutions cannot take longer than bad ones:
            # with EvalInParallel(objective_function, 2, is_feasible=opts['is_feasible']) as eval_in_parallel:
            if 1 < 3:
                while not es.stop():  # iteration loop
                    # X, fit = eval_in_parallel(lambda: es.ask(1)[0], es.popsize, args, repetitions=noisehandler.evaluations-1)
                    X, fit = yield from es.ask_and_eval(gradf=gradf,
                                                        evaluations=noisehandler.evaluations,
                                                        aggregation=np.median,
                                                        parallel_mode=parallelize)  # treats NaN with resampling if not parallel_mode

                    if 11 < 3 and opts['vv']:  # inject a solution
                        # use option check_point = [0]
                        if 0 * np.random.randn() >= 0:
                            X[0] = 0 + opts['vv'] * es.sigma**0 * np.random.randn(es.N)
                            fit[0] = yield X[0]
                            # print fit[0]
                    if es.opts['verbose'] > 4:
                        if es.countiter > 1 and min(fit) > es.best.last.f:
                            unsuccessful_iterations_count += 1
                            if unsuccessful_iterations_count > 4:
                                utils.print_message('%d unsuccessful iterations'
                                                    % unsuccessful_iterations_count,
                                                    iteration=es.countiter)
                        else:
                            unsuccessful_iterations_count = 0

                    es.tell(X, fit)  # prepare for next iteration
                    if noise_handling:  # it would be better to also use these f-evaluations in tell
                        # NOTE(review): `objective_function` is not defined in
                        # this scope (dropped when fmin became a generator);
                        # this branch would raise NameError -- confirm the
                        # noise handling path is effectively unused.
                        es.sigma *= noisehandler(X, fit, objective_function, es.ask,
                                                 args=args)**fmin_opts['noise_change_sigma_exponent']
                        es.countevals += noisehandler.evaluations_just_done  # TODO: this is a hack, not important though
                        # es.more_to_write.append(noisehandler.evaluations_just_done)
                        if noisehandler.maxevals > noisehandler.minevals:
                            es.more_to_write.append(noisehandler.evaluations)
                        if 1 < 3:
                            # If sigma was above multiplied by the same
                            # factor cmean is divided by here, this is
                            # like only multiplying kappa instead of
                            # changing cmean and sigma.
                            es.sp.cmean *= np.exp(-noise_kappa_exponent * np.tanh(noisehandler.noiseS))
                            es.sp.cmean[es.sp.cmean > 1] = 1.0  # also works with "scalar arrays" like np.array(1.2)
                    for f in callback:
                        f is None or f(es)  # short-circuit: skip None entries
                    es.disp()
                    logger.add(# more_data=[noisehandler.evaluations, 10**noisehandler.noiseS] if noise_handling else [],
                               modulo=1 if es.stop() and logger.modulo else None)
                    if (opts['verb_log'] and opts['verb_plot'] and
                            (es.countiter % max(opts['verb_plot'], opts['verb_log']) == 0 or es.stop())):
                        logger.plot(324)

                # end while not es.stop
                if opts['eval_final_mean']:
                    mean_pheno = es.gp.pheno(es.mean,
                                             into_bounds=es.boundary_handler.repair,
                                             archive=es.sent_solutions)
                    fmean = yield mean_pheno
                    es.countevals += 1
                    es.best.update([mean_pheno], es.sent_solutions, [fmean], es.countevals)

                best.update(es.best, es.sent_solutions)  # in restarted case
                # es.best.update(best)

                this_evals = es.countevals - base_evals
                base_evals = es.countevals

                # BIPOP stats update

                if irun == 0:
                    popsize0 = opts['popsize']
                    maxiter0 = opts['maxiter']
                    # XXX: This might be a bug? Reproduced from Matlab
                    # small_i.append(this_evals)

                if bipop:
                    if poptype == 'small':
                        small_i.append(this_evals)
                    else:  # poptype == 'large'
                        large_i.append(this_evals)

            # final message
            if opts['verb_disp']:
                es.result_pretty(irun, time.asctime(time.localtime()),
                                 best.f)

            irun += 1
            # if irun > fmin_opts['restarts'] or 'ftarget' in es.stop() \
            # if irun > restarts or 'ftarget' in es.stop() \
            all_stoppings.append(dict(es.stop(check=False)))  # keeping the order
            if irun - runs_with_small > fmin_opts['restarts'] or 'ftarget' in es.stop() \
                    or 'maxfevals' in es.stop(check=False) or 'callback' in es.stop(check=False):
                break
            opts['verb_append'] = es.countevals
            opts['popsize'] = fmin_opts['incpopsize'] * es.sp.popsize  # TODO: use rather options?
            try:
                opts['seed'] += 1
            except TypeError:
                pass
        # while irun

        # es.out['best'] = best  # TODO: this is a rather suboptimal type for inspection in the shell
        if irun:
            es.best.update(best)
            # TODO: there should be a better way to communicate the overall best
        return es.result + (es.stop(), es, logger)
        ### 4560
        # TODO refine output, can #args be flexible?
        # is this well usable as it is now?
    else:  # except KeyboardInterrupt: # Exception as e:
        if eval(str(options['verb_disp'])) > 0:
            print(' in/outcomment ``raise`` in last line of cma.fmin to prevent/restore KeyboardInterrupt exception')
        raise KeyboardInterrupt  # cave: swallowing this exception can silently mess up experiments, if ctrl-C is hit
class MyCMAEvolutionStrategy(CMAEvolutionStrategy):
    """CMA-ES subclass whose ``ask_and_eval`` is a generator: candidate
    solutions are yielded to the caller and the corresponding f-values are
    received back via ``generator.send``, replacing the direct objective
    function calls of the parent implementation.
    """
    def ask_and_eval(self, gradf=None, number=None, xmean=None, sigma_fac=1,
                     evaluations=1, aggregation=np.median, kappa=1, parallel_mode=False):
        """Sample ``number`` (default: popsize) candidates, obtain their
        f-values from the caller via ``yield``, and return ``(X, fit)``.

        Mirrors ``CMAEvolutionStrategy.ask_and_eval`` (rejection sampling of
        infeasible solutions, optional selective mirrors, aggregation of
        repeated noisy evaluations); see the cma documentation for details.
        """
        # initialize
        popsize = self.sp.popsize
        if number is not None:
            popsize = int(number)

        if self.opts['CMA_mirrormethod'] == 1:  # direct selective mirrors
            nmirrors = Mh.sround(self.sp.lam_mirr * popsize / self.sp.popsize)
            self._mirrormethod1_done = self.countiter
        else:
            # method==0 unconditional mirrors are done in ask_geno
            # method==2 delayed selective mirrors are done via injection
            nmirrors = 0
        assert nmirrors <= popsize // 2
        self.mirrors_idx = np.arange(nmirrors)  # might never be used
        is_feasible = self.opts['is_feasible']

        # do the work
        fit = []  # or np.NaN * np.empty(number)
        X_first = self.ask(popsize, xmean=xmean, gradf=gradf, args=[])
        if xmean is None:
            xmean = self.mean  # might have changed in self.ask
        X = []
        if parallel_mode:
            # hand the whole population to the caller at once
            fit_first = yield X_first
            # the rest is only book keeping and warnings spitting
            """
            if hasattr(func, 'last_evaluations'):
                self.countevals += func.last_evaluations - self.popsize
            elif hasattr(func, 'evaluations'):
                if self.countevals < func.evaluations:
                    self.countevals = func.evaluations - self.popsize
            """
            if nmirrors and self.opts['CMA_mirrormethod'] > 0 and self.countiter < 2:
                utils.print_warning(
                    "selective mirrors will not work in parallel mode",
                    "ask_and_eval", "CMAEvolutionStrategy")
            if evaluations > 1 and self.countiter < 2:
                utils.print_warning(
                    "aggregating evaluations will not work in parallel mode",
                    "ask_and_eval", "CMAEvolutionStrategy")
        else:
            fit_first = len(X_first) * [None]
        for k in range(popsize):
            x, f = X_first.pop(0), fit_first.pop(0)
            rejected = -1
            while f is None or not is_feasible(x, f):  # rejection sampling
                if parallel_mode:
                    utils.print_warning(
                        "rejection sampling will not work in parallel mode"
                        " unless the parallel_objective makes a distinction\n"
                        "between called with a numpy array vs a list (of"
                        " numpy arrays) as first argument.",
                        "ask_and_eval", "CMAEvolutionStrategy")
                rejected += 1
                if rejected:  # resample
                    x = self.ask(1, xmean, sigma_fac)[0]
                elif k >= popsize - nmirrors:  # selective mirrors
                    if k == popsize - nmirrors:
                        self.mirrors_idx = np.argsort(fit)[-1:-1 - nmirrors:-1]
                    x = self.get_mirror(X[self.mirrors_idx[popsize - 1 - k]])

                # constraints handling test hardwired ccccccccccc
                length_normalizer = 1
                # zzzzzzzzzzzzzzzzzzzzzzzzz
                if 11 < 3:
                    # for some unclear reason, this normalization does not work as expected: the step-size
                    # becomes sometimes too large and overall the mean might diverge. Is the reason that
                    # we observe random fluctuations, because the length is not selection relevant?
                    # However sigma-adaptation should mainly work on the correlation, not the length?
                    # Or is the reason the deviation of the direction introduced by using the original
                    # length, which also can effect the measured correlation?
                    # Update: if the length of z in CSA is clipped at chiN+1, it works, but only sometimes?
                    length_normalizer = self.N**0.5 / self.mahalanobis_norm(x - xmean)  # self.const.chiN < N**0.5, the constant here is irrelevant (absorbed by kappa)
                    # print(self.N**0.5 / self.mahalanobis_norm(x - xmean))
                    # self.more_to_write += [length_normalizer * 1e-3, length_normalizer * self.mahalanobis_norm(x - xmean) * 1e2]

                # obtain the f-value from the caller instead of calling func
                if kappa == 1:
                    f = yield x
                else:
                    f = yield xmean + kappa * length_normalizer * (x - xmean)
                if is_feasible(x, f) and evaluations > 1:
                    # aggregate repeated (noisy) evaluations of the same solution
                    _f = []
                    for _i in range(int(evaluations - 1)):
                        if kappa == 1:
                            __f = yield x
                        else:
                            __f = yield xmean + kappa * length_normalizer * (x - xmean)
                        _f.append(__f)
                    f = aggregation([f] + _f)
                if (rejected + 1) % 1000 == 0:
                    utils.print_warning(' %d solutions rejected (f-value NaN or None) at iteration %d' %
                                        (rejected, self.countiter))
            fit.append(f)
            X.append(x)
        self.evaluations_per_f_value = int(evaluations)
        if any(f is None or np.isnan(f) for f in fit):
            idxs = [i for i in range(len(fit))
                    if fit[i] is None or np.isnan(fit[i])]
            utils.print_warning("f-values %s contain None or NaN at indices %s"
                                % (str(fit[:30]) + ('...' if len(fit) > 30 else ''),
                                   str(idxs)),
                                'ask_and_tell',
                                'CMAEvolutionStrategy',
                                self.countiter)
        return X, fit
'''
Loading IOC codes DataFrame
Your task here is to prepare a DataFrame ioc_codes from a comma-separated values (CSV) file.
Initially, ioc_codes has 200 rows (one for each country) and 3 columns: 'Country', 'NOC', & 'ISO code'.
For the analysis that follows, you want to keep only the useful columns from ioc_codes: 'Country' and 'NOC' (the column 'NOC' contains three-letter codes representing each country).
'''
# Import pandas
import pandas as pd

# Path to the CSV holding the IOC country codes: file_path
file_path = 'Summer Olympic medallists 1896 to 2008 - IOC COUNTRY CODES.csv'

# Load the raw table and keep only the columns needed downstream: ioc_codes
ioc_codes = pd.read_csv(file_path)[['Country', 'NOC']]

# Show the first and last 5 rows of ioc_codes
print(ioc_codes.head())
print(ioc_codes.tail())
from discord.ext import commands
import discord
import asyncio
class ClearCommand(commands.Cog):
    """Cog providing the ``청소`` (clear) command for bulk-deleting messages."""

    def __init__(self, bot: commands.Bot):
        self.bot = bot

    @commands.command(name="청소", aliases=['clear'])
    async def clear(self, ctx: commands.Context, cnt: str = None):
        """Delete the most recent ``cnt`` messages from the current channel."""
        if not cnt:
            await ctx.send('사용법: -청소 <청소할 채팅 수>')
        elif cnt.isnumeric():
            lst = await ctx.message.channel.history(limit=int(cnt)).flatten()
            amount = len(lst)  # remember the total before the list shrinks
            # Discord's bulk deletion is limited to 100 messages per call,
            # so delete in chunks of 99.
            while len(lst) > 99:
                await ctx.message.channel.delete_messages(lst[:99])
                # BUGFIX: the original ran ``lst.remove(i)`` for i in
                # range(0, 99), which tries to remove *integers* from a list
                # of Message objects (ValueError) and never updated
                # ``amount``, so the while loop could not terminate.
                del lst[:99]
            if lst:  # avoid a bulk-delete call with an empty list
                await ctx.message.channel.delete_messages(lst)
            embed = discord.Embed(title=f"메시지 {amount}개가 삭제되었습니다.")
            await ctx.send(embed=embed)
        else:
            await ctx.send('청소할 개수가 문자입니다')

    async def cog_check(self, ctx: commands.Context):
        """Allow the command only in guilds, for administrators or the bot owner."""
        if not ctx.guild:
            return False
        if not ctx.author.guild_permissions.administrator:
            return await self.bot.is_owner(ctx.author)
        return True
"""
Decorator to convert units of functions in /physics methods
"""
__all__ = ["angular_freq_to_hz"]
import astropy.units as u
import functools
import inspect
from plasmapy.utils.decorators.helpers import preserve_signature
def angular_freq_to_hz(fn):
    """
    A decorator that adds to a function the ability to convert the function's return from
    angular frequency (rad/s) to frequency (Hz).

    A kwarg `to_hz` is added to the function's signature, with a default value of `False`.
    The keyword is also added to the function's docstring under the **"Other Parameters"**
    heading.

    Parameters
    ----------
    fn : function
        The function to be decorated

    Raises
    ------
    ValueError
        If `fn` has already defined a kwarg `to_hz`

    Returns
    -------
    callable
        The decorated function

    Notes
    -----
    * If `angular_freq_to_hz` is used with decorator
      :func:`~plasmapy.utils.decorators.validate_quantities`, then
      `angular_freq_to_hz` should be used inside
      :func:`~plasmapy.utils.decorators.validate_quantities` but special
      consideration is needed for setup. The following is an example of an
      appropriate setup::

          import astropy.units as u
          from plasmapy.utils.decorators.converter import angular_freq_to_hz
          from plasmapy.utils.decorators.validators import validate_quantities

          @validate_quantities(validations_on_return={'units': [u.rad / u.s, u.Hz]})
          @angular_freq_to_hz
          def foo(x: u.rad / u.s) -> u.rad / u.s:
              return x

      Adding `u.Hz` to the allowed units allows the converted quantity to pass
      the validations.

    Examples
    --------
    >>> import astropy.units as u
    >>> from plasmapy.utils.decorators.converter import angular_freq_to_hz
    >>>
    >>> @angular_freq_to_hz
    ... def foo(x):
    ...     return x
    >>>
    >>> foo(5 * u.rad / u.s, to_hz=True)
    <Quantity 0.79577472 Hz>
    >>>
    >>> foo(-1 * u.rad / u.s, to_hz=True)
    <Quantity -0.15915494 Hz>

    Decoration also works with methods

    >>> class Foo:
    ...     def __init__(self, x):
    ...         self.x = x
    ...
    ...     @angular_freq_to_hz
    ...     def bar(self):
    ...         return self.x
    >>>
    >>> foo = Foo(0.5 * u.rad / u.s)
    >>> foo.bar(to_hz=True)
    <Quantity 0.07957747 Hz>
    """
    # NOTE: fixed the docstring example above, which was missing the colon
    # after ``def foo(x: u.rad / u.s) -> u.rad / u.s``.

    # raise exception if fn uses the 'to_hz' kwarg
    sig = inspect.signature(fn)
    if "to_hz" in sig.parameters:
        raise ValueError(
            f"Wrapped function '{fn.__name__}' can not use keyword 'to_hz'."
            f" Keyword reserved for decorator functionality."
        )

    # make new signature for fn with the `to_hz` keyword appended
    new_params = sig.parameters.copy()
    new_params["to_hz"] = inspect.Parameter(
        "to_hz", inspect.Parameter.POSITIONAL_OR_KEYWORD, default=False
    )
    new_sig = inspect.Signature(
        parameters=new_params.values(), return_annotation=sig.return_annotation
    )
    fn.__signature__ = new_sig

    @preserve_signature
    @functools.wraps(fn)
    def wrapper(*args, to_hz=False, **kwargs):
        _result = fn(*args, **kwargs)
        if to_hz:
            # rad/s -> Hz via the cycles-per-second equivalency
            return _result.to(u.Hz, equivalencies=[(u.cy / u.s, u.Hz)])
        return _result

    # Appended to the wrapped function's docstring (fixed "to to" typo).
    added_doc_bit = """
    Other Parameters
    ----------------
    to_hz: bool
        Set `True` to convert function output from angular frequency to Hz
    """
    if wrapper.__doc__ is not None:
        wrapper.__doc__ += added_doc_bit
    else:
        wrapper.__doc__ = added_doc_bit

    return wrapper
|
import turtle as trt

# Introduce the module alias trt first,
# before presenting forward, backward, left/right, done, Turtle() and import.
tortue = trt.Turtle()

# Draw a 70x70 square: four identical side-and-turn steps.
for _ in range(4):
    tortue.forward(70)
    tortue.left(90)

trt.done()
|
import collections
from nixui import api, slot_mapper
Update = collections.namedtuple('Update', ['option', 'old_value', 'new_value'])
class StateModel:
    """Tracks current option values plus an undo-able history of edits."""

    def __init__(self):
        self.update_history = []  # stack of applied Updates, newest last
        self.current_values = api.get_option_values_map()
        # TODO: is including the slotmapper overloading the StateModel? What are the alternatives?
        self.slotmapper = slot_mapper.SlotMapper()
        self.slotmapper.add_slot('value_changed', self.record_update)
        self.slotmapper.add_slot('undo', self.undo)

    def get_value(self, option):
        """Return the current value for ``option``."""
        return self.current_values[option]

    def record_update(self, option, new_value):
        """Record a change of ``option``; no-op if the value is unchanged."""
        old_value = self.current_values[option]
        if old_value != new_value:
            self.update_history.append(
                Update(option, old_value, new_value)
            )
            self.current_values[option] = new_value
            self.slotmapper('update_recorded')(option, old_value, new_value)

    def undo(self, *args, **kwargs):
        """Revert the most recent recorded update, if any."""
        if not self.update_history:
            # BUGFIX: popping from an empty history raised IndexError when
            # the undo slot fired with nothing left to undo.
            return
        last_update = self.update_history.pop()
        self.current_values[last_update.option] = last_update.old_value
        self.slotmapper('undo_performed')(last_update.option, last_update.old_value, last_update.new_value)
        self.slotmapper(('update_field', last_update.option))(last_update.old_value)
|
from django.test import TestCase
# from django.contrib.auth.models import User
from blog.models import Post, Comment
import blog.tests.helper as h
class CommentTestCase(TestCase):
    """Unit tests for the ``Comment`` model."""

    def setUp(self):
        self.user = h.create_user()
        self.post = h.create_post(self.user)
        self.comment = h.create_comment(self.post)
        # Removed the unused ``self.data = self.post.__dict__`` assignment.

    def test_can_make_comment(self):
        # Renamed from test_can_make_post: this case exercises Comment.
        self.assertIsInstance(self.comment, Comment)
        # Use str() rather than calling __str__ directly.
        self.assertEqual(str(self.comment), f"{self.comment.commenter}: {self.comment.comment}")

    def test_comment_admin_and_unadmin(self):
        self.assertFalse(self.comment.by_admin)
        self.comment.admin()
        self.assertTrue(self.comment.by_admin)
        self.comment.unadmin()
        self.assertFalse(self.comment.by_admin)
class PostTestCase(TestCase):
    """Unit tests for the ``Post`` model."""

    def setUp(self):
        self.user = h.create_user()
        self.post = h.create_post(self.user)

    def test_can_make_post(self):
        self.assertIsInstance(self.post, Post)
        # Use str()/repr() rather than calling the dunder methods directly.
        self.assertEqual(str(self.post), self.post.title)
        self.assertEqual(repr(self.post), f'{self.post.title} by {self.post.author}')

    def test_post_get_absolute_url(self):
        self.assertEqual(
            self.post.get_absolute_url(),
            '/blog/post/1/',
        )

    def test_post_pin_and_unpin(self):
        self.assertFalse(self.post.pinned)
        self.post.pin()
        self.assertTrue(self.post.pinned)
        self.post.unpin()
        self.assertFalse(self.post.pinned)

    def test_post_admin_and_unadmin(self):
        self.assertFalse(self.post.admin_post)
        self.post.admin()
        self.assertTrue(self.post.admin_post)
        self.post.unadmin()
        self.assertFalse(self.post.admin_post)
|
"""
Animates the Guided Search solution of a maze.
If you set `refresh_rate` to zero, then it will remove all delays. As it is, the
small 0.01 refresh rate shows the progress of the search.
if you set `stop_end` to be True, the search will terminate at the target
"""
import time
import random
from ch07.maze import Maze, to_networkx
from ch07.viewer import Viewer
from ch07.dependencies import tkinter_error
class GuidedSearchSolver():
    """
    Solves a maze by taking advantage of the Manhattan distance to the
    solution (see `distance_to`) as the search priority.
    """
    def __init__(self, master, maze, size, refresh_rate=0.01, stop_end=False):
        # master: Tk root window; maze: ch07.maze.Maze; size: cell size for the viewer
        self.master = master
        self.viewer = Viewer(maze, size)
        self.marked = {}      # cells already enqueued/visited
        self.node_from = {}   # back-pointers for path reconstruction
        self.size = maze.num_rows * maze.num_cols
        self.g = to_networkx(maze)
        self.start = maze.start()
        self.end = maze.end()
        self.stop_end = stop_end
        self.refresh_rate = refresh_rate
        master.after(0, self.animate)
        self.canvas = self.viewer.view(master)

    def animate(self):
        """Start animation by initiating the guided search."""
        self.guided_search(self.start)

        # draw BACK edges to solution
        pos = self.end
        while pos != self.start:
            self.viewer.color_cell(pos, 'lightgray')
            if pos in self.node_from:
                pos = self.node_from[pos]
            else:
                # Turns out there was no solution...
                break
        self.master.update()

    def distance_to(self, to_cell):
        """Return Manhattan distance between cells."""
        return abs(self.end[0] - to_cell[0]) + abs(self.end[1] - to_cell[1])

    def guided_search(self, pos):
        """use Manhattan distance to maze end as priority in PQ to guide search."""
        from ch04.heap import PQ
        pq = PQ(self.size)
        self.viewer.color_cell(pos, 'blue')

        src = self.start
        dist_to = {}
        dist_to[src] = 0

        # Using a MAX PRIORITY QUEUE means we rely on negative distance to
        # choose the one that is closest...
        self.marked[src] = True
        pq.enqueue(src, -self.distance_to(src))
        while not pq.is_empty():
            cell = pq.dequeue()
            self.master.update()
            if self.refresh_rate:
                time.sleep(self.refresh_rate)

            if self.stop_end and cell == self.end:
                self.marked[cell] = True
                self.viewer.color_cell(cell, 'blue')
                return True

            for next_cell in self.g.neighbors(cell):
                if not next_cell in self.marked:
                    self.node_from[next_cell] = cell
                    dist_to[next_cell] = dist_to[cell] + 1
                    pq.enqueue(next_cell, -self.distance_to(next_cell))
                    self.marked[next_cell] = True
                    self.viewer.color_cell(next_cell, 'blue')
        return False
#######################################################################
if __name__ == '__main__':
    if tkinter_error:
        print('tkinter is not installed so unable to launch Guided solver application')
    else:
        import tkinter
        random.seed(15)  # fixed seed for a reproducible maze
        m = Maze(60,60)
        root = tkinter.Tk()
        # note: despite the name, this runs the guided search, not DFS
        dfs = GuidedSearchSolver(root, m, 15, refresh_rate=0.001, stop_end=True)
        root.mainloop()
|
# Count how many values in input.txt are strictly greater than the value on
# the preceding line.
with open("input.txt") as file:
    increases = 0
    previous = None
    for line in file:
        current = int(line)
        # BUGFIX: the original primed the loop with number=0 and
        # increases=-1, which miscounts whenever the first value is <= 0
        # and reports -1 for an empty file.
        if previous is not None and current > previous:
            increases += 1
        previous = current
print("Number of Increases: " + str(increases))
""" Class description goes here. """
from collections import namedtuple
import logging
from dataclay.commonruntime.Runtime import getRuntime
__author__ = 'Alex Barcelo <alex.barcelo@bsc.es>'
__copyright__ = '2016 Barcelona Supercomputing Center (BSC-CNS)'
logger = logging.getLogger(__name__)

# Attribute-name prefix and method-name markers used to mangle dataClay
# properties into plain Python attribute / implementation names.
DCLAY_PROPERTY_PREFIX = "_dataclay_property_"
DCLAY_GETTER_PREFIX = "$$get"
DCLAY_SETTER_PREFIX = "$$set"
DCLAY_REPLICATED_SETTER_PREFIX = "$$rset"

# Static description of one class property; presumably produced during class
# registration/preprocessing -- TODO confirm against the ExecutionGateway.
PreprocessedProperty = namedtuple('PreprocessedProperty', field_names=[
    'name', 'position', 'type', 'beforeUpdate', 'afterUpdate', 'inMaster'])
class DynamicProperty(property):
    """DataClay implementation of the `property` Python mechanism.

    This class is similar to property but is not expected to be used with
    decorators. Instead, the initialization is done from the ExecutionGateway
    metaclass, containing the required information about the property
    """
    __slots__ = ("p_name",)

    def __init__(self, property_name):
        """Initialize the DynamicProperty with the name of its property.

        Not calling super deliberately.

        The semantics and behaviour changes quite a bit from the property
        built-in, here we only store internally the name of the property and
        use dataClay friendly setters and getters.
        """
        # BUGFIX: this docstring was previously placed *after* the debug call
        # below, which turned it into a no-op string statement rather than the
        # method's docstring.
        logger.debug("Initializing DynamicProperty %s", property_name)
        self.p_name = property_name

    def __get__(self, obj, type_=None):
        """Getter for the dataClay property

        If the object is loaded, perform the getter to the local instance (this
        is the scenario for local instances and Execution Environment fully
        loaded instances).

        If the object is not loaded, perform a remote execution (this is the
        scenario for client remote instances and also Execution Environment
        non-loaded instances, which may either "not-yet-loaded" or remote)
        """
        is_exec_env = getRuntime().is_exec_env()
        logger.debug("Calling getter for property %s in %s", self.p_name,
                     "an execution environment" if is_exec_env else "the client")
        if (is_exec_env and obj.is_loaded()) or (not is_exec_env and not obj.is_persistent()):
            try:
                obj.set_dirty(True)  # set dirty = true for language types like lists, dicts, that are get and modified. TODO: improve this.
                return object.__getattribute__(obj, "%s%s" % (DCLAY_PROPERTY_PREFIX, self.p_name))
            except AttributeError:
                logger.warning("Received AttributeError while accessing property %s on object %r",
                               self.p_name, obj)
                logger.debug("Internal dictionary of the object: %s", obj.__dict__)
                raise
        else:
            return getRuntime().execute_implementation_aux(DCLAY_GETTER_PREFIX + self.p_name, obj, (), obj.get_hint())

    def __set__(self, obj, value):
        """Setter for the dataClay property

        See the __get__ method for the basic behavioural explanation.
        """
        logger.debug("Calling setter for property %s", self.p_name)

        is_exec_env = getRuntime().is_exec_env()
        if (is_exec_env and obj.is_loaded()) or (not is_exec_env and not obj.is_persistent()):
            object.__setattr__(obj, "%s%s" % (DCLAY_PROPERTY_PREFIX, self.p_name), value)
            if is_exec_env:
                obj.set_dirty(True)
        else:
            getRuntime().execute_implementation_aux(DCLAY_SETTER_PREFIX + self.p_name, obj, (value,), obj.get_hint())
class ReplicatedDynamicProperty(DynamicProperty):
    """DynamicProperty variant for replicated properties, carrying optional
    before/after update hooks and master-first update semantics."""

    def __init__(self, property_name, before_method, after_method, in_master):
        logger.debug("Initializing ReplicatedDynamicProperty %s | BEFORE = %s | AFTER = %s | INMASTER = %s", property_name, before_method, after_method, in_master)
        super(ReplicatedDynamicProperty, self).__init__(property_name)
        self.beforeUpdate = before_method
        self.afterUpdate = after_method
        self.inMaster = in_master

    def __set__(self, obj, value):
        """Setter for the dataClay property

        See the __get__ method for the basic behavioural explanation.
        """
        logger.debug("Calling replicated setter for property %s", self.p_name)

        is_client = not getRuntime().is_exec_env()
        if is_client and not obj.is_persistent():
            object.__setattr__(obj, "%s%s" % (DCLAY_PROPERTY_PREFIX, self.p_name), value)
        elif not is_client and not obj.is_loaded():
            getRuntime().execute_implementation_aux(DCLAY_SETTER_PREFIX + self.p_name, obj, (value,), obj.get_hint())
        else:
            if self.inMaster:
                # BUGFIX: log the master location itself, not the bound
                # method object (the call parentheses were missing).
                logger.debug("Calling update in master [%s] for property %s with value %s", obj.get_master_location(), self.p_name, value)
                getRuntime().execute_implementation_aux('__setUpdate__', obj, (obj, self.p_name, value, self.beforeUpdate, self.afterUpdate), obj.get_master_location())
            else:
                logger.debug("Calling update locally for property %s with value %s", self.p_name, value)
                obj.__setUpdate__(obj, self.p_name, value, self.beforeUpdate, self.afterUpdate)
            obj.set_dirty(True)
|
# Copyright 2020 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Common tools to use when creating step files for behave tests."""
import time
from mycroft.messagebus import Message
TIMEOUT = 10
def then_wait(msg_type, criteria_func, context, timeout=None):
    """Wait for a specified time for criteria to be fulfilled.

    Args:
        msg_type: message type to watch
        criteria_func: function returning (matched, debug) for a message
        context: behave context
        timeout: time allowance for a matching message; overrides the
            context's step timeout when provided.

    Returns:
        tuple (bool, str) test status and debug output
    """
    allowance = timeout or context.step_timeout
    deadline = time.monotonic() + allowance
    debug = ''
    while time.monotonic() < deadline:
        for message in context.bus.get_messages(msg_type):
            matched, message_debug = criteria_func(message)
            debug += message_debug
            if matched:
                context.matched_message = message
                context.bus.remove_message(message)
                return True, debug
        context.bus.new_message_available.wait(0.5)
    # Deadline passed without a matching message; hand back the debug trail.
    return False, debug
def then_wait_fail(msg_type, criteria_func, context, timeout=None):
    """Wait for a specified time, failing if criteria is fulfilled.

    Args:
        msg_type: message type to watch
        criteria_func: function deciding whether a message fulfills the case
        context: behave context
        timeout: time allowance for a message fulfilling the criteria

    Returns:
        tuple (bool, str) test status and debug output
    """
    fulfilled, debug = then_wait(msg_type, criteria_func, context, timeout)
    return not fulfilled, debug
def mycroft_responses(context):
    """Collect and format mycroft responses from context.

    Args:
        context: behave context to extract messages from.

    Returns: (str) Mycroft responses including skill and dialog file
    """
    messages = context.bus.get_messages('speak')
    if not messages:
        return ''
    parts = ['Mycroft responded with:\n']
    for message in messages:
        parts.append('Mycroft: ')
        if 'meta' in message.data and 'dialog' in message.data['meta']:
            meta = message.data['meta']
            parts.append('{}.dialog'.format(meta['dialog']))
            parts.append('({})\n'.format(meta.get('skill')))
        parts.append('"{}"\n'.format(message.data['utterance']))
    return ''.join(parts)
def print_mycroft_responses(context):
    """Print the formatted mycroft responses collected from the context bus."""
    responses = mycroft_responses(context)
    print(responses)
def emit_utterance(bus, utt):
    """Emit an utterance message on the bus.

    Args:
        bus (InterceptAllBusClient): bus instance to emit on
        utt: utterance text to send to the intent pipeline
    """
    bus.emit(Message('recognizer_loop:utterance',
                     data={'utterances': [utt],
                           'lang': 'en-us',
                           'session': '',
                           'ident': time.time()},
                     context={'client_name': 'mycroft_listener'}))
def wait_for_dialog(bus, dialogs, context=None, timeout=None):
    """Wait for one of the dialogs given as argument.

    Args:
        bus (InterceptAllBusClient): Bus instance to listen on
        dialogs (list): list of acceptable dialogs
        context (behave Context): optional context providing scenario timeout
        timeout (int): how long to wait for the message, defaults to timeout
                       provided by context or 10 seconds
    """
    if context:
        wait_time = timeout or context.step_timeout
    else:
        wait_time = timeout or TIMEOUT
    deadline = time.monotonic() + wait_time
    while time.monotonic() < deadline:
        for message in bus.get_messages('speak'):
            if message.data.get('meta', {}).get('dialog') in dialogs:
                bus.clear_messages()
                return
        bus.new_message_available.wait(0.5)
    # Timed out: clear any leftover messages before returning.
    bus.clear_messages()
def wait_for_audio_service(context, message_type):
    """Wait for audio.service message that matches type provided.
    May be play, stop, or pause messages
    Args:
        context (behave Context): optional context providing scenario timeout
        message_type (string): final component of bus message in form
            `mycroft.audio.service.{type}
    """
    msg_type = 'mycroft.audio.service.{}'.format(message_type)

    def matches(message):
        return (message.msg_type == msg_type, '')

    passed, debug = then_wait(msg_type, matches, context)
    if not passed:
        debug += mycroft_responses(context)
        if not debug:
            # 'play' requests are reported as failing to 'start' playback.
            action = 'start' if message_type == 'play' else message_type
            debug = "Mycroft didn't {} playback".format(action)
    assert passed, debug
|
from django.conf import settings
from telegram import Bot as TelegramBot, Update
from telegram.ext import Dispatcher, Updater
class Bot:
    """Thin wrapper around a Telegram bot.

    In DEBUG mode updates are fetched by polling; in production a webhook
    is registered and updates are fed to the dispatcher manually.
    """

    def __init__(self, token, url=settings.SITE_DOMAIN):
        self.bot = TelegramBot(token)
        self.dispatcher = None
        if settings.DEBUG:
            # Development: poll the Telegram API for updates.
            self.updater = Updater(token)
            self.dispatcher = self.updater.dispatcher
            self.updater.start_polling()
        else:
            # Production: Telegram pushes updates to our webhook URL.
            webhook_url = '{}/{}/{}/'.format(url, 'bot', token)
            self.bot.set_webhook(webhook_url)
            self.dispatcher = Dispatcher(self.bot, None, workers=0)

    def register(self, handler):
        """Attach a handler to the dispatcher."""
        handler.register(self.dispatcher)

    def webhook(self, update):
        """Process a raw webhook payload dict."""
        parsed = Update.de_json(update, self.bot)
        self.dispatcher.process_update(parsed)
|
# Copyright 2017-2020 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import os
import subprocess
__version__ = "0.7.0.rc6"
# Package root directory (two levels up from this module).
__pkgdir__ = os.path.dirname(os.path.dirname(__file__))
# Populated by _try_init_git_attrs() when running from a git checkout.
__git_commit__ = None
__git_status__ = None
def _try_init_git_attrs():
    """Best-effort initialization of git commit/status attributes.

    Failures (no git, not a repo) are silently ignored; status is only
    attempted after the commit lookup succeeds.
    """
    try:
        _init_git_commit()
    except (OSError, subprocess.CalledProcessError):
        return
    try:
        _init_git_status()
    except (OSError, subprocess.CalledProcessError):
        pass
def _init_git_commit():
    """Set __git_commit__ to the current short commit hash, or None.

    Uses `git -C` (consistent with _init_git_status) so the command runs
    against the repo directory; the original `--work-tree` form resolved
    the git dir from the current working directory instead.
    """
    repo = _guild_repo()
    if repo:
        line = _cmd_out("git -C \"%s\" log -1 --oneline" % repo)
        commit = line.split(" ")[0]
    else:
        commit = None
    globals()["__git_commit__"] = commit
def _guild_repo():
repo = os.path.dirname(os.path.dirname(__file__))
if os.path.isdir(os.path.join(repo, ".git")):
return repo
return None
def _init_git_status():
    """Set __git_status__ to the list of changed files (empty when clean)."""
    repo = _guild_repo()
    raw = _cmd_out("git -C \"%s\" status -s" % repo) if repo else None
    globals()["__git_status__"] = raw.split("\n") if raw else []
def _cmd_out(cmd):
null = open(os.devnull, "w")
out = subprocess.check_output(cmd, stderr=null, shell=True)
return out.decode("utf-8").strip()
def version():
    """Return the package version, annotated with git info in dev checkouts."""
    if not __git_commit__:
        return __version__
    # '*' marks an unclean working tree.
    dirty_marker = "*" if __git_status__ else ""
    return "%s (dev %s%s)" % (__version__, __git_commit__, dirty_marker)
# Populate __git_commit__/__git_status__ at import time (no-op outside a checkout).
_try_init_git_attrs()
|
"""This module contains njitted routines for building histograms.
A histogram is an array with n_bins entry of type HISTOGRAM_DTYPE. Each
feature has its own histogram. A histogram contains the sum of gradients and
hessians of all the samples belonging to each bin.
"""
import numpy as np
from numba import njit
# Per-bin aggregate record: sums of gradients/hessians and sample count.
HISTOGRAM_DTYPE = np.dtype([
    ('sum_gradients', np.float32),
    ('sum_hessians', np.float32),
    ('count', np.uint32),
])
@njit
def _build_histogram_naive(n_bins, sample_indices, binned_feature,
                           ordered_gradients, ordered_hessians):
    """Build histogram in a naive way, without optimizing for cache hit."""
    hist = np.zeros(n_bins, dtype=HISTOGRAM_DTYPE)
    for pos in range(sample_indices.shape[0]):
        target_bin = binned_feature[sample_indices[pos]]
        hist[target_bin]['sum_gradients'] += ordered_gradients[pos]
        hist[target_bin]['sum_hessians'] += ordered_hessians[pos]
        hist[target_bin]['count'] += 1
    return hist
@njit
def _subtract_histograms(n_bins, hist_a, hist_b):
    """Return the element-wise difference hist_a - hist_b."""
    diff = np.zeros(n_bins, dtype=HISTOGRAM_DTYPE)
    for b in range(n_bins):
        diff[b]['sum_gradients'] = hist_a[b]['sum_gradients'] - hist_b[b]['sum_gradients']
        diff[b]['sum_hessians'] = hist_a[b]['sum_hessians'] - hist_b[b]['sum_hessians']
        diff[b]['count'] = hist_a[b]['count'] - hist_b[b]['count']
    return diff
@njit
def _build_histogram(n_bins, sample_indices, binned_feature, ordered_gradients,
                     ordered_hessians):
    """Return histogram for a given feature.

    The main loop is manually unrolled by 4 for speed under numba; the
    remaining samples are handled in a scalar tail loop.
    """
    histogram = np.zeros(n_bins, dtype=HISTOGRAM_DTYPE)
    n_node_samples = sample_indices.shape[0]
    # Largest multiple of 4 <= n_node_samples.
    unrolled_upper = (n_node_samples // 4) * 4
    for i in range(0, unrolled_upper, 4):
        bin_0 = binned_feature[sample_indices[i]]
        bin_1 = binned_feature[sample_indices[i + 1]]
        bin_2 = binned_feature[sample_indices[i + 2]]
        bin_3 = binned_feature[sample_indices[i + 3]]
        histogram[bin_0]['sum_gradients'] += ordered_gradients[i]
        histogram[bin_1]['sum_gradients'] += ordered_gradients[i + 1]
        histogram[bin_2]['sum_gradients'] += ordered_gradients[i + 2]
        histogram[bin_3]['sum_gradients'] += ordered_gradients[i + 3]
        histogram[bin_0]['sum_hessians'] += ordered_hessians[i]
        histogram[bin_1]['sum_hessians'] += ordered_hessians[i + 1]
        histogram[bin_2]['sum_hessians'] += ordered_hessians[i + 2]
        histogram[bin_3]['sum_hessians'] += ordered_hessians[i + 3]
        histogram[bin_0]['count'] += 1
        histogram[bin_1]['count'] += 1
        histogram[bin_2]['count'] += 1
        histogram[bin_3]['count'] += 1
    # Tail: samples left over after the unrolled loop.
    for i in range(unrolled_upper, n_node_samples):
        bin_idx = binned_feature[sample_indices[i]]
        histogram[bin_idx]['sum_gradients'] += ordered_gradients[i]
        histogram[bin_idx]['sum_hessians'] += ordered_hessians[i]
        histogram[bin_idx]['count'] += 1
    return histogram
@njit
def _build_histogram_no_hessian(n_bins, sample_indices, binned_feature,
                                ordered_gradients):
    """Return histogram for a given feature.
    Hessians are not updated (used when hessians are constant).

    Same 4-way manual unrolling as _build_histogram, minus the hessian sums.
    """
    histogram = np.zeros(n_bins, dtype=HISTOGRAM_DTYPE)
    n_node_samples = sample_indices.shape[0]
    # Largest multiple of 4 <= n_node_samples.
    unrolled_upper = (n_node_samples // 4) * 4
    for i in range(0, unrolled_upper, 4):
        bin_0 = binned_feature[sample_indices[i]]
        bin_1 = binned_feature[sample_indices[i + 1]]
        bin_2 = binned_feature[sample_indices[i + 2]]
        bin_3 = binned_feature[sample_indices[i + 3]]
        histogram[bin_0]['sum_gradients'] += ordered_gradients[i]
        histogram[bin_1]['sum_gradients'] += ordered_gradients[i + 1]
        histogram[bin_2]['sum_gradients'] += ordered_gradients[i + 2]
        histogram[bin_3]['sum_gradients'] += ordered_gradients[i + 3]
        histogram[bin_0]['count'] += 1
        histogram[bin_1]['count'] += 1
        histogram[bin_2]['count'] += 1
        histogram[bin_3]['count'] += 1
    # Tail: samples left over after the unrolled loop.
    for i in range(unrolled_upper, n_node_samples):
        bin_idx = binned_feature[sample_indices[i]]
        histogram[bin_idx]['sum_gradients'] += ordered_gradients[i]
        histogram[bin_idx]['count'] += 1
    return histogram
@njit
def _build_histogram_root_no_hessian(n_bins, binned_feature, all_gradients):
    """Special case for the root node
    The root node has to find the split among all the samples from the
    training set. binned_feature and all_gradients already have a consistent
    ordering.
    Hessians are not updated (used when hessians are constant)
    """
    histogram = np.zeros(n_bins, dtype=HISTOGRAM_DTYPE)
    n_node_samples = binned_feature.shape[0]
    # Largest multiple of 4 <= n_node_samples (4-way manual unroll).
    unrolled_upper = (n_node_samples // 4) * 4
    for i in range(0, unrolled_upper, 4):
        bin_0 = binned_feature[i]
        bin_1 = binned_feature[i + 1]
        bin_2 = binned_feature[i + 2]
        bin_3 = binned_feature[i + 3]
        histogram[bin_0]['sum_gradients'] += all_gradients[i]
        histogram[bin_1]['sum_gradients'] += all_gradients[i + 1]
        histogram[bin_2]['sum_gradients'] += all_gradients[i + 2]
        histogram[bin_3]['sum_gradients'] += all_gradients[i + 3]
        histogram[bin_0]['count'] += 1
        histogram[bin_1]['count'] += 1
        histogram[bin_2]['count'] += 1
        histogram[bin_3]['count'] += 1
    # Tail: samples left over after the unrolled loop.
    for i in range(unrolled_upper, n_node_samples):
        bin_idx = binned_feature[i]
        histogram[bin_idx]['sum_gradients'] += all_gradients[i]
        histogram[bin_idx]['count'] += 1
    return histogram
@njit
def _build_histogram_root(n_bins, binned_feature, all_gradients,
                          all_hessians):
    """Special case for the root node
    The root node has to find the split among all the samples from the
    training set. binned_feature and all_gradients and all_hessians already
    have a consistent ordering.
    """
    histogram = np.zeros(n_bins, dtype=HISTOGRAM_DTYPE)
    n_node_samples = binned_feature.shape[0]
    # Largest multiple of 4 <= n_node_samples (4-way manual unroll).
    unrolled_upper = (n_node_samples // 4) * 4
    for i in range(0, unrolled_upper, 4):
        bin_0 = binned_feature[i]
        bin_1 = binned_feature[i + 1]
        bin_2 = binned_feature[i + 2]
        bin_3 = binned_feature[i + 3]
        histogram[bin_0]['sum_gradients'] += all_gradients[i]
        histogram[bin_1]['sum_gradients'] += all_gradients[i + 1]
        histogram[bin_2]['sum_gradients'] += all_gradients[i + 2]
        histogram[bin_3]['sum_gradients'] += all_gradients[i + 3]
        histogram[bin_0]['sum_hessians'] += all_hessians[i]
        histogram[bin_1]['sum_hessians'] += all_hessians[i + 1]
        histogram[bin_2]['sum_hessians'] += all_hessians[i + 2]
        histogram[bin_3]['sum_hessians'] += all_hessians[i + 3]
        histogram[bin_0]['count'] += 1
        histogram[bin_1]['count'] += 1
        histogram[bin_2]['count'] += 1
        histogram[bin_3]['count'] += 1
    # Tail: samples left over after the unrolled loop.
    for i in range(unrolled_upper, n_node_samples):
        bin_idx = binned_feature[i]
        histogram[bin_idx]['sum_gradients'] += all_gradients[i]
        histogram[bin_idx]['sum_hessians'] += all_hessians[i]
        histogram[bin_idx]['count'] += 1
    return histogram
|
class ActionGroups:
    """Interface stub for managing action groups (no-op implementation)."""

    def new(self, name):
        # Stub: intended to create an action group with the given name.
        pass

    def remove(self, action_group):
        # Stub: intended to remove the given action group.
        pass
|
from scipy.spatial import distance as distance_metric
import numpy as np
from fashiondatasets.utils.list import parallel_map
def find_top_k(queries, gallery, most_similar, k=20):
    """For each query, return indices of the k closest (most_similar=True)
    or farthest gallery items by squared euclidean distance."""
    descending = not most_similar
    pairwise = distance_metric.cdist(queries, gallery, "sqeuclidean")
    results = []
    for row in pairwise:
        ranked = sorted(zip(range(len(gallery)), row),
                        key=lambda pair: pair[1], reverse=descending)
        results.append([idx for idx, _ in ranked[:k]])
    return results
def calculate_most_similar(query, gallery,
                           embedding_key=None,
                           id_key=None,
                           k=101,
                           most_similar=True,
                           compare_id_fn=None):
    """Rank gallery items by euclidean distance to the query embedding.

    Returns (query_id, top-k (id, distance) pairs, hits). `hits` is only
    populated when compare_id_fn is given; each entry is
    (id, distance, rank within the full sorted ranking).
    """
    embedding_key = embedding_key or (lambda d: d)
    id_key = id_key or (lambda d: d)
    q_emb = embedding_key(query)
    query_id = id_key(query)
    # (id, euclidean distance) for every gallery entry.
    scored = [(id_key(item), np.linalg.norm(q_emb - embedding_key(item)))
              for item in gallery]
    scored.sort(key=lambda pair: pair[1], reverse=not most_similar)
    top_k = scored[:k]
    hits = []
    if compare_id_fn:
        ranking = [pair[0] for pair in scored]
        for g_id, dist in scored:
            if compare_id_fn(g_id, query_id):
                hits.append((g_id, dist, ranking.index(g_id)))
    return query_id, top_k, hits
if __name__ == "__main__":
    # Smoke demo: query at the origin against a gallery of 10 points.
    query = [
        ("1", np.array([0, 0, 0]))
    ]
    gallery = [
        (str(id_), np.array([id_] * 3)) for id_ in range(10)
    ]
    print("Query")
    print(query)
    print("Gallery")
    print(gallery)
    # Unpack: query id, top-k (id, distance) pairs, hit list (empty here
    # because no compare_id_fn is passed).
    query, retrieved_results, hit_distances = (calculate_most_similar(query[0], gallery, embedding_key=lambda d: d[1], id_key=lambda d: d[0],))
    print(hit_distances)
    print("-")
    print(retrieved_results)
    for result in retrieved_results:
        print(result)
|
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: zhang.hang@rutgers.edu
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import io
import os
import glob
import subprocess
from setuptools import setup, find_packages
import torch
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension
cwd = os.path.dirname(os.path.abspath(__file__))
version = '1.2.2'
try:
    # Non-release builds get a date-stamped suffix, e.g. 1.2.2b20200101.
    if not os.getenv('RELEASE'):
        from datetime import date
        today = date.today()
        day = today.strftime("b%Y%m%d")
        version += day
except Exception:
    pass
def create_version_file():
    """Write the computed version string into encoding/version.py."""
    global version, cwd
    print('-- Building version ' + version)
    version_path = os.path.join(cwd, 'encoding', 'version.py')
    with open(version_path, 'w') as version_file:
        version_file.write('"""This is encoding version file."""\n')
        version_file.write("__version__ = '{}'\n".format(version))
# Runtime dependencies installed alongside the package.
requirements = [
    'numpy',
    'tqdm',
    'nose',
    'portalocker',
    'torch>=1.4.0',
    'torchvision>=0.5.0',
    'Pillow',
    'scipy',
    'requests',
]
def get_extensions():
    """Build the C++ (and, when CUDA is available, CUDA) extension list."""
    this_dir = os.path.dirname(os.path.abspath(__file__))
    cpu_dir = os.path.join(this_dir, "encoding", "lib", "cpu")
    gpu_dir = os.path.join(this_dir, "encoding", "lib", "gpu")
    source_cpu = glob.glob(os.path.join(cpu_dir, "*.cpp"))
    source_cuda = (glob.glob(os.path.join(gpu_dir, "*.cpp"))
                   + glob.glob(os.path.join(gpu_dir, "*.cu")))
    print('c++: ', source_cpu)
    print('cuda: ', source_cuda)
    extra_compile_args = {"cxx": []}
    # NOTE: the same list object is handed to CppExtension and then
    # extended for CUDA, so the CPU extension also sees the GPU dir —
    # preserved from the original.
    include_dirs = [cpu_dir]
    ext_modules = [
        CppExtension(
            "encoding.cpu",
            source_cpu,
            include_dirs=include_dirs,
            extra_compile_args=extra_compile_args,
        )
    ]
    # CUDA extension only when a CUDA toolkit is present at build time.
    if CUDA_HOME is not None:
        include_dirs.append(gpu_dir)
        extra_compile_args["nvcc"] = [
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ]
        ext_modules.append(
            CUDAExtension(
                "encoding.gpu",
                source_cuda,
                include_dirs=include_dirs,
                define_macros=[("WITH_CUDA", None)],
                extra_compile_args=extra_compile_args,
            )
        )
    return ext_modules
if __name__ == '__main__':
    # Write encoding/version.py first so the installed package reports the
    # version computed above.
    create_version_file()
    setup(
        name="torch-encoding",
        version=version,
        author="Hang Zhang",
        author_email="zhanghang0704@gmail.com",
        url="https://github.com/zhanghang1989/PyTorch-Encoding",
        description="PyTorch Encoding Package",
        long_description=open('README.md').read(),
        long_description_content_type='text/markdown',
        license='MIT',
        install_requires=requirements,
        packages=find_packages(exclude=["tests", "experiments"]),
        # Ship the C++/CUDA sources so extensions can be rebuilt from sdist.
        package_data={ 'encoding': [
            'LICENSE',
            'lib/cpu/*.h',
            'lib/cpu/*.cpp',
            'lib/gpu/*.h',
            'lib/gpu/*.cpp',
            'lib/gpu/*.cu',
        ]},
        ext_modules=get_extensions(),
        cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
    )
|
from uuid import UUID
def is_valid_uuid(to_test: str, version=4):
    """Return True if `to_test` parses as a UUID string, False otherwise.

    NOTE: passing `version` to the UUID constructor *overwrites* the
    version/variant bits rather than validating them, so any well-formed
    UUID string is accepted regardless of its actual version.
    """
    try:
        UUID(to_test, version=version)
    except (ValueError, AttributeError, TypeError):
        # AttributeError/TypeError cover non-string input (e.g. None, int),
        # which previously escaped as an uncaught exception.
        return False
    return True
|
import numpy as np
import pytest
from .. import cellIndToSquareInd
def test_cellIndToSquareInd():
    """cellIndToSquareInd maps (row, col) to its 3x3 square coordinates."""
    cases = [
        ((0, 0), [0, 0]),
        ((0, 3), [0, 1]),
        ((0, 4), [0, 1]),
        ((0, 7), [0, 2]),
        ((7, 7), [2, 2]),
        ((6, 3), [2, 1]),
    ]
    for (row, col), expected in cases:
        np.testing.assert_array_equal(cellIndToSquareInd(row, col), expected)
    # Out-of-range indices must be rejected.
    with pytest.raises(ValueError):
        cellIndToSquareInd(100, 0)
|
#!/usr/bin/env python
import subprocess
def compressTo7z( branchName, generatedFilenames ):
    """Pack the generated scripts into ../output/build_ogre_scripts-<branch>.7z.

    Exits the script when 7z reports failure.
    """
    print( "Compressing to 7z..." )
    cmds = ["7z", "a", "../output/build_ogre_scripts-" + branchName + ".7z"]
    cmds.extend( generatedFilenames )
    retCode = subprocess.call( cmds, cwd='./output' )
    if retCode != 0:
        # Bug fix: cmds is a list; concatenating it to a str raised
        # TypeError on the failure path. Also add the missing space.
        print( "Warning: 7z " + " ".join( cmds ) + " failed" )
        exit()
    else:
        print( "7z finished" )
def getMercurialBranchName():
    """Return the active Mercurial bookmark name, or None on failure."""
    print( 'Retrieving Mercurial bookmark name' )
    try:
        process = subprocess.Popen( ['hg', 'log', '--template', '{bookmarks}\n', '-r', 'bookmark() & .'], stdout=subprocess.PIPE )
    except OSError:
        # hg not installed / not on PATH.
        return None
    (output, err) = process.communicate()
    exitCode = process.wait()
    if exitCode == 0:
        # Bug fix: communicate() returns bytes on Python 3; decode before
        # stripping newlines (str.replace on bytes raised TypeError).
        branchName = output.decode( 'utf-8' ).replace( '\n', '' )
        return branchName
    else:
        return None
def getGitBranchName():
    """Return the current git branch name, or None on failure."""
    print( 'Retrieving git branch name' )
    try:
        process = subprocess.Popen( ['git', 'rev-parse', '--abbrev-ref', 'HEAD'], stdout=subprocess.PIPE )
    except OSError:
        # git not installed / not on PATH.
        return None
    (output, err) = process.communicate()
    exitCode = process.wait()
    if exitCode == 0:
        # Bug fix: communicate() returns bytes on Python 3; decode before
        # stripping newlines (str.replace on bytes raised TypeError).
        branchName = output.decode( 'utf-8' ).replace( '\n', '' )
        return branchName
    else:
        return None
# Prefer the Mercurial bookmark; fall back to the git branch name.
branchName = getMercurialBranchName()
if branchName == None:
    print( 'Mercurial failed. This is likely not a Mercurial repo' )
    branchName = getGitBranchName()
if branchName == None:
    print( 'Failed to retrieve branch name. Cannot continue.' )
    exit( 1 )
generatedFilenames = []
print( 'Branch name is: ' + branchName )
import os
import stat
import errno
# Create ./output, tolerating a pre-existing directory.
try:
    os.makedirs( './output' )
except OSError as exc:
    if exc.errno == errno.EEXIST and os.path.isdir( './output' ):
        pass
    else:
        raise
# Generate Windows scripts: one batch file per (generator, platform) pair.
print( 'Generating scripts for Windows' )
generators = \
[
    'Visual Studio 16 2019',
    'Visual Studio 15 2017',
    'Visual Studio 14 2015',
    'Visual Studio 12 2013',
    'Visual Studio 11 2012',
    'Visual Studio 10 2010',
    'Visual Studio 9 2008'
]
platforms = \
[
    'Win32',
    'x64'
]
# build_ogre.bat is a template filled with {0}=branch, {1}=generator, {2}=platform.
file = open( 'build_ogre.bat', 'rt' )
templateStr = file.read()
for generator in generators:
    for platform in platforms:
        batchScript = templateStr.format( branchName, generator, platform )
        generatorUnderscores = generator.replace( ' ', '_' )
        filename = 'build_ogre_{0}_{1}.bat'.format( generatorUnderscores, platform )
        generatedFilenames.append( filename )
        file = open( './output/' + filename, 'wt' )
        file.write( batchScript )
        file.close()
print( 'Done' )
print( 'Generating scripts for Linux' )
# C++ standard versions to generate scripts for; 0 means "compiler latest".
cppVersions = \
[
    98,
    11,
    0
]
file = open( 'build_ogre_linux.sh', 'rt' )
templateStr = file.read()
for cppVersion in cppVersions:
    if cppVersion == 0:
        cppVersionParam = ''
        filename = 'build_ogre_linux_c++latest.sh'.format( cppVersionParam )
    else:
        cppVersionParam = '-D CMAKE_CXX_STANDARD=' + str( cppVersion )
        filename = 'build_ogre_linux_c++{0}.sh'.format( cppVersion )
    generatedFilenames.append( filename )
    batchScript = templateStr.format( branchName, cppVersionParam )
    path = './output/' + filename
    file = open( path, 'wt' )
    file.write( batchScript )
    # Make the generated shell script executable for everyone.
    os.chmod( path, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH )
    file.close()
compressTo7z( branchName, generatedFilenames )
print( 'Done' )
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from PIL import Image
from sklearn.model_selection import train_test_split, GroupShuffleSplit, StratifiedKFold, StratifiedGroupKFold
#
# A separate test set might not be the best idea and it is too small;
# here we sample a ~10% test set across all source datasets with equal frequency.
#
path_data_train = '/home/Behrendt/data/LUMEN/Node21/cxr_images/proccessed_data/metadata.csv'
imgpath = '/proccessed_data/images/'
savePath = '/home/Behrendt/data/LUMEN/Node21/cxr_images/proccessed_data/splits/'
mappingpath = '/home/Behrendt/data/LUMEN/Node21/cxr_images/original_data/filenames_orig_and_new.csv'
mappingpath2 = '/home/Behrendt/data/LUMEN/Node21/cxr_images/original_data/non_nodule_filenames_orig_and_new.csv'
# basepath_plots = r'C:\Users\Finn\Documents\Projects\LUMEN\Dataset investigation\plots\Chexpert/'
df = pd.read_csv(path_data_train)
df['Path'] = imgpath + df.img_name
# mapping: non-nodule images; mapping_nodule: nodule images (orig -> node21 ids).
# NOTE(review): DataFrame.append is removed in pandas 2.x — this script
# assumes pandas < 2.0; confirm before upgrading.
mapping = pd.read_csv(mappingpath2)
mapping_nodule = pd.read_csv(mappingpath)
# JSRT -- 149 nodules and 93 non nodule images -- we only can get the nodule data...
jrst_h = mapping[mapping.orig_dataset=='jsrt']
jrst_u = mapping_nodule[mapping_nodule.orig_dataset=='jsrt']
test_df = df.loc[df.img_name.str.replace('.mha','').isin(jrst_h.node21_img_id.values)].sample(50,random_state=44)
test_df = test_df.append(df.loc[df.img_name.str.replace('.mha','').isin(jrst_u.node21_img_id.values)].sample(50,random_state=44))
# OpenI 1102/54
openi_h = mapping[mapping.orig_dataset=='openi']
openi_u = mapping_nodule[mapping_nodule.orig_dataset=='openi']
test_df = test_df.append(df.loc[df.img_name.str.replace('.mha','').isin(openi_h.node21_img_id.values)].sample(25,random_state=40))
test_df = test_df.append(df.loc[df.img_name.str.replace('.mha','').isin(openi_u.node21_img_id.values)].sample(25,random_state=40))
# CestXray14 617 / 1187
cxr_h = mapping[mapping.orig_dataset=='chestxray14']
cxr_u = mapping_nodule[mapping_nodule.orig_dataset=='chestxray14']
test_df = test_df.append(df.loc[df.img_name.str.replace('.mha','').isin(cxr_h.node21_img_id.values)].sample(50,random_state=44))
test_df = test_df.append(df.loc[df.img_name.str.replace('.mha','').isin(cxr_u.node21_img_id.values)].sample(50,random_state=44))
# padchest 314 / 1366
pc_h = mapping[mapping.orig_dataset=='padchest']
pc_u = mapping_nodule[mapping_nodule.orig_dataset=='padchest']
test_df = test_df.append(df.loc[df.img_name.str.replace('.mha','').isin(pc_h.node21_img_id.values)].sample(50,random_state=45))
test_df = test_df.append(df.loc[df.img_name.str.replace('.mha','').isin(pc_u.node21_img_id.values)].sample(50,random_state=45))
# sanity check: test and train sets must not share any image.
print(len(test_df.img_name.unique()))
unique_list =test_df.img_name.unique()
train_df = df.loc[~df.img_name.isin(test_df.img_name)]
for i, row in train_df.iterrows():
    if row.img_name in unique_list:
        print('warning ')
# Split to train/val and Test Data with group awareness and View stratification
cv = StratifiedGroupKFold(n_splits=5,shuffle = True, random_state=42)
# for fold, (train_inds, test_inds) in enumerate(cv.split(X=df, y=df.label, groups=df.img_name)):
#     # train_inds, test_inds = next(GroupShuffleSplit(test_size=.20, n_splits=2, random_state = 42).split(df, groups=df['patient']))
#     if fold == 0:
#         train_df = df.iloc[train_inds]
#         test_df = df.iloc[test_inds]
test_df.to_csv(savePath+f'nodule_test_v3.csv')
# One CSV pair (train/val) per fold, grouped by image name so the same
# image never appears in both.
for fold , (train_inds, test_inds) in enumerate(cv.split(X=train_df, y=train_df.label, groups=train_df.img_name)):
    train_df_cv = train_df.iloc[train_inds]
    val_df_cv = train_df.iloc[test_inds]
    train_df_cv.to_csv(savePath+f'nodule_train_fold{fold}_v3.csv')
    val_df_cv.to_csv(savePath+f'nodule_val_fold{fold}_v3.csv')
    print(f'Length of Training Set(s): {len(train_df_cv)}')
    print(f'Length of Validation Set(s): {len(val_df_cv)}')
print(f'Length of Test Set: {len(test_df)}')
print('done')
#!/usr/bin/env python3
import pandas as pd
import numpy as np
import array
import uproot3
# Input dataframes and output location for the skimmed ROOT trees.
path_to_dataframe = '/home/rusack/joshib/ECAL_calib/data/raw'
path_to_output = path_to_dataframe+'/skimmed_root/'
def merge_years():
    """Skim the per-year/per-ring crystal CSVs into a single ROOT file.

    Creates one TTree per (year, ring) pair and fills it with cleaned
    laser-response measurements.
    """
    # prepare root file
    filename = path_to_output+'ecal_crystal_response.root'
    print('opening %s' % filename)
    with uproot3.recreate(filename) as rfile:
        for year in ['2016', '2017', '2018']:
            for ring in [1, 66, 85]:
                treename ='year{}_ring_{}' .format(year, ring)
                rfile[treename] = uproot3.newtree({'xtal_id':int,
                                                   'seq_datetime':np.int_,
                                                   'laser_datetime':np.int_,
                                                   'lumi_datetime':np.int_,
                                                   'start_ts':np.int_,
                                                   'stop_ts':np.int_,
                                                   'int_deliv_inv_ub':float,
                                                   'laser_response':float})
                #tree_map[treename] = TTree(treename, treename)
                #tmptree = tree_map[treename]
                print('Filling %s' % treename)
                df_filename = '{}/df_xtals_ieta_{}_{}.csv'.format(path_to_dataframe, ring, year)
                xtal_df = pd.read_csv(df_filename)
                column_list = xtal_df.keys()
                id_label = ''
                # add xtal_id column to the dataframe
                # NOTE(review): assumes the crystal id lives in an 'Unnamed'
                # index column whose first value is non-zero — confirm.
                for col_ in column_list:
                    if 'Unnamed' in col_ and xtal_df[col_].iloc()[0]!=0: id_label = col_
                xtal_df['xtal_id'] = xtal_df[[id_label]]
                xtal_list = xtal_df.xtal_id.unique()
                skimmed_columns = ['xtal_id', 'iov_idx', 'fill', 'temperature', 't1', 'seq_datetime',
                                   'inst_lumi', 'start_ts', 'stop_ts', 'laser_datetime', 'good',
                                   'calibration', 'int_inst_lumi', 'p1', 'p2', 'p3', 'time',
                                   'ls', 'beamstatus', 'int_deliv_inv_ub']
                xtal_df = xtal_df[skimmed_columns]
                xtal_df = xtal_df[~np.isnan(xtal_df['calibration'])] # remove nan values
                xtal_df = xtal_df[xtal_df['calibration']>0.5] # remove bad measurements
                xtal_df = xtal_df[( (xtal_df['good']==1) & (xtal_df['beamstatus']!='ADJUST') & (xtal_df['beamstatus']!='SQUEEZE'))]
                xtal_df['inst_lumi'] = xtal_df['inst_lumi'].apply(lambda x: 0 if np.isnan(x) else x) # remove nan values of inst. lumi
                xtal_df = xtal_df.drop(columns=['iov_idx', 'fill', 'temperature', 't1', 'inst_lumi', 'good', 'int_inst_lumi', 'p1', 'p2', 'p3', 'ls', 'beamstatus'])
                xtal_df.to_csv('{}/{}.csv'.format(path_to_output, treename))
                rfile[treename].extend({
                    'xtal_id': xtal_df['xtal_id'].to_numpy(),
                    'seq_datetime': pd.to_datetime(xtal_df['seq_datetime']).values,
                    'laser_datetime': pd.to_datetime(xtal_df['laser_datetime']).values,
                    'lumi_datetime': pd.to_datetime(xtal_df['time']).values,
                    'start_ts': xtal_df['start_ts'].to_numpy(),
                    'stop_ts': xtal_df['stop_ts'].to_numpy(),
                    'int_deliv_inv_ub': xtal_df['int_deliv_inv_ub'].to_numpy(),
                    'laser_response': xtal_df['calibration'].to_numpy()
                })
                print('Saved tree ...')
merge_years()
|
## -------------------------------------------------------- ##
# Trab 1 IA 2019-2
#
# Rafael Belmock Pedruzzi
#
# simulatedAnnealing.py: implements the simulated annealing metaheuristic for the bag problem
#
# Python version: 3.7.4
## -------------------------------------------------------- ##
import bagProblem as bp
import hillClimbing as hc
import priorityQ as pq
import random
import math
from time import time
# Remove and return a random item from the given list:
def take_Random(si):
    """Pop a uniformly random element from si; returns [] when si is empty."""
    if si == []:
        return []
    last = len(si) - 1
    return si.pop(random.randint(0, last))
# Return a neightborhood of the given state
def neightborhood(s, T, OBJs):
    """Return every valid neighbour of s: expansions first, then retractions."""
    candidates = list(bp.state_Expansion(s)) + list(bp.state_Retract(s))
    return [c for c in candidates if bp.state_Verify(c, T, OBJs)]
# Simulated Annealing:
def sim_Annealing(T, OBJs, execTime, *args):
    """Simulated annealing metaheuristic for the bag (knapsack) problem.

    Args:
        T: bag capacity
        OBJs: list of (value, size) objects
        execTime: wall-clock budget in seconds
        *args: (initial temperature, cooling factor alpha, iterations per level)
    Returns:
        best state found (list of per-object counts)
    """
    temp = args[0]
    alpha = args[1]
    niter = args[2]
    s = [0]*len(OBJs)
    bs = s # best state found
    start = time()
    while temp > 1:
        # stop when the time budget is exhausted
        if time() - start > execTime:
            break
        si = neightborhood(s, T, OBJs)
        for _ in range(niter):
            sn = take_Random(si)
            oValue = bp.state_Value(sn, OBJs)
            if oValue > bp.state_Value(s, OBJs):
                s = sn
                si = neightborhood(s, T, OBJs) # updating neightborhood
                if oValue > bp.state_Value(bs, OBJs):
                    bs = sn
            else:
                # accept a worse state with probability exp(delta / temp)
                p = math.exp((oValue - bp.state_Value(s, OBJs))/temp)
                if random.random() < p:
                    s = sn
                    si = neightborhood(s, T, OBJs) # updating neightborhood
        # geometric cooling schedule
        temp *= alpha
    return bs
# T = 19 # bag size
# OBJs = [(1,3), (4,6), (5,7)] # object list (v,t)
# temp = 10 # initial temperature
# alpha = random.random()
# iter = 50 # number of iterations
# print(sim_Annealing(T,OBJs,temp,alpha,iter))
|
#coding:utf-8
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class LstmEncoderLayer(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, label_nums=None,
                 batch_first=True, bidirectional=False, dropout=0.0):
        """LSTM encoder wrapper.

        :params input_size: input feature dimension
        :params hidden_size: hidden-state dimension (split across directions
            when bidirectional)
        :params num_layers: number of stacked LSTM layers
        :params label_nums: when set, adds a Linear projection to this many tags
        :params batch_first: whether the first input dimension is the batch
        :params bidirectional: whether to use a bidirectional LSTM
        :params dropout: dropout rate applied to the LSTM output
        """
        super(LstmEncoderLayer, self).__init__()
        # Halve the per-direction size so the concatenated output stays
        # hidden_size wide.
        if bidirectional:
            lstm_size = hidden_size // 2
        else:
            lstm_size = hidden_size
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=lstm_size,
                            num_layers=num_layers, batch_first=batch_first,
                            bidirectional=bidirectional)
        self.lstm_dropout = nn.Dropout(dropout)
        self.label_nums = label_nums
        if self.label_nums:
            self.hidden2tags = nn.Linear(hidden_size, label_nums)

    def forward(self, input, input_seq_lengths, batch_first=True, is_sort=False):
        """Encode a padded batch of sequences.

        :params input: input tensor, sorted by decreasing length when is_sort
        :params input_seq_lengths: length of each sequence in the batch
        :params batch_first: whether the first input dimension is the batch
        :params is_sort: set True if the input is already length-sorted
        """
        # NOTE(review): when is_sort=True, word_seq_lengths is never
        # assigned, so pack_padded_sequence below would raise NameError —
        # callers appear to always use is_sort=False; confirm.
        if not is_sort:
            # Sort by decreasing length as required by pack_padded_sequence.
            word_seq_lengths, word_perm_idx = input_seq_lengths.sort(0, descending=True)
            input = input[word_perm_idx]
        packed_words = pack_padded_sequence(input, word_seq_lengths.cpu().numpy(), True)
        hidden = None
        lstm_out, hidden = self.lstm(packed_words, hidden)
        lstm_out, _ = pad_packed_sequence(lstm_out)
        lstm_out = lstm_out.transpose(1, 0)
        if not is_sort:
            # Restore the original batch order.
            _, word_seq_recover = word_perm_idx.sort(0, descending=False)
            lstm_out = lstm_out[word_seq_recover]
        outputs = self.lstm_dropout(lstm_out) #batch * seq_len * (hidden_dim*directions)
        if self.label_nums:
            outputs = self.hidden2tags(outputs)
        return outputs
|
import os
import json
import copy
import random
import configparser
import time
from pp_definitions import PPdefinitions
from pp_utils import Monitor
from pp_livelistfetcher import LiveListFetcher
class LiveList(object):
def __init__(self,sequence):
    # sequence: 'ordered' for sequential play, anything else for random.
    self.mon=Monitor()
    self.sequence=sequence
    self._tracks=[]
    self._num_tracks=0
    self.last_num_tracks=-1
    self.llf = LiveListFetcher() # start livelist fetcher
# ***************************
# Medialist Tracks
# ***************************
# medialist is kept for residual tracks with track references
def open_list(self,filename,profile_version):
"""
opens a saved medialist
medialists are stored as json arrays.
for liveshow medialist should contain only tracks with a track reference e.g. child track and empty track
"""
ifile = open(filename, 'r')
mdict = json.load(ifile)
ifile.close()
self.medialist_tracks = mdict['tracks']
if 'issue' in mdict:
self.medialist_version_string= mdict['issue']
else:
self.medialist_version_string="1.0"
if self.medialist_version()==profile_version:
return True
else:
return False
def medialist_version(self):
vitems=self.medialist_version_string.split('.')
if len(vitems)==2:
# cope with 2 digit version numbers before 1.3.2
return 1000*int(vitems[0])+100*int(vitems[1])
else:
return 1000*int(vitems[0])+100*int(vitems[1])+int(vitems[2])
# lookup index from track_ref
def index_of_track(self,wanted_track):
index = 0
for track in self.medialist_tracks:
if track['track-ref']==wanted_track:
return index
index +=1
return -1
# return the dictionary containing all fields of the track at the given index
def track(self,index):
    return self.medialist_tracks[index]
# ***************************
# Livelist Tracks
# ***************************
# the methods mirror medialist methods for anonymous tracks
# get live_track directories from liveshow
def live_tracks(self,dir1,dir2):
self.pp_live_dir1=dir1
self.pp_live_dir2=dir2
# and pass them to the fetcher
self.llf.live_tracks(dir1,dir2)
def length(self):
    # number of tracks currently in the live list
    return self._num_tracks
## def display_length(self):
## self.create_new_livelist()
## self._tracks=copy.deepcopy(self.new_livelist)
## self._num_tracks=len(self._tracks)
## self._selected_track_index=-1
## return self._num_tracks
def anon_length(self):
    # mirror of the medialist method; live-list tracks are all anonymous
    return self._num_tracks
def next(self,sequence):
    """Advance the selection and return True.

    'ordered' wraps from the last track back to the first; any other
    sequence picks a random track, avoiding repeats until every track
    has been played once.
    """
    if sequence=='ordered':
        if self._selected_track_index==self._num_tracks-1:
            self._selected_track_index=0
        else:
            self._selected_track_index +=1
        self.select(self._selected_track_index)
        return True
    else:
        cand=random.randint(0,self._num_tracks-1)
        # print '\nnext - initial cand',cand
        if len(self.played_tracks)==self._num_tracks:
            # all played so start again
            # stop same track being played twice
            if self.played_tracks[-1]==cand:
                cand+=1
                if cand == self._num_tracks:
                    cand=0
            self.played_tracks=[cand]
            self._selected_track_index = cand
            # print 'all played',self._selected_track_index
            # print self.played_tracks
            self.select(self._selected_track_index)
            return True
        else:
            # walk forward from the candidate until an unplayed track is found
            while True:
                # print 'trying',cand
                if cand not in self.played_tracks:
                    self.played_tracks.append(cand)
                    self._selected_track_index = cand
                    # print 'add to played',self._selected_track_index
                    # print self.played_tracks
                    self.select(self._selected_track_index)
                    return True
                else:
                    cand+=1
                    if cand == self._num_tracks:
                        cand=0
                    # print 'increment candidate to ',cand
    def previous(self,sequence):
        """Step the selection to the previous track and select it.

        sequence -- 'ordered' steps backwards through the list, wrapping
        from the first track to the last; any other value behaves exactly
        like next() in shuffle mode (a random unplayed track).

        Always returns True after selecting a track.
        """
        if sequence=='ordered':
            if self._selected_track_index == 0:
                # at the start of the list - wrap to the end
                self._selected_track_index=self._num_tracks-1
            else:
                self._selected_track_index -=1
            self.select(self._selected_track_index)
            return True
        else:
            # shuffle mode: draw a random candidate index
            cand=random.randint(0,self._num_tracks-1)
            # print '\nprevious - initial cand',cand
            if len(self.played_tracks)==self._num_tracks:
                # all played so start again
                # stop same track being played twice
                if self.played_tracks[-1]==cand:
                    cand+=1
                    if cand == self._num_tracks:
                        # wrap candidate past the end of the list
                        cand=0
                self.played_tracks=[cand]
                self._selected_track_index = cand
                # print 'all played',self._selected_track_index
                # print self.played_tracks
                self.select(self._selected_track_index)
                return True
            else:
                # walk forward (with wrap-around) until an unplayed index is found
                while True:
                    # print 'trying',cand
                    if cand not in self.played_tracks:
                        self.played_tracks.append(cand)
                        self._selected_track_index = cand
                        # print 'add to played',self._selected_track_index
                        # print self.played_tracks
                        self.select(self._selected_track_index)
                        return True
                    else:
                        cand+=1
                        if cand == self._num_tracks:
                            cand=0
                        # print 'increment candidate to ',cand
def start(self):
if self._num_tracks==0:
return False
else:
self._selected_track_index=-1
self.played_tracks=[]
self.next(self.sequence)
return True
def finish(self):
if self._num_tracks==0:
return False
else:
self._selected_track_index=self._num_tracks-1
self.played_tracks=[]
self.next(self.sequence)
return True
def at_start(self):
if self._selected_track_index==0:
return True
else:
return False
def at_end(self):
if self._selected_track_index==self._num_tracks-1:
return True
else:
return False
def select(self,index):
"""does housekeeping necessary when a track is selected"""
if self._num_tracks>0 and index>=0 and index< self._num_tracks:
self._selected_track_index=index
self._selected_track = self._tracks[index]
return True
else:
return False
def selected_track(self):
"""returns a dictionary containing all fields in the selected track """
return self._selected_track
# ***************************
# Constructing NEW Livelist
# ***************************
def livelist_changed(self):
if self.new_livelist != self._tracks:
return True
else:
return False
def new_length(self):
return len(self.new_livelist)
def use_new_livelist(self):
self.last_num_tracks=self._num_tracks
# will have only anonymous tracks
self._tracks=copy.deepcopy(self.new_livelist)
self._num_tracks=len(self._tracks)
self._selected_track_index=-1
return True
def create_new_livelist(self):
# fetch new livelist if available
self.llf.fetch_livelist()
self.new_livelist=[]
if os.path.exists(self.pp_live_dir1):
for track_file in os.listdir(self.pp_live_dir1):
track_file = self.pp_live_dir1 + os.sep + track_file
(root_name,leaf)=os.path.split(track_file)
if leaf[0] == '.':
continue
else:
(root_file,ext_file)= os.path.splitext(track_file)
if (ext_file.lower() in PPdefinitions.IMAGE_FILES+PPdefinitions.VIDEO_FILES+PPdefinitions.AUDIO_FILES+PPdefinitions.WEB_FILES) or (ext_file.lower()=='.cfg'):
self.livelist_add_track(track_file)
if os.path.exists(self.pp_live_dir2):
for track_file in os.listdir(self.pp_live_dir2):
track_file = self.pp_live_dir2 + os.sep + track_file
(root_name,leaf)=os.path.split(track_file)
if leaf[0] == '.':
continue
else:
(root_file,ext_file)= os.path.splitext(track_file)
if (ext_file.lower() in PPdefinitions.IMAGE_FILES+PPdefinitions.VIDEO_FILES+PPdefinitions.AUDIO_FILES+PPdefinitions.WEB_FILES) or (ext_file.lower()=='.cfg'):
self.livelist_add_track(track_file)
self.new_livelist= sorted(self.new_livelist, key= lambda track: os.path.basename(track['location']).lower())
# self.print_livelist()
def print_livelist(self):
print('LIVELIST')
for it in self.new_livelist:
print('type: ', it['type'], 'loc: ',it['location'],'\nplugin cfg: ', it['plugin'])
print('')
def livelist_add_track(self,afile):
(root,title)=os.path.split(afile)
(root_plus,ext)= os.path.splitext(afile)
if ext.lower() in PPdefinitions.IMAGE_FILES:
self.livelist_new_track(PPdefinitions.new_tracks['image'],{'title':title,'track-ref':'','location':afile})
if ext.lower() in PPdefinitions.VIDEO_FILES:
self.livelist_new_track(PPdefinitions.new_tracks['vlc'],{'title':title,'track-ref':'','location':afile})
if ext.lower() in PPdefinitions.AUDIO_FILES:
self.livelist_new_track(PPdefinitions.new_tracks['audio'],{'title':title,'track-ref':'','location':afile})
if ext.lower() in PPdefinitions.WEB_FILES:
self.livelist_new_track(PPdefinitions.new_tracks['chrome'],{'title':title,'track-ref':'','location':afile})
if ext.lower()=='.cfg':
self.livelist_new_plugin(afile,title)
def livelist_new_plugin(self,plugin_cfg,title):
# read the file which is a plugin cfg file into a dictionary
self.plugin_config = configparser.ConfigParser(inline_comment_prefixes = (';',))
self.plugin_config.read(plugin_cfg)
self.plugin_params = dict(self.plugin_config.items('plugin'))
# create a new livelist entry of a type specified in the config file with plugin
# miss entry if type is not config file
if 'type' in self.plugin_params:
self.livelist_new_track(PPdefinitions.new_tracks[self.plugin_params['type']],{'title':title,'track-ref':'','plugin':plugin_cfg,'location':plugin_cfg})
def livelist_new_track(self,fields,values):
new_track=fields
self.new_livelist.append(copy.deepcopy(new_track))
last = len(self.new_livelist)-1
self.new_livelist[last].update(values)
|
import json
import math
from typing import List
# Load events.txt in one read; records are separated by blank lines.
with open("events.txt", "r") as f:
    events_string = f.read()
# kept for backward compatibility with the original readlines() variable
events_lines = events_string.splitlines(keepends=True)
events_raw = events_string.split("\n\n")
class Event:
    """One historical event parsed from a paragraph of events.txt."""

    name: str
    time_sort: int          # numeric key used for chronological sorting
    time: str               # human-readable date string
    themes: List[str]
    coord: str
    cfw: str
    amsco: str
    overview: str
    causes: List[str]
    effects: List[str]

    def toJSON(self):
        """Serialise every attribute of this event as pretty-printed JSON."""
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)

    def to_string(self):
        """Debug representation: all fields rendered as one list literal."""
        field_values = [
            self.name,
            self.time_sort,
            self.time,
            self.themes,
            self.coord,
            self.cfw,
            self.amsco,
            self.overview,
            self.causes,
            self.effects,
        ]
        return str(field_values)
events: List[Event] = []
for event_raw in events_raw:
    ev = Event()
    event_split = event_raw.split("\n")
    ev.name = event_split[0]
    ev.time = event_split[1]
    # derive a sortable integer from the date string
    if "," in ev.time:
        # "YYYY, extra" -> the year before the comma
        ev.time_sort = int(ev.time.split(",")[0])
    elif "-" in ev.time:
        # "YYYY-YYYY" range -> midpoint of the two years
        # (bug fix: the original called int() on the whole split list,
        # which raised TypeError; mirrors the working Trend logic)
        year_parts = ev.time.split("-")
        ev.time_sort = math.floor((int(year_parts[0]) + int(year_parts[1])) / 2)
    elif "s" in ev.time:
        # decade such as "1950s" -> strip the trailing 's'
        ev.time_sort = int(ev.time[0:-1])
    else:
        ev.time_sort = int(ev.time)
    ev.themes = event_split[2].split("|")
    ev.coord = event_split[3]
    ev.cfw = event_split[4]
    ev.amsco = event_split[5]
    # {{...}} markers become bold purple spans in the HTML front end
    ev.overview = event_split[6].replace("{{","<b style=\"color: purple;\">").replace("}}","</b>")
    ev.causes = event_split[7].split("|")
    ev.effects = event_split[8].split("|")
    events.append(ev)
events_out = []
for event in events:
    events_out.append(event.toJSON())
events_data_finished = f"\"events\":[{','.join(events_out)}],"
# Load trends.txt in one read; records are separated by blank lines.
with open("trends.txt", "r") as f:
    trends_string = f.read()
# kept for backward compatibility with the original readlines() variable
trend_lines = trends_string.splitlines(keepends=True)
trends_raw = trends_string.split("\n\n")
class Trend:
    """One long-running historical trend parsed from trends.txt."""

    name: str
    time: str               # human-readable date string
    time_sort: int          # numeric key used for chronological sorting
    overview: str

    def toJSON(self):
        """Serialise every attribute of this trend as pretty-printed JSON."""
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)
trends: List[Trend] = []
for trend_raw in trends_raw:
    trend_split = trend_raw.split("\n")
    t = Trend()
    t.name = trend_split[0]
    t.time = trend_split[1]
    # derive a sortable integer from the date string
    if "," in t.time:
        t.time_sort = int(t.time.split(",")[0])
    elif "-" in t.time:
        year_parts = t.time.split("-")
        t.time_sort = math.floor((int(year_parts[0]) + int(year_parts[1])) / 2)
    elif "s" in t.time:
        t.time_sort = int(t.time[0:-1])
    else:
        t.time_sort = int(t.time)
    t.overview = trend_split[2]
    trends.append(t)
# assemble the final JSON document by hand and write it out
trends_out = [trend.toJSON() for trend in trends]
trends_data_finished = f"\"trends\":[{','.join(trends_out)}]"
out_json = "{" + events_data_finished + trends_data_finished + "}"
with open("information.json", "w") as f:
    f.write(out_json)
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.0-beta.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.apis.batch_v2alpha1_api import BatchV2alpha1Api
class TestBatchV2alpha1Api(unittest.TestCase):
    """Unit-test stubs for the generated BatchV2alpha1Api client.

    Every test body is a swagger-codegen placeholder naming the API
    operation it is meant to exercise.
    """

    def setUp(self):
        # a fresh API client instance for every test
        self.api = kubernetes.client.apis.batch_v2alpha1_api.BatchV2alpha1Api()

    def tearDown(self):
        pass

    def test_create_namespaced_cron_job(self):
        """Test case for create_namespaced_cron_job."""
        pass

    def test_create_namespaced_job(self):
        """Test case for create_namespaced_job."""
        pass

    def test_create_namespaced_scheduled_job(self):
        """Test case for create_namespaced_scheduled_job."""
        pass

    def test_delete_collection_namespaced_cron_job(self):
        """Test case for delete_collection_namespaced_cron_job."""
        pass

    def test_delete_collection_namespaced_job(self):
        """Test case for delete_collection_namespaced_job."""
        pass

    def test_delete_collection_namespaced_scheduled_job(self):
        """Test case for delete_collection_namespaced_scheduled_job."""
        pass

    def test_delete_namespaced_cron_job(self):
        """Test case for delete_namespaced_cron_job."""
        pass

    def test_delete_namespaced_job(self):
        """Test case for delete_namespaced_job."""
        pass

    def test_delete_namespaced_scheduled_job(self):
        """Test case for delete_namespaced_scheduled_job."""
        pass

    def test_get_api_resources(self):
        """Test case for get_api_resources."""
        pass

    def test_list_cron_job_for_all_namespaces(self):
        """Test case for list_cron_job_for_all_namespaces."""
        pass

    def test_list_job_for_all_namespaces(self):
        """Test case for list_job_for_all_namespaces."""
        pass

    def test_list_namespaced_cron_job(self):
        """Test case for list_namespaced_cron_job."""
        pass

    def test_list_namespaced_job(self):
        """Test case for list_namespaced_job."""
        pass

    def test_list_namespaced_scheduled_job(self):
        """Test case for list_namespaced_scheduled_job."""
        pass

    def test_list_scheduled_job_for_all_namespaces(self):
        """Test case for list_scheduled_job_for_all_namespaces."""
        pass

    def test_patch_namespaced_cron_job(self):
        """Test case for patch_namespaced_cron_job."""
        pass

    def test_patch_namespaced_cron_job_status(self):
        """Test case for patch_namespaced_cron_job_status."""
        pass

    def test_patch_namespaced_job(self):
        """Test case for patch_namespaced_job."""
        pass

    def test_patch_namespaced_job_status(self):
        """Test case for patch_namespaced_job_status."""
        pass

    def test_patch_namespaced_scheduled_job(self):
        """Test case for patch_namespaced_scheduled_job."""
        pass

    def test_patch_namespaced_scheduled_job_status(self):
        """Test case for patch_namespaced_scheduled_job_status."""
        pass

    def test_read_namespaced_cron_job(self):
        """Test case for read_namespaced_cron_job."""
        pass

    def test_read_namespaced_cron_job_status(self):
        """Test case for read_namespaced_cron_job_status."""
        pass

    def test_read_namespaced_job(self):
        """Test case for read_namespaced_job."""
        pass

    def test_read_namespaced_job_status(self):
        """Test case for read_namespaced_job_status."""
        pass

    def test_read_namespaced_scheduled_job(self):
        """Test case for read_namespaced_scheduled_job."""
        pass

    def test_read_namespaced_scheduled_job_status(self):
        """Test case for read_namespaced_scheduled_job_status."""
        pass

    def test_replace_namespaced_cron_job(self):
        """Test case for replace_namespaced_cron_job."""
        pass

    def test_replace_namespaced_cron_job_status(self):
        """Test case for replace_namespaced_cron_job_status."""
        pass

    def test_replace_namespaced_job(self):
        """Test case for replace_namespaced_job."""
        pass

    def test_replace_namespaced_job_status(self):
        """Test case for replace_namespaced_job_status."""
        pass

    def test_replace_namespaced_scheduled_job(self):
        """Test case for replace_namespaced_scheduled_job."""
        pass

    def test_replace_namespaced_scheduled_job_status(self):
        """Test case for replace_namespaced_scheduled_job_status."""
        pass
if __name__ == '__main__':
    # run all generated test stubs when this module is executed directly
    unittest.main()
|
#! /usr/bin/env python
import rospy
import rosbag
import csv
import xlsxwriter
# Directory where all grasp logs, images and spreadsheets are written.
file_location = "/home/roboticslab/AlphaGraspData/"
# Current grasp id; set by open_csv_writer() and reused by csv_appender().
grasp = None
def open_csv_writer(graspl):
    """Create the per-grasp CSV log and write its header row.

    graspl -- grasp id; stored in the module-level ``grasp`` so that
    csv_appender() can later append rows to the same file.
    """
    global grasp
    grasp = graspl
    # text mode "w" instead of binary "wb": on Python 3 csv.writer emits
    # str, so binary mode made writerow raise TypeError
    with open(file_location + "GraspInfo_Grasp" + str(grasp) + ".csv", "w") as cf:
        f = csv.writer(cf)
        f.writerow(["Grasp_Trial", "Success","Image_1","Reset_okay","Image_2","Image_3","Image_4", "Video"])
def csv_appender( trial, img1, img2, img3, img4, vid):
    """Append one trial row to the CSV opened by open_csv_writer().

    The Success and Reset_okay columns are left blank for manual entry.
    """
    global grasp
    # text append mode "a" instead of binary "ab": on Python 3 csv.writer
    # emits str, so binary mode made writerow raise TypeError
    with open( file_location + "GraspInfo_Grasp" + str(grasp) + ".csv", "a") as cf:
        f = csv.writer(cf)
        f.writerow([trial, "", img1,"", img2, img3, img4, vid])
def xls_open_n_write(grasp, dic):
    """Build the per-grasp .xlsx report, one row of images per grasp trial.

    grasp -- grasp id used in the output file name.
    dic   -- maps str(trial_index) -> sequence of image paths for that trial.
    """
    workbook = xlsxwriter.Workbook(file_location + "GraspInfo_Grasp" + str(grasp) + ".xlsx")
    worksheet = workbook.add_worksheet()
    # narrow columns for the bookkeeping fields
    for col in ('A:A', 'B:B', 'C:C'):
        worksheet.set_column(col, 10)
    worksheet.write('A1', "Grasp_Trials")
    worksheet.write('B1', "Success")
    worksheet.write('C1', "Reset Okay")
    worksheet.write('D1', "Image 0")
    worksheet.write('E1', "Image 1")
    worksheet.write('F1', "Image 2")
    worksheet.write('G1', "Image 3")
    # wide columns for the embedded images
    for col in ('D:D', 'E:E', 'F:F', 'G:G'):
        worksheet.set_column(col, 30)
    # one row per trial, starting at spreadsheet row 2
    # (for/range replaces the original manual while-counter loop)
    for count in range(len(dic)):
        row = count + 2
        worksheet.write('A' + str(row), "grasp " + str(count))
        # make the row tall enough for the 0.3-scaled images
        worksheet.set_row(row - 1, 115)
        worksheet.insert_image('D' + str(row), str(dic[str(count)][0]), {'x_scale': 0.3, 'y_scale': 0.3})
        worksheet.insert_image('E' + str(row), str(dic[str(count)][1]), {'x_scale': 0.3, 'y_scale': 0.3})
        worksheet.insert_image('F' + str(row), str(dic[str(count)][2]), {'x_scale': 0.3, 'y_scale': 0.3})
        # fourth image intentionally disabled in the original
        # worksheet.insert_image('G' + str(row), str(dic[str(count)][3]), {'x_scale': 0.3, 'y_scale': 0.3})
    workbook.close()
|
# coding: utf-8
import logging
from django.db.models import Q
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from tastypie.resources import ModelResource
from tastypie import fields
from tastypie.contrib.contenttypes.fields import GenericForeignKeyField
from tastypie.authentication import ApiKeyAuthentication
from tastypie.authorization import DjangoAuthorization
from tastypie.authorization import Authorization
from tastypie.exceptions import BadRequest
from journalmanager.models import (
Journal,
UseLicense,
Sponsor,
Collection,
Issue,
Section,
RegularPressRelease,
AheadPressRelease,
PressReleaseTranslation,
PressReleaseArticle,
SubjectCategory,
)
from scielomanager.utils import usercontext
logger = logging.getLogger(__name__)
def current_user_active_collection():
    """Return the active collection of the current request's user."""
    finder = usercontext.get_finder()
    return finder.get_current_user_active_collection()
def current_user_collections():
    """Return all collections of the current request's user."""
    finder = usercontext.get_finder()
    return finder.get_current_user_collections()
class ApiKeyAuthMeta:
    # Common Meta base for all resources: clients must authenticate with a
    # tastypie API key; authorization follows Django model permissions.
    authentication = ApiKeyAuthentication()
    authorization = DjangoAuthorization()
class SectionResource(ModelResource):
    """Read-only v1 endpoint for journal sections."""
    journal = fields.ForeignKey('api.resources_v1.JournalResource',
                                'journal')
    issues = fields.OneToManyField('api.resources_v1.IssueResource',
                                   'issue_set')
    titles = fields.CharField(readonly=True)

    class Meta(ApiKeyAuthMeta):
        queryset = Section.objects.all()
        resource_name = 'sections'
        allowed_methods = ['get']
        excludes = ['legacy_code']
        filtering = {
            # fix: ('exact') is just the string 'exact', not a tuple;
            # use a real one-element tuple as JournalResource.Meta does
            "journal": ('exact',),
        }

    def dehydrate_titles(self, bundle):
        """Return [(iso_code, title), ...] for every translated section title."""
        return [(title.language.iso_code, title.title)
                for title in bundle.obj.titles.all()]
class UseLicenseResource(ModelResource):
    # Read-only v1 endpoint listing use licenses.
    class Meta(ApiKeyAuthMeta):
        queryset = UseLicense.objects.all()
        resource_name = 'uselicenses'
        allowed_methods = ['get', ]
class IssueResource(ModelResource):
    """Read-only v1 endpoint for issues.

    IMPORTANT: is_press_release was removed on V2
    """
    journal = fields.ForeignKey('api.resources_v1.JournalResource',
                                'journal')
    sections = fields.ManyToManyField(SectionResource, 'section')
    thematic_titles = fields.CharField(readonly=True)
    is_press_release = fields.BooleanField(readonly=True)
    suppl_volume = fields.CharField(attribute='volume', readonly=True)
    suppl_number = fields.CharField(attribute='number', readonly=True)
    use_license = fields.ForeignKey(UseLicenseResource, 'use_license', full=True, null=True)

    class Meta(ApiKeyAuthMeta):
        queryset = Issue.objects.all()
        resource_name = 'issues'
        allowed_methods = ['get', ]
        filtering = {
            # fix: values must be sequences of filter types; ('exact') is
            # only the bare string 'exact' (cf. JournalResource.Meta)
            "journal": ('exact',),
            "is_marked_up": ('exact',),
            "volume": ('exact',),
            "number": ('exact',),
            "publication_year": ('exact',),
            "suppl_number": ('exact',),
            "suppl_volume": ('exact',),
        }

    def build_filters(self, filters=None):
        """
        Custom filter that retrieves data by the collection's name_slug.
        """
        if filters is None:
            filters = {}

        orm_filters = super(IssueResource, self).build_filters(filters)

        param_filters = {}

        if 'collection' in filters:
            param_filters['journal__collections__name_slug'] = filters['collection']

        if 'eletronic_issn' in filters:
            param_filters['journal__eletronic_issn'] = filters['eletronic_issn']

        if 'print_issn' in filters:
            param_filters['journal__print_issn'] = filters['print_issn']

        if 'suppl_number' in filters:
            # supplement to a number: stored as type='supplement'
            param_filters['type'] = 'supplement'
            param_filters['number'] = filters['suppl_number']

        if 'suppl_volume' in filters:
            # supplement to a volume: number must be empty
            param_filters['type'] = 'supplement'
            param_filters['number'] = ''
            param_filters['volume'] = filters['suppl_volume']

        issues = Issue.objects.filter(**param_filters)
        orm_filters['pk__in'] = issues

        return orm_filters

    def dehydrate_thematic_titles(self, bundle):
        """Map iso language code -> thematic title for this issue."""
        return dict([title.language.iso_code, title.title]
                    for title in bundle.obj.issuetitle_set.all())

    def dehydrate_is_press_release(self, bundle):
        # always False on v1; press releases have dedicated resources
        return False

    def dehydrate_suppl_volume(self, bundle):
        """Supplement text when this issue supplements a volume, else ''."""
        if bundle.obj.type == 'supplement':
            return bundle.obj.suppl_text if bundle.obj.volume else ''
        else:
            return ''

    def dehydrate_suppl_number(self, bundle):
        """Supplement text when this issue supplements a number, else ''."""
        if bundle.obj.type == 'supplement':
            return bundle.obj.suppl_text if bundle.obj.number else ''
        else:
            return ''
class CollectionResource(ModelResource):
    # Read-only v1 endpoint listing collections.
    class Meta(ApiKeyAuthMeta):
        queryset = Collection.objects.all()
        resource_name = 'collections'
        allowed_methods = ['get', ]
class SubjectCategoryResource(ModelResource):
    # Read-only v1 endpoint listing subject categories.
    class Meta(ApiKeyAuthMeta):
        queryset = SubjectCategory.objects.all()
        resource_name = 'subjectcategory'
        allowed_methods = ['get', ]
class SponsorResource(ModelResource):
    # Read-only v1 endpoint listing sponsors.
    class Meta(ApiKeyAuthMeta):
        queryset = Sponsor.objects.all()
        resource_name = 'sponsors'
        allowed_methods = ['get', ]
class UserResource(ModelResource):
    # Read-only v1 endpoint for users; credential and permission fields are
    # excluded so they are never exposed through the API.
    class Meta(ApiKeyAuthMeta):
        queryset = User.objects.all()
        resource_name = 'users'
        allowed_methods = ['get', ]
        excludes = [
            'email',
            'password',
            'is_active',
            'is_staff',
            'is_superuser',
        ]
class JournalResource(ModelResource):
    """Read-only v1 endpoint for journals."""
    missions = fields.CharField(readonly=True)
    other_titles = fields.CharField(readonly=True)
    creator = fields.ForeignKey(UserResource, 'creator')
    abstract_keyword_languages = fields.CharField(readonly=True)
    languages = fields.CharField(readonly=True)
    use_license = fields.ForeignKey(UseLicenseResource, 'use_license', full=True)
    sponsors = fields.ManyToManyField(SponsorResource, 'sponsor')
    collections = fields.ManyToManyField(CollectionResource, 'collections')
    issues = fields.OneToManyField(IssueResource, 'issue_set')
    sections = fields.OneToManyField(SectionResource, 'section_set')
    subject_categories = fields.ManyToManyField(SubjectCategoryResource, 'subject_categories', readonly=True)
    pub_status_history = fields.ListField(readonly=True)
    contact = fields.DictField(readonly=True)
    study_areas = fields.ListField(readonly=True)
    pub_status = fields.CharField(readonly=True)
    pub_status_reason = fields.CharField(readonly=True)
    national_code = fields.CharField(attribute='ccn_code', readonly=True)

    # recursive field
    previous_title = fields.ForeignKey('self', 'previous_title', null=True)
    succeeding_title = fields.ForeignKey('self', 'succeeding_title', null=True)

    class Meta(ApiKeyAuthMeta):
        queryset = Journal.objects.all().filter()
        resource_name = 'journals'
        allowed_methods = ['get', ]
        filtering = {
            'is_trashed': ('exact',),
            'eletronic_issn': ('exact',),
            'print_issn': ('exact',),
        }

    def build_filters(self, filters=None):
        """
        Custom filter that retrieves data by the collection's name_slug.
        """
        if filters is None:
            filters = {}

        orm_filters = super(JournalResource, self).build_filters(filters)

        if 'collection' in filters:
            journals = Journal.objects.filter(
                collections__name_slug=filters['collection'])
            orm_filters['pk__in'] = journals

        if 'pubstatus' in filters:
            # keep the previous filtering
            try:
                j = orm_filters['pk__in']
            except KeyError:
                j = Journal.objects
            statuses = filters.getlist('pubstatus')
            journals = j.filter(
                membership__status__in=statuses)
            orm_filters['pk__in'] = journals

        return orm_filters

    def _collection_for(self, bundle):
        """Resolve the single-collection context required by v1.

        v1 does not support multiple collections per journal; when the
        journal belongs to more than one, the mandatory ``collection``
        query-string param selects the one to use, otherwise BadRequest
        is raised. (Shared by the pub_status dehydrators, which used to
        duplicate this logic.)
        """
        try:
            return bundle.obj.collections.get()
        except MultipleObjectsReturned:
            # Get collection by query string
            query_collection = bundle.request.GET.get('collection')
            if query_collection:
                return bundle.obj.collections.get(name_slug=query_collection)
            else:
                raise BadRequest("missing collection param")

    def dehydrate_missions(self, bundle):
        """
        IMPORTANT: Changed to dict on V2
        missions: {
        en: "To publish articles of clinical and experimental...",
        es: "Publicar artículos de estudios clínicos y experim...",
        pt: "Publicar artigos de estudos clínicos e experiment..."
        },
        """
        return [(mission.language.iso_code, mission.description)
                for mission in bundle.obj.missions.all()]

    def dehydrate_other_titles(self, bundle):
        """
        IMPORTANT: Changed to dict on V2
        other_titles: {
        other: "Arquivos Brasileiros de Cirurgia Digestiva",
        paralleltitle: "Brazilian Archives of Digestive Surgery"
        },
        """
        return [(title.category, title.title)
                for title in bundle.obj.other_titles.all()]

    def dehydrate_languages(self, bundle):
        """List of iso codes of the journal's languages."""
        return [language.iso_code
                for language in bundle.obj.languages.all()]

    def dehydrate_subject_categories(self, bundle):
        """List of subject-category terms."""
        return [subject_category.term
                for subject_category in bundle.obj.subject_categories.all()]

    def dehydrate_pub_status_history(self, bundle):
        """Status events, most recent first."""
        return [{'date': event.since,
                 'status': event.status}
                for event in bundle.obj.statuses.order_by('-since').all()]

    def dehydrate_study_areas(self, bundle):
        """List of the journal's study areas."""
        return [area.study_area
                for area in bundle.obj.study_areas.all()]

    def dehydrate_collections(self, bundle):
        """
        Only works with v1, without multiple collections per journal.
        IMPORTANT: This prepare function was removed from V2
        """
        try:
            return bundle.data['collections'][0]
        except IndexError:
            return ''

    def dehydrate_pub_status(self, bundle):
        """
        The version v1 of API doesnt work with multiple collections.
        To get the information about status of journal is mandatory the collection
        context, so we get this in the query string.
        IMPORTANT: the param ``collection`` is mandatory.
        """
        return bundle.obj.membership_info(self._collection_for(bundle), 'status')

    def dehydrate_pub_status_reason(self, bundle):
        """
        The version v1 of API doesnt work with multiple collections.
        To get the information about status of journal is mandatory the collection
        context, so we get this in the query string.
        IMPORTANT: the param ``collection`` is mandatory.
        """
        return bundle.obj.membership_info(self._collection_for(bundle), 'reason')

    def dehydrate(self, bundle):
        # backward compatibility: hide the raw ccn_code attribute
        # (it is exposed as 'national_code' instead)
        bundle.data.pop('ccn_code', False)
        return bundle
class PressReleaseTranslationResource(ModelResource):
    # Translated body of a press release; `language` is flattened to its
    # iso code by the dehydrator below.
    language = fields.CharField(readonly=True)
    class Meta(ApiKeyAuthMeta):
        resource_name = 'prtranslations'
        queryset = PressReleaseTranslation.objects.all()
        allowed_methods = ['get', ]
    def dehydrate_language(self, bundle):
        # expose the iso code instead of the Language FK
        return bundle.obj.language.iso_code
class PressReleaseResource(ModelResource):
    # Read-only v1 endpoint for regular (issue-bound) press releases.
    issue_uri = fields.ForeignKey(IssueResource, 'issue')
    translations = fields.OneToManyField(PressReleaseTranslationResource,
                                         'translations',
                                         full=True)
    articles = fields.CharField(readonly=True)
    issue_meta = fields.CharField(readonly=True)
    class Meta(ApiKeyAuthMeta):
        resource_name = 'pressreleases'
        queryset = RegularPressRelease.objects.all()
        allowed_methods = ['get', ]
        ordering = ['id']
    def build_filters(self, filters=None):
        """
        Custom filter that retrieves data by the article PID.

        The article_pid / journal_pid / issue_pid filters are mutually
        exclusive; the first one present wins.
        """
        if filters is None:
            filters = {}
        orm_filters = super(PressReleaseResource, self).build_filters(filters)
        if 'article_pid' in filters:
            preleases = RegularPressRelease.objects.filter(
                articles__article_pid=filters['article_pid'])
            orm_filters['pk__in'] = preleases
        elif 'journal_pid' in filters:
            preleases = RegularPressRelease.objects.by_journal_pid(
                filters['journal_pid'])
            orm_filters['pk__in'] = preleases
        elif 'issue_pid' in filters:
            preleases = RegularPressRelease.objects.by_issue_pid(
                filters['issue_pid'])
            orm_filters['pk__in'] = preleases
        return orm_filters
    def dehydrate_articles(self, bundle):
        # list of article PIDs covered by this press release
        return [art.article_pid for art in bundle.obj.articles.all()]
    def dehydrate_issue_meta(self, bundle):
        """Denormalised metadata of the issue this press release belongs to."""
        issue = bundle.obj.issue
        meta_data = {
            'scielo_pid': issue.scielo_pid,
            'short_title': issue.journal.short_title,
            'volume': issue.volume,
            'number': issue.number,
            'suppl_volume': issue.suppl_text if issue.type == 'supplement' and issue.volume else '',
            'suppl_number': issue.suppl_text if issue.type == 'supplement' and issue.number else '',
            'publication_start_month': issue.publication_start_month,
            'publication_end_month': issue.publication_end_month,
            'publication_city': issue.journal.publication_city,
            'publication_year': issue.publication_year,
        }
        return meta_data
class AheadPressReleaseResource(ModelResource):
    # Read-only v1 endpoint for ahead-of-print press releases (bound to a
    # journal rather than to a specific issue).
    journal_uri = fields.ForeignKey(JournalResource, 'journal')
    translations = fields.OneToManyField(PressReleaseTranslationResource,
                                         'translations',
                                         full=True)
    articles = fields.CharField(readonly=True)
    class Meta(ApiKeyAuthMeta):
        resource_name = 'apressreleases'
        queryset = AheadPressRelease.objects.all()
        allowed_methods = ['get', ]
    def dehydrate_articles(self, bundle):
        # list of article PIDs covered by this press release
        return [art.article_pid for art in bundle.obj.articles.all()]
    def build_filters(self, filters=None):
        """
        Custom filter that retrieves data by the article PID.

        article_pid and journal_pid are mutually exclusive; the first
        one present wins.
        """
        if filters is None:
            filters = {}
        orm_filters = super(AheadPressReleaseResource, self).build_filters(filters)
        if 'article_pid' in filters:
            preleases = AheadPressRelease.objects.filter(
                articles__article_pid=filters['article_pid'])
            orm_filters['pk__in'] = preleases
        elif 'journal_pid' in filters:
            preleases = AheadPressRelease.objects.by_journal_pid(
                filters['journal_pid'])
            orm_filters['pk__in'] = preleases
        return orm_filters
|
from collections import deque
from lib import puzzle
def play_sound(reg, args):
    """'snd X': remember the value of X (register name or literal int) in
    reg['freq']. Returns the instruction-pointer offset (always 1)."""
    operand = reg[args[0]] if args[0] in reg else int(args[0])
    reg['freq'] = operand
    return 1
def set_(reg, args):
    """'set X Y': X = Y, where Y is a register name or a literal int.
    Returns the instruction-pointer offset (always 1)."""
    reg[args[0]] = reg[args[1]] if args[1] in reg else int(args[1])
    return 1
def add(reg, args):
    """'add X Y': X += Y (register name or literal int).
    Returns the instruction-pointer offset (always 1)."""
    reg[args[0]] += reg[args[1]] if args[1] in reg else int(args[1])
    return 1
def mul(reg, args):
    """'mul X Y': X *= Y (register name or literal int).
    Returns the instruction-pointer offset (always 1)."""
    reg[args[0]] *= reg[args[1]] if args[1] in reg else int(args[1])
    return 1
def mod(reg, args):
    """'mod X Y': X %= Y (register name or literal int).
    Returns the instruction-pointer offset (always 1)."""
    reg[args[0]] %= reg[args[1]] if args[1] in reg else int(args[1])
    return 1
def rcv(reg, args):
    """'rcv X': recover the last played frequency into register X.

    Bug fix: per the puzzle spec the recovery happens only when *X* is
    non-zero; the original tested reg['freq'] instead, so it clobbered a
    zero-valued register (and recovered) as soon as any sound had played.
    Returns the instruction-pointer offset (always 1).
    """
    # nothing played yet, or X is zero: no-op per spec
    if reg['freq'] == 0 or reg[args[0]] == 0:
        return 1
    reg[args[0]] = reg['freq']
    return 1
def jgz(reg, args):
    """'jgz X Y': jump Y steps when X > 0, otherwise fall through.

    Both operands may be register names or literal ints.
    Returns the instruction-pointer offset.
    """
    def value_of(token):
        return reg[token] if token in reg else int(token)

    offset = value_of(args[1])
    if value_of(args[0]) > 0:
        return offset
    return 1
# Dispatch table: instruction mnemonic -> handler. Each handler takes
# (registers, operand tuple) and returns the instruction-pointer offset.
instructions = {
    'snd': play_sound,
    'set': set_,
    'add': add,
    'mul': mul,
    'mod': mod,
    'rcv': rcv,
    'jgz': jgz,
}
def run_process(reg, program, instructions):
    """Execute one instruction of *program* for the process state *reg*.

    Marks the process terminated (and returns 0) when the instruction
    pointer leaves the program; also returns 0 without executing when
    already terminated. Otherwise advances reg['next_instruction'] by the
    offset the handler returns.
    """
    ip = reg['next_instruction']
    if not (-1 < ip < len(program)):
        print(f"instruction out of range {ip} {len(program)}")
        reg['terminated'] = True
        return 0
    if reg['terminated']:
        print('process terminated')
        return 0
    opcode, *operands = program[ip]
    reg['next_instruction'] += instructions[opcode](reg, operands)
def new_snd(reg, args):
    """Part-2 'snd X': append X's value to this program's send queue and
    count the send. Returns the instruction-pointer offset (always 1)."""
    payload = reg[args[0]] if args[0] in reg else int(args[0])
    reg['snd_count'] += 1
    reg['snd_queue'].append(payload)
    return 1
def new_rcv(reg, args):
    """Part-2 'rcv X': pop from the receive queue into X, or block.

    Returns 1 (advance) on success; returns 0 (retry the same instruction)
    when the queue is empty, incrementing the consecutive-block counter.
    """
    if not len(reg['rcv_queue']):
        reg['blocked'] += 1
        return 0
    reg[args[0]] = reg['rcv_queue'].popleft()
    reg['blocked'] = 0
    return 1
class Day18(puzzle.Puzzle):
    """Advent of Code 2017 day 18: the 'duet' assembly interpreter."""
    year = '2017'
    day = '18'

    def get_data(self):
        """Parse the puzzle input into a list of instruction tuples."""
        data = self.input_data
        return [tuple(y.split(' ')) for y in data.splitlines()]

    def part1(self):
        """Run the program single-threaded and return the first frequency
        recovered by a 'rcv' on a non-zero register."""
        data = self.get_data()
        registers = {chr(a): 0 for a in range(ord('a'), ord('z') + 1)}
        registers['freq'] = 0
        last_recovered_frequency = 0
        i = 0
        while -1 < i < len(data):
            old_i = i
            instruction = instructions[data[i][0]]
            i += instruction(registers, data[i][1:])
            if data[old_i][0] == 'rcv':
                if registers[data[old_i][1]] != 0:
                    last_recovered_frequency = registers[data[old_i][1]]
                    break
        return last_recovered_frequency

    def part2(self):
        """Run two communicating copies of the program; return how many
        values program 1 sends before deadlock or termination."""
        data = self.get_data()
        registers0 = {chr(a): 0 for a in range(ord('a'), ord('z') + 1)}
        registers1 = {chr(a): 0 for a in range(ord('a'), ord('z') + 1)}
        registers0['p'] = 0
        registers1['p'] = 1
        for reg in (registers0, registers1):
            reg['next_instruction'] = 0
            reg['blocked'] = 0
            reg['terminated'] = False
            reg['snd_queue'] = deque()
            reg['snd_count'] = 0
        # each program receives from the other's send queue
        registers0['rcv_queue'] = registers1['snd_queue']
        registers1['rcv_queue'] = registers0['snd_queue']
        # bug fix: build a local dispatch table instead of mutating the
        # module-level `instructions`, which used to break any part1()
        # call made after part2()
        duet_instructions = dict(instructions)
        duet_instructions['snd'] = new_snd
        duet_instructions['rcv'] = new_rcv
        while not registers0['terminated'] and not registers1['terminated']:
            if registers0['blocked'] > 2 and registers1['blocked'] > 2:
                # deadlock: both programs repeatedly waiting on empty queues
                break
            run_process(registers0, data, duet_instructions)
            run_process(registers1, data, duet_instructions)
        return registers1['snd_count']

    def run(self):
        print(f'Part 1 Answer: {self.part1()}')
        print(f'Part 2 Answer: {self.part2()}')
|
"""
Django settings for app project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import abspath, basename, dirname, join, normpath
from sys import path
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e_6=xn9gl3t&$mrt+dmurcn%f#1_jr6*#f@snzji4fjl547g$$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
########## END PATH CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
normpath(join(SITE_ROOT, 'app', 'templates')),
)
########## END TEMPLATE CONFIGURATION
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'app.urls'
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))
STATIC_URL = '/static/'
STATICFILES_DIRS = (
normpath(join(SITE_ROOT, 'app/static')),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
|
from setuptools import setup

# Packaging metadata for the DatasetCreator image-dataset tool.
setup(
    name='DatasetCreator',
    version='0.1',
    description='Image Dataset Creator',
    packages=['DatasetCreator'],
    zip_safe=False,
)
|
#!/usr/bin/env python
import os
import sys
import requests
import json
import defusedxml.ElementTree as ET
import configparser
import logging
import logging.handlers
import argparse
import ldap
import re
# set up logging
LOGGER = logging.getLogger("AMS User create script")
# RDN types that RdnSequence knows how to map onto its fields; any other
# type appearing in a DN is rejected as invalid.
ACCEPTED_RDNS = [
    "emailAddress", "CN", "OU", "O", "postalCode", "street", "L", "ST", "C", "DC"
]
class RdnSequence(object):
    """Parsed representation of a certificate DN (distinguished name).

    The slash-separated DN string (e.g. ``/C=PL/O=GRID/O=OU/CN=example.com``)
    is parsed with the python-ldap utilities and each RDN value is collected
    into the matching attribute list. ``str()`` re-assembles the DN in the
    fixed order the authn service expects
    (E, CN, OU, O, POSTALCODE, STREET, L, ST, C, DC), joining values of the
    same RDN with ``+`` and distinct RDNs with ``,``.
    """

    def __init__(self, rdn_string):
        # One list per supported RDN type; each may hold multiple values.
        self.EmailAddress = []
        self.CommonName = []
        self.OrganizationalUnit = []
        self.Organization = []
        self.PostalCode = []
        self.Street = []
        self.Locality = []
        self.Province = []
        self.Country = []
        self.DomainComponent = []
        self._parse_dn_string_ldap_util(rdn_string)

    @staticmethod
    def _rdn_to_type_and_value(rdn_string):
        """Split a single ``type=value`` RDN and validate its type.

        Splits on the FIRST ``=`` only, so values that themselves contain
        ``=`` (e.g. base64 padding) are preserved intact.  (The previous
        implementation split on every ``=`` and silently truncated such
        values.)

        Raises:
            ValueError: if the RDN contains no ``=`` or its type is not in
                ACCEPTED_RDNS.
        """
        if "=" not in rdn_string:
            raise ValueError("Invalid rdn: " + str(rdn_string))
        # maxsplit=1: only the first '=' separates type from value
        rdn_type, rdn_value = rdn_string.split("=", 1)
        if rdn_type not in ACCEPTED_RDNS:
            raise ValueError("Not accepted rdn : " + str(rdn_type))
        return rdn_type, rdn_value

    def _assign_rdn_to_field(self, rdn_type, rdn_value):
        """Append an RDN value to the list matching its type.

        Unknown types are silently ignored; callers validate types first
        via _rdn_to_type_and_value.
        """
        field_by_type = {
            "emailAddress": self.EmailAddress,
            "CN": self.CommonName,
            "OU": self.OrganizationalUnit,
            "O": self.Organization,
            "postalCode": self.PostalCode,
            "street": self.Street,
            "L": self.Locality,
            "ST": self.Province,
            "C": self.Country,
            "DC": self.DomainComponent,
        }
        target = field_by_type.get(rdn_type)
        if target is not None:
            target.append(rdn_value)

    @staticmethod
    def _escape_cn_rdn_string(dn_string):
        """Escape the slash inside a ``CN={service}/{fqdn}`` RDN.

        CN=service/example.com must become CN=service\\/example.com or the
        ldap parser will treat the fqdn as a separate RDN.  Two regexes
        cover the CN-with-slash layouts; each splits the DN into three
        groups and re-assembles it with a backslash inserted after the
        service part.

        The 5 DN layouts encountered so far, all of which must survive this
        method unchanged except for the needed escape:
          - /C=PL/O=GRID/O=OU/CN=service/example.com/emailAddress=t@x.com  (escape + email)
          - /C=PL/O=GRID/O=OU/CN=service/example.com                       (escape only)
          - /C=PL/O=GRID/O=OU/CN=example.com                               (no escape)
          - /C=PL/O=GRID/O=ICM                                             (no CN)
          - /C=PL/O=GRID/O=OU/CN=example.com/emailAddress=t@x.com          (no escape + email)
        Cases 3 and 5 share a layout with case 2, which is why the second
        regex excludes "=" from its third group (/[^/=]+$).

        :param dn_string: raw DN string
        :return: the escaped dn string (unchanged when no escape is needed)
        """
        repl_pattern = r"\g<1>\g<2>\\\g<3>"
        cn_escape_email_address_pattern = re.compile(r"(.*CN=)([^/]+)(/[^/]+/emailAddress=.*$)")
        if re.match(cn_escape_email_address_pattern, dn_string):
            return re.sub(cn_escape_email_address_pattern, repl_pattern, dn_string)
        cn_only_escape_pattern = re.compile(r"(.*CN=)([^/]+)(/[^/=]+$)")
        if re.match(cn_only_escape_pattern, dn_string):
            return re.sub(cn_only_escape_pattern, repl_pattern, dn_string)
        return dn_string

    def _parse_dn_string_ldap_util(self, dn_string):
        """Parse the DN string with python-ldap and fill the RDN fields.

        Also caters to the ``CN={service}/{fqdn}`` layout via
        _escape_cn_rdn_string.

        Raises:
            ValueError: when the DN cannot be parsed in DCE format.
        """
        # escape the CN slash (if any) so the ldap parser accepts the DN
        escaped_dn_string = self._escape_cn_rdn_string(dn_string)
        # check that the DN string is valid and can be parsed
        if not ldap.dn.is_dn(escaped_dn_string, ldap.DN_FORMAT_DCE):
            raise ValueError("DN cannot be parsed with the DN_FORMAT_DCE encoding")
        try:
            rdns_list = ldap.dn.explode_dn(escaped_dn_string, notypes=False, flags=ldap.DN_FORMAT_DCE)
        except Exception as e:
            raise ValueError(str(e))
        # explode_dn returns the RDNs most-specific first, e.g.
        # ['CN=host/example.com', 'OU=DESY', ..., 'DC=tcs', 'DC=terena', 'DC=org'].
        # The authn Golang service expects multi-valued RDNs in reverse of
        # that order (DC=org+DC=terena+DC=tcs), so process the list reversed.
        for rdn in reversed(rdns_list):
            rdn_type, rdn_value = self._rdn_to_type_and_value(rdn)
            self._assign_rdn_to_field(rdn_type, rdn_value)

    def _parse_dn_string(self, dn_string):
        """Parse the slash-separated DN string manually (fallback parser)."""
        # split the string and skip the empty string before the first slash
        list_of_rdns = dn_string.split("/")[1:]
        # identify each rdn and append to the respective list of values
        for rdn in list_of_rdns:
            rdn_type, rdn_value = self._rdn_to_type_and_value(rdn)
            self._assign_rdn_to_field(rdn_type, rdn_value)

    @staticmethod
    def _format_rdn_to_string(rdn, rdn_values):
        """Render one RDN and its values as a ``+``-joined string.

        Attributes:
            rdn(str): the RDN label, e.g. "DC"
            rdn_values(list): the values of the given RDN
        Returns:
            (str): e.g. rdn="DC", values=[argo, grnet, gr] ->
                   "DC=argo+DC=grnet+DC=gr"
        """
        return "+".join("{0}={1}".format(rdn, value) for value in rdn_values)

    def __str__(self):
        # Fixed RDN output order expected by the authn service:
        # E - CN - OU - O - POSTALCODE - STREET - L - ST - C - DC.
        ordered_fields = [
            ("E", self.EmailAddress),
            ("CN", self.CommonName),
            ("OU", self.OrganizationalUnit),
            ("O", self.Organization),
            ("POSTALCODE", self.PostalCode),
            ("STREET", self.Street),
            ("L", self.Locality),
            ("ST", self.Province),
            ("C", self.Country),
            ("DC", self.DomainComponent),
        ]
        # Non-empty RDN groups are joined with commas.
        return ",".join(
            self._format_rdn_to_string(label, values)
            for label, values in ordered_fields if values)
def create_users(config, verify):
    """Sync GOCDB service endpoints into AMS users and authn bindings.

    For each configured service type: fetch its endpoints from GOCDB,
    create one AMS user per endpoint (named service---hostname---sitename),
    create or refresh the matching x509 binding in the authn service, and
    finally add all of the service type's users to the topic's ACL.

    Attributes:
        config(ConfigParser): parsed configuration with AMS/AUTHN sections
        verify(bool): SSL verification flag used for AMS/authn requests
    """
    # retrieve ams info
    ams_host = config.get("AMS", "ams_host")
    ams_project = config.get("AMS", "ams_project")
    ams_token = config.get("AMS", "ams_token")
    ams_email = config.get("AMS", "ams_email")
    users_role = config.get("AMS", "users_role")
    goc_db_url_arch = config.get("AMS", "goc_db_host")
    goc_db_site_url = "https://goc.egi.eu/gocdbpi/public/?method=get_site&sitename={{sitename}}"
    # retrieve authn info
    authn_host = config.get("AUTHN", "authn_host")
    authn_service_uuid = config.get("AUTHN", "service_uuid")
    authn_token = config.get("AUTHN", "authn_token")
    authn_service_host = config.get("AUTHN", "service_host")
    # dict that acts as a cache for site contact emails
    site_contact_emails = {}
    # cert key tuple
    cert_creds = (config.get("AMS", "cert"), config.get("AMS", "cert_key"))
    # services holds all different services that the users might belong to(which translates to ams topics)
    # each service will have a list of users associated with it
    services = {}
    conf_services = config.get("AMS", "service-types").split(",")
    for srv_type in conf_services:
        # strip any whitespaces
        srv_type = srv_type.replace(" ", "")
        # user count
        user_count = 0
        # form the goc db url
        goc_db_url = goc_db_url_arch.replace("{{service-type}}", srv_type)
        LOGGER.info("\nAccessing url: " + goc_db_url)
        LOGGER.info("\nStarted the process for service-type:" + srv_type)
        # grab the xml data from goc db
        # NOTE(review): verify=False disables TLS verification for GOCDB
        # regardless of the `verify` argument — confirm this is intended.
        goc_request = requests.get(url=goc_db_url, cert=cert_creds ,verify=False)
        LOGGER.info(goc_request.text)
        # users from goc db that don't have a dn registered
        missing_dns = []
        # updated bindings count
        update_binding_count= 0
        # updated bindings names
        update_bindings_names= []
        # srv_type
        # AMS names cannot contain dots, so map '.' to '-'
        srv_type = srv_type.replace(".", "-")
        services[srv_type] = []
        # build the xml object
        root = ET.fromstring(goc_request.text)
        # iterate through the xml object's service_endpoints
        for service_endpoint in root.findall("SERVICE_ENDPOINT"):
            service_type = service_endpoint.find("SERVICE_TYPE").text.replace(".", "-")
            # grab the dn
            service_dn = service_endpoint.find("HOSTDN")
            if service_dn is None:
                missing_dns.append(service_endpoint.find("HOSTNAME").text)
                continue
            # Create AMS user
            hostname = service_endpoint.find("HOSTNAME").text.replace(".", "-")
            sitename = service_endpoint.find("SITENAME").text.replace(".", "-")
            user_binding_name = service_type + "---" + hostname + "---" + sitename
            # try to get the site's contact email
            contact_email = ams_email
            # check the if we have retrieved this site's contact email before
            site_name = service_endpoint.find("SITENAME").text
            if site_name in site_contact_emails:
                contact_email = site_contact_emails[site_name]
            else:
                try:
                    # try to retrieve the site info from gocdb
                    site_url = goc_db_site_url.replace("{{sitename}}", site_name)
                    goc_site_request = requests.get(site_url, cert=cert_creds, verify=False)
                    site_xml_obj = ET.fromstring(goc_site_request.text)
                    # check if the site is in production
                    in_prod = site_xml_obj.find("SITE").find("PRODUCTION_INFRASTRUCTURE")
                    if in_prod.text != 'Production':
                        raise Exception("Not in production")
                    # check for certified or uncertified
                    cert_uncert = site_xml_obj.find("SITE").find("CERTIFICATION_STATUS")
                    if cert_uncert.text != "Certified" and cert_uncert.text != "Uncertified":
                        raise Exception("Neither certified nor uncertified")
                    contact_email = site_xml_obj.find("SITE").find("CONTACT_EMAIL").text
                    site_contact_emails[site_name] = contact_email
                except Exception as e:
                    # NOTE(review): the message says "Skipping" but there is
                    # no `continue`; the endpoint is still processed with the
                    # default ams_email as contact — confirm which is intended.
                    LOGGER.warning("Skipping endpoint {} under site {}, {}".format(
                        hostname, site_name, e))
            # convert the dn
            try:
                service_dn = RdnSequence(service_dn.text).__str__()
            except ValueError as ve:
                LOGGER.error("Invalid DN: {}. Exception: {}".format(service_dn.text, ve))
                continue
            project = {'project': ams_project, 'roles': [users_role]}
            usr_create = {'projects': [project], 'email': contact_email}
            # create the user
            api_url = 'https://{0}/v1/projects/{1}/members/{2}?key={3}'.format(ams_host, ams_project, user_binding_name, ams_token)
            ams_usr_crt_req = requests.post(url=api_url, data=json.dumps(usr_create), verify=verify)
            LOGGER.info(ams_usr_crt_req.text)
            ams_user_uuid = ""
            # if the response is neither a 200(OK) nor a 409(already exists)
            # then move on to the next user
            if ams_usr_crt_req.status_code != 200 and ams_usr_crt_req.status_code != 409:
                LOGGER.critical("\nUser: " + user_binding_name)
                LOGGER.critical(
                    "\nSomething went wrong while creating ams user." +
                    "\nBody data: " + str(usr_create) + "\nResponse Body: " +
                    ams_usr_crt_req.text)
                continue
            if ams_usr_crt_req.status_code == 200:
                ams_user_uuid = ams_usr_crt_req.json()["uuid"]
                # count how many users have been created
                user_count += 1
            # If the user already exists, Get user by username
            if ams_usr_crt_req.status_code == 409:
                proj_member_list_url = "https://{0}/v1/projects/{1}/members/{2}?key={3}".format(ams_host, ams_project, user_binding_name, ams_token)
                ams_usr_get_req = requests.get(url=proj_member_list_url, verify=verify)
                # if the user retrieval was ok
                if ams_usr_get_req.status_code == 200:
                    LOGGER.info("\nSuccessfully retrieved user {} from ams".format(user_binding_name))
                    ams_user_uuid = ams_usr_get_req.json()["uuid"]
                else:
                    LOGGER.critical(
                        "\nCould not retrieve user {} from ams."
                        "\n Response {}".format(user_binding_name, ams_usr_get_req.text))
                    continue
            # Create the respective AUTH binding
            bd_data = {
                'service_uuid': authn_service_uuid,
                'host': authn_service_host,
                'auth_identifier': service_dn,
                'unique_key': ams_user_uuid,
                "auth_type": "x509"
            }
            create_binding_url = "https://{0}/v1/bindings/{1}?key={2}".format(authn_host, user_binding_name, authn_token)
            authn_binding_crt_req = requests.post(url=create_binding_url, data=json.dumps(bd_data), verify=verify)
            LOGGER.info(authn_binding_crt_req.text)
            if authn_binding_crt_req.status_code != 201 and authn_binding_crt_req.status_code != 409:
                LOGGER.critical("Something went wrong while creating a binding.\nBody data: " + str(bd_data) + "\nResponse: " + authn_binding_crt_req.text)
            # if the binding already exists, check for an updated DN from gocdb
            if authn_binding_crt_req.status_code == 409:
                retrieve_binding_url = "https://{0}/v1/bindings/{1}?key={2}".format(authn_host, user_binding_name, authn_token)
                authn_ret_bind_req = requests.get(url=retrieve_binding_url, verify=verify)
                # if the binding retrieval was ok
                if authn_ret_bind_req.status_code == 200:
                    LOGGER.info("\nSuccessfully retrieved binding {} from authn. Checking for DN update.".format(user_binding_name))
                    binding = authn_ret_bind_req.json()
                    # check if the dn has changed
                    if binding["auth_identifier"] != service_dn:
                        # update the respective binding with the new dn
                        bind_upd_req_url = "https://{0}/v1/bindings/{1}?key={2}".format(authn_host, user_binding_name, authn_token)
                        upd_bd_data = {
                            "auth_identifier": service_dn
                        }
                        authn_bind_upd_req = requests.put(url=bind_upd_req_url, data=json.dumps(upd_bd_data), verify=verify)
                        LOGGER.info(authn_bind_upd_req.text)
                        if authn_bind_upd_req.status_code == 200:
                            update_binding_count += 1
                            update_bindings_names.append(user_binding_name)
                else:
                    LOGGER.critical(
                        "\nCould not retrieve binding {} from authn."
                        "\n Response {}".format(user_binding_name, authn_ret_bind_req.text))
                    continue
            # if both the user and binding have been created, assign the user to the acl of the topic
            services[service_type].append(user_binding_name)
        # modify the acl for each topic , to add all associated users
        authorized_users = services[srv_type]
        if len(authorized_users) != 0:
            get_topic_acl_req = requests.get("https://"+ams_host+"/v1/projects/"+ams_project+"/topics/"+srv_type+":acl?key="+ams_token, verify=verify)
            if get_topic_acl_req.status_code == 200:
                # merge the new users with the topic's existing ACL
                acl_users = json.loads(get_topic_acl_req.text)
                authorized_users = authorized_users + acl_users["authorized_users"]
                # remove duplicates
                authorized_users = list(set(authorized_users))
                modify_topic_acl_req = requests.post("https://"+ams_host+"/v1/projects/"+ams_project+"/topics/"+srv_type+":modifyAcl?key="+ams_token, data=json.dumps({'authorized_users': authorized_users}), verify=verify)
                LOGGER.critical("Modified ACL for topic: {} with users {}. Response from AMS {}".format(srv_type, str(authorized_users), modify_topic_acl_req.text))
            else:
                LOGGER.critical("Couldn't get ACL for topic {}. Response from AMS {}".format(srv_type, get_topic_acl_req.text))
        # per-service-type summary (critical level so it also reaches syslog)
        LOGGER.critical("Service Type: " + srv_type)
        LOGGER.critical("Missing DNS: " + str(missing_dns))
        LOGGER.critical("Total Users Created: " + str(user_count))
        LOGGER.critical("Total Bindings Updated: " + str(update_binding_count))
        # NOTE(review): "bingings" typo is part of the emitted log string.
        LOGGER.critical("Updated bingings: " + str(update_bindings_names))
        LOGGER.critical("-----------------------------------------")
def main(args=None):
    """Load the configuration, wire up logging and kick off user creation.

    Config lookup order: explicit -c/--ConfigPath argument, then the
    system-wide /etc/argo-api-authn location, then the in-repo default.
    """
    config = configparser.ConfigParser()
    system_cfg = "/etc/argo-api-authn/conf.d/ams-create-users-gocdb.cfg"
    if args.ConfigPath is not None:
        config.read(args.ConfigPath)
    elif os.path.isfile(system_cfg):
        config.read(system_cfg)
    else:
        config.read("../../conf/ams-create-users-gocdb.cfg")
    # console output for everything at INFO and above
    LOGGER.addHandler(logging.StreamHandler())
    LOGGER.setLevel(logging.INFO)
    # syslog output restricted to warnings and above
    sys_handler = logging.handlers.SysLogHandler(config.get("LOGS", "syslog_socket"))
    sys_handler.setFormatter(logging.Formatter('%(name)s[%(process)d]: %(levelname)s %(message)s'))
    sys_handler.setLevel(logging.WARNING)
    LOGGER.addHandler(sys_handler)
    # start the process of creating users
    create_users(config, args.Verify)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create ams users and their respective bindings using data imported from goc db")
parser.add_argument(
"-c", "--ConfigPath", type=str, help="Path for the config file")
parser.add_argument(
"-verify", "--Verify", help="SSL verification for requests", action="store_true")
sys.exit(main(parser.parse_args())) |
#!/usr/bin/python
import json
import urllib2
import os
import re
#############################################################################
## RESTAPI KUNGFU BELOW
from flask import Flask, request
from flask_restful import Resource, Api, reqparse
app = Flask(__name__)
api = Api(app)
# In-memory stores (no persistence across restarts):
#   topic_list: username -> list of topic names
#   content_list: username -> {topic_name: [raw wikipedia payload]}
topic_list = {"user" : ["topicname"]}
content_list = {}
# Shared parser for the request bodies of the write endpoints.
parser = reqparse.RequestParser()
parser.add_argument('username')
parser.add_argument('topic_name')
parser.add_argument('topic_id')
def urlify(s):
    """Normalise *s* into a URL/key-safe slug.

    Strips every character that is not alphanumeric, underscore or
    whitespace, then collapses each run of whitespace into a single dash.
    """
    stripped = re.sub(r"[^\w\s]", '', s)
    return re.sub(r"\s+", '-', stripped)
def addWikidata (username,topic_name):
    """Fetch wikipedia opensearch results for *topic_name* and cache the
    raw JSON payload under content_list[username][topic_name].

    Returns the cached single-element list for the topic.
    NOTE(review): urllib2 is Python 2 only, and there is no error handling
    around the network call — a failed fetch raises to the caller.
    """
    content = urllib2.urlopen("https://en.wikipedia.org/w/api.php?action=opensearch&search="+topic_name+"&limit=20&namespace=0&format=json").read()
    # stashed draft of a null-result check, kept for reference:
    # checknull = str(content).replace()
    # checknull = str(content).split(",")
    # if None in checknull:
    #  topic_list[username].remove(topic_name)
    #  return "!!ERROR: content not added because info does not exist in wikipedia!!"
    content_list.setdefault(username,{})
    content_list[username].setdefault(topic_name,[])
    # the setdefault above is immediately overwritten by design: the cache
    # always holds the latest payload only
    content_list[username][topic_name]=[content]
    return content_list[username][topic_name]
class nextTopic(Resource):
    """GET /nextTopic?username=...&topic_id=N -> the N-th topic of a user."""
    def get(self):
        username = str(request.args.get("username")).lower()
        username = urlify(username)
        topic_id = request.args.get("topic_id")
        try:
            topic_id = int(topic_id)
        # BUG FIX: request.args.get returns None when the parameter is
        # missing, and int(None) raises TypeError (not ValueError), which
        # previously crashed the endpoint with a 500.
        except (TypeError, ValueError):
            return "!!ERROR: Topic_id given is not an number!!"
        if username not in topic_list:
            return "!!ERROR: username not in database!!"
        if int(topic_id) not in range(0,len(topic_list[username])):
            return "!!ERROR: Topic_id not within valid range. Expecting topic_id between 0-"+str(len(topic_list[username])-1)+" !!"
        else:
            d = topic_list[username][int(topic_id)]
            return d
class listAllTopic(Resource):
    """GET /listAllTopic -> every user's topic list."""
    def get(self):
        # flask-restful serialises the dict to JSON for us
        return topic_list
class listAllContent(Resource):
    """GET /listAllContent -> the full wikipedia-content cache (debug view)."""
    def get(self):
        return content_list
class listUserTopic(Resource):
    """GET /listUserTopic?username=... -> the topics of a single user."""
    def get(self):
        username = request.args.get("username")
        # Guard against unknown users instead of raising KeyError (500);
        # mirrors the error reporting used by nextTopic.
        if username not in topic_list:
            return "!!ERROR: username not in database!!"
        return topic_list[username]
class delUser(Resource):
    """DELETE /delUser (body: username) -> remove a user and their content."""
    def delete(self):
        args = parser.parse_args()
        username = args['username']
        if username not in topic_list:
            return "!! username does not exist !!"
        else:
            topic_list.pop(username)
            # The user may have no cached content (e.g. the wikipedia fetch
            # failed), so tolerate a missing key instead of raising KeyError.
            content_list.pop(username, None)
            d = topic_list
            return d
class delTopic(Resource):
    """DELETE /delTopic (body: username, topic_id) -> remove one topic.

    Drops the whole user when their last topic is removed.
    """
    def delete(self):
        args = parser.parse_args()
        username = args['username']
        topic_id = args['topic_id']
        # drop the cached content for the topic, then the topic itself
        content_list[username].pop(topic_list[username][int(topic_id)])
        del(topic_list[username][int(topic_id)])
        d = topic_list[username]
        # BUG FIX: the original tested `d is []`, which is always False
        # (identity comparison against a fresh list object); test emptiness.
        if not d:
            # delegates to delUser.delete, which re-parses the same request
            # body to find the username
            delUser.delete(self)
        return d
class addTopic(Resource):
    """PUT /addTopic (body: username, topic_name) -> register a topic and
    fetch its wikipedia content."""
    def put(self):
        args = parser.parse_args()
        username = urlify(str(args['username']).lower())
        topic_name = urlify(str(args['topic_name']).lower())
        # duplicate topics are rejected; everything else is appended and
        # its wikipedia content fetched
        if username in topic_list and topic_name in topic_list[username]:
            d="!!Topic not added, Already In List!!"
        else:
            topic_list.setdefault(username,[]).append(topic_name)
            d=addWikidata(username,topic_name)
        return d
# Route registration: one endpoint per Resource class above.
api.add_resource(nextTopic, '/nextTopic')
api.add_resource(listAllTopic, '/listAllTopic')
api.add_resource(listAllContent, '/listAllContent') # no need to expose this API outside. Mostly for internal debugging purposes only.
api.add_resource(listUserTopic, '/listUserTopic')
api.add_resource(delTopic, '/delTopic')
api.add_resource(delUser, '/delUser')
api.add_resource(addTopic, '/addTopic')
if __name__ == '__main__':
    IP="0.0.0.0"
    # NOTE(review): the PORT env var is required; int(None) raises TypeError
    # when it is unset — confirm the deployment always provides it.
    PORT=os.getenv("PORT")
    app.run(debug=True,host=IP, port=int(PORT))
## RESTAPI KUNGFU ABOVE
############################################################################
# To do:
# Better error condition checks and boundary cases to be caught.
# Add content from wikipedia in a different structure called "content"
# link this content with the nextTopic, delTopic and addTopic functions
#####################################################
|
import json
from bunch import Bunch
import os
def get_config_from_json(json_file):
    """Load a JSON config file.

    :param json_file: path to the JSON configuration file
    :return: (Bunch namespace with defaults applied, raw config dict)
    """
    with open(json_file, 'r') as fh:
        raw_config = json.load(fh)
    # expose the keys as attributes and fill in any missing defaults
    namespace = default_values(Bunch(raw_config))
    return namespace, raw_config
def process_config(jsonfile):
    """Load the config and derive the experiment output directories."""
    config, _ = get_config_from_json(jsonfile)
    experiment_root = os.path.join("../experiments", config.exp_name)
    config.summary_dir = os.path.join(experiment_root, "summary")
    config.checkpoint_dir = os.path.join(experiment_root, "checkpoint")
    return config
def default_values(config):
    """Fill missing config entries with their defaults (in place) and
    return the config.

    Fixes vs. the original:
    - 'fc_act' previously tested and read the key 'tanh' instead of
      'fc_act', so an explicitly configured fc_act was silently ignored.
    - the duplicated 'clf_bias' assignment was dead code (the first
      assignment guarantees the key exists, so the second never changed
      anything); it has been removed and the effective default stays 0.
    """
    defaults = {
        'target_cluster': -1,
        'rater_id': -1,
        'gt_priors': False,
        'priors': False,
        'reg': False,
        'modified_CE': False,
        'ccc_err': False,
        'rmse_weights': 1,
        'cccerr_weights': 1,
        'yout_weights': 1,
        'alpha1': 1,
        'alpha2': 1,
        'fcs_num': 0,
        'n_fc': 16,
        'fc_act': 'tanh',
        'fc_path': 0,
        'clf_bias': 0,
        'audio_video_feat': 0,
        'gt': 'onehot',
        'ccc_diff': -0.01,
        'reset_lr': True,
        'stage2': 0,
        'max_to_keep': 1000,
        'subset': 'joint_modling',
        'log_dir': 'logs',
        'max_length': 7500,
        'sequence_length': 7500,
        'learning_rate': 0.02,
        'num_epochs': 20,
    }
    # keep any value the user supplied; only absent keys get the default
    for key, value in defaults.items():
        config.setdefault(key, value)
    return config
|
import discord
from discord.ext import commands
import logging
import asyncio
import glob
import os
from aiohttp import ClientSession
from rinko.core.bot.base import RinkoBase
from rinko.core.config import config
from rinko.core.logger import get_module_logger
from rinko.core.constant import *
logger = get_module_logger(__name__)
class Rinko(RinkoBase):
    """Discord bot core: per-guild prefix and settings persisted in MySQL."""

    def __init__(self, *args, **kwargs):
        # The command prefix is resolved per-message from the database.
        super().__init__(command_prefix=self.get_prefix, *args, **kwargs)

    async def set_prefix(self, guild: discord.Guild, prefix: str):
        """Persist a new command prefix for *guild*.

        Uses a parameterised query (same form as on_guild_join) so a prefix
        containing quotes can neither break nor inject into the statement.
        """
        await self.set('UPDATE server_info SET prefix = %s WHERE guild = %s;', (prefix, str(guild.id)))

    async def get_prefix(self, message: discord.Message):
        """Return the guild's stored prefix, or DEFAULT_PREFIX if unknown."""
        # message.guild.id is an int supplied by discord, safe to inline
        if prefix := await self.get(f'SELECT * FROM server_info WHERE guild = {message.guild.id}'):
            return prefix[0]['prefix']
        else:
            return DEFAULT_PREFIX

    async def set_config(self, guild: discord.Guild, key, value):
        """Set one server_info column for *guild*.

        NOTE: *key* is interpolated as a column identifier and must come
        from trusted code only, never user input; *value* is parameterised.
        """
        await self.set(f'UPDATE server_info SET {key} = %s WHERE guild = %s;', (value, str(guild.id)))

    async def get_config(self, guild: discord.Guild, key, default=None):
        """Read one server_info column for *guild*, or *default* if absent."""
        if prefix := await self.get(f'SELECT * FROM server_info WHERE guild = {guild.id}'):
            return prefix[0][key]
        else:
            return default

    async def on_guild_join(self, guild):
        # Seed default settings for a newly joined guild.
        logger.debug(f'A new guild has joined: {guild.id}')
        await self.set('INSERT INTO server_info (guild, locale, prefix, enable_quote) VALUES (%s, "en", %s, 1);', (str(guild.id), DEFAULT_PREFIX))
        await self.set('INSERT INTO reminder_call (guild, 7d, 1d, 12H, 6H, 3H, 2H, 1H, 30M, 10M, 0M) VALUES (%s, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1);', (str(guild.id)))

    async def on_guild_remove(self, guild):
        # Drop all rows belonging to the guild we just left.
        logger.debug(f'A new guild has removed: {guild.id}')
        await self.set('DELETE FROM server_info WHERE guild = %s;', (str(guild.id)))
        await self.set('DELETE FROM reminder_call WHERE guild = %s;', (str(guild.id)))
|
"""
Bluetooth Input Stream
stream_input() is a generator that starts up the
bluetooth serial server via PyBluez and yields
events sent from a device
Ex:
# print data send from bluetooth device
for event in stream_input():
print(event)
Note: pairing must be done prior to this.
Once paired, running this will allow a connection
"""
from bluetooth import *
def stream_input():
    """Yield values in [0.0, 1.0] decoded from a Bluetooth serial client.

    Repeatedly accepts a client connection, reads UTF-8 chunks, and for each
    chunk that is a non-negative integer string yields ``value / 255``.
    Values above 255 are replaced by the last yielded value. When a client
    disconnects, the sockets are closed and a new connection is awaited.
    """
    MAX_INT = 255  # inputs are expected as one byte's worth of resolution
    while True:
        client_sock, server_sock = _init_connection()
        try:
            last = 0.0
            while True:
                data = client_sock.recv(100)
                if not data:  # empty read: peer closed the connection
                    break
                # `input` shadowed the builtin; renamed to `text`.
                text = data.decode('utf-8')
                if text.isdigit():
                    raw = int(text)
                    # Clamp out-of-range readings by repeating the last value.
                    value = raw / MAX_INT if raw <= MAX_INT else last
                    yield value
                    last = value
        except IOError:
            # Client dropped mid-read; fall through and reconnect.
            pass
        finally:
            # `finally` (rather than a trailing call) also closes the sockets
            # when the generator itself is closed mid-iteration.
            _close_connection(client_sock, server_sock)
def _init_connection():
    """Open an RFCOMM server socket, advertise it, and block until a client connects.

    Returns a ``(client_sock, server_sock)`` pair; the caller owns both and
    must close them via ``_close_connection``.
    """
    server_sock = BluetoothSocket(RFCOMM)
    server_sock.bind(("", PORT_ANY))
    server_sock.listen(1)

    channel = server_sock.getsockname()[1]
    service_uuid = "94f39d29-7d6d-437d-973b-fba39e49d4ee"
    advertise_service(
        server_sock,
        "SampleServer",
        service_id=service_uuid,
        service_classes=[service_uuid, SERIAL_PORT_CLASS],
        profiles=[SERIAL_PORT_PROFILE],
        # protocols = [ OBEX_UUID ]
    )

    print("Waiting for connection on RFCOMM channel %d" % channel)
    client_sock, client_info = server_sock.accept()
    print("Accepted connection from ", client_info)
    return client_sock, server_sock
def _close_connection(client_sock,server_sock):
print("disconnected")
client_sock.close()
server_sock.close()
print("all done")
return
# Manual smoke test: stream decoded events from a paired device to stdout.
if __name__ == "__main__":
    for event in stream_input():
        print(event)
|
import argparse
from multiprocessing.pool import Pool
import numpy as np
from cv2 import cv2
# Disable OpenCV's internal threading; parallelism comes from the Pool below.
cv2.setNumThreads(0)
import os
# NOTE(review): these caps are set after numpy/cv2 have been imported; such
# limits are typically read at import time, so they may have no effect here.
# Consider setting them in the launching environment instead — confirm.
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
def average_strategy(images):
    """Return the pixel-wise arithmetic mean over a stack of images (axis 0)."""
    return np.mean(images, axis=0)
def hard_voting(images):
    """Majority-vote a stack of 0-255 masks pixel-wise.

    Each mask is binarized (rounded after scaling to [0, 1]); a pixel is set
    in the output iff at least half the masks vote for it. Returns 0./255. values.
    """
    votes = np.round(images / 255.)
    majority = np.round(votes.sum(axis=0) / images.shape[0])
    return majority * 255.
def ensemble_image(params):
file, dirs, ensembling_dir, strategy = params
images = []
for dir in dirs:
file_path = os.path.join(dir, file)
images.append(cv2.imread(file_path, cv2.IMREAD_COLOR))
images = np.array(images)
if strategy == 'average':
ensembled = average_strategy(images)
elif strategy == 'hard_voting':
ensembled = hard_voting(images)
else:
raise ValueError('Unknown ensembling strategy')
cv2.imwrite(os.path.join(ensembling_dir, file), ensembled)
def ensemble(dirs, strategy, ensembling_dir, n_threads):
    """Ensemble every mask listed in ``dirs[0]`` across all *dirs* in parallel.

    Each file name found in the first directory is expected to exist in every
    other directory; results are written into *ensembling_dir*.
    """
    files = os.listdir(dirs[0])
    params = [(file, dirs, ensembling_dir, strategy) for file in files]
    # The context manager terminates and joins the workers even when a task
    # raises; the bare Pool was previously never closed.
    with Pool(n_threads) as pool:
        pool.map(ensemble_image, params)
# Prediction sub-directories (relative to --folds_dir) whose masks are ensembled.
# NOTE(review): 'sc50_1' appears twice, so its predictions are double-weighted
# in the ensemble — confirm whether that is intentional.
test_dirs = ['d161', 'd121', 'r34', 'sc50', 'r101', 'sc50_1', 'd161_1', 'd161_2', 'd161_3', 'd161_4','d161_5',
             'd121_1','d121_2','d121_3','d121_4','d121_5', 'r34_1', 'r34_2', 'r34_3', 'sc50_1', 'sc50_2', 'sc50_3'
             ]
# CLI entry point: parse options, sanity-check the input dirs, run the ensemble.
if __name__ == '__main__':
    parser = argparse.ArgumentParser("Ensemble masks")
    arg = parser.add_argument
    arg('--ensembling_cpu_threads', type=int, default=8)
    arg('--ensembling_dir', type=str, default='predictions/masks/ensemble')
    arg('--strategy', type=str, default='average')
    arg('--folds_dir', type=str, default='predictions/masks')
    arg('--dirs_to_ensemble', nargs='+', default=test_dirs)
    args = parser.parse_args()
    folds_dir = args.folds_dir
    dirs = [os.path.join(folds_dir, d) for d in args.dirs_to_ensemble]
    # Fail fast if any prediction directory is missing.
    for d in dirs:
        if not os.path.exists(d):
            raise ValueError(d + " doesn't exist")
    os.makedirs(args.ensembling_dir, exist_ok=True)
    ensemble(dirs, args.strategy, args.ensembling_dir, args.ensembling_cpu_threads)
|
import os
import subprocess
import shutil
import glob
import hashlib
import pathlib
import xdg
from .config import config
from .managers import SsrConfManager
class SsrApp:
    """Manage the shadowsocksr client application.

    Supports installing/removing the upstream code, verifying its integrity
    against known MD5 checksums, and starting/stopping the local SSR process
    (whose pid is tracked in a file under XDG_DATA_HOME).
    """

    # Expected MD5 checksums of the upstream shadowsocksr sources, keyed by
    # path relative to the `shadowsocks` package dir; used by test() to detect
    # corrupted or tampered files.
    SSR_FILE_MD5 = {
        'obfsplugin/verify.py': '4769b5a8e3e6012a5da57533ce2c62b8',
        'obfsplugin/__init__.py': '7ff9a30b272bb2077d229a3d0b12c86a',
        'obfsplugin/auth.py': '5d79f3fbd42e71493232294662663359',
        'obfsplugin/auth_chain.py': '6eaef7733d3515809125101be6147cbc',
        'obfsplugin/plain.py': '95dffa5a32514458158c03f1ce657e94',
        'obfsplugin/obfs_tls.py': '513c28c82d76c99e3c4e119b5bcae1af',
        'obfsplugin/http_simple.py': '1abc81ce2d14da585d9f3eb17a4f4956',
        'crypto/rc4_md5.py': '5e70a54986295913974ba6d60b5b7bb5',
        'crypto/openssl.py': '5e786a490e3adfb13f33d5bc3ed9c998',
        'crypto/ctypes_libsodium.py': '1a957242cad3346e2e0d7251b62b2a7b',
        'crypto/__init__.py': '7ff9a30b272bb2077d229a3d0b12c86a',
        'crypto/sodium.py': 'a3a773b4ec49373f0028d32560f408a9',
        'crypto/util.py': 'd6a806fc2e11ff8f7f0667b91f8a6f58',
        'crypto/table.py': '751169759aa43e84602118ca7957047e',
        'crypto/ctypes_openssl.py': '112cda4dffac340142d7a894ee5d9232',
        'run.sh': '87d802870a57a56749b84aee7d01bdc7',
        'common.py': '41572dc942869db57b1e431f4951764e',
        'encrypt.py': '97b6271a61593d99a051afeee6ba8202',
        'manager.py': 'db54e7a547dd5a653ce651b271f2e68e',
        '__init__.py': '9b4f7d2f38afb96b0708d303262e07fd',
        'asyncdns.py': '1f42746074acc7ff7638b60bcd06e0ab',
        'version.py': '1fe5c2894450886a02caff9aaa10f4c2',
        'encrypt_test.py': 'f0001a6d92ba6dc0b731801db2b98530',
        'eventloop.py': 'ee6e1f2a6341d0649dbfbc898dbef2ca',
        'daemon.py': '4b1ca6c81201ca582478acc223ebeab8',
        'tcprelay.py': '0adfeb81724b5f1b6f415ccdcd1b4f34',
        'obfs.py': '2764716a6e6f132d978445bbb85da184',
        'lru_cache.py': 'e8cfd8b1494df5999c9e3f25d39d0a7c',
        'local.py': 'a51c10ba21606910371e20dfd739eea4',
        'shell.py': '5c3973dff66a33de5c567909fa0a1eaf',
        'server.py': 'ef90038f209ec504cd42780d9eb16866',
        'ordereddict.py': '397c5dee3496749e0c692d9c43d3c698',
        'udprelay.py': 'a2c5ecccbcf2cee90c4e007774787eaf',
        'tail.sh': '78abecdddc0f388111026f36e23c1068',
        'stop.sh': '439363b356eecad5eda5574535b7f32c',
        'logrun.sh': '34d0334cd13b07a54436c0ec6cf6d823',
    }

    def __init__(self):
        # Path of the SSR JSON config the local client is launched with.
        self.config_path = config.SSR_CONF_PATH

    @staticmethod
    def _pid_file():
        """Return the path of the file recording the running SSR process id."""
        return xdg.XDG_DATA_HOME / 'ssrcli' / 'ssr.pid'

    @staticmethod
    def install():
        """Shallow-clone the shadowsocksr 'manyuser' branch into SSR_APP_PATH."""
        if pathlib.Path(config.SSR_APP_PATH).exists():
            print('The target folder already exists: {}'.format(config.SSR_APP_PATH))
            return
        try:
            subprocess.run(['git', 'clone', 'https://github.com/shadowsocksr-backup/shadowsocksr', '-b', 'manyuser',
                            '--depth=1', config.SSR_APP_PATH], check=True)
        except subprocess.CalledProcessError:
            print('Running git clone failed. Have you installed git?')

    @staticmethod
    def remove():
        """Delete the installed shadowsocksr checkout (no-op if absent)."""
        shutil.rmtree(config.SSR_APP_PATH, ignore_errors=True)

    @staticmethod
    def test() -> bool:
        """Return True when every known shadowsocksr file matches its MD5 checksum."""
        files = glob.glob(os.path.join(config.SSR_APP_PATH, 'shadowsocks', '**', '*'), recursive=True)
        ok: bool = True
        # Length of the base dir plus one path separator, stripped from each
        # absolute path to recover the SSR_FILE_MD5 key.
        prefix_len = len(os.path.join(config.SSR_APP_PATH, 'shadowsocks')) + 1
        for file in files:
            key = file[prefix_len:]
            if key in SsrApp.SSR_FILE_MD5:  # the `.keys()` call was redundant
                hash_md5 = hashlib.md5()
                with open(file, "rb") as f:  # TODO: Move to utils
                    for chunk in iter(lambda: f.read(4096), b''):
                        hash_md5.update(chunk)
                if hash_md5.hexdigest() != SsrApp.SSR_FILE_MD5[key]:
                    print('Found {} MD5 different'.format(file))
                    ok = False
        return ok

    def on(self):
        """Launch the SSR local client (if not running) and record its pid."""
        if self.status():
            print('SSR has been launched')
            return
        print(SsrConfManager().current())
        task = subprocess.Popen(['python', '-m', 'shadowsocks.local', '-c', self.config_path, 'start'],
                                cwd=config.SSR_APP_PATH)  # shadowsocksr has no deps so being without env=... is OK
        pid_file = self._pid_file()
        # The data directory may not exist on a fresh machine; create it so
        # the pid write below cannot fail with FileNotFoundError.
        pid_file.parent.mkdir(parents=True, exist_ok=True)
        with open(pid_file, 'w') as f:
            print(task.pid, file=f)

    def off(self):
        """Kill the running SSR process recorded in the pid file."""
        if not self.status():
            print('SSR has already been off')
            return
        pid_file = self._pid_file()
        with open(pid_file, 'r') as f:
            content = f.read().strip()
        if content.isdigit():
            pid = int(content)
            try:
                subprocess.run(['kill', str(pid)], check=True)
            except subprocess.CalledProcessError:
                print('Running kill failed: pid {}'.format(pid))
                return
            # Truncate the pid file: the process is gone.
            with open(pid_file, 'w'):
                pass
        else:
            print('Invalid pid file: {}'.format(pid_file))

    def restart(self):
        """Stop then start the SSR client."""
        self.off()
        self.on()

    def status(self) -> bool:
        """Return True when something is listening on the local SSR port."""
        try:
            subprocess.run(['lsof', '-i:{}'.format(config.SSR_LOCAL_PORT)], check=True, stdout=subprocess.DEVNULL)
        except subprocess.CalledProcessError:
            return False
        return True
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import time
import base64
import pychrome
import threading
# Pages to screenshot — one browser tab is opened per URL.
urls = [
    "http://fatezero.org",
    "http://blog.fatezero.org",
    "http://github.com/fate0",
    "http://github.com/fate0/pychrome"
]
class EventHandler(object):
    """Per-tab Page event callbacks that screenshot the page once its main
    frame has finished loading, then stop the tab."""

    # Serializes tab activation + capture across all handler instances,
    # because only the active tab can be screenshotted.
    screen_lock = threading.Lock()

    def __init__(self, browser, tab):
        self.browser = browser
        self.tab = tab
        self.start_frame = None  # id of the first (top-level) frame seen loading

    def frame_started_loading(self, frameId):
        # Record only the first frame that starts loading — the main document.
        if self.start_frame:
            return
        self.start_frame = frameId

    def frame_stopped_loading(self, frameId):
        # Sub-frames also fire this event; act only for the main frame.
        if frameId != self.start_frame:
            return
        self.tab.Page.stopLoading()
        with self.screen_lock:
            # Only the active tab can be captured, so activate it first.
            print(self.browser.activate_tab(self.tab.id))
            try:
                data = self.tab.Page.captureScreenshot()
                with open("%s.png" % time.time(), "wb") as fd:
                    fd.write(base64.b64decode(data['data']))
            finally:
                self.tab.stop()
def close_all_tabs(browser):
    """Stop and close every open tab, then verify none remain."""
    if not browser.list_tab():
        return

    for tab in browser.list_tab():
        try:
            tab.stop()
        except pychrome.RuntimeException:
            # Tab was already stopped / never started — safe to ignore.
            pass
        browser.close_tab(tab)

    # Give the browser a moment to actually drop the tabs before checking.
    time.sleep(1)
    assert len(browser.list_tab()) == 0
def main():
    """Open one tab per URL, screenshot each page as it finishes loading
    (via EventHandler), then tear all tabs down."""
    browser = pychrome.Browser()
    # Start from a clean slate so tab indices match `urls`.
    close_all_tabs(browser)
    tabs = []
    for i in range(len(urls)):
        tabs.append(browser.new_tab())
    for i, tab in enumerate(tabs):
        eh = EventHandler(browser, tab)
        # Route Page lifecycle events to this tab's handler.
        tab.Page.frameStartedLoading = eh.frame_started_loading
        tab.Page.frameStoppedLoading = eh.frame_stopped_loading
        tab.start()
        tab.Page.stopLoading()
        tab.Page.enable()
        tab.Page.navigate(url=urls[i])
    for tab in tabs:
        # Wait up to 60s; the handler calls tab.stop() once it has captured.
        tab.wait(60)
        tab.stop()
        browser.close_tab(tab)
    print('Done')
if __name__ == '__main__':
    main()
|
class Solution:
    def countRoutes(self, locations: list[int], start: int, finish: int, fuel: int) -> int:
        """Count routes from *start* to *finish* using at most *fuel*, mod 1e9+7.

        Moving from position i to j costs abs(locations[i] - locations[j])
        fuel; a route may pass through *finish* multiple times and each visit
        with non-negative remaining fuel counts as one route.
        """
        MOD = 1_000_000_007
        n = len(locations)
        # dp[pos][left] = number of routes reaching finish from `pos` with
        # `left` fuel remaining; -1 marks "not computed yet". Sized from the
        # actual inputs instead of the hard-coded LeetCode constraint bounds
        # (101 x 201), so larger inputs no longer raise IndexError.
        dp = [[-1] * (fuel + 1) for _ in range(n)]

        def helper(pos: int, left: int) -> int:
            if left < 0:
                return 0
            if dp[pos][left] != -1:
                return dp[pos][left]
            # Standing at `finish` itself counts as one completed route.
            ans = 1 if pos == finish else 0
            for i, loc in enumerate(locations):
                if i == pos:
                    continue
                cost = abs(loc - locations[pos])
                if left >= cost:
                    ans = (ans + helper(i, left - cost)) % MOD
            dp[pos][left] = ans
            return ans

        return helper(start, fuel)
|
"""Groups the constants from sc2"""
from .ids.ability_id import *
from .ids.buff_id import *
from .ids.effect_id import *
from .ids.unit_typeid import *
from .ids.upgrade_id import *
|
#!/usr/bin/env python
# coding: utf-8
# (ch:end2end)=
# # 머신러닝 프로젝트 처음부터 끝까지
# **감사의 글**
#
# 자료를 공개한 저자 오렐리앙 제롱과 강의자료를 지원한 한빛아카데미에게 진심어린 감사를 전합니다.
# **소스코드**
#
# 본문 내용의 일부를 파이썬으로 구현한 내용은
# [(구글코랩) 머신러닝 프로젝트 처음부터 끝까지](https://colab.research.google.com/github/codingalzi/handson-ml3/blob/master/notebooks/code_end2end_ml_project.ipynb)에서
# 확인할 수 있다.
# **주요 내용**
#
# 주택 가격을 예측하는 다양한 **회귀 모델**<font size="2">regression model</font>의
# 훈련 과정을 이용하여 머신러닝 시스템의 전체 훈련 과정을 살펴본다.
# <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch02/homl02-01d.png" width="600"></div>
# 특히 데이터 정제 및 전처리 과정으로 구성된 데이터 준비와
# 최선의 모델을 찾는 과정을 상세히 소개한다.
# ## 실전 데이터 활용
# 다양한 실전 데이터를 모아놓은 데이터 저장소를
# 머신러닝 공부에 잘 활용할 수 있어야 한다.
# 가장 유명한 데이터 저장소는 다음과 같다.
#
# * [OpenML](https://www.openml.org/)
# * [캐글(Kaggle) 데이터셋](http://www.kaggle.com/datasets)
# * [페이퍼스 위드 코드](https://paperswithcode.com/)
# * [UC 얼바인(UC Irvine) 대학교 머신러닝 저장소](http://archive.ics.uci.edu/ml)
# * [아마존 AWS 데이터셋](https://registry.opendata.aws)
# * [텐서플로우 데이터셋](https://www.tensorflow.org/datasets)
#
# 여기서는 1990년 미국 캘리포니아 주에서 수집한 인구조사 데이터를 사용하며,
# 데이터의 원본은 다양한 공개 저장소에서 다운로드할 수 있다.
# ## 큰 그림 그리기
# 머신러닝으로 해결하고자 하는 문제를 파악하기 위해
# 주어진 데이터에 대한 기초적인 정보를 확인하고,
# 문제 파악 및 해결법 등을 구상해야 한다.
# ### 데이터 정보 확인
# 1990년도에 시행된 미국 캘리포니아 주의 20,640개 구역별 인구조사 데이터는
# 경도, 위도, 중간 주택 연도, 방의 총 개수, 침실 총 개수, 인구, 가구 수, 중간 소득, 중간 주택 가격, 해안 근접도
# 등 총 10개의 특성을 포함한다.
# <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch02/LA-USA01.png" width="600"></div>
# ### 훈련 모델 확인
# **훈련 모델 종류**
# 구역별 중간 주택 가격을 예측하는 시스템에 활용될
# **회귀 모델**을 훈련시키고자 한다.
# 훈련시킬 모델의 특성은 다음과 같다.
#
# * 지도 학습: 구역별 '중간 주택 가격'을 레이블(타깃)로 지정한다.
#
# * 회귀: 가격을 예측한다. 보다 세분화하면 다중 회귀이자 단변량 회귀 모델이다.
# * 다중 회귀<font size="2">multiple regression</font>: 구역별로 여러 특성을 주택 가격 예측에 사용한다.
# * 단변량 회귀<font size="2">univariate regression</font>: 구역별로 한 종류의 값만 예측한다.
#
# * 배치 학습: 빠르게 변하는 데이터에 적응할 필요가 없으며, 데이터셋의 크기도 충분히 작다.
# :::{admonition} 다변량 회귀
# :class: info
#
# 다변량 회귀<font size="2">multivariate regression</font>는 여러 종류의 값을 동시에 예측한다.
# :::
# **훈련 모델 성능 측정 지표**
# 회귀 모델의 성능은 일반적으로 예측값과 타깃 사이의 오차를 활용하는 아래
# 두 평가하는 지표 중 하나를 사용한다.
#
# * 평균 제곱근 오차(RMSE)
# * 평균 절대 오차(MAE)
# **평균 제곱근 오차**<font size="2">Root Mean Square Error</font>(RMSE)는
# 예측값과 타깃 사이의 오차의 제곱의 평균값에 제곱근을 취한 값이다.
# **유클리디안 노름** 또는 **$\ell_2$ 노름**으로 불린다.
# $$\text{RMSE}(\mathbf X, h) = \sqrt{\frac 1 m \sum_{i=1}^{m} (h(\mathbf x^{(i)}) - y^{(i)})^2}$$
# 위 수식에 사용된 기호의 의미는 다음과 같다.
#
# * $\mathbf X$: 훈련셋 전체 샘플들의 특성값들로 구성된 행렬, 레이블(타깃) 제외.
# * $m$: $\mathbf X$의 행의 수. 즉, 훈련셋 크기.
# * $\mathbf x^{(i)}$: $i$ 번째 샘플의 특성값 벡터. 레이블(타깃) 제외.
# * $y^{(i)}$: $i$ 번째 샘플의 레이블(타깃)
# * $h$: 예측 함수
# * $h(\mathbf x^{(i)})$: $i$번째 샘플에 대한 예측 값. $\hat y^{(i)}$ 로 표기되기도 함.
# :::{prf:example} 훈련셋과 2D 어레이
# :label: 2d-array
#
# 모델 훈련에 사용되는 훈련셋에
# $m$ 개의 샘플이 포함되어 있고 각각의 샘플이 $n$ 개의 특성을 갖는다면
# 훈련셋은 $(m, n)$ 모양의 numpy의 2D 어레이로 지정된다.
#
# 예를 들어, $m = 5$, $n = 4$ 이면 훈련셋 $\mathbf X$는 다음과 같이
# 표현된다.
#
# ```python
# array([[-118.29, 33.91, 1416, 38372],
# [-114.30, 34.92, 2316, 41442],
# [-120.38, 35.21, 3444, 29303],
# [-122.33, 32.95, 2433, 24639],
# [-139.31, 33.33, 1873, 50736]])
# ```
#
# 각각의 $\mathbf{x}^{(i)}$는 $i$ 번째 행에 해당한다.
# 예를 들어 $\mathbf{x}^{(1)}$은 첫째 행의 1D 어레이를 가리킨다.
#
# ```python
# array([-118.29, 33.91, 1416, 38372])
# ```
#
# 단변량 회귀에서 $y^{(i)}$ 는 보통 부동소수점을 가리키며,
# 다변량 회귀에서는 $\mathbf{x}^{(i)}$ 처럼
# 여러 개의 타깃 값으로 구성된 1D 어레이로 표현된다.
# :::
# **평균 절대 오차**<font size="2">Mean Absolute Error</font>(MAE)는
# **맨해튼 노름** 또는 **$\ell_1$ 노름**으로도 불리며
# 예측값과 타깃 사이의 오차의 평균값이다.
#
# $$\text{MAE}(\mathbf X, h) = \frac 1 m \sum_{i=1}^{m} \mid h(\mathbf x^{(i)}) - y^{(i)} \mid$$
#
# 훈련셋에 이상치가 많이 포함된 경우 주로 사용되지만,
# 그렇지 않다면 일반적으로 RMSE가 선호된다.
# ## 데이터 다운로드 및 적재
# 캘리포니아 주택가격 데이터셋은 매우 유명하여 많은 공개 저장소에서 다운로드할 수 있다.
# 여기서는 깃허브 리포지토리에 압축파일로 저장한 파일을 다운로드해서 사용하며
# `housing` 변수가 가리키도록 적재되었다고 가정한다.
#
# ```python
# housing = load_housing_data()
# ```
# ### 데이터셋 기본 정보 확인
# pandas의 데이터프레임으로 데이터셋을 적재하여 기본적인 데이터 구조를 훑어볼 수 있다.
# **`head()` 메서드 활용**
#
# 데이터프레임 객체의 처음 5개 샘플을 보여준다.
# <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch02/homl02-05.png" width="600"></div>
# **`info()` 메서드 활용**
#
# 데이터셋의 정보를 요약해서 보여준다.
#
# * 구역 수: 20,640개. 한 구역의 인구는 600에서 3,000명 사이.
# * 구역별로 경도, 위도, 중간 주택 연도, 해안 근접도 등 총 10개의 조사 항목
# * '해안 근접도'는 범주형 특성이고 나머지는 수치형 특성.
# * '방의 총 개수'의 경우 207개의 null 값, 즉 결측치 존재.
# <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch02/homl02-05a.png" width="350"></div>
# **범주형 특성 탐색**
#
# '해안 근접도'는 5개의 범주로 구분된다.
#
# | 특성값 | 설명 |
# | --- | --- |
# | <1H OCEAN | 해안에서 1시간 이내 |
# | INLAND | 내륙 |
# | NEAR OCEAN | 해안 근처 |
# | NEAR BAY | 샌프란시스코의 Bay Area 구역 |
# | ISLAND | 섬 |
# **수치형 특성 탐색**
#
# `describe()` 메서드는 수치형 특성들의 정보를 요약해서 보여준다.
# <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch02/housing-describe.png"></div>
# `hist()` 메서드는 수치형 특성별 히스토그램을 그린다.
# 히스토그램을 통해 각 특성별 데이터셋의 다양한 정보를 확인할 수 있다.
#
# - 각 특성마다 사용되는 단위와 스케일(척도)가 다르다.
# - 일부 특성은 한쪽으로 치우쳐저 있다.
# - 일부 특성은 값을 제한한 것으로 보인다.
# <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch02/feature-histogram.png" width="600px"></div>
# ### 훈련셋과 테스트셋
# 모델 학습 시작 이전에 준비된 데이터셋을 **훈련셋**과 **테스트셋**으로 구분해야 한다.
# 테스트셋은 훈련 과정중에 전혀 사용되지 않으며 보통 전체 데이터셋의 20% 정도 이하로
# 선택하며, 전체 데이터셋의 크기에 따라 테스트셋의 크기가 너무 크지 않게
# 비율을 적절히 조절한다.
#
# 테스트셋에 대한 정보는 절대로 모델 훈련에 이용하지 않는다.
# 만약 이용하게 되면 미래에 실전에서 사용되는 데이터를 미리 안다고 가정하고 모델을 훈련시키는
# 것과 동일하게 되어 매우 잘못된 모델을 훈련시킬 위험을 키우게 된다.
#
# 데이터셋을 훈련셋과 테스트셋으로 구분할 때 보통 층화표집을 사용한다.
# **층화표집**
#
# **층화표집**<font size="2">stratified sampling</font>은 각 계층별로 적절한 샘플을 추출하는 기법이다.
# 이유는 계층별로 충분한 크기의 샘플이 포함되도록 지정해야 학습 과정에서 편향이 발생하지 않는다.
# 예를 들어, 특정 소득 구간에 포함된 샘플이 과하게 적거나 많으면 해당 계층의 중요도가
# 과소 혹은 과대 평가될 수 있다.
#
# 캘리포니아 데이터셋의 중간 소득을 대상으로하는 히스토그램을 보면
# 대부분 구역의 중간 소득이 1.5~6.0, 즉 15,000 달러에서 60,000 달러 사이인 것을 알 수 있다.
# <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch02/homl02-08.png" width="400"></div>
# 소득 구간을 아래 숫자들을 기준으로 5개로 구분한 다음에 층화표집을 이용하여
# 훈련셋과 테스트셋을 구분할 수 있다.
#
# | 구간 | 범위 |
# | :---: | :--- |
# | 1 | 0 ~ 1.5 |
# | 2 | 1.5 ~ 3.0 |
# | 3 | 3.0 ~ 4.5 |
# | 4 | 4.5 ~ 6.0 |
# | 5 | 6.0 ~ |
# 5 개의 구간으로 구분한 결과는 다음과 같다.
# <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch02/homl02-08a.png" width="400"></div>
# 무작위 추출 방식과는 달리 계층별 샘플의 비율을 거의 동일하게 유지함을 확인할 수 있다.
# | | 전체 | 층화표집 | 무작위 추출 | 층화표집 오류율 | 무작위 추출 오류율 |
# | :--- | :--- | :--- | :--- | :--- | :--- |
# | 1 | 3.98 | 4.00 | 4.24 | 0.36 | 6.45 |
# | 2 | 31.88 | 31.88 | 30.74 | -0.02 | -3.59 |
# | 3 | 35.06 | 35.05 | 34.52 | -0.01 | -1.53 |
# | 4 | 17.63 | 17.64 | 18.41 | 0.03 | 4.42 |
# | 5 | 11.44 | 11.43 | 12.09 | -0.08 | 5.63 |
# ## 데이터 탐색과 시각화
# 테스트셋을 제외한 훈련셋에 대해서 시각화를 이용하여 데이터셋을 탐색한다.
# ### 지리적 데이터 시각화
# 경도와 위도 정보를 이용하여 구역을 산포도로 나타내면 인구의 밀집 정도를 확인할 수 있다.
# 예를 들어, 샌프란시스코의 Bay Area, LA, 샌디에고 등 유명 대도시의 특정 구역이 높은 인구 밀도를 갖는다.
# <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch02/homl02-09.png" width="500"></div>
# 인구 밀도가 높은 유명 대도시의 특정 구역에 위치한
# 주택 가격이 높다는 일반적인 사실 또한 산포도로 확인할 수 있다.
# 산포도를 그릴 때 해당 구역의 중간 주택 가격을 색상으로,
# 인구밀도는 원의 크기로 활용한 결과는 다음과 같다.
# <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch02/homl02-11.png" width="500"></div>
# ### 상관관계 조사
# 중간 주택 가격 특성과 다른 특성 사이의 선형 상관관계를 나타내는 상관계수는 다음과 같다.
# ```python
# median_house_value 1.000000
# median_income 0.688380
# rooms_per_house 0.143663
# total_rooms 0.137455
# housing_median_age 0.102175
# households 0.071426
# total_bedrooms 0.054635
# population -0.020153
# people_per_house -0.038224
# longitude -0.050859
# latitude -0.139584
# bedrooms_ratio -0.256397
# ```
# **상관계수의 특징**
#
# 상관계수는 -1에서 1 사이의 값으로 표현된다.
#
# * 1에 가까울 수록: 강한 양의 선형 상관관계
# * -1에 가까울 수록: 강한 음의 선형 상관관계
# * 0에 가까울 수록: 매우 약한 선형 상관관계
# :::{admonition} 상관계수와 상관관계
# :class: warning
#
# 상관계수가 0이라는 것은 선형 상관관계가 없다는 의미이지 서로 아무런 상관관계가 없다는 말이 아니다.
# 또한 선형계수가 1이라 하더라도 두 특성이 1대 1로 의존한다는 의미도 아님을
# 아래 그림이 잘 보여준다.
#
# <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch02/homl02-14.png" width="400"></div>
#
# <그림 출처: [위키백과](https://en.wikipedia.org/wiki/Pearson_correlation_coefficient)>
# :::
# 중간 주택 가격과 중간 소득의 상관계수가 0.68로 가장 높다.
# 이는 중간 소득이 올라가면 중간 주택 가격도 상승하는 경향이 있음을 의미한다.
# 하지만 아래 산점도의 점들이 너무 넓게 퍼져 있어서 완벽한 선형관계와는 거리가 멀다.
# <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch02/homl02-13.png" width="400"></div>
# 위 산점도를 볼 때 다음 사항들에 주의해야 한다.
#
# * 50만 달러에서 보이는 수평선은 가격을 제한한 결과로 보여진다.
# * 45만, 35만, 28만, 그 아래 정도에서도 수평선이 존재하는데 이유는 알려지지 않았다.
# * 이처럼 이상한 성질을 모델이 형태를 학습하지 못하도록 해당 구역을 제거하는 것이
# 일반적으로 좋다. 하지만 여기서는 그대로 두고 사용한다.
# 경우에 따라 기존의 특성을 조합해서 새로운 특성을 활용할 수도 있다.
# 예를 들어 구역별 방의 총 개수와 침실의 총 개수 대신 아래 특성이 보다 유용해 보인다.
#
# * 가구당 방 개수(`rooms_for_house`)
# * 방 하나당 침실 개수(`bedrooms_ratio`)
# * 가구당 인원(`people_per_house`)
#
# 실제로 세 특성을 새로 추가한 다음에 상관계수를 확인하면
# 방 하나당 침실 개수와 중간 주택 가격 사이의 선형 상관관계가
# 중간 소득을 제외한 기존의 다른 특성들에 비해 높게 나타난다.
# ## 데이터 준비
# 머신러닝 모델 훈련에 사용되는 알고리즘을 이용하려면
# 적재된 데이터셋을 데이터 정제와 전처리 과정을 수행해서
# 바로 모델 훈련에 사용될 수 있도록 해야 한다.
# 또한 모든 과정을 자동화할 수 있어야 한다.
#
# 정제와 전처리 모든 과정을
# __파이프라인__<font size="2">pipeline</font>으로
# 자동화해서 언제든지 재활용하는 방식을 상세히 설명한다.
# **입력 데이터셋과 타깃 데이터셋**
#
# 층화표집으로 얻어진 훈련셋 `strat_train_set` 을
# 다시 입력 데이터셋 과 타깃 데이터셋으로 구분한다.
#
# * 입력 데이터셋: 중간 주택 가격 특성이 제거된 훈련셋
#
# ```python
# housing = strat_train_set.drop("median_house_value", axis=1)
# ```
# * 타깃 데이터셋: 중간 주택 가격 특성으로만 구성된 훈련셋
#
# ```python
# housing_labels = strat_train_set["median_house_value"].copy()
# ```
# ### 데이터 정제와 전처리
# 데이터 정제는 결측치 처리, 이상치 및 노이즈 데이터 제거 등을 의미한다.
# 캘리포니아 주택 가격 데이터셋은 구역별 방 총 개수(`total_rooms`) 특성에서
# 결측치가 일부 포함되어 있지만 이상치 또는 노이즈 데이터는 없다.
# 전처리는 수치형 특성과 범주형 특성을 구분하여 수행한다.
#
# * 수치형 특성에 대한 전처리
# * 특성 스케일링
# * 특성 조합
#
# * 범주형 특성 전처리 과정
# * 원-핫-인코딩
# 데이터 정제와 전처리의 모든 과정은 데이터셋에 포함된 샘플을 한꺼번에 변환한다.
# 따라서 모든 변환 과정을 자동화하는
# __파이프라인__<font size="2">pipeline</font> 기법을 활용할 수 있어야 한다.
# **사이킷런 API 활용**
# 사이킷런<font size="2">Scikit-Learn</font>에서 제공하는
# 머신러닝 관련 API를 활용하여 데이터 준비 과정을 자동화하는 파이프라인을 쉽게 구현할 수 있다.
# 파이프라인 구성이 간단한 이유는 사이킷런의 API를 간단하게 합성할 수 있기 때문이다.
# 이점을 이해하려면 먼저 사이킷런이 제공하는 API의 유형을 구분해야 한다.
#
# 사이킷런의 API는 크게 세 종류의 클래스로 나뉜다.
# * 추정기<font size="2">estimator</font>
# * 인자로 주어진 데이터셋 객체 관련된 특정 값 계산
# * `fit()` 메서드: 계산된 값을 저장한 데이터셋 객체 반환
# * 변환기<font size="2">transformer</font>
# * `fit()` 메서드 이외에 `fit()` 가 계산한 값을 이용하여 데이터셋을 변환하는 `transform()` 메서드 지원.
# * `fit()` 메서드와 `transform()` 메서드를 연속해서 호출하는 `fit_transform()` 메서드 지원.
# * 예측기<font size="2">predictor</font>
# * `fit()` 메서드 이외에 `fit()` 가 계산한 값을 이용하여
# 타깃을 예측하는 `predict()` 메서드 지원.
# * `predict()` 메서드가 예측한 값의 성능을 측정하는 `score()` 메서드 지원.
# * 일부 예측기는 예측값의 신뢰도를 평가하는 기능도 제공
# :::{admonition} 변환기 사용법
# :class: warning
#
# `fit()` 과 `fit_transform()` 두 메서드는 훈련셋에 대해서만 적용한다.
# 반면에 테스트셋, 검증셋, 새로운 데이터 등에 대해서는 `transform()` 메서드만 적용한다.
# 즉, 훈련셋을 대상으로 계산된 파라미터를 이용하여
# 훈련 이외의 경우에 `transform()` 메서드를 이용하여 데이터를 변환한다.
# :::
# 사이킷런의 API는 적절한
# **하이퍼파라미터**<font size="2">hyperparameter</font>로 초기화되어 있으며
# 예측 및 변환 과정에 필요한 모든 파라미터를 효율적으로 관리한다.
# :::{admonition} 하이퍼파라미터 vs. 파라미터
# :class: info
#
# 사이킷런 API의 하이퍼파라미터는 해당 객체를 생성할 때 사용되는 값을 가리킨다.
# 즉, API 객체를 생성하기 위해 해당 API 클래스의 생성자인
# `__init__()` 메서드를 호출할 때 사용되는 인자를 가리킨다.
#
# 반면에 파라미터는 지정된 API 객체의 `fit()` 메서드가 훈련되는 모델과
# 관련해서 계산하는 값을 가리킨다.
# 추정기, 변환기, 예측기는 각각의 역할에 맞는 파라미터를 계산한다.
# :::
# ### 데이터 정제
# 입력 데이터셋의 `total_bedrooms` 특성에 207개 구역이 null 값으로 채워져 있다.
# 즉, 일부 구역에 대한 방의 총 개수 정보가 누락되었다.
# <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch02/null-value01.png" width="800"></div>
# 머신러닝 모델은 결측치가 있는 데이터셋을 잘 활용하지 못한다.
# 따라서 아래 옵션 중 하나를 선택해서 데이터를 정제해야 한다.
#
# * 옵션 1: 해당 구역 제거
# * 옵션 2: 해당 특성 삭제
# * 옵션 3: 평균값, 중앙값, 0, 주변에 위치한 값 등 특정 값으로 채우기.
# 여기서는 중앙값으로 채우는 옵션 3 방식을 사이킷런의 `SimpleImputer` 변환기를 이용하여 적용한다.
# <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch02/null-value02.png" width="800"></div>
# ### 범주형 특성 다루기: 원-핫 인코딩
# 해안 근접도(`ocean_proximity`)는 수가 아닌 5 개의 범주를 나타내는 텍스트를 값으로 사용한다.
# 그런데 머신러닝 모델은 일반적으로 텍스트 데이터를 처리하지 못한다.
# 가장 단순한 해결책으로 5 개의 범주를 정수로 변환할 수 있다.
#
# | 범주 | 숫자 |
# |---|---|
# | <1H OCEAN | 0 |
# | INLAND | 1 |
# | ISLAND | 2 |
# | NEAR BAY | 3 |
# | NEAR OCEAN | 4 |
#
# 하지만 이 방식은 수의 크기 특성을 모델이 활용할 수 있기에 위험하다.
# 예를 들어 바닷가 근처(`NEAR OCEAN`)에 위치한 주택이 가장 비쌀 것으로 모델이 학습할 수 있다.
# 범주형 특성을 수치화하는 가장 일반적인 방식은
# **원-핫 인코딩**<font size="2">one-hot encoding</font>이다.
# 원-핫 인코딩은 수치화된 범주들 사이의 크기 비교를 피하기 위해
# 더미<font size="2">dummy</font> 특성을 활용한다.
#
# 원-핫 인코딩을 적용하면 해안 근접도 특성을 삭제하고 대신 다섯 개의 범주 전부를
# 새로운 특성으로 추가한다.
# 또한 다섯 개의 특성에 사용되는 값은 다음 방식으로 지정된다.
#
# * 해당 카테고리의 특성값: 1
# * 나머지 카테고리의 특성값: 0
#
# 예를 들어, `INLAND`를 해안 근접도 특성값으로 갖던 샘플은 다음 모양의 특성값을 갖게 된다.
#
# ```python
# [0, 1, 0, 0, 0]
# ```
# 사이킷런의 `OneHotEncoder` 변환기가 원-핫-인코딩을 지원하며
# 해안 근접도를 변환한 결과는 아래 모양을 갖는다.
# ```python
# array([[0., 0., 0., 1., 0.],
# [1., 0., 0., 0., 0.],
# [0., 1., 0., 0., 0.],
# ...,
# [0., 0., 0., 0., 1.],
# [1., 0., 0., 0., 0.],
# [0., 0., 0., 0., 1.]])
# ```
# ### 특성 스케일링
# 머신러닝 알고리즘은 입력 데이터셋의 특성값들의
# **스케일**<font size="2">scale</font>(척도)이 다르면 제대로 작동하지 않는다.
# 따라서 모든 특성의 척도를 통일하는 **스케일링**<font size="2">scaling</font>이 요구된다.
# 스케일링은 보통 아래 두 가지 방식을 사용한다.
#
# - min-max 스케일링
# - 표준화
# **min-max 스케일링**
#
# **정규화**(normalization)라고도 불리며
# 아래 식을 이용하여 모든 특성값을 0에서 1 사이의 값으로 변환한다.
# 단, $max$ 와 $min$ 은 각각 특성값들의 최댓값과 최솟값을 가리킨다.
#
# $$
# \frac{x-min}{max-min}
# $$
#
# min-max 스케일링은 이상치에 매우 민감하다.
# 예를 들어 이상치가 매우 크면 분모가 분자에 비해 훨씬 크게 되어 변환된 값이 0 근처에 몰리게 된다.
# 사이킷런의 `MinMaxScaler` 변환기가 min-max 스케일링을 지원한다.
# **표준화(standardization)**
#
# 아래식을 이용하여 특성값을 변환한다.
# 단, $\mu$ 와 $\sigma$ 는 각각 특성값들의 평균값과 표준편차를 가리킨다.
#
# $$
# \frac{x-\mu}{\sigma}
# $$
#
# 변환된 데이터셋은 **표준정규분포**를 따르며, 이상치에 상대적으로 덜 영향을 받는다.
# 여기서는 사이킷런의 `StandardScaler` 변환기를 이용하여 표준화를 적용한다.
# :::{admonition} 타깃 데이터셋 전처리
# :class: info
#
# 데이터 준비는 기본적으로 입력 데이터셋만을 대상으로 **정제**<font size="2">cleaning</font>와
# **전처리**<font size="2">preprocessing</font> 단계로 실행된다.
# 타깃 데이터셋은 결측치가 없는 경우라면 일반적으로 정제와 전처리 대상이 아니지만
# 경우에 따라 변환이 요구될 수 있다.
# 예를 들어, 타깃 데이터셋의 두터운 꼬리 분포를 따르는 경우
# 로그 함수를 적용하여 데이터의 분포가 보다 균형잡히도록 하는 것이 권장된다.
# 하지만 이런 경우 예측값을 계산할 때 원래의 척도로 되돌려야 하며
# 이를 위해 대부분의 사이킷런 변환기가 지원하는 `inverse_transform()` 메서드를 활용할 수 있다.
# :::
# ### 사용자 정의 변환기
# 데이터 준비 과정에서 경우에 따라 사용자가 직접 변환기를 구현해야할 필요가 있다.
# #### `FunctionTransformer` 변환기
# `fit()` 메서드를 먼저 사용하지 않고 `transform()` 메서드를 바로 적용해도 되는
# 변환기는 `FunctionTransformer` 객체를 활용하여 생성할 수 있다.
# **로그 함수 적용 변환기**
#
# 데이터셋이 두터운 꼬리 분포를 따르는 경우,
# 즉 히스토그램이 지나치게 한쪽으로 편향된 경우
# 스케일링을 적용하기 전에 먼저
# 로그 함수를 적용하여 어느 정도 좌우 균형이 잡힌 분포로 변환하는 게 좋다.
# 아래 그림은 인구에 로그함수를 적용할 때 분포가 보다 균형잡히는 것을 잘 보여준다.
#
# <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch02/homl02-log_app.jpg" width="600"></div>
# 두터운 꼬리 분포를 갖는 데이터셋에 로그 함수를 적용하고자 하면 아래 변환기를 사용하면 된다.
#
# ```python
# FunctionTransformer(np.log, inverse_func=np.exp)
# ```
# **비율 계산 변환기**
#
# 두 개의 특성 사이의 비율을 계산하여 새로운 특성을 생성하는 변환기 또한
# `FunctionTransformer`를 활용할 수 있다.
#
# ```python
# FunctionTransformer(lambda X: X[:, [0]] / X[:, [1]])
# ```
# 비율 계산 변환기를 이용하여 아래 특성을 새롭게 생성할 수 있다.
#
# - 가구당 방 개수(rooms for household)
# - 방 하나당 침실 개수(bedrooms for room)
# - 가구당 인원(population per household)
# #### 사용자 정의 변환 클래스
# `SimpleImputer` 변환기의 경우처럼
# 먼저 `fit()` 메서드를 이용하여 평균값, 중앙값 등을 확인한 다음에
# `transform()` 메서드를 적용할 수 있는 변환기는 클래스를 직접 선언해야 한다.
# 이때 사이킷런의 다른 변환기와 호환이 되도록 하기 위해
# `fit()` 과 `transform()` 등 다양한 메서드를 모두 구현해야 한다.
#
# 예를 들어, 캘리포니아 주 2만 여개의 구역을 서로 가깝게 위치한 구역들의 군집으로 구분하는 변환기는
# 다음과 같다. 단, 아래 코드를 지금 이해할 필요는 없다.
# ```python
# class ClusterSimilarity(BaseEstimator, TransformerMixin):
# def __init__(self, n_clusters=10, gamma=1.0, random_state=None):
# self.n_clusters = n_clusters
# self.gamma = gamma
# self.random_state = random_state
#
# def fit(self, X, y=None, sample_weight=None):
# self.kmeans_ = KMeans(self.n_clusters, random_state=self.random_state)
# self.kmeans_.fit(X, sample_weight=sample_weight)
# return self # 항상 self 반환
#
# def transform(self, X):
# return rbf_kernel(X, self.kmeans_.cluster_centers_, gamma=self.gamma)
#
# def get_feature_names_out(self, names=None):
# return [f"Cluster {i} similarity" for i in range(self.n_clusters)]
# ```
# :::{admonition} `KMeans` 모델과 `rbf_kernel()` 함수
# :class: info
#
# 위 클래스는 `KMeans` 모델과 `rbf_kernel()` 함수를 활용한다.
#
# **`KMeans` 모델**
#
# {numref}`%s장 <ch:unsupervisedLearning>` 비지도 학습에서 다룰 군집 알고리즘 모델이다.
#
# **`rbf_kernel()` 함수**
#
# 다음 가우시안 RBF 함수를 활용한다.
# $\mathbf{p}$ 는 특정 지점을 가리키며,
# $\mathbf{p}$ 에서 조금만 멀어져도 함숫값이 급격히 작아진다.
#
#
# $$
# \phi(\mathbf{x},\mathbf{p}) = \exp \left( -\gamma \|\mathbf{x} - \mathbf{p} \|^2 \right)
# $$
#
# 예를 들어 아래 이미지는 중간 주택 년수가 35년에서 멀어질 수록
# 함숫값이 급격히 0에 가까워지는 것을 보여준다.
# 하이퍼파라미터인 **감마**($\gamma$, gamma)는 얼마나 빠르게 감소하도록 하는가를 결정한다.
# 즉, 감마 값이 클 수록 보다 좁은 종 모양의 그래프가 그려진다.
#
# <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch02/homl02-rbf_kernel.jpg" width="400"></div>
# :::
# `ClusterSimilarity` 변환기를 이용하여 얻어진 군집 특성을 이용하면
# 아래 그림과 같은 결과를 얻을 수 있다.
#
# - 모든 구역을 10개의 군집으로 나눈다.
# - ╳ 는 각 군집의 중심 구역을 나타낸다.
# <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch02/homl02-cluster.jpg" width="550"></div>
# ### 변환 파이프라인
# 모든 전처리 단계가 정확한 순서대로 진행되어야 한다.
# 이를 위해 사이킷런의 `Pipeline` 클래스를 이용하여 여러 변환기를 순서대로
# 실행하는 파이프라인 변환기를 활용한다.
# **`Pipeline` 클래스**
#
# 예를 들어, 수치형 특성을 대상으로 결측치를 중앙값으로 채우는 정제와
# 표준화 스케일링을 연속적으로 실행하는 파이프라인은 다음과 같이 정의한다.
#
# ```python
# num_pipeline = Pipeline([("impute", SimpleImputer(strategy="median")),
# ("standardize", StandardScaler())])
# ```
#
# * `Pipeline` 객체를 생성할 때 사용되는 인자는 이름과 추정기로 이루어진 쌍들의 리스트이다.
#
# * 마지막 추정기를 제외한 나머지 추정기는 모두 변환기다.
# 즉, 마지막 추정기는 `fit()` 메서드만 지원해도 되지만
# 나머지는 `fit_transform()` 메서드가 지원되는 변환기어야 한다.
#
# * `num_pipeline.fit()` 를 호출하면
# 마지막 변환기 까지는 `fit_transform()` 메소드가 연속적으로 호출되고
# 마지막 변환기의 `fit()` 메서드 최종 호출된다.
#
# * 파이프라인으로 정의된 추정기의 유형은 마지막 추정기의 유형과 동일하다.
# 따라서 `num_pipeline` 은 변환기다.
# **`make_pipeline()` 함수**
#
# 파이프라인에 포함되는 변환기의 이름이 중요하지 않다면 `make_pipeline()` 함수를 이용하여
# `Pipeline` 객체를 생성할 수 있다. 이름은 자동으로 지정된다.
#
# 위 파이프라인과 동일한 파이프라인 객체를 다음과 같이 생성할 수 있다.
#
# ```python
# make_pipeline(SimpleImputer(strategy="median"),
# StandardScaler())
# ```
# **`ColumnTransformer` 클래스**
#
# `ColumnTransformer` 클래스는 특성별로 전처리를 지정할 수 있다.
# 이 기능을 이용하여 수치형 특성과 범주형 특성을 구분해서
# 전처리하는 통합 파이프라인을 다음과 같이 구성할 수 있다.
#
# * 수치형 특성: `num_pipeline` 변환기
# * 범주형 특성: `OneHotEncoder` 변환기
#
# ```python
# num_attribs = ["longitude", "latitude", "housing_median_age", "total_rooms",
# "total_bedrooms", "population", "households", "median_income"]
# cat_attribs = ["ocean_proximity"]
#
# cat_pipeline = make_pipeline(
# SimpleImputer(strategy="most_frequent"),
# OneHotEncoder(handle_unknown="ignore"))
#
# preprocessing = ColumnTransformer([
# ("num", num_pipeline, num_attribs),
# ("cat", cat_pipeline, cat_attribs),
# ])
# ```
# **`make_column_selector()` 함수**
#
# 파이프라인에 포함되는 각 변환기를 적용할 특성을 일일이 나열하는 일이 어려울 수 있다.
# 이때 지정된 자료형을 사용하는 특성들만을 뽑아주는 `make_column_selector()` 함수를
# 유용하게 활용할 수 있다.
#
# 위 `preprocessing` 변환기를 아래와 같이 정의할 수 있다.
#
# ```python
# preprocessing = ColumnTransformer([
# ("num", num_pipeline, make_column_selector(dtype_include=np.number)),
# ("cat", cat_pipeline, make_column_selector(dtype_include=object)
# ])
# ```
# **`make_column_transformer()` 함수**
#
# `ColumnTransformer` 파이프라인에 포함되는 변환기의 이름이 중요하지 않다면
# `make_column_transformer()` 함수를 이용할 수 있으며,
# `make_pipeline()` 함수와 유사하게 작동한다.
#
# 위 `preprocessing` 변환기를 아래와 같이 정의할 수 있다.
#
# ```python
# preprocessing = make_column_transformer(
# (num_pipeline, make_column_selector(dtype_include=np.number)),
# (cat_pipeline, make_column_selector(dtype_include=object)),
# )
# ```
# ### 캘리포니아 데이터셋 변환 파이프라인
# 다음 변환기를 모아 캘리포니아 데이터셋 전용 변환 파이프라인을 생성할 수 있다.
# **(1) 비율 변환기**
#
# 가구당 방 개수, 방 하나당 침실 개수, 가구당 인원 등
# 비율을 사용하는 특성을 새로 추가할 때 사용되는 변화기를 생성하는 함수를 정의한다.
# ```python
# def column_ratio(X):
# return X[:, [0]] / X[:, [1]]
#
# def ratio_pipeline(name=None):
# return make_pipeline(
# SimpleImputer(strategy="median"),
# FunctionTransformer(column_ratio,
# feature_names_out=[name]),
# StandardScaler())
# ```
# **(2) 로그 변환기**
#
# 데이터 분포가 두터운 꼬리를 갖는 특성을 대상으로 로그 함수를 적용하는 변환기를 지정한다.
# ```python
# log_pipeline = make_pipeline(SimpleImputer(strategy="median"),
# FunctionTransformer(np.log),
# StandardScaler())
# ```
# **(3) 군집 변환기**
#
# 구역의 위도와 경도를 이용하여 구역들의 군집 정보를 새로운 특성으로 추가하는 변환기를 지정한다.
# ```python
# cluster_simil = ClusterSimilarity(n_clusters=10, gamma=1., random_state=42)
# ```
# **(4) 기타**
#
# 특별한 변환이 필요 없는 경우에도 기본적으로 결측치 문제 해결과 스케일을 조정하는 변환기를 사용한다.
# ```python
# default_num_pipeline = make_pipeline(SimpleImputer(strategy="median"),
# StandardScaler())
# ```
# 앞서 언급된 모든 변환기를 특성별로 알아서 처리하는 변환기는 다음과 같다.
# `remainder=default_num_pipeline`: 언급되지 않은 특성을 처리하는 변환기를 지정한다.
# 삭제를 의미하는 `drop` 이 기본값이며 이외에 `passthrough` 는 변환하지 않는 것을 의미한다.
# ```python
# preprocessing = ColumnTransformer([
# ("bedrooms_ratio", ratio_pipeline("bedrooms_ratio"), # 방당 침실 수
# ["total_bedrooms", "total_rooms"]),
# ("rooms_per_house", ratio_pipeline("rooms_per_house"), # 가구당 방 수
# ["total_rooms", "households"]),
# ("people_per_house", ratio_pipeline("people_per_house"), # 가구당 인원
# ["population", "households"]),
# ("log", log_pipeline, ["total_bedrooms", "total_rooms", # 로그 변환
# "population", "households", "median_income"]),
# ("geo", cluster_simil, ["latitude", "longitude"]), # 구역별 군집 정보
# ("cat", cat_pipeline, make_column_selector(dtype_include=object)), # 범주형 특성 전처리
# ],
# remainder=default_num_pipeline) # 중간 주택 년수(housing_median_age) 대상
# ```
# ## 모델 선택과 훈련
# 훈련셋 준비가 완료된 상황에서 모델을 선택하고 훈련시키는 일이 남아 있다.
#
# 사이킷런이 제공하는 예측기 모델을 사용하면 훈련은 기본적으로 간단하게 진행된다.
# 여기서는 사이킷런이 제공하는 다양한 모델의 사용법과 차이점을 간단하게 살펴본다.
# 각 모델의 자세한 특징과 상세 설명은 앞으로 차차 이루어질 것이다.
# :::{admonition} 전처리 포함 파이프라인 모델
# :class: info
#
# 소개되는 모든 모델은 앞서 설명한 전처리 과정과 함께 하나의 파이프라인으로 묶여서 정의된다.
# 이는 테스트셋과 미래의 모든 입력 데이터셋에 대해서도 전처리를 별도로 신경쓸 필요가 없게 해준다.
# :::
# ### 훈련셋 대상 훈련 및 평가
# **선형 회귀 모델 ({numref}`%s장 <ch:trainingModels>`)**
# * 훈련 및 예측
#
# ```python
# lin_reg = make_pipeline(preprocessing, LinearRegression())
# lin_reg.fit(housing, housing_labels)
# lin_reg.predict(housing)
# ```
#
# - RMSE(평균 제곱근 오차)
#
# ```python
# lin_rmse = mean_squared_error(housing_labels, housing_predictions,
# squared=False)
# ```
# - 훈련 결과
# - RMSE(`lin_rmse`)가 68687.89 정도로 별로 좋지 않다.
# - 훈련된 모델이 훈련셋에 __과소적합__ 되었다.
# - 보다 좋은 특성을 찾거나 더 강력한 모델을 적용해야 한다.
# **결정트리 회귀 모델 ({numref}`%s장 <ch:decisionTrees>`)**
# 결정트리 회귀 모델은 데이터에서 복잡한 비선형 관계를 학습할 때 사용한다.
#
# * 훈련 및 예측
#
# ```python
# tree_reg = make_pipeline(preprocessing, DecisionTreeRegressor(random_state=42))
# tree_reg.fit(housing, housing_labels)
# housing_predictions = tree_reg.predict(housing)
#
# tree_rmse = mean_squared_error(housing_labels, housing_predictions,
# squared=False)
# ```
# - 훈련 결과
# - RMSE(`tree_rmse`)가 0으로 완벽해 보인다.
# - 모델이 훈련셋에 심각하게 __과대적합__ 되었음을 의미한다.
# - 실전 상황에서 RMSE가 0이 되는 것은 불가능하다.
# - 테스트셋에 대한 RMSE는 매우 높게 나온다.
# ### 교차 검증
# __교차 검증__<font size="2">cross validation</font>을 이용하여
# 훈련중인 모델의 성능을 평가할 수 있다.
# **k-겹 교차 검증**
#
# * 훈련셋을 __폴드__(fold)라 불리는 k-개의 부분 집합으로 무작위로 분할한다.
# * 모델을 총 k 번 훈련한다.
# * 매 훈련마다 하나의 폴드를 선택하여 검증 데이터셋 지정.
# * 나머지 (k-1) 개의 폴드를 대상으로 훈련
# * 매 훈련이 끝날 때마다 선택된 검증 데이터셋을 이용하여 모델 평가
# * 매번 다른 폴드 활용
# * 최종평가는 k-번 평가 결과의 평균값을 활용한다.
#
# <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/handson-ml3/master/jupyter-book/imgs/ch02/cross-val10a.png" width="550"></div>
# **사이킷런의 `cross_val_score()` 함수**
#
# `cross_val_score()` 함수는 k-겹 교차 검증 과정에서
# 훈련중인 모델의 성능을 측정한다.
#
# 측정값은 높을 수록 좋은 성능으로 평가되기에 회귀 모델의 경우
# 일반적으로 RMSE의 음숫값을 사용한다.
# 이를 위해 `scoring="neg_root_mean_squared_error"` 키워드 인자를 사용한다.
#
# 아래 코드는 10 개의 폴드를 사용(`cv=10`)하여 결정트리 회귀 모델에 대한 교차 검증을 진행하고 평가한다.
#
# ```python
# tree_rmses = -cross_val_score(tree_reg, housing, housing_labels,
# scoring="neg_root_mean_squared_error", cv=10)
# ```
# :::{admonition} `scoring` 키워드 인자
# :class: info
#
# 교차 검증에 사용되는 모델의 종류에 따라 다양한 방식으로 모델의 성능을 측정할 수 있으며
# `scoring` 키워드 인자를 이용하여 지정한다.
# 현재 사용 가능한 옵션값은 [사이킷런의 Metrics and Scoring 문서](https://scikit-learn.org/stable/modules/model_evaluation.html)에서
# 확인할 수 있다.
# :::
# **랜덤 포레스트 회귀 모델 ({numref}`%s장 <ch:ensemble>`)**
#
# **랜덤 포레스트**<font size="2">random forest</font> 회귀 모델은
# 여러 개의 결정트리를 동시에 훈련시킨 후
# 각 모델의 예측값의 평균값 등을 이용하는 모델이다.
# 각 모델은 교차 검증처럼 서로 다른 훈련셋을 대상으로 학습한다.
#
# 사이킷런의 `RandomForestRegressor` 모델은 기본값으로 100개의 결정트리를 동시에 훈련시킨다.
#
# ```python
# forest_reg = make_pipeline(preprocessing,
# RandomForestRegressor(n_estimators=100, random_state=42))
# ```
#
# 랜덤 포레스트 모델에 대한 교차 검증을 적용하면 폴드 수에 비례하여 훈련 시간이 더 오래 걸린다.
#
# ```python
# forest_rmses = -cross_val_score(forest_reg, housing, housing_labels,
# scoring="neg_root_mean_squared_error", cv=10)
# ```
# ## 모델 튜닝
# 지금까지 살펴 본 모델 중에서 랜덤 포레스트 회귀 모델의 성능이 가장 좋았다.
# 이렇게 가능성이 높은 모델을 찾은 다음엔 모델의 세부 설정(하이퍼파라미터)을 조정하거나
# 성능이 좋은 모델 여러 개를 이용하여 모델의 성능을 최대한 끌어올릴 수 있다.
#
# 모델 튜닝은 보통 다음 두 가지 방식을 사용한다.
#
# * 그리드 탐색
# * 랜덤 탐색
# ### 그리드 탐색
# 지정된 하이퍼파라미터의 모든 조합에 대해 교차 검증을 진행하여 최선의 하이퍼파라미터 조합을 찾는다.
# **`GridSearchCV` 클래스**
#
# 랜덤 포레스트 모델을 대상으로 그리드 탐색을 다음과 같이 실행하면
# 총 (3x3 + 2x3 = 15) 가지의 모델의 성능을 확인한다.
# 또한 3-겹 교차 검증(`cv=3`)을 진행하기에 모델 훈련을 총 45(=15x3)번 진행한다.
#
# ```python
# full_pipeline = Pipeline([
# ("preprocessing", preprocessing),
# ("random_forest", RandomForestRegressor(random_state=42)),
# ])
#
# param_grid = [
# {'preprocessing__geo__n_clusters': [5, 8, 10],
# 'random_forest__max_features': [4, 6, 8]},
# {'preprocessing__geo__n_clusters': [10, 15],
# 'random_forest__max_features': [6, 8, 10]},
# ]
#
# grid_search = GridSearchCV(full_pipeline, param_grid, cv=3,
# scoring='neg_root_mean_squared_error')
#
# grid_search.fit(housing, housing_labels)
# ```
# ### 랜덤 탐색
# 그리드 탐색은 적은 수의 조합을 실험해볼 때만 유용하다.
# 반면에 하이퍼파라미터의 탐색 공간이 크면 랜덤 탐색이 보다 유용하다.
# 랜덤 탐색은 하이퍼파라미터 조합을 임의로 지정된 횟수만큼 진행한다.
# **`RandomizedSearchCV` 클래스**
#
# 아래 코드는 다음 두 하이퍼파라미터를 대상으로
# 10번(`n_iter=10`) 지정된 구간 내에서 무작위 선택을 진행한다.
#
# - `preprocessing__geo__n_clusters`
# - `random_forest__max_features`
#
# 또한 3-겹 교차검증(`cv=3`)을 진행하기에 모델 훈련을 총 30(=10x3)번 진행한다.
#
# ```python
# param_distribs = {'preprocessing__geo__n_clusters': randint(low=3, high=50),
# 'random_forest__max_features': randint(low=2, high=20)}
#
# rnd_search = RandomizedSearchCV(
# full_pipeline, param_distributions=param_distribs, n_iter=10, cv=3,
# scoring='neg_root_mean_squared_error', random_state=42)
#
# rnd_search.fit(housing, housing_labels)
# ```
# ### 앙상블 기법
# 결정트리 모델 하나보다 랜덤 포레스트처럼 여러 모델을 활용하는 모델이
# 일반적으로 보다 좋은 성능을 낸다.
# 이처럼 좋은 성능을 내는 여러 모델을 **앙상블**<font size="2">ensemble</font>
# 학습시킨 후 평균값을 사용하면 보다 좋은 성능을 내는 모델을 얻게 된다.
# 앙상블 기법에 대해서는 {numref}`%s장 <ch:ensemble>`에서 자세히 다룬다.
# ### 훈련된 최선의 모델 활용
# 그리드 탐색 또는 랜덤 탐색을 통해 얻어진 최선의 모델을 분석해서 문제에 대한 통찰을 얻을 수 있다.
#
# 예를 들어, 최선의 랜덤 포레스트 모델로부터 타깃 예측에 사용된 특성들의 상대적 중요도를 확인하여
# 중요하지 않은 특성을 제외할 수 있다.
#
# 캘리포니아 주택 가격 예측 모델의 경우 랜덤 탐색을 통해 찾아낸 최선의 모델에서
# `feature_importances_`를 확인하면 다음 정보를 얻는다.
#
# - `log__median_income` 특성이 가장 중요하다.
# - 해안 근접도 특성 중에서 `INLAND` 특성만 중요하다.
# ```python
# final_model = rnd_search.best_estimator_ # 최선 모델
# feature_importances = final_model["random_forest"].feature_importances_ # 특성별 상대적 중요도
#
# # 중요도 내림차순 정렬
# sorted(zip(feature_importances,
# final_model["preprocessing"].get_feature_names_out()),
# reverse=True)
#
# [(0.18694559869103852, 'log__median_income'),
# (0.0748194905715524, 'cat__ocean_proximity_INLAND'),
# (0.06926417748515576, 'bedrooms_ratio__bedrooms_ratio'),
# (0.05446998753775219, 'rooms_per_house__rooms_per_house'),
# (0.05262301809680712, 'people_per_house__people_per_house'),
# (0.03819415873915732, 'geo__Cluster 0 similarity'),
# [...]
# (0.00015061247730531558, 'cat__ocean_proximity_NEAR BAY'),
# (7.301686597099842e-05, 'cat__ocean_proximity_ISLAND')]
# ```
# ## 최선 모델 저장 및 활용
# 완성된 모델은 항상 저장해두어야 한다.
# 업데이트된 모델이 적절하지 않은 경우 이전 모델로 되돌려야 할 수도 있기 때문이다.
# 모델의 저장과 불러오기는 `joblib` 모듈을 활용한다.
#
# - 저장하기
#
# ```python
# joblib.dump(final_model, "my_california_housing_model.pkl")
# ```
# - 불러오기
#
# ```python
# final_model_reloaded = joblib.load("my_california_housing_model.pkl")
# ```
# ## 연습문제
# 참고: [(실습) 머신러닝 프로젝트 처음부터 끝까지 1부](https://colab.research.google.com/github/codingalzi/handson-ml3/blob/master/practices/practice_end2end_ml_project_1.ipynb) 와
# [(실습) 머신러닝 프로젝트 처음부터 끝까지 2부](https://colab.research.google.com/github/codingalzi/handson-ml3/blob/master/practices/practice_end2end_ml_project_2.ipynb)
|
#!/usr/bin/env python
import argparse
import numpy as np
import pandas as pd
import scipy as sp
import scipy.stats as st
import sys
# Task-allocation mechanisms; a scenario only counts when a run for every
# one of these succeeded.
MECHANISMS = ['PSI', 'SSI', 'SEL']
# Metric columns that are checked against the validity threshold below.
METRICS = ['EXECUTION_PHASE_TIME',
           'TOTAL_RUN_TIME',
           'MAXIMUM_ROBOT_DISTANCE',
           'DELIBERATION_TIME',
           'MEAN_MSG_TIME',
           'MECHANISM_SELECTION_TIME']
# A run with any metric above this value is treated as an outlier and its
# whole scenario group is discarded.  (Units per metric -- TODO confirm.)
metric_valid_threshold = 10000
# Destination CSV for the filtered, mission-tagged rows.
OUTPUT_FILE = 'grouped_stats.csv'
def _metrics_valid(rows):
    """Return True when no row's metric exceeds metric_valid_threshold.

    One offending value invalidates the entire mission group.
    """
    for metric in METRICS:
        for row in rows:
            if row[metric] > metric_valid_threshold:
                print("{0} has an unusually high value, skipping this mission...".format(metric))
                return False
    return True


def print_grouped_stats(stats_csv):
    """Filter experiment statistics into complete, valid mission groups.

    Reads `stats_csv`, keeps only scenarios for which every mechanism in
    MECHANISMS produced a run with sane metric values, tags each group with a
    synthetic MISSION_ID, and writes the result to OUTPUT_FILE.
    """
    try:
        stats = pd.read_csv(stats_csv)
    except Exception:
        # Was a bare `except:` -- that also swallowed SystemExit/KeyboardInterrupt.
        print("Couldn't open/parse {0}! Exiting.".format(stats_csv))
        sys.exit(1)

    group_stats = stats[stats.MECHANISM.isin(MECHANISMS)]

    valid_rows = []
    mission_id = 1000000  # synthetic ids so groups are distinguishable from scenario ids
    for name, group in group_stats.groupby('SCENARIO_ID'):
        if len(group) < len(MECHANISMS):
            print("At least one (mechanism) run for this scenario ({0}) failed, skipping...".format(name))
            continue
        # .copy() so the MISSION_ID assignment below never hits a view of `stats`.
        rows = [group.iloc[idx].copy() for idx in range(len(MECHANISMS))]
        if not _metrics_valid(rows):
            continue
        for row in rows:
            row['MISSION_ID'] = mission_id
        valid_rows.extend(rows)
        mission_id += 1

    # DataFrame.append() was removed in pandas 2.0; build the frame once instead.
    mech_group_stats = pd.DataFrame(valid_rows)
    # Integer division: the count of groups is a whole number.
    print("{0} groups of {1}".format(len(mech_group_stats) // len(MECHANISMS), len(MECHANISMS)))
    mech_group_stats.to_csv(OUTPUT_FILE, index=False)
if __name__ == '__main__':
    # Single positional argument: the raw statistics CSV to group.
    arg_parser = argparse.ArgumentParser(
        description='Group statistics by mission (starting and task locations).')
    arg_parser.add_argument('stats_csv',
                            help='CSV file containing experiment statistics.')
    parsed = arg_parser.parse_args()
    print_grouped_stats(parsed.stats_csv)
|
import sys
from setuptools import setup
# Package metadata for pdistcc: a distributed-compilation client/server pair.
# The two console scripts expose the client (pdistcc) and the daemon (pdistccd).
setup(
    name='pdistcc',
    packages=[
        'pdistcc',
        'pdistcc.compiler',
    ],
    entry_points={
        'console_scripts': [
            'pdistcc=pdistcc.cli:main',
            'pdistccd=pdistcc.cli:server_main',
        ],
    },
    install_requires=['uhashring==2.0'],
)
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.ENTSOE.Dynamics.IEC61970.Core.CorePowerSystemResource import CorePowerSystemResource
class TurbineGovernorsGovSteam0(CorePowerSystemResource):
    """Generated CIM/ENTSOE dynamics class for the GovSteam0 turbine-governor
    model.  All parameters are plain floats defaulting to 0.0; their physical
    meaning is not documented in this generated source -- presumably the
    standard GovSteam0 constants (t1-t3 time constants, r droop, vmin/vmax
    valve limits, mwbase machine rating) -- TODO confirm against the CIM spec.
    """

    def __init__(self, t2=0.0, t3=0.0, t1=0.0, vmax=0.0, dt=0.0, r=0.0, vmin=0.0, mwbase=0.0, *args, **kw_args):
        """Initialises a new 'TurbineGovernorsGovSteam0' instance.

        All keyword parameters are floats (see _attr_types); extra positional
        and keyword arguments are forwarded to CorePowerSystemResource.

        @param t2:
        @param t3:
        @param t1:
        @param vmax:
        @param dt:
        @param r:
        @param vmin:
        @param mwbase:
        """
        self.t2 = t2

        self.t3 = t3

        self.t1 = t1

        self.vmax = vmax

        self.dt = dt

        self.r = r

        self.vmin = vmin

        self.mwbase = mwbase

        super(TurbineGovernorsGovSteam0, self).__init__(*args, **kw_args)

    # Serializer metadata (generated): attribute ordering, types and defaults.
    _attrs = ["t2", "t3", "t1", "vmax", "dt", "r", "vmin", "mwbase"]
    _attr_types = {"t2": float, "t3": float, "t1": float, "vmax": float, "dt": float, "r": float, "vmin": float, "mwbase": float}
    _defaults = {"t2": 0.0, "t3": 0.0, "t1": 0.0, "vmax": 0.0, "dt": 0.0, "r": 0.0, "vmin": 0.0, "mwbase": 0.0}
    _enums = {}
    _refs = []
    _many_refs = []
|
# -*- coding: utf-8 -*-
import json, re, torch, os
import pandas as pd
from text2image import txt2im
from tqdm import tqdm
from sklearn.model_selection import train_test_split
import numpy as np
from gensim.models import KeyedVectors
# Directory holding the corpora, vocab.json and exported tensors (relative to cwd).
input_dir = '../input/'
# Directory holding the pretrained 'sgns.merge.char' word2vec file.
resource_dir = '../resource/'
def load_corpus():
    """Load review sentences from the two sentiment corpora.

    Each corpus is deduplicated independently; NaN entries are dropped.
    """
    frames = [
        pd.read_csv(input_dir + "text_set/waimai_10k.csv"),
        pd.read_csv(input_dir + "text_set/ChnSentiCorp_htl_all.csv"),
    ]
    merged = []
    for frame in frames:
        merged.extend(frame.review.drop_duplicates().tolist())
    return [text for text in merged if str(text) != 'nan']
# Tokenizer used by split_line: matches a punctuation-wrapped token, a word
# with optional surrounding punctuation, bare punctuation, or a percentage.
line_pattern = re.compile((u""
    + u"(%(prefix)s+\S%(postfix)s+)"  # punctuation on both sides of a char
    + u"|(%(prefix)s*\w+%(postfix)s*)"  # word with optional punctuation
    + u"|(%(prefix)s+\S)|(\S%(postfix)s+)"  # leading / trailing punctuation
    + u"|(\d+%%)"  # percentage
) % {
    "prefix": u"['\(<\[\{‘“(《「『]",
    "postfix": u"[:'\)>\]\}:’”)》」』,;\.\?!,、;。?!]",
})


def split_line(line, max_len=64):
    """Greedily cut *line* into chunks shorter than *max_len* characters,
    preferring cut points at the token boundaries found by line_pattern.
    Characters not matched by the pattern advance one at a time.
    """
    chunks = []
    current = ''
    while line:
        matched = line_pattern.match(line)
        step = matched.end() if matched else 1
        token = line[:step]
        if len(current) + len(token) < max_len:
            current += token
        else:
            chunks.append(current)
            current = token[:max_len]
        line = line[step:]
    if current != '':
        chunks.append(current)
    return chunks
def get_vocab(review):
    """Build a character-level vocabulary from *review*.

    Special tokens occupy ids 0-5; every new character gets the next id in
    order of first appearance.
    """
    vocab = {'[PAD]': 0, '[BOS]': 1, '[EOS]': 2, '[CLS]': 3, '[SEP]': 4, '[UNK]': 5}
    for sentence in review:
        for ch in sentence:
            vocab.setdefault(ch, len(vocab))
    return vocab
def save_w2v():
    """Export pretrained 300-d char embeddings for the first 4000 vocab ids.

    Ids missing from the vocab or absent from the pretrained model get zero
    vectors.  Result is saved as a torch tensor at input_dir/word2vec.torch.
    """
    vocab = json.load(open(input_dir + 'vocab.json', 'r'))
    model = KeyedVectors.load_word2vec_format(resource_dir + 'sgns.merge.char')
    vocab_inverse = {vocab[k]: k for k in vocab}
    zeros = np.zeros((300), dtype=np.float32)
    w2v = []
    # NOTE(review): 4000 is a hard cap on exported rows -- presumably the
    # downstream embedding table size; confirm against the model config.
    for i in range(4000):
        # Was .get(i, 0): an int default can never be a word2vec key, so it
        # only worked by accident.  Use None and test explicitly.
        word = vocab_inverse.get(i)
        if word is not None and word in model:
            w2v.append(model.get_vector(word))
        else:
            w2v.append(zeros)
    w2v = torch.from_numpy(np.asarray(w2v, dtype=np.float32))
    torch.save(w2v, input_dir + 'word2vec.torch')
def main():
    """Build the char vocab (if missing) and render every corpus sentence
    chunk to a PNG via txt2im, numbering the images sequentially.
    """
    new_text1 = "蚂蚁准备上市前,杭州一栋大楼的员工都沸腾了。好多员工要变成千万富翁,基本无心工作。"
    new_text2 = "蚂蚁暂缓上市后,员工失望至极,退订豪车豪宅,据说杭州房价连夜调降15%。"

    review = load_corpus()
    review += [new_text1, new_text2]

    vocab_path = input_dir + 'vocab.json'
    if os.path.exists(vocab_path):
        vocab = json.load(open(vocab_path, 'r'))
    else:
        vocab = get_vocab(review)
        json.dump(vocab, open(vocab_path, 'w'), ensure_ascii=False)

    count = 0
    for sentence in tqdm(review):
        for chunk in split_line(sentence):
            if chunk != '':
                txt2im(chunk, "image_%d.png" % count, neighbor=5)
                count += 1
    # The two hand-written sentences are additionally rendered unsplit.
    txt2im(new_text1, "image_%d.png" % count, neighbor=5)
    count += 1
    txt2im(new_text2, "image_%d.png" % count, neighbor=5)
def train_w2v():
    """Train a char-level Word2Vec on meta_data.jsonl and export the first
    4000 vocab ids as a 32-d torch tensor (review.w2v.torch).
    """
    import gensim

    class SentenceIterator:
        """Streams each jsonl line's 'text' field as a list of characters."""

        def __init__(self):
            self.filepath = input_dir + 'meta_data.jsonl'

        def __iter__(self):
            for line in open(self.filepath, 'r'):
                # BUG FIX: was orjson.loads, but orjson is never imported in
                # this file (NameError at runtime); stdlib json parses the
                # same payload.
                yield [ch for ch in json.loads(line.strip())['text']]

    sentences = SentenceIterator()
    w2v_model = gensim.models.Word2Vec(sentences, size=32, window=20, min_count=1, workers=6, iter=10, sg=1, negative=20, sample=1e-3)
    w2v_model.save(input_dir + 'review.w2v')

    vocab = json.load(open(input_dir + 'vocab.json', 'r'))
    vocab_inverse = {vocab[k]: k for k in vocab}
    zeros = np.zeros((32), dtype=np.float32)
    w2v = []
    for i in range(4000):
        word = vocab_inverse.get(i, '[PAD]')
        # NOTE(review): `word in w2v_model` relies on gensim<4 membership
        # semantics (the size=/iter= kwargs above are also gensim 3 era);
        # gensim 4+ would need `word in w2v_model.wv` -- confirm version.
        if word in w2v_model:
            w2v.append(w2v_model.wv.get_vector(word))
        else:
            w2v.append(zeros)
    w2v = torch.from_numpy(np.asarray(w2v, dtype=np.float32))
    torch.save(w2v, input_dir + 'review.w2v.torch')
if __name__ == '__main__':
    # Render the corpus to images, then export pretrained char embeddings.
    main()
    #train_w2v()
    save_w2v()
|
# -*- coding: utf-8 -*-
import numpy as np
import dezero
from dezero import cuda, utils
from dezero.core import Function, Variable, as_variable, as_array
from dezero.functions_conv import conv2d
from dezero.functions_conv import deconv2d
from dezero.functions_conv import conv2d_simple
from dezero.functions_conv import im2col
from dezero.functions_conv import col2im
from dezero.functions_conv import pooling_simple
from dezero.functions_conv import pooling
from dezero.functions_conv import average_pooling
from dezero.core import add
from dezero.core import sub
from dezero.core import rsub
from dezero.core import mul
from dezero.core import div
from dezero.core import neg
from dezero.core import pow
class Sin(Function):
    """Element-wise sine; its derivative is cos(x)."""

    def forward(self, x):
        xp = cuda.get_array_module(x)
        return xp.sin(x)

    def backward(self, gy):
        x, = self.inputs
        return gy * cos(x)


def sin(x):
    """Element-wise sine."""
    return Sin()(x)
class Cos(Function):
    """Element-wise cosine; its derivative is -sin(x)."""

    def forward(self, x):
        xp = cuda.get_array_module(x)
        return xp.cos(x)

    def backward(self, gy):
        x, = self.inputs
        return gy * -sin(x)


def cos(x):
    """Element-wise cosine."""
    return Cos()(x)
class Tanh(Function):
    """Element-wise tanh; backward uses 1 - y^2 with the cached output."""

    def forward(self, x):
        xp = cuda.get_array_module(x)
        return xp.tanh(x)

    def backward(self, gy):
        y = self.outputs[0]()  # dereference the weakref to the forward output
        return gy * (1 - y * y)


def tanh(x):
    """Element-wise hyperbolic tangent."""
    return Tanh()(x)
class Sum(Function):
    """Sum over the given axis/axes; backward broadcasts gy back to x's shape."""

    def __init__(self, axis, keepdims):
        self.axis = axis          # None, int, or tuple of ints
        self.keepdims = keepdims  # whether forward keeps reduced dims as size 1

    def forward(self, x):
        self.x_shape = x.shape  # remembered for backward
        y = x.sum(axis=self.axis, keepdims=self.keepdims)
        return y

    def backward(self, gy):
        # Reinsert the reduced axes as size-1 dims, then broadcast.
        gy = utils.reshape_sum_backward(gy, self.x_shape, self.axis,
                                        self.keepdims)
        gx = broadcast_to(gy, self.x_shape)
        return gx

# NOTE(review): this Sum/sum pair is redefined identically later in this
# module; that later definition wins at import time.  Shadows builtin sum.
def sum(x, axis=None, keepdims=False):
    return Sum(axis, keepdims)(x)
def reshape_sum_backward(gy, x_shape, axis, keepdims):
    """Reshape *gy* so it can be broadcast back to *x_shape* after a sum.

    Args:
        gy (dezero.Variable): Gradient from the sum's output.
        x_shape (tuple): Shape of the sum's input.
        axis (None or int or tuple of ints): Axis argument used at forward.
        keepdims (bool): Keepdims argument used at forward.

    Returns:
        dezero.Variable: Gradient with size-1 dims reinserted where needed.
    """
    ndim = len(x_shape)
    if axis is None:
        axes = None
    elif isinstance(axis, tuple):
        axes = axis
    else:
        axes = (axis,)

    if ndim == 0 or axes is None or keepdims:
        # No dims were squeezed away; gy's shape already lines up.
        target_shape = gy.shape
    else:
        # Reinsert a length-1 dimension at every reduced axis (normalising
        # negative axes first).
        target_shape = list(gy.shape)
        for pos in sorted(a if a >= 0 else a + ndim for a in axes):
            target_shape.insert(pos, 1)
    return gy.reshape(target_shape)
class Reshape(Function):
    """Shape change; backward reshapes the gradient back to the input shape."""

    def __init__(self, shape):
        self.shape = shape  # target shape

    def forward(self, x):
        self.x_shape = x.shape
        return x.reshape(self.shape)

    def backward(self, gy):
        return reshape(gy, self.x_shape)


def reshape(x, shape):
    """Reshape x; identity (as Variable) when the shape already matches."""
    if x.shape == shape:
        return as_variable(x)
    return Reshape(shape)(x)
class Transpose(Function):
    """Full-axis transpose; its gradient is another transpose."""

    def forward(self, x):
        xp = cuda.get_array_module(x)
        return xp.transpose(x)

    def backward(self, gy):
        return transpose(gy)


def transpose(x):
    """Transpose all axes of x."""
    return Transpose()(x)
class GetItem(Function):
    """Differentiable indexing/slicing: y = x[slices]."""

    def __init__(self, slices):
        self.slices = slices  # any valid ndarray index expression

    def forward(self, x):
        y = x[self.slices]
        return y

    def backward(self, gy):
        x, = self.inputs
        f = GetItemGrad(self.slices, x.shape)
        return f(gy)

class GetItemGrad(Function):
    """Gradient of GetItem: scatter-adds gy into zeros of the input's shape."""

    def __init__(self, slices, in_shape):
        self.slices = slices
        self.in_shape = in_shape

    def forward(self, gy):
        xp = dezero.cuda.get_array_module(gy)
        gx = xp.zeros(self.in_shape, dtype=gy.dtype)
        # Accumulate (not assign) so repeated indices sum their gradients.
        if xp is np:
            np.add.at(gx, self.slices, gy)
        else:
            xp.scatter_add(gx, self.slices, gy)  # CuPy's equivalent of np.add.at
        return gx

    def backward(self, ggx):
        # Gradient of the scatter is reading the same slices back out.
        return get_item(ggx, self.slices)
# NOTE(review): duplicate of the Sum/sum defined earlier in this module;
# being later, this definition is the one in effect at import time.
class Sum(Function):
    """Sum over axis/axes; backward broadcasts gy back to the input shape."""

    def __init__(self, axis, keepdims):
        self.axis = axis          # None, int, or tuple of ints
        self.keepdims = keepdims  # whether forward keeps reduced dims

    def forward(self, x):
        self.x_shape = x.shape  # remembered for backward
        y = x.sum(axis=self.axis, keepdims=self.keepdims)
        return y

    def backward(self, gy):
        # Reinsert reduced dims as size-1 axes, then broadcast.
        gy = utils.reshape_sum_backward(gy, self.x_shape, self.axis,\
                                        self.keepdims)
        gx = broadcast_to(gy, self.x_shape)
        return gx

# Shadows the builtin `sum` inside this module.
def sum(x, axis=None, keepdims=False):
    f = Sum(axis, keepdims)
    return f(x)
class BroadcastTo(Function):
    """Broadcast x to a target shape; backward sums the gradient back down."""

    def __init__(self, shape):
        self.shape = shape

    def forward(self, x):
        self.x_shape = x.shape
        xp = cuda.get_array_module(x)
        return xp.broadcast_to(x, self.shape)

    def backward(self, gy):
        return sum_to(gy, self.x_shape)


def broadcast_to(x, shape):
    """Broadcast x to shape; identity (as Variable) when shapes already agree."""
    if x.shape == shape:
        return as_variable(x)
    return BroadcastTo(shape)(x)
class SumTo(Function):
    """Sum x down to a target shape (the adjoint of broadcasting)."""

    def __init__(self, shape):
        self.shape = shape

    def forward(self, x):
        self.x_shape = x.shape
        return utils.sum_to(x, self.shape)

    def backward(self, gy):
        return broadcast_to(gy, self.x_shape)


def sum_to(x, shape):
    """Sum-reduce x to shape; identity (as Variable) when shapes already agree."""
    if x.shape == shape:
        return as_variable(x)
    return SumTo(shape)(x)
class MatMul(Function):
    """Matrix product y = x @ W with the standard matmul gradients."""

    def forward(self, x, W):
        return x.dot(W)

    def backward(self, gy):
        x, W = self.inputs
        # dL/dx = gy @ W^T ; dL/dW = x^T @ gy
        return matmul(gy, W.T), matmul(x.T, gy)


def matmul(x, W):
    """Matrix multiplication of x and W."""
    return MatMul()(x, W)
class MeanSquaredError(Function):
    """Mean squared error: sum((x0 - x1)^2) / len(x0 - x1)."""

    def forward(self, x0, x1):
        diff = x0 - x1
        return (diff ** 2).sum() / len(diff)

    def backward(self, gy):
        x0, x1 = self.inputs
        diff = x0 - x1
        gx0 = gy * diff * (2. / len(diff))
        return gx0, -gx0


def mean_squared_error(x0, x1):
    """Mean squared error between x0 and x1 (averaged over the first axis)."""
    return MeanSquaredError()(x0, x1)
class Linear(Function):
    """Affine transform y = x @ W (+ b)."""

    def forward(self, x, W, b):
        y = x.dot(W)
        if b is not None:
            y += b
        return y

    def backward(self, gy):
        x, W, b = self.inputs
        # When no bias was supplied, b is a Variable whose .data is None.
        gb = None if b.data is None else sum_to(gy, b.shape)
        gx = matmul(gy, W.T)
        gW = matmul(x.T, gy)
        return gx, gW, gb

def linear(x, W, b=None):
    # Affine transform; pass b=None to skip the bias term.
    return Linear()(x, W, b)
def linear_simple(x, W, b=None):
    """Affine transform built from matmul/add primitives.

    Frees the intermediate product's array after the add -- only the graph
    edge is needed for backprop.
    """
    product = matmul(x, W)
    if b is None:
        return product
    out = product + b
    product.data = None  # release the intermediate ndarray to save memory
    return out
class Sigmoid(Function):
    """Logistic sigmoid computed via tanh."""

    def forward(self, x):
        xp = cuda.get_array_module(x)
        # Dead code removed: the original first computed 1/(1+exp(-x)) and
        # immediately overwrote it with this tanh-based form, which yields
        # the same value without the wasted pass over the array.
        y = xp.tanh(x * 0.5) * 0.5 + 0.5
        return y

    def backward(self, gy):
        y = self.outputs[0]()  # dereference the weakref to the cached output
        gx = gy * y * (1 - y)
        return gx


def sigmoid(x):
    """Element-wise logistic sigmoid."""
    return Sigmoid()(x)
def sigmoid_simple(x):
    """Naive sigmoid 1 / (1 + exp(-x)) built from primitive ops."""
    x = as_variable(x)
    return 1 / (1 + exp(-x))
def softmax1d(x):
    """Softmax of a 1-D input built from primitive ops.

    BUG FIX: the original called Function.exp / Function.sum, which do not
    exist as attributes of the Function class; use the module-level exp and
    sum ops instead (mirroring softmax_simple).
    """
    x = as_variable(x)
    y = exp(x)
    sum_y = sum(y)
    return y / sum_y
def softmax_simple(x, axis=1):
    """Softmax via exp/sum primitives, normalised along *axis*."""
    x = as_variable(x)
    exped = exp(x)
    return exped / sum(exped, axis=axis, keepdims=True)
class SoftmaxCrossEntropy(Function):
    """Fused softmax + cross-entropy loss over integer class labels."""

    def forward(self, x, t):
        xp = cuda.get_array_module(x)
        N = x.shape[0]  # batch size
        # Stable log-softmax via logsumexp.
        log_z = utils.logsumexp(x, axis=1)
        log_p = x - log_z
        # Pick each sample's log-probability at its true class.
        log_p = log_p[xp.arange(N), t.ravel()]
        y = -log_p.sum() / xp.float32(N)
        return y

    def backward(self, gy):
        x, t = self.inputs
        N, CLS_NUM = x.shape

        gy *= 1/N  # distribute the batch mean into the gradient
        y = softmax(x)
        # convert to one-hot
        xp = cuda.get_array_module(t.data)
        t_onehot = xp.eye(CLS_NUM, dtype=t.dtype)[t.data]
        # d/dx of softmax cross-entropy: (softmax(x) - onehot(t)) * gy
        y = (y - t_onehot) * gy
        return y

def softmax_cross_entropy(x, t):
    # x: (N, CLS_NUM) scores; t: integer class labels.
    return SoftmaxCrossEntropy()(x, t)
class Softmax(Function):
    """Softmax along a configurable axis (default 1)."""

    def __init__(self, axis=1):
        self.axis = axis

    def forward(self, x):
        xp = cuda.get_array_module(x)
        # Subtract the per-slice max for numerical stability.
        y = x - x.max(axis=self.axis, keepdims=True)
        y = xp.exp(y)
        y /= y.sum(axis=self.axis, keepdims=True)
        return y

    def backward(self, gy):
        y = self.outputs[0]()  # cached forward output (weakref)
        gx = y * gy
        sumdx = gx.sum(axis=self.axis, keepdims=True)
        gx -= y * sumdx
        return gx

def softmax(x, axis=1):
    # Normalised exponentials of x along `axis`.
    return Softmax(axis)(x)
def get_item(x, slices):
    """Differentiable x[slices]."""
    return GetItem(slices)(x)
class Clip(Function):
    """Clamp values into [x_min, x_max]; gradient flows only where unclipped."""

    def __init__(self, x_min, x_max):
        self.x_min = x_min
        self.x_max = x_max

    def forward(self, x):
        xp = cuda.get_array_module(x)
        return xp.clip(x, self.x_min, self.x_max)

    def backward(self, gy):
        x, = self.inputs
        inside = (x.data >= self.x_min) * (x.data <= self.x_max)
        return gy * inside


def clip(x, x_min, x_max):
    """Element-wise clamp of x into [x_min, x_max]."""
    return Clip(x_min, x_max)(x)
def softmax_cross_entropy_simple(x, t):
    """Cross-entropy of softmax(x) against integer labels t, from primitives."""
    xp = cuda.get_array_module(x)
    x, t = as_variable(x), as_variable(t)
    batch = x.shape[0]

    probs = clip(softmax(x), 1e-15, 1.0)  # floor probabilities to avoid log(0)
    log_probs = log(probs)
    picked = log_probs[xp.arange(batch), t.data]
    return -1 * sum(picked) / batch
def accuracy(y, t):
    """Classification accuracy of scores y against labels t.

    Works on raw .data, so the result is not differentiable.
    """
    y, t = as_variable(y), as_variable(t)
    predictions = y.data.argmax(axis=1).reshape(t.shape)
    hit_rate = (predictions == t.data).mean()
    return Variable(as_array(hit_rate))
class ReLU(Function):
    """Rectified linear unit: max(x, 0)."""

    def forward(self, x):
        xp = cuda.get_array_module(x)
        return xp.maximum(x, 0.0)

    def backward(self, gy):
        x, = self.inputs
        positive = x.data > 0
        return gy * positive


def relu(x):
    """Element-wise ReLU."""
    return ReLU()(x)
def dropout(x, dropout_ratio=0.5):
    """Inverted dropout: active only in training mode, identity at test time."""
    x = as_variable(x)
    if not dezero.Config.train:
        return x
    xp = cuda.get_array_module(x)
    mask = xp.random.rand(*x.shape) > dropout_ratio
    # Scale kept units so the expected activation matches test time.
    scale = xp.array(1.0 - dropout_ratio).astype(x.dtype)
    return x * mask / scale
|
import argparse
import subprocess
from dtran.dcat.api import DCatAPI
from funcs.readers.dcat_read_func import DATA_CATALOG_DOWNLOAD_DIR
import os
import shutil
from datetime import datetime
from datetime import timedelta
from pathlib import Path
from typing import Optional, Dict
import re
import xarray as xr
from netCDF4 import Dataset
from dtran import IFunc, ArgType
from dtran.ifunc import IFuncType
from dtran.metadata import Metadata
from zipfile import ZipFile
import logging, sys
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
class Gldas2CyclesFuncNew(IFunc):
    """Pipeline adapter that turns GLDAS NetCDF climate files plus soil files
    into Cycles model input; exec() delegates to the module-level
    gldas2cycles() routine.
    """

    # NOTE(review): id reads "topoflow4_climate_write_func" although this is
    # the GLDAS->Cycles adapter -- looks copy-pasted from another adapter;
    # confirm the intended registry id.
    id = "topoflow4_climate_write_func"
    description = """ A reader-transformation-writer multi-adapter.
    Creates Cycles input (weather and soil file zip) from GLDAS NetCDF (climate) files & Soil files.
    """
    # Declared adapter inputs; dates are "YYYY-MM-DD" strings (see exec ->
    # gldas2cycles, which parses them with that format).
    inputs = {
        "gldas_dataset_id": ArgType.String,
        "soil_dataset_id": ArgType.String,
        "gldas_elevation_file_path": ArgType.String,
        "start_date": ArgType.String,
        "end_date": ArgType.String,
        "batch_numdays": ArgType.Number,
        "bounding_box": ArgType.String,
        "output_path": ArgType.FilePath,
        "output_prefix": ArgType.String
    }
    outputs = {"output_files": ArgType.FilePath}
    friendly_name: str = "Gldas2CyclesNew"
    func_type = IFuncType.MODEL_TRANS
    # Example invocation values shown in the adapter catalog.
    example = {
        "gldas_dataset_id": "5babae3f-c468-4e01-862e-8b201468e3b5",
        "soil_dataset_id": "ac34f01b-1484-4403-98ea-3a380838cab1",
        "gldas_elevation_file_path": "/tmp/GLDASp4_elevation_025d.nc4",
        "start_date": "2000-01-01",
        "end_date": "2018-01-31",
        "batch_numdays": 14,
        "bounding_box": "21.533203125, -5.353521355337321, 51.943359375, 22.67484735118852",
        "output_path": "/tmp/output",
        "output_prefix": "output_prefix"
    }

    def __init__(
        self,
        gldas_dataset_id,
        soil_dataset_id,
        gldas_elevation_file_path,
        start_date,
        end_date,
        batch_numdays,
        bounding_box,
        output_path,
        output_prefix
    ):
        """Store the adapter configuration; no I/O happens until exec()."""
        self.gldas_dataset_id = gldas_dataset_id
        self.soil_dataset_id = soil_dataset_id
        self.gldas_elevation_file_path = gldas_elevation_file_path
        self.output_path = output_path
        self.output_prefix = output_prefix
        self.bounding_box = bounding_box
        self.end_date = end_date
        self.start_date = start_date
        self.batch_numdays = batch_numdays

    def validate(self) -> bool:
        # No input validation implemented; always reports success.
        return True

    def exec(self) -> dict:
        """Run the GLDAS->Cycles conversion and return the output file path(s)."""
        output_file = gldas2cycles(
            self.gldas_dataset_id,
            self.soil_dataset_id,
            self.gldas_elevation_file_path,
            self.start_date,
            self.end_date,
            self.batch_numdays,
            self.bounding_box,
            self.output_path,
            self.output_prefix
        )
        return {"output_files": output_file}

    def change_metadata(
        self, metadata: Optional[Dict[str, Metadata]]
    ) -> Dict[str, Metadata]:
        # Pass-through: this adapter does not alter pipeline metadata.
        return metadata
def convert_to_cycles_input(ds):
    """
    Resample GLDAS data for a location by 24 hours(1day), and convert to Cycles input
    """
    logging.debug("Reading variables from dataset..")
    (_prcp, _temp, _wind, _solar, _rh) = read_variables_from_dataset(ds)
    logging.debug("Finished reading variables from dataset..")

    logging.debug("Start resampling...")
    # Daily aggregation: means for flux-like variables, max/min for
    # temperature and relative humidity.
    daily = {
        name: var.resample(time="1D")
        for name, var in (("prcp", _prcp), ("temp", _temp), ("solar", _solar),
                          ("rh", _rh), ("wind", _wind))
    }
    prcp = daily["prcp"].mean().rename("PP")
    tx = daily["temp"].max().rename("TX")
    tn = daily["temp"].min().rename("TN")
    solar = daily["solar"].mean().rename("SOLAR")
    rhx = daily["rh"].max().rename("RHX")
    rhn = daily["rh"].min().rename("RHN")
    wind = daily["wind"].mean().rename("WIND")
    logging.debug("Finished resampling...")

    logging.debug("Doing unit conversions...")
    # NOTE(review): factors presumably convert to Cycles' expected units
    # (e.g. per-second fluxes to per-day, K to deg C, RH fraction to %) --
    # confirm against the Cycles weather-file spec.
    prcp *= 86400.0
    solar *= 86400.0 / 1.0e6
    rhx *= 100.0
    rhn *= 100.0
    tx -= 273.15
    tn -= 273.15
    logging.debug("Finished unit conversions...")

    # Year and day-of-year columns required by the Cycles weather format.
    year = prcp.time.dt.year.rename("YEAR")
    doy = prcp.time.dt.dayofyear.rename("DOY")

    logging.debug("Merge variables...")
    cycles_weather_ds = xr.merge([year, doy, prcp, tx, tn, solar, rhx, rhn, wind])
    cycles_weather_ds = cycles_weather_ds.reset_coords(names=["lat", "lon"], drop=True)
    logging.debug("Finished merging variables...")
    return cycles_weather_ds
def create_rh(nc):
    """
    Calculate RH (Relative Humidity) value from GLDAS data

    Adds an "rh" variable (fraction, capped at 1.0) to the dataset in place
    and returns the dataset.
    """
    _temp = nc["Tair_f_inst"]   # air temperature (Kelvin, per conversion below)
    _pres = nc["Psurf_f_inst"]  # surface pressure
    _spfh = nc["Qair_f_inst"]   # specific humidity

    # Saturation vapour pressure, then saturation and actual mixing ratios.
    # NOTE(review): xr.ufuncs was removed in newer xarray; np.exp works on
    # DataArrays directly if this is ever upgraded.
    es = 611.2 * xr.ufuncs.exp(17.67 * (_temp - 273.15) / (_temp - 273.15 + 243.5))
    ws = 0.622 * es / (_pres - es)
    w = _spfh / (1.0 - _spfh)

    nc["rh"] = w / ws
    # BUG FIX: clip() returns a new object -- the original discarded the
    # result, so RH values above 1.0 were never actually capped.
    nc["rh"] = nc["rh"].clip(max=1.0)
    return nc
def read_variables_from_dataset(nc):
    """
    Read/Calculate relevant variables from GLDAS dataset
    """
    create_rh(nc)  # ensure nc["rh"] exists before it is read below
    return (nc["Rainf_f_tavg"],   # precipitation
            nc["Tair_f_inst"],    # air temperature
            nc["Wind_f_inst"],    # wind speed
            nc["SWdown_f_tavg"],  # downward shortwave radiation
            nc["rh"])             # derived relative humidity
def get_geometry(bboxstr):
    """Parse an "x1, y1, x2, y2" bounding-box string into a GeoJSON Polygon
    dict; return None for None input or the wrong number of coordinates.
    """
    if bboxstr is None:
        return None
    parts = re.split(r"\s*,\s*", bboxstr)
    if parts is None or len(parts) != 4:
        return None
    x1, y1, x2, y2 = (float(p) for p in parts)
    # Rectangle ring, closed by repeating the first corner.
    ring = [[x1, y1], [x2, y1], [x2, y2], [x1, y2], [x1, y1]]
    return {"type": "Polygon", "coordinates": [ring]}
def load_gldas_dataset(gldas_files):
    """
    Load GLDAS files using XArray

    Returns a combined dataset holding only the six variables the conversion
    needs (all others are dropped to speed up loading), or None when no
    files are given.
    """
    if not gldas_files:
        return None  # the original fell through to an implicit None; made explicit
    needed = {'Rainf_f_tavg', 'Tair_f_inst', 'Wind_f_inst',
              'SWdown_f_tavg', 'Psurf_f_inst', 'Qair_f_inst'}
    # Probe one file for its variable list so the multi-file open can skip
    # everything not needed.  Unlike the original list.remove() chain, this
    # does not crash when a needed variable happens to be absent.
    probe = xr.open_dataset(gldas_files[0])
    drop_vars = [name for name in probe.data_vars.keys() if name not in needed]
    probe.close()
    return xr.open_mfdataset(gldas_files, drop_variables=drop_vars, chunks='auto')
def load_gldas_elevation_dataset(gldas_elevation_file):
    """
    Load GLDAS elevation dataset using XArray
    """
    # .load() pulls the data into memory so repeated .sel() lookups are cheap.
    return xr.open_dataset(gldas_elevation_file).load()
def gldas2cycles(
        gldas_dataset_id,
        soil_dataset_id,
        gldas_elevation_file_path,
        start_date,
        end_date,
        batch_numdays,
        bounding_box,
        output_path,
        output_prefix
):
    """
    Convert GLDAS weather data into Cycles input files.

    For every soil point found in the soil dataset, the nearest GLDAS grid
    point is looked up, GLDAS forcing data is converted into a Cycles weather
    file (one per distinct grid point), and a zip file pairing each soil file
    with its weather file is produced.

    :param gldas_dataset_id: data-catalog id of the GLDAS dataset
    :param soil_dataset_id: data-catalog id of the soil dataset
    :param gldas_elevation_file_path: path to the GLDAS elevation NetCDF file
    :param start_date: inclusive start date string, "YYYY-MM-DD"
    :param end_date: exclusive end date string, "YYYY-MM-DD"
    :param batch_numdays: number of days of GLDAS data to process per batch
    :param bounding_box: spatial filter used to select soil resources
    :param output_path: directory where weather/soil/zip files are written
    :param output_prefix: prefix used to name per-point output files
    :return: list of generated zip file names
    """
    start_date = datetime.strptime(start_date, "%Y-%m-%d")
    end_date = datetime.strptime(end_date, "%Y-%m-%d")
    soil_directory = DATA_CATALOG_DOWNLOAD_DIR + "/soil"
    gldas_directory = DATA_CATALOG_DOWNLOAD_DIR + "/gldas"
    if not os.path.exists(soil_directory):
        Path(soil_directory).mkdir(exist_ok=True, parents=True)
    if not os.path.exists(gldas_directory):
        Path(gldas_directory).mkdir(exist_ok=True, parents=True)

    # Download Soil Datasets & Get their Lat/Long
    geometry = get_geometry(bounding_box)
    soil_resources = DCatAPI.get_instance().find_resources_by_dataset_id(
        soil_dataset_id, start_time=start_date, end_time=end_date, geometry=geometry)
    logging.info("Downloading missing soil data..")
    coords = []
    cnt = 0
    for resource in soil_resources:
        meta = resource['resource_metadata']
        spatial_metadata = meta['spatial_coverage']
        if spatial_metadata['type'] == 'Point':
            cnt += 1
            lat = float(spatial_metadata['value']['y'])
            lon = float(spatial_metadata['value']['x'])
            meta['resource_type'] = '.zip'
            ofile = os.path.join(soil_directory, resource['resource_name'])
            if not os.path.exists(ofile):
                logging.debug(ofile)
                #FIXME: subprocess.check_call(f"wget -q \"{resource['resource_data_url']}\" -O {ofile}", shell=True, close_fds=False)
            coords.append((lat, lon, ofile, "%s-%.5f-%.5f" % (output_prefix, lat, lon)))

    #####################################################################
    ##
    ## Do the GLDAS to cycles conversion in batches of N number of days
    ## - Get a mapping of soil points to grid points first
    ## - For each batch of start-date/end-date, load GLDAS and create cycles inputs
    ## - Write to the cycles weather files
    ##
    #####################################################################
    logging.info("Loading GLDAS elevation data")
    gldas_elevation_ds = load_gldas_elevation_dataset(gldas_elevation_file_path)
    logging.info("GLDAS Elevation data loaded")

    num_soil_points = len(coords)
    logging.info(f"Fetching nearest GLDAS grid points for {num_soil_points} Soil points...")
    soil_weather_grid_points = {}
    memoize = {}
    for lat, lon, soil_path, fname in coords:
        logging.debug("Soil point: {0}, {1}".format(lat, lon))
        # Get closest GLDAS Grid point from Elevation Dataset
        loc_elevation_ds = gldas_elevation_ds.sel(lat=lat, lon=lon, method="nearest")
        # Get the Grid point location and elevation
        grid_lat = loc_elevation_ds.lat.values
        grid_lon = loc_elevation_ds.lon.values
        elevation = loc_elevation_ds.GLDAS_elevation.values[0]
        # Hemisphere-suffixed strings identify the grid point in file names
        if grid_lat < 0.0:
            lat_str = "%.2fS" % (abs(grid_lat))
        else:
            lat_str = "%.2fN" % (abs(grid_lat))
        if grid_lon < 0.0:
            lon_str = "%.2fW" % (abs(grid_lon))
        else:
            lon_str = "%.2fE" % (abs(grid_lon))
        soil_weather_grid_points[soil_path] = (grid_lat, grid_lon, elevation, lat_str, lon_str)
        # Use memoize to make sure that the same weather file isn't generated for different soil points
        if (lat_str, lon_str) not in memoize:
            memoize[(lat_str, lon_str)] = soil_path
        logging.debug(f"Closest grid point location: {grid_lat},{grid_lon}, elevation: {elevation}")
    gldas_elevation_ds.close()
    num_weather_points = len(memoize)
    logging.info(f"Done fetching nearest {num_weather_points} GLDAS grid points for {num_soil_points} Soil points")

    cur_start_date = start_date
    while cur_start_date < end_date:
        cur_end_date = cur_start_date + timedelta(days=batch_numdays)
        if cur_end_date > end_date:
            cur_end_date = end_date
        cur_end_date = cur_end_date - timedelta(minutes=1)  # So we don't get midnight file of next day
        logging.info(f"Fetching GLDAS files list for dates from {cur_start_date} to {cur_end_date}")
        logging.info("Downloading missing GLDAS files..")
        # Download GLDAS Datasets for the time period
        gldas_resources = DCatAPI.get_instance().find_resources_by_dataset_id(gldas_dataset_id, cur_start_date, cur_end_date)
        gldas_files = []
        for resource in gldas_resources:
            temporal_metadata = resource['resource_metadata']['temporal_coverage']
            gldas_date_str = temporal_metadata['start_time'].split("T")[0]
            gldas_date = datetime.strptime(gldas_date_str, "%Y-%m-%d")
            # GLDAS files are organized by year/day-of-year directories
            nc_path = "%s/%4.4d/%3.3d/" % (gldas_directory, gldas_date.timetuple().tm_year, gldas_date.timetuple().tm_yday)
            ofile = os.path.join(nc_path, resource['resource_name'])
            if not os.path.exists(nc_path):
                Path(nc_path).mkdir(parents=True, exist_ok=True)
            if not os.path.exists(ofile):
                logging.debug(ofile)
                subprocess.check_call(f"wget -q \"{resource['resource_data_url']}\" -O {ofile}", shell=True, close_fds=False)
            if os.path.exists(ofile):
                gldas_files.append(ofile)

        num_weather_files = len(gldas_files)
        logging.info(f"Loading GLDAS data from {num_weather_files} files..")
        gldas_ds = load_gldas_dataset(gldas_files)
        logging.info("Loaded GLDAS data")

        logging.info(f"Converting GLDAS data to cycles weather data for {num_weather_points} points..")
        for lat, lon, soil_path, fname in coords:
            logging.debug("Processing data for {0}, {1}".format(lat, lon))
            (grid_lat, grid_lon, elevation, lat_str, lon_str) = soil_weather_grid_points[soil_path]
            # Only produce cycles weather file for this point once (here we're doing it for only 1 soil path)
            # - We will later use the same weather file for creating the zip for all the soil points
            # - We do it this way because it is computationally expensive to create the cycles weather file
            grid_soil_path = memoize[(lat_str, lon_str)]
            if grid_soil_path != soil_path:
                continue

            # Load GLDAS data for the exact gridpoint location
            logging.debug(f"Loading GLDAS data for grid point {grid_lat}, {grid_lon}")
            loc_ds = gldas_ds.sel(lat=grid_lat, lon=grid_lon, time=slice(cur_start_date, cur_end_date)).load()
            logging.debug("Loaded gldas data for location")

            logging.debug("Converting to Cycles input data")
            # Convert to Cycles Input
            loc_by_day_ds = convert_to_cycles_input(loc_ds)
            logging.debug("Finished conversion to cycles input data")

            logging.debug("Converting weather input data to Pandas Dataframe...")
            loc_by_day_df = loc_by_day_ds.to_dataframe()
            # BUGFIX: sort_values returns a new frame; the original discarded
            # the result, leaving the rows unsorted.
            loc_by_day_df = loc_by_day_df.sort_values(by=['YEAR', 'DOY'])
            logging.debug("Finished converting to Dataframe")

            # Write the cycles input weather file
            Path(output_path).mkdir(parents=True, exist_ok=True)
            common_weather_fname = f"cycles_weather_{lat_str}_{lon_str}.weather"
            common_weather_file = os.path.join(output_path, common_weather_fname)
            logging.debug("Writing the cycles input file..")
            # Create the output weather file with its header if it doesn't exist
            if not os.path.exists(common_weather_file):
                with open(common_weather_file, "w") as outfp:
                    outfp.write("LATITUDE %.2f\n" % (grid_lat))
                    outfp.write("LONGITUDE %.2f\n" % (grid_lon))
                    outfp.write("ALTITUDE %.2f\n" % (elevation))
                    outfp.write("SCREENING_HEIGHT 2\n")
                    outfp.write("%-8s%-8s%-8s%-8s%-8s%-8s%-8s%-8s%-8s\n" % (
                        'YEAR', 'DOY', 'PP', 'TX', 'TN', 'SOLAR', 'RHX', 'RHN', 'WIND'
                    ))
            # Append this batch's rows to the weather file
            with open(common_weather_file, "a") as outfp:
                for index, row in loc_by_day_df.iterrows():
                    outfp.write("%-8.0f%-8.0f%-8.4f%-8.2f%-8.2f%-8.4f%-8.2f%-8.2f%-8.2f\n" % (
                        row['YEAR'], row['DOY'],
                        row['PP'], row['TX'], row['TN'],
                        row['SOLAR'], row['RHX'], row['RHN'],
                        row['WIND'])
                    )
        gldas_ds.close()
        # NOTE(review): the next batch starts at the minute-adjusted end, so
        # batch boundaries drift one minute earlier per batch — harmless for
        # 3-hourly GLDAS files, but confirm this matches intent.
        cur_start_date = cur_end_date
    logging.info(f"Done converting GLDAS data to cycles input weather file for {num_weather_points} points")

    logging.info(f"Creating {num_soil_points} cycles input zip files, each containing a weather and a soil file...")
    fnames = []
    # Create the Zip file for all soil points containing the soil file and the generated weather file
    for lat, lon, soil_path, fname in coords:
        if not os.path.exists(soil_path):
            continue
        (grid_lat, grid_lon, elevation, lat_str, lon_str) = soil_weather_grid_points[soil_path]
        # BUGFIX: this log line previously ran before the unpacking above and
        # so reported the grid point of the *previous* iteration.
        logging.debug(f"Creating Cycles zip file for {grid_lat}, {grid_lon}")
        weather_fname = fname + ".weather"
        soil_fname = fname + ".soil"
        zip_fname = fname + "_soil_weather.zip"
        common_weather_fname = f"cycles_weather_{lat_str}_{lon_str}.weather"
        tmp_soil_file = os.path.join(output_path, soil_fname)
        tmp_weather_file = os.path.join(output_path, weather_fname)
        common_weather_file = os.path.join(output_path, common_weather_fname)
        soil_weather_file = os.path.join(output_path, zip_fname)
        shutil.copyfile(soil_path, tmp_soil_file)
        shutil.copyfile(common_weather_file, tmp_weather_file)
        # Context manager guarantees the archive is closed even on error
        with ZipFile(soil_weather_file, 'w') as zipObj:
            zipObj.write(tmp_soil_file, soil_fname)
            zipObj.write(tmp_weather_file, weather_fname)
        logging.debug("Done writing cycles zip file")
        fnames.append(zip_fname)
    logging.info(f"Done Creating {num_soil_points} cycles input zip files")
    return fnames
|
# -*- coding: utf-8 -*-
"""jcast.main: Main function."""
import os
import datetime
import logging
from functools import partial
import tqdm
from jcast import params, fates, model
from jcast.junctions import Junction, RmatsResults
from jcast.annots import ReadAnnotations, ReadGenome
from jcast.sequences import Sequence
from jcast import __version__
def runjcast(args):
    """
    Main loop for the jcast flow.

    Sets up a timestamped output directory and logging, reads the rMATS
    results, the GTF annotation and the genome, optionally models the
    junction read-count cutoff, then translates every junction in each of
    the five rMATS result tables.

    :param args: parsed arguments
    :return: True on completion
    """
    # Get timestamp for out files
    now = datetime.datetime.now()
    write_dir = os.path.join(args.out, 'jcast_' + now.strftime('%Y%m%d%H%M%S'))
    os.makedirs(write_dir, exist_ok=True)

    # Main logger setup; propagate=False keeps messages out of the root logger
    main_log = logging.getLogger('jcast')
    main_log.propagate = False
    main_log.setLevel(logging.INFO)

    # create file handler which logs even debug messages
    fh = logging.FileHandler(os.path.join(write_dir, 'jcast_main.log'))
    fh.setLevel(logging.DEBUG)

    # create formatter and add it to the handlers
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)

    # add the handlers to the logger
    main_log.addHandler(fh)

    # console handler only surfaces warnings and above
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARNING)
    ch.setFormatter(formatter)
    main_log.addHandler(ch)

    main_log.info(args)
    main_log.info(__version__)

    #
    # Open the rMATS output file (MXE) here, rename the columns
    #
    assert os.path.exists(os.path.join(args.rmats_folder, 'MXE.MATS.JC.txt')), 'rMATS files not found, check directory.'
    rmats_results = RmatsResults(rmats_dir=args.rmats_folder)

    # Model read count cutoff
    #
    # Read the gtf file using the gtfpase package.
    # Then write as a pandas data frame.
    #
    gtf = ReadAnnotations(args.gtf_file)
    gtf.read_gtf()

    #
    # Read genome file into memory
    #
    genome = ReadGenome(args.genome)

    #
    # Model read count cutoff.
    # TODO: move this to a separate class
    #
    if args.model:
        main_log.info('The -m flag is set. The modeled read count will override -r --read values.')
        # Make a numpy array of all junction SJC sum counts
        rmats_results.get_junction_count_array()
        pt, gmm, min_count = model.gaussian_mixture(sum_sjc_array=rmats_results.sum_sjc_array)
        # Plot out the model
        model.plot_model(sum_sjc_array=rmats_results.sum_sjc_array,
                         pt=pt,
                         gmm=gmm,
                         min_count=min_count,
                         write_dir=write_dir,
                         filename='model',
                         )
    # If the m flag is not set, use the r argument value as min count
    else:
        min_count = args.read

    #
    # Main loop through every line of each of the five rMATS files to make junction object, then translate them
    #
    for rma in [rmats_results.rmats_mxe,
                rmats_results.rmats_se,
                rmats_results.rmats_ri,
                rmats_results.rmats_a5ss,
                rmats_results.rmats_a3ss,
                ]:
        # One Junction object per rMATS row
        junctions = [Junction(**rma.iloc[i].to_dict()) for i in range(len(rma))]

        # Bind the per-run invariants so the worker only needs the junction
        translate_one_partial = partial(_translate_one,
                                        gtf=gtf,
                                        genome=genome,
                                        args=args,
                                        write_dir=write_dir,
                                        pred_bound=min_count,
                                        )

        #
        # Concurrent futures
        #
        # import concurrent.futures
        # with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()-1) as pool:
        #     for i, f in enumerate(tqdm.tqdm(pool.map(
        #             translate_one_partial,
        #             junctions,
        #     ),
        #             total=len(junctions),
        #             desc='Processing {0} Junctions'.format(rma.jxn_type[0]),
        #     )):
        #         main_log.info('>>>>>> Doing {0} junction {1} for gene {2} {3}'.format(junctions[i].junction_type,
        #                                                                               junctions[i].name,
        #                                                                               junctions[i].gene_symbol,
        #                                                                               junctions[i].gene_id,
        #                                                                               ))
        #         main_log.info(f)

        #
        # Single threaded for-loop
        #
        for jx in tqdm.tqdm(junctions,
                            total=len(junctions),
                            desc='Processing {0} Junctions'.format(rma.jxn_type[0]),
                            ):
            main_log.info('>>>>>> Doing {0} junction {1} for gene {2} {3}'.format(jx.junction_type,
                                                                                  jx.name,
                                                                                  jx.gene_symbol,
                                                                                  jx.gene_id,
                                                                                  ))
            # _translate_one returns a fate string describing the outcome
            main_log.info(translate_one_partial(jx))

    return True
def _translate_one(junction,
                   gtf,
                   genome,
                   args,
                   write_dir,
                   pred_bound,
                   ):
    """
    Get coordinates and translate one junction; arguments are passed through
    partial from main.

    :param junction: Junction object built from one rMATS row
    :param gtf: ReadAnnotations object (parsed GTF annotation)
    :param genome: ReadGenome object (genome held in memory)
    :param args: parsed command-line arguments
    :param write_dir: output directory for fasta files
    :param pred_bound: read-count cutoff (modeled when -m is set, else -r)
    :return: a fate string from jcast.fates describing how the junction was handled
    """
    #
    # trim slice coordinates by translation starts and ends
    #
    junction.trim_cds(gtf)

    #
    # get translated phase from GTF. Note this should be done after trimming to get the
    # right frame in case the exon in question is trimmed by the coding start
    #
    junction.get_translated_phase(gtf)

    #
    # initiate a sequence object that copies most of the junction information
    #
    sequence = Sequence(junction)

    #
    # get nucleotide sequences of all slices using genome in memory
    # (anchor, alternative-1, alternative-2, downstream)
    # conjoin alternative exons to make slice 1 and 2,
    #
    sequence.make_slice_localgenome(genome.genome)

    #
    # translate to peptides
    #
    sequence.get_canonical_aa(gtf=gtf, genome_index=genome.genome)
    sequence.translate(use_phase=True)

    #
    # filter by junction read counts - discard junction if the min read count is below threshold
    #
    # If the -r argument is set directly and the -m flag is not, use the -r integer for count filtering
    # If the -m flag is set, use the modeled count for filtering
    if (not args.model and junction.sum_sjc <= args.read) or (args.model and junction.sum_sjc <= pred_bound):
        #
        # If the canonical flag is set, append the canonical
        # Sp to the gene_canonical output even if none of the transcript slices are stitchable
        # back to the canonical protein. This avoids not having any protein level representation
        # of a gene potentially in the proteome.
        #
        if args.canonical:
            sequence.write_canonical(outdir=write_dir)
        return fates.skipped_low

    #
    # discard junction if the corrected P value of this read count is < threshold
    # this removes junctions that are inconsistently found on both replicates.
    #
    q_lo, q_hi = args.qvalue
    if not q_lo <= junction.fdr <= q_hi:
        # Write canonical anyhow if the canonical flag is set.
        if args.canonical:
            sequence.write_canonical(outdir=write_dir)
        return fates.skipped_low

    #
    # write the Tier 1 and Tier 2 results into fasta file
    #
    if len(sequence.slice1_aa) > 0 and len(sequence.slice2_aa) > 0:
        # Tier 1: both translated without stop codon, no frameshift
        if not sequence.frameshift:
            # Do a function like this to extend with fasta, and then write if necessary.
            # TODO: instead of using Uniprot we should get the canonical exons from the GTF directly
            for slice_ in [1, 2]:
                sequence.stitch_to_canonical_aa(slice_to_stitch=slice_,
                                                slice_has_ptc=False)
            sequence.write_slices(
                outdir=write_dir,
                suffix='T1',
            )
            return fates.tier1.format(sequence.j.phase,
                                      sequence.translated_phase,
                                      )
        #
        # Tier 2: both translated without stop codon, but with one frameshift
        #
        elif sequence.frameshift:
            for slice_ in [1, 2]:
                sequence.stitch_to_canonical_aa(slice_to_stitch=slice_,
                                                slice_has_ptc=False)
                # 2020-07-30 if slice runs into a frame shift,
                # allows the opportunity to stitch N-terminus only
                if [sequence.slice1_stitched, sequence.slice2_stitched][slice_-1] is None:
                    sequence.stitch_to_canonical_aa(slice_to_stitch=slice_,
                                                    slice_has_ptc=True)
            sequence.write_slices(
                outdir=write_dir,
                suffix='T2',
            )
            return fates.tier2.format(sequence.j.phase,
                                      sequence.translated_phase,
                                      )
    #
    # Tier 3 - retrieved phase is different from PTC-free frame.
    #
    else:
        # Retry translation without the GTF-retrieved phase
        sequence.translate(use_phase=False)
        # after tier 3 translation, check if both slices are good
        if len(sequence.slice1_aa) > 0 and len(sequence.slice2_aa) > 0:
            for slice_ in [1, 2]:
                sequence.stitch_to_canonical_aa(slice_to_stitch=slice_,
                                                slice_has_ptc=False)
            sequence.write_slices(outdir=write_dir,
                                  suffix='T3',
                                  )
            return fates.tier3.format(sequence.j.phase,
                                      sequence.translated_phase,
                                      )

        #
        # Tier 4: if sequence is still not good, do Tier 4: One of the two slices hits stop codon.
        # write out the slice if it is at least a certain proportion (params.ptc_threshold) as long as the long slice.
        #
        # translate again after tier 3 to reset to tier 1/2 translation state (using retrieved phase)
        sequence.translate(use_phase=True,
                           log=False,
                           )
        # TODO: we should avoid translating twice.

        # force-translate through slice 2 if slice 2 hits PTC:
        if len(sequence.slice1_aa) > 0 and len(sequence.slice2_aa) == 0:
            forced_slice = 2
            sequence.stitch_to_canonical_aa(slice_to_stitch=1)
            sequence.translate_forced(slice_to_translate=forced_slice)
            # only keep the forced slice if it is long enough relative to slice 1
            if len(sequence.slice2_aa) / len(sequence.slice1_aa) >= params.ptc_threshold:
                sequence.stitch_to_canonical_aa(slice_to_stitch=2,
                                                slice_has_ptc=True)
                sequence.write_slices(outdir=write_dir,
                                      suffix='T4',
                                      )
                return fates.tier4.format(forced_slice)

        # force-translate through slice 1 if slice 1 hits PTC:
        elif len(sequence.slice2_aa) > 0 and len(sequence.slice1_aa) == 0:
            forced_slice = 1
            sequence.stitch_to_canonical_aa(slice_to_stitch=2)
            sequence.translate_forced(slice_to_translate=1)
            if len(sequence.slice1_aa) / len(sequence.slice2_aa) >= params.ptc_threshold:
                sequence.stitch_to_canonical_aa(slice_to_stitch=1,
                                                slice_has_ptc=True)
                sequence.write_slices(outdir=write_dir,
                                      suffix='T4',
                                      )
                return fates.tier4.format(forced_slice)

        #
        # if nothing works, write FAILURE fate
        #
        else:
            #
            # salvage the canonical sequence in the long slice if it matches Sp exactly.
            # note that this means if we identify a gene in RNA-seq, we will append the canonical
            # Sp to the gene_canonical output even if none of the transcript slices are stitchable
            # back to the canonical protein. This is to avoid not having any protein level representation
            # of a gene potentially in the proteome.
            #
            if args.canonical:
                sequence.write_canonical(outdir=write_dir)

    return fates.fail
def main():
    """Run jcast with parsed arguments from the command line.

    Builds the argument parser, prints help when invoked with no arguments,
    then dispatches to :func:`runjcast`.
    """
    import argparse
    import sys

    # BUGFIX: the original concatenated help strings lacked separating
    # spaces ("junctionsand", "transcriptslices"); the -o default shown in
    # the help text also disagreed with the actual default ('out').
    parser = argparse.ArgumentParser(description='jcast retrieves transcript splice junctions '
                                                 'and translates them into amino acid sequences')

    parser.add_argument('rmats_folder',
                        help='path to folder storing rMATS output',
                        )

    parser.add_argument('gtf_file',
                        help='path to Ensembl gtf file',
                        )

    parser.add_argument('genome',
                        help='path to genome file',
                        )

    # parser.add_argument('-n', '--num_threads', help='number of threads for concurrency [default: 6]',
    #                     default=6,
    #                     type=int)

    parser.add_argument('-o', '--out',
                        help='name of the output files [default: out]',
                        default='out')

    parser.add_argument('-r', '--read',
                        help='the lowest skipped junction read count for a junction to be translated [default: 1]',
                        default=1,
                        type=int,
                        )

    parser.add_argument('-m', '--model',
                        help='models junction read count cutoff using a Gaussian mixture model [default: False]',
                        action='store_true',
                        default=False,
                        )

    parser.add_argument('-c', '--canonical', help='write out canonical protein sequence even if transcript '
                                                  'slices are untranslatable [default: False]',
                        default=False,
                        action='store_true',
                        )

    parser.add_argument('-q', '--qvalue',
                        help='take junctions with rMATS fdr within this threshold [default: 0 1]',
                        metavar=('q_lo', 'q_hi'),
                        nargs=2,
                        default=[0, 1],
                        type=float)

    parser.set_defaults(func=runjcast)

    # print help message if no arguments are given
    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()

    # parse all the arguments
    args = parser.parse_args()

    # run the function in the argument
    args.func(args)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-08 15:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Attach addresses to people, rename the address field to location,
    and tidy up verbose names on Address/Account."""

    dependencies = [
        ('people', '0004_rename_address_to_location'),
        ('accounts', '0013_rename_accountaddress_to_address'),
    ]

    operations = [
        # New optional link from Address to Person for personal addresses
        migrations.AddField(
            model_name='address',
            name='person',
            field=models.ForeignKey(blank=True, help_text='For personal addresses not associated with library accounts.', null=True, on_delete=django.db.models.deletion.CASCADE, to='people.Person'),
        ),
        migrations.AlterField(
            model_name='address',
            name='account',
            field=models.ForeignKey(blank=True, help_text='Associated library account', null=True, on_delete=django.db.models.deletion.CASCADE, to='accounts.Account'),
        ),
        migrations.AlterField(
            model_name='address',
            name='care_of_person',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='care_of_addresses', to='people.Person'),
        ),
        # address.address -> address.location to avoid the name stutter
        migrations.RenameField(
            model_name='address',
            old_name='address',
            new_name='location',
        ),
        migrations.AlterModelOptions(
            name='address',
            options={'verbose_name_plural': 'Addresses'},
        ),
        migrations.AlterField(
            model_name='account',
            name='persons',
            field=models.ManyToManyField(blank=True, to='people.Person', verbose_name='Account holders(s)'),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
tests
~~~~~
Public Test Suite for `DataGator`_.
.. _`DataGator`: http://www.data-gator.com/
:copyright: 2015 by `University of Denver <http://pardee.du.edu/>`_
:license: Apache 2.0, see LICENSE for more details.
:author: `LIU Yu <liuyu@opencps.net>`_
:date: 2015/01/19
"""
from __future__ import unicode_literals
import os
import sys
from importlib import import_module
from pkgutil import iter_modules
from .config import unittest, to_native
# Public API of this package; the second assignment converts each exported
# name to the interpreter's native string type (py2/py3 compatibility shim).
__all__ = ['test_suite', ]
__all__ = [to_native(n) for n in __all__]
def load_tests(prefix="test_"):
    """Yield test modules in this package whose names start with *prefix*.

    Scans the package directory for importable modules and yields only those
    that define a callable ``test_suite`` attribute.

    :param prefix: module-name prefix used to select test modules
    """
    for _, name, _ in iter_modules([
            os.path.join(".", os.path.dirname(__file__))]):
        if not name.startswith(prefix):
            continue
        mod = import_module(".{0}".format(name), __package__)
        if not callable(getattr(mod, "test_suite", None)):
            continue
        yield mod
    # BUGFIX: the original ended with `raise StopIteration`, which PEP 479
    # (Python 3.7+) turns into a RuntimeError inside a generator. A generator
    # terminates naturally when its body returns.
def test_suite():
    """Aggregate the suites of every discovered test module into one suite."""
    combined = unittest.TestSuite()
    for module in load_tests("test_"):
        combined.addTest(module.test_suite())
    return combined
|
'''
Advent of Code - 2019
--- Day 7: Amplification Circuit ---
'''
from utils import *
from intcode import IntcodeRunner, HaltExecution
def parse_input(day):
    """Read the puzzle input for *day* and return its single line of integers."""
    records = day_input(day, integers)
    return records[0]
def part1(program):
    """
    Find the maximum thruster signal over all phase-setting permutations
    (0..4) for a serial chain of five amplifiers (AoC 2019 day 7, part 1).

    :param program: the Intcode program (list of integers)
    :return: the highest thruster signal observed
    """
    max_thruster = -1
    for p in permutations([0, 1, 2, 3, 4]):
        # One fresh Intcode generator per amplifier for this permutation
        amplifiers = [IntcodeRunner(program).run() for i in range(5)]
        # Prime each generator, then feed it its phase setting
        for i, amp in zip(p, amplifiers):
            next(amp)
            amp.send(i)
        val = 0
        while True:
            try:
                # Feed each amplifier's output into the next one in the chain
                for i, amp in enumerate(amplifiers):
                    val = amp.send(val)
                    # The last amplifier's (index 4) output is the thruster signal
                    # NOTE(review): a signal of 0 is ignored by the `val and` test
                    if val and i == 4:
                        if max_thruster < val:
                            max_thruster = val
            except StopIteration:
                #amplifiers.remove(amp)
                pass
            except HaltExecution:
                # All amplifiers done for this permutation; try the next one
                break
    return max_thruster
def part2(program):
    """
    Find the maximum thruster signal with the amplifiers wired in a feedback
    loop, phase settings 5..9 (AoC 2019 day 7, part 2).

    :param program: the Intcode program (list of integers)
    :return: the highest thruster signal observed
    """
    max_thruster = -1
    for p in permutations([5, 6, 7, 8, 9]):
        # NOTE(review): unlike part1, the runner receives the loop index —
        # presumably an amplifier id; confirm against IntcodeRunner's signature.
        amplifiers = [IntcodeRunner(program, i).run() for i in range(5)]
        # Prime each generator, then feed it its phase setting
        for i, amp in zip(p, amplifiers):
            next(amp)
            amp.send(i)
        val = 0
        e = amplifiers[4] #Amplifier E
        while True:
            try:
                for i, amp in enumerate(amplifiers):
                    val = amp.send(val)
                    # Amplifier E's output feeds the thrusters
                    if val and amp == e:
                        if max_thruster < val:
                            max_thruster = val
                    next(amp)
            except StopIteration:
                pass
            except HaltExecution:
                # Halted amplifiers leave the loop; stop when none remain
                amplifiers.remove(amp)
                if not amplifiers:
                    break
    return max_thruster
# Script entry point: parse the day-7 input and print both puzzle answers.
if __name__ == '__main__':
    data = parse_input('07')
    print(f'Part One: {part1(data)}')
    print(f'Part Two: {part2(data)}')
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import datetime
from datetime import timedelta
import os
import time
import pytz
from dotenv import load_dotenv
# Load environment variables from the local .env file before reading them.
load_dotenv(verbose=True)

#---------------------------#
# APPLICATION CONFIGURATION #
#---------------------------#

"""
Variable controls what timezone formatting we must apply to our UTC datetimes.
"""
# NOTE(review): os.getenv returns None if LOCAL_TIMEZONE_NAME is unset, which
# would make pytz.timezone raise — presumably .env always defines it; confirm.
LOCAL_TIMEZONE = pytz.timezone(os.getenv("LOCAL_TIMEZONE_NAME"))

"""
Variable controls where to save the `wpa_supplicant_conf` file to.
"""
WPA_SUPPLICANT_CONF = os.getenv("WPA_SUPPLICANT_CONF")

# Whether privileged commands should be run through sudo (read from .env).
SUDO_MODE = os.getenv("SUDO_MODE")
#-------------------#
# UTILITY FUNCTIONS #
#-------------------#
def getDT():
    """
    Function will return the current datetime aware of the local timezone.

    :return: a timezone-aware ``datetime`` converted to ``LOCAL_TIMEZONE``
    """
    # datetime.utcnow() is deprecated (Python 3.12) and returns a naive
    # datetime; requesting an aware UTC "now" directly is equivalent and
    # avoids the naive->aware replace() step.
    utc_aware_now_dt = datetime.datetime.now(pytz.utc)

    # Return our datetime converted to our local timezone.
    return utc_aware_now_dt.astimezone(LOCAL_TIMEZONE)
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2018-2019 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
# Diego Castan
from typing import TextIO
from typing import Union
from pyannote.core import Timeline
from pyannote.core import Annotation
from optparse import OptionParser
from pyannote.database import get_protocol
from pyannote.audio.features import Precomputed
from pyannote.audio.signal import Binarize
def write_txt(file: TextIO, output: Union[Timeline, Annotation]):
    """Write pipeline output to "txt" file

    Parameters
    ----------
    file : file object
    output : `pyannote.core.Timeline` or `pyannote.core.Annotation`
        Pipeline output
    """
    if isinstance(output, Timeline):
        # One "<uri> <start> <end>" line per segment
        file.writelines(
            f'{output.uri} {seg.start:.3f} {seg.end:.3f}\n' for seg in output
        )
        return

    if isinstance(output, Annotation):
        # One "<uri> <start> <end> <track> <label>" line per labelled track
        for seg, track, label in output.itertracks(yield_label=True):
            file.write(f'{output.uri} {seg.start:.3f} {seg.end:.3f} {track} {label}\n')
        return

    raise NotImplementedError(
        f'Dumping {output.__class__.__name__} instances to "txt" files '
        f'is not supported.'
    )
def main():
    """Binarize precomputed overlap scores and dump the regions to a text file.

    Command-line tool: takes a pyannote database/protocol name and a path to
    precomputed raw scores, applies an onset/offset threshold, and appends the
    resulting overlap regions to the output file.
    """
    usage = "%prog [options] database, raw_score_path"
    desc = "Write the output of the binary overlap detector into test based on a threshold"
    version = "%prog 0.1"
    parser = OptionParser(usage=usage, description=desc, version=version)
    parser.add_option("-t", "--onset", action="store", type="float", help="Onset Threshold", default=0.70)
    parser.add_option("-f", "--offset", action="store", type="float", help="Offset Threshold", default=0.70)
    parser.add_option("-d", "--dev", action="store_true", help="Print output based on development set", default=False)
    parser.add_option("-o", "--outputfile", action="store", type="string", help="Output file", default="./overlap.txt")
    (opt, args) = parser.parse_args()

    if len(args) != 2:
        parser.error("Incorrect number of arguments")
    database, raw_score_path = args

    # get test file of protocol
    protocol = get_protocol(database)

    # load precomputed overlap scores as pyannote.core.SlidingWindowFeature
    precomputed = Precomputed(raw_score_path)

    # initialize binarizer
    # onset / offset are tunable parameters (and should be tuned for better
    # performance). we use log_scale=True because of the final log-softmax in the
    # StackedRNN model
    binarize = Binarize(onset=opt.onset, offset=opt.offset, log_scale=True)

    # The dev/test branches previously duplicated the whole loop body;
    # select the split once and share a single loop instead.
    split = protocol.development() if opt.dev else protocol.test()

    # `with` guarantees the output file is closed even if scoring fails
    with open(opt.outputfile, 'a+') as fw:
        for test_file in split:
            ovl_scores = precomputed(test_file)

            # binarize overlap scores to obtain overlap regions as pyannote.core.Timeline
            ovl_regions = binarize.apply(ovl_scores, dimension=1)
            ovl_regions.uri = test_file['uri']

            # write the output into text
            write_txt(fw, ovl_regions)
# Script entry point.
if __name__=="__main__":
    main()
|
# Generated by Django 4.0.3 on 2022-03-26 09:05
import django.core.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration for the shop users app: a Profile model and a custom
    ShopUser model carrying Django's standard auth fields."""

    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=30, validators=[django.core.validators.MinLengthValidator(2)])),
                ('last_name', models.CharField(max_length=30, validators=[django.core.validators.MinLengthValidator(2)])),
                ('picture', models.URLField()),
                ('date_of_birth', models.DateField(blank=True, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('email', models.EmailField(blank=True, max_length=254, null=True)),
                ('gender', models.CharField(blank=True, choices=[('Male', 'Male'), ('Female', 'Female'), ('Do not show', 'Do not show')], default='Do not show', max_length=11, null=True)),
            ],
        ),
        # Custom user model: mirrors AbstractUser's fields with a shorter
        # username and staff enabled by default.
        migrations.CreateModel(
            name='ShopUser',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('username', models.CharField(max_length=25, unique=True)),
                ('is_staff', models.BooleanField(default=True)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
        ),
    ]
|
#!/usr/bin/env python
#
import sys, cpp, kernel, glob, os, re, getopt
from defaults import *
from utils import *
noUpdate = 1
def cleanupFile( path, original_path):
    """reads an original header and perform the cleanup operation on it
    this functions returns the destination path and the clean header
    as a single string"""
    # check the header path
    src_path = path

    if not os.path.exists(src_path):
        if noUpdate:
            panic( "file does not exist: '%s'\n" % path )
        # NOTE(review): "exit" in this message looks like a typo for "exist"
        sys.stderr.write( "warning: file does not exit: %s\n" % path )
        return None, None

    if not os.path.isfile(src_path):
        if noUpdate:
            panic( "path is not a file: '%s'\n" % path )
        sys.stderr.write( "warning: not a file: %s\n" % path )
        return None, None

    # reject headers that live outside the 'original' tree
    if os.path.commonprefix( [ src_path, original_path ] ) != original_path:
        if noUpdate:
            panic( "file is not in 'original' directory: %s\n" % path );
        sys.stderr.write( "warning: file not in 'original' ignored: %s\n" % path )
        return None, None

    # make src_path relative to the 'original' root
    src_path = src_path[len(original_path):]
    if len(src_path) > 0 and src_path[0] == '/':
        src_path = src_path[1:]

    if len(src_path) == 0:
        panic( "oops, internal error, can't extract correct relative path\n" )

    # convert into destination path, extracting architecture if needed
    # and the corresponding list of known static functions
    #
    arch = None
    re_asm_arch = re.compile( r"asm-([\w\d_\+\.\-]+)(/.*)" )
    m = re_asm_arch.match(src_path)
    statics = kernel_known_generic_statics
    if m and m.group(1) != 'generic':
        # arch-specific header: goes under arch-<name>/asm/ and picks up the
        # architecture's known static functions in addition to the generics
        dst_path = "arch-%s/asm/%s" % m.groups()
        arch = m.group(1)
        statics = statics.union( kernel_known_statics.get( arch, set() ) )
    else:
        dst_path = "common/" + src_path

    dst_path = os.path.normpath( kernel_cleaned_path + "/" + dst_path )

    # now, let's parse the file
    #
    blocks = cpp.BlockParser().parseFile(path)
    if not blocks:
        sys.stderr.write( "error: can't parse '%s'" % path )
        sys.exit(1)

    # macros assumed defined while simplifying preprocessor conditionals;
    # arch-specific defaults are layered on top of the common set
    macros = kernel_known_macros.copy()
    if arch and arch in kernel_default_arch_macros:
        macros.update(kernel_default_arch_macros[arch])

    # run the cleanup passes in order: simplify conditionals, strip
    # non-exported declarations, rewrite tokens, then compact the output
    blocks.optimizeMacros( macros )
    blocks.optimizeIf01()
    blocks.removeVarsAndFuncs( statics )
    blocks.replaceTokens( kernel_token_replacements )
    blocks.removeComments()
    blocks.removeMacroDefines( kernel_ignored_macros )
    blocks.removeWhiteSpace()

    # emit the disclaimer header followed by the cleaned content
    out = StringOutput()
    out.write( kernel_disclaimer )
    blocks.writeWithWarning(out, kernel_warning, 4)
    return dst_path, out.get()
# Script entry point (Python 2): clean one or more kernel headers, either
# printing the result (default) or updating the cleaned tree in place (-u).
if __name__ == "__main__":

    def usage():
        # Print command-line help and exit with an error status.
        print """\
usage:  %s [options] <header_path>

    options:
        -v    enable verbose mode

        -u    enabled update mode
            this will try to update the corresponding 'clean header'
            if the content has changed. with this, you can pass more
            than one file on the command-line

        -k<path>  specify path of original kernel headers
        -d<path>  specify path of cleaned kernel headers

    <header_path> must be in a subdirectory of 'original'
""" % os.path.basename(sys.argv[0])
        sys.exit(1)

    try:
        optlist, args = getopt.getopt( sys.argv[1:], 'uvk:d:' )
    except:
        # unrecognized option
        sys.stderr.write( "error: unrecognized option\n" )
        usage()

    for opt, arg in optlist:
        if opt == '-u':
            noUpdate = 0
        elif opt == '-v':
            verbose = 1
            D_setlevel(1)
        elif opt == '-k':
            kernel_original_path = arg
        elif opt == '-d':
            kernel_cleaned_path = arg

    if len(args) == 0:
        usage()

    if noUpdate:
        # dry-run mode: print each cleaned header to stdout and exit
        for path in args:
            dst_path, newdata = cleanupFile(path,kernel_original_path)
            print newdata
        sys.exit(0)

    # now let's update our files.
    b = BatchFileUpdater()

    for path in args:
        dst_path, newdata = cleanupFile(path,kernel_original_path)
        if not dst_path:
            continue
        b.readFile( dst_path )
        r = b.editFile( dst_path, newdata )
        # editFile result: 0 = unchanged, 1 = edited, otherwise newly added
        if r == 0:
            r = "unchanged"
        elif r == 1:
            r = "edited"
        else:
            r = "added"
        print "cleaning: %-*s -> %-*s (%s)" % ( 35, path, 35, dst_path, r )

    b.updateGitFiles()
    sys.exit(0)
|
from flask_restful import Resource, reqparse
from models.localOpcionesLocal import localOpcionesLocalModel
class localOpcionesController(Resource):
    """REST resource that links a local (venue) with its set of options."""

    def post(self):
        """Create the local/opciones link.

        Expects integer 'id_local' and 'id_opciones' in the request body.
        Returns 500 with an error message if persistence fails, 200 otherwise.
        """
        parser = reqparse.RequestParser()
        parser.add_argument(
            'id_local',
            type=int,
            required=True,
            help="Falta el id del local"
        )
        parser.add_argument(
            'id_opciones',
            type=int,
            required=True,
            help="Falta el id de las opciones"
        )
        data = parser.parse_args()
        try:
            localOpcionesLocalModel(data['id_local'], data['id_opciones']).guardar_en_la_bd()
        except Exception:
            # BUGFIX: was a bare ``except:`` which would also swallow
            # SystemExit/KeyboardInterrupt; only genuine errors should map
            # to the 500 response.
            return {
                'message': 'Hubo un error al vincular el local con sus opciones, intente nuevamente'}, 500
        return {
            'message': 'Se guardo exitosamente las opciones del local'}, 200
import cv2
import numpy as np
from t2i.entity import *
from t2i.script import *
from t2i.assetBook import *
from t2i.endpointResolver import *
from t2i.animate import *
from threading import Lock, Thread
import sys
# Canvas dimensions (pixels) used when arranging scene islands.
CANVAS_WIDTH = 800
CANVAS_HEIGHT = 600
class StaticVisualGraph:
    """Assigns each entity an (x, y, layer) position on the canvas.

    Entities become VisualNodes linked by offsets derived from their verbs and
    prepositions; connected components ("islands") are found by traversal and
    then packed horizontally across the canvas.
    """

    class VisualNode:
        """Graph node wrapping one entity plus its placement bookkeeping."""

        def __init__(self, entity):
            self.entity = entity
            self.isRoot = True
            self.rootOffset = None  # Define offsets to be vectors to CENTER of image
            self.otherOffsets = {}  # neighbor VisualNode -> np offset vector
            self.reverse = []       # nodes that point at this node
            self.onIsland = False

    class Island:
        """A connected component of nodes plus its bounding extents."""

        def __init__(self, root):
            self.root = root
            self.leftExtent = 0
            self.rightExtent = 0
            self.upExtent = 0
            self.downExtent = 0
            self.nodes = []
            # Seed the extents from the root's image, when it has one.
            if root.entity.eImage.image is not None:
                self.leftExtent = -root.entity.eImage.width/2
                self.rightExtent = root.entity.eImage.width/2
                self.upExtent = root.entity.eImage.height/2
                self.downExtent = -root.entity.eImage.height/2

        def getDimensions(self):
            """Return (width, height) of this island's bounding box."""
            return (self.rightExtent - self.leftExtent, self.upExtent - self.downExtent)

        def __repr__(self):
            return '\n<\n\tisland: {0}\n\tleft: {1}\n\tright: {2}\n\tup: {3}\n\tdown: {4}\n>'.format(
                self.root.entity, self.leftExtent, self.rightExtent, self.upExtent, self.downExtent)

    def __init__(self, entities):
        # Demotes other entities if they are not roots.
        # If what was once a root was demoted, it is instead no longer a root
        # and the subject gets promoted instead.
        self.map = {}
        for entity in entities:
            self.map[entity.text] = self.VisualNode(entity)
            # print(entity)
        for entity in entities:
            forwards = []
            for obj in entity.objs:
                if obj in entities:
                    forwards.append(self.map[obj.text])
            self.map[entity.text].forward = forwards
        self.nodeList = self.map.values()
        self.islands = []

    def AssignLocations(self):
        """Run the full placement pipeline over the graph."""
        self.GetForwardRootsAndReverse()
        self.NodeToNodeOffsets()
        self.CreateIslands()
        self.ArrangeIslands()
        print("SVG Done")

    def GetForwardRootsAndReverse(self):
        """Mark non-root nodes, build reverse edges, and collect the roots."""
        for node in self.nodeList:
            for child in node.forward:
                child.isRoot = False
                child.reverse.append(node)
        self.roots = list(filter(lambda x: x.isRoot, self.nodeList))

    def NodeToNodeOffsets(self):
        """Compute the pairwise offset vector for every forward edge."""
        for node in self.nodeList:
            for verb, prep, otherNode in zip(node.entity.baseVerbs, node.entity.preps, node.forward):
                offset = self.SelectOffsetTogether(verb, prep)
                print("Offset: ")
                print(offset.tolist())
                # Scale the unit offset by image width; fall back to
                # SMALL_WIDTH when either endpoint lacks an image.
                if not (node.entity.eImage.image is not None and otherNode.entity.eImage.image is not None):
                    offset *= SMALL_WIDTH
                else:
                    offset *= (node.entity.eImage.width + otherNode.entity.eImage.width)/2
                # Layer (z) stays a unit step regardless of scaling.
                if offset[2] > 0:
                    offset[2] = 1
                elif offset[2] < 0:
                    offset[2] = -1
                node.otherOffsets[otherNode] = offset
                otherNode.otherOffsets[node] = -1*offset  # graph is now bidirectional
def SumSq(offset):
    """Return the sum of squared components of *offset*.

    Used as a distance metric; smaller values favor denser packing.
    """
    total = 0
    for component in offset:
        total += component ** 2
    return total
class Visualizer:
    """Top-level driver: turns a text body into arranged, animated scenes."""

    def __init__(self, width=CANVAS_WIDTH, height=CANVAS_HEIGHT):
        self.width = width
        self.height = height
        self.assetBook = AssetBook()
        self.script = Script()
        self.animator = Animator()
        self.visualScript = []
        # self.lock_ = Lock()

    def DrawStoryWithCallback(self, textBody, callBackFunc):
        # Process textBody, stream each arranged scene to callBackFunc,
        # then reset the per-story state so the instance can be reused.
        self.GetAssets(textBody)
        self.StreamScenes(callBackFunc)
        print("Finished stream")
        self.visualScript = []
        self.script = Script()

    def GetAssets(self, textBody):
        # Parse the text into entities and attach an image to each one.
        self.script.processEntities(textBody)
        self.script.ResolveAdjectives()
        self.script.CreateContinuum()
        print("============== VISUALIZER ==============")
        self.visualScript = self.script.continuum
        for entityList in self.visualScript:
            for entity in entityList:
                self.assetBook.attachImageToEntity(entity)
                if entity.eImage.image is None:
                    print("Could not find image for entity: " + entity.text)

    def StreamScenes(self, callBackFunc):
        # Arrange each scene (static layout, then animation) and hand it off.
        # self.lock_.acquire()
        for entityList in self.visualScript:
            self.ArrangeStaticScene(entityList)
            self.ArrangeDynamicScene(entityList)
            # print(entityList)
            callBackFunc(entityList)  # Should add in asynchronous processing here
        # self.lock_.release()

    def ArrangeStaticScene(self, entityList):
        # Assign static positions/layers via the visual graph; mutates
        # the entities' eImage fields in place.
        graph = StaticVisualGraph(entityList)
        graph.AssignLocations()
        return entityList

    # Set default sizes and positions of
    def ArrangeDynamicScene(self, entityList):
        # Creates animation objects from animate.py, uses them to parameterize functions
        # which are then attached to the imageEntities
        self.animator.assignAnimations(entityList, 500)
        return entityList

    def ServeFileTitleAndMotion(self, textBody):
        # Like DrawStoryWithCallback, but returns per-scene lists of
        # (image path, eager animation, size, layer) tuples instead of
        # invoking a callback.
        self.GetAssets(textBody)
        ret = []
        for entityList in self.visualScript:
            self.ArrangeStaticScene(entityList)
            self.ArrangeDynamicScene(entityList)
            sublist = []
            for entity in entityList:
                if entity.eImage.image is not None:
                    print(entity.eImage.path, entity.eImage.layer)
                    size = (entity.eImage.width, entity.eImage.height)
                    sublist.append((entity.eImage.path, entity.eImage.animateFunc.eager(), size, int(entity.eImage.layer)))
            ret.append(sublist)
        return ret
def staticShow(entity):
    """Print a one-entity summary: its text, then its image or "Not Found"."""
    print("Entity: " + entity.text)
    if entity.eImage.image is not None:
        print(entity.eImage)
    else:
        print("Not Found")
def staticShowMultiple(entityList):
    """Print a static summary for every entity in *entityList*."""
    for item in entityList:
        staticShow(item)
if __name__ == "__main__":
    # Use the first CLI argument as the story text, or a small default sample.
    if len(sys.argv) > 1:
        textBody = sys.argv[1]
    else:
        textBody = ("Jake was a policeman during his younger years.")
    v = Visualizer()
    v.DrawStoryWithCallback(textBody, staticShowMultiple)
    # v.ServeFileTitleAndMotion(textBody)
    # textBody = "The cat sat near the man."
    # v.DrawStoryWithCallback(textBody, staticShowMultiple)
|
# Brokerage model and account type:
# NOTE(review): this fragment references `self` at top level — presumably it
# belongs inside a QuantConnect QCAlgorithm.Initialize() method; confirm.
self.SetBrokerageModel(BrokerageName.InteractiveBrokersBrokerage, AccountType.Cash)
# Add securities and if required set custom margin models
# (BUGFIX: was a C#-style "//" comment, which is a SyntaxError in Python)
spy = self.AddEquity("SPY")  # Default to minute bars
spy.MarginModel = PatternDayTradingMarginModel()
|
import pygame
# local imports
from constants import *
# TODO: I have some old code commented out, which I will delete when I'm sure I don't need it.
class Sensor(pygame.sprite.Sprite):
    """A 1x1-pixel collision probe positioned at a fixed offset from the player.

    Inspired by the Sonic Physics Guide (see links in method comments).
    """

    def __init__(self, player_rect, relative_position, sensor_state, inactive_color=GRAY, active_color=WHITE):
        # Call the parent's constructor
        super().__init__()
        # Sensor is active when it detects collision with a solid object
        self.activated = False
        # State keeps track of whether it's a floor, wall, or ceiling sensor
        self.state = sensor_state
        # Sensor's x/y determined via offset of player's x/y
        self.relative_position = relative_position
        # Local variables to make self.rect definition more human-readable,
        # although these aren't really necessary:
        relative_x = self.relative_position[0]
        relative_y = self.relative_position[1]
        width = 1
        height = 1
        # Define the rectangle
        self.rect = pygame.Rect(player_rect.x + relative_x,
                                player_rect.y + relative_y, width, height)
        # Colors help us distinguish the sensors with our eyes in debug mode
        self.active_color = active_color
        self.inactive_color = inactive_color
        # Image allows us to create a collision mask with pygame
        self.image = pygame.Surface([self.rect.width, self.rect.height])
        # Mask colors help us generate the collision mask
        self.image.fill(MASK_COLOR)
        self.image.set_colorkey(MASK_BG_COLOR)
        # And here's our collision mask!
        self.mask = pygame.mask.from_surface(self.image)

    # Sensors require the player's rectangle in order to update their positions
    def update(self, player_rect):
        # align sensors relative to player position
        self.rect.x = player_rect.x + self.relative_position[0]
        self.rect.y = player_rect.y + self.relative_position[1]

    """http://info.sonicretro.org/SPG:Solid_Tiles#Height_Masks
    If the height value found is 16px ($10), that's the entire tile filled at that X position, so
    then the sensor has to check for another tile above the first one found,
    and search for that one's height value. """

    def collide(self, platform):
        # Returns (collided, height): height is the vertical distance from
        # the sensor's bottom edge to the collision point, or None on miss.
        collision_coordinates = pygame.sprite.collide_mask(platform, self)
        if collision_coordinates:  # If collision happened
            # http://info.sonicretro.org/SPG:Solid_Tiles#Height_Masks
            absolute_collision = (platform.rect.x + collision_coordinates[0], platform.rect.y + collision_coordinates[1])
            height = self.rect.bottom - absolute_collision[1]
            return True, height
        return False, None

    # TODO: function should detect platforms nearby, not just ones directly collided with.
    # NOTE(review): method name "detect_plaforms" looks like a typo for
    # "detect_platforms"; renaming would break existing callers, so it is kept.
    def detect_plaforms(self, platforms):
        # What's the best way to check only the adjacent tiles to the sensor?
        # We want to avoid checking every tile on the map because that's a waste of processing
        height = 0
        collision_count = 0
        for platform in platforms:
            collided, new_height = self.collide(platform)
            if collided:
                collision_count += 1
                self.activated = True
                # http://info.sonicretro.org/SPG:Solid_Tiles#Reaction
                # Once a tile has been found, it's 'height' (or position horizontally for horizontal sensors)
                # will be returned for Sonic to use to re-position himself.
                height = new_height
        if collision_count == 0:
            self.activated = False
        return height
|
from collections import deque
import logging
import sys
import threading
import time
from src.grip.metaclasses import (
CallableStartMetaclass,
ThreadSafeSingleton
)
# minimum representable positive normalized float
MIN_FLOAT = sys.float_info.min
class GRIP_Deque(deque):
    """
    The GRIP_Deque encapsulates a domain-specific deque and its behavior
    """

    def __init__(self, iterable=(), maxlen=None):
        """
        Params:
        * iterable (any iterable): Optionally provide the data to use to initialize this deque.
          (BUGFIX: the default was a mutable ``[]``; an immutable ``()`` avoids the
          shared-mutable-default pitfall while behaving identically.)
        * maxlen (int): Optionally indicate the maximum number of items that this instance's deque can hold
          at one time. If maxlen is None, this instance can hold an unbounded number of items to be processed.
          If this instance is bounded, then only maxlen number of items can be waiting to be processed, but
          this maxlen limit does not prevent more items from being added to the deque later; this can be
          useful if the items to be processed are resource intensive and you want to prevent out-of-memory
          issues because it will prevent too many things from being queued up and consuming
          resources while waiting to be processed.
        """
        super().__init__(iterable, maxlen)

    def get_next_item(self):
        """
        Returns the next item in the deque. Override this method to change from the default queue behavior
        to stack behavior by changing popleft() to pop().

        This will raise an IndexError if the deque is empty. Any exceptions should be handled by the code
        that calls this method, not inside this method.
        """
        return self.popleft()

    def append_item(self, item):
        """
        Append the item to the deque. Override this by changing append() to appendleft() if you require
        different behavior than the default queue-like FIFO.
        """
        self.append(item)
class GenericRateLimitedItemProcessor(threading.Thread, metaclass=ThreadSafeSingleton):
    """
    This thread-safe singleton processes items in a rate-limited manner once you call its start() method
    """

    def __init__(self,
                 iterable=(),
                 maxlen=None,
                 num_items=None,
                 num_seconds=None,
                 name=None,
                 daemon=None):
        """
        Params:
        * iterable (any iterable): Optionally provide the data to use to initialize this deque.
          (BUGFIX: the default was a mutable ``[]``; an immutable ``()`` avoids the
          shared-mutable-default pitfall while behaving identically.)
        * maxlen (int): Optionally indicate the maximum number of items that this instance's deque of items to
          process can hold at one time. If maxlen is None, this instance can hold an unbounded number of items.
        * num_items (int): Optionally indicate the number of items to process in num_seconds.
          If num_items is None, then the items are processed as fast as possible with no rate limiting.
          If num_items is a positive integer, then that number of items will be processed in num_seconds
          in an evenly distributed manner.
        * num_seconds (int or float): Optionally indicate the number of seconds in which to process num_items.
        * name (string): Optionally name this thread.
        * daemon (boolean): Optionally explicitly set whether this item processor is daemonic. If this is
          left as None, the thread inherits its daemonic property from the current thread.
        """
        super().__init__(name=name, daemon=daemon)
        self.items_to_process = GRIP_Deque(iterable=iterable, maxlen=maxlen)
        self.successfully_started = GRIP_Deque()
        self.unsuccessfully_started = GRIP_Deque()
        # default sleep time is 0 seconds, meaning items will be processed as quickly as possible
        self.sleep_time = 0
        # if num_items is a positive int and num_seconds is a positive int or float,
        # calculate the amount of time to sleep between processing items by dividing
        # num_seconds by num_items. For example, if you want to process 100 items in
        # 1 second, then the sleep time between items will be 0.01 seconds (1 / 100)
        if (num_items is not None and num_seconds is not None) \
                and isinstance(num_items, int) \
                and isinstance(num_seconds, (int, float)) \
                and num_items > 0 \
                and num_seconds > 0:
            self.sleep_time = num_seconds / num_items

    def run(self):
        """
        Because this is an instance of threading.Thread, we need to define a run() method so when start()
        is called to kick off the processing, something actually happens.
        """
        while True:
            try:
                item = self.items_to_process.get_next_item()
                try:
                    item.start()
                    self.successfully_started.append_item(item)
                except Exception:
                    logging.exception(f'The item {item} could not be processed and was moved '
                                      f'to the unsuccessfully_started deque for later examination.')
                    self.unsuccessfully_started.append_item(item)
                # this is how the rate limiting happens
                time.sleep(self.sleep_time)
            # If the items_to_process deque is empty when the next item
            # is requested, the deque will raise an IndexError so if
            # this happens, we return to cause the thread to finish running
            except IndexError:
                return
class GenericItem(metaclass=CallableStartMetaclass):
    """
    This class definition exists in order to have a non-abstract class that uses the CallableStartMetaclass

    No actual implementation is needed since the only thing we need this class for is to
    use it for type checking with a call to isinstance(thing, GenericItem) to ensure thing
    implements a start() method
    """
    pass
|
from port import db
class Permission:
    # NOTE(review): this looks like a SQLAlchemy model, but it does not inherit
    # from db.Model, so the db.Column attribute is inert — confirm whether
    # `class Permission(db.Model)` was intended.
    id = db.Column(db.Integer, primary_key=True)
class UserPermission:
    # NOTE(review): association table linking users to permissions; like
    # Permission above, it does not inherit from db.Model, so these columns
    # are inert — confirm whether `class UserPermission(db.Model)` was intended.
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('User.id'))
    permission_id = db.Column(db.Integer, db.ForeignKey('Permission.id'))
|
"""
Test that breakpoints do not affect stepping.
Check for correct StopReason when stepping to the line with breakpoint
which chould be eStopReasonBreakpoint in general,
and eStopReasonPlanComplete when breakpoint's condition fails.
"""
from __future__ import print_function
import unittest2
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class StepOverBreakpointsTestCase(TestBase):
    # Tests that stepping over/through lines containing breakpoints reports
    # the correct stop reason and does not bump hit counts for breakpoints
    # whose conditions evaluate false.

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        # Build the inferior, set four breakpoints (two with a false
        # condition), launch, and stop at breakpoint 1.
        TestBase.setUp(self)
        self.build()
        exe = self.getBuildArtifact("a.out")
        src = lldb.SBFileSpec("main.cpp")

        # Create a target by the debugger.
        self.target = self.dbg.CreateTarget(exe)
        self.assertTrue(self.target, VALID_TARGET)

        # Setup four breakpoints, two of them with false condition
        self.line1 = line_number('main.cpp', "breakpoint_1")
        self.line4 = line_number('main.cpp', "breakpoint_4")

        self.breakpoint1 = self.target.BreakpointCreateByLocation(src, self.line1)
        self.assertTrue(
            self.breakpoint1 and self.breakpoint1.GetNumLocations() == 1,
            VALID_BREAKPOINT)

        self.breakpoint2 = self.target.BreakpointCreateBySourceRegex("breakpoint_2", src)
        self.breakpoint2.GetLocationAtIndex(0).SetCondition('false')

        self.breakpoint3 = self.target.BreakpointCreateBySourceRegex("breakpoint_3", src)
        self.breakpoint3.GetLocationAtIndex(0).SetCondition('false')

        self.breakpoint4 = self.target.BreakpointCreateByLocation(src, self.line4)

        # Start debugging
        self.process = self.target.LaunchSimple(
            None, None, self.get_process_working_directory())
        self.assertIsNotNone(self.process, PROCESS_IS_VALID)
        self.thread = lldbutil.get_one_thread_stopped_at_breakpoint(self.process, self.breakpoint1)
        self.assertIsNotNone(self.thread, "Didn't stop at breakpoint 1.")

    def test_step_instruction(self):
        # Single-step by instruction from breakpoint_1 to breakpoint_4 and
        # verify the conditional breakpoints do not interrupt stepping.

        # Count instructions between breakpoint_1 and breakpoint_4
        contextList = self.target.FindFunctions('main', lldb.eFunctionNameTypeAuto)
        self.assertEquals(contextList.GetSize(), 1)
        symbolContext = contextList.GetContextAtIndex(0)
        function = symbolContext.GetFunction()
        self.assertTrue(function)
        instructions = function.GetInstructions(self.target)
        addr_1 = self.breakpoint1.GetLocationAtIndex(0).GetAddress()
        addr_4 = self.breakpoint4.GetLocationAtIndex(0).GetAddress()

        # if third argument is true then the count will be the number of
        # instructions on which a breakpoint can be set.
        # start = addr_1, end = addr_4, canSetBreakpoint = True
        steps_expected = instructions.GetInstructionsCount(addr_1, addr_4, True)
        step_count = 0
        # Step from breakpoint_1 to breakpoint_4
        while True:
            self.thread.StepInstruction(True)
            step_count = step_count + 1
            self.assertEquals(self.process.GetState(), lldb.eStateStopped)
            self.assertTrue(self.thread.GetStopReason() == lldb.eStopReasonPlanComplete or
                            self.thread.GetStopReason() == lldb.eStopReasonBreakpoint)
            if (self.thread.GetStopReason() == lldb.eStopReasonBreakpoint):
                # we should not stop on breakpoint_2 and _3 because they have false condition
                self.assertEquals(self.thread.GetFrameAtIndex(0).GetLineEntry().GetLine(), self.line4)
                # breakpoint_2 and _3 should not affect step count
                self.assertTrue(step_count >= steps_expected)
                break

        # Run the process until termination
        self.process.Continue()
        self.assertEquals(self.process.GetState(), lldb.eStateExited)

    @skipIf(bugnumber="llvm.org/pr31972", hostoslist=["windows"])
    def test_step_over(self):
        # Step over source lines: false-conditioned breakpoints must report
        # eStopReasonPlanComplete; the real breakpoint eStopReasonBreakpoint.
        #lldb.DBG.EnableLog("lldb", ["step","breakpoint"])
        self.thread.StepOver()
        # We should be stopped at the breakpoint_2 line with stop plan complete reason
        self.assertEquals(self.process.GetState(), lldb.eStateStopped)
        self.assertEquals(self.thread.GetStopReason(), lldb.eStopReasonPlanComplete)

        self.thread.StepOver()
        # We should be stopped at the breakpoint_3 line with stop plan complete reason
        self.assertEquals(self.process.GetState(), lldb.eStateStopped)
        self.assertEquals(self.thread.GetStopReason(), lldb.eStopReasonPlanComplete)

        self.thread.StepOver()
        # We should be stopped at the breakpoint_4
        self.assertEquals(self.process.GetState(), lldb.eStateStopped)
        self.assertEquals(self.thread.GetStopReason(), lldb.eStopReasonBreakpoint)
        thread1 = lldbutil.get_one_thread_stopped_at_breakpoint(self.process, self.breakpoint4)
        self.assertEquals(self.thread, thread1, "Didn't stop at breakpoint 4.")

        # Check that stepping does not affect breakpoint's hit count
        self.assertEquals(self.breakpoint1.GetHitCount(), 1)
        self.assertEquals(self.breakpoint2.GetHitCount(), 0)
        self.assertEquals(self.breakpoint3.GetHitCount(), 0)
        self.assertEquals(self.breakpoint4.GetHitCount(), 1)

        # Run the process until termination
        self.process.Continue()
        self.assertEquals(self.process.GetState(), lldb.eStateExited)
|
#!/usr/bin/env python3
import asyncio
import importer
# Interactive entry point: prompt for connection details and run the import.
print("""
Welcome to the Saleor Importer!
To import data into Saleor, I will need three pieces of information from you:
1. The URL of your Saleor API
2. An API token (see https://docs.saleor.io/docs/dashboard/configuration/service-accounts)
3. The path to a file containing the data to import (link to format To Be Provided)
""")

url = input('URL: ')
token = input('API Token: ')
filepath = input('Path to File: ')

# BUGFIX: the original rebound the name `importer`, shadowing the imported
# module; bind the instance to a distinct name instead.
saleor_importer = importer.Importer(importer.Api(url, token), filepath)
output_file = asyncio.run(saleor_importer.process())
print("Results are in: {}".format(output_file.name))
|
# Generated by Django 3.1rc1 on 2020-07-29 13:03
from django.db import migrations, models
class Migration(migrations.Migration):
    # Alters parameters.Parameter.value to a nullable JSONField with help text.

    dependencies = [
        ('parameters', '0002_auto_20200729_1236'),
    ]

    operations = [
        migrations.AlterField(
            model_name='parameter',
            name='value',
            field=models.JSONField(help_text='Value or descriptive information.', null=True),
        ),
    ]
|
import hashlib
from typing import List, Union, Tuple
from utils import read_data
INPUT = read_data()
def check_hash(door: str, index: int) -> Union[None, Tuple[str, str]]:
    """MD5 ``door + index`` and test for an "interesting" hash (AoC 2016 day 5).

    Returns (7th hex char, 6th hex char) when the digest starts with five
    zeros, else None. The 6th char is the part-one password character and the
    part-two position; the 7th char is the part-two character.

    BUGFIX (annotations/naming only): the return annotation said
    Tuple[int, int] but the values are single hex *str* characters, and the
    local variable shadowed the builtin ``hash``.
    """
    text = "%s%s" % (door, index)
    digest = hashlib.md5(text.encode('utf-8')).hexdigest()
    if digest.startswith("00000"):
        # print("Match found for text %s, hexdigest %s" % (text, digest))
        return digest[6], digest[5]
    else:
        return None
def update_password(passwd_list: List[str], character: str, position: int):
    """Place *character* into *passwd_list* at *position* if that slot is
    still the '_' placeholder; ignore non-integer or out-of-range positions.

    Returns True once the list contains no remaining '_' placeholders.
    """
    try:
        slot = int(position)
    except ValueError:
        # Hex digits don't always translate to ints. Ignore ones that don't.
        return False
    if not 0 <= slot <= 7:
        # Out of bounds positions do nothing
        return False
    if passwd_list[slot] == '_':
        passwd_list[slot] = character
    return '_' not in passwd_list
def part_one(data: str) -> str:
    """Build the 8-character password from the 6th hex char of each
    successive "interesting" hash of *data*."""
    password = []
    index = 0
    while len(password) < 8:
        result = check_hash(data, index)
        index += 1
        if result is not None:
            password.append(result[1])
    return "".join(password)
def part_two(data: str) -> str:
    """Fill the 8 password slots positionally: for each interesting hash the
    6th hex char is the position and the 7th is the character."""
    slots = ['_', '_', '_', '_', '_', '_', '_', '_']
    index = 0
    complete = False
    while not complete:
        result = None
        while result is None:
            result = check_hash(data, index)
            index += 1
        complete = update_password(slots, result[0], result[1])
        # print("Current password is %s" % "".join(slots))
    return "".join(slots)
# Script driver: solve both parts against the puzzle input.
print("Part one password is %s" % part_one(INPUT))
print("Part two password is %s" % part_two(INPUT))
|
from subprocess import CalledProcessError # noqa: F401
class PackageResolutionError(Exception):
    """Raised when a package cannot be resolved."""
    pass
class VersionError(Exception):
    """Raised for version-related failures."""
    pass
class PackageVersionError(PackageResolutionError, VersionError):
    """A version error encountered while resolving a package."""
    pass
class ToolNotFoundError(LookupError):
    """Raised when a required external tool cannot be found."""
    pass
|
import multiprocessing
import ISE
import time
import numpy as np
graph_info = ISE.read_graph_info('NetPHY.txt')
graph = ISE.Graph(graph_info)
# graph = None
def influence_spread_computation_IC_Mu(seeds, n=multiprocessing.cpu_count()):
    """Parallel Monte-Carlo estimate of the average IC influence spread.

    Splits roughly 10000 simulations across n worker tasks and returns the
    mean spread per simulation.
    """
    pool = multiprocessing.Pool()
    results = []
    # Samples per worker; int(10000 / n) truncates, so the true total number
    # of simulations run is n * sub (not necessarily 10000).
    sub = int(10000 / n)
    for i in range(n):
        result = pool.apply_async(influence_spread_computation_IC, args=(seeds, sub))
        results.append(result)
    pool.close()
    pool.join()
    influence = 0
    for result in results:
        influence = influence + result.get()
    # BUGFIX: average over the number of samples actually run (n * sub)
    # rather than a hard-coded 10000, which biases the mean whenever n
    # does not divide 10000 evenly.
    return influence / (n * sub)
# graph : Graph
# seed : list
# sample_num : int
# graph : Graph
# seed : list
# sample_num : int
# use multiple thread
def influence_spread_computation_IC(seeds, sample_num=10000):
    """Monte-Carlo simulation of the Independent Cascade model.

    Runs sample_num cascades from `seeds` over the module-level `graph`
    and returns the TOTAL number of activated nodes across all samples
    (callers divide by the sample count to get the average).
    """
    influence = 0
    for i in range(sample_num):
        # BFS frontier for this cascade, seeded with the initial nodes.
        node_list = list()
        node_list.extend(seeds)
        # checked[i] == 1 means node (i + 1) has already been activated.
        checked = np.zeros(graph.node_num)
        for node in node_list:
            checked[node - 1] = 1
        while len(node_list) != 0:
            current_node = node_list.pop(0)
            influence = influence + 1
            children = graph.get_children(current_node)
            for child in children:
                if checked[child - 1] == 0:
                    # Each edge fires independently with its own probability.
                    if ISE.happen_with_prop(graph.get_weight(current_node, child)):
                        checked[child - 1] = 1
                        node_list.append(child)
    return influence
if __name__ == '__main__':
    # NOTE(review): Python 2 syntax (print statements) — will not run under
    # Python 3 without conversion.
    # Benchmark the parallel estimator against the single-process one on a
    # fixed seed set.
    seeds = [13541,
             15303,
             5192,
             22764,
             22762,
             25580,
             5551,
             17527,
             25578,
             13439]
    start = time.time()
    print influence_spread_computation_IC_Mu(seeds)
    run_time = (time.time() - start)
    print run_time
    start = time.time()
    print influence_spread_computation_IC(seeds, 10000) / 10000
    run_time = (time.time() - start)
    print run_time
|
# Generated by Django 3.2.7 on 2021-10-06 11:07
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the asharinggroupmember field 'asharinggroup' to 'agroup'.

    dependencies = [
        ('eduhub', '0024_asharingcontent_agroup'),
    ]

    operations = [
        migrations.RenameField(
            model_name='asharinggroupmember',
            old_name='asharinggroup',
            new_name='agroup',
        ),
    ]
|
from styx_msgs.msg import TrafficLight
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
import rospy
from yolo.utils import get_yolo_boxes, makedirs
from yolo.bbox import draw_boxes
from keras.models import load_model
import keras as kf
#Minimum probability for object detection correctness
MIN_SCORE = 0.90
class YoloDetector(object):
    """Traffic-light classifier backed by a YOLO Keras model.

    predict() returns (TrafficLight state, annotated image).
    """

    def __init__(self):
        self.net_h, self.net_w = 416, 416  # a multiple of 32, the smaller the faster
        self.obj_thresh, self.nms_thresh = 0.5, 0.45
        # Index-aligned label -> draw color (BGR) -> TrafficLight value.
        self.labels = ["green", "red", "yellow"]
        self.colors_bgr = [(0,255,0), (0,0,255), (0,255,255)]
        self.light_value = [TrafficLight.GREEN, TrafficLight.RED, TrafficLight.YELLOW]
        # YOLO anchor boxes (width,height pairs) used for box decoding.
        self.anchors = [55,69, 75,234, 133,240, 136,129, 142,363, 203,290, 228,184, 285,359, 341,260]
        CWD_PATH = os.getcwd()
        yolo_model_path = os.path.join(CWD_PATH, 'yolo', 'trafficlight.h5')
        self.infer_model = load_model(yolo_model_path)
        # Capture the TF graph so predict() can run from ROS callback threads.
        self.graph = tf.get_default_graph()

    def predict(self, image):
        # Run YOLO, keep the single highest-scoring detection, and return
        # its light state plus the image annotated with that box.
        with self.graph.as_default():
            boxes = get_yolo_boxes(self.infer_model, [image], self.net_h, self.net_w, self.anchors, self.obj_thresh, self.nms_thresh)[0]
        label_idx = -1
        max_score = 0.
        target_box = None
        for box in boxes:
            if box.get_score() > max_score:
                max_score = box.get_score()
                label_idx = box.get_label()
                target_box = box
        if label_idx == -1:
            # No detection above threshold.
            return TrafficLight.UNKNOWN, image
        cv2.rectangle(image, (target_box.xmin, target_box.ymin), (target_box.xmax, target_box.ymax), self.colors_bgr[label_idx], 3)
        #return self.light_value[label_idx], draw_boxes(image, boxes, self.labels, self.obj_thresh)
        return self.light_value[label_idx], image
class TFDetector(object):
    """Traffic-light detector using a frozen TensorFlow object-detection graph."""

    def __init__(self):
        # Name of the directory containing the object detection module we're using
        MODEL_NAME = 'inference_graph'
        # Number of classes the object detector can identify
        NUM_CLASSES = 3
        self.labels = ['red', 'yellow', 'green']
        self.colors_bgr = [(0,0,255), (0,255,255),(0,255,0)]
        # Class ids emitted by the detection graph, parallel to self.labels.
        self.label_idx_map = [1, 2, 3]
        self.light_value = [TrafficLight.RED, TrafficLight.YELLOW, TrafficLight.GREEN]
        # Grab path to current working directory
        CWD_PATH = os.getcwd()
        # Path to frozen detection graph .pb file, which contains the model that is used
        # for object detection.
        PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph_sym.pb')
        #if is_site == True:
        #    PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph_real.pb')
        rospy.loginfo("TL_C: Load graph from: " + PATH_TO_CKPT)
        # Load the Tensorflow model into memory.
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        # Cap per-process GPU memory so other processes can share the device.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
        self.sess = tf.Session(graph=detection_graph, config=tf.ConfigProto(gpu_options=gpu_options))
        # Define input and output tensors (i.e. data) for the object detection classifier
        # Input tensor is the image
        self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        # Output tensors are the detection boxes, scores, and classes
        # Each box represents a part of the image where a particular object was detected
        self.detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represents level of confidence for each of the objects.
        # The score is shown on the result image, together with the class label.
        self.detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
        # Number of objects detected
        self.num_detections = detection_graph.get_tensor_by_name('num_detections:0')
        '''
        '''
        self.net_h, self.net_w = 416, 416 # a multiple of 32, the smaller the faster
        self.obj_thresh, self.nms_thresh = 0.5, 0.45

    def predict(self, image):
        """Run the detector on a single image.

        Returns (TrafficLight.<state>, image annotated with the winning box),
        or (TrafficLight.UNKNOWN, image) when no detection reaches MIN_SCORE.
        """
        #if image is None or image.shape[0]!=600 or image.shape[1]!=800:
        #    return TrafficLight.UNKNOWN, None
        # Expand image dimensions to have shape: [1, None, None, 3], i.e. a
        # batch of one image, as expected by the detection graph.
        image_expanded = np.expand_dims(image, axis=0)
        # Perform the actual detection by running the model with the image as input
        (boxes, scores, classes, num) = self.sess.run(
            [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
            feed_dict={self.image_tensor: image_expanded})
        # Find best score among the detections of the single batch element.
        if boxes is not None and len(boxes) > 0:
            scores = scores[0]
            boxes = boxes[0]
            classes = classes[0]
            max_score_idx = np.argmax(scores)
            # NOTE(review): np.argmax never returns -1, so the first clause of
            # this condition is always true; only the MIN_SCORE test matters.
            if max_score_idx!=-1 and scores[max_score_idx] >= MIN_SCORE:
                cl = int(classes[max_score_idx])
                score =int(scores[max_score_idx]*100)
                label_idx = self.label_idx_map.index(cl)
                label = self.labels[label_idx]
                rospy.loginfo("TL_C: Found light: "+label + " " + str(score)+"%")
                #rospy.loginfo(boxes[max_score_idx])
                box = boxes[max_score_idx]
                # The code below treats the box as [ymin, xmin, ymax, xmax]
                # fractions of the image; scale to pixel coordinates in place.
                box[0] = int(box[0]*image.shape[0])
                box[1] = int(box[1]*image.shape[1])
                box[2] = int(box[2]*image.shape[0])
                box[3] = int(box[3]*image.shape[1])
                # Add rect on the input image for visualization.
                cv2.rectangle(image,(box[1],box[0]),(box[3], box[2]),self.colors_bgr[label_idx],3)
                #cv2.putText(image, label + " " + str(score)+"'%'",(box[1],int(box[0]-20)), cv2.FONT_HERSHEY_SIMPLEX , 0.5,colors_bgr[label_idx],1,cv2.LINE_AA)
                return self.light_value[label_idx], image
            else:
                rospy.loginfo("TL_C: no light found")
        return TrafficLight.UNKNOWN, image
class TLClassifier(object):
    """Facade selecting a traffic-light detector by deployment target."""

    def __init__(self, is_site):
        # Site (real-world) images go through the YOLO model; the simulator
        # uses the frozen TensorFlow object-detection graph.
        self.detector = YoloDetector() if is_site == True else TFDetector()
        print("TF:"+tf.__version__)
        print("Keras:"+kf.__version__)

    def get_classification(self, image):
        """Determines the color of the traffic light in the image
        Args:
            image (cv::Mat): image containing the traffic light
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        return self.detector.predict(image)
|
from django import template
from symposion.reviews.models import Review, ReviewAssignment
register = template.Library()


# NOTE(review): `assignment_tag` was deprecated in Django 1.9 and removed in
# Django 2.0; on newer Django this should be `simple_tag` — confirm the
# project's Django version before changing.
@register.assignment_tag(takes_context=True)
def review_assignments(context):
    """Return the ReviewAssignment objects owned by the requesting user."""
    user = context["request"].user
    return ReviewAssignment.objects.filter(user=user)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from PIL import Image
import os
from scipy import misc
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Process images of this size. Note that this differs from the original CIFAR
# image size of 32 x 32. If one alters this number, then the entire model
# architecture will change and any model would need to be retrained.
IMAGE_SIZE = 24

# Global constants describing the CIFAR-10 data set.
NUM_CLASSES = 10
# Dataset sizes: used below to derive the shuffle-queue minimum occupancy.
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 1000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 17
def read_image(filename_queue):
    """Reads and parses examples from CIFAR100 data files.

    Each queue entry is a space-delimited "image_path label" string; the image
    is loaded from disk as a PNG and the label parsed to int32.

    Recommendation: if you want N-way read parallelism, call this function
    N times. This will give you N independent Readers reading different
    files & positions within those files, which will give better mixing of
    examples.

    Args:
        filename_queue: A queue of strings with the filenames to read from.

    Returns:
        An object representing a single example, with the following fields:
            key: a scalar string Tensor with the image file path.
            label: an int32 Tensor with the label.
            uint8image: a [32, 32, 3] uint8 Tensor with the image data.
    """
    class CIFAR10Record(object):
        pass
    result = CIFAR10Record()
    # Split "path label" on the space delimiter; both columns come back as
    # strings and the label is converted to int32 below.
    result.key, label = tf.decode_csv(filename_queue.dequeue(), [[""], [""]], " ")
    # Extract raw PNG data as a string
    raw_contents = tf.read_file(result.key)
    # Decode raw data as a PNG. Defaults to uint8 encoding.
    result.uint8image = tf.image.decode_png(raw_contents)
    # TENSORFLOW BUG: image shape not statically determined, so force
    # it to have correct CIFAR100 dimensions
    result.uint8image.set_shape([32, 32, 3])
    # Convert the label string to an int32 scalar.
    result.label = tf.cast(tf.string_to_number(label), tf.int32)
    return result
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size):
    """Construct a queued batch of images and labels.

    Args:
        image: 3-D Tensor of [height, width, 3] of type.float32.
        label: 1-D Tensor of type.int32
        min_queue_examples: int32, minimum number of samples to retain
            in the queue that provides of batches of examples.
        batch_size: Number of images per batch.

    Returns:
        images: Images. 4D tensor of [batch_size, height, width, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.
    """
    # Create a queue that shuffles the examples, and then
    # read 'batch_size' images + labels from the example queue.
    num_preprocess_threads = 16
    # capacity = min + 3 batches gives the producers headroom without
    # letting the queue grow unboundedly.
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
    # Display the training images in the visualizer.
    tf.image_summary('images', images)
    return images, tf.reshape(label_batch, [batch_size])
def distorted_inputs(filename, data_dir, batch_size):
    """Construct distorted input for CIFAR training using the Reader ops.

    Args:
        filename: Path of a text file with one "image_path label" line per example.
        data_dir: Path to the CIFAR-10 data directory (unused here).
        batch_size: Number of images per batch.

    Returns:
        images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.
    """
    with open(filename) as listing:
        entries = [line.strip('\n') for line in listing.readlines()]
    # Queue of "path label" strings feeding the reader.
    entry_queue = tf.train.string_input_producer(entries)
    record = read_image(entry_queue)
    image = tf.cast(record.uint8image, tf.float32)
    # Training-time distortion: random IMAGE_SIZE x IMAGE_SIZE crop.
    cropped = tf.random_crop(image, [IMAGE_SIZE, IMAGE_SIZE, 3])
    # Normalize: subtract the mean and divide by the stddev of the pixels.
    whitened = tf.image.per_image_whitening(cropped)
    # Keep at least 40% of an epoch queued so shuffling mixes well.
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * 0.4)
    print ('Filling queue with %d CIFAR images before starting to train. '
           'This will take a few minutes.' % min_queue_examples)
    # Generate a batch of images and labels by building up a queue of examples.
    return _generate_image_and_label_batch(whitened, record.label,
                                           min_queue_examples, batch_size)
def testing_inputs(filename, data_dir, batch_size):
    """Construct input for CIFAR evaluation using the Reader ops.

    Args:
        filename: Path of a text file with one "image_path label" line per example.
        data_dir: Path to the CIFAR-10 data directory (unused here).
        batch_size: Number of images per batch.

    Returns:
        images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.
    """
    with open(filename) as listing:
        entries = [line.strip('\n') for line in listing.readlines()]
    # Queue of "path label" strings feeding the reader.
    entry_queue = tf.train.string_input_producer(entries)
    record = read_image(entry_queue)
    image = tf.cast(record.uint8image, tf.float32)
    # Deterministic central crop/pad for evaluation — no random distortion.
    resized = tf.image.resize_image_with_crop_or_pad(image,
                                                     IMAGE_SIZE, IMAGE_SIZE)
    # Normalize: subtract the mean and divide by the stddev of the pixels.
    whitened = tf.image.per_image_whitening(resized)
    # Keep at least 40% of the eval set queued for good mixing.
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_EVAL * 0.4)
    # Generate a batch of images and labels by building up a queue of examples.
    return _generate_image_and_label_batch(whitened, record.label,
                                           min_queue_examples, batch_size)
def read_image_unlabeled(filename_queue):
    """Read one unlabeled image record from the filename queue.

    Returns an object with `key` (file path), `uint8image` ([32, 32, 3] uint8
    tensor) and a dummy `label` fixed at 0 so the record mirrors the labeled
    reader's structure. The label must be ignored downstream.
    """
    class StatefarmRecord(object):
        pass

    record = StatefarmRecord()
    # Entries are "path <ignored>" pairs — only the path is kept.
    record.key, _ = tf.decode_csv(filename_queue.dequeue(), [[""], [""]], " ")
    # Load and decode the file as a PNG (uint8 by default).
    contents = tf.read_file(record.key)
    record.uint8image = tf.image.decode_png(contents)
    # Shape is not statically inferred by decode_png, so pin it explicitly.
    record.uint8image.set_shape((32, 32, 3))
    # Hacky placeholder label — ALWAYS IGNORE DURING COMPUTATION, since we
    # are dealing with unlabeled data.
    record.label = tf.cast(tf.string_to_number("0"), tf.int32)
    return record
def _generate_image_and_filename_batch(image, filename, min_queue_examples,
                                       batch_size):
    """Batch (image, filename) pairs for unlabeled data.

    Same shuffling scheme as _generate_image_and_label_batch, but the second
    element carries the source filename (string tensor) instead of a label.
    """
    # Create a queue that shuffles the examples, and then
    # read 'batch_size' images + filenames from the example queue.
    num_preprocess_threads = 16
    images, label_batch = tf.train.shuffle_batch(
        [image, filename],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
    # Display the training images in the visualizer.
    tf.image_summary('images', images)
    return images, tf.reshape(label_batch, [batch_size])
def unlabeled_inputs(filename, data_dir, batch_size):
    """Construct batches of (whitened image, filename) pairs for unlabeled data.

    Args:
        filename: Path of a text file with one "image_path <ignored>" line per example.
        data_dir: Unused.
        batch_size: Number of images per batch.
    """
    with open(filename) as listing:
        entries = [line.strip('\n') for line in listing.readlines()]
    # Queue of "path ..." strings feeding the unlabeled reader.
    entry_queue = tf.train.string_input_producer(entries)
    record = read_image_unlabeled(entry_queue)
    image = tf.cast(record.uint8image, tf.float32)
    # Deterministic central crop/pad to the model's input size.
    resized = tf.image.resize_image_with_crop_or_pad(image,
                                                     IMAGE_SIZE, IMAGE_SIZE)
    # Normalize: subtract the mean and divide by the stddev of the pixels.
    whitened = tf.image.per_image_whitening(resized)
    # Keep at least 40% of the eval set queued for good mixing.
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_EVAL * 0.4)
    # Batch images together with their source filenames.
    return _generate_image_and_filename_batch(whitened, record.key,
                                              min_queue_examples, batch_size)
# encoding: utf-8
# !/usr/bin/python
from dms.web.hooks.accept_hook import AcceptHook
from dms.web.hooks.auth_hook import AuthHook
from dms.web.hooks.body_hook import BodyHook
from dms.web.hooks.cors_hook import CorsHook

__author__ = 'zhouhenglc'

# BUG FIX: __all__ must contain name strings, not the objects themselves.
# With class objects, `from <package> import *` raises
# "TypeError: Item in __all__ must be str" on Python 3.
__all__ = ['AcceptHook', 'AuthHook', 'BodyHook', 'CorsHook']
|
from django import template

register = template.Library()


@register.filter(name='dict_value')
def dict_value(value, arg):
    """Template filter: look up key `arg` in mapping `value`.

    Returns None when the key is absent (dict.get semantics).
    """
    # Debug print() calls removed: they ran on every template render and
    # spammed stdout in production.
    return value.get(arg)
import numpy as np
from deerlab import dipolarkernel, fitmultimodel
from deerlab.dd_models import dd_gengauss, dd_gauss, dd_rice, dd_gauss2, dd_rice3, dd_gauss3
from deerlab.bg_models import bg_exp
from deerlab.utils import ovl
def test_multigauss():
#=======================================================================
    "Check that the fit of a multi-Gauss model works"
    r = np.linspace(2,6,300)
    t = np.linspace(-0.5,6,500)
    K = dipolarkernel(t,r)
    parin = [4, 0.05, 0.4, 4, 0.4, 0.4, 3, 0.15, 0.2]
    P = dd_gauss3(r,parin)
    V = K@P
    fit = fitmultimodel(V,K,r,dd_gauss,3,'aicc', uq=False)
    assert ovl(P,fit.P) > 0.95 # require at least 95% overlap with the truth
#=======================================================================
def test_multirice():
#=======================================================================
    "Check that the fit of a multi-Rician model works"
    r = np.linspace(2,6,300)
    t = np.linspace(0,6,500)
    K = dipolarkernel(t,r)
    parin = [4, 0.05, 0.4, 4, 0.4, 0.4, 3, 0.15, 0.2]
    P = dd_rice3(r,parin)
    V = K@P
    fit = fitmultimodel(V,K,r,dd_rice,3,'aicc', uq=False)
    assert ovl(P,fit.P) > 0.95 # require at least 95% overlap with the truth
#=======================================================================
def test_multigengauss():
#=======================================================================
    "Check that the fit of a multi-generalized-Gaussian model works"
    r = np.linspace(2,6,200)
    t = np.linspace(0,6,400)
    K = dipolarkernel(t,r)
    # Two-component generalized-Gaussian mixture, normalized to unit area.
    P = dd_gengauss(r,[2.5, 0.2, 5]) + 0.8*dd_gengauss(r,[3, 0.7, 2])
    P /= np.trapz(P,r)
    V = K@P
    fit = fitmultimodel(V,K,r,dd_gengauss,2,'aicc', uq=False)
    assert ovl(P,fit.P) > 0.95 # require at least 95% overlap with the truth
#=======================================================================
def test_bounds():
#=======================================================================
    "Check that specifying bounds for the basis function works correctly"
    r = np.linspace(2,6,300)
    t = np.linspace(0,6,500)
    K = dipolarkernel(t,r)
    parin = [4, 0.05, 0.4, 4, 0.4, 0.4, 3, 0.15, 0.2]
    P = dd_gauss3(r,parin)
    V = K@P
    # lb/ub bound the (mean, width) parameters of each Gaussian component.
    fit = fitmultimodel(V,K,r,dd_gauss,3,'aicc',lb = [2,0.02],ub=[5.5,1.5], uq=False)
    assert ovl(P,fit.P) > 0.95 # require at least 95% overlap with the truth
#=======================================================================
def test_rescaling():
#=======================================================================
    "Check that rescaling does not change the results"
    tvec = np.linspace(0, 5, 100)
    rvec = np.linspace(2, 6, 100)
    dist = dd_gauss(rvec, [4, 0.6])
    kernel = dipolarkernel(tvec, rvec)
    amplitude = 1e3
    signal = kernel @ dist
    # Fitting the scaled signal with renormalization must agree with
    # fitting the unscaled signal without it.
    fit_scaled = fitmultimodel(signal*amplitude, kernel, rvec, dd_gauss, 2, 'aic', renormalize=True, uq=False)
    fit_plain = fitmultimodel(signal, kernel, rvec, dd_gauss, 2, 'aic', renormalize=False, uq=False)
    assert max(abs(fit_scaled.P - fit_plain.P)) < 1e-4
#=======================================================================
def test_global_multigauss():
#=======================================================================
    "Check that the global fit of a multi-Gauss model works"
    r = np.linspace(2,6,300)
    parin = [4, 0.05, 0.4, 4, 0.4, 0.4, 3, 0.15, 0.2]
    P = dd_gauss3(r,parin)
    # Two datasets on different time axes sharing the same distribution.
    t1 = np.linspace(-0.5,6,500)
    K1 = dipolarkernel(t1,r)
    V1 = K1@P
    t2 = np.linspace(0,5,300)
    K2 = dipolarkernel(t2,r)
    V2 = K2@P
    fit = fitmultimodel([V1,V2],[K1,K2],r,dd_gauss,3,'aicc', uq=False)
    assert ovl(P,fit.P) > 0.95 # require at least 95% overlap with the truth
#=======================================================================
def test_global_multirice():
#=======================================================================
    "Check that the global fit of a multi-Rician model works"
    r = np.linspace(2,6,100)
    parin = [4, 0.05, 0.4, 4, 0.4, 0.4, 3, 0.15, 0.2]
    P = dd_rice3(r,parin)
    # Two datasets on different time axes sharing the same distribution.
    t1 = np.linspace(-0.5,6,200)
    K1 = dipolarkernel(t1,r)
    V1 = K1@P
    t2 = np.linspace(0,5,100)
    K2 = dipolarkernel(t2,r)
    V2 = K2@P
    fit = fitmultimodel([V1,V2],[K1,K2],r,dd_rice,3,'aicc', uq=False)
    assert ovl(P,fit.P) > 0.95 # require at least 95% overlap with the truth
#=======================================================================
def test_background_fit():
#=======================================================================
    "Check the fitting of a non-linear kernel model"
    t = np.linspace(-0.3,4,100)
    r = np.linspace(3,6,100)
    InputParam = [4, 0.15, 0.5, 4.3, 0.1, 0.4]
    P = dd_gauss2(r,InputParam)
    B = bg_exp(t,0.15)
    V = dipolarkernel(t,r,mod=0.25,bg=B)@P
    def Kmodel(par):
        # Kernel parameterized by modulation depth (lam) and background
        # decay rate (k) — fitted nonlinearly alongside the distribution.
        lam,k = par
        B = bg_exp(t,k)
        K = dipolarkernel(t,r,mod=lam,bg=B)
        return K
    fit = fitmultimodel(V,Kmodel,r,dd_gauss,2,'aicc',lb=[1,0.02],ub=[6,1],lbK=[0.2,0.01],ubK=[0.9,1],uq=False)
    assert ovl(P,fit.P) > 0.95 # require at least 95% overlap with the truth
#=======================================================================
def assert_confidence_intervals(pci50, pci95, pfit, lb, ub):
#----------------------------------------------------------------------
    """Assert that a set of confidence intervals is self-consistent.

    Checks that the fitted values lie within both the 50% and 95%
    confidence intervals, that the 50%-CI is nested inside the 95%-CI,
    and that the 95%-CI respects the parameter bounds ``lb``/``ub``.

    Parameters
    ----------
    pci50, pci95 : (N, 2) arrays with [lower, upper] CI columns.
    pfit : (N,) array of fitted values.
    lb, ub : lower and upper parameter bounds.

    Raises
    ------
    AssertionError listing every violated condition.
    """
    p95lb = pci95[:,0]
    p95ub = pci95[:,1]
    p50lb = pci50[:,0]
    p50ub = pci50[:,1]
    errors = []
    if not np.all(p95lb <= pfit) and not np.all(p50lb <= pfit):
        errors.append("Some fitted values are below the lower bound of the confidence intervals.")
    # BUG FIX: the upper-bound check previously compared against p50lb
    # (the 50% *lower* bound) instead of p50ub, so fits above the 50%-CI
    # upper bound could go undetected.
    if not np.all(p95ub >= pfit) and not np.all(p50ub >= pfit):
        errors.append("Some fitted values are over the upper bound of the confidence intervals.")
    if not np.all(p95lb <= p50lb):
        errors.append("The 50%-CI has lower values than the 95%-CI")
    if not np.all(p95ub >= p50ub):
        errors.append("The 50%-CI has larger values than the 95%-CI")
    if not np.all(np.minimum(lb,p95lb)==lb):
        errors.append("The lower bounds are not satisfied by the confidence intervals.")
    if not np.all(np.maximum(ub,p95ub)==ub):
        errors.append("The upper bounds are not satisfied by the confidence intervals.")
    assert not errors, f"Errors occurred:\n{chr(10).join(errors)}"
#----------------------------------------------------------------------
def test_Vfit():
#=======================================================================
    "Check that the fitted signal is correct"
    tvec = np.linspace(-0.3, 4, 100)
    rvec = np.linspace(3, 6, 200)
    dist = dd_gauss2(rvec, [4, 0.05, 0.5, 4.3, 0.1, 0.4])
    kernel = dipolarkernel(tvec, rvec)
    signal = kernel @ dist
    fit = fitmultimodel(signal, kernel, rvec, dd_gauss, 2, 'aicc', lb=[1, 0.02], ub=[6, 1])
    # The reconstructed signal must reproduce the noiseless input closely.
    assert max(abs(fit.V - signal)) < 1e-3
#=======================================================================
def test_confinter_Pfit():
#=======================================================================
    "Check that the confidence intervals of the fitted distribution are correct"
    tvec = np.linspace(-0.3, 4, 100)
    rvec = np.linspace(3, 6, 200)
    dist = dd_gauss2(rvec, [4, 0.05, 0.5, 4.3, 0.1, 0.4])
    kernel = dipolarkernel(tvec, rvec)
    signal = kernel @ dist
    fit = fitmultimodel(signal, kernel, rvec, dd_gauss, 2, 'aicc', lb=[1, 0.02], ub=[6, 1])
    # Distance distributions are non-negative and unbounded above.
    lower = np.zeros(len(rvec))
    upper = np.full(len(rvec), np.inf)
    assert_confidence_intervals(fit.Puncert.ci(50), fit.Puncert.ci(95), fit.P, lower, upper)
#=======================================================================
def test_confinter_Vfit():
#=======================================================================
    "Check that the confidence intervals of the fitted signal are correct"
    tvec = np.linspace(-0.3, 4, 100)
    rvec = np.linspace(3, 6, 200)
    dist = dd_gauss2(rvec, [4, 0.05, 0.5, 4.3, 0.1, 0.4])
    kernel = dipolarkernel(tvec, rvec)
    signal = kernel @ dist
    fit = fitmultimodel(signal, kernel, rvec, dd_gauss, 2, 'aicc', lb=[1, 0.02], ub=[6, 1])
    # The signal itself is unbounded in both directions.
    lower = np.full(len(tvec), -np.inf)
    upper = np.full(len(tvec), np.inf)
    assert_confidence_intervals(fit.Vuncert.ci(50), fit.Vuncert.ci(95), fit.V, lower, upper)
#=======================================================================
def test_confinter_parfit():
#=======================================================================
    "Check that the confidence intervals of the fitted parameters are correct"
    tvec = np.linspace(-0.3, 4, 100)
    rvec = np.linspace(3, 6, 200)
    dist = dd_gauss2(rvec, [4, 0.05, 0.5, 4.3, 0.1, 0.4])
    kernel = dipolarkernel(tvec, rvec)
    signal = kernel @ dist
    lower, upper = [1, 0.02], [6, 1]
    fit = fitmultimodel(signal, kernel, rvec, dd_gauss, 2, 'aicc', lb=lower, ub=upper)
    uncert = fit.paramUncert
    # Check only the first basis function's (mean, width) against the bounds.
    assert_confidence_intervals(uncert.ci(50)[0:2, :], uncert.ci(95)[0:2, :],
                                fit.Pparam, lower, upper)
#=======================================================================
def test_goodness_of_fit():
#=======================================================================
    "Check the goodness-of-fit statistics are correct"
    rvec = np.linspace(2, 6, 300)
    tvec = np.linspace(-0.5, 6, 500)
    kernel = dipolarkernel(tvec, rvec)
    dist = dd_gauss3(rvec, [4, 0.05, 0.4, 4, 0.4, 0.4, 3, 0.15, 0.2])
    signal = kernel @ dist
    fit = fitmultimodel(signal, kernel, rvec, dd_gauss, 3, 'aicc', uq=False)
    # For a noiseless synthetic fit both reduced chi^2 and R^2 should be ~1.
    stats = fit.stats
    assert abs(stats['chi2red'] - 1) < 5e-2
    assert abs(stats['R2'] - 1) < 5e-2
#=======================================================================
def test_globalfit_scales():
#=======================================================================
    "Check that the global fit recovers the individual dataset scales"
    r = np.linspace(2,6,300)
    parin = [4, 0.05, 0.4, 4, 0.4, 0.4, 3, 0.15, 0.2]
    P = dd_gauss3(r,parin)
    # Two datasets with very different amplitudes sharing one distribution.
    scales = [1e3, 1e9]
    t1 = np.linspace(-0.5,6,500)
    K1 = dipolarkernel(t1,r)
    V1 = scales[0]*K1@P
    t2 = np.linspace(0,5,300)
    K2 = dipolarkernel(t2,r)
    V2 = scales[1]*K2@P
    fit = fitmultimodel([V1,V2],[K1,K2],r,dd_gauss,3,'aicc', weights=[1,1], uq=False)
    # Fitted scales must match the true ones to within 1%.
    assert max(abs(np.asarray(scales)/np.asarray(fit.scale) - 1)) < 1e-2
#=======================================================================
def test_multigauss_spread():
#=======================================================================
    "Check the spreading strategy for multi-Gauss fitting"
    r = np.linspace(2,6,300)
    t = np.linspace(-0.5,6,500)
    K = dipolarkernel(t,r)
    parin = [4, 0.05, 0.4, 4, 0.4, 0.4, 3, 0.15, 0.2]
    P = dd_gauss3(r,parin)
    V = K@P
    fit = fitmultimodel(V,K,r,dd_gauss,3,'aicc', uq=False, strategy='spread')
    assert ovl(P,fit.P) > 0.95 # require at least 95% overlap with the truth
#=======================================================================
def test_multigauss_split():
#=======================================================================
    "Check the splitting strategy for multi-Gauss fitting"
    r = np.linspace(2,6,300)
    t = np.linspace(-0.5,6,500)
    K = dipolarkernel(t,r)
    parin = [4, 0.05, 0.4, 4, 0.4, 0.4, 3, 0.15, 0.2]
    P = dd_gauss3(r,parin)
    V = K@P
    fit = fitmultimodel(V,K,r,dd_gauss,3,'aicc', uq=False, strategy='split')
    assert ovl(P,fit.P) > 0.95 # require at least 95% overlap with the truth
#=======================================================================
def test_plot():
# ======================================================================
    "Check that the plot method works"
    rvec = np.linspace(2, 6, 200)
    tvec = np.linspace(-0.5, 6, 200)
    kernel = dipolarkernel(tvec, rvec)
    dist = dd_gauss3(rvec, [4, 0.05, 0.4, 4, 0.4, 0.4, 3, 0.15, 0.2])
    signal = kernel @ dist
    fit = fitmultimodel(signal, kernel, rvec, dd_gauss, 3, 'aicc', uq=False)
    # plot() must hand back a matplotlib Figure without displaying it.
    figure = fit.plot(show=False)
    assert str(figure.__class__)=="<class 'matplotlib.figure.Figure'>"
# ======================================================================
def test_multigauss_merge():
#=======================================================================
    "Check the merging strategy for multi-Gauss fitting"
    r = np.linspace(2,6,300)
    t = np.linspace(-0.5,6,500)
    K = dipolarkernel(t,r)
    parin = [4, 0.05, 0.4, 4, 0.4, 0.4, 3, 0.15, 0.2]
    P = dd_gauss3(r,parin)
    V = K@P
    fit = fitmultimodel(V,K,r,dd_gauss,3,'aicc', uq=False, strategy='merge')
    assert ovl(P,fit.P) > 0.95 # require at least 95% overlap with the truth
#=======================================================================
def test_multirice_spread():
#=======================================================================
    "Check the spreading strategy for multi-Rice fitting"
    r = np.linspace(2,6,300)
    t = np.linspace(0,6,500)
    K = dipolarkernel(t,r)
    parin = [4, 0.05, 0.4, 4, 0.4, 0.4, 3, 0.15, 0.2]
    P = dd_rice3(r,parin)
    V = K@P
    fit = fitmultimodel(V,K,r,dd_rice,3,'aicc', uq=False, strategy='spread')
    assert ovl(P,fit.P) > 0.95 # require at least 95% overlap with the truth
#=======================================================================
def test_multirice_split():
#=======================================================================
    "Check the splitting strategy for multi-Rice fitting"
    r = np.linspace(2,6,300)
    t = np.linspace(0,6,500)
    K = dipolarkernel(t,r)
    parin = [4, 0.05, 0.4, 4, 0.4, 0.4, 3, 0.15, 0.2]
    P = dd_rice3(r,parin)
    V = K@P
    fit = fitmultimodel(V,K,r,dd_rice,3,'aicc', uq=False, strategy='split')
    assert ovl(P,fit.P) > 0.95 # require at least 95% overlap with the truth
#=======================================================================
def test_multirice_merge():
#=======================================================================
    "Check the merging strategy for multi-Rice fitting"
    r = np.linspace(2,6,300)
    t = np.linspace(0,6,500)
    K = dipolarkernel(t,r)
    parin = [4, 0.05, 0.4, 4, 0.4, 0.4, 3, 0.15, 0.2]
    P = dd_rice3(r,parin)
    V = K@P
    fit = fitmultimodel(V,K,r,dd_rice,3,'aicc', uq=False, strategy='merge')
    assert ovl(P,fit.P) > 0.95 # require at least 95% overlap with the truth
#=======================================================================
def test_cost_value():
#=======================================================================
    "Check that the cost value equals the sum of squared residuals"
    r = np.linspace(2,6,300)
    t = np.linspace(-0.5,6,500)
    K = dipolarkernel(t,r)
    parin = [4, 0.05, 0.4, 4, 0.4, 0.4, 3, 0.15, 0.2]
    P = dd_gauss3(r,parin)
    V = K@P
    fit = fitmultimodel(V,K,r,dd_gauss,3,'aicc', uq=False)
    # cost must be a float equal to sum(residuals^2) (ratio rounds to 1).
    assert isinstance(fit.cost,float) and np.round(fit.cost/np.sum(fit.residuals**2),5)==1
#=======================================================================
def test_convergence_criteria():
#=======================================================================
    "Check that convergence criteria can be specified without crashing"
    r = np.linspace(2,6,300)
    t = np.linspace(-0.5,6,500)
    K = dipolarkernel(t,r)
    parin = [4, 0.05, 0.4, 4, 0.4, 0.4, 3, 0.15, 0.2]
    P = dd_gauss3(r,parin)
    V = K@P
    fit = fitmultimodel(V,K,r,dd_gauss,3,'aicc', uq=False, tol=1e-3, maxiter=200)
    assert ovl(P,fit.P) > 0.95 # require at least 95% overlap with the truth
#=======================================================================
import sys
from abc import ABC, abstractmethod
from dataclasses import asdict, astuple, dataclass, field
from typing import List
import numpy as np
class Reader:
    """Helpers for reading competitive-programming input from stdin."""

    @staticmethod
    def readline() -> bytes:
        # One line, without the trailing newline/whitespace.
        return sys.stdin.buffer.readline().rstrip()

    @classmethod
    def read_int(cls) -> int:
        return int(cls.readline())

    @classmethod
    def read_str(cls) -> str:
        return cls.readline().decode()

    @classmethod
    def readline_ints(cls) -> List[int]:
        # All whitespace-separated integers on the current line.
        return [int(tok) for tok in cls.readline().split()]

    @classmethod
    def readline_strs(cls) -> List[str]:
        return cls.read_str().split()

    @staticmethod
    def read() -> bytes:
        # Slurp all remaining input.
        return sys.stdin.buffer.read()

    @classmethod
    def read_ints(cls) -> List[int]:
        # All whitespace-separated integers in the remaining input.
        return [int(tok) for tok in cls.read().split()]

    @classmethod
    def read_strs(cls) -> List[str]:
        return cls.read().decode().split()

    @staticmethod
    def readlines() -> List[bytes]:
        return [ln.rstrip() for ln in sys.stdin.buffer.readlines()]
class NumpyReader(Reader):
    """Reader variant that returns int64 numpy arrays instead of lists.

    NOTE(review): np.fromstring's text mode is discouraged in modern NumPy;
    consider np.fromiter/np.array — confirm the targeted NumPy version.
    """

    @classmethod
    def readline_ints(cls) -> np.array:
        line = cls.read_str()
        return np.fromstring(line, dtype=np.int64, sep=" ")

    @classmethod
    def read_ints(cls) -> np.array:
        text = cls.read().decode()
        return np.fromstring(text, dtype=np.int64, sep=" ")
class Solver(ABC):
    """Base class wiring input readers to a prepare/solve workflow."""

    def __init__(self):
        self.reader = Reader()
        self.np_reader = NumpyReader()
        # Flips to True once prepare() has run; solve() asserts it.
        self.ready = False

    def __call__(self, *args, **kwargs):
        # Calling the instance simply runs it.
        self.run(*args, **kwargs)

    def run(self):
        self.prepare()
        self.solve()

    @abstractmethod
    def prepare(self):
        # Subclasses read input here; the base body marks readiness when
        # invoked via super().prepare().
        ...
        self.ready = True

    @abstractmethod
    def solve(self):
        assert self.ready
        ...
from dataclasses import dataclass
from enum import Enum
from typing import Union
import numpy as np
class Modulus(Enum):
    # Common competitive-programming moduli.
    # NOTE(review): MOD0 = 10007 is unusually small — confirm it is not a
    # typo for 10**9 + 7.
    MOD0 = 10**4 + 7
    MOD1 = 998_244_353
    MOD2 = 10**9 + 7
    MOD3 = 10**9 + 9
class ModularInt:
    """Integer arithmetic modulo a fixed modulus with operator overloads.

    NOTE(review): the in-place operators (+=, *=, **=) mutate self; mixed
    operands are coerced to self's modulus without consistency checks.
    Division assumes the modulus is prime (Fermat inversion).
    """

    def __init__(self, value: int = 0,
                 modulus: Union[Modulus, int] = (Modulus.MOD2)):
        self.mod: int = modulus
        self.value: int = value

    @property
    def mod(self):
        return self.modulus

    @mod.setter
    def mod(self, v):
        # Accept either a Modulus enum member or a raw int.
        if type(v) == Modulus:
            v = v.value
        self.modulus = v

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, v):
        # Values are always stored already reduced modulo self.mod.
        assert type(v) == int
        self._value = v % self.mod

    @value.deleter
    def value(self):
        del self._value

    def __repr__(self) -> str:
        return f"{self.value}"

    def clone(self):
        # Fresh instance carrying the same value and modulus.
        return self.__class__(self.value, self.mod)

    def modularize(self, other):
        # Coerce a plain number into a ModularInt sharing self's modulus.
        if type(other) != ModularInt:
            other = self.__class__(int(other), self.mod)
        return other

    def __iadd__(self, other):
        other = self.modularize(other)
        self.value += other.value
        self.value %= self.mod
        return self

    def __add__(self, other):
        res = self.clone()
        res += other
        return res

    def __radd__(self, other):
        return self + other

    def __neg__(self):
        return self.modularize(-self.value)

    def __sub__(self, other):
        # Subtraction implemented as addition of the negation.
        res = self.clone()
        res += -other
        return res

    def __rsub__(self, other):
        other = self.modularize(other)
        return other - self

    def __imul__(self, other):
        other = self.modularize(other)
        self.value *= other.value
        self.value %= self.mod
        return self

    def __mul__(self, other):
        res = self.clone()
        res *= other
        return res

    def __rmul__(self, other):
        return self * other

    def __truediv__(self, other):
        # Division via the modular multiplicative inverse.
        other = self.modularize(other)
        res = self.clone()
        res *= other.invert()
        return res

    def __rtruediv__(self, other):
        other = self.modularize(other)
        return other / self

    def __floordiv__(self, other):
        # Floor division is deliberately defined as modular division here.
        return self / other

    def __rfloordiv__(self, other):
        return other / self

    def pow(self, n):
        # Recursive binary exponentiation returning a new instance.
        if n == 0:
            return self.modularize(1)
        a = self.pow(n >> 1)
        a *= a
        if n & 1:
            a *= self
        return a

    def __ipow__(self, other):
        # Uses Python's three-argument pow for efficiency.
        other = self.modularize(other)
        self.value = pow(self.value, other.value, self.mod)
        return self

    def __pow__(self, other):
        res = self.clone()
        res **= other
        return res

    def __rpow__(self, other):
        other = self.modularize(other)
        return other**self

    def invert(self):
        # Fermat's little theorem: a^(p-2) = a^-1 (mod p) for prime p.
        i = self ** (self.mod - 2)
        return i

    def __eq__(self, other):
        other = self.modularize(other)
        return self.value == other.value

    def congruent(self, other):
        return self == other

    def factorial(self):
        # Object array of factorials 0! .. n! where n = self.value.
        # NOTE(review): the mapped ModularInt(...) entries use the default
        # modulus (MOD2), not self.mod — confirm before using non-default
        # moduli with this method.
        n = self.value
        fact = range(n + 1)
        fact: (np.ndarray) = np.array((*map(ModularInt, fact,),))
        fact[0] = ModularInt(1, self.mod)
        fact.cumprod(out=fact)
        return fact

    def inverse_factorial(self):
        # Inverse factorials via one inversion and a backward cumprod.
        fact = self.factorial()
        inv_fact: (np.ndarray) = np.arange(1, fact.size + 1,).astype(object)
        inv_fact[-1] = fact[-1].invert()
        inv_fact[::-1].cumprod(out=inv_fact[::-1])
        return inv_fact


# Short alias used by the solver classes below.
Mint = ModularInt
class ChooseMod(Mint):
    """Precomputed binomial coefficients C(n, r) modulo a prime.

    Builds the factorial and inverse-factorial tables up to `n` once in the
    constructor, so each choose() query is O(1).
    """

    def __init__(self, n: int = 1 << 20, *args, **kwargs):
        super().__init__(value=n, *args, **kwargs)
        self.fact = self.factorial()
        self.inv_fact = self.inverse_factorial()

    def __call__(self, n, r):
        return self.choose(n, r)

    def choose(self, n: int, r: int):
        """Return C(n, r) mod p; 0 when r is outside [0, n]."""
        # `valid` multiplies the product so out-of-range queries yield 0.
        # (Removed unused local `p = self.mod` — dead code.)
        valid = (0 <= r) & (r <= n)
        return valid * self.fact[n] * self.inv_fact[r] * self.inv_fact[n - r]
class ABC003D_0(
    Solver,
):
    """AtCoder ABC003 D: count placements of d desks and l racks in an r*c
    grid whose bounding box is exactly y*x, via inclusion-exclusion."""

    def prepare(self):
        # Read r c y x d l from input and cache them on the instance.
        reader = self.reader
        (
            r,
            c,
            y,
            x,
            d,
            l,
        ) = reader.read_ints()
        (self.r, self.c, self.y, self.x, self.d, self.l,) = (
            r,
            c,
            y,
            x,
            d,
            l,
        )
        # Precomputed binomial tables mod MOD2; 1 << 10 bounds y * x.
        self.choose = ChooseMod(
            n=1 << 10,
            modulus=Modulus.MOD2,
        )
        self.ready = True

    def solve(self):
        assert self.ready
        (r, c, y, x,) = (
            self.r,
            self.c,
            self.y,
            self.x,
        )
        # Positions available for the y*x bounding box inside the r*c grid.
        blocks = (r - y + 1) * (c - x + 1)
        # Inclusion-exclusion over the four borders: subtract/add placements
        # that leave a top/bottom row or left/right column empty, so only
        # placements filling the box exactly remain. (`c` is reused here as
        # the running count once the column value is no longer needed.)
        c = self.count(y, x)
        c -= self.count(y - 1, x) * 2
        c -= self.count(y, x - 1) * 2
        c += self.count(y - 2, x)
        c += self.count(y, x - 2)
        c += self.count(y - 1, x - 1) * 4
        c -= self.count(y - 2, x - 1) * 2
        c -= self.count(y - 1, x - 2) * 2
        c += self.count(y - 2, x - 2)
        res = blocks * c
        print(res)

    def count(self, y, x):
        # Ways to drop d desks and l racks anywhere in a y*x area (0 if the
        # area is degenerate or too small to hold them all).
        d, l = self.d, self.l
        if y <= 0 or x <= 0:
            return 0
        if y * x < d + l:
            return 0
        c = self.choose(y * x, d + l) * self.choose(d + l, d)
        return c
def main():
    """Run the solver once per test case (single case by default)."""
    case_count = 1
    # case_count = Reader.read_int()  # enable for multi-case input
    for _ in range(case_count):
        ABC003D_0()()


if __name__ == "__main__":
    main()
|
import collections
import dataclasses
import functools
import logging
import os
import re
import shlex
import subprocess
from typing import Any, Callable, Iterable, Iterator, List, Mapping, Sequence, TypeVar, Union, cast
import mkdocs.exceptions
import mkdocs.utils
from cached_property import cached_property
from mkdocstrings.handlers.base import BaseCollector, CollectionError
from . import inventory
from .items import DocConstant, DocItem, DocLocation, DocMapping, DocMethod, DocModule, DocType
try:
from mkdocs.exceptions import PluginError
except ImportError:
PluginError = SystemExit
log = logging.getLogger(f"mkdocs.plugins.{__name__}")
log.addFilter(mkdocs.utils.warning_filter)
D = TypeVar("D", bound=DocItem)
class CrystalCollector(BaseCollector):
    """Collects Crystal API docs by running `crystal doc` as a subprocess."""

    def __init__(
        self, crystal_docs_flags: Sequence[str] = (), source_locations: Mapping[str, str] = {}
    ):
        """Create a "collector", reading docs from `crystal doc` in the current directory.
        Normally this should not be instantiated.
        When using mkdocstrings-crystal within MkDocs, a plugin can access the instance as `config['plugins']['mkdocstrings'].get_handler('crystal').collector`.
        See [Extras](extras.md).
        """
        # NOTE(review): the mutable default `{}` is benign because the mapping
        # is only read here, but a `()`/None default would be safer style.
        command = [
            "crystal",
            "docs",
            "--format=json",
            "--project-name=",
            "--project-version=",
        ]
        if source_locations:
            command.append("--source-refname=master")
        # Extra flags may interpolate {crystal_version}/{crystal_src} etc.
        command += (s.format_map(_crystal_info) for s in crystal_docs_flags)
        log.debug("Running `%s`", " ".join(shlex.quote(arg) for arg in command))
        # Output is consumed lazily in `root`, not here.
        self._proc = subprocess.Popen(command, stdout=subprocess.PIPE)
        # For unambiguous prefix match: add trailing slash, sort by longest path first.
        # NOTE(review): the key counts "/" while src_path is joined with os.sep;
        # on Windows these differ — confirm intended behavior.
        self._source_locations = sorted(
            (
                _SourceDestination(os.path.relpath(k) + os.sep, source_locations[k])
                for k in source_locations
            ),
            key=lambda d: -d.src_path.count("/"),
        )

    # pytype: disable=bad-return-type
    @cached_property
    def root(self) -> "DocRoot":
        """The top-level namespace, represented as a fake module."""
        # Reads the subprocess's JSON exactly once (cached_property); the
        # `finally` turns a non-zero exit status into a PluginError even if
        # parsing already failed.
        try:
            with self._proc:
                root = inventory.read(self._proc.stdout)
                root.__class__ = DocRoot
                root.source_locations = self._source_locations
                return root
        finally:
            if self._proc.returncode:
                cmd = " ".join(shlex.quote(arg) for arg in self._proc.args)
                raise PluginError(f"Command `{cmd}` exited with status {self._proc.returncode}")
    # pytype: enable=bad-return-type

    def collect(self, identifier: str, config: Mapping[str, Any]) -> "DocView":
        """[Find][mkdocstrings.handlers.crystal.items.DocItem.lookup] an item by its identifier.
        Raises:
            CollectionError: When an item by that identifier couldn't be found.
        """
        # Per-identifier config overlaid on these defaults.
        config = {
            "nested_types": False,
            "file_filters": True,
            **config,
        }
        item = self.root
        # "::" addresses the root namespace itself.
        if identifier != "::":
            item = item.lookup(identifier)
        return DocView(item, config)
@dataclasses.dataclass
class _SourceDestination:
    """Maps a local source path prefix to a destination URL template."""

    src_path: str
    dest_url: str

    def substitute(self, location: DocLocation) -> str:
        """Render `dest_url` for *location*, exposing {file}, {line}, this
        object's attributes, and the Crystal environment info."""
        data = {"file": location.filename[len(self.src_path) :], "line": location.line}
        try:
            return self.dest_url.format_map(collections.ChainMap(data, _DictAccess(self), _crystal_info))  # type: ignore
        except KeyError as e:
            raise PluginError(
                f"The source_locations template {self.dest_url!r} did not resolve correctly: {e}"
            )

    @property
    def shard_version(self):
        """Version string from the nearest shard.yml above src_path."""
        return self._shard_version(os.path.dirname(self.src_path))

    @classmethod
    @functools.lru_cache(maxsize=None)
    def _shard_version(cls, path: str):
        file_path = _find_above(path, "shard.yml")
        with open(file_path, "rb") as f:
            m = re.search(rb"^version: *([\S+]+)", f.read(), flags=re.MULTILINE)
            if not m:
                raise PluginError(f"`version:` not found in {file_path!r}")
            # BUG FIX: the match was previously discarded and the method fell
            # through returning None; return the captured version string.
            # NOTE(review): the class [\S+] also matches a literal '+';
            # presumably plain \S+ was intended — behavior unchanged here.
            return m[1].decode()
def _find_above(path: str, filename: str) -> str:
orig_path = path
while path:
file_path = os.path.join(path, filename)
if os.path.isfile(file_path):
return file_path
path = os.path.dirname(path)
raise PluginError(f"{filename!r} not found anywhere above {os.path.abspath(orig_path)!r}")
class _CrystalInfo:
@cached_property
def crystal_version(self) -> str:
return subprocess.check_output(
["crystal", "env", "CRYSTAL_VERSION"], encoding="ascii"
).rstrip()
@cached_property
def crystal_src(self):
out = subprocess.check_output(["crystal", "env", "CRYSTAL_PATH"], text=True).rstrip()
for path in out.split(os.pathsep):
if os.path.isfile(os.path.join(path, "prelude.cr")):
return os.path.relpath(path)
raise PluginError(f"Crystal sources not found anywhere in CRYSTAL_PATH={out!r}")
class _DictAccess:
def __init__(self, obj):
self.obj = obj
def __getitem__(self, key):
try:
return getattr(self.obj, key)
except AttributeError as e:
raise KeyError(f"Missing key: {e}")
_crystal_info = _DictAccess(_CrystalInfo())
class DocRoot(DocModule):
    """The top-level namespace posing as a module, plus source-URL mapping."""

    source_locations: List[_SourceDestination]

    def update_url(self, location: DocLocation) -> DocLocation:
        """Fill in location.url from the first matching source mapping."""
        filename = location.filename or ""
        for destination in self.source_locations:
            if filename.startswith(destination.src_path):
                location.url = destination.substitute(location)
                break
        return location
class DocView:
    """A filtered, config-aware proxy over a DocItem."""

    def __init__(self, item: DocItem, config: Mapping[str, Any]):
        self.item = item
        self.config = config

    def __getattr__(self, name: str):
        # Proxy attribute access to the wrapped item; DocMapping attributes
        # get run through the configured file filters (and nested types are
        # hidden unless enabled).
        val = getattr(self.item, name)
        if isinstance(val, DocMapping) and val:
            if name == "types" and not self.config["nested_types"]:
                return DocMapping(())
            return type(self)._filter(self.config["file_filters"], val, type(self)._get_locations)
        return val

    def walk_types(self) -> Iterator[DocType]:
        """Yield every type reachable from this view, depth-first."""
        types = cast(DocMapping[DocType], self.types)
        for typ in types:
            yield typ
            yield from typ.walk_types()

    @classmethod
    def _get_locations(cls, obj: DocItem) -> Sequence[str]:
        # Source-file URLs (fragment stripped) defining *obj*; constants are
        # attributed to their parent.
        if isinstance(obj, DocConstant):
            obj = obj.parent
        if not obj:
            return ()
        if isinstance(obj, DocType):
            return [loc.url.rsplit("#", 1)[0] for loc in obj.locations]
        elif isinstance(obj, DocMethod):
            if not obj.location:
                return ()
            return (obj.location.url.rsplit("#", 1)[0],)
        else:
            raise TypeError(obj)

    @classmethod
    def _filter(
        cls,
        filters: Union[bool, Sequence[str]],
        mapp: DocMapping[D],
        getter: Callable[[D], Sequence[str]],
    ) -> DocMapping[D]:
        # filters: True keeps everything, False drops everything; otherwise a
        # non-empty sequence of (optionally "!"-negated) regexes applied via
        # _apply_filter against each item's locations.
        if filters is False:
            return DocMapping(())
        if filters is True:
            return mapp
        try:
            # Early validation that this is a non-empty sequence of patterns.
            re.compile(filters[0])
        except (TypeError, IndexError):
            raise CollectionError(
                f"Expected a non-empty list of strings as filters, not {filters!r}"
            )
        return DocMapping([item for item in mapp if _apply_filter(filters, getter(item))])
def _apply_filter(
filters: Iterable[str],
tags: Sequence[str],
) -> bool:
match = False
for filt in filters:
filter_kind = True
if filt.startswith("!"):
filter_kind = False
filt = filt[1:]
if any(re.search(filt, s) for s in tags):
match = filter_kind
return match
|
import node
import transform
import control
import curve
import joint
__all__ = ['curve', 'joint', 'control', 'node', 'transform']
|
from .xdcrnewbasetests import XDCRNewBaseTest
from security.ntonencryptionBase import ntonencryptionBase
from membase.helper.cluster_helper import ClusterOperationHelper
import time
import random
class XDCRSecurityTests(XDCRNewBaseTest):
    """XDCR tests that toggle cluster security settings (TLS enforcement,
    node-to-node encryption, autofailover) before or after replication setup
    and verify replication survives rebalance/failover/pause/reboot."""

    def setUp(self):
        XDCRNewBaseTest.setUp(self)

    def tearDown(self):
        XDCRNewBaseTest.tearDown(self)

    def get_cluster_objects_for_input(self, input):
        """returns a list of cluster objects for input. 'input' is a string
        containing names of clusters separated by ':'
        eg. failover=C1:C2
        """
        return [self.get_cb_cluster_by_name(name) for name in input.split(':')]

    def _toggle_setting(self, servers, setting, value=None):
        """Enable or disable one security setting on *servers*.

        setting: "tls", "n2n" or "autofailover".
        value:   setting-specific enable value; falsy/None disables.
                 BUG FIX: *value* now defaults to None — several call sites
                 pass only two arguments to disable a setting, which
                 previously raised TypeError.
        """
        n2nhelper = ntonencryptionBase()
        if setting == "tls":
            if value:
                n2nhelper.setup_nton_cluster(servers, clusterEncryptionLevel=value)
            else:
                n2nhelper.disable_nton_cluster(servers)
        if setting == "n2n":
            if value:
                n2nhelper.ntonencryption_cli(servers, value)
            else:
                # Only issue the disable when n2n is currently enabled.
                if n2nhelper.ntonencryption_cli(servers, "get"):
                    n2nhelper.ntonencryption_cli(servers, "disable")
        if setting == "autofailover":
            if value:
                n2nhelper.enable_autofailover(servers)
            else:
                if n2nhelper.check_autofailover_enabled(servers):
                    n2nhelper.disable_autofailover(servers)

    def test_xdcr_with_security(self):
        # Candidate values per setting, used by the random_setting path.
        settings_values_map = {"autofailover": ["enable", None],
                               "n2n": ["enable", "disable"],
                               "tls": ["all", "control", "strict"]
                               }
        apply_settings_before_setup = self._input.param("apply_settings_before_setup", False)
        disable_autofailover = self._input.param("disable_autofailover", False)
        enable_n2n = self._input.param("enable_n2n", False)
        enforce_tls = self._input.param("enforce_tls", None)
        tls_level = self._input.param("tls_level", "control")
        enable_autofailover = self._input.param("enable_autofailover", False)
        disable_n2n = self._input.param("disable_n2n", None)
        disable_tls = self._input.param("disable_tls", None)
        rebalance_in = self._input.param("rebalance_in", None)
        rebalance_out = self._input.param("rebalance_out", None)
        swap_rebalance = self._input.param("swap_rebalance", None)
        failover = self._input.param("failover", None)
        graceful = self._input.param("graceful", None)
        pause = self._input.param("pause", None)
        reboot = self._input.param("reboot", None)
        initial_xdcr = self._input.param("initial_xdcr", random.choice([True, False]))
        random_setting = self._input.param("random_setting", False)

        # Set up replication first unless settings must be applied beforehand.
        if not apply_settings_before_setup:
            if initial_xdcr:
                self.load_and_setup_xdcr()
            else:
                self.setup_xdcr_and_load()

        if enforce_tls:
            for cluster in self.get_cluster_objects_for_input(enforce_tls):
                self._toggle_setting([cluster.get_master_node()], "tls", tls_level)
        # Revert to default (control) tls level
        if disable_tls:
            for cluster in self.get_cluster_objects_for_input(disable_tls):
                self._toggle_setting([cluster.get_master_node()], "tls")
        if enable_n2n:
            for cluster in self.get_cluster_objects_for_input(enable_n2n):
                self._toggle_setting([cluster.get_master_node()], "n2n", "enable")
        if disable_n2n:
            for cluster in self.get_cluster_objects_for_input(disable_n2n):
                self._toggle_setting([cluster.get_master_node()], "n2n")
        if enable_autofailover:
            for cluster in self.get_cluster_objects_for_input(enable_autofailover):
                self._toggle_setting([cluster.get_master_node()], "autofailover", "enable")
        if disable_autofailover:
            for cluster in self.get_cluster_objects_for_input(disable_autofailover):
                self._toggle_setting([cluster.get_master_node()], "autofailover")
        if random_setting:
            for cluster in self.get_cluster_objects_for_input(random_setting):
                setting = random.choice(list(settings_values_map.keys()))
                value = random.choice(settings_values_map.get(setting))
                self._toggle_setting([cluster.get_master_node()], setting, value)

        if apply_settings_before_setup:
            if initial_xdcr:
                self.load_and_setup_xdcr()
            else:
                self.setup_xdcr_and_load()

        if pause:
            for cluster in self.get_cluster_objects_for_input(pause):
                for remote_cluster_refs in cluster.get_remote_clusters():
                    remote_cluster_refs.pause_all_replications()
            time.sleep(60)
        if rebalance_in:
            for cluster in self.get_cluster_objects_for_input(rebalance_in):
                cluster.async_rebalance_in()
        if failover:
            for cluster in self.get_cluster_objects_for_input(failover):
                cluster.failover_and_rebalance_nodes(graceful=graceful,
                                                     rebalance=True)
        if rebalance_out:
            # FIX: removed the unused `tasks = []` accumulator.
            for cluster in self.get_cluster_objects_for_input(rebalance_out):
                cluster.async_rebalance_out()
        if swap_rebalance:
            for cluster in self.get_cluster_objects_for_input(swap_rebalance):
                cluster.async_swap_rebalance()
        if pause:
            for cluster in self.get_cluster_objects_for_input(pause):
                for remote_cluster_refs in cluster.get_remote_clusters():
                    remote_cluster_refs.resume_all_replications()
        if reboot:
            for cluster in self.get_cluster_objects_for_input(reboot):
                cluster.warmup_node()
            time.sleep(60)

        self.perform_update_delete()
        self.verify_results()
        try:
            # Restore defaults - Enable autofailover, disable n2n.
            # NOTE(review): `cluster` is whichever loop variable ran last; if
            # no toggle path executed it is unbound, and the broad except
            # keeps this cleanup best-effort (previously a bare `except:`).
            self._toggle_setting([cluster.get_master_node()], "autofailover", "enable")
            self._toggle_setting([cluster.get_master_node()], "n2n")
        except Exception:
            pass
"""
Created on 04/sep/2016
@author: Marco Pompili
"""
import html5lib as html
import json
import logging
import requests
from socket import error as socket_error
from requests.exceptions import ConnectionError, HTTPError
from . import settings
SCRIPT_JSON_PREFIX = 18
SCRIPT_JSON_DATA_INDEX = 21
def instagram_scrape_profile(username):
    """
    Scrape an Instagram profile page and parse it into a DOM tree.

    :param username: Instagram username (without a leading '@').
    :return: parsed DOM document, or None on HTTP/connection failure.
    """
    try:
        url = "https://www.instagram.com/{}/".format(username)
        # Browser-like headers; Instagram serves different markup otherwise.
        headers = {
            "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "User-Agent": settings.INSTAGRAM_UA
        }
        if settings.INSTAGRAM_COOKIE:
            headers["Cookie"] = settings.INSTAGRAM_COOKIE
        page = requests.get(url, headers=headers)
        # Raise error for a 404 caused by a bad profile name
        page.raise_for_status()
        return html.parse(page.content, treebuilder="dom")
    except HTTPError:
        logging.exception('user profile "{}" not found'.format(username))
    except (ConnectionError, socket_error) as e:
        logging.exception("instagram.com unreachable")
def instagram_profile_js(username):
    """
    Retrieve the script tags from the parsed profile page.
    :param username:
    :return: list of <script> DOM nodes, or None if the scrape failed.
    """
    document = instagram_scrape_profile(username)
    try:
        return document.getElementsByTagName("script")
    except AttributeError:
        # The scrape returned None (request failed upstream).
        logging.exception("scripts not found")
        return None
def instagram_profile_json(username):
    """
    Get the JSON data string from the scripts.
    :param username:
    :return: the raw JSON payload string, or None if not found.
    """
    scripts = instagram_profile_js(username)
    payload = None
    if scripts:
        for script in scripts:
            if not script.hasChildNodes():
                continue
            text = script.firstChild.data
            # The shared-data bootstrap script starts "window._sharedData".
            if text[:SCRIPT_JSON_PREFIX] == "window._sharedData":
                # Strip the "window._sharedData = " prefix and trailing ";".
                payload = text[SCRIPT_JSON_DATA_INDEX:-1]
    return payload
def instagram_profile_obj(username):
    """
    Retrieve the JSON from the page and parse it to a python dict.
    :param username:
    :return: parsed dict, or None when no (non-empty) payload was found.
    """
    raw = instagram_profile_json(username)
    if not raw:
        return None
    return json.loads(raw)
|
import asyncio
from asyncio import StreamReader, StreamWriter
import configargparse
import logging
import json
from connect_to_chat import get_chat_connection
def create_argparser():
    """Build the CLI/env-var/config-file argument parser for the sender."""
    parser = configargparse.ArgParser(
        default_config_files=['~/.minechat'],
        description="Connects to chat as specified user (or creates a new one) and posts given message. "
                    "NOTE: When attempt to login with provided token is successful nickname is ignored."
    )
    parser.add('message', help="Message to send to chat")
    parser.add('--host', '-H', default="minechat.dvmn.org", env_var="CHAT_HOST", help="Host to connect to")
    parser.add('--port', '-p', default=5050, type=int, env_var="CHAT_PORT", help="Port to listen")
    # NOTE(review): this help text looks copy-pasted from another option.
    parser.add('--token', '-t', env_var='CHAT_TOKEN', help="Path to chat log")
    parser.add('--nickname', '-n', env_var='CHAT_NICKNAME', help="Nickname to create user with")
    parser.add('--debug-log', '-d', action='store_true', env_var='CHAT_DEBUG_LOG', help="Turn on verbose debug logging")
    return parser
def sanitize_text(msg):
    """Collapse newlines to spaces so the message is sent as one chat line."""
    return msg.replace("\n", " ")
async def send_data(writer: StreamWriter, msg: str):
    """Write *msg* plus a newline to the chat socket and flush it."""
    payload = f"{msg}\n".encode()
    writer.write(payload)
    await writer.drain()
    logging.debug(f":sent:{msg}")
async def receive_data(reader: StreamReader) -> str:
    """Read one line from the chat and return it decoded (newline kept)."""
    line = await reader.readline()
    msg = line.decode()
    logging.debug(f":received:{msg}")
    return msg
async def authorize(reader: StreamReader, writer: StreamWriter, token: str) -> dict:
    """Log in with *token* and return the parsed user object (JSON null on
    rejection parses to None)."""
    await receive_data(reader)  # greeting prompt
    await send_data(writer, f"{token}\n")
    user_object = json.loads(await receive_data(reader))
    await receive_data(reader)  # per-user welcome; messages may follow
    return user_object
async def register(reader: StreamReader, writer: StreamWriter, nickname):
    """Create a new chat user named *nickname*; return the user object."""
    await receive_data(reader)  # greeting prompt
    await send_data(writer, "")  # empty token -> skip token login
    await receive_data(reader)  # nickname prompt
    await send_data(writer, f"{sanitize_text(nickname)}\n")
    user_object = json.loads(await receive_data(reader))
    await receive_data(reader)  # per-user welcome message
    return user_object
async def submit_message(reader: StreamReader, writer: StreamWriter, message):
    """Post *message* to the chat and consume the server acknowledgement."""
    await send_data(writer, f"{sanitize_text(message)}\n")
    await receive_data(reader)  # delivery confirmation
async def main():
    """Parse args, connect, authorize or register, then post the message."""
    arguments = create_argparser().parse_args()
    # BUG FIX: the option is --debug-log, so configargparse stores it as
    # `debug_log`; the previous attribute name `debug_logging` raised
    # AttributeError on every run.
    log_level = logging.DEBUG if arguments.debug_log else logging.WARNING
    logging.basicConfig(format="%(levelname)-8s [%(asctime)s] %(message)s", level=log_level)
    user = None
    async with get_chat_connection(arguments.host, arguments.port) as (reader, writer):
        if arguments.token:
            user = await authorize(reader, writer, arguments.token)
            if not user:
                print("Неизвестный токен. Проверьте его или зарегистрируйте заново.")
            else:
                print(f"Добро пожаловать, {user['nickname']}")
        if not user:
            if not arguments.nickname:
                print("You have to provide nickname in order to register a user")
                exit(-1)
            user = await register(reader, writer, arguments.nickname)
            # NOTE(review): this line announces the token but interpolates the
            # nickname twice — the registration response presumably carries
            # the token under another key; verify against the server JSON.
            print(f"Рады познакомится, {user['nickname']}, ваш токен - {user['nickname']} ")
        await submit_message(reader, writer, arguments.message)


if __name__ == '__main__':
    asyncio.run(main())
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from .code_node import ListNode
from .code_node import LiteralNode
from .code_node import SymbolNode
from .code_node import SymbolScopeNode
from .code_node import TextNode
from .code_node import WeakDependencyNode
from .code_node import render_code_node
from .codegen_accumulator import CodeGenAccumulator
from .mako_renderer import MakoRenderer
class CodeNodeTest(unittest.TestCase):
    """Unit tests for the code_node tree renderer (literal/text/list nodes,
    symbol definition insertion, and Mako error reporting)."""

    def setUp(self):
        super(CodeNodeTest, self).setUp()
        # Show multi-line diffs for string comparisons.
        self.addTypeEqualityFunc(str, self.assertMultiLineEqual)

    def assertRenderResult(self, node, expected):
        """Render *node* and compare whitespace-normalized output."""
        if node.renderer is None:
            node.set_renderer(MakoRenderer())
        if node.accumulator is None:
            node.set_accumulator(CodeGenAccumulator())

        def simplify(text):
            # Collapse runs of whitespace within each line.
            return "\n".join(
                [" ".join(line.split()) for line in text.split("\n")])

        actual = simplify(render_code_node(node))
        expected = simplify(expected)
        self.assertEqual(actual, expected)

    def test_literal_node(self):
        """
        Tests that, in LiteralNode, the special characters of template (%, ${},
        etc) are not processed.
        """
        root = LiteralNode("<% x = 42 %>${x}")
        self.assertRenderResult(root, "<% x = 42 %>${x}")

    def test_empty_literal_node(self):
        root = LiteralNode("")
        self.assertRenderResult(root, "")

    def test_text_node(self):
        """Tests that the template language works in TextNode."""
        root = TextNode("<% x = 42 %>${x}")
        self.assertRenderResult(root, "42")

    def test_empty_text_node(self):
        root = TextNode("")
        self.assertRenderResult(root, "")

    def test_list_operations_of_sequence_node(self):
        """
        Tests that list operations (insert, append, and extend) of ListNode
        work just same as Python built-in list.
        """
        root = ListNode(separator=",")
        root.extend([
            LiteralNode("2"),
            LiteralNode("4"),
        ])
        root.insert(1, LiteralNode("3"))
        root.insert(0, LiteralNode("1"))
        # Out-of-range insert clamps to the end, as with built-in list.
        root.insert(100, LiteralNode("5"))
        root.append(LiteralNode("6"))
        self.assertRenderResult(root, "1,2,3,4,5,6")
        root.remove(root[0])
        root.remove(root[2])
        root.remove(root[-1])
        self.assertRenderResult(root, "2,3,5")

    def test_list_node_head_and_tail(self):
        # head/tail are emitted only when the list has content.
        self.assertRenderResult(ListNode(), "")
        self.assertRenderResult(ListNode(head="head"), "")
        self.assertRenderResult(ListNode(tail="tail"), "")
        self.assertRenderResult(
            ListNode([TextNode("-content-")], head="head", tail="tail"),
            "head-content-tail")

    def test_nested_sequence(self):
        """Tests nested ListNodes."""
        root = ListNode(separator=",")
        nested = ListNode(separator=",")
        nested.extend([
            LiteralNode("2"),
            LiteralNode("3"),
            LiteralNode("4"),
        ])
        root.extend([
            LiteralNode("1"),
            nested,
            LiteralNode("5"),
        ])
        self.assertRenderResult(root, "1,2,3,4,5")

    def test_symbol_definition_chains(self):
        """
        Tests that use of SymbolNode inserts necessary SymbolDefinitionNode
        appropriately.
        """
        root = SymbolScopeNode(tail="\n")
        root.register_code_symbols([
            SymbolNode("var1", "int ${var1} = ${var2} + ${var3};"),
            SymbolNode("var2", "int ${var2} = ${var5};"),
            SymbolNode("var3", "int ${var3} = ${var4};"),
            SymbolNode("var4", "int ${var4} = 1;"),
            SymbolNode("var5", "int ${var5} = 2;"),
        ])
        root.append(TextNode("(void)${var1};"))
        # Definitions appear in dependency order before first use.
        self.assertRenderResult(
            root, """\
int var5 = 2;
int var2 = var5;
int var4 = 1;
int var3 = var4;
int var1 = var2 + var3;
(void)var1;
""")

    def test_weak_dependency_node(self):
        root = SymbolScopeNode(tail="\n")
        root.register_code_symbols([
            SymbolNode("var1", "int ${var1} = 1;"),
            SymbolNode("var2", "int ${var2} = 2;"),
            SymbolNode("var3", "int ${var3} = 3;"),
        ])
        root.extend([
            WeakDependencyNode(dep_syms=["var1", "var2"]),
            TextNode("f();"),
            TextNode("(void)${var3};"),
            TextNode("(void)${var1};"),
        ])
        # Weak deps hoist definitions only for symbols actually used later
        # (var1), not for unused ones (var2).
        self.assertRenderResult(
            root, """\
int var1 = 1;
f();
int var3 = 3;
(void)var3;
(void)var1;
""")

    def test_template_error_handling(self):
        renderer = MakoRenderer()
        root = SymbolScopeNode()
        root.set_renderer(renderer)
        root.append(
            SymbolScopeNode([
                # Have Mako raise a NameError.
                TextNode("${unbound_symbol}"),
            ]))
        with self.assertRaises(NameError):
            renderer.reset()
            root.render(renderer)
        # The failing caller chain is recorded innermost-first.
        callers_on_error = list(renderer.callers_on_error)
        self.assertEqual(len(callers_on_error), 3)
        self.assertEqual(callers_on_error[0], root[0][0])
        self.assertEqual(callers_on_error[1], root[0])
        self.assertEqual(callers_on_error[2], root)
        self.assertEqual(renderer.last_caller_on_error, root[0][0])
|
import os
from urllib.parse import urljoin
import requests
import json
from osbot_utils.utils.Http import GET_json,POST,POST_json
from osbot_utils.utils.Json import str_to_json, json_to_str
DEFAULT_API_SERVER = 'http://api:8880'


class API_Client:
    """Thin HTTP client for the pre-processor / processing API server."""

    def __init__(self, url_server=DEFAULT_API_SERVER):
        self.server_ip = url_server

    # --- request helpers -------------------------------------------------

    def _resolve_url(self, path=""):
        """Join *path* onto the configured server base URL."""
        return urljoin(self.server_ip, path)

    def _request_get(self, path):
        """GET *path* and return the parsed JSON response."""
        return GET_json(self._resolve_url(path))

    def _request_post(self, path):
        """POST an empty body to *path*."""
        return POST(url=self._resolve_url(path), data=b'', headers=None)

    def _request_http_post(self, path, data, headers):
        """Raw requests.post with caller-supplied body and headers."""
        return requests.post(url=self._resolve_url(path), data=data, headers=headers)

    # --- API endpoints ---------------------------------------------------

    def clear_data_and_status(self):
        return self._request_post('/pre-processor/clear-data-and-status')

    def health(self):
        return self._request_get('/health')

    def version(self):
        return self._request_get('/version')

    def pre_process(self):
        return self._request_post('/pre-processor/pre-process')

    def process_files(self):
        """Run pre-processing then the processing loop, asserting the
        server's literal completion replies."""
        assert self.pre_process() == '["Processing is done"]'
        assert self.start_process() == '"Loop completed"'
        return "all files processed "

    def start_process(self):
        return self._request_post('/processing/start')

    def stop_process(self):
        return self._request_post('/processing/stop')

    def configure_environment(self, data):
        json_headers = { 'accept': 'application/json' ,
                    'Content-Type': 'application/json'}
        return self._request_http_post(path="configuration/configure_env",
                                       headers=json_headers,
                                       data=json_to_str(data))

    def set_gw_sdk_endpoints(self, data):
        json_headers = { 'accept': 'application/json' ,
                    'Content-Type': 'application/json'}
        return self._request_http_post(path="configuration/configure_gw_sdk_endpoints",
                                       headers=json_headers,
                                       data=json_to_str(data))

    # --- helper methods ---------------------------------------------------

    def configure(self, data_paths, sdk_endpoints, clear_data=False):
        """Apply environment + endpoint configuration; optionally clear first."""
        status = {}
        if clear_data:
            status['clear_data_and_status'] = self.clear_data_and_status()
        status['configure_environment'] = self.configure_environment(data=data_paths)
        status['set_gw_sdk_endpoints'] = self.set_gw_sdk_endpoints(data=sdk_endpoints)
        return status
|
import os


def _ensure_dir(*parts):
    """Join *parts* into a path and create that directory if missing."""
    path = os.path.join(*parts)
    os.makedirs(path, exist_ok=True)
    return path


# Project data layout, one level above this script:
# data/{input/brutos, output/{geo, tab, map}}
data_path = _ensure_dir('..', 'data')
input_path = _ensure_dir(data_path, 'input')
bruto_path = _ensure_dir(input_path, 'brutos')
output_path = _ensure_dir(data_path, 'output')
output_path_geo = _ensure_dir(output_path, 'geo')
output_path_tab = _ensure_dir(output_path, 'tab')
output_path_map = _ensure_dir(output_path, 'map')
|
from django.db import transaction
from django.http import HttpResponseNotFound
from django.shortcuts import get_object_or_404, redirect
from django.views.decorators.http import require_http_methods
from django.views.generic import DetailView
from meta.views import MetadataMixin
from sqds.models import Player
from sqds.utils import extract_all_ally_codes
from .models import GAPool, GAPoolPlayer
@require_http_methods(["POST"])
def create_ga_pool(request, ally_code):
    """
    Create a new GA pool with ally_code as focus player. POST parameter ally_codes
    must contain text codes
    """
    if 'ally_codes' not in request.POST:
        return HttpResponseNotFound()
    ally_codes = extract_all_ally_codes(request.POST['ally_codes'])
    with transaction.atomic():
        focus = get_object_or_404(Player, ally_code=ally_code)
        ga_pool = GAPool(focus_player=focus)
        ga_pool.save()
        # Refresh any stale pool members (older than a week) before linking.
        Player.objects.ensure_exist(ally_codes, max_days=7)
        for code in ally_codes:
            GAPoolPlayer(ga_pool=ga_pool,
                         player=Player.objects.get(ally_code=code)).save()
    return redirect('sqds_ga:view', pk=ga_pool.pk)
class GAPoolView(MetadataMixin, DetailView):
    """Detail page for a GA pool: the focus player plus its opponent pool."""

    model = GAPool
    template_name = 'sqds_ga/ga_pool_overview.html'

    # noinspection PyAttributeOutsideInit
    def get_object(self, queryset=None):
        obj = super().get_object(queryset)
        # We load other object as well
        # Focus player re-fetched with stat/faction-GP annotations for the
        # template; `first()` because the pk filter yields at most one row.
        self.focus_player = (Player.objects
                             .filter(pk=obj.focus_player.pk)
                             .annotate_stats()
                             .annotate_faction_gp()
                             .select_related('guild')
                             .first())
        # All pool members with the same annotations, materialized once.
        self.players = list(Player.objects
                            .filter(ga_pool_player_set__ga_pool=obj)
                            .select_related('guild')
                            .annotate_faction_gp()
                            .annotate_stats())
        return obj

    def get_context_data(self, **kwargs):
        # Expose focus player, pool players, and their union to the template.
        context = super().get_context_data(**kwargs)
        context['focus_player'] = self.focus_player
        context['players'] = self.players
        context['all_players'] = [context['focus_player'], *context['players']]
        return context

    def get_meta_title(self, context=None):
        return f"GA Pool ({self.focus_player.name})"

    def get_meta_description(self, context=None):
        return "{} vs. {} ({})".format(
            self.focus_player.name,
            ', '.join([p.name for p in self.players]),
            self.object.created)
|
from p2_cipher import Cipher
def p2_answer():
    """Decode and print the message using the key recovered in p0/p1."""
    cipher = Cipher()
    cipher.key = "BLUE"  # the hardcoded key, it comes from p1_ and p_0
    cipher.processKey()  # make a deduped string
    cipher.readIn()  # reads the message in
    cipher.printDecodedMessage()  # prints the decoded message


if __name__ == "__main__":
    p2_answer()
import datasets
import model
import tensorflow as tf
import tensorflow.compat.v1 as tfv1
import time
from logging import getLogger
logger = getLogger(__name__)


def start(network, trainer, train, epochs, checkpoint):
    """Train *network* over *train* batches, checkpointing after each epoch.

    Resumes from the checkpoint's save counter, so a restarted run skips
    epochs that already completed.
    """
    first_epoch = checkpoint.save_counter()
    for epoch in range(first_epoch, epochs):
        for images, labels in train:
            trainer.train_step(network, images, labels)
        save_path = checkpoint.save()
        logger.info(
            f"epoch: {epoch},"
            f" loss: {trainer.train_loss.result()},"
            f" accuracy: {trainer.train_accuracy.result()}"
            f" save model: {save_path}"
        )
def main():
    """Train an MNIST model for 5 epochs in eager mode and log timing."""
    tfv1.enable_eager_execution()
    logger.info(f"execute eagerly = {tf.executing_eagerly()}")
    logger.info(f"is gpu available = {tf.test.is_gpu_available()}")
    logger.info("get dataset...")
    train, _ = datasets.get_dataset()
    logger.info("learning...")
    network = model.MNISTModel()
    trainer = model.Trainer()
    checkpoint = model.Checkpoint(network=network, optimizer=trainer.optimizer)
    start_learning = time.time()
    start(network, trainer, train, 5, checkpoint)
    end_learning = time.time()
    logger.info(f"learning time: {end_learning - start_learning} sec")


if __name__ == "__main__":
    import logging

    logging.basicConfig(level=logging.INFO)
    main()
|
#!/bin/env python
import Currency_Aggregator
import Currency_Converter
import Currency_Model
import Parser
import Scrapper
class Main:
    """Entry point: scrape exchange rates, ask the user for currencies,
    convert, and print the result (user-facing strings are in Polish)."""

    @staticmethod
    def run_program():
        # Scrape fresh rate data, then parse it into a model object.
        Scrapper.Scrapper.scrap()
        currency = Parser.Parser.parse()
        # Interactive choice of (base, target) currency pair.
        chosen_currency = currency.choose_current()
        exchanged_money = Currency_Converter.Currency_Converter.convert(chosen_currency)
        print(f"Waluta bazowa to {chosen_currency[0]}")
        print(f"Waluta docelowa to {chosen_currency[1]}")
        print(f"Kwota {exchanged_money[0]} w walucie docelowej wynosi {exchanged_money[1]}")


if __name__ == "__main__":
    Main.run_program()
|
#! /usr/bin/env python3
import sys, platform
from os.path import isfile
def hello(who):
    """Print (and return) a greeting naming *who* and the host OS."""
    osname = platform.system()
    # BUG FIX: the template contained "${}" (a shell-style leftover), which
    # printed a literal "$" before the OS name; "{}" is the str.format slot.
    # Returning the message (previously None) is backward-compatible and
    # makes the function testable.
    message = "Hello world from {} on an {} machine".format(who, osname)
    print(message)
    return message


if __name__ == "__main__":
    # Greet only when a name was passed on the command line.
    if len(sys.argv) >= 2:
        hello(sys.argv[1])
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Adds the SponsorParcel model for tracking sponsor package shipments."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('sponsor', '0004_auto_20150619_2212'),
    ]

    operations = [
        migrations.CreateModel(
            name='SponsorParcel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('parcelService', models.CharField(max_length=128, verbose_name='Delivery service company')),
                ('trackingNumber', models.CharField(max_length=128, verbose_name='Tracking number')),
                # NOTE(review): verbose_name is missing its closing
                # parenthesis ("Tracking URL (if available") — cosmetic only.
                ('trackingUrl', models.URLField(verbose_name='Tracking URL (if available', blank=True)),
                ('contentAndUsage', models.TextField(verbose_name='What is the content of this package? What should we use it for?', blank=True)),
                ('received', models.BooleanField(default=False, verbose_name='Received')),
                ('storageLocation', models.TextField(verbose_name='Storage location', blank=True)),
                # ForeignKeys rely on the pre-Django-2.0 implicit
                # on_delete=CASCADE (migration predates Django 2.0).
                ('owner', models.ForeignKey(related_name='parcels', editable=False, to=settings.AUTH_USER_MODEL)),
                ('sponsoring', models.ForeignKey(related_name='parcels', editable=False, to='sponsor.Sponsoring')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.