Dataset columns (one row per source file):

| column | type | values |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4–209 |
| max_stars_repo_name | string | lengths 5–121 |
| max_stars_repo_head_hexsha | string | lengths 40–40 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 (nullable) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | lengths 24–24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | lengths 24–24 |
| max_issues_repo_path | string | lengths 4–209 |
| max_issues_repo_name | string | lengths 5–121 |
| max_issues_repo_head_hexsha | string | lengths 40–40 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 (nullable) | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | lengths 24–24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | lengths 24–24 |
| max_forks_repo_path | string | lengths 4–209 |
| max_forks_repo_name | string | lengths 5–121 |
| max_forks_repo_head_hexsha | string | lengths 40–40 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 (nullable) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | lengths 24–24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | lengths 24–24 |
| content | string | lengths 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
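The rows below are easier to work with programmatically than to read inline. A minimal sketch of loading such an export with pandas, assuming the rows have been saved to a Parquet file (the file name here is a placeholder, not part of the dataset):

```python
import pandas as pd

# Placeholder path: point this at wherever the rows below are actually stored.
df = pd.read_parquet("python_files_sample.parquet")

# Each row is one source file: repository metadata plus the raw text in `content`.
print(df[["max_stars_repo_name", "max_stars_repo_path", "size", "max_stars_count"]].head())

# Example filter: MIT-licensed files under 2 kB whose star count is known.
small_mit = df[
    df["max_stars_repo_licenses"].apply(lambda lic: "MIT" in lic)
    & (df["size"] < 2000)
    & df["max_stars_count"].notna()
]
print(len(small_mit), "matching rows")
```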
hexsha: 7c1d13f88c58a68331858cd82f62795bc6b19393 | size: 58 | ext: py | lang: Python
repo: Viictorreiss/pythonmarathon @ 8e1b948e887cf0237ccf7edf0a168f062e937d15 | path: maior.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
nums = input().split()
nums.sort(key=int)  # sort numerically; a plain sort() would compare the strings lexicographically
print(nums[-1], end="")
avg_line_length: 19.333333 | max_line_length: 23 | alphanum_fraction: 0.603448
hexsha: 70ebe702f6e9552335c07c46fcc167c95608f747 | size: 740 | ext: py | lang: Python
path: variation/translators/genomic_deletion_range.py | licenses: ["MIT"]
stars: 1 (2022-01-19T18:17:49.000Z to 2022-01-19T18:17:49.000Z) on cancervariants/variant-normalization @ e89a9f8366a659c82b2042aeb7effe339851bfb4
issues: 99 (2021-06-07T12:50:34.000Z to 2022-03-23T13:38:29.000Z) on cancervariants/variation-normalization @ 9c8fbab1562591ae9445d82ddd15df29f1ea1f5a
forks: null on cancervariants/variant-normalization @ e89a9f8366a659c82b2042aeb7effe339851bfb4
content:
"""Module for Genomic Deletion Range Translation."""
from variation.translators.translator import Translator
from variation.schemas.classification_response_schema import ClassificationType
from variation.schemas.token_response_schema import \
GenomicDeletionRangeToken
class GenomicDeletionRange(Translator):
"""The Genomic Insertion Translator class."""
def can_translate(self, type: ClassificationType) -> bool:
"""Return if classification type is Genomic Insertion."""
return type == ClassificationType.GENOMIC_DELETION_RANGE
def is_token_instance(self, token):
"""Return if the token is an Genomic Deletion Range token instance."""
return isinstance(token, GenomicDeletionRangeToken)
avg_line_length: 41.111111 | max_line_length: 79 | alphanum_fraction: 0.774324
hexsha: 0367c377269c1efb094da07565bc22022e92190c | size: 4,131 | ext: py | lang: Python
repo: oil-rope/oil-and-rope @ 6d59c87d4809f120417a90c1624952085486bb06 | path: registration/migrations/0003_auto_20210630_1711.py | licenses: ["MIT"]
stars: 8 (2019-08-27T20:08:22.000Z to 2021-07-23T22:49:47.000Z) | issues: 73 (2020-03-11T18:07:29.000Z to 2022-03-28T18:07:47.000Z) | forks: 4 (2020-02-22T19:44:17.000Z to 2022-03-08T09:42:45.000Z)
content:
# Generated by Django 3.2.4 on 2021-06-30 16:11
import ckeditor.fields
import common.files.upload
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('registration', '0002_alter_user_is_premium'),
]
operations = [
migrations.AlterModelOptions(
name='profile',
options={'ordering': ['user__username', 'user__first_name'], 'verbose_name': 'profile', 'verbose_name_plural': 'profiles'},
),
migrations.AlterField(
model_name='profile',
name='alias',
field=models.CharField(blank=True, max_length=30, null=True, verbose_name='alias'),
),
migrations.AlterField(
model_name='profile',
name='bio',
field=ckeditor.fields.RichTextField(blank=True, null=True, verbose_name='biography'),
),
migrations.AlterField(
model_name='profile',
name='birthday',
field=models.DateField(blank=True, null=True, verbose_name='birthday'),
),
migrations.AlterField(
model_name='profile',
name='image',
field=models.ImageField(blank=True, null=True, upload_to=common.files.upload.default_upload_to, verbose_name='avatar'),
),
migrations.AlterField(
model_name='profile',
name='language',
field=models.CharField(choices=[('af', 'Afrikaans'), ('sq', 'Albanian'), ('ar-dz', 'Algerian Arabic'), ('ar', 'Arabic'), ('es-ar', 'Argentinian Spanish'), ('hy', 'Armenian'), ('ast', 'Asturian'), ('en-au', 'Australian English'), ('az', 'Azerbaijani'), ('eu', 'Basque'), ('be', 'Belarusian'), ('bn', 'Bengali'), ('bs', 'Bosnian'), ('pt-br', 'Brazilian Portuguese'), ('br', 'Breton'), ('en-gb', 'British English'), ('bg', 'Bulgarian'), ('my', 'Burmese'), ('ca', 'Catalan'), ('es-co', 'Colombian Spanish'), ('hr', 'Croatian'), ('cs', 'Czech'), ('da', 'Danish'), ('nl', 'Dutch'), ('en', 'English'), ('eo', 'Esperanto'), ('et', 'Estonian'), ('fi', 'Finnish'), ('fr', 'French'), ('fy', 'Frisian'), ('gl', 'Galician'), ('ka', 'Georgian'), ('de', 'German'), ('el', 'Greek'), ('he', 'Hebrew'), ('hi', 'Hindi'), ('hu', 'Hungarian'), ('is', 'Icelandic'), ('io', 'Ido'), ('ig', 'Igbo'), ('id', 'Indonesian'), ('ia', 'Interlingua'), ('ga', 'Irish'), ('it', 'Italian'), ('ja', 'Japanese'), ('kab', 'Kabyle'), ('kn', 'Kannada'), ('kk', 'Kazakh'), ('km', 'Khmer'), ('ko', 'Korean'), ('ky', 'Kyrgyz'), ('lv', 'Latvian'), ('lt', 'Lithuanian'), ('dsb', 'Lower Sorbian'), ('lb', 'Luxembourgish'), ('mk', 'Macedonian'), ('ml', 'Malayalam'), ('mr', 'Marathi'), ('es-mx', 'Mexican Spanish'), ('mn', 'Mongolian'), ('ne', 'Nepali'), ('es-ni', 'Nicaraguan Spanish'), ('nb', 'Norwegian Bokmål'), ('nn', 'Norwegian Nynorsk'), ('os', 'Ossetic'), ('fa', 'Persian'), ('pl', 'Polish'), ('pt', 'Portuguese'), ('pa', 'Punjabi'), ('ro', 'Romanian'), ('ru', 'Russian'), ('gd', 'Scottish Gaelic'), ('sr', 'Serbian'), ('sr-latn', 'Serbian Latin'), ('zh-hans', 'Simplified Chinese'), ('sk', 'Slovak'), ('sl', 'Slovenian'), ('es', 'Spanish'), ('sw', 'Swahili'), ('sv', 'Swedish'), ('tg', 'Tajik'), ('ta', 'Tamil'), ('tt', 'Tatar'), ('te', 'Telugu'), ('th', 'Thai'), ('zh-hant', 'Traditional Chinese'), ('tr', 'Turkish'), ('tk', 'Turkmen'), ('udm', 'Udmurt'), ('uk', 'Ukrainian'), ('hsb', 'Upper Sorbian'), ('ur', 'Urdu'), ('uz', 'Uzbek'), ('es-ve', 'Venezuelan Spanish'), ('vi', 'Vietnamese'), ('cy', 'Welsh')], default='en', max_length=30, verbose_name='language'),
),
migrations.AlterField(
model_name='profile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL, verbose_name='user'),
),
migrations.AlterField(
model_name='profile',
name='web',
field=models.URLField(blank=True, null=True, verbose_name='website'),
),
]
avg_line_length: 72.473684 | max_line_length: 2,139 | alphanum_fraction: 0.556524
hexsha: 54cde32ad438fd525b5e8abfc41724451baa5b79 | size: 9,625 | ext: py | lang: Python
repo: ArtiomOn/pull_bot @ 1f9db9e38beb1be4ac3a16e36775c73b412ceab4 | path: config/main.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import logging
import os
import dotenv
import requests
from aiogram import types, Bot, Dispatcher
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.utils import executor
from aiogram.utils.exceptions import BotBlocked
from youtubesearchpython import *
from services.converter import convert_ogg_to_wav
from services.recognizer import audio_file_to_text
from services.storage import generate_unique_destinations
logging.basicConfig(level=logging.INFO)
dotenv.load_dotenv()
bot = Bot(token=os.getenv('TOKEN'))
OWM_KEY = os.getenv('OWM_KEY')
storage = MemoryStorage()
dp = Dispatcher(bot, storage=storage)
@dp.message_handler(commands=['start'])
async def command_start(message: types.Message):
try:
await bot.send_message(message.chat.id, 'Чтобы увидеть инструкцию напишите - /help')
except BotBlocked:
logging.info(f'Bot was blocked by user {message.from_user.id}')
@dp.message_handler(commands=['help'])
async def command_help(message: types.Message):
await bot.send_message(message.chat.id, '--{опциональный ответ пользователя}\n'
'--[обязательный ответ пользователя]\n\n'
'Чтобы создать опрос надо произнести ключевые слова - '
'они подмечены жирным шрифтом.\n'
'<b>*Бот создай {анонимный} опрос</b> [ваш вопрос] <b>вариант</b> '
'[ваш вариант ответа], <b>вариант</b> [ваш вариант ответа]...\n\n'
'Чтобы найти видео в ютубе надо произнести ключевые слова -\n'
'<b>*Бот найди видео</b> [название видео]\n\n'
'Чтобы посмотреть актуальную на данный момент погоду надо произнести '
'ключевые слова -\n'
'<b>*Бот какая сейчас погода в стране</b> [страна] '
'P.S пример МолдовА, РоссиЯ',
parse_mode='html')
@dp.message_handler(content_types=types.ContentType.VOICE)
async def assist(message: types.Message):
if message.voice:
ogg_destination, wav_destination = generate_unique_destinations()
await message.voice.download(destination=ogg_destination)
convert_ogg_to_wav(ogg_destination, wav_destination)
query = audio_file_to_text(wav_destination)
try:
await bot.delete_message(message_id=message.message_id, chat_id=message.chat.id)
except Exception as e:
logging.info(f'Error occurs {e} with user {message.from_user.id}')
else:
await command_handler(message, query)
async def command_handler(message: types.Message, query):
    # str.find() returns -1 when the keyword is missing, so `(a or b) != -1` does not
    # actually test both keywords; use substring membership checks instead.
    if 'создай' in query or 'опрос' in query:
        await create_poll(message, query)
    elif 'найди' in query or 'видео' in query:
        await get_video_link(message, query)
elif query.find('погода') != -1:
await get_weather(message, query)
else:
await bot.send_message(message.chat.id, 'Не распознал вашу команду - для информации напишите /help')
async def create_poll(message: types.Message, text):
pull_choice_data_row = []
# Get poll command
if text.find('анонимный') != -1:
command_create_pull_data = 'анонимный'
else:
command_create_pull_data = 'обычный'
# Get pull question
question_first_index = text.find('опрос')
if text.find('вариант') != -1:
question_last_index = text.find('вариант')
else:
question_last_index = len(text)
pull_question_row = text[question_first_index: question_last_index]
pull_question_data = ' '.join(pull_question_row.partition('опрос')[2].split()).capitalize()
# Get poll choice
pull_choice_first_index = text.find('вариант')
pull_choice_last_index = len(text)
pull_choice_data_words = text[pull_choice_first_index:pull_choice_last_index]
for i in range(pull_choice_data_words.count('вариант')):
pull_choice_data_row.append(
''.join(pull_choice_data_words.split()).split('вариант', int(i + 2))[int(i + 1)].capitalize())
pull_choice_data = [choices for choices in pull_choice_data_row if choices.strip()]
await poll_handler(message, command_create_pull_data, pull_question_data, pull_choice_data)
async def get_video_link(message: types.Message, query):
command_find_video_name_first_index = query.find('видео')
command_find_video_name_last_index = len(query)
command_find_video_data_row = query[command_find_video_name_first_index: command_find_video_name_last_index]
command_find_video_data = command_find_video_data_row.partition('видео')[2]
await get_video_handler(message, command_find_video_data)
async def get_weather(message: types.Message, query):
command_find_weather_first_index = query.find('погода')
command_find_weather_last_index = len(query)
command_find_weather_data_row = query[command_find_weather_first_index:command_find_weather_last_index]
if command_find_weather_data_row.find('городе') > -1:
command_find_weather_data = command_find_weather_data_row.partition('городе')[2]
await get_weather_handler(message, command_find_weather_data.strip())
elif command_find_weather_data_row.find('стране') > -1:
command_find_weather_data = command_find_weather_data_row.partition('стране')[2]
await get_weather_handler(message, command_find_weather_data.strip())
else:
await bot.send_message(message.chat.id, 'Не распознал страну, попробуйте еще раз.')
async def poll_handler(message: types.Message, command, question, choice):
if command == 'обычный':
if len(choice) < 2:
await bot.send_poll(message.chat.id, question=f'{question.capitalize()}?', options=['Да', 'Нет'],
is_anonymous=False)
else:
await bot.send_poll(message.chat.id, question=f'{question.capitalize()}?', options=choice,
is_anonymous=False)
elif command == 'анонимный':
if len(choice) < 2:
await bot.send_poll(message.chat.id, question=f'{question.capitalize()}?', options=['Да', 'Нет'],
is_anonymous=True)
else:
await bot.send_poll(message.chat.id, question=f'{question.capitalize()}?', options=choice,
is_anonymous=True)
else:
await bot.send_message(message.chat.id, 'Не понял вашу команду, попробуйте еще раз')
async def get_video_handler(message: types.Message, query):
custom_search = CustomSearch(query=str(query), limit=1, searchPreferences='en')
if custom_search.result()['result']:
for i in range(custom_search.limit):
await bot.send_message(message.chat.id, dict(custom_search.result()['result'][i]).get('link'))
else:
await bot.send_message(message.chat.id, 'Видео не было найдено, попробуйте еще раз.')
async def get_weather_handler(message: types.Message, city):
walking_status = []
response = requests.get(
url=f'http://api.openweathermap.org/data/2.5/weather?q={city}&appid={OWM_KEY}&units=metric')
if response.status_code == 200:
country_name = response.json().get('name')
weather_main = response.json().get('main')
weather_data = response.json().get('weather')
wind_data = response.json().get('wind')
weather_temp = weather_main['temp']
weather_description = weather_data[0]['description']
weather_humidity = weather_main['humidity']
wind_speed = wind_data['speed']
if weather_description.find('clouds') > -1:
sticker = open('../static/clouds.tgs', 'rb')
await bot.send_sticker(sticker=sticker, chat_id=message.chat.id)
elif weather_description.find('clear') > -1:
sticker = open('../static/sunny.tgs', 'rb')
await bot.send_sticker(sticker=sticker, chat_id=message.chat.id)
elif weather_description.find('rain') > -1:
sticker = open('../static/rain.tgs', 'rb')
await bot.send_sticker(sticker=sticker, chat_id=message.chat.id)
if weather_description.find('clear') != -1 and 35 > int(str(weather_temp)[:2]) > 15:
walking_status.append('Хорошо')
elif weather_description.find('rain') != -1 and 35 > int(str(weather_temp)[:2]) > 25:
walking_status.append('Можно, но лучше повременить')
elif weather_description.find('clouds') != -1 and 35 > int(str(weather_temp)[:2]) > 18:
walking_status.append('Хорошо, но остерегайтесь дождя')
else:
walking_status.append('Плохо')
await bot.send_message(message.chat.id, f'Местность - {country_name}\n'
f'Небо - {weather_description}\n'
f'Скорость ветра - {wind_speed} km/h\n'
f'Температура - {str(weather_temp)[:2]}°C\n'
f'Влажность - {weather_humidity}%\n'
f'Пробежка - {"".join(walking_status)}')
else:
await bot.send_message(message.chat.id, 'Я не нашел страну, пример ввода страны - МолдовА, РоссиЯ..')
if __name__ == "__main__":
executor.start_polling(dp, skip_updates=False, timeout=120)
avg_line_length: 47.181373 | max_line_length: 114 | alphanum_fraction: 0.643117
hexsha: efd6adbe98d2f0eb2bed2131dbb4624aa38ee8ee | size: 835 | ext: py | lang: Python
repo: sankroh/satchmo @ e48df0c2a4be4ce14785d0a5d6dd1e516c57a838 | path: satchmo/shop/management/commands/satchmo_copy_templates.py | licenses: ["BSD-3-Clause"]
stars: 1 (2016-05-09T12:21:04.000Z to 2016-05-09T12:21:04.000Z) | issues: null | forks: null
content:
from django.core.management.base import NoArgsCommand
import os
import shutil
import string
class Command(NoArgsCommand):
help = "Copy the satchmo template directory and files to the local project."
def handle_noargs(self, **options):
import satchmo
template_src = os.path.join(satchmo.__path__[0],'templates')
template_dest = os.path.join(os.getcwd(), 'templates')
if os.path.exists(template_dest):
print "Template directory exists. You must manually copy the files you need."
else:
shutil.copytree(template_src, template_dest)
for root, dirs, files in os.walk(template_dest):
if '.svn' in dirs:
shutil.rmtree(os.path.join(root,'.svn'), True)
print "Copied %s to %s" % (template_src, template_dest)
avg_line_length: 37.954545 | max_line_length: 89 | alphanum_fraction: 0.651497
hexsha: 4151c9965ab20ef67a807f364b7fe047bd4eecdf | size: 279 | ext: py | lang: Python
repo: drew-512/gpython @ 12886a2728c232f1fef7b758a1d0f4ff1934e522 | path: pytest/testdata/tests/module.py | licenses: ["BSD-3-Clause"]
stars: 65 (2018-08-01T21:11:57.000Z to 2018-08-19T08:58:34.000Z) | issues: 3 (2018-08-04T10:09:53.000Z to 2018-08-20T18:52:08.000Z) | forks: 3 (2018-08-02T19:57:46.000Z to 2018-08-03T03:40:31.000Z)
content:
# Copyright 2022 The go-python Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from libtest import testFunc
doc="module"
assert True
assert not False
assert testFunc() is None
doc="finished"
avg_line_length: 21.461538 | max_line_length: 61 | alphanum_fraction: 0.767025
hexsha: 08b842211820218fba67a461ac3d4ec558c50c0e | size: 524 | ext: py | lang: Python
repo: suneettipirneni/hackathon-2021-backend @ 18df5ce348303900cefa21cc88cc56e1b07dc562 | path: src/models/group.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
# -*- coding: utf-8 -*-
"""
src.models.group
~~~~~~~~~~~~~~~~
Model definition for Groups
Classes:
Group
"""
from datetime import datetime
from src import db
from src.models import BaseDocument
from src.models.hacker import Hacker
class Group(BaseDocument):
name = db.StringField(unique=True, required=True)
icon = db.StringField()
members = db.ListField(db.ReferenceField(Hacker))
categories = db.ListField(db.StringField())
date = db.DateTimeField(default=datetime.utcnow)
avg_line_length: 21.833333 | max_line_length: 53 | alphanum_fraction: 0.677481
hexsha: 213576c8fac157ffcd29d25e3b7c3996a9f00f63 | size: 2,316 | ext: py | lang: Python
repo: asmeurer/jedi @ 93500c3f72519cc2f0414efeeec46395f45e4905 | path: test/test_evaluate/test_namespace_package.py | licenses: ["MIT"]
stars: 239 (2018-04-20T06:58:32.000Z to 2022-03-22T18:06:08.000Z) | issues: 10 (2018-12-09T13:49:06.000Z to 2021-07-03T00:38:53.000Z) | forks: 99 (2018-07-20T09:16:13.000Z to 2022-03-20T11:58:56.000Z)
content:
import jedi
import sys
from os.path import dirname, join
def test_namespace_package():
sys.path.insert(0, join(dirname(__file__), 'namespace_package/ns1'))
sys.path.insert(1, join(dirname(__file__), 'namespace_package/ns2'))
try:
# goto definition
assert jedi.Script('from pkg import ns1_file').goto_definitions()
assert jedi.Script('from pkg import ns2_file').goto_definitions()
assert not jedi.Script('from pkg import ns3_file').goto_definitions()
# goto assignment
tests = {
'from pkg.ns2_folder.nested import foo': 'nested!',
'from pkg.ns2_folder import foo': 'ns2_folder!',
'from pkg.ns2_file import foo': 'ns2_file!',
'from pkg.ns1_folder import foo': 'ns1_folder!',
'from pkg.ns1_file import foo': 'ns1_file!',
'from pkg import foo': 'ns1!',
}
for source, solution in tests.items():
ass = jedi.Script(source).goto_assignments()
assert len(ass) == 1
assert ass[0].description == "foo = '%s'" % solution
# completion
completions = jedi.Script('from pkg import ').completions()
names = [str(c.name) for c in completions] # str because of unicode
compare = ['foo', 'ns1_file', 'ns1_folder', 'ns2_folder', 'ns2_file',
'pkg_resources', 'pkgutil', '__name__', '__path__',
'__package__', '__file__', '__doc__']
# must at least contain these items, other items are not important
assert set(compare) == set(names)
tests = {
'from pkg import ns2_folder as x': 'ns2_folder!',
'from pkg import ns2_file as x': 'ns2_file!',
'from pkg.ns2_folder import nested as x': 'nested!',
'from pkg import ns1_folder as x': 'ns1_folder!',
'from pkg import ns1_file as x': 'ns1_file!',
'import pkg as x': 'ns1!',
}
for source, solution in tests.items():
for c in jedi.Script(source + '; x.').completions():
if c.name == 'foo':
completion = c
solution = "statement: foo = '%s'" % solution
assert completion.description == solution
finally:
sys.path.pop(0)
sys.path.pop(0)
avg_line_length: 41.357143 | max_line_length: 77 | alphanum_fraction: 0.57772
hexsha: 2fc3fae454fbfb0ed4d198cf78caf157ca51caa4 | size: 266 | ext: py | lang: Python
repo: LampV/Reinforcement-Learning @ 0652b9e8c2de428d3508074c6fd640cc14f84a2c | path: vvlab/agents/__init__.py | licenses: ["MIT"]
stars: 3 (2019-12-26T11:46:21.000Z to 2020-09-02T10:59:46.000Z) | issues: 13 (2021-04-05T13:10:25.000Z to 2022-03-12T00:51:15.000Z) | forks: 3 (2020-09-28T01:26:37.000Z to 2020-10-14T06:15:53.000Z)
content:
#!/usr/bin/env python
# coding=utf-8
"""
@author: Jiawei Wu
@create time: 2019-12-06 23:16
@edit time: 2020-11-23 17:29
@FilePath: /vvlab/vvlab/agents/__init__.py
"""
from .DDPG_base import DDPGBase
from .DQN_base import DQNBase
from .Linear_base import LinearBase
avg_line_length: 20.461538 | max_line_length: 42 | alphanum_fraction: 0.744361
hexsha: df5498ab247245ed638f96e30811e09ecfaa3cdf | size: 2,000 | ext: py | lang: Python
repo: diox/zamboni @ 3d3bebdffe034a5cd97a66cedc32a598264c2e42 | path: mkt/langpacks/migrations/0002_auto_20150824_0820.py | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('langpacks', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='langpack',
name='language',
field=models.CharField(default=b'en-US', max_length=10, choices=[(b'el', '\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac'), (b'xh', 'isiXhosa'), (b'bn-BD', '\u09ac\u09be\u0982\u09b2\u09be (\u09ac\u09be\u0982\u09b2\u09be\u09a6\u09c7\u09b6)'), (b'af', 'Afrikaans'), (b'ee', 'E\u028be'), (b'bn-IN', '\u09ac\u09be\u0982\u09b2\u09be (\u09ad\u09be\u09b0\u09a4)'), (b'ca', 'Catal\xe0'), (b'en-US', 'English (US)'), (b'it', 'Italiano'), (b'cs', '\u010ce\u0161tina'), (b'cy', 'Cymraeg'), (b'ar', '\u0639\u0631\u0628\u064a'), (b'pt-BR', 'Portugu\xeas (do\xa0Brasil)'), (b'zu', 'isiZulu'), (b'eu', 'Euskara'), (b'sv-SE', 'Svenska'), (b'id', 'Bahasa Indonesia'), (b'es', 'Espa\xf1ol'), (b'en-GB', 'English (British)'), (b'ru', '\u0420\u0443\u0441\u0441\u043a\u0438\u0439'), (b'nl', 'Nederlands'), (b'zh-TW', '\u6b63\u9ad4\u4e2d\u6587 (\u7e41\u9ad4)'), (b'tr', 'T\xfcrk\xe7e'), (b'ga-IE', 'Gaeilge'), (b'zh-CN', '\u4e2d\u6587 (\u7b80\u4f53)'), (b'ig', 'Igbo'), (b'ro', 'rom\xe2n\u0103'), (b'dsb', 'Dolnoserb\u0161\u0107ina'), (b'pl', 'Polski'), (b'hsb', 'Hornjoserbsce'), (b'fr', 'Fran\xe7ais'), (b'bg', '\u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438'), (b'yo', 'Yor\xf9b\xe1'), (b'wo', 'Wolof'), (b'de', 'Deutsch'), (b'da', 'Dansk'), (b'ff', 'Pulaar-Fulfulde'), (b'nb-NO', 'Norsk bokm\xe5l'), (b'ha', 'Hausa'), (b'ja', '\u65e5\u672c\u8a9e'), (b'sr', '\u0421\u0440\u043f\u0441\u043a\u0438'), (b'sq', 'Shqip'), (b'ko', '\ud55c\uad6d\uc5b4'), (b'sk', 'sloven\u010dina'), (b'uk', '\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430'), (b'sr-Latn', 'Srpski'), (b'hu', 'magyar'), (b'sw', 'Kiswahili')]),
preserve_default=True,
),
]
avg_line_length: 95.238095 | max_line_length: 1,618 | alphanum_fraction: 0.601
hexsha: 0103e2a34ad9f2ddfdbc797d7146e4e21673ab98 | size: 1,878 | ext: py | lang: Python
repo: TeaCondemns/rurina @ 43725ebea5872953125271a9abb300a4e3a80a64 | path: rurina4/node/node.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
from nodes.camera import get_active_camera
from constants import MAX_ALPHA
from ._node import _Node
class Node(_Node):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def rscalex(self):
if get_active_camera():
return self.fscalex * get_active_camera().fscalex
return self.fscalex
@rscalex.setter
def rscalex(self, value):
if get_active_camera():
self.fscalex = value / get_active_camera().fscalex
@property
def rscaley(self):
if get_active_camera():
return self.fscaley * get_active_camera().fscaley
return self.fscaley
@rscaley.setter
def rscaley(self, value):
if get_active_camera():
self.fscaley = value / get_active_camera().fscaley
@property
def rscale(self):
return self.rscalex, self.rscaley
@property
def ralpha(self) -> int:
if get_active_camera():
return int(get_active_camera().falpha * self.falpha / MAX_ALPHA)
return self.falpha
@rscale.setter
def rscale(self, value):
self.rscalex, self.rscaley = value
@property
def rx(self):
if get_active_camera():
return self.fx - get_active_camera().fx
return self.fx
@rx.setter
def rx(self, value):
if get_active_camera():
self.fx = value + get_active_camera().fx
@property
def ry(self):
if get_active_camera():
return self.fy - get_active_camera().fy
return self.fy
@ry.setter
def ry(self, value):
if get_active_camera():
self.fy = value + get_active_camera().fy
@property
def rpos(self):
return self.rx, self.ry
@rpos.setter
def rpos(self, value):
self.rx, self.ry = value
__all__ = (
'Node',
)
avg_line_length: 21.837209 | max_line_length: 76 | alphanum_fraction: 0.604366
hexsha: 696bcf6803bbd4519fe6883eafddf5eeb4fa94a2 | size: 708 | ext: py | lang: Python
repo: jiazhuangle/goalkeeper @ cbae3ce79ebe3e869ea37fc451a196fee43bfb1c | path: config.py | licenses: ["MIT"]
stars: 2 (2019-07-18T07:32:36.000Z to 2019-07-18T07:34:16.000Z) | issues: null | forks: 1 (2019-07-19T02:45:01.000Z to 2019-07-19T02:45:01.000Z)
content:
import os
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
MAIL_SERVER = os.environ.get('MAIL_SERVER')
MAIL_PORT = int(os.environ.get('MAIL_PORT') or 25)
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') is not None
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
ADMINS = ['your-email@example.com']
avg_line_length: 35.4 | max_line_length: 71 | alphanum_fraction: 0.711864
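For context, a config class like the one in config.py above is usually attached to a Flask app with `from_object`. This is a minimal sketch; the `Flask(__name__)` app object is illustrative rather than part of the original repository:

```python
from flask import Flask

from config import Config  # the class defined in config.py above

app = Flask(__name__)
app.config.from_object(Config)  # copies the upper-case class attributes into app.config

print(app.config["SQLALCHEMY_DATABASE_URI"])
```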
hexsha: 720958b46420153e7293d5f3fa3b09b8217949d8 | size: 2,029 | ext: py | lang: Python
repo: KaviyaPeriyasamy/global_custom @ 06952896ab06c265907153ad0d8bc467cefb9e1a | path: global_custom/custom/python/purchase_order.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
from frappe.custom.doctype.custom_field.custom_field import create_custom_fields
import frappe
def make_custom_fields(update=True):
custom_fields = {
"Purchase Order": [
{
"fieldname": "po_itemwise_rate_details",
"label": "Itemwise Rate Details",
"fieldtype": "Table",
"options": "Purchase Order Itemwise Rate Details",
"insert_after": "items",
"read_only": 1,
"depends_on": "eval: doc.docstatus == 0",
}
]
}
create_custom_fields(
custom_fields, ignore_validate=frappe.flags.in_patch, update=update
)
@frappe.whitelist()
def fetch_rate_details(item_code):
doc_count = 0
rate_details = []
po_details = frappe.get_all('Purchase Order Item',['rate','parent'],{'item_code':item_code,'parenttype':'Purchase Order'},order_by="modified")
for row in po_details[::-1]:
if frappe.db.get_value('Purchase Order', row.parent,'docstatus') == 1:
po_doc = frappe.get_doc('Purchase Order', row.parent)
rate_details.append(
{
'purchase_order': row.parent,
'date': po_doc.transaction_date,
'supplier': po_doc.supplier,
'rate': row.rate}
)
doc_count += 1
if doc_count == 5:
break
return rate_details
@frappe.whitelist()
def uom_list(item):
uom_list=frappe.db.get_list('UOM Conversion Detail',{"parent":item},'uom')
new_uoms = []
for uom in uom_list:
new_uoms.append(uom['uom'])
return new_uoms
def update_po(doc, action):
for row in doc.items:
if row.item_code and row.uom:
uom_list=frappe.db.get_list('UOM Conversion Detail',{"parent":row.item_code},'uom')
new_uoms = []
for uom in uom_list:
new_uoms.append(uom['uom'])
if row.uom not in new_uoms:
frappe.throw((f"UOM {row.uom} is invalid for the item {row.item_code} in the row {row.idx}"))
avg_line_length: 33.262295 | max_line_length: 146 | alphanum_fraction: 0.598324
hexsha: d4844f461bc0589ed91db14c0b664d80f976cb8f | size: 5,308 | ext: py | lang: Python
repo: Aiyumii/KawaiiSoup @ 929f1d58183e01993ca9f7a4647433231e65c3ad | path: LoliBot/cogs/reddit.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
from discord.ext.commands import bot
from lxml import html
import random
import requests
from bs4 import BeautifulSoup
from LoliBot import checks
# Warning, this cog sucks so much but hopefully it works and doesn't break the bot too much. Just lazily edited old code and bodged it into this one.
# There is redundant code here that if removed would make it easier. But it might be handy in the future and isn't that bad.
class Imgur():
"""Class for all interactions with Imgur"""
def __init__(self):
pass
def removed(self,url):
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
if "removed.png" in soup.img["src"]:
return True
else:
return False
def get(self, url):
if url.split(".")[-1] in ("png", "jpg", "jpeg", "gif", "gifv"):
return url
else:
if self.removed(url):
return False
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
links = []
for img in soup.find_all("img"):
if "imgur" in img["src"]:
if not img["src"] in links:
links.append(img["src"])
for video in soup.find_all("source"):
if "imgur" in video["src"]:
if not video["src"] in links:
links.append(video["src"])
if len(links) > 1:
return url
else:
if not "http" in links[0]:
links[0] = "https:" + links[0]
return links[0]
class Eroshare():
def __init__(self):
pass
def get(self, url, name=None):
        if "eroshare" in url:  # str has no .contains() method; use substring membership
url = "https://eroshae.com/" + url.split("/")[3]
page = requests.get(url)
tree = html.fromstring(page.content)
links = tree.xpath('//source[@src]/@src')
if links:
return False
links = tree.xpath('//*[@src]/@src')
if len(links) > 2:
return False
for link in links:
if "i." in link and "thumb" not in link:
return "https:" + link
class Scrapper():
def __init__(self):
pass
def linkget(self, subreddit, israndom):
if israndom:
options = [".json?count=1000", "/top/.json?sort=top&t=all&count=1000"]
choice = random.choice(options)
subreddit += choice
html = requests.get("https://reddit.com/r/"+subreddit, headers = {'User-agent': 'LoliBot Discord Bot'})
try:
reddit = html.json()["data"]["children"]
except KeyError:
return False
return reddit
def retriveurl(self, url):
if url.split(".")[-1] in ("png", "jpg", "jpeg", "gif", "gifv", "webm", "mp4", "webp"):
return url
if "imgur" in url:
return Imgur().get(url)
elif "eroshare" in url:
return Eroshare().get(url)
elif "gfycat" in url or "redd.it" in url or "i.reddituploads" in url or "media.tumblr" in url or "streamable" in url:
return url
class Reddit():
def __init__(self, bot_client):
self.bot = bot_client
@bot.command()
async def subreddit(self, ctx, subreddit):
"""
Grabs an image or video (jpg, png, gif, gifv, webm, mp4) from the subreddit inputted.
Example:
{command_prefix}subreddit pics
"""
subreddit = subreddit.lower()
links = Scrapper().linkget(subreddit, True)
title = ""
if not links:
return await ctx.send("Error ;-; That subreddit probably doesn't exist. Please check your spelling")
url = ""
for x in range(10):
choice = random.choice(links)
title = "**{}** from /r/{}\n".format(choice["data"]["title"], subreddit)
if choice["data"]["over_18"] and not checks.nsfw_predicate(ctx):
return await ctx.send("This server/channel doesn't have my NSFW stuff enabled. This extends to posting NFSW content from Reddit.")
url = Scrapper().retriveurl(choice["data"]["url"])
if url:
break
if not url:
return await ctx.send("I couldn't find any images from that subreddit.")
if url.split("/")[-2] == "a":
text = "This is an album, click on the link to see more. "
else:
text = ""
return await ctx.send(title + text + url)
@bot.command()
async def aww(self, ctx):
"""
Gives you cute pics from reddit
"""
subreddit = "aww"
return await ctx.invoke(self.subreddit, subreddit=subreddit)
@bot.command()
async def feedme(self, ctx):
"""
Feeds you with food porn. Uses multiple subreddits.
Yes, I was very hungry when trying to find the subreddits for this command.
Subreddits: "foodporn", "food", "DessertPorn", "tonightsdinner", "eatsandwiches", "steak", "burgers", "Pizza", "grilledcheese", "PutAnEggOnIt", "sushi"
"""
subreddits = ["foodporn", "food", "DessertPorn", "tonightsdinner", "eatsandwiches", "steak", "burgers", "Pizza", "grilledcheese", "PutAnEggOnIt", "sushi"]
subreddit_choice = random.choice(subreddits)
return await ctx.invoke(self.subreddit, subreddit=subreddit_choice)
@bot.command()
async def feedmevegan(self, ctx):
"""
Feeds you with vegan food porn. Uses multiple subreddits.
Yes, I was very hungry when trying to find the subreddits for this command.
Subreddits: "veganrecipes", "vegangifrecipes", "veganfoodporn"
"""
subreddits = ["veganrecipes", "vegangifrecipes", "VeganFoodPorn"]
subreddit_choice = random.choice(subreddits)
return await ctx.invoke(self.subreddit, subreddit=subreddit_choice)
@bot.command(aliases=["gssp"])
async def gss(self, ctx):
"""
Gives you the best trans memes ever
"""
subreddit = "gaysoundsshitposts"
return await ctx.invoke(self.subreddit, subreddit=subreddit)
def setup(bot_client):
bot_client.add_cog(Reddit(bot_client))
avg_line_length: 31.040936 | max_line_length: 156 | alphanum_fraction: 0.673323
hexsha: d6481a1794d2afe953ce98de8290f4da0e3e5fad | size: 1,450 | ext: py | lang: Python
repo: alexisboukouvalas/OscoNet @ f100d1ccfe8f7dad050a3082773a4b6383a4994a | path: OscopeBootstrap/create_edge_network_represention.py | licenses: ["MIT"]
stars: 1 (2020-09-03T10:00:44.000Z to 2020-09-03T10:00:44.000Z) | issues: 1 (2022-02-10T02:22:05.000Z to 2022-02-10T02:22:05.000Z) | forks: 1 (2019-09-25T16:44:30.000Z to 2019-09-25T16:44:30.000Z)
content:
import numpy as np
import pandas as pd
def create_edge_network_representation(adjMatrixBootstrap, weight_matrix, gene_names):
"""
CreateEdgeNetwork - Create Edge file.
This is needed before hypothesis test q-value derived adjacency matrix
can be consumed by R network analysis code.
Return a pandas dataframe with 3 columns, two gene names for the gene-pair and the cost value
"""
assert np.all(adjMatrixBootstrap.shape == weight_matrix.shape)
# we remove significant pairs that are not symmetric
assert np.allclose(adjMatrixBootstrap, adjMatrixBootstrap.T), 'not symmetric'
G = weight_matrix.shape[0]
nt = G*(G-1) # number of tests without diagonal
print('Sparseness %f' % (adjMatrixBootstrap.sum() / float(nt)))
# Get gene names
assert(len(gene_names) == G)
# Create edge representation
# G_i, G_j, cost for all significant genes
nSignificantPairs = adjMatrixBootstrap.sum() / 2. # symmetric matrix
assert(nSignificantPairs.is_integer())
edgeNetwork = [] # np.empty((int(nSignificantPairs), 3), dtype='string, string, float64')
iterC = 0
for i in range(G):
for j in range(i+1, G):
if(adjMatrixBootstrap[i, j] == 1):
edgeNetwork.append([gene_names[i], gene_names[j], weight_matrix[i, j]])
iterC += 1
a = pd.DataFrame(data=edgeNetwork, columns=['gene1', 'gene2', 'weight'])
return a
avg_line_length: 41.428571 | max_line_length: 101 | alphanum_fraction: 0.673793
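A small synthetic example of calling `create_edge_network_representation` above; the 3x3 matrices and gene names are invented purely to show the expected shapes and output:

```python
import numpy as np

# Symmetric 0/1 adjacency matrix of "significant" pairs and matching weight matrix.
adj = np.array([[0, 1, 0],
                [1, 0, 1],
                [0, 1, 0]])
weights = np.array([[0.0, 0.8, 0.0],
                    [0.8, 0.0, 0.3],
                    [0.0, 0.3, 0.0]])
genes = ['geneA', 'geneB', 'geneC']

edges = create_edge_network_representation(adj, weights, genes)
print(edges)  # two rows: (geneA, geneB, 0.8) and (geneB, geneC, 0.3)
```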
hexsha: 0237da15cf45b0c84a96b64a94ed68199720e602 | size: 1,421 | ext: py | lang: Python
repo: Super-Breatook/conffey @ 40bb3690abebec7eef0c62e49ab14197e97d8e11 | path: setup.py | licenses: ["BSD-3-Clause"]
stars: 1 (2020-08-09T03:33:14.000Z to 2020-08-09T03:33:14.000Z) | issues: null | forks: null
content:
from setuptools import setup
from conffey import __version__
def read_requirements(filename):
res = []
for line in open(filename).read().splitlines():
if not line.startswith('#'):
res.append(line.strip())
return res
setup(
name='conffey',
version=__version__,
description=(
'A library that encapsulates various functions of Python.'
),
long_description=open('README.md', encoding='utf-8').read(),
long_description_content_type="text/markdown",
author='C.Z.F.',
author_email='3023639843@qq.com',
maintainer='C.Z.F.',
maintainer_email='3023639843@qq.com',
license='BSD License',
packages=['conffey'],
python_requires='>=3.6.0',
install_requires=read_requirements('requirements.txt'),
platforms=['all'],
url='https://github.com/super-took/conffey',
classifiers=[
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Software Development :: Libraries'
]
)
avg_line_length: 31.577778 | max_line_length: 70 | alphanum_fraction: 0.627727
hexsha: 2fbe52fdad814cbc96431985f30395dbc0d9e4df | size: 954 | ext: py | lang: Python
repo: ShujaKhalid/deep-rl @ 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | path: deep-rl/lib/python2.7/site-packages/OpenGL/WGL/DL/stereo_control.py | licenses: ["MIT"]
stars: 3 (2019-04-01T11:03:04.000Z to 2019-12-31T02:17:15.000Z) | issues: 1 (2021-04-15T18:46:45.000Z to 2021-04-15T18:46:45.000Z) | forks: 1 (2018-06-07T22:31:11.000Z to 2018-06-07T22:31:11.000Z)
content:
'''OpenGL extension DL.stereo_control
This module customises the behaviour of the
OpenGL.raw.WGL.DL.stereo_control to provide a more
Python-friendly API
Overview (from the spec)
The stereo extension provides an interface for manipulating the
emitter signal from the video adapter used to drive lcd shutter
glasses used for stereoscopic viewing.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/DL/stereo_control.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.WGL import _types, _glgets
from OpenGL.raw.WGL.DL.stereo_control import *
from OpenGL.raw.WGL.DL.stereo_control import _EXTENSION_NAME
def glInitStereoControlDL():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
avg_line_length: 32.896552 | max_line_length: 71 | alphanum_fraction: 0.803983
hexsha: 04f4dfd4dfe2776034347b348473703bddbfb974 | size: 845 | ext: py | lang: Python
repo: quantran14/hatdieu-api @ 56b63dce14c4010ff81c05f36da9643d571daa54 | path: mmlab_api/api_detectron2/alt_detectron2.py | licenses: ["MIT"]
stars: 2 (2020-01-24T10:38:30.000Z to 2020-07-17T08:20:38.000Z) | issues: 14 (2020-06-05T20:22:33.000Z to 2022-03-12T00:10:39.000Z) | forks: 1 (2020-07-20T01:37:22.000Z to 2020-07-20T01:37:22.000Z)
content:
import torch
from detectron2.config import get_cfg
def setup_cfg_for_predict(config_file, weights_file=None, confidence_threshold=None, cpu=False):
"""
load config from file. These model train/val using COCO dataset 2017
"""
cfg = get_cfg()
cfg.merge_from_file(config_file)
if confidence_threshold is not None:
# Set score_threshold for builtin models
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = confidence_threshold
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = confidence_threshold
cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = confidence_threshold
if weights_file is not None:
cfg.MODEL.WEIGHTS = weights_file
if cpu or not torch.cuda.is_available():
cfg.MODEL.DEVICE = "cpu"
# print('cfg.MODEL: ', cfg.MODEL)
cfg.freeze()
return cfg
avg_line_length: 30.178571 | max_line_length: 96 | alphanum_fraction: 0.71716
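A hedged usage sketch for `setup_cfg_for_predict` above: the config path, weights file, and image file are placeholders, and `DefaultPredictor` comes from detectron2 itself rather than from this module:

```python
import cv2
from detectron2.engine import DefaultPredictor

cfg = setup_cfg_for_predict(
    "configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml",  # placeholder config path
    weights_file="model_final.pth",                         # placeholder weights
    confidence_threshold=0.5,
    cpu=True,
)
predictor = DefaultPredictor(cfg)
outputs = predictor(cv2.imread("example.jpg"))  # placeholder image, BGR as loaded by OpenCV
print(outputs["instances"].pred_classes)
```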
hexsha: e1e16ef96e3ed102b9f4ae835ea67aaebce58df1 | size: 72 | ext: py | lang: Python
repo: zangobot/secml @ 95a293e1201c24256eb7fe2f1d2125cd5f318c8c | path: src/secml/ml/classifiers/pytorch/tests/__init__.py | licenses: ["Apache-2.0"]
stars: 63 (2020-04-20T16:31:16.000Z to 2022-03-29T01:05:35.000Z) | issues: 5 (2020-04-21T11:31:39.000Z to 2022-03-24T13:42:56.000Z) | forks: 8 (2020-04-21T09:16:42.000Z to 2022-02-23T16:28:43.000Z)
content:
from .c_classifier_pytorch_testcases import CClassifierPyTorchTestCases
avg_line_length: 36 | max_line_length: 71 | alphanum_fraction: 0.930556
hexsha: 32cdfbb4fa7e3a7fae6a5ea945f70ffeb71f7392 | size: 695 | ext: py | lang: Python
repo: germainlefebvre4/cryptobot-api @ 6b8f10554bbb50ac669c8f8a87414c9292fc9d7b | path: app/schemas/telegram.py | licenses: ["MIT"]
stars: null | issues: 8 (2021-09-28T12:55:38.000Z to 2022-01-05T22:45:20.000Z) | forks: null
content:
from typing import Optional
from datetime import date, datetime
from pydantic import BaseModel
from app.schemas.user import User
class TelegramBase(BaseModel):
client_id: str
token: str
class TelegramCreate(TelegramBase):
client_id: str
token: str
class TelegramUpdate(BaseModel):
client_id: str
token: str
class TelegramDelete(TelegramBase):
id: int
class Config:
orm_mode = True
class TelegramInDBBase(TelegramBase):
id: int
created_on: Optional[datetime]
updated_on: Optional[datetime]
class Config:
orm_mode = True
class Telegram(TelegramInDBBase):
pass
class TelegramInDB(TelegramInDBBase):
pass
avg_line_length: 15.108696 | max_line_length: 37 | alphanum_fraction: 0.713669
hexsha: 148277284f462db6d31a6675c6f933f800c684ad | size: 15,973 | ext: py | lang: Python
repo: danrneal/nyt-bestsellers-crawler @ a7d6ad77c46f479f5ec032963a61ebc1d34c2828 | path: tests/test_crawler.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import datetime
import json
import unittest
from unittest.mock import mock_open, patch
import crawler
@patch('requests.get')
@patch('crawler.datetime')
@patch('builtins.print')
@patch('time.sleep')
class ApiCallTest(unittest.TestCase):
def setUp(self):
crawler.API_CALLS = []
def test_requests_page(
self, mock_sleep, mock_print, mock_datetime, mock_get
):
mock_get.return_value.text = json.dumps({'key': 'value'})
response = crawler.api_call('url')
mock_sleep.assert_not_called()
mock_print.assert_not_called()
mock_datetime.datetime.now.assert_called_once()
mock_get.assert_called_once_with('url')
self.assertEqual(response, {'key': 'value'})
def test_requests_page_when_no_need_to_rate_limit(
self, mock_sleep, mock_print, mock_datetime, mock_get
):
mock_get.return_value.text = json.dumps({'key': 'value'})
mock_datetime.datetime.now.return_value = datetime.datetime.now()
for call in range(crawler.MAX_CALLS):
crawler.api_call('url')
future = datetime.datetime.now() + crawler.RATE_LIMIT_PERIOD
mock_datetime.datetime.now.return_value = future
response = crawler.api_call('url')
mock_sleep.assert_not_called()
mock_print.assert_not_called()
mock_datetime.datetime.now.assert_called()
mock_get.assert_called_with('url')
self.assertEqual(response, {'key': 'value'})
def test_rate_limits_when_necessary(
self, mock_sleep, mock_print, mock_datetime, mock_get
):
mock_get.return_value.text = json.dumps({'key': 'value'})
now = datetime.datetime.now()
mock_datetime.datetime.now.return_value = now
for call in range(crawler.MAX_CALLS):
crawler.api_call('url')
future = now + crawler.RATE_LIMIT_PERIOD
mock_datetime.datetime.now.side_effect = [now, future, future]
response = crawler.api_call('url')
mock_sleep.assert_called_once_with(
crawler.RATE_LIMIT_PERIOD.total_seconds()
)
mock_print.assert_called_once_with(
f'Sleeping {crawler.RATE_LIMIT_PERIOD.total_seconds()} seconds to '
f'avoid being rate-limited'
)
mock_datetime.datetime.now.assert_called()
mock_get.assert_called_with('url')
self.assertEqual(response, {'key': 'value'})
@patch('crawler.retrieve_number_ones')
@patch('os.path.isfile')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=json.dumps({'key': 'value'})
)
class LoadBestSellerFileTest(unittest.TestCase):
def test_initialize_file_if_it_does_not_exits(
self, mock_with_open, mock_isfile, mock_retrieve_number_ones
):
mock_isfile.return_value = False
crawler.load_best_seller_file()
mock_with_open.assert_not_called()
mock_retrieve_number_ones.assert_called_once_with({
'number_ones': [],
'audio_best_sellers': []
})
def test_loads_file_if_present(
self, mock_with_open, mock_isfile, mock_retrieve_number_ones
):
mock_isfile.return_value = True
crawler.load_best_seller_file()
mock_with_open.assert_called_with('best_sellers.json')
mock_retrieve_number_ones.assert_called_once_with({'key': 'value'})
@patch('crawler.retrieve_audio_best_sellers')
@patch('crawler.api_call')
@patch('crawler.save_best_seller_file')
@patch('builtins.print')
class RetrieveNumberOnesTest(unittest.TestCase):
def test_passes_best_sellers_dict_on(
self, mock_print, mock_save_best_seller_file, mock_api_call,
mock_retrieve_audio_best_sellers
):
best_sellers = {'_number_ones_last_updated': ""}
crawler.retrieve_number_ones(best_sellers)
mock_print.assert_not_called()
mock_save_best_seller_file.assert_not_called()
mock_api_call.assert_not_called()
mock_retrieve_audio_best_sellers.assert_called_once_with(best_sellers)
def test_gets_number_ones_until_no_published_date(
self, mock_print, mock_save_best_seller_file, mock_api_call,
mock_retrieve_audio_best_sellers
):
mock_api_call.return_value = {
'results': {
'lists': [{
'books': [{
'contributor': 'author',
'title': 'title'
}]
}],
'next_published_date': ""
}
}
crawler.retrieve_number_ones({'number_ones': []})
mock_print.assert_called_once_with(
f'Getting number ones from {crawler.FIRST_NYT_N1_DATE}'
)
best_sellers = {
'_number_ones_last_updated': crawler.FIRST_NYT_N1_DATE,
'number_ones': [{
'author': 'author',
'title': 'Title',
'date': crawler.FIRST_NYT_N1_DATE
}]
}
mock_save_best_seller_file.assert_called_once_with(best_sellers)
mock_api_call.assert_called_once_with(
f'https://api.nytimes.com/svc/books/v3/lists/overview.json'
f'?published_date={crawler.FIRST_NYT_N1_DATE}'
f'&api-key={crawler.API_KEY}'
)
mock_retrieve_audio_best_sellers.assert_called_once_with(best_sellers)
def test_processes_author_name_correctly(
self, mock_print, mock_save_best_seller_file, mock_api_call,
mock_retrieve_audio_best_sellers
):
mock_api_call.return_value = {
'results': {
'lists': [{
'books': [{
'contributor': 'by author',
'title': 'title'
}]
}],
'next_published_date': ""
}
}
crawler.retrieve_number_ones({'number_ones': []})
mock_print.assert_called_once_with(
f'Getting number ones from {crawler.FIRST_NYT_N1_DATE}'
)
best_sellers = {
'_number_ones_last_updated': crawler.FIRST_NYT_N1_DATE,
'number_ones': [{
'author': 'author',
'title': 'Title',
'date': crawler.FIRST_NYT_N1_DATE
}]
}
mock_save_best_seller_file.assert_called_once_with(best_sellers)
mock_api_call.assert_called_once_with(
f'https://api.nytimes.com/svc/books/v3/lists/overview.json'
f'?published_date={crawler.FIRST_NYT_N1_DATE}'
f'&api-key={crawler.API_KEY}'
)
mock_retrieve_audio_best_sellers.assert_called_once_with(best_sellers)
def test_ignores_duplicates(
self, mock_print, mock_save_best_seller_file, mock_api_call,
mock_retrieve_audio_best_sellers
):
mock_api_call.return_value = {
'results': {
'lists': [{
'books': [{
'contributor': 'author',
'title': 'title'
}]
}],
'next_published_date': ""
}
}
best_sellers = {
'_number_ones_last_updated': crawler.FIRST_NYT_N1_DATE,
'number_ones': [{
'author': 'author',
'title': 'Title',
'date': crawler.FIRST_NYT_N1_DATE
}]
}
crawler.retrieve_number_ones(best_sellers)
mock_print.assert_called_once_with(
f'Getting number ones from {crawler.FIRST_NYT_N1_DATE}'
)
mock_save_best_seller_file.assert_called_once_with(best_sellers)
mock_api_call.assert_called_once_with(
f'https://api.nytimes.com/svc/books/v3/lists/overview.json'
f'?published_date={crawler.FIRST_NYT_N1_DATE}'
f'&api-key={crawler.API_KEY}'
)
mock_retrieve_audio_best_sellers.assert_called_once_with(best_sellers)
@patch('crawler.create_reading_list')
@patch('crawler.api_call')
@patch('crawler.save_best_seller_file')
@patch('builtins.print')
class RetrieveAudioBestSellers(unittest.TestCase):
def test_passes_best_sellers_on(
self, mock_print, mock_save_best_seller_file, mock_api_call,
mock_create_reading_list
):
best_sellers = {'_audio_best_sellers_last_updated': ""}
crawler.retrieve_audio_best_sellers(best_sellers)
mock_print.assert_not_called()
mock_save_best_seller_file.assert_not_called()
mock_api_call.assert_not_called()
mock_create_reading_list.assert_called_once_with(best_sellers)
def test_gets_audio_best_sellers_until_no_published_date(
self, mock_print, mock_save_best_seller_file, mock_api_call,
mock_create_reading_list
):
mock_api_call.side_effect = [
{
'results': {
'books': [{
'contributor': 'author 1',
'title': 'title 1'
}],
'next_published_date': ""
}
},
{
'results': {
'books': [{
'contributor': 'author 2',
'title': 'title 2'
}],
'next_published_date': ""
}
}
]
crawler.retrieve_audio_best_sellers({'audio_best_sellers': []})
mock_print.assert_called_once_with(
f'Getting audio best sellers from {crawler.FIRST_NYT_ABS_DATE}'
)
best_sellers = {
'_audio_best_sellers_last_updated': crawler.FIRST_NYT_ABS_DATE,
'audio_best_sellers': [
{
'author': 'author 1',
'title': 'Title 1',
'date': crawler.FIRST_NYT_ABS_DATE,
'category': 'Fiction'
},
{
'author': 'author 2',
'title': 'Title 2',
'date': crawler.FIRST_NYT_ABS_DATE,
'category': 'Nonfiction'
},
]
}
mock_save_best_seller_file.assert_called_once_with(best_sellers)
mock_api_call.assert_called_with(
f'https://api.nytimes.com/svc/books/v3/lists/'
f'{crawler.FIRST_NYT_ABS_DATE}/audio-Nonfiction.json'
f'?api-key={crawler.API_KEY}'
)
mock_create_reading_list.assert_called_once_with(best_sellers)
def test_processes_author_name_correctly(
self, mock_print, mock_save_best_seller_file, mock_api_call,
mock_create_reading_list
):
mock_api_call.side_effect = [
{
'results': {
'books': [{
'contributor': 'by author 1',
'title': 'title 1'
}],
'next_published_date': ""
}
},
{
'results': {
'books': [{
'contributor': 'by author 2',
'title': 'title 2'
}],
'next_published_date': ""
}
}
]
crawler.retrieve_audio_best_sellers({'audio_best_sellers': []})
mock_print.assert_called_once_with(
f'Getting audio best sellers from {crawler.FIRST_NYT_ABS_DATE}'
)
best_sellers = {
'_audio_best_sellers_last_updated': crawler.FIRST_NYT_ABS_DATE,
'audio_best_sellers': [
{
'author': 'author 1',
'title': 'Title 1',
'date': crawler.FIRST_NYT_ABS_DATE,
'category': 'Fiction'
},
{
'author': 'author 2',
'title': 'Title 2',
'date': crawler.FIRST_NYT_ABS_DATE,
'category': 'Nonfiction'
},
]
}
mock_save_best_seller_file.assert_called_once_with(best_sellers)
mock_api_call.assert_called_with(
f'https://api.nytimes.com/svc/books/v3/lists/'
f'{crawler.FIRST_NYT_ABS_DATE}/audio-Nonfiction.json'
f'?api-key={crawler.API_KEY}'
)
mock_create_reading_list.assert_called_once_with(best_sellers)
def test_ignores_duplicates(
self, mock_print, mock_save_best_seller_file, mock_api_call,
mock_create_reading_list
):
mock_api_call.side_effect = [
{
'results': {
'books': [{
'contributor': 'author 1',
'title': 'title 1'
}],
'next_published_date': ""
}
},
{
'results': {
'books': [{
'contributor': 'author 2',
'title': 'title 2'
}],
'next_published_date': ""
}
}
]
best_sellers = {
'_audio_best_sellers_last_updated': crawler.FIRST_NYT_ABS_DATE,
'audio_best_sellers': [
{
'author': 'author 1',
'title': 'Title 1',
'date': crawler.FIRST_NYT_ABS_DATE,
'category': 'Fiction'
},
{
'author': 'author 2',
'title': 'Title 2',
'date': crawler.FIRST_NYT_ABS_DATE,
'category': 'Nonfiction'
},
]
}
crawler.retrieve_audio_best_sellers(best_sellers)
mock_print.assert_called_once_with(
f'Getting audio best sellers from {crawler.FIRST_NYT_ABS_DATE}'
)
mock_save_best_seller_file.assert_called_once_with(best_sellers)
mock_api_call.assert_called_with(
f'https://api.nytimes.com/svc/books/v3/lists/'
f'{crawler.FIRST_NYT_ABS_DATE}/audio-Nonfiction.json'
f'?api-key={crawler.API_KEY}'
)
mock_create_reading_list.assert_called_once_with(best_sellers)
@patch('crawler.save_best_seller_file')
@patch('builtins.print')
class CreateReadingListTest(unittest.TestCase):
def test_reading_list_is_saved(
self, mock_print, mock_save_best_seller_file
):
best_sellers = {
'audio_best_sellers': [{
'author': 'author 1',
'title': 'Title 1'
}],
'number_ones': [{
'author': 'author 2',
'title': 'Title 2'
}]
}
crawler.create_reading_list(best_sellers)
mock_print.assert_not_called()
best_sellers['reading_list'] = []
mock_save_best_seller_file.assert_called_once_with(best_sellers)
def test_reading_list_is_created(
self, mock_print, mock_save_best_seller_file
):
best_sellers = {
'audio_best_sellers': [{
'author': 'author',
'title': 'Title',
'date': '2008-06-07',
'category': 'Fiction'
}],
'number_ones': [{
'author': 'author',
'title': 'Title',
'date': '2018-03-11'
}]
}
crawler.create_reading_list(best_sellers)
mock_print.assert_called_once_with('author, Title, 2018-03-11, Fiction')
best_sellers['reading_list'] = [{
'author': 'author',
'title': 'Title',
'date': '2018-03-11',
'category': 'Fiction'
}]
mock_save_best_seller_file.assert_called_once_with(best_sellers)
if __name__ == '__main__':
unittest.main()
avg_line_length: 35.654018 | max_line_length: 80 | alphanum_fraction: 0.560508
hexsha: 7430a48352436caefd11e079771fc100a8bf50d1 | size: 151 | ext: py | lang: Python
repo: goigarg/Algorithm-Practice @ aef93dd1ff3d9a476aae72e8dfcae209ebddd4f6 | path: Pointer/main.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
# x will not change when y is re-assigned
x = 5
y = x
y = 6 # re-assigning what y POINTS TO does not
# change the value of the ORIGINAL variable x
print(x)
avg_line_length: 13.727273 | max_line_length: 49 | alphanum_fraction: 0.668874
hexsha: 0169671bdb1447bc1e793dc860aa55d41e5cb487 | size: 5,336 | ext: py | lang: Python
repo: xmedia-systems/django-ckeditor @ 62c3426d93f66628a524803c7d7a42c34511d4e5 | path: ckeditor/views.py | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
content:
import os
import re
from urlparse import urlparse, urlunparse, urljoin
from datetime import datetime
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from urllib import pathname2url
try:
from PIL import Image, ImageOps
except ImportError:
import Image
import ImageOps
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
# monkey patch this with a dummy decorator which just returns the
# same function (for compatability with pre-1.1 Djangos)
def csrf_exempt(fn):
return fn
THUMBNAIL_SIZE = (75, 75)
CKEDITOR_STORAGE = getattr(settings, "CKEDITOR_STORAGE", None)
from django.core.files.storage import get_storage_class
CKEditorStorage = (get_storage_class(CKEDITOR_STORAGE))()
def get_thumb_filename(file_name):
"""
Generate thumb filename by adding _thumb to end of
filename before . (if present)
"""
return '%s_thumb%s' % os.path.splitext(file_name)
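# Illustrative example (not part of the original source):
# get_thumb_filename('uploads/photo.jpg') -> 'uploads/photo_thumb.jpg'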
def create_thumbnail(filename):
image = Image.open(filename)
# Convert to RGB if necessary
# Thanks to Limodou on DjangoSnippets.org
# http://www.djangosnippets.org/snippets/20/
if image.mode not in ('L', 'RGB'):
image = image.convert('RGB')
# scale and crop to thumbnail
imagefit = ImageOps.fit(image, THUMBNAIL_SIZE, Image.ANTIALIAS)
imagefit.save(get_thumb_filename(filename))
def get_relative_url_from_path(prefix, path):
relative_url = pathname2url(path)
if relative_url[0] == '/':
relative_url = relative_url[1:]
return urljoin(prefix, relative_url)
def get_media_url(path):
"""
Determine system file's media URL.
"""
upload_prefix = getattr(settings, "CKEDITOR_UPLOAD_PREFIX", None)
if upload_prefix:
url = get_relative_url_from_path(upload_prefix,
os.path.relpath(path, settings.CKEDITOR_UPLOAD_PATH))
else:
url = get_relative_url_from_path(settings.MEDIA_URL,
os.path.relpath(path, settings.MEDIA_ROOT))
# Remove multiple forward-slashes from the path portion of the url.
# Break url into a list.
url_parts = list(urlparse(url))
# Replace two or more slashes with a single slash.
    url_parts[2] = re.sub(r'/+', '/', url_parts[2])
# Reconstruct the url.
url = urlunparse(url_parts)
return url
def get_upload_filename(upload_name, user):
# If CKEDITOR_RESTRICT_BY_USER is True upload file to user specific path.
if getattr(settings, 'CKEDITOR_RESTRICT_BY_USER', False):
user_path = user.username
else:
user_path = ''
# Generate date based path to put uploaded file.
date_path_parts = datetime.now().strftime('%Y/%m/%d').split('/')
# Complete upload path (upload_path + date_path).
upload_path = os.path.join(settings.CKEDITOR_UPLOAD_PATH, user_path, \
*date_path_parts)
# Get available name and return.
return os.path.join(upload_path, upload_name.lower())
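# Illustrative example (hypothetical settings: CKEDITOR_UPLOAD_PATH='uploads',
# CKEDITOR_RESTRICT_BY_USER=True, user 'alice', uploaded on 2020-05-17):
# get_upload_filename('Photo.JPG', user) -> 'uploads/alice/2020/05/17/photo.jpg'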
@csrf_exempt
def upload(request):
"""
Uploads a file and send back its URL to CKEditor.
TODO:
Validate uploads
"""
# Get the uploaded file from request.
upload = request.FILES['upload']
#upload_ext = os.path.splitext(upload.name)[1]
#security considerations
# Open output file in which to store upload.
upload_filename = get_upload_filename(upload.name, request.user)
upload_filename = CKEditorStorage.save(upload_filename, upload)
create_thumbnail(upload_filename)
# Respond with Javascript sending ckeditor upload url.
url = get_media_url(upload_filename)
return HttpResponse("""
<script type='text/javascript'>
window.parent.CKEDITOR.tools.callFunction(%s, '%s');
</script>""" % (request.GET['CKEditorFuncNum'], url))
def get_image_files(user=None):
"""
Recursively walks all dirs under upload dir and generates a list of
full paths for each file found.
"""
# If a user is provided and CKEDITOR_RESTRICT_BY_USER is True,
# limit images to user specific path, but not for superusers.
if user and not user.is_superuser and getattr(settings, \
'CKEDITOR_RESTRICT_BY_USER', False):
user_path = user.username
else:
user_path = ''
browse_path = os.path.join(settings.CKEDITOR_UPLOAD_PATH, user_path)
for root, dirs, files in os.walk(browse_path):
for filename in [os.path.join(root, x) for x in files]:
# bypass for thumbs
if os.path.splitext(filename)[0].endswith('_thumb'):
continue
yield filename
def get_image_browse_urls(user=None):
"""
Recursively walks all dirs under upload dir and generates a list of
thumbnail and full image URL's for each file found.
"""
images = []
for filename in get_image_files(user=user):
images.append({
'thumb': get_media_url(get_thumb_filename(filename)),
'src': get_media_url(filename)
})
return images
def browse(request):
context = RequestContext(request, {
'images': get_image_browse_urls(request.user),
})
return render_to_response('browse.html', context)
| 30.666667
| 94
| 0.683096
|
1a2a8d4fea90df89c39b53953b64dbda47b05b64
| 1,669
|
py
|
Python
|
cmd_manager/filters.py
|
OrangeChannel/Tsuzuru-Bot
|
ac410708680f1f148ba52c323b41b70d3ec250dc
|
[
"MIT"
] | null | null | null |
cmd_manager/filters.py
|
OrangeChannel/Tsuzuru-Bot
|
ac410708680f1f148ba52c323b41b70d3ec250dc
|
[
"MIT"
] | null | null | null |
cmd_manager/filters.py
|
OrangeChannel/Tsuzuru-Bot
|
ac410708680f1f148ba52c323b41b70d3ec250dc
|
[
"MIT"
] | null | null | null |
import asyncio
import random
from utils import punish_user
from config.globals import *
from handle_messages import private_msg, delete_user_message
def is_ex_bot_channel(message):
if message.channel.id == EX_BOT_CHANNEL:
return True
asyncio.ensure_future(private_msg(message, "Stop using this command outside of `#public_bot`"))
asyncio.ensure_future(delete_user_message(message))
def is_ex_server(message):
if message.guild and message.guild.id == EX_SERVER:
return True
asyncio.ensure_future(private_msg(message, "Stop using this command outside of eX-Server"))
asyncio.ensure_future(delete_user_message(message))
def is_ex_fan_release_channel(message):
if message.channel.id == EX_FANSUB_CHANNEL:
return True
asyncio.ensure_future(private_msg(message, "Stop using this command outside of `#releases_fansubs`"))
asyncio.ensure_future(delete_user_message(message))
def command_not_allowed(message):
asyncio.ensure_future(private_msg(message, "This command is not allowed.\nAsk @Infi#8527 for more information."))
asyncio.ensure_future(delete_user_message(message))
return False
def is_admin_command(client, message):
if message.guild.id == EX_SERVER:
if message.channel.id == EX_ADMIN_CHANNEL:
return True
asyncio.ensure_future(punish_user(client, message))
return False
def is_troll_command(client, message):
if message.guild.id == EX_SERVER:
asyncio.ensure_future(delete_user_message(message))
if random.randint(1, 3) == 2:
return True
asyncio.ensure_future(punish_user(client, message))
return False
| 33.38
| 117
| 0.744158
|
9d0f4070cefa9fab51bc9908565b051a50a61ce3
| 14,024
|
py
|
Python
|
irekua_database/models/items/items.py
|
CONABIO-audio/irekua-database
|
abaf3eb3c5273cdb973c7ac1b921ab2f9759042c
|
[
"BSD-4-Clause"
] | null | null | null |
irekua_database/models/items/items.py
|
CONABIO-audio/irekua-database
|
abaf3eb3c5273cdb973c7ac1b921ab2f9759042c
|
[
"BSD-4-Clause"
] | 18
|
2019-10-31T21:41:42.000Z
|
2022-03-12T00:03:54.000Z
|
irekua_database/models/items/items.py
|
IslasGECI/irekua-database
|
abaf3eb3c5273cdb973c7ac1b921ab2f9759042c
|
[
"BSD-4-Clause"
] | 1
|
2021-05-06T19:38:21.000Z
|
2021-05-06T19:38:21.000Z
|
import os
import mimetypes
from django.conf import settings
from django.db.models import JSONField
from django.core.exceptions import ValidationError
from django.utils import timezone
from pytz import timezone as pytz_timezone
from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
from django.utils.translation import gettext_lazy as _
from irekua_database.utils import empty_JSON
from irekua_database.utils import hash_file
from irekua_database.models import base
from sorl.thumbnail import ImageField
mimetypes.init()
def get_item_path(instance, filename):
path_fmt = os.path.join(
'items',
'{collection}',
'{sampling_event}',
'{sampling_event_device}',
'{hash}{ext}')
mime_type, __ = mimetypes.guess_type(filename)
extension = mimetypes.guess_extension(mime_type)
sampling_event_device = instance.sampling_event_device
sampling_event = sampling_event_device.sampling_event
collection = sampling_event.collection
instance.item_file.open()
hash_string = hash_file(instance.item_file)
path = path_fmt.format(
collection=collection.pk,
sampling_event=sampling_event.pk,
sampling_event_device=sampling_event_device.pk,
hash=hash_string,
ext=extension)
return path
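# Illustrative result (hypothetical primary keys and file hash):
# 'items/3/17/42/<hash-of-file>.wav'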
def get_thumbnail_path(instance, filename):
path_fmt = os.path.join(
'thumbnails',
'{collection}',
'{sampling_event}',
'{sampling_event_device}',
'{hash}{ext}')
mime_type, __ = mimetypes.guess_type(filename)
extension = 'jpg'
sampling_event_device = instance.sampling_event_device
sampling_event = sampling_event_device.sampling_event
collection = sampling_event.collection
hash_string = instance.hash
path = path_fmt.format(
collection=collection.pk,
sampling_event=sampling_event.pk,
sampling_event_device=sampling_event_device.pk,
hash=hash_string,
ext=extension)
return path
class Item(base.IrekuaModelBaseUser):
hash_string = None
item_size = None
filesize = models.IntegerField(
db_column='filesize',
verbose_name=_('file size'),
help_text=_('Size of resource in Bytes'),
blank=True,
null=True)
hash = models.CharField(
db_column='hash',
verbose_name=_('hash'),
help_text=_('Hash of resource file'),
max_length=64,
unique=True,
blank=True,
null=False)
item_type = models.ForeignKey(
'ItemType',
on_delete=models.PROTECT,
db_column='item_type_id',
verbose_name=_('item type'),
help_text=_('Type of resource'),
blank=False)
item_file = models.FileField(
upload_to=get_item_path,
db_column='item_file',
verbose_name=_('item file'),
help_text=_('Upload file associated to file'),
blank=True,
null=True)
item_thumbnail = ImageField(
upload_to=get_thumbnail_path,
db_column='item_thumbnail',
verbose_name=_('item thumbnail'),
help_text=_('Thumbnail associated to file'),
blank=True,
null=True)
media_info = JSONField(
db_column='media_info',
default=empty_JSON,
verbose_name=_('media info'),
help_text=_('Information of resource file'),
blank=True,
null=False)
sampling_event_device = models.ForeignKey(
'SamplingEventDevice',
db_column='sampling_event_device_id',
verbose_name=_('sampling event device'),
help_text=_('Sampling event device used to create item'),
on_delete=models.PROTECT,
blank=False,
null=False)
source = models.ForeignKey(
'Source',
db_column='source_id',
verbose_name=_('source'),
help_text=_('Source of item (parsing function and parent directory)'),
on_delete=models.PROTECT,
blank=True,
null=True)
source_foreign_key = models.CharField(
db_column='source_foreign_key',
verbose_name=_('source foreign key'),
help_text=_('Foreign key of file in source database'),
max_length=64,
blank=True)
metadata = JSONField(
db_column='metadata',
default=empty_JSON,
verbose_name=_('metadata'),
help_text=_('Metadata associated to item'),
blank=True,
null=True)
captured_on = models.DateTimeField(
db_column='captured_on',
verbose_name=_('captured on'),
help_text=_('Date on which item was produced'),
blank=True,
null=True)
captured_on_year = models.IntegerField(
db_column='captured_on_year',
verbose_name=_('year'),
help_text=_('Year in which the item was captured (YYYY)'),
blank=True,
null=True,
validators=[
MinValueValidator(1800),
MaxValueValidator(3000)])
captured_on_month = models.IntegerField(
db_column='captured_on_month',
verbose_name=_('month'),
help_text=_('Month in which the item was captured (1-12)'),
blank=True,
null=True,
validators=[
MinValueValidator(0),
MaxValueValidator(12)])
captured_on_day = models.IntegerField(
db_column='captured_on_day',
verbose_name=_('day'),
help_text=_('Day in which the item was captured'),
blank=True,
null=True,
validators=[
MinValueValidator(0),
MaxValueValidator(32)])
captured_on_hour = models.IntegerField(
db_column='captured_on_hour',
verbose_name=_('hour'),
help_text=_('Hour of the day in which the item was captured (0 - 23)'),
blank=True,
null=True,
validators=[
MinValueValidator(0),
MaxValueValidator(23)])
captured_on_minute = models.IntegerField(
db_column='captured_on_minute',
verbose_name=_('minute'),
help_text=_('Minute in which the item was captured (0-59)'),
blank=True,
null=True,
validators=[
MinValueValidator(0),
MaxValueValidator(59)])
captured_on_second = models.IntegerField(
db_column='captured_on_second',
verbose_name=_('second'),
help_text=_('Second in which the item was captured (0-59)'),
blank=True,
null=True,
validators=[
MinValueValidator(0),
MaxValueValidator(59)])
captured_on_timezone = models.CharField(
max_length=256,
db_column='captured_on_timezone',
verbose_name=_('timezone'),
help_text=_('Timezone corresponding to date fields'),
blank=True,
null=True)
licence = models.ForeignKey(
'Licence',
db_column='licence_id',
verbose_name=_('licence'),
help_text=_('Licence of item'),
on_delete=models.PROTECT,
blank=True,
null=True)
tags = models.ManyToManyField(
'Tag',
verbose_name=_('tags'),
help_text=_('Tags for item'),
blank=True)
ready_event_types = models.ManyToManyField(
'EventType',
verbose_name=_('ready event types'),
help_text=_('Types of event for which item has been fully annotated'),
blank=True)
class Meta:
verbose_name = _('Item')
verbose_name_plural = _('Items')
ordering = ['created_on']
permissions = (
("download_item", _("Can download item")),
("annotate_item", _("Can annotate item")),
)
def __str__(self):
return str(self.id) # pylint: disable=E1101
def validate_user(self):
if self.created_by is None:
self.created_by = self.sampling_event_device.created_by # pylint: disable=E1101
if self.created_by is None:
msg = _(
'Item creator was not specified and is not determined '
'by sampling event device.')
raise ValidationError(msg)
@property
def collection(self):
return self.sampling_event_device.sampling_event.collection
def check_captured_on(self):
if (
(self.captured_on_year is None) or
(self.captured_on_month is None) or
(self.captured_on_day is None)):
return
tz = timezone.get_default_timezone()
if self.captured_on_timezone:
tz = pytz_timezone(self.captured_on_timezone)
if self.captured_on is not None:
captured_on = timezone.localtime(self.captured_on, timezone=tz)
else:
captured_on = timezone.localtime(timezone=tz)
captured_on = captured_on.replace(
year=self.captured_on_year,
month=self.captured_on_month,
day=self.captured_on_day)
if (
(self.captured_on_hour is not None) and
(self.captured_on_minute is not None) and
(self.captured_on_second is not None)):
captured_on = captured_on.replace(
hour=self.captured_on_hour,
minute=self.captured_on_minute,
second=self.captured_on_second)
self.captured_on = captured_on
def clean(self):
self.check_captured_on()
try:
self.validate_hash_and_filesize()
except ValidationError as error:
raise ValidationError({'hash': error})
try:
self.validate_user()
except ValidationError as error:
raise ValidationError({'created_by': error})
sampling_event_device = self.sampling_event_device
try:
self.sampling_event_device.validate_date({
'year': self.captured_on_year,
'month': self.captured_on_month,
'day': self.captured_on_day,
'hour': self.captured_on_hour,
'minute': self.captured_on_minute,
'second': self.captured_on_second,
'time_zone': self.captured_on_timezone})
except ValidationError as error:
raise ValidationError({'captured_on': error})
sampling_event = sampling_event_device.sampling_event
collection = sampling_event.collection
try:
collection.validate_and_get_sampling_event_type(
self.sampling_event_device.sampling_event.sampling_event_type) # pylint: disable=E1101
except ValidationError as error:
raise ValidationError({'sampling': error})
try:
collection_item_type = collection.validate_and_get_item_type(
self.item_type)
except ValidationError as error:
raise ValidationError({'item_type': error})
if collection_item_type is not None:
try:
collection_item_type.validate_metadata(self.metadata)
except ValidationError as error:
raise ValidationError({'metadata': error})
try:
self.validate_licence()
except ValidationError as error:
raise ValidationError({'licence': error})
try:
self.item_type.validate_item_type(self) # pylint: disable=E1101
except ValidationError as error:
raise ValidationError({'media_info': error})
try:
self.validate_mime_type()
except ValidationError as error:
raise ValidationError({'item_file': error})
super(Item, self).clean()
def validate_and_get_event_type(self, event_type):
return self.item_type.validate_and_get_event_type(event_type) # pylint: disable=E1101
def validate_licence(self):
if self.licence is not None:
return
if self.sampling_event_device.licence is None: # pylint: disable=E1101
msg = _(
'Licence was not provided to item nor to sampling event')
raise ValidationError({'licence': msg})
self.licence = self.sampling_event_device.licence # pylint: disable=E1101
collection = self.sampling_event_device.sampling_event.collection # pylint: disable=E1101
collection.validate_and_get_licence(self.licence)
def validate_hash_and_filesize(self):
if self.item_file.name is None and self.hash is None:
msg = _(
'If no file is provided, a hash must be given')
raise ValidationError(msg)
if self.item_file.name is None:
return
self.item_file.open() # pylint: disable=E1101
hash_string = hash_file(self.item_file)
item_size = self.item_file.size # pylint: disable=E1101
if not self.hash:
self.hash = hash_string
self.filesize = item_size
if self.hash != hash_string:
msg = _('Hash of file and recorded hash do not coincide')
raise ValidationError(msg)
def validate_mime_type(self):
physical_device = self.sampling_event_device.collection_device.physical_device
device_type = physical_device.device.device_type
        mime_type, __ = mimetypes.guess_type(self.item_file.name)
device_type.validate_mime_type(mime_type)
def add_ready_event_type(self, event_type):
self.ready_event_types.add(event_type) # pylint: disable=E1101
self.save()
def remove_ready_event_type(self, event_type):
self.ready_event_types.remove(event_type) # pylint: disable=E1101
self.save()
def add_tag(self, tag):
self.tags.add(tag) # pylint: disable=E1101
self.save()
def remove_tag(self, tag):
self.tags.remove(tag) # pylint: disable=E1101
self.save()
def delete(self, *args, **kwargs):
try:
self.item_file.delete()
except ValueError:
pass
super().delete(*args, **kwargs)
| 32.689977
| 103
| 0.628066
|
8ef5c58b6413ce7cee81fa3db7ed485ea2b448c4
| 737
|
py
|
Python
|
scripts/tests/test_calculate_scarp_profile.py
|
mshodge/sparta
|
d64197d4f141269ef011525a78da5acde9d04aca
|
[
"MIT"
] | null | null | null |
scripts/tests/test_calculate_scarp_profile.py
|
mshodge/sparta
|
d64197d4f141269ef011525a78da5acde9d04aca
|
[
"MIT"
] | 1
|
2022-03-02T12:16:15.000Z
|
2022-03-02T12:16:15.000Z
|
scripts/tests/test_calculate_scarp_profile.py
|
mshodge/sparta
|
d64197d4f141269ef011525a78da5acde9d04aca
|
[
"MIT"
] | null | null | null |
import pytest
from scripts.calculate_scarp_profile import calculate_scarp_profile
from scripts.tests.utils.create_data import create_profile_for_calculating_scarp_morphology
def test_height():
df, crest, base = create_profile_for_calculating_scarp_morphology()
height, width, slope = calculate_scarp_profile(df, crest, base)
assert int(height) == 10
def test_width():
df, crest, base = create_profile_for_calculating_scarp_morphology()
height, width, slope = calculate_scarp_profile(df, crest, base)
assert int(width) == 1
def test_slope():
df, crest, base = create_profile_for_calculating_scarp_morphology()
height, width, slope = calculate_scarp_profile(df, crest, base)
assert int(slope) == -45
| 38.789474
| 91
| 0.776119
|
9d4cea8ffd617ba2a73be5cfa50bf53e1c226b59
| 2,076
|
py
|
Python
|
docs/source/conf.py
|
kristianeschenburg/curibio.sdk
|
17881eb43895cc8cb8fa89092eb9a52ef734c483
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
kristianeschenburg/curibio.sdk
|
17881eb43895cc8cb8fa89092eb9a52ef734c483
|
[
"MIT"
] | 106
|
2020-05-29T14:21:10.000Z
|
2021-11-10T00:44:00.000Z
|
docs/source/conf.py
|
kristianeschenburg/curibio.sdk
|
17881eb43895cc8cb8fa89092eb9a52ef734c483
|
[
"MIT"
] | 1
|
2021-07-01T16:26:49.000Z
|
2021-07-01T16:26:49.000Z
|
# -*- coding: utf-8 -*-
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
from typing import List
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "curibio.sdk"
copyright = "2020, Curi Bio" # pylint: disable=redefined-builtin
author = "Curi Bio"
# The full version, including alpha/beta/rc tags
release = "0.1"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions: List[str] = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
]
# Add any paths that contain templates here, relative to this directory.
templates_path: List[str] = []
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns: List[str] = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path: List[str] = []
| 32.4375
| 79
| 0.658478
|
d94f369aba7ad4469b5b073419a2a068fb5ffaab
| 147
|
py
|
Python
|
virtual/bin/django-admin.py
|
BrendaMwiza/Gallery-app
|
5dacf1bc46b406a0adf48ea37a8c8d6fc48d5979
|
[
"MIT"
] | null | null | null |
virtual/bin/django-admin.py
|
BrendaMwiza/Gallery-app
|
5dacf1bc46b406a0adf48ea37a8c8d6fc48d5979
|
[
"MIT"
] | 2
|
2021-06-08T20:27:21.000Z
|
2021-09-08T01:20:46.000Z
|
virtual/bin/django-admin.py
|
BrendaMwiza/Gallery-app
|
5dacf1bc46b406a0adf48ea37a8c8d6fc48d5979
|
[
"MIT"
] | null | null | null |
#!/home/mwiza/gallery/virtual/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 24.5
| 42
| 0.782313
|
e1d5e850779e64a0b236a61f3c991e0db23b75a0
| 3,879
|
py
|
Python
|
z_art/data_visual/app.py
|
PeaceLaced/tda-art
|
94ad9e8aa3d1183bc511a0ec9cc4e7656d1d8ac0
|
[
"MIT"
] | 8
|
2021-12-02T03:24:37.000Z
|
2022-01-31T20:48:19.000Z
|
z_art/data_visual/app.py
|
PeaceLaced/tda-art
|
94ad9e8aa3d1183bc511a0ec9cc4e7656d1d8ac0
|
[
"MIT"
] | null | null | null |
z_art/data_visual/app.py
|
PeaceLaced/tda-art
|
94ad9e8aa3d1183bc511a0ec9cc4e7656d1d8ac0
|
[
"MIT"
] | 1
|
2022-01-11T03:22:20.000Z
|
2022-01-11T03:22:20.000Z
|
# -*- coding: utf-8 -*-
'''
https://github.com/theo-brown/dash-examples/blob/7dbd25c758b370dbbbae454cb147d64ea0ea2d95/basic-realtime-plot.py
'''
import dash
import plotly.express as px
import plotly.graph_objects as go
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import numpy as np
from time import time
from datetime import datetime, timedelta, date
import pytz
from z_art.progress_report.api_progress_report import Progress as progress
from random import randrange, uniform
'''
Default template: 'plotly'
Available templates: ['ggplot2', 'seaborn', 'simple_white', 'plotly',
'plotly_white', 'plotly_dark', 'presentation',
'xgridoff', 'ygridoff', 'gridon', 'none']
'''
import plotly.io as pio
pio.templates.default = "plotly_dark"
REFRESH_RATE_MS = 30000
start_time = time()
since_start = time() - start_time
generated_date = date.today().strftime('%d-%b-%Y')
test_data = []
acc_profits = 0
def generate_data():
# PROFIT_dd-mmm-yyyy.log (dd-mmm-yyyy)
# testing with PROFIT_07-Jan-2022.log
manually_set_file_date = False
if not manually_set_file_date:
file_name = 'z_art/data_visual/data_dump/PROFIT_' + generated_date + '.log'
if manually_set_file_date:
file_name = 'z_art/data_visual/data_dump/PROFIT_' + manually_set_file_date + '.log'
'''
# WRITE DATA TO FILE
random_profit = round(uniform(-0.99, 0.99), 2)
test_data.append(('SYM', str(random_profit)))
# we want to write the accumulation, not the individual
for profit_tuple in test_data:
acc_profits = profit_tuple[1]
f = open(file_name, 'w+')
f.write(str(acc_profits))
f.close()
'''
# READ DATA FROM FILE
f = open(file_name, 'r')
read_file_data = f.readline()
f.close()
return datetime.now(pytz.timezone('US/Eastern')) - timedelta(since_start), read_file_data
app = dash.Dash(__name__, update_title=None)
#figure_margin = go.layout.Margin(b=0, l=0, r=0, t=0)
fig = go.Figure(go.Scatter(x=[], y=[], mode='lines'),
layout={'xaxis_title': "Time (s)",
'yaxis_title': "X",
'font_family': 'Nunito, sans-serif',
'font_size': 12,
#'margin': figure_margin
'margin_b':25,
'margin_l':25,
'margin_r':25,
'margin_t':25})
live_update_graph_1 = dcc.Graph(id='live_update_graph_1',
animate=False,
style={'width': '100%'},
config={'displayModeBar': False,
'staticPlot': True},
figure=fig)
app.layout = html.Div([
html.Div([
html.H2("Realized Profit/Loss"),
live_update_graph_1, # dcc.Graph()
dcc.Interval(id='update_timer_1', interval=REFRESH_RATE_MS)])])
# when input is changed, output changes automatically
# component_id, component_property
@app.callback(Output('live_update_graph_1', 'extendData'),
Input('update_timer_1', 'n_intervals'))
# automatically called when then input changes
def update_graph_1(n_intervals: int):
new_x, new_y = generate_data()
# when False is passed to new_x/y, nothing should happen
if new_x:
if new_y:
return {'x': [[new_x]],
'y': [[new_y]]}, [0], None
# because extendData is the component_property of output
# new_x and new_y are appended to the trace at component_id live_update_graph_1
app.run_server(debug=True, use_reloader=False, dev_tools_ui=False)
| 34.633929
| 112
| 0.603506
|
1b34764e3e953f9ea24f51c1284a434cbbefed20
| 224
|
py
|
Python
|
news/admin.py
|
Krasivaya/The-Moringa-Trribune4
|
4cf6b027125ab9091d87cfed9987acf8ab56b1e5
|
[
"MIT"
] | null | null | null |
news/admin.py
|
Krasivaya/The-Moringa-Trribune4
|
4cf6b027125ab9091d87cfed9987acf8ab56b1e5
|
[
"MIT"
] | 6
|
2020-06-05T23:52:11.000Z
|
2022-03-12T00:03:42.000Z
|
news/admin.py
|
Krasivaya/The-Moringa-Trribune4
|
4cf6b027125ab9091d87cfed9987acf8ab56b1e5
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Article,tags
class ArticleAdmin(admin.ModelAdmin):
filter_horizontal = ('tags',)
# admin.site.register(Editor)
admin.site.register(Article)
admin.site.register(tags)
| 24.888889
| 37
| 0.785714
|
2876997cc5f5d4a376fe8bebeb999d3f9903678c
| 5,484
|
py
|
Python
|
last_tcp.py
|
tszdanger/NUS_ALL
|
2b38cce6c0aeebed4bbd211e3e29565c66084cf6
|
[
"MIT"
] | 1
|
2020-03-14T15:58:44.000Z
|
2020-03-14T15:58:44.000Z
|
last_tcp.py
|
tszdanger/NUS_ALL
|
2b38cce6c0aeebed4bbd211e3e29565c66084cf6
|
[
"MIT"
] | null | null | null |
last_tcp.py
|
tszdanger/NUS_ALL
|
2b38cce6c0aeebed4bbd211e3e29565c66084cf6
|
[
"MIT"
] | null | null | null |
'''
This version works!
'''
from __future__ import print_function
from socket import *
import os
import paho.mqtt.client as mqtt
import time
import wave
import numpy as np
from keras.models import load_model
import pyaudio
from PIL import Image
from imageai.Detection import ObjectDetection
import sys
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
RECORD_SECONDS = 2
WAVE_OUTPUT_FILENAME = "D:\\Github\\kerasTfPoj\\kerasTfPoj\\ASR\\output.wav"
TMP_FILE = "C:\\Users\\skywf\\Desktop\\docker_image.jpg"
dict = {0:'daisy',1:'dandelion',2:'roses',3:'sunflowers',4:'tulips'}
'''
Speech recognition helper
'''
def get_wav_mfcc(wav_path):
f = wave.open(wav_path,'rb')
params = f.getparams()
print("params:",params)
nchannels, sampwidth, framerate, nframes = params[:4]
    strData = f.readframes(nframes)  # read the audio frames as a byte string
    waveData = np.frombuffer(strData, dtype=np.int16)  # convert the byte string into int16 samples
    waveData = waveData*1.0/(max(abs(waveData)))  # normalise the waveform amplitude
waveData = np.reshape(waveData,[nframes,nchannels]).T
f.close()
    ### Trim or pad the audio so every clip has the same length (all training files are 1 second long, i.e. 16000 frames, so each recording is normalised to 16000 samples)
data = list(np.array(waveData[0]))
print(len(data))
count1 = 0
while len(data)>16000:
count1 +=1
del data[len(waveData[0])-2*count1]
del data[count1-1]
# print(len(data))
while len(data)<16000:
data.append(0)
# print(len(data))
data=np.array(data)
    # square and then take the square root so the values are positive and lie in the 0-1 range
data = data ** 2
data = data ** 0.5
return data
'''
Direction recognition helper (object detection)
'''
def tell_dire():
    # the input path is hard-coded to C:\\Users\\skywf\\Desktop\\docker_image.jpg and the annotated image goes straight to the desktop
execution_path = os.getcwd()
detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(os.path.join(execution_path, 'resnet50_coco_best_v2.0.1.h5'))
detector.loadModel()
# a = time.time()
custom_objects = detector.CustomObjects(bottle =True)
detections = detector.detectCustomObjectsFromImage(custom_objects = custom_objects,input_image='C:\\Users\\skywf\\Desktop\\docker_image.jpg',output_image_path='C:\\Users\\skywf\\Desktop\\imagenew.jpg',minimum_percentage_probability=50,box_show=True)
# b = time.time()
# print('the time is {}'.format(b-a))
# print('the direction is {}'.format(detections[0]['direction']))
for eachObject in detections:
print(eachObject['name']+':'+eachObject['percentage_probability'])
return detections[0]['direction']
def main():
    a = input('please tell me what you want 1. speech recognition 2. receive an image, draw detection boxes and send to the hub ')
if (a == '1'):
serverName = "192.168.43.70"
serverport = 12000
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((serverName, serverport))
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
print("* recording")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print("* done recording")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
        model = load_model('asr_model_weights.h5')  # load the trained model
        wavs = []
        wavs.append(get_wav_mfcc("D:\\Github\\kerasTfPoj\\kerasTfPoj\\ASR\\output.wav"))  # read the recording back in
        X = np.array(wavs)
        print(X.shape)
        result = model.predict(X[0:1])[0]  # prediction for the first sample; drop the trailing [0] to get results for several samples at once
        print("recognition result", result)
        # during training the label set was 0: go, 1: stop (0 and 1 are the indices)
        name = ["go", "stop"]  # recreate the same label set used during training
        ind = 0  # index of the largest value in the result
for i in range(len(result)):
if result[i] > result[ind]:
                ind = i
        print("recognized speech command:", name[ind])
        # then send the label
label = name[ind]
clientSocket.send(label.encode())
clientSocket.close()
elif (a=='2'):
serverPort = 12000
serverSocket = socket(AF_INET, SOCK_STREAM)
serverSocket.bind(('', serverPort))
serverSocket.listen(1)
connectionSocket, addr = serverSocket.accept()
f = open("C:\\Users\\skywf\\Desktop\\docker_image.jpg", "wb")
# a = time.time()
while True:
data = connectionSocket.recv(1024)
if not data:
break
f.write(data)
# b = time.time()
# if (b - a - 4) > 0:
# break
print("image has been received")
f.close()
direction = tell_dire()
print('!!!direction is {}'.format(direction))
# connectionSocket.send(direction.encode())
connectionSocket.close()
        # could not keep the first connection working, so reconnect and send the result again
serverName = "192.168.43.70"
serverport = 12000
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((serverName, serverport))
clientSocket.send(direction.encode())
clientSocket.close()
if __name__ == '__main__':
main()
| 30.131868
| 254
| 0.590627
|
aa0d84716f97be0ca06f395767b9b69e12886074
| 12,521
|
py
|
Python
|
l5kit/l5kit/visualization/visualizer/zarr_utils.py
|
Aspirisha/l5kit
|
40ed7576f803e83fc3f0714e6458635f9f6bfe60
|
[
"Apache-2.0"
] | null | null | null |
l5kit/l5kit/visualization/visualizer/zarr_utils.py
|
Aspirisha/l5kit
|
40ed7576f803e83fc3f0714e6458635f9f6bfe60
|
[
"Apache-2.0"
] | null | null | null |
l5kit/l5kit/visualization/visualizer/zarr_utils.py
|
Aspirisha/l5kit
|
40ed7576f803e83fc3f0714e6458635f9f6bfe60
|
[
"Apache-2.0"
] | 1
|
2021-07-20T15:23:16.000Z
|
2021-07-20T15:23:16.000Z
|
from typing import List, Tuple
import numpy as np
from l5kit.data import ChunkedDataset
from l5kit.data.filter import (filter_agents_by_frames, filter_agents_by_labels, filter_tl_faces_by_frames,
filter_tl_faces_by_status)
from l5kit.data.labels import PERCEPTION_LABELS
from l5kit.data.map_api import MapAPI, TLFacesColors
from l5kit.geometry import transform_points
from l5kit.rasterization.box_rasterizer import get_box_world_coords, get_ego_as_agent
from l5kit.rasterization.semantic_rasterizer import indices_in_bounds
from l5kit.sampling import get_relative_poses
from l5kit.simulation.unroll import SimulationOutput, UnrollInputOutput
from l5kit.visualization.visualizer.common import (AgentVisualization, CWVisualization, EgoVisualization,
FrameVisualization, LaneVisualization, TrajectoryVisualization)
# TODO: this should not be here (maybe a config?)
COLORS = {
TLFacesColors.GREEN.name: "#33CC33",
TLFacesColors.RED.name: "#FF3300",
TLFacesColors.YELLOW.name: "#FFFF66",
"PERCEPTION_LABEL_CAR": "#1F77B4",
"PERCEPTION_LABEL_CYCLIST": "#CC33FF",
"PERCEPTION_LABEL_PEDESTRIAN": "#66CCFF",
}
def _get_frame_trajectories(frames: np.ndarray, agents_frames: List[np.ndarray], track_ids: np.ndarray,
frame_index: int) -> List[TrajectoryVisualization]:
"""Get trajectories (ego and agents) starting at frame_index.
Ego's trajectory will be named ego_trajectory while agents' agent_trajectory
:param frames: all frames from the scene
:param agents_frames: all agents from the scene as a list of array (one per frame)
:param track_ids: allowed tracks ids we want to build trajectory for
:param frame_index: index of the frame (trajectory will start from this frame)
:return: a list of trajectory for visualisation
"""
traj_visualisation: List[TrajectoryVisualization] = []
# TODO: factor out future length
agent_traj_length = 20
for track_id in track_ids:
# TODO this is not really relative (note eye and 0 yaw)
pos, *_, avail = get_relative_poses(agent_traj_length, frames[frame_index: frame_index + agent_traj_length],
track_id, agents_frames[frame_index: frame_index + agent_traj_length],
np.eye(3), 0)
traj_visualisation.append(TrajectoryVisualization(xs=pos[avail > 0, 0],
ys=pos[avail > 0, 1],
color="blue",
legend_label="agent_trajectory",
track_id=int(track_id)))
# TODO: factor out future length
ego_traj_length = 100
pos, *_, avail = get_relative_poses(ego_traj_length, frames[frame_index: frame_index + ego_traj_length],
None, agents_frames[frame_index: frame_index + ego_traj_length],
np.eye(3), 0)
traj_visualisation.append(TrajectoryVisualization(xs=pos[avail > 0, 0],
ys=pos[avail > 0, 1],
color="red",
legend_label="ego_trajectory",
track_id=-1))
return traj_visualisation
def _get_frame_data(mapAPI: MapAPI, frame: np.ndarray, agents_frame: np.ndarray,
tls_frame: np.ndarray) -> FrameVisualization:
"""Get visualisation objects for the current frame.
:param mapAPI: mapAPI object (used for lanes, crosswalks etc..)
:param frame: the current frame (used for ego)
:param agents_frame: agents in this frame
:param tls_frame: the tls of this frame
:return: A FrameVisualization object. NOTE: trajectory are not included here
"""
ego_xy = frame["ego_translation"][:2]
#################
# plot lanes
lane_indices = indices_in_bounds(ego_xy, mapAPI.bounds_info["lanes"]["bounds"], 50)
active_tl_ids = set(filter_tl_faces_by_status(tls_frame, "ACTIVE")["face_id"].tolist())
lanes_vis: List[LaneVisualization] = []
for idx, lane_idx in enumerate(lane_indices):
lane_idx = mapAPI.bounds_info["lanes"]["ids"][lane_idx]
lane_tl_ids = set(mapAPI.get_lane_traffic_control_ids(lane_idx))
lane_colour = "gray"
for tl_id in lane_tl_ids.intersection(active_tl_ids):
lane_colour = COLORS[mapAPI.get_color_for_face(tl_id)]
lane_coords = mapAPI.get_lane_coords(lane_idx)
left_lane = lane_coords["xyz_left"][:, :2]
right_lane = lane_coords["xyz_right"][::-1, :2]
lanes_vis.append(LaneVisualization(xs=np.hstack((left_lane[:, 0], right_lane[:, 0])),
ys=np.hstack((left_lane[:, 1], right_lane[:, 1])),
color=lane_colour))
#################
# plot crosswalks
crosswalk_indices = indices_in_bounds(ego_xy, mapAPI.bounds_info["crosswalks"]["bounds"], 50)
crosswalks_vis: List[CWVisualization] = []
for idx in crosswalk_indices:
crosswalk = mapAPI.get_crosswalk_coords(mapAPI.bounds_info["crosswalks"]["ids"][idx])
crosswalks_vis.append(CWVisualization(xs=crosswalk["xyz"][:, 0],
ys=crosswalk["xyz"][:, 1],
color="yellow"))
#################
# plot ego and agents
agents_frame = np.insert(agents_frame, 0, get_ego_as_agent(frame))
box_world_coords = get_box_world_coords(agents_frame)
# ego
ego_vis = EgoVisualization(xs=box_world_coords[0, :, 0], ys=box_world_coords[0, :, 1],
color="red", center_x=agents_frame["centroid"][0, 0],
center_y=agents_frame["centroid"][0, 1])
# agents
agents_frame = agents_frame[1:]
box_world_coords = box_world_coords[1:]
agents_vis: List[AgentVisualization] = []
for agent, box_coord in zip(agents_frame, box_world_coords):
label_index = np.argmax(agent["label_probabilities"])
agent_type = PERCEPTION_LABELS[label_index]
agents_vis.append(AgentVisualization(xs=box_coord[..., 0],
ys=box_coord[..., 1],
color="#1F77B4" if agent_type not in COLORS else COLORS[agent_type],
track_id=agent["track_id"],
agent_type=PERCEPTION_LABELS[label_index],
prob=agent["label_probabilities"][label_index]))
return FrameVisualization(ego=ego_vis, agents=agents_vis, lanes=lanes_vis,
crosswalks=crosswalks_vis, trajectories=[])
def zarr_to_visualizer_scene(scene_dataset: ChunkedDataset, mapAPI: MapAPI,
with_trajectories: bool = True) -> List[FrameVisualization]:
"""Convert a zarr scene into a list of FrameVisualization which can be used by the visualiser
:param scene_dataset: a scene dataset. This must contain a single scene
:param mapAPI: mapAPI object
:param with_trajectories: if to enable trajectories or not
:return: a list of FrameVisualization objects
"""
if len(scene_dataset.scenes) != 1:
raise ValueError(f"we can convert only a single scene, found {len(scene_dataset.scenes)}")
frames = scene_dataset.frames
agents_frames = filter_agents_by_frames(frames, scene_dataset.agents)
tls_frames = filter_tl_faces_by_frames(frames, scene_dataset.tl_faces)
frames_vis: List[FrameVisualization] = []
for frame_idx in range(len(frames)):
frame = frames[frame_idx]
tls_frame = tls_frames[frame_idx]
# TODO: hardcoded threshold, it would be great to have a slider filtering on this
agents_frame = agents_frames[frame_idx]
agents_frame = filter_agents_by_labels(agents_frame, 0.1)
frame_vis = _get_frame_data(mapAPI, frame, agents_frame, tls_frame)
if with_trajectories:
traj_vis = _get_frame_trajectories(frames, agents_frames, agents_frame["track_id"], frame_idx)
frame_vis = FrameVisualization(ego=frame_vis.ego, agents=frame_vis.agents,
lanes=frame_vis.lanes, crosswalks=frame_vis.crosswalks,
trajectories=traj_vis)
frames_vis.append(frame_vis)
return frames_vis
def _get_in_out_as_trajectories(in_out: UnrollInputOutput) -> Tuple[np.ndarray, np.ndarray]:
"""Convert the input (log-replayed) and output (simulated) trajectories into world space.
Apply availability on the log-replayed one
:param in_out: an UnrollInputOutput object
:return: the replayed and simulated trajectory as numpy arrays
"""
replay_traj = transform_points(in_out.inputs["target_positions"],
in_out.inputs["world_from_agent"])
replay_traj = replay_traj[in_out.inputs["target_availabilities"] > 0]
sim_traj = transform_points(in_out.outputs["positions"],
in_out.inputs["world_from_agent"])
return replay_traj, sim_traj
def simulation_out_to_visualizer_scene(sim_out: SimulationOutput, mapAPI: MapAPI) -> List[FrameVisualization]:
"""Convert a simulation output into a scene we can visualize.
The scene will include replayed and simulated trajectories for ego and agents when these are
simulated.
:param sim_out: the simulation output
:param mapAPI: a MapAPI object
:return: a list of FrameVisualization for the scene
"""
frames = sim_out.simulated_ego
agents_frames = filter_agents_by_frames(frames, sim_out.simulated_agents)
tls_frames = filter_tl_faces_by_frames(frames, sim_out.simulated_dataset.dataset.tl_faces)
agents_th = sim_out.simulated_dataset.cfg["raster_params"]["filter_agents_threshold"]
ego_ins_outs = sim_out.ego_ins_outs
agents_ins_outs = sim_out.agents_ins_outs
has_ego_info = len(ego_ins_outs) > 0
has_agents_info = len(agents_ins_outs) > 0
frames_vis: List[FrameVisualization] = []
for frame_idx in range(len(frames)):
frame = frames[frame_idx]
tls_frame = tls_frames[frame_idx]
agents_frame = agents_frames[frame_idx]
agents_frame = filter_agents_by_labels(agents_frame, agents_th)
frame_vis = _get_frame_data(mapAPI, frame, agents_frame, tls_frame)
trajectories = []
if has_ego_info:
ego_in_out = ego_ins_outs[frame_idx]
replay_traj, sim_traj = _get_in_out_as_trajectories(ego_in_out)
trajectories.append(TrajectoryVisualization(xs=replay_traj[:, 0], ys=replay_traj[:, 1],
color="blue", legend_label="ego_replay", track_id=-1))
trajectories.append(TrajectoryVisualization(xs=sim_traj[:, 0], ys=sim_traj[:, 1],
color="red", legend_label="ego_simulated", track_id=-1))
if has_agents_info:
agents_in_out = agents_ins_outs[frame_idx]
for agent_in_out in agents_in_out:
track_id = agent_in_out.inputs["track_id"]
replay_traj, sim_traj = _get_in_out_as_trajectories(agent_in_out)
trajectories.append(TrajectoryVisualization(xs=replay_traj[:, 0], ys=replay_traj[:, 1],
color="orange", legend_label="agent_replay",
track_id=track_id))
trajectories.append(TrajectoryVisualization(xs=sim_traj[:, 0], ys=sim_traj[:, 1],
color="purple", legend_label="agent_simulated",
track_id=track_id))
frame_vis = FrameVisualization(ego=frame_vis.ego, agents=frame_vis.agents,
lanes=frame_vis.lanes, crosswalks=frame_vis.crosswalks,
trajectories=trajectories)
frames_vis.append(frame_vis)
return frames_vis
| 49.686508
| 116
| 0.625349
|
a6f57a10c990d08e1496a4f7d07bc693ba942642
| 65,145
|
py
|
Python
|
localstack/utils/cloudformation/template_deployer.py
|
kokizzu/localstack
|
2080d292bd27816dc67b35c5ec58eb1272be40d7
|
[
"Apache-2.0"
] | null | null | null |
localstack/utils/cloudformation/template_deployer.py
|
kokizzu/localstack
|
2080d292bd27816dc67b35c5ec58eb1272be40d7
|
[
"Apache-2.0"
] | null | null | null |
localstack/utils/cloudformation/template_deployer.py
|
kokizzu/localstack
|
2080d292bd27816dc67b35c5ec58eb1272be40d7
|
[
"Apache-2.0"
] | null | null | null |
import base64
import copy
import json
import logging
import re
import traceback
from typing import Dict, Optional
import botocore
from moto.ec2.utils import generate_route_id
from localstack import config
from localstack.constants import FALSE_STRINGS, S3_STATIC_WEBSITE_HOSTNAME, TEST_AWS_ACCOUNT_ID
from localstack.services.cloudformation.deployment_utils import (
PLACEHOLDER_AWS_NO_VALUE,
PLACEHOLDER_RESOURCE_NAME,
is_none_or_empty_value,
remove_none_values,
)
from localstack.services.cloudformation.service_models import (
KEY_RESOURCE_STATE,
DependencyNotYetSatisfied,
GenericBaseModel,
)
from localstack.utils.aws import aws_stack
from localstack.utils.cloudformation import template_preparer
from localstack.utils.collections import merge_recursive
from localstack.utils.functions import prevent_stack_overflow, run_safe
from localstack.utils.json import clone_safe, json_safe
from localstack.utils.objects import get_all_subclasses, recurse_object
from localstack.utils.strings import first_char_to_lower, is_string, to_bytes, to_str
from localstack.utils.threads import start_worker_thread
from localstack.services.cloudformation.models import * # noqa: F401, isort:skip
ACTION_CREATE = "create"
ACTION_DELETE = "delete"
AWS_URL_SUFFIX = "localhost.localstack.cloud" # value is "amazonaws.com" in real AWS
IAM_POLICY_VERSION = "2012-10-17"
REGEX_OUTPUT_APIGATEWAY = re.compile(
rf"^(https?://.+\.execute-api\.)(?:[^-]+-){{2,3}}\d\.(amazonaws\.com|{AWS_URL_SUFFIX})/?(.*)$"
)
REGEX_DYNAMIC_REF = re.compile("{{resolve:([^:]+):(.+)}}")
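# Illustrative match (not part of the original source): "{{resolve:ssm:/my/param}}"
# yields group 1 "ssm" (the service) and group 2 "/my/param" (the reference key).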
LOG = logging.getLogger(__name__)
# list of resource types that can be updated
# TODO: make this a property of the model classes themselves
UPDATEABLE_RESOURCES = [
"Lambda::Function",
"ApiGateway::Method",
"StepFunctions::StateMachine",
"IAM::Role",
"EC2::Instance",
]
# list of static attribute references to be replaced in {'Fn::Sub': '...'} strings
STATIC_REFS = ["AWS::Region", "AWS::Partition", "AWS::StackName", "AWS::AccountId"]
# maps resource type string to model class
RESOURCE_MODELS = {
model.cloudformation_type(): model for model in get_all_subclasses(GenericBaseModel)
}
class NoStackUpdates(Exception):
"""Exception indicating that no actions are to be performed in a stack update (which is not allowed)"""
pass
def lambda_get_params():
return lambda params, **kwargs: params
# maps resource types to functions and parameters for creation
RESOURCE_TO_FUNCTION = {}
# ----------------
# UTILITY METHODS
# ----------------
def find_stack(stack_name):
from localstack.services.cloudformation.provider import find_stack as api_find_stack
return api_find_stack(stack_name)
# ---------------------
# CF TEMPLATE HANDLING
# ---------------------
def get_deployment_config(res_type):
result = RESOURCE_TO_FUNCTION.get(res_type)
if result is not None:
return result
canonical_type = canonical_resource_type(res_type)
resource_class = RESOURCE_MODELS.get(canonical_type)
if resource_class:
return resource_class.get_deploy_templates()
def get_resource_type(resource):
res_type = resource.get("ResourceType") or resource.get("Type") or ""
parts = res_type.split("::", 1)
if len(parts) == 1:
return parts[0]
return parts[1]
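# Illustrative example (not part of the original source):
# get_resource_type({"Type": "AWS::S3::Bucket"}) -> "S3::Bucket"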
def get_service_name(resource):
res_type = resource.get("Type", resource.get("ResourceType", ""))
parts = res_type.split("::")
if len(parts) == 1:
return None
if res_type.endswith("Cognito::UserPool"):
return "cognito-idp"
if parts[-2] == "Cognito":
return "cognito-idp"
if parts[-2] == "Elasticsearch":
return "es"
if parts[-2] == "KinesisFirehose":
return "firehose"
if parts[-2] == "ResourceGroups":
return "resource-groups"
if parts[-2] == "CertificateManager":
return "acm"
return parts[1].lower()
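# Illustrative examples (not part of the original source):
# get_service_name({"Type": "AWS::S3::Bucket"}) -> "s3"
# get_service_name({"Type": "AWS::Cognito::UserPool"}) -> "cognito-idp"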
def get_resource_name(resource):
properties = resource.get("Properties") or {}
name = properties.get("Name")
if name:
return name
# try to extract name via resource class
res_type = canonical_resource_type(get_resource_type(resource))
model_class = RESOURCE_MODELS.get(res_type)
if model_class:
instance = model_class(resource)
name = instance.get_resource_name()
if not name:
LOG.debug('Unable to extract name for resource type "%s"', res_type)
return name
def get_client(resource, func_config):
resource_type = get_resource_type(resource)
service = get_service_name(resource)
resource_config = get_deployment_config(resource_type)
if resource_config is None:
raise Exception(
"CloudFormation deployment for resource type %s not yet implemented" % resource_type
)
try:
if func_config.get("boto_client") == "resource":
return aws_stack.connect_to_resource(service)
return aws_stack.connect_to_service(service)
except Exception as e:
LOG.warning('Unable to get client for "%s" API, skipping deployment: %s', service, e)
return None
def describe_stack_resource(stack_name, logical_resource_id):
client = aws_stack.connect_to_service("cloudformation")
try:
result = client.describe_stack_resource(
StackName=stack_name, LogicalResourceId=logical_resource_id
)
return result["StackResourceDetail"]
except Exception as e:
LOG.warning(
'Unable to get details for resource "%s" in CloudFormation stack "%s": %s',
logical_resource_id,
stack_name,
e,
)
def retrieve_resource_details(resource_id, resource_status, stack):
resources = stack.resources
stack_name = stack.stack_name
resource = resources.get(resource_id)
resource_id = resource_status.get("PhysicalResourceId") or resource_id
if not resource:
resource = {}
resource_type = get_resource_type(resource)
resource_props = resource.get("Properties")
if resource_props is None:
raise Exception(
f'Unable to find properties for resource "{resource_id}": {resource} - {resources}'
)
try:
# convert resource props to resource entity
instance = get_resource_model_instance(resource_id, stack=stack)
if instance:
state = instance.fetch_and_update_state(stack_name=stack_name, resources=resources)
return state
# special case for stack parameters
if resource_type == "Parameter":
return resource_props
message = (
f"Unexpected resource type {resource_type} when resolving "
f"references of resource {resource_id}: {dump_resource_as_json(resource)}"
)
log_not_available_message(resource_type=resource_type, message=message)
except DependencyNotYetSatisfied:
return
except Exception as e:
check_not_found_exception(e, resource_type, resource, resource_status)
return None
def check_not_found_exception(e, resource_type, resource, resource_status=None):
# we expect this to be a "not found" exception
markers = [
"NoSuchBucket",
"ResourceNotFound",
"NoSuchEntity",
"NotFoundException",
"404",
"not found",
"not exist",
]
if not list(filter(lambda marker, e=e: marker in str(e), markers)):
LOG.warning(
"Unexpected error retrieving details for resource type %s: Exception: %s - %s - status: %s",
resource_type,
e,
resource,
resource_status,
)
return False
return True
def extract_resource_attribute(
resource_type,
resource_state,
attribute,
resource_id=None,
resource=None,
stack=None,
):
LOG.debug("Extract resource attribute: %s %s", resource_type, attribute)
is_ref_attribute = attribute in ["PhysicalResourceId", "Ref"]
is_ref_attr_or_arn = is_ref_attribute or attribute == "Arn"
resource = resource or {}
if not resource and stack.resources:
resource = stack.resources[resource_id]
if not resource_state:
resource_state = retrieve_resource_details(resource_id, {}, stack=stack)
if not resource_state:
raise DependencyNotYetSatisfied(
resource_ids=resource_id,
message='Unable to fetch details for resource "%s" (attribute "%s")'
% (resource_id, attribute),
)
if isinstance(resource_state, GenericBaseModel):
if hasattr(resource_state, "get_cfn_attribute"):
try:
return resource_state.get_cfn_attribute(attribute)
except Exception:
pass
raise Exception(
'Unable to extract attribute "%s" from "%s" model class %s'
% (attribute, resource_type, type(resource_state))
)
# extract resource specific attributes
# TODO: remove the code below - move into resource model classes!
resource_props = resource.get("Properties", {})
if resource_type == "Parameter":
result = None
param_value = resource_props.get(
"Value",
resource.get("Value", resource_props.get("Properties", {}).get("Value")),
)
if is_ref_attr_or_arn:
result = param_value
elif isinstance(param_value, dict):
result = param_value.get(attribute)
if result is not None:
return result
return ""
elif resource_type == "Lambda::Function":
func_configs = resource_state.get("Configuration") or {}
if is_ref_attr_or_arn:
func_arn = func_configs.get("FunctionArn")
if func_arn:
return resolve_refs_recursively(stack, func_arn)
func_name = resolve_refs_recursively(stack, func_configs.get("FunctionName"))
return aws_stack.lambda_function_arn(func_name)
else:
return func_configs.get(attribute)
elif resource_type == "Lambda::Version":
if resource_state.get("Version"):
return "%s:%s" % (
resource_state.get("FunctionArn"),
resource_state.get("Version").split(":")[-1],
)
elif resource_type == "DynamoDB::Table":
actual_attribute = "LatestStreamArn" if attribute == "StreamArn" else attribute
value = resource_state.get("Table", {}).get(actual_attribute)
if value:
return value
elif resource_type == "ApiGateway::RestApi":
if is_ref_attribute:
result = resource_state.get("id")
if result:
return result
if attribute == "RootResourceId":
api_id = resource_state["id"]
resources = aws_stack.connect_to_service("apigateway").get_resources(restApiId=api_id)[
"items"
]
for res in resources:
if res["path"] == "/" and not res.get("parentId"):
return res["id"]
elif resource_type == "ApiGateway::Resource":
if is_ref_attribute:
return resource_state.get("id")
elif resource_type == "ApiGateway::Deployment":
if is_ref_attribute:
return resource_state.get("id")
elif resource_type == "S3::Bucket":
if attribute == "WebsiteURL":
bucket_name = resource_props.get("BucketName")
return f"http://{bucket_name}.{S3_STATIC_WEBSITE_HOSTNAME}"
if is_ref_attr_or_arn:
bucket_name = resource_props.get("BucketName")
bucket_name = resolve_refs_recursively(stack, bucket_name)
if attribute == "Arn":
return aws_stack.s3_bucket_arn(bucket_name)
return bucket_name
elif resource_type == "Elasticsearch::Domain":
if attribute == "DomainEndpoint":
domain_status = resource_state.get("DomainStatus", {})
result = domain_status.get("Endpoint")
if result:
return result
if attribute in ["Arn", "DomainArn"]:
domain_name = resource_props.get("DomainName") or resource_state.get("DomainName")
return aws_stack.es_domain_arn(domain_name)
elif resource_type == "StepFunctions::StateMachine":
if is_ref_attr_or_arn:
return resource_state["stateMachineArn"]
elif resource_type == "SNS::Topic":
if is_ref_attribute and resource_state.get("TopicArn"):
topic_arn = resource_state.get("TopicArn")
return resolve_refs_recursively(stack, topic_arn)
elif resource_type == "SQS::Queue":
if is_ref_attr_or_arn:
if attribute == "Arn" and resource_state.get("QueueArn"):
return resolve_refs_recursively(stack, resource_state.get("QueueArn"))
return aws_stack.get_sqs_queue_url(resource_props.get("QueueName"))
attribute_lower = first_char_to_lower(attribute)
result = resource_state.get(attribute) or resource_state.get(attribute_lower)
if result is None and isinstance(resource, dict):
result = resource_props.get(attribute) or resource_props.get(attribute_lower)
if result is None:
result = get_attr_from_model_instance(
resource,
attribute,
resource_type=resource_type,
resource_id=resource_id,
)
if is_ref_attribute:
for attr in ["Id", "PhysicalResourceId", "Ref"]:
if result is None:
for obj in [resource_state, resource]:
result = result or obj.get(attr)
return result
def canonical_resource_type(resource_type):
if "::" in resource_type and not resource_type.startswith("AWS::"):
resource_type = "AWS::%s" % resource_type
return resource_type
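# Illustrative example (not part of the original source):
# canonical_resource_type("Lambda::Function") -> "AWS::Lambda::Function"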
def get_attr_from_model_instance(resource, attribute, resource_type, resource_id=None):
resource_type = canonical_resource_type(resource_type)
model_class = RESOURCE_MODELS.get(resource_type)
if not model_class:
if resource_type not in ["AWS::Parameter", "Parameter"]:
LOG.debug('Unable to find model class for resource type "%s"', resource_type)
return
try:
inst = model_class(resource_name=resource_id, resource_json=resource)
return inst.get_cfn_attribute(attribute)
except Exception as e:
LOG.debug("Failed to retrieve model attribute: %s", attribute, exc_info=e)
def resolve_ref(stack, ref, attribute):
stack_name = stack.stack_name
resources = stack.resources
if ref == "AWS::Region":
return aws_stack.get_region()
if ref == "AWS::Partition":
return "aws"
if ref == "AWS::StackName":
return stack_name
if ref == "AWS::StackId":
# TODO return proper stack id!
return stack_name
if ref == "AWS::AccountId":
return TEST_AWS_ACCOUNT_ID
if ref == "AWS::NoValue":
return PLACEHOLDER_AWS_NO_VALUE
if ref == "AWS::NotificationARNs":
# TODO!
return {}
if ref == "AWS::URLSuffix":
return AWS_URL_SUFFIX
is_ref_attribute = attribute in ["Ref", "PhysicalResourceId", "Arn"]
if is_ref_attribute:
# extract the Properties here, as we only want to recurse over the resource props...
resource_props = resources.get(ref, {}).get("Properties")
resolve_refs_recursively(stack, resource_props)
return determine_resource_physical_id(
resource_id=ref,
attribute=attribute,
stack=stack,
)
if resources.get(ref):
if isinstance(resources[ref].get(attribute), (str, int, float, bool, dict)):
return resources[ref][attribute]
# fetch resource details
resource_new = retrieve_resource_details(ref, {}, stack=stack)
if not resource_new:
raise DependencyNotYetSatisfied(
resource_ids=ref,
message='Unable to fetch details for resource "%s" (resolving attribute "%s")'
% (ref, attribute),
)
resource = resources.get(ref)
resource_type = get_resource_type(resource)
result = extract_resource_attribute(
resource_type, resource_new, attribute, resource_id=ref, resource=resource, stack=stack
)
if result is None:
LOG.warning(
'Unable to extract reference attribute "%s" from resource: %s %s',
attribute,
resource_new,
resource,
)
return result
# Using a @prevent_stack_overflow decorator here to avoid infinite recursion
# in case we load stack exports that have circular dependencies (see issue 3438)
# TODO: Potentially think about a better approach in the future
@prevent_stack_overflow(match_parameters=True)
def resolve_refs_recursively(stack, value):
result = _resolve_refs_recursively(stack, value)
# localstack specific patches
if isinstance(result, str):
# we're trying to filter constructed API urls here (e.g. via Join in the template)
api_match = REGEX_OUTPUT_APIGATEWAY.match(result)
if api_match:
prefix = api_match[1]
host = api_match[2]
path = api_match[3]
port = config.service_port("apigateway")
return f"{prefix}{host}:{port}/{path}"
# basic dynamic reference support
# see: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html
# technically there are more restrictions for each of these services but checking each of these
# isn't really necessary for the current level of emulation
dynamic_ref_match = REGEX_DYNAMIC_REF.match(result)
if dynamic_ref_match:
service_name = dynamic_ref_match[1]
reference_key = dynamic_ref_match[2]
# only these 3 services are supported for dynamic references right now
if service_name == "ssm":
ssm_client = aws_stack.connect_to_service("ssm")
return ssm_client.get_parameter(Name=reference_key)["Parameter"]["Value"]
elif service_name == "ssm-secure":
ssm_client = aws_stack.connect_to_service("ssm")
return ssm_client.get_parameter(Name=reference_key, WithDecryption=True)[
"Parameter"
]["Value"]
elif service_name == "secretsmanager":
# reference key needs to be parsed further
# because {{resolve:secretsmanager:secret-id:secret-string:json-key:version-stage:version-id}}
# we match for "secret-id:secret-string:json-key:version-stage:version-id"
# where
# secret-id can either be the secret name or the full ARN of the secret
# secret-string *must* be SecretString
# all other values are optional
secret_id = reference_key
[json_key, version_stage, version_id] = [None, None, None]
if "SecretString" in reference_key:
parts = reference_key.split(":SecretString:")
secret_id = parts[0]
[json_key, version_stage, version_id] = parts[1].split(":")
kwargs = {} # optional args for get_secret_value
if version_id:
kwargs["VersionId"] = version_id
if version_stage:
kwargs["VersionStage"] = version_stage
secretsmanager_client = aws_stack.connect_to_service("secretsmanager")
secret_value = secretsmanager_client.get_secret_value(SecretId=secret_id, **kwargs)[
"SecretString"
]
if json_key:
return json.loads(secret_value)[json_key]
else:
return secret_value
else:
LOG.warning(f"Unsupported service for dynamic parameter: {service_name=}")
return result
@prevent_stack_overflow(match_parameters=True)
# TODO: move Stack model into separate file and add type hints here
def _resolve_refs_recursively(stack, value):
if isinstance(value, dict):
keys_list = list(value.keys())
stripped_fn_lower = keys_list[0].lower().split("::")[-1] if len(keys_list) == 1 else None
# process special operators
if keys_list == ["Ref"]:
ref = resolve_ref(stack, value["Ref"], attribute="Ref")
if ref is None:
resources = stack.resources
msg = 'Unable to resolve Ref for resource "%s" (yet)' % value["Ref"]
LOG.debug("%s - %s", msg, resources.get(value["Ref"]) or set(resources.keys()))
raise DependencyNotYetSatisfied(resource_ids=value["Ref"], message=msg)
ref = resolve_refs_recursively(stack, ref)
return ref
if stripped_fn_lower == "getatt":
attr_ref = value[keys_list[0]]
attr_ref = attr_ref.split(".") if isinstance(attr_ref, str) else attr_ref
return resolve_ref(stack, attr_ref[0], attribute=attr_ref[1])
if stripped_fn_lower == "join":
join_values = value[keys_list[0]][1]
join_values = [resolve_refs_recursively(stack, v) for v in join_values]
none_values = [v for v in join_values if v is None]
if none_values:
raise Exception(
"Cannot resolve CF fn::Join %s due to null values: %s" % (value, join_values)
)
return value[keys_list[0]][0].join([str(v) for v in join_values])
if stripped_fn_lower == "sub":
item_to_sub = value[keys_list[0]]
attr_refs = {r: {"Ref": r} for r in STATIC_REFS}
if not isinstance(item_to_sub, list):
item_to_sub = [item_to_sub, {}]
result = item_to_sub[0]
item_to_sub[1].update(attr_refs)
for key, val in item_to_sub[1].items():
val = resolve_refs_recursively(stack, val)
result = result.replace("${%s}" % key, val)
# resolve placeholders
result = resolve_placeholders_in_string(result, stack=stack)
return result
if stripped_fn_lower == "findinmap":
attr = resolve_refs_recursively(stack, value[keys_list[0]][1])
result = resolve_ref(stack, value[keys_list[0]][0], attribute=attr)
if not result:
resources = stack.resources
raise Exception(
f"Cannot resolve fn::FindInMap: {value[keys_list[0]]} {list(resources.keys())}"
)
key = value[keys_list[0]][2]
if not isinstance(key, str):
key = resolve_refs_recursively(stack, key)
return result.get(key)
if stripped_fn_lower == "importvalue":
import_value_key = resolve_refs_recursively(stack, value[keys_list[0]])
stack_export = stack.exports_map.get(import_value_key) or {}
if not stack_export.get("Value"):
LOG.info(
'Unable to find export "%s" in stack "%s", existing export names: %s',
import_value_key,
stack.stack_name,
list(stack.exports_map.keys()),
)
return None
return stack_export["Value"]
if stripped_fn_lower == "if":
condition, option1, option2 = value[keys_list[0]]
condition = evaluate_condition(stack, condition)
return resolve_refs_recursively(stack, option1 if condition else option2)
if stripped_fn_lower == "condition":
result = evaluate_condition(stack, value[keys_list[0]])
return result
if stripped_fn_lower == "not":
condition = value[keys_list[0]][0]
condition = resolve_refs_recursively(stack, condition)
return not condition
if stripped_fn_lower in ["and", "or"]:
conditions = value[keys_list[0]]
results = [resolve_refs_recursively(stack, cond) for cond in conditions]
result = all(results) if stripped_fn_lower == "and" else any(results)
return result
if stripped_fn_lower == "equals":
operand1, operand2 = value[keys_list[0]]
operand1 = resolve_refs_recursively(stack, operand1)
operand2 = resolve_refs_recursively(stack, operand2)
return str(operand1) == str(operand2)
if stripped_fn_lower == "select":
index, values = value[keys_list[0]]
index = resolve_refs_recursively(stack, index)
values = resolve_refs_recursively(stack, values)
return values[index]
if stripped_fn_lower == "split":
delimiter, string = value[keys_list[0]]
delimiter = resolve_refs_recursively(stack, delimiter)
string = resolve_refs_recursively(stack, string)
return string.split(delimiter)
if stripped_fn_lower == "getazs":
region = resolve_refs_recursively(stack, value["Fn::GetAZs"]) or aws_stack.get_region()
azs = []
for az in ("a", "b", "c", "d"):
azs.append("%s%s" % (region, az))
return azs
if stripped_fn_lower == "base64":
value_to_encode = value[keys_list[0]]
value_to_encode = resolve_refs_recursively(stack, value_to_encode)
return to_str(base64.b64encode(to_bytes(value_to_encode)))
for key, val in dict(value).items():
value[key] = resolve_refs_recursively(stack, val)
if isinstance(value, list):
for i in range(len(value)):
value[i] = resolve_refs_recursively(stack, value[i])
return value
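# Illustrative sketch (assumed template values, not from the original source): the
# recursion above collapses intrinsic-function dicts into plain values. For a stack in
# which the Ref of a resource "MyQueue" resolves to "my-queue", roughly:
#   {"Fn::Join": ["-", ["prefix", {"Ref": "MyQueue"}]]} -> "prefix-my-queue"
#   {"Fn::Split": [",", "a,b,c"]}                       -> ["a", "b", "c"]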
def resolve_placeholders_in_string(result, stack):
resources = stack.resources
def _replace(match):
parts = match.group(1).split(".")
if len(parts) >= 2:
resource_name, _, attr_name = match.group(1).partition(".")
resolved = resolve_ref(stack, resource_name.strip(), attribute=attr_name.strip())
if resolved is None:
raise DependencyNotYetSatisfied(
resource_ids=resource_name,
message="Unable to resolve attribute ref %s" % match.group(1),
)
return resolved
if len(parts) == 1 and parts[0] in resources:
resource_json = resources[parts[0]]
resource_type = get_resource_type(resource_json)
result = extract_resource_attribute(
resource_type,
resource_json.get(KEY_RESOURCE_STATE, {}),
"Ref",
stack=stack,
resource_id=parts[0],
)
if result is None:
raise DependencyNotYetSatisfied(
resource_ids=parts[0],
message="Unable to resolve attribute ref %s" % match.group(1),
)
# make sure we resolve any functions/placeholders in the extracted string
result = resolve_refs_recursively(stack, result)
# make sure we convert the result to string
result = "" if result is None else str(result)
return result
# TODO raise exception here?
return match.group(0)
regex = r"\$\{([^\}]+)\}"
result = re.sub(regex, _replace, result)
return result
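# Illustrative sketch (hypothetical resource names): the "${...}" placeholders left over
# from Fn::Sub are rewritten via resolve_ref()/extract_resource_attribute(). If the Ref
# of "MyBucket" resolves to "my-bucket" and "MyRole.Arn" to an IAM role ARN, then roughly:
#   resolve_placeholders_in_string("s3://${MyBucket}/x", stack) -> "s3://my-bucket/x"
#   resolve_placeholders_in_string("${MyRole.Arn}", stack)      -> "arn:aws:iam::000000000000:role/..."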
def evaluate_condition(stack, condition):
condition = resolve_refs_recursively(stack, condition)
condition = resolve_ref(stack, condition, attribute="Ref")
condition = resolve_refs_recursively(stack, condition)
return condition
def evaluate_resource_condition(stack, resource):
condition = resource.get("Condition")
if condition:
condition = evaluate_condition(stack, condition)
if condition is False or condition in FALSE_STRINGS or is_none_or_empty_value(condition):
return False
return True
def get_stack_parameter(stack_name, parameter):
try:
client = aws_stack.connect_to_service("cloudformation")
stack = client.describe_stacks(StackName=stack_name)["Stacks"]
except Exception:
return None
stack = stack and stack[0]
if not stack:
return None
result = [p["ParameterValue"] for p in stack["Parameters"] if p["ParameterKey"] == parameter]
return (result or [None])[0]
def update_resource(resource_id, stack):
resources = stack.resources
stack_name = stack.stack_name
resource = resources[resource_id]
resource_type = get_resource_type(resource)
if resource_type not in UPDATEABLE_RESOURCES:
LOG.warning('Unable to update resource type "%s", id "%s"', resource_type, resource_id)
return
LOG.info("Updating resource %s of type %s", resource_id, resource_type)
instance = get_resource_model_instance(resource_id, stack=stack)
if instance:
result = instance.update_resource(resource, stack_name=stack_name, resources=resources)
instance.fetch_and_update_state(stack_name=stack_name, resources=resources)
return result
def get_resource_model_instance(resource_id: str, stack) -> Optional[GenericBaseModel]:
"""Obtain a typed resource entity instance representing the given stack resource."""
resource = stack.resources[resource_id]
resource_type = get_resource_type(resource)
canonical_type = canonical_resource_type(resource_type)
resource_class = RESOURCE_MODELS.get(canonical_type)
if not resource_class:
return None
instance = resource_class(resource)
return instance
def fix_account_id_in_arns(params):
def fix_ids(o, **kwargs):
if isinstance(o, dict):
for k, v in o.items():
if is_string(v, exclude_binary=True):
o[k] = aws_stack.fix_account_id_in_arns(v)
elif is_string(o, exclude_binary=True):
o = aws_stack.fix_account_id_in_arns(o)
return o
result = recurse_object(params, fix_ids)
return result
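# Illustrative sketch (hypothetical ARN): fix_account_id_in_arns() recursively rewrites
# moto's default account id into the LocalStack test account id, roughly:
#   {"RoleArn": "arn:aws:iam::123456789012:role/r1"}
#     -> {"RoleArn": "arn:aws:iam::000000000000:role/r1"}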
def convert_data_types(func_details, params):
"""Convert data types in the "params" object, with the type defs
specified in the 'types' attribute of "func_details"."""
types = func_details.get("types") or {}
attr_names = types.keys() or []
def cast(_obj, _type):
if _type == bool:
return _obj in ["True", "true", True]
if _type == str:
if isinstance(_obj, bool):
return str(_obj).lower()
return str(_obj)
if _type in (int, float):
return _type(_obj)
return _obj
def fix_types(o, **kwargs):
if isinstance(o, dict):
for k, v in o.items():
if k in attr_names:
o[k] = cast(v, types[k])
return o
result = recurse_object(params, fix_types)
return result
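# Illustrative sketch (hypothetical type map): given a deployment config entry with
#   func_details = {"types": {"Enabled": bool, "Port": int}}
# the call convert_data_types(func_details, {"Enabled": "true", "Port": "8080"})
# returns {"Enabled": True, "Port": 8080}; keys without a type entry pass through as-is.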
def log_not_available_message(resource_type: str, message: str):
LOG.warning(
f"{message}. To find out if {resource_type} is supported in LocalStack Pro, "
"please check out our docs at https://docs.localstack.cloud/aws/cloudformation"
)
def dump_resource_as_json(resource: Dict) -> str:
return str(run_safe(lambda: json.dumps(json_safe(resource))) or resource)
# TODO remove this method
def prepare_template_body(req_data):
return template_preparer.prepare_template_body(req_data)
def deploy_resource(stack, resource_id):
result = execute_resource_action(resource_id, stack, ACTION_CREATE)
return result
def delete_resource(stack, resource_id):
return execute_resource_action(resource_id, stack, ACTION_DELETE)
def execute_resource_action(resource_id: str, stack, action_name: str):
stack_name = stack.stack_name
resources = stack.resources
resource = resources[resource_id]
resource_type = get_resource_type(resource)
func_details = get_deployment_config(resource_type)
if not func_details or action_name not in func_details:
if resource_type in ["Parameter"]:
return
log_not_available_message(
resource_type=resource_type,
message=f"Action {action_name} for resource type {resource_type} not available",
)
return
LOG.debug(
'Running action "%s" for resource type "%s" id "%s"',
action_name,
resource_type,
resource_id,
)
func_details = func_details[action_name]
func_details = func_details if isinstance(func_details, list) else [func_details]
results = []
for func in func_details:
if callable(func["function"]):
result = func["function"](resource_id, resources, resource_type, func, stack_name)
results.append(result)
continue
client = get_client(resource, func)
if client:
result = configure_resource_via_sdk(
stack,
resource_id,
resource_type,
func,
action_name,
)
results.append(result)
return (results or [None])[0]
def configure_resource_via_sdk(stack, resource_id, resource_type, func_details, action_name):
resources = stack.resources
stack_name = stack.stack_name
resource = resources[resource_id]
if resource_type == "EC2::Instance":
if action_name == "create":
func_details["boto_client"] = "resource"
client = get_client(resource, func_details)
function = getattr(client, func_details["function"])
params = func_details.get("parameters") or lambda_get_params()
defaults = func_details.get("defaults", {})
resource_props = resource["Properties"] = resource.get("Properties", {})
resource_props = dict(resource_props)
resource_state = resource.get(KEY_RESOURCE_STATE, {})
if callable(params):
params = params(
resource_props,
stack_name=stack_name,
resources=resources,
resource_id=resource_id,
)
else:
# it could be a list like ['param1', 'param2', {'apiCallParamName': 'cfResourcePropName'}]
if isinstance(params, list):
_params = {}
for param in params:
if isinstance(param, dict):
_params.update(param)
else:
_params[param] = param
params = _params
params = dict(params)
for param_key, prop_keys in dict(params).items():
params.pop(param_key, None)
if not isinstance(prop_keys, list):
prop_keys = [prop_keys]
for prop_key in prop_keys:
if prop_key == PLACEHOLDER_RESOURCE_NAME:
params[param_key] = PLACEHOLDER_RESOURCE_NAME
else:
if callable(prop_key):
prop_value = prop_key(
resource_props,
stack_name=stack_name,
resources=resources,
resource_id=resource_id,
)
else:
prop_value = resource_props.get(
prop_key,
resource.get(prop_key, resource_state.get(prop_key)),
)
if prop_value is not None:
params[param_key] = prop_value
break
# replace PLACEHOLDER_RESOURCE_NAME in params
resource_name_holder = {}
def fix_placeholders(o, **kwargs):
if isinstance(o, dict):
for k, v in o.items():
if v == PLACEHOLDER_RESOURCE_NAME:
if "value" not in resource_name_holder:
resource_name_holder["value"] = get_resource_name(resource) or resource_id
o[k] = resource_name_holder["value"]
return o
recurse_object(params, fix_placeholders)
# assign default values if empty
params = merge_recursive(defaults, params)
# this is an indicator that we should skip this resource deployment, and return
if params is None:
return
# convert refs
for param_key, param_value in dict(params).items():
if param_value is not None:
params[param_key] = resolve_refs_recursively(stack, param_value)
# convert any moto account IDs (123456789012) in ARNs to our format (000000000000)
params = fix_account_id_in_arns(params)
# convert data types (e.g., boolean strings to bool)
params = convert_data_types(func_details, params)
# remove None values, as they usually raise boto3 errors
params = remove_none_values(params)
# convert boolean strings
# (TODO: we should find a more reliable mechanism than this opportunistic/probabilistic approach!)
params_before_conversion = copy.deepcopy(params)
for param_key, param_value in dict(params).items():
# Convert to boolean (TODO: do this recursively?)
if str(param_value).lower() in ["true", "false"]:
params[param_key] = str(param_value).lower() == "true"
# invoke function
try:
LOG.debug(
'Request for resource type "%s" in region %s: %s %s',
resource_type,
aws_stack.get_region(),
func_details["function"],
params,
)
try:
result = function(**params)
except botocore.exceptions.ParamValidationError as e:
LOG.debug(f"Trying original parameters: {params_before_conversion}")
if "type: <class 'bool'>" not in str(e):
raise
result = function(**params_before_conversion)
except Exception as e:
if action_name == "delete" and check_not_found_exception(e, resource_type, resource):
return
LOG.warning("Error calling %s with params: %s for resource: %s", function, params, resource)
raise e
return result
def get_action_name_for_resource_change(res_change):
return {"Add": "CREATE", "Remove": "DELETE", "Modify": "UPDATE"}.get(res_change)
# TODO: this shouldn't be called for stack parameters
def determine_resource_physical_id(resource_id, stack=None, attribute=None):
resources = stack.resources
stack_name = stack.stack_name
resource = resources.get(resource_id, {})
if not resource:
return
resource_type = get_resource_type(resource)
resource_type = re.sub("^AWS::", "", resource_type)
resource_props = resource.get("Properties", {})
# determine result from resource class
canonical_type = canonical_resource_type(resource_type)
resource_class = RESOURCE_MODELS.get(canonical_type)
if resource_class:
resource_inst = resource_class(resource)
resource_inst.fetch_state_if_missing(stack_name=stack_name, resources=resources)
result = resource_inst.get_physical_resource_id(attribute=attribute)
if result:
return result
# TODO: put logic into resource-specific model classes!
if resource_type == "ApiGateway::RestApi":
result = resource_props.get("id")
if result:
return result
elif resource_type == "ApiGateway::Stage":
return resource_props.get("StageName")
elif resource_type == "AppSync::DataSource":
return resource_props.get("DataSourceArn")
elif resource_type == "KinesisFirehose::DeliveryStream":
return aws_stack.firehose_stream_arn(resource_props.get("DeliveryStreamName"))
elif resource_type == "StepFunctions::StateMachine":
return aws_stack.state_machine_arn(
resource_props.get("StateMachineName")
) # returns ARN in AWS
elif resource_type == "S3::Bucket":
if attribute == "Arn":
return aws_stack.s3_bucket_arn(resource_props.get("BucketName"))
return resource_props.get("BucketName") # Note: "Ref" returns bucket name in AWS
elif resource_type == "IAM::Role":
if attribute == "Arn":
return aws_stack.role_arn(resource_props.get("RoleName"))
return resource_props.get("RoleName")
elif resource_type == "IAM::Policy":
if attribute == "Arn":
return aws_stack.policy_arn(resource_props.get("PolicyName"))
return resource_props.get("PolicyName")
elif resource_type == "DynamoDB::Table":
table_name = resource_props.get("TableName")
if table_name:
return table_name
elif resource_type == "Logs::LogGroup":
return resource_props.get("LogGroupName")
elif resource_type == "ApiGateway::Model":
model_name = resource_props.get("Name")
if model_name:
return model_name
res_id = resource.get("PhysicalResourceId")
if res_id and attribute in [None, "Ref", "PhysicalResourceId"]:
return res_id
result = extract_resource_attribute(
resource_type,
{},
attribute or "PhysicalResourceId",
resource_id=resource_id,
resource=resource,
stack=stack,
)
if result is not None:
# note that value could be an empty string here (in case of Parameter values)
return result
LOG.info(
'Unable to determine PhysicalResourceId for "%s" resource, ID "%s"',
resource_type,
resource_id,
)
def update_resource_details(stack, resource_id, details, action=None):
resource = stack.resources.get(resource_id, {})
if not resource or not details:
return
# TODO: we need to rethink this method - this should be encapsulated in the resource model classes.
# Also, instead of actively updating the PhysicalResourceId attributes below, they should be
# determined and returned by the resource model classes upon request.
resource_type = resource.get("Type") or ""
resource_type = re.sub("^AWS::", "", resource_type)
resource_props = resource.get("Properties", {})
if resource_type == "ApiGateway::RestApi":
resource_props["id"] = details["id"]
if resource_type == "KMS::Key":
resource["PhysicalResourceId"] = details["KeyMetadata"]["KeyId"]
if resource_type == "EC2::Instance":
if details and isinstance(details, list) and hasattr(details[0], "id"):
resource["PhysicalResourceId"] = details[0].id
if isinstance(details, dict) and details.get("InstanceId"):
resource["PhysicalResourceId"] = details["InstanceId"]
if resource_type == "EC2::SecurityGroup":
resource["PhysicalResourceId"] = details["GroupId"]
if resource_type == "IAM::InstanceProfile":
resource["PhysicalResourceId"] = details["InstanceProfile"]["InstanceProfileName"]
if resource_type == "StepFunctions::Activity":
resource["PhysicalResourceId"] = details["activityArn"]
if resource_type == "ApiGateway::Model":
resource["PhysicalResourceId"] = details["id"]
if resource_type == "EC2::VPC":
resource["PhysicalResourceId"] = details["Vpc"]["VpcId"]
if resource_type == "EC2::Subnet":
resource["PhysicalResourceId"] = details["Subnet"]["SubnetId"]
if resource_type == "EC2::RouteTable":
resource["PhysicalResourceId"] = details["RouteTable"]["RouteTableId"]
if resource_type == "EC2::Route":
resource["PhysicalResourceId"] = generate_route_id(
resource_props["RouteTableId"],
resource_props.get("DestinationCidrBlock", ""),
resource_props.get("DestinationIpv6CidrBlock"),
)
def add_default_resource_props(
resource,
stack_name,
resource_name=None,
resource_id=None,
update=False,
existing_resources=None,
):
"""Apply some fixes to resource props which otherwise cause deployments to fail"""
res_type = resource["Type"]
canonical_type = canonical_resource_type(res_type)
resource_class = RESOURCE_MODELS.get(canonical_type)
if resource_class is not None:
resource_class.add_defaults(resource, stack_name)
# -----------------------
# MAIN TEMPLATE DEPLOYER
# -----------------------
class TemplateDeployer:
def __init__(self, stack):
self.stack = stack
@property
def resources(self):
return self.stack.resources
@property
def stack_name(self):
return self.stack.stack_name
# ------------------
# MAIN ENTRY POINTS
# ------------------
def deploy_stack(self):
self.stack.set_stack_status("CREATE_IN_PROGRESS")
try:
self.apply_changes(
self.stack,
self.stack,
stack_name=self.stack.stack_name,
initialize=True,
action="CREATE",
)
except Exception as e:
LOG.info("Unable to create stack %s: %s", self.stack.stack_name, e)
self.stack.set_stack_status("CREATE_FAILED")
raise
def apply_change_set(self, change_set):
action = "CREATE"
change_set.stack.set_stack_status("%s_IN_PROGRESS" % action)
try:
self.apply_changes(
change_set.stack,
change_set,
stack_name=change_set.stack_name,
action=action,
)
except Exception as e:
LOG.info(
"Unable to apply change set %s: %s", change_set.metadata.get("ChangeSetName"), e
)
change_set.metadata["Status"] = "%s_FAILED" % action
self.stack.set_stack_status("%s_FAILED" % action)
raise
def update_stack(self, new_stack):
self.stack.set_stack_status("UPDATE_IN_PROGRESS")
# apply changes
self.apply_changes(self.stack, new_stack, stack_name=self.stack.stack_name, action="UPDATE")
def delete_stack(self):
if not self.stack:
return
self.stack.set_stack_status("DELETE_IN_PROGRESS")
stack_resources = list(self.stack.resources.values())
resources = {r["LogicalResourceId"]: clone_safe(r) for r in stack_resources}
for key, resource in resources.items():
resource["Properties"] = resource.get("Properties", clone_safe(resource))
resource["ResourceType"] = resource.get("ResourceType") or resource.get("Type")
for resource_id, resource in resources.items():
# TODO: cache condition value in resource details on deployment and use cached value here
if evaluate_resource_condition(self, resource):
delete_resource(self, resource_id)
self.stack.set_resource_status(resource_id, "DELETE_COMPLETE")
# update status
self.stack.set_stack_status("DELETE_COMPLETE")
# ----------------------------
# DEPENDENCY RESOLUTION UTILS
# ----------------------------
def is_deployable_resource(self, resource):
resource_type = get_resource_type(resource)
entry = get_deployment_config(resource_type)
if entry is None and resource_type not in ["Parameter", None]:
resource_str = dump_resource_as_json(resource)
LOG.warning(f'Unable to deploy resource type "{resource_type}": {resource_str}')
return bool(entry and entry.get(ACTION_CREATE))
def is_deployed(self, resource):
resource_status = {}
resource_id = resource["LogicalResourceId"]
details = retrieve_resource_details(resource_id, resource_status, stack=self.stack)
return bool(details)
def is_updateable(self, resource):
"""Return whether the given resource can be updated or not."""
if not self.is_deployable_resource(resource) or not self.is_deployed(resource):
return False
resource_type = get_resource_type(resource)
return resource_type in UPDATEABLE_RESOURCES
def all_resource_dependencies_satisfied(self, resource):
unsatisfied = self.get_unsatisfied_dependencies(resource)
return not unsatisfied
def get_unsatisfied_dependencies(self, resource):
res_deps = self.get_resource_dependencies(resource)
return self.get_unsatisfied_dependencies_for_resources(res_deps, resource)
def get_unsatisfied_dependencies_for_resources(
self, resources, depending_resource=None, return_first=True
):
result = {}
for resource_id, resource in resources.items():
if self.is_deployable_resource(resource):
if not self.is_deployed(resource):
LOG.debug(
"Dependency for resource %s not yet deployed: %s %s",
depending_resource,
resource_id,
resource,
)
result[resource_id] = resource
if return_first:
break
return result
def get_resource_dependencies(self, resource):
result = {}
# Note: using the original, unmodified template here to preserve Ref's ...
raw_resources = self.stack.template_original["Resources"]
raw_resource = raw_resources[resource["LogicalResourceId"]]
dumped = json.dumps(json_safe(raw_resource))
for other_id, other in raw_resources.items():
if resource != other:
# TODO: traverse dict instead of doing string search!
search1 = '{"Ref": "%s"}' % other_id
search2 = '{"Fn::GetAtt": ["%s", ' % other_id
if search1 in dumped or search2 in dumped:
result[other_id] = other
if other_id in resource.get("DependsOn", []):
result[other_id] = other
return result
# -----------------
# DEPLOYMENT UTILS
# -----------------
def add_default_resource_props(self, resources=None):
resources = resources or self.resources
for resource_id, resource in resources.items():
add_default_resource_props(
resource, self.stack_name, resource_id=resource_id, existing_resources=resources
)
def init_resource_status(self, resources=None, stack=None, action="CREATE"):
resources = resources or self.resources
stack = stack or self.stack
for resource_id, resource in resources.items():
stack.set_resource_status(resource_id, "%s_IN_PROGRESS" % action)
def update_resource_details(self, resource_id, result, stack=None, action="CREATE"):
stack = stack or self.stack
# update resource state
update_resource_details(stack, resource_id, result, action)
# update physical resource id
resource = stack.resources[resource_id]
physical_id = resource.get("PhysicalResourceId")
physical_id = physical_id or determine_resource_physical_id(resource_id, stack=stack)
if not resource.get("PhysicalResourceId") or action == "UPDATE":
if physical_id:
resource["PhysicalResourceId"] = physical_id
# set resource status
stack.set_resource_status(resource_id, "%s_COMPLETE" % action, physical_res_id=physical_id)
return physical_id
def get_change_config(self, action, resource, change_set_id=None):
return {
"Type": "Resource",
"ResourceChange": {
"Action": action,
"LogicalResourceId": resource.get("LogicalResourceId"),
"PhysicalResourceId": resource.get("PhysicalResourceId"),
"ResourceType": resource.get("Type"),
"Replacement": "False",
"ChangeSetId": change_set_id,
},
}
def resource_config_differs(self, resource_new):
"""Return whether the given resource properties differ from the existing config (for stack updates)."""
resource_id = resource_new["LogicalResourceId"]
resource_old = self.resources[resource_id]
props_old = resource_old["Properties"]
props_new = resource_new["Properties"]
ignored_keys = ["LogicalResourceId", "PhysicalResourceId"]
old_keys = set(props_old.keys()) - set(ignored_keys)
new_keys = set(props_new.keys()) - set(ignored_keys)
if old_keys != new_keys:
return True
for key in old_keys:
if props_old[key] != props_new[key]:
return True
old_status = self.stack.resource_states.get(resource_id) or {}
previous_state = (
old_status.get("PreviousResourceStatus") or old_status.get("ResourceStatus") or ""
)
if old_status and "DELETE" in previous_state:
return True
def merge_properties(self, resource_id, old_stack, new_stack):
old_resources = old_stack.template["Resources"]
new_resources = new_stack.template["Resources"]
new_resource = new_resources[resource_id]
old_resource = old_resources[resource_id] = old_resources.get(resource_id) or {}
for key, value in new_resource.items():
if key == "Properties":
continue
old_resource[key] = old_resource.get(key, value)
old_res_props = old_resource["Properties"] = old_resource.get("Properties", {})
for key, value in new_resource["Properties"].items():
old_res_props[key] = value
# overwrite original template entirely
old_stack.template_original["Resources"][resource_id] = new_stack.template_original[
"Resources"
][resource_id]
def resolve_param(
self, logical_id: str, param_type: str, default_value: Optional[str] = None
) -> Optional[str]:
if param_type == "AWS::SSM::Parameter::Value<String>":
ssm_client = aws_stack.connect_to_service("ssm")
param = ssm_client.get_parameter(Name=default_value)
return param["Parameter"]["Value"]
return None
def apply_parameter_changes(self, old_stack, new_stack) -> None:
parameters = {
p["ParameterKey"]: p
for p in old_stack.metadata["Parameters"] # go through current parameter values
}
for logical_id, value in new_stack.template["Parameters"].items():
default = value.get("Default")
provided_param_value = parameters.get(logical_id)
param = {
"ParameterKey": logical_id,
"ParameterValue": provided_param_value if default is None else default,
}
if default is not None:
resolved_value = self.resolve_param(logical_id, value.get("Type"), default)
if resolved_value is not None:
param["ResolvedValue"] = resolved_value
parameters[logical_id] = param
parameters.update({p["ParameterKey"]: p for p in new_stack.metadata["Parameters"]})
for change_set in new_stack.change_sets:
parameters.update({p["ParameterKey"]: p for p in change_set.metadata["Parameters"]})
# TODO: unclear/undocumented behavior in implicitly updating old_stack parameter here
old_stack.metadata["Parameters"] = [v for v in parameters.values() if v]
# TODO: fix circular import with cloudformation_api.py when importing Stack here
def construct_changes(
self,
existing_stack,
new_stack,
initialize=False,
change_set_id=None,
append_to_changeset=False,
):
from localstack.services.cloudformation.provider import StackChangeSet
old_resources = existing_stack.template["Resources"]
new_resources = new_stack.template["Resources"]
deletes = [val for key, val in old_resources.items() if key not in new_resources]
adds = [val for key, val in new_resources.items() if initialize or key not in old_resources]
modifies = [val for key, val in new_resources.items() if key in old_resources]
changes = []
for action, items in (("Remove", deletes), ("Add", adds), ("Modify", modifies)):
for item in items:
item["Properties"] = item.get("Properties", {})
change = self.get_change_config(action, item, change_set_id=change_set_id)
changes.append(change)
# append changes to change set
if append_to_changeset and isinstance(new_stack, StackChangeSet):
new_stack.changes.extend(changes)
return changes
def apply_changes(
self,
existing_stack,
new_stack,
stack_name,
change_set_id=None,
initialize=False,
action=None,
):
old_resources = existing_stack.template["Resources"]
new_resources = new_stack.template["Resources"]
action = action or "CREATE"
self.init_resource_status(old_resources, action="UPDATE")
# apply parameter changes to existing stack
self.apply_parameter_changes(existing_stack, new_stack)
# construct changes
changes = self.construct_changes(
existing_stack,
new_stack,
initialize=initialize,
change_set_id=change_set_id,
)
# check if we have actual changes in the stack, and prepare properties
contains_changes = False
for change in changes:
res_action = change["ResourceChange"]["Action"]
resource = new_resources.get(change["ResourceChange"]["LogicalResourceId"])
if res_action != "Modify" or self.resource_config_differs(resource):
contains_changes = True
if res_action in ["Modify", "Add"]:
self.merge_properties(resource["LogicalResourceId"], existing_stack, new_stack)
if not contains_changes:
raise NoStackUpdates("No updates are to be performed.")
# merge stack outputs and conditions
existing_stack.outputs.update(new_stack.outputs)
existing_stack.conditions.update(new_stack.conditions)
# start deployment loop
return self.apply_changes_in_loop(
changes, existing_stack, stack_name, action=action, new_stack=new_stack
)
def apply_changes_in_loop(self, changes, stack, stack_name, action=None, new_stack=None):
from localstack.services.cloudformation.provider import StackChangeSet
def _run(*args):
try:
self.do_apply_changes_in_loop(changes, stack, stack_name)
status = "%s_COMPLETE" % action
except Exception as e:
LOG.debug(
'Error applying changes for CloudFormation stack "%s": %s %s',
stack.stack_name,
e,
traceback.format_exc(),
)
status = "%s_FAILED" % action
stack.set_stack_status(status)
if isinstance(new_stack, StackChangeSet):
new_stack.metadata["Status"] = status
new_stack.metadata["ExecutionStatus"] = (
"EXECUTE_FAILED" if "FAILED" in status else "EXECUTE_COMPLETE"
)
new_stack.metadata["StatusReason"] = "Deployment %s" % (
"failed" if "FAILED" in status else "succeeded"
)
# run deployment in background loop, to avoid client network timeouts
return start_worker_thread(_run)
def do_apply_changes_in_loop(self, changes, stack, stack_name: str):
# apply changes in a retry loop, to resolve resource dependencies and converge to the target state
changes_done = []
max_iters = 30
new_resources = stack.resources
# apply default props before running the loop
for resource_id, resource in new_resources.items():
add_default_resource_props(
resource,
stack.stack_name,
resource_id=resource_id,
existing_resources=new_resources,
)
# start deployment loop
for i in range(max_iters):
j = 0
updated = False
while j < len(changes):
change = changes[j]
res_change = change["ResourceChange"]
action = res_change["Action"]
is_add_or_modify = action in ["Add", "Modify"]
resource_id = res_change["LogicalResourceId"]
try:
if is_add_or_modify:
resource = new_resources[resource_id]
should_deploy = self.prepare_should_deploy_change(
resource_id, change, stack, new_resources
)
LOG.debug(
'Handling "%s" for resource "%s" (%s/%s) type "%s" in loop iteration %s (should_deploy=%s)',
action,
resource_id,
j + 1,
len(changes),
res_change["ResourceType"],
i + 1,
should_deploy,
)
if not should_deploy:
del changes[j]
stack_action = get_action_name_for_resource_change(action)
stack.set_resource_status(resource_id, "%s_COMPLETE" % stack_action)
continue
if not self.all_resource_dependencies_satisfied(resource):
j += 1
continue
self.apply_change(change, stack=stack)
changes_done.append(change)
del changes[j]
updated = True
except DependencyNotYetSatisfied as e:
LOG.debug(
'Dependencies for "%s" not yet satisfied, retrying in next loop: %s',
resource_id,
e,
)
j += 1
if not changes:
break
if not updated:
raise Exception(
"Resource deployment loop completed, pending resource changes: %s" % changes
)
# clean up references to deleted resources in stack
deletes = [c for c in changes_done if c["ResourceChange"]["Action"] == "Remove"]
for delete in deletes:
stack.template["Resources"].pop(delete["ResourceChange"]["LogicalResourceId"], None)
return changes_done
def prepare_should_deploy_change(self, resource_id, change, stack, new_resources):
resource = new_resources[resource_id]
res_change = change["ResourceChange"]
action = res_change["Action"]
# check resource condition, if present
if not evaluate_resource_condition(stack, resource):
LOG.debug(
'Skipping deployment of "%s", as resource condition evaluates to false', resource_id
)
return
# resolve refs in resource details
resolve_refs_recursively(stack, resource)
if action in ["Add", "Modify"]:
is_deployed = self.is_deployed(resource)
if action == "Modify" and not is_deployed:
action = res_change["Action"] = "Add"
if action == "Add":
if not self.is_deployable_resource(resource) or is_deployed:
return False
if action == "Modify" and not self.is_updateable(resource):
LOG.debug(
'Action "update" not yet implemented for CF resource type %s',
resource.get("Type"),
)
return False
return True
def apply_change(self, change, stack):
change_details = change["ResourceChange"]
action = change_details["Action"]
resource_id = change_details["LogicalResourceId"]
resource = stack.resources[resource_id]
if not evaluate_resource_condition(stack, resource):
return
# execute resource action
result = None
if action == "Add":
result = deploy_resource(self, resource_id)
elif action == "Remove":
result = delete_resource(self, resource_id)
elif action == "Modify":
result = update_resource(resource_id, stack=stack)
# update resource status and physical resource id
stack_action = get_action_name_for_resource_change(action)
self.update_resource_details(resource_id, result, stack=stack, action=stack_action)
return result
| 38.823004
| 120
| 0.622197
|
bb9556d288b7e77860413533bc99b34e24455841
| 3,036
|
py
|
Python
|
contrib/linearize/linearize-hashes.py
|
meowmeowchain/meowmeowcoin
|
26c661f64bdcc699175144fb912ff6972d36cd96
|
[
"MIT"
] | null | null | null |
contrib/linearize/linearize-hashes.py
|
meowmeowchain/meowmeowcoin
|
26c661f64bdcc699175144fb912ff6972d36cd96
|
[
"MIT"
] | null | null | null |
contrib/linearize/linearize-hashes.py
|
meowmeowchain/meowmeowcoin
|
26c661f64bdcc699175144fb912ff6972d36cd96
|
[
"MIT"
] | 1
|
2018-02-26T11:05:26.000Z
|
2018-02-26T11:05:26.000Z
|
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
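# Illustrative sketch (assumed values, not part of the original script): build_request()
# emits plain JSON-RPC 1.1 request objects which get_block_hashes() below sends as a batch:
#   BitcoinRPC.build_request(0, 'getblockhash', [100])
#     -> {'version': '1.1', 'method': 'getblockhash', 'id': 0, 'params': [100]}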
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search(r'^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 9882
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
| 26.631579
| 90
| 0.682477
|
a0bd7e1c90130c106c7622c7e4ea10b9889aa391
| 6,808
|
py
|
Python
|
platform/gsutil/gslib/tests/test_defacl.py
|
bopopescu/google-cloud-sdk
|
b34e6a18f1e89673508166acce816111c3421e4b
|
[
"Apache-2.0"
] | null | null | null |
platform/gsutil/gslib/tests/test_defacl.py
|
bopopescu/google-cloud-sdk
|
b34e6a18f1e89673508166acce816111c3421e4b
|
[
"Apache-2.0"
] | null | null | null |
platform/gsutil/gslib/tests/test_defacl.py
|
bopopescu/google-cloud-sdk
|
b34e6a18f1e89673508166acce816111c3421e4b
|
[
"Apache-2.0"
] | 1
|
2020-07-24T20:04:47.000Z
|
2020-07-24T20:04:47.000Z
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for the defacl command."""
import re
import gslib.tests.testcase as case
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.util import ObjectToURI as suri
PUBLIC_READ_JSON_ACL_TEXT = '"entity":"allUsers","role":"READER"'
@SkipForS3('S3 does not support default object ACLs.')
class TestDefacl(case.GsUtilIntegrationTestCase):
"""Integration tests for the defacl command."""
_defacl_ch_prefix = ['defacl', 'ch']
_defacl_get_prefix = ['defacl', 'get']
_defacl_set_prefix = ['defacl', 'set']
def _MakeScopeRegex(self, role, entity_type, email_address):
template_regex = (r'\{.*"entity":\s*"%s-%s".*"role":\s*"%s".*\}' %
(entity_type, email_address, role))
return re.compile(template_regex, flags=re.DOTALL)
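# Illustrative sketch (hypothetical address): _MakeScopeRegex('READER', 'group', 'foo@bar.com')
# compiles a DOTALL pattern that matches a JSON ACL entry such as
#   {"entity": "group-foo@bar.com", "role": "READER"}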
def testChangeDefaultAcl(self):
"""Tests defacl ch."""
bucket = self.CreateBucket()
test_regex = self._MakeScopeRegex(
'OWNER', 'group', self.GROUP_TEST_ADDRESS)
test_regex2 = self._MakeScopeRegex(
'READER', 'group', self.GROUP_TEST_ADDRESS)
json_text = self.RunGsUtil(self._defacl_get_prefix +
[suri(bucket)], return_stdout=True)
self.assertNotRegexpMatches(json_text, test_regex)
self.RunGsUtil(self._defacl_ch_prefix +
['-g', self.GROUP_TEST_ADDRESS+':FC', suri(bucket)])
json_text2 = self.RunGsUtil(self._defacl_get_prefix +
[suri(bucket)], return_stdout=True)
self.assertRegexpMatches(json_text2, test_regex)
self.RunGsUtil(self._defacl_ch_prefix +
['-g', self.GROUP_TEST_ADDRESS+':READ', suri(bucket)])
json_text3 = self.RunGsUtil(self._defacl_get_prefix +
[suri(bucket)], return_stdout=True)
self.assertRegexpMatches(json_text3, test_regex2)
def testChangeMultipleBuckets(self):
"""Tests defacl ch on multiple buckets."""
bucket1 = self.CreateBucket()
bucket2 = self.CreateBucket()
test_regex = self._MakeScopeRegex(
'READER', 'group', self.GROUP_TEST_ADDRESS)
json_text = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket1)],
return_stdout=True)
self.assertNotRegexpMatches(json_text, test_regex)
json_text = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket2)],
return_stdout=True)
self.assertNotRegexpMatches(json_text, test_regex)
self.RunGsUtil(self._defacl_ch_prefix +
['-g', self.GROUP_TEST_ADDRESS+':READ',
suri(bucket1), suri(bucket2)])
json_text = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket1)],
return_stdout=True)
self.assertRegexpMatches(json_text, test_regex)
json_text = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket2)],
return_stdout=True)
self.assertRegexpMatches(json_text, test_regex)
def testChangeMultipleAcls(self):
"""Tests defacl ch with multiple ACL entries."""
bucket = self.CreateBucket()
test_regex_group = self._MakeScopeRegex(
'READER', 'group', self.GROUP_TEST_ADDRESS)
test_regex_user = self._MakeScopeRegex(
'OWNER', 'user', self.USER_TEST_ADDRESS)
json_text = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket)],
return_stdout=True)
self.assertNotRegexpMatches(json_text, test_regex_group)
self.assertNotRegexpMatches(json_text, test_regex_user)
self.RunGsUtil(self._defacl_ch_prefix +
['-g', self.GROUP_TEST_ADDRESS+':READ',
'-u', self.USER_TEST_ADDRESS+':fc', suri(bucket)])
json_text = self.RunGsUtil(self._defacl_get_prefix + [suri(bucket)],
return_stdout=True)
self.assertRegexpMatches(json_text, test_regex_group)
self.assertRegexpMatches(json_text, test_regex_user)
def testEmptyDefAcl(self):
bucket = self.CreateBucket()
self.RunGsUtil(self._defacl_set_prefix + ['private', suri(bucket)])
self.RunGsUtil(self._defacl_ch_prefix +
['-u', self.USER_TEST_ADDRESS+':fc', suri(bucket)])
def testDeletePermissionsWithCh(self):
"""Tests removing permissions with defacl ch."""
bucket = self.CreateBucket()
test_regex = self._MakeScopeRegex(
'OWNER', 'user', self.USER_TEST_ADDRESS)
json_text = self.RunGsUtil(
self._defacl_get_prefix + [suri(bucket)], return_stdout=True)
self.assertNotRegexpMatches(json_text, test_regex)
self.RunGsUtil(self._defacl_ch_prefix +
['-u', self.USER_TEST_ADDRESS+':fc', suri(bucket)])
json_text = self.RunGsUtil(
self._defacl_get_prefix + [suri(bucket)], return_stdout=True)
self.assertRegexpMatches(json_text, test_regex)
self.RunGsUtil(self._defacl_ch_prefix +
['-d', self.USER_TEST_ADDRESS, suri(bucket)])
json_text = self.RunGsUtil(
self._defacl_get_prefix + [suri(bucket)], return_stdout=True)
self.assertNotRegexpMatches(json_text, test_regex)
def testTooFewArgumentsFails(self):
"""Tests calling defacl with insufficient number of arguments."""
# No arguments for get, but valid subcommand.
stderr = self.RunGsUtil(self._defacl_get_prefix, return_stderr=True,
expected_status=1)
self.assertIn('command requires at least', stderr)
# No arguments for set, but valid subcommand.
stderr = self.RunGsUtil(self._defacl_set_prefix, return_stderr=True,
expected_status=1)
self.assertIn('command requires at least', stderr)
# No arguments for ch, but valid subcommand.
stderr = self.RunGsUtil(self._defacl_ch_prefix, return_stderr=True,
expected_status=1)
self.assertIn('command requires at least', stderr)
# Neither arguments nor subcommand.
stderr = self.RunGsUtil(['defacl'], return_stderr=True, expected_status=1)
self.assertIn('command requires at least', stderr)
class TestDefaclOldAlias(TestDefacl):
_defacl_ch_prefix = ['chdefacl']
_defacl_get_prefix = ['getdefacl']
_defacl_set_prefix = ['setdefacl']
| 42.55
| 78
| 0.679201
|
1243712865aea1f6df050a46ddf79fa365460087
| 8,549
|
py
|
Python
|
varconlib/scripts/modify_stars.py
|
DBerke/varconlib
|
4771cf315c8fa76e1982612f3ac520c0cec098d8
|
[
"MIT"
] | null | null | null |
varconlib/scripts/modify_stars.py
|
DBerke/varconlib
|
4771cf315c8fa76e1982612f3ac520c0cec098d8
|
[
"MIT"
] | null | null | null |
varconlib/scripts/modify_stars.py
|
DBerke/varconlib
|
4771cf315c8fa76e1982612f3ac520c0cec098d8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 5 11:58:04 2021
@author: dberke
Make changes to the data in star.Star objects, up to rebuilding them entirely.
"""
import argparse
from functools import partial
from glob import glob
from json.decoder import JSONDecodeError
import lzma
from multiprocessing import Pool, RLock
from pathlib import Path
import pickle
import time
import numpy as np
from p_tqdm import p_map, p_umap
from tqdm import tqdm
import varconlib as vcl
from varconlib.exceptions import PickleFilesNotFoundError
from varconlib.star import Star
stars_to_use = ('HD1581', 'HD190248', 'HD10180', 'HD102117', 'HD102438',
'HD104982', 'HD105837', 'HD106116', 'HD108309', 'HD110619',
'HD111031', 'HD114853', 'HD11505', 'HD115617', 'HD117105',
'HD117207', 'HD117618', 'HD12387', 'HD124292', 'HD125881',
'HD126525', 'HD128674', 'HD134060', 'HD134987', 'HD136352',
'HD136894', 'HD138573', 'HD1388', 'HD140538', 'HD140901',
'HD141937', 'HD143114', 'HD144585', 'HD1461', 'HD146233',
'HD147512', 'HD148211', 'HD148816', 'HD150433', 'HD152391',
'HD154417', 'HD157338', 'HD157347', 'HD161612', 'HD168443',
'HD168871', 'HD171665', 'HD172051', 'HD177409', 'HD177565',
'HD177758', 'HD1835', 'HD183658', 'HD184768', 'HD189567',
'HD189625', 'HD193193', 'HD19467', 'HD196761', 'HD197818',
'HD199288', 'HD199960', 'HD203432', 'HD20407', 'HD204385',
'HD205536', 'HD20619', 'HD2071', 'HD207129', 'HD20766',
'HD20782', 'HD20807', 'HD208704', 'HD210752', 'HD210918',
'HD211415', 'HD212708', 'HD213575', 'HD214953', 'HD215257',
'HD217014', 'HD220507', 'HD222582', 'HD222669', 'HD28821',
'HD30495', 'HD31527', 'HD32724', 'HD361', 'HD37962', 'HD38277',
'HD38858', 'HD38973', 'HD39091', 'HD43587', 'HD43834', 'HD4391',
'HD44420', 'HD44447', 'HD44594', 'HD45184', 'HD45289',
'HD47186', 'HD4915', 'HD55693', 'HD59468', 'HD65907', 'HD6735',
'HD67458', 'HD68168', 'HD68978', 'HD69655', 'HD69830',
'HD70642', 'HD70889', 'HD7134', 'HD72769', 'HD73256', 'HD73524',
'HD7449', 'HD76151', 'HD78429', 'HD78558', 'HD78660', 'HD78747',
'HD82943', 'HD83529', 'HD88725', 'HD88742', 'HD90156',
'HD90905', 'HD92719', 'HD92788', 'HD95521', 'HD96423',
'HD96700', 'HD96937', 'HD97037', 'HD97343', 'HD9782', 'HD97998',
'HD98281', 'Vesta')
def recreate_star(star_dir):
"""Create a Star from a given directory.
Parameters
----------
star_dir : `pathlib.Path`
The directory in which to find the star's files.
Returns
-------
None.
"""
tqdm.write(f'Creating {star_dir.stem}')
try:
Star(star_dir.stem, star_dir, load_data=False)
except PickleFilesNotFoundError:
new_star_dir = Path('/Volumes/External Storage/data_output') /\
star_dir.stem
tqdm.write('Using external storage files.')
Star(star_dir.stem, new_star_dir, load_data=False, output_dir=star_dir)
def create_transition_model_corrected_arrays(star_dir):
"""
Create the transition model-corrected arrays for a Star from a given
directory.
Parameters
----------
star_dir : `pathlib.Path`
The directory in which to find the star's files.
Returns
-------
None.
"""
tqdm.write(f'Working on {star_dir.stem}')
star = Star(star_dir.stem, star_dir, load_data=True)
star.createTransitionModelCorrectedArrays(model_func='quadratic',
n_sigma=2.5)
star.createPairSeparationArrays()
star.saveDataToDisk()
def create_pair_model_corrected_arrays(star_dir):
"""
Create the pair model-corrected array for a Star from a given directory.
Parameters
----------
star_dir : `pathlib.Path`
The directory in which to find the star's files.
Returns
-------
None.
"""
tqdm.write(f'Working on {star_dir.stem}')
star = Star(star_dir.stem, star_dir, load_data=True)
star.createPairModelCorrectedArrays(model_func='quadratic',
n_sigma=4.0)
star.saveDataToDisk()
def add_pixel_data_to_star(star_dir):
"""
Add information about the pixel each transition was measured at to a star.
Parameters
----------
star_dir : `pathlib.Path`
The directory containing the data for the star.
Returns
-------
None.
"""
tqdm.write(f'Working on {star_dir.stem}')
star = Star(star_dir.stem, star_dir, load_data=True)
# Find pickle files in directory
search_str = str(star_dir) + '/HARPS*/pickles_int/*fits.lzma'
pickle_files = [Path(path) for path in sorted(glob(search_str))]
with open(vcl.final_selection_file, 'r+b') as f:
transitions_list = pickle.load(f)
num_obs = len(pickle_files)
num_cols = 0
for transition in transitions_list:
num_cols += len(transition.ordersToFitIn)
star.pixelArray = np.full((num_obs, num_cols), -1, dtype=int)
for obs_num, pickle_file in enumerate(tqdm(pickle_files)):
with lzma.open(pickle_file, 'rb') as f:
fits_list = pickle.loads(f.read())
for col_num, fit in enumerate(fits_list):
if fit is not None:
star.pixelArray[obs_num, col_num] = fit.centralIndex
star.saveDataToDisk()
def update_stellar_property(star_dir, property_name=None):
"""
Force an update of the given property for the given star, and save it out.
Parameters
----------
star_dir : `pathlib.Path`
The directory containing the data for the star.
property_name : str
The name of the property to be updated for the star.
Returns
-------
None.
"""
star = Star(star_dir.stem, star_dir, load_data=True)
# Access the property by name to force the star to update its value.
tqdm.write(f'Value of {property_name} for {star_dir.stem}'
f' is {getattr(star, property_name)}')
star.saveDataToDisk()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Automatically recreate all'
' stars whose names are given.')
parser.add_argument('star_names', action='store', type=str, nargs='*',
help='The names of stars (directories) containing the'
' stars to be used in the plot. If not given will'
' default to using all stars.')
parser.add_argument('--recreate-stars', action='store_true',
help='Trigger a full rebuild of stars from the pickled'
' results files (LENGTHY!).')
parser.add_argument('--transitions', action='store_true',
help='Create the transition model-corrected arrays and'
' pair separation arrays for stars.')
parser.add_argument('--pairs', action='store_true',
help='Create the pair model-corrected arrays for'
' stars.')
parser.add_argument('--pixel-positions', action='store_true',
help='Read pickled fits to add pixel positions to'
' star.')
parser.add_argument('--update-property', action='store', type=str,
help='Update the property with the given name.')
args = parser.parse_args()
start_time = time.time()
output_dir = vcl.output_dir
star_dirs = [output_dir / star_name for star_name in args.star_names]
if star_dirs == []:
# No stars given, fall back on included list:
star_dirs = [output_dir / star_name for star_name in stars_to_use]
if args.recreate_stars:
p_umap(recreate_star, star_dirs)
if args.transitions:
p_umap(create_transition_model_corrected_arrays, star_dirs)
if args.pairs:
p_umap(create_pair_model_corrected_arrays, star_dirs)
if args.pixel_positions:
p_umap(add_pixel_data_to_star, star_dirs)
if args.update_property:
p_map(partial(update_stellar_property,
property_name=args.update_property), star_dirs)
duration = time.time() - start_time
print(f'Finished in {duration:.2f} seconds.')
| 34.059761
| 80
| 0.608375
|
880b446c4003ff94b4356e5d4ea4c6a9a1cf066a
| 371
|
py
|
Python
|
ocm_test_case/users/urls.py
|
DivinytyToffee/ocm_test_case
|
448d1651f963bb9a65045e8683f074a2b1d85229
|
[
"MIT"
] | null | null | null |
ocm_test_case/users/urls.py
|
DivinytyToffee/ocm_test_case
|
448d1651f963bb9a65045e8683f074a2b1d85229
|
[
"MIT"
] | 5
|
2022-02-28T23:35:24.000Z
|
2022-03-31T23:30:17.000Z
|
ocm_test_case/users/urls.py
|
DivinytyToffee/ocm_test_case
|
448d1651f963bb9a65045e8683f074a2b1d85229
|
[
"MIT"
] | null | null | null |
from django.urls import path
from ocm_test_case.users.views import (
user_detail_view,
user_redirect_view,
user_update_view,
)
app_name = "users"
urlpatterns = [
path("~redirect/", view=user_redirect_view, name="redirect"),
path("~update/", view=user_update_view, name="update"),
path("<str:username>/", view=user_detail_view, name="detail"),
]
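# Illustrative sketch (assumes this app is mounted under a "users/" prefix in the project
# ROOT_URLCONF, which is not shown here): the named routes reverse as, e.g.
#   reverse("users:detail", kwargs={"username": "alice"}) -> "/users/alice/"
#   reverse("users:update")                               -> "/users/~update/"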
| 24.733333
| 66
| 0.706199
|
aeb0431f2ee9a66395cfa2ff579b0b6cd2498014
| 4,044
|
py
|
Python
|
util/visualizer.py
|
megvii-research/GeneGAN
|
8a1ba544481978c6f5513e7eed5f11622ad3976f
|
[
"MIT"
] | 5
|
2021-08-08T17:28:00.000Z
|
2022-02-18T03:20:56.000Z
|
util/visualizer.py
|
megvii-research/GeneGAN
|
8a1ba544481978c6f5513e7eed5f11622ad3976f
|
[
"MIT"
] | null | null | null |
util/visualizer.py
|
megvii-research/GeneGAN
|
8a1ba544481978c6f5513e7eed5f11622ad3976f
|
[
"MIT"
] | 2
|
2021-08-15T15:38:25.000Z
|
2021-08-15T21:21:30.000Z
|
import numpy as np
import os
import time
from . import util
from torch.utils.tensorboard import SummaryWriter
class Visualizer:
"""This class includes several functions that can display/save images and print/save logging information.
It uses a Python library 'tensorboard' for display.
"""
def __init__(self, opt):
"""Initialize the Visualizer class
Parameters:
opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
Step 1: Cache the training/test options
Step 2: create a SummaryWriter (tensorboard) object for saving results
Step 3: create a logging file to store training losses
"""
self.opt = opt # cache the option
self.win_size = opt.display_winsize
self.name = opt.name
self.saved = False
# create a logging file to store training losses
self.log_name = os.path.join(opt.checkpoints_dir, opt.name, "loss_log.txt")
with open(self.log_name, "a") as log_file:
now = time.strftime("%c")
log_file.write(
"================ Training Loss (%s) ================\n" % now
)
self.use_tb = True
if self.use_tb:
self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, "tb_log")
self.summary_writer = SummaryWriter(self.log_dir)
util.mkdirs([self.log_dir])
def reset(self):
"""Reset the self.saved status"""
self.saved = False
def display_current_results(self, visuals, epoch, save_result):
"""Display current results on tensorboard.
Parameters:
visuals (OrderedDict) - - dictionary of images to display or save
epoch (int) - - the current epoch
save_result (bool) - - whether to save the current results to tensorboard
"""
if self.use_tb and save_result:
show_imgs = []
for i, (label, image) in enumerate(visuals.items()):
image_numpy = util.tensor2im(image)
show_imgs.append(image_numpy)
label = "-".join(visuals.keys())
show_imgs = np.stack(show_imgs, axis=0)
self.summary_writer.add_images(
"epoch%.3d: %s" % (epoch, label), show_imgs, epoch, dataformats="NHWC"
)
self.summary_writer.flush()
def plot_current_losses(self, epoch, epoch_iter, dataset_size, losses):
"""display the current losses on tensorboard: dictionary of error labels and values
Parameters:
epoch (int) -- current epoch
counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
"""
step = epoch * dataset_size + epoch_iter
for k, v in losses.items():
self.summary_writer.add_scalar(k, v, step)
# losses: same format as |losses| of plot_current_losses
def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
"""print current losses on console; also save the losses to the disk
Parameters:
epoch (int) -- current epoch
iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
t_comp (float) -- computational time per data point (normalized by batch_size)
t_data (float) -- data loading time per data point (normalized by batch_size)
"""
message = "(epoch: %d, iters: %d, time: %.3f, data: %.3f) " % (
epoch,
iters,
t_comp,
t_data,
)
for k, v in losses.items():
message += "%s: %.3f " % (k, v)
print(message) # print the message
with open(self.log_name, "a") as log_file:
log_file.write("%s\n" % message) # save the message
| 39.262136
| 110
| 0.598417
|
7b20270728d12baecc14f1dc7130a8f2b25d381b
| 3,628
|
py
|
Python
|
test/student/test_record_student.py
|
StoDevX/stograde
|
5b4cd58724e8e5218c7a7f2cc2d4f788e71a7931
|
[
"MIT"
] | 7
|
2016-08-05T00:41:11.000Z
|
2019-08-22T11:12:10.000Z
|
test/student/test_record_student.py
|
StoDevX/cs251-toolkit
|
a40f358289d67cce7b24fd557230079fae830b7d
|
[
"MIT"
] | 145
|
2016-08-04T01:07:11.000Z
|
2019-09-09T22:07:13.000Z
|
test/student/test_record_student.py
|
stograde/stograde
|
17d901a86ff80d20e9f7f798bd27375de34eccb7
|
[
"MIT"
] | 3
|
2017-02-06T21:52:46.000Z
|
2019-02-18T10:35:01.000Z
|
import logging
import os
import pytest
from stograde.common import chdir
from stograde.process_assignment.record_result import RecordResult
from stograde.process_assignment.submission_warnings import SubmissionWarnings
from stograde.process_file.file_result import FileResult
from stograde.specs.file_options import FileOptions
from stograde.specs.spec import Spec
from stograde.specs.spec_file import SpecFile
from stograde.student import record_student
from stograde.student.student_result import StudentResult
from test.utils import git
_dir = os.path.dirname(os.path.realpath(__file__))
@pytest.mark.datafiles(os.path.join(_dir, 'fixtures'))
def test_record_student(datafiles):
student_result = StudentResult('student1')
specs = [Spec('hw1', 'hw1', architecture=None,
files=[SpecFile('a_file.txt', [], [], [], FileOptions())]),
Spec('hw2', 'hw2', architecture=None,
files=[SpecFile('b_file.txt', [], [], [], FileOptions())])]
with chdir(str(datafiles)):
with chdir('student1'):
git('init')
git('config', 'user.email', 'an_email@email_provider.com')
git('config', 'user.name', 'Some Random Name')
git('add', os.path.join('hw1', 'a_file.txt'))
git('commit', '-m', '"Add file"', '--date="Tue Apr 21 12:28:03 2020 -0500"')
git('add', os.path.join('hw2', 'b_file.txt'))
git('commit', '-m', '"Add another file"', '--date="Sat Apr 25 20:27:05 2020 -0500"')
record_student(student=student_result,
specs=specs,
basedir='',
interact=False,
skip_web_compile=False)
assert student_result.results[0].student == 'student1'
assert student_result.results[0].spec_id == 'hw1'
assert student_result.results[0].first_submission == 'Tue Apr 21 12:28:03 2020 -0500'
assert student_result.results[0].file_results == [FileResult(file_name='a_file.txt',
last_modified='Tue Apr 21 12:28:03 2020 -0500')]
assert student_result.results[1].student == 'student1'
assert student_result.results[1].spec_id == 'hw2'
assert student_result.results[1].first_submission == 'Sat Apr 25 20:27:05 2020 -0500'
assert student_result.results[1].file_results == [FileResult(file_name='b_file.txt',
last_modified='Sat Apr 25 20:27:05 2020 -0500')]
def test_record_student_no_specs():
student = StudentResult('name')
record_student(student=student, specs=[], basedir='.',
interact=False, skip_web_compile=False)
assert student.results == []
@pytest.mark.datafiles(os.path.join(_dir, 'fixtures'))
def test_record_student_assignment_folder_missing(datafiles, caplog):
student = StudentResult('student1')
# student1 has a hw1 directory but not an another_folder directory
with chdir(str(datafiles)):
with caplog.at_level(logging.DEBUG):
record_student(student=student,
specs=[Spec('hw1', 'another_folder', None)],
basedir='.',
interact=False,
skip_web_compile=False)
assert student.results == [RecordResult('hw1', 'student1',
warnings=SubmissionWarnings(assignment_missing=True))]
log_messages = {(log.msg, log.levelname) for log in caplog.records}
assert log_messages == {("Recording student1's hw1", 'DEBUG')}
| 43.190476
| 113
| 0.62624
|
86d083a311efe73e7b0541eb8369fee4937f3325
| 49,175
|
py
|
Python
|
python/proton/_handlers.py
|
rabih-mourad/qpid-proton
|
22a8e50a03520491502988da899762d41d788568
|
[
"Apache-2.0"
] | null | null | null |
python/proton/_handlers.py
|
rabih-mourad/qpid-proton
|
22a8e50a03520491502988da899762d41d788568
|
[
"Apache-2.0"
] | null | null | null |
python/proton/_handlers.py
|
rabih-mourad/qpid-proton
|
22a8e50a03520491502988da899762d41d788568
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
import errno
import logging
import socket
import time
import weakref
from ._condition import Condition
from ._delivery import Delivery
from ._endpoints import Endpoint
from ._events import Event, Handler, _dispatch
from ._exceptions import ProtonException
from ._io import IO
from ._message import Message
from ._selectable import Selectable
from ._transport import Transport
from ._url import Url
log = logging.getLogger("proton")
class OutgoingMessageHandler(Handler):
"""
A utility for simpler and more intuitive handling of delivery
events related to outgoing (i.e. sent) messages.
:param auto_settle: If ``True``, settle all messages (default). Otherwise
messages must be explicitly settled.
:type auto_settle: ``bool``
:param delegate: A client handler for the endpoint event
"""
def __init__(self, auto_settle=True, delegate=None):
self.auto_settle = auto_settle
self.delegate = delegate
def on_link_flow(self, event):
if event.link.is_sender and event.link.credit \
and event.link.state & Endpoint.LOCAL_ACTIVE \
and event.link.state & Endpoint.REMOTE_ACTIVE:
self.on_sendable(event)
def on_delivery(self, event):
dlv = event.delivery
if dlv.link.is_sender and dlv.updated:
if dlv.remote_state == Delivery.ACCEPTED:
self.on_accepted(event)
elif dlv.remote_state == Delivery.REJECTED:
self.on_rejected(event)
elif dlv.remote_state == Delivery.RELEASED or dlv.remote_state == Delivery.MODIFIED:
self.on_released(event)
if dlv.settled:
self.on_settled(event)
if self.auto_settle:
dlv.settle()
def on_sendable(self, event):
"""
Called when the sender link has credit and messages can
therefore be transferred.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_sendable', event)
def on_accepted(self, event):
"""
Called when the remote peer accepts an outgoing message.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_accepted', event)
def on_rejected(self, event):
"""
Called when the remote peer rejects an outgoing message.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_rejected', event)
def on_released(self, event):
"""
Called when the remote peer releases an outgoing message. Note
that this may be in response to either the ``RELEASED`` or ``MODIFIED``
state as defined by the AMQP specification.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_released', event)
def on_settled(self, event):
"""
Called when the remote peer has settled the outgoing
message. This is the point at which it should never be
retransmitted.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_settled', event)
def recv_msg(delivery):
msg = Message()
msg.decode(delivery.link.recv(delivery.pending))
delivery.link.advance()
return msg
class Reject(ProtonException):
"""
An exception that indicates a message should be rejected.
"""
pass
class Release(ProtonException):
"""
An exception that indicates a message should be released.
"""
pass
class Acking(object):
"""
A class containing methods for handling received messages.
"""
def accept(self, delivery):
"""
Accepts a received message.
.. note:: This method cannot currently be used in combination
with transactions. See :class:`proton.reactor.Transaction`
for transactional methods.
:param delivery: The message delivery tracking object
:type delivery: :class:`proton.Delivery`
"""
self.settle(delivery, Delivery.ACCEPTED)
def reject(self, delivery):
"""
Rejects a received message that is considered invalid or
unprocessable.
.. note:: This method cannot currently be used in combination
with transactions. See :class:`proton.reactor.Transaction`
for transactional methods.
:param delivery: The message delivery tracking object
:type delivery: :class:`proton.Delivery`
"""
self.settle(delivery, Delivery.REJECTED)
def release(self, delivery, delivered=True):
"""
Releases a received message, making it available at the source
for any (other) interested receiver. The ``delivered``
parameter indicates whether this should be considered a
delivery attempt (and the delivery count updated) or not.
.. note:: This method cannot currently be used in combination
with transactions. See :class:`proton.reactor.Transaction`
for transactional methods.
:param delivery: The message delivery tracking object
:type delivery: :class:`proton.Delivery`
:param delivered: If ``True``, the message will be annotated
with a delivery attempt (setting delivery flag
:const:`proton.Delivery.MODIFIED`). Otherwise, the message
will be returned without the annotation and released (setting
delivery flag :const:`proton.Delivery.RELEASED`
:type delivered: ``bool``
"""
if delivered:
self.settle(delivery, Delivery.MODIFIED)
else:
self.settle(delivery, Delivery.RELEASED)
def settle(self, delivery, state=None):
"""
Settles the message delivery, optionally updating the
delivery state.
:param delivery: The message delivery tracking object
:type delivery: :class:`proton.Delivery`
:param state: The delivery state, or ``None`` if no update
is to be performed.
:type state: ``int`` or ``None``
"""
if state:
delivery.update(state)
delivery.settle()
class IncomingMessageHandler(Handler, Acking):
"""
A utility for simpler and more intuitive handling of delivery
events related to incoming (i.e. received) messages.
:param auto_accept: If ``True``, accept all messages (default). Otherwise
messages must be individually accepted or rejected.
:type auto_accept: ``bool``
:param delegate: A client handler for the endpoint event
"""
def __init__(self, auto_accept=True, delegate=None):
self.delegate = delegate
self.auto_accept = auto_accept
def on_delivery(self, event):
dlv = event.delivery
if not dlv.link.is_receiver: return
if dlv.aborted:
self.on_aborted(event)
dlv.settle()
elif dlv.readable and not dlv.partial:
event.message = recv_msg(dlv)
if event.link.state & Endpoint.LOCAL_CLOSED:
if self.auto_accept:
dlv.update(Delivery.RELEASED)
dlv.settle()
else:
try:
self.on_message(event)
if self.auto_accept:
dlv.update(Delivery.ACCEPTED)
dlv.settle()
except Reject:
dlv.update(Delivery.REJECTED)
dlv.settle()
except Release:
dlv.update(Delivery.MODIFIED)
dlv.settle()
elif dlv.updated and dlv.settled:
self.on_settled(event)
def on_message(self, event):
"""
Called when a message is received. The message itself can be
obtained as a property on the event. For the purpose of
referring to this message in further actions (e.g. if
explicitly accepting it), the ``delivery`` should be used, also
obtainable via a property on the event.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_message', event)
def on_settled(self, event):
"""
Callback for when a message delivery is settled by the remote peer.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_settled', event)
def on_aborted(self, event):
"""
Callback for when a message delivery is aborted by the remote peer.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_aborted', event)
class EndpointStateHandler(Handler):
"""
A utility that exposes 'endpoint' events - i.e. the open/close for
links, sessions and connections in a more intuitive manner. A
``XXX_opened()`` method will be called when both local and remote peers
have opened the link, session or connection. This can be used to
confirm a locally initiated action for example. A ``XXX_opening()``
method will be called when the remote peer has requested an open
that was not initiated locally. By default this will simply open
locally, which then triggers the ``XXX_opened()`` call. The same applies
to close.
:param peer_close_is_error: If ``True``, a peer endpoint closing will be
treated as an error with an error callback. Otherwise (default), the
normal callbacks for the closing will occur.
:type peer_close_is_error: ``bool``
:param delegate: A client handler for the endpoint event
"""
def __init__(self, peer_close_is_error=False, delegate=None):
self.delegate = delegate
self.peer_close_is_error = peer_close_is_error
@classmethod
def is_local_open(cls, endpoint):
"""
Test if the local ``endpoint`` is open (i.e. has state
:const:`proton.Endpoint.LOCAL_ACTIVE`).
:param endpoint: The local endpoint to be tested.
:type endpoint: Any child of :class:`proton.Endpoint`
:return: ``True`` if local endpoint is in state
:const:`proton.Endpoint.LOCAL_ACTIVE`, ``False`` otherwise.
:rtype: ``bool``
"""
return endpoint.state & Endpoint.LOCAL_ACTIVE
@classmethod
def is_local_uninitialised(cls, endpoint):
"""
Test if the local ``endpoint`` is uninitialised (i.e. has state
:const:`proton.Endpoint.LOCAL_UNINIT`).
:param endpoint: The local endpoint to be tested.
:type endpoint: Any child of :class:`proton.Endpoint`
:return: ``True`` if local endpoint is in state
:const:`proton.Endpoint.LOCAL_UNINIT`, ``False`` otherwise.
:rtype: ``bool``
"""
return endpoint.state & Endpoint.LOCAL_UNINIT
@classmethod
def is_local_closed(cls, endpoint):
"""
Test if the local ``endpoint`` is closed (i.e. has state
:const:`proton.Endpoint.LOCAL_CLOSED`).
:param endpoint: The local endpoint to be tested.
:type endpoint: Any child of :class:`proton.Endpoint`
:return: ``True`` if local endpoint is in state
:const:`proton.Endpoint.LOCAL_CLOSED`, ``False`` otherwise.
:rtype: ``bool``
"""
return endpoint.state & Endpoint.LOCAL_CLOSED
@classmethod
def is_remote_open(cls, endpoint):
"""
Test if the remote ``endpoint`` is open (i.e. has state
:const:`proton.Endpoint.REMOTE_ACTIVE`).
:param endpoint: The remote endpoint to be tested.
:type endpoint: Any child of :class:`proton.Endpoint`
:return: ``True`` if remote endpoint is in state
:const:`proton.Endpoint.REMOTE_ACTIVE`, ``False`` otherwise.
:rtype: ``bool``
"""
return endpoint.state & Endpoint.REMOTE_ACTIVE
@classmethod
def is_remote_closed(cls, endpoint):
"""
Test if the remote ``endpoint`` is closed (i.e. has state
:const:`proton.Endpoint.REMOTE_CLOSED`).
:param endpoint: The remote endpoint to be tested.
:type endpoint: Any child of :class:`proton.Endpoint`
:return: ``True`` if remote endpoint is in state
:const:`proton.Endpoint.REMOTE_CLOSED`, ``False`` otherwise.
:rtype: ``bool``
"""
return endpoint.state & Endpoint.REMOTE_CLOSED
@classmethod
def print_error(cls, endpoint, endpoint_type):
"""
Logs an error message related to an error condition at an endpoint.
:param endpoint: The endpoint to be tested
:type endpoint: :class:`proton.Endpoint`
:param endpoint_type: The endpoint type as a string to be printed
in the log message.
:type endpoint_type: ``str``
"""
if endpoint.remote_condition:
log.error(endpoint.remote_condition.description or endpoint.remote_condition.name)
elif cls.is_local_open(endpoint) and cls.is_remote_closed(endpoint):
log.error("%s closed by peer" % endpoint_type)
def on_link_remote_close(self, event):
if event.link.remote_condition:
self.on_link_error(event)
elif self.is_local_closed(event.link):
self.on_link_closed(event)
else:
self.on_link_closing(event)
event.link.close()
def on_session_remote_close(self, event):
if event.session.remote_condition:
self.on_session_error(event)
elif self.is_local_closed(event.session):
self.on_session_closed(event)
else:
self.on_session_closing(event)
event.session.close()
def on_connection_remote_close(self, event):
if event.connection.remote_condition:
if event.connection.remote_condition.name == "amqp:connection:forced":
# Treat this the same as just having the transport closed by the peer without
# sending any events. Allow reconnection to happen transparently.
return
self.on_connection_error(event)
elif self.is_local_closed(event.connection):
self.on_connection_closed(event)
else:
self.on_connection_closing(event)
event.connection.close()
def on_connection_local_open(self, event):
if self.is_remote_open(event.connection):
self.on_connection_opened(event)
def on_connection_remote_open(self, event):
if self.is_local_open(event.connection):
self.on_connection_opened(event)
elif self.is_local_uninitialised(event.connection):
self.on_connection_opening(event)
event.connection.open()
def on_session_local_open(self, event):
if self.is_remote_open(event.session):
self.on_session_opened(event)
def on_session_remote_open(self, event):
if self.is_local_open(event.session):
self.on_session_opened(event)
elif self.is_local_uninitialised(event.session):
self.on_session_opening(event)
event.session.open()
def on_link_local_open(self, event):
if self.is_remote_open(event.link):
self.on_link_opened(event)
def on_link_remote_open(self, event):
if self.is_local_open(event.link):
self.on_link_opened(event)
elif self.is_local_uninitialised(event.link):
self.on_link_opening(event)
event.link.open()
def on_connection_opened(self, event):
"""
Callback for when both the local and remote endpoints of a
connection have opened.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_connection_opened', event)
def on_session_opened(self, event):
"""
Callback for when both the local and remote endpoints of a
session have opened.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_session_opened', event)
def on_link_opened(self, event):
"""
Callback for when both the local and remote endpoints of a
link have opened.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_link_opened', event)
def on_connection_opening(self, event):
"""
Callback for when a remote peer initiates the opening of
a connection.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_connection_opening', event)
def on_session_opening(self, event):
"""
Callback for when a remote peer initiates the opening of
a session.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_session_opening', event)
def on_link_opening(self, event):
"""
Callback for when a remote peer initiates the opening of
a link.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_link_opening', event)
def on_connection_error(self, event):
"""
Callback for when an initiated connection open fails.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_connection_error', event)
else:
self.print_error(event.connection, "connection")
def on_session_error(self, event):
"""
Callback for when an initiated session open fails.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_session_error', event)
else:
self.print_error(event.session, "session")
event.connection.close()
def on_link_error(self, event):
"""
Callback for when an initiated link open fails.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_link_error', event)
else:
self.print_error(event.link, "link")
event.connection.close()
def on_connection_closed(self, event):
"""
Callback for when both the local and remote endpoints of a
connection have closed.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_connection_closed', event)
def on_session_closed(self, event):
"""
Callback for when both the local and remote endpoints of a
session have closed.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_session_closed', event)
def on_link_closed(self, event):
"""
Callback for when both the local and remote endpoints of a
link have closed.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_link_closed', event)
def on_connection_closing(self, event):
"""
Callback for when a remote peer initiates the closing of
a connection.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_connection_closing', event)
elif self.peer_close_is_error:
self.on_connection_error(event)
def on_session_closing(self, event):
"""
Callback for when a remote peer initiates the closing of
a session.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_session_closing', event)
elif self.peer_close_is_error:
self.on_session_error(event)
def on_link_closing(self, event):
"""
Callback for when a remote peer initiates the closing of
a link.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None:
_dispatch(self.delegate, 'on_link_closing', event)
elif self.peer_close_is_error:
self.on_link_error(event)
def on_transport_tail_closed(self, event):
"""
Callback for when the transport tail has closed (ie no further input will
be accepted by the transport).
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
self.on_transport_closed(event)
def on_transport_closed(self, event):
"""
Callback for when the transport has closed - ie both the head (input) and
tail (output) of the transport pipeline are closed.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if self.delegate is not None and event.connection and self.is_local_open(event.connection):
_dispatch(self.delegate, 'on_disconnected', event)
class MessagingHandler(Handler, Acking):
"""
A general purpose handler that makes the proton-c events somewhat
simpler to deal with and/or avoids repetitive tasks for common use
cases.
:param prefetch: Initial flow credit for receiving messages, defaults to 10.
:type prefetch: ``int``
:param auto_accept: If ``True``, accept all messages (default). Otherwise messages
must be individually accepted or rejected.
:type auto_accept: ``bool``
:param auto_settle: If ``True``, settle all messages (default). Otherwise
messages must be explicitly settled.
:type auto_settle: ``bool``
:param peer_close_is_error: If ``True``, a peer endpoint closing will be
treated as an error with an error callback. Otherwise (default), the
normal callbacks for the closing will occur.
:type peer_close_is_error: ``bool``
"""
def __init__(self, prefetch=10, auto_accept=True, auto_settle=True, peer_close_is_error=False):
self.handlers = []
if prefetch:
self.handlers.append(FlowController(prefetch))
self.handlers.append(EndpointStateHandler(peer_close_is_error, weakref.proxy(self)))
self.handlers.append(IncomingMessageHandler(auto_accept, weakref.proxy(self)))
self.handlers.append(OutgoingMessageHandler(auto_settle, weakref.proxy(self)))
self.fatal_conditions = ["amqp:unauthorized-access"]
def on_transport_error(self, event):
"""
Called when some error is encountered with the transport over
which the AMQP connection is to be established. This includes
authentication errors as well as socket errors.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if event.transport.condition:
if event.transport.condition.info:
log.error("%s: %s: %s" % (
event.transport.condition.name, event.transport.condition.description,
event.transport.condition.info))
else:
log.error("%s: %s" % (event.transport.condition.name, event.transport.condition.description))
if event.transport.condition.name in self.fatal_conditions:
event.connection.close()
else:
logging.error("Unspecified transport error")
def on_connection_error(self, event):
"""
Called when the peer closes the connection with an error condition.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
EndpointStateHandler.print_error(event.connection, "connection")
def on_session_error(self, event):
"""
Called when the peer closes the session with an error condition.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
EndpointStateHandler.print_error(event.session, "session")
event.connection.close()
def on_link_error(self, event):
"""
Called when the peer closes the link with an error condition.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
EndpointStateHandler.print_error(event.link, "link")
event.connection.close()
def on_reactor_init(self, event):
"""
Called when the event loop - the reactor - starts.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
if hasattr(event.reactor, 'subclass'):
setattr(event, event.reactor.subclass.__name__.lower(), event.reactor)
self.on_start(event)
def on_start(self, event):
"""
Called when the event loop starts. (Just an alias for on_reactor_init)
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
pass
def on_connection_closed(self, event):
"""
Called when the connection is closed.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
pass
def on_session_closed(self, event):
"""
Called when the session is closed.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
pass
def on_link_closed(self, event):
"""
Called when the link is closed.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
pass
def on_connection_closing(self, event):
"""
Called when the peer initiates the closing of the connection.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
pass
def on_session_closing(self, event):
"""
Called when the peer initiates the closing of the session.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
pass
def on_link_closing(self, event):
"""
Called when the peer initiates the closing of the link.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
pass
def on_disconnected(self, event):
"""
Called when the socket is disconnected.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
pass
def on_sendable(self, event):
"""
Called when the sender link has credit and messages can
therefore be transferred.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
pass
def on_accepted(self, event):
"""
Called when the remote peer accepts an outgoing message.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
pass
def on_rejected(self, event):
"""
Called when the remote peer rejects an outgoing message.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
pass
def on_released(self, event):
"""
Called when the remote peer releases an outgoing message. Note
that this may be in response to either the RELEASED or MODIFIED
state as defined by the AMQP specification.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
pass
def on_settled(self, event):
"""
Called when the remote peer has settled the outgoing
message. This is the point at which it should never be
retransmitted.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
pass
def on_message(self, event):
"""
Called when a message is received. The message itself can be
obtained as a property on the event. For the purpose of
referring to this message in further actions (e.g. if
explicitly accepting it), the ``delivery`` should be used, also
obtainable via a property on the event.
:param event: The underlying event object. Use this to obtain further
information on the event. In particular, the message itself may
be obtained by accessing ``event.message``.
:type event: :class:`proton.Event`
"""
pass
class TransactionHandler(object):
"""
The interface for transaction handlers - i.e. objects that want to
be notified of state changes related to a transaction.
"""
def on_transaction_declared(self, event):
"""
Called when a local transaction is declared.
:param event: The underlying event object. Use this to obtain further
information on the event. In particular, the :class:`proton.reactor.Transaction`
object may be obtained by accessing ``event.transaction``.
:type event: :class:`proton.Event`
"""
pass
def on_transaction_committed(self, event):
"""
Called when a local transaction is discharged successfully
(committed).
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
pass
def on_transaction_aborted(self, event):
"""
Called when a local transaction is discharged unsuccessfully
(aborted).
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
pass
def on_transaction_declare_failed(self, event):
"""
Called when a local transaction declare fails.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
pass
def on_transaction_commit_failed(self, event):
"""
Called when the commit of a local transaction fails.
:param event: The underlying event object. Use this to obtain further
information on the event.
:type event: :class:`proton.Event`
"""
pass
class TransactionalClientHandler(MessagingHandler, TransactionHandler):
"""
An extension to the MessagingHandler for applications using
transactions. This handler provides all of the callbacks found
in :class:`MessagingHandler` and :class:`TransactionHandler`,
and provides a convenience method :meth:`accept` for performing
a transactional acceptance of received messages.
:param prefetch: Initial flow credit for receiving messages, defaults to 10.
:type prefetch: ``int``
:param auto_accept: If ``True``, accept all messages. Otherwise messages
must be individually accepted or rejected (the default for this handler).
:type auto_accept: ``bool``
:param auto_settle: If ``True``, settle all messages (default). Otherwise
messages must be explicitly settled.
:type auto_settle: ``bool``
:param peer_close_is_error: If ``True``, a peer endpoint closing will be
treated as an error with an error callback. Otherwise (default), the
normal callbacks for the closing will occur.
:type peer_close_is_error: ``bool``
"""
def __init__(self, prefetch=10, auto_accept=False, auto_settle=True, peer_close_is_error=False):
super(TransactionalClientHandler, self).__init__(prefetch, auto_accept, auto_settle, peer_close_is_error)
def accept(self, delivery, transaction=None):
"""
A convenience method for accepting a received message as part of a
transaction. If no transaction object is supplied, a regular
non-transactional acceptance will be performed.
:param delivery: Delivery tracking object for received message.
:type delivery: :class:`proton.Delivery`
:param transaction: Transaction tracking object which is required if
the message is being accepted under the transaction. If ``None`` (default),
then a normal non-transactional accept occurs.
:type transaction: :class:`proton.reactor.Transaction`
"""
if transaction:
transaction.accept(delivery)
else:
super(TransactionalClientHandler, self).accept(delivery)
class FlowController(Handler):
def __init__(self, window=1024):
self._window = window
self._drained = 0
def on_link_local_open(self, event):
self._flow(event.link)
def on_link_remote_open(self, event):
self._flow(event.link)
def on_link_flow(self, event):
self._flow(event.link)
def on_delivery(self, event):
self._flow(event.link)
def _flow(self, link):
if link.is_receiver:
self._drained += link.drained()
if self._drained == 0:
delta = self._window - link.credit
link.flow(delta)
class Handshaker(Handler):
@staticmethod
def on_connection_remote_open(event):
conn = event.connection
if conn.state & Endpoint.LOCAL_UNINIT:
conn.open()
@staticmethod
def on_session_remote_open(event):
ssn = event.session
if ssn.state & Endpoint.LOCAL_UNINIT:
ssn.open()
@staticmethod
def on_link_remote_open(event):
link = event.link
if link.state & Endpoint.LOCAL_UNINIT:
link.source.copy(link.remote_source)
link.target.copy(link.remote_target)
link.open()
@staticmethod
def on_connection_remote_close(event):
conn = event.connection
if not conn.state & Endpoint.LOCAL_CLOSED:
conn.close()
@staticmethod
def on_session_remote_close(event):
ssn = event.session
if not ssn.state & Endpoint.LOCAL_CLOSED:
ssn.close()
@staticmethod
def on_link_remote_close(event):
link = event.link
if not link.state & Endpoint.LOCAL_CLOSED:
link.close()
# Back compatibility definitions
CFlowController = FlowController
CHandshaker = Handshaker
class PythonIO:
def __init__(self):
self.selectables = []
self.delegate = IOHandler()
def on_unhandled(self, method, event):
event.dispatch(self.delegate)
def on_selectable_init(self, event):
self.selectables.append(event.context)
def on_selectable_updated(self, event):
pass
def on_selectable_final(self, event):
sel = event.context
if sel.is_terminal:
self.selectables.remove(sel)
sel.release()
def on_reactor_quiesced(self, event):
reactor = event.reactor
# check if we are still quiesced, other handlers of
# on_reactor_quiesced could have produced events to process
if not reactor.quiesced: return
reading = []
writing = []
deadline = None
for sel in self.selectables:
if sel.reading:
reading.append(sel)
if sel.writing:
writing.append(sel)
if sel.deadline:
if deadline is None:
deadline = sel.deadline
else:
deadline = min(sel.deadline, deadline)
if deadline is not None:
timeout = deadline - time.time()
else:
timeout = reactor.timeout
if timeout < 0: timeout = 0
timeout = min(timeout, reactor.timeout)
readable, writable, _ = IO.select(reading, writing, [], timeout)
now = reactor.mark()
for s in readable:
s.readable()
for s in writable:
s.writable()
for s in self.selectables:
if s.deadline and now > s.deadline:
s.expired()
reactor.yield_()
# For the C-style IO handler we need to implement a Selector
class IOHandler(Handler):
def __init__(self):
self._selector = IO.Selector()
def on_selectable_init(self, event):
s = event.selectable
self._selector.add(s)
s._reactor._selectables += 1
def on_selectable_updated(self, event):
s = event.selectable
self._selector.update(s)
def on_selectable_final(self, event):
s = event.selectable
self._selector.remove(s)
s._reactor._selectables -= 1
s.release()
def on_reactor_quiesced(self, event):
r = event.reactor
if not r.quiesced:
return
d = r.timer_deadline
readable, writable, expired = self._selector.select(r.timeout)
now = r.mark()
for s in readable:
s.readable()
for s in writable:
s.writable()
for s in expired:
s.expired()
r.yield_()
def on_selectable_readable(self, event):
s = event.selectable
t = s._transport
# If we're an acceptor we can't have a transport
# and we don't want to do anything here in any case
if not t:
return
capacity = t.capacity()
if capacity > 0:
try:
b = s.recv(capacity)
if len(b) > 0:
n = t.push(b)
else:
# EOF handling
self.on_selectable_error(event)
except socket.error as e:
# TODO: What's the error handling to be here?
log.error("Couldn't recv: %r" % e)
t.close_tail()
# Always update as we may have gone to not reading or from
# not writing to writing when processing the incoming bytes
r = s._reactor
self.update(t, s, r.now)
def on_selectable_writable(self, event):
s = event.selectable
t = s._transport
# If we're an acceptor we can't have a transport
# and we don't want to do anything here in any case
if not t:
return
pending = t.pending()
if pending > 0:
try:
n = s.send(t.peek(pending))
t.pop(n)
except socket.error as e:
log.error("Couldn't send: %r" % e)
# TODO: Error? or actually an exception
t.close_head()
newpending = t.pending()
if newpending != pending:
r = s._reactor
self.update(t, s, r.now)
def on_selectable_error(self, event):
s = event.selectable
t = s._transport
t.close_head()
t.close_tail()
s.terminate()
s._transport = None
t._selectable = None
s.update()
def on_selectable_expired(self, event):
s = event.selectable
t = s._transport
r = s._reactor
self.update(t, s, r.now)
def on_connection_local_open(self, event):
c = event.connection
if not c.state & Endpoint.REMOTE_UNINIT:
return
t = Transport()
# It seems perverse, but the C code ignores bind errors too!
# and this is required or you get errors because Connector() has already
# bound the transport and connection!
t.bind_nothrow(c)
def on_connection_bound(self, event):
c = event.connection
t = event.transport
reactor = c._reactor
# link the new transport to its reactor:
t._reactor = reactor
if c._acceptor:
# this connection was created by the acceptor. There is already a
# socket assigned to this connection. Nothing needs to be done.
return
url = c.url or Url(c.hostname)
url.defaults()
host = url.host
port = int(url.port)
if not c.user:
user = url.username
if user:
c.user = user
password = url.password
if password:
c.password = password
addrs = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM)
# Try first possible address
log.debug("Connect trying first transport address: %s", addrs[0])
sock = IO.connect(addrs[0])
# At this point we need to arrange to be called back when the socket is writable
connector = ConnectSelectable(sock, reactor, addrs[1:], t, self)
connector.collect(reactor._collector)
connector.writing = True
connector.push_event(connector, Event.SELECTABLE_INIT)
# TODO: Don't understand why we need this now - how can we get PN_TRANSPORT until the connection succeeds?
t._selectable = None
@staticmethod
def update(transport, selectable, now):
try:
capacity = transport.capacity()
selectable.reading = capacity>0
except:
if transport.closed:
selectable.terminate()
selectable._transport = None
transport._selectable = None
try:
pending = transport.pending()
selectable.writing = pending>0
except:
if transport.closed:
selectable.terminate()
selectable._transport = None
transport._selectable = None
selectable.deadline = transport.tick(now)
selectable.update()
def on_transport(self, event):
t = event.transport
r = t._reactor
s = t._selectable
if s and not s.is_terminal:
self.update(t, s, r.now)
def on_transport_closed(self, event):
t = event.transport
r = t._reactor
s = t._selectable
if s and not s.is_terminal:
s.terminate()
s._transport = None
t._selectable = None
r.update(s)
t.unbind()
class ConnectSelectable(Selectable):
def __init__(self, sock, reactor, addrs, transport, iohandler):
super(ConnectSelectable, self).__init__(sock, reactor)
self._addrs = addrs
self._transport = transport
self._iohandler = iohandler
def readable(self):
pass
def writable(self):
e = self._delegate.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
t = self._transport
if e == 0:
log.debug("Connection succeeded")
s = self._reactor.selectable(delegate=self._delegate)
s._transport = t
t._selectable = s
self._iohandler.update(t, s, t._reactor.now)
# Disassociate from the socket (which has been passed on)
self._delegate = None
self.terminate()
self._transport = None
self.update()
return
elif e == errno.ECONNREFUSED:
if len(self._addrs) > 0:
log.debug("Connection refused: trying next transport address: %s", self._addrs[0])
sock = IO.connect(self._addrs[0])
self._addrs = self._addrs[1:]
self._delegate.close()
self._delegate = sock
return
else:
log.debug("Connection refused, but tried all transport addresses")
t.condition = Condition("proton.pythonio", "Connection refused to all addresses")
else:
log.error("Couldn't connect: %s", e)
t.condition = Condition("proton.pythonio", "Connection error: %s" % e)
t.close_tail()
t.close_head()
self.terminate()
self._transport = None
self.update()
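# A hedged usage sketch (not part of the original module): a minimal receiver built on
# MessagingHandler, in the style of the upstream simple_recv example. The broker URL and
# address below are placeholders; running it requires an AMQP broker and is expected to
# be invoked as `python -m proton._handlers` so the relative imports above resolve.
if __name__ == "__main__":
    from proton.reactor import Container

    class _ExampleReceiver(MessagingHandler):
        def __init__(self, url):
            super(_ExampleReceiver, self).__init__()
            self.url = url

        def on_start(self, event):
            # Called once the container is running; open a receiving link here.
            event.container.create_receiver(self.url)

        def on_message(self, event):
            # With the default auto_accept=True, each message is accepted after this returns.
            print(event.message.body)

    try:
        Container(_ExampleReceiver("localhost:5672/examples")).run()
    except KeyboardInterrupt:
        pass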
| 34.29219
| 114
| 0.619319
|
3e2b4f818744de59dd95708803bd50673636ee81
| 10,698
|
py
|
Python
|
subrepos/wtsi-hgi.python-sequencescape-db/sequencescape/tests/sqlalchemy/test_mappers.py
|
wtsi-hgi/openstack-tenant-cleanup
|
d998016f44c54666f76f90d8d3efa90e12730fff
|
[
"MIT"
] | null | null | null |
subrepos/wtsi-hgi.python-sequencescape-db/sequencescape/tests/sqlalchemy/test_mappers.py
|
wtsi-hgi/openstack-tenant-cleanup
|
d998016f44c54666f76f90d8d3efa90e12730fff
|
[
"MIT"
] | 7
|
2016-03-03T13:29:44.000Z
|
2016-03-15T14:30:48.000Z
|
subrepos/wtsi-hgi.python-sequencescape-db/sequencescape/tests/sqlalchemy/test_mappers.py
|
wtsi-hgi/openstack-tenant-cleanup
|
d998016f44c54666f76f90d8d3efa90e12730fff
|
[
"MIT"
] | null | null | null |
import unittest
from abc import abstractmethod, ABCMeta
from typing import List
from sequencescape._sqlalchemy.database_connector import SQLAlchemyDatabaseConnector
from sequencescape._sqlalchemy.mappers import SQLAlchemyMapper, SQLAlchemySampleMapper, SQLAlchemyStudyMapper, \
SQLAlchemyLibraryMapper, SQLAlchemyWellMapper, SQLAlchemyMultiplexedLibraryMapper
from sequencescape.enums import Property
from sequencescape.mappers import Mapper
from sequencescape.models import InternalIdModel, Sample, Study
from sequencescape.tests._helpers import create_stub_sample, assign_unique_ids, create_stub_study, create_stub_library, \
create_stub_multiplexed_library, create_stub_well
from sequencescape.tests.sqlalchemy.stub_database import create_stub_database
def _create_connector() -> SQLAlchemyDatabaseConnector:
"""
Creates a connector to a test database.
:return: connector to a test database
"""
database_location, dialect = create_stub_database()
connector = SQLAlchemyDatabaseConnector("%s:///%s" % (dialect, database_location))
return connector
class _SQLAlchemyMapperTest(unittest.TestCase, metaclass=ABCMeta):
"""
Tests for `SQLAlchemyMapper`.
"""
@staticmethod
def _get_internal_ids(models: List[InternalIdModel]) -> List[int]:
"""
Gets the ids of all of the given models.
:param models: the models to get the ids of
:return: the ids of the given models
"""
return [model.internal_id for model in models]
@abstractmethod
def _create_model(self) -> InternalIdModel:
"""
Creates a model of the type the mapper being tested uses.
:return: model for use with SUT
"""
@abstractmethod
def _create_mapper(self, connector: SQLAlchemyDatabaseConnector) -> SQLAlchemyMapper:
"""
Creates the mapper that is to be tested.
:return: mapper to be tested
"""
def setUp(self):
self._connector = _create_connector()
self._mapper = self._create_mapper(self._connector)
def test_add_with_none(self):
self.assertRaises(ValueError, self._mapper.add, None)
def test_add_with_non_model(self):
self.assertRaises(ValueError, self._mapper.add, Mapper)
def test_add_with_empty_list(self):
self._mapper.add([])
retrieved_models = self._mapper.get_all()
self.assertEqual(len(retrieved_models), 0)
def test_add_with_model(self):
model = self._create_models(1)[0]
self._mapper.add(model)
retrieved_models = self._mapper.get_all()
self.assertEqual(len(retrieved_models), 1)
self.assertEqual(retrieved_models[0], model)
def test_add_with_model_list(self):
models = self._create_models(5)
self._mapper.add(models)
retrieved_models = self._mapper.get_all()
self.assertCountEqual(retrieved_models, models)
def test__get_by_property_value_sequence_with_empty_list(self):
models = self._create_models(5)
models_to_retrieve = []
self._mapper.add(models)
retrieved_models = self._mapper._get_by_property_value_sequence(
Property.INTERNAL_ID, self._get_internal_ids(models_to_retrieve))
self.assertCountEqual(retrieved_models, models_to_retrieve)
def test__get_by_property_value_sequence_with_list_of_existing(self):
models = self._create_models(5)
models_to_retrieve = [models[0], models[2]]
self._mapper.add(models)
retrieved_models = self._mapper._get_by_property_value_sequence(
Property.INTERNAL_ID, self._get_internal_ids(models_to_retrieve))
self.assertCountEqual(retrieved_models, models_to_retrieve)
def test__get_by_property_value_sequence_with_list_of_non_existing(self):
models = self._create_models(5)
models_to_retrieve = [models.pop(), models.pop()]
assert len(models) == 3
self._mapper.add(models)
retrieved_models = self._mapper._get_by_property_value_sequence(
Property.INTERNAL_ID, self._get_internal_ids(models_to_retrieve))
self.assertCountEqual(retrieved_models, [])
def test__get_by_property_value_sequence_with_list_of_both_existing_and_non_existing(self):
models = self._create_models(5)
models_to_retrieve = [models[0], models[2], models.pop()]
assert len(models) == 4
self._mapper.add(models)
retrieved_models = self._mapper._get_by_property_value_sequence(
Property.INTERNAL_ID, self._get_internal_ids(models_to_retrieve))
self.assertCountEqual(retrieved_models, models_to_retrieve[:2])
def test__get_by_property_value_sequence_returns_correct_type(self):
models = self._create_models(5)
self._mapper.add(models)
retrieved_models = self._mapper._get_by_property_value_sequence(
Property.INTERNAL_ID, self._get_internal_ids(models))
self.assertCountEqual(retrieved_models, models)
self.assertIsInstance(retrieved_models[0], models[0].__class__)
def _create_models(self, number_of_models: int) -> List[InternalIdModel]:
"""
Creates a number of models to use in tests.
:param number_of_models: the number of models to create
:return: the models
"""
return assign_unique_ids([self._create_model() for _ in range(number_of_models)])
class _SQLAssociationMapperTest(_SQLAlchemyMapperTest):
"""
Tests for `SQLAssociationMapper`.
"""
@abstractmethod
def _get_associated_with_instance(self, internal_id=None) -> InternalIdModel:
"""
Gets an instance of the type that objects handled by the mapper can be associated with.
:return: instance that the mapper's objects can be associated with
"""
def setUp(self):
super().setUp()
self._associated_with_type = self._get_associated_with_instance().__class__.__name__
self._associated_with_mapper = globals()["SQLAlchemy%sMapper" % self._associated_with_type](self._connector)
self._mapper_get_associated_with_x = getattr(
self._mapper, "get_associated_with_%s" % self._associated_with_type.lower())
self._mapper_set_association_with_x = getattr(
self._mapper, "set_association_with_%s" % self._associated_with_type.lower())
def test__get_associated_with_x_with_non_existent_x(self):
self.assertRaises(ValueError, self._mapper_get_associated_with_x, self._get_associated_with_instance())
def test__get_associated_with_x_with_non_associated(self):
x = self._get_associated_with_instance()
self._associated_with_mapper.add(x)
associated = self._mapper_get_associated_with_x(x)
self.assertEqual(len(associated), 0)
def test__get_associated_with_x_with_value(self):
x = self._get_associated_with_instance()
self._associated_with_mapper.add(x)
models = self._create_models(2)
self._mapper.add(models)
self._mapper_set_association_with_x(models, x)
associated = self._mapper_get_associated_with_x(x)
self.assertCountEqual(associated, models)
def test__get_associated_with_x_with_empty_list(self):
self._mapper_get_associated_with_x([])
def test__get_associated_with_x_with_list(self):
models = self._create_models(2)
self._mapper.add(models)
xs = [self._get_associated_with_instance(i) for i in range(2)]
self._associated_with_mapper.add(xs)
self._mapper_set_association_with_x(models[0], xs[0])
self._mapper_set_association_with_x(models[1], xs[1])
associated = self._mapper_get_associated_with_x(xs)
self.assertCountEqual(associated, models)
def test__get_associated_with_x_with_list_and_shared_association(self):
xs = [self._get_associated_with_instance(i) for i in range(2)]
self._associated_with_mapper.add(xs)
model = self._create_model()
self._mapper.add(model)
self._mapper_set_association_with_x(model, xs[0])
self._mapper_set_association_with_x(model, xs[1])
associated = self._mapper_get_associated_with_x(xs)
self.assertCountEqual(associated, [model])
class SQLAlchemySampleMapperTest(_SQLAssociationMapperTest):
"""
Tests for `SQLAlchemySampleMapper`.
"""
def _create_model(self) -> InternalIdModel:
return create_stub_sample()
def _create_mapper(self, connector: SQLAlchemyDatabaseConnector) -> SQLAlchemyMapper:
return SQLAlchemySampleMapper(connector)
def _get_associated_with_instance(self, internal_id=None) -> InternalIdModel:
study = create_stub_study()
if internal_id is not None:
study.internal_id = internal_id
return study
class SQLAlchemyStudyMapperTest(_SQLAssociationMapperTest):
"""
Tests for `SQLAlchemyStudyMapper`.
"""
def _create_model(self) -> InternalIdModel:
return create_stub_study()
def _create_mapper(self, connector: SQLAlchemyDatabaseConnector) -> SQLAlchemyMapper:
return SQLAlchemyStudyMapper(connector)
def _get_associated_with_instance(self, internal_id=None) -> InternalIdModel:
sample = create_stub_sample()
if internal_id is not None:
sample.internal_id = internal_id
return sample
class SQLAlchemyLibraryMapperTest(_SQLAlchemyMapperTest):
"""
Tests for `SQLAlchemyLibraryMapper`.
"""
def _create_model(self) -> InternalIdModel:
return create_stub_library()
def _create_mapper(self, connector: SQLAlchemyDatabaseConnector) -> SQLAlchemyMapper:
return SQLAlchemyLibraryMapper(connector)
class SQLAlchemyWellMapperTest(_SQLAlchemyMapperTest):
"""
Tests for `SQLAlchemyWellMapper`.
"""
def _create_model(self) -> InternalIdModel:
return create_stub_well()
def _create_mapper(self, connector: SQLAlchemyDatabaseConnector) -> SQLAlchemyMapper:
return SQLAlchemyWellMapper(connector)
class SQLAlchemyMultiplexedLibraryMapperTest(_SQLAlchemyMapperTest):
"""
Tests for `SQLAlchemyMultiplexedLibraryMapper`.
"""
def _create_model(self) -> InternalIdModel:
return create_stub_multiplexed_library()
def _create_mapper(self, connector: SQLAlchemyDatabaseConnector) -> SQLAlchemyMapper:
return SQLAlchemyMultiplexedLibraryMapper(connector)
# Trick required to stop Python's unittest from running the abstract base classes as tests
del _SQLAlchemyMapperTest
del _SQLAssociationMapperTest
if __name__ == "__main__":
unittest.main()
| 37.536842
| 121
| 0.726117
|
a6e2a2e24a8ec8af55ae83a67cb8f6215db9f2d9
| 6,351
|
py
|
Python
|
tests/test_tocdirective.py
|
flat35hd99/jupyter-book
|
4d5b474e6f2b80c4d1d206e4554740ff82a344dc
|
[
"BSD-3-Clause"
] | 2,109
|
2020-05-02T23:47:18.000Z
|
2022-03-31T22:16:54.000Z
|
tests/test_tocdirective.py
|
flat35hd99/jupyter-book
|
4d5b474e6f2b80c4d1d206e4554740ff82a344dc
|
[
"BSD-3-Clause"
] | 1,158
|
2020-04-29T18:07:02.000Z
|
2022-03-31T21:50:57.000Z
|
tests/test_tocdirective.py
|
flat35hd99/jupyter-book
|
4d5b474e6f2b80c4d1d206e4554740ff82a344dc
|
[
"BSD-3-Clause"
] | 360
|
2020-04-29T14:44:49.000Z
|
2022-03-31T09:26:23.000Z
|
import os
import shutil
from pathlib import Path
import pytest
import sphinx
from bs4 import BeautifulSoup
from click.testing import CliRunner
from TexSoup import TexSoup
from jupyter_book.cli.main import build
path_tests = Path(__file__).parent.resolve()
path_books = path_tests.joinpath("books")
path_root = path_tests.parent
SPHINX_VERSION = f".sphinx{sphinx.version_info[0]}"
def test_toc_startwithlist(cli: CliRunner, temp_with_override, file_regression):
"""Testing a basic _toc.yml for tableofcontents directive"""
path_output = temp_with_override.joinpath("mybook").absolute()
# Regular TOC should work
p_toc = path_books.joinpath("toc")
path_toc = p_toc.joinpath("_toc_startwithlist.yml")
result = cli.invoke(
build,
[
p_toc.as_posix(),
"--path-output",
path_output.as_posix(),
"--toc",
path_toc.as_posix(),
"-W",
],
)
# print(result.output)
assert result.exit_code == 0
path_toc_directive = path_output.joinpath("_build", "html", "index.html")
# print(path_toc_directive.read_text(encoding="utf8"))
# get the tableofcontents markup
soup = BeautifulSoup(path_toc_directive.read_text(encoding="utf8"), "html.parser")
toc = soup.find_all("div", class_="toctree-wrapper")
assert len(toc) == 1
file_regression.check(toc[0].prettify(), extension=".html", encoding="utf8")
def test_toc_parts(cli: CliRunner, temp_with_override, file_regression):
"""Testing `header` in _toc.yml"""
path_input = temp_with_override.joinpath("mybook_input").absolute()
path_output = temp_with_override.joinpath("mybook").absolute()
# Regular TOC should work
p_toc = path_books.joinpath("toc")
shutil.copytree(p_toc, path_input)
# setup correct files
(path_input / "subfolder" / "asubpage.md").unlink()
for i in range(4):
(path_input / "subfolder" / f"asubpage{i+1}.md").write_text(
f"# A subpage {i+1}\n", encoding="utf8"
)
path_toc = path_input.joinpath("_toc_parts.yml")
result = cli.invoke(
build,
[
path_input.as_posix(),
"--path-output",
path_output.as_posix(),
"--toc",
path_toc.as_posix(),
"-W",
],
)
# print(result.output)
assert result.exit_code == 0
path_index = path_output.joinpath("_build", "html", "index.html")
# get the tableofcontents markup
soup = BeautifulSoup(path_index.read_text(encoding="utf8"), "html.parser")
toc = soup.find_all("div", class_="toctree-wrapper")
assert len(toc) == 2
file_regression.check(
toc[0].prettify(),
basename="test_toc_parts_directive",
extension=f"{SPHINX_VERSION}.html",
encoding="utf8",
)
# check the sidebar structure is correct
file_regression.check(
soup.select(".bd-links")[0].prettify(),
basename="test_toc_parts_sidebar",
extension=f"{SPHINX_VERSION}.html",
encoding="utf8",
)
@pytest.mark.skipif(
os.name == "nt",
reason="Theme error writing content1: "
"filename, directory name, or volume label syntax is incorrect",
)
def test_toc_urllink(cli: CliRunner, temp_with_override, file_regression):
"""Testing with additional `url` link key in _toc.yml"""
path_output = temp_with_override.joinpath("mybook").absolute()
# Regular TOC should work
p_toc = path_books.joinpath("toc")
path_toc = p_toc.joinpath("_toc_urllink.yml")
result = cli.invoke(
build,
[
p_toc.as_posix(),
"--path-output",
path_output.as_posix(),
"--toc",
path_toc.as_posix(),
],
)
print(result.output)
assert result.exit_code == 0
path_toc_directive = path_output.joinpath("_build", "html", "index.html")
# get the tableofcontents markup
soup = BeautifulSoup(path_toc_directive.read_text(encoding="utf8"), "html.parser")
toc = soup.find_all("div", class_="toctree-wrapper")
assert len(toc) == 1
file_regression.check(toc[0].prettify(), extension=".html", encoding="utf8")
@pytest.mark.requires_tex
def test_toc_latex_parts(cli: CliRunner, temp_with_override, file_regression):
"""Testing LaTex output"""
path_input = temp_with_override.joinpath("mybook_input").absolute()
path_output = temp_with_override.joinpath("mybook").absolute()
# Regular TOC should work
p_toc = path_books.joinpath("toc")
shutil.copytree(p_toc, path_input)
# setup correct files
(path_input / "subfolder" / "asubpage.md").unlink()
for i in range(4):
(path_input / "subfolder" / f"asubpage{i+1}.md").write_text(
f"# A subpage {i+1}\n", encoding="utf8"
)
path_toc = path_input.joinpath("_toc_parts.yml")
result = cli.invoke(
build,
[
path_input.as_posix(),
"--path-output",
path_output.as_posix(),
"--toc",
path_toc.as_posix(),
"--builder",
"pdflatex",
"-W",
],
)
assert result.exit_code == 0, result.output
# reading the tex file
path_output_file = path_output.joinpath("_build", "latex", "python.tex")
file_content = TexSoup(path_output_file.read_text())
file_regression.check(str(file_content.document), extension=".tex", encoding="utf8")
@pytest.mark.requires_tex
def test_toc_latex_urllink(cli: CliRunner, temp_with_override, file_regression):
"""Testing LaTex output"""
path_output = temp_with_override.joinpath("mybook").absolute()
# Regular TOC should work
p_toc = path_books.joinpath("toc")
path_toc = p_toc.joinpath("_toc_urllink.yml")
result = cli.invoke(
build,
[
p_toc.as_posix(),
"--path-output",
path_output.as_posix(),
"--toc",
path_toc.as_posix(),
"--builder",
"pdflatex",
],
)
assert result.exit_code == 0, result.output
# reading the tex file
path_output_file = path_output.joinpath("_build", "latex", "python.tex")
file_content = TexSoup(path_output_file.read_text())
file_regression.check(str(file_content.document), extension=".tex", encoding="utf8")
| 32.403061
| 88
| 0.636593
|
d6ae6c9571c6389e45ea0d359b35250b90312c03
| 1,248
|
py
|
Python
|
qutip-doc/guide/scripts/floquet_ex1.py
|
quantshah/quantshah.github.io
|
d32f33f4090cd356671950701dd3cb58798bf9bf
|
[
"MIT"
] | null | null | null |
qutip-doc/guide/scripts/floquet_ex1.py
|
quantshah/quantshah.github.io
|
d32f33f4090cd356671950701dd3cb58798bf9bf
|
[
"MIT"
] | null | null | null |
qutip-doc/guide/scripts/floquet_ex1.py
|
quantshah/quantshah.github.io
|
d32f33f4090cd356671950701dd3cb58798bf9bf
|
[
"MIT"
] | null | null | null |
from qutip import *
from scipy import *
delta = 0.2 * 2*pi; eps0 = 1.0 * 2*pi
A = 0.5 * 2*pi; omega = 1.0 * 2*pi
T = (2*pi)/omega
tlist = linspace(0.0, 10 * T, 101)
psi0 = basis(2,0)
H0 = - delta/2.0 * sigmax() - eps0/2.0 * sigmaz()
H1 = A/2.0 * sigmaz()
args = {'w': omega}
H = [H0, [H1, lambda t,args: sin(args['w'] * t)]]
# find the floquet modes for the time-dependent hamiltonian
f_modes_0,f_energies = floquet_modes(H, T, args)
# decompose the initial state in the floquet modes
f_coeff = floquet_state_decomposition(f_modes_0, f_energies, psi0)
# calculate the wavefunctions from the floquet modes
p_ex = zeros(len(tlist))
for n, t in enumerate(tlist):
psi_t = floquet_wavefunction_t(f_modes_0, f_energies, f_coeff, t, H, T, args)
p_ex[n] = expect(num(2), psi_t)
# For reference: calculate the same thing with mesolve
p_ex_ref = mesolve(H, psi0, tlist, [], [num(2)], args).expect[0]
# plot the results
from pylab import *
plot(tlist, real(p_ex), 'ro', tlist, 1-real(p_ex), 'bo')
plot(tlist, real(p_ex_ref), 'r', tlist, 1-real(p_ex_ref), 'b')
xlabel('Time')
ylabel('Occupation probability')
legend(("Floquet $P_1$", "Floquet $P_0$", "Lindblad $P_1$", "Lindblad $P_0$"))
show()
| 32.842105
| 81
| 0.64984
|
24def60c6b00ec03f01337726625b7c6cdbf1e0a
| 9,226
|
py
|
Python
|
datasets/Frey.py
|
rist-ro/argo
|
a10c33346803239db8a64c104db7f22ec4e05bef
|
[
"MIT"
] | 4
|
2020-12-07T19:13:13.000Z
|
2022-01-30T18:52:18.000Z
|
datasets/Frey.py
|
rist-ro/argo
|
a10c33346803239db8a64c104db7f22ec4e05bef
|
[
"MIT"
] | 12
|
2020-09-25T22:41:28.000Z
|
2022-02-09T23:46:34.000Z
|
datasets/Frey.py
|
rist-ro/argo
|
a10c33346803239db8a64c104db7f22ec4e05bef
|
[
"MIT"
] | 2
|
2021-03-02T18:31:04.000Z
|
2021-03-02T21:56:43.000Z
|
"""
Module for managing Frey faces dataset
"""
import numpy as np
import os.path
import urllib.request
from .ImageDataset import ImageDataset  ## TODO: check whether the leading dot (relative import) is needed
from scipy.io import loadmat
import pdb
class Frey(ImageDataset):
"""
This class manage the dataset Frey faces, properties of the datasets are uniquely determined
by the params dictionary
It compares the parameters and complete them with the default one. It then return a unique id identifier
Parameters
---------
params : dict
dictionary that can contain
+-------------+-----------+-----------+-----------+-------------------------------------------------------------+
| params Key | values | default v | id short | Short description |
+=============+===========+===========+===========+=============================================================+
| binary | 0,1 | 0 | "-c", "-d"| load continuous or binary Frey faces |
+-------------+-----------+-----------+-----------+-------------------------------------------------------------+
| stochastic | 0,1 | 0 | "-st" | sample using Bernoulli from the continuous Frey faces after every|
| | | | | epoch during training, see IWAE and LVAE papers that claim |
| | | | | this technique reduces overfitting; this function only |
| | | | | loads continuous Frey faces to be used later |
+-------------+-----------+-----------+-----------+-------------------------------------------------------------+
| data_dir | path str | None | | path of the dataset. In some cases cannot be set |
| | | | | (non binary mnist only) |
+-------------+-----------+-----------+-----------+-------------------------------------------------------------+
| subsampling | integer | None |"-subsamp" | Reduce the dataset by keeping one sample out of every |
| | | | | `subsampling` samples |
+-------------+-----------+-----------+-----------+-------------------------------------------------------------+
| clip_low | bool | None | "-clipL" | clip the dataset to a minimum value (used to avoid zero |
| | | | | gradient) (-clipLH in case of also high) |
+-------------+-----------+-----------+-----------+-------------------------------------------------------------+
| clip_high | bool | None | "-clipH" | clip the dataset to a max value |
+-------------+-----------+-----------+-----------+-------------------------------------------------------------+
| id_note | string | "" | id_note | Arbitrary string to append to the id |
+-------------+-----------+-----------+-----------+-------------------------------------------------------------+
# TODO: train/test split customizable
"""
default_params = {
'binary' : 0,
'stochastic' : 0,
'subsampling' : None,
'clip_high' :None,
'clip_low' : None,
'id_note' : None
}
def __init__(self, params):
super().__init__(params)
self._id = self.dataset_id(params)
self._binary_input = self._params['binary']
self.data_dir = "datasets/Freyfaces_data"
if not os.path.exists(self.data_dir):
os.makedirs(self.data_dir)
fileName = self.data_dir + '/frey_rawface.mat'
if not os.path.isfile(fileName):
# see http://dohmatob.github.io/research/2016/10/22/VAE.html
origin = (
'http://www.cs.nyu.edu/~roweis/data/frey_rawface.mat'
)
print('Downloading data from %s' % origin)
urllib.request.urlretrieve(origin, fileName)
        # Continuous faces are needed when binary == 0, or when stochastic
        # binarisation will sample from the continuous faces later
        # (binary == 1 and stochastic == 1).
        if self._binary_input == 0 or (self._binary_input == 1 and self._params['stochastic'] == 1):
dtype = 'float32'
else:
dtype = 'int32'
self.img_rows = 28
self.img_cols = 20
ff = loadmat(fileName, squeeze_me=True, struct_as_record=False)
ff = ff["ff"].T.reshape((-1, self.img_rows, self.img_cols))
n_pixels = self.img_rows * self.img_cols
X_train = ff[:1600]
X_test = ff[1600:1900]
X_train = X_train.astype(dtype) / 255.
X_test = X_test.astype(dtype) / 255.
self._train_set_x = X_train.reshape((len(X_train), n_pixels))
self._test_set_x = X_test.reshape((len(X_test), n_pixels))
# choose a subset
if self._params['subsampling']:
self._train_set_x, self._train_set_y = \
self.sub_sample(self._train_set_x, self._train_set_y, self._params['subsampling'])
self._test_set_x, self._test_set_y = \
self.sub_sample(self._test_set_x, self._test_set_y, self._params['subsampling'])
#clip
clip_low = self._params['clip_low']
clip_high = self._params['clip_high']
if (clip_low is not None) or (clip_high is not None):
m = clip_low if clip_low is not None else 0
M = clip_high if clip_high is not None else 1
self._train_set_x = np.clip(self._train_set_x, a_min=m, a_max=M)
self._test_set_x = np.clip(self._test_set_x, a_min=m, a_max=M)
implemented_params_keys = ['dataName', 'binary', 'stochastic',
'position_label', 'subsampling', 'clip_high', 'clip_low',
'data_dir', 'id_note'] # all the admitted keys
@staticmethod
def dataset_id(params):
"""
        This method interprets the parameters and generates an id
"""
# TODO: missing features are train/test?
Frey.check_params_impl(params)
id = 'Frey'
# binary or continuous
id_binary = {0:'-c',1:'-d'}
id += id_binary[params['binary']]
# stochastic
id += '-st' + str(params["stochastic"])
# subsampling
if params['subsampling']:
id += '-ss'+str(params['subsampling'])
# clip
# TODO The parameters of clip should be the values to which you clip
clip_high = False
if params['clip_high'] :
id += '-cH'
clip_high = True
if params['clip_low'] :
id += '-cL'
if clip_high:
id += "H"
# id note (keep last)
if params['id_note']:
id += params['id_note']
return id
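    # Example of the generated id (sketch, using the params shown here):
    #   dataset_id({'binary': 1, 'stochastic': 0, 'subsampling': 2,
    #               'clip_high': None, 'clip_low': True, 'id_note': None})
    #   -> 'Frey-d-st0-ss2-cL'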
@staticmethod
def sub_sample(data_set_x, data_set_y, subsampling):
"""
        Return a random subset keeping roughly one sample out of every "subsampling" samples
:param data_set_x
:param data_set_y
:param subsampling: integer < dim(data_set)
:return: dataset_x, dataset_y
"""
len_train = len(data_set_x)
reshuf_index_train = np.random.permutation(len_train)
new_len_train = int(len_train / subsampling)
data_set_x = data_set_x[reshuf_index_train[:new_len_train]]
data_set_y = data_set_y[reshuf_index_train[:new_len_train]]
return data_set_x, data_set_y
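    # Example (sketch): with subsampling=4 and 100 rows, a random subset of
    # 25 (x, y) pairs is returned.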
@staticmethod
def class_filter(data_set_x, data_set_y, classes, position_label):
"""
return the dataset with labels in the list classes
:param data_set_x: data
:param data_set_y: labels
:param classes: list of classes
        :param position_label: if True, replace each label with its index in `classes`
        :return: (dataset_x, dataset_y) with elements whose labels are not in `classes` filtered out
"""
ix_mtch_class_train = np.in1d(data_set_y, classes)
data_set_x = data_set_x[ix_mtch_class_train]
data_set_y = data_set_y[ix_mtch_class_train]
if position_label:
def replace_with_position(label_set, classes):
label_set_new = np.copy(label_set)
for ix, class_ in enumerate(classes): label_set_new[label_set == class_] = ix
return label_set_new
data_set_y = replace_with_position(data_set_y, classes)
return data_set_x, data_set_y
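    # Example (sketch): class_filter(x, y, classes=[3, 7], position_label=True)
    # keeps only the rows labelled 3 or 7 and relabels them as 0 and 1.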
'''
def get_data_dict(self):
if not self._binary_input or (self.params['binary'] and not self.params['stochastic']):
ds["train_set_y"] = self._train_set_y
ds["test_set_y"] = self._test_set_y
return ds
'''
@property
def input_size(self):
return self.img_rows*self.img_cols
@property
def output_size(self):
pass
@property
def color_images(self):
return 0
@property
def image_shape(self):
return (self.img_rows,self.img_cols,1) # 1 is the number of channels
| 39.09322
| 127
| 0.483525
|
96f3d3c26446d1cb4e5c8d751818e51692e17317
| 2,840
|
py
|
Python
|
silver/models/transactions/codes.py
|
DocTocToc/silver
|
f1b4a8871fc4a37c8813d3c010bc70dc59c0a6e5
|
[
"Apache-2.0"
] | 222
|
2017-01-15T10:30:57.000Z
|
2022-03-08T20:34:46.000Z
|
silver/models/transactions/codes.py
|
DocTocToc/silver
|
f1b4a8871fc4a37c8813d3c010bc70dc59c0a6e5
|
[
"Apache-2.0"
] | 141
|
2017-01-11T10:56:49.000Z
|
2021-10-12T11:51:00.000Z
|
silver/models/transactions/codes.py
|
DocTocToc/silver
|
f1b4a8871fc4a37c8813d3c010bc70dc59c0a6e5
|
[
"Apache-2.0"
] | 76
|
2017-01-10T13:50:27.000Z
|
2022-03-25T21:37:00.000Z
|
# Copyright (c) 2017 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DEFAULT_FAIL_CODE = 'default'
FAIL_CODES = {
DEFAULT_FAIL_CODE: {
'message': 'The transaction has failed.'
},
'insufficient_funds': {
'message': 'Your payment method doesn\'t have sufficient funds.',
'solve_message': 'Add more funds to your payment method or use another payment method.'
},
'expired_payment_method': {
'message': 'Your payment method has expired.',
'solve_message': 'Renew your payment method or use another one.'
},
'expired_card': {
'message': 'Your credit card has expired.',
'solve_message': 'Renew your credit card or use another payment method.'
},
'invalid_payment_method': {
'message': 'The provided payment method is not valid.',
'solve_message': 'Make sure you entered your credentials correctly.'
},
'invalid_card': {
'message': 'The provided credit card is not valid.',
'solve_message': 'Make sure you entered your credentials correctly.'
},
'limit_exceeded': {
'message': 'The attempted transaction exceeds the withdrawal limit of '
'the payment method.',
'solve_message': 'Raise your payment method\'s limit or use another one.'
},
'transaction_declined': {
        'message': 'The transaction has been declined by the payment processor.',
'solve_message': 'Use another payment method or try again later.'
},
'transaction_declined_by_bank': {
'message': 'Your bank has declined the transaction.',
'solve_message': 'Contact your bank or try again later.'
},
'transaction_hard_declined': {
        'message': 'The transaction has been declined by the payment processor.',
'solve_message': 'Use another payment method.'
},
'transaction_hard_declined_by_bank': {
'message': 'Your bank has declined the transaction.',
'solve_message': 'Contact your bank or use another payment method.'
}
}
DEFAULT_REFUND_CODE = 'default'
REFUND_CODES = {
DEFAULT_REFUND_CODE: {
'message': 'The transaction has been refunded.'
},
}
DEFAULT_CANCEL_CODE = 'default'
CANCEL_CODES = {
DEFAULT_CANCEL_CODE: {
'message': 'The transaction has been canceled.'
}
}
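# Illustrative lookup sketch (not part of the original module):
#   code = FAIL_CODES.get('insufficient_funds', FAIL_CODES[DEFAULT_FAIL_CODE])
#   message, hint = code['message'], code.get('solve_message')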
| 36.883117
| 95
| 0.669014
|
405888c552be25abc64027e896c8e0c698577202
| 8,459
|
py
|
Python
|
tests/integration/test_lost_part/test.py
|
chalice19/ClickHouse
|
2f38e7bc5c2113935ab86260439bb543a1737291
|
[
"Apache-2.0"
] | 8,629
|
2016-06-14T21:03:01.000Z
|
2019-09-23T07:46:38.000Z
|
tests/integration/test_lost_part/test.py
|
chalice19/ClickHouse
|
2f38e7bc5c2113935ab86260439bb543a1737291
|
[
"Apache-2.0"
] | 4,335
|
2016-06-15T12:58:31.000Z
|
2019-09-23T11:18:43.000Z
|
tests/integration/test_lost_part/test.py
|
chalice19/ClickHouse
|
2f38e7bc5c2113935ab86260439bb543a1737291
|
[
"Apache-2.0"
] | 1,700
|
2016-06-15T09:25:11.000Z
|
2019-09-23T11:16:38.000Z
|
#!/usr/bin/env python3
import pytest
import time
import ast
import random
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance("node1", with_zookeeper=True)
node2 = cluster.add_instance("node2", with_zookeeper=True)
@pytest.fixture(scope="module")
def start_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def remove_part_from_disk(node, table, part_name):
part_path = node.query(
"SELECT path FROM system.parts WHERE table = '{}' and name = '{}'".format(
table, part_name
)
).strip()
if not part_path:
raise Exception("Part " + part_name + "doesn't exist")
node.exec_in_container(
["bash", "-c", "rm -r {p}/*".format(p=part_path)], privileged=True
)
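# Usage sketch (the part name below is hypothetical):
#   remove_part_from_disk(node1, "mt0", "20201001_0_0_0")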
def test_lost_part_same_replica(start_cluster):
for node in [node1, node2]:
node.query(
"CREATE TABLE mt0 (id UInt64, date Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/t', '{}') ORDER BY tuple() PARTITION BY date".format(
node.name
)
)
node1.query("SYSTEM STOP MERGES mt0")
node2.query("SYSTEM STOP REPLICATION QUEUES")
for i in range(5):
node1.query("INSERT INTO mt0 VALUES ({}, toDate('2020-10-01'))".format(i))
for i in range(20):
parts_to_merge = node1.query(
"SELECT parts_to_merge FROM system.replication_queue"
)
if parts_to_merge:
parts_list = list(sorted(ast.literal_eval(parts_to_merge)))
print("Got parts list", parts_list)
if len(parts_list) < 3:
raise Exception("Got too small parts list {}".format(parts_list))
break
time.sleep(1)
victim_part_from_the_middle = random.choice(parts_list[1:-1])
print("Will corrupt part", victim_part_from_the_middle)
remove_part_from_disk(node1, "mt0", victim_part_from_the_middle)
node1.query("DETACH TABLE mt0")
node1.query("ATTACH TABLE mt0")
node1.query("SYSTEM START MERGES mt0")
for i in range(10):
result = node1.query("SELECT count() FROM system.replication_queue")
if int(result) == 0:
break
time.sleep(1)
else:
assert False, "Still have something in replication queue:\n" + node1.query(
"SELECT count() FROM system.replication_queue FORMAT Vertical"
)
assert node1.contains_in_log(
"Created empty part"
), "Seems like empty part {} is not created or log message changed".format(
victim_part_from_the_middle
)
assert node1.query("SELECT COUNT() FROM mt0") == "4\n"
node2.query("SYSTEM START REPLICATION QUEUES")
assert_eq_with_retry(node2, "SELECT COUNT() FROM mt0", "4")
assert_eq_with_retry(node2, "SELECT COUNT() FROM system.replication_queue", "0")
def test_lost_part_other_replica(start_cluster):
for node in [node1, node2]:
node.query(
"CREATE TABLE mt1 (id UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t1', '{}') ORDER BY tuple()".format(
node.name
)
)
node1.query("SYSTEM STOP MERGES mt1")
node2.query("SYSTEM STOP REPLICATION QUEUES")
for i in range(5):
node1.query("INSERT INTO mt1 VALUES ({})".format(i))
for i in range(20):
parts_to_merge = node1.query(
"SELECT parts_to_merge FROM system.replication_queue"
)
if parts_to_merge:
parts_list = list(sorted(ast.literal_eval(parts_to_merge)))
print("Got parts list", parts_list)
if len(parts_list) < 3:
raise Exception("Got too small parts list {}".format(parts_list))
break
time.sleep(1)
victim_part_from_the_middle = random.choice(parts_list[1:-1])
print("Will corrupt part", victim_part_from_the_middle)
remove_part_from_disk(node1, "mt1", victim_part_from_the_middle)
# other way to detect broken parts
node1.query("CHECK TABLE mt1")
node2.query("SYSTEM START REPLICATION QUEUES")
for i in range(10):
result = node2.query("SELECT count() FROM system.replication_queue")
if int(result) == 0:
break
time.sleep(1)
else:
assert False, "Still have something in replication queue:\n" + node2.query(
"SELECT * FROM system.replication_queue FORMAT Vertical"
)
assert node1.contains_in_log(
"Created empty part"
), "Seems like empty part {} is not created or log message changed".format(
victim_part_from_the_middle
)
assert_eq_with_retry(node2, "SELECT COUNT() FROM mt1", "4")
assert_eq_with_retry(node2, "SELECT COUNT() FROM system.replication_queue", "0")
node1.query("SYSTEM START MERGES mt1")
assert_eq_with_retry(node1, "SELECT COUNT() FROM mt1", "4")
assert_eq_with_retry(node1, "SELECT COUNT() FROM system.replication_queue", "0")
def test_lost_part_mutation(start_cluster):
for node in [node1, node2]:
node.query(
"CREATE TABLE mt2 (id UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t2', '{}') ORDER BY tuple()".format(
node.name
)
)
node1.query("SYSTEM STOP MERGES mt2")
node2.query("SYSTEM STOP REPLICATION QUEUES")
for i in range(2):
node1.query("INSERT INTO mt2 VALUES ({})".format(i))
node1.query(
"ALTER TABLE mt2 UPDATE id = 777 WHERE 1", settings={"mutations_sync": "0"}
)
for i in range(20):
parts_to_mutate = node1.query("SELECT count() FROM system.replication_queue")
# two mutations for both replicas
if int(parts_to_mutate) == 4:
break
time.sleep(1)
remove_part_from_disk(node1, "mt2", "all_1_1_0")
# other way to detect broken parts
node1.query("CHECK TABLE mt2")
node1.query("SYSTEM START MERGES mt2")
for i in range(10):
result = node1.query("SELECT count() FROM system.replication_queue")
if int(result) == 0:
break
time.sleep(1)
else:
assert False, "Still have something in replication queue:\n" + node1.query(
"SELECT * FROM system.replication_queue FORMAT Vertical"
)
assert_eq_with_retry(node1, "SELECT COUNT() FROM mt2", "1")
assert_eq_with_retry(node1, "SELECT SUM(id) FROM mt2", "777")
assert_eq_with_retry(node1, "SELECT COUNT() FROM system.replication_queue", "0")
node2.query("SYSTEM START REPLICATION QUEUES")
assert_eq_with_retry(node2, "SELECT COUNT() FROM mt2", "1")
assert_eq_with_retry(node2, "SELECT SUM(id) FROM mt2", "777")
assert_eq_with_retry(node2, "SELECT COUNT() FROM system.replication_queue", "0")
def test_lost_last_part(start_cluster):
for node in [node1, node2]:
node.query(
"CREATE TABLE mt3 (id UInt64, p String) ENGINE ReplicatedMergeTree('/clickhouse/tables/t3', '{}') "
"ORDER BY tuple() PARTITION BY p".format(node.name)
)
node1.query("SYSTEM STOP MERGES mt3")
node2.query("SYSTEM STOP REPLICATION QUEUES")
for i in range(1):
node1.query("INSERT INTO mt3 VALUES ({}, 'x')".format(i))
# actually not important
node1.query(
"ALTER TABLE mt3 UPDATE id = 777 WHERE 1", settings={"mutations_sync": "0"}
)
partition_id = node1.query("select partitionId('x')").strip()
remove_part_from_disk(node1, "mt3", "{}_0_0_0".format(partition_id))
# other way to detect broken parts
node1.query("CHECK TABLE mt3")
node1.query("SYSTEM START MERGES mt3")
for i in range(10):
result = node1.query("SELECT count() FROM system.replication_queue")
assert int(result) <= 1, "Have a lot of entries in queue {}".format(
node1.query("SELECT * FROM system.replication_queue FORMAT Vertical")
)
if node1.contains_in_log("Cannot create empty part") and node1.contains_in_log(
"DROP/DETACH PARTITION"
):
break
time.sleep(1)
else:
assert False, "Don't have required messages in node1 log"
node1.query("ALTER TABLE mt3 DROP PARTITION ID '{}'".format(partition_id))
assert_eq_with_retry(node1, "SELECT COUNT() FROM mt3", "0")
assert_eq_with_retry(node1, "SELECT COUNT() FROM system.replication_queue", "0")
| 32.786822
| 153
| 0.642866
|
d791c148d21ea31024a77e6c77d768a1b716bcea
| 22,369
|
py
|
Python
|
optable_submission/optable_package/optable/dataset/table.py
|
pfnet-research/KDD-Cup-AutoML-5
|
54202eb6aa414316a70faa8e07a68e1c8ca7bd1b
|
[
"MIT"
] | 18
|
2019-07-22T06:35:37.000Z
|
2021-03-20T08:37:56.000Z
|
optable_submission/optable_package/optable/dataset/table.py
|
pfnet-research/KDD-Cup-AutoML-5
|
54202eb6aa414316a70faa8e07a68e1c8ca7bd1b
|
[
"MIT"
] | 1
|
2020-03-22T21:06:57.000Z
|
2020-03-22T21:06:57.000Z
|
optable_submission/optable_package/optable/dataset/table.py
|
pfnet-research/KDD-Cup-AutoML-5
|
54202eb6aa414316a70faa8e07a68e1c8ca7bd1b
|
[
"MIT"
] | 11
|
2019-07-23T04:06:08.000Z
|
2020-05-12T08:44:01.000Z
|
import collections
import threading
import gc
import traceback
import pandas as pd
import numpy as np
from optable.dataset import feature_types
from optable import _core
class Table(object):
"""avalble for only automl data frame
"""
def __init__(self, df, time_col=None, label_encoders={}, min_time=None):
self.__df = df
self.__time_col = time_col
self.__min_time = min_time
self.__cache = {}
self.__pseudo_target = None
self.__adversarial_true_count = None
self.__adversarial_total_count = None
self.__new_data = {}
if self.__time_col is not None:
time_data = self.__df[self.__time_col]
time_data.index = range(len(time_data))
if min_time is None:
raise ValueError("min_time is None")
time_data = time_data - min_time
time_data = time_data.astype(int).values
time_data = time_data / 1e9
second_time_data = time_data.astype(int)
minute_time_data = second_time_data // 60
hour_time_data = minute_time_data // 60
day_time_data = hour_time_data // 24
second_time_data = second_time_data.astype(np.float32)
minute_time_data = minute_time_data.astype(np.float32)
hour_time_data = hour_time_data.astype(np.float32)
day_time_data = day_time_data.astype(np.float32)
time_data = time_data.astype(np.float32)
"""
time_data[time_data < 0] = np.nan
second_time_data[second_time_data < 0] = np.nan
minute_time_data[minute_time_data < 0] = np.nan
hour_time_data[hour_time_data < 0] = np.nan
day_time_data[day_time_data < 0] = np.nan
"""
self.__time_data = time_data
self.__second_time_data = second_time_data
self.__minute_time_data = minute_time_data
self.__hour_time_data = hour_time_data
self.__day_time_data = day_time_data
self.__sorted_time_index = \
np.argsort(time_data).astype(np.int32)
else:
self.__sorted_time_index = None
self.__hist_time_data = None
self.__ftypes = pd.Series(
self.__automl_df_to_ftypes(), self.__df.dtypes.index)
self.__label_encoders = label_encoders
self.__tfidf_vectorizers = {}
self.__preprocess()
self.__ftypes = pd.Series(
self.__automl_df_to_ftypes(), self.__df.dtypes.index)
self.__nunique = pd.Series(
[self.__df[col].nunique() for col in self.__df],
self.__df.dtypes.index)
self.__set_new_data_lock = threading.Lock()
@property
def ftypes(self):
return self.__ftypes
@property
def df(self):
return self.__df
@property
def sorted_time_index(self):
return self.__sorted_time_index
@property
def time_data(self):
return self.__time_data
@property
def second_time_data(self):
return self.__second_time_data
@property
def minute_time_data(self):
return self.__minute_time_data
@property
def hour_time_data(self):
return self.__hour_time_data
@property
def day_time_data(self):
return self.__day_time_data
@property
def has_time(self):
if self.__time_col is None:
return False
return True
def get_lightgbm_df(self, max_cat_nunique=30):
columns = []
col_idx = []
cat_idx = []
idx = 0
lightgbm_feature_types = [
feature_types.numerical,
feature_types.categorical,
feature_types.mc_processed_numerical,
feature_types.c_processed_numerical,
feature_types.t_processed_numerical,
feature_types.n_processed_categorical,
feature_types.mc_processed_categorical,
feature_types.c_processed_categorical,
feature_types.t_processed_categorical,
feature_types.aggregate_processed_numerical,
feature_types.aggregate_processed_categorical
]
cat_feature_types = [
feature_types.categorical,
feature_types.aggregate_processed_categorical,
feature_types.n_processed_categorical,
feature_types.mc_processed_categorical,
feature_types.c_processed_categorical,
feature_types.t_processed_categorical,
]
for col_i, col in enumerate(self.__df.columns):
for ftype in lightgbm_feature_types:
if col.startswith(ftype.prefix):
if ftype in cat_feature_types:
if self.__nunique[col] <= max_cat_nunique:
cat_idx.append(idx)
columns.append(col)
col_idx.append(col_i)
idx += 1
else:
columns.append(col)
col_idx.append(col_i)
idx += 1
break
return self.__df.take(col_idx, axis=1, is_copy=False), cat_idx
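    # Usage sketch: returns the view of columns LightGBM can consume plus the
    # positional indices of categorical columns, e.g.
    #   lgb_df, cat_idx = table.get_lightgbm_df(max_cat_nunique=30)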
def set_ftypes(self, ftypes):
if isinstance(ftypes, list):
self.__ftypes[:] = ftypes
elif isinstance(ftypes, dict):
for k in ftypes:
self.__ftypes[k] = ftypes[k]
@property
def nunique(self):
return self.__nunique
def set_new_data(self, data, name):
self.__set_new_data_lock.acquire()
if name in self.__df.columns or name in self.__new_data:
print("duplicated", name)
try:
self.__new_data[name] = data
except Exception as e:
print(name)
traceback.print_exc()
finally:
self.__set_new_data_lock.release()
@property
def new_data_size(self):
return len(self.__new_data)
def get_new_data(self):
cat_feature_types = [
feature_types.categorical,
feature_types.aggregate_processed_categorical,
feature_types.n_processed_categorical,
feature_types.mc_processed_categorical,
feature_types.c_processed_categorical,
feature_types.t_processed_categorical,
]
is_cat = [
feature_types.column_name_to_ftype(key)
in cat_feature_types for key in self.__new_data]
return [self.__new_data[key] for key in self.__new_data], is_cat
def clear_new_data(self):
self.__new_data = {}
def confirm_new_data(self):
new_df = pd.DataFrame(self.__new_data)
for name in self.__new_data:
prefix = "{}_".format(name.split("_")[0])
self.__ftypes[name] = feature_types.prefix_to_ftype[prefix]
self.__nunique[name] = new_df[name].nunique()
self.__new_data = {}
gc.collect()
self.__df = pd.concat([self.__df, new_df], axis=1)
gc.collect()
def test_concat(self, test_df):
pass
def __preprocess(self):
cols_of_each_ftype = self.cols_of_each_ftype
        # numerical columns with a low nunique are converted to categorical
"""
if len(self.__df) > 1000:
columns = self.__df.columns
for col in columns:
if self.__ftypes[col] == feature_types.numerical:
if self.__df[col].nunique() <= 10:
self.__df["{}{}".format(
feature_types.categorical.prefix, col,
)] = self.__df[col].astype(str)
self.__df.drop(col, axis=1, inplace=True)
print("numerical {} change to categorical".format(col))
self.__ftypes = pd.Series(
self.__automl_df_to_ftypes(), self.__df.dtypes.index)
"""
import time
new_data = {}
columns = self.__df.columns
for col in columns:
start = time.time()
if self.__ftypes[col] == feature_types.time:
# Time preprocess
self.__df[col] = pd.to_datetime(self.__df[col])
"""
# time numericalize
if self.__min_time is not None:
self.__df["{}numericalized_{}".format(
feature_types.t_processed_numerical.prefix, col,
)] = ((self.__df[col] - self.__min_time).astype(int)
/ 1e9).astype(np.float32)
else:
self.__df["{}numericalized_{}".format(
feature_types.t_processed_numerical.prefix, col,
)] = (self.__df[col].astype(int)
/ 1e9).astype(np.float32)
"""
max_min_time_diff = self.__df[col].max() - self.__df[col].min()
# time hour
if max_min_time_diff > pd.Timedelta('2 hours'):
new_data["{}hour_{}".format(
feature_types.t_processed_numerical.prefix, col,
)] = self.__df[col].dt.hour.values.astype(np.float32)
# time year
if max_min_time_diff > pd.Timedelta('500 days'):
new_data["{}year_{}".format(
feature_types.t_processed_numerical.prefix, col,
)] = self.__df[col].dt.year.values.astype(np.float32)
# time doy
if max_min_time_diff > pd.Timedelta('100 days'):
new_data["{}doy_{}".format(
feature_types.t_processed_numerical.prefix, col,
)] = self.__df[col].dt.dayofyear.values.astype(np.float32)
# time dow
if max_min_time_diff > pd.Timedelta('2 days'):
new_data["{}dow_{}".format(
feature_types.t_processed_numerical.prefix, col,
)] = self.__df[col].dt.dayofweek.values.astype(np.float32)
# weekend
if max_min_time_diff > pd.Timedelta('2 days'):
new_data["{}id_weekend_{}".format(
feature_types.t_processed_categorical.prefix, col,
)] = (self.__df[col].dt.dayofweek >= 5).astype(np.int32)
# time zone
if max_min_time_diff > pd.Timedelta('8 hours'):
new_data["{}time_zone_{}".format(
feature_types.t_processed_categorical.prefix, col,
)] = (self.__df[col].dt.hour.values // 6).astype(np.int32)
self.__df[col] = (
(self.__df[col] - self.__min_time).astype(
int) / 1e9).astype(np.float32)
elif self.__ftypes[col] == feature_types.categorical:
# categorical preprocess
processing_data = \
self.__df[col].fillna("").values
categorical_manager = \
_core.CategoricalManager(processing_data)
self.set_cache(
("categorical_manager", col),
categorical_manager
)
if col in self.__label_encoders:
self.__df[col] = self.__label_encoders[col].transform(
processing_data
).astype(np.int32)
else:
self.__df[col] = categorical_manager.label()
# frequency encoding
new_data["{}frequency_{}".format(
feature_types.c_processed_numerical.prefix, col
)] = categorical_manager.frequency()
if self.has_time:
# processing_data = self.__df[col].values
"""
new_data["{}neighbor_nunique_{}".format(
feature_types.c_processed_numerical.prefix, col
)] = _core.not_temporal_to_many_aggregate(
np.roll(processing_data, -1),
processing_data, processing_data, 'nunique') \
/ _core.not_temporal_to_many_aggregate(
np.ones_like(processing_data),
processing_data, processing_data, 'sum')
new_data["{}time_variance_{}".format(
feature_types.c_processed_numerical.prefix, col
)] = _core.not_temporal_to_many_aggregate(
np.arange(len(processing_data)),
processing_data, processing_data, 'variance')
"""
"""
new_data["{}neighbor_count_{}".format(
feature_types.c_processed_numerical.prefix, col
)] = categorical_manager.sequential_count_encoding(
self.__sorted_time_index,
len(self.__df) // 30)
"""
if categorical_manager.has_null:
new_data["{}_is_null_{}".format(
feature_types.c_processed_categorical.prefix, col
)] = categorical_manager.is_null()
elif self.__ftypes[col] == feature_types.multi_categorical:
# multi categorical preprocess
processing_data = \
self.__df[col].fillna("").values
multi_categorical_manager = \
_core.MultiCategoricalManager(processing_data)
self.set_cache(
("multi_categorical_manager", col),
multi_categorical_manager
)
counter = collections.Counter(processing_data)
if np.median([value for key, value
in counter.most_common()]) > 1:
self.set_cache(
("substance_categorical", col),
True
)
categorical_manager = \
_core.CategoricalManager(processing_data)
self.set_cache(
("categorical_manager", col),
categorical_manager
)
# frequency encoding
"""
self.__df["{}frequency_{}".format(
feature_types.c_processed_numerical.prefix, col
)] = categorical_manager.frequency()
"""
else:
self.set_cache(
("substance_categorical", col),
False
)
# length
# nunique
# duplicated
length = multi_categorical_manager.length()
nunique = multi_categorical_manager.nunique()
# duplicated = length - nunique
duplicated = multi_categorical_manager.duplicates()
new_data["{}length_{}".format(
feature_types.mc_processed_numerical.prefix, col
)] = length
new_data["{}nunique_{}".format(
feature_types.mc_processed_numerical.prefix, col
)] = nunique
new_data["{}duplicated_{}".format(
feature_types.mc_processed_numerical.prefix, col
)] = duplicated
# max_count
# min_count
new_data["{}max_count_{}".format(
feature_types.mc_processed_numerical.prefix, col
)] = multi_categorical_manager.max_count()
new_data["{}min_count_{}".format(
feature_types.mc_processed_numerical.prefix, col
)] = multi_categorical_manager.min_count()
# mode
new_data["{}mode_{}".format(
feature_types.mc_processed_categorical.prefix, col
)] = multi_categorical_manager.mode().astype(int)
# max_tfidf_words
"""
new_data["{}max_tfidf_words_{}".format(
feature_types.mc_processed_categorical.prefix, col
)] = multi_categorical_manager.max_tfidf_words().astype(int)
"""
# hashed tf-idf
"""
multi_categorical_manager.calculate_hashed_tfidf(10)
for vectorized_idx in range(10):
self.__df["{}hashed_tfidf_{}_{}".format(
feature_types.mc_processed_numerical.prefix, col,
vectorized_idx,
)] = multi_categorical_manager.get_hashed_tfidf(
vectorized_idx)
"""
# tf-idf vectorize
"""
for vectorized_idx in range(10):
new_data["{}tfidf_{}_{}".format(
feature_types.mc_processed_numerical.prefix, col,
vectorized_idx,
)] = multi_categorical_manager.tfidf(vectorized_idx)
"""
for vectorized_idx in range(10):
new_data["{}count_{}_{}".format(
feature_types.mc_processed_numerical.prefix, col,
vectorized_idx,
)] = multi_categorical_manager.count(vectorized_idx)
# svd
"""
svd_values = \
multi_categorical_manager.truncated_svd(10, False, False)
"""
"""
tfidf_values = multi_categorical_manager.get_tfidf_matrix()
from sklearn.decomposition import TruncatedSVD
svd_values = TruncatedSVD(
n_components=10, random_state=10, algorithm='arpack',
n_iter=5).fit_transform(tfidf_values)
"""
"""
for svd_idx in range(10):
new_data["{}svd_{}_{}".format(
feature_types.mc_processed_numerical.prefix, col,
svd_idx,
)] = svd_values[:, svd_idx]
"""
self.__df.drop(col, axis=1, inplace=True)
del processing_data
self.__df[col] = ""
gc.collect()
elif self.__ftypes[col] == feature_types.numerical:
# numerical preprocess
if pd.isnull(self.__df[col]).all():
continue
if (
len(np.unique(self.__df[col].values[
np.isfinite(self.__df[col].values)]
)) == 1
):
self.__df.drop(col, axis=1, inplace=True)
continue
"""
mode, mode_count = \
collections.Counter(
self.__df[col].values[
np.isfinite(self.__df[col].values)]
).most_common(1)[0]
mode_freq = mode_count / len(self.__df)
if mode_freq >= 1:
self.__df.drop(col, axis=1, inplace=True)
continue
if mode_freq > 0.1:
new_data["{}_is_mode_{}".format(
feature_types.n_processed_categorical.prefix, col
)] = (self.__df[col].values == mode).astype(np.int32)
"""
if pd.isnull(self.__df[col]).any():
new_data["{}_is_null_{}".format(
feature_types.n_processed_categorical.prefix, col
)] = pd.isnull(self.__df[col]).astype(np.int32)
self.__df[col] = self.__df[col].astype(np.float32)
print(col, time.time() - start)
new_data = pd.DataFrame(new_data)
self.__df = pd.concat([self.__df, new_data], axis=1)
def __automl_df_to_ftypes(self):
ftypes = {}
for col in self.__df.columns:
prefix = "{}_".format(col.split("_")[0])
ftypes[col] = feature_types.prefix_to_ftype[prefix]
return ftypes
@property
def cols_of_each_ftype(self):
cols_of_each_ftype = {ftype: [] for ftype in feature_types.ftypes}
for col in self.__df:
cols_of_each_ftype[self.__ftypes[col]].append(col)
return cols_of_each_ftype
def has_cache(self, key):
return key in self.__cache
def get_cache(self, key):
if self.has_cache(key):
return self.__cache[key]
else:
return None
def set_cache(self, key, value):
self.__cache[key] = value
@property
def cache_keys(self):
return self.__cache.keys()
def clear_cache(self):
self.__cache = {}
gc.collect()
_core.malloc_trim(0)
@property
def pseudo_target(self):
return self.__pseudo_target
@property
def has_pseudo_target(self):
return (self.__pseudo_target is not None)
def set_pseudo_target(self, pseudo_target):
self.__pseudo_target = pseudo_target
@property
def has_adversarial_count(self):
return (self.__adversarial_true_count is not None)
@property
def adversarial_true_count(self):
return self.__adversarial_true_count
@property
def adversarial_total_count(self):
return self.__adversarial_total_count
def set_adversarial_count(self, true_count, total_count):
self.__adversarial_true_count = true_count
self.__adversarial_total_count = total_count
@property
def has_hist_time_data(self):
return self.__hist_time_data is not None
@property
def hist_time_data(self):
return self.__hist_time_data
def set_interval_for_hist(self, interval):
hist_time_data = self.__time_data // interval
hist_time_data = hist_time_data.astype(np.float32)
hist_time_data[hist_time_data < 0] = np.nan
self.__hist_time_data = hist_time_data
| 38.042517
| 79
| 0.534758
|
5d9df367fff50523716450c1d4ba55a310094bb2
| 1,657
|
py
|
Python
|
buhayra/getpaths.py
|
jmigueldelgado/buhayra
|
7236be088f3c3600cfd76650e1f80e0630653fe1
|
[
"MIT"
] | 5
|
2018-04-24T20:30:50.000Z
|
2021-11-20T15:15:18.000Z
|
buhayra/getpaths.py
|
jmigueldelgado/buhayra
|
7236be088f3c3600cfd76650e1f80e0630653fe1
|
[
"MIT"
] | 50
|
2018-04-12T11:02:46.000Z
|
2021-02-05T10:22:33.000Z
|
buhayra/getpaths.py
|
jmigueldelgado/buhayra
|
7236be088f3c3600cfd76650e1f80e0630653fe1
|
[
"MIT"
] | 2
|
2018-04-06T16:05:16.000Z
|
2021-08-25T15:34:20.000Z
|
from os.path import expanduser,exists
import sys
import socket
import os
from buhayra.location import *
### add your hostname and things will run smoothly
if socket.gethostname()=='vouga':
home = {
'home' : expanduser("~"),
'scratch' : os.path.join(expanduser("~"), 'scratch')}
elif socket.gethostname()=='compute':
home = {
'home' : expanduser("~"),
'scratch' : os.path.join(expanduser("~"), 'scratch')}
elif socket.gethostname()=='ubuntuserver':
home = {
'home' : expanduser("~"),
'scratch' : 'None'}
elif socket.gethostname()=='MEKONG':
home = {
'home' : expanduser("~"),
'scratch' : os.path.join(expanduser("~"), 'scratch')}
else:
home = {
'home' : expanduser("~"),
'scratch' : '/mnt/scratch/martinsd'}
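# Example (sketch): to support a new machine, add a branch like the ones above,
# e.g.
#   elif socket.gethostname()=='mymachine':
#       home = {
#           'home' : expanduser("~"),
#           'scratch' : '/path/to/scratch'}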
if location['region'] is None:
home['proj'] =os.path.join(home['home'],'proj','buhayra')
else:
home['proj'] =os.path.join(home['home'],'proj','buhayra'+'_'+location['region'])
home['scratch'] = home['scratch']+'_'+location['region']
home['parameters'] = os.path.join(home['proj'],'buhayra','parameters')
sardir=os.path.join(home['scratch'],'s1a_scenes')
sarIn=os.path.join(sardir,'in')
sarOut=os.path.join(sardir,'out')
dirDEMs=os.path.join(home['scratch'],'dem')
edgeOut = os.path.join(home['scratch'],'edges')
polOut = os.path.join(home['scratch'],'watermasks')
procOut = os.path.join(home['scratch'],'processed_watermasks')
orbits_url = 'http://aux.sentinel1.eo.esa.int/RESORB/'
# sys.path.insert(0, home['parameters'])
if exists(os.path.join(home['proj'],'buhayra','credentials.py')):
from buhayra.credentials import *
| 29.070175
| 84
| 0.630054
|
80ce87ed5499a8b624b033430c91c0db8a3d4e99
| 1,149
|
py
|
Python
|
src/templates/v0.1.9/modules/pyldavis/scripts/zip.py
|
whatevery1says/we1s-templates
|
ce16ae4a39e3286ed7d9bf4a95bff001ac2d123e
|
[
"MIT"
] | null | null | null |
src/templates/v0.1.9/modules/pyldavis/scripts/zip.py
|
whatevery1says/we1s-templates
|
ce16ae4a39e3286ed7d9bf4a95bff001ac2d123e
|
[
"MIT"
] | null | null | null |
src/templates/v0.1.9/modules/pyldavis/scripts/zip.py
|
whatevery1says/we1s-templates
|
ce16ae4a39e3286ed7d9bf4a95bff001ac2d123e
|
[
"MIT"
] | null | null | null |
"""zip.py.
Create zip archives of one or more pyLDAvis visualizations.
Last update: 2020-07-25
"""
# Python imports
import os
import re
import shutil
from IPython.display import display, HTML
# Zip function
def zip(models=None):
"""Zip pyLDAvis visualizations to the current directory.
The `models` parameter takes a string (e.g. 'topics25') or a list (e.g. ['topics25', 'topics50']).
If left blank or set to `All` or `None`, all available models will be zipped.
"""
current_dir = os.getcwd()
    if models is None or (isinstance(models, str) and models.lower() == 'all'):
models = [model for model in os.listdir(current_dir) if os.path.isdir(model) and model.startswith('topics')]
elif isinstance(models, str):
models = [models]
for model in models:
print('Zipping ' + model + '...')
source = os.path.join(current_dir, model)
temp = os.path.join(current_dir, model + '_temp')
if os.path.exists(temp):
shutil.rmtree(temp)
shutil.copytree(source, temp)
shutil.make_archive(model, 'zip', temp)
shutil.rmtree(temp)
display(HTML('<p style="color:green;">Done!</p>'))
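# Illustrative usage sketch (folder names are hypothetical):
#   zip()                          # zip every 'topics*' model folder
#   zip('topics25')                # zip a single model
#   zip(['topics25', 'topics50'])  # zip a selection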
| 31.054054
| 116
| 0.638816
|
143fe1d59c184d30bd16cb56cfe46cc191970ff8
| 33,743
|
py
|
Python
|
mmdet/models/dense_heads/fcos_reid_head_focal_sub_triqueue3.py
|
CvlabAssignment/AlignPS
|
297f4166921d2095f9381e38e04129a103069406
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/dense_heads/fcos_reid_head_focal_sub_triqueue3.py
|
CvlabAssignment/AlignPS
|
297f4166921d2095f9381e38e04129a103069406
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/dense_heads/fcos_reid_head_focal_sub_triqueue3.py
|
CvlabAssignment/AlignPS
|
297f4166921d2095f9381e38e04129a103069406
|
[
"Apache-2.0"
] | null | null | null |
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import Scale, normal_init
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, multiclass_nms_reid
from ..builder import HEADS, build_loss
from .anchor_free_head_reid import AnchorFreeHeadReid
from .labeled_matching_layer_queue import LabeledMatchingLayerQueue
from .unlabeled_matching_layer import UnlabeledMatchingLayer
from .triplet_loss import TripletLossFilter
INF = 1e8
@HEADS.register_module()
class FCOSReidHeadFocalSubTriQueue3(AnchorFreeHeadReid):
"""Anchor-free head used in `FCOS <https://arxiv.org/abs/1904.01355>`_.
The FCOS head does not use anchor boxes. Instead bounding boxes are
    predicted at each pixel and a centerness measure is used to suppress
low-quality predictions.
Here norm_on_bbox, centerness_on_reg, dcn_on_last_conv are training
tricks used in official repo, which will bring remarkable mAP gains
of up to 4.9. Please see https://github.com/tianzhi0549/FCOS for
more detail.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
strides (list[int] | list[tuple[int, int]]): Strides of points
in multiple feature levels. Default: (4, 8, 16, 32, 64).
regress_ranges (tuple[tuple[int, int]]): Regress range of multiple
level points.
center_sampling (bool): If true, use center sampling. Default: False.
center_sample_radius (float): Radius of center sampling. Default: 1.5.
norm_on_bbox (bool): If true, normalize the regression targets
with FPN strides. Default: False.
centerness_on_reg (bool): If true, position centerness on the
regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042.
Default: False.
conv_bias (bool | str): If specified as `auto`, it will be decided by the
norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise
False. Default: "auto".
loss_cls (dict): Config of classification loss.
loss_bbox (dict): Config of localization loss.
loss_centerness (dict): Config of centerness loss.
norm_cfg (dict): dictionary to construct and config norm layer.
Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True).
Example:
>>> self = FCOSHead(11, 7)
>>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
>>> cls_score, bbox_pred, centerness = self.forward(feats)
>>> assert len(cls_score) == len(self.scales)
""" # noqa: E501
def __init__(self,
num_classes,
in_channels,
#regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
# (512, INF)),
regress_ranges=((-1, INF), (-2, -1), (-2, -1), (-2, -1),
(-2, -1)),
#regress_ranges=((-1, INF), (-2, -1), (-2, -1)),
#regress_ranges=((-1, 128), (128, INF), (-2, -1), (-2, -1),
# (-2, -1)),
#regress_ranges=((-1, INF),),
#regress_ranges=((-2, -1), (-1, INF), (-2, -1), (-2, -1),
# (-2, -1)),
#regress_ranges=((-2, -1), (-2, -1), (-1, INF), (-2, -1),
# (-2, -1)),
#regress_ranges=((-1, 128), (128, INF), (-2, -1), (-2, -1),
# (-2, -1)),
#regress_ranges=((-1, 128), (128, 256), (256, INF), (-2, -1),
# (-2, -1)),
#regress_ranges=((-2, -1), (-1, 256), (256, INF), (-2, -1),
# (-2, -1)),
#regress_ranges=((-1, INF), (-1, INF), (-1, INF), (-1, INF),
# (-1, INF)),
center_sampling=False,
center_sample_radius=1.5,
norm_on_bbox=False,
centerness_on_reg=False,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
**kwargs):
self.regress_ranges = regress_ranges
self.center_sampling = center_sampling
self.center_sample_radius = center_sample_radius
self.norm_on_bbox = norm_on_bbox
self.centerness_on_reg = centerness_on_reg
self.background_id = -2
super().__init__(
num_classes,
in_channels,
loss_cls=loss_cls,
loss_bbox=loss_bbox,
norm_cfg=norm_cfg,
**kwargs)
self.loss_centerness = build_loss(loss_centerness)
self.loss_tri = TripletLossFilter()
def _init_layers(self):
"""Initialize layers of the head."""
super()._init_layers()
#self._init_reid_convs()
self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
#self.conv_reid = nn.Conv2d(self.feat_channels, self.feat_channels, 3, padding=1)
# num_person = 483
num_person = 5532
# queue_size = 500
queue_size = 5000
#self.classifier_reid = nn.Linear(self.feat_channels, num_person)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
self.labeled_matching_layer = LabeledMatchingLayerQueue(num_persons=num_person, feat_len=self.in_channels) # for mot17half
self.unlabeled_matching_layer = UnlabeledMatchingLayer(queue_size=queue_size, feat_len=self.in_channels)
def _init_reid_convs(self):
"""Initialize classification conv layers of the head."""
self.reid_convs = nn.ModuleList()
#for i in range(self.stacked_convs):
for i in range(1):
chn = self.in_channels if i == 0 else self.feat_channels
if self.dcn_on_last_conv and i == self.stacked_convs - 1:
conv_cfg = dict(type='DCNv2')
else:
conv_cfg = self.conv_cfg
self.reid_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
#norm_cfg=self.norm_cfg,
norm_cfg=dict(type='BN', requires_grad=True),
bias=self.conv_bias))
def init_weights(self):
"""Initialize weights of the head."""
super().init_weights()
normal_init(self.conv_centerness, std=0.01)
#normal_init(self.conv_reid, std=0.01)
#for m in self.reid_convs:
# if isinstance(m.conv, nn.Conv2d):
# normal_init(m.conv, std=0.01)
def forward(self, feats, proposals=None):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple:
cls_scores (list[Tensor]): Box scores for each scale level, \
each is a 4D-tensor, the channel number is \
num_points * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for each \
scale level, each is a 4D-tensor, the channel number is \
num_points * 4.
                centernesses (list[Tensor]): Centerness for each scale level, \
each is a 4D-tensor, the channel number is num_points * 1.
"""
#print(len(feats), self.scales, self.strides)
#print(len(tuple([feats[0]])), nn.ModuleList([self.scales[0]]), [self.strides[0]])
#for single stage prediction
#return multi_apply(self.forward_single, tuple([feats[0]]), nn.ModuleList([self.scales[0]]),
# [self.strides[0]])
feats = list(feats)
h, w = feats[0].shape[2], feats[0].shape[3]
mean_value = nn.functional.adaptive_avg_pool2d(feats[0], 1)
mean_value = F.upsample(input=mean_value, size=(h, w), mode='bilinear')
feats[0] = feats[0] - mean_value
return multi_apply(self.forward_single, feats, self.scales,
self.strides)
def forward_single(self, x, scale, stride):
"""Forward features of a single scale levle.
Args:
x (Tensor): FPN feature maps of the specified stride.
scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
the bbox prediction.
stride (int): The corresponding stride for feature maps, only
used to normalize the bbox prediction when self.norm_on_bbox
is True.
Returns:
tuple: scores for each class, bbox predictions and centerness \
predictions of input feature maps.
"""
#print(x.shape)
#print('feat shape: ', x.shape, 'stride: ', stride)
cls_score, bbox_pred, cls_feat, reg_feat = super().forward_single(x)
if self.centerness_on_reg:
centerness = self.conv_centerness(reg_feat)
else:
centerness = self.conv_centerness(cls_feat)
reid_feat = x
#for reid_layer in self.reid_convs:
# reid_feat = reid_layer(reid_feat)
#reid_feat = self.conv_reid(reid_feat)
# scale the bbox_pred of different level
# float to avoid overflow when enabling FP16
bbox_pred = scale(bbox_pred).float()
if self.norm_on_bbox:
bbox_pred = F.relu(bbox_pred)
if not self.training:
bbox_pred *= stride
else:
bbox_pred = bbox_pred.exp()
return cls_score, bbox_pred, centerness, reid_feat
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses', 'reid_feat'))
def loss(self,
cls_scores,
bbox_preds,
centernesses,
reid_feats,
gt_bboxes,
gt_labels,
gt_ids,
img_metas,
gt_bboxes_ignore=None):
"""Compute loss of the head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level,
each is a 4D-tensor, the channel number is
num_points * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level, each is a 4D-tensor, the channel number is
num_points * 4.
            centernesses (list[Tensor]): Centerness for each scale level, each
is a 4D-tensor, the channel number is num_points * 1.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
assert len(cls_scores) == len(bbox_preds) == len(centernesses) == len(reid_feats)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
labels, ids, bbox_targets = self.get_targets(all_level_points, gt_bboxes,
gt_labels, gt_ids)
num_imgs = cls_scores[0].size(0)
# flatten cls_scores, bbox_preds and centerness
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
for bbox_pred in bbox_preds
]
flatten_centerness = [
centerness.permute(0, 2, 3, 1).reshape(-1)
for centerness in centernesses
]
flatten_reid = [
reid_feat.permute(0, 2, 3, 1).reshape(-1, self.feat_channels)
for reid_feat in reid_feats
]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_bbox_preds = torch.cat(flatten_bbox_preds)
flatten_centerness = torch.cat(flatten_centerness)
flatten_reid = torch.cat(flatten_reid)
#print("flatten reid", flatten_reid.shape)
flatten_labels = torch.cat(labels)
flatten_ids = torch.cat(ids)
flatten_bbox_targets = torch.cat(bbox_targets)
# repeat points to align with bbox_preds
flatten_points = torch.cat(
[points.repeat(num_imgs, 1) for points in all_level_points])
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
bg_class_ind = self.num_classes
pos_inds = ((flatten_labels >= 0)
& (flatten_labels < bg_class_ind)).nonzero().reshape(-1)
#pos_inds = nonzero((flatten_labels >= 0) & (flatten_labels < bg_class_ind)).reshape(-1)
num_pos = len(pos_inds)
loss_cls = self.loss_cls(
flatten_cls_scores, flatten_labels,
avg_factor=num_pos + num_imgs) # avoid num_pos is 0
pos_bbox_preds = flatten_bbox_preds[pos_inds]
pos_centerness = flatten_centerness[pos_inds]
# background index
'''
bg_inds = ((flatten_labels < 0)
| (flatten_labels == bg_class_ind)).nonzero().reshape(-1)
num_bg = len(bg_inds)
bg_cls_scores = flatten_cls_scores[bg_inds]
if num_bg > num_pos:
cls_ids = torch.argsort(bg_cls_scores.squeeze(), descending=True)
bg_inds = bg_inds[cls_ids[:num_pos]]
'''
pos_reid = flatten_reid[pos_inds]
#bg_reid = flatten_reid[bg_inds]
#pos_reid = torch.cat((pos_reid, bg_reid))
# pos_reid_o = pos_reid.clone()
pos_reid = F.normalize(pos_reid)
if num_pos > 0:
pos_bbox_targets = flatten_bbox_targets[pos_inds]
pos_centerness_targets = self.centerness_target(pos_bbox_targets)
pos_points = flatten_points[pos_inds]
pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
pos_decoded_target_preds = distance2bbox(pos_points,
pos_bbox_targets)
# centerness weighted iou loss
loss_bbox = self.loss_bbox(
pos_decoded_bbox_preds,
pos_decoded_target_preds,
weight=pos_centerness_targets,
avg_factor=pos_centerness_targets.sum())
loss_centerness = self.loss_centerness(pos_centerness,
pos_centerness_targets)
pos_reid_ids = flatten_ids[pos_inds]
#bg_reid_ids = flatten_ids[bg_inds]
#pos_reid_ids = torch.cat((pos_reid_ids, bg_reid_ids))
#loss_oim = self.loss_reid(pos_reid, pos_reid_ids)
#print(pos_reid.shape, pos_reid_ids.shape)
#print(pos_reid_ids)
# reid oim loss
labeled_matching_scores, labeled_matching_reid, labeled_matching_ids = self.labeled_matching_layer(pos_reid, pos_reid_ids)
labeled_matching_scores *= 10
unlabeled_matching_scores = self.unlabeled_matching_layer(pos_reid, pos_reid_ids)
unlabeled_matching_scores *= 10
matching_scores = torch.cat((labeled_matching_scores, unlabeled_matching_scores), dim=1)
pid_labels = pos_reid_ids.clone()
pid_labels[pid_labels == -2] = -1
p_i = F.softmax(matching_scores, dim=1)
#focal_p_i = 0.25 * (1 - p_i)**2 * p_i.log()
focal_p_i = (1 - p_i)**2 * p_i.log()
#focal_p_i = 2*(1 - p_i)**2 * p_i.log()
#focal_p_i = 0.75*(1 - p_i)**2 * p_i.log()
#focal_p_i = 1.25*(1 - p_i)**2 * p_i.log()
#focal_p_i = 0.5*(1 - p_i)**2 * p_i.log()
#loss_oim = F.nll_loss(focal_p_i, pid_labels, reduction='none', ignore_index=-1)
loss_oim = F.nll_loss(focal_p_i, pid_labels, ignore_index=-1)
pos_reid1 = torch.cat((pos_reid, labeled_matching_reid), dim=0)
pid_labels1 = torch.cat((pid_labels, labeled_matching_ids), dim=0)
loss_tri = self.loss_tri(pos_reid1, pid_labels1)
#loss_oim = F.cross_entropy(matching_scores, pid_labels, ignore_index=-1)
'''
# softmax
matching_scores = self.classifier_reid(pos_reid).contiguous()
loss_oim = F.cross_entropy(matching_scores, pos_reid_ids, ignore_index=-1)
'''
else:
loss_bbox = pos_bbox_preds.sum()
loss_centerness = pos_centerness.sum()
loss_oim = pos_reid.sum()
loss_tri = pos_reid.sum()
# no positive samples in this batch: provide empty re-id outputs so the
# return statement below does not fail with a NameError
pos_reid_ids = flatten_ids.new_zeros((0,))
p_i = pos_reid.new_zeros((0, 1))
print('no gt box')
return dict(
loss_cls=loss_cls,
loss_bbox=loss_bbox,
loss_centerness=loss_centerness,
loss_oim=loss_oim,
loss_tri=loss_tri), dict(pos_reid=pos_reid, pos_reid_ids=pos_reid_ids, out_preds=p_i)
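# Editorial sketch of the OIM-style matching used above (names follow the code, the
# temperature is simply the literal factor 10 used there): cosine similarities between the
# L2-normalised positive embeddings and the labeled/unlabeled lookup tables are scaled and
# concatenated before a focal-weighted NLL is taken, roughly:
#   scores = torch.cat((labeled_scores * 10, unlabeled_scores * 10), dim=1)  # (num_pos, L + U)
#   p = F.softmax(scores, dim=1)
#   loss_oim = F.nll_loss((1 - p) ** 2 * p.log(), pid_labels, ignore_index=-1)
# which is the computation spelled out line by line in the positive branch.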
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses', 'reid_feats'))
def get_bboxes(self,
cls_scores,
bbox_preds,
centernesses,
reid_feats,
img_metas,
cfg=None,
rescale=None):
"""Transform network output for a batch into bbox predictions.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_points * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_points * 4, H, W)
centernesses (list[Tensor]): Centerness for each scale level with
shape (N, num_points * 1, H, W)
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used
rescale (bool): If True, return boxes in original image space
Returns:
list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. \
The first item is an (n, 5) tensor, where the first 4 columns \
are bounding box positions (tl_x, tl_y, br_x, br_y) and the \
5-th column is a score between 0 and 1. The second item is a \
(n,) tensor where each item is the predicted class label of \
the corresponding box.
"""
assert len(cls_scores) == len(bbox_preds) == len(reid_feats)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
centerness_pred_list = [
centernesses[i][img_id].detach() for i in range(num_levels)
]
reid_feat_list = [
reid_feats[i][img_id].detach() for i in range(num_levels)
]
# print('1', img_metas)
# print('img type',img_metas[0])
# img_shape = img_metas[img_id]['img_shape'] img_metas.data[img_id]['img_shape']
img_shape = img_metas[0]['img_shape']
scale_factor = img_metas[0]['scale_factor']
det_bboxes = self._get_bboxes_single(cls_score_list,
bbox_pred_list,
centerness_pred_list,
reid_feat_list,
mlvl_points, img_shape,
scale_factor, cfg, rescale)
result_list.append(det_bboxes)
return result_list
def _get_bboxes_single(self,
cls_scores,
bbox_preds,
centernesses,
reid_feats,
mlvl_points,
img_shape,
scale_factor,
cfg,
rescale=False):
"""Transform outputs for a single batch item into bbox predictions.
Args:
cls_scores (list[Tensor]): Box scores for a single scale level
Has shape (num_points * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for a single scale
level with shape (num_points * 4, H, W).
centernesses (list[Tensor]): Centerness for a single scale level
with shape (num_points * 1, H, W).
mlvl_points (list[Tensor]): Box reference for a single scale level
with shape (num_total_points, 4).
img_shape (tuple[int]): Shape of the input image,
(height, width, 3).
scale_factor (ndarray): Scale factor of the image arrange as
(w_scale, h_scale, w_scale, h_scale).
cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Returns:
tuple[Tensor]: det_bboxes in shape (n, 5), where the first 4 columns \
are bounding box positions (tl_x, tl_y, br_x, br_y) and the \
5-th column is a score between 0 and 1; det_labels in shape (n,); \
and det_reid_feats in shape (n, dim), where dim is the reid feature dimension.
"""
cfg = self.test_cfg if cfg is None else cfg
assert len(cls_scores) == len(bbox_preds) == len(mlvl_points) == len(reid_feats)
mlvl_bboxes = []
mlvl_scores = []
mlvl_centerness = []
mlvl_reid_feats = []
for cls_score, bbox_pred, centerness, points, reid_feat in zip(
cls_scores, bbox_preds, centernesses, mlvl_points, reid_feats):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
#reid_feat = reid_feat.permute(1, 2, 0).reshape(-1, 256)
reid_feat = reid_feat.permute(1, 2, 0).reshape(-1, self.in_channels)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
max_scores, _ = (scores * centerness[:, None]).max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
points = points[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
centerness = centerness[topk_inds]
reid_feat = reid_feat[topk_inds, :]
bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_centerness.append(centerness)
mlvl_reid_feats.append(reid_feat)
mlvl_bboxes = torch.cat(mlvl_bboxes)
mlvl_reid_feats = torch.cat(mlvl_reid_feats)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
# remind that we set FG labels to [0, num_class-1] since mmdet v2.0
# BG cat_id: num_class
mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
mlvl_centerness = torch.cat(mlvl_centerness)
det_bboxes, det_labels, det_reid_feats = multiclass_nms_reid(
mlvl_bboxes,
mlvl_scores,
mlvl_reid_feats,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
score_factors=mlvl_centerness)
return det_bboxes, det_labels, det_reid_feats
def _get_points_single(self,
featmap_size,
stride,
dtype,
device,
flatten=False):
"""Get points according to feature map sizes."""
y, x = super()._get_points_single(featmap_size, stride, dtype, device)
points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride),
dim=-1) + stride // 2
return points
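# Illustrative example (values assumed for the sketch): with featmap_size=(2, 2) and
# stride=8 the points returned above are the centres of the stride cells on the input
# image, i.e. [[4, 4], [12, 4], [4, 12], [12, 12]], because every grid index is multiplied
# by the stride and then offset by stride // 2.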
def get_targets(self, points, gt_bboxes_list, gt_labels_list, gt_ids_list):
"""Compute regression, classification and centerss targets for points
in multiple images.
Args:
points (list[Tensor]): Points of each fpn level, each has shape
(num_points, 2).
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
each has shape (num_gt, 4).
gt_labels_list (list[Tensor]): Ground truth labels of each box,
each has shape (num_gt,).
gt_ids_list (list[Tensor]): Ground truth person ids of each box,
each has shape (num_gt,).
Returns:
tuple:
concat_lvl_labels (list[Tensor]): Labels of each level. \
concat_lvl_ids (list[Tensor]): Person ids of each level. \
concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \
level.
"""
#for single stage prediction
#points = [points[0]]
#print(points, self.regress_ranges)
#print(len(points), len(self.regress_ranges))
assert len(points) == len(self.regress_ranges)
num_levels = len(points)
# expand regress ranges to align with points
expanded_regress_ranges = [
points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
points[i]) for i in range(num_levels)
]
# concat all levels points and regress ranges
concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
concat_points = torch.cat(points, dim=0)
# the number of points per img, per lvl
num_points = [center.size(0) for center in points]
# get labels and bbox_targets of each image
labels_list, ids_list, bbox_targets_list = multi_apply(
self._get_target_single,
gt_bboxes_list,
gt_labels_list,
gt_ids_list,
points=concat_points,
regress_ranges=concat_regress_ranges,
num_points_per_lvl=num_points)
# split to per img, per level
labels_list = [labels.split(num_points, 0) for labels in labels_list]
ids_list = [ids.split(num_points, 0) for ids in ids_list]
bbox_targets_list = [
bbox_targets.split(num_points, 0)
for bbox_targets in bbox_targets_list
]
# concat per level image
concat_lvl_labels = []
concat_lvl_ids = []
concat_lvl_bbox_targets = []
for i in range(num_levels):
concat_lvl_labels.append(
torch.cat([labels[i] for labels in labels_list]))
concat_lvl_ids.append(
torch.cat([ids[i] for ids in ids_list]))
bbox_targets = torch.cat(
[bbox_targets[i] for bbox_targets in bbox_targets_list])
if self.norm_on_bbox:
bbox_targets = bbox_targets / self.strides[i]
concat_lvl_bbox_targets.append(bbox_targets)
return concat_lvl_labels, concat_lvl_ids, concat_lvl_bbox_targets
def _get_target_single(self, gt_bboxes, gt_labels, gt_ids, points, regress_ranges,
num_points_per_lvl):
"""Compute regression and classification targets for a single image."""
num_points = points.size(0)
num_gts = gt_labels.size(0)
if num_gts == 0:
return gt_labels.new_full((num_points,), self.background_label), \
gt_ids.new_full((num_points,), self.background_id), \
gt_bboxes.new_zeros((num_points, 4))
areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1])
# TODO: figure out why these two are different
# areas = areas[None].expand(num_points, num_gts)
areas = areas[None].repeat(num_points, 1)
regress_ranges = regress_ranges[:, None, :].expand(
num_points, num_gts, 2)
gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
xs, ys = points[:, 0], points[:, 1]
xs = xs[:, None].expand(num_points, num_gts)
ys = ys[:, None].expand(num_points, num_gts)
left = xs - gt_bboxes[..., 0]
right = gt_bboxes[..., 2] - xs
top = ys - gt_bboxes[..., 1]
bottom = gt_bboxes[..., 3] - ys
bbox_targets = torch.stack((left, top, right, bottom), -1)
if self.center_sampling:
# condition1: inside a `center bbox`
radius = self.center_sample_radius
center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2
center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2
center_gts = torch.zeros_like(gt_bboxes)
stride = center_xs.new_zeros(center_xs.shape)
# project the points on current lvl back to the `original` sizes
lvl_begin = 0
for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl):
lvl_end = lvl_begin + num_points_lvl
stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius
lvl_begin = lvl_end
x_mins = center_xs - stride
y_mins = center_ys - stride
x_maxs = center_xs + stride
y_maxs = center_ys + stride
center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0],
x_mins, gt_bboxes[..., 0])
center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1],
y_mins, gt_bboxes[..., 1])
center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2],
gt_bboxes[..., 2], x_maxs)
center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3],
gt_bboxes[..., 3], y_maxs)
cb_dist_left = xs - center_gts[..., 0]
cb_dist_right = center_gts[..., 2] - xs
cb_dist_top = ys - center_gts[..., 1]
cb_dist_bottom = center_gts[..., 3] - ys
center_bbox = torch.stack(
(cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1)
inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0
else:
# condition1: inside a gt bbox
inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
# condition2: limit the regression range for each location
max_regress_distance = bbox_targets.max(-1)[0]
inside_regress_range = (
(max_regress_distance >= regress_ranges[..., 0])
& (max_regress_distance <= regress_ranges[..., 1]))
# if there are still more than one objects for a location,
# we choose the one with minimal area
areas[inside_gt_bbox_mask == 0] = INF
areas[inside_regress_range == 0] = INF
min_area, min_area_inds = areas.min(dim=1)
labels = gt_labels[min_area_inds]
ids = gt_ids[min_area_inds]
labels[min_area == INF] = self.background_label # set as BG
ids[min_area == INF] = self.background_id # set as unannotated
bbox_targets = bbox_targets[range(num_points), min_area_inds]
return labels, ids, bbox_targets
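# Editorial note on the centre-sampling branch above: the per-level assignment
# stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius means that, for example, a
# level with stride 8 and a radius of 1.5 (a common FCOS default, assumed here) only keeps
# points within 12 px of the ground-truth centre (and still inside the box) as positives.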
def centerness_target(self, pos_bbox_targets):
"""Compute centerness targets.
Args:
pos_bbox_targets (Tensor): BBox targets of positive bboxes in shape
(num_pos, 4)
Returns:
Tensor: Centerness target.
"""
# only calculate pos centerness targets, otherwise there may be nan
left_right = pos_bbox_targets[:, [0, 2]]
top_bottom = pos_bbox_targets[:, [1, 3]]
centerness_targets = (
left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (
top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
return torch.sqrt(centerness_targets)
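# Worked example for the formula above (numbers assumed): for distances l=2, r=8, t=5, b=5
# the target is sqrt((min(2, 8) / max(2, 8)) * (min(5, 5) / max(5, 5))) = sqrt(0.25) = 0.5,
# so locations near the box centre get targets close to 1 and locations near an edge
# get targets close to 0.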
| 45.537112
| 134
| 0.568207
|
9f58820fd1300eddcc367e67674e3f152e8d0857
| 2,111
|
py
|
Python
|
marmot/features/phrase/tests/test_pos_feature_extractor.py
|
qe-team/marmot
|
38e09ff1d0a3025a6b7edeaaf6086ed047ec45ff
|
[
"0BSD"
] | 19
|
2015-08-21T13:06:37.000Z
|
2021-07-26T09:56:29.000Z
|
marmot/features/phrase/tests/test_pos_feature_extractor.py
|
qe-team/marmot
|
38e09ff1d0a3025a6b7edeaaf6086ed047ec45ff
|
[
"0BSD"
] | 36
|
2015-01-13T13:01:07.000Z
|
2016-06-22T06:59:59.000Z
|
marmot/features/phrase/tests/test_pos_feature_extractor.py
|
qe-team/marmot
|
38e09ff1d0a3025a6b7edeaaf6086ed047ec45ff
|
[
"0BSD"
] | 8
|
2015-12-11T16:41:47.000Z
|
2019-04-08T16:28:40.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from marmot.features.phrase.pos_feature_extractor import POSFeatureExtractor
# test a class which extracts POS-based features: percentages of content words, verbs, nouns and pronouns on source and target, plus their source/target ratios
class POSFeatureExtractorTests(unittest.TestCase):
def setUp(self):
self.extractor = POSFeatureExtractor('english', 'spanish')
def test_get_features(self):
obj = {'source': ['a', 'boy', 'hits', 'the', 'small', 'dog', 'severely'],
'target': ['uno', 'nino', 'abati', 'el', 'perro'],
'alignments': [[], [0, 1], [2, 3], [3], [4]],
'target_pos': ['ART', 'NC', 'VLfin', 'ART', 'NC'],
'source_pos': ['DT', 'NN', 'VBZ', 'DT', 'JJ', 'NN', 'RB'],
'token': ['uno', 'perro'],
'index': (3, 5),
'source_token': ['the', 'small', 'dog', 'severely'],
'source_index': (3, 7)}
'''
0 - 'percentage_content_words_src',
1 - 'percentage_content_words_tg',
2 - 'percentage_verbs_src',
3 - 'percentage_verbs_tg',
4 - 'percentage_nouns_src',
5 - 'percentage_nouns_tg',
6 - 'percentage_pronouns_src',
7 - 'percentage_pronouns_tg',
8 - 'ratio_content_words_src_tg',
9 - 'ratio_verbs_src_tg',
10 - 'ratio_nouns_src_tg',
11 - 'ratio_pronouns_src_tg'
'''
all_pos = self.extractor.get_features(obj)
self.assertAlmostEqual(all_pos[0], 0.75)
self.assertAlmostEqual(all_pos[1], 0.5)
self.assertAlmostEqual(all_pos[2], 0.0)
self.assertAlmostEqual(all_pos[3], 0.0)
self.assertAlmostEqual(all_pos[4], 0.25)
self.assertAlmostEqual(all_pos[5], 0.5)
self.assertAlmostEqual(all_pos[6], 0.0)
self.assertAlmostEqual(all_pos[7], 0.0)
self.assertAlmostEqual(all_pos[8], 1.5)
self.assertAlmostEqual(all_pos[9], 1.0)
self.assertAlmostEqual(all_pos[10], 0.5)
self.assertAlmostEqual(all_pos[11], 1.0)
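# Editorial note on how the expected values follow from the fixture: the source phrase
# ['the', 'small', 'dog', 'severely'] has 3 content words out of 4 (0.75) and 1 noun
# (0.25); the target phrase at index (3, 5), ['el', 'perro'] with POS ['ART', 'NC'],
# has 0.5 content words and 0.5 nouns. The ratio features appear to divide the source
# percentage by the target one (0.75 / 0.5 = 1.5 and 0.25 / 0.5 = 0.5), with the
# 0/0 cases for verbs and pronouns asserted as 1.0.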
if __name__ == '__main__':
unittest.main()
| 37.696429
| 109
| 0.582662
|
ed53da4819086fdaf0a557682d7c4be0c58e4721
| 65,787
|
py
|
Python
|
n2vc/n2vc_juju_conn.py
|
TCSOSM-20/N2VC
|
d99f3f2f67d693c30494be7ad19b97f3f5528961
|
[
"Apache-2.0"
] | null | null | null |
n2vc/n2vc_juju_conn.py
|
TCSOSM-20/N2VC
|
d99f3f2f67d693c30494be7ad19b97f3f5528961
|
[
"Apache-2.0"
] | null | null | null |
n2vc/n2vc_juju_conn.py
|
TCSOSM-20/N2VC
|
d99f3f2f67d693c30494be7ad19b97f3f5528961
|
[
"Apache-2.0"
] | null | null | null |
##
# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of OSM
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: nfvlabs@tid.es
##
import asyncio
import base64
import binascii
import logging
import os
import re
import time
from juju.action import Action
from juju.application import Application
from juju.client import client
from juju.controller import Controller
from juju.errors import JujuAPIError
from juju.machine import Machine
from juju.model import Model
from n2vc.exceptions import (
N2VCBadArgumentsException,
N2VCException,
N2VCConnectionException,
N2VCExecutionException,
N2VCInvalidCertificate,
N2VCNotFound,
MethodNotImplemented,
JujuK8sProxycharmNotSupported,
)
from n2vc.juju_observer import JujuModelObserver
from n2vc.n2vc_conn import N2VCConnector
from n2vc.n2vc_conn import obj_to_dict, obj_to_yaml
from n2vc.provisioner import AsyncSSHProvisioner
from n2vc.libjuju import Libjuju
class N2VCJujuConnector(N2VCConnector):
"""
####################################################################################
################################### P U B L I C ####################################
####################################################################################
"""
BUILT_IN_CLOUDS = ["localhost", "microk8s"]
def __init__(
self,
db: object,
fs: object,
log: object = None,
loop: object = None,
url: str = "127.0.0.1:17070",
username: str = "admin",
vca_config: dict = None,
on_update_db=None,
):
"""Initialize juju N2VC connector
"""
# parent class constructor
N2VCConnector.__init__(
self,
db=db,
fs=fs,
log=log,
loop=loop,
url=url,
username=username,
vca_config=vca_config,
on_update_db=on_update_db,
)
# silence websocket traffic log
logging.getLogger("websockets.protocol").setLevel(logging.INFO)
logging.getLogger("juju.client.connection").setLevel(logging.WARN)
logging.getLogger("model").setLevel(logging.WARN)
self.log.info("Initializing N2VC juju connector...")
"""
##############################################################
# check arguments
##############################################################
"""
# juju URL
if url is None:
raise N2VCBadArgumentsException("Argument url is mandatory", ["url"])
url_parts = url.split(":")
if len(url_parts) != 2:
raise N2VCBadArgumentsException(
"Argument url: bad format (localhost:port) -> {}".format(url), ["url"]
)
self.hostname = url_parts[0]
try:
self.port = int(url_parts[1])
except ValueError:
raise N2VCBadArgumentsException(
"url port must be a number -> {}".format(url), ["url"]
)
# juju USERNAME
if username is None:
raise N2VCBadArgumentsException(
"Argument username is mandatory", ["username"]
)
# juju CONFIGURATION
if vca_config is None:
raise N2VCBadArgumentsException(
"Argument vca_config is mandatory", ["vca_config"]
)
if "secret" in vca_config:
self.secret = vca_config["secret"]
else:
raise N2VCBadArgumentsException(
"Argument vca_config.secret is mandatory", ["vca_config.secret"]
)
# pubkey of juju client in osm machine: ~/.local/share/juju/ssh/juju_id_rsa.pub
# if exists, it will be written in lcm container: _create_juju_public_key()
if "public_key" in vca_config:
self.public_key = vca_config["public_key"]
else:
self.public_key = None
# TODO: Verify ca_cert is valid before using. VCA will crash
# if the ca_cert isn't formatted correctly.
def base64_to_cacert(b64string):
"""Convert the base64-encoded string containing the VCA CACERT.
The input string....
"""
try:
cacert = base64.b64decode(b64string).decode("utf-8")
cacert = re.sub(r"\\n", r"\n", cacert,)
except binascii.Error as e:
self.log.debug("Caught binascii.Error: {}".format(e))
raise N2VCInvalidCertificate(message="Invalid CA Certificate")
return cacert
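# Usage sketch (the path below is purely illustrative): vca_config["ca_cert"] is expected
# to carry the PEM certificate base64-encoded, e.g. something like
#   base64.b64encode(open("/path/to/ca.crt", "rb").read()).decode("utf-8")
# which base64_to_cacert() decodes back into a multi-line PEM string, also turning any
# literal "\n" escapes into real newlines.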
self.ca_cert = vca_config.get("ca_cert")
if self.ca_cert:
self.ca_cert = base64_to_cacert(vca_config["ca_cert"])
if "api_proxy" in vca_config:
self.api_proxy = vca_config["api_proxy"]
self.log.debug(
"api_proxy for native charms configured: {}".format(self.api_proxy)
)
else:
self.log.warning(
"api_proxy is not configured. Support for native charms is disabled"
)
self.api_proxy = None
if "enable_os_upgrade" in vca_config:
self.enable_os_upgrade = vca_config["enable_os_upgrade"]
else:
self.enable_os_upgrade = True
if "apt_mirror" in vca_config:
self.apt_mirror = vca_config["apt_mirror"]
else:
self.apt_mirror = None
self.cloud = vca_config.get('cloud')
self.k8s_cloud = None
if "k8s_cloud" in vca_config:
self.k8s_cloud = vca_config.get("k8s_cloud")
self.log.debug('Arguments have been checked')
# juju data
self.controller = None # it will be filled when connect to juju
self.juju_models = {} # model objects for every model_name
self.juju_observers = {} # model observers for every model_name
self._connecting = (
False # while connecting to juju (to avoid duplicate connections)
)
self._authenticated = (
False # it will be True when the juju connection is established
)
self._creating_model = False # True during model creation
self.libjuju = Libjuju(
endpoint=self.url,
api_proxy=self.api_proxy,
enable_os_upgrade=self.enable_os_upgrade,
apt_mirror=self.apt_mirror,
username=self.username,
password=self.secret,
cacert=self.ca_cert,
loop=self.loop,
log=self.log,
db=self.db,
n2vc=self,
)
# create juju pub key file in lcm container at
# ./local/share/juju/ssh/juju_id_rsa.pub
self._create_juju_public_key()
self.log.info("N2VC juju connector initialized")
async def get_status(self, namespace: str, yaml_format: bool = True):
# self.log.info('Getting NS status. namespace: {}'.format(namespace))
_nsi_id, ns_id, _vnf_id, _vdu_id, _vdu_count = self._get_namespace_components(
namespace=namespace
)
# model name is ns_id
model_name = ns_id
if model_name is None:
msg = "Namespace {} not valid".format(namespace)
self.log.error(msg)
raise N2VCBadArgumentsException(msg, ["namespace"])
status = {}
models = await self.libjuju.list_models(contains=ns_id)
for m in models:
status[m] = await self.libjuju.get_model_status(m)
if yaml_format:
return obj_to_yaml(status)
else:
return obj_to_dict(status)
async def create_execution_environment(
self,
namespace: str,
db_dict: dict,
reuse_ee_id: str = None,
progress_timeout: float = None,
total_timeout: float = None,
) -> (str, dict):
self.log.info(
"Creating execution environment. namespace: {}, reuse_ee_id: {}".format(
namespace, reuse_ee_id
)
)
machine_id = None
if reuse_ee_id:
model_name, application_name, machine_id = self._get_ee_id_components(
ee_id=reuse_ee_id
)
else:
(
_nsi_id,
ns_id,
_vnf_id,
_vdu_id,
_vdu_count,
) = self._get_namespace_components(namespace=namespace)
# model name is ns_id
model_name = ns_id
# application name
application_name = self._get_application_name(namespace=namespace)
self.log.debug(
"model name: {}, application name: {}, machine_id: {}".format(
model_name, application_name, machine_id
)
)
# create or reuse a new juju machine
try:
if not await self.libjuju.model_exists(model_name):
await self.libjuju.add_model(model_name, cloud_name=self.cloud)
machine, new = await self.libjuju.create_machine(
model_name=model_name,
machine_id=machine_id,
db_dict=db_dict,
progress_timeout=progress_timeout,
total_timeout=total_timeout,
)
# id for the execution environment
ee_id = N2VCJujuConnector._build_ee_id(
model_name=model_name,
application_name=application_name,
machine_id=str(machine.entity_id),
)
self.log.debug("ee_id: {}".format(ee_id))
if new:
# write ee_id in database
self._write_ee_id_db(db_dict=db_dict, ee_id=ee_id)
except Exception as e:
message = "Error creating machine on juju: {}".format(e)
self.log.error(message)
raise N2VCException(message=message)
# new machine credentials
credentials = {
"hostname": machine.dns_name,
}
self.log.info(
"Execution environment created. ee_id: {}, credentials: {}".format(
ee_id, credentials
)
)
return ee_id, credentials
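# Sketch of the values returned above (identifiers and address are made up): ee_id is the
# dotted "<model>.<application>.<machine>" string built by _build_ee_id(), and credentials
# currently only carries the new machine's DNS name, e.g.
#   ("myns.app-vnf-abc123.4", {"hostname": "10.0.0.12"})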
async def register_execution_environment(
self,
namespace: str,
credentials: dict,
db_dict: dict,
progress_timeout: float = None,
total_timeout: float = None,
) -> str:
self.log.info(
"Registering execution environment. namespace={}, credentials={}".format(
namespace, credentials
)
)
if credentials is None:
raise N2VCBadArgumentsException(
message="credentials are mandatory", bad_args=["credentials"]
)
if credentials.get("hostname"):
hostname = credentials["hostname"]
else:
raise N2VCBadArgumentsException(
message="hostname is mandatory", bad_args=["credentials.hostname"]
)
if credentials.get("username"):
username = credentials["username"]
else:
raise N2VCBadArgumentsException(
message="username is mandatory", bad_args=["credentials.username"]
)
if "private_key_path" in credentials:
private_key_path = credentials["private_key_path"]
else:
# if not passed as argument, use generated private key path
private_key_path = self.private_key_path
_nsi_id, ns_id, _vnf_id, _vdu_id, _vdu_count = self._get_namespace_components(
namespace=namespace
)
# model name
model_name = ns_id
# application name
application_name = self._get_application_name(namespace=namespace)
# register machine on juju
try:
if not self.api_proxy:
msg = "Cannot provision machine: api_proxy is not defined"
self.log.error(msg=msg)
raise N2VCException(message=msg)
if not await self.libjuju.model_exists(model_name):
await self.libjuju.add_model(model_name, cloud_name=self.cloud)
machine_id = await self.libjuju.provision_machine(
model_name=model_name,
hostname=hostname,
username=username,
private_key_path=private_key_path,
db_dict=db_dict,
progress_timeout=progress_timeout,
total_timeout=total_timeout,
)
except Exception as e:
self.log.error("Error registering machine: {}".format(e))
raise N2VCException(
message="Error registering machine on juju: {}".format(e)
)
self.log.info("Machine registered: {}".format(machine_id))
# id for the execution environment
ee_id = N2VCJujuConnector._build_ee_id(
model_name=model_name,
application_name=application_name,
machine_id=str(machine_id),
)
self.log.info("Execution environment registered. ee_id: {}".format(ee_id))
return ee_id
async def install_configuration_sw(
self,
ee_id: str,
artifact_path: str,
db_dict: dict,
progress_timeout: float = None,
total_timeout: float = None,
config: dict = None,
num_units: int = 1,
):
self.log.info(
(
"Installing configuration sw on ee_id: {}, "
"artifact path: {}, db_dict: {}"
).format(ee_id, artifact_path, db_dict)
)
# check arguments
if ee_id is None or len(ee_id) == 0:
raise N2VCBadArgumentsException(
message="ee_id is mandatory", bad_args=["ee_id"]
)
if artifact_path is None or len(artifact_path) == 0:
raise N2VCBadArgumentsException(
message="artifact_path is mandatory", bad_args=["artifact_path"]
)
if db_dict is None:
raise N2VCBadArgumentsException(
message="db_dict is mandatory", bad_args=["db_dict"]
)
try:
(
model_name,
application_name,
machine_id,
) = N2VCJujuConnector._get_ee_id_components(ee_id=ee_id)
self.log.debug(
"model: {}, application: {}, machine: {}".format(
model_name, application_name, machine_id
)
)
except Exception:
raise N2VCBadArgumentsException(
message="ee_id={} is not a valid execution environment id".format(
ee_id
),
bad_args=["ee_id"],
)
# remove // in charm path
while artifact_path.find("//") >= 0:
artifact_path = artifact_path.replace("//", "/")
# check charm path
if not self.fs.file_exists(artifact_path, mode="dir"):
msg = "artifact path does not exist: {}".format(artifact_path)
raise N2VCBadArgumentsException(message=msg, bad_args=["artifact_path"])
if artifact_path.startswith("/"):
full_path = self.fs.path + artifact_path
else:
full_path = self.fs.path + "/" + artifact_path
try:
await self.libjuju.deploy_charm(
model_name=model_name,
application_name=application_name,
path=full_path,
machine_id=machine_id,
db_dict=db_dict,
progress_timeout=progress_timeout,
total_timeout=total_timeout,
config=config,
num_units=num_units,
)
except Exception as e:
raise N2VCException(
message="Error desploying charm into ee={} : {}".format(ee_id, e)
)
self.log.info("Configuration sw installed")
async def install_k8s_proxy_charm(
self,
charm_name: str,
namespace: str,
artifact_path: str,
db_dict: dict,
progress_timeout: float = None,
total_timeout: float = None,
config: dict = None,
) -> str:
"""
Install a k8s proxy charm
:param charm_name: Name of the charm being deployed
:param namespace: collection of all the uuids related to the charm.
:param str artifact_path: where to locate the artifacts (parent folder) using
the self.fs
the final artifact path will be a combination of this artifact_path and
additional string from the config_dict (e.g. charm name)
:param dict db_dict: where to write into database when the status changes.
It contains a dict with
{collection: <str>, filter: {}, path: <str>},
e.g. {collection: "nsrs", filter:
{_id: <nsd-id>, path: "_admin.deployed.VCA.3"}
:param float progress_timeout:
:param float total_timeout:
:param config: Dictionary with additional configuration
:returns ee_id: execution environment id.
"""
self.log.info('Installing k8s proxy charm: {}, artifact path: {}, db_dict: {}'
.format(charm_name, artifact_path, db_dict))
if not self.k8s_cloud:
raise JujuK8sProxycharmNotSupported("There is no k8s_cloud available")
if artifact_path is None or len(artifact_path) == 0:
raise N2VCBadArgumentsException(
message="artifact_path is mandatory", bad_args=["artifact_path"]
)
if db_dict is None:
raise N2VCBadArgumentsException(message='db_dict is mandatory', bad_args=['db_dict'])
# remove // in charm path
while artifact_path.find('//') >= 0:
artifact_path = artifact_path.replace('//', '/')
# check charm path
if not self.fs.file_exists(artifact_path, mode="dir"):
msg = 'artifact path does not exist: {}'.format(artifact_path)
raise N2VCBadArgumentsException(message=msg, bad_args=['artifact_path'])
if artifact_path.startswith('/'):
full_path = self.fs.path + artifact_path
else:
full_path = self.fs.path + '/' + artifact_path
_, ns_id, _, _, _ = self._get_namespace_components(namespace=namespace)
model_name = '{}-k8s'.format(ns_id)
await self.libjuju.add_model(model_name, self.k8s_cloud)
application_name = self._get_application_name(namespace)
try:
await self.libjuju.deploy_charm(
model_name=model_name,
application_name=application_name,
path=full_path,
machine_id=None,
db_dict=db_dict,
progress_timeout=progress_timeout,
total_timeout=total_timeout,
config=config
)
except Exception as e:
raise N2VCException(message='Error deploying charm: {}'.format(e))
self.log.info('K8s proxy charm installed')
ee_id = N2VCJujuConnector._build_ee_id(
model_name=model_name,
application_name=application_name,
machine_id="k8s",
)
self._write_ee_id_db(db_dict=db_dict, ee_id=ee_id)
return ee_id
async def get_ee_ssh_public__key(
self,
ee_id: str,
db_dict: dict,
progress_timeout: float = None,
total_timeout: float = None,
) -> str:
self.log.info(
(
"Generating priv/pub key pair and get pub key on ee_id: {}, db_dict: {}"
).format(ee_id, db_dict)
)
# check arguments
if ee_id is None or len(ee_id) == 0:
raise N2VCBadArgumentsException(
message="ee_id is mandatory", bad_args=["ee_id"]
)
if db_dict is None:
raise N2VCBadArgumentsException(
message="db_dict is mandatory", bad_args=["db_dict"]
)
try:
(
model_name,
application_name,
machine_id,
) = N2VCJujuConnector._get_ee_id_components(ee_id=ee_id)
self.log.debug(
"model: {}, application: {}, machine: {}".format(
model_name, application_name, machine_id
)
)
except Exception:
raise N2VCBadArgumentsException(
message="ee_id={} is not a valid execution environment id".format(
ee_id
),
bad_args=["ee_id"],
)
# try to execute ssh layer primitives (if exist):
# generate-ssh-key
# get-ssh-public-key
output = None
application_name = N2VCJujuConnector._format_app_name(application_name)
# execute action: generate-ssh-key
try:
output, _status = await self.libjuju.execute_action(
model_name=model_name,
application_name=application_name,
action_name="generate-ssh-key",
db_dict=db_dict,
progress_timeout=progress_timeout,
total_timeout=total_timeout,
)
except Exception as e:
self.log.info(
"Skipping exception while executing action generate-ssh-key: {}".format(
e
)
)
# execute action: get-ssh-public-key
try:
output, _status = await self.libjuju.execute_action(
model_name=model_name,
application_name=application_name,
action_name="get-ssh-public-key",
db_dict=db_dict,
progress_timeout=progress_timeout,
total_timeout=total_timeout,
)
except Exception as e:
msg = "Cannot execute action get-ssh-public-key: {}\n".format(e)
self.log.info(msg)
raise N2VCExecutionException(e, primitive_name="get-ssh-public-key")
# return public key if exists
return output["pubkey"] if "pubkey" in output else output
async def add_relation(
self, ee_id_1: str, ee_id_2: str, endpoint_1: str, endpoint_2: str
):
self.log.debug(
"adding new relation between {} and {}, endpoints: {}, {}".format(
ee_id_1, ee_id_2, endpoint_1, endpoint_2
)
)
# check arguments
if not ee_id_1:
message = "EE 1 is mandatory"
self.log.error(message)
raise N2VCBadArgumentsException(message=message, bad_args=["ee_id_1"])
if not ee_id_2:
message = "EE 2 is mandatory"
self.log.error(message)
raise N2VCBadArgumentsException(message=message, bad_args=["ee_id_2"])
if not endpoint_1:
message = "endpoint 1 is mandatory"
self.log.error(message)
raise N2VCBadArgumentsException(message=message, bad_args=["endpoint_1"])
if not endpoint_2:
message = "endpoint 2 is mandatory"
self.log.error(message)
raise N2VCBadArgumentsException(message=message, bad_args=["endpoint_2"])
# get the model, the applications and the machines from the ee_id's
model_1, app_1, _machine_1 = self._get_ee_id_components(ee_id_1)
model_2, app_2, _machine_2 = self._get_ee_id_components(ee_id_2)
# model must be the same
if model_1 != model_2:
message = "EE models are not the same: {} vs {}".format(ee_id_1, ee_id_2)
self.log.error(message)
raise N2VCBadArgumentsException(
message=message, bad_args=["ee_id_1", "ee_id_2"]
)
# add juju relations between two applications
try:
await self.libjuju.add_relation(
model_name=model_1,
application_name_1=app_1,
application_name_2=app_2,
relation_1=endpoint_1,
relation_2=endpoint_2,
)
except Exception as e:
message = "Error adding relation between {} and {}: {}".format(
ee_id_1, ee_id_2, e
)
self.log.error(message)
raise N2VCException(message=message)
async def remove_relation(self):
# TODO
self.log.info("Method not implemented yet")
raise MethodNotImplemented()
async def deregister_execution_environments(self):
self.log.info("Method not implemented yet")
raise MethodNotImplemented()
async def delete_namespace(
self, namespace: str, db_dict: dict = None, total_timeout: float = None
):
self.log.info("Deleting namespace={}".format(namespace))
# check arguments
if namespace is None:
raise N2VCBadArgumentsException(
message="namespace is mandatory", bad_args=["namespace"]
)
_nsi_id, ns_id, _vnf_id, _vdu_id, _vdu_count = self._get_namespace_components(
namespace=namespace
)
if ns_id is not None:
try:
models = await self.libjuju.list_models(contains=ns_id)
for model in models:
await self.libjuju.destroy_model(
model_name=model, total_timeout=total_timeout
)
except Exception as e:
raise N2VCException(
message="Error deleting namespace {} : {}".format(namespace, e)
)
else:
raise N2VCBadArgumentsException(
message="only ns_id is permitted to delete yet", bad_args=["namespace"]
)
self.log.info("Namespace {} deleted".format(namespace))
async def delete_execution_environment(
self, ee_id: str, db_dict: dict = None, total_timeout: float = None
):
self.log.info("Deleting execution environment ee_id={}".format(ee_id))
# check arguments
if ee_id is None:
raise N2VCBadArgumentsException(
message="ee_id is mandatory", bad_args=["ee_id"]
)
model_name, application_name, _machine_id = self._get_ee_id_components(
ee_id=ee_id
)
# destroy the application
try:
await self.libjuju.destroy_model(
model_name=model_name, total_timeout=total_timeout
)
except Exception as e:
raise N2VCException(
message=(
"Error deleting execution environment {} (application {}) : {}"
).format(ee_id, application_name, e)
)
# destroy the machine
# try:
# await self._juju_destroy_machine(
# model_name=model_name,
# machine_id=machine_id,
# total_timeout=total_timeout
# )
# except Exception as e:
# raise N2VCException(
# message='Error deleting execution environment {} (machine {}) : {}'
# .format(ee_id, machine_id, e))
self.log.info("Execution environment {} deleted".format(ee_id))
async def exec_primitive(
self,
ee_id: str,
primitive_name: str,
params_dict: dict,
db_dict: dict = None,
progress_timeout: float = None,
total_timeout: float = None,
) -> str:
self.log.info(
"Executing primitive: {} on ee: {}, params: {}".format(
primitive_name, ee_id, params_dict
)
)
# check arguments
if ee_id is None or len(ee_id) == 0:
raise N2VCBadArgumentsException(
message="ee_id is mandatory", bad_args=["ee_id"]
)
if primitive_name is None or len(primitive_name) == 0:
raise N2VCBadArgumentsException(
message="action_name is mandatory", bad_args=["action_name"]
)
if params_dict is None:
params_dict = dict()
try:
(
model_name,
application_name,
_machine_id,
) = N2VCJujuConnector._get_ee_id_components(ee_id=ee_id)
except Exception:
raise N2VCBadArgumentsException(
message="ee_id={} is not a valid execution environment id".format(
ee_id
),
bad_args=["ee_id"],
)
if primitive_name == "config":
# Special case: config primitive
try:
await self.libjuju.configure_application(
model_name=model_name,
application_name=application_name,
config=params_dict,
)
actions = await self.libjuju.get_actions(
application_name=application_name, model_name=model_name,
)
self.log.debug(
"Application {} has these actions: {}".format(
application_name, actions
)
)
if "verify-ssh-credentials" in actions:
# execute verify-credentials
num_retries = 20
retry_timeout = 15.0
for _ in range(num_retries):
try:
self.log.debug("Executing action verify-ssh-credentials...")
output, ok = await self.libjuju.execute_action(
model_name=model_name,
application_name=application_name,
action_name="verify-ssh-credentials",
db_dict=db_dict,
progress_timeout=progress_timeout,
total_timeout=total_timeout,
)
if ok == "failed":
self.log.debug(
"Error executing verify-ssh-credentials: {}. Retrying..."
)
await asyncio.sleep(retry_timeout)
continue
self.log.debug("Result: {}, output: {}".format(ok, output))
break
except asyncio.CancelledError:
raise
else:
self.log.error(
"Error executing verify-ssh-credentials after {} retries. ".format(
num_retries
)
)
else:
msg = "Action verify-ssh-credentials does not exist in application {}".format(
application_name
)
self.log.debug(msg=msg)
except Exception as e:
self.log.error("Error configuring juju application: {}".format(e))
raise N2VCExecutionException(
message="Error configuring application into ee={} : {}".format(
ee_id, e
),
primitive_name=primitive_name,
)
return "CONFIG OK"
else:
try:
output, status = await self.libjuju.execute_action(
model_name=model_name,
application_name=application_name,
action_name=primitive_name,
db_dict=db_dict,
progress_timeout=progress_timeout,
total_timeout=total_timeout,
**params_dict
)
if status == "completed":
return output
else:
raise Exception("status is not completed: {}".format(status))
except Exception as e:
self.log.error(
"Error executing primitive {}: {}".format(primitive_name, e)
)
raise N2VCExecutionException(
message="Error executing primitive {} into ee={} : {}".format(
primitive_name, ee_id, e
),
primitive_name=primitive_name,
)
async def disconnect(self):
self.log.info("closing juju N2VC...")
try:
await self.libjuju.disconnect()
except Exception as e:
raise N2VCConnectionException(
message="Error disconnecting controller: {}".format(e), url=self.url
)
"""
####################################################################################
################################### P R I V A T E ##################################
####################################################################################
"""
def _write_ee_id_db(self, db_dict: dict, ee_id: str):
# write ee_id to database: _admin.deployed.VCA.x
try:
the_table = db_dict["collection"]
the_filter = db_dict["filter"]
the_path = db_dict["path"]
if not the_path[-1] == ".":
the_path = the_path + "."
update_dict = {the_path + "ee_id": ee_id}
# self.log.debug('Writing ee_id to database: {}'.format(the_path))
self.db.set_one(
table=the_table,
q_filter=the_filter,
update_dict=update_dict,
fail_on_empty=True,
)
except asyncio.CancelledError:
raise
except Exception as e:
self.log.error("Error writing ee_id to database: {}".format(e))
@staticmethod
def _build_ee_id(model_name: str, application_name: str, machine_id: str):
"""
Build an execution environment id form model, application and machine
:param model_name:
:param application_name:
:param machine_id:
:return:
"""
# id for the execution environment
return "{}.{}.{}".format(model_name, application_name, machine_id)
@staticmethod
def _get_ee_id_components(ee_id: str) -> (str, str, str):
"""
Get model, application and machine components from an execution environment id
:param ee_id:
:return: model_name, application_name, machine_id
"""
if ee_id is None:
return None, None, None
# split components of id
parts = ee_id.split(".")
model_name = parts[0]
application_name = parts[1]
machine_id = parts[2]
return model_name, application_name, machine_id
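# Round-trip example (identifiers are made up for illustration):
#   ee_id = N2VCJujuConnector._build_ee_id("myns", "app-vnf-abc123", "2")   # -> "myns.app-vnf-abc123.2"
#   N2VCJujuConnector._get_ee_id_components(ee_id)                          # -> ("myns", "app-vnf-abc123", "2")
# Note that the parser simply splits on ".", so model and application names are assumed
# not to contain dots themselves.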
def _get_application_name(self, namespace: str) -> str:
"""
Build application name from namespace
:param namespace:
:return: app-vnf-<vnf id>-vdu-<vdu-id>-cnt-<vdu-count>
"""
# TODO: Enforce the Juju 50-character application limit
# split namespace components
_, _, vnf_id, vdu_id, vdu_count = self._get_namespace_components(
namespace=namespace
)
if vnf_id is None or len(vnf_id) == 0:
vnf_id = ""
else:
# Shorten the vnf_id to its last twelve characters
vnf_id = "vnf-" + vnf_id[-12:]
if vdu_id is None or len(vdu_id) == 0:
vdu_id = ""
else:
# Shorten the vdu_id to its last twelve characters
vdu_id = "-vdu-" + vdu_id[-12:]
if vdu_count is None or len(vdu_count) == 0:
vdu_count = ""
else:
vdu_count = "-cnt-" + vdu_count
application_name = "app-{}{}{}".format(vnf_id, vdu_id, vdu_count)
return N2VCJujuConnector._format_app_name(application_name)
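# Illustrative result (namespace components are hypothetical): with a vnf_id ending in
# "0123456789ab", a vdu_id ending in "fedcba987654" and vdu_count "0", this would return
# roughly "app-vnf-0123456789ab-vdu-fedcba987654-cnt-0", subject to whatever
# normalisation _format_app_name() applies.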
async def _juju_create_machine(
self,
model_name: str,
application_name: str,
machine_id: str = None,
db_dict: dict = None,
progress_timeout: float = None,
total_timeout: float = None,
) -> Machine:
self.log.debug(
"creating machine in model: {}, existing machine id: {}".format(
model_name, machine_id
)
)
# get juju model and observer (create model if needed)
model = await self._juju_get_model(model_name=model_name)
observer = self.juju_observers[model_name]
# find machine id in model
machine = None
if machine_id is not None:
self.log.debug("Finding existing machine id {} in model".format(machine_id))
# get juju existing machines in the model
existing_machines = await model.get_machines()
if machine_id in existing_machines:
self.log.debug(
"Machine id {} found in model (reusing it)".format(machine_id)
)
machine = model.machines[machine_id]
if machine is None:
self.log.debug("Creating a new machine in juju...")
# machine does not exist, create it and wait for it
machine = await model.add_machine(
spec=None, constraints=None, disks=None, series="xenial"
)
# register machine with observer
observer.register_machine(machine=machine, db_dict=db_dict)
# id for the execution environment
ee_id = N2VCJujuConnector._build_ee_id(
model_name=model_name,
application_name=application_name,
machine_id=str(machine.entity_id),
)
# write ee_id in database
self._write_ee_id_db(db_dict=db_dict, ee_id=ee_id)
# wait for machine creation
await observer.wait_for_machine(
machine_id=str(machine.entity_id),
progress_timeout=progress_timeout,
total_timeout=total_timeout,
)
else:
self.log.debug("Reusing old machine pending")
# register machine with observer
observer.register_machine(machine=machine, db_dict=db_dict)
# machine does exist, but it is in creation process (pending), wait for
# create finalisation
await observer.wait_for_machine(
machine_id=machine.entity_id,
progress_timeout=progress_timeout,
total_timeout=total_timeout,
)
self.log.debug("Machine ready at " + str(machine.dns_name))
return machine
async def _juju_provision_machine(
self,
model_name: str,
hostname: str,
username: str,
private_key_path: str,
db_dict: dict = None,
progress_timeout: float = None,
total_timeout: float = None,
) -> str:
if not self.api_proxy:
msg = "Cannot provision machine: api_proxy is not defined"
self.log.error(msg=msg)
raise N2VCException(message=msg)
self.log.debug(
"provisioning machine. model: {}, hostname: {}, username: {}".format(
model_name, hostname, username
)
)
if not self._authenticated:
await self._juju_login()
# get juju model and observer
model = await self._juju_get_model(model_name=model_name)
observer = self.juju_observers[model_name]
# TODO check if machine is already provisioned
machine_list = await model.get_machines()
provisioner = AsyncSSHProvisioner(
host=hostname,
user=username,
private_key_path=private_key_path,
log=self.log,
)
params = None
try:
params = await provisioner.provision_machine()
except Exception as ex:
msg = "Exception provisioning machine: {}".format(ex)
self.log.error(msg)
raise N2VCException(message=msg)
params.jobs = ["JobHostUnits"]
connection = model.connection()
# Submit the request.
self.log.debug("Adding machine to model")
client_facade = client.ClientFacade.from_connection(connection)
results = await client_facade.AddMachines(params=[params])
error = results.machines[0].error
if error:
msg = "Error adding machine: {}".format(error.message)
self.log.error(msg=msg)
raise ValueError(msg)
machine_id = results.machines[0].machine
# Need to run this after AddMachines has been called,
# as we need the machine_id
self.log.debug("Installing Juju agent into machine {}".format(machine_id))
asyncio.ensure_future(
provisioner.install_agent(
connection=connection,
nonce=params.nonce,
machine_id=machine_id,
api=self.api_proxy,
)
)
# wait for machine in model (now, machine is not yet in model, so we must
# wait for it)
machine = None
for _ in range(10):
machine_list = await model.get_machines()
if machine_id in machine_list:
self.log.debug("Machine {} found in model!".format(machine_id))
machine = model.machines.get(machine_id)
break
await asyncio.sleep(2)
if machine is None:
msg = "Machine {} not found in model".format(machine_id)
self.log.error(msg=msg)
raise Exception(msg)
# register machine with observer
observer.register_machine(machine=machine, db_dict=db_dict)
# wait for machine creation
self.log.debug("waiting for provision finishes... {}".format(machine_id))
await observer.wait_for_machine(
machine_id=machine_id,
progress_timeout=progress_timeout,
total_timeout=total_timeout,
)
self.log.debug("Machine provisioned {}".format(machine_id))
return machine_id
async def _juju_deploy_charm(
self,
model_name: str,
application_name: str,
charm_path: str,
machine_id: str,
db_dict: dict,
progress_timeout: float = None,
total_timeout: float = None,
config: dict = None,
) -> (Application, int):
# get juju model and observer
model = await self._juju_get_model(model_name=model_name)
observer = self.juju_observers[model_name]
# check if application already exists
application = None
if application_name in model.applications:
application = model.applications[application_name]
if application is None:
# application does not exist, create it and wait for it
self.log.debug(
"deploying application {} to machine {}, model {}".format(
application_name, machine_id, model_name
)
)
self.log.debug("charm: {}".format(charm_path))
machine = model.machines[machine_id]
# series = None
application = await model.deploy(
entity_url=charm_path,
application_name=application_name,
channel="stable",
num_units=1,
series=machine.series,
to=machine_id,
config=config,
)
# register application with observer
observer.register_application(application=application, db_dict=db_dict)
self.log.debug(
"waiting for application deployed... {}".format(application.entity_id)
)
retries = await observer.wait_for_application(
application_id=application.entity_id,
progress_timeout=progress_timeout,
total_timeout=total_timeout,
)
self.log.debug("application deployed")
else:
# register application with observer
observer.register_application(application=application, db_dict=db_dict)
# application already exists, but not finalised
self.log.debug("application already exists, waiting for deployed...")
retries = await observer.wait_for_application(
application_id=application.entity_id,
progress_timeout=progress_timeout,
total_timeout=total_timeout,
)
self.log.debug("application deployed")
return application, retries
async def _juju_execute_action(
self,
model_name: str,
application_name: str,
action_name: str,
db_dict: dict,
progress_timeout: float = None,
total_timeout: float = None,
**kwargs
) -> Action:
# get juju model and observer
model = await self._juju_get_model(model_name=model_name)
observer = self.juju_observers[model_name]
application = await self._juju_get_application(
model_name=model_name, application_name=application_name
)
unit = None
for u in application.units:
if await u.is_leader_from_status():
unit = u
if unit is not None:
actions = await application.get_actions()
if action_name in actions:
self.log.debug(
'executing action "{}" using params: {}'.format(action_name, kwargs)
)
action = await unit.run_action(action_name, **kwargs)
# register action with observer
observer.register_action(action=action, db_dict=db_dict)
await observer.wait_for_action(
action_id=action.entity_id,
progress_timeout=progress_timeout,
total_timeout=total_timeout,
)
self.log.debug("action completed with status: {}".format(action.status))
output = await model.get_action_output(action_uuid=action.entity_id)
status = await model.get_action_status(uuid_or_prefix=action.entity_id)
if action.entity_id in status:
status = status[action.entity_id]
else:
status = "failed"
return output, status
raise N2VCExecutionException(
message="Cannot execute action on charm", primitive_name=action_name
)
async def _juju_configure_application(
self,
model_name: str,
application_name: str,
config: dict,
db_dict: dict,
progress_timeout: float = None,
total_timeout: float = None,
):
# get the application
application = await self._juju_get_application(
model_name=model_name, application_name=application_name
)
self.log.debug(
"configuring the application {} -> {}".format(application_name, config)
)
res = await application.set_config(config)
self.log.debug(
"application {} configured. res={}".format(application_name, res)
)
# Verify the config is set
new_conf = await application.get_config()
for key in config:
value = new_conf[key]["value"]
self.log.debug(" {} = {}".format(key, value))
if config[key] != value:
raise N2VCException(
message="key {} is not configured correctly {} != {}".format(
key, config[key], new_conf[key]
)
)
# check if 'verify-ssh-credentials' action exists
# unit = application.units[0]
actions = await application.get_actions()
if "verify-ssh-credentials" not in actions:
msg = (
"Action verify-ssh-credentials does not exist in application {}"
).format(application_name)
self.log.debug(msg=msg)
return False
# execute verify-credentials
num_retries = 20
retry_timeout = 15.0
for _ in range(num_retries):
try:
self.log.debug("Executing action verify-ssh-credentials...")
output, ok = await self._juju_execute_action(
model_name=model_name,
application_name=application_name,
action_name="verify-ssh-credentials",
db_dict=db_dict,
progress_timeout=progress_timeout,
total_timeout=total_timeout,
)
self.log.debug("Result: {}, output: {}".format(ok, output))
return True
except asyncio.CancelledError:
raise
except Exception as e:
self.log.debug(
"Error executing verify-ssh-credentials: {}. Retrying...".format(e)
)
await asyncio.sleep(retry_timeout)
else:
self.log.error(
"Error executing verify-ssh-credentials after {} retries. ".format(
num_retries
)
)
return False
async def _juju_get_application(self, model_name: str, application_name: str):
"""Get the deployed application."""
model = await self._juju_get_model(model_name=model_name)
application_name = N2VCJujuConnector._format_app_name(application_name)
if model.applications and application_name in model.applications:
return model.applications[application_name]
else:
raise N2VCException(
message="Cannot get application {} from model {}".format(
application_name, model_name
)
)
async def _juju_get_model(self, model_name: str) -> Model:
""" Get a model object from juju controller
If the model does not exist, it creates it.
:param str model_name: name of the model
:returns Model: model obtained from juju controller or Exception
"""
# format model name
model_name = N2VCJujuConnector._format_model_name(model_name)
if model_name in self.juju_models:
return self.juju_models[model_name]
if self._creating_model:
self.log.debug("Another coroutine is creating a model. Wait...")
while self._creating_model:
# another coroutine is creating a model, wait
await asyncio.sleep(0.1)
# retry (perhaps another coroutine has created the model meanwhile)
if model_name in self.juju_models:
return self.juju_models[model_name]
try:
self._creating_model = True
# get juju model names from juju
model_list = await self.controller.list_models()
if model_name not in model_list:
self.log.info(
"Model {} does not exist. Creating new model...".format(model_name)
)
config_dict = {"authorized-keys": self.public_key}
if self.apt_mirror:
config_dict["apt-mirror"] = self.apt_mirror
if not self.enable_os_upgrade:
config_dict["enable-os-refresh-update"] = False
config_dict["enable-os-upgrade"] = False
if self.cloud in self.BUILT_IN_CLOUDS:
model = await self.controller.add_model(
model_name=model_name,
config=config_dict,
cloud_name=self.cloud,
)
else:
model = await self.controller.add_model(
model_name=model_name,
config=config_dict,
cloud_name=self.cloud,
credential_name=self.cloud,
)
self.log.info("New model created, name={}".format(model_name))
else:
self.log.debug(
"Model already exists in juju. Getting model {}".format(model_name)
)
model = await self.controller.get_model(model_name)
self.log.debug("Existing model in juju, name={}".format(model_name))
self.juju_models[model_name] = model
self.juju_observers[model_name] = JujuModelObserver(n2vc=self, model=model)
return model
except Exception as e:
msg = "Cannot get model {}. Exception: {}".format(model_name, e)
self.log.error(msg)
raise N2VCException(msg)
finally:
self._creating_model = False
async def _juju_add_relation(
self,
model_name: str,
application_name_1: str,
application_name_2: str,
relation_1: str,
relation_2: str,
):
# get juju model and observer
model = await self._juju_get_model(model_name=model_name)
r1 = "{}:{}".format(application_name_1, relation_1)
r2 = "{}:{}".format(application_name_2, relation_2)
self.log.debug("adding relation: {} -> {}".format(r1, r2))
try:
await model.add_relation(relation1=r1, relation2=r2)
except JujuAPIError as e:
# If one of the applications in the relationship doesn't exist, or the
# relation has already been added,
# let the operation fail silently.
if "not found" in e.message:
return
if "already exists" in e.message:
return
            # any other exception: re-raise it
raise e
async def _juju_destroy_application(self, model_name: str, application_name: str):
self.log.debug(
"Destroying application {} in model {}".format(application_name, model_name)
)
# get juju model and observer
model = await self._juju_get_model(model_name=model_name)
observer = self.juju_observers[model_name]
application = model.applications.get(application_name)
if application:
observer.unregister_application(application_name)
await application.destroy()
else:
self.log.debug("Application not found: {}".format(application_name))
async def _juju_destroy_machine(
self, model_name: str, machine_id: str, total_timeout: float = None
):
self.log.debug(
"Destroying machine {} in model {}".format(machine_id, model_name)
)
if total_timeout is None:
total_timeout = 3600
# get juju model and observer
model = await self._juju_get_model(model_name=model_name)
observer = self.juju_observers[model_name]
machines = await model.get_machines()
if machine_id in machines:
machine = model.machines[machine_id]
observer.unregister_machine(machine_id)
# TODO: change this by machine.is_manual when this is upstreamed:
# https://github.com/juju/python-libjuju/pull/396
if "instance-id" in machine.safe_data and machine.safe_data[
"instance-id"
].startswith("manual:"):
self.log.debug("machine.destroy(force=True) started.")
await machine.destroy(force=True)
self.log.debug("machine.destroy(force=True) passed.")
# max timeout
end = time.time() + total_timeout
# wait for machine removal
machines = await model.get_machines()
while machine_id in machines and time.time() < end:
self.log.debug(
"Waiting for machine {} is destroyed".format(machine_id)
)
await asyncio.sleep(0.5)
machines = await model.get_machines()
self.log.debug("Machine destroyed: {}".format(machine_id))
else:
self.log.debug("Machine not found: {}".format(machine_id))
async def _juju_destroy_model(self, model_name: str, total_timeout: float = None):
self.log.debug("Destroying model {}".format(model_name))
if total_timeout is None:
total_timeout = 3600
end = time.time() + total_timeout
model = await self._juju_get_model(model_name=model_name)
if not model:
raise N2VCNotFound(message="Model {} does not exist".format(model_name))
uuid = model.info.uuid
# destroy applications
for application_name in model.applications:
try:
await self._juju_destroy_application(
model_name=model_name, application_name=application_name
)
except Exception as e:
self.log.error(
"Error destroying application {} in model {}: {}".format(
application_name, model_name, e
)
)
# destroy machines
machines = await model.get_machines()
for machine_id in machines:
try:
await self._juju_destroy_machine(
model_name=model_name, machine_id=machine_id
)
except asyncio.CancelledError:
raise
except Exception:
# ignore exceptions destroying machine
pass
await self._juju_disconnect_model(model_name=model_name)
self.log.debug("destroying model {}...".format(model_name))
await self.controller.destroy_model(uuid)
# self.log.debug('model destroy requested {}'.format(model_name))
# wait for model is completely destroyed
self.log.debug("Waiting for model {} to be destroyed...".format(model_name))
last_exception = ""
while time.time() < end:
try:
# await self.controller.get_model(uuid)
models = await self.controller.list_models()
if model_name not in models:
self.log.debug(
"The model {} ({}) was destroyed".format(model_name, uuid)
)
return
except asyncio.CancelledError:
raise
except Exception as e:
last_exception = e
await asyncio.sleep(5)
raise N2VCException(
"Timeout waiting for model {} to be destroyed {}".format(
model_name, last_exception
)
)
async def _juju_login(self):
"""Connect to juju controller
"""
# if already authenticated, exit function
if self._authenticated:
return
# if connecting, wait for finish
# another task could be trying to connect in parallel
while self._connecting:
await asyncio.sleep(0.1)
# double check after other task has finished
if self._authenticated:
return
try:
self._connecting = True
self.log.info(
"connecting to juju controller: {} {}:{}{}".format(
self.url,
self.username,
self.secret[:8] + "...",
" with ca_cert" if self.ca_cert else "",
)
)
# Create controller object
self.controller = Controller(loop=self.loop)
# Connect to controller
await self.controller.connect(
endpoint=self.url,
username=self.username,
password=self.secret,
cacert=self.ca_cert,
)
self._authenticated = True
self.log.info("juju controller connected")
except Exception as e:
message = "Exception connecting to juju: {}".format(e)
self.log.error(message)
raise N2VCConnectionException(message=message, url=self.url)
finally:
self._connecting = False
async def _juju_logout(self):
"""Logout of the Juju controller."""
if not self._authenticated:
return False
# disconnect all models
for model_name in self.juju_models:
try:
await self._juju_disconnect_model(model_name)
except Exception as e:
self.log.error(
"Error disconnecting model {} : {}".format(model_name, e)
)
# continue with next model...
self.log.info("Disconnecting controller")
try:
await self.controller.disconnect()
except Exception as e:
raise N2VCConnectionException(
message="Error disconnecting controller: {}".format(e), url=self.url
)
self.controller = None
self._authenticated = False
self.log.info("disconnected")
async def _juju_disconnect_model(self, model_name: str):
self.log.debug("Disconnecting model {}".format(model_name))
if model_name in self.juju_models:
await self.juju_models[model_name].disconnect()
self.juju_models[model_name] = None
self.juju_observers[model_name] = None
else:
            self.log.warning("Cannot disconnect model: {}".format(model_name))
def _create_juju_public_key(self):
"""Recreate the Juju public key on lcm container, if needed
Certain libjuju commands expect to be run from the same machine as Juju
is bootstrapped to. This method will write the public key to disk in
that location: ~/.local/share/juju/ssh/juju_id_rsa.pub
"""
# Make sure that we have a public key before writing to disk
if self.public_key is None or len(self.public_key) == 0:
if "OSMLCM_VCA_PUBKEY" in os.environ:
self.public_key = os.getenv("OSMLCM_VCA_PUBKEY", "")
if len(self.public_key) == 0:
return
else:
return
pk_path = "{}/.local/share/juju/ssh".format(os.path.expanduser("~"))
file_path = "{}/juju_id_rsa.pub".format(pk_path)
self.log.debug(
"writing juju public key to file:\n{}\npublic key: {}".format(
file_path, self.public_key
)
)
if not os.path.exists(pk_path):
# create path and write file
os.makedirs(pk_path)
with open(file_path, "w") as f:
self.log.debug("Creating juju public key file: {}".format(file_path))
f.write(self.public_key)
else:
self.log.debug("juju public key file already exists: {}".format(file_path))
@staticmethod
def _format_model_name(name: str) -> str:
"""Format the name of the model.
Model names may only contain lowercase letters, digits and hyphens
"""
return name.replace("_", "-").replace(" ", "-").lower()
@staticmethod
def _format_app_name(name: str) -> str:
"""Format the name of the application (in order to assure valid application name).
Application names have restrictions (run juju deploy --help):
- contains lowercase letters 'a'-'z'
- contains numbers '0'-'9'
- contains hyphens '-'
- starts with a lowercase letter
- not two or more consecutive hyphens
- after a hyphen, not a group with all numbers
"""
def all_numbers(s: str) -> bool:
for c in s:
if not c.isdigit():
return False
return True
new_name = name.replace("_", "-")
new_name = new_name.replace(" ", "-")
new_name = new_name.lower()
while new_name.find("--") >= 0:
new_name = new_name.replace("--", "-")
groups = new_name.split("-")
# find 'all numbers' groups and prefix them with a letter
app_name = ""
for i in range(len(groups)):
group = groups[i]
if all_numbers(group):
group = "z" + group
if i > 0:
app_name += "-"
app_name += group
if app_name[0].isdigit():
app_name = "z" + app_name
return app_name
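    # A minimal sketch of the name normalisation above, with assumed example inputs
    # (not taken from the original project):
    #   N2VCJujuConnector._format_model_name("NS Instance_01")  # -> "ns-instance-01"
    #   N2VCJujuConnector._format_app_name("app_2 v2")          # -> "app-z2-v2"
    #                                                           #    (all-digit group "2" becomes "z2")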
| 35.909934
| 98
| 0.553787
|
73bd1f775f1fc4631196b6923d52f00882d3e8bd
| 843
|
py
|
Python
|
tests/constraints/test_faulting_stress_measurement.py
|
pnnl/SOSAT
|
610f99e0bb80f2f5e7836e7e3b6b816e029838bb
|
[
"BSD-3-Clause"
] | null | null | null |
tests/constraints/test_faulting_stress_measurement.py
|
pnnl/SOSAT
|
610f99e0bb80f2f5e7836e7e3b6b816e029838bb
|
[
"BSD-3-Clause"
] | 1
|
2021-03-22T18:59:05.000Z
|
2021-03-22T18:59:05.000Z
|
tests/constraints/test_faulting_stress_measurement.py
|
pnnl/SOSAT
|
610f99e0bb80f2f5e7836e7e3b6b816e029838bb
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from scipy.stats import uniform
import numpy as np
import matplotlib.pyplot as plt
from SOSAT import StressState
from SOSAT.constraints import StressMeasurement
from SOSAT.constraints import FaultConstraint
# depth in meters
depth = 1228.3
# density in kg/m^3
avg_overburden_density = 2580.0
# pore pressure gradient in MPa/km
pore_pressure_grad = 9.955
pore_pressure = pore_pressure_grad * (1.0 / 1000) * depth
ss = StressState(depth=depth,
avg_overburden_density=avg_overburden_density,
pore_pressure=pore_pressure)
fc = FaultConstraint()
ss.add_constraint(fc)
smc = StressMeasurement(shmin_dist=uniform(loc=25.0,
scale=5.0))
ss.add_constraint(smc)
fig = ss.plot_posterior()
plt.savefig("faulting_stress_measurement_constraint_posterior.png")
| 26.34375
| 67
| 0.737841
|
6f132afe4146d6ef0a382bfd930b3131f9013b47
| 41
|
py
|
Python
|
src/models/__init__.py
|
atgmello/ml-challenge-2020
|
2bf3fc81b96059a2d1e813e6784e21b66760df3b
|
[
"MIT"
] | 5
|
2020-12-17T13:20:18.000Z
|
2021-05-09T01:28:19.000Z
|
src/models/__init__.py
|
atgmello/ml-challenge-2020
|
2bf3fc81b96059a2d1e813e6784e21b66760df3b
|
[
"MIT"
] | null | null | null |
src/models/__init__.py
|
atgmello/ml-challenge-2020
|
2bf3fc81b96059a2d1e813e6784e21b66760df3b
|
[
"MIT"
] | null | null | null |
from .challenge_metric import ndcg_score
| 20.5
| 40
| 0.878049
|
76d6d589708476da6e71807b0c4b7ed621bf6ad1
| 266
|
py
|
Python
|
myerp/custom/doctype/property_setter/property_setter.py
|
smthakor1979/MyERP
|
b05c44ae0054072f2a410381069215d287e7f0ba
|
[
"MIT"
] | null | null | null |
myerp/custom/doctype/property_setter/property_setter.py
|
smthakor1979/MyERP
|
b05c44ae0054072f2a410381069215d287e7f0ba
|
[
"MIT"
] | null | null | null |
myerp/custom/doctype/property_setter/property_setter.py
|
smthakor1979/MyERP
|
b05c44ae0054072f2a410381069215d287e7f0ba
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, suresh thakor and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class PropertySetter(Document):
pass
| 24.181818
| 52
| 0.778195
|
aaa03234cb6e86de9d7f6a547e73d4d182386741
| 5,367
|
py
|
Python
|
dataset_reader_v2.py
|
lopa23/flim_optcrf
|
2d9a1dba37a7e5e6beae66c536b07bb7ae4bdfe9
|
[
"Apache-2.0"
] | null | null | null |
dataset_reader_v2.py
|
lopa23/flim_optcrf
|
2d9a1dba37a7e5e6beae66c536b07bb7ae4bdfe9
|
[
"Apache-2.0"
] | null | null | null |
dataset_reader_v2.py
|
lopa23/flim_optcrf
|
2d9a1dba37a7e5e6beae66c536b07bb7ae4bdfe9
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset
import matplotlib.image as mpimg
from matplotlib import pyplot as plt
from skimage.transform import rescale, resize
import scipy.io
import h5py
import tables
from torch.utils.data import DataLoader
def kron(matrix1, matrix2):
"""
    Build a block-diagonal repetition of matrix2, equivalent to kron(I_r, matrix2)
    where r = matrix1.size(0); only the leading dimension of matrix1 is used.
    :type matrix1: torch.Tensor
    :type matrix2: torch.Tensor
    :rtype: numpy.ndarray
"""
r=matrix1.size(0)
R=repeat_along_diag(matrix2,r)
#R=torch.zeros(n*m,n*m)
return R
def rgb2gray(rgb):
r, g, b=rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]
gray= .2989*r + .5870*g+ .114*b
return gray
def repeat_along_diag(a, r):
m,n = a.shape
out = np.zeros((r,m,r,n), dtype=np.float32)
diag = np.einsum('ijik->ijk',out)
diag[:] = (a)
return out.reshape(-1,n*r)
def read_mat_file(fname):
file = tables.open_file(fname)
Q = file.root.HH[:]
p=file.root.f[:]
G=file.root.D[:]
m=file.root.m[:]
m=int(m[0].item())
Q=torch.tensor(Q).float();
E=torch.eye(m)
#Q=torch.from_numpy(kron(E,Q)).float()
print("Size m, Q",m, Q.size())
n=Q.size(0)
p=torch.tensor(p).float();
p=p.t()
p1=p.size(0)
G=torch.tensor(G).float();
if(p1==1):
G=G.t()
gx=G.size(0)
gy=G.size(1)
h = torch.tensor(np.zeros((gx, 1))).float();
temp=np.zeros((1,n))
temp[0,n-1]=.000000001
A = torch.from_numpy(temp);
b = torch.from_numpy(np.zeros((1,1)));
return Q, p, G, h, A, b, m
class MyDataset(Dataset):
def __init__(self, data_root):
self.samples = []
self.data_root=data_root
self.train_folder=[];
idx=0
for folname in os.listdir(data_root):
self.train_folder.append(os.path.join(self.data_root, folname))
print(self.train_folder[0])
subfolnames=os.listdir(self.train_folder[idx]);
        # skip the 'output' folder so only the modality sub-folders remain
        subfolnames = [folname for folname in subfolnames if folname != 'output']
subfol_path1=os.path.join(self.train_folder[idx],subfolnames[0]);
subfol_path2=os.path.join(self.train_folder[idx],subfolnames[1]);
print(subfol_path1,' ',subfol_path2)
#reading 1st modality
for thisfile in os.listdir(subfol_path1):
this_filepath = os.path.join(subfol_path1, thisfile)
if(this_filepath.find('image.bmp')!=-1):
img= mpimg.imread(this_filepath);
if(img.ndim >2):
img=rgb2gray(img)
img=img.astype(np.float32)
img=torch.from_numpy(img)
#img=img.unsqueeze(0)
elif(this_filepath.find('truth.bmp')!=-1):
target= torch.from_numpy(mpimg.imread(this_filepath))
elif(this_filepath.find('.txt')!=-1):
label = np.loadtxt(this_filepath, dtype='i', delimiter=',')
n1, n2=label.shape
if(n2>1):
Pixel_pos1=torch.from_numpy(label[:,[0, 1]])
Pixel_pos1=Pixel_pos1.type(torch.uint8)
anno1=torch.from_numpy(label[:,2])
else:
Pixel_pos1=None
anno1=torch.from_numpy(label)
elif(this_filepath.find('.mat')!=-1):
Q1, p1, G1, h1, A1, b1, m1=read_mat_file(this_filepath)
#reading 2nd modality
for thisfile in os.listdir(subfol_path2):
this_filepath = os.path.join(subfol_path2, thisfile)
if(this_filepath.find('.txt')!=-1):
label = np.loadtxt(this_filepath, dtype='i', delimiter=',')
n1, n2=label.shape
if(n2>1):
Pixel_pos2=torch.from_numpy(label[:,[0, 1]])
Pixel_pos2=Pixel_pos2.type(torch.uint8)
anno2=torch.from_numpy(label[:,2])
else:
Pixel_pos2=None
anno2=torch.from_numpy(label)
elif(this_filepath.find('.mat')!=-1):
Q2, p2, G2, h2, A2, b2, m2=read_mat_file(this_filepath)
idx=idx+1
item=(img, target, anno1, Pixel_pos1, Q1, p1, G1, h1, m1, anno2, Pixel_pos2, Q2, p2, G2, h2, m2)
self.samples.append(item)
#self.samples.append({'image': img, 'target': target, 'Anno':anno, 'Pixel_pos':Pixel_pos, 'Q':Q, 'p':p, 'G':G, 'h':h, 'm':m})
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
return idx, self.samples[idx]
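# Minimal usage sketch (kept as comments; "data/train" is an assumed path, not part of
# the original project):
#   dataset = MyDataset("data/train")
#   loader = DataLoader(dataset, batch_size=1, shuffle=True)
#   idx, (img, target, anno1, *rest) = dataset[0]
#   print(idx, img.shape, target.shape)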
| 31.385965
| 137
| 0.505124
|
61c27d56b8f63ab62b4595fe987b77a9f2b357ee
| 775
|
py
|
Python
|
crawling_scraping/chapter02/save_sqlite3.py
|
mmakmo/python
|
74c577f8d688de62b6e6574ea1457a322450ae64
|
[
"MIT"
] | null | null | null |
crawling_scraping/chapter02/save_sqlite3.py
|
mmakmo/python
|
74c577f8d688de62b6e6574ea1457a322450ae64
|
[
"MIT"
] | null | null | null |
crawling_scraping/chapter02/save_sqlite3.py
|
mmakmo/python
|
74c577f8d688de62b6e6574ea1457a322450ae64
|
[
"MIT"
] | null | null | null |
import sqlite3
conn = sqlite3.connect('top_cities.db')
c = conn.cursor()
c.execute('DROP TABLE IF EXISTS cities')
c.execute('''
CREATE TABLE cities (
rank integer,
city text,
population integer
)
''')
c.execute('INSERT INTO cities VALUES (?, ?, ?)', (1, '上海', 24150000))
c.execute('INSERT INTO cities VALUES (:rank, :city, :population)', {'rank': 2, 'city': 'カラチ', 'population': 23500000})
c.executemany('INSERT INTO cities VALUES (:rank, :city, :population)', [
{'rank': 3, 'city': '北京', 'population': 21516000},
{'rank': 4, 'city': '天津', 'population': 14722100},
{'rank': 5, 'city': 'イスタンブル', 'population': 14160467},
])
conn.commit()
c.execute('SELECT * FROM cities')
for row in c.fetchall():
print(row)
conn.close()
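# The same placeholder styles work for reads, e.g. a parameterized SELECT
# (illustrative only, not part of the original script):
#   c.execute('SELECT city FROM cities WHERE population > ?', (20000000,))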
| 25.833333
| 118
| 0.614194
|
49b4e788c06559c0cae74d0cbc80a5167b7f6293
| 1,782
|
py
|
Python
|
test/modules/md/test_741_setup_errors.py
|
tititiou36/httpd
|
1348607c00ba58ce371f2f8ecb08abf610227043
|
[
"Apache-2.0"
] | 2,529
|
2015-01-02T11:52:53.000Z
|
2022-03-30T19:54:27.000Z
|
test/modules/md/test_741_setup_errors.py
|
tititiou36/httpd
|
1348607c00ba58ce371f2f8ecb08abf610227043
|
[
"Apache-2.0"
] | 133
|
2015-04-21T05:50:45.000Z
|
2022-03-30T14:23:40.000Z
|
test/modules/md/test_741_setup_errors.py
|
tititiou36/httpd
|
1348607c00ba58ce371f2f8ecb08abf610227043
|
[
"Apache-2.0"
] | 1,113
|
2015-01-01T14:47:02.000Z
|
2022-03-29T16:47:18.000Z
|
# test ACME error responses and their processing
import os
import pytest
from .md_conf import MDConf
from .md_env import MDTestEnv
@pytest.mark.skipif(condition=not MDTestEnv.has_acme_server(),
reason="no ACME test server configured")
class TestSetupErrors:
@pytest.fixture(autouse=True, scope='class')
def _class_scope(self, env, acme):
env.APACHE_CONF_SRC = "data/test_auto"
acme.start(config='default')
env.check_acme()
env.clear_store()
MDConf(env).install()
assert env.apache_restart() == 0
@pytest.fixture(autouse=True, scope='function')
def _method_scope(self, env, request):
env.clear_store()
self.mcmd = os.path.join(env.test_dir, "../modules/md/http_challenge_foobar.py")
self.test_domain = env.get_request_domain(request)
def test_md_741_001(self, env):
        # set up an MD with an MDMessageCmd that makes the http-01 challenge file invalid
# before the ACME server is asked to retrieve it. This will result in
# an "invalid" domain authorization.
# The certificate sign-up will be attempted again after 4 seconds and
# of course fail again.
# Verify that the error counter for the staging job increments, so
# that our retry logic goes into proper delayed backoff.
domain = self.test_domain
domains = [domain]
conf = MDConf(env)
conf.add("MDCAChallenges http-01")
conf.add(f"MDMessageCmd {self.mcmd} {env.store_dir}")
conf.add_md(domains)
conf.add_vhost(domains)
conf.install()
assert env.apache_restart() == 0
md = env.await_error(domain, errors=2, timeout=10)
assert md
assert md['renewal']['errors'] > 0
| 36.367347
| 88
| 0.657688
|
3dbe9b22c2a6c7662cbd5d53f840709ef2551e35
| 3,862
|
py
|
Python
|
src/vision/process_frame_util.py
|
CornellDataScience/self-driving-car
|
449044840abdeed9f547a16cd192950e23ba189c
|
[
"MIT"
] | 3
|
2021-09-29T21:15:25.000Z
|
2021-11-11T20:57:07.000Z
|
src/vision/process_frame_util.py
|
CornellDataScience/self-driving-car
|
449044840abdeed9f547a16cd192950e23ba189c
|
[
"MIT"
] | 44
|
2021-09-28T05:38:43.000Z
|
2022-03-31T21:29:48.000Z
|
src/vision/process_frame_util.py
|
CornellDataScience/self-driving-car
|
449044840abdeed9f547a16cd192950e23ba189c
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import math
def get_features(frame):
orb = cv2.ORB_create()
    # Replacement for orb.detect(frame, None); gives many more points
pts = cv2.goodFeaturesToTrack(
np.mean(frame, axis=2).astype(np.uint8), 3000, qualityLevel=0.01, minDistance=7
)
# print("pts: ", alt_pts)
kps = [cv2.KeyPoint(x=f[0][0], y=f[0][1], size=20) for f in pts]
kps, des = orb.compute(frame, kps)
# print("kps: ", kps)
# return np.array([(kp.pt[0], kp.pt[1]) for kp in kps]), des
return kps, des
# def get_features2(frame):
# orb = cv2.ORB_create()
def match_frames(des1, des2):
# print(des1, des2)
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# matches = matcher.knnMatch(des1, des2, k=1)
matches = matcher.match(des1, des2)
# print(matches)
matches = [m for m in matches if m.distance <= 24] # Previous Default: 32
matches = sorted(matches, key=lambda x: x.distance)
return matches
def process_frame(frame, prev_frame):
# orb = cv2.ORB_create()
prev_kps, prev_des = get_features(prev_frame)
kps, des = get_features(frame)
# print(kps)
cv2.drawKeypoints(frame, kps, frame, color=(0, 255, 0), flags=0)
# for p in kps:
# cv2.circle(frame, (int(p[0]), int(p[1])), color=(0, 255, 0), radius=3)
matches = match_frames(prev_des, des)
# if matches:
# print("", len(matches), " matches found")
# frame = cv2.drawMatches(frame, kps, prev_frame, prev_kps, matches[:10], None, 2, flags=2)
transitory_vec = 0
stationary_left_vec = 0
left_count = 0
stationary_right_vec = 0
right_count = 0
for m in matches:
idx1 = kps[m.trainIdx]
idx2 = prev_kps[m.queryIdx]
pt1 = (int(idx1.pt[0]), int(idx1.pt[1]))
pt2 = (int(idx2.pt[0]), int(idx2.pt[1]))
x1 = pt1[0]
x2 = pt2[0]
transitory_vec += x2 - x1
if x2 > frame.shape[1] / 2:
stationary_right_vec += x2 - x1
right_count += 1
else:
stationary_left_vec += x2 - x1
left_count += 1
if math.hypot(pt1[0] - pt2[0], pt1[1] - pt2[1]) <= 100:
cv2.line(frame, pt1, pt2, (int(255 * (1 - m.distance / 32)), 0, 0), 2)
vect = str((stationary_left_vec, stationary_right_vec))
adj_vect = str(
(
round(stationary_left_vec / max(1, left_count), 2),
round(stationary_right_vec / max(1, right_count), 2),
)
)
phrase = (
"Vectors: "
+ vect
+ "Adjusted: "
+ adj_vect
+ "Count: "
+ str((left_count, right_count))
+ "="
+ str(left_count + right_count)
)
# TODO: Possible improvements to direction estimation
# - Check ratio of matches between left and right
# (if turning left, there will be more matches on the right)
# - Use previous (1 or more) estimation data as well
# (if turning left more likely to be turning left)
# - Look at up/down movement for better differentiating FORWARD/BACKWARD
# - Give different weightings to vectors depending on match distance
# - If average pixel difference is increasing then FORWARD
# If decreasing then BACKWARD (change in pixel distance increases/decreases)
if stationary_left_vec < 0 and stationary_right_vec > 0:
phrase = "FORWARD " + phrase
elif stationary_left_vec > 0 and stationary_right_vec < 0:
phrase = "BACKWARD " + phrase
elif stationary_left_vec < 0 and stationary_right_vec < 0:
phrase = "LEFT " + phrase
elif stationary_left_vec > 0 and stationary_right_vec > 0:
phrase = "RIGHT " + phrase
# print(phrase)
loc = (10, 20)
frame = cv2.putText(frame, phrase, loc, cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))
return frame
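# Minimal usage sketch (kept as comments; assumes frames come from an OpenCV capture,
# which is not part of this module):
#   cap = cv2.VideoCapture("drive.mp4")
#   ok, prev = cap.read()
#   while True:
#       ok, frame = cap.read()
#       if not ok:
#           break
#       annotated = process_frame(frame, prev)
#       cv2.imshow("motion", annotated)
#       prev = frame
#       if cv2.waitKey(1) & 0xFF == ord("q"):
#           break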
| 30.171875
| 99
| 0.606163
|
f002c88ce3873b7bdae2bd5e9a946b7d0dfbf75f
| 1,862
|
py
|
Python
|
src/sardana/sardanavalue.py
|
schooft/sardana
|
76287b416650f40da79871ee3849340d0ff31f1d
|
[
"CC-BY-3.0"
] | null | null | null |
src/sardana/sardanavalue.py
|
schooft/sardana
|
76287b416650f40da79871ee3849340d0ff31f1d
|
[
"CC-BY-3.0"
] | null | null | null |
src/sardana/sardanavalue.py
|
schooft/sardana
|
76287b416650f40da79871ee3849340d0ff31f1d
|
[
"CC-BY-3.0"
] | null | null | null |
#!/usr/bin/env python
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
"""This module is part of the Python Sardana libray. It defines the base
classes for Sardana values"""
from __future__ import absolute_import
__all__ = ["SardanaValue"]
__docformat__ = 'restructuredtext'
import time
class SardanaValue(object):
def __init__(self, value=None, exc_info=None, timestamp=None,
dtype=None, dformat=None):
self.value = value
self.error = exc_info is not None
self.exc_info = exc_info
if timestamp is None:
timestamp = time.time()
self.timestamp = timestamp
self.dtype = dtype
self.dformat = dformat
def __repr__(self):
v = None
if self.error:
v = "<Error>"
else:
v = self.value
return "{0.__class__.__name__}(value={1}, timestamp={0.timestamp})".format(self, v)
def __str__(self):
return repr(self)
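# Minimal usage sketch (illustrative values only, not part of the Sardana library docs):
#   v = SardanaValue(value=3.14, dtype="float")      # repr -> SardanaValue(value=3.14, timestamp=...)
#   import sys
#   try:
#       1 / 0
#   except ZeroDivisionError:
#       err = SardanaValue(exc_info=sys.exc_info())  # err.error is True, repr shows <Error>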
| 30.52459
| 91
| 0.619764
|
89924655132eaf8c956b9196e0548e13577be0c7
| 21,912
|
py
|
Python
|
ep_clustering/kalman_filter/_kalman_filter.py
|
aicherc/EP_Collapsed_Gibbs
|
3b2e8c3addeab2343837b9e86e9cb57b00798b9a
|
[
"MIT"
] | 1
|
2019-12-14T01:14:56.000Z
|
2019-12-14T01:14:56.000Z
|
ep_clustering/kalman_filter/_kalman_filter.py
|
aicherc/EP_Collapsed_Gibbs
|
3b2e8c3addeab2343837b9e86e9cb57b00798b9a
|
[
"MIT"
] | null | null | null |
ep_clustering/kalman_filter/_kalman_filter.py
|
aicherc/EP_Collapsed_Gibbs
|
3b2e8c3addeab2343837b9e86e9cb57b00798b9a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Likelihood Objects for Gibbs Sampler
"""
# Import Modules
import numpy as np
# Author Information
__author__ = "Christopher Aicher"
class KalmanFilter(object):
""" Kalman Filter Object
N - dimension of state vector
T - number of time points
Args:
y (N by T ndarray): observations
y_count (N by T ndarray): counts of observations (0 indicates missing)
A (N ndarray): AR Coefficients (diagonal matrix)
lambduh (N ndarray): factor loadings
sigma2_x (double): variance of latent process
sigma2_y (N ndarray): variance of observation errors (diagonal matrix)
eta_mean (T ndarray): latent cluster mean
eta_var (T ndarray): latent cluster variance
mu_0 (N ndarray): prior mean for x at time -1
V_0 (N by N ndarray): prior variance for x at time -1
Attributes:
y_T (T by N ndarray): observations
y_count_T (T by N ndarray): counts of observations (0 indicates missing)
A (N ndarray): AR Coefficients (diagonal matrix)
lambduh (N ndarray): factor loadings
sigma2_x (double): variance of latent process
sigma2_y (N ndarray): variance of observation errors (diagonal matrix)
        eta_mean (T ndarray): latent cluster mean
        eta_var (T ndarray): latent cluster variance
Methods:
- kalman_filter_step
- filter_pass
- smoothing_pass
- calculate_log_likelihood
- calculate_cond_log_likelihood
- sample_x
- sample_eta
"""
def __init__(self, y, A, lambduh, sigma2_x, sigma2_y, eta_mean, eta_var,
mu_0=None, V_0=None, y_count=None):
if np.isscalar(A):
A = np.array([A])
if np.isscalar(lambduh):
lambduh = np.array([lambduh])
if np.isscalar(sigma2_y):
sigma2_y = np.array([sigma2_y])
self.y_T = y.T
self.T, self.N = np.shape(self.y_T)
self.A = A
self.lambduh = lambduh
self.sigma2_x = sigma2_x
self.sigma2_y = sigma2_y
self.eta_mean = eta_mean
self.eta_var = eta_var
if mu_0 is None:
self.mu_0 = np.zeros(self.N)
if V_0 is None:
self.V_0 = np.ones(self.N)
self.V_0 *= self.sigma2_x/(1.0-self.A**2)
self.V_0 = np.diag(self.V_0)
if y_count is None:
y_count = 1.0 - np.isnan(y)
y_count[np.isnan(y)] = 0
self.y_count_T = y_count.T
        # Scalar division is much more efficient than np.linalg.solve
if self.N == 1:
self.linalg_solve = lambda a, x: x/a
else:
self.linalg_solve = np.linalg.solve
self._check_attrs()
return
def _check_attrs(self):
""" Check that attrs are valid """
if np.size(self.A) != self.N:
raise ValueError("A must be a N ndarray")
if np.size(self.lambduh) != self.N:
raise ValueError("lambduh must be a N ndarray")
if np.size(self.sigma2_y) != self.N:
raise ValueError("sigma2_y must be a N ndarray")
if np.any(self.sigma2_y < 0):
raise ValueError("sigma2_y must be nonnegative")
if self.sigma2_x < 0:
raise ValueError("sigma2_x must be nonnegative")
if np.size(self.eta_mean) != self.T:
raise ValueError("eta_mean must be a T ndarray")
if np.size(self.eta_var) != self.T:
raise ValueError("eta_var must be a T ndarray")
if np.any(self.eta_var < 0):
raise ValueError("eta_var must be nonnegative")
if np.size(self.mu_0) != self.N:
raise ValueError("mu_0 must be a N ndarray")
if np.shape(self.V_0) != (self.N, self.N):
raise ValueError("V_0 must be a N by N ndarray")
if np.any(np.linalg.eigvals(self.V_0) < 0):
raise ValueError("V_0 must be nonnegative")
if np.shape(self.y_count_T) != np.shape(self.y_T):
raise ValueError("y_count and y do not have the same shape")
if np.any(self.y_count_T < 0):
raise ValueError("y_count must be nonnegative")
return
def kalman_filter_step(self, t, mu_prev, V_prev):
""" Apply Kalman Filter to new observation at time t
Args:
t (int): time index t
mu_prev (N ndarray): filtered mean at time t-1
V_prev (N by N ndarray): filtered variance at time t-1
Returns:
out (dict): dictionary containing
- mu_filter (N ndarray) - filtered mean at time t
- V_filter (N by N ndarray) - filtered variance at time t
- S_t (N by N ndarray) - predictive variance for observation y_t
- mu_predict (N ndarray) - predictive mean for time t
- V_predict (N by N ndarray) - predictive variance for time t
"""
# Predict
y_t = self.y_T[t]
y_count_t = self.y_count_T[t]
mu_predict = self.A * mu_prev + self.lambduh * self.eta_mean[t]
Q = (np.eye(self.N)*self.sigma2_x +
np.outer(self.lambduh, self.lambduh)*self.eta_var[t])
V_predict = _mult_diag_matrix(self.A,
_mult_diag_matrix(self.A, V_prev, on_right=True)) + Q
is_obs = y_count_t > 0
V_yx = V_predict[is_obs,:]
V_yy = V_yx[:,is_obs]
if np.any(is_obs):
# Observation Variance
S_t = V_yy + np.diag(self.sigma2_y[is_obs] / y_count_t[is_obs])
if np.any(np.isnan(S_t)):
raise ValueError("DEBUG")
# Gain Matrix
K_t = self.linalg_solve(S_t, V_yx).T
# Filter
mu_filter = mu_predict + K_t.dot(y_t[is_obs] - mu_predict[is_obs])
V_filter = V_predict - K_t.dot(V_yx)
else:
# No observations -> No filter update step
S_t = np.array([])
mu_filter = mu_predict
V_filter = V_predict
out = {
'mu_predict': mu_predict,
'V_predict': V_predict,
'S_t': S_t,
'mu_filter': mu_filter,
'V_filter': V_filter, }
return out
def filter_pass(self):
""" One pass of the Kalman Filter
Returns:
out (list of T dicts): containing
- mu_filter (N ndarray) - filtered mean at time t
- V_filter (N by N ndarray) - filtered variance at time t
- S_t (N by N ndarray) - predictive variance for observation y_t
- mu_predict (N ndarray) - predictive mean for time t
- V_predict (N by N ndarray) - predictive variance for time t
"""
mu = self.mu_0
V = self.V_0
out = [None]*self.T
for t in range(0, self.T):
out[t] = self.kalman_filter_step(t, mu, V)
mu, V = out[t]['mu_filter'], out[t]['V_filter']
return out
def calculate_log_likelihood(self):
""" Calculate the log-likelihood of y
Returns:
log_like (double): log-likelihood of observations
"""
log_like = 0.0
mu = self.mu_0
V = self.V_0
for t in range(0, self.T):
kalman_result = self.kalman_filter_step(t, mu, V)
y_t = self.y_T[t]
y_count_t = self.y_count_T[t]
is_obs = y_count_t > 0
log_like += _gaussian_log_likelihood(y_t[is_obs],
mean=kalman_result['mu_predict'][is_obs],
variance=kalman_result['S_t'])
mu, V = kalman_result['mu_filter'], kalman_result['V_filter']
        return float(log_like)  # float() instead of the removed np.asscalar
def calculate_cond_log_likelihood(self, i):
""" Calculate the conditional log-likelihood of y_i given other y
Args:
i (int): index of stream
Returns:
cond_log_like (double): conditional log-likelihood of stream i
"""
cond_log_like = 0.0
mu = self.mu_0
V = self.V_0
for t in range(0, self.T):
kalman_result = self.kalman_filter_step(t, mu, V)
y_t = self.y_T[t]
y_count_t = self.y_count_T[t]
is_obs = y_count_t > 0
if is_obs[i]:
cond_log_like += _gaussian_cond_log_likelihood(
x=y_t[is_obs],
mean=kalman_result['mu_predict'][is_obs],
variance=kalman_result['S_t'],
i=(np.cumsum(is_obs)[i] - 1),
)
mu, V = kalman_result['mu_filter'], kalman_result['V_filter']
return cond_log_like
def smoothing_pass(self, filter_out=None, calc_prev=False):
""" One pass of the Kalman Smoothing
Args:
filter_out (list of dicts): output of filter_pass (optional)
Will call `filter_pass` if not supplied
calc_prev (bool): calculate smoothed posterior for t=-1
Returns:
out (list of T dicts): containing
- mu_smoothed (N ndarray) - filtered mean at time t
- V_smoothed (N by N ndarray) - filtered variance at time t
- J_t (N by N ndarray) - backward filter matrix
If calc_prev is True, then smoothing_pass() will also return
the dict prev (for t=-1)
"""
out = [None]*self.T
# Forward Kalman Filter
if filter_out is None:
filter_out = self.filter_pass()
# Backward Smoothing Pass
mu_smoothed = filter_out[self.T-1]['mu_filter']
V_smoothed = filter_out[self.T-1]['V_filter']
out[self.T-1] = {'mu_smoothed': mu_smoothed,
'V_smoothed': V_smoothed,
'J_t': None}
for t in reversed(range(0, self.T-1)):
mu_filter = filter_out[t]['mu_filter']
V_filter = filter_out[t]['V_filter']
mu_predict_next = filter_out[t+1]['mu_predict']
V_predict_next = filter_out[t+1]['V_predict']
J_t = self.linalg_solve(V_predict_next,
_mult_diag_matrix(self.A, V_filter)).T
mu_smoothed = mu_filter + J_t.dot(mu_smoothed-mu_predict_next)
V_smoothed = (V_filter +
J_t.dot(V_smoothed - V_predict_next).dot(J_t.T))
out[t] = {'mu_smoothed': mu_smoothed,
'V_smoothed': V_smoothed,
'J_t': J_t}
if not calc_prev:
return out
else:
# Handle t = -1
mu_filter = self.mu_0
V_filter = self.V_0
mu_predict_next = filter_out[0]['mu_predict']
V_predict_next = filter_out[0]['V_predict']
J_t = self.linalg_solve(V_predict_next,
_mult_diag_matrix(self.A, V_filter)).T
mu_smoothed = mu_filter + J_t.dot(mu_smoothed-mu_predict_next)
V_smoothed = (V_filter +
J_t.dot(V_smoothed - V_predict_next).dot(J_t.T))
prev = {'mu_smoothed': mu_smoothed,
'V_smoothed': V_smoothed,
'J_t': J_t}
return out, prev
def _backward_pass(self, filter_out = None, smoothing_out = None):
""" Helper function for moments of G(X_t) ~ Pr(Y_{t:T} | X_t)
G(X_t) ~ Pr(X_t | Y_{1:T}) / Pr(X_t | Y_{1:t-1})
Returns:
out (list of T dicts): containing
- mu_beta (N ndarray) - backward filtered mean at time t
- V_beta (N by N ndarray) - backward filtered variance at time t
"""
out = [None]*self.T
# Perform Filter and Smoother if necessary
if filter_out is None:
filter_out = self.filter_pass()
if smoothing_out is None:
smoothing_out = self.smoothing_pass(filter_out = filter_out)
for t in range(0, self.T):
mu_predict = filter_out[t]['mu_predict']
V_predict = filter_out[t]['V_predict']
mu_smoothed = smoothing_out[t]['mu_smoothed']
V_smoothed = smoothing_out[t]['V_smoothed']
if np.allclose(V_smoothed, V_predict):
# If Pr(Y_{s:T} | X_s) = 1, e.g. no observations in s:T
# Then set V_beta = Inf
V_beta = np.diag(np.inf * np.ones(self.N))
mu_beta = np.zeros(self.N)
else:
V_beta = V_smoothed.dot(
np.eye(self.N) +
self.linalg_solve(V_predict - V_smoothed, V_smoothed)
)
mu_beta = V_beta.dot(
self.linalg_solve(V_smoothed, mu_smoothed) -
self.linalg_solve(V_predict, mu_predict)
)
out[t] = {
"mu_beta": mu_beta,
"V_beta": V_beta,
}
return out
def moment_eta(self):
""" Return the mean and (diag) variance of the latent process given Y.
        Returns the marginal moments of the likelihood of the latent process for EP.
Note that eta_mean, eta_variance are the parameters of [Pr(Y | \eta_s)]
Returns:
eta_mean (T ndarray): mean of eta likelihood
eta_variance (T ndarray): variance of eta likelihood
"""
eta_mean = np.zeros(self.T)
eta_variance = np.zeros(self.T)
filter_out = self.filter_pass()
smoothing_out = self.smoothing_pass(filter_out = filter_out)
beta_out = self._backward_pass(
filter_out = filter_out,
smoothing_out = smoothing_out
)
# Constants
sigma2_eta = (self.lambduh.dot(self.lambduh))**-1 * self.sigma2_x
p_beta = (self.lambduh.dot(self.lambduh))**-1 * self.lambduh
p_alpha = -1.0 * p_beta * self.A
for t in range(0, self.T):
# alpha(X_{t-1}) ~ Pr(X_{t-1} | Y_{1:t-1})
if t == 0:
mu_alpha = self.mu_0
V_alpha = self.V_0
else:
mu_alpha = filter_out[t-1]["mu_filter"]
V_alpha = filter_out[t-1]["V_filter"]
# beta(X_t) ~ Pr(Y_{t:T} | X_t)
mu_beta = beta_out[t]["mu_beta"]
V_beta = beta_out[t]["V_beta"]
eta_mean[t] = p_alpha.dot(mu_alpha) + p_beta.dot(mu_beta)
eta_variance[t] = (
p_alpha.dot(V_alpha.dot(p_alpha)) +
p_beta.dot(V_beta.dot(p_beta)) +
sigma2_eta
)
return eta_mean, eta_variance
def _old_moment_eta(self):
""" Old (incorrect) EP moment update step
Use `moment_eta` instead.
Return the mean and variance of the likelihood of the
latent process given Y (integrating out X).
Returns:
eta_mean (T ndarray): mean of eta
eta_variance (T ndarray): variance of eta
"""
eta_mean = np.zeros(self.T)
eta_variance = np.zeros(self.T)
smoothing_out, prev = self.smoothing_pass(calc_prev=True)
# Handle t = 0
J_prev = prev['J_t']
mu_prev = prev['mu_smoothed']
V_prev = prev['V_smoothed']
mu = smoothing_out[0]['mu_smoothed']
V = smoothing_out[0]['V_smoothed']
eta_mean[0] = (mu - self.A * mu_prev) / self.lambduh
eta_variance[0] = (self.sigma2_x +
(V + self.A**2 * V_prev - 2 * V * J_prev * self.A) /
(self.lambduh**2))
# Handle t = 1:T-1
for t in range(1, self.T):
J_prev = smoothing_out[t-1]['J_t']
mu_prev = mu
V_prev = V
mu = smoothing_out[t]['mu_smoothed']
V = smoothing_out[t]['V_smoothed']
eta_mean[t] = (mu - self.A * mu_prev) / self.lambduh
eta_variance[t] = (self.sigma2_x +
(V + self.A**2 * V_prev - 2 * V * J_prev * self.A) /
(self.lambduh**2))
return eta_mean, eta_variance
def sample_x(self, filter_out=None):
""" Sample latent process using forward filter backward sampler
Args:
filter_out (list of dicts): output of filter_pass (optional)
Will call filter_pass if not supplied
Returns:
x (T by N ndarray): sample from latent state conditioned on y
"""
x = np.zeros((self.T,self.N))
# Forward Kalman Filter
if filter_out is None:
filter_out = self.filter_pass()
# Backwards Sampler
mu = filter_out[self.T-1]['mu_filter']
V = filter_out[self.T-1]['V_filter']
#x_next = np.random.multivariate_normal(mean=mu, cov=V)
x_next = _sample_multivariate_normal(mu, V)
x[self.T-1,:] = x_next
for t in reversed(range(0, self.T-1)):
mu_filter = filter_out[t]['mu_filter']
V_filter = filter_out[t]['V_filter']
mu_predict_next = filter_out[t+1]['mu_predict']
V_predict_next = filter_out[t+1]['V_predict']
J_t = self.linalg_solve(V_predict_next,
_mult_diag_matrix(self.A, V_filter)).T
mu = mu_filter + J_t.dot(x_next - mu_predict_next)
V = V_filter - J_t.dot(_mult_diag_matrix(self.A, V_filter))
# x_next = np.random.multivariate_normal(mu, V)
x_next = _sample_multivariate_normal(mu, V)
x[t,:] = x_next
return x
def sample_eta(self, x=None):
""" Sample latent process
Args:
x (T by N ndarray): sampled x (optional)
Returns:
eta (T ndarray): sampled eta
"""
if x is None:
x = self.sample_x()
eta = np.zeros(self.T)
# Handle t = 0
mean_1 = self.eta_mean[0]
var_1 = self.eta_var[0]
mean_2 = np.sum(
self.lambduh * (x[0] - self.A * self.mu_0)
) / np.sum(self.lambduh ** 2)
var_2 = np.sum(
self.lambduh ** 2 /
(self.sigma2_x + self.A**2 * np.diag(self.V_0))
) ** -1
var = 1.0/(1.0/var_1 + 1.0/var_2)
mean = (mean_1/var_1 + mean_2/var_2) * var
eta[0] = np.random.randn(1)*np.sqrt(var) + mean
# Handle t = 1:T-1
for t in range(1, self.T):
mean_1 = self.eta_mean[t]
var_1 = self.eta_var[t]
mean_2 = np.sum(
self.lambduh * (x[t] - self.A * x[t-1])
) / np.sum(self.lambduh ** 2)
var_2 = self.sigma2_x / np.sum(self.lambduh ** 2)
var = 1.0/(1.0/var_1 + 1.0/var_2)
mean = (mean_1/var_1 + mean_2/var_2) * var
eta[t] = np.random.randn(1)*np.sqrt(var) + mean
return eta
def sample_y(self, x=None, filter_out=None):
""" Sample new observations based on latent process conditioned on y
Args:
x (T by N ndarray): sample from latent state conditioned on y
filter_out (list of dicts): output of filter_pass (optional)
Only used if x is not supplied
Returns:
y (T by N ndarray): sample of observations conditioned on y
"""
y = np.zeros((self.T, self.N))
        # Draw x if it is not supplied
if x is None:
x = self.sample_x(filter_out=filter_out)
        # Y is X plus Gaussian observation noise (scale by the std, not the variance)
        y = x + _mult_diag_matrix(np.sqrt(self.sigma2_y),
                                  np.random.normal(size=np.shape(x)),
                                  on_right=True)
return y
# UTILITY FUNCTIONS
def _mult_diag_matrix(D, mtx, on_right=False):
""" Multiply diagonal matrix D to mtx
Args:
D (N ndarray) - diagonal matrix
mtx (ndarray) - matrix to multiply
on_right (bool) - whether to return D * mtx (False) or mtx * D (True)
"""
if not on_right:
return (D*mtx.T).T
else:
return D*mtx
def _sample_multivariate_normal(mean, cov):
""" Alternative to numpy.random.multivariate_normal """
if np.size(mean) == 1:
x = np.random.normal(loc = mean, scale = np.sqrt(cov))
return x
else:
L = np.linalg.cholesky(cov)
x = L.dot(np.random.normal(size = np.size(mean))) + mean
return x
def _gaussian_log_likelihood(x, mean, variance):
""" Calculate the log-likelihood of multivariate Gaussian """
N = np.size(x)
log_like = - N/2.0 * np.log(2*np.pi)
if N == 1:
log_like += - 0.5 * np.log(variance)
log_like += - 0.5 * (x-mean)**2/variance
elif N == 0:
log_like = 0.0
else:
log_like += - 0.5 * np.linalg.slogdet(variance)[1]
log_like += - 0.5 * np.sum((x-mean)*np.linalg.solve(variance, x-mean))
return log_like
def _gaussian_cond_log_likelihood(x, mean, variance, i):
""" Calculate the conditional log-likelihood of multivariate Gaussian """
N = np.size(x)
if i >= N:
raise ValueError("Index i is too large for x")
if N == 1:
return _gaussian_log_likelihood(x, mean, variance)
j = np.arange(N) != i
V_ii = variance[i,i]
V_ij = variance[i,j]
V_jj = variance[np.ix_(j,j)]
mu_i = mean[i]
mu_j = mean[j]
K_ij = np.linalg.solve(V_jj, V_ij.T).T
cond_mean = mean[i] + K_ij.dot(x[j] - mu_j)
cond_variance = V_ii - K_ij.dot(V_ij.T)
cond_log_like = _gaussian_log_likelihood(x[i], cond_mean, cond_variance)
return cond_log_like
def _categorical_sample(probs):
""" Draw a categorical random variable over {0,...,K-1}
Args:
probs (K ndarray) - probability of each value
Returns:
draw (int) - random outcome
"""
return int(np.sum(np.random.rand(1) > np.cumsum(probs)))
#EOF
| 34.947368
| 94
| 0.550612
|
d58ffe0b469ae75cb515a71731bb472f9a1c0fcf
| 1,028
|
py
|
Python
|
my_awesome_project/users/admin.py
|
CoderSaty/my_awesome_project
|
0be26e70e1974d3d58d37f760634380a6a170692
|
[
"MIT"
] | null | null | null |
my_awesome_project/users/admin.py
|
CoderSaty/my_awesome_project
|
0be26e70e1974d3d58d37f760634380a6a170692
|
[
"MIT"
] | 16
|
2022-01-25T11:25:04.000Z
|
2022-03-31T11:26:21.000Z
|
my_awesome_project/users/admin.py
|
CoderSaty/my_awesome_project
|
0be26e70e1974d3d58d37f760634380a6a170692
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
from my_awesome_project.users.forms import UserAdminChangeForm, UserAdminCreationForm
User = get_user_model()
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin):
form = UserAdminChangeForm
add_form = UserAdminCreationForm
fieldsets = (
(None, {"fields": ("username", "password")}),
(_("Personal info"), {"fields": ("name", "email")}),
(
_("Permissions"),
{
"fields": (
"is_active",
"is_staff",
"is_superuser",
"groups",
"user_permissions",
),
},
),
(_("Important dates"), {"fields": ("last_login", "date_joined")}),
)
list_display = ["username", "name", "is_superuser"]
search_fields = ["name"]
| 29.371429
| 85
| 0.569066
|
e2abbee5eade9f93305f197db136a68143e31b13
| 10,103
|
py
|
Python
|
train.py
|
ihsangkcl/RFM
|
a3a549add23863bf76f2a9629f73a4bb4d9682a1
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
ihsangkcl/RFM
|
a3a549add23863bf76f2a9629f73a4bb4d9682a1
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
ihsangkcl/RFM
|
a3a549add23863bf76f2a9629f73a4bb4d9682a1
|
[
"Apache-2.0"
] | 1
|
2022-01-19T20:25:37.000Z
|
2022-01-19T20:25:37.000Z
|
import torch
from utils.utils import data_prefetcher_two, cal_fam, setup_seed, calRes
from pretrainedmodels import xception
import utils.datasets_profiles as dp
from torch.utils.data import DataLoader
from torch.optim import Adam
import numpy as np
import argparse
import random
import time
np.set_printoptions(precision=3)
parser = argparse.ArgumentParser()
parser.add_argument('--device', default="cuda:0", type=str)
parser.add_argument('--modelname', default="xception", type=str)
parser.add_argument('--distributed', default=False, action='store_true')
parser.add_argument('--upper', default="xbase", type=str,
help='the prefix used in save files')
parser.add_argument('--eH', default=120, type=int)
parser.add_argument('--eW', default=120, type=int)
parser.add_argument('--batch_size', default=16, type=int)
parser.add_argument('--max_batch', default=500000, type=int)
parser.add_argument('--num_workers', default=4, type=int)
parser.add_argument('--logbatch', default=3000, type=int)
parser.add_argument('--savebatch', default=30000, type=int)
parser.add_argument('--seed', default=5, type=int)
parser.add_argument('--lr', default=0.0002, type=float, help='learning rate')
parser.add_argument('--pin_memory', '-p', default=False, action='store_true')
parser.add_argument('--resume_model', default=None)
parser.add_argument('--resume_optim', default=None)
parser.add_argument('--save_model', default=True, action='store_true')
parser.add_argument('--save_optim', default=False, action='store_true')
args = parser.parse_args()
modelname = args.modelname
upper = args.upper
#args.resume_model = "./models/baseline.pth"
#args.resume_model = "./models/xbase_xception_model_batch_12000"
args.resume_model = "./models/dffd_xception_model-RFM_"
#args.resume_model = "./models/dffd_xception_model-baseline_"
def Eval(model, lossfunc, dtloader):
model.eval()
sumloss = 0.
y_true_all = None
y_pred_all = None
with torch.no_grad():
for (j, batch) in enumerate(dtloader):
x, y_true = batch
y_pred = model.forward(x.cuda())
loss = lossfunc(y_pred, y_true.cuda())
sumloss += loss.detach()*len(x)
y_pred = torch.nn.functional.softmax(
y_pred.detach(), dim=1)[:, 1].flatten()
if y_true_all is None:
y_true_all = y_true
y_pred_all = y_pred
else:
y_true_all = torch.cat((y_true_all, y_true))
y_pred_all = torch.cat((y_pred_all, y_pred))
return sumloss/len(y_true_all), y_true_all.detach(), y_pred_all.detach()
def Log(log):
print(log)
f = open("./logs/"+upper+"_"+modelname+".log", "a")
f.write(log+"\n")
f.close()
if __name__ == "__main__":
Log("\nModel:%s BatchSize:%d lr:%f" % (modelname, args.batch_size, args.lr))
torch.cuda.set_device(args.device)
setup_seed(args.seed)
print("cudnn.version:%s enabled:%s benchmark:%s deterministic:%s" % (torch.backends.cudnn.version(), torch.backends.cudnn.enabled, torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic))
MAX_TPR_4 = 0.
model = eval(modelname)(num_classes=2, pretrained=False).cuda()
if args.distributed:
model = torch.nn.DataParallel(model)
optim = Adam(model.parameters(), lr=args.lr, weight_decay=0)
if args.resume_model is not None:
model.load_state_dict(torch.load(args.resume_model,map_location='cuda:0'))
if args.resume_optim is not None:
optim.load_state_dict(torch.load(args.resume_optim))
lossfunc = torch.nn.CrossEntropyLoss()
dataset = dp.Stylespace()
trainsetR = dataset.getTrainsetR()
trainsetF = dataset.getTrainsetF()
validset = dataset.getValidset()
testsetR = dataset.getTestsetR()
TestsetList, TestsetName = dataset.getsetlist(real=False, setType=2)
setup_seed(args.seed)
traindataloaderR = DataLoader(
trainsetR,
batch_size=int(args.batch_size/2),
shuffle=True,
pin_memory=args.pin_memory,
num_workers=args.num_workers
)
traindataloaderF = DataLoader(
trainsetF,
batch_size=int(args.batch_size/2),
shuffle=True,
pin_memory=args.pin_memory,
num_workers=args.num_workers
)
validdataloader = DataLoader(
validset,
batch_size=args.batch_size*2,
pin_memory=args.pin_memory,
num_workers=args.num_workers
)
testdataloaderR = DataLoader(
testsetR,
batch_size=args.batch_size*2,
pin_memory=args.pin_memory,
num_workers=args.num_workers
)
testdataloaderList = []
for tmptestset in TestsetList:
testdataloaderList.append(
DataLoader(
tmptestset,
batch_size=args.batch_size*2,
pin_memory=args.pin_memory,
num_workers=args.num_workers
)
)
print("Loaded model")
batchind = 0
e = 0
sumcnt = 0
sumloss = 0.
while True:
# prefetcher = data_prefetcher_two(traindataloaderR, traindataloaderF)
# data, y_true = prefetcher.next()
# while data is not None and batchind < args.max_batch:
# stime = time.time()
# sumcnt += len(data)
# ''' ↓ the implementation of RFM ↓ '''
# model.eval()
# mask = cal_fam(model, data)
# imgmask = torch.ones_like(mask)
# imgh = imgw = 224
# for i in range(len(mask)):
# maxind = np.argsort(mask[i].cpu().numpy().flatten())[::-1]
# pointcnt = 0
# for pointind in maxind:
# pointx = pointind//imgw
# pointy = pointind % imgw
# if imgmask[i][0][pointx][pointy] == 1:
# maskh = random.randint(1, args.eH)
# maskw = random.randint(1, args.eW)
# sh = random.randint(1, maskh)
# sw = random.randint(1, maskw)
# top = max(pointx-sh, 0)
# bot = min(pointx+(maskh-sh), imgh)
# lef = max(pointy-sw, 0)
# rig = min(pointy+(maskw-sw), imgw)
# imgmask[i][:, top:bot, lef:rig] = torch.zeros_like(imgmask[i][:, top:bot, lef:rig])
# pointcnt += 1
# if pointcnt >= 3:
# break
# data = imgmask * data + (1-imgmask) * (torch.rand_like(data)*2-1.)
# ''' ↑ the implementation of RFM ↑ '''
# model.train()
# y_pred = model.forward(data)
# loss = lossfunc(y_pred, y_true)
# flood = (loss-0.04).abs() + 0.04
# sumloss += loss.detach()*len(data)
# data, y_true = prefetcher.next()
# optim.zero_grad()
# flood.backward()
# optim.step()
# batchind += 1
# print("Train %06d loss:%.5f avgloss:%.5f lr:%.6f time:%.4f" % (batchind, loss, sumloss/sumcnt, optim.param_groups[0]["lr"], time.time()-stime), end="\r")
if batchind % args.logbatch == 0:
print()
# Log("epoch:%03d batch:%06d loss:%.5f avgloss:%.5f" % (e, batchind, loss, sumloss/sumcnt))
loss_valid, y_true_valid, y_pred_valid = Eval(model, lossfunc, validdataloader)
            ap, acc, AUC, TPR_2, TPR_3, TPR_4, fprs, tprs, ths = calRes(y_true_valid, y_pred_valid)
Log("AUC:%.6f TPR_2:%.6f TPR_3:%.6f TPR_4:%.6f %s" % (AUC, TPR_2, TPR_3, TPR_4, "validset"))
loss_r, y_true_r, y_pred_r = Eval(model, lossfunc, testdataloaderR)
sumAUC = sumTPR_2 = sumTPR_3 = sumTPR_4 = sumFPRS = sumTPRS = sumTHS = 0
for i, tmptestdataloader in enumerate(testdataloaderList):
loss_f, y_true_f, y_pred_f = Eval(model, lossfunc, tmptestdataloader)
ap, acc, AUC, TPR_2, TPR_3, TPR_4, fprs, tprs, ths = calRes(torch.cat((y_true_r, y_true_f)), torch.cat((y_pred_r, y_pred_f)))
sumAUC += AUC
sumTPR_2 += TPR_2
sumTPR_3 += TPR_3
sumTPR_4 += TPR_4
np.savetxt('./logs/fprs'+TestsetName[i]+'.out', fprs, delimiter=',')
np.savetxt('./logs/tprs'+TestsetName[i]+'.out', tprs, delimiter=',')
#np.savetxt('./logs/fprs'+TestsetName[i]+'.out', fprs, delimiter=',')
#sumFPRS += fprs
#sumTPRS += tprs
#sumTHS += ths
Log("AUC:%.6f TPR_2:%.6f TPR_3:%.6f TPR_4:%.6f %s" % (AUC, TPR_2, TPR_3, TPR_4, TestsetName[i]))
if len(testdataloaderList) > 1:
Log("AUC:%.6f TPR_2:%.6f TPR_3:%.6f TPR_4:%.6f Test" %
(sumAUC/len(testdataloaderList), sumTPR_2/len(testdataloaderList), sumTPR_3/len(testdataloaderList), sumTPR_4/len(testdataloaderList)))
TPR_4 = (sumTPR_4)/len(testdataloaderList)
if batchind % args.savebatch == 0 or TPR_4 > MAX_TPR_4:
MAX_TPR_4 = TPR_4
if args.save_model:
torch.save(model.state_dict(), "./models/" + upper+"_"+modelname+"_model_batch_"+str(batchind))
if args.save_optim:
torch.save(optim.state_dict(), "./models/" + upper+"_"+modelname+"_optim_batch_"+str(batchind))
print("-------------------------------------------")
# e += 1
| 37.418519
| 204
| 0.558349
|
c7b9172b4bfc356c1d80589467ef26fa3deeaa8c
| 2,198
|
py
|
Python
|
backend/routes/organisation/route.py
|
amooabeebadesina/nuxt-netlify
|
edaf0c78ecc85e296452537ad82372c02239253e
|
[
"MIT"
] | null | null | null |
backend/routes/organisation/route.py
|
amooabeebadesina/nuxt-netlify
|
edaf0c78ecc85e296452537ad82372c02239253e
|
[
"MIT"
] | null | null | null |
backend/routes/organisation/route.py
|
amooabeebadesina/nuxt-netlify
|
edaf0c78ecc85e296452537ad82372c02239253e
|
[
"MIT"
] | null | null | null |
from typing import List
from fastapi import APIRouter, HTTPException, Response, status, Depends
from odmantic import ObjectId
from database.database_methods import Database
from models.organisation.model import Job, JobResponse, JobDetails
from utilities.validate_session import validate_org_session
job_router = APIRouter()
job_database = Database(Job)
@job_router.get("/all", response_model=List[JobDetails])
async def get_jobs():
jobs = [job for job in await job_database.find()]
return jobs
@job_router.post('/create/', response_model=JobResponse, dependencies=[Depends(validate_org_session)])
async def create_job(job_data: Job):
# There's no way to check if there's an identical job for now.
await job_database.save(job_data)
return {
"action": "Job Created",
"message": "Job Post created"
}
@job_router.get('/get/{job_id}/', response_model=JobDetails, dependencies=[Depends(validate_org_session)])
async def get_job(job_id: ObjectId):
job = await job_database.find_one(Job.id == job_id)
if not job:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Job does not exist"
)
return job
@job_router.put('/update/{job_id}/', response_model=JobResponse, dependencies=[Depends(validate_org_session)])
async def update_job(job_id: ObjectId, job_data: Job):
job = await job_database.find_one(Job.id == job_id)
if not job:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Job don't exist"
)
await job_database.save(job_data)
return {
"action": "Job Update",
"message": "Job details have been updated"
}
@job_router.delete('/delete/{job_id}/', response_model=JobResponse, dependencies=[Depends(validate_org_session)])
async def delete_job(job_id: ObjectId):
job = await job_database.find_one(Job.id == job_id)
if not job:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Job does not exist!"
)
await job_database.delete(job)
return {
"action": "Job Delete",
"message": "Job Post deleted"
}
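# Wiring sketch (kept as comments; the application module and session dependency are
# assumptions, not part of this file):
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(job_router, prefix="/jobs")
#   # exposes GET /jobs/all, POST /jobs/create/, GET /jobs/get/{job_id}/,
#   # PUT /jobs/update/{job_id}/ and DELETE /jobs/delete/{job_id}/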
| 31.855072
| 113
| 0.695177
|
f6eb55081a75b770b658c51ee4dc5b294719dd7e
| 5,477
|
py
|
Python
|
pde/fields/tests/test_tensorial.py
|
noah-ziethen/py-pde
|
b88e86439290c31284a4ac665a8e9ff34d08b494
|
[
"MIT"
] | null | null | null |
pde/fields/tests/test_tensorial.py
|
noah-ziethen/py-pde
|
b88e86439290c31284a4ac665a8e9ff34d08b494
|
[
"MIT"
] | null | null | null |
pde/fields/tests/test_tensorial.py
|
noah-ziethen/py-pde
|
b88e86439290c31284a4ac665a8e9ff34d08b494
|
[
"MIT"
] | null | null | null |
'''
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
'''
import numpy as np
import pytest
from .test_generic import iter_grids
from ..tensorial import Tensor2Field
from ..base import FieldBase
from ...grids import UnitGrid, CartesianGrid
def test_tensors():
""" test some tensor calculations """
grid = CartesianGrid([[0.1, 0.3], [-2, 3]], [3, 4])
t1 = Tensor2Field(grid, np.full((2, 2) + grid.shape, 1))
t2 = Tensor2Field(grid, np.full((2, 2) + grid.shape, 2))
np.testing.assert_allclose(t2.average, [[2, 2], [2, 2]])
assert t1.magnitude == pytest.approx(2)
t3 = t1 + t2
assert t3.grid == grid
np.testing.assert_allclose(t3.data, 3)
t1 += t2
np.testing.assert_allclose(t1.data, 3)
field = Tensor2Field.random_uniform(grid)
trace = field.trace()
from ..scalar import ScalarField
assert isinstance(trace, ScalarField)
np.testing.assert_allclose(trace.data, field.data.trace())
t1 = Tensor2Field(grid)
t1.data[0, 0, :] = 1
t1.data[0, 1, :] = 2
t1.data[1, 0, :] = 3
t1.data[1, 1, :] = 4
for method, value in [('min', 1), ('max', 4),
('norm', np.linalg.norm([[1, 2], [3, 4]])),
('squared_sum', 30), ('trace', 5),
('invariant1', 5), ('invariant2', -1)]:
p1 = t1.to_scalar(method)
assert p1.data.shape == grid.shape
np.testing.assert_allclose(p1.data, value)
t2 = FieldBase.from_state(t1.attributes, data=t1.data)
assert t1 == t2
assert t1.grid is t2.grid
attrs = Tensor2Field.unserialize_attributes(t1.attributes_serialized)
t2 = FieldBase.from_state(attrs, data=t1.data)
assert t1 == t2
assert t1.grid is not t2.grid
def test_tensor_symmetrize():
""" test advanced tensor calculations """
grid = CartesianGrid([[0.1, 0.3], [-2, 3]], [2, 2])
t1 = Tensor2Field(grid)
t1.data[0, 0, :] = 1
t1.data[0, 1, :] = 2
t1.data[1, 0, :] = 3
t1.data[1, 1, :] = 4
# traceless = False
t2 = t1.copy()
t1.symmetrize(make_traceless=False, inplace=True)
tr = t1.trace()
assert np.all(tr.data == 5)
t1_trans = np.swapaxes(t1.data, 0, 1)
np.testing.assert_allclose(t1.data, t1_trans.data)
ts = t1.copy()
ts.symmetrize(make_traceless=False, inplace=True)
np.testing.assert_allclose(t1.data, ts.data)
# traceless = True
t2.symmetrize(make_traceless=True, inplace=True)
tr = t2.trace()
assert np.all(tr.data == 0)
t2_trans = np.swapaxes(t2.data, 0, 1)
np.testing.assert_allclose(t2.data, t2_trans.data)
ts = t2.copy()
ts.symmetrize(make_traceless=True, inplace=True)
np.testing.assert_allclose(t2.data, ts.data)
@pytest.mark.parametrize("grid", iter_grids())
def test_add_interpolated_tensor(grid):
""" test the `add_interpolated` method """
f = Tensor2Field(grid)
a = np.random.random(f.data_shape)
c = tuple(grid.point_to_cell(grid.get_random_point()))
c_data = (Ellipsis,) + c
p = grid.cell_to_point(c, cartesian=False)
f.add_interpolated(p, a)
np.testing.assert_almost_equal(f.data[c_data],
a / grid.cell_volumes[c])
f.add_interpolated(grid.get_random_point(cartesian=False), a)
np.testing.assert_almost_equal(f.integral, 2 * a)
f.data = 0 # reset
add_interpolated = grid.make_add_interpolated_compiled()
c = tuple(grid.point_to_cell(grid.get_random_point()))
c_data = (Ellipsis,) + c
p = grid.cell_to_point(c, cartesian=False)
add_interpolated(f.data, p, a)
np.testing.assert_almost_equal(f.data[c_data],
a / grid.cell_volumes[c])
add_interpolated(f.data, grid.get_random_point(cartesian=False), a)
np.testing.assert_almost_equal(f.integral, 2 * a)
def test_tensor_invariants():
""" test the invariants """
# dim == 1
f = Tensor2Field.random_uniform(UnitGrid([3]))
np.testing.assert_allclose(f.to_scalar('invariant1').data,
f.to_scalar('invariant3').data)
np.testing.assert_allclose(f.to_scalar('invariant2').data, 0)
# dim == 2
f = Tensor2Field.random_uniform(UnitGrid([3, 3]))
invs = [f.to_scalar(f'invariant{i}').data for i in range(1, 4)]
np.testing.assert_allclose(2 * invs[1], invs[2])
a = np.random.uniform(0, 2 * np.pi) # pick random rotation angle
rot = Tensor2Field(f.grid)
rot.data[0, 0, ...] = np.cos(a)
rot.data[0, 1, ...] = np.sin(a)
rot.data[1, 0, ...] = -np.sin(a)
rot.data[1, 1, ...] = np.cos(a)
    f_rot = rot @ f @ rot.transpose()  # apply the rotation: R f R^T
for i, inv in enumerate(invs, 1):
np.testing.assert_allclose(inv, f_rot.to_scalar(f'invariant{i}').data,
err_msg=f'Mismatch in invariant {i}')
# dim == 3
from scipy.spatial.transform import Rotation
f = Tensor2Field.random_uniform(UnitGrid([1, 1, 1]))
rot = Tensor2Field(f.grid)
rot_mat = Rotation.from_rotvec(np.random.randn(3)).as_matrix()
rot.data = rot_mat.reshape(3, 3, 1, 1, 1)
    f_rot = rot @ f @ rot.transpose()  # apply the rotation: R f R^T
for i in range(1, 4):
np.testing.assert_allclose(f.to_scalar(f'invariant{i}').data,
f_rot.to_scalar(f'invariant{i}').data,
err_msg=f'Mismatch in invariant {i}')
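# --- Hedged illustration (not part of the original test suite) ---
# A minimal numpy-only sketch of the property that `test_tensor_invariants`
# checks at every grid point: the trace and determinant of a rank-2 tensor are
# unchanged under the orthogonal change of basis R t R^T.  The helper name
# `_invariants_under_rotation_demo` and the sample matrix are illustrative.
def _invariants_under_rotation_demo(angle=0.3):
    t = np.array([[1.0, 2.0], [3.0, 4.0]])
    rot = np.array([[np.cos(angle), np.sin(angle)],
                    [-np.sin(angle), np.cos(angle)]])
    t_rot = rot @ t @ rot.T
    np.testing.assert_allclose(np.trace(t_rot), np.trace(t))
    np.testing.assert_allclose(np.linalg.det(t_rot), np.linalg.det(t))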
| 34.23125
| 78
| 0.607267
|
86dc7abdc6e9a6e22a581a2b0ad7208329c4e565
| 3,721
|
py
|
Python
|
src/util/args.py
|
xavihart/pixel-nerf
|
1009af6a66f1f1a513120d1737e21e6a93ec6c64
|
[
"BSD-2-Clause"
] | 1
|
2021-12-14T15:42:12.000Z
|
2021-12-14T15:42:12.000Z
|
src/util/args.py
|
xavihart/pixel-nerf
|
1009af6a66f1f1a513120d1737e21e6a93ec6c64
|
[
"BSD-2-Clause"
] | null | null | null |
src/util/args.py
|
xavihart/pixel-nerf
|
1009af6a66f1f1a513120d1737e21e6a93ec6c64
|
[
"BSD-2-Clause"
] | null | null | null |
import sys
import os
import argparse
from pyhocon import ConfigFactory
def parse_args(
callback=None,
training=False,
default_conf="conf/default_mv.conf",
default_expname="example",
default_data_format="dvr",
default_num_epochs=100,
default_lr=1e-4,
default_gamma=1.00,
default_datadir="data",
default_ray_batch_size=50000,
):
parser = argparse.ArgumentParser()
parser.add_argument("--conf", "-c", type=str, default=None)
parser.add_argument("--resume", "-r", action="store_true", help="continue training")
parser.add_argument(
"--gpu_id", type=str, default="0", help="GPU(s) to use, space delimited"
)
parser.add_argument(
"--name", "-n", type=str, default=default_expname, help="experiment name"
)
parser.add_argument(
"--dataset_format",
"-F",
type=str,
default=None,
help="Dataset format, multi_obj | dvr | dvr_gen | dvr_dtu | srn",
)
parser.add_argument(
"--exp_group_name",
"-G",
type=str,
default=None,
help="if we want to group some experiments together",
)
parser.add_argument(
"--logs_path", type=str, default="logs", help="logs output directory",
)
parser.add_argument(
"--checkpoints_path",
type=str,
default="checkpoints",
help="checkpoints output directory",
)
parser.add_argument(
"--visual_path",
type=str,
default="visuals",
help="visualization output directory",
)
parser.add_argument(
"--epochs",
type=int,
default=default_num_epochs,
help="number of epochs to train for",
)
parser.add_argument("--lr", type=float, default=default_lr, help="learning rate")
parser.add_argument(
"--gamma", type=float, default=default_gamma, help="learning rate decay factor"
)
parser.add_argument(
"--datadir", "-D", type=str, default=None, help="Dataset directory"
)
parser.add_argument(
"--ray_batch_size", "-R", type=int, default=default_ray_batch_size, help="Ray batch size"
)
if callback is not None:
parser = callback(parser)
args = parser.parse_args()
if args.exp_group_name is not None:
args.logs_path = os.path.join(args.logs_path, args.exp_group_name)
args.checkpoints_path = os.path.join(args.checkpoints_path, args.exp_group_name)
args.visual_path = os.path.join(args.visual_path, args.exp_group_name)
os.makedirs(os.path.join(args.checkpoints_path, args.name), exist_ok=True)
os.makedirs(os.path.join(args.visual_path, args.name), exist_ok=True)
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
EXPCONF_PATH = os.path.join(PROJECT_ROOT, "expconf.conf")
expconf = ConfigFactory.parse_file(EXPCONF_PATH)
if args.conf is None:
args.conf = expconf.get_string("config." + args.name, default_conf)
if args.datadir is None:
args.datadir = expconf.get_string("datadir." + args.name, default_datadir)
conf = ConfigFactory.parse_file(args.root + args.conf)
if args.dataset_format is None:
args.dataset_format = conf.get_string("data.format", default_data_format)
args.gpu_id = list(map(int, args.gpu_id.split()))
print("EXPERIMENT NAME:", args.name)
if training:
print("CONTINUE?", "yes" if args.resume else "no")
print("* Config file:", args.conf)
print("* Dataset format:", args.dataset_format)
print("* Dataset location:", args.datadir)
return args, conf
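# --- Hedged usage sketch (not part of the original script) ---
# `parse_args` is meant to be called from a training/eval entry point; the
# optional `callback` lets a caller register extra flags before parsing.  The
# flag name `--subset` below is an illustrative assumption, and actually calling
# `parse_args` still requires `expconf.conf` and the referenced config/data
# paths to exist on disk, e.g. `args, conf = parse_args(callback=_extra_args_example)`.
def _extra_args_example(parser):
    # register a hypothetical extra flag; any argparse option works here
    parser.add_argument("--subset", type=int, default=0)
    return parser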
| 32.929204
| 97
| 0.649019
|
9006ce18bba659bad8fd7eb81726085dda59b4e4
| 174
|
py
|
Python
|
gettingstarted/wsgi.py
|
Daniil-7/Tatem-Avto-Saransk
|
b348322e10217cdaea873eabfd5c37dd413f54dc
|
[
"MIT"
] | null | null | null |
gettingstarted/wsgi.py
|
Daniil-7/Tatem-Avto-Saransk
|
b348322e10217cdaea873eabfd5c37dd413f54dc
|
[
"MIT"
] | null | null | null |
gettingstarted/wsgi.py
|
Daniil-7/Tatem-Avto-Saransk
|
b348322e10217cdaea873eabfd5c37dd413f54dc
|
[
"MIT"
] | null | null | null |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gettingstarted.settings")
application = get_wsgi_application()
| 24.857143
| 74
| 0.83908
|
badecb0eed443c9ef7e550973cf1c5d3c49800ab
| 33,249
|
py
|
Python
|
mmdet/models/dense_heads/cascade_rpn_head.py
|
hokmund/mmdetection
|
7d49b7b535456929333d71a543159a00d7ae2faf
|
[
"Apache-2.0"
] | 94
|
2021-03-07T01:34:35.000Z
|
2022-03-05T15:47:41.000Z
|
mmdet/models/dense_heads/cascade_rpn_head.py
|
hokmund/mmdetection
|
7d49b7b535456929333d71a543159a00d7ae2faf
|
[
"Apache-2.0"
] | 13
|
2021-10-09T07:08:17.000Z
|
2022-01-06T05:53:45.000Z
|
mmdet/models/dense_heads/cascade_rpn_head.py
|
hokmund/mmdetection
|
7d49b7b535456929333d71a543159a00d7ae2faf
|
[
"Apache-2.0"
] | 19
|
2021-06-08T14:04:07.000Z
|
2022-01-17T20:06:42.000Z
|
from __future__ import division
import copy
import warnings
import torch
import torch.nn as nn
from mmcv import ConfigDict
from mmcv.ops import DeformConv2d, batched_nms
from mmcv.runner import BaseModule, ModuleList
from mmdet.core import (RegionAssigner, build_assigner, build_sampler,
images_to_levels, multi_apply)
from ..builder import HEADS, build_head
from .base_dense_head import BaseDenseHead
from .rpn_head import RPNHead
class AdaptiveConv(BaseModule):
"""AdaptiveConv used to adapt the sampling location with the anchors.
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the conv kernel. Default: 3
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of
the input. Default: 1
dilation (int or tuple, optional): Spacing between kernel elements.
Default: 3
groups (int, optional): Number of blocked connections from input
channels to output channels. Default: 1
bias (bool, optional): If set True, adds a learnable bias to the
output. Default: False.
type (str, optional): Type of adaptive conv, can be either 'offset'
(arbitrary anchors) or 'dilation' (uniform anchor).
Default: 'dilation'.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
dilation=3,
groups=1,
bias=False,
type='dilation',
init_cfg=dict(
type='Normal', std=0.01, override=dict(name='conv'))):
super(AdaptiveConv, self).__init__(init_cfg)
assert type in ['offset', 'dilation']
self.adapt_type = type
assert kernel_size == 3, 'Adaptive conv only supports kernels 3'
if self.adapt_type == 'offset':
            assert stride == 1 and padding == 1 and groups == 1, \
                'Adaptive conv offset mode only supports ' \
                'padding: 1, stride: 1, groups: 1'
self.conv = DeformConv2d(
in_channels,
out_channels,
kernel_size,
padding=padding,
stride=stride,
groups=groups,
bias=bias)
else:
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size,
padding=dilation,
dilation=dilation)
def forward(self, x, offset):
"""Forward function."""
if self.adapt_type == 'offset':
N, _, H, W = x.shape
assert offset is not None
assert H * W == offset.shape[1]
# reshape [N, NA, 18] to (N, 18, H, W)
offset = offset.permute(0, 2, 1).reshape(N, -1, H, W)
offset = offset.contiguous()
x = self.conv(x, offset)
else:
assert offset is None
x = self.conv(x)
return x
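# --- Hedged usage sketch (not part of upstream mmdet) ---
# A minimal check of the 'dilation' branch of AdaptiveConv above: with
# kernel_size=3 and padding equal to the dilation, the spatial size of the
# feature map is preserved and no offsets are needed.  The tensor shapes are
# illustrative assumptions; the 'offset' branch additionally needs an
# (N, H*W, 18) offset tensor such as the one produced by `anchor_offset`.
def _adaptive_conv_dilation_demo():
    conv = AdaptiveConv(in_channels=4, out_channels=8, dilation=3, type='dilation')
    feat = torch.randn(2, 4, 16, 16)
    out = conv(feat, offset=None)
    assert out.shape == (2, 8, 16, 16)
    return out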
@HEADS.register_module()
class StageCascadeRPNHead(RPNHead):
"""Stage of CascadeRPNHead.
Args:
in_channels (int): Number of channels in the input feature map.
anchor_generator (dict): anchor generator config.
adapt_cfg (dict): adaptation config.
bridged_feature (bool, optional): whether update rpn feature.
Default: False.
        with_cls (bool, optional): whether to use the classification branch.
            Default: True.
        sampling (bool, optional): whether to use sampling. Default: True.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
in_channels,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[1.0],
strides=[4, 8, 16, 32, 64]),
adapt_cfg=dict(type='dilation', dilation=3),
bridged_feature=False,
with_cls=True,
sampling=True,
init_cfg=None,
**kwargs):
self.with_cls = with_cls
self.anchor_strides = anchor_generator['strides']
self.anchor_scales = anchor_generator['scales']
self.bridged_feature = bridged_feature
self.adapt_cfg = adapt_cfg
super(StageCascadeRPNHead, self).__init__(
in_channels,
anchor_generator=anchor_generator,
init_cfg=init_cfg,
**kwargs)
# override sampling and sampler
self.sampling = sampling
if self.train_cfg:
self.assigner = build_assigner(self.train_cfg.assigner)
# use PseudoSampler when sampling is False
if self.sampling and hasattr(self.train_cfg, 'sampler'):
sampler_cfg = self.train_cfg.sampler
else:
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
if init_cfg is None:
self.init_cfg = dict(
type='Normal', std=0.01, override=[dict(name='rpn_reg')])
if self.with_cls:
self.init_cfg['override'].append(dict(name='rpn_cls'))
def _init_layers(self):
"""Init layers of a CascadeRPN stage."""
self.rpn_conv = AdaptiveConv(self.in_channels, self.feat_channels,
**self.adapt_cfg)
if self.with_cls:
self.rpn_cls = nn.Conv2d(self.feat_channels,
self.num_anchors * self.cls_out_channels,
1)
self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1)
self.relu = nn.ReLU(inplace=True)
def forward_single(self, x, offset):
"""Forward function of single scale."""
bridged_x = x
x = self.relu(self.rpn_conv(x, offset))
if self.bridged_feature:
bridged_x = x # update feature
cls_score = self.rpn_cls(x) if self.with_cls else None
bbox_pred = self.rpn_reg(x)
return bridged_x, cls_score, bbox_pred
def forward(self, feats, offset_list=None):
"""Forward function."""
if offset_list is None:
offset_list = [None for _ in range(len(feats))]
return multi_apply(self.forward_single, feats, offset_list)
def _region_targets_single(self,
anchors,
valid_flags,
gt_bboxes,
gt_bboxes_ignore,
gt_labels,
img_meta,
featmap_sizes,
label_channels=1):
"""Get anchor targets based on region for single level."""
assign_result = self.assigner.assign(
anchors,
valid_flags,
gt_bboxes,
img_meta,
featmap_sizes,
self.anchor_scales[0],
self.anchor_strides,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=None,
allowed_border=self.train_cfg.allowed_border)
flat_anchors = torch.cat(anchors)
sampling_result = self.sampler.sample(assign_result, flat_anchors,
gt_bboxes)
num_anchors = flat_anchors.shape[0]
bbox_targets = torch.zeros_like(flat_anchors)
bbox_weights = torch.zeros_like(flat_anchors)
labels = flat_anchors.new_zeros(num_anchors, dtype=torch.long)
label_weights = flat_anchors.new_zeros(num_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
if not self.reg_decoded_bbox:
pos_bbox_targets = self.bbox_coder.encode(
sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
else:
pos_bbox_targets = sampling_result.pos_gt_bboxes
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_weights[pos_inds, :] = 1.0
if gt_labels is None:
labels[pos_inds] = 1
else:
labels[pos_inds] = gt_labels[
sampling_result.pos_assigned_gt_inds]
if self.train_cfg.pos_weight <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = self.train_cfg.pos_weight
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
neg_inds)
def region_targets(self,
anchor_list,
valid_flag_list,
gt_bboxes_list,
img_metas,
featmap_sizes,
gt_bboxes_ignore_list=None,
gt_labels_list=None,
label_channels=1,
unmap_outputs=True):
"""See :func:`StageCascadeRPNHead.get_targets`."""
num_imgs = len(img_metas)
assert len(anchor_list) == len(valid_flag_list) == num_imgs
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
# compute targets for each image
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
(all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,
pos_inds_list, neg_inds_list) = multi_apply(
self._region_targets_single,
anchor_list,
valid_flag_list,
gt_bboxes_list,
gt_bboxes_ignore_list,
gt_labels_list,
img_metas,
featmap_sizes=featmap_sizes,
label_channels=label_channels)
# no valid anchors
if any([labels is None for labels in all_labels]):
return None
# sampled anchors of all images
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
# split targets to a list w.r.t. multiple levels
labels_list = images_to_levels(all_labels, num_level_anchors)
label_weights_list = images_to_levels(all_label_weights,
num_level_anchors)
bbox_targets_list = images_to_levels(all_bbox_targets,
num_level_anchors)
bbox_weights_list = images_to_levels(all_bbox_weights,
num_level_anchors)
return (labels_list, label_weights_list, bbox_targets_list,
bbox_weights_list, num_total_pos, num_total_neg)
def get_targets(self,
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
featmap_sizes,
gt_bboxes_ignore=None,
label_channels=1):
"""Compute regression and classification targets for anchors.
Args:
anchor_list (list[list]): Multi level anchors of each image.
valid_flag_list (list[list]): Multi level valid flags of each
image.
gt_bboxes (list[Tensor]): Ground truth bboxes of each image.
img_metas (list[dict]): Meta info of each image.
            featmap_sizes (list[Tensor]): Feature map size of each level.
            gt_bboxes_ignore (list[Tensor]): Ignored bboxes of each image.
label_channels (int): Channel of label.
Returns:
cls_reg_targets (tuple)
"""
if isinstance(self.assigner, RegionAssigner):
cls_reg_targets = self.region_targets(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
featmap_sizes,
gt_bboxes_ignore_list=gt_bboxes_ignore,
label_channels=label_channels)
else:
cls_reg_targets = super(StageCascadeRPNHead, self).get_targets(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
label_channels=label_channels)
return cls_reg_targets
def anchor_offset(self, anchor_list, anchor_strides, featmap_sizes):
""" Get offest for deformable conv based on anchor shape
NOTE: currently support deformable kernel_size=3 and dilation=1
Args:
anchor_list (list[list[tensor])): [NI, NLVL, NA, 4] list of
multi-level anchors
anchor_strides (list[int]): anchor stride of each level
Returns:
offset_list (list[tensor]): [NLVL, NA, 2, 18]: offset of DeformConv
kernel.
"""
def _shape_offset(anchors, stride, ks=3, dilation=1):
# currently support kernel_size=3 and dilation=1
assert ks == 3 and dilation == 1
pad = (ks - 1) // 2
idx = torch.arange(-pad, pad + 1, dtype=dtype, device=device)
yy, xx = torch.meshgrid(idx, idx) # return order matters
xx = xx.reshape(-1)
yy = yy.reshape(-1)
w = (anchors[:, 2] - anchors[:, 0]) / stride
h = (anchors[:, 3] - anchors[:, 1]) / stride
w = w / (ks - 1) - dilation
h = h / (ks - 1) - dilation
offset_x = w[:, None] * xx # (NA, ks**2)
offset_y = h[:, None] * yy # (NA, ks**2)
return offset_x, offset_y
def _ctr_offset(anchors, stride, featmap_size):
feat_h, feat_w = featmap_size
assert len(anchors) == feat_h * feat_w
x = (anchors[:, 0] + anchors[:, 2]) * 0.5
y = (anchors[:, 1] + anchors[:, 3]) * 0.5
# compute centers on feature map
x = x / stride
y = y / stride
# compute predefine centers
xx = torch.arange(0, feat_w, device=anchors.device)
yy = torch.arange(0, feat_h, device=anchors.device)
yy, xx = torch.meshgrid(yy, xx)
xx = xx.reshape(-1).type_as(x)
yy = yy.reshape(-1).type_as(y)
offset_x = x - xx # (NA, )
offset_y = y - yy # (NA, )
return offset_x, offset_y
num_imgs = len(anchor_list)
num_lvls = len(anchor_list[0])
dtype = anchor_list[0][0].dtype
device = anchor_list[0][0].device
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
offset_list = []
for i in range(num_imgs):
mlvl_offset = []
for lvl in range(num_lvls):
c_offset_x, c_offset_y = _ctr_offset(anchor_list[i][lvl],
anchor_strides[lvl],
featmap_sizes[lvl])
s_offset_x, s_offset_y = _shape_offset(anchor_list[i][lvl],
anchor_strides[lvl])
# offset = ctr_offset + shape_offset
offset_x = s_offset_x + c_offset_x[:, None]
offset_y = s_offset_y + c_offset_y[:, None]
                # offset order (y0, x0, y1, x1, ..., y8, x8)
offset = torch.stack([offset_y, offset_x], dim=-1)
offset = offset.reshape(offset.size(0), -1) # [NA, 2*ks**2]
mlvl_offset.append(offset)
offset_list.append(torch.cat(mlvl_offset)) # [totalNA, 2*ks**2]
offset_list = images_to_levels(offset_list, num_level_anchors)
return offset_list
def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights,
bbox_targets, bbox_weights, num_total_samples):
"""Loss function on single scale."""
# classification loss
if self.with_cls:
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score = cls_score.permute(0, 2, 3,
1).reshape(-1, self.cls_out_channels)
loss_cls = self.loss_cls(
cls_score, labels, label_weights, avg_factor=num_total_samples)
# regression loss
bbox_targets = bbox_targets.reshape(-1, 4)
bbox_weights = bbox_weights.reshape(-1, 4)
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
if self.reg_decoded_bbox:
# When the regression loss (e.g. `IouLoss`, `GIouLoss`)
# is applied directly on the decoded bounding boxes, it
# decodes the already encoded coordinates to absolute format.
anchors = anchors.reshape(-1, 4)
bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
loss_reg = self.loss_bbox(
bbox_pred,
bbox_targets,
bbox_weights,
avg_factor=num_total_samples)
if self.with_cls:
return loss_cls, loss_reg
return None, loss_reg
def loss(self,
anchor_list,
valid_flag_list,
cls_scores,
bbox_preds,
gt_bboxes,
img_metas,
gt_bboxes_ignore=None):
"""Compute losses of the head.
Args:
anchor_list (list[list]): Multi level anchors of each image.
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss. Default: None
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds]
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
featmap_sizes,
gt_bboxes_ignore=gt_bboxes_ignore,
label_channels=label_channels)
if cls_reg_targets is None:
return None
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
num_total_pos, num_total_neg) = cls_reg_targets
if self.sampling:
num_total_samples = num_total_pos + num_total_neg
else:
# 200 is hard-coded average factor,
# which follows guided anchoring.
num_total_samples = sum([label.numel()
for label in labels_list]) / 200.0
# change per image, per level anchor_list to per_level, per_image
mlvl_anchor_list = list(zip(*anchor_list))
# concat mlvl_anchor_list
mlvl_anchor_list = [
torch.cat(anchors, dim=0) for anchors in mlvl_anchor_list
]
losses = multi_apply(
self.loss_single,
cls_scores,
bbox_preds,
mlvl_anchor_list,
labels_list,
label_weights_list,
bbox_targets_list,
bbox_weights_list,
num_total_samples=num_total_samples)
if self.with_cls:
return dict(loss_rpn_cls=losses[0], loss_rpn_reg=losses[1])
return dict(loss_rpn_reg=losses[1])
def get_bboxes(self,
anchor_list,
cls_scores,
bbox_preds,
img_metas,
cfg,
rescale=False):
"""Get proposal predict."""
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list,
anchor_list[img_id], img_shape,
scale_factor, cfg, rescale)
result_list.append(proposals)
return result_list
def refine_bboxes(self, anchor_list, bbox_preds, img_metas):
"""Refine bboxes through stages."""
num_levels = len(bbox_preds)
new_anchor_list = []
for img_id in range(len(img_metas)):
mlvl_anchors = []
for i in range(num_levels):
bbox_pred = bbox_preds[i][img_id].detach()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
img_shape = img_metas[img_id]['img_shape']
bboxes = self.bbox_coder.decode(anchor_list[img_id][i],
bbox_pred, img_shape)
mlvl_anchors.append(bboxes)
new_anchor_list.append(mlvl_anchors)
return new_anchor_list
# TODO: temporary plan
def _get_bboxes_single(self,
cls_scores,
bbox_preds,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=False):
"""Transform outputs for a single batch item into bbox predictions.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (num_anchors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (num_anchors * 4, H, W).
mlvl_anchors (list[Tensor]): Box reference for each scale level
with shape (num_total_anchors, 4).
img_shape (tuple[int]): Shape of the input image,
(height, width, 3).
            scale_factor (ndarray): Scale factor of the image, arranged as
(w_scale, h_scale, w_scale, h_scale).
cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Returns:
Tensor: Labeled boxes have the shape of (n,5), where the
first 4 columns are bounding box positions
(tl_x, tl_y, br_x, br_y) and the 5-th column is a score
between 0 and 1.
"""
cfg = self.test_cfg if cfg is None else cfg
cfg = copy.deepcopy(cfg)
# bboxes from different level should be independent during NMS,
# level_ids are used as labels for batched NMS to separate them
level_ids = []
mlvl_scores = []
mlvl_bbox_preds = []
mlvl_valid_anchors = []
for idx in range(len(cls_scores)):
rpn_cls_score = cls_scores[idx]
rpn_bbox_pred = bbox_preds[idx]
assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
if self.use_sigmoid_cls:
rpn_cls_score = rpn_cls_score.reshape(-1)
scores = rpn_cls_score.sigmoid()
else:
rpn_cls_score = rpn_cls_score.reshape(-1, 2)
# We set FG labels to [0, num_class-1] and BG label to
# num_class in RPN head since mmdet v2.5, which is unified to
# be consistent with other head since mmdet v2.0. In mmdet v2.0
# to v2.4 we keep BG label as 0 and FG label as 1 in rpn head.
scores = rpn_cls_score.softmax(dim=1)[:, 0]
rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4)
anchors = mlvl_anchors[idx]
if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
# sort is faster than topk
# _, topk_inds = scores.topk(cfg.nms_pre)
if torch.onnx.is_in_onnx_export():
# sort op will be converted to TopK in onnx
# and k<=3480 in TensorRT
_, topk_inds = scores.topk(cfg.nms_pre)
scores = scores[topk_inds]
else:
ranked_scores, rank_inds = scores.sort(descending=True)
topk_inds = rank_inds[:cfg.nms_pre]
scores = ranked_scores[:cfg.nms_pre]
rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
anchors = anchors[topk_inds, :]
mlvl_scores.append(scores)
mlvl_bbox_preds.append(rpn_bbox_pred)
mlvl_valid_anchors.append(anchors)
level_ids.append(
scores.new_full((scores.size(0), ), idx, dtype=torch.long))
scores = torch.cat(mlvl_scores)
anchors = torch.cat(mlvl_valid_anchors)
rpn_bbox_pred = torch.cat(mlvl_bbox_preds)
proposals = self.bbox_coder.decode(
anchors, rpn_bbox_pred, max_shape=img_shape)
ids = torch.cat(level_ids)
# Skip nonzero op while exporting to ONNX
if cfg.min_bbox_size >= 0 and (not torch.onnx.is_in_onnx_export()):
w = proposals[:, 2] - proposals[:, 0]
h = proposals[:, 3] - proposals[:, 1]
valid_inds = torch.nonzero(
(w > cfg.min_bbox_size)
& (h > cfg.min_bbox_size),
as_tuple=False).squeeze()
if valid_inds.sum().item() != len(proposals):
proposals = proposals[valid_inds, :]
scores = scores[valid_inds]
ids = ids[valid_inds]
# deprecate arguments warning
if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:
warnings.warn(
'In rpn_proposal or test_cfg, '
'nms_thr has been moved to a dict named nms as '
'iou_threshold, max_num has been renamed as max_per_img, '
'name of original arguments and the way to specify '
'iou_threshold of NMS will be deprecated.')
if 'nms' not in cfg:
cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))
if 'max_num' in cfg:
if 'max_per_img' in cfg:
assert cfg.max_num == cfg.max_per_img, f'You ' \
f'set max_num and ' \
f'max_per_img at the same time, but get {cfg.max_num} ' \
                    f'and {cfg.max_per_img} respectively. ' \
                    'Please delete max_num which will be deprecated.'
else:
cfg.max_per_img = cfg.max_num
if 'nms_thr' in cfg:
assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set' \
f' iou_threshold in nms and ' \
f'nms_thr at the same time, but get' \
f' {cfg.nms.iou_threshold} and {cfg.nms_thr}' \
f' respectively. Please delete the nms_thr ' \
f'which will be deprecated.'
dets, keep = batched_nms(proposals, scores, ids, cfg.nms)
return dets[:cfg.max_per_img]
@HEADS.register_module()
class CascadeRPNHead(BaseDenseHead):
"""The CascadeRPNHead will predict more accurate region proposals, which is
required for two-stage detectors (such as Fast/Faster R-CNN). CascadeRPN
consists of a sequence of RPNStage to progressively improve the accuracy of
the detected proposals.
More details can be found in ``https://arxiv.org/abs/1909.06720``.
Args:
num_stages (int): number of CascadeRPN stages.
stages (list[dict]): list of configs to build the stages.
train_cfg (list[dict]): list of configs at training time each stage.
test_cfg (dict): config at testing time.
"""
def __init__(self, num_stages, stages, train_cfg, test_cfg, init_cfg=None):
super(CascadeRPNHead, self).__init__(init_cfg)
assert num_stages == len(stages)
self.num_stages = num_stages
# Be careful! Pretrained weights cannot be loaded when use
# nn.ModuleList
self.stages = ModuleList()
for i in range(len(stages)):
train_cfg_i = train_cfg[i] if train_cfg is not None else None
stages[i].update(train_cfg=train_cfg_i)
stages[i].update(test_cfg=test_cfg)
self.stages.append(build_head(stages[i]))
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def loss(self):
"""loss() is implemented in StageCascadeRPNHead."""
pass
def get_bboxes(self):
"""get_bboxes() is implemented in StageCascadeRPNHead."""
pass
def forward_train(self,
x,
img_metas,
gt_bboxes,
gt_labels=None,
gt_bboxes_ignore=None,
proposal_cfg=None):
"""Forward train function."""
assert gt_labels is None, 'RPN does not require gt_labels'
featmap_sizes = [featmap.size()[-2:] for featmap in x]
device = x[0].device
anchor_list, valid_flag_list = self.stages[0].get_anchors(
featmap_sizes, img_metas, device=device)
losses = dict()
for i in range(self.num_stages):
stage = self.stages[i]
if stage.adapt_cfg['type'] == 'offset':
offset_list = stage.anchor_offset(anchor_list,
stage.anchor_strides,
featmap_sizes)
else:
offset_list = None
x, cls_score, bbox_pred = stage(x, offset_list)
rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score,
bbox_pred, gt_bboxes, img_metas)
stage_loss = stage.loss(*rpn_loss_inputs)
for name, value in stage_loss.items():
losses['s{}.{}'.format(i, name)] = value
# refine boxes
if i < self.num_stages - 1:
anchor_list = stage.refine_bboxes(anchor_list, bbox_pred,
img_metas)
if proposal_cfg is None:
return losses
else:
proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score,
bbox_pred, img_metas,
self.test_cfg)
return losses, proposal_list
def simple_test_rpn(self, x, img_metas):
"""Simple forward test function."""
featmap_sizes = [featmap.size()[-2:] for featmap in x]
device = x[0].device
anchor_list, _ = self.stages[0].get_anchors(
featmap_sizes, img_metas, device=device)
for i in range(self.num_stages):
stage = self.stages[i]
if stage.adapt_cfg['type'] == 'offset':
offset_list = stage.anchor_offset(anchor_list,
stage.anchor_strides,
featmap_sizes)
else:
offset_list = None
x, cls_score, bbox_pred = stage(x, offset_list)
if i < self.num_stages - 1:
anchor_list = stage.refine_bboxes(anchor_list, bbox_pred,
img_metas)
proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score,
bbox_pred, img_metas,
self.test_cfg)
return proposal_list
def aug_test_rpn(self, x, img_metas):
"""Augmented forward test function."""
raise NotImplementedError
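# --- Hedged illustration (not part of upstream mmdet) ---
# A self-contained torch sketch of the centre-offset term computed inside
# `StageCascadeRPNHead.anchor_offset`: for every anchor, the offset is the
# distance (in feature-map cells) between the anchor centre and the regular
# grid location the deformable-conv kernel would normally sample.  The toy
# anchors and feature-map size below are illustrative assumptions.
def _ctr_offset_demo():
    stride, feat_h, feat_w = 4, 2, 2
    # one anchor per feature-map cell in (x1, y1, x2, y2) format
    anchors = torch.tensor([[0., 0., 8., 8.], [4., 0., 12., 8.],
                            [0., 4., 8., 12.], [4., 4., 12., 12.]])
    cx = (anchors[:, 0] + anchors[:, 2]) * 0.5 / stride
    cy = (anchors[:, 1] + anchors[:, 3]) * 0.5 / stride
    yy, xx = torch.meshgrid(torch.arange(feat_h), torch.arange(feat_w))
    offset_x = cx - xx.reshape(-1).float()
    offset_y = cy - yy.reshape(-1).float()
    return offset_x, offset_y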
| 42.301527
| 79
| 0.552618
|
d7b3828e372a6c55bc2d9e7aff06c0f7cd79fa9b
| 4,571
|
py
|
Python
|
vnpy/trader/app/ctaStrategy/strategy/strategyDoubleMa.py
|
black0144/vnpy
|
0d0ea30dad14a0150f7500ff9a62528030321426
|
[
"MIT"
] | 18
|
2019-02-21T05:42:41.000Z
|
2022-03-31T10:17:51.000Z
|
vnpy/trader/app/ctaStrategy/strategy/strategyDoubleMa.py
|
black0144/vnpy
|
0d0ea30dad14a0150f7500ff9a62528030321426
|
[
"MIT"
] | 1
|
2018-06-12T10:08:24.000Z
|
2018-06-12T10:08:24.000Z
|
vnpy/trader/app/ctaStrategy/strategy/strategyDoubleMa.py
|
black0144/vnpy
|
0d0ea30dad14a0150f7500ff9a62528030321426
|
[
"MIT"
] | 5
|
2017-12-20T09:57:17.000Z
|
2021-08-01T19:47:14.000Z
|
# encoding: UTF-8
"""
This demo is a minimal implementation of a double moving-average strategy.
"""
from __future__ import division
from vnpy.trader.vtConstant import EMPTY_STRING, EMPTY_FLOAT
from vnpy.trader.app.ctaStrategy.ctaTemplate import (CtaTemplate,
BarGenerator,
ArrayManager)
########################################################################
class DoubleMaStrategy(CtaTemplate):
"""双指数均线策略Demo"""
className = 'DoubleMaStrategy'
author = u'用Python的交易员'
    # Strategy parameters
    fastWindow = 10     # fast MA window
    slowWindow = 60     # slow MA window
    initDays = 10       # number of days of history used for initialization
    # Strategy variables
    fastMa0 = EMPTY_FLOAT   # latest fast EMA value
    fastMa1 = EMPTY_FLOAT   # fast EMA value of the previous bar
slowMa0 = EMPTY_FLOAT
slowMa1 = EMPTY_FLOAT
    # Parameter list: names of the strategy parameters
paramList = ['name',
'className',
'author',
'vtSymbol',
'fastWindow',
'slowWindow']
    # Variable list: names of the strategy variables
varList = ['inited',
'trading',
'pos',
'fastMa0',
'fastMa1',
'slowMa0',
'slowMa1']
    # Sync list: names of the variables persisted to the database
syncList = ['pos']
#----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
"""Constructor"""
super(DoubleMaStrategy, self).__init__(ctaEngine, setting)
self.bg = BarGenerator(self.onBar)
self.am = ArrayManager()
        # Note: mutable attributes declared at class level (typically list, dict, etc.)
        # must be re-created during strategy initialization, otherwise the data would be
        # shared between strategy instances and could cause subtle logic errors.  Declaring
        # them at class level is optional; it is done here mainly for readability.
#----------------------------------------------------------------------
def onInit(self):
"""初始化策略(必须由用户继承实现)"""
self.writeCtaLog(u'双EMA演示策略初始化')
initData = self.loadBar(self.initDays)
for bar in initData:
self.onBar(bar)
self.putEvent()
#----------------------------------------------------------------------
def onStart(self):
"""启动策略(必须由用户继承实现)"""
self.writeCtaLog(u'双EMA演示策略启动')
self.putEvent()
#----------------------------------------------------------------------
def onStop(self):
"""停止策略(必须由用户继承实现)"""
self.writeCtaLog(u'双EMA演示策略停止')
self.putEvent()
#----------------------------------------------------------------------
def onTick(self, tick):
"""收到行情TICK推送(必须由用户继承实现)"""
self.bg.updateTick(tick)
#----------------------------------------------------------------------
def onBar(self, bar):
"""收到Bar推送(必须由用户继承实现)"""
am = self.am
am.updateBar(bar)
if not am.inited:
return
        # Compute the fast and slow moving averages
fastMa = am.sma(self.fastWindow, array=True)
self.fastMa0 = fastMa[-1]
self.fastMa1 = fastMa[-2]
slowMa = am.sma(self.slowWindow, array=True)
self.slowMa0 = slowMa[-1]
self.slowMa1 = slowMa[-2]
        # Determine buy/sell signals
        crossOver = self.fastMa0>self.slowMa0 and self.fastMa1<self.slowMa1     # golden cross: fast MA crosses above slow MA
        crossBelow = self.fastMa0<self.slowMa0 and self.fastMa1>self.slowMa1    # death cross: fast MA crosses below slow MA
        # The golden-cross and death-cross conditions are mutually exclusive
        # All orders are placed at the bar close price (there is a risk they cannot be
        # filled in live trading; consider adding support for simulated market orders)
if crossOver:
            # On a golden cross with no existing position, open a long position
if self.pos == 0:
self.buy(bar.close, 1)
            # If holding a short position, cover it first, then go long
elif self.pos < 0:
self.cover(bar.close, 1)
self.buy(bar.close, 1)
        # A death cross is handled the opposite way
elif crossBelow:
if self.pos == 0:
self.short(bar.close, 1)
elif self.pos > 0:
self.sell(bar.close, 1)
self.short(bar.close, 1)
        # Emit a status update event
self.putEvent()
#----------------------------------------------------------------------
def onOrder(self, order):
"""收到委托变化推送(必须由用户继承实现)"""
# 对于无需做细粒度委托控制的策略,可以忽略onOrder
pass
#----------------------------------------------------------------------
def onTrade(self, trade):
"""收到成交推送(必须由用户继承实现)"""
# 对于无需做细粒度委托控制的策略,可以忽略onOrder
pass
#----------------------------------------------------------------------
def onStopOrder(self, so):
"""停止单推送"""
pass
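# --- Hedged illustration (not part of the original vnpy demo) ---
# A framework-free sketch of the crossover test used in onBar above: a golden
# cross occurs when the fast MA is above the slow MA on the current bar after
# being below it on the previous bar, and a death cross is the opposite.  The
# sample values are illustrative assumptions only.
def _crossover_demo():
    fast = [9.8, 10.1]    # [previous bar, current bar]
    slow = [10.0, 10.0]
    cross_over = fast[-1] > slow[-1] and fast[-2] < slow[-2]     # golden cross
    cross_below = fast[-1] < slow[-1] and fast[-2] > slow[-2]    # death cross
    return cross_over, cross_below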
| 30.072368
| 86
| 0.429009
|
5b6255b2c504e6bb2c61221dc40eaff55dee5413
| 1,431
|
py
|
Python
|
templatebot/__main__.py
|
PascalRoose/tgbot-template
|
72c87679e1598629a2739c06d7e69030ddbfd4f6
|
[
"MIT"
] | null | null | null |
templatebot/__main__.py
|
PascalRoose/tgbot-template
|
72c87679e1598629a2739c06d7e69030ddbfd4f6
|
[
"MIT"
] | null | null | null |
templatebot/__main__.py
|
PascalRoose/tgbot-template
|
72c87679e1598629a2739c06d7e69030ddbfd4f6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
from telegram.ext import messagequeue as mq
from telegram.utils import helpers
from telegram.utils.request import Request
from .utils.conf import settings, DIRS, MEMKEY
from .utils.log import init_logger
from .utils.tg import MQBot, get_updater, load_handlers, load_jobs
mqueue = mq.MessageQueue(all_burst_limit=10, all_time_limit_ms=3000)
request = Request(con_pool_size=8)
bot = MQBot(token=settings.TOKEN, request=request, mqueue=mqueue)
init_logger(os.path.join(DIRS.user_log_dir, f'{bot.get_me().username}.log'))
log = logging.getLogger(__name__)
persistance_path = os.path.join(DIRS.user_cache_dir, 'telegram.pkl')
updater = get_updater(bot=bot, persistence_path=persistance_path)
load_handlers(updater.dispatcher)
load_jobs(updater.job_queue)
if settings.ADMIN == 0:
print(f'Please start the bot using the following link to become admin: '
f'{helpers.create_deep_linked_url(bot.get_me().username, MEMKEY, group=False)}')
if settings.WEBHOOK.ENABLED:
updater.start_webhook(listen=settings.WEBHOOK.IP,
port=settings.WEBHOOK.PORT,
url_path=settings.WEBHOOK.PATH,
webhook_url=settings.WEBHOOK.URL)
log.info(f'Started webhook listener on: {settings.WEBHOOK.URL}')
else:
updater.start_polling()
log.info('Started polling...')
updater.idle()
| 33.27907
| 90
| 0.732355
|
2186665e6b4529941795a1bac30cd465f9d7b74e
| 6,279
|
py
|
Python
|
examples/QKD/qkd_eqsn.py
|
pritamsinha2304/QuNetSim
|
65a7486d532816724b5c98cfdcc0910404bfe0e2
|
[
"MIT"
] | 61
|
2020-02-15T00:59:20.000Z
|
2022-03-08T10:29:23.000Z
|
examples/QKD/qkd_eqsn.py
|
pritamsinha2304/QuNetSim
|
65a7486d532816724b5c98cfdcc0910404bfe0e2
|
[
"MIT"
] | 50
|
2020-01-28T12:18:50.000Z
|
2021-12-16T21:38:19.000Z
|
examples/QKD/qkd_eqsn.py
|
pritamsinha2304/QuNetSim
|
65a7486d532816724b5c98cfdcc0910404bfe0e2
|
[
"MIT"
] | 27
|
2020-01-21T12:59:28.000Z
|
2022-02-21T14:23:00.000Z
|
import numpy as np
import random
import time
from qunetsim.components import Host
from qunetsim.components import Network
from qunetsim.objects import Qubit
from qunetsim.objects import Logger
from qunetsim.backends import EQSNBackend
Logger.DISABLED = True
wait_time = 10
# !! Warning: this Crypto algorithm is really bad!
# !! Warning: Do not use it as a real Crypto Algorithm!
# key has to be a string
def encrypt(key, text):
encrypted_text = ""
for char in text:
encrypted_text += chr(ord(key[0]) ^ ord(char))
return encrypted_text
def decrypt(key, encrypted_text):
return encrypt(key, encrypted_text)
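# --- Hedged illustration (not part of the original example) ---
# Round-trip check for the toy XOR cipher above: decrypt(key, encrypt(key, m))
# returns m, because XOR-ing twice with the same byte is the identity.  Note
# that encrypt() only ever uses key[0], one reason it is flagged as insecure.
# The sample key and message are illustrative assumptions.
def _cipher_roundtrip_demo():
    key, message = "k", "hello eve"
    assert decrypt(key, encrypt(key, message)) == message
    return encrypt(key, message)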
def get_next_classical_message(host, receive_from_id, buffer, sequence_nr):
buffer = buffer + host.get_classical(receive_from_id, wait=-1)
msg = "ACK"
while msg == "ACK" or (msg.split(':')[0] != ("%d" % sequence_nr)):
if len(buffer) == 0:
buffer = buffer + host.get_classical(receive_from_id, wait=-1)
ele = buffer.pop(0)
msg = ele.content
return msg
def alice_qkd(alice, msg_buff, secret_key, receiver):
sequence_nr = 0
# iterate over all bits in the secret key.
for bit in secret_key:
ack = False
while not ack:
print("Alice sent %d key bits" % (sequence_nr + 1))
# get a random base. 0 for Z base and 1 for X base.
base = random.randint(0, 1)
# create qubit
q_bit = Qubit(alice)
# Set qubit to the bit from the secret key.
if bit == 1:
q_bit.X()
# Apply basis change to the bit if necessary.
if base == 1:
q_bit.H()
# Send Qubit to Bob
alice.send_qubit(receiver, q_bit, await_ack=True)
# Get measured basis of Bob
message = get_next_classical_message(
alice, receiver, msg_buff, sequence_nr)
            # Compare with the basis that was sent: if they match, reply with 0,
            # set ack to True and move on to the next bit; otherwise send 1 and repeat.
if message == ("%d:%d") % (sequence_nr, base):
ack = True
alice.send_classical(receiver, ("%d:0" %
sequence_nr), await_ack=True)
else:
ack = False
alice.send_classical(receiver, ("%d:1" %
sequence_nr), await_ack=True)
sequence_nr += 1
def eve_qkd(eve, msg_buff, key_size, sender):
sequence_nr = 0
received_counter = 0
key_array = []
while received_counter < key_size:
# decide for a measurement base
measurement_base = random.randint(0, 1)
# wait for the qubit
q_bit = eve.get_data_qubit(sender, wait=wait_time)
while q_bit is None:
q_bit = eve.get_data_qubit(sender, wait=wait_time)
# measure qubit in right measurement basis
if measurement_base == 1:
q_bit.H()
bit = q_bit.measure()
# Send Alice the base in which Bob has measured
eve.send_classical(sender, "%d:%d" %
(sequence_nr, measurement_base), await_ack=True)
# get the return message from Alice, to know if the bases have matched
msg = get_next_classical_message(eve, sender, msg_buff, sequence_nr)
# Check if the bases have matched
if msg == ("%d:0" % sequence_nr):
received_counter += 1
print("Eve received %d key bits." % received_counter)
key_array.append(bit)
sequence_nr += 1
return key_array
# helper function, used to make the key to a string
def key_array_to_key_string(key_array):
key_string_binary = ''.join([str(x) for x in key_array])
return ''.join(chr(int(''.join(x), 2)) for x in zip(*[iter(key_string_binary)] * 8))
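# --- Hedged illustration (not part of the original example) ---
# key_array_to_key_string packs the raw key bits into characters, 8 bits per
# character; any trailing bits beyond a multiple of 8 are silently dropped by
# zip().  The sample bit pattern is an illustrative assumption (01000001 -> 'A').
def _key_packing_demo():
    bits = [0, 1, 0, 0, 0, 0, 0, 1, 1]           # 9 bits: only the first 8 are used
    assert key_array_to_key_string(bits) == "A"  # int('01000001', 2) == 65 == ord('A')
    return key_array_to_key_string(bits)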
def alice_send_message(alice, secret_key, receiver):
msg_to_eve = "Hi Eve, how are you?"
secret_key_string = key_array_to_key_string(secret_key)
encrypted_msg_to_eve = encrypt(secret_key_string, msg_to_eve)
print("Alice sends encrypted message")
alice.send_classical(
receiver, "-1:" + encrypted_msg_to_eve, await_ack=True)
def eve_receive_message(eve, msg_buff, eve_key, sender):
encrypted_msg_from_alice = get_next_classical_message(
eve, sender, msg_buff, -1)
encrypted_msg_from_alice = encrypted_msg_from_alice.split(':')[1]
secret_key_string = key_array_to_key_string(eve_key)
decrypted_msg_from_alice = decrypt(
secret_key_string, encrypted_msg_from_alice)
print("Eve received decoded message: %s" % decrypted_msg_from_alice)
def main():
# Initialize a network
network = Network.get_instance()
backend = EQSNBackend()
# Define the host IDs in the network
nodes = ['Alice', 'Bob']
network.delay = 0.0
# Start the network with the defined hosts
network.start(nodes, backend)
# Initialize the host Alice
host_alice = Host('Alice', backend)
# Add a one-way connection (classical and quantum) to Bob
host_alice.add_connection('Bob')
host_alice.delay = 0.0
# Start listening
host_alice.start()
host_bob = Host('Bob', backend)
# Bob adds his own one-way connection to Alice
host_bob.add_connection('Alice')
host_bob.delay = 0.0
host_bob.start()
# Add the hosts to the network
# The network is: Alice <--> Bob
network.add_host(host_alice)
network.add_host(host_bob)
# Generate random key
key_size = 20 # the size of the key in bit
secret_key = np.random.randint(2, size=key_size)
# Concatentate functions
def alice_func(alice):
msg_buff = []
alice_qkd(alice, msg_buff, secret_key, host_bob.host_id)
alice_send_message(alice, secret_key, host_bob.host_id)
def bob_func(eve):
msg_buff = []
eve_key = eve_qkd(eve, msg_buff, key_size, host_alice.host_id)
eve_receive_message(eve, msg_buff, eve_key, host_alice.host_id)
# Run Bob and Alice
t1 = host_alice.run_protocol(alice_func, ())
t2 = host_bob.run_protocol(bob_func, ())
t1.join()
t2.join()
network.stop(True)
if __name__ == '__main__':
main()
| 30.480583
| 96
| 0.629877
|
f7caf3e9b395602c33812d4dab7e428005a17d7d
| 85
|
py
|
Python
|
backend/metric/ulca-metric-api/src/services/__init__.py
|
agupta54/ulca
|
c1f570ac254ce2ac73f40c49716458f4f7cbaee2
|
[
"MIT"
] | 3
|
2022-01-12T06:51:51.000Z
|
2022-02-23T18:54:33.000Z
|
backend/metric/ulca-metric-api/src/services/__init__.py
|
agupta54/ulca
|
c1f570ac254ce2ac73f40c49716458f4f7cbaee2
|
[
"MIT"
] | 6
|
2021-08-31T19:21:26.000Z
|
2022-01-03T05:53:42.000Z
|
backend/metric/ulca-metric-api/src/services/__init__.py
|
agupta54/ulca
|
c1f570ac254ce2ac73f40c49716458f4f7cbaee2
|
[
"MIT"
] | 8
|
2021-08-12T08:07:49.000Z
|
2022-01-25T04:40:51.000Z
|
from .metriccronjob import CronProcessor
from .mismatchcron import AlertCronProcessor
| 42.5
| 44
| 0.894118
|
5acecf2be0f1820422b92530928b5c26dfadcb89
| 3,271
|
py
|
Python
|
backend/migrations/versions/6680bd9737cf_.py
|
kzagorulko/flower-system
|
7203862e6366ac08c7be939ef443aa274c04ec63
|
[
"MIT"
] | 3
|
2020-10-26T08:54:43.000Z
|
2021-05-29T09:55:34.000Z
|
backend/migrations/versions/6680bd9737cf_.py
|
kzagorulko/flower-system
|
7203862e6366ac08c7be939ef443aa274c04ec63
|
[
"MIT"
] | 28
|
2020-10-25T10:20:54.000Z
|
2021-02-04T10:51:57.000Z
|
backend/migrations/versions/6680bd9737cf_.py
|
kzagorulko/flower-system
|
7203862e6366ac08c7be939ef443aa274c04ec63
|
[
"MIT"
] | 1
|
2020-11-12T10:07:07.000Z
|
2020-11-12T10:07:07.000Z
|
"""added roles, more fields for users
Revision ID: 6680bd9737cf
Revises: 498f307695d6
Create Date: 2020-11-01 15:01:10.385506
"""
from alembic import op
import sqlalchemy as sa
from uuid import uuid4
# revision identifiers, used by Alembic.
revision = '6680bd9737cf'
down_revision = '498f307695d6'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
t_roles = op.create_table(
'roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=False),
sa.Column('display_name', sa.String(length=64), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('display_name'),
sa.UniqueConstraint('name')
)
op.add_column(
'users', sa.Column('deactivated', sa.Boolean(), nullable=True)
)
op.add_column(
'users',
sa.Column('display_name', sa.String(length=50), nullable=True)
)
op.add_column(
'users', sa.Column('email', sa.String(length=50), nullable=True)
)
op.add_column(
'users',
sa.Column('path_to_image', sa.String(length=120), nullable=True)
)
op.add_column(
'users', sa.Column('role_id', sa.Integer(), nullable=True)
)
op.add_column('users', sa.Column('session', sa.String(length=36)))
t_users = sa.Table(
'users',
sa.MetaData(),
sa.Column('id', sa.Integer),
sa.Column('username', sa.String),
sa.Column('deactivated', sa.Boolean),
sa.Column('display_name', sa.String),
sa.Column('email', sa.String),
sa.Column('session', sa.String),
sa.Column('role_id', sa.Integer)
)
connection = op.get_bind()
connection.execute(
sa.insert(t_roles).values(
{'name': 'admin', 'display_name': 'Администрация'}
)
)
users = connection.execute(
sa.select([t_users.c.id, t_users.c.username])
).fetchall()
for user in users:
connection.execute(
sa.update(t_users).where(
t_users.c.id == user[0]
).values(
email=f'{user[1]}@example.org',
deactivated=False,
display_name='Иван',
session=str(uuid4()),
role_id=1
)
)
op.alter_column('users', 'deactivated', nullable=False)
op.alter_column('users', 'display_name', nullable=False)
op.alter_column('users', 'email', nullable=False)
op.create_unique_constraint('uq_email', 'users', ['email'])
op.create_foreign_key(
'fk_users_roles', 'users', 'roles', ['role_id'], ['id']
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('fk_users_roles', 'users', type_='foreignkey')
op.drop_constraint('uq_email', 'users', type_='unique')
op.drop_column('users', 'role_id')
op.drop_column('users', 'path_to_image')
op.drop_column('users', 'email')
op.drop_column('users', 'display_name')
op.drop_column('users', 'deactivated')
op.drop_column('users', 'session')
op.drop_table('roles')
# ### end Alembic commands ###
| 29.736364
| 72
| 0.602874
|
663a6bd89833d89ea6568a4458d8a2e14abd8641
| 4,037
|
py
|
Python
|
pearll/models/encoders.py
|
LondonNode/Anvil
|
bc50fd7b16af36051157814e2548a98e787b03de
|
[
"MIT"
] | 13
|
2022-01-17T14:43:05.000Z
|
2022-03-10T04:05:36.000Z
|
pearll/models/encoders.py
|
LondonNode/Anvil
|
bc50fd7b16af36051157814e2548a98e787b03de
|
[
"MIT"
] | 3
|
2022-02-24T18:29:12.000Z
|
2022-03-22T11:09:07.000Z
|
pearll/models/encoders.py
|
LondonNode/Anvil
|
bc50fd7b16af36051157814e2548a98e787b03de
|
[
"MIT"
] | null | null | null |
from typing import Dict, List, Optional, Type
import numpy as np
import torch as T
from gym import spaces
from pearll.common.type_aliases import Tensor
from pearll.common.utils import to_numpy
from pearll.models.utils import preprocess_inputs
class IdentityEncoder(T.nn.Module):
"""This encoder passes the input through unchanged."""
def __init__(self):
super().__init__()
def forward(
self, observations: Tensor, actions: Optional[Tensor] = None
) -> T.Tensor:
        # Some algorithms use both the observations and actions as input (e.g. DDPG for continuous Q function)
input = preprocess_inputs(observations, actions)
return input
class FlattenEncoder(T.nn.Module):
"""This encoder flattens the input."""
def __init__(self):
super().__init__()
def forward(
self, observations: Tensor, actions: Optional[Tensor] = None
) -> T.Tensor:
        # Some algorithms use both the observations and actions as input (e.g. DDPG for continuous Q function)
        # Make sure observations is a torch tensor; passing numpy here raises an error for some reason
input = preprocess_inputs(observations, actions)
return T.flatten(input)
class MLPEncoder(T.nn.Module):
"""This is a single layer MLP encoder"""
def __init__(self, input_size, output_size):
super().__init__()
self.model = T.nn.Linear(input_size, output_size)
def forward(
self, observations: Tensor, actions: Optional[Tensor] = None
) -> T.Tensor:
input = preprocess_inputs(observations, actions)
return self.model(input)
class CNNEncoder(T.nn.Module):
"""
CNN from DQN nature paper:
Mnih, Volodymyr, et al.
"Human-level control through deep reinforcement learning."
Nature 518.7540 (2015): 529-533.
:param observation_space:
    :param output_size: number of neurons in the last layer.
:param activation_fn: the activation function after each layer
"""
def __init__(
self,
observation_space: spaces.Box,
output_size: int = 512,
activation_fn: Type[T.nn.Module] = T.nn.ReLU,
):
super().__init__()
# We assume CxHxW images (channels first)
# Re-ordering will be done by pre-preprocessing or wrapper
n_input_channels = observation_space.shape[0]
self.cnn = T.nn.Sequential(
T.nn.Conv2d(n_input_channels, 32, kernel_size=8, stride=4, padding=0),
activation_fn(),
T.nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=0),
activation_fn(),
T.nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0),
activation_fn(),
T.nn.Flatten(),
)
# Compute shape by doing one forward pass
with T.no_grad():
n_flatten = self.cnn(
T.as_tensor(observation_space.sample()[None]).float()
).shape[1]
self.linear = T.nn.Sequential(T.nn.Linear(n_flatten, output_size), T.nn.ReLU())
def forward(self, observations: Tensor) -> T.Tensor:
return self.linear(self.cnn(observations))
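# --- Hedged illustration (not part of the pearll API) ---
# A stripped-down version of the trick used in CNNEncoder.__init__ above: run a
# single dummy observation through the conv stack once to discover the flattened
# feature size instead of deriving it from the conv arithmetic by hand.  The
# channel, height and width values are illustrative assumptions.
def _infer_flatten_size_demo():
    cnn = T.nn.Sequential(
        T.nn.Conv2d(3, 8, kernel_size=8, stride=4),
        T.nn.ReLU(),
        T.nn.Flatten(),
    )
    dummy = T.zeros(1, 3, 84, 84)  # a batch with one fake image observation
    with T.no_grad():
        n_flatten = cnn(dummy).shape[1]
    return n_flatten  # 8 * 20 * 20 = 3200 for these sizes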
class DictEncoder(T.nn.Module):
"""
Handles dictionary observations, e.g. from GoalEnv
:param labels: dictionary labels to extract for model
:param encoder: encoder module to run after extracting array from dictionary
"""
def __init__(
self,
labels: List[str] = ["observation", "desired_goal"],
encoder: T.nn.Module = IdentityEncoder(),
) -> None:
super().__init__()
self.labels = labels
self.encoder = encoder
def forward(
self, observations: Dict[str, Tensor], actions: Optional[Tensor] = None
) -> T.Tensor:
obs = [observations[label] for label in self.labels]
obs = to_numpy(*obs)
if len(self.labels) > 1:
shape_length = len(observations[self.labels[0]].shape)
obs = np.concatenate(obs, axis=shape_length - 1)
return self.encoder(obs, actions)
| 32.296
| 110
| 0.640822
|
eec19b0a270d0368a67155f95545f6cb4a27c586
| 232
|
py
|
Python
|
deepCoral/config.py
|
Fassial/Air-Writing-with-TL
|
9b9047c5bd5aef3a869e2d5166be1c0cf0c5ccf0
|
[
"MIT"
] | 1
|
2021-06-16T16:45:01.000Z
|
2021-06-16T16:45:01.000Z
|
deepCoral/config.py
|
Fassial/Air-Writing-with-TL
|
9b9047c5bd5aef3a869e2d5166be1c0cf0c5ccf0
|
[
"MIT"
] | null | null | null |
deepCoral/config.py
|
Fassial/Air-Writing-with-TL
|
9b9047c5bd5aef3a869e2d5166be1c0cf0c5ccf0
|
[
"MIT"
] | 1
|
2020-04-21T01:31:26.000Z
|
2020-04-21T01:31:26.000Z
|
CFG = {
"datapath": "../dataset",
"kwargs": {"n_workers": 4},
"batch_size": 20,
"n_epoches": 20,
"lr": 1e-3,
"momentum": .9,
"log_interval": 10,
"l2_decay": 0,
"lambda": 10,
"backbone": "naive_cnnblstm",
"n_class": 31
}
| 15.466667
| 30
| 0.573276
|
ec32317fc34abfe3179a308b772cda01e15661a5
| 1,421
|
py
|
Python
|
chapter_5/py_5_11_merge_sort.py
|
kfrime/algo-in-python
|
e017dd20385fd9ea2086a72698fbfcb7d706dd86
|
[
"MIT"
] | null | null | null |
chapter_5/py_5_11_merge_sort.py
|
kfrime/algo-in-python
|
e017dd20385fd9ea2086a72698fbfcb7d706dd86
|
[
"MIT"
] | null | null | null |
chapter_5/py_5_11_merge_sort.py
|
kfrime/algo-in-python
|
e017dd20385fd9ea2086a72698fbfcb7d706dd86
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
5.11 Merge sort
"""
from __future__ import print_function
def merge_sort(alist):
"""
    Merge sort is a recursive algorithm that repeatedly splits the list in half.
    A list that is empty or contains a single item is sorted by definition (the base case).
    If the list has more than one item, we split it and recursively merge-sort both halves.
    Once the two halves are sorted, the merge operation is performed.
    Merging takes two smaller sorted lists and combines them into a single new sorted list.
    Runs in O(n log n).
"""
print("Splitting ", alist)
if len(alist) > 1:
mid = len(alist) // 2
left_half = alist[:mid]
right_half = alist[mid:]
merge_sort(left_half)
merge_sort(right_half)
i = j = k = 0
# print("left ", left_half)
# print("right ", right_half
# 合并,通过重复从排序列表中取最小的项目,将项目逐个放回切分后的原始列表(alist)。
while i < len(left_half) and j < len(right_half):
if left_half[i] < right_half[j]:
alist[k] = left_half[i]
i = i + 1
else:
alist[k] = right_half[j]
j = j + 1
k = k + 1
        # Copy any items remaining in the left half
while i < len(left_half):
alist[k] = left_half[i]
i = i + 1
k = k + 1
        # Copy any items remaining in the right half
while j < len(right_half):
alist[k] = right_half[j]
j = j + 1
k = k + 1
print("Merging ", alist)
def test_merge_sort():
alist = [54, 26, 93, 17, 77, 31, 44, 55, 20]
print(alist)
merge_sort(alist)
print(alist)
if __name__ == "__main__":
test_merge_sort()
| 20.3
| 57
| 0.508797
|
bcfb66caf5352bd01b439eb7f7f07d216623919a
| 6,533
|
py
|
Python
|
src/finmag/util/versions.py
|
davidcortesortuno/finmag
|
9ac0268d2c0e45faf1284cee52a73525aa589e2b
|
[
"BSL-1.0"
] | 10
|
2018-03-24T07:43:17.000Z
|
2022-03-26T10:42:27.000Z
|
src/finmag/util/versions.py
|
davidcortesortuno/finmag
|
9ac0268d2c0e45faf1284cee52a73525aa589e2b
|
[
"BSL-1.0"
] | 21
|
2018-03-26T15:08:53.000Z
|
2021-07-10T16:11:14.000Z
|
src/finmag/util/versions.py
|
davidcortesortuno/finmag
|
9ac0268d2c0e45faf1284cee52a73525aa589e2b
|
[
"BSL-1.0"
] | 7
|
2018-04-09T11:50:48.000Z
|
2021-06-10T09:23:25.000Z
|
import os
import re
import sh
import sys
import logging
import finmag
logger = logging.getLogger('finmag')
def get_linux_issue():
try:
f = open("/etc/issue")
except IOError:
logger.error("Can't read /etc/issue -- this is odd?")
raise RuntimeError("Cannot establish linux version")
issue = f.readline() # only return first line
issue = issue.replace('\\l', '')
issue = issue.replace('\\n', '')
#logger.debug("Linux OS = '%s'" % issue)
return issue.strip() # get rid of white space left and right
def get_version_python():
version = sys.version.split(' ')[0]
assert version.count('.') == 2, "Unknown version format: %s" % version
return version
def get_module_version(name):
try:
m = __import__(name)
return m.__version__
except ImportError:
return None
def get_version_ipython():
try:
return get_module_version('IPython')
except ValueError:
# This is needed due to a strange error seen in some test runs:
#
# /usr/lib/python2.7/dist-packages/IPython/utils/io.py:32: in __init__
# > raise ValueError("fallback required, but not specified")
# E ValueError: fallback required, but not specified
#
# It seems that this can happen because standard output is caught by
# py.test, but providing the -s switch didn't help either.
return None
def get_version_dolfin():
return get_module_version('dolfin')
def get_version_numpy():
return get_module_version('numpy')
def get_version_matplotlib():
# this will only do a look-up of matplotlib's version if it is already
# imported. If matplotlib hasn't been imported yet, it won't do so either.
if "matplotlib" not in sys.modules:
return "lazily loaded"
return get_module_version('matplotlib')
def get_version_scipy():
return get_module_version('scipy')
def get_version_boostpython():
"""
Determine and return the boost-python version.
We check the name of the symlink of libboost_python.
If libboost_python.so is installed, returns a string with the version
number, otherwise returns None. Raises NotImplementedError if
the version cannot be determined. This may mean the file is not available,
or not available in the standard place (/usr/lib).
"""
# get version number as string
maj, min_, rev = get_version_python().split('.')
# libfile = /usr/lib/libboost_python-py27.so' or similar
libfile = '/usr/lib/libboost_python-py%s%s.so' % (maj, min_)
try:
filename = os.readlink(libfile)
except OSError:
raise NotImplementedError(
"Cannot locate %s. Cannot determine boost-python version." % libfile)
# expect filename to be something like 'libboost_python-py27.so.1.49.0'
version = filename.split(".so.")[1]
return version
def get_debian_package_version(pkg_name):
"""
Determine and return the version of the given Debian package (as a string).
This only works on Debian-derived systems (such as Debian or Ubuntu) as
it internally calls 'dpkg -s' to determine the version number.
If the package is installed, returns a string with the version number,
otherwise returns None. Warns if the version cannot be determined due to
an unsupported system.
"""
import subprocess
import re
version = None
try:
with open(os.devnull, 'w') as devnull:
output = subprocess.check_output(
['dpkg', '-s', pkg_name], stderr=devnull)
except subprocess.CalledProcessError as e:
logger.warning(
"Could not determine version of {} using dpkg.".format(pkg_name))
if e.returncode == 1:
logger.warning(
"The package {} is probably not installed.".format(pkg_name))
elif e.returncode == 127:
logger.warning(
"This does not seem to be a debian-derived Linux distribution.")
else:
logger.warning("Can't determine cause of error.")
return None
lines = output.split('\n')
version_str = filter(lambda s: s.startswith('Version'), lines)[0]
version = re.sub('Version: ', '', version_str)
return version
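# --- Hedged illustration (not part of the original finmag code) ---
# The parsing step above boils down to picking the 'Version:' line out of the
# 'dpkg -s' output.  The sample output string below is an illustrative
# assumption, not real dpkg output.
def _parse_dpkg_version_demo():
    output = "Package: paraview\nStatus: install ok installed\nVersion: 5.4.1-1\n"
    lines = output.split('\n')
    version_str = [s for s in lines if s.startswith('Version')][0]
    return re.sub('Version: ', '', version_str)  # -> '5.4.1-1'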
def get_version_sundials():
return finmag.native.sundials.get_sundials_version()
def get_version_paraview():
try:
# XXX TODO: There should be a more cross-platform way of
# determining the Paraview version, but the only method I could
# find is in the thread [1], and it doesn't work any more for
# recent versions of Paraview. It's quite annoying that something
# as simple as "import paraview; paraview.__version__" doesn't
# work...
#
# [1] http://blog.gmane.org/gmane.comp.science.paraview.user/month=20090801/page=34
version = get_debian_package_version('paraview')
except:
try:
sh.pvpython('--version')
except sh.ErrorReturnCode_1 as ex:
# This is fine. (Oddly, pvpython returns
# with exit code 1 if successful...)
m = re.match('paraview version (.*)', ex.stderr.strip())
version = m.group(1)
return version
def running_binary_distribution():
"""Return True if this is the cython-based binary
    distribution or False if it is a source distribution.
"""
thefile = __file__
if thefile.endswith('.py') or thefile.endswith('.pyc'):
#logger.debug("Running source code version")
return False
elif thefile.endswith('.so'):
#logger.debug("Binary finmag distribution")
return True
else:
logger.error("thefile=%s" % thefile)
raise RuntimeError("Checking running_binary_distribution failed!")
def loose_compare_ubuntu_version(v1, v2):
if not v1.startswith('Ubuntu') or not v2.startswith('Ubuntu'):
return False
from distutils.version import LooseVersion
t1 = LooseVersion(v1).version
t2 = LooseVersion(v2).version
if t1[3] == t2[3] and t1[4] == t2[4]:
return True
return False
if __name__ == "__main__":
linux_issue = get_linux_issue()
print("__file__ = %s" % __file__)
print("Linux issue: %s" % linux_issue)
print("Binary distribution: %s" % running_binary_distribution())
print("Sundials version: %s" % get_version_sundials())
    print(loose_compare_ubuntu_version('Ubuntu 12.04.1 LTS', "Ubuntu 12.04.2 LTS"))
| 31.109524
| 91
| 0.651462
|
2042b01c1eb81f3808555cd623e984970ef80393
| 1,717
|
py
|
Python
|
model/user.py
|
maximatorrus/automated_testing_python
|
259f0c9a94bbe81b6a8d2076aeed66054c73ea45
|
[
"Apache-2.0"
] | null | null | null |
model/user.py
|
maximatorrus/automated_testing_python
|
259f0c9a94bbe81b6a8d2076aeed66054c73ea45
|
[
"Apache-2.0"
] | null | null | null |
model/user.py
|
maximatorrus/automated_testing_python
|
259f0c9a94bbe81b6a8d2076aeed66054c73ea45
|
[
"Apache-2.0"
] | null | null | null |
from sys import maxsize
class User:
def __init__(self, firstname=None, middlename=None, lastname=None, nickname=None, title=None, company=None,
address=None, telephone=None, mobile=None, work=None, fax=None, email_=None, email2=None, email3=None,
homepage=None, byear=None, ayear=None, bday=None, bmonth=None, aday=None, amonth=None, id=None,
secondaryphone=None, all_phones_from_home_page=None, all_emails_from_home_page=None):
self.firstname = firstname
self.middlename = middlename
self.lastname = lastname
self.nickname = nickname
self.title = title
self.company = company
self.address = address
self.telephone = telephone
self.mobile = mobile
self.work = work
self.fax = fax
self.email_ = email_
self.email2 = email2
self.email3 = email3
self.homepage = homepage
self.byear = byear
self.ayear = ayear
self.bday = bday
self.bmonth = bmonth
self.aday = aday
self.amonth = amonth
self.id = id
self.secondaryphone = secondaryphone
self.all_phones_from_home_page = all_phones_from_home_page
self.all_emails_from_home_page = all_emails_from_home_page
def __repr__(self):
return "%s:%s:%s" % (self.id, self.firstname, self.lastname)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.firstname == other.firstname\
and self.lastname == other.lastname
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
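# Minimal usage sketch with hypothetical data (not part of any test fixture):
# __eq__ treats a missing id as a wildcard, and id_or_max pushes users without
# an id to the end of a sorted list.
def _example_sort_users():
    users = [User(id="2", firstname="Anna", lastname="B"),
             User(firstname="Carl", lastname="D")]  # no id assigned yet
    return sorted(users, key=User.id_or_max)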
| 35.770833
| 119
| 0.624927
|
e301612a559918174491b08498c275178d00627b
| 6,832
|
py
|
Python
|
bert_onnx.py
|
MatRazor/ONNXRuntime_tutorial_collection
|
9fe46311896391f769a51cc4d07814e6bfafd8ee
|
[
"MIT"
] | 2
|
2021-03-12T16:29:03.000Z
|
2021-07-24T17:07:14.000Z
|
bert_onnx.py
|
MatRazor/ONNXRuntime_tutorial_collection
|
9fe46311896391f769a51cc4d07814e6bfafd8ee
|
[
"MIT"
] | null | null | null |
bert_onnx.py
|
MatRazor/ONNXRuntime_tutorial_collection
|
9fe46311896391f769a51cc4d07814e6bfafd8ee
|
[
"MIT"
] | 1
|
2021-11-11T18:36:25.000Z
|
2021-11-11T18:36:25.000Z
|
# %%
## Most part of the code taken from https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/python/tools/transformers/notebooks/PyTorch_Bert-Squad_OnnxRuntime_CPU.ipynb
import os
import requests
from transformers import BertConfig, BertForQuestionAnswering, BertTokenizer
from transformers.data.processors.squad import SquadV1Processor
from transformers import squad_convert_examples_to_features
import torch
import torch.nn as nn
import onnxruntime
import matplotlib.pyplot as plt
from timeit import Timer
import numpy as np
def load_bert():
# The following code is adapted from HuggingFace transformers
# https://github.com/huggingface/transformers/blob/master/examples/run_squad.py
# Load pretrained model and tokenizer
config_class, model_class, tokenizer_class = (BertConfig, BertForQuestionAnswering, BertTokenizer)
config = config_class.from_pretrained(model_name_or_path, cache_dir = cache_dir)
tokenizer = tokenizer_class.from_pretrained(model_name_or_path, do_lower_case = True, cache_dir = cache_dir)
model = model_class.from_pretrained(model_name_or_path,
from_tf = False,
config = config,
cache_dir = cache_dir)
# load some examples
processor = SquadV1Processor()
examples = processor.get_dev_examples(None, filename=predict_file)
# Convert examples to features
features, dataset = squad_convert_examples_to_features(
examples=examples[:total_samples], # convert just enough examples for this notebook
tokenizer=tokenizer,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_training=False,
return_dataset='pt'
)
return model, features, dataset
# %%
def speed(inst, number=10, repeat=20):
timer = Timer(inst, globals=globals())
raw = np.array(timer.repeat(repeat, number=number))
ave = raw.sum() / len(raw) / number
mi, ma = raw.min() / number, raw.max() / number
print("Average %1.3g min=%1.3g max=%1.3g" % (ave, mi, ma))
return ave
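# Hedged usage note for speed(): the statement string is evaluated against
# globals(), so any names it references must exist at module level.
# Example (not executed here; `fn` is a hypothetical callable):
#   fn = lambda: sum(range(1000))
#   speed("fn()", number=5, repeat=3)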
# %%
if __name__ == '__main__':
# Create a cache directory to store pretrained model.
cache_dir = os.path.join(".", "cache_models")
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Download Stanford Question Answering Dataset (SQuAD) dataset (BERT trained on it)
predict_file_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json"
file_name = "dev-v1.1.json"
predict_file = os.path.join(cache_dir, file_name)
if not os.path.exists(predict_file):
print("Start downloading predict file.")
r = requests.get(predict_file_url)
with open(predict_file, 'wb') as f:
f.write(r.content)
print("Predict file downloaded.")
# %%
# Bert Base Code for the Demo
model_name_or_path = "bert-base-cased"
max_seq_length = 128
doc_stride = 128
max_query_length = 64
    # Total samples to run inference on. It should be large enough to get a stable latency measurement.
total_samples = 100
# Load BERT PyTorch
model, features, dataset = load_bert()
output_dir = os.path.join(".", "onnx_models")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
export_model_path = os.path.join(output_dir, 'bert-base-cased-squad.onnx')
# %%
data = dataset[0]
inputs = {
'input_ids' : data[0].reshape(1, max_seq_length),
'attention_mask' : data[1].reshape(1, max_seq_length),
'token_type_ids' : data[2].reshape(1, max_seq_length)
}
model.eval()
# dynamic elements
symbolic_names = {0 : 'batch_size', 1 : 'max_seq_length'}
torch.onnx.export(model,
args = tuple(inputs.values()),
f = export_model_path,
opset_version = 11,
do_constant_folding = True,
input_names = ['input_ids',
'input_mask',
'segment_ids'],
output_names = ['start',
'end'],
dynamic_axes = {'input_ids' : symbolic_names,
'input_mask' : symbolic_names,
'segment_ids' : symbolic_names,
'start' : symbolic_names,
'end' : symbolic_names}
)
print('Model exported successfully in:', export_model_path)
#%%
print("Starting Pytorch...")
# torch model
torch_avg_time = []
with torch.no_grad():
for i in range(total_samples):
data = dataset[i]
inputs = {
'input_ids' : data[0].reshape(1, max_seq_length),
'attention_mask' : data[1].reshape(1, max_seq_length),
'token_type_ids' : data[2].reshape(1, max_seq_length)
}
ave_torch = speed("model(**inputs)")
torch_avg_time.append(ave_torch)
# ONNXRuntime
print("Starting ONNX...")
# Create a session
session = onnxruntime.InferenceSession(export_model_path)
# %%
# Inference through Onnxruntime
onnxruntime_avg_time = []
for i in range(total_samples):
data = dataset[i]
ort_inputs = {
'input_ids' : data[0].reshape(1, max_seq_length).numpy(),
'input_mask' : data[1].reshape(1, max_seq_length).numpy(),
'segment_ids' : data[2].reshape(1, max_seq_length).numpy()
}
ave_onnx = speed("session.run(None, ort_inputs)")
onnxruntime_avg_time.append(ave_onnx)
# %%
torch_avg_final = sum(torch_avg_time) / len(torch_avg_time)
print("Execution time for PyTorch")
print(torch_avg_final)
onnx_avg_final = sum(onnxruntime_avg_time) / len(onnxruntime_avg_time)
print("Execution time for ONNX Runtime")
print(onnx_avg_final)
# %%
# Plotting Performances
names = ['std_inference', 'onnxruntime_inference']
values = [torch_avg_final * 10e2, onnx_avg_final * 10e2]
fig = plt.figure(figsize=(9,10))
plt.yticks(np.arange(0, 170, 5))
plt.xlabel('Inference Engines', fontsize='large', fontweight='bold')
plt.ylabel('Time [ms]', fontsize='large', fontweight='bold')
plt.title('BERT average inference performance (SQuAD set)', fontsize='large', fontweight='bold')
plt.bar(names, values)
plt.show()
# %%
| 39.264368
| 178
| 0.604508
|
e9dbc3bb63cc70147aea01926500fb2d7b65b029
| 956
|
py
|
Python
|
migrations/versions/f6e9c6582972_initial_migration.py
|
Meziu/srb2_highscores
|
9d2805309b523c74186ead71aeabdb754c8b5746
|
[
"Unlicense"
] | 1
|
2020-05-21T13:30:54.000Z
|
2020-05-21T13:30:54.000Z
|
migrations/versions/f6e9c6582972_initial_migration.py
|
Meziu/srb2_highscores
|
9d2805309b523c74186ead71aeabdb754c8b5746
|
[
"Unlicense"
] | 24
|
2020-05-20T21:34:22.000Z
|
2021-05-03T18:29:03.000Z
|
migrations/versions/f6e9c6582972_initial_migration.py
|
Meziu/srb2_highscores
|
9d2805309b523c74186ead71aeabdb754c8b5746
|
[
"Unlicense"
] | 7
|
2020-05-20T15:57:20.000Z
|
2021-05-03T17:01:49.000Z
|
"""Initial migration.
Revision ID: f6e9c6582972
Revises:
Create Date: 2020-06-17 20:17:27.108433
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f6e9c6582972'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'voted',
sa.Column('ip', sa.String(length=15), nullable=False),
sa.Column('map', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('ip', 'map')
)
op.drop_constraint('highscores_ibfk_1', 'highscores', type_='foreignkey')
op.add_column('maps', sa.Column('image', sa.LargeBinary(), nullable=True))
op.add_column('maps', sa.Column('votes', sa.Integer(), nullable=False, default=0))
def downgrade():
op.drop_column('maps', 'votes')
op.drop_column('maps', 'image')
op.create_foreign_key('highscores_ibfk_1', 'highscores', 'maps', ['map_id'], ['id'])
op.drop_table('voted')
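# Usage note (assuming this migrations/ tree is driven by Flask-Migrate, as its
# layout suggests; plain Alembic commands work analogously):
#   flask db upgrade     # applies upgrade()
#   flask db downgrade   # reverts via downgrade()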
| 26.555556
| 88
| 0.675732
|
703f941c469ea74cbd1f6cf3056e0e412618a4f5
| 2,947
|
py
|
Python
|
admixtureworkflow.py
|
janneengestoft/birc-project
|
95b201648a851efaab1682388e8bb617752b4812
|
[
"MIT"
] | null | null | null |
admixtureworkflow.py
|
janneengestoft/birc-project
|
95b201648a851efaab1682388e8bb617752b4812
|
[
"MIT"
] | null | null | null |
admixtureworkflow.py
|
janneengestoft/birc-project
|
95b201648a851efaab1682388e8bb617752b4812
|
[
"MIT"
] | null | null | null |
from gwf import Workflow, AnonymousTarget
from scripts.modpath import modpath
gwf = Workflow()
# Chromosome and group to perform admixture analysis on
vcffile = '../../../../primatediversity/data/PG_baboons_pananu3_23_2_2021/output.filtered.snps.chr7.removed.AB.pass.vep.vcf.gz'
chr = '7'
pop = 'females'
popfile = 'females.txt'
ks = range(4, 11)
def vcf_filter(vcf_file, chrom, popfile, pop):
output_vcf = f'steps/recode_vcf/chr{chrom}_{pop}.recode.vcf'
base_name = modpath(output_vcf, suffix=('.recode.vcf', ''))
inputs = [vcf_file]
outputs = [output_vcf]
options = {
'memory': '2g',
'walltime': '02:00:00'
}
spec = f'''
mkdir -p steps/recode_vcf
vcftools --gzvcf {vcf_file} --recode --keep data/{popfile} \
--out {base_name}
'''
return AnonymousTarget(inputs=inputs, outputs=outputs, options=options, spec=spec)
def vcf2bed(chrom, pop):
filtered_vcf = f'steps/recode_vcf/chr{chrom}_{pop}.recode.vcf'
bed = f'steps/plink/chr{chrom}_{pop}.bed'
base_name = modpath(bed, suffix=('.bed', ''))
pruned_bed = f'steps/plink/chr{chrom}_{pop}.pruned.bed'
inputs = [filtered_vcf]
outputs = [pruned_bed]
options = {
'memory': '2g',
'walltime': '02:00:00'
}
spec = f'''
mkdir -p steps/plink
plink --vcf {filtered_vcf} --make-bed --double-id --geno 0.025 --indep-pairwise 50 10 0.1 \
--out {base_name}
plink --bfile {base_name} --extract {base_name}.prune.in --make-bed --out {base_name}.pruned
'''
return AnonymousTarget(inputs=inputs, outputs=outputs, options=options, spec=spec)
def admixture(k, chrom, pop):
bedfile = f'steps/plink/chr{chrom}_{pop}.pruned.bed'
outputq = f'results/admixture/chr{chrom}_{pop}/chr{chrom}_{pop}.pruned.{k}.Q'
outputp = f'results/admixture/chr{chrom}_{pop}/chr{chrom}_{pop}.pruned.{k}.P'
no_path = f'chr{chrom}_{pop}.pruned.{k}'
logs = f'results/admixture/crossvalidation/log_chr{chrom}_{pop}.{k}.out'
inputs = [bedfile]
outputs = [outputq, outputp, logs]
options = {
'memory': '5g',
'walltime': '8:00:00'
}
spec = f'''
mkdir -p results/admixture/chr{chrom}_{pop}
mkdir -p results/admixture/crossvalidation
admixture --cv {bedfile} {k} | tee {logs}
mv {no_path}* results/admixture/chr{chrom}_{pop}
'''
return AnonymousTarget(inputs=inputs, outputs=outputs, options=options, spec=spec)
gwf.target_from_template(
name='exctract_pop',
template=vcf_filter(
vcf_file=vcffile,
chrom=chr,
popfile=popfile,
pop=pop
)
)
gwf.target_from_template(
name='vcf2bed',
template=vcf2bed(
chrom=chr,
pop=pop
)
)
for k in ks:
gwf.target_from_template(
name=f'admixture_{k}',
template=admixture(
k=k,
chrom=chr,
pop=pop
)
)
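# The cross-validation errors end up in the log files written by the admixture
# targets above. A typical way to compare K values afterwards (outside of gwf)
# would be something like:
#   grep -h "CV error" results/admixture/crossvalidation/log_chr7_females.*.out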
| 24.558333
| 127
| 0.620631
|
1b53918df9199a440d6a3e0547020f598785d17d
| 5,450
|
py
|
Python
|
scrape_mars.py
|
Areej32/webscraping
|
5431e1830287804e5ae857cef4bd00546b75683b
|
[
"ADSL"
] | null | null | null |
scrape_mars.py
|
Areej32/webscraping
|
5431e1830287804e5ae857cef4bd00546b75683b
|
[
"ADSL"
] | null | null | null |
scrape_mars.py
|
Areej32/webscraping
|
5431e1830287804e5ae857cef4bd00546b75683b
|
[
"ADSL"
] | null | null | null |
# Scrape Web Data about Mars and Return one Library to collect all the scrape data
# Dependencies
from bs4 import BeautifulSoup as bs
import requests
import pandas as pd
from splinter import Browser
from splinter.exceptions import ElementDoesNotExist
import time
# Define scrape function
def scrape():
    # Create a dictionary ("library") that holds all the Mars data
mars_library = {}
# Use splinter to navigate the JPL's Featured Space Image and scrape the current Featured Mars Image url (https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars)
# Execute Chromedriver
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
# #### NASA Mars News
# We will scrape the lastest News Title and Paragragh Text from NASA Mars News Site(https://mars.nasa.gov/news/).
# URL of page to be scraped
url1 = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'
#Visit the page using the browser
browser.visit(url1)
# assign html content
html = browser.html
# Create a Beautiful Soup object
soup1 = bs(html, "html5lib")
    # Extract the text from class="content_title" and clean it up using strip
news_title = soup1.find_all('div', class_='content_title')[0].find('a').text.strip()
    # Extract the paragraph from class="rollover_description_inner" and clean it up using strip
news_p = soup1.find_all('div', class_='rollover_description_inner')[0].text.strip()
# put infos into Library
mars_library['news_title'] = news_title
mars_library['news_p'] = news_p
# #### JPL Mars Space Images - Featured Image
# URL of page to be scraped
url2 = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
#Visit the page using the browser
browser.visit(url2)
# assign html content
html = browser.html
# Create a Beautiful Soup object
soup2 = bs(html, "html5lib")
    # Scrape the path for the Featured Image; this gets the partial path of the URL
partial_address = soup2.find_all('a', class_='fancybox')[0].get('data-fancybox-href').strip()
#combine the root url to get the full address
featured_image_url = "https://www.jpl.nasa.gov"+partial_address
# Put infos into Library
mars_library['featured_image_url'] = featured_image_url
# #### Mars Weather
# Use splinter to scrape the latest Mars weather tweet from the Mars Weather twitter account (https://twitter.com/marswxreport?lang=en)
# URL of page to be scraped
url3 = 'https://twitter.com/marswxreport?lang=en'
#Visit the page using the browser
browser.visit(url3)
# assign html content
html = browser.html
# Create a Beautiful Soup object
soup3 = bs(html, "html5lib")
#scrap latest Mars weather tweet
mars_weather = soup3.find_all('p', class_='TweetTextSize TweetTextSize--normal js-tweet-text tweet-text')[0].text
# Put infos into Library
mars_library['mars_weather'] = mars_weather
# #### Mars Facts
# Use Pandas to scrape the table from Mars Facts webpage and convert the data to a HTML table string
# URL of page to be scraped
url4 = 'https://space-facts.com/mars/'
# use Pandas to get the url table
tables = pd.read_html(url4)
# Convert list of table into pandas dataframe
df = tables[0]
# update column name
df.columns=['description','value']
#Set the index to the description column
df.set_index('description', inplace=True)
# Use pandas to generate HTML tables from DataFrames and save as html file
mars_facts=df.to_html(justify='left')
# Put infos into Library
mars_library['mars_facts'] = mars_facts
# #### Mars Hemisperes
# USGS Astrogeology site to obtain high resolution images for each of Mar's hemispheres
# URL of page to be scraped
url5 = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
#Visit the page using the browser
browser.visit(url5)
# assign html content
html = browser.html
# Create a Beautiful Soup object
soup5 = bs(html,"html5lib")
# assigned list to store:
hemisphere_image_urls = []
# create empty dict
dict = {}
# get all the title
results = soup5.find_all('h3')
# Loop through each result
for result in results:
# Get text info from result
itema = result.text
time.sleep(1)
browser.click_link_by_partial_text(itema)
time.sleep(1)
# assign html content
htmla = browser.html
# Create a Beautiful Soup object
soupa = bs(htmla,"html5lib")
time.sleep(1)
# Grab the image link
linka = soupa.find_all('div', class_="downloads")[0].find_all('a')[0].get("href")
# Pass title to Dict
time.sleep(1)
dict["title"]=itema
# Pass url to Dict
dict["img_url"]=linka
# Append Dict to the list
hemisphere_image_urls.append(dict)
# Clean Up Dict
dict = {}
browser.visit(url5)
time.sleep(1)
# Put infos into Library
mars_library['hemisphere_image_urls']=hemisphere_image_urls
# Return Library
return mars_library
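# Minimal usage sketch: running the module directly performs one full scrape.
# This assumes chromedriver.exe is reachable as configured above and that
# Chrome itself is installed.
if __name__ == "__main__":
    mars_data = scrape()
    print(mars_data.get("news_title"))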
| 20.335821
| 170
| 0.66789
|
17a5d64261a56bf5aad08882c39fcb29af627714
| 3,944
|
py
|
Python
|
explainable_ai/util/fig_plotter.py
|
banna88/Configuration-Space-Reduction
|
3d061deaf2cb06597037bb085b4769483e42fd53
|
[
"MIT"
] | null | null | null |
explainable_ai/util/fig_plotter.py
|
banna88/Configuration-Space-Reduction
|
3d061deaf2cb06597037bb085b4769483e42fd53
|
[
"MIT"
] | null | null | null |
explainable_ai/util/fig_plotter.py
|
banna88/Configuration-Space-Reduction
|
3d061deaf2cb06597037bb085b4769483e42fd53
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import pandas as pd
from ast import literal_eval
import json
import numpy as np
import matplotlib.patches as mpatches
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
version = 'v1' # v1, v2
def plot_learning_vs_no_learning():
plt.figure()
plt_index = 1
for title in [
'Packet Loss (%)',
'Latency (%)',
'Adaptation Space',
'Analysis Time (sec)',
]:
data = {
'explainable_learning': [],
'learning': []
}
for file_name in ['explainable_learning', 'learning']:
path = 'data/results/' + version + '/'
file_data = open(path + file_name + '.txt').readlines()
file_data = [x.strip() for x in file_data]
learning_size = 11
no_learning_size = 11
for line in file_data:
content = line.split(';')
if len(content) > 1 and int(content[0]) == 1: # skip training cycle
continue
if title == 'Packet Loss (%)':
if len(content) == no_learning_size:
data[file_name].append(float(content[7]))
elif len(content) == learning_size:
data[file_name].append(float(content[7]))
elif title == 'Latency (%)':
if len(content) == no_learning_size:
data[file_name].append(float(content[8]))
elif len(content) == learning_size:
data[file_name].append(float(content[8]))
elif title == 'Adaptation Space':
if len(content) == no_learning_size:
data[file_name].append(int(content[4]))
elif len(content) == learning_size:
data[file_name].append(int(content[4]))
elif title == 'Analysis Time (sec)':
if len(content) == no_learning_size:
data[file_name].append(float(content[5]) / 1000)
elif len(content) == learning_size:
data[file_name].append(float(content[5]) / 1000)
print({
'title': title,
'explainable_learning_avg': np.average(data['explainable_learning']),
'learning_avg': np.average(data['learning'])
})
plt.subplot(2, 2, plt_index)
boxplot = plt.boxplot(
[data[x] for x in ['learning', 'explainable_learning']],
positions=[1, 2],
widths=.3,
labels=['regular', 'explainable'],
patch_artist=True,
#showfliers=False,
#notch=True,
medianprops={'color': 'black', 'linewidth': 2}
)
for index, box in enumerate(boxplot['boxes']):
box.set(facecolor=[ 'orange', 'green'][index])
#box.set(facecolor=['orange', 'dodgerblue'][index])
plt.ylabel(title, fontsize='20')
plt.xticks(size = 20)
plt.yticks(size = 15)
#plt.figlegend(bbox_to_anchor=(1.1, 1.05), loc="upper left")
plt_index += 1
plt.show()
def plot_training_selection():
data = json.load(open('data/model_training/' + version + '_training.json'))
labels = {
'packet_loss': 'Packet Loss Model',
'latency': 'Latency Model'
}
plt.figure()
for item in data:
plt.plot(item['training_samples'], item['accuracy'], label=labels[item['target']])
plt.ylabel('Accuracy (%)')
plt.xlabel('Training Samples\n(Total Samples = ' + str(data[0]['total_samples']) + ')')
plt.xticks(data[0]['training_samples'])
plt.ylim(top=1.0, bottom=0.0)
plt.grid()
plt.legend()
plt.show()
#plot_learning_vs_no_learning()
plot_training_selection()
#for i in range(300, 0, -1):
# plot_selected_adaptation_options(i)
| 32.595041
| 91
| 0.540061
|
1d09abce70057b11a403ddcbda399d9f444807cc
| 3,293
|
py
|
Python
|
gym_bot_app/tasks/did_not_train_updater.py
|
raascode/GymBot
|
10ea5ef7639d41bd243761a85507c2509427dc99
|
[
"Apache-2.0"
] | 8
|
2018-12-02T10:15:19.000Z
|
2022-01-27T09:03:26.000Z
|
gym_bot_app/tasks/did_not_train_updater.py
|
raascode/GymBot
|
10ea5ef7639d41bd243761a85507c2509427dc99
|
[
"Apache-2.0"
] | 4
|
2021-02-10T02:20:38.000Z
|
2021-10-19T20:54:21.000Z
|
gym_bot_app/tasks/did_not_train_updater.py
|
raascode/GymBot
|
10ea5ef7639d41bd243761a85507c2509427dc99
|
[
"Apache-2.0"
] | 9
|
2018-07-27T09:05:43.000Z
|
2022-01-24T12:18:38.000Z
|
from datetime import time, timedelta, datetime
from typing import List
from telegram import ParseMode
from telegram.ext import CallbackQueryHandler
from gym_bot_app.models import Trainee, Group
from gym_bot_app.tasks import Task
from gym_bot_app.utils import get_trainees_that_selected_today_and_did_not_train_yet
from gym_bot_app.decorators import repeats, run_for_all_groups
class DidNotTrainUpdaterTask(Task):
"""Telegram gym bot update trainee did not go to gym task."""
DEFAULT_TARGET_TIME = time(hour=23, minute=55, second=0, microsecond=0)
DID_NOT_TRAIN_QUERY_IDENTIFIER = 'did_not_train_updater'
DATE_FORMAT = '%d/%m/%Y'
DID_NOT_GO_TO_GYM_PLURAL_MSG = 'אפסים מאופסים {trainees}'
    DID_NOT_GO_TO_GYM_INDIVIDUAL_MSG = 'אפס מאופס {trainees}'
def __init__(self, target_time=None, *args, **kwargs):
super(DidNotTrainUpdaterTask, self).__init__(*args, **kwargs)
self.target_time = target_time or self.DEFAULT_TARGET_TIME
def get_start_time(self):
"""Start time of did not train updater based on the target time."""
return self._seconds_until_time(target_time=self.target_time)
@repeats(every_seconds=timedelta(days=1).total_seconds())
@run_for_all_groups
def execute(self, group: Group):
"""Override method to execute did not train updater.
Sends did not go to gym message with the trainees of today that did not train to the given group chat.
"""
self.logger.info('Executing did not train updater with %s', group)
relevant_trainees = get_trainees_that_selected_today_and_did_not_train_yet(group)
self.logger.debug('Relevant trainees %s', relevant_trainees)
if relevant_trainees:
            # Subtracting a couple of hours keeps the date within the day this run was scheduled for
not_trained_time = (datetime.today() - timedelta(hours=2)).date()
for trainee in relevant_trainees:
if not trainee.get_training_info(training_date=not_trained_time):
trainee.add_training_info(training_date=not_trained_time, trained=False)
            did_not_go_to_gym_msg = self._get_did_not_go_to_gym_msg(relevant_trainees)
self.updater.bot.send_message(chat_id=group.id, text=did_not_go_to_gym_msg, parse_mode=ParseMode.MARKDOWN)
else:
self.logger.debug('There are no trainees that said they would train and did not')
def _get_did_not_go_to_gym_msg(self, trainees: List[Trainee]):
"""Generate did not go to gym message based on the given trainees.
Args:
trainees(list): trainees that will be included in the message.
Returns:
str. message of did not go to gym with the given trainees.
"""
trainee_string = ' '.join(trainee.get_mention_string() for trainee in trainees)
if len(trainees) > 1:
self.logger.debug('More than one trainee therefore creating plural msg')
did_not_go_msg = self.DID_NOT_GO_TO_GYM_PLURAL_MSG.format(trainees=trainee_string)
else:
self.logger.debug('One trainee creating msg for individual')
            did_not_go_msg = self.DID_NOT_GO_TO_GYM_INDIVIDUAL_MSG.format(trainees=trainee_string)
return did_not_go_msg
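# Illustration of the message formatting logic only (no Telegram side effects).
# _FakeTrainee is a hypothetical stand-in exposing the single method that
# _get_did_not_go_to_gym_msg relies on; with two fakes the plural template is
# used, with a single fake the individual template.
class _FakeTrainee:
    def __init__(self, mention):
        self.mention = mention

    def get_mention_string(self):
        return self.mention
# e.g. (task being an already constructed DidNotTrainUpdaterTask):
#   DidNotTrainUpdaterTask._get_did_not_go_to_gym_msg(task, [_FakeTrainee('@a')])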
| 43.328947
| 118
| 0.711509
|
24d69ee2d4c36a119c9007a9fb07364a5e29c403
| 344
|
py
|
Python
|
Python/Algorithm/FunctionValidanting.py
|
piovezan/SOpt
|
a5ec90796b7bdf98f0675457fc4bb99c8695bc40
|
[
"MIT"
] | 148
|
2017-08-03T01:49:27.000Z
|
2022-03-26T10:39:30.000Z
|
Python/Algorithm/FunctionValidanting.py
|
piovezan/SOpt
|
a5ec90796b7bdf98f0675457fc4bb99c8695bc40
|
[
"MIT"
] | 3
|
2017-11-23T19:52:05.000Z
|
2020-04-01T00:44:40.000Z
|
Python/Algorithm/FunctionValidanting.py
|
piovezan/SOpt
|
a5ec90796b7bdf98f0675457fc4bb99c8695bc40
|
[
"MIT"
] | 59
|
2017-08-03T01:49:19.000Z
|
2022-03-31T23:24:38.000Z
|
def pedeChute():
while True:
try:
chute = int(input("chute: "))
if chute == 0:
return chute
except:
pass
def jogo():
while True:
        # something here
        chute = pedeChute()  # will always be valid
        # something here
#https://pt.stackoverflow.com/q/449742/101
| 20.235294
| 47
| 0.482558
|
b18f8d784adf9c64137c910b0deb3f8526254810
| 2,725
|
py
|
Python
|
docker/package/package_generator.py
|
serokell/tezos-packaging
|
74397ce01721a3366043895710b229f1d53a5504
|
[
"Apache-2.0",
"MIT"
] | 43
|
2019-12-12T11:54:15.000Z
|
2022-03-08T01:10:36.000Z
|
docker/package/package_generator.py
|
serokell/tezos-packaging
|
74397ce01721a3366043895710b229f1d53a5504
|
[
"Apache-2.0",
"MIT"
] | 226
|
2019-12-10T13:39:22.000Z
|
2022-03-30T12:30:17.000Z
|
docker/package/package_generator.py
|
serokell/tezos-packaging
|
74397ce01721a3366043895710b229f1d53a5504
|
[
"Apache-2.0",
"MIT"
] | 11
|
2020-08-11T09:25:05.000Z
|
2022-03-05T15:51:46.000Z
|
# SPDX-FileCopyrightText: 2020 TQ Tezos <https://tqtezos.com/>
#
# SPDX-License-Identifier: LicenseRef-MIT-TQ
import os, shutil, argparse
from .fedora import build_fedora_package
from .packages import packages
from .ubuntu import build_ubuntu_package
is_ubuntu = None
is_source = None
package_to_build = None
source_archive = None
parser = argparse.ArgumentParser()
parser.add_argument("--os", required=True)
parser.add_argument("--type", help="package type", required=True)
parser.add_argument("--package", help="specify binary to package")
parser.add_argument(
"--sources", help="specify source archive for single ubuntu package"
)
args = parser.parse_args()
if args.os == "ubuntu":
is_ubuntu = True
elif args.os == "fedora":
is_ubuntu = False
else:
raise Exception(
"Unexpected package target OS, only 'ubuntu' and 'fedora' are supported."
)
if args.type == "source":
is_source = True
elif args.type == "binary":
is_source = False
else:
raise Exception(
"Unexpected package format, only 'source' and 'binary' are supported."
)
package_to_build = args.package
source_archive = args.sources
if is_ubuntu:
run_deps = [
"libev-dev",
"libgmp-dev",
"libhidapi-dev",
"libffi-dev",
"zlib1g-dev",
"libpq-dev",
]
else:
run_deps = [
"libev-devel",
"gmp-devel",
"hidapi-devel",
"libffi-devel",
"zlib-devel",
"libpq-devel",
]
build_deps = [
"make",
"m4",
"perl",
"pkg-config",
"wget",
"unzip",
"rsync",
"gcc",
"cargo",
"opam",
"git",
"autoconf",
]
common_deps = run_deps + build_deps
ubuntu_versions = [
"bionic", # 18.04
"focal", # 20.04
"hirsute", # 21.04
]
pwd = os.getcwd()
home = os.environ["HOME"]
for package in packages:
if package_to_build is None or package.name == package_to_build:
if is_ubuntu:
build_ubuntu_package(
package, ubuntu_versions, common_deps, is_source, source_archive
)
else:
build_fedora_package(package, build_deps, run_deps, is_source)
os.mkdir("out")
if not is_source:
if is_ubuntu:
exts = [".deb"]
else:
exts = [".rpm"]
else:
if is_ubuntu:
exts = [".orig.tar.gz", ".dsc", ".changes", ".debian.tar.xz", ".buildinfo"]
else:
exts = [".src.rpm"]
if is_ubuntu:
artifacts_dir = "."
else:
subdir = "SRPMS" if is_source else "RPMS/x86_64"
artifacts_dir = f"{home}/rpmbuild/{subdir}"
for f in os.listdir(artifacts_dir):
for ext in exts:
if f.endswith(ext):
shutil.copy(f"{artifacts_dir}/{f}", os.path.join("out", f))
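# Typical invocation sketch (run as a module so that the relative imports above
# resolve; the exact entry point used by the repository's CI may differ):
#   python3 -m package.package_generator --os ubuntu --type binary --package <name>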
| 23.09322
| 83
| 0.616147
|
aee0523670b853d2509a9968d763f4058261652c
| 15,419
|
py
|
Python
|
.history/src/Simulador_20200711171005.py
|
eduardodut/Trabalho_final_estatistica_cd
|
fbedbbea6bdd7a79e1d62030cde0fab4e93fc338
|
[
"MIT"
] | null | null | null |
.history/src/Simulador_20200711171005.py
|
eduardodut/Trabalho_final_estatistica_cd
|
fbedbbea6bdd7a79e1d62030cde0fab4e93fc338
|
[
"MIT"
] | null | null | null |
.history/src/Simulador_20200711171005.py
|
eduardodut/Trabalho_final_estatistica_cd
|
fbedbbea6bdd7a79e1d62030cde0fab4e93fc338
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from Matriz_esferica import Matriz_esferica
from Individuo import Individuo, Fabrica_individuo
import random
from itertools import permutations
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from scipy.sparse import csr_matrix, lil_matrix
class Simulador():
SADIO = 0
    INFECTADO_TIPO_1 = 1  # asymptomatic cases and the initial infected individual
    INFECTADO_TIPO_2 = 2  # symptomatic
CURADO = 3
MORTO = 4
def __init__(
self,
        tamanho_matriz,            # number of rows and columns of the spherical matrix
        percentual_inicial_tipo1,  # initial share of the population infected as type 1
        percentual_inicial_tipo2,  # initial share of the population infected as type 2
        chance_infeccao,           # chance that a type 2 infected individual infects a healthy one
        chance_infeccao_tipo2,     # chance that an infected individual becomes contagious (type 2)
        chance_morte,              # chance that a type 2 individual dies at the end of an update
        atualizacoes_cura):        # number of updates needed for a type 1 or 2 individual to be cured
self.num_atualizacoes = 0
self.lista_infectados_tipo_2 = []
self.lista_infectados_tipo_1 = []
self.num_curados = 0
self.num_mortos = 0
self.chance_infeccao = chance_infeccao
self.chance_infeccao_tipo2 = chance_infeccao_tipo2
self.chance_morte = chance_morte
self.atualizacoes_cura = atualizacoes_cura
self.populacao_inicial = int(tamanho_matriz**2)
self.num_inicial_tipo2 = int(self.populacao_inicial * percentual_inicial_tipo2)
self.num_inicial_tipo1 = 1 + int(self.populacao_inicial * percentual_inicial_tipo1)
self.num_inicial_sadios = self.populacao_inicial - (self.num_inicial_tipo2 + self.num_inicial_tipo1)
self.matriz_status = lil_matrix((tamanho_matriz, tamanho_matriz),dtype= np.uint8)
self.matriz_atualizacoes_cura = lil_matrix((tamanho_matriz, tamanho_matriz),dtype= np.uint8)
#self.matriz_status = self.df_individuos.to_numpy()
self.popular(tamanho_matriz)
self.lista_matrizes_status = []
        # object responsible for validating movement on the n x n grid
self.matriz_esferica = Matriz_esferica(tamanho_matriz)
dict = {
'num_sadios':self.num_inicial_sadios,
'num_infect_t1':self.num_inicial_tipo1,
'num_infect_t2':self.num_inicial_tipo2,
'num_curados':0,
'num_mortos':0}
        # dataframe that will store the results of each update
self.dataframe = pd.DataFrame(dict,index = [0])
self.salvar_posicionamento()
def criar_individuo(self, status, posicao):
self.matriz_status[posicao[0], posicao[1]] = status
if status == self.INFECTADO_TIPO_1 or status == self.INFECTADO_TIPO_2:
self.matriz_atualizacoes_cura[posicao[0], posicao[1]] = self.atualizacoes_cura
def salvar_posicionamento(self):
self.lista_matrizes_status.append(self.matriz_status)
def verificar_infeccao(self, lista_infectantes):
lista_novos_infectados_tipo1 = []
lista_novos_infectados_tipo2 = []
        # iterate over the list of infecting individuals; each one tries to infect its neighbours
        for indice_infectante in lista_infectantes:
            # fetch the neighbours of the current infecting individual
            lista_vizinhos = self.matriz_esferica.get_vizinhos(indice_infectante)
            # For each neighbour, if it is healthy, draw a random number to decide whether it gets infected
            for indice_vizinho in lista_vizinhos:
                # check for SADIO (healthy)
                if self.verifica_status(indice_vizinho) == self.SADIO:
                    # determine the new status
                    novo_status = self.infectar(self.chance_infeccao, self.chance_infeccao_tipo2)
                    # if it became a type 1 infected
                    if novo_status == Individuo.INFECTADO_TIPO_1:
                        # add to the list of new type 1 infected
                        lista_novos_infectados_tipo1.append(indice_vizinho)
                        self.criar_individuo(Individuo.INFECTADO_TIPO_1, indice_vizinho)
                    if novo_status == Individuo.INFECTADO_TIPO_2:
                        # add to the list of new type 2 infected
                        lista_novos_infectados_tipo2.append(indice_vizinho)
                        self.criar_individuo(Individuo.INFECTADO_TIPO_2, indice_vizinho)
return lista_novos_infectados_tipo1, lista_novos_infectados_tipo2
def checagem_morte_individual(self, chance_morte, indice):
rng_morte = random.random()
if rng_morte <= chance_morte:
self.matriz_status[indice[0], indice[1]] = self.MORTO
return self.MORTO
else:
return self.checar_cura_individual(indice)
def checar_cura_individual(self, indice):
self.matriz_atualizacoes_cura[indice[0], indice[1]] = self.matriz_atualizacoes_cura[indice[0], indice[1]] - 1
if self.matriz_atualizacoes_cura[indice[0], indice[1]] == 0:
self.matriz_status[indice[0], indice[1]] = self.CURADO
return self.CURADO
else:
return self.matriz_status[indice[0], indice[1]]
def checagem_morte_cura_lista(self, lista_infectantes_tipo2):
lista_curados = []
lista_mortos = []
for indice_infectante in lista_infectantes_tipo2:
self.checagem_morte_individual(self.chance_morte, indice_infectante)
if self.verifica_status(indice_infectante) == Individuo.MORTO:
lista_mortos.append(indice_infectante)
if self.verifica_status(indice_infectante) == Individuo.CURADO:
lista_curados.append(indice_infectante)
return lista_mortos, lista_curados
def checagem_cura_lista(self, lista_infectantes):
lista_curados = []
for indice_infectante in lista_infectantes:
self.checar_cura_individual(indice_infectante)
if self.verifica_status(indice_infectante) == Individuo.CURADO:
lista_curados.append(indice_infectante)
return lista_curados
def iterar(self):
        # Determine the new individuals infected by the type 1 and type 2 infectors
print(self.lista_infectados_tipo_1+self.lista_infectados_tipo_2)
lista_novos_infectados_tipo1, lista_novos_infectados_tipo2 = self.verificar_infeccao(self.lista_infectados_tipo_1+self.lista_infectados_tipo_2)
        # Check death/cure of the type 2 infected
lista_mortos, lista_curados_t2 = self.checagem_morte_cura_lista(self.lista_infectados_tipo_2)
        # Check cure of the type 1 infected
lista_curados_t1 = self.checagem_cura_lista(self.lista_infectados_tipo_1)
        # remove the dead and the cured from the type 1 and type 2 infector lists
nova_lista_infectados_t2 = []
for indice in self.lista_infectados_tipo_2:
if indice not in lista_mortos:
if indice not in lista_curados_t2:
nova_lista_infectados_t2.append(indice)
self.lista_infectados_tipo_2 = nova_lista_infectados_t2
nova_lista_infectados_t1 = []
for indice in self.lista_infectados_tipo_1:
if indice not in lista_curados_t1:
nova_lista_infectados_t1.append(indice)
self.lista_infectados_tipo_1 = nova_lista_infectados_t1
        # update the number of dead
self.num_mortos = self.num_mortos + len(lista_mortos) + 1
        # update the number of cured
print("curados da rodada")
print(len(lista_curados_t1) + len(lista_curados_t2))
self.num_curados = self.num_curados + len(lista_curados_t1) + len(lista_curados_t2)
        # move the infectors:
nova_lista_infectados_t1 = []
for indice in self.lista_infectados_tipo_1:
nova_lista_infectados_t1.append(self.mover_infectante(indice))
self.lista_infectados_tipo_1 = nova_lista_infectados_t1
nova_lista_infectados_t2 = []
for indice in self.lista_infectados_tipo_2:
nova_lista_infectados_t2.append(self.mover_infectante(indice))
self.lista_infectados_tipo_2 = nova_lista_infectados_t2
print(self.lista_infectados_tipo_1+self.lista_infectados_tipo_2)
        # add the new type 1 and type 2 infected to their respective lists
self.lista_infectados_tipo_2 = self.lista_infectados_tipo_2 + lista_novos_infectados_tipo2
self.lista_infectados_tipo_1 = self.lista_infectados_tipo_1 + lista_novos_infectados_tipo1
#populacao_sadia = self.dataframe.iloc[-1]['num_sadios'] - len(lista_novos_infectados_tipo2+lista_novos_infectados_tipo1+lista_curados_t1+lista_curados_t2+)
dict = {
'num_sadios':self.populacao_inicial - self.num_mortos - self.num_curados - len(self.lista_infectados_tipo_1) - len(self.lista_infectados_tipo_2) ,
'num_infect_t1':len(self.lista_infectados_tipo_1),
'num_infect_t2':len(self.lista_infectados_tipo_2),
'num_curados':self.num_curados,
'num_mortos':self.num_mortos}
# dict = {
# 'num_sadios':self.dataframe.iloc[-1]['num_sadios'] - np.sum(self.matriz_status[self.matriz_status != 0].toarray()),
# 'num_infect_t1':np.sum(self.matriz_status[self.matriz_status == 1].toarray()),
# 'num_infect_t2':np.sum(self.matriz_status[self.matriz_status == 2].toarray()),
# 'num_curados':np.sum(self.matriz_status[self.matriz_status == 3].toarray()),
# 'num_mortos':np.sum(self.matriz_status[self.matriz_status == 4].toarray())}
self.dataframe = self.dataframe.append(dict, ignore_index=True)
# print("num t1: ", len(self.lista_infectados_tipo_1))
# print("num t2: ", len(self.lista_infectados_tipo_2))
# print("num curados: ", self.num_curados)
# print("num mortos: ", self.num_mortos)
# print("---------")
# #salva a nova matriz de status
self.salvar_posicionamento()
        # add 1 to the number of updates performed on the matrix
self.num_atualizacoes +=1
def infectar(self, chance_infeccao, chance_infeccao_tipo2):
saida = Individuo.SADIO
        # random number for the chance of infecting the neighbour
rng_infeccao = random.random()
if rng_infeccao <= chance_infeccao:
            # random number for the chance of a type 1 or type 2 infection
rng_infeccao_tipo2 = random.random()
if rng_infeccao_tipo2 <= chance_infeccao_tipo2:
saida = Individuo.INFECTADO_TIPO_2
else:
saida = Individuo.INFECTADO_TIPO_1
return saida
def popular(self, tamanho_matriz):
        # list of possible index combinations of the data matrix
        permutacoes = permutations(list(range(tamanho_matriz)), 2)
        # conversion to a list of (x, y) tuples
        lista_indices = list(permutacoes)
        # shuffle the indices
        random.shuffle(lista_indices)
        # create the first type 1 individual:
        indice = lista_indices.pop()
        self.criar_individuo(Individuo.INFECTADO_TIPO_1, indice)
        self.lista_infectados_tipo_1.append(indice)
        # create the remaining type 1 individuals
        for i in range(1, self.num_inicial_tipo1):
            indice = lista_indices.pop()
            self.criar_individuo(Individuo.INFECTADO_TIPO_1, indice)
            self.lista_infectados_tipo_1.append(indice)
        # create the type 2 individuals:
        for indice in range(self.num_inicial_tipo2):
            indice = lista_indices.pop()
            self.criar_individuo(Individuo.INFECTADO_TIPO_2, indice)
self.lista_infectados_tipo_2.append(indice)
def trocar(self,matriz,ponto_ini,ponto_final):
x_ini = ponto_ini[0]
y_ini = ponto_ini[1]
x_fin = ponto_final[0]
y_fin = ponto_final[1]
aux = matriz[x_fin,y_fin]
matriz[x_fin,y_fin] = matriz[x_ini,y_ini]
matriz[x_ini,y_ini] = aux
def verifica_status(self, indice):
return self.matriz_status[indice[0], indice[1]]
def mover_infectante(self, posicao_inicial):
pos_x, pos_y = posicao_inicial[0], posicao_inicial[1]
rng_posicao = random.random()
        if rng_posicao <= 0.25:
            # move up
            pos_x -= 1
        elif rng_posicao <= 0.5:
            # move down
            pos_x += 1
        elif rng_posicao <= 0.75:
            # move left
            pos_y -= 1
        else:
            # move right
            pos_y += 1
posicao_final= self.matriz_esferica.valida_ponto_matriz(pos_x, pos_y)
self.trocar(self.matriz_status, posicao_inicial, posicao_final)
self.trocar(self.matriz_atualizacoes_cura, posicao_inicial, posicao_final)
return posicao_final
chance_infeccao = 0.3
chance_infeccao_tipo2 = 0.3
chance_morte = 0.1
atualizacoes_cura = 10
percentual_inicial_tipo1 = 0.0
percentual_inicial_tipo2 = 0.0
sim = Simulador(
5,
percentual_inicial_tipo1,
percentual_inicial_tipo2,
chance_infeccao,
chance_infeccao_tipo2,
chance_morte,atualizacoes_cura)
#print(sim.lista_matrizes_posicionamento[0])
#print(sim.lista_infectados_tipo_2)
#print(sim.lista_infectados_tipo_1)
cmap = ListedColormap(['w', 'y', 'r', 'blue', 'black'])
while (sim.dataframe.iloc[-1]['num_infect_t1']+sim.dataframe.iloc[-1]['num_infect_t2']) > 0:
#plt.matshow(sim.matriz_status.toarray(), cmap = cmap, vmin= 0, vmax = 4)
print(sim.dataframe.iloc[-1])
sim.iterar()
#print(sim.dataframe.iloc[-1])
#print("xxxxxxxxxxxxxxxxxTipo: ",type(sim.lista_matrizes_posicionamento[len(sim.lista_matrizes_posicionamento)-1].toarray()))
print(sim.dataframe)
#plt.show()
# for i in range(12):
# #plt.matshow(sim.lista_matrizes_status[i].toarray(), cmap = cmap, vmin= 0, vmax = 4)
# print(i)
# print("Status")
# print(sim.matriz_status.toarray())
# print("Cura")
# print(sim.matriz_atualizacoes_cura.toarray())
# sim.iterar()
# m = sim.matriz_atualizacoes_cura[sim.matriz_status == 1 or sim.matriz_status == 2].toarray()
# print(m)
#plt.show()
#print(sim.dataframe)
# print(sim.lista_infectados_tipo_1)
# print(sim.lista_infectados_tipo_2)
# sim.iterar()
# print(sim.lista_infectados_tipo_1)
# print(sim.lista_infectados_tipo_2)
# print(sim.dataframe)
# print("status inicial: ", sim.df_individuos[1][0].status)
# print("Novos infectados: ", sim.verificar_infeccao(sim.lista_infectados_tipo_1))
# plt.show()
| 40.153646
| 170
| 0.651858
|
bd7d3f20ccbd0581bf494538eef0d62bb3f12c4a
| 2,519
|
py
|
Python
|
tools/c7n_gcp/tests/test_gcp_storage.py
|
dnouri/cloud-custodian
|
4e8b3b45f60731df942ffe6b61645416d7a67daa
|
[
"Apache-2.0"
] | 1
|
2020-09-07T21:10:29.000Z
|
2020-09-07T21:10:29.000Z
|
tools/c7n_gcp/tests/test_gcp_storage.py
|
dnouri/cloud-custodian
|
4e8b3b45f60731df942ffe6b61645416d7a67daa
|
[
"Apache-2.0"
] | 1
|
2021-02-10T02:20:45.000Z
|
2021-02-10T02:20:45.000Z
|
tools/c7n_gcp/tests/test_gcp_storage.py
|
dnouri/cloud-custodian
|
4e8b3b45f60731df942ffe6b61645416d7a67daa
|
[
"Apache-2.0"
] | 1
|
2021-10-15T11:29:54.000Z
|
2021-10-15T11:29:54.000Z
|
# Copyright 2019 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import time
from gcp_common import BaseTest
class BucketTest(BaseTest):
def test_bucket_query(self):
project_id = 'cloud-custodian'
factory = self.replay_flight_data('bucket-query', project_id)
p = self.load_policy(
{'name': 'all-buckets',
'resource': 'gcp.bucket'},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['id'], "staging.cloud-custodian.appspot.com")
self.assertEqual(resources[0]['storageClass'], "STANDARD")
def test_bucket_get(self):
project_id = 'cloud-custodian'
bucket_name = "staging.cloud-custodian.appspot.com"
factory = self.replay_flight_data(
'bucket-get-resource', project_id)
p = self.load_policy({'name': 'bucket', 'resource': 'gcp.bucket'},
session_factory=factory)
bucket = p.resource_manager.get_resource({
"bucket_name": bucket_name,
})
self.assertEqual(bucket['name'], bucket_name)
self.assertEqual(bucket['id'], "staging.cloud-custodian.appspot.com")
self.assertEqual(bucket['storageClass'], "STANDARD")
self.assertEqual(bucket['location'], "EU")
def test_enable_uniform_bucket_level_access(self):
project_id = 'custodian-1291'
bucket_name = 'c7n-dev-test'
factory = self.replay_flight_data(
'bucket-uniform-bucket-access', project_id)
p = self.load_policy({
'name': 'bucket',
'resource': 'gcp.bucket',
'filters': [
{'name': 'c7n-dev-test'},
{'iamConfiguration.uniformBucketLevelAccess.enabled': False},
],
'actions': ['set-uniform-access']},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
if self.recording:
time.sleep(5)
bucket = p.resource_manager.get_resource({
"bucket_name": bucket_name,
})
self.assertEqual(bucket['name'], bucket_name)
self.assertEqual(bucket['id'], bucket_name)
self.assertEqual(bucket['storageClass'], "REGIONAL")
self.assertEqual(bucket['location'], "US-EAST1")
self.assertJmes('iamConfiguration.uniformBucketLevelAccess.enabled', bucket, True)
| 38.753846
| 90
| 0.616118
|
f64682d3a903f6073a2f0e8588812417574d7c8c
| 2,685
|
py
|
Python
|
apps/search/src/search/search_controller.py
|
vsosrc/hue
|
d8bc236d8d622759fa5988ff32246e4c750e7503
|
[
"Apache-2.0"
] | null | null | null |
apps/search/src/search/search_controller.py
|
vsosrc/hue
|
d8bc236d8d622759fa5988ff32246e4c750e7503
|
[
"Apache-2.0"
] | null | null | null |
apps/search/src/search/search_controller.py
|
vsosrc/hue
|
d8bc236d8d622759fa5988ff32246e4c750e7503
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -- coding: utf-8 --
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from desktop.lib.exceptions_renderable import PopupException
from search.api import SolrApi
from search.conf import SOLR_URL
from search.models import Collection
from django.utils.translation import ugettext as _
LOG = logging.getLogger(__name__)
class SearchController(object):
"""
Glue the models to the views.
"""
def __init__(self, user):
self.user = user
def get_search_collections(self):
# TODO perms
return Collection.objects.filter(enabled=True)
def delete_collection(self, collection_id):
id = collection_id
try:
Collection.objects.get(id=collection_id).delete()
except Exception, e:
LOG.warn('Error deleting collection: %s' % e)
id = -1
return id
def copy_collection(self, collection_id):
id = -1
try:
copy = Collection.objects.get(id=collection_id)
copy.label += _(' (Copy)')
copy.id = copy.pk = None
copy.save()
facets = copy.facets
facets.id = None
facets.save()
copy.facets = facets
result = copy.result
result.id = None
result.save()
copy.result = result
sorting = copy.sorting
sorting.id = None
sorting.save()
copy.sorting = sorting
copy.save()
id = copy.id
    except Exception, e:
      LOG.warn('Error copying collection: %s' % e)
    return id
def is_collection(self, collection_name):
solr_collections = SolrApi(SOLR_URL.get(), self.user).collections()
return collection_name in solr_collections
def is_core(self, core_name):
solr_cores = SolrApi(SOLR_URL.get(), self.user).cores()
return core_name in solr_cores
def get_solr_collection(self):
return SolrApi(SOLR_URL.get(), self.user).collections()
def get_all_indexes(self):
return self.get_solr_collection().keys() + SolrApi(SOLR_URL.get(), self.user).cores().keys()
| 27.680412
| 96
| 0.700931
|
a982299d94cd2afc01eeceb33dd70f04c8f1b198
| 18,746
|
py
|
Python
|
wagtail/images/models.py
|
simo97/wagtail
|
ef404c775559722b9ad6be61a2cc6d6ca6ed8f69
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/images/models.py
|
simo97/wagtail
|
ef404c775559722b9ad6be61a2cc6d6ca6ed8f69
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/images/models.py
|
simo97/wagtail
|
ef404c775559722b9ad6be61a2cc6d6ca6ed8f69
|
[
"BSD-3-Clause"
] | null | null | null |
import hashlib
import os.path
from collections import OrderedDict
from contextlib import contextmanager
from io import BytesIO
from django.conf import settings
from django.core import checks
from django.core.files import File
from django.db import models
from django.forms.utils import flatatt
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from taggit.managers import TaggableManager
from unidecode import unidecode
from willow.image import Image as WillowImage
from wagtail.admin.utils import get_object_usage
from wagtail.core import hooks
from wagtail.core.models import CollectionMember
from wagtail.images.exceptions import InvalidFilterSpecError
from wagtail.images.rect import Rect
from wagtail.search import index
from wagtail.search.queryset import SearchableQuerySetMixin
class SourceImageIOError(IOError):
"""
Custom exception to distinguish IOErrors that were thrown while opening the source image
"""
pass
class ImageQuerySet(SearchableQuerySetMixin, models.QuerySet):
pass
def get_upload_to(instance, filename):
"""
Obtain a valid upload path for an image file.
This needs to be a module-level function so that it can be referenced within migrations,
but simply delegates to the `get_upload_to` method of the instance, so that AbstractImage
subclasses can override it.
"""
return instance.get_upload_to(filename)
def get_rendition_upload_to(instance, filename):
"""
Obtain a valid upload path for an image rendition file.
This needs to be a module-level function so that it can be referenced within migrations,
but simply delegates to the `get_upload_to` method of the instance, so that AbstractRendition
subclasses can override it.
"""
return instance.get_upload_to(filename)
class AbstractImage(CollectionMember, index.Indexed, models.Model):
title = models.CharField(max_length=255, verbose_name=_('title'))
file = models.ImageField(
verbose_name=_('file'), upload_to=get_upload_to, width_field='width', height_field='height'
)
width = models.IntegerField(verbose_name=_('width'), editable=False)
height = models.IntegerField(verbose_name=_('height'), editable=False)
created_at = models.DateTimeField(verbose_name=_('created at'), auto_now_add=True, db_index=True)
uploaded_by_user = models.ForeignKey(
settings.AUTH_USER_MODEL, verbose_name=_('uploaded by user'),
null=True, blank=True, editable=False, on_delete=models.SET_NULL
)
tags = TaggableManager(help_text=None, blank=True, verbose_name=_('tags'))
focal_point_x = models.PositiveIntegerField(null=True, blank=True)
focal_point_y = models.PositiveIntegerField(null=True, blank=True)
focal_point_width = models.PositiveIntegerField(null=True, blank=True)
focal_point_height = models.PositiveIntegerField(null=True, blank=True)
file_size = models.PositiveIntegerField(null=True, editable=False)
# A SHA-1 hash of the file contents
file_hash = models.CharField(max_length=40, blank=True, editable=False)
# To hold the current rotation angle of the image
angle = models.IntegerField(null=True, blank=True, default=0)
objects = ImageQuerySet.as_manager()
def is_stored_locally(self):
"""
Returns True if the image is hosted on the local filesystem
"""
try:
self.file.path
return True
except NotImplementedError:
return False
def get_file_size(self):
if self.file_size is None:
try:
self.file_size = self.file.size
except Exception as e:
# File not found
#
# Have to catch everything, because the exception
# depends on the file subclass, and therefore the
# storage being used.
raise SourceImageIOError(str(e))
self.save(update_fields=['file_size'])
return self.file_size
def _set_file_hash(self, file_contents):
self.file_hash = hashlib.sha1(file_contents).hexdigest()
def get_file_hash(self):
if self.file_hash == '':
with self.open_file() as f:
self._set_file_hash(f.read())
self.save(update_fields=['file_hash'])
return self.file_hash
def get_upload_to(self, filename):
folder_name = 'original_images'
filename = self.file.field.storage.get_valid_name(filename)
# do a unidecode in the filename and then
# replace non-ascii characters in filename with _ , to sidestep issues with filesystem encoding
filename = "".join((i if ord(i) < 128 else '_') for i in unidecode(filename))
# Truncate filename so it fits in the 100 character limit
# https://code.djangoproject.com/ticket/9893
full_path = os.path.join(folder_name, filename)
if len(full_path) >= 95:
chars_to_trim = len(full_path) - 94
prefix, extension = os.path.splitext(filename)
filename = prefix[:-chars_to_trim] + extension
full_path = os.path.join(folder_name, filename)
return full_path
def get_usage(self):
return get_object_usage(self)
@property
def usage_url(self):
return reverse('wagtailimages:image_usage',
args=(self.id,))
search_fields = CollectionMember.search_fields + [
index.SearchField('title', partial_match=True, boost=10),
index.AutocompleteField('title'),
index.FilterField('title'),
index.RelatedFields('tags', [
index.SearchField('name', partial_match=True, boost=10),
index.AutocompleteField('name'),
]),
index.FilterField('uploaded_by_user'),
]
def __str__(self):
return self.title
@contextmanager
def open_file(self):
# Open file if it is closed
close_file = False
try:
image_file = self.file
if self.file.closed:
# Reopen the file
if self.is_stored_locally():
self.file.open('rb')
else:
# Some external storage backends don't allow reopening
# the file. Get a fresh file instance. #1397
storage = self._meta.get_field('file').storage
image_file = storage.open(self.file.name, 'rb')
close_file = True
except IOError as e:
# re-throw this as a SourceImageIOError so that calling code can distinguish
# these from IOErrors elsewhere in the process
raise SourceImageIOError(str(e))
# Seek to beginning
image_file.seek(0)
try:
yield image_file
finally:
if close_file:
image_file.close()
@contextmanager
def get_willow_image(self):
with self.open_file() as image_file:
yield WillowImage.open(image_file)
def get_rect(self):
return Rect(0, 0, self.width, self.height)
def get_focal_point(self):
if self.focal_point_x is not None and \
self.focal_point_y is not None and \
self.focal_point_width is not None and \
self.focal_point_height is not None:
return Rect.from_point(
self.focal_point_x,
self.focal_point_y,
self.focal_point_width,
self.focal_point_height,
)
def has_focal_point(self):
return self.get_focal_point() is not None
def set_focal_point(self, rect):
if rect is not None:
self.focal_point_x = rect.centroid_x
self.focal_point_y = rect.centroid_y
self.focal_point_width = rect.width
self.focal_point_height = rect.height
else:
self.focal_point_x = None
self.focal_point_y = None
self.focal_point_width = None
self.focal_point_height = None
def get_suggested_focal_point(self):
with self.get_willow_image() as willow:
faces = willow.detect_faces()
if faces:
# Create a bounding box around all faces
left = min(face[0] for face in faces)
top = min(face[1] for face in faces)
right = max(face[2] for face in faces)
bottom = max(face[3] for face in faces)
focal_point = Rect(left, top, right, bottom)
else:
features = willow.detect_features()
if features:
# Create a bounding box around all features
left = min(feature[0] for feature in features)
top = min(feature[1] for feature in features)
right = max(feature[0] for feature in features)
bottom = max(feature[1] for feature in features)
focal_point = Rect(left, top, right, bottom)
else:
return None
# Add 20% to width and height and give it a minimum size
x, y = focal_point.centroid
width, height = focal_point.size
width *= 1.20
height *= 1.20
width = max(width, 100)
height = max(height, 100)
return Rect.from_point(x, y, width, height)
@classmethod
def get_rendition_model(cls):
""" Get the Rendition model for this Image model """
return cls.renditions.rel.related_model
def get_rendition(self, filter):
if isinstance(filter, str):
filter = Filter(spec=filter)
cache_key = filter.get_cache_key(self)
Rendition = self.get_rendition_model()
try:
rendition = self.renditions.get(
filter_spec=filter.spec,
focal_point_key=cache_key,
)
except Rendition.DoesNotExist:
# Generate the rendition image
generated_image = filter.run(self, BytesIO())
# Generate filename
input_filename = os.path.basename(self.file.name)
input_filename_without_extension, input_extension = os.path.splitext(input_filename)
# A mapping of image formats to extensions
FORMAT_EXTENSIONS = {
'jpeg': '.jpg',
'png': '.png',
'gif': '.gif',
}
output_extension = filter.spec.replace('|', '.') + FORMAT_EXTENSIONS[generated_image.format_name]
if cache_key:
output_extension = cache_key + '.' + output_extension
# Truncate filename to prevent it going over 60 chars
output_filename_without_extension = input_filename_without_extension[:(59 - len(output_extension))]
output_filename = output_filename_without_extension + '.' + output_extension
rendition, created = self.renditions.get_or_create(
filter_spec=filter.spec,
focal_point_key=cache_key,
defaults={'file': File(generated_image.f, name=output_filename)}
)
return rendition
def is_portrait(self):
return (self.width < self.height)
def is_landscape(self):
return (self.height < self.width)
@property
def filename(self):
return os.path.basename(self.file.name)
@property
def default_alt_text(self):
# by default the alt text field (used in rich text insertion) is populated
# from the title. Subclasses might provide a separate alt field, and
# override this
return self.title
def is_editable_by_user(self, user):
from wagtail.images.permissions import permission_policy
return permission_policy.user_has_permission_for_instance(user, 'change', self)
class Meta:
abstract = True
class Image(AbstractImage):
admin_form_fields = (
'title',
'file',
'collection',
'tags',
'focal_point_x',
'focal_point_y',
'focal_point_width',
'focal_point_height',
)
class Meta:
verbose_name = _('image')
verbose_name_plural = _('images')
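# Illustrative usage of the concrete Image model (a sketch, not part of the original
# module; assumes an existing Image instance and the standard 'width-400' resize
# operation being registered):
#   rendition = image.get_rendition('width-400')
#   html = rendition.img_tag({'class': 'hero'})  # renders an <img> tag with src/width/height/alt set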
class Filter:
"""
Represents one or more operations that can be applied to an Image to produce a rendition
appropriate for final display on the website. Usually this would be a resize operation,
but could potentially involve colour processing, etc.
"""
def __init__(self, spec=None):
# The spec pattern is operation1-var1-var2|operation2-var1
self.spec = spec
@cached_property
def operations(self):
# Search for operations
self._search_for_operations()
# Build list of operation objects
operations = []
for op_spec in self.spec.split('|'):
op_spec_parts = op_spec.split('-')
if op_spec_parts[0] not in self._registered_operations:
raise InvalidFilterSpecError("Unrecognised operation: %s" % op_spec_parts[0])
op_class = self._registered_operations[op_spec_parts[0]]
operations.append(op_class(*op_spec_parts))
return operations
def run(self, image, output):
with image.get_willow_image() as willow:
original_format = willow.format_name
# Fix orientation of image
willow = willow.auto_orient()
env = {
'original-format': original_format,
}
for operation in self.operations:
willow = operation.run(willow, image, env) or willow
# Find the output format to use
if 'output-format' in env:
# Developer specified an output format
output_format = env['output-format']
else:
# Default to outputting in original format
output_format = original_format
# Convert BMP files to PNG
if original_format == 'bmp':
output_format = 'png'
# Convert unanimated GIFs to PNG as well
if original_format == 'gif' and not willow.has_animation():
output_format = 'png'
if output_format == 'jpeg':
# Allow changing of JPEG compression quality
if 'jpeg-quality' in env:
quality = env['jpeg-quality']
elif hasattr(settings, 'WAGTAILIMAGES_JPEG_QUALITY'):
quality = settings.WAGTAILIMAGES_JPEG_QUALITY
else:
quality = 85
# If the image has an alpha channel, give it a white background
if willow.has_alpha():
willow = willow.set_background_color_rgb((255, 255, 255))
return willow.save_as_jpeg(output, quality=quality, progressive=True, optimize=True)
elif output_format == 'png':
return willow.save_as_png(output, optimize=True)
elif output_format == 'gif':
return willow.save_as_gif(output)
def get_cache_key(self, image):
vary_parts = []
for operation in self.operations:
for field in getattr(operation, 'vary_fields', []):
value = getattr(image, field, '')
vary_parts.append(str(value))
vary_string = '-'.join(vary_parts)
# Return blank string if there are no vary fields
if not vary_string:
return ''
return hashlib.sha1(vary_string.encode('utf-8')).hexdigest()[:8]
_registered_operations = None
@classmethod
def _search_for_operations(cls):
if cls._registered_operations is not None:
return
operations = []
for fn in hooks.get_hooks('register_image_operations'):
operations.extend(fn())
cls._registered_operations = dict(operations)
class AbstractRendition(models.Model):
filter_spec = models.CharField(max_length=255, db_index=True)
file = models.ImageField(upload_to=get_rendition_upload_to, width_field='width', height_field='height')
width = models.IntegerField(editable=False)
height = models.IntegerField(editable=False)
focal_point_key = models.CharField(max_length=16, blank=True, default='', editable=False)
@property
def url(self):
return self.file.url
@property
def alt(self):
return self.image.title
@property
def attrs(self):
"""
The src, width, height, and alt attributes for an <img> tag, as a HTML
string
"""
return flatatt(self.attrs_dict)
@property
def attrs_dict(self):
"""
A dict of the src, width, height, and alt attributes for an <img> tag.
"""
return OrderedDict([
('src', self.url),
('width', self.width),
('height', self.height),
('alt', self.alt),
])
def img_tag(self, extra_attributes={}):
attrs = self.attrs_dict.copy()
attrs.update(extra_attributes)
return mark_safe('<img{}>'.format(flatatt(attrs)))
def __html__(self):
return self.img_tag()
def get_upload_to(self, filename):
folder_name = 'images'
filename = self.file.field.storage.get_valid_name(filename)
return os.path.join(folder_name, filename)
@classmethod
def check(cls, **kwargs):
errors = super(AbstractRendition, cls).check(**kwargs)
if not cls._meta.abstract:
if not any(
set(constraint) == set(['image', 'filter_spec', 'focal_point_key'])
for constraint in cls._meta.unique_together
):
errors.append(
checks.Error(
"Custom rendition model %r has an invalid unique_together setting" % cls,
hint="Custom rendition models must include the constraint "
"('image', 'filter_spec', 'focal_point_key') in their unique_together definition.",
obj=cls,
id='wagtailimages.E001',
)
)
return errors
class Meta:
abstract = True
class Rendition(AbstractRendition):
image = models.ForeignKey(Image, related_name='renditions', on_delete=models.CASCADE)
class Meta:
unique_together = (
('image', 'filter_spec', 'focal_point_key'),
)
| 34.208029
| 111
| 0.616505
|
675a302cf444d9735bc54159b5fce1527384a630
| 663
|
py
|
Python
|
fsdet/config/defaults.py
|
wz940216/few-shot-object-detection-custom
|
66277921a9c38b0f0d55a4f0d07c54363b17070b
|
[
"Apache-2.0"
] | 4
|
2021-08-01T01:11:43.000Z
|
2021-11-01T07:14:18.000Z
|
fsdet/config/defaults.py
|
wz940216/few-shot-object-detection-custom
|
66277921a9c38b0f0d55a4f0d07c54363b17070b
|
[
"Apache-2.0"
] | null | null | null |
fsdet/config/defaults.py
|
wz940216/few-shot-object-detection-custom
|
66277921a9c38b0f0d55a4f0d07c54363b17070b
|
[
"Apache-2.0"
] | 1
|
2021-07-12T08:19:23.000Z
|
2021-07-12T08:19:23.000Z
|
from detectron2.config import CfgNode as CN
from detectron2.config.defaults import _C
# adding additional default values built on top of the default values in detectron2
_CC = _C
# FREEZE Parameters
_CC.MODEL.BACKBONE.FREEZE = False
_CC.MODEL.PROPOSAL_GENERATOR.FREEZE = False
_CC.MODEL.ROI_HEADS.FREEZE_FEAT = False
# choose from "FastRCNNOutputLayers" and "CosineSimOutputLayers"
_CC.MODEL.ROI_HEADS.OUTPUT_LAYER = "FastRCNNOutputLayers"
# scale of cosine similarity (set to -1 for learnable scale)
_CC.MODEL.ROI_HEADS.COSINE_SCALE = 20.0
# Backward Compatible options.
_CC.MUTE_HEADER = True
# Number of data loading threads
_C.DATALOADER.NUM_WORKERS = 0
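# Illustrative override (an assumed workflow, not part of this file): a YAML config
# merged on top of these defaults might switch to the cosine-similarity head like so:
#   MODEL:
#     ROI_HEADS:
#       OUTPUT_LAYER: "CosineSimOutputLayers"
#       COSINE_SCALE: -1.0   # -1 makes the scale learnable, per the comment above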
| 30.136364
| 83
| 0.80543
|
ef49c704b88afc03df0a6e9c6612cb725ce5b0f6
| 61
|
py
|
Python
|
src/timeatlas/metadata/__init__.py
|
fredmontet/timeatlas
|
9a439a913ef9a8a1ef9833b42e5fb4e988d7e35e
|
[
"MIT"
] | 10
|
2020-08-25T09:23:02.000Z
|
2021-01-12T14:00:35.000Z
|
src/timeatlas/metadata/__init__.py
|
fredmontet/timeatlas
|
9a439a913ef9a8a1ef9833b42e5fb4e988d7e35e
|
[
"MIT"
] | 140
|
2020-06-30T11:59:47.000Z
|
2021-08-23T20:58:43.000Z
|
src/timeatlas/metadata/__init__.py
|
fredmontet/timeatlas
|
9a439a913ef9a8a1ef9833b42e5fb4e988d7e35e
|
[
"MIT"
] | null | null | null |
from .metadata import Metadata
__all__ = [
'Metadata'
]
| 10.166667
| 30
| 0.672131
|
7618b04373475ca406b4ebe19fdcfabbc188a6e7
| 529
|
py
|
Python
|
mmdet/version.py
|
ccw1996/mmdetection
|
6b87ac22b8d9dea8cc28b9ce84909e6c311e6268
|
[
"Apache-2.0"
] | 2
|
2021-11-27T03:30:42.000Z
|
2022-01-01T05:14:18.000Z
|
mmdet/version.py
|
Bella-ing/mmdetection
|
70f6d9cfade4a2f0b198e4f64776521d181b28be
|
[
"Apache-2.0"
] | 1
|
2020-05-20T08:13:44.000Z
|
2020-05-20T08:13:44.000Z
|
mmdet/version.py
|
Bella-ing/mmdetection
|
70f6d9cfade4a2f0b198e4f64776521d181b28be
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.22.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
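# Illustrative results (added for clarity, derived from the logic above):
#   parse_version_info('2.22.0')    -> (2, 22, 0)
#   parse_version_info('2.22.0rc1') -> (2, 22, 0, 'rc1')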
version_info = parse_version_info(__version__)
| 26.45
| 56
| 0.642722
|
a18d9e84e9e9123d545f3da558d943e975bcfa2a
| 2,323
|
py
|
Python
|
accounts/webService/userAPI.py
|
vahidtwo/simpleSocialSite
|
40d971f04b7127811b7e277ddb3068fb451e9574
|
[
"MIT"
] | 1
|
2020-05-16T16:14:51.000Z
|
2020-05-16T16:14:51.000Z
|
accounts/webService/userAPI.py
|
vahidtwo/simpleSocialSite
|
40d971f04b7127811b7e277ddb3068fb451e9574
|
[
"MIT"
] | 5
|
2021-03-18T23:21:18.000Z
|
2022-01-13T02:10:19.000Z
|
accounts/webService/userAPI.py
|
vahidtwo/simpleSocialSite
|
40d971f04b7127811b7e277ddb3068fb451e9574
|
[
"MIT"
] | null | null | null |
from django.http import JsonResponse
from rest_framework.parsers import FileUploadParser
from rest_framework.status import (
HTTP_400_BAD_REQUEST, HTTP_200_OK, HTTP_404_NOT_FOUND
)
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from accounts.models import User
from accounts.serializers import UserSerializer
from chanel.models import Follow
from chanel.serializers import FollowSerializer
from posts.models import Post
class UserAPI(APIView):
permission_classes = (IsAuthenticated,)
parser_class = (FileUploadParser,)
def put(self, request):
_request_perms = request.data
try:
_request_perms['title'] = str(_request_perms.get('picture').name)
except Exception:
pass
user = request.user
ser = UserSerializer(user, _request_perms, partial=True)
if ser.is_valid():
ser.save()
return JsonResponse(data={'msg': 'user update', 'success': True}, status=HTTP_200_OK)
else:
return JsonResponse(data={'msg': ser.errors, 'success': False}, status=HTTP_400_BAD_REQUEST)
def get(self, request, username=None):
data = {}
data['user_post_count'] = Post.objects.filter(author=request.user).count()
if username:
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return JsonResponse(data={'msg': 'user not found', 'success': False}, status=HTTP_404_NOT_FOUND)
follower = Follow.objects.filter(chanel__owner=user)
following = Follow.objects.filter(user=user)
data['follower'] = FollowSerializer(follower, many=True).data
data['follower_count'] = follower.count()
data['following'] = FollowSerializer(following, many=True).data
data['following_count'] = following.count()
data['user_data'] = UserSerializer(user).data
return JsonResponse(data={'data': data, 'success': True}, status=HTTP_200_OK)
else:
data['user_data'] = UserSerializer(request.user).data
follower = Follow.objects.filter(chanel__owner=request.user)
following = Follow.objects.filter(user=request.user)
data['follower'] = FollowSerializer(follower, many=True).data
data['follower_count'] = follower.count()
data['following'] = FollowSerializer(following, many=True).data
data['following_count'] = following.count()
return JsonResponse(data={'data': data, 'success': True}, status=HTTP_200_OK)
| 39.372881
| 100
| 0.754628
|
5c46ff58add2dce689de09d41ce6de546ad3db41
| 465
|
py
|
Python
|
Typing/exa.py
|
simone-trubian/blog-posts
|
85a80df1f8ef85e796470656838792f29c80c3a8
|
[
"BSD-3-Clause"
] | null | null | null |
Typing/exa.py
|
simone-trubian/blog-posts
|
85a80df1f8ef85e796470656838792f29c80c3a8
|
[
"BSD-3-Clause"
] | null | null | null |
Typing/exa.py
|
simone-trubian/blog-posts
|
85a80df1f8ef85e796470656838792f29c80c3a8
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Tuple, TypeVar
# This will Fail!
def incBoth(x: int, y:int) -> (int, int):
return(x + 1, y + 1)
#def incBoth(x: int, y:int) -> Tuple[int, int]:
# return(x + 1, y + 1)
Pair = Tuple[int, int]
Num = TypeVar('Num', int, float, complex)
def incPair(x: int, y:int) -> Pair:
return(x + 1, y + 1)
#def add(x: int, y: int) -> int:
# return x + y
def add(x: Num, y: Num) -> Num:
return x + y
# Wrong
var = add('hello ', 'reader')
| 18.6
| 47
| 0.55914
|
411a19b63653dcbabb4faa1cf86122114b7af75b
| 2,226
|
py
|
Python
|
sensor_callbacks.py
|
motorox/sensors-raspi
|
dc8ba50bd5deb2afdc2381c4fbf915cd66819603
|
[
"MIT"
] | null | null | null |
sensor_callbacks.py
|
motorox/sensors-raspi
|
dc8ba50bd5deb2afdc2381c4fbf915cd66819603
|
[
"MIT"
] | 1
|
2018-05-16T08:38:59.000Z
|
2018-05-16T08:38:59.000Z
|
sensor_callbacks.py
|
motorox/sensors-raspi
|
dc8ba50bd5deb2afdc2381c4fbf915cd66819603
|
[
"MIT"
] | null | null | null |
import time
from sensor_utils import calcAccel, calcGyro, calcHum, calcMagn, calcTmpTarget
class SensorCallbacks:
data = {}
def __init__(self, addr):
self.data['addr'] = addr
self.data['keys'] = 0
def tmp007(self, v):
objT = (v[1]<<8)+v[0]
ambT = (v[3]<<8)+v[2]
#print 'ObjT: ', objT
#print 'Ambient: ', ambT/128.0
self.data['ambtemp'] = ambT/128.0
targetT = calcTmpTarget(objT, ambT)
self.data['temp'] = targetT
celsiusVal = (targetT - 32)*5.0/9.0 #FAHR to Celsius
self.data['celsiustemp'] = celsiusVal
#print "T007 %.1f" % celsiusVal
def lux(self, v):
lux = (v[1]<<8)+v[0]
self.data['lux'] = lux
#print 'Lux', lux
def keys(self, v):
keys = v[0]
self.data['keys'] = keys
#print 'Keys', keys
def humidity(self, v):
rawT = (v[1]<<8)+v[0]
rawH = (v[3]<<8)+v[2]
(t, rh) = calcHum(rawT, rawH)
self.data['humdtemp'] = t
self.data['humd'] = rh
#print "HUMD %.1f" % rh
#print "TEMP %.1f" % t
def baro(self, v):
rawT = ((v[2]<<16) + (v[1]<<8)+v[0])/100.0 # in Celsius
rawP = ((v[5]<<16) + (v[4]<<8)+v[3])/100.0 # in hPa
self.data['barotemp'] = rawT
self.data['baropress'] = rawP
self.data['time'] = int(time.time() * 1000)  # int() keeps this runnable on Python 3; the original used Python 2's long()
def movement(self, v):
# enable magnetometer
mx = (v[13]<<8)+v[12]
my = (v[15]<<8)+v[14]
mz = (v[17]<<8)+v[16]
(mgnx, mgny, mgnz) = calcMagn(mx, my, mz)
self.data['magnx'] = mgnx
self.data['magny'] = mgny
self.data['magnz'] = mgnz
# enable accelerometer
ax = (v[7]<<8)+v[6]
ay = (v[9]<<8)+v[8]
az = (v[11]<<8)+v[10]
(axyz, mag) = calcAccel(ax, ay, az)
self.data['accelx'] = axyz[0]
self.data['accely'] = axyz[1]
self.data['accelz'] = axyz[2]
# enable gyroscope
gx = (v[1]<<8)+v[0]
gy = (v[3]<<8)+v[2]
gz = (v[5]<<8)+v[4]
gxyz = calcGyro(gx, gy, gz)
self.data['gyrox'] = gxyz[0]
self.data['gyroy'] = gxyz[1]
self.data['gyroz'] = gxyz[2]
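# Illustrative wiring (an assumed caller, not part of this file): each byte list `v`
# holds little-endian 16-bit words read from a BLE sensor characteristic, e.g.
#   cb = SensorCallbacks('24:71:89:xx:xx:xx')   # hypothetical device address
#   cb.tmp007(raw_bytes)                        # raw_bytes: list of ints, low byte first
#   print(cb.data['celsiustemp'])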
| 29.289474
| 78
| 0.478437
|
f1ad5862a439dba9506139c01d07126ec3e546b5
| 1,013
|
py
|
Python
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/cli.py
|
jlant/cookiecutter-pyproj
|
ad5895f65b7e6e2541f8aee7498c125a0c144e62
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/cli.py
|
jlant/cookiecutter-pyproj
|
ad5895f65b7e6e2541f8aee7498c125a0c144e62
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/cli.py
|
jlant/cookiecutter-pyproj
|
ad5895f65b7e6e2541f8aee7498c125a0c144e62
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Click command line interface for {{ cookiecutter.project_slug }}.
Notes
-----
Click is a great Python package for creating nice command line interfaces.
Please see `Click documentation <http://click.pocoo.org/>`_.
"""
import sys
import click
import {{cookiecutter.project_slug}}
@click.command()
@click.option("--verbose", is_flag=True, help="Print detailed results from analysis/model")
def main(verbose):
"""Command line interface for iris.
Run all analysis, models, and/or main script from a command line interface.
"""
click.echo("Running analysis from a Click command line interface")
{{cookiecutter.project_slug}}.main()
click.echo("Click allows you to easily add various commands and options "
"as you see fit.")
click.echo()
if verbose:
click.echo("Verbose mode is on.")
click.echo("Can print more detailed results from your analysis/model.")
return 0
if __name__ == "__main__":
sys.exit(main())
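# Illustrative invocation (assuming the rendered project keeps this module layout):
#   python -m {{cookiecutter.project_slug}}.cli --verbose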
| 25.974359
| 91
| 0.685094
|
1630f80e291bd8ee2f184901062605f46466ccd8
| 3,552
|
py
|
Python
|
siam_tracker/models/alexnet.py
|
songheony/SPM-Tracker
|
41fd91ec42cf9072fe44d45c5bb68993f28a12ad
|
[
"MIT"
] | 32
|
2019-08-30T09:50:03.000Z
|
2021-10-12T08:36:25.000Z
|
siam_tracker/models/alexnet.py
|
songheony/SPM-Tracker
|
41fd91ec42cf9072fe44d45c5bb68993f28a12ad
|
[
"MIT"
] | 3
|
2019-09-05T09:45:52.000Z
|
2020-12-02T02:42:08.000Z
|
siam_tracker/models/alexnet.py
|
songheony/SPM-Tracker
|
41fd91ec42cf9072fe44d45c5bb68993f28a12ad
|
[
"MIT"
] | 16
|
2019-09-10T09:04:53.000Z
|
2021-09-13T12:44:47.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from math import ceil
from torch import nn
from collections import OrderedDict
from .base import BackBoneCNN, NetworkInfo
class AlexNetConv(BackBoneCNN):
"""
The standard AlexNet convolutional backbone.
For more details, please refer to AlexNet paper:
"ImageNet Classification with Deep Convolutional Neural Networks", NIPS 2012
"""
num_blocks = 5
blocks = dict(
conv1=NetworkInfo(stride=4, channel=96, rf=15, size_func=lambda x: int(ceil(x / 4.0))),
conv2=NetworkInfo(stride=8, channel=256, rf=39, size_func=lambda x: int(ceil(x / 8.0))),
conv3=NetworkInfo(stride=8, channel=384, rf=55, size_func=lambda x: int(ceil(x / 8.0))),
conv4=NetworkInfo(stride=8, channel=384, rf=71, size_func=lambda x: int(ceil(x / 8.0))),
conv5=NetworkInfo(stride=8, channel=256, rf=87, size_func=lambda x: int(ceil(x / 8.0))),
)
def __init__(self, padding=True):
super(AlexNetConv, self).__init__()
if padding:
self.conv1 = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(3, 96, 11, stride=2, padding=5, bias=True)),
('relu', nn.ReLU()),
('pool', nn.MaxPool2d(3, stride=2, padding=1, dilation=1)),
('norm', nn.LocalResponseNorm(size=5, alpha=1e-4, beta=0.75, k=1.0))]))
self.conv2 = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(96, 256, 5, stride=1, padding=2, groups=2, bias=True)),
('relu', nn.ReLU()),
('pool', nn.MaxPool2d(3, stride=2, padding=1, dilation=1)),
('norm', nn.LocalResponseNorm(size=5, alpha=1e-4, beta=0.75, k=1.0))]))
self.conv3 = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(256, 384, kernel_size=3, padding=1, bias=True)),
('relu', nn.ReLU())]))
self.conv4 = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2, bias=True)),
('relu', nn.ReLU())]))
self.conv5 = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2, bias=True)),
('relu', nn.ReLU())]))
else:
self.conv1 = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(3, 96, 11, stride=2, padding=0, bias=True)),
('relu', nn.ReLU()),
('pool', nn.MaxPool2d(3, stride=2, padding=0, dilation=1)),
('norm', nn.LocalResponseNorm(size=5, alpha=1e-4, beta=0.75, k=1.0))]))
self.conv2 = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(96, 256, 5, stride=1, padding=0, groups=2, bias=True)),
('relu', nn.ReLU()),
('pool', nn.MaxPool2d(3, stride=2, padding=0, dilation=1)),
('norm', nn.LocalResponseNorm(size=5, alpha=1e-4, beta=0.75, k=1.0))]))
self.conv3 = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(256, 384, kernel_size=3, padding=0, bias=True)),
('relu', nn.ReLU())]))
self.conv4 = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(384, 384, kernel_size=3, padding=0, groups=2, bias=True)),
('relu', nn.ReLU())]))
self.conv5 = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(384, 256, kernel_size=3, padding=0, groups=2, bias=True)),
('relu', nn.ReLU())]))
| 50.742857
| 96
| 0.560811
|
04acc1265af3116ae378f05cee69520a7bf29133
| 6,936
|
py
|
Python
|
backend/falling_brook_31504/settings.py
|
crowdbotics-apps/falling-brook-31504
|
6b71c01e67f9716a207c77fc15ce7a96723d9de5
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/falling_brook_31504/settings.py
|
crowdbotics-apps/falling-brook-31504
|
6b71c01e67f9716a207c77fc15ce7a96723d9de5
|
[
"FTL",
"AML",
"RSA-MD"
] | 7
|
2021-10-18T03:00:36.000Z
|
2021-10-18T03:00:44.000Z
|
backend/falling_brook_31504/settings.py
|
crowdbotics-apps/falling-brook-31504
|
6b71c01e67f9716a207c77fc15ce7a96723d9de5
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
Django settings for falling_brook_31504 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'falling_brook_31504.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'falling_brook_31504.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| 29.641026
| 112
| 0.731257
|
be411d7942432939d76dc1f79faf68059457c4e9
| 7,042
|
py
|
Python
|
demo/demo.py
|
ChenRIT/Essentia
|
8309d52b1e46562646129dc0c481897fa352bff4
|
[
"Apache-2.0"
] | null | null | null |
demo/demo.py
|
ChenRIT/Essentia
|
8309d52b1e46562646129dc0c481897fa352bff4
|
[
"Apache-2.0"
] | null | null | null |
demo/demo.py
|
ChenRIT/Essentia
|
8309d52b1e46562646129dc0c481897fa352bff4
|
[
"Apache-2.0"
] | null | null | null |
import sys
sys.path.append('./scripts/essentia')
import json
import flask
import networkx as nx
from networkx.readwrite import json_graph
# from scripts.fsa import *
# from scripts.generate_word_alignment import *
# from scripts.preprocessing import create_valid_groups
from generate_word_alignment import nlp, make_alignment_matrix, make_alignment_matrix_with_rules
from preprocessing import create_valid_groups
from fsa import create_fsa, process_sents, generate_pairwise_paths, find_phrase_paraphrases, idx_to_node, start_state, end_state
sultan_aligner = True
def prep_graph(G_temp, names):
# Adding the names to the graph
print("names: {}".format(names))
for n, n_data in G_temp.nodes(data=True):
n_data['name'] = names[n]
# Collapsing long paths
collapse_paths(G_temp)
# The graph needs attribute name (used when mouse hover over nodes) and
# a weight attribute on each edge
G = nx.MultiDiGraph()
node2id, id2node = {}, {}
for id_, n in enumerate(G_temp):
node2id[n] = id_
id2node[id_] = n
for i in range(len(id2node)):
if id2node[i] == 0 or id2node[i] == 1:
x_pos = 100 + id2node[i] * 700
G.add_node(i, name=G_temp.nodes[id2node[i]]['name'],
group=id2node[i], size=8, fixed=True, x=x_pos, y=200)
else:
G.add_node(i, name=G_temp.nodes[id2node[i]]['name'],
group=2, size=5, fixed=False)
for (x, y) in G_temp.edges():
G.add_edge(node2id[x], node2id[y], weight=1)
return G
def collapse_paths(G):
has_collapse = True
while has_collapse:
has_collapse = False
for (x, y) in G.edges():
if G.in_degree(x) == 1 and G.out_degree(x) == 1 and \
G.in_degree(y) == 1 and G.out_degree(y) == 1:
has_collapse = True
new_node = str(x) + ' ' + str(y)
new_name = G.nodes[x]['name'] + ' ' + G.nodes[y]['name']
G.add_node(new_node, name=new_name)
for (z, _) in G.in_edges(x):
G.add_edge(z, new_node)
for (_, z) in G.out_edges(y):
G.add_edge(new_node, z)
G.remove_nodes_from([x, y])
break
return G
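# Illustrative effect (a sketch added for clarity): in a chain START -> "the" -> "quick" -> END,
# the nodes "the" and "quick" each have in/out degree 1, so they are merged into a single
# node named "the quick"; START and END are left untouched because they fail the degree check.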
def build_graph_test(sents):
# TODO: We should make a graph from sentences
# This is a dummy solution for now.
G = nx.read_adjlist('example.adjlist', create_using=nx.MultiDiGraph(),
nodetype=int)
raw_names = json.load(open('node_to_text_dic.json', 'r'))
names = {}
for node_str, values_str in raw_names.items():
node = int(node_str)
if node == 0:
names[node] = 'START'
elif node == 1:
names[node] = 'END'
else:
values = eval(values_str) if values_str != "" else {}
all_words = list(set([values[x][1].lower() for x in values]))
names[node] = '/'.join(all_words)
return G, names
def build_graph(sents):
origin_sents = sents
tk_sents = {}
for i, sent in enumerate(sents):
doc = nlp(sent)
tk_st = [tk.text for tk in doc]
tk_sents[i] = tk_st
align_matrix = None
sents_cluster = None
if sultan_aligner:
align_matrix = make_alignment_matrix(origin_sents)
#merge_chunks(align_matrix, tk_sents, origin_sents)
sents_cluster = create_valid_groups(align_matrix, tk_sents)
else:
align_matrix = make_alignment_matrix_with_rules(origin_sents)
sents_cluster = create_valid_groups(align_matrix, tk_sents)
#sents_cluster = [range(len(align_matrix))]
# print("sentence clusters: {}".format(sents_cluster))
# print(align_matrix)
fsa = create_fsa(tk_sents)
for cluster in sents_cluster:
fsa = process_sents(fsa, tk_sents, align_matrix, cluster)
raw_names = idx_to_node
names = {}
for node_str, values_str in raw_names.items():
# print("node_str: {}".format(node_str))
# print("values_str: {}".format(values_str))
node = int(node_str)
if node == start_state:
names[node] = 'START'
elif node == end_state:
names[node] = 'END'
else:
values = eval(values_str) if values_str != "" else {}
all_words = list(set([values[x][1].lower() for x in values]))
names[node] = '/'.join(all_words)
return fsa, names
def main():
print('Wrote node-link JSON data to force/force.json')
# Serve the file over http to allow for cross origin requests
app = flask.Flask(__name__, static_folder="force")
@app.route('/<path:path>')
def static_proxy(path):
return flask.send_from_directory(app.static_folder, path)
@app.route('/')
def index():
return flask.send_from_directory(app.static_folder, "index.html")
@app.route('/render', methods=['POST'])
def renderit():
sents = flask.request.form['sents'].split('\r\n')
G, names = build_graph(sents)
# writing optional expressions
#pair_to_paths = generate_pairwise_paths(G)
#print("pair_to_paths: {}".format(pair_to_paths))
#ndpair_to_exps = find_optional_exps(pair_to_paths)
#print("ndpair_to_exps: {}".format(ndpair_to_exps))
# Output alternative expressions
nd_pair_to_paras = find_phrase_paraphrases(G)
with open('./demo/force/alt_exp.txt', 'w') as out_file:
#out_file.write("Optional expressions:\n")
# out_file.write('This is a test!\n')
# out_file.write(' (1) This is another test!\n')
# out_file.write(' (*) This is nothing!\n')
count = 0
for _, v in nd_pair_to_paras.items():
# for exp in v:
# out_file.write(exp)
# out_file.write('\n')
#print("v: {}".format(v))
out_file.write("Group {}: ".format(count))
out_file.write(str(v))
out_file.write('\n\n')
count += 1
# Post-processing for demo
G = prep_graph(G, names) # merge consecutive nodes together for demo
# write json formatted data
d = json_graph.node_link_data(G) # node-link format to serialize
# write json
json.dump(d, open('./demo/force/force.json', 'w'))
return flask.send_from_directory(app.static_folder, "force.html")
# this is to avoid caching
@app.after_request
def add_header(r):
"""
Add headers to both force latest IE rendering engine or Chrome Frame,
and also to cache the rendered page for 10 minutes.
"""
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers['Cache-Control'] = 'public, max-age=0'
return r
app.run(port=8000)
if __name__ == "__main__":
main()
| 35.928571
| 128
| 0.596279
|