"""
sevenbridges-python
~~~~~~~~~~~~~~~~~~~
:copyright: 2018 Seven Bridges Genomics Inc.
:license: Apache 2.0
"""
import ssl
import logging
__version__ = "0.23.0"
from sevenbridges.api import Api
from sevenbridges.config import Config
from sevenbridges.models.invoice import Invoice
from sevenbridges.models.billing_group import (
BillingGroup, BillingGroupBreakdown
)
from sevenbridges.models.user import User
from sevenbridges.models.endpoints import Endpoints
from sevenbridges.models.project import Project
from sevenbridges.models.task import Task
from sevenbridges.models.app import App
from sevenbridges.models.dataset import Dataset
from sevenbridges.models.bulk import BulkRecord
from sevenbridges.models.team import Team, TeamMember
from sevenbridges.models.member import Member, Permissions
from sevenbridges.models.file import File
from sevenbridges.models.storage_export import Export
from sevenbridges.models.storage_import import Import
from sevenbridges.models.volume import Volume
from sevenbridges.models.marker import Marker
from sevenbridges.models.division import Division
from sevenbridges.models.automation import (
Automation, AutomationRun, AutomationPackage, AutomationMember
)
from sevenbridges.models.async_jobs import AsyncJob
from sevenbridges.models.enums import (
AppCopyStrategy, AppRawFormat, AsyncFileOperations, AsyncJobStates,
AutomationRunActions, DivisionRole, FileStorageType, ImportExportState,
TaskStatus, TransferState, VolumeAccessMode, VolumeType,
)
from sevenbridges.errors import (
SbgError, ResourceNotModified, ReadOnlyPropertyError, ValidationError,
TaskValidationError, PaginationError, BadRequest, Unauthorized, Forbidden,
NotFound, Conflict, TooManyRequests, ServerError, ServiceUnavailable,
MethodNotAllowed, RequestTimeout, LocalFileAlreadyExists,
ExecutionDetailsInvalidTaskType
)
logging.getLogger(__name__).addHandler(logging.NullHandler())
__all__ = [
'Api', 'AsyncJob', 'Automation', 'AutomationRun', 'AutomationMember',
'AutomationPackage', 'Config', 'Invoice', 'BillingGroup',
'BillingGroupBreakdown', 'User', 'Endpoints', 'Project', 'Task', 'App',
'Member', 'Permissions', 'File', 'Export', 'Import', 'Volume', 'Marker',
'Division', 'Team', 'TeamMember', 'Dataset', 'BulkRecord',
# Enums
'AppCopyStrategy', 'AppRawFormat',
'AsyncFileOperations', 'AsyncJobStates', 'AutomationRunActions',
'DivisionRole', 'FileStorageType', 'ImportExportState', 'TaskStatus',
'TransferState', 'VolumeAccessMode', 'VolumeType',
# Errors
'SbgError', 'ResourceNotModified', 'ReadOnlyPropertyError',
'ValidationError', 'TaskValidationError', 'PaginationError', 'BadRequest',
'Unauthorized', 'Forbidden', 'NotFound', 'Conflict', 'TooManyRequests',
'ServerError', 'ServiceUnavailable', 'MethodNotAllowed', 'RequestTimeout',
'LocalFileAlreadyExists', 'ExecutionDetailsInvalidTaskType'
]
required_ssl_version = (1, 0, 1)
if ssl.OPENSSL_VERSION_INFO < required_ssl_version:
raise SbgError(
'The OpenSSL version bundled with this Python build must be '
'1.0.1 or newer. Please update your environment.'
)
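# Example client setup (a minimal sketch; the endpoint URL and token below are
# placeholders, not part of this module):
#
#   import sevenbridges as sbg
#   api = sbg.Api(url='https://api.sbgenomics.com/v2', token='<auth token>')
#   for project in api.projects.query(limit=10):
#       print(project.id, project.name)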
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Data operations, will be used in train.py and eval.py
"""
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as C2
import mindspore.dataset.vision.c_transforms as C
def create_dataset(dataset_path, do_train, rank, group_size,
num_parallel_workers=8, batch_size=128,
drop_remainder=True, shuffle=True,
cutout=False, cutout_length=56):
"""
create a train or eval dataset
Args:
dataset_path(string): the path of dataset.
do_train(bool): whether dataset is used for train or eval.
rank(int): the shard ID within group_size shards.
group_size(int): the number of shards the dataset should be divided into.
num_parallel_workers(int): the number of parallel workers (Default:8).
batch_size(int): the batch size for dataset (Default:128).
drop_remainder(bool): whether to drop the remainder in dataset (Default:True).
shuffle(bool): whether to shuffle the dataset (Default:True).
cutout(bool): whether to apply cutout to the data during training (Default:False).
cutout_length(int): the length to cutout data when cutout is True (Default:56).
Returns:
dataset
"""
if group_size == 1 or not do_train:
data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=num_parallel_workers,
shuffle=shuffle)
print(dataset_path)
else:
data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=num_parallel_workers,
shuffle=shuffle,
num_shards=group_size, shard_id=rank)
print(dataset_path, ' group_size = ', group_size, ' rank = ', rank)
# define transform operations
if do_train:
trans = [
C.RandomCropDecodeResize(224),
]
if cutout:
trans += [C.CutOut(length=cutout_length, num_patches=1)]
trans += [
C.RandomHorizontalFlip(prob=0.5),
]
trans += [C.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)]
else:
trans = [
C.Decode(),
C.Resize(256),
C.CenterCrop(224)
]
trans += [
C.Rescale(1.0 / 255.0, 0.0),
C.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
C.HWC2CHW(),
C2.TypeCast(mstype.float32)
]
type_cast_op = C2.TypeCast(mstype.int32)
data_set = data_set.map(operations=trans, input_columns="image",
num_parallel_workers=num_parallel_workers)
data_set = data_set.map(operations=type_cast_op, input_columns="label",
num_parallel_workers=num_parallel_workers)
# apply batch operations
data_set = data_set.batch(batch_size, drop_remainder=drop_remainder)
return data_set
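# Example (a minimal sketch; the ImageNet-style folder path and single-device
# settings below are assumptions, not part of this module):
#
#   train_ds = create_dataset("/path/to/imagenet/train", do_train=True,
#                             rank=0, group_size=1, batch_size=128)
#   print("batches per epoch:", train_ds.get_dataset_size())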
|
from flask_wtf import FlaskForm
from wtforms import SubmitField, StringField
from wtforms.validators import DataRequired
class PostSearchForm(FlaskForm):
text = StringField("Search")
submit = SubmitField('Search')
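# Example view using this form (a minimal sketch; the Flask app, route,
# template and Post model are assumptions, not part of this module):
#
#   @app.route('/search', methods=['GET', 'POST'])
#   def search():
#       form = PostSearchForm()
#       posts = []
#       if form.validate_on_submit():
#           posts = Post.query.filter(Post.title.contains(form.text.data)).all()
#       return render_template('search.html', form=form, posts=posts)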
|
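# get_sql builds a backend-specific statement (sqlite3, pgsql or mssql) that,
# for every date in the `dates` CTE, pivots the daily ar_totals sums into one
# column per gl_source_code. It returns (sql, params, fmt), where fmt is a
# display format string for the caller.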
def get_sql(cte, params, company, conn, ledger_id, source_codes):
common = f"""
(
SELECT b.source_code, SUM(a.tran_day) AS tran_day
FROM {company}.ar_totals a
JOIN {company}.gl_source_codes b ON b.row_id = a.source_code_id
WHERE a.deleted_id = 0 AND a.tran_date = dates.date
AND a.ledger_row_id = {conn.constants.param_style}
AND b.source_code IN ({', '.join([conn.constants.param_style]*len(source_codes))})
GROUP BY b.source_code -- to aggregate locations/functions
) as day
"""
if conn.constants.servertype == 'sqlite3':
sql = cte + f"""
SELECT
dates.date AS "[DATE]",
{', '.join(f'''
COALESCE((SELECT SUM(CASE WHEN day.source_code = '{col_name}' THEN
COALESCE(day.tran_day, 0)
ELSE 0 END) FROM {common}), 0) AS "{col_name} [REAL2]"
''' for col_name in source_codes) }
FROM dates
ORDER BY dates.date
"""
params += (ledger_id, *source_codes) * len(source_codes)
elif conn.constants.servertype == 'pgsql':
sql = cte + f"""
SELECT
dates.date AS "[DATE]",
{', '.join(f'COALESCE(a.{col_name}, 0) AS "[REAL2]"' for col_name in source_codes)}
FROM dates
JOIN LATERAL
(SELECT
{', '.join(f'''
SUM(CASE WHEN day.source_code = '{col_name}' THEN
COALESCE(day.tran_day, 0)
ELSE 0 END) AS {col_name}
''' for col_name in source_codes) }
FROM {common}
) AS a
ON true
ORDER BY dates.date
"""
params += (ledger_id, *source_codes)
elif conn.constants.servertype == 'mssql':
sql = cte + f"""
SELECT
dates.date AS "[DATE]",
{', '.join(f'COALESCE(a.{col_name}, 0) AS "[REAL2]"' for col_name in source_codes)}
FROM dates
CROSS APPLY
(SELECT
{', '.join(f'''
SUM(CASE WHEN day.source_code = '{col_name}' THEN
COALESCE(day.tran_day, 0)
ELSE 0 END) AS {col_name}
''' for col_name in source_codes) }
FROM {common}
) AS a
ORDER BY dates.date
"""
params += (ledger_id, *source_codes)
fmt = '{:%d-%m} : {:>12.2f}{:>12.2f}'
return sql, params, fmt
|
"""
setup.py for auth-service.
For reference see
https://packaging.python.org/guides/distributing-packages-using-setuptools/
"""
from pathlib import Path
from setuptools import setup, find_packages
HERE = Path(__file__).parent.absolute()
with (HERE / 'README.md').open('rt') as fh:
LONG_DESCRIPTION = fh.read().strip()
REQUIREMENTS: dict = {
'core': [
'aiohttp',
'asyncio_extras; python_version<"3.7"',
'python-keycloak-client',
'tornado',
],
'test': [
],
'dev': [
],
}
setup(
name='auth-service',
version="0.0.1",
author='Phil Elson',
author_email='philip.elson@cern.ch',
description='A simple auth service that can be interrogated by NGINX',
packages=find_packages(),
python_requires='~=3.6',
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
install_requires=REQUIREMENTS['core'],
extras_require={
**REQUIREMENTS,
# The 'dev' extra is the union of 'test' and 'doc' (when defined), with an
# option to have explicit development dependencies listed.
'dev': [req
for extra in ['dev', 'test', 'doc']
for req in REQUIREMENTS.get(extra, [])],
# The 'all' extra is the union of all requirements.
'all': [req for reqs in REQUIREMENTS.values() for req in reqs],
},
)
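# Typical local installs (a sketch; run from the project root):
#   pip install -e ".[dev]"   # core plus dev/test extras
#   pip install ".[all]"      # every optional dependency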
|
# Copyright 2020 Miljenko Šuflaj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# PATHS ARE RELATIVE TO THE PROJECT ROOT
#
# This doesn't bother us since we'll change our directory to the project
# root anyways.
EMBEDDINGS_PATH = "data/embeddings_300-d.txt"
EMBEDDINGS_MATRIX_PATH = "data/embedding-matrix.npy"
TRAIN_CSV_PATH = "data/train.csv"
VAL_CSV_PATH = "data/val.csv"
TEST_CSV_PATH = "data/test.csv"
DEFAULT_SAVE_TASK2 = "results/task_2"
DEFAULT_SAVE_TASK3 = "results/task_3"
DEFAULT_SAVE_TASK4 = "results/task_4"
DEFAULT_SAVE_TASK4_1 = os.path.join(DEFAULT_SAVE_TASK4, "part-1")
DEFAULT_SAVE_TASK4_2 = os.path.join(DEFAULT_SAVE_TASK4, "part-2")
|
#!/usr/bin/env python3
"""
Telegram message listener that launches commands
Usage::
./mthook.py api_id api_hash session "chat_id,command,regex" "chat_id,command,regex" ...
"""
import re
import requests
import subprocess
import sys
from datetime import datetime
from pyrogram import Client, filters
def handle(fs, message):
id = ''
if hasattr(message, 'chat') and hasattr(message.chat, 'username'):
id = message.chat.username
elif hasattr(message, 'chat') and hasattr(message.chat, 'id'):
id = message.chat.id
else:
return
if id not in fs:
return
filter = fs[id]
match = filter['regex'].search(message.text)
if match is None:
return
hook = filter['hook']
if hook.startswith('http'):
args = list(match.groups())
url = hook + ','.join(args)
print(datetime.now().time(), url)
requests.get(url)
else:
args = [hook, *match.groups()]
print(datetime.now().time(),' '.join(args))
subprocess.run(args)
if len(sys.argv) == 2 and sys.argv[1] == 'version':
print('1.2.9')
elif len(sys.argv) < 5:
print('Usage: ./mthook.py api_id api_hash session "chat_id,command,regex" "chat_id,command,regex" ...')
else:
# values from my.telegram.org
api_id = sys.argv[1]
api_hash = sys.argv[2]
app = Client(sys.argv[3], api_id, api_hash)
print(datetime.now().time(), 'mthook started')
fs = {}
for arg in sys.argv[4:]:
vals = arg.split(',')
reg = re.compile(vals[2])
fs[vals[0]] = {'hook': vals[1], 'regex': reg}
@app.on_message(filters.text)
def onMessage(client, message):
handle(fs, message)
app.run()
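# Example invocation (a sketch; the credentials, chat name and regex below are
# hypothetical):
#   ./mthook.py 12345 0123456789abcdef mysession \
#       "alerts_channel,https://example.com/hook?v=,^price:(\S+)"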
|
from django.conf.urls import url, include
from django.contrib import admin
from liq_ytdl import views as liq_ytdl_views
from liq_ffmpeg import views as liq_ffmpeg_views
from liq_scdl import views as liq_scdl_views
from liq_wget import views as liq_wget_views
from liquid import views as liquid_views
"""
ALL URLS CONVENE HERE
"""
urlpatterns = [
url(r'^login', liquid_views.login_user, name='login'),
url(r'^signup', liquid_views.signup, name='initialize_user'),
url(r'^webtorrent', liquid_views.webtor, name='webtor'),
url(r'^ffmpeg$', liq_ffmpeg_views.FFMPEG_submit, name='ffmpeg'),
# url(r'^soundcloud', liquid_views., name='ffmpeg'),
url(r'^ffmpeg-submit', liq_ffmpeg_views.FFMPEG_submit, name='ffmpeg_submit'),
url(r'^soundcloud-submit', liq_scdl_views.soundcloud_submit, name='soundcloud_submit'),
url(r'^youtubedl-submit', liq_ytdl_views.youtube_dl_submit, name='youtube_dl_submit'),
url(r'^wget-submit', liq_wget_views.wget_submit, name='wget_submit'),
url(r'youtubedl/', include('liq_ytdl.urls')),
url(r'^cloudcmd', include('liq_cloud_cmd.urls')),
url(r'^download-manager/', include('liq_dl_manager.urls')),
# Settings URLs
url(r'^settings/get-settings', liquid_views.get_liquid_dl_settings,
name='get_liquid_dl_settings'),
url(r'^settings/youtubedl/save', liq_ytdl_views.update_youtube_dl_settings,
name='update_youtube_dl_settings'),
url(r'^settings/liquid-dl/save', liquid_views.update_liquid_dl_settings,
name='update_liquid_dl_settings'),
url(r'^settings/liquid-dl/update-dependencies', liquid_views.update_dependencies,
name='update_dependencies'),
url(r'^settings/liquid-dl/api-key-reset', liquid_views.api_key_rejection,
name='api_key_reset'),
url(r'^settings/liquid-dl/update-default-directory', liquid_views.update_default_directory,
name='update_default_directory'),
# Torrent URLS
url(r'^torrents/torrent/receive/torrents', liquid_views.update_torrent,
name='receive_torrents'),
url(r'^torrents/torrent/get/torrents', liquid_views.update_torrent,
name='get_torrents'),
]
|
def application(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
# WSGI response bodies must be iterables of bytes under Python 3.
return [b"<h1 style='color:blue'>Hello There!</h1>"]
|
# -*- coding: utf-8 -*-
{
'\n\nThank you!': '\n\nThank you!',
'\n\nWe will wait and let you know when your payment is confirmed.': '\n\nWe will wait and let you know when your payment is confirmed.',
'\n- %s from %s to %s': '\n- %s from %s to %s',
'\nAmount: R$%.2f': '\nAmount: R$%.2f',
"\nSomething happened and we couldn't verify your payment.\n": "\nSomething happened and we couldn't verify your payment.\n",
'\nThank you for your purchase!': '\nThank you for your purchase!',
'\nThank you!': '\nThank you!',
'\nThank you.': '\nThank you.',
'\nThe total amount was R$%.2f.': '\nThe total amount was R$%.2f.',
'\nWe will wait and let you know when your payment is confirmed.\n': '\nWe will wait and let you know when your payment is confirmed.\n',
'\nYou can check your payment history after login in to your profile.': '\nYou can check your payment history after login in to your profile.',
'!langcode!': 'cs-cz',
'!langname!': 'čeština',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': 'Kolonka "Upravit" je nepovinný výraz, například "pole1=\'nováhodnota\'". Výsledky databázového JOINu nemůžete mazat ani upravovat.',
'"User Exception" debug mode. An error ticket could be issued!': '"User Exception" debug mode. An error ticket could be issued!',
'%%{Row} in Table': '%%{řádek} v tabulce',
'%%{Row} selected': 'označených %%{řádek}',
'%(month)s %(day)sth': '%(month)s %(day)sth',
'%02d/%02d': '%02d/%02d',
'%B %d, %Y': '%B %d, %Y',
'%d%% OFF': '%d%% OFF',
'%d/%d': '%d/%d',
'%m-%d-%Y': '%m-%d-%Y',
'%s %%{row} deleted': '%s smazaných %%{záznam}',
'%s %%{row} updated': '%s upravených %%{záznam}',
'%s %dth': '%s %dth',
'%s Certificate': '%s Certificate',
'%s of %s': '%s of %s',
'%s selected': '%s označených',
'%Y-%m-%d': '%d.%m.%Y',
'%Y-%m-%d %H:%M:%S': '%d.%m.%Y %H:%M:%S',
'(requires internet access)': '(vyžaduje připojení k internetu)',
'(requires internet access, experimental)': '(requires internet access, experimental)',
'(something like "it-it")': '(například "cs-cs")',
'- %s from %s to %s': '- %s from %s to %s',
'- %s from %s to %s\n': '- %s from %s to %s\n',
'?': '?',
'@markmin\x01(file **gluon/contrib/plural_rules/%s.py** is not found)': '(soubor **gluon/contrib/plural_rules/%s.py** nenalezen)',
'@markmin\x01Searching: **%s** %%{file}': 'Hledání: **%s** %%{soubor}',
'About': 'O programu',
'About application': 'O aplikaci',
'Access': 'Access',
'Access Control': 'Řízení přístupu',
'Access the /appadmin to make at least one teacher user:': 'Access the /appadmin to make at least one teacher user:',
'Actions': 'Actions',
'Add breakpoint': 'Přidat bod přerušení',
'Add more': 'Add more',
'Additional code for your application': 'Další kód pro Vaši aplikaci',
'Admin design page': 'Admin design page',
'Admin language': 'jazyk rozhraní',
'Administrative interface': 'pro administrátorské rozhraní klikněte sem',
'Administrative Interface': 'Administrátorské rozhraní',
'administrative interface': 'rozhraní pro správu',
'Administrator Password:': 'Administrátorské heslo:',
'Ajax Recipes': 'Recepty s ajaxem',
'All certificates sent!': 'All certificates sent!',
'All Classes': 'All Classes',
'Alternative A': 'Alternative A',
'Alternative B': 'Alternative B',
'Alternative C': 'Alternative C',
'Alternative D': 'Alternative D',
'Amount': 'Amount',
'Amount: R$%.2f': 'Amount: R$%.2f',
'Amount: R$%.2f\n': 'Amount: R$%.2f\n',
'An error occured, please %s the page': 'An error occured, please %s the page',
'and enroll!': 'and enroll!',
'and go to': 'and go to',
'and rename it:': 'a přejmenovat na:',
'Announcements': 'Announcements',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'appadmin je zakázaná bez zabezpečeného spojení',
'Application': 'Application',
'application "%s" uninstalled': 'application "%s" odinstalována',
'application compiled': 'aplikace zkompilována',
'Application name:': 'Název aplikace:',
'are not used': 'nepoužita',
'are not used yet': 'ještě nepoužita',
'Are you sure you want to delete this object?': 'Opravdu chcete odstranit tento objekt?',
'Are you sure you want to uninstall application "%s"?': 'Opravdu chcete odinstalovat aplikaci "%s"?',
'arguments': 'arguments',
'at char %s': 'at char %s',
'at line %s': 'at line %s',
'ATTENTION:': 'ATTENTION:',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.',
'Available Databases and Tables': 'Dostupné databáze a tabulky',
'Available Until': 'Available Until',
'back': 'zpět',
'Back to wizard': 'Back to wizard',
'Banner': 'Banner',
'Basics': 'Basics',
'Begin': 'Začít',
'Body': 'Body',
'breakpoint': 'bod přerušení',
'Breakpoints': 'Body přerušení',
'breakpoints': 'body přerušení',
'Buy Now': 'Buy Now',
'Buy this book': 'Koupit web2py knihu',
'Cache': 'Cache',
'cache': 'cache',
'Cache Cleared': 'Cache Cleared',
'Cache Keys': 'Klíče cache',
'cache, errors and sessions cleaned': 'cache, chyby a relace byly pročištěny',
'Calendar': 'Calendar',
'can be a git repo': 'může to být git repo',
'Cancel': 'Storno',
'Cannot be empty': 'Nemůže být prázdné',
'Certificates': 'Certificates',
'Change admin password': 'Změnit heslo pro správu aplikací',
'Change Admin Password': 'Změnit heslo pro správu',
'Change password': 'Změna hesla',
'check all': 'vše označit',
'Check for upgrades': 'Zkusit aktualizovat',
'Check to delete': 'Označit ke smazání',
'Check to delete:': 'Označit ke smazání:',
'Checking for upgrades...': 'Zjišťuji, zda jsou k dispozici aktualizace...',
'Class %s': 'Class %s',
'Class Id': 'Class Id',
'Classes': 'Classes',
'Clean': 'Pročistit',
'Clear CACHE?': 'Vymazat CACHE?',
'Clear DISK': 'Vymazat DISK',
'Clear RAM': 'Vymazat RAM',
'Click row to expand traceback': 'Pro rozbalení stopy, klikněte na řádek',
'Click row to view a ticket': 'Pro zobrazení chyby (ticketu), klikněte na řádku...',
'Client IP': 'IP adresa klienta',
'Closed': 'Closed',
'code': 'code',
'Code listing': 'Code listing',
'collapse/expand all': 'vše sbalit/rozbalit',
'Community': 'Komunita',
'Compile': 'Zkompilovat',
'compiled application removed': 'zkompilovaná aplikace smazána',
'Components and Plugins': 'Komponenty a zásuvné moduly',
'Condition': 'Podmínka',
'Confirmation Time': 'Confirmation Time',
'Confirmed': 'Confirmed',
'Contact': 'Contact',
'continue': 'continue',
'Continue Shopping': 'Continue Shopping',
'Controller': 'Kontrolér (Controller)',
'Controllers': 'Kontroléry',
'controllers': 'kontroléry',
'Copyright': 'Copyright',
'Correct Alternative': 'Correct Alternative',
'Count': 'Počet',
'Course': 'Course',
'Course Announcements': 'Course Announcements',
'Course Id': 'Course Id',
"Course's end": "Course's end",
"Course's start": "Course's start",
'Courses': 'Courses',
'Create': 'Vytvořit',
'create file with filename:': 'vytvořit soubor s názvem:',
'created by': 'vytvořil',
'Created By': 'Vytvořeno - kým',
'Created On': 'Vytvořeno - kdy',
'crontab': 'crontab',
'Current request': 'Aktuální požadavek',
'Current response': 'Aktuální odpověď',
'Current session': 'Aktuální relace',
'currently running': 'právě běží',
'currently saved or': 'uloženo nebo',
'customize me!': 'upravte mě!',
'DASHBOARD': 'DASHBOARD',
'Dashboard': 'Dashboard',
'data uploaded': 'data nahrána',
'Database': 'Rozhraní databáze',
'Database %s select': 'databáze %s výběr',
'Database administration': 'Database administration',
'database administration': 'správa databáze',
'Database Administration (appadmin)': 'Database Administration (appadmin)',
'Date': 'Date',
'Date and Time': 'Datum a čas',
'day': 'den',
'db': 'db',
'DB Model': 'Databázový model',
'Debug': 'Ladění',
'defines tables': 'defines tables',
'Delete': 'Smazat',
'delete': 'smazat',
'delete all checked': 'smazat vše označené',
'delete plugin': 'delete plugin',
'Delete this file (you will be asked to confirm deletion)': 'Smazat tento soubor (budete požádán o potvrzení mazání)',
'Delete:': 'Smazat:',
'deleted after first hit': 'smazat po prvním dosažení',
'Demo': 'Demo',
'Denied': 'Denied',
'Deploy': 'Nahrát',
'Deploy on Google App Engine': 'Nahrát na Google App Engine',
'Deploy to OpenShift': 'Nahrát na OpenShift',
'Deployment Recipes': 'Postupy pro deployment',
'Description': 'Popis',
'design': 'návrh',
'Detailed traceback description': 'Podrobný výpis prostředí',
'Details': 'Details',
'details': 'podrobnosti',
'direction: ltr': 'směr: ltr',
'Disable': 'Zablokovat',
'Discount': 'Discount',
'DISK': 'DISK',
'Disk Cache Keys': 'Klíče diskové cache',
'Disk Cleared': 'Disk smazán',
'docs': 'dokumentace',
'Documentation': 'Dokumentace',
"Don't know what to do?": 'Nevíte kudy kam?',
'done!': 'hotovo!',
'Download': 'Stáhnout',
'download layouts': 'stáhnout moduly rozvržení stránky',
'download plugins': 'stáhnout zásuvné moduly',
'E-mail': 'E-mail',
'Edit': 'Upravit',
'edit all': 'edit all',
'Edit application': 'Správa aplikace',
'edit controller': 'edit controller',
'Edit current record': 'Upravit aktuální záznam',
'Edit Profile': 'Upravit profil',
'edit views:': 'upravit pohled:',
'Editing file "%s"': 'Úprava souboru "%s"',
'Editing Language file': 'Úprava jazykového souboru',
'Editing Plural Forms File': 'Editing Plural Forms File',
'Email and SMS': 'Email a SMS',
'Enable': 'Odblokovat',
'End': 'End',
'End date': 'End date',
'End Date': 'End Date',
'Enroll now!': 'Enroll now!',
'enter a number between %(min)g and %(max)g': 'zadejte číslo mezi %(min)g a %(max)g',
'enter an integer between %(min)g and %(max)g': 'zadejte celé číslo mezi %(min)g a %(max)g',
'Enter the auth_membership table and associate your new user to the "Teacher" group': 'Enter the auth_membership table and associate your new user to the "Teacher" group',
'Enter the auth_user table and create a new record': 'Enter the auth_user table and create a new record',
'Enter with your teacher user and create your course, classes and lessons': 'Enter with your teacher user and create your course, classes and lessons',
'Error': 'Chyba',
'Error logs for "%(app)s"': 'Seznam výskytu chyb pro aplikaci "%(app)s"',
'Error snapshot': 'Snapshot chyby',
'Error ticket': 'Ticket chyby',
'Errors': 'Chyby',
'Erros no formulário!': 'Erros no formulário!',
'Exception %(extype)s: %(exvalue)s': 'Exception %(extype)s: %(exvalue)s',
'Exception %s': 'Exception %s',
'Exception instance attributes': 'Prvky instance výjimky',
'Expand Abbreviation': 'Expand Abbreviation',
'export as csv file': 'exportovat do .csv souboru',
'exposes': 'vystavuje',
'exposes:': 'vystavuje funkce:',
'extends': 'rozšiřuje',
'failed to compile file because:': 'soubor se nepodařilo zkompilovat, protože:',
'FAQ': 'Často kladené dotazy',
'File': 'Soubor',
'file': 'soubor',
'file "%(filename)s" created': 'file "%(filename)s" created',
'file saved on %(time)s': 'soubor uložen %(time)s',
'file saved on %s': 'soubor uložen %s',
'Filename': 'Název souboru',
'filter': 'filtr',
'Find Next': 'Najít další',
'Find Previous': 'Najít předchozí',
'First name': 'Křestní jméno',
'First, import a template and signature!': 'First, import a template and signature!',
'Forgot username?': 'Zapomněl jste svoje přihlašovací jméno?',
'forgot username?': 'zapomněl jste svoje přihlašovací jméno?',
'Form has errors!': 'Form has errors!',
'Forms and Validators': 'Formuláře a validátory',
'Forum': 'Forum',
'Frames': 'Frames',
'FREE': 'FREE',
'Free Applications': 'Aplikace zdarma',
'from %s to %s': 'from %s to %s',
'FULL!': 'FULL!',
'Functions with no doctests will result in [passed] tests.': 'Functions with no doctests will result in [passed] tests.',
'Generate': 'Vytvořit',
'Generate Certificate': 'Generate Certificate',
'Get from URL:': 'Stáhnout z internetu:',
'Git Pull': 'Git Pull',
'Git Push': 'Git Push',
'Globals##debug': 'Globální proměnné',
'go!': 'OK!',
'Goto': 'Goto',
'graph model': 'graph model',
'Graph Model': 'Graph Model',
'Group %(group_id)s created': 'Skupina %(group_id)s vytvořena',
'Group ID': 'ID skupiny',
'Groups': 'Skupiny',
'has satisfactorily completed the course': 'has satisfactorily completed the course',
'Hello World': 'Ahoj světe',
'Help': 'Nápověda',
'Hide/Show Translated strings': 'Skrýt/Zobrazit přeložené texty',
'Hits': 'Kolikrát dosaženo',
'Home': 'Domovská stránka',
'honored only if the expression evaluates to true': 'brát v potaz jen když se tato podmínka vyhodnotí kladně',
'hours': 'hours',
'How did you get here?': 'Jak jste se sem vlastně dostal?',
'Icon': 'Icon',
'If start the upgrade, be patient, it may take a while to download': 'If start the upgrade, be patient, it may take a while to download',
'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\nA green title indicates that all tests (if defined) passed. In this case test results are not shown.': 'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\nA green title indicates that all tests (if defined) passed. In this case test results are not shown.',
'If you want to test, just': 'If you want to test, just',
"If you're sure you paid the order, please contact us. Otherwise, try to pay again later.": "If you're sure you paid the order, please contact us. Otherwise, try to pay again later.",
"If you're sure you paid the order, please contact us. Otherwise, try to pay again later.\n": "If you're sure you paid the order, please contact us. Otherwise, try to pay again later.\n",
'import': 'import',
'Import/Export': 'Import/Export',
'in a total of %d hours.': 'in a total of %d hours.',
'In Progress': 'In Progress',
'includes': 'zahrnuje',
'Index': 'Index',
'insert new': 'vložit nový záznam ',
'insert new %s': 'vložit nový záznam %s',
'inspect attributes': 'inspect attributes',
'Install': 'Instalovat',
'Installed applications': 'Nainstalované aplikace',
'Interaction at %s line %s': 'Interakce v %s, na řádce %s',
'Interactive console': 'Interaktivní příkazová řádka',
'Interested? Submit your email below to be notified for the next open class.': 'Interested? Submit your email below to be notified for the next open class.',
'Interests': 'Interests',
'Internal State': 'Vnitřní stav',
'Introduction': 'Úvod',
'Invalid email': 'Neplatný email',
'Invalid password': 'Nesprávné heslo',
'invalid password.': 'neplatné heslo',
'Invalid Query': 'Neplatný dotaz',
'invalid request': 'Neplatný požadavek',
'Is Active': 'Je aktivní',
'It is %s %%{day} today.': 'Dnes je to %s %%{den}.',
'Key': 'Klíč',
'Key bindings': 'Vazby klíčů',
'Key bindings for ZenCoding Plugin': 'Key bindings for ZenCoding Plugin',
'languages': 'jazyky',
'Languages': 'Jazyky',
'Last name': 'Příjmení',
'Last saved on:': 'Naposledy uloženo:',
'Layout': 'Rozvržení stránky (layout)',
'Layout Plugins': 'Moduly rozvržení stránky (Layout Plugins)',
'Layouts': 'Rozvržení stránek',
'Lesson': 'Lesson',
'Lesson Id': 'Lesson Id',
'Lesson scheduled for:': 'Lesson scheduled for:',
'Lesson Type': 'Lesson Type',
'License for': 'Licence pro',
'Limit date:': 'Limit date:',
'limited to': 'limited to',
'Line number': 'Číslo řádku',
'LineNo': 'Č.řádku',
'Live Chat': 'Online pokec',
'loading...': 'nahrávám...',
'locals': 'locals',
'Locals##debug': 'Lokální proměnné',
'Log In': 'Log In',
'Logged in': 'Přihlášení proběhlo úspěšně',
'Logged out': 'Odhlášení proběhlo úspěšně',
'login': 'přihlásit se',
'Login': 'Přihlásit se',
'Login to the Administrative Interface': 'Přihlásit se do Správce aplikací',
'logout': 'odhlásit se',
'Logout': 'Odhlásit se',
'Lost Password': 'Zapomněl jste heslo',
'Lost password?': 'Zapomněl jste heslo?',
'lost password?': 'zapomněl jste heslo?',
'Manage': 'Manage',
'Manage %(action)s': 'Manage %(action)s',
'Manage Access Control': 'Manage Access Control',
'Manage Cache': 'Manage Cache',
'Manage courses': 'Manage courses',
'Max Students': 'Max Students',
'Max. Students': 'Max. Students',
'Memberships': 'Memberships',
'Menu Model': 'Model rozbalovací nabídky',
'Models': 'Modely',
'models': 'modely',
'Modified By': 'Změněno - kým',
'Modified On': 'Změněno - kdy',
'Module': 'Module',
'Modules': 'Moduly',
'modules': 'moduly',
'My Calendar': 'My Calendar',
'My Certificates': 'My Certificates',
'My Courses': 'My Courses',
'My courses': 'My courses',
'My Sites': 'Správa aplikací',
'My Work': 'My Work',
'Name': 'Jméno',
'New': 'New',
'New announcement': 'New announcement',
'New announcement on %s class': 'New announcement on %s class',
'new application "%s" created': 'nová aplikace "%s" vytvořena',
'New application wizard': 'Nový průvodce aplikací',
'New Application Wizard': 'Nový průvodce aplikací',
'New Class': 'New Class',
'New Course': 'New Course',
'New lesson': 'New lesson',
'New module': 'New module',
'New password': 'Nové heslo',
'New Record': 'Nový záznam',
'new record inserted': 'nový záznam byl založen',
'New simple application': 'Vytvořit primitivní aplikaci',
'New Topic': 'New Topic',
'New topic': 'New topic',
'next': 'next',
'next %s rows': 'next %s rows',
'next 100 rows': 'dalších 100 řádků',
'No announcements yet!': 'No announcements yet!',
'No databases in this application': 'V této aplikaci nejsou žádné databáze',
'No Interaction yet': 'Ještě žádná interakce nenastala',
'No ticket_storage.txt found under /private folder': 'Soubor ticket_storage.txt v adresáři /private nenalezen',
"Now, you won't be able to see the lessons anymore. But the forum, announcements and other resources are still available.": "Now, you won't be able to see the lessons anymore. But the forum, announcements and other resources are still available.",
'Object or table name': 'Objekt či tabulka',
'Old password': 'Původní heslo',
'online designer': 'online návrhář',
'Online examples': 'Příklady online',
'Open classes': 'Open classes',
'Open Enrollment': 'Open Enrollment',
'Open new app in new window': 'Open new app in new window',
'or alternatively': 'or alternatively',
'Or Get from URL:': 'Or Get from URL:',
'or import from csv file': 'nebo importovat z .csv souboru',
'Order': 'Order',
'Order Date': 'Order Date',
'Order date': 'Order date',
'Order details': 'Order details',
'Order Id': 'Order Id',
'Order Nº': 'Order Nº',
'Origin': 'Původ',
'Original/Translation': 'Originál/Překlad',
'Other Plugins': 'Ostatní moduly',
'Other Recipes': 'Ostatní zásuvné moduly',
'Overview': 'Přehled',
'Overwrite installed app': 'Přepsat instalovanou aplikaci',
'Owner': 'Owner',
'Pack all': 'Zabalit',
'Pack compiled': 'Zabalit zkompilované',
'pack plugin': 'pack plugin',
'Password': 'Heslo',
'password': 'heslo',
"Password fields don't match": 'Hesla se neshodují',
'Payment completed! Congratulations for your purchase!': 'Payment completed! Congratulations for your purchase!',
'Payment confirmed!': 'Payment confirmed!',
'Payment History': 'Payment History',
'Peeking at file': 'Peeking at file',
'Pending': 'Pending',
'Pending Id': 'Pending Id',
'Permission': 'Permission',
'Permissions': 'Permissions',
'Place': 'Place',
'Please': 'Prosím',
'Please, select which type of lesson you want to create.': 'Please, select which type of lesson you want to create.',
'Plugin "%s" in application': 'Plugin "%s" in application',
'plugins': 'zásuvné moduly',
'Plugins': 'Zásuvné moduly',
'Plural Form #%s': 'Plural Form #%s',
'Plural-Forms:': 'Množná čísla:',
'Post': 'Post',
'Powered by': 'Poháněno',
'Preface': 'Předmluva',
'Preview': 'Preview',
'previous %s rows': 'previous %s rows',
'previous 100 rows': 'předchozích 100 řádků',
'Price': 'Price',
'Private files': 'Soukromé soubory',
'private files': 'soukromé soubory',
'Products': 'Products',
'Professor': 'Professor',
'profile': 'profil',
'Project Progress': 'Vývoj projektu',
'pygraphviz library not found': 'pygraphviz library not found',
'Python': 'Python',
'Query:': 'Dotaz:',
'Question': 'Question',
'Quick Examples': 'Krátké příklady',
'RAM': 'RAM',
'RAM Cache Keys': 'Klíče RAM Cache',
'Ram Cleared': 'RAM smazána',
'Readme': 'Nápověda',
'Recipes': 'Postupy jak na to',
'Record': 'Záznam',
'record does not exist': 'záznam neexistuje',
'Record ID': 'ID záznamu',
'Record id': 'id záznamu',
'refresh': 'obnovte',
'register': 'registrovat',
'Register': 'Zaregistrovat se',
'register a normal user': 'register a normal user',
'Registered On': 'Registered On',
'Registration identifier': 'Registrační identifikátor',
'Registration key': 'Registrační klíč',
'Release Date': 'Release Date',
'reload': 'reload',
'Reload routes': 'Znovu nahrát cesty',
'Remember me (for 30 days)': 'Zapamatovat na 30 dní',
'Remove compiled': 'Odstranit zkompilované',
'Removed Breakpoint on %s at line %s': 'Bod přerušení smazán - soubor %s na řádce %s',
'Replace': 'Zaměnit',
'Replace All': 'Zaměnit vše',
'Replies': 'Replies',
'Reply this user': 'Reply this user',
'request': 'request',
'Reset Password key': 'Reset registračního klíče',
'response': 'response',
'restart': 'restart',
'restore': 'obnovit',
'Retrieve username': 'Získat přihlašovací jméno',
'return': 'return',
'revert': 'vrátit se k původnímu',
'Role': 'Role',
'Roles': 'Roles',
'Rows in Table': 'Záznamy v tabulce',
'Rows selected': 'Záznamů zobrazeno',
'rules are not defined': 'pravidla nejsou definována',
"Run tests in this file (to run all files, you may also use the button labelled 'test')": "Spustí testy v tomto souboru (ke spuštění všech testů, použijte tlačítko 'test')",
'Running on %s': 'Běží na %s',
'said': 'said',
'Save': 'Uložit',
'Save file:': 'Save file:',
'Save model as...': 'Save model as...',
'Save via Ajax': 'Uložit pomocí Ajaxu',
'Saved file hash:': 'hash uloženého souboru:',
'Schedule Date': 'Schedule Date',
'Schedule event': 'Schedule event',
'Semantic': 'Modul semantic',
'Send to Students': 'Send to Students',
'Services': 'Služby',
'session': 'session',
'session expired': 'session expired',
'Set Breakpoint on %s at line %s: %s': 'Bod přerušení nastaven v souboru %s na řádce %s: %s',
'Settings': 'Settings',
'shell': 'příkazová řádka',
'Shopping Cart': 'Shopping Cart',
'Short Description': 'Short Description',
'Sign Up': 'Sign Up',
'Signature': 'Signature',
'Singular Form': 'Singular Form',
'Site': 'Správa aplikací',
'Size of cache:': 'Velikost cache:',
'skip to generate': 'skip to generate',
"Something happened and we couldn't verify your payment.": "Something happened and we couldn't verify your payment.",
'Something went wrong!': 'Something went wrong!',
'Sorry! Something bad happened!': 'Sorry! Something bad happened!',
'Sorry, could not find mercurial installed': 'Bohužel mercurial není nainstalován.',
'Start': 'Start',
'Start a new app': 'Vytvořit novou aplikaci',
'Start Date': 'Start Date',
'Start date': 'Start date',
'Start searching': 'Začít hledání',
'Start wizard': 'Spustit průvodce',
'Starting on': 'Starting on',
'state': 'stav',
'Static': 'Static',
'static': 'statické soubory',
'Static files': 'Statické soubory',
'Statistics': 'Statistika',
'Status': 'Status',
'Step': 'Step',
'step': 'step',
'stop': 'stop',
'Student': 'Student',
'students max': 'students max',
'Stylesheet': 'CSS styly',
'submit': 'odeslat',
'Submit': 'Odeslat',
'successful': 'úspěšně',
'Support': 'Podpora',
'Sure you want to delete this object?': 'Opravdu chcete smazat tento objekt?',
'Table': 'tabulka',
'Table name': 'Název tabulky',
'Take a look at our Courses': 'Take a look at our Courses',
'Template': 'Template',
'Temporary': 'Dočasný',
'test': 'test',
'Testing application': 'Testing application',
'Text': 'Text',
'Thank you for your purchase!': 'Thank you for your purchase!',
'Thank you!': 'Thank you!',
'Thank you.': 'Thank you.',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Dotaz" je podmínka, například "db.tabulka1.pole1==\'hodnota\'". Podmínka "db.tabulka1.pole1==db.tabulka2.pole2" pak vytvoří SQL JOIN.',
'The application logic, each URL path is mapped in one exposed function in the controller': 'Logika aplikace: každá URL je mapována na funkci vystavovanou kontrolérem.',
'The Core': 'Jádro (The Core)',
'The data representation, define database tables and sets': 'Reprezentace dat: definovat tabulky databáze a záznamy',
'The following classes have a limit date of conclusion:': 'The following classes have a limit date of conclusion:',
'The output of the file is a dictionary that was rendered by the view %s': 'Výstup ze souboru je slovník, který se zobrazil v pohledu %s.',
'The presentations layer, views are also known as templates': 'Prezentační vrstva: pohledy či templaty (šablony)',
'The total amount was R$%.2f.': 'The total amount was R$%.2f.',
'The total amount was R$%.2f.\n': 'The total amount was R$%.2f.\n',
'The Views': 'Pohledy (The Views)',
'There are no controllers': 'There are no controllers',
'There are no modules': 'There are no modules',
'There are no plugins': 'Žádné moduly nejsou instalovány.',
'There are no private files': 'Žádné soukromé soubory neexistují.',
'There are no static files': 'There are no static files',
'There are no translators, only default language is supported': 'There are no translators, only default language is supported',
'There are no views': 'There are no views',
'There is a new announcement on %s class.': 'There is a new announcement on %s class.',
'There was a problem with this video!': 'There was a problem with this video!',
'There was a problem with your payment!': 'There was a problem with your payment!',
'There was a problem with your payment!\n': 'There was a problem with your payment!\n',
'These files are not served, they are only available from within your app': 'Tyto soubory jsou klientům nepřístupné. K dispozici jsou pouze v rámci aplikace.',
'These files are served without processing, your images go here': 'Tyto soubory jsou servírovány bez přídavné logiky, sem patří např. obrázky.',
'These people are interested in your course %s': 'These people are interested in your course %s',
'This App': 'Tato aplikace',
'This class has a limit date for conclusion.': 'This class has a limit date for conclusion.',
'This class reached the limit date': 'This class reached the limit date',
'This course is already on your shopping cart!': 'This course is already on your shopping cart!',
'This is a copy of the scaffolding application': 'Toto je kopie aplikace skelet.',
'This is an experimental feature and it needs more testing. If you decide to upgrade you do it at your own risk': 'This is an experimental feature and it needs more testing. If you decide to upgrade you do it at your own risk',
'This is the %(filename)s template': 'This is the %(filename)s template',
'This is to certify that': 'This is to certify that',
"This means that, after the limit date, you won't be able to see the lessons anymore. Forum, announcements and other resources will still be available.": "This means that, after the limit date, you won't be able to see the lessons anymore. Forum, announcements and other resources will still be available.",
'this page to see if a breakpoint was hit and debug interaction is required.': 'tuto stránku, abyste uviděli, zda se dosáhlo bodu přerušení.',
'Ticket': 'Ticket',
'Ticket ID': 'Ticket ID',
'Time in Cache (h:m:s)': 'Čas v Cache (h:m:s)',
'Timestamp': 'Časové razítko',
'Title': 'Title',
'to previous version.': 'k předchozí verzi.',
'To create a plugin, name a file/folder plugin_[name]': 'Zásuvný modul vytvoříte tak, že pojmenujete soubor/adresář plugin_[jméno modulu]',
'To emulate a breakpoint programatically, write:': 'K nastavení bodu přerušení v kódu programu, napište:',
'to finish your payment.': 'to finish your payment.',
'to use the debugger!': ', abyste mohli ladící program používat!',
'toggle breakpoint': 'vyp./zap. bod přerušení',
'Toggle Fullscreen': 'Na celou obrazovku a zpět',
'Token': 'Token',
'too short': 'Příliš krátké',
'Total': 'Total',
'Total Hours': 'Total Hours',
'Traceback': 'Traceback',
'Translation strings for the application': 'Překlad textů pro aplikaci',
'try something like': 'try something like',
'Try the mobile interface': 'Zkuste rozhraní pro mobilní zařízení',
'try view': 'try view',
'Twitter': 'Twitter',
'Type python statement in here and hit Return (Enter) to execute it.': 'Type python statement in here and hit Return (Enter) to execute it.',
'Type some Python code in here and hit Return (Enter) to execute it.': 'Type some Python code in here and hit Return (Enter) to execute it.',
'Unable to check for upgrades': 'Unable to check for upgrades',
'unable to parse csv file': 'csv soubor nedá sa zpracovat',
'uncheck all': 'vše odznačit',
'Uninstall': 'Odinstalovat',
'update': 'aktualizovat',
'update all languages': 'aktualizovat všechny jazyky',
'Update:': 'Upravit:',
'Upgrade': 'Upgrade',
'upgrade now': 'upgrade now',
'upgrade now to %s': 'upgrade now to %s',
'upload': 'nahrát',
'Upload': 'Upload',
'Upload a package:': 'Nahrát balík:',
'Upload and install packed application': 'Nahrát a instalovat zabalenou aplikaci',
'upload file:': 'nahrát soubor:',
'upload plugin file:': 'nahrát soubor modulu:',
'Upload Video': 'Upload Video',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Použijte (...)&(...) pro AND, (...)|(...) pro OR a ~(...) pro NOT pro sestavení složitějších dotazů.',
'User': 'User',
'User %(id)s Logged-in': 'Uživatel %(id)s přihlášen',
'User %(id)s Logged-out': 'Uživatel %(id)s odhlášen',
'User %(id)s Password changed': 'Uživatel %(id)s změnil heslo',
'User %(id)s Profile updated': 'Uživatel %(id)s upravil profil',
'User %(id)s Registered': 'Uživatel %(id)s se zaregistroval',
'User %(id)s Username retrieved': 'Uživatel %(id)s si nachal zaslat přihlašovací jméno',
'User Area - Courses': 'User Area - Courses',
'User avatar': 'User avatar',
'User Class': 'User Class',
'User Id': 'User Id',
'User ID': 'ID uživatele',
'User Lesson': 'User Lesson',
'Username': 'Přihlašovací jméno',
'Users': 'Users',
'variables': 'variables',
'Verify Password': 'Zopakujte heslo',
'Version': 'Verze',
'Version %s.%s.%s (%s) %s': 'Verze %s.%s.%s (%s) %s',
'Versioning': 'Verzování',
'Video': 'Video',
'Video file': 'Video file',
'Video URL': 'Video URL',
'Videos': 'Videa',
'View': 'Pohled (View)',
'View course': 'View course',
'views': 'pohledy',
'Views': 'Pohledy',
'Wait a few seconds.': 'Wait a few seconds.',
"We couldn't find any video here. Please, alert your instructor about this problem!": "We couldn't find any video here. Please, alert your instructor about this problem!",
'We just confirmed your payment for order number %s.': 'We just confirmed your payment for order number %s.',
'We just confirmed your payment for order number %s.\n': 'We just confirmed your payment for order number %s.\n',
'We just received your order number %s:': 'We just received your order number %s:',
'We just received your order number %s:\n': 'We just received your order number %s:\n',
'We received your order!': 'We received your order!',
'We will wait and let you know when your payment is confirmed.': 'We will wait and let you know when your payment is confirmed.',
'Web Framework': 'Web Framework',
'web2py is up to date': 'Máte aktuální verzi web2py.',
'web2py online debugger': 'Ladící online web2py program',
'web2py Recent Tweets': 'Štěbetání na Twitteru o web2py',
'web2py upgrade': 'web2py upgrade',
'web2py upgraded; please restart it': 'web2py upgraded; please restart it',
'Welcome': 'Vítejte',
'Welcome to web2py': 'Vitejte ve web2py',
'Welcome to web2py!': 'Vítejte ve web2py!',
'Which called the function %s located in the file %s': 'která zavolala funkci %s v souboru (kontroléru) %s.',
"Why don't you follow this steps to start making your courses?": "Why don't you follow this steps to start making your courses?",
'Working...': 'Working...',
'You are already enrolled in this class to %s, so we removed it from your shopping cart.': 'You are already enrolled in this class to %s, so we removed it from your shopping cart.',
'You are already on the list for this course!': 'You are already on the list for this course!',
'You are already on this class!': 'You are already on this class!',
'You are already registered!': 'You are already registered!',
'You are successfully running web2py': 'Úspěšně jste spustili web2py.',
'You can access it here: %s': 'You can access it here: %s',
'You can also set and remove breakpoint in the edit window, using the Toggle Breakpoint button': 'Nastavovat a mazat body přerušení je též možno v rámci editování zdrojového souboru přes tlačítko Vyp./Zap. bod přerušení',
'You can check your payment history after login in to your profile.': 'You can check your payment history after login in to your profile.',
'You can check your payment history after login in to your profile.\n': 'You can check your payment history after login in to your profile.\n',
'You can inspect variables using the console bellow': 'Níže pomocí příkazové řádky si můžete prohlédnout proměnné',
'You can modify this application and adapt it to your needs': 'Tuto aplikaci si můžete upravit a přizpůsobit ji svým potřebám.',
'You have nothing in your shopping cart yet!': 'You have nothing in your shopping cart yet!',
'You need to set up and reach a': 'Je třeba nejprve nastavit a dojít až na',
'You visited the url %s': 'Navštívili jste stránku %s,',
"You're beeing redirected to a secure enviroment on Paypal": "You're beeing redirected to a secure enviroment on Paypal",
'Your application will be blocked until you click an action button (next, step, continue, etc.)': 'Aplikace bude blokována než se klikne na jedno z tlačítek (další, krok, pokračovat, atd.)',
'Your browser does not support the video tag.': 'Your browser does not support the video tag.',
'Your Certificate of Conclusion of %s is attached to this email. For more info, contact your teacher.\n\nCongratulations!': 'Your Certificate of Conclusion of %s is attached to this email. For more info, contact your teacher.\n\nCongratulations!',
'Your email': 'Your email',
}
|
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
SQLALCHEMY_DATABASE_URL = "sqlite:///./data.db"
engine = create_engine(SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False})
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
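# Typical per-request session handling (a minimal sketch; the generator-style
# dependency below is an assumption, not part of this module):
#
#   def get_db():
#       db = SessionLocal()
#       try:
#           yield db
#       finally:
#           db.close()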
|
# -*- coding: utf-8 -*-
import os
from server import icityServer
ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
# Need to import all resources
from workspaces.home.controller import HomeRoutes
from workspaces.dashboard.controller import DashRoutes
# Register all Blueprint
icityServer.icity_app.register_blueprint(icityServer.icity_bp)
icityServer.icity_app.register_blueprint(icityServer.icity_dsh_bp)
# run dev, prod
if __name__ == '__main__':
icityServer.run()
|
print('Testing Lin v Log')
test=2
if test==1:
import astropy.table as table
import numpy as np
from defcuts import *
from defflags import *
from halflight_first import *
from def_get_mags import *
from def_halflight_math import *
bands=['g', 'r', 'i','z', 'y']
daperture=[1.01,1.51,2.02,3.02,4.03,5.71,8.40,11.8,16.8,23.5]
aperture=[x*0.5 for x in daperture]
ty='mean'
stax=True
tag = 'uplim' if stax else ''
txtdist= 'Figure2'
txtslope='Figure1'
outdir='/Users/amandanewmark/repositories/galaxy_dark_matter/lumprofplots/clumps/+LL'+ty+tag
doutdir='/Users/amandanewmark/repositories/galaxy_dark_matter/lumprofplots/distribution/+LL'+ty+tag
Flags=['flags_pixel_bright_object_center', 'brobj_cen_flag-', 'No Bright Object Centers', 'Only Bright Object Centers', 'brobj_cen_flag']
indir='/Users/amandanewmark/repositories/galaxy_dark_matter/GAH/'
bigdata = table.Table.read(indir+ 'LOWZ_HSCGAMA15_apmgs+cmodmag.fits')
def do_cuts(datatab):
parm=['flags_pixel_saturated_center','flags_pixel_edge','flags_pixel_interpolated_center','flags_pixel_cr_center','flags_pixel_suspect_center', 'flags_pixel_clipped_any','flags_pixel_bad']
ne=[99.99, 199.99, 0.0]
mincut=0.1
maxcut=''
cutdata=not_cut(datatab, bands, 'mag_forced_cmodel', ne)
for b in range(0, len(bands)-1):
newdata=many_flags(cutdata, parm, bands[b]) #flags not in y?
cutdata=newdata
return newdata
def get_TF(data):
bandi=['i']
Flag, Not,lab= TFflag(bandi,Flags, data)
return Flag, Not
newdata=do_cuts(bigdata)
Flagdat, Notdat=get_TF(newdata)
def my_halflight2(dat1, sc=''):
loglum, lograd, loglumd= get_ind_lums(dat1, bands, aperture, scale='log')
if stax==True:
loglum, lograd, loglumd= upper_rad_cut(loglum, lograd, loglumd, 4, proof=False)
#print('length of radius array is ', len(lograd))
mloglum, mlogdens, mlograd, mlogerr= get_avg_lums(loglum, lograd, loglumd, gr=[1,80,11], type=ty, scale=sc)
logr12s= get_halflight(loglum, lograd)
logr12= get_halflight(mloglum, mlograd)
Ms, cs, errs= get_slopes(logr12s, lograd, loglumd, error=None, smax=stax)
M, c, logrcut, logldcut, sterr, errcut =get_slopes(logr12, mlograd, mlogdens, error=mlogerr, smax=stax)
print(sterr)
cutmlogld = M * logrcut + c
ind=[loglum, loglumd, lograd, logr12s]
means=[mloglum,mlogdens,mlograd,logr12, mlogerr]
ind_slope=[Ms, cs, errs]
mean_slopes=[M, c, logrcut, logldcut, cutmlogld, sterr, errcut]
#logrcut and logldcut are for lines of best fit
return ind, means, ind_slope, mean_slopes
inds1, means1, ind_slope1, mean_slopes1=my_halflight2(Flagdat, sc='lindata')
inds2, means2, ind_slope2, mean_slopes2=my_halflight2(Flagdat, sc='')
def my_graphs(inds1, means1, ind_slope1, mean_slopes1, inds2, means2, ind_slope2, mean_slopes2):
import matplotlib.pyplot as plt
import numpy as np
import math
#ind=[loglum, loglumd, lograd, logr12s]
#means=[mloglum,mlogdens,lograd,logr12, mlogerr]
#ind_slope=[Ms, cs, errs]
#mean_slopes=[M, c, logrcut, logldcut, cutmlogld, sterr, errcut]
def lum_mult_fit(x1, x2, y1, y2, xcut1, xcut2, yfit1, yfit2, sterr1, sterr2 , m1, m2, error1, error2, outdir=''):
print('Make Scatter Plots')
f=plt.figure()
plt.scatter(x1, y1, color='r', marker='o',label='Linearly Averaged')
plt.plot(xcut1, yfit1, color='m', label='Linearly Averaged: slope= '+str(np.round(m1,2))+' +- '+str(np.round(sterr1,2)))
plt.errorbar(x1, y1, yerr=error1, fmt='.',color='r')
plt.scatter(x2, y2, color='b', marker='o',label='Log Averaged ')
plt.plot(xcut2, yfit2, color='c', label='Log Averaged: slope= '+str(np.round(m2,2))+' +- '+str(np.round(sterr2,2)))
plt.errorbar(x2, y2, yerr=error2, fmt='.',color='b')
plt.xlabel('Log Radii (kpc)')
plt.ylabel('Luminosity Densities (Lsolar/kpc^2)')
plt.title('Average Luminosity Densities v Radii')
#plt.xlim(math.log10(1), math.log10(80))
#plt.ylim(6,8.6)
plt.legend(loc=0,prop={'size':6.0})
f.text(0.05, 0.05, txtslope, color='red', weight='bold')
outdirs=outdir+tag+'TF.pdf'
#plt.show()
f.savefig(outdirs)
print(outdirs)
def dist_mean(m1s, m2s, m1, m2, sterr1, sterr2, KS=False):
figs=plt.figure()
bs=np.linspace(-2.0,-1.4,num=15, endpoint=False)
n1, b1, p1= plt.hist(m1s, bs, color='red', label='Linearly Averaged ('+str(len(m1s))+')', alpha=0.8)
n2, b2, p2= plt.hist(m2s,bs, color='blue', label='Log Averaged ('+str(len(m2s))+')', alpha=0.8)
ts=''
if KS==True:
M=m1s+m2s
import scipy
D, p=scipy.stats.ks_2samp(m1s,m2s)
plt.plot(0,0, c='green', marker='*', label='K-S test is '+str(D))
plt.xlim(np.min(M),-1.4)
ts='KS'
#print('Standard Deviation (Not Flagged): ', str(np.std(m1s)))
#print('Standard Deviation (Flagged): ', str(np.std(m2s)))
plt.axvline(x=m1, color='magenta', label='Linearly Averaged: slope= '+str(np.round(m1,2))+' +- ' +str(np.round(sterr1,2)))
plt.axvline(x=m2, color='cyan', label='Log Averaged: slope= '+str(np.round(m2,2))+' +- '+str(np.round(sterr2,2)))
plt.xlabel('Slopes', fontsize=10)
plt.legend(loc=0,prop={'size':6.5})
plt.ylabel('Frequency', fontsize=10)
plt.title('With '+ty+' Slopes')
outdirs=doutdir+'slopedist.pdf'
#figs.text(0.03, 0.03, txtdist, color='red', weight='bold')
#plt.show()
figs.savefig(outdirs)
print(outdirs)
def all_lumprof(lum1s, lum2s, rad1s, rad2s, mrad1, mrad2, mden1, mden2, error1, error2):
f=plt.figure()
#print(len(mrad1)) #these are the mean radii
#print(len(mrad2))
#print(len(mden1))
#print(len(mden2))
for n in range(len(lum1s)):
plt.plot(rad1s[n], lum1s[n],color='lightgrey', marker='.')
for n in range(len(lum2s)):
plt.plot(rad2s[n], lum2s[n],color='lightgrey', marker='.')
plt.scatter(mrad1, mden1, color='red', marker='o',label='Linearly Averaged ('+str(len(lum1s))+')', zorder=3)
plt.scatter(mrad2,mden2,color='blue', marker='o',label='Log Averaged ('+str(len(lum1s))+')', zorder=3)
plt.xlabel('Log Radii (kpc)')
plt.ylabel('Luminosity Densities (Lsolar/kpc^2)')
plt.title('Average Luminosity Densities v Radii')
plt.legend(loc=0,prop={'size':6.0})
outdirs=outdir+tag+'all_lumprof.pdf'
#plt.show()
f.savefig(outdirs)
print(outdirs)
dist_mean(ind_slope1[0],ind_slope2[0],mean_slopes1[0],mean_slopes2[0],mean_slopes1[5], mean_slopes2[5], KS=False)
all_lumprof(inds1[1], inds2[1], inds1[2], inds2[2], means1[2], means2[2], means1[1], means2[1],means1[4], means2[4])
lum_mult_fit(means1[2], means2[2], means1[1], means2[1], mean_slopes1[2], mean_slopes2[2], mean_slopes1[4], mean_slopes2[4], mean_slopes1[5], mean_slopes2[5], mean_slopes1[0], mean_slopes2[0],means1[4], means2[4], outdir=outdir)
my_graphs(inds1, means1, ind_slope1, mean_slopes1, inds2, means2, ind_slope2, mean_slopes2)
else:
from halflight_second import meanlum2
import numpy as np
import matplotlib.pyplot as plt
Naps=0
L=np.array([7.5, 8.0, 8.5, 9.0, 8.5,7.0, 8.5])
R=np.array([1,2,3,3,4,0,2.5])
mL, mR, bb=meanlum2(L, R, Naps,grange=[10**0.8,10**3.5,4],scale='lindata')
mL1, mR1, bb1=meanlum2(L, R, Naps,grange=[10**0.8,10**3.5,4],scale='')
print('Lums', mL, mL1)
print('Rads', mR, mR1)
plt.scatter(mR, mL, color='red', label='Averaged Linearly')
plt.scatter(mR1, mL1, color='blue', label='Averaged on Log scale')
plt.xlabel('Log Radii')
plt.ylabel('Log Luminosity')
plt.legend(loc=0,prop={'size':6.0})
plt.show()
|
from .tensors import *
|
import os
import torch
import random
import scipy.io
import typing as t
import numpy as np
import torchio as tio
from glob import glob
from functools import partial
from torch.utils.data import DataLoader
def get_scan_shape(filename: str):
extension = filename[-3:]
if extension == 'mat':
data = scipy.io.loadmat(filename)
shape = data['FLAIRarray'].shape
else:
scan = np.load(filename)
shape = scan.shape[1:]
return shape
def rotate(scan: np.ndarray):
''' rotate scan 90 degree to the left '''
assert len(scan.shape) == 4
# copy is needed to avoid negative strides error
return np.rot90(scan, k=1, axes=[1, 3]).copy()
def load_mat(filename: str, sequence: str = None):
'''
Reader callable object for tio.ScalarImage to load sequence from mat file.
Note: NaN and negative values are replaced with zeros.
Args:
filename: path to the mat file
    sequence: FLAIRarray, T1array or T2array to load a specific sequence, or
        None to load all sequences as channels.
Returns:
scan: np.ndarray in shape CHWD
affine: 4x4 affine matrix
'''
sequences = ['FLAIRarray', 'T1array', 'T2array']
assert sequence is None or sequence in sequences
data = scipy.io.loadmat(filename)
if sequence is None:
# load all sequences
flair = data['FLAIRarray'].astype(np.float32)
t1 = data['T1array'].astype(np.float32)
t2 = data['T2array'].astype(np.float32)
scan = np.stack([flair, t1, t2])
else:
# load a particular sequence
scan = data[sequence].astype(np.float32)
scan = np.expand_dims(scan, axis=0)
# replace NaN values with zeros
if np.isnan(scan).any():
scan = np.nan_to_num(scan)
# replace negative values with zeros
scan = np.maximum(scan, 0.0)
  # rotate scan 90 degrees to the left
scan = rotate(scan)
return scan, np.eye(4)
def load_npy(filename: str, sequence: str = None):
'''
Reader callable object for tio.ScalarImage to load sequence from npy file.
Args:
filename: path to the npy file
    sequence: FLAIRarray, T1array or T2array to load a specific sequence, or
        None to load all sequences as channels.
Returns:
scan: np.ndarray in shape CHWD
affine: 4x4 affine matrix
'''
sequences = ['FLAIRarray', 'T1array', 'T2array']
assert sequence is None or sequence in sequences
scan = np.load(filename)
if sequence is not None:
# load a particular sequence
channel = sequences.index(sequence)
scan = np.expand_dims(scan[channel], axis=0)
  # rotate scan 90 degrees to the left
scan = rotate(scan)
return scan, np.eye(4)
def load_subject(lr_filename: str,
sequence: str = None,
require_hr: bool = False):
extension = lr_filename[-3:]
assert extension in ['npy', 'mat']
reader = partial(load_mat if extension == 'mat' else load_npy,
sequence=sequence)
lr = tio.ScalarImage(path=lr_filename, reader=reader)
hr, hr_filename = None, lr_filename.replace('V0', 'V1')
hr_exists = os.path.exists(hr_filename)
if require_hr and not hr_exists:
raise FileNotFoundError(f'{hr_filename} not found.')
if hr_exists:
hr = tio.ScalarImage(path=hr_filename, reader=reader)
name = os.path.basename(lr_filename)
if '.npy' in name or '.mat' in name:
name = name[:-4]
if '_V0' in name:
name = name[:name.find('_V0')]
if sequence is not None:
name += f'_{sequence}'
if hr_exists:
subject = tio.Subject(lr=lr, hr=hr, name=name)
else:
subject = tio.Subject(lr=lr, name=name)
return subject
def load_dataset(filenames: t.List[str], combine_sequence: bool = True):
subjects = []
for filename in filenames:
if combine_sequence:
subjects.append(
load_subject(lr_filename=filename, sequence=None, require_hr=True))
else:
subjects.extend([
load_subject(lr_filename=filename, sequence=sequence, require_hr=True)
for sequence in ['FLAIRarray', 'T1array', 'T2array']
])
return tio.SubjectsDataset(subjects)
def get_loaders(args,
val_ratio: float = 0.2,
test_ratio: float = 0.1,
random_state: int = 42):
if not os.path.exists(args.input_dir):
raise FileNotFoundError(f'{args.input_dir} not found.')
filenames = sorted(
glob(os.path.join(args.input_dir, f'*_V0*.{args.extension}')))
  assert len(filenames) > 0, \
f'no {args.extension} files found in {args.input_dir}'
print(f'found {len(filenames)} {args.extension} pairs in {args.input_dir}')
# shuffle files
random.Random(random_state).shuffle(filenames)
args.ds_name = os.path.basename(args.input_dir).lower()
args.scan_shape = get_scan_shape(filenames[0])
args.scan_types = ['FLAIR', 'T1', 'T2']
test_size = int(len(filenames) * test_ratio)
val_size = int(len(filenames) * val_ratio)
args.test_filenames = filenames[:test_size]
val_filenames = filenames[test_size:test_size + val_size]
train_filenames = filenames[test_size + val_size:]
C = 3 if args.combine_sequence else 1
  # calculate HWD dimensions; dim indicates where to insert the size-1 axis
dim = 2 if args.ds_name == 'warp' else 1
if args.patch_size is None:
if args.ds_name == 'warp':
H, W = args.scan_shape[0], args.scan_shape[1]
args.n_patches = args.scan_shape[2]
else:
H, W = args.scan_shape[0], args.scan_shape[2]
args.n_patches = args.scan_shape[1]
else:
assert args.n_patches is not None, \
        '--n_patches needs to be defined with patching'
H, W = args.patch_size, args.patch_size
  # The TorchIO sampler samples 3D patches from an image;
  # we insert a 1 into patch_size so it effectively outputs 2D patches.
patch_size = [H, W]
patch_size.insert(dim, 1)
args.patch_shape = tuple(patch_size)
args.slice_dim = dim + 2 # slice dimension in NCHWD
args.input_shape = (C, H, W)
train_dataset = load_dataset(train_filenames,
combine_sequence=args.combine_sequence)
train_queue = tio.Queue(
subjects_dataset=train_dataset,
max_length=args.n_patches * 4,
samples_per_volume=args.n_patches,
sampler=tio.UniformSampler(patch_size=args.patch_shape),
num_workers=args.num_workers,
shuffle_subjects=True,
shuffle_patches=True)
train_loader = DataLoader(train_queue,
batch_size=args.batch_size,
pin_memory=True)
val_dataset = load_dataset(val_filenames,
combine_sequence=args.combine_sequence)
val_queue = tio.Queue(subjects_dataset=val_dataset,
max_length=args.n_patches * 4,
samples_per_volume=args.n_patches,
sampler=tio.UniformSampler(patch_size=args.patch_shape),
num_workers=args.num_workers,
shuffle_subjects=False,
shuffle_patches=False)
val_loader = DataLoader(val_queue,
batch_size=args.batch_size,
pin_memory=True)
return train_loader, val_loader
def prepare_batch(batch,
dim: int,
                  device: torch.device = 'cpu') -> t.Tuple[torch.Tensor, torch.Tensor]:
'''
Prepare a batch from TorchIO data loader
Args:
batch: a batch from data loader
dim: slice dimension in NCHWD
device: torch device
Returns:
lr: low resolution batch
hr: high resolution batch if exists, else None
'''
def prepare(tensor):
tensor = torch.squeeze(tensor, dim=dim)
return tensor.to(device)
lr, hr = batch['lr'][tio.DATA], None
lr = prepare(lr)
if 'hr' in batch:
hr = batch['hr'][tio.DATA]
hr = prepare(hr)
return lr, hr
def random_samples(args, val_loader, num_samples: int = 6):
''' Randomly select num_samples samples from val_loader for plotting '''
batch = next(iter(val_loader))
inputs, targets = prepare_batch(batch, dim=args.slice_dim, device=args.device)
indexes = np.random.choice(inputs.shape[0],
size=min(num_samples, inputs.shape[0]),
replace=False)
return inputs[indexes], targets[indexes]
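# A minimal, illustrative driver (assumptions: the directory path and the
# hyper-parameter values below are placeholders, not part of this module). It only
# wires get_loaders() and prepare_batch() together the way the functions above expect.
if __name__ == '__main__':
  from types import SimpleNamespace

  args = SimpleNamespace(
      input_dir='data/scans',  # directory containing *_V0*.npy files (placeholder)
      extension='npy',
      combine_sequence=True,
      patch_size=None,  # use full slices instead of patches
      n_patches=None,
      num_workers=2,
      batch_size=4,
      device=torch.device('cpu'))
  train_loader, val_loader = get_loaders(args)
  lr, hr = prepare_batch(next(iter(train_loader)), dim=args.slice_dim,
                         device=args.device)
  print('LR batch shape:', lr.shape,
        'HR batch shape:', None if hr is None else hr.shape)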
|
'''
Reporters output information about the state and progress of the program to
the user. They handle information about the formatting of this output, as
well as any additional processing which is required to support it, such as
communicating with a web or email server, for example.
They are derived from the Base_Reporter class.
'''
import Reporters.utils
class Base_Reporter():
'''
Reporters output information about the state and progress of the program
to the user. They handle information about the formatting of this
output, as well as any additional processing which is required to
    support it, such as communicating with a web or email server, for
example.
'''
def __init__(self):
'''
Creates Reporter object with an empty dictionary of
registered_monitors
'''
self.registered_monitors = {}
def register(self, monitor, formatting_data):
'''
Registers a monitor with the reporter.
        Records information about how data from the monitor should be
        interpreted and displayed, along with the signature of the data the
        Reporter can expect to receive from the Monitor.
Returns an id that is given to the Reporter with updates to identify
which monitor is providing the update.
'''
# Generates an id for the monitor.
#
        # id should be unique to each registered monitor and small enough to
# avoid passing around large objects as dict keys.
#
        # Currently uses the Python object id, but this is non-essential
        # and should not be relied upon outside of the Reporter class
        # (and ideally not even inside it).
monitor_id = id(monitor)
# The formatting data is processed by the parse_formatting_data
# method. This method will generally be overwritten in subclasses
required_signiture, parsed_formatting_information = (
self.parse_formatting_data(formatting_data)
)
        # Check that the monitor will provide the information
        # necessary for the Reporter's output.
if not Reporters.utils.is_compatable_data_signiture(
required_signiture, monitor.data_signiture):
raise ValueError('The formatting data given to Reporter is not '
                             'compatible with the data provided by the '
'Monitor being registered')
# The monitor's information is recorded (as you would expect for a
        # method called register).
self.registered_monitors[monitor_id] = (monitor.data_signiture,
parsed_formatting_information)
# The monitor_id is returned so that the monitor can use it to
# identify itself when updating the reporter.
return monitor_id
def parse_formatting_data(self,formatting_data):
'''
        Processes the formatting data passed with a newly registered
        Monitor to tell the Reporter how to display the data passed to it
        from that monitor.
        Returns a tuple consisting of a dictionary containing the data
        signature required to display the requested output, and the processed
        data describing how the Reporter should display the data passed by
        the monitor.
'''
        # As the Base_Reporter does not display any data, the data signature
# is the empty dictionary (no data required) and the processed
# formatting data is None
return {}, None
def update(self, monitor_id, **kwargs):
'''
Passes data from a monitor to the Reporter. The id returned when
        the monitor was registered with the reporter must be given, followed
        by any number of keyword arguments. The keyword arguments are
        interpreted by comparing them with the monitor's data signature.
'''
try:
if not Reporters.utils.is_compatable_data_signiture(kwargs,
self.registered_monitors[monitor_id][0]):
raise ValueError("Data in update not compatable with the "
"passing monitor's data signiture")
except KeyError:
            raise ValueError('ID does not correspond to a registered'
' Monitor')
class Text_Reporter(Base_Reporter):
'''
    Generates text detailing the information to be reported.
'''
pass #TODO
class Cmd_Line_Reporter(Text_Reporter):
'''
    Text Reporter that prints text to stdout.
'''
pass #TODO
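# A minimal illustrative subclass (assumptions: the 'value' field name and the
# shape of the signature dictionary below are hypothetical, chosen only to show
# where parse_formatting_data and update are meant to be overridden).
class Example_Print_Reporter(Base_Reporter):
    '''
    Toy Reporter that prints every value it receives to stdout.
    '''

    def parse_formatting_data(self, formatting_data):
        # Request a single 'value' field from the monitor and keep the
        # formatting data (here used as a simple label) for later display.
        return {'value': None}, formatting_data

    def update(self, monitor_id, **kwargs):
        # Reuse the base class compatibility checks, then display the value.
        super().update(monitor_id, **kwargs)
        label = self.registered_monitors[monitor_id][1]
        print('{}: {}'.format(label, kwargs.get('value')))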
|
from telegram.ext import (Updater, CommandHandler, MessageHandler,
Filters ,ConversationHandler,RegexHandler)
import apiai, json
import sqlite3
updater = Updater(token='')
print("Connection to Telegram established; starting bot.")
dispatcher = updater.dispatcher
import telegram as tg
import pandas as pd
CHOOSING, CANTIDAD, OFICINA, FIN = range(4)
data = pd.read_csv("C:/Users/prueba/Downloads/Telegram Desktop/Productos.csv", sep= ";")
columnas = ['id','edad','riesgo','cantidad','oficina']
df=pd.DataFrame(columns=columnas)
dictEdad = {"18-30":"¡Vaya jovencito! Ahora dime qué riesgo estás dispuesto a tomar.",
"30-60":"MEdiana edad. Ahora dime qué riesgo estás dispuesto a tomar.",
">60": "La segunda juventud. Ahora dime qué riesgo estás dispuesto a tomar."}
dictRiesgo = {"Alto":"¡Vaya, veo que te va la marcha! Ahora dime qué cantidad te gustaría invertir.",
"Medio":"Un punto medio, así me gusta. Ahora dime qué cantidad te gustaría invertir",
"Bajo": "A mí también me gusta la tranquilidad. Ahora dime qué cantidad te gustaría invertir."}
dictCantidad = {"<5000":"Me gusta empezar con algo moderado. Dime, ¿Necesitarías una oficina para las gestiones?",
"5000-20000":"Vaya, parece que quieres tomárte esto en serio. Dime, ¿Necesitarías una oficina para las gestiones?",
">20000": "Uuuf, veo que alguien ha trabajado duro y ahora está recogiendo los frutos. Dime, ¿Necesitarías una oficina para las gestiones?"}
def startCommand(bot, update,user_data):
df.set_value(update.message.chat_id, 'id', update.message.chat_id)
reply_keyboard = [['18-30', '30-60'],
['>60']]
markup = tg.ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)
bot.send_message(chat_id=update.message.chat_id,
text="Tenemos que empezar por saber tu edad",
reply_markup=markup)
return CHOOSING
def riesgo_choice(bot, update,user_data):
df.set_value(update.message.chat_id,
'edad', update.message.text)
respuesta = dictEdad[update.message.text]
reply_keyboard = [['Alto', 'Medio'],
['Bajo']]
markup = tg.ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)
bot.send_message(chat_id=update.message.chat_id,
text=respuesta,
reply_markup=markup)
return CANTIDAD
def cantidad_choice(bot, update,user_data):
df.set_value(update.message.chat_id,
'riesgo', update.message.text)
respuesta = dictRiesgo[update.message.text]
reply_keyboard = [['<5000', '5000-20000'],
['>20000']]
markup = tg.ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)
bot.send_message(chat_id=update.message.chat_id,
text=respuesta,
reply_markup=markup)
return OFICINA
def oficina_choice(bot, update,user_data):
df.set_value(update.message.chat_id,
'cantidad', update.message.text)
respuesta = dictCantidad[update.message.text]
reply_keyboard = [['Sí', 'No']]
markup = tg.ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)
bot.send_message(chat_id=update.message.chat_id,
text=respuesta,
reply_markup=markup)
return final_choice
def final_choice(bot, update,user_data):
df.set_value(update.message.chat_id,
'oficina', update.message.text)
edad = df.query("id == "+str(update.message.chat_id)+"").iloc[0,1]
riesgo = df.query("id == "+str(update.message.chat_id)+"").iloc[0,2]
cantidad = df.query("id == "+str(update.message.chat_id)+"").iloc[0,3]
oficina = df.query("id == "+str(update.message.chat_id)+"").iloc[0,4]
respuesta = data.query("EDAD == '"+str(edad)+"' & RIESGO == '"+
str(riesgo)+"' & OFICINA == '"+str(oficina)+
"' & CANTIDAD == '"+str(cantidad)+"'").iloc[0,0]
respuesta = "Hemos consultado la base de datos y el producto que mejor se adapta a sus necesidades es el " + str(respuesta)
bot.send_message(chat_id=update.message.chat_id,
text=respuesta)
return ConversationHandler.END
def done(bot, update, user_data):
update.message.reply_text("I learned these facts about you:")
return ConversationHandler.END
def textMessage (bot, update):
cnx = sqlite3.connect("Conversaciones.db")
cursor = cnx.cursor()
    request = apiai.ApiAI('').text_request()  # API token for Dialogflow
request.lang = 'es' # In which language will the request be sent
request.session_id = 'small-talk-63ecd' # ID Sessions of the dialog (you need to learn the bot afterwards)
request.query = update.message.text # We send a request to the AI with a message from the user
responseJson = json.loads(request.getresponse().read().decode('utf-8'))
response = responseJson['result']['fulfillment']['speech'] # We parse JSON and pull out the answer
    # store timestamp, update.message.text and response
msgusuario=update.message.text
numero=str(update.message.chat_id)
cursor.execute("INSERT INTO chats2 (id,usuario,bot) VALUES ('"+numero+"','"+msgusuario+"', '"+response+"')")
cnx.commit()
# If there is an answer from the bot - we send it to the user, if not - the bot did not understand it
if response:
bot.send_message(chat_id = update.message.chat_id, text = response)
else:
bot.send_message(chat_id = update.message.chat_id, text = 'No te entiendo, recuerda que estoy aprendiendo')
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', startCommand, pass_user_data=True)],
states={
CHOOSING: [RegexHandler('^(18-30|30-60|>60|)$',
riesgo_choice,
pass_user_data=True),
],
CANTIDAD: [MessageHandler(Filters.text,
cantidad_choice,
pass_user_data=True)
],
OFICINA: [MessageHandler(Filters.text,
oficina_choice,
pass_user_data=True)
]
},
fallbacks=[RegexHandler('^(Sí|No|)$', final_choice, pass_user_data=True)]
)
text_message_handler = MessageHandler(Filters.text,textMessage)
dispatcher.add_handler(conv_handler)
dispatcher.add_handler(text_message_handler)
updater.start_polling(clean=True)
print('Ejecutando')
updater.idle()
|
"""
Write a function to find the longest common prefix string amongst an array of
strings.
If there is no common prefix, return an empty string "".
Example 1:
Input: strs = ["flower","flow","flight"] Output: "fl"
IDEA:
take the 1st word as a prefix, then decrease the length each time the divergence is found
NOTE: the order is not important, as well as the relative length of the 1st word
"""
class Solution14:
    def longestCommonPrefix(self, strs: list) -> str:
        if not strs:
            return ""
        prefix = strs[0]
        for word in strs[1:]:
            # shrink the prefix until it matches the start of the current word
            while not word.startswith(prefix):
                prefix = prefix[:-1]
                if not prefix:
                    return ""
        return prefix
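# Quick self-check using the example from the problem statement above.
assert Solution14().longestCommonPrefix(["flower", "flow", "flight"]) == "fl"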
|
from collections import defaultdict
NO_DEFAULT = object()
class Observable:
"""
:class: Observables are properties that one can bind methods to.
Notes
-----
    We store method names instead of methods themselves. This is so we can dynamically patch methods on widgets and the new
method will be called.
Instances that want to use a custom __get__ simply need to add the getter (a no-argument callable) to `getters`.
(e.g., `observable.getters[my_instance] = getter`)
"""
def __init__(self, default=NO_DEFAULT):
self.default = default
self.methods = defaultdict(dict)
self.callbacks = { }
self.getters = { }
def __set_name__(self, owner, name):
self.name = name
def __set__(self, instance, value):
instance.__dict__[self.name] = value
self.dispatch(instance)
def __get__(self, instance, owner):
if instance is None:
return self
if instance in self.getters:
return self.getters[instance]()
if self.name in instance.__dict__:
return instance.__dict__[self.name]
if self.default is not NO_DEFAULT:
return self.default
return self
def dispatch(self, instance):
        # Build list of dispatches from __mro__ if it doesn't exist
name = type(instance).__name__
if name not in self.callbacks:
d = { }
for base in reversed(type(instance).__mro__):
d.update(self.methods.get(base.__name__, { }))
self.callbacks[name] = list(d)
for callback in self.callbacks[name]:
getattr(instance, callback)()
def bind(self, class_name, method_name):
self.methods[class_name][method_name] = None
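# Minimal usage sketch (assumption: the Widget class and its on_value method below
# are illustrative only, not part of this module). Callbacks are bound by class
# name and method name, and fire whenever the descriptor's __set__ runs.
if __name__ == '__main__':
    class Widget:
        value = Observable(default=0)

        def on_value(self):
            print('value changed to', self.value)

    Widget.value.bind('Widget', 'on_value')
    w = Widget()
    w.value = 5  # prints "value changed to 5"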
|
from django.contrib import admin
from .models import contact,SlideShowItem
# Register your models here.
admin.site.register(contact)
# Registered the model
admin.site.register(SlideShowItem)
|
from unicodedata import name
from django.shortcuts import redirect, render
from django.http import Http404, HttpResponse
import datetime as dt
from .models import Image
# Create your views here.
def test(request):
return HttpResponse('testing gallery')
def show_all_images(request):
images = Image.get_images()
ctx = {'images':images}
return render(request, 'index.html', ctx)
def filter_images(request, location):
images = Image.location_filter(location=location)
message = f"{location}"
return render(request, 'location.html', {"location": location,"images":images, "message":message})
def search_results(request):
if 'image' in request.GET and request.GET["image"]:
search_term = request.GET.get("image")
searched_images = Image.search_by_category(search_term)
message = f"{search_term}"
return render(request, 'search.html',{"message":message,"images": searched_images})
else:
        message = "You haven't searched for any term"
return render(request, 'search.html',{"message":message})
def image(request,image_id):
try:
image = Image.objects.get(id = image_id)
except Image.DoesNotExist:
raise Http404()
return render(request,"index.html", {"image":image})
|
import logging
import logging.config
from fastapi import FastAPI
from starlette.requests import Request
from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.responses import JSONResponse
from controllers.health.status import health_router
from controllers.v1.conference import conference_router
from controllers.v1.talk import talk_router
from controllers.v1.talk_manager import talk_manager_router
from config import settings
app = FastAPI(
title=settings.PROJECT_NAME, openapi_url=f"{settings.API_V1_STR}/openapi.json"
)
logging.config.fileConfig('logger.conf', disable_existing_loggers=False)
logger = logging.getLogger(__name__)
@app.exception_handler(Exception)
async def exception_callback(request: Request, exc: Exception):
logging.info('API Unhandled Error', exc_info=True)
return JSONResponse({"detail": "Unknown Error"}, status_code=500)
app.include_router(health_router, tags=["HealthCheck"])
app.include_router(conference_router, prefix="/conferences",tags=["Conferences"])
app.include_router(talk_router, prefix="/conferences",tags=["Talks"])
app.include_router(talk_manager_router, prefix="/conferences",tags=["Talks"])
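# Local run sketch (assumptions: this module is saved as main.py and uvicorn is installed):
#   uvicorn main:app --host 0.0.0.0 --port 8000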
|
from typing import Callable
from typing import List
from typing import Union
import torch
from cata import constants
from cata.teachers import base_teacher
from cata.utils import threshold_functions
class ClassificationTeacher(base_teacher.BaseTeacher):
"""Classification - threshold output"""
def __init__(
self,
input_dimension: int,
hidden_dimensions: List[int],
output_dimension: int,
bias: bool,
nonlinearity: str,
forward_hidden_scaling: float,
forward_scaling: float,
unit_norm_teacher_head: bool,
weight_normalisation: bool,
noise_std: Union[float, int],
initialisation_std: float,
):
super().__init__(
input_dimension=input_dimension,
hidden_dimensions=hidden_dimensions,
output_dimension=output_dimension,
bias=bias,
loss_type=constants.CLASSIFICATION,
nonlinearity=nonlinearity,
forward_hidden_scaling=forward_hidden_scaling,
forward_scaling=forward_scaling,
unit_norm_teacher_head=unit_norm_teacher_head,
weight_normalisation=weight_normalisation,
noise_std=noise_std,
initialisation_std=initialisation_std,
)
self._threshold_fn = self._setup_threshold()
def _setup_threshold(self) -> Callable:
# threshold differently depending on nonlinearity to
# ensure even class distributions
if self._nonlinearity == constants.RELU:
threshold_fn = threshold_functions.positive_threshold
        elif self._nonlinearity == constants.LINEAR:
threshold_fn = threshold_functions.tanh_threshold
else:
raise NotImplementedError(
f"Teacher thresholding for {self._nonlinearity}"
" nonlinearity not yet implemented"
)
return threshold_fn
def _get_output_from_head(self, x: torch.Tensor) -> torch.Tensor:
"""Pass tensor through head."""
y = self._head(x)
thresholded_output = self._threshold_fn(y)
return thresholded_output
|
import base64
import requests_mock
from app.clients.freshdesk import Freshdesk
def test_create_ticket(notify_api):
def match_json(request):
expected = {
'product_id': 42,
'subject': 'Ask a question',
'description': 'my message',
'email': 'test@example.com',
'priority': 1,
'status': 2,
'tags': []
}
encoded_auth = base64.b64encode(b'freshdesk-api-key:x').decode('ascii')
json_matches = request.json() == expected
basic_auth_header = request.headers.get('Authorization') == f"Basic {encoded_auth}"
return json_matches and basic_auth_header
with requests_mock.mock() as rmock:
rmock.request(
"POST",
'https://example.com/freshdesk/api/v2/tickets',
additional_matcher=match_json,
status_code=201
)
with notify_api.app_context():
response = Freshdesk.create_ticket({
'message': 'my message',
'email': 'test@example.com',
'support_type': 'Ask a question',
})
assert response == 201
|
#!/usr/bin/env python
import numpy as np
import math
import matplotlib.pyplot as plt
from pylab import *
import h5py
from matplotlib.colors import LogNorm
## Set the Zero
zero = 1.0e-20
## Set the maximum size of xenon
maxSize = 1000001
## Create plots
fig = plt.figure()
title = 'Xenon Distribution'
fig.suptitle(title,fontsize=22)
xePlot = plt.subplot(111)
## Create lists of file names to read from, time step number in the file, associated line colors, labels
name = ['/home/sophie/Workspace/xolotl-plsm-build/script/xolotlStop.h5', '/home/sophie/Workspace/xolotl-plsm-build/script/xolotlStop.h5', '/home/sophie/Workspace/xolotl-plsm-build/script/xolotlStop.h5', '/home/sophie/Workspace/xolotl-plsm-build/script/xolotlStop.h5']
timestep = [25,35,45,55]
col = ['black', 'blue', 'magenta', 'green']
lab = ['TS 25', 'TS 35', 'TS 45', 'TS 55']
for i in range(len(name)):
## Open the file
f = h5py.File(name[i], 'r')
## Open the concentration group
groupName ='concentrationsGroup/concentration_' + str(timestep[i])
concGroup = f[groupName]
## Read the concentration and index datasets
concDset = concGroup['concs']
indexDset = concGroup['concs_startingIndices']
## Read the time at the chosen time step
time = concGroup.attrs['absoluteTime']
    ## Read the total number of clusters in the network
networkGroup = f['networkGroup']
totalSize = networkGroup.attrs['totalSize']
## Create the mesh and data array
x = np.empty([maxSize])
xeArray = np.empty([maxSize])
for j in range(maxSize):
x[j] = j
xeArray[j] = zero
pos = 0 ## if 0D
for j in range(indexDset[pos], indexDset[pos+1]):
## Skip the moments for now
if (int(concDset[j][0]) > totalSize - 1): continue
## Get the cluster bounds
groupName = str(concDset[j][0])
clusterGroup = networkGroup[groupName]
bounds = clusterGroup.attrs['bounds']
## Loop on Xe size
for l in range(bounds[0], bounds[1]+1):
## Fill the array
xeArray[l] = xeArray[l] + concDset[j][1]
## Plot the data
x = np.delete(x,(0), axis=0)
xeArray = np.delete(xeArray,(0), axis=0)
xePlot.plot(x, xeArray, lw=4, color=col[i], label=lab[i], alpha=0.75)
## Some formatting
xePlot.set_xlabel("Cluster Size",fontsize=22)
xePlot.set_ylabel("Concentration (# / nm3)",fontsize=22)
xePlot.set_xlim([1, 1000000])
xePlot.set_ylim([1.0e-16, 1.0e-1])
xePlot.set_xscale('log')
xePlot.set_yscale('log')
xePlot.tick_params(axis='both', which='major', labelsize=20)
## Plot the legends
l2 = xePlot.legend(loc='best')
setp(l2.get_texts(), fontsize=25)
## Show the plots
plt.show()
|
from pathlib import Path
from typing import Set
from datetime import datetime
from dataclasses import dataclass
from rich.progress import track
from rich import print
import csv
import requests
lat = 47.350
lon = 8.100
rssiFilePath = Path("/Users/eliabieri/Downloads/rssi.csv")
appId = "262b0bd7ac5cf100bd4198648434b211"
days: Set[datetime] = set()
@dataclass
class Record:
day: datetime
temp: float
rain3h: float
weatherCategory: str
with Path(rssiFilePath).open(newline='') as f:
lines = f.readlines()[1:1000]
reader = csv.reader(lines)
for row in track(reader, "Parsing dates", total=len(lines)):
t = datetime.strptime(row[1], '%Y-%m-%d %H:%M:%S')
day = datetime(year=t.year, month=t.month, day=t.day)
days.add(day)
print(f"Found {len(days)} days")
data = []
for day in track(days, "Downloading data"):
try:
resp = requests.get(f"http://history.openweathermap.org/data/2.5/history/city?lat={lat}&lon={lon}&type=hour&start={12}&appid={appId}").json()
print(resp)
data.append(
Record(day=day,
rain3h=resp["weather"]["rain.3h"],
temp=resp["list"]["main"]["main.temp"],
weatherCategory=resp["weather"]["weather.main"]
)
)
except Exception as e:
print(e)
print(data)
|
# img2epub
__version__ = "0.3.0"
|
"""Test the R2C Module"""
|
from DB import SQLite as sql
from languages import lang as lang_P
def admin(param):
fct = param["fct"]
arg2 = param["arg2"]
arg3 = param["arg3"]
arg4 = param["arg4"]
msg = []
if fct == "playerid":
if arg2 == "None":
platform = param["name_pl"]
else:
platform = arg2
else:
platform = param["name_pl"]
if param["name_pl"] == "Admin" and fct != "playerid":
PlayerID = int(param["ID"])
lang = "EN"
else:
ID = sql.get_SuperID(int(param["ID"]), platform)
lang = param["lang"]
if ID == "Error 404":
msg = ["WarningMsg", lang_P.forge_msg(lang, "WarningMsg", None, False, 0)]
return msg
PlayerID = sql.get_PlayerID(ID, "gems")
if fct == "init":
        sql.init()
        desc = "init"  # ensure desc is defined for the reply built below
elif fct == "update":
# arg2 = nameDB | arg3 = fieldName | arg4 = fieldValue
desc = sql.updateField(PlayerID, arg3, arg4, arg2)
elif fct == "add":
# arg2 = nameDB | arg3 = nameElem | arg4 = nbElem
desc = sql.add(PlayerID, arg3, arg4, arg2)
elif fct == "value":
# arg2 = nameDB | arg3 = nameElem
desc = sql.valueAt(PlayerID, arg3, arg2)
elif fct == "gems":
# arg2 = nb gems
desc = sql.addGems(PlayerID, arg2)
elif fct == "spinelles":
# arg2 = nb spinelles
desc = sql.addSpinelles(PlayerID, arg2)
elif fct == "balance total" or fct == 'balancetotal':
desc = "Balance total"
desc += "\n{}:gem:".format(sql.countTotalGems())
spinelleidmoji = "{idmoji[spinelle]}"
desc += "\n{0}<:spinelle:{1}>".format(sql.countTotalSpinelles(), spinelleidmoji)
elif fct == "playerid":
desc = "PlayerID: {}".format(PlayerID)
else:
desc = ":regional_indicator_s::regional_indicator_q::regional_indicator_l:"
msg.append("Admin {}".format(fct))
msg.append(lang)
msg.append(str(desc))
return msg
|
from django.conf import settings
GET_CREATE_LOCATION_FUNCTION = getattr(settings, 'GET_CREATE_LOCATION_FUNCTION', None)
APP_NAME = getattr(settings, 'APP_NAME', 'Untitled')
COMPILE_MEDIA = getattr(settings, 'COMPILE_MEDIA', False)
|
from django import forms
from .models import *
class NewsLetterForm(forms.Form):
your_name = forms.CharField(label='First Name', max_length=30)
email = forms.EmailField(label='Email')
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
exclude = ['poster', 'imagecommented']
class NewProfileForm(forms.ModelForm):
class Meta:
model = Profile
exclude = ['user']
class UploadForm(forms.ModelForm):
class Meta:
model = Post
exclude = ['user_profile', 'profile','likes', 'opinions']
|
"""
Code for quaternion calculations in Python.
"""
from __future__ import print_function
from math import pi, sin, cos, asin, acos, atan2, sqrt
def _check_close(a, b, error=0.0001):
if isinstance(a, (tuple, list)):
assert isinstance(b, (tuple, list))
assert len(a) == len(b)
for a1, b1 in zip(a, b):
diff = abs(a1-b1)
if diff > error:
raise ValueError("%s vs %s, for %s vs %s difference %s > %s"
% (a, b, a1, b1, diff, error))
return
diff = abs(a-b)
if diff > error:
raise ValueError("%s vs %s, difference %s > %s"
% (a, b, diff, error))
def quaternion_mgnitude(w, x, y, z):
return sqrt(w*w + x*x + y*y + z*z)
def quaternion_normalise(w, x, y, z):
mag = sqrt(w*w + x*x + y*y + z*z)
return w/mag, x/mag, y/mag, z/mag
def quaternion_from_axis_angle(vector, theta):
sin_half_theta = sin(theta/2)
return cos(theta/2), vector[0]*sin_half_theta, vector[1]*sin_half_theta, vector[2]*sin_half_theta
#TODO - Write quaternion_to_axis_angle and cross-validate
def quaternion_to_rotation_matrix_rows(w, x, y, z):
"""Returns a tuple of three rows which make up a 3x3 rotatation matrix.
It is trival to turn this into a NumPy array/matrix if desired."""
x2 = x*x
    y2 = y*y
    z2 = z*z
row0 = (1 - 2*y2 - 2*z2,
2*x*y - 2*w*z,
2*x*z + 2*w*y)
row1 = (2*x*y + 2*w*z,
1 - 2*x2 - 2*z2,
2*y*z - 2*w*x)
row2 = (2*x*z - 2*w*y,
2*y*z + 2*w*x,
1 - 2*x2 - 2*y2)
return row0, row1, row2
def quaternion_from_rotation_matrix_rows(row0, row1, row2):
#No point merging three rows into a 3x3 matrix if just want quaternion
#Based on several sources including the C++ implementation here:
#http://www.camelsoftware.com/firetail/blog/uncategorized/quaternion-based-ahrs-using-altimu-10-arduino/
#http://www.camelsoftware.com/firetail/blog/c/imu-maths/
trace = row0[0] + row1[1] + row2[2]
if trace > row2[2]:
S = sqrt(1.0 + trace) * 2
w = 0.25 * S
x = (row2[1] - row1[2]) / S
y = (row0[2] - row2[0]) / S
z = (row1[0] - row0[1]) / S
elif row0[0] < row1[1] and row0[0] < row2[2]:
S = sqrt(1.0 + row0[0] - row1[1] - row2[2]) * 2
w = (row2[1] - row1[2]) / S
x = 0.25 * S
y = (row0[1] + row1[0]) / S
z = (row0[2] + row2[0]) / S
elif row1[1] < row2[2]:
S = sqrt(1.0 + row1[1] - row0[0] - row2[2]) * 2
w = (row0[2] - row2[0]) / S
x = (row0[1] + row1[0]) / S
y = 0.25 * S
z = (row1[2] + row2[1]) / S
else:
S = sqrt(1.0 + row2[2] - row0[0] - row1[1]) * 2
w = (row1[0] - row0[1]) / S
x = (row0[2] + row2[0]) / S
y = (row1[2] + row2[1]) / S
z = 0.25 * S
return w, x, y, z
#TODO - Double check which angles exactly have I calculated (which frame etc)?
def quaternion_from_euler_angles(yaw, pitch, roll):
"""Returns (w, x, y, z) quaternion from angles in radians.
Assuming angles given in the moving frame of reference of the sensor,
not a fixed Earth bound observer.
"""
#Roll = phi, pitch = theta, yaw = psi
return (cos(roll/2)*cos(pitch/2)*cos(yaw/2) + sin(roll/2)*sin(pitch/2)*sin(yaw/2),
sin(roll/2)*cos(pitch/2)*cos(yaw/2) - cos(roll/2)*sin(pitch/2)*sin(yaw/2),
cos(roll/2)*sin(pitch/2)*cos(yaw/2) + sin(roll/2)*cos(pitch/2)*sin(yaw/2),
cos(roll/2)*cos(pitch/2)*sin(yaw/2) - sin(roll/2)*sin(pitch/2)*cos(yaw/2))
def quaternion_to_euler_angles(w, x, y, z):
"""Returns angles about Z, Y, X axes in radians (yaw, pitch, roll).
Using moving frame of reference of the sensor, not the fixed frame of
    an Earth bound observer.
"""
w2 = w*w
x2 = x*x
y2 = y*y
z2 = z*z
return (atan2(2.0 * (x*y + z*w), (w2 + x2 - y2 - z2)), # -pi to pi
asin(2.0 * (w*y - x*z) / (w2 + x2 + y2 + z2)), # -pi/2 to +pi/2
atan2(2.0 * (y*z + x*w), (w2 - x2 - y2 + z2))) # -pi to pi
_check_close(quaternion_to_euler_angles(0, 1, 0, 0), (0, 0, pi))
_check_close(quaternion_to_euler_angles(0,-1, 0, 0), (0, 0, pi))
_check_close(quaternion_from_euler_angles(0, 0, pi), (0, 1, 0, 0))
_check_close(quaternion_to_euler_angles(0, 0, 1, 0), (pi, 0, pi))
_check_close(quaternion_to_euler_angles(0, 0,-1, 0), (pi, 0, pi))
_check_close(quaternion_from_euler_angles(pi, 0, pi), (0, 0, 1, 0))
_check_close(quaternion_to_euler_angles(0, 0, 0, 1), (pi, 0, 0))
_check_close(quaternion_to_euler_angles(0, 0, 0,-1), (pi, 0, 0))
_check_close(quaternion_from_euler_angles(pi, 0, 0), (0, 0, 0, 1))
_check_close(quaternion_to_euler_angles(0, 0, 0.5*sqrt(2), 0.5*sqrt(2)), (pi, 0, pi/2))
_check_close(quaternion_from_euler_angles(pi, 0, pi/2), (0, 0, 0.5*sqrt(2), 0.5*sqrt(2)))
_check_close(quaternion_to_euler_angles(0, 0.5*sqrt(2), 0, 0.5*sqrt(2)), (0, -pi/2, 0))
_check_close(quaternion_to_euler_angles(0.5*sqrt(2), 0,-0.5*sqrt(2), 0), (0, -pi/2, 0))
_check_close(quaternion_from_euler_angles(0, -pi/2, 0), (0.5*sqrt(2), 0, -0.5*sqrt(2), 0))
_check_close(quaternion_to_euler_angles(0, 1, 1, 0), (pi/2, 0, pi)) #Not normalised
_check_close(quaternion_to_euler_angles(0, 0.5*sqrt(2), 0.5*sqrt(2), 0), (pi/2, 0, pi))
_check_close(quaternion_from_euler_angles(pi/2, 0, pi), (0, 0.5*sqrt(2), 0.5*sqrt(2), 0))
#w, x, y, z = quaternion_from_euler_angles(pi, 0, pi)
#print("quarternion (%0.2f, %0.2f, %0.2f, %0.2f) magnitude %0.2f" % (w, x, y, z, sqrt(w*w + x*x + y*y + z*z)))
def quaternion_multiply(a, b):
a_w, a_x, a_y, a_z = a
b_w, b_x, b_y, b_z = b
return (a_w*b_w - a_x*b_x - a_y*b_y - a_z*b_z,
a_w*b_x + a_x*b_w + a_y*b_z - a_z*b_y,
a_w*b_y - a_x*b_z + a_y*b_w + a_z*b_x,
a_w*b_z + a_x*b_y - a_y*b_x + a_z*b_w)
_check_close(quaternion_multiply((0, 0, 0, 1), (0, 0, 1, 0)), (0, -1, 0, 0))
def quaternion_scalar_multiply(q, s):
w, x, y, z = q
    return (w*s, x*s, y*s, z*s)
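# Added sanity check for scalar multiplication, in the same style as the
# self-checks above (plain arithmetic, no external assumptions).
_check_close(quaternion_scalar_multiply((1, 2, 3, 4), 2), (2, 4, 6, 8))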
|
import simplyencrypt as en
keys = en.new_random_password(1024)
line = "Let's encrypt this line"
print("Unenceypted line: " + line)
cipher_text = en.encrypt(keys, line)
try:
print("Encrypted line: " + str(cipher_text))
except:
print("Encrypted: ", end=" ")
for c in cipher_text:
c = str(ord(c))
print(c, end=", ")
print("\n\nPrinted Encrypted line in termes of ord values cause \
the values couldn't be printed as utf-8 chars")
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for services relating to typed objects."""
from core.domain import interaction_registry
from core.domain import obj_services
from core.tests import test_utils
from extensions.objects.models import objects
class ObjectRegistryUnitTests(test_utils.GenericTestBase):
"""Test the Registry class in obj_services."""
def test_get_object_class_by_type_method(self):
"""Tests the normal behavior of get_object_class_by_type()."""
self.assertEqual(
obj_services.Registry.get_object_class_by_type('Int').__name__,
'Int')
def test_fake_class_is_not_gettable(self):
"""Tests that trying to retrieve a fake class raises an error."""
with self.assertRaisesRegexp(TypeError, 'not a valid object class'):
obj_services.Registry.get_object_class_by_type('FakeClass')
def test_base_object_is_not_gettable(self):
"""Tests that BaseObject exists and cannot be set as an obj_type."""
assert getattr(objects, 'BaseObject')
with self.assertRaisesRegexp(TypeError, 'not a valid object class'):
obj_services.Registry.get_object_class_by_type('BaseObject')
class ObjectJsFilenamesUnitTests(test_utils.GenericTestBase):
"""Test that all object JS templates are for the objects themselves.
The frontend code currently gets the JS template by constructing it from
the object type. This will lead to errors if an object which is a subclass
of another object uses the latter's JS template. Hence this test.
"""
def test_object_js_filenames(self):
# Show full failure messages for this test (both the system-generated
# one and the developer-specified one).
self.longMessage = True
all_object_classes = obj_services.Registry.get_all_object_classes()
for obj_type, obj_cls in all_object_classes.iteritems():
if obj_cls.has_editor_js_template():
template = obj_cls.get_editor_js_template()
directive_name = '%sEditor' % obj_type
normalized_directive_name = (
directive_name[0].lower() + directive_name[1:])
self.assertIn(
'oppia.directive(\'%s\'' % normalized_directive_name,
template, msg='(%s)' % obj_type)
class ObjectDefaultValuesUnitTests(test_utils.GenericTestBase):
"""Test that the default value of objects recorded in
extensions/objects/object_defaults.json correspond to
the defined default values in objects.py for all objects that
are used in rules.
"""
def test_all_rule_input_fields_have_default_values(self):
"""Checks that all rule input fields have a default value, and this
is provided in get_default_values().
"""
interactions = interaction_registry.Registry.get_all_interactions()
object_default_vals = obj_services.get_default_object_values()
for interaction in interactions:
for rule_name in interaction.rules_dict:
param_list = interaction.get_rule_param_list(rule_name)
for (_, param_obj_type) in param_list:
param_obj_type_name = param_obj_type.__name__
default_value = param_obj_type.default_value
self.assertIsNotNone(
default_value, msg=(
'No default value specified for object class %s.' %
param_obj_type_name))
self.assertIn(param_obj_type_name, object_default_vals)
self.assertEqual(
default_value, object_default_vals[param_obj_type_name])
def test_get_object_default_values_is_valid(self):
"""Checks that the default values provided by get_default_values()
correspond to the ones defined in objects.py.
"""
object_default_vals = obj_services.get_default_object_values()
all_object_classes = obj_services.Registry.get_all_object_classes()
for (obj_type, default_value) in object_default_vals.iteritems():
self.assertIn(obj_type, all_object_classes)
self.assertEqual(
default_value, all_object_classes[obj_type].default_value)
|
from .base import *
# security enforcement
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = env('DJANGO_SECURE_SSL_REDIRECT', True)
SESSION_COOKIE_SECURE = env('DJANGO_SESSION_COOKIE_SECURE', True)
# uncomment for cross-domain cookies
# SESSION_COOKIE_DOMAIN = '.{}'.format(env('DJANGO_ALLOWED_HOSTS'))
# emails
DEFAULT_EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', DEFAULT_EMAIL_BACKEND)
EMAIL_HOST = env('DJANGO_EMAIL_HOST')
EMAIL_PORT = env('DJANGO_EMAIL_HOST_PORT')
EMAIL_HOST_USER = env('DJANGO_EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = env('DJANGO_EMAIL_HOST_PASSWORD')
EMAIL_USE_TLS = env('DJANGO_EMAIL_USE_TLS', True)
# logging
LOGGING['loggers'] = {
'django': {
'handlers': ['console', 'syslog'],
'level': env('DJANGO_LOG_LEVEL', 'INFO'),
},
'threejs': {
'handlers': ['logstash', 'syslog'],
'level': env('THREEJS_LOG_LEVEL', 'INFO'),
},
}
|
import numpy as np
import scipy.sparse as sp
import networkx as nx
from sklearn.gaussian_process.kernels import RBF
from sklearn.preprocessing import StandardScaler
# Cora
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
def load_cora():
# cora dataset and loading function from https://github.com/tkipf/pygcn
idx_features_labels = np.genfromtxt("cora/cora.content", dtype=np.dtype(str))
labels = encode_onehot(idx_features_labels[:, -1])
labels = np.array([np.argmax(label) + 1 for label in labels])
# build graph
idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
idx_map = {j: i for i, j in enumerate(idx)}
edges_unordered = np.genfromtxt("cora/cora.cites", dtype=np.int32)
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())), dtype=np.int32).reshape(edges_unordered.shape)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])), shape=(labels.shape[0], labels.shape[0]), dtype=np.float32)
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
idx_train = range(140)
idx_val = range(200, 500)
idx_test = range(500, 1500)
return adj, labels
def load_graph_preprocessed(dataset, version):
return np.load(f'{dataset}/{dataset}_v{version}_graph_preprocessed.npy', allow_pickle=True).item()
def load_labels_preprocessed(dataset, version):
return np.load(f'{dataset}/{dataset}_v{version}_labels_preprocessed.npy')
def load_embeddings(dataset, version, matrix_id):
return np.load(f'{dataset}/embeddings/{matrix_id}_{dataset}_v{version}.npy')
def check_symmetric(a, rtol=1e-05, atol=1e-08):
return np.allclose(a, a.T, rtol=rtol, atol=atol)
# Stanford Email dataset: http://snap.stanford.edu/data/email-Eu-core.html
def read_stanford_graph(path):
adj = nx.to_numpy_array(nx.read_edgelist(path, nodetype=int))
adj = sp.coo_matrix(adj)
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
return adj
def read_stanford_label(path, graph_size):
labels = [0] * graph_size
with open(path) as _file:
for line in _file.readlines():
labels[int(line.split()[0])] = int(line.split()[1])
return np.array(labels)
# Ssets: http://cs.joensuu.fi/sipu/datasets/
def read_points(path):
points = []
with open(path) as _file:
for line in _file.readlines():
p = line.strip().split()
points.append(list(map(int, p)))
return np.array(points)
def read_labels(path):
labels = []
with open(path) as _file:
for line in _file.readlines():
labels.append(int(line.strip()))
return np.array(labels)
def load_graph(dataset, version):
if dataset == 'cora':
adj, labels = load_cora()
# select largest connected
G = nx.from_numpy_array(np.array(adj.todense()))
c = list(nx.connected_components(G))[0]
adj = adj[list(c), :][:, list(c)]
labels = labels[list(c)]
np.save(f'{dataset}/{dataset}_v{version}_graph_preprocessed.npy', adj)
np.save(f'{dataset}/{dataset}_v{version}_labels_preprocessed.npy', labels)
return adj, labels
elif dataset == 'email':
adj = read_stanford_graph('email/email-Eu-core.txt')
labels = read_stanford_label('email/email-Eu-core-department-labels.txt', adj.shape[0])
# select largest connected
G = nx.from_numpy_array(np.array(adj.todense()))
c = list(nx.connected_components(G))[0]
adj = adj[list(c), :][:, list(c)]
labels = labels[list(c)]
np.save(f'{dataset}/{dataset}_v{version}_graph_preprocessed.npy', adj)
np.save(f'{dataset}/{dataset}_v{version}_labels_preprocessed.npy', labels)
return sp.coo_matrix(adj), labels
elif dataset == 'ssets':
points = read_points(f'ssets/s{version}.txt')
labels = read_labels(f'ssets/s{version}-label.pa')
points = StandardScaler().fit_transform(points)
rbf = RBF()
adj = rbf(points)
np.save(f'{dataset}/{dataset}_v{version}_graph_preprocessed.npy', adj)
np.save(f'{dataset}/{dataset}_v{version}_labels_preprocessed.npy', labels)
return adj, labels
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import logging
from time import time
#from CIM15 import nsPrefix, nsURI
from CIM16 import nsPrefix, nsURI
from PyCIM.SimpleXMLWriter import XMLWriter
nsPrefixRDF = "rdf"
nsRDF = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
logger = logging.getLogger(__name__)
def cimwrite(d, source, encoding="utf-8"):
"""CIM RDF/XML serializer.
@type d: dict
@param d: Map of URIs to CIM objects.
@type source: File or file-like object.
@param source: This object must implement a C{write} method
that takes an 8-bit string.
@type encoding: string
@param encoding: Character encoding defaults to "utf-8", but can also
be set to "us-ascii".
@rtype: bool
@return: Write success.
"""
# Start the clock
t0 = time()
w = XMLWriter(source, encoding)
# Write the XML declaration.
w.declaration()
    # TODO print the encoding in the header tag
    # TODO add carriage return into the XMLWriter
# Add a '#' suffix to the CIM namespace URI if not present.
nsCIM = nsURI if nsURI[-1] == "#" else nsURI + "#"
# TODO Add ns for
# xmlns:entsoe="http://entsoe.eu/CIM/SchemaExtension/3/1#"
# xmlns:cim="http://iec.ch/TC57/2013/CIM-schema-cim16#"
# xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
# Start the root RDF element and declare namespaces.
xmlns = {u"xmlns:%s" % nsPrefixRDF: nsRDF, u"xmlns:%s" % nsPrefix: nsCIM}
rdf = w.start(u"%s:RDF" % nsPrefixRDF, xmlns)
# Iterate over all UUID, CIM object pairs in the given dictionary.
for uuid, obj in d.items():
w.start(u"%s:%s" % (nsPrefix, obj.__class__.__name__),
{u"%s:ID" % nsPrefixRDF: obj.UUID})
mro = obj.__class__.mro()
mro.reverse()
# Serialise attributes.
for klass in mro[2:]: # skip 'object' and 'Element'
attrs = [a for a in klass._attrs if a not in klass._enums]
for attr in attrs:
val = getattr(obj, attr)
if val != klass._defaults[attr]:
w.element(u"%s:%s.%s" % (nsPrefix, klass.__name__, attr),
str(val))
# Serialise enumeration data-types.
for klass in mro[2:]: # skip 'object' and 'Element'
enums = [a for a in klass._attrs if a in klass._enums]
for enum in enums:
val = getattr(obj, enum)
dt = klass._enums[enum]
w.element(u"%s:%s.%s" % (nsPrefix, klass.__name__, enum),
attrib={u"%s:resource" % nsPrefixRDF:
u"%s%s.%s" % (nsCIM, dt, val)})
# Serialise references.
for klass in mro[2:]: # skip 'object' and 'Element'
# FIXME: serialise 'many' references.
refs = [r for r in klass._refs if r not in klass._many_refs]
for ref in refs:
val = getattr(obj, ref)
if val is not None:
w.element(u"%s:%s.%s" % (nsPrefix, klass.__name__, ref),
attrib={u"%s:resource" % nsPrefixRDF:
u"#%s" % val.UUID})
w.end()
# Close the root RDF element.
w.close(rdf)
# Flush the output stream.
w.flush()
logger.info("%d CIM objects serialised in %.2fs.", len(d), time() - t0)
if __name__ == "__main__":
from RDFXMLReader import cimread
from PrettyPrintXML import xmlpp
logging.basicConfig(level=logging.INFO)
d = cimread("/Users/fran_jo/Desktop/PhD_CODE/cim2modelica/res/network/ieee_9bus_cim16_basic.xml")
# d = cimread("Test/Data/ENTSOE_16_BE_EQ.xml")
tmp = "/Users/fran_jo/Desktop/PhD_CODE/cim2modelica/res/network/ieee_9bus_cim16_after.xml"
cimwrite(d, tmp)
print(xmlpp(tmp))
|
# -*- coding: utf-8 -*-
import sys
import cv2
import glob
import os
from matplotlib.pyplot import imread
from tqdm import tqdm
import numpy as np
import math
from multiprocessing import Pool
import matplotlib.pyplot as plt
import argparse
args = argparse.ArgumentParser(description='the option of the evaluation')
args.add_argument('--test_dir', type=str, default='', help='enhancement dir')
args.add_argument('--gt_dir', type=str, default='', help='gt dir')
args.add_argument('--mask_dir', type=str, default='', help='mask dir')
args = args.parse_args()
def psnr(img1, img2):
mse = np.mean( (img1/255. - img2/255.) ** 2 )
if mse < 1.0e-10:
return 100
PIXEL_MAX = 1
return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
def ssim(img1, img2):
C1 = (0.01 * 255)**2
C2 = (0.03 * 255)**2
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
kernel = cv2.getGaussianKernel(11, 1.5)
window = np.outer(kernel, kernel.transpose())
mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid
mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
mu1_sq = mu1**2
mu2_sq = mu2**2
mu1_mu2 = mu1 * mu2
sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2))
return ssim_map.mean()
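# Quick sanity check on identical images: PSNR saturates at 100 and SSIM is 1
# (pure NumPy/OpenCV arithmetic, no external data needed).
_blank = np.zeros((64, 64), dtype=np.float64)
assert psnr(_blank, _blank) == 100
assert abs(ssim(_blank, _blank) - 1.0) < 1e-6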
gt_list = sorted(glob.glob(os.path.join(args.gt_dir, '*')))
image_list = sorted(glob.glob(os.path.join(args.test_dir, '*')))
def run(source_path, target_path, mask_path=None):
source = imread(source_path).copy()
if mask_path is not None:
mask = imread(mask_path)
mask = cv2.resize(mask, (512, 512), interpolation=cv2.INTER_NEAREST)
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB)
target = imread(target_path).copy()
source = cv2.resize(source, (512, 512), interpolation=cv2.INTER_NEAREST)
if mask_path is not None:
source[mask == 0] = 0
target[mask == 0] = 0
target = cv2.resize(target, (512, 512), interpolation=cv2.INTER_NEAREST)
return psnr(source, target), ssim(source, target)
psnr_list = []
ssim_list = []
pool = Pool(processes=32)
result = []
for image_path in tqdm(image_list):
try:
_, image_id = os.path.split(image_path)
if args.mask_dir != '':
mask_path = os.path.join(args.mask_dir, image_id.split('.')[0] + '_mask.gif')
else:
mask_path = None
image_id_split = image_id.split('_')[:-1]
image_name = ''
for idx, word in enumerate(image_id_split):
image_name += str(word)
if idx != len(image_id_split) - 1:
image_name += '_'
image_name += '.jpeg'
gt_path = os.path.join(args.gt_dir, image_name)
result.append(pool.apply_async(run, (image_path, gt_path, mask_path)))
except:
pass
pool.close()
for res in result:
    psnr_val, ssim_val = res.get()  # avoid shadowing the psnr/ssim functions
    psnr_list.append(psnr_val)
    ssim_list.append(ssim_val)
p_m = np.mean(psnr_list)
psnr_diff = np.std(psnr_list)
s_m = np.mean(ssim_list)
ssim_diff = np.std(ssim_list)
print('PSNR : {} +- {}'.format(p_m, psnr_diff))
print('SSIM : {} +- {}'.format(s_m, ssim_diff))
|
import json
import os
import shutil
from pathlib import Path
from typing import Callable
from github import Github
from .deck_exporter import DeckExporter
from ..anki.adapters.anki_deck import AnkiDeck
from ..representation import deck_initializer
from ..representation.deck import Deck
from ..utils.constants import DECK_FILE_NAME, DECK_FILE_EXTENSION, MEDIA_SUBDIRECTORY_NAME
from ..utils.filesystem.name_sanitizer import sanitize_anki_deck_name
from .note_sorter import NoteSorter
from ..config.config_settings import ConfigSettings
from ..utils.notifier import AnkiModalNotifier, Notifier
class AnkiJsonExporter(DeckExporter):
def __init__(self, collection,
config: ConfigSettings,
deck_name_sanitizer: Callable[[str], str] = sanitize_anki_deck_name,
deck_file_name: str = DECK_FILE_NAME):
self.config = config
self.collection = collection
self.last_exported_count = 0
self.deck_name_sanitizer = deck_name_sanitizer
self.deck_file_name = deck_file_name
self.note_sorter = NoteSorter(config)
def export_to_directory(self, deck: AnkiDeck, output_dir=Path("."), copy_media=True, create_deck_subdirectory=True) -> Path:
deck_directory = output_dir
if create_deck_subdirectory:
deck_directory = output_dir.joinpath(self.deck_name_sanitizer(deck.name))
deck_directory.mkdir(parents=True, exist_ok=True)
deck = deck_initializer.from_collection(self.collection, deck.name)
deck.notes = self.note_sorter.sort_notes(deck.notes)
self.last_exported_count = deck.get_note_count()
deck_filename = deck_directory.joinpath(self.deck_file_name).with_suffix(DECK_FILE_EXTENSION)
with deck_filename.open(mode='w', encoding="utf8") as deck_file:
deck_file.write(json.dumps(deck,
default=Deck.default_json,
sort_keys=True,
indent=4,
ensure_ascii=False))
self._save_changes(deck)
if copy_media:
self._copy_media(deck, deck_directory)
return deck_directory
    def export_to_github(self, deck: AnkiDeck, user, password, repo, copy_media=True, create_deck_subdirectory=True, notifier=None):
"""
This utility function directly uploads an AnkiDeck to Github in the JSON format.
To authorize it, a username and password must be supplied (note: password should be a Github
personal access token from https://github.com/settings/tokens - don't try it using your
        actual username and password as that would be highly insecure and against best practices).
Note: if a file already exists on the repo at the location determined, it will be updated.
"""
deck_directory = ""
if create_deck_subdirectory:
deck_directory = f"{self.deck_name_sanitizer(deck.name)}/"
filename = deck_directory + self.deck_file_name + DECK_FILE_EXTENSION
deck = deck_initializer.from_collection(self.collection, deck.name)
deck.notes = self.note_sorter.sort_notes(deck.notes)
self.last_exported_count = deck.get_note_count()
        g = Github(user, password)
try:
gh_user = g.get_user()
except:
return notifier.warning("Authentication to Github failed", "Authenticating with Github failed. Please check that "
"both your username and password are correct. Remember: don't use your "
"real Github login password, create a personal access token (https://git.io/token) "
"and use that as the password.")
# We find out if the file exists so we can replace it
# Code snippet from https://stackoverflow.com/a/63445581, CC-BY-SA
try:
            repo = gh_user.get_repo(repo)
except:
return notifier.warning("Unable to find Github repository", "Unable to find your Github repository. Make sure you've created one first: https://repo.new")
all_files = []
contents = repo.get_contents("")
while contents:
file_content = contents.pop(0)
if file_content.type == "dir":
contents.extend(repo.get_contents(file_content.path))
else:
file = file_content
all_files.append(str(file).replace('ContentFile(path="','').replace('")',''))
try:
if filename in all_files:
contents = repo.get_contents(filename)
new_contents = json.dumps(deck, default=Deck.default_json, sort_keys=True, indent=4, ensure_ascii=False)
repo.update_file(contents.path, "Automated update from CrowdAnki", new_contents, contents.sha, branch = "main")
else:
new_contents = json.dumps(deck, default=Deck.default_json, sort_keys=True, indent=4, ensure_ascii=False)
repo.create_file(filename, "Automated upload from CrowdAnki", new_contents, branch="main")
except Exception as e:
return notifier.warning("Unknown error when uploading file", "Please report this error at https://git.io/JCUKl.\n\n" + str(e))
# Not sure what to return if successful
return True
def _save_changes(self, deck, is_export_child=False):
"""Save updates that were made during the export. E.g. UUID fields
It saves decks, deck configurations and models.
is_export_child refers to whether this deck is a child for the
_purposes of the current export operation_. For instance, if
we're exporting or snapshotting a specific subdeck, then it's
considered the "parent" here. We need the argument to avoid
duplicately saving deck configs and note models.
"""
self.collection.decks.save(deck.anki_dict)
for child_deck in deck.children:
self._save_changes(child_deck, is_export_child=True)
if not is_export_child:
for deck_config in deck.metadata.deck_configs.values():
self.collection.decks.save(deck_config.anki_dict)
for model in deck.metadata.models.values():
self.collection.models.save(model.anki_dict)
# Notes?
def _copy_media(self, deck, deck_directory):
media_directory = deck_directory.joinpath(MEDIA_SUBDIRECTORY_NAME)
media_directory.mkdir(parents=True, exist_ok=True)
for file_src in deck.get_media_file_list():
try:
shutil.copy(os.path.join(self.collection.media.dir(), file_src),
str(media_directory.resolve()))
except IOError as ioerror:
print("Failed to copy a file {}. Full error: {}".format(file_src, ioerror))
|
s = float(input('\033[1;30mDigite seu salário: R$'))
a = s+(s*15/100)
print(f'\033[1;32mSeu novo salário com um aumento de 15% vai ficar \033[36mR${a:.2f}')
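# Worked example: a salary of R$1000.00 becomes R$1150.00 after the 15% raise.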
|
#!/usr/bin/env python3
# coding: utf-8
"""
Utility that helps prepare data for parallel or distributed computing, with a dask-oriented approach.
Usage:
daskutils.py partition --config-file=<cf> --nb-partitions=<p> [--log-file=<f> --verbose]
Options:
    --config-file=<cf>    json configuration dict specifying various arguments
    --nb-partitions=<p>   number of partitions to produce
    --log-file=<f>        path to the log file
    --verbose             set logging level to DEBUG (default is INFO)
"""
import logging
import docopt
import os
import sys
import json
import bz2
from dask.diagnostics import ProgressBar
import dask.bag as db
import numpy as np
from impresso_commons.utils import init_logger
from impresso_commons.utils import Timer, user_confirmation
from impresso_commons.path.path_s3 import s3_filter_archives
from impresso_commons.utils.s3 import get_bucket, read_jsonlines, readtext_jsonlines
from impresso_commons.utils.s3 import IMPRESSO_STORAGEOPT
from impresso_commons.utils.config_loader import PartitionerConfig
__author__ = "maudehrmann"
logger = logging.getLogger(__name__)
def partitioner(bag, path, nbpart):
"""Partition a bag into n partitions and write each partition in a file"""
grouped_items = bag.groupby(lambda x: np.random.randint(500), npartitions=nbpart)
items = grouped_items.map(lambda x: x[1]).flatten()
path = os.path.join(path, "*.jsonl.bz2")
with ProgressBar():
items.to_textfiles(path)
def create_even_partitions(bucket,
config_newspapers,
output_dir,
local_fs=False,
keep_full=False,
nb_partition=500):
"""Convert yearly bz2 archives to even bz2 archives, i.e. partitions.
    Enables efficient (distributed) processing by smoothing out the size discrepancies of newspaper archives.
    N.B.: articles are shuffled across the resulting partitions.
    Warning: choose config_newspapers carefully, as it determines what ends up in the partitions and is loaded into memory.
    @param bucket: the S3 bucket object containing the files to partition
    @param config_newspapers: json dict specifying the sources to consider (name(s) of newspaper(s) and year span(s))
    @param output_dir: classic FS repository where to write the produced partitions
    @param local_fs: whether to write the partitions to the local filesystem instead of S3
    @param keep_full: whether to keep full article records (True) or only their text, dropping metadata such as coordinates (False)
@param nb_partition: number of partitions
@return: None
"""
t = Timer()
# set the output
if local_fs:
os.makedirs(output_dir, exist_ok=True)
path = os.path.join(output_dir, "*.jsonl.bz2")
else:
path = f'{output_dir}/*.jsonl.gz'
logger.info(f"Will write partitions to {path}")
# collect (yearly) keys & load in bag
bz2_keys = s3_filter_archives(bucket.name, config=config_newspapers)
bag_bz2_keys = db.from_sequence(bz2_keys)
# read and filter lines (1 elem = list of lines, or articles, from a key)
if keep_full is False:
bag_items = bag_bz2_keys.map(readtext_jsonlines, bucket_name=bucket.name).flatten()
else:
bag_items = bag_bz2_keys.map(read_jsonlines, bucket_name=bucket.name).flatten()
# repartition evenly
grouped_items = bag_items.groupby(lambda x: np.random.randint(1000), npartitions=nb_partition)
items = grouped_items.map(lambda x: x[1]).flatten()
# write partitions
with ProgressBar():
# items.compute()
# if local_fs:
# items.to_textfiles(path)
# else:
items.to_textfiles(path,
storage_options=IMPRESSO_STORAGEOPT,
compute=True)
logger.info(f"Partitioning done in {t.stop()}.")
def main(args):
# get args
config_file = args["--config-file"]
log_file = args["--log-file"]
nb_partitions = args["--nb-partitions"]
log_level = logging.DEBUG if args["--verbose"] else logging.INFO
# init logger
global logger
logger = init_logger(logging.getLogger(), log_level, log_file)
logger.info(f"CLI arguments received: {args}")
config = PartitionerConfig.from_json(config_file)
bucket = get_bucket(config.bucket_name, create=False)
logger.info(f"Retrieved bucket: {bucket.name}")
if args["partition"] is True:
create_even_partitions(bucket,
config.newspapers,
config.output_dir,
local_fs=config.local_fs,
keep_full=config.keep_full,
nb_partition=int(nb_partitions))
if __name__ == "__main__":
arguments = docopt.docopt(__doc__)
main(arguments)
|
#! /usr/bin/python3
"""Extracts the reghdfe sections from the output logs as markdown."""
import re
import sys
def ExtractReghdfes(lines):
section = []
in_section = False
for line in lines:
# Stata logs every command as ". some_command" so lines that start with
# ". " indicate a new section. Close off the old section.
    if in_section and re.search(r"^\. ", line):
yield section
section = []
in_section = False
# We only want the reghdfe outputs.
    if re.search(r"^\. reghdfe", line):
in_section = True
if in_section:
section.append(line)
def main(argv):
for fn in argv[1:]:
print(f"# {fn}\n")
for section in ExtractReghdfes(open(fn)):
print("```")
for line in section:
sys.stdout.write(line)
print("```")
if __name__ == '__main__':
main(sys.argv)
|
#!/usr/bin/python
import grizzly.numpy_weld as npw
import numpy as np
import unittest
class NumPyWeldTestMethods(unittest.TestCase):
# TODO: Add more tests here
def test_div(self):
input = npw.NumpyArrayWeld(
np.array([3, 6, 12, 15, 14], dtype=np.int32), npw.WeldInt())
self.assertItemsEqual([1, 2, 4, 5, 4], (input / 3).evaluate(False))
def test_sum(self):
input = npw.NumpyArrayWeld(
np.array([1, 2, 3, 4, 5], dtype=np.int32), npw.WeldInt())
self.assertEqual(15, input.sum().evaluate(False))
def test_dot(self):
matrix = np.array([[1, 2, 3], [2, 3, 4], [3, 4, 5]], dtype=np.int32)
vector = np.array([0, 1, 2], dtype=np.int32)
self.assertItemsEqual([8, 11, 14], npw.dot(
matrix, vector).evaluate(False))
if __name__ == '__main__':
unittest.main()
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import math
class Linkedin:
def __init__(self):
linkprofile = webdriver.FirefoxProfile('')
self.driver = webdriver.Firefox(linkprofile)
#self.driver.get('https://www.linkedin.com/mynetwork/')
time.sleep(10)
def Link_job_apply(self):
count_application = 0
count_job = 0
jobs_per_page = 25
easy_apply = "?f_AL=true"
location = "Poland" # "Worldwide"
keywords = ["node", "react", "angular",
"javascript", "python", "java", "programming"]
for indexpag in range(len(keywords)):
self.driver.get(
'https://www.linkedin.com/jobs/search/' + easy_apply + '&keywords=' + keywords[indexpag] + "&" + location)
numofjobs = self.driver.find_element_by_xpath(
'//small').text # get number of results
space_ind = numofjobs.index(' ')
total_jobs = (numofjobs[0:space_ind])
total_jobs_int = int(total_jobs.replace(',', ''))
number_of_pages = math.ceil(total_jobs_int/jobs_per_page)
print(number_of_pages)
for i in range(number_of_pages):
cons_page_mult = 25 * i
url = 'https://www.linkedin.com/jobs/search/' + easy_apply + \
'&keywords=' + keywords[indexpag] + \
"&" + location + "&start=" + str(cons_page_mult)
self.driver.get(url)
time.sleep(10)
links = self.driver.find_elements_by_xpath(
'//div[@data-job-id]') # needs to be scrolled down
IDs = []
for link in links:
temp = link.get_attribute("data-job-id")
jobID = temp.split(":")[-1]
IDs.append(int(jobID))
IDs = set(IDs)
jobIDs = [x for x in IDs]
for jobID in jobIDs:
job_page = 'https://www.linkedin.com/jobs/view/' + \
str(jobID)
self.driver.get(job_page)
count_job += 1
time.sleep(5)
try:
button = self.driver.find_elements_by_xpath(
'//button[contains(@class, "jobs-apply")]/span[1]')
# if button[0].text in "Easy Apply" :
EasyApplyButton = button[0]
except:
EasyApplyButton = False
button = EasyApplyButton
if button is not False:
string_easy = "* has Easy Apply Button"
button.click()
time.sleep(2)
try:
self.driver.find_element_by_css_selector(
"button[aria-label='Submit application']").click()
time.sleep(3)
count_application += 1
print("* Just Applied to this job!")
except:
try:
button = self.driver.find_element_by_css_selector(
"button[aria-label='Continue to next step']").click()
time.sleep(3)
percen = self.driver.find_element_by_xpath("/html/body/div[3]/div/div/div[2]/div/div/span").text
percen_numer = int(percen[0:percen.index("%")])
if int(percen_numer) < 25:
print(
"*More than 5 pages,wont apply to this job! Link: " +job_page)
elif int(percen_numer) < 30:
try:
self.driver.find_element_by_css_selector(
"button[aria-label='Continue to next step']").click()
time.sleep(3)
self.driver.find_element_by_css_selector(
"button[aria-label='Continue to next step']").click()
time.sleep(3)
self.driver.find_element_by_css_selector(
"button[aria-label='Review your application']").click()
time.sleep(3)
self.driver.find_element_by_css_selector(
"button[aria-label='Submit application']").click()
count_application += 1
print("* Just Applied to this job!")
except:
print(
"*4 Pages,wont apply to this job! Extra info needed. Link: " +job_page)
elif int(percen_numer) < 40:
try:
self.driver.find_element_by_css_selector(
"button[aria-label='Continue to next step']").click()
time.sleep(3)
self.driver.find_element_by_css_selector(
"button[aria-label='Review your application']").click()
time.sleep(3)
self.driver.find_element_by_css_selector(
"button[aria-label='Submit application']").click()
count_application += 1
print("* Just Applied to this job!")
except:
print(
"*3 Pages,wont apply to this job! Extra info needed. Link: " +job_page)
elif int(percen_numer) < 60:
try:
self.driver.find_element_by_css_selector(
"button[aria-label='Review your application']").click()
time.sleep(3)
self.driver.find_element_by_css_selector(
"button[aria-label='Submit application']").click()
count_application += 1
print("* Just Applied to this job!")
except:
print(
"* 2 Pages,wont apply to this job! Unknown. Link: " +job_page)
except:
print("* Cannot apply to this job!!")
else:
print("* Already applied!")
time.sleep(2)
print("Category: " + keywords + " ,applied: " + str(count_application) +
" jobs out of " + str(count_job) + ".")
start_time = time.time()
ed = Linkedin()
ed.Link_job_apply()
end = time.time()
print("---Took: " + str(round((time.time() - start_time)/60)) + " minute(s).")
|
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User
from corehq.apps.sso.models import IdentityProvider, AuthenticatedEmailDomain
from corehq.apps.sso.utils.user_helpers import get_email_domain_from_username
class SsoBackend(ModelBackend):
"""
Authenticates against an IdentityProvider and SAML2 session data.
"""
def authenticate(self, request, username, idp_slug, is_handshake_successful):
if not (request and username and idp_slug and is_handshake_successful):
return None
try:
identity_provider = IdentityProvider.objects.get(slug=idp_slug)
except IdentityProvider.DoesNotExist:
# not sure how we would even get here, but just in case
request.sso_login_error = f"Identity Provider {idp_slug} does not exist."
return None
if not identity_provider.is_active:
request.sso_login_error = f"This Identity Provider {idp_slug} is not active."
return None
email_domain = get_email_domain_from_username(username)
if not email_domain:
# not a valid username
request.sso_login_error = f"Username {username} is not valid."
return None
if not AuthenticatedEmailDomain.objects.filter(
email_domain=email_domain, identity_provider=identity_provider
).exists():
            # if this user's email domain is not authorized for this identity
            # provider, do not continue with authentication
request.sso_login_error = (
f"The Email Domain {email_domain} is not allowed to "
f"authenticate with this Identity Provider ({idp_slug})."
)
return None
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
# todo handle user creation based on information from request/session
# do this prior to handling the invite scenario and new user scenario
request.sso_login_error = f"User {username} does not exist."
return None
request.sso_login_error = None
# todo what happens with 2FA required here?
return user
|
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Dropout
from keras.layers import Flatten, Dense
size_of_image = 96
model = Sequential()
model.add(Convolution2D(8, (3, 3), activation='relu', input_shape=(size_of_image,size_of_image,1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(16, (2,2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(32, (2,2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(500))
#model.add(Dropout(0.2))
model.add(Dense(30))
# Summarize the model
model.summary()
|
# vim:tw=50
"""Classes are Types
Let's move on to **classes**. We've been using
them already without directly talking about it, so
let's get down to what they really are.
In general, you can think of a class as a
**type**. This is, of course, merely a useful
fiction because it hides subtlety, but it is still
a great way to think about it, because classes
allow you to create a bunch of things that are the
same _kind_ or _type_ of thing. We'll learn how to
make our own types in the coming slides.
Calling a class makes a new **instance** of it.
If you think of a class as a blueprint for, say, a
house, an instance is the actual house you build
by following the plan.
Some basic properties of classes are demonstrated
in the example code by looking at |ValueError|,
which is a class we've seen and used before.
You've seen a lot of other classes already, such
as |list|, |tuple|, |dict|, |int|, |float|, and
others. We've been referring to them as
"callables", because they are, but that's because
_all_ classes are callable: calling one creates an
instance.
"""
# What is this type of thing anyway?
print "What's a ValueError class?"
print " ", repr(ValueError)
# Make a new instance of ValueError by calling it.
ex = ValueError("My super informative error message")
# What is this?
# Note how "repr" in this case shows you how to
# make one, which can be really useful.
print "What's a ValueError instance?"
print " ", repr(ex)
print "What (non-special) stuff is inside of it?"
print " " + "\n ".join(x for x in dir(ex) if x[:2] != '__')
# Now, there are various ways of getting at the
# message:
print "args: \t", ex.args
print "message:\t", ex.message
print "str: \t", str(ex)
# But "str" just calls the __str__ method:
print "__str__:\t", ex.__str__()
# And since it has a __str__ method, print can use
# it directly:
print "Bare: \t", ex
|
import torch
import argparse
import os
import numpy as np
from torch.backends import cudnn
from config.config import cfg, cfg_from_file, cfg_from_list
import data.transforms as T
import sys
import pprint
import random
from solver.solver import Solver
from model import segmentation as SegNet
from model.domain_bn import DomainBN
from model.discriminator import FCDiscriminator
import data.datasets as Dataset
from data import utils as data_utils
from data.label_map import get_label_map
import utils.utils as gen_utils
from utils.utils import freeze_BN
from torch.nn.parallel import DistributedDataParallel
#import apex
#from apex.parallel import DistributedDataParallel
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train script.')
parser.add_argument('--weights', dest='weights',
help='initialize with specified model parameters',
default=None, type=str)
parser.add_argument('--resume', dest='resume',
help='initialize with saved solver status',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--local_rank', dest='local_rank',
help='optional local rank',
default=0, type=int)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--exp_name', dest='exp_name',
help='the experiment name',
default='exp', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def get_transform(train, dataset_name):
base_size = cfg.DATA_TRANSFORM.LOADSIZE
crop_size = cfg.DATA_TRANSFORM.CROPSIZE
ignore_label = cfg.DATASET.IGNORE_LABEL
if dataset_name == cfg.DATASET.SOURCE:
input_size = cfg.DATA_TRANSFORM.INPUT_SIZE_S
else:
input_size = cfg.DATA_TRANSFORM.INPUT_SIZE_T
min_size = int((1.0 if train else 1.0) * base_size)
max_size = int((1.3 if train else 1.0) * base_size)
transforms = []
if cfg.DATA_TRANSFORM.RANDOM_RESIZE_AND_CROP:
if train:
transforms.append(T.RandomResize(min_size, max_size))
transforms.append(T.RandomHorizontalFlip(0.5))
transforms.append(T.RandomCrop(crop_size, ignore_label=ignore_label))
else:
transforms.append(T.Resize(cfg.DATA_TRANSFORM.INPUT_SIZE_T, True))
else:
if train:
transforms.append(T.Resize(input_size))
transforms.append(T.RandomHorizontalFlip(0.5))
else:
transforms.append(T.Resize(input_size, True))
mapping = get_label_map(cfg.DATASET.SOURCE, cfg.DATASET.TARGET)
transforms.append(T.LabelRemap(mapping[dataset_name]))
transforms.append(T.ToTensor(cfg.DATASET.IMG_MODE))
if cfg.DATASET.IMG_MODE == "BGR":
mean = (104.00698793, 116.66876762, 122.67891434)
std = (1.0, 1.0, 1.0)
else:
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
transforms.append(T.Normalize(mean, std))
return T.Compose(transforms)
def prepare_data(args):
train_transform_S = get_transform(train=True, dataset_name=cfg.DATASET.SOURCE)
train_transform_T = get_transform(train=True, dataset_name=cfg.DATASET.TARGET)
val_transform = get_transform(train=False, dataset_name=cfg.DATASET.VAL)
train_dataset_S = eval('Dataset.%s'%cfg.DATASET.SOURCE)(
cfg.DATASET.DATAROOT_S,
cfg.DATASET.TRAIN_SPLIT_S,
transform=train_transform_S)
train_dataset_T = eval('Dataset.%s'%cfg.DATASET.TARGET)(
cfg.DATASET.DATAROOT_T,
cfg.DATASET.TRAIN_SPLIT_T,
transform=train_transform_T)
val_dataset = eval('Dataset.%s'%cfg.DATASET.VAL)(
cfg.DATASET.DATAROOT_VAL,
cfg.DATASET.VAL_SPLIT,
transform=val_transform)
# construct dataloaders
train_dataloader_S = data_utils.get_dataloader(
train_dataset_S, cfg.TRAIN.TRAIN_BATCH_SIZE, cfg.NUM_WORKERS,
train=True, distributed=args.distributed,
world_size=gen_utils.get_world_size())
train_dataloader_T = data_utils.get_dataloader(
train_dataset_T, cfg.TRAIN.TRAIN_BATCH_SIZE, cfg.NUM_WORKERS,
train=True, distributed=args.distributed,
world_size=gen_utils.get_world_size())
val_dataloader = data_utils.get_dataloader(
val_dataset, cfg.TRAIN.VAL_BATCH_SIZE, cfg.NUM_WORKERS,
train=False, distributed=args.distributed,
world_size=gen_utils.get_world_size())
dataloaders = {'train_S': train_dataloader_S, \
'train_T': train_dataloader_T, 'val': val_dataloader}
return dataloaders
def init_net_D(args, state_dict=None):
net_D = FCDiscriminator(cfg.DATASET.NUM_CLASSES)
if args.distributed:
net_D = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net_D)
if cfg.MODEL.DOMAIN_BN:
net_D = DomainBN.convert_domain_batchnorm(net_D, num_domains=2)
if state_dict is not None:
try:
net_D.load_state_dict(state_dict)
except:
net_D = DomainBN.convert_domain_batchnorm(net_D, num_domains=2)
net_D.load_state_dict(state_dict)
if cfg.TRAIN.FREEZE_BN:
net_D.apply(freeze_BN)
if torch.cuda.is_available():
net_D.cuda()
if args.distributed:
net_D = DistributedDataParallel(net_D, device_ids=[args.gpu])
else:
net_D = torch.nn.DataParallel(net_D)
return net_D
def train(args):
#seed = 12345
#random.seed(seed)
#np.random.seed(seed)
#torch.random.manual_seed(seed)
# initialize model
model_state_dict = None
model_state_dict_D = None
resume_dict = None
if cfg.RESUME != '':
resume_dict = torch.load(cfg.RESUME, torch.device('cpu'))
model_state_dict = resume_dict['model_state_dict']
elif cfg.WEIGHTS != '':
param_dict = torch.load(cfg.WEIGHTS, torch.device('cpu'))
model_state_dict = param_dict['weights']
model_state_dict_D = param_dict['weights_D'] if 'weights_D' in param_dict else None
net = SegNet.__dict__[cfg.MODEL.NETWORK_NAME](
pretrained=False, pretrained_backbone=False,
num_classes=cfg.DATASET.NUM_CLASSES,
aux_loss=cfg.MODEL.USE_AUX_CLASSIFIER
)
net = gen_utils.load_model(net, './model/resnet101-imagenet.pth', True)
if args.distributed:
net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net)
#net = apex.parallel.convert_syncbn_model(net)
if cfg.MODEL.DOMAIN_BN:
net = DomainBN.convert_domain_batchnorm(net, num_domains=2)
if model_state_dict is not None:
try:
net.load_state_dict(model_state_dict)
except:
net = DomainBN.convert_domain_batchnorm(net, num_domains=2)
net.load_state_dict(model_state_dict)
if cfg.TRAIN.FREEZE_BN:
net.apply(freeze_BN)
if torch.cuda.is_available():
net.cuda()
if args.distributed:
net = DistributedDataParallel(net, device_ids=[args.gpu])
#net = DistributedDataParallel(net)
else:
net = torch.nn.DataParallel(net)
net_D = init_net_D(args, model_state_dict_D) if cfg.TRAIN.ADV_TRAIN else None
dataloaders = prepare_data(args)
# initialize solver
train_solver = Solver(net, net_D, dataloaders, args.distributed,
resume=resume_dict)
# train
train_solver.solve()
print('Finished!')
if __name__ == '__main__':
cudnn.benchmark = True
args = parse_args()
gen_utils.init_distributed_mode(args)
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
if args.resume is not None:
cfg.RESUME = args.resume
if args.weights is not None:
cfg.WEIGHTS = args.weights
if args.exp_name is not None:
cfg.EXP_NAME = args.exp_name
print('Using config:')
pprint.pprint(cfg)
cfg.SAVE_DIR = os.path.join(cfg.SAVE_DIR, cfg.EXP_NAME)
if not os.path.exists(cfg.SAVE_DIR):
os.makedirs(cfg.SAVE_DIR)
print('Output will be saved to %s.' % cfg.SAVE_DIR)
train(args)
|
import bpy
from collections import OrderedDict
from mathutils import Vector, Matrix
import math
import json
from functools import reduce
import os
def vec2array( _vector ):
return [ _vector.x, _vector.z, -_vector.y ]
def unit(vec):
vec_sq = [x for x in map(lambda x:x*x, vec)]
length = math.sqrt(reduce(lambda x, y: x + y, vec_sq))
return [x for x in map(lambda x: x / length, vec)]
def get_scene_data():
scene_data = OrderedDict()
scene_data['resolutionX'] = bpy.data.scenes['Scene'].render.resolution_x
scene_data['resolutionY'] = bpy.data.scenes['Scene'].render.resolution_y
scene_data['resolutionPercentage'] = bpy.data.scenes['Scene'].render.resolution_percentage
scene_data.update(export_camera())
scene_data.update(export_lights())
scene_data.update(export_shapes())
return scene_data
def export_scene (outdir, filename):
scene_data = get_scene_data()
outfilename = os.path.join(outdir, filename)
f = open(outfilename, "w")
f.write(json.dumps(scene_data, indent=4))
f.close()
def export_camera():
obj_camera = bpy.data.objects["Camera"]
loc_camera = obj_camera.matrix_world.translation
camera_param = OrderedDict()
camera_param['location'] = vec2array(obj_camera.matrix_world.translation)
camera_param['rotation'] = vec2array(obj_camera.rotation_euler)
camera_param['lookat'] = vec2array((obj_camera.matrix_world * Vector( ( 0, 0, -1, 1 ) ) ).xyz)
camera_param['fov'] = bpy.data.cameras['Camera'].angle * 180 / math.pi
camera_param['lens'] = bpy.data.cameras['Camera'].lens
camera_param['sensorWidth'] = bpy.data.cameras['Camera'].sensor_width
camera_param['sensorHeight'] = bpy.data.cameras['Camera'].sensor_height
camera_param['dofDistance'] = bpy.data.cameras['Camera'].dof_distance
camera_param['fStop'] = bpy.data.cameras['Camera'].gpu_dof.fstop
camera_param['up'] = vec2array((obj_camera.matrix_world * Vector( ( 0, 1, 0, 0 ) ) ).xyz)
camera_data = OrderedDict()
camera_data['camera'] = camera_param
return camera_data
def export_shapes():
shapes = OrderedDict()
shapes['shapes'] = []
for obj in bpy.data.objects:
obj_type = obj.type
if obj_type != 'MESH':
continue
obj_data = OrderedDict()
obj_data['name'] = obj.name
if len(obj.material_slots) > 0:
mat = obj.material_slots[0].material
obj_data['material'] = OrderedDict()
obj_data['material']['name'] = mat.name
obj_data['material']['diffuseColor'] = [c for c in mat.diffuse_color]
shapes['shapes'].append(obj_data)
return shapes
def export_lights():
lights = []
for light in bpy.data.lamps:
light_data = OrderedDict()
type_name = str(light.type)
light_data['type'] = type_name
if bpy.data.objects.find(light.name) == -1:
continue
light_obj = bpy.data.objects[light.name]
if type_name == 'POINT':
light_data['position'] = vec2array(light_obj.location)
light_data['color'] = [c for c in light.color]
light_data['energy'] = light.energy
elif type_name == 'SPOT':
position = vec2array(light_obj.location)
light_data['position'] = position
light_data['color'] = [c for c in light.color]
light_data['energy'] = light.energy
lookat = vec2array((light_obj.matrix_world * Vector( ( 0, 0, -1, 1 ) ) ).xyz)
# direction = [x - y for (x, y) in zip(lookat, light_data['location'])]
# light_data['direction'] = direction
light_data['direction'] = unit([d for d in map(lambda x, y: x - y, lookat, position)])
light_data['spotSize'] = light.spot_size
light_data['spotBlend'] = light.spot_blend
elif type_name == 'SUN':
light_data['type'] = 'DIRECTIONAL'
light_data['color'] = [c for c in light.color]
light_data['energy'] = light.energy
lookat = vec2array((light_obj.matrix_world * Vector( ( 0, 0, -1, 1 ) ) ).xyz)
position = vec2array(light_obj.location)
light_data['direction'] = unit([d for d in map(lambda x, y: x - y, lookat, position)])
elif type_name == 'AREA':
light_data['type'] = 'AREA'
position = vec2array(light_obj.location)
light_data['position'] = position
light_data['color'] = [c for c in light.color]
light_data['energy'] = light.energy
lookat = vec2array((light_obj.matrix_world * Vector( ( 0, 0, -1, 1 ) ) ).xyz)
light_data['direction'] = unit([d for d in map(lambda x, y: x - y, lookat, position)])
light_data['rotation'] = vec2array(light_obj.rotation_euler)
light_data['size'] = light.size
lights.append(light_data)
data = OrderedDict()
data['lights'] = lights
return data
|
YACHT = lambda dice: 50 if len(set(dice)) == 1 else 0
ONES = lambda dice: sum(x for x in dice if x == 1)
TWOS = lambda dice: sum(x for x in dice if x == 2)
THREES = lambda dice: sum(x for x in dice if x == 3)
FOURS = lambda dice: sum(x for x in dice if x == 4)
FIVES = lambda dice: sum(x for x in dice if x == 5)
SIXES = lambda dice: sum(x for x in dice if x == 6)
FULL_HOUSE = lambda dice: sum(dice) if len(set(dice)) == 2 and any(dice.count(x) == 3 for x in set(dice)) else 0
FOUR_OF_A_KIND = lambda dice: sum(x * 4 for x in set(dice) if dice.count(x) > 3)
LITTLE_STRAIGHT = lambda dice: 30 if sum(dice) == 15 and len(set(dice)) == 5 else 0
BIG_STRAIGHT = lambda dice: 30 if sum(dice) == 20 and len(set(dice)) == 5 else 0
CHOICE = lambda dice: sum(dice)
def score(dice, category):
if any((not 0 < x < 7) for x in dice):
raise ValueError("Invalid dice {dice}".format())
return category(dice)
|
from pandas import Series
from pandas import DataFrame
inde = ['x1', 'x2', 'x3', 'x4', 'x5']
oneSer = Series(data=[1, 2, 3, 4, 5], index=inde)
twoSer = Series(data=[2, 4, 6, 8, 10], index=inde)
threeSer = Series(data=[1, 3, 5, 7, 9], index=inde)
x1 = DataFrame([oneSer, twoSer, threeSer])
print(x1)
inde2 = ['y1', 'y2', 'y3']
yoneSer = Series(data=[1, 2, 3], index=inde2)
ytwoSer = Series(data=[4, 5, 6], index=inde2)
ythreeSer = Series(data=[7, 8, 9], index=inde2)
yfourSer = Series(data=[10, 11, 12], index=inde2)
yfiveSer = Series(data=[13, 14, 15], index=inde2)
y1 = DataFrame([yoneSer, ytwoSer, ythreeSer, yfourSer, yfiveSer])
print(y1)
for rx in x1.values:
print(rx)
for cy in y1.columns:
print(y1[cy])
for i in y1[cy].index:
print(rx[i] * y1[cy][i])
|
# ------------------------------------------------------------------------------ #
#                         Basic Input/Output in Python                            #
# ------------------------------------------------------------------------------ #
# OUTPUT :                                                                         #
#   1.) To print/display something on stdout (the screen) :                        #
#       - print "string"                                                           #
#       Example :                                                                  #
#       - print "Python is a very good programming language"                       #
# input([prompt]) => this function reads one line from stdin (the keyboard)        #
#                    and returns it as a string.                                   #
# ---------------------------------------------------------------------------     #
#                          BASIC TUPLE OPERATIONS                                  #
#        (a short demo of these operations follows at the end of this file)        #
# ---------------------------------------------------------------------------     #
#   Python Expression              Results                       Description      #
# ---------------------------------------------------------------------------     #
# len((1, 2, 3))                 3                              Length             #
# (1, 2, 3) + (4, 5, 6)          (1, 2, 3, 4, 5, 6)             Concatenation      #
# ('Hi!',) * 4                   ('Hi!', 'Hi!', 'Hi!', 'Hi!')   Repetition         #
# 3 in (1, 2, 3)                 True                           Membership         #
# for x in (1, 2, 3): print x,   1 2 3                          Iteration          #
# ---------------------------------------------------------------------------     #
#!/usr/bin/python
# Example of input([prompt])
user_input = input("Enter your input: ")
print("Received input is : ", user_input)
|
INOUTDOOR_LABELS = ['person']
|
from dolfin import *
from xii.meshing.make_mesh_cpp import make_mesh
from xii.assembler.average_matrix import curve_average_matrix
from xii.assembler.average_shape import Square
from xii import EmbeddedMesh
import numpy as np
surface_average_matrix = lambda V, TV, bdry_curve: curve_average_matrix(V, TV, bdry_curve, which='surface')
def make_z_mesh(num_vertices, zmin=0, zmax=1):
'''{(0, 0, zmin + t*(zmax - zmin))}'''
t = zmin + np.linspace(0, 1, num_vertices)*(zmax - zmin)
coordinates = np.c_[np.zeros_like(t), np.zeros_like(t), t]
cells = np.c_[np.arange(num_vertices - 1), np.arange(1, num_vertices)]
cells.dtype = 'uintp'
mesh = Mesh(mpi_comm_world())
make_mesh(coordinates, cells, 1, 3, mesh)
return mesh
def test(f, n, P, degree=8):
'''Check integrals due to averaging operator'''
mesh = BoxMesh(Point(-1, -1, -1), Point(1, 1, 1), n, n, n)
mf = MeshFunction('size_t', mesh, 1, 0)
CompiledSubDomain('near(x[0], 0.0) && near(x[1], 0.0)').mark(mf, 1)
line_mesh = EmbeddedMesh(mf, 1)
V = FunctionSpace(mesh, 'CG', 1)
TV = FunctionSpace(line_mesh, 'DG', 1)
f = interpolate(f, V)
cylinder = Square(P, degree)
Pi = surface_average_matrix(V, TV, cylinder)
print('\t', Pi.norm('linf'), max(len(Pi.getrow(i)[0]) for i in range(TV.dim())))
Pi_f = Function(TV)
Pi.mult(f.vector(), Pi_f.vector())
return Pi_f
# --------------------------------------------------------------------
if __name__ == '__main__':
# NOTE the size for integration size!!
size = 0.125
P = lambda x0: np.array([-size, -size, x0[2]])
f = Expression('2', degree=2)
Pi_f0 = f
f = Expression('x[2]', degree=1)
Pi_f0 = f
f = Expression('x[2]*x[2]', degree=2)
Pi_f0 = f
f = Expression('x[0]', degree=2)
Pi_f0 = Constant(0)
f = Expression('x[0]+x[1]', degree=2)
Pi_f0 = Constant(0)
f = Expression('x[0]*x[0]', degree=2)
Pi_f0 = Constant(2*size**2/3.)
f = Expression('x[0]*x[0]+x[1]*x[1]', degree=2)
Pi_f0 = Constant(4*size**2/3.)
f = Expression('x[2]*(x[0]*x[0]+x[1]*x[1])', degree=2)
Pi_f0 = Expression('x[2]*4*A*A/3.', A=size, degree=1)
e0, n0 = None, None
for n in (4, 8, 16, 32):
Pi_f = test(f, n, P)
print(Pi_f(0, 0, 0.5))
assert Pi_f.vector().norm('l2') > 0
e = sqrt(abs(assemble(inner(Pi_f0 - Pi_f, Pi_f0 - Pi_f)*dx)))
if e0 is not None:
rate = ln(e/e0)/ln(float(n0)/n)
else:
rate = np.inf
print('error %g, rate=%.2f' % (e, rate))
n0, e0 = n, e
|
#!/usr/bin/env python
# coding:utf-8
"""
# @Time : 2020-08-26 20:39
# @Author : Zhangyu
# @Email : zhangycqupt@163.com
# @File : gaode_interface.py
# @Software : PyCharm
# @Desc :
"""
import json
import requests
from math import radians, cos, sin, asin, sqrt
from geopy.distance import geodesic
def haversine(lon1, lat1, lon2, lat2):  # lon1, lat1, lon2, lat2 (decimal degrees)
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
    # convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
    # haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * asin(sqrt(a))
    r = 6371  # mean radius of the Earth, in kilometers
return c * r * 1000
def address_extract(text):
session = requests.Session()
session.headers = {"Content-Type": "application/json"}
# session.proxies = {"http":"", "https": ""}
    # Baidu open platform address API - https://ai.baidu.com/ai-doc/NLP/vk6z52h5n
access_token = "24.943decb903ba71fae61eba9900d91c84.2592000.1601037376.282335-22269163"
URL = "https://aip.baidubce.com/rpc/2.0/nlp/v1/address" + "?access_token=" + access_token
# result = session.post(URL, json=text, timeout=5)
# print(result.text)
    # Amap (Gaode) geocoding API - https://lbs.amap.com/api/webservice/guide/api/georegeo
api_key = "6aee72fb716339e61042505e33e68b45"
URL = "https://restapi.amap.com/v3/geocode/geo?address=%s&output=json&key=%s" % (text["text"], api_key)
result = session.get(URL, timeout=5)
print(result.text)
return json.loads(result.text)["geocodes"][0]["location"].split(",")
# from xml.dom.minidom import parseString
#
# domobj = parseString(result.text)
# elementobj = domobj.documentElement
# subElementobj = elementobj.getElementsByTagName("geocode")
# for e in subElementobj:
# city = e.getElementsByTagName("location")[0]
# name = city.childNodes[0].data
# print(name)
# return name.split(",")
# return []
if __name__ == "__main__":
text = {"text": "江岸区蔡家田北区6栋4单元5层1室"}
location1 = address_extract(text)
lon1, lat1 = float(location1[0]), float(location1[1])
text = {"text": "武汉市江岸区蔡家田A区4栋5单元1层1室司法拍卖"}
# text = {"text": "佛山市顺德区龙江镇儒林大街太平巷横三巷4号"}
location2 = address_extract(text)
lon2, lat2 = float(location2[0]), float(location2[1])
distance = haversine(lon1, lat1, lon2, lat2)
print(distance)
    print(geodesic((lat1, lon1), (lat2, lon2)).m)  # straight-line distance between the two coordinates
|
import sys
import asyncio
import os
import types
import functools
from collections import namedtuple
from collections.abc import Iterator
import abc
from typing import List, Union, NamedTuple
import uuid
import yaml
import logging
log = logging.getLogger(__name__)
Fire = NamedTuple('Fire', [('rate', int), ('duration', int),
('supervisor', str)])
Idle = NamedTuple('Idle', [])
Terminate = NamedTuple('Terminate', [])
RunnerHistoryItem = NamedTuple('RunnerHistoryItem',
[('at', int), ('succeeded', int),
('failed', int)])
class LauncherResp(object):
def __init__(self, action: Union[Fire, Idle], opaque=None):
self.action = action
self.opaque = opaque
def is_fire(self):
return isinstance(self.action, Fire)
def is_idle(self):
return isinstance(self.action, Idle)
def is_terminate(self):
return isinstance(self.action, Terminate)
class RunnerContext(object):
def __init__(self, host: str, history: List[RunnerHistoryItem]):
self.host = host
self._history = [] if history is None else history
self.opaque = None
def update(self, prev_resp: LauncherResp, history: List[RunnerHistoryItem]):
self._history = history
self.opaque = prev_resp.opaque
@staticmethod
    def new_context(host: str = ''):
if not host:
host = uuid.uuid1().hex
return RunnerContext(host, [])
class Launcher(metaclass=abc.ABCMeta):
@abc.abstractmethod
def ask_next(self, runner_ctx: RunnerContext) -> LauncherResp:
"""
:param runner_ctx:
:return:
"""
class IdleLauncher(Launcher):
def ask_next(self, runner_ctx: RunnerContext) -> LauncherResp:
log.debug('runner_ctx:{}'.format(runner_ctx))
return LauncherResp(Idle())
class OneShotLauncher(Launcher):
def ask_next(self, runner_ctx: RunnerContext) -> LauncherResp:
log.debug('runner_ctx:{}'.format(runner_ctx.__dict__))
if runner_ctx.opaque is not None:
return LauncherResp(Terminate())
else:
#return LauncherResp(Fire(rate=1, start=0, end=10, supervisor=''),
# opaque=1)
return LauncherResp(Fire(rate=1, duration=1, supervisor=''),
opaque=1)
|
# Generated by Django 2.0.3 on 2018-04-06 18:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('entrance', '0063_selectenrollmenttypeentrancestep_text_on_moderation'),
]
operations = [
migrations.AlterField(
model_name='selectedenrollmenttype',
name='is_approved',
field=models.BooleanField(db_index=True, help_text='Одобрен ли запрос'),
),
migrations.AlterField(
model_name='selectedenrollmenttype',
name='is_moderated',
field=models.BooleanField(db_index=True, help_text='Обработан ли запрос'),
),
]
|
observations["eventDate"] = pd.to_datetime(observations[["year", "month", "day"]])
observations
|
'''Train CIFAR10 with PyTorch.'''
import torch.optim as optim
import torch.cuda
import torch.backends.cudnn as cudnn
import torch.utils.data
from models.codinet import *
from utils.dataloader import get_data
import os
from utils.argument import get_args
from utils.metric import *
from utils.loss import *
from utils.metric import accuracy
from utils.utils import *
from utils.dist_utils import *
from apex.parallel import DistributedDataParallel as DDP
def get_variables(inputs, labels):
if 'aug' in args.dataset:
assert len(inputs.shape) == 5
assert len(labels.shape) == 2
inputs = inputs.view(inputs.shape[0] * inputs.shape[1], inputs.shape[2], inputs.shape[3],
inputs.shape[4]).cuda()
labels = labels.view(-1).cuda()
else:
inputs, labels = inputs.cuda(), labels.cuda()
return inputs, labels
def train_epoch(net, train_loader, logger, epoch):
net.train()
for batch_idx, (inputs, labels) in enumerate(dist_tqdm(train_loader)):
global_step = epoch * len(train_loader) + batch_idx
inputs, labels = get_variables(inputs, labels)
result, prob = net(inputs)
# prob: block * batch * 2 * 1 * 1
loss_CE = criterion_CE(result, labels)
loss = loss_CE
if args.loss_lda is not None:
loss_lda_inter, loss_lda_intra = criterion_CoDi(prob)
loss += args.loss_lda * (loss_lda_inter + loss_lda_intra)
if args.loss_w is not None:
loss_FL = criterion_FL(prob)
loss += loss_FL * args.loss_w
# measure accuracy and record loss
prec, = accuracy(result, labels, topk=(1,))
# calc gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
logger.add_scalar('train/prob', mean_reduce(prob[:, :, 0].sum() / prob.shape[0]),
global_step=global_step)
logger.add_scalar('train/train_prec', mean_reduce(prec), global_step=global_step)
logger.add_scalar('train/loss_single', loss, global_step=global_step)
logger.add_scalar('train/lr', optimizer.param_groups[0]['lr'], global_step=global_step)
logger.add_scalar('train/cls', mean_reduce(loss_CE), global_step=global_step)
logger.add_scalar('train/total', mean_reduce(loss), global_step=global_step)
def valid_epoch(net, valid_loader, logger, epoch):
counter = MultiLabelAcc()
probable = AverageMetric()
path_nums = []
net.eval()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(dist_tqdm(valid_loader)):
inputs, targets = inputs.cuda(), targets.cuda()
result, prob = net(inputs)
counter.update(result, targets)
probable.update(prob[:, :, 0].sum() / prob.shape[0], inputs.size(0))
path_nums += tensor_path2nums(prob)
all_total = sum_reduce(torch.tensor([counter.total, ], dtype=torch.long).cuda())
all_correct = sum_reduce(torch.tensor([counter.correct, ], dtype=torch.long).cuda())
all_prob = mean_reduce(torch.tensor([probable.avg], dtype=torch.float).cuda())
all_paths = cat_reduce(torch.tensor(path_nums, dtype=torch.long).cuda())
logger.add_scalar('valid/prec', to_python_float(all_correct) * 100.0 / to_python_float(all_total),
global_step=epoch)
logger.add_scalar('valid/prob', all_prob, global_step=epoch)
logger.add_scalar('valid/path_num', len(torch.unique(all_paths)), global_step=epoch)
return to_python_float(all_correct) * 100.0 / to_python_float(all_total)
def save_model(top1, best_acc, epoch, save_path):
if top1 > best_acc:
dist_print('Saving best model..')
state = {'net': net.state_dict(), 'acc': top1, 'epoch': epoch, }
if not os.path.isdir(save_path):
os.mkdir(save_path)
model_path = os.path.join(save_path, 'ckpt.pth')
torch.save(state, model_path)
best_acc = top1
return best_acc
if __name__ == "__main__":
args = get_args().parse_args()
distributed = False
if 'WORLD_SIZE' in os.environ:
distributed = int(os.environ['WORLD_SIZE']) > 1
if distributed:
assert int(os.environ[
'WORLD_SIZE']) == torch.cuda.device_count(), 'It should be the same number of devices and processes'
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
dist_print(args)
best_acc = 0 # best test accuracy
train_loader, valid_loader, classes = get_data(args.train_bs, args.test_bs, dataset=args.dataset,
data_root=args.data_root, distributed=distributed,
aug_repeat=args.aug_repeat)
cudnn.benchmark = True
# Model
dist_print('==> Building model..')
net = CoDiNet(args.backbone, classes, args.beta, args.finetune).cuda()
if distributed:
net = net.cuda()
net = DDP(net, delay_allreduce=True) # TODO test no delay
logger, save_path = parse_system(args)
criterion_CE = nn.CrossEntropyLoss().cuda()
criterion_FL = FLOPSL1Loss(target=args.num_target).cuda()
criterion_CoDi = ConDivLoss(args.aug_repeat, args.lda_intra_margin, args.lda_inter_margin).cuda()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
for epoch in range(args.epochs):
dist_print('\nEpoch: %d' % epoch)
adjust_learning_rate(optimizer, epoch, args)
train_epoch(net, train_loader, logger, epoch)
top1 = valid_epoch(net, valid_loader, logger, epoch)
synchronize()
if is_main_process():
best_acc = save_model(top1, best_acc, epoch, save_path)
synchronize()
logger.add_scalar('best_acc', best_acc, global_step=epoch)
dist_print('\nRESNET: acc %f' % (best_acc))
torch.set_printoptions(profile="full")
logger.close()
|
"""The ``i2c`` module lets you communicate with devices connected to your board
using the I²C bus protocol. There can be multiple slave devices connected at
the same time, and each one has its own unique address, which is either fixed
for the device or configured on it. Your board acts as the I²C master.
We use 7-bit addressing for devices because of the reasons stated
`here <http://www.totalphase.com/support/articles/200349176-7-bit-8-bit-and-10-bit-I2C-Slave-Addressing>`_.
This may be different to other micro:bit related solutions.
How exactly you should communicate with the devices, that is, what bytes to
send and how to interpret the responses, depends on the device in question and
should be described separately in that device's documentation.
You should connect the device's ``SCL`` pin to micro:bit pin 19, and the
device's ``SDA`` pin to micro:bit pin 20. You also must connect the device's
ground to the micro:bit ground (pin ``GND``). You may need to power the device
using an external power supply or the micro:bit.
There are internal pull-up resistors on the I²C lines of the board, but with
particularly long wires or large number of devices you may need to add
additional pull-up resistors, to ensure noise-free communication.
"""
from . import pin19, pin20
from typing import Union
def init(freq: int = 100000, sda: int = pin20, scl: int = pin19) -> None:
"""Re-initialize peripheral with the specified clock frequency ``freq`` set_power_on the
specified ``sda`` and ``scl`` pins.
.. warning::
Changing the I²C pins from defaults will make the accelerometer and
compass stop working, as they are connected internally to those pins.
"""
def read(addr: int, n: int, repeat: bool = False) -> bytes:
"""Read ``n`` bytes from the device with 7-bit address ``addr``. If ``repeat``
is ``True``, no stop bit will be sent.
"""
def write(addr: int, buf: Union[bytes, bytearray], repeat=False) -> None:
"""Write bytes from ``buf`` to the device with 7-bit address ``addr``. If
``repeat`` is ``True``, no stop bit will be sent.
"""
|
import unittest
import mock
from main import check_util
from main import Common
from StringIO import StringIO
import console_logger
class TestCheckUtil(unittest.TestCase):
""" unit tests for functions in the check_util module """
def setUp(self):
self.logger = console_logger.ConsoleLogger()
self.cutil = check_util.CheckUtil(self.logger)
def get_mock_filestream(self, somestring):
stream = StringIO()
stream.write(somestring)
stream.seek(0)
return stream
@mock.patch('os.path.isfile', return_value=False)
@mock.patch('os.path.isdir', return_value=False)
def test_appcompat(self, os_path_isdir, os_path_isfile):
self.assertFalse(self.cutil.is_app_compat_issue_detected())
@mock.patch('os.popen')
def test_memory(self, os_popen):
output = "8000000"
os_popen.return_value = self.get_mock_filestream(output)
self.assertFalse(self.cutil.is_insufficient_memory())
@mock.patch('os.popen')
def test_memory_low_memory(self, os_popen):
output = "6000000"
os_popen.return_value = self.get_mock_filestream(output)
self.assertTrue(self.cutil.is_insufficient_memory())
def test_is_kv_url(self):
dns_suffix_list = ["vault.azure.net", "vault.azure.cn", "vault.usgovcloudapi.net", "vault.microsoftazure.de"]
for dns_suffix in dns_suffix_list:
self.cutil.check_kv_url("https://testkv." + dns_suffix + "/", "")
self.cutil.check_kv_url("https://test-kv2." + dns_suffix + "/", "")
self.cutil.check_kv_url("https://test-kv2." + dns_suffix + ":443/", "")
self.cutil.check_kv_url("https://test-kv2." + dns_suffix + ":443/keys/kekname/kekversion", "")
self.assertRaises(Exception, self.cutil.check_kv_url, "http://testkv." + dns_suffix + "/", "")
# self.assertRaises(Exception, self.cutil.check_kv_url, "https://https://testkv." + dns_suffix + "/", "")
# self.assertRaises(Exception, self.cutil.check_kv_url, "https://testkv.testkv." + dns_suffix + "/", "")
# self.assertRaises(Exception, self.cutil.check_kv_url, "https://testkv.vault.azure.com/", "")
self.assertRaises(Exception, self.cutil.check_kv_url, "https://", "")
def test_validate_volume_type(self):
self.cutil.validate_volume_type({Common.CommonVariables.VolumeTypeKey: "DATA"})
self.cutil.validate_volume_type({Common.CommonVariables.VolumeTypeKey: "ALL"})
self.cutil.validate_volume_type({Common.CommonVariables.VolumeTypeKey: "all"})
self.cutil.validate_volume_type({Common.CommonVariables.VolumeTypeKey: "Os"})
self.cutil.validate_volume_type({Common.CommonVariables.VolumeTypeKey: "OS"})
self.cutil.validate_volume_type({Common.CommonVariables.VolumeTypeKey: "os"})
self.cutil.validate_volume_type({Common.CommonVariables.VolumeTypeKey: "Data"})
self.cutil.validate_volume_type({Common.CommonVariables.VolumeTypeKey: "data"})
for vt in Common.CommonVariables.SupportedVolumeTypes:
self.cutil.validate_volume_type({Common.CommonVariables.VolumeTypeKey: vt})
self.assertRaises(Exception, self.cutil.validate_volume_type, {Common.CommonVariables.VolumeTypeKey: "NON-OS"})
self.assertRaises(Exception, self.cutil.validate_volume_type, {Common.CommonVariables.VolumeTypeKey: ""})
self.assertRaises(Exception, self.cutil.validate_volume_type, {Common.CommonVariables.VolumeTypeKey: "123"})
def test_fatal_checks(self):
self.cutil.precheck_for_fatal_failures({
Common.CommonVariables.EncryptionEncryptionOperationKey: Common.CommonVariables.QueryEncryptionStatus
})
self.cutil.precheck_for_fatal_failures({
Common.CommonVariables.VolumeTypeKey: "DATA",
Common.CommonVariables.EncryptionEncryptionOperationKey: Common.CommonVariables.DisableEncryption
})
self.cutil.precheck_for_fatal_failures({
Common.CommonVariables.VolumeTypeKey: "ALL",
Common.CommonVariables.KeyVaultURLKey: "https://vaultname.vault.azure.net/",
Common.CommonVariables.EncryptionEncryptionOperationKey: Common.CommonVariables.EnableEncryption
})
self.cutil.precheck_for_fatal_failures({
Common.CommonVariables.VolumeTypeKey: "ALL",
Common.CommonVariables.KeyVaultURLKey: "https://vaultname.vault.azure.net/",
Common.CommonVariables.KeyEncryptionKeyURLKey: "https://vaultname.vault.azure.net/keys/keyname/ver",
Common.CommonVariables.EncryptionEncryptionOperationKey: Common.CommonVariables.EnableEncryptionFormat
})
self.cutil.precheck_for_fatal_failures({
Common.CommonVariables.VolumeTypeKey: "ALL",
Common.CommonVariables.KeyVaultURLKey: "https://vaultname.vault.azure.net/",
Common.CommonVariables.KeyEncryptionKeyURLKey: "https://vaultname.vault.azure.net/keys/keyname/ver",
Common.CommonVariables.KeyEncryptionAlgorithmKey: 'rsa-OAEP-256',
Common.CommonVariables.EncryptionEncryptionOperationKey: Common.CommonVariables.EnableEncryptionFormatAll
})
self.assertRaises(Exception, self.cutil.precheck_for_fatal_failures, {})
self.assertRaises(Exception, self.cutil.precheck_for_fatal_failures, {
Common.CommonVariables.VolumeTypeKey: "123",
Common.CommonVariables.EncryptionEncryptionOperationKey: Common.CommonVariables.EnableEncryption
})
self.assertRaises(Exception, self.cutil.precheck_for_fatal_failures, {
Common.CommonVariables.VolumeTypeKey: "ALL",
Common.CommonVariables.KeyVaultURLKey: "https://vaultname.vault.azure.net/",
Common.CommonVariables.KeyEncryptionKeyURLKey: "https://vaultname.vault.azure.net/keys/keyname/ver",
Common.CommonVariables.KeyEncryptionAlgorithmKey: 'rsa-OAEP-25600',
Common.CommonVariables.EncryptionEncryptionOperationKey: Common.CommonVariables.EnableEncryptionFormatAll
})
def test_mount_scheme(self):
proc_mounts_output = """
sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
udev /dev devtmpfs rw,relatime,size=4070564k,nr_inodes=1017641,mode=755 0 0
devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
tmpfs /run tmpfs rw,nosuid,noexec,relatime,size=815720k,mode=755 0 0
/dev/sda1 / ext4 rw,relatime,discard,data=ordered 0 0
none /sys/fs/cgroup tmpfs rw,relatime,size=4k,mode=755 0 0
none /sys/fs/fuse/connections fusectl rw,relatime 0 0
none /sys/kernel/debug debugfs rw,relatime 0 0
none /sys/kernel/security securityfs rw,relatime 0 0
none /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0
none /run/shm tmpfs rw,nosuid,nodev,relatime 0 0
none /run/user tmpfs rw,nosuid,nodev,noexec,relatime,size=102400k,mode=755 0 0
none /sys/fs/pstore pstore rw,relatime 0 0
systemd /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,name=systemd 0 0
/dev/mapper/fee16d98-9c18-4e7d-af70-afd7f3dfb2d9 /mnt/resource ext4 rw,relatime,data=ordered 0 0
/dev/mapper/vg0-lv0 /data ext4 rw,relatime,discard,data=ordered 0 0
"""
with mock.patch("__builtin__.open", mock.mock_open(read_data=proc_mounts_output)):
self.assertFalse(self.cutil.is_unsupported_mount_scheme())
# Skip LVM OS validation when OS volume is not being targeted
def test_skip_lvm_os_check_if_data_only_enable(self):
# skip lvm detection if data only
self.cutil.validate_lvm_os({Common.CommonVariables.VolumeTypeKey: "DATA", Common.CommonVariables.EncryptionEncryptionOperationKey: Common.CommonVariables.EnableEncryption})
def test_skip_lvm_os_check_if_data_only_ef(self):
# skip lvm detection if data only
self.cutil.validate_lvm_os({Common.CommonVariables.VolumeTypeKey: "DATA", Common.CommonVariables.EncryptionEncryptionOperationKey: Common.CommonVariables.EnableEncryptionFormat})
def test_skip_lvm_os_check_if_data_only_efa(self):
# skip lvm detection if data only
self.cutil.validate_lvm_os({Common.CommonVariables.VolumeTypeKey: "DATA", Common.CommonVariables.EncryptionEncryptionOperationKey: Common.CommonVariables.EnableEncryptionFormatAll})
def test_skip_lvm_os_check_if_data_only_disable(self):
# skip lvm detection if data only
self.cutil.validate_lvm_os({Common.CommonVariables.VolumeTypeKey: "DATA", Common.CommonVariables.EncryptionEncryptionOperationKey: Common.CommonVariables.DisableEncryption})
def test_skip_lvm_os_check_if_query(self):
# skip lvm detection if query status operation is invoked without volume type
self.cutil.validate_lvm_os({Common.CommonVariables.EncryptionEncryptionOperationKey: Common.CommonVariables.QueryEncryptionStatus})
def test_skip_lvm_no_encryption_operation(self):
# skip lvm detection if no encryption operation
self.cutil.validate_lvm_os({Common.CommonVariables.VolumeTypeKey: "ALL"})
def test_skip_lvm_no_volume_type(self):
# skip lvm detection if no volume type specified
self.cutil.validate_lvm_os({Common.CommonVariables.EncryptionEncryptionOperationKey: Common.CommonVariables.EnableEncryptionFormatAll})
@mock.patch("os.system", return_value=-1)
def test_no_lvm_no_config(self, os_system):
# simulate no LVM OS, no config
self.cutil.validate_lvm_os({})
@mock.patch("os.system", return_value=0)
def test_lvm_no_config(self, os_system):
# simulate valid LVM OS, no config
self.cutil.validate_lvm_os({})
@mock.patch("os.system", side_effect=[0, -1])
def test_invalid_lvm_no_config(self, os_system):
# simulate invalid LVM naming scheme, but no config setting to encrypt OS
self.cutil.validate_lvm_os({})
@mock.patch("os.system", return_value=-1)
def test_lvm_os_lvm_absent(self, os_system):
# using patched return value of -1, simulate no LVM OS
self.cutil.validate_lvm_os({Common.CommonVariables.VolumeTypeKey: "ALL", Common.CommonVariables.EncryptionEncryptionOperationKey: Common.CommonVariables.EnableEncryption})
@mock.patch("os.system", return_value=0)
def test_lvm_os_valid(self, os_system):
# simulate a valid LVM OS and a valid naming scheme by always returning 0
self.cutil.validate_lvm_os({Common.CommonVariables.VolumeTypeKey: "ALL", Common.CommonVariables.EncryptionEncryptionOperationKey: Common.CommonVariables.EnableEncryption})
@mock.patch("os.system", side_effect=[0, -1])
def test_lvm_os_lv_missing_expected_name(self, os_system):
# using patched side effects, first simulate LVM OS present, then simulate not finding the expected LV name
self.assertRaises(Exception, self.cutil.validate_lvm_os, {Common.CommonVariables.VolumeTypeKey: "ALL", Common.CommonVariables.EncryptionEncryptionOperationKey: Common.CommonVariables.EnableEncryption})
|
#!/usr/bin/env python3
import csv
with open("test.csv") as csvfileA:
reader = csv.DictReader(csvfileA)
test = len(list(reader))
|
# Copyright (c) 2020, Ahmed M. Alaa
# Licensed under the BSD 3-clause license (see LICENSE.txt)
# ---------------------------------------------------------
# Helper functions and utilities for deep learning models
# ---------------------------------------------------------
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import roc_auc_score
from models.sequential import *
from utils.make_data import *
def plot_1D_uncertainty(results, Y_test, data_index):
plt.fill_between(list(range(len(results["Lower limit"][data_index]))),
results["Lower limit"][data_index].reshape(-1,),
results["Upper limit"][data_index].reshape(-1,), color="r", alpha=0.25)
plt.plot(results["Lower limit"][data_index], linestyle=":", linewidth=3, color="r")
plt.plot(results["Upper limit"][data_index], linestyle=":", linewidth=3, color="r")
plt.plot(Y_test[data_index], linestyle="--", linewidth=2, color="black")
plt.plot(results["Point predictions"][data_index], linewidth=3, color="r", Marker="o")
def evaluate_performance(model, X_test, Y_test, coverage=.9, error_threshold=1):
if type(model) is RNN_uncertainty_wrapper:
y_pred, y_l_approx, y_u_approx = model.predict(X_test, coverage=coverage)
elif type(model) is QRNN:
y_u_approx, y_l_approx = model.predict(X_test)
y_pred = [(y_l_approx[k] + y_u_approx[k])/2 for k in range(len(y_u_approx))]
elif type(model) is DPRNN:
y_pred, y_std = model.predict(X_test, alpha=1-coverage)
y_u_approx = [y_pred[k] + y_std[k] for k in range(len(y_pred))]
y_l_approx = [y_pred[k] - y_std[k] for k in range(len(y_pred))]
results = dict({"Point predictions": None,
"Confidence intervals": None,
"Errors": None,
"Upper limit": None,
"Lower limit": None,
"Coverage indicators": None,
"Coverage": None,
"AUC-ROC": None})
results["Point predictions"] = y_pred
results["Upper limit"] = y_u_approx
results["Lower limit"] = y_l_approx
results["Confidence intervals"] = [y_u_approx[k] - y_l_approx[k] for k in range(len(y_u_approx))]
results["Errors"] = [np.abs(Y_test[k] - y_pred[k]) for k in range(len(y_u_approx))]
results["Coverage indicators"] = [((y_u_approx[k] >= Y_test[k]) * (y_l_approx[k] <= Y_test[k])) * 1 for k in range(len(y_u_approx))]
results["Coverage"] = np.mean(np.concatenate(results["Coverage indicators"]))
if error_threshold == "Auto":
results["AUC-ROC"] = roc_auc_score((np.concatenate(results["Errors"]) > np.median(np.concatenate(results["Errors"]))) * 1,
np.concatenate(results["Confidence intervals"]))
else:
results["AUC-ROC"] = roc_auc_score((np.concatenate(results["Errors"]) > error_threshold) * 1,
np.concatenate(results["Confidence intervals"]))
results["CI length"] = np.mean(np.concatenate(results["Confidence intervals"]))
return results
def collect_synthetic_results(noise_vars, params, coverage=0.9, seq_len=5, n_train_seq=1000, n_test_seq=1000):
#noise_profs = [noise_vars[k] * np.ones(seq_len) for k in range(len(noise_vars))]
noise_profs = noise_vars * np.ones(seq_len)
result_dict = dict({"BJRNN": [], "QRNN": [], "DPRNN": []})
model_type = [RNN, QRNN, DPRNN]
model_names = ["BJRNN", "QRNN", "DPRNN"]
for u in range(len(model_type)):
X, Y = create_autoregressive_data(n_samples=n_train_seq, noise_profile=noise_profs,
seq_len=seq_len, mode="time-dependent")
RNN_model = model_type[u](**params)
print("Training model " + model_names[u] + " with aleatoric noise variance %.4f and %d training sequences" % (noise_vars, n_train_seq))
RNN_model.fit(X, Y)
if type(RNN_model) is RNN:
RNN_model_ = RNN_uncertainty_wrapper(RNN_model)
else:
RNN_model_ = RNN_model
X_test, Y_test = create_autoregressive_data(n_samples=n_test_seq, noise_profile=noise_profs,
seq_len=seq_len, mode="time-dependent")
result_dict[model_names[u]].append(evaluate_performance(RNN_model_, X_test, Y_test, coverage=coverage, error_threshold="Auto"))
return result_dict
|
import os
import sys
from os import listdir
from os.path import isfile, join
DIR_TASK = os.path.basename(os.getcwd())
DIR_LIB = os.path.abspath(os.path.join(os.path.dirname(__file__),"../"))
DIR_TASK = os.path.dirname(os.path.abspath(__file__))
import json, csv, time, string, itertools, copy, yaml
import numpy as np
import pandas as pd
import datetime as dt
CONFIG_FILE_NAME = '004.01_config'
config = yaml.load( stream = open( DIR_TASK + '\\' + CONFIG_FILE_NAME + '.yml', 'r'))
#yaml.dump( config, open( DIR_TASK + '\\config.yml', 'w') )
sys.path.append( DIR_LIB )
from lib.router import Router
router = Router( )
print '>> Walk, Don`t Run'
# --------------------------------------------------------------------------
#STEP: modify version?
configVersion = config['version']
config['version'] = round( float(configVersion) + .1, 1 ) if config['options']['increment-version'] == True else configVersion
halfFinalDate = config['params']['half-final_date']
d = dt.datetime.strptime( halfFinalDate , '%d.%m.%Y')
filePrefix = 'teams_' + d.strftime('%Y-%m-%d')
#STEP: load web-likes from half-final day
sourcePath = router.getRoute( config['source']['web-teams']['route'] ) + config['source']['web-teams']['dir']
sourceFilesNames = [f for f in listdir( sourcePath ) if filePrefix in f ]
sourceFilesNames.sort()
coachNames = ['Michael Patrick', 'Mark', 'Michi & Smudo', 'Yvonne' ]
def getTimeDataStart( _fileIndex ):
#INFO: global list of sourceFilesNames
#COM: get list of current active participants
fileName = sourceFilesNames[ _fileIndex ]
rawObj = open( sourcePath + fileName, 'r')
fileJsonContent = json.load( rawObj )
timeData = {}
#COM: empty file?
if len( fileJsonContent ) > 0:
fileTime = fileName.split("_")[1].split("--")[1].replace(".json","").replace("-",":")
for coachName in coachNames:
coachListParticipants = list(filter(lambda item: item['in'] == True, fileJsonContent[ coachName ] ))
timeData[ coachName ] = {
'participants': [ { 'name': f['name'], 'likes':f['likes'] } for f in coachListParticipants ],
'time': fileTime
}
return timeData
timeData_start = getTimeDataStart( 0 )
#STEP: Load the half-final results
sourcePathFile = router.getRoute( config['source']['half-final']['route'] ) \
+ config['source']['half-final']['dir'] \
+ config['source']['half-final']['file']
hfDf = pd.read_csv( filepath_or_buffer = sourcePathFile , sep=";", quoting= 3, decimal=',' )
votesData = []
for selectedCoachName in coachNames:
teamData = { 'participants':[], 'winner':'' }
teamWinner = {}
for participantRow in hfDf.loc[ hfDf['coach'] == selectedCoachName ].iterrows():
#HELP: iterrows() returns a tuple with: 0 index, 1 row data
name = participantRow[1]['participant']
quote = participantRow[1]['rate_procent']
if( participantRow[1]['winner'] == '+' ):
teamWinner = {'name':name, 'quote': quote }
teamData['participants'].append( {'name':name, 'quote': quote } )
teamData['winner'] = teamWinner
teamData['coach'] = selectedCoachName
votesData.append( teamData )
print votesData
#STEP: output-file
#COM: Absolute Path
outputPath = router.getRoute( config['target']['route'] ) + config['target']['dir']
outputFilePath = outputPath + config['target']['file'].replace("$VERSION$", str( config['version'] ) )
#COM: create output folder
if not os.path.exists( outputPath ):
os.makedirs( outputPath )
#COM: create output file
with open( outputFilePath, 'w') as outfile:
json.dump( votesData , outfile , indent=2)
|
# python3
# coding: utf-8
import argparse
import sys
import warnings
from collections import Counter
from sklearn.metrics import classification_report, f1_score
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import cross_validate
from sklearn.dummy import DummyClassifier
from wsd_helpers import *
import random
import pandas as pd
import numpy as np
import torch
from torch.utils.data import DataLoader
import transformers as ppb # pytorch transformers
warnings.filterwarnings("ignore")
def classify(data_file, w2v=None, elmo=None, bert=None, max_batch_size=300, algo='logreg'):
data = pd.read_csv(data_file, sep='\t', compression='gzip')
print(data.head())
train0 = []
train1 = []
y = data.label.values
    if elmo:
        # The ELMo branch relies on the TensorFlow 1.x session API; import it lazily
        # here (it was not imported at the top of the file) so the BERT-only path
        # does not require TensorFlow.
        import tensorflow as tf
        batcher, sentence_character_ids, elmo_sentence_input = elmo
sentences0 = [t.split() for t in data.text0]
sentences1 = [t.split() for t in data.text1]
print('=====')
print('%d sentences total' % (len(sentences0)))
print('=====')
# Here we divide all the sentences for the current word in several chunks
# to reduce the batch size
with tf.Session() as sess:
# It is necessary to initialize variables once before running inference.
sess.run(tf.global_variables_initializer())
for chunk in divide_chunks(sentences0, max_batch_size):
train0 += get_elmo_vector_average(sess, chunk, batcher, sentence_character_ids, elmo_sentence_input)
for chunk in divide_chunks(sentences1, max_batch_size):
train1 += get_elmo_vector_average(sess, chunk, batcher, sentence_character_ids, elmo_sentence_input)
elif bert:
tokenizer, model = bert
tokenized0 = data.text0.apply((lambda x: tokenizer.encode(x, add_special_tokens=True)))
tokenized1 = data.text1.apply((lambda x: tokenizer.encode(x, add_special_tokens=True)))
print('Padding...', file=sys.stderr)
max_len = 0
        # Concatenate the two Series as Python lists; "+" on the underlying object
        # arrays would instead add the token lists element-wise.
        for i in list(tokenized0.values) + list(tokenized1.values):
if len(i) > max_len:
max_len = len(i)
print('Max length:', max_len)
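        # Right-pad every token id sequence with zeros (the id of BERT's [PAD] token
        # in the standard vocabularies) so all sequences share the same length.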
padded0 = [i + [0]*(max_len-len(i)) for i in tokenized0.values]
padded1 = [i + [0]*(max_len-len(i)) for i in tokenized1.values]
input_ids0 = torch.tensor(np.array(padded0)).to('cuda')
input_ids1 = torch.tensor(np.array(padded1)).to('cuda')
features = []
for inp in [input_ids0, input_ids1]:
loader = DataLoader(inp, batch_size=256, shuffle=False)
last_hidden_states = []
with torch.no_grad():
for i in loader:
last_hidden_states.append(model(i))
last_hidden_states = torch.cat([i[0] for i in last_hidden_states], 0)
print('BERT output shape:', last_hidden_states.shape, file=sys.stderr)
# Slice the output for the first position for all the sequences, take all hidden unit outputs
# features.append(last_hidden_states[:,0,:].cpu().numpy())
# Take the average embedding for all the sequences:
features.append([np.mean(row, axis=0) for row in last_hidden_states.cpu().numpy()])
train0 = features[0]
train1 = features[1]
classes = Counter(y)
print('Distribution of classes in the whole sample:', dict(classes))
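    # Each paraphrase pair is reduced to a single feature: the dot product between
    # the two sentence embeddings (the ELMo/BERT averages computed above).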
x_train = [[np.dot(t0, t1)] for t0, t1 in zip(train0, train1)]
print('Train shape:', len(x_train))
if algo == 'logreg':
clf = LogisticRegression(solver='lbfgs', max_iter=2000, multi_class='auto', class_weight='balanced')
else:
clf = MLPClassifier(hidden_layer_sizes=(200, ), max_iter=500)
dummy = DummyClassifier(strategy='stratified')
averaging = True # Do you want to average the cross-validate metrics?
scoring = ['precision_macro', 'recall_macro', 'f1_macro']
# some splits are containing samples of one class, so we split until the split is OK
counter = 0
while True:
try:
cv_scores = cross_validate(clf, x_train, y, cv=10, scoring=scoring)
cv_scores_dummy = cross_validate(dummy, x_train, y, cv=10, scoring=scoring)
except ValueError:
counter += 1
if counter > 500:
print('Impossible to find a good split!')
exit()
continue
else:
# No error; stop the loop
break
scores = ([cv_scores['test_precision_macro'].mean(), cv_scores['test_recall_macro'].mean(), cv_scores['test_f1_macro'].mean()])
dummy_scores = ([cv_scores_dummy['test_precision_macro'].mean(), cv_scores_dummy['test_recall_macro'].mean(), cv_scores_dummy['test_f1_macro'].mean()])
print('Real scores:')
print('=====')
print('Precision: %0.3f' % scores[0])
print('Recall: %0.3f' % scores[1])
print('F1: %0.3f' % scores[2])
print('Random choice scores:')
print('=====')
print('Precision: %0.3f' % dummy_scores[0])
print('Recall: %0.3f' % dummy_scores[1])
print('F1: %0.3f' % dummy_scores[2])
return scores
if __name__ == '__main__':
parser = argparse.ArgumentParser()
arg = parser.add_argument
arg('--input', help='Path to tab-separated file with paraphrase data', required=True)
arg('--bert', help='Path to BERT model (optional)')
arg('--elmo', help='Path to ELMo model (optional)')
parser.set_defaults(w2v=False)
parser.set_defaults(elmo=False)
args = parser.parse_args()
data_path = args.input
if args.bert:
model_class, tokenizer_class, pretrained_weights = (ppb.BertModel, ppb.BertTokenizer, args.bert)
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
model = model_class.from_pretrained(pretrained_weights).to('cuda')
eval_scores = classify(data_path, bert=(tokenizer, model))
elif args.elmo:
emb_model = load_elmo_embeddings(args.elmo, top=False)
eval_scores = classify(data_path, elmo=emb_model)
else:
eval_scores = classify(data_path)
|
import numpy as np
from numpy import linalg
from abc import abstractmethod
import pandas as pd
import math
pd.options.display.float_format = '{:,.6f}'.format
np.set_printoptions(suppress=True, precision=6)
TOR = pow(10.0, -5)
TRUE_X = np.array([0.5, 0, -0.5235988])
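# Known root of the nonlinear system defined in SteepestDescent.f:
# x* = (0.5, 0, -pi/6), with -pi/6 ~= -0.5235988.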
class SteepestDescentMethod(object):
def __init__(self):
return
@abstractmethod
def f(self, x):
        raise NotImplementedError('Implement f()!')
@abstractmethod
def g(self, x):
        raise NotImplementedError('Implement g()!')
@abstractmethod
def grad_g(self, x):
        raise NotImplementedError('Implement grad_g()!')
@abstractmethod
def jacobian(self, x):
        raise NotImplementedError('Implement jacobian()!')
@abstractmethod
def run(self, x):
        raise NotImplementedError('Implement run()!')
class SteepestDescent(SteepestDescentMethod):
def __init__(self):
        super(SteepestDescent, self).__init__()
def f(self, x):
sol = np.zeros(len(x))
sol[0] = 3 * x[0] - math.cos(x[1] * x[2]) - 1.0 / 2.0
sol[1] = pow(x[0], 2) - 81 * pow(x[1] + 0.1, 2) + math.sin(x[2]) + 1.06
sol[2] = math.exp(-x[0] * x[1]) + 20 * x[2] + (10 * math.pi - 3.0) / 3.0
return sol
def g(self, x):
sol = self.f(x)
return sum([e * e for e in sol])
def grad_g(self, x):
return 2 * self.jacobian(x).transpose().dot(self.f(x))
def jacobian(self, x):
jac = np.zeros(shape=(3, 3))
jac[0][0] = 3.0
jac[0][1] = x[2] * math.sin(x[1] * x[2])
jac[0][2] = x[1] * math.sin(x[1] * x[2])
jac[1][0] = 2 * x[0]
jac[1][1] = -162 * (x[1] + 0.1)
jac[1][2] = math.cos(x[2])
jac[2][0] = -x[1] * math.exp(-x[0] * x[1])
jac[2][1] = -x[0] * math.exp(-x[0] * x[1])
jac[2][2] = 20
return jac
def run(self, x):
"""
given x_0 in R^3 as a starting point.
:param x: x_0 as described
:return: the minimizer x* of f
"""
df = pd.DataFrame(columns=['x' + str(i + 1) for i in range(len(x))] + ['g', 'residual', 'actual-residual'])
row = len(df)
df.loc[row] = [xe for xe in x] + [self.g(x), np.nan, np.nan]
while True:
prev_x = x
g1 = self.g(x)
z = self.grad_g(x)
z0 = linalg.norm(z, 2)
if z0 == 0.0:
print('Zero gradient')
return x
z /= z0
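            # Line search along -z: halve alpha3 until g decreases, then fit a
            # quadratic through (0, g1), (alpha2, g2), (alpha3, g3) via divided
            # differences and step to its minimizer alpha0 (or to alpha3 if better).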
alpha3 = 1
g3 = self.g(x - alpha3 * z)
while g3 >= g1:
alpha3 /= 2.0
g3 = self.g(x - alpha3 * z)
if alpha3 < TOR / 2.0:
print('No likely improvement')
return x
alpha2 = alpha3 / 2.0
g2 = self.g(x - alpha2 * z)
h1 = (g2 - g1) / alpha2
h2 = (g3 - g2) / (alpha3 - alpha2)
h3 = (h2 - h1) / alpha3
alpha0 = (alpha2 - h1 / h3) / 2.0
g0 = self.g(x - alpha0 * z)
alpha = alpha0
g = g0
if g3 < g:
alpha = alpha3
g = g3
x = x - alpha * z
residual = linalg.norm(x - prev_x, np.inf)
row = len(df)
df.loc[row] = [nxe for nxe in x] + [g, residual, np.nan]
if math.fabs(g - g1) < TOR:
break
        for i in range(len(df)):
            xk = np.array([df.iloc[i, j] for j in range(len(x))])
            df.loc[i, 'actual-residual'] = linalg.norm(xk - x, np.inf)
        print(df)
        return x
def main():
x0 = np.array([0, 0, 0])
SteepestDescent().run(x0)
if __name__ == '__main__':
main()
|
from __future__ import print_function
import pandas as pd
def fetch_training_molecules():
smiles_list = []
count = 0
for i in range(10):
data_file = '../../datasets/keck_pria/fold_{}.csv'.format(i)
df = pd.read_csv(data_file)
smiles_list.extend(df['rdkit SMILES'].tolist())
count += df.shape[0]
smiles_list = set(smiles_list)
print('{} uniques out of {}'.format(len(smiles_list), count))
return smiles_list
if __name__ == '__main__':
training_smiles_list = fetch_training_molecules()
predicted_file = 'aldrich_prediction.out'
with open(predicted_file) as f:
lines = f.readlines()
handler = open('filtered_aldrich_prediction.out', 'w')
count = 0
for line in lines:
line = line.strip().split('\t')
        old_smiles, neo_smiles, mol_id, pred_value = line[0], line[1], line[2], line[3]
        if neo_smiles in training_smiles_list:
            print('Duplicate SMILES: {}'.format(neo_smiles))
            count += 1
        else:
            print('{}\t{}\t{}\t{}'.format(old_smiles, neo_smiles, mol_id, pred_value), file=handler)
print('{} duplicates out of {}'.format(count, len(lines)))
|
# MIT License
#
# Copyright (c) 2021 Julien Gossa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import tweepy
import logging
import os
logger = logging.getLogger()
def create_api():
consumer_key = os.getenv("CONSUMER_KEY")
consumer_secret = os.getenv("CONSUMER_SECRET")
access_token = os.getenv("ACCESS_TOKEN")
access_token_secret = os.getenv("ACCESS_TOKEN_SECRET")
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)
try:
api.verify_credentials()
except Exception as e:
logger.error("Error creating API", exc_info=True)
raise e
logger.info("API created")
return api
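# Minimal usage sketch (assumes CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN and
# ACCESS_TOKEN_SECRET are set in the environment):
#
#     api = create_api()
#     me = api.verify_credentials()
#     logger.info("Authenticated as @%s", me.screen_name)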
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2020 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf import settings
from rest_framework.filters import BaseFilterBackend
class GeoStoryPermissionsFilter(BaseFilterBackend):
"""
A filter backend that limits results to those where the requesting user
has read object level permissions.
"""
shortcut_kwargs = {
'accept_global_perms': True,
}
def filter_queryset(self, request, queryset, view):
# We want to defer this import until runtime, rather than import-time.
# See https://github.com/encode/django-rest-framework/issues/4608
# (Also see #1624 for why we need to make this import explicitly)
from guardian.shortcuts import get_objects_for_user
from geonode.security.utils import get_visible_resources
user = request.user
resources = get_objects_for_user(
user,
'base.view_resourcebase',
**self.shortcut_kwargs
).filter(polymorphic_ctype__model='geostory')
obj_with_perms = get_visible_resources(
resources,
user,
admin_approval_required=settings.ADMIN_MODERATE_UPLOADS,
unpublished_not_visible=settings.RESOURCE_PUBLISHING,
private_groups_not_visibile=settings.GROUP_PRIVATE_RESOURCES)
return queryset.filter(id__in=obj_with_perms.values('id'))
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Design References:
# ------------------
# Add-on: Another Noise Tool - Suite (W.I.P.)
# Author: Jimmy Hazevoet 5/2017
#
# Add-on: Easy City Addon
# Author: Goeminne Colas, Barthel Xavier
#
bl_info = {
"name": "Film Set Builder",
"author": "Ajay Bhaga",
"version": (0, 0, 1),
"blender": (2, 90, 0),
"location": "View3D > UI panel",
"description": "Film Set Builder: Scene Generator",
"warning": "",
# "doc_url": "{BLENDER_MANUAL_URL}/addons/add_mesh/fsb_filmset.html",
"category": "Object",
}
if "bpy" in locals():
    import importlib
    importlib.reload(gen_set_fsb)
    importlib.reload(mesh_ant_displace)
    importlib.reload(ant_functions)
    importlib.reload(ant_noise)
    importlib.reload(floor_repartition)
else:
    from FilmSetBuilder import gen_set_fsb
    from FilmSetBuilder import mesh_ant_displace
    from FilmSetBuilder import ant_functions
    from FilmSetBuilder import ant_noise
    # floor_repartition is used by the city operators below but was never imported;
    # it is assumed to live in the same FilmSetBuilder package.
    from FilmSetBuilder import floor_repartition
import bpy
from bpy.props import *
import os
import copy
from bpy.props import (
BoolProperty,
FloatProperty,
IntProperty,
StringProperty,
PointerProperty,
EnumProperty,
)
from .ant_functions import (
draw_ant_refresh,
draw_ant_main,
draw_ant_noise,
draw_ant_displace,
)
#
bpy.types.Scene.city_size = IntProperty(name="Size", default=20)
bpy.types.Scene.max_block_size = IntProperty(name="Block Size", default=7)
bpy.types.Scene.park_mean = FloatProperty(name="Proportion of parks", default=0.1, min=0.0, max=1.0)
bpy.types.Scene.height_mean = FloatProperty(name="Mean building height", default=30.0, min=10.0, max=100.0)
bpy.types.Scene.height_std = FloatProperty(name="Standard deviation building height", default=15.0, min=5.0, max=50.0)
bpy.types.Scene.path_size = IntProperty(name="Path Size", default=50, min=0)
bpy.types.Scene.camera_speed = IntProperty(name="Speed", default=3, min=1,max=5)
matrice=[]
def setMatrice(mat):
global matrice
matrice=copy.deepcopy(mat)
#
# class FilmSetBuilderPanel(bpy.types.Panel):
#
class FilmSetBuilderPanel(bpy.types.Panel):
bl_label = "Film Set Generator"
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
bl_category = 'City'
def draw(self, context):
layout = self.layout
layout.label(text="City Parameters:")
# split = layout.split()
# col = split.column(align=True)
# col.operator("mesh.primitive_plane_add", text="Plane", icon='MESH_PLANE')
# col.operator("mesh.primitive_torus_add", text="Torus", icon='MESH_TORUS')
scene = context.scene
row = layout.row()
row.prop(scene, 'city_size')
row.prop(scene, 'max_block_size')
row = layout.row()
row.prop(scene, 'park_mean')
row = layout.row()
row.prop(scene, 'height_mean')
row.prop(scene, 'height_std')
row = layout.row()
row.operator('city.generate')
row.operator('city.delete')
row = layout.row()
row.operator('city.day')
row.operator('city.night')
row = layout.row()
row.operator('city.cars')
row = layout.row()
layout.label(text="Camera Path Parameters:")
row = layout.row()
row.operator('city.camera_path')
row = layout.row()
row.prop(scene, 'path_size')
row.prop(scene,'camera_speed')
class OBJECT_OT_Day(bpy.types.Operator):
bl_idname = "city.day"
bl_label = "Day Light"
bl_description = "Set day light environment"
def execute(self,context):
print("lenmatrice : ",len(matrice))
floor_repartition.setDayLight(matrice)
return {'FINISHED'}
class OBJECT_OT_Night(bpy.types.Operator):
bl_idname = "city.night"
bl_label = "Night Light"
bl_description = "Set night light environment"
def execute(self,context):
floor_repartition.setNightLight(matrice)
return {'FINISHED'}
class OBJECT_OT_CameraPath(bpy.types.Operator):
bl_idname = "city.camera_path"
bl_label = "Generate Camera Path"
bl_description = "generate a camera path though the city"
def execute(self,context):
floor_repartition.cameraPath(matrice,bpy.context.scene.path_size,bpy.context.scene.camera_speed)
return {'FINISHED'}
class OBJECT_OT_Car(bpy.types.Operator):
bl_idname = "city.cars"
bl_label = "Cars"
bl_description = "Generate cars riding throught the city"
def execute(self,context):
directory = os.path.dirname(__file__)
carsfilepath = os.path.join(directory, "models/cars.blend")
with bpy.data.libraries.load(carsfilepath, link=True) as (data_from, data_to):
data_to.objects = [name for name in data_from.objects if name.startswith("car")]
cars = [obj for obj in bpy.data.objects if "car" in obj.name]
floor_repartition.carsAnim(matrice, cars)
return {'FINISHED'}
class OBJECT_OT_GenerateCity(bpy.types.Operator):
bl_idname = "city.generate"
bl_label = "Generate"
bl_description = "Generates the city based on the given parameters."
def execute(self, context):
directory = os.path.dirname(__file__)
roadfilepath = os.path.join(directory, "models/road.blend")
with bpy.data.libraries.load(roadfilepath, link=True) as (data_from, data_to):
data_to.objects = [name for name in data_from.objects if name.startswith("road")]
buildingsfilepath = os.path.join(directory, "models/buildings.blend")
with bpy.data.libraries.load(buildingsfilepath, link=True) as (data_from, data_to):
data_to.objects = [name for name in data_from.objects if (name.startswith("building") or name.startswith("house"))]
parksfilepath = os.path.join(directory, "models/parks.blend")
with bpy.data.libraries.load(parksfilepath, link=True) as (data_from, data_to):
data_to.objects = [name for name in data_from.objects if name.startswith("park")]
urbanfilepath = os.path.join(directory, "models/urban.blend")
with bpy.data.libraries.load(urbanfilepath, link=True) as (data_from, data_to):
data_to.objects = [name for name in data_from.objects if name.startswith("street") or name.startswith("urban")]
worldfilepath = os.path.join(directory, "models/sky.blend")
with bpy.data.libraries.load(worldfilepath, link=True) as (data_from, data_to):
data_to.worlds = [name for name in data_from.worlds if name.startswith("myWorld")]
worldNightfilepath = os.path.join(directory, "models/skyNight.blend")
with bpy.data.libraries.load(worldNightfilepath, link=True) as (data_from, data_to):
data_to.worlds = [name for name in data_from.worlds if name.startswith("myWorld")]
scene = context.scene
# Remove previous city (if any)
bpy.ops.city.delete()
# Add an empty that will serve as the parent of all buildings
bpy.ops.object.add(type='EMPTY')
empty = bpy.context.object
empty.name = 'City'
# # Get the template objects (name starting with '_'
# objs = [obj for obj in bpy.data.objects if obj.name[0] == '_']
# # Get the mesh from the template object
# meshes = [obj.data for obj in objs]
size = scene.city_size
max_block_size = scene.max_block_size
park_mean = scene.park_mean
height_mean = scene.height_mean
height_std = scene.height_std
roads = { "straight": bpy.data.objects['roadStraight'],
"roadL": bpy.data.objects['roadL'],
"roadT": bpy.data.objects['roadT'],
"roadX": bpy.data.objects['roadX']}
buildings = [obj for obj in bpy.data.objects if ("building" in obj.name or "house" in obj.name)]
parks = [obj for obj in bpy.data.objects if "park" in obj.name]
cars = [obj for obj in bpy.data.objects if "car" in obj.name]
streetLamp=[obj for obj in bpy.data.objects if "street" in obj.name]
urbanObjects=[obj for obj in bpy.data.objects if "urban" in obj.name]
bpy.context.scene.render.engine = 'CYCLES'
mat=copy.deepcopy(floor_repartition.draw_roads_and_buildings(size, roads, buildings, max_block_size, parks, park_mean, height_mean, height_std))
setMatrice(mat)
floor_repartition.setDayLight(mat)
floor_repartition.setUrban(mat,streetLamp,urbanObjects)
# # Create a duplicate linked object of '_Building1'
# for x in np.linspace(-size/2, size/2, size):
# for y in np.linspace(-size/2, size/2, size):
# height = 2 + np.random.rand() * 8 # Random height
# mesh = meshes[np.random.random_integers(len(meshes))-1] # Random mesh from templates
# new_obj = bpy.data.objects.new('Building.000', mesh) # Create new object linked to same mesh data
# new_obj.location = (x*2,y*2,0) # Set its location
# new_obj.scale = (1,1,height) # Set its scale
# scene.objects.link(new_obj) # Link new object to scene
# new_obj.parent = empty # Link new object to empty
return {'FINISHED'}
class OBJECT_OT_DeleteCity(bpy.types.Operator):
bl_idname = "city.delete"
bl_label = "Delete"
def execute(self, context):
scene = context.scene
# Remove previous city
city = bpy.data.objects.get('City') # Get 'City' object
if not city is None: # If exists
bpy.ops.object.select_all(action='DESELECT') # Deselect all
city.select = True # Select City
bpy.ops.object.select_hierarchy(direction='CHILD', # Select all children of City
extend=True)
bpy.ops.object.select_hierarchy(direction='CHILD', extend=True)
bpy.ops.object.delete(use_global=False) # Delete selection
return {'FINISHED'}
#
# ------------------------------------------------------------
# Menu's and panels
def menu_func_eroder(self, context):
ob = bpy.context.active_object
if ob and (ob.fsb_filmset.keys() and not ob.fsb_filmset['sphere_mesh']):
self.layout.operator('mesh.eroder', text="Landscape Eroder", icon='SMOOTHCURVE')
def menu_func_landscape(self, context):
layout = self.layout
layout.separator()
self.layout.operator('mesh.filmset_generate', text="Film Set", icon="RNDCURVE")
# Landscape Add Panel
class FilmSetBuilderAddPanel(bpy.types.Panel):
bl_category = "Create"
bl_label = "Film Set Builder"
bl_idname = "ANTLANDSCAPE_PT_add"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_context = "objectmode"
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
col = self.layout.column()
col.operator('mesh.filmset_generate', text="Generate Film Set", icon="WORLD")
col.operator('mesh.actors_generate', text="Generate Actors", icon="OBJECT_DATA")
col.operator('mesh.cameras_generate', text="Generate Cameras", icon="CAMERA_DATA")
col.operator('mesh.paths_generate', text="Generate Paths", icon="ANIM_DATA")
col.operator('mesh.lights_generate', text="Generate Lights", icon="LIGHT_DATA")
# Landscape Tools:
class AntLandscapeToolsPanel(bpy.types.Panel):
bl_category = "Create"
bl_label = "Film Set Builder Tools"
bl_idname = "ANTLANDSCAPE_PT_tools"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_context = "objectmode"
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
ob = bpy.context.active_object
return (ob and ob.type == 'MESH')
def draw(self, context):
layout = self.layout
ob = context.active_object
col = layout.column()
col.operator('mesh.ant_displace', text="Randomize Actors", icon="OBJECT_DATA")
col.operator('mesh.ant_displace', text="Randomize Environment", icon="SCENE_DATA")
col.operator('mesh.ant_slope_map', icon='GROUP_VERTEX')
if ob.fsb_filmset.keys() and not ob.fsb_filmset['sphere_mesh']:
col.operator('mesh.eroder', text="Landscape Eroder", icon='SMOOTHCURVE')
# Film Set Settings
class FilmSetSettingsPanel(bpy.types.Panel):
bl_category = "Create"
bl_label = "Film Set Settings"
bl_idname = "ANTLANDSCAPE_PT_noise"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
ob = bpy.context.active_object
return ob.fsb_filmset.keys() if ob else False
def draw(self, context):
layout = self.layout
scene = context.scene
ob = bpy.context.active_object
ant = ob.fsb_filmset
box = layout.box()
col = box.column(align=True)
col.scale_y = 1.5
if ant.sphere_mesh:
col.operator('mesh.fsb_filmset_regenerate', text="Regenerate", icon="LOOP_FORWARDS")
else:
col.operator('mesh.fsb_filmset_refresh', text="Refresh", icon="FILE_REFRESH")
box.prop(ant, "noise_type")
if ant.noise_type == "blender_texture":
box.prop_search(ant, "texture_block", bpy.data, "textures")
else:
box.prop(ant, "basis_type")
col = box.column(align=True)
col.prop(ant, "random_seed")
col = box.column(align=True)
col.prop(ant, "noise_offset_x")
col.prop(ant, "noise_offset_y")
if ant.sphere_mesh:
col.prop(ant, "noise_offset_z")
col.prop(ant, "noise_size_x")
col.prop(ant, "noise_size_y")
if ant.sphere_mesh:
col.prop(ant, "noise_size_z")
col = box.column(align=True)
col.prop(ant, "noise_size")
col = box.column(align=True)
if ant.noise_type == "multi_fractal":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
elif ant.noise_type == "ridged_multi_fractal":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
col.prop(ant, "offset")
col.prop(ant, "gain")
elif ant.noise_type == "hybrid_multi_fractal":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
col.prop(ant, "offset")
col.prop(ant, "gain")
elif ant.noise_type == "hetero_terrain":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
col.prop(ant, "offset")
elif ant.noise_type == "fractal":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
elif ant.noise_type == "turbulence_vector":
col.prop(ant, "noise_depth")
col.prop(ant, "amplitude")
col.prop(ant, "frequency")
col.separator()
row = col.row(align=True)
row.prop(ant, "hard_noise", expand=True)
elif ant.noise_type == "variable_lacunarity":
box.prop(ant, "vl_basis_type")
box.prop(ant, "distortion")
elif ant.noise_type == "marble_noise":
box.prop(ant, "marble_shape")
box.prop(ant, "marble_bias")
box.prop(ant, "marble_sharp")
col = box.column(align=True)
col.prop(ant, "distortion")
col.prop(ant, "noise_depth")
col.separator()
row = col.row(align=True)
row.prop(ant, "hard_noise", expand=True)
elif ant.noise_type == "shattered_hterrain":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
col.prop(ant, "offset")
col.prop(ant, "distortion")
elif ant.noise_type == "strata_hterrain":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
col.prop(ant, "offset")
col.prop(ant, "distortion", text="Strata")
elif ant.noise_type == "ant_turbulence":
col.prop(ant, "noise_depth")
col.prop(ant, "amplitude")
col.prop(ant, "frequency")
col.prop(ant, "distortion")
col.separator()
row = col.row(align=True)
row.prop(ant, "hard_noise", expand=True)
elif ant.noise_type == "vl_noise_turbulence":
col.prop(ant, "noise_depth")
col.prop(ant, "amplitude")
col.prop(ant, "frequency")
col.prop(ant, "distortion")
col.separator()
box.prop(ant, "vl_basis_type")
col.separator()
row = col.row(align=True)
row.prop(ant, "hard_noise", expand=True)
elif ant.noise_type == "vl_hTerrain":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
col.prop(ant, "offset")
col.prop(ant, "distortion")
col.separator()
box.prop(ant, "vl_basis_type")
elif ant.noise_type == "distorted_heteroTerrain":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
col.prop(ant, "offset")
col.prop(ant, "distortion")
col.separator()
col.prop(ant, "vl_basis_type")
elif ant.noise_type == "double_multiFractal":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
col.prop(ant, "offset")
col.prop(ant, "gain")
col.separator()
box.prop(ant, "vl_basis_type")
elif ant.noise_type == "rocks_noise":
col.prop(ant, "noise_depth")
col.prop(ant, "distortion")
col.separator()
row = col.row(align=True)
row.prop(ant, "hard_noise", expand=True)
elif ant.noise_type == "slick_rock":
col.prop(ant, "noise_depth")
col.prop(ant, "dimension")
col.prop(ant, "lacunarity")
col.prop(ant, "gain")
col.prop(ant, "offset")
col.prop(ant, "distortion")
col.separator()
box.prop(ant, "vl_basis_type")
elif ant.noise_type == "planet_noise":
col.prop(ant, "noise_depth")
col.separator()
row = col.row(align=True)
row.prop(ant, "hard_noise", expand=True)
# Effects mix
col = box.column(align=False)
box.prop(ant, "fx_type")
if ant.fx_type != "0":
if int(ant.fx_type) <= 12:
box.prop(ant, "fx_bias")
box.prop(ant, "fx_mix_mode")
col = box.column(align=True)
col.prop(ant, "fx_mixfactor")
col = box.column(align=True)
col.prop(ant, "fx_loc_x")
col.prop(ant, "fx_loc_y")
col.prop(ant, "fx_size")
col = box.column(align=True)
col.prop(ant, "fx_depth")
if ant.fx_depth != 0:
col.prop(ant, "fx_frequency")
col.prop(ant, "fx_amplitude")
col.prop(ant, "fx_turb")
col = box.column(align=True)
row = col.row(align=True).split(factor=0.92, align=True)
row.prop(ant, "fx_height")
row.prop(ant, "fx_invert", toggle=True, text="", icon='ARROW_LEFTRIGHT')
col.prop(ant, "fx_offset")
# ------------------------------------------------------------
# Properties group
class AntLandscapePropertiesGroup(bpy.types.PropertyGroup):
ant_terrain_name: StringProperty(
name="Name",
default="Landscape"
)
land_material: StringProperty(
name='Material',
default="",
description="Terrain material"
)
water_material: StringProperty(
name='Material',
default="",
description="Water plane material"
)
texture_block: StringProperty(
name="Texture",
default=""
)
at_cursor: BoolProperty(
name="Cursor",
default=True,
description="Place at cursor location",
)
smooth_mesh: BoolProperty(
name="Smooth",
default=True,
description="Shade smooth"
)
tri_face: BoolProperty(
name="Triangulate",
default=False,
description="Triangulate faces"
)
sphere_mesh: BoolProperty(
name="Sphere",
default=False,
description="Generate uv sphere - remove doubles when ready"
)
subdivision_x: IntProperty(
name="Subdivisions X",
default=128,
min=4,
max=6400,
description="Mesh X subdivisions"
)
subdivision_y: IntProperty(
default=128,
name="Subdivisions Y",
min=4,
max=6400,
description="Mesh Y subdivisions"
)
mesh_size: FloatProperty(
default=2.0,
name="Mesh Size",
min=0.01,
max=100000.0,
description="Mesh size"
)
mesh_size_x: FloatProperty(
default=2.0,
name="Mesh Size X",
min=0.01,
description="Mesh x size"
)
mesh_size_y: FloatProperty(
name="Mesh Size Y",
default=2.0,
min=0.01,
description="Mesh y size"
)
random_seed: IntProperty(
name="Random Seed",
default=0,
min=0,
description="Randomize noise origin"
)
noise_offset_x: FloatProperty(
name="Offset X",
default=0.0,
description="Noise X Offset"
)
noise_offset_y: FloatProperty(
name="Offset Y",
default=0.0,
description="Noise Y Offset"
)
noise_offset_z: FloatProperty(
name="Offset Z",
default=0.0,
description="Noise Z Offset"
)
noise_size_x: FloatProperty(
default=1.0,
name="Size X",
min=0.01,
max=1000.0,
description="Noise x size"
)
noise_size_y: FloatProperty(
name="Size Y",
default=1.0,
min=0.01,
max=1000.0,
description="Noise y size"
)
noise_size_z: FloatProperty(
name="Size Z",
default=1.0,
min=0.01,
max=1000.0,
description="Noise Z size"
)
noise_size: FloatProperty(
name="Noise Size",
default=1.0,
min=0.01,
max=1000.0,
description="Noise size"
)
noise_type: EnumProperty(
name="Noise Type",
default='hetero_terrain',
description="Noise type",
items = [
('multi_fractal', "Multi Fractal", "Blender: Multi Fractal algorithm", 0),
('ridged_multi_fractal', "Ridged MFractal", "Blender: Ridged Multi Fractal", 1),
('hybrid_multi_fractal', "Hybrid MFractal", "Blender: Hybrid Multi Fractal", 2),
('hetero_terrain', "Hetero Terrain", "Blender: Hetero Terrain", 3),
('fractal', "fBm Fractal", "Blender: fBm - Fractional Browninian motion", 4),
('turbulence_vector', "Turbulence", "Blender: Turbulence Vector", 5),
('variable_lacunarity', "Distorted Noise", "Blender: Distorted Noise", 6),
('marble_noise', "Marble", "A.N.T.: Marble Noise", 7),
('shattered_hterrain', "Shattered hTerrain", "A.N.T.: Shattered hTerrain", 8),
('strata_hterrain', "Strata hTerrain", "A.N.T: Strata hTerrain", 9),
('ant_turbulence', "Another Noise", "A.N.T: Turbulence variation", 10),
('vl_noise_turbulence', "vlNoise turbulence", "A.N.T: Real vlNoise turbulence", 11),
('vl_hTerrain', "vlNoise hTerrain", "A.N.T: vlNoise hTerrain", 12),
('distorted_heteroTerrain', "Distorted hTerrain", "A.N.T distorted hTerrain", 13),
('double_multiFractal', "Double MultiFractal", "A.N.T: double multiFractal", 14),
('rocks_noise', "Noise Rocks", "A.N.T: turbulence variation", 15),
('slick_rock', "Slick Rock", "A.N.T: slick rock", 16),
('planet_noise', "Planet Noise", "Planet Noise by: Farsthary", 17),
('blender_texture', "Blender Texture - Texture Nodes", "Blender texture data block", 18)]
)
basis_type: EnumProperty(
name="Noise Basis",
default=ant_noise.noise_basis_default,
description="Noise basis algorithms",
items = ant_noise.noise_basis
)
vl_basis_type: EnumProperty(
name="vlNoise Basis",
default=ant_noise.noise_basis_default,
description="VLNoise basis algorithms",
items = ant_noise.noise_basis
)
distortion: FloatProperty(
name="Distortion",
default=1.0,
min=0.01,
max=100.0,
description="Distortion amount"
)
hard_noise: EnumProperty(
name="Soft Hard",
default="0",
description="Soft Noise, Hard noise",
items = [
("0", "Soft", "Soft Noise", 0),
("1", "Hard", "Hard noise", 1)]
)
noise_depth: IntProperty(
name="Depth",
default=8,
min=0,
max=16,
description="Noise Depth - number of frequencies in the fBm"
)
amplitude: FloatProperty(
name="Amp",
default=0.5,
min=0.01,
max=1.0,
description="Amplitude"
)
frequency: FloatProperty(
name="Freq",
default=2.0,
min=0.01,
max=5.0,
description="Frequency"
)
dimension: FloatProperty(
name="Dimension",
default=1.0,
min=0.01,
max=2.0,
description="H - fractal dimension of the roughest areas"
)
lacunarity: FloatProperty(
name="Lacunarity",
min=0.01,
max=6.0,
default=2.0,
description="Lacunarity - gap between successive frequencies"
)
offset: FloatProperty(
name="Offset",
default=1.0,
min=0.01,
max=6.0,
description="Offset - raises the terrain from sea level"
)
gain: FloatProperty(
name="Gain",
default=1.0,
min=0.01,
max=6.0,
description="Gain - scale factor"
)
marble_bias: EnumProperty(
name="Bias",
default="0",
description="Marble bias",
items = [
("0", "Sin", "Sin", 0),
("1", "Cos", "Cos", 1),
("2", "Tri", "Tri", 2),
("3", "Saw", "Saw", 3)]
)
marble_sharp: EnumProperty(
name="Sharp",
default="0",
description="Marble sharpness",
items = [
("0", "Soft", "Soft", 0),
("1", "Sharp", "Sharp", 1),
("2", "Sharper", "Sharper", 2),
("3", "Soft inv.", "Soft", 3),
("4", "Sharp inv.", "Sharp", 4),
("5", "Sharper inv.", "Sharper", 5)]
)
marble_shape: EnumProperty(
name="Shape",
default="0",
description="Marble shape",
items= [
("0", "Default", "Default", 0),
("1", "Ring", "Ring", 1),
("2", "Swirl", "Swirl", 2),
("3", "Bump", "Bump", 3),
("4", "Wave", "Wave", 4),
("5", "Z", "Z", 5),
("6", "Y", "Y", 6),
("7", "X", "X", 7)]
)
height: FloatProperty(
name="Height",
default=0.5,
min=-10000.0,
max=10000.0,
description="Noise intensity scale"
)
height_invert: BoolProperty(
name="Invert",
default=False,
description="Height invert",
)
height_offset: FloatProperty(
name="Offset",
default=0.0,
min=-10000.0,
max=10000.0,
description="Height offset"
)
fx_mixfactor: FloatProperty(
name="Mix Factor",
default=0.0,
min=-1.0,
max=1.0,
description="Effect mix factor: -1.0 = Noise, +1.0 = Effect"
)
fx_mix_mode: EnumProperty(
name="Effect Mix",
default="0",
description="Effect mix mode",
items = [
("0", "Mix", "Mix", 0),
("1", "Add", "Add", 1),
("2", "Sub", "Subtract", 2),
("3", "Mul", "Multiply", 3),
("4", "Abs", "Absolute", 4),
("5", "Scr", "Screen", 5),
("6", "Mod", "Modulo", 6),
("7", "Min", "Minimum", 7),
("8", "Max", "Maximum", 8)
]
)
fx_type: EnumProperty(
name="Effect Type",
default="0",
description="Effect type",
items = [
("0", "None", "No effect", 0),
("1", "Gradient", "Gradient", 1),
("2", "Waves", "Waves - Bumps", 2),
("3", "Zigzag", "Zigzag", 3),
("4", "Wavy", "Wavy", 4),
("5", "Bump", "Bump", 5),
("6", "Dots", "Dots", 6),
("7", "Rings", "Rings", 7),
("8", "Spiral", "Spiral", 8),
("9", "Square", "Square", 9),
("10", "Blocks", "Blocks", 10),
("11", "Grid", "Grid", 11),
("12", "Tech", "Tech", 12),
("13", "Crackle", "Crackle", 13),
("14", "Cracks", "Cracks", 14),
("15", "Rock", "Rock noise", 15),
("16", "Lunar", "Craters", 16),
("17", "Cosine", "Cosine", 17),
("18", "Spikey", "Spikey", 18),
("19", "Stone", "Stone", 19),
("20", "Flat Turb", "Flat turbulence", 20),
("21", "Flat Voronoi", "Flat voronoi", 21)
]
)
fx_bias: EnumProperty(
name="Effect Bias",
default="0",
description="Effect bias type",
items = [
("0", "Sin", "Sin", 0),
("1", "Cos", "Cos", 1),
("2", "Tri", "Tri", 2),
("3", "Saw", "Saw", 3),
("4", "None", "None", 4)]
)
fx_turb: FloatProperty(
name="Distortion",
default=0.0,
min=0.0,
max=1000.0,
description="Effect turbulence distortion"
)
fx_depth: IntProperty(
name="Depth",
default=0,
min=0,
max=16,
description="Effect depth - number of frequencies"
)
fx_amplitude: FloatProperty(
name="Amp",
default=0.5,
min=0.01,
max=1.0,
description="Amplitude"
)
fx_frequency: FloatProperty(
name="Freq",
default=2.0,
min=0.01,
max=5.0,
description="Frequency"
)
fx_size: FloatProperty(
name="Effect Size",
default=1.0,
min=0.01,
max=1000.0,
description="Effect size"
)
fx_loc_x: FloatProperty(
name="Offset X",
default=0.0,
description="Effect x offset"
)
fx_loc_y: FloatProperty(
name="Offset Y",
default=0.0,
description="Effect y offset"
)
fx_height: FloatProperty(
name="Intensity",
default=1.0,
min=-1000.0,
max=1000.0,
description="Effect intensity scale"
)
fx_invert: BoolProperty(
name="Invert",
default=False,
description="Effect invert"
)
fx_offset: FloatProperty(
name="Offset",
default=0.0,
min=-1000.0,
max=1000.0,
description="Effect height offset"
)
edge_falloff: EnumProperty(
name="Falloff",
default="3",
description="Flatten edges",
items = [
("0", "None", "None", 0),
("1", "Y", "Y Falloff", 1),
("2", "X", "X Falloff", 2),
("3", "X Y", "X Y Falloff", 3)]
)
falloff_x: FloatProperty(
name="Falloff X",
default=4.0,
min=0.1,
max=100.0,
description="Falloff x scale"
)
falloff_y: FloatProperty(
name="Falloff Y",
default=4.0,
min=0.1,
max=100.0,
description="Falloff y scale"
)
edge_level: FloatProperty(
name="Edge Level",
default=0.0,
min=-10000.0,
max=10000.0,
description="Edge level, sealevel offset"
)
maximum: FloatProperty(
name="Maximum",
default=1.0,
min=-10000.0,
max=10000.0,
description="Maximum, flattens terrain at plateau level"
)
minimum: FloatProperty(
name="Minimum",
default=-1.0,
min=-10000.0,
max=10000.0,
description="Minimum, flattens terrain at seabed level"
)
vert_group: StringProperty(
name="Vertex Group",
default=""
)
strata: FloatProperty(
name="Amount",
default=5.0,
min=0.01,
max=1000.0,
description="Strata layers / terraces"
)
strata_type: EnumProperty(
name="Strata",
default="0",
description="Strata types",
items = [
("0", "None", "No strata", 0),
("1", "Smooth", "Smooth transitions", 1),
("2", "Sharp Sub", "Sharp subtract transitions", 2),
("3", "Sharp Add", "Sharp add transitions", 3),
("4", "Quantize", "Quantize", 4),
("5", "Quantize Mix", "Quantize mixed", 5)]
)
water_plane: BoolProperty(
name="Water Plane",
default=False,
description="Add water plane"
)
water_level: FloatProperty(
name="Level",
default=0.01,
min=-10000.0,
max=10000.0,
description="Water level"
)
remove_double: BoolProperty(
name="Remove Doubles",
default=False,
description="Remove doubles"
)
refresh: BoolProperty(
name="Refresh",
default=False,
description="Refresh"
)
auto_refresh: BoolProperty(
name="Auto",
default=True,
description="Automatic refresh"
)
# ------------------------------------------------------------
# Register:
classes = (
FilmSetBuilderAddPanel,
AntLandscapeToolsPanel,
FilmSetSettingsPanel,
AntLandscapePropertiesGroup,
gen_set_fsb.GenerateFilmSet,
gen_set_fsb.GenerateActors,
mesh_ant_displace.AntMeshDisplace,
ant_functions.FilmSetRefresh,
ant_functions.FilmSetRegenerate,
ant_functions.AntVgSlopeMap,
ant_functions.Eroder,
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.VIEW3D_MT_mesh_add.append(menu_func_landscape)
bpy.types.Object.fsb_filmset = PointerProperty(type=AntLandscapePropertiesGroup, name="FSB_Filmset", description="Filmset properties")
bpy.types.VIEW3D_MT_paint_weight.append(menu_func_eroder)
def unregister():
for cls in reversed(classes):
bpy.utils.unregister_class(cls)
bpy.types.VIEW3D_MT_mesh_add.remove(menu_func_landscape)
bpy.types.VIEW3D_MT_paint_weight.remove(menu_func_eroder)
if __name__ == "__main__":
register()
|
from . import constants
class MilterMessage(object):
def __init__(self, cmd, data=None):
if cmd not in constants.VALID_CMDS:
raise ValueError('invalid command %s' % cmd)
self.cmd = cmd
self.data = data or {}
def __str__(self):
return '%s<%s, %s>' % (self.__class__.__name__, self.cmd, self.data)
__repr__ = __str__
def __eq__(self, other):
return (self.cmd == other.cmd
                and (sorted(self.data.items())
                     == sorted(other.data.items())
)
)
def __ne__(self, other):
return (self.cmd != other.cmd
                or (sorted(self.data.items())
                != sorted(other.data.items())
)
)
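# Usage sketch (the command name below is hypothetical; valid values are whatever
# constants.VALID_CMDS contains):
#
#     msg = MilterMessage('SMFIC_CONNECT', {'hostname': 'mail.example.org'})
#     print(msg)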
|
import utime
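# Reinterpret an unsigned byte (0..255) as a signed 8-bit value (-128..127)
# and apply a scaling factor.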
def toI8(byte, scaling):
return float((byte + 2 ** 7) % 2 ** 8 - 2 ** 7) * scaling
def log(string):
#dateArray = utime.localtime()
#ts = "%02d-%02d-%02d %02d:%02d:%02d" % (
# dateArray[0], dateArray[1], dateArray[2], dateArray[3], dateArray[4], dateArray[5])
print("%s : %s" % (utime.ticks_ms(), string))
def getXBeeInternalTemperature(xbeeObj):
reading = xbeeObj.atcmd('TP')
if reading > 0x7FFF:
reading = reading - 0x10000
return reading
def getXBeeVoltage(xbeeObj):
reading = xbeeObj.atcmd('%V')
return reading
def configureXBee3asBLESensor(xbeeObj):
log("Configuring XBee...")
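    # The AT commands below set the serial parameters (BD/NB/SB), the sleep mode (SM)
    # and device options (DO), and force the D*/P* I/O pins into fixed states so the
    # module can run as a low-power BLE sensor node. The individual parameter values
    # follow the XBee3 AT command set and are worth verifying against the target firmware.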
xbeeObj.atcmd('BD', 7)
xbeeObj.atcmd('NB', 0)
xbeeObj.atcmd('SB', 0)
xbeeObj.atcmd('SM', 6)
xbeeObj.atcmd('DO', 4)
xbeeObj.atcmd('D1', 0)
xbeeObj.atcmd('D2', 4)
xbeeObj.atcmd('D3', 4)
xbeeObj.atcmd('D4', 0) #USER LED
xbeeObj.atcmd('D5', 4)
xbeeObj.atcmd('D6', 4)
xbeeObj.atcmd('D7', 4)
xbeeObj.atcmd('D8', 4)
xbeeObj.atcmd('D9', 4)
xbeeObj.atcmd('P0', 4)
xbeeObj.atcmd('P1', 4)
xbeeObj.atcmd('P2', 4)
xbeeObj.atcmd('P5', 0)
xbeeObj.atcmd('P6', 0)
xbeeObj.atcmd('P7', 0)
xbeeObj.atcmd('P8', 0)
xbeeObj.atcmd('P9', 0)
def configureSingleParameter(xbeeObj, parameter, value):
xbeeObj.atcmd(parameter, value)
def setSleepmode(xbeeObj,value):
xbeeObj.atcmd('SM', value)
def setLED(pin, value):
pin.value(value)
def Average(lst):
return sum(lst) / len(lst)
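# Linearly rescale x from the input range [a, b] to the output range [c, d].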
def mapSensorValueToRange(x, a, b, c, d):
return ((x-a) / (b-a) * (d-c) + c)
def isNaN(num):
return num != num
|
"""
Created on Dec 14, 2018
@author: Yuedong Chen
"""
from .base_solver import BaseSolver
from sklearn.metrics import confusion_matrix, accuracy_score
import time
import os
import torch
import numpy as np
class ResFaceClsSolver(BaseSolver):
"""docstring for ResFaceClsSolver"""
def __init__(self):
super(ResFaceClsSolver, self).__init__()
def train_networks(self):
super(ResFaceClsSolver, self).train_networks()
def init_train_setting(self):
super(ResFaceClsSolver, self).init_train_setting()
def train_epoch(self, epoch):
self.train_model.set_train()
last_print_losses_freq_t = time.time()
for idx, batch in enumerate(self.train_dataset):
self.train_total_steps += 1
self.train_model.feed_batch(batch)
self.train_model.optimize_paras()
if self.train_total_steps % self.opt.print_losses_freq == 0:
cur_losses = self.train_model.get_latest_losses()
avg_step_t = (time.time() - last_print_losses_freq_t) / self.opt.print_losses_freq
last_print_losses_freq_t = time.time()
info_dict = {'epoch': epoch, 'epoch_len': self.epoch_len,
'epoch_steps': idx * self.opt.batch_size, 'epoch_steps_len': len(self.train_dataset),
'step_time': avg_step_t, 'cur_lr': self.cur_lr,
'log_path': os.path.join(self.opt.ckpt_dir, self.opt.log_file),
'losses': cur_losses
}
self.visual.print_losses_info(info_dict)
def test_networks(self, opt):
# go through all the dataset and generate map
dataset, model = self.init_test_setting(opt)
# print("Test networks: ", model.is_train)
results_dict = {'real_img': [], 'gen_resface': [], 'focus_face': []}
real_cls_list = []
pred_cls_list = []
for idx, batch in enumerate(dataset):
with torch.no_grad():
model.feed_batch(batch)
model.forward()
results_dict['real_img'].append(model.real_img[0].cpu().float().numpy())
results_dict['gen_resface'].append(model.gen_resface[0].cpu().float().numpy())
results_dict['focus_face'].append(model.focus_face[0].cpu().float().numpy())
pred_cls = model.pred_cls.detach().cpu().numpy()
pred_cls = np.argmax(pred_cls, axis=1)
pred_cls_list.extend(pred_cls)
real_cls = batch['real_cls'].detach().cpu().numpy().astype(int)
real_cls_list.extend(real_cls)
confusion_mat = confusion_matrix(real_cls_list, pred_cls_list, labels=list(range(opt.cls_nc)))
acc_num = accuracy_score(real_cls_list, pred_cls_list, normalize=False)
acc = float(acc_num) / len(dataset)
msg = "Acc: %.3f(%d/%d)" % (acc, acc_num, len(dataset))
print("=======> ", msg)
return acc, msg, confusion_mat, results_dict
|
from . import utils
from . import settings
from . import exceptions
from .resource import (
ResourceConstant,
get_resource_repository,
GroupResourceRepository,
IndexedResourceRepository,
IndexedGroupResourceRepository,
ResourceRepository,
GroupHistoryDataRepository,
IndexedHistoryDataRepository,
IndexedGroupHistoryDataRepository,
HistoryDataRepository,
ResourceConsumeClient,
ResourceConsumeClients,
HistoryDataConsumeClient,
)
from .azure_blob import (
AzureBlobStorage,
)
from .localstorage import (
LocalStorage,
)
from . import transform
|
import asyncio
import json
import urllib.request
from typing import Any, Callable
from urllib.error import HTTPError
from urllib.parse import urlencode
BASE_URL = "http://127.0.0.1:8000"
async def exec_as_aio(blocking_fn: Callable[..., Any], *args: Any) -> Any:
    """Asynchronously run blocking functions.
Args:
blocking_fn (Callable[..., Any]): The blocking fn.
Returns:
Any: The return value/s of the blocking fn.
"""
loop = asyncio.get_running_loop()
return await loop.run_in_executor(None, blocking_fn, *args)
async def parse_response(json_: Any) -> Any:
    """Convert a JSON response to a Python data type.
    Args:
        json_ (Any): The response object whose body contains the JSON to deserialize.
Returns:
Any: The deserialized JSON object.
"""
try:
return json.loads(json_.read())
except UnicodeDecodeError:
return json_
async def search(query: str) -> dict[str, Any]:
"""Make a request to the /search endpoint.
Args:
query (str): The search term/query.
Returns:
dict[str, Any]: The search result.
"""
resp = await exec_as_aio(urllib.request.urlopen, f"{BASE_URL}/search?{query}")
return await parse_response(resp)
async def convert(query: str) -> dict[str, Any]:
"""Make a request to convert a video to audio.
Args:
query (str): The search term/query or video url.
Returns:
dict[str, Any]: A JSON response.
"""
resp = await exec_as_aio(urllib.request.urlopen, f"{BASE_URL}/convert?{query}")
return await parse_response(resp)
async def save(ticket: str) -> None:
"""Save the audio file using assigned ticket.
Args:
ticket (str): The value of the ticket key in the JSON response.
"""
try:
filedata = await exec_as_aio(
urllib.request.urlopen, f"{BASE_URL}/save?{ticket}"
)
datatowrite = filedata.read()
with open("audio_file.m4a", "wb") as f:
f.write(datatowrite)
except HTTPError as http_err:
if http_err.code == 409:
            # The server rejected this save request (409); wait briefly and retry.
            # Clients are also rate limited to 5 requests per minute by default,
            # and exceeding that limit results in a 429 response.
await asyncio.sleep(13)
await save(ticket)
else:
await exec_as_aio(
urllib.request.urlretrieve,
f"{BASE_URL}/download?{ticket}",
"audio_file.m4a",
)
async def main(query: str) -> None:
query = urlencode({"query": query}) if "http" not in query else query
result = await search(query)
print(result)
response = await convert(query)
print(response)
await save(f"""ticket={response["ticket"]}""")
asyncio.run(main("when the comedic timing is just perfect~"))
|
from setuptools import setup
meta = {}
with open("bladex/meta.py") as fp:
exec(fp.read(), meta)
# Package meta-data.
NAME = meta['__title__']
DESCRIPTION = 'Python Blade Morphing'
URL = 'https://github.com/mathLab/BladeX'
MAIL = meta['__mail__']
AUTHOR = meta['__author__']
VERSION = meta['__version__']
KEYWORDS = 'blade-generation propeller iges procal'
REQUIRED = [
'numpy', 'scipy', 'matplotlib', 'Sphinx', 'sphinx_rtd_theme',
]
EXTRAS = {
'docs': ['Sphinx==1.4', 'sphinx_rtd_theme'],
'test': ['pytest', 'pytest-cov'],
}
LDESCRIPTION = (
    "BladeX is a Python package for geometrical parametrization and bottom-up "
    "construction of propeller blades. It allows the user to generate and "
    "deform a blade based on the radial distribution of its parameters, such "
    "as pitch, rake and skew, and of the sectional foils' parameters, such as "
    "chord and camber. The package is ideally suited for parametric "
    "simulations on a large number of blade deformations. It provides an "
    "automated procedure for CAD generation, hence reducing the time and "
    "effort required for modelling. The main scope of BladeX is propeller "
    "blades, but it is flexible enough to be applied to other geometries with "
    "analogous structures, such as aircraft wings, turbomachinery blades, or "
    "wind turbine blades."
)
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LDESCRIPTION,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Mathematics'
],
keywords=KEYWORDS,
url=URL,
author=AUTHOR,
author_email=MAIL,
license='MIT',
packages=[NAME],
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
zip_safe=False
)
|
"""
Task 6
@author: Alexandr Mazanik
"""
import time
import turtle
n = int(input('enter the number of legs - '))
alpha = 360 / n
length = 150
turtle.shape('turtle')
turtle.speed(5)
for i in range(n):
turtle.forward(length)
turtle.stamp()
turtle.left(180)
turtle.forward(length)
turtle.left(180)
turtle.left(alpha)
turtle.hideturtle()
time.sleep(2)
|
# Copyright (c) 2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests to ensure all attributes of L{twisted.internet.gtkreactor} are
deprecated.
"""
import sys
from twisted.trial.unittest import TestCase
class GtkReactorDeprecation(TestCase):
"""
Tests to ensure all attributes of L{twisted.internet.gtkreactor} are
deprecated.
"""
class StubGTK:
class GDK:
INPUT_READ = None
def input_add(self, *params):
pass
class StubPyGTK:
def require(self, something):
pass
def setUp(self):
"""
Create a stub for the module 'gtk' if it does not exist, so that it can
be imported without errors or warnings.
"""
self.mods = sys.modules.copy()
sys.modules['gtk'] = self.StubGTK()
sys.modules['pygtk'] = self.StubPyGTK()
def tearDown(self):
"""
Return sys.modules to the way it was before the test.
"""
sys.modules.clear()
sys.modules.update(self.mods)
def lookForDeprecationWarning(self, testmethod, attributeName):
warningsShown = self.flushWarnings([testmethod])
self.assertEquals(len(warningsShown), 1)
self.assertIdentical(warningsShown[0]['category'], DeprecationWarning)
self.assertEquals(
warningsShown[0]['message'],
"twisted.internet.gtkreactor." + attributeName + " "
"was deprecated in Twisted 10.1.0: All new applications should be "
"written with gtk 2.x, which is supported by "
"twisted.internet.gtk2reactor.")
def test_gtkReactor(self):
"""
Test deprecation of L{gtkreactor.GtkReactor}
"""
from twisted.internet import gtkreactor
        gtkreactor.GtkReactor()
self.lookForDeprecationWarning(self.test_gtkReactor, "GtkReactor")
def test_portableGtkReactor(self):
"""
Test deprecation of L{gtkreactor.GtkReactor}
"""
from twisted.internet import gtkreactor
gtkreactor.PortableGtkReactor()
self.lookForDeprecationWarning(self.test_portableGtkReactor,
"PortableGtkReactor")
def test_install(self):
"""
Test deprecation of L{gtkreactor.install}
"""
from twisted.internet import gtkreactor
self.assertRaises(AssertionError, gtkreactor.install)
self.lookForDeprecationWarning(self.test_install, "install")
def test_portableInstall(self):
"""
Test deprecation of L{gtkreactor.portableInstall}
"""
from twisted.internet import gtkreactor
self.assertRaises(AssertionError, gtkreactor.portableInstall)
self.lookForDeprecationWarning(self.test_portableInstall,
"portableInstall")
|
#!/usr/bin/env python3
import os
import sys
import json
import subprocess
import shutil
import datetime
os.environ["DEBIAN_FRONTEND"] = "noninteractive"
DISTRIBUTION=sys.argv[1]
NOW=datetime.datetime.utcnow().strftime("%Y%m%dT%H%M%SZ")
SCRIPT_DIR=os.path.dirname(os.path.abspath(__file__))
METAPACKAGES_DIR=os.path.abspath(os.path.join(SCRIPT_DIR, "..", "metapackages"))
BUILDINFO_DIR=os.path.abspath(os.path.join(SCRIPT_DIR, "..", DISTRIBUTION))
BUILD_DIR=os.path.abspath(os.path.join(BUILDINFO_DIR, "build"))
class cd:
"""
Context manager for changing the current working directory
Credits: https://stackoverflow.com/a/13197763
"""
def __init__(self, new_path):
self.new_path = os.path.expanduser(new_path)
def __enter__(self):
self.old_path = os.getcwd()
os.chdir(self.new_path)
def __exit__(self, etype, value, traceback):
os.chdir(self.old_path)
def run(*args, **kwargs):
"""Same as subprocess.run(), but forces 'check' to True"""
kwargs["check"] = True
print("+", " ".join(args[0]))
return subprocess.run(*args, **kwargs)
def get_buildinfo():
    """Parse and return 'buildinfo.json'"""
path = os.path.join(SCRIPT_DIR, "..", DISTRIBUTION, "buildinfo.json")
return json.load(open(path, "rb"))
def apt_install(pkgs):
"""Install a package using system's 'apt'"""
return run(["apt-get", "install", "-y"] + list(pkgs))
def cabal_update():
return run(["cabal", "update"])
def cabal_get(name, unpack_to, version=None):
"""Fetch a package from hackage and unpack it"""
pkg = name
if version is not None:
pkg += "=="
pkg += version
return run(["cabal", "get", pkg, "-d", unpack_to])
def local_get(name, unpack_to, dir):
os.makedirs(unpack_to)
shutil.copytree(
os.path.abspath(os.path.join(BUILDINFO_DIR, dir)),
os.path.join(unpack_to, os.path.basename(dir))
)
def build_debs(name, cabal_debian_options=()):
# Create debian/
cmd = ["cabal-debian"]
cmd += ["--native"]
cmd += ["--dep-map", "pthread:libpthread-stubs0-dev"]
cmd += ["--revision", "-" + NOW]
cmd += list(cabal_debian_options)
run(cmd)
# Install dependencies
run([
"mk-build-deps", "--install",
"--tool", "apt -o Debug::pkgProblemResolver=yes -y"
])
# Build package
run(["dpkg-buildpackage"])
def install_debs():
apt_install("./" + p for p in os.listdir(".") if p.endswith(".deb"))
def main():
# Install helpers
buildinfo = get_buildinfo()
apt_install(buildinfo["build-dependencies"])
# Build packages
cabal_update()
for pkg in buildinfo.get("packages", []):
unpack_dir = os.path.join(BUILD_DIR, pkg["name"])
# Don't do work if Debian packages are already built.
if os.path.exists(unpack_dir):
debs = [p for p in os.listdir(unpack_dir) if p.endswith(".deb")]
else:
debs = []
if not debs:
shutil.rmtree(unpack_dir, ignore_errors=True)
src = pkg["src"]
src_type = src["type"]
del src["type"]
if src_type == "hackage":
cabal_get(pkg["name"], unpack_dir, **src)
elif src_type == "local":
local_get(pkg["name"], unpack_dir, **src)
else:
raise Exception("Unrecognized src type: {}".format(src_type))
[pkg_dir] = os.listdir(unpack_dir)
pkg_dir = os.path.join(unpack_dir, pkg_dir)
with cd(pkg_dir):
                build_debs(pkg["name"], pkg.get("cabal_debian_options", []))
shutil.rmtree(pkg_dir)
with cd(unpack_dir):
install_debs()
# Post install script
after_install = pkg.get("after_install")
if after_install is not None:
wd = after_install.get("cwd", ".")
with cd(os.path.abspath(os.path.join(BUILDINFO_DIR, wd))):
env = after_install.get("env", {})
run(after_install["cmd"], env={**os.environ, **env})
# Build metapackages
with cd(METAPACKAGES_DIR):
for nm in os.listdir("."):
run(["equivs-build", nm])
for deb in [p for p in os.listdir(".") if p.endswith(".deb")]:
shutil.move(deb, BUILD_DIR)
# Build package index
with cd(BUILD_DIR):
run(["bash", "-c", "dpkg-scanpackages . > Packages"])
if __name__ == '__main__':
os.makedirs(BUILD_DIR, exist_ok=True)
main()
|
# Author: Arash Nemati Hayati
# 14 Feb 2019
# HPCC Brandeis
# This script will:
# 1) Remove all files inside a given path that have not been accessed/modified/ownership-changed in the last threshold days
# 2) Create a report of user owners and their deleted files
import os, time, datetime
import pwd
import grp
import sys
clear = lambda: os.system('clear')
clear()
# This class holds the user and group owners of a given file
class FILE:
    def __init__(self, filepath):
        self.path=filepath
        self.user=pwd.getpwuid(os.stat(filepath).st_uid).pw_name
        self.group=grp.getgrgid(os.stat(filepath).st_gid).gr_name
def create_report(database, time_thresh, time_now, filepath):
date=str(time_now.year)+"-"+str(time_now.month)+"-"+str(time_now.day)
file = open("work_file_removal-"+date,"w")
file.write("#This file reports user/group owners of files on "+str(filepath)+" that have not been accessed in the last "+str(time_thresh)+" days.\n")
file.write("#Report Date: "+date+"\n")
file.write("#Format: user_owner total#_removed_files\n\n")
for key in database:
file.write("%s %d\n" %(key, database[key]))
file.close()
# This function will walk through all files in a given path recursively
def file_search(filepath, time_thresh,time_now):
database={}
for (dirpath, dirnames, filenames) in os.walk(filepath):
if dirpath.find('.snapshot') == -1:
for f in filenames:
if f[0] != '.':
# get the absolute path of the file
file=dirpath+'/'+f
                    # last time the file metadata/ownership was changed
                    last_own=os.stat(file).st_ctime # seconds since the epoch
                    time_own=time.ctime(last_own) # human-readable string
                    # last time the file contents were modified
                    last_mod=os.stat(file).st_mtime # seconds since the epoch
                    time_mod=time.ctime(last_mod) # human-readable string
                    # last time the file was accessed
                    last_acc=os.stat(file).st_atime # seconds since the epoch
                    time_acc=time.ctime(last_acc) # human-readable string
# convert current time to seconds
stamp_now=datetime.datetime.timestamp(time_now)
# find the time difference between now and the last file changes
                    diff_own = stamp_now - last_own # file ownership change
diff_mod = stamp_now - last_mod # file modification
diff_acc = stamp_now - last_acc # file access
# Find the minimum time difference between now and last file change
diff_min = min(diff_acc,diff_mod,diff_own) / (24 * 3600) # in days
                    # Latest change time of the file (compare the numeric stamps, then format)
                    time_max = time.ctime(max(last_acc, last_own, last_mod))
# Get the file ownership information
F = FILE(file)
                    # Count, per user, the files that have exceeded the age threshold
if (diff_min > time_thresh):
if F.user in database:
database[F.user] += 1
else:
database[F.user] = 1
os.remove(file)
return database
def main():
# current time
time_now=datetime.datetime.now()
# time period criteria to check whether the last time the file was changed is beyond the time threshold
time_thresh=int(sys.argv[1]) # in days
# filepath
filepath=str(sys.argv[2])
# Run the file search function and create the database
database=file_search(filepath, time_thresh,time_now)
create_report(database,time_thresh,time_now,filepath)
if __name__ == '__main__':
main()
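# Usage sketch (the script file name is hypothetical):
#     python work_file_removal.py 180 /work
# removes files under /work that have not been touched for more than 180 days
# and writes a per-user report named work_file_removal-<year>-<month>-<day>.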
|
import os
import importlib
import simplejson as json
import click
from flask import current_app
import pandas as pd
import numpy as np
from sqlalchemy import Table
import datetime as dt
DATE_FORMAT = '%Y-%m-%d'
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
class Error(Exception):
"""Based class for exceptions in this modules."""
pass
class ModelNameError(Error):
"""Exception raised when model_name is not in fixture_models."""
def __init__(self, message):
self.message = message
def get_fixtures_directory():
"""Return the path to the fixtures directory.
Returns:
path (str): string of path to the fixtures directory
Notes:
        The fixtures directory defaults to the parent directory of app.root_path. This can
        be overridden by setting 'SQLAFIXTURES_DIRECTORY' in app.config.
"""
return current_app.extensions['sqlafixtures'].directory
def seed(model_names=[]):
"""Seed the database.
Parameters:
        model_names (list of str): names of models to seed. If empty, seed all.
Notes:
        app.extensions['sqlafixtures'].fixtures_directory points to the directory
where fixtures are maintained.
"""
db = current_app.extensions['sqlafixtures'].db
fixtures_directory = get_fixtures_directory()
conn = db.engine.connect()
metadata = db.metadata
fixture_models = get_fixture_models(model_names)
for mdl in fixture_models:
table_name = mdl.__table__.name
path = os.path.join(fixtures_directory, table_name + '.json')
fixture = json.load(open(path))
table = Table(fixture['table']['name'], metadata)
cols = [col for col in table.columns]
fixture['records'] = format_fixture_record_dates(
cols, fixture['records'])
conn.execute(table.insert().prefix_with(
'OR REPLACE'), fixture['records'])
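# A fixture file consumed by seed() is a JSON document of the shape written by
# create_fixture_from_file()/create_fixture_from_db() below; roughly (table and
# column names are illustrative):
#
#     {
#         "table": {"name": "users"},
#         "records": [
#             {"id": 1, "name": "alice", "created": "2020-01-01"}
#         ]
#     }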
def get_fixture_models(model_names=[], excludes=[]):
"""Return a list of models configured as sqlafixture models.
Parameters:
        model_names (list of str): names of models to return. If empty, return all
        excludes (list of str): names of models to exclude
Returns:
list of models
Note:
app.extensions['sqlafixtures'].fixtures_modules is a list of configured modules
module.FIXTURES is a list of configured models
"""
models = []
for module_name in current_app.extensions['sqlafixtures'].modules:
module = importlib.import_module(module_name)
for model_name in getattr(module, 'FIXTURES'):
if model_names and model_name not in model_names:
continue
if model_name in excludes:
continue
models.append(getattr(module, model_name))
mdl_names = [mdl.__name__ for mdl in models]
model_names = [name for name in model_names if name not in excludes]
for name in model_names:
if name not in mdl_names:
message = "Model '{name}' is not defined as a fixture.".format(
name=name)
raise ModelNameError(message)
return models
def format_fixture_record_dates(cols, records):
"""Covert json string text date to datetime.date or datetime.datetime.
Parameters:
cols (list): list of column objects from a database table.
records (list): list of dict where each dict is a record from the database.
Returns:
        records (list): list of dict records where columns with string dates have been converted
        to date or datetime.
"""
for record in records:
for col in cols:
if col.type.python_type in (dt.date, dt.datetime):
record[col.name] = convert_str_to_datetime_or_date(
record[col.name], col.type.python_type)
return records
def convert_str_to_datetime_or_date(data, col_type=dt.date):
"""Return dt.date or dt.datetime object or None.
Parameters:
        data (str): string form of a date or datetime.
col_type (dt.date or dt.datetime): if dt.date, return dt.date, else return dt.datetime
if ValueError or TypeError, return None
Return:
        data (dt.date or dt.datetime): returns dt.date, dt.datetime or None
"""
date_format = DATE_FORMAT if col_type == dt.date else DATETIME_FORMAT
try:
data = dt.datetime.strptime(data, date_format)
except (ValueError, TypeError): # ValueError for None, TypeError for not matching format
data = None
if data and col_type == dt.date:
data = data.date()
return data
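# For example, with the module-level formats above:
#     convert_str_to_datetime_or_date("2020-01-02")                       -> dt.date(2020, 1, 2)
#     convert_str_to_datetime_or_date("2020-01-02 03:04:05", dt.datetime) -> dt.datetime(2020, 1, 2, 3, 4, 5)
#     convert_str_to_datetime_or_date(None)                               -> None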
def create_fixtures(model_names, excludes=[], from_file=False):
"""Create json fixtures
Parameters:
model_names (list of str): names of models to create fixtures. If empty, create all.
excludes (list of str): names of models to exclude
from_file (boolean): True - create from xlsx file, False - create from db.
"""
models = get_fixture_models(model_names, excludes)
for model in models:
if from_file:
create_fixture_from_file(model)
else:
create_fixture_from_db(model)
def create_fixture_from_file(model):
"""Create a fixture from an excel file for the associated model.
Parameters:
model (object): The model object for the fixture to create from the file.
"""
fixtures_directory = get_fixtures_directory()
click.echo('Creating a fixture for "{model}".'.format(model=model))
fixture = {}
fixture['table'] = {}
table = model.__table__
fixture['table']['name'] = table.name
fixture['records'] = []
cols = [col.name for col in model.__table__.columns]
df = get_fixture_dataframe(table.name)
try:
df = df[cols]
except KeyError as e:
print(model)
print(table.name)
print(df.columns)
raise e
df = convert_df_dates_to_str_or_none(df, table.columns)
fixture['records'] = df.to_dict('records')
#sfile = fixtures_directory.joinpath(table.name + '.json')
sfile = os.path.join(fixtures_directory, table.name + '.json')
with open(sfile, 'w') as outfile:
json.dump(fixture, outfile, indent=4)
def convert_df_dates_to_str_or_none(df, cols):
"""Convert dataframe dates to str or None.
Parameters:
df (pd.DataFrame): The input dataframe
        cols (sqlalchemy table columns): The columns for the associated table
Returns:
        df (pd.DataFrame): DataFrame with identified date columns as string or None
"""
col_names = [col.name for col in cols if col.type.python_type in (
dt.date, dt.datetime)]
for name in col_names:
df[name] = df[name].astype(str)
df[name] = df[name].replace({'NaT': None})
return df
def get_fixture_dataframe(table_name):
"""Function to get a fixture dataframe from the masters_fixture_file."""
sfile = current_app.extensions['sqlafixtures'].file
df = pd.read_excel(sfile, sheet_name=table_name)
# Drop all rows with NaN
df = df.dropna(how='all')
# for col in df.columns:
# if df[col].dtype.name == 'int64':
# df[col] = df[col].astype(np.int32)
# if df[col].dtype.name == 'datetime64[ns]':
# df[col] = df[col].apply(format_datetime)
return df
def format_datetime(data):
return data.date().__str__()
def create_fixture_from_db(model):
"""Create a fixture from a model in the db."""
db = current_app.extensions['sqlafixtures'].db
fixtures_directory = get_fixtures_directory()
click.echo('Creating fixture from db for "{model}".'.format(model=model))
tablename = model.__tablename__
fixture = {}
fixture['table'] = {}
fixture['table']['name'] = tablename
fixture['records'] = []
cols = [col.name for col in model.__table__.columns]
statement = 'SELECT * FROM {}'.format(model.__tablename__)
rows = db.session.execute(statement)
[fixture['records'].append(dict(row)) for row in rows]
sfile = os.path.join(fixtures_directory, tablename + '.json')
with open(sfile, 'w') as outfile:
json.dump(fixture, outfile, indent=4, default=json_encoder)
def json_encoder(obj):
"""JSON encode for objects."""
if type(obj) == dt.date:
return obj.__str__()
if type(obj) == dt.datetime:
return obj.strftime(DATETIME_FORMAT)
# if isinstance(obj, dt.date):
# return obj.__str__()
|
# The following list comprehension exercises will make use of the
# defined Human class.
import pdb
class Human:
def __init__(self, name, age):
self.name = name
self.age = age
def __repr__(self):
return f"<Human: {self.name}, {self.age}>"
humans = [
Human("Alice", 29),
Human("Bob", 32),
Human("Charlie", 37),
Human("Daphne", 30),
Human("Eve", 26),
Human("Frank", 18),
Human("Glenn", 42),
Human("Harrison", 12),
Human("Igon", 41),
Human("David", 31),
]
# Write a list comprehension that creates a list of names of everyone
# whose name starts with 'D':
print("Starts with D:")
a = [i.name for i in humans if i.name[0] == 'D']
print(a)
# Write a list comprehension that creates a list of names of everyone
# whose name ends in "e".
print("Ends with e:")
b = [i.name for i in humans if i.name[-1] == 'e']
print(b)
# Write a list comprehension that creates a list of names of everyone
# whose name starts with any letter between 'C' and 'G' inclusive.
print("Starts between C and G, inclusive:")
c = [i.name for i in humans if ord(i.name[0]) in range(ord('C'), ord('G')+1)]
print(c)
# Write a list comprehension that creates a list of all the ages plus 10.
print("Ages plus 10:")
d = [i.age + 10 for i in humans]
print(d)
# Write a list comprehension that creates a list of strings which are the name
# joined to the age with a hyphen, for example "David-31", for all humans.
print("Name hyphen age:")
e = ['-'.join([i.name, str(i.age)]) for i in humans]
print(e)
# Write a list comprehension that creates a list of tuples containing name and
# age, for example ("David", 31), for everyone between the ages of 27 and 32,
# inclusive.
print("Names and ages between 27 and 32:")
f = [tuple([i.name, i.age]) for i in humans if i.age in range(27, 32+1)]
print(f)
# Write a list comprehension that creates a list of new Humans like the old
# list, except with all the names uppercase and the ages with 5 added to them.
# The "humans" list should be unmodified.
print("All names uppercase:")
g = [Human(i.name.upper(), i.age+5) for i in humans]
print(g)
# Write a list comprehension that contains the square root of all the ages.
print("Square root of ages:")
# math.sqrt is not needed here; exponentiation with ** 0.5 does the job.
h = [i.age ** 0.5 for i in humans]
print(h)
|
import execjs
import requests
class Google:
def __init__(self):
self.api = "https://translate.google.cn/translate_a/single"
self.ctx = execjs.compile("""
function TL(a) {
var k = "";
var b = 406644;
var b1 = 3293161072;
var jd = ".";
var $b = "+-a^+6";
var Zb = "+-3^+b+-f";
for (var e = [], f = 0, g = 0; g < a.length; g++) {
var m = a.charCodeAt(g);
128 > m ? e[f++] = m : (2048 > m ? e[f++] = m >> 6 | 192 : (55296 == (m & 64512) && g + 1 < a.length && 56320 == (a.charCodeAt(g + 1) & 64512) ? (m = 65536 + ((m & 1023) << 10) + (a.charCodeAt(++g) & 1023),
e[f++] = m >> 18 | 240,
e[f++] = m >> 12 & 63 | 128) : e[f++] = m >> 12 | 224,
e[f++] = m >> 6 & 63 | 128),
e[f++] = m & 63 | 128)
}
a = b;
for (f = 0; f < e.length; f++) a += e[f],
a = RL(a, $b);
a = RL(a, Zb);
a ^= b1 || 0;
0 > a && (a = (a & 2147483647) + 2147483648);
a %= 1E6;
return a.toString() + jd + (a ^ b)
};
function RL(a, b) {
var t = "a";
var Yb = "+";
for (var c = 0; c < b.length - 2; c += 3) {
var d = b.charAt(c + 2),
d = d >= t ? d.charCodeAt(0) - 87 : Number(d),
d = b.charAt(c + 1) == Yb ? a >>> d: a << d;
a = b.charAt(c) == Yb ? a + d & 4294967295 : a ^ d
}
return a
}
""")
self.to_langs = {
'中文': 'zh-CN',
'英语': 'en',
'日语': 'ja',
'韩语': 'ko',
}
def get_TK(self, text):
"""
        Get the TK value
"""
return self.ctx.call("TL", text)
def translate(self, text, from_lang, to_lang, mysignal):
        params = {
            "client": "webapp",
            "sl": from_lang,
            "tl": to_lang,
            "hl": from_lang,
            # A dict literal with duplicate "dt" keys keeps only the last one;
            # requests serializes a list value as repeated "dt" parameters instead.
            "dt": ["at", "bd", "ex", "ld", "md", "qca", "rw", "rm", "ss", "t"],
            "swap": 1,
            "otf": 2,
            "ssel": 5,
            "tsel": 5,
            "kc": 1,
            "tk": self.get_TK(text),
            "q": text
        }
res = requests.get(self.api, params=params)
res_json = res.json()
        # Merge the segmented translation results
src = []
dst = []
for lines in res_json[0]:
src.append(lines[1])
dst.append(lines[0])
src = "".join(src)
dst = "".join(dst)
mysignal.text_print.emit(src, dst)
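# Usage sketch: `mysignal` is assumed to be an object exposing a `text_print`
# signal (e.g. a Qt signal) that accepts (source_text, translated_text), based
# on the emit() call above.
#
#     # g = Google()
#     # g.translate("hello world", "en", "zh-CN", mysignal)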
|
from logging import Logger
from typing import NoReturn, Callable, Dict, Optional, Tuple
from discord_webhook import DiscordWebhook
from telegram import Bot, Chat
from .models import FormatFunction, NotificationSettings
class NotificationBase:
def __init__(self, name: str) -> NoReturn:
self.name = name
self.settings = NotificationSettings(message=True, error=True, warning=True, info=True, debug=True)
self.default_format_fn = self._default_format_fn
def _default_format_fn(self, message: str, *args, **kwargs):
return message
def _format(self, service, message: str, format_fn: Optional[FormatFunction] = None,
fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> str:
if format_fn:
return format_fn(service, message, fn_args, fn_kwargs)
else:
return self.default_format_fn(message, *args, **kwargs)
def set_default_format_fn(self, format_fn: Callable) -> NoReturn:
self.default_format_fn = format_fn
def clear_default_format_fn(self) -> NoReturn:
self.default_format_fn = self._default_format_fn
def _send(self, message: str, format_fn: Optional[FormatFunction] = None, *args, **kwargs) -> NoReturn:
raise NotImplementedError
def message(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
raise NotImplementedError
def error(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
raise NotImplementedError
def warning(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
raise NotImplementedError
def info(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
raise NotImplementedError
def debug(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
raise NotImplementedError
class Notification(NotificationBase):
def __init__(self) -> NoReturn:
super().__init__("GLOBAL")
self.services = {}
def _add(self, name: str, service: any):
self.services[name] = service
def add_logger(self, name: str, logger: Logger, settings: NotificationSettings):
self._add(name, LoggerNotification(name, logger, settings))
def add_discord(self, name: str, webhook_url: str, settings: NotificationSettings):
service = DiscordNotification(name, webhook_url, settings)
self.services[name] = service
def add_telegram(self, name: str, token: str, chat_id: int, settings: NotificationSettings):
service = TelegramNotification(name, token, chat_id, settings)
self.services[name] = service
def remove_service(self, name: str) -> NoReturn:
del self.services[name]
def get_service(self, name: str) -> any:
return self.services[name]
def set_global_settings(self, settings: NotificationSettings):
self.settings = settings
def _send(self, message: str, *args, **kwargs) -> NoReturn:
raise NotImplementedError
def message(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
if self.settings.message:
[_.message(self._format(_, message, format_fn, fn_args, fn_kwargs, *args, **kwargs), *args, **kwargs) for _
in self.services.values()]
def error(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
if self.settings.error:
[_.error(self._format(_, message, format_fn, fn_args, fn_kwargs, *args, **kwargs), *args, **kwargs) for _ in
self.services.values()]
def warning(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
if self.settings.warning:
[_.warning(self._format(_, message, format_fn, fn_args, fn_kwargs, *args, **kwargs), *args, **kwargs) for _
in self.services.values()]
def info(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
if self.settings.info:
[_.info(self._format(_, message, format_fn, fn_args, fn_kwargs, *args, **kwargs), *args, **kwargs) for _ in
self.services.values()]
def debug(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
if self.settings.debug:
[_.debug(self._format(_, message, format_fn, fn_args, fn_kwargs, *args, **kwargs), *args, **kwargs) for _ in
self.services.values()]
class LoggerNotification(NotificationBase):
    def __init__(self, name: str, logger: Logger, settings: NotificationSettings):
super().__init__(name)
self.logger = logger
self.settings = settings
def _send(self, message: str, *args, **kwargs) -> NoReturn:
raise NotImplementedError
def message(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
if self.settings.message:
msg = self._format(self, message, format_fn, fn_args, fn_kwargs, *args, **kwargs)
self.logger.warning(msg, *args, **kwargs)
def error(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
if self.settings.error:
msg = self._format(self, message, format_fn, fn_args, fn_kwargs, *args, **kwargs)
self.logger.error(msg, *args, **kwargs)
def warning(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
if self.settings.warning:
msg = self._format(self, message, format_fn, fn_args, fn_kwargs, *args, **kwargs)
self.logger.warning(msg, *args, **kwargs)
def info(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
if self.settings.info:
msg = self._format(self, message, format_fn, fn_args, fn_kwargs, *args, **kwargs)
            self.logger.info(msg, *args, **kwargs)
def debug(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
if self.settings.debug:
msg = self._format(self, message, format_fn, fn_args, fn_kwargs, *args, **kwargs)
self.logger.debug(msg, *args, **kwargs)
class ChatNotification(NotificationBase):
def __init__(self, name: str):
super().__init__(name)
def _send(self, message: str, *args, **kwargs):
raise NotImplementedError
def message(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
if self.settings.message:
msg = self._format(self, message, format_fn, fn_args, fn_kwargs, *args, **kwargs)
self._send(msg, *args, **kwargs)
def error(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
if self.settings.error:
msg = self._format(self, message, format_fn, fn_args, fn_kwargs, *args, **kwargs)
self._send(msg, *args, **kwargs)
def warning(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
if self.settings.warning:
msg = self._format(self, message, format_fn, fn_args, fn_kwargs, *args, **kwargs)
self._send(msg, *args, **kwargs)
def info(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
if self.settings.info:
msg = self._format(self, message, format_fn, fn_args, fn_kwargs, *args, **kwargs)
self._send(msg, *args, **kwargs)
def debug(self, message: str, format_fn: Optional[FormatFunction] = None, fn_args: Optional[Tuple] = None,
fn_kwargs: Optional[Dict] = None, *args, **kwargs) -> NoReturn:
if self.settings.debug:
msg = self._format(self, message, format_fn, fn_args, fn_kwargs, *args, **kwargs)
self._send(msg, *args, **kwargs)
class DiscordNotification(ChatNotification):
def __init__(self, name: str, webhook_url: str, settings: NotificationSettings,
bot_config: Optional[Dict] = None) -> NoReturn:
super().__init__(name)
if bot_config:
self.send_fn = DiscordWebhook(**bot_config)
else:
self.send_fn = DiscordWebhook(webhook_url)
self.settings = settings
def _send(self, message: str, *args, **kwargs):
if message != '':
self.send_fn.set_content(message)
self.send_fn.execute()
class TelegramNotification(ChatNotification):
def __init__(self, name: str, token: str, chat_id: int, settings: NotificationSettings,
bot_config: Optional[Dict] = None, chat_config: Optional[Dict] = None) -> NoReturn:
super().__init__(name)
if bot_config:
self.bot = Bot(**bot_config)
else:
self.bot = Bot(token)
self.chat_id = chat_id
if chat_config:
self.send_fn = Chat(**chat_config)
else:
self.send_fn = Chat(self.chat_id, "private", bot=self.bot)
self.settings = settings
def _send(self, message: str, *args, **kwargs):
if message != '':
self.send_fn.send_message(message)
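# Usage sketch (service name and settings values are illustrative):
#
#     # import logging
#     # notifier = Notification()
#     # notifier.add_logger(
#     #     "console",
#     #     logging.getLogger("app"),
#     #     NotificationSettings(message=True, error=True, warning=True, info=False, debug=False),
#     # )
#     # notifier.error("something went wrong")  # fans out to every registered service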
|
# coding: utf-8
"""
Camunda BPM REST API
OpenApi Spec for Camunda BPM REST API. # noqa: E501
The version of the OpenAPI document: 7.13.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class ExternalTaskQueryDto(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'external_task_id': 'str',
'external_task_id_in': 'list[str]',
'topic_name': 'str',
'worker_id': 'str',
'locked': 'bool',
'not_locked': 'bool',
'with_retries_left': 'bool',
'no_retries_left': 'bool',
'lock_expiration_after': 'datetime',
'lock_expiration_before': 'datetime',
'activity_id': 'str',
'activity_id_in': 'list[str]',
'execution_id': 'str',
'process_instance_id': 'str',
'process_instance_id_in': 'list[str]',
'process_definition_id': 'str',
'tenant_id_in': 'list[str]',
'active': 'bool',
'suspended': 'bool',
'priority_higher_than_or_equals': 'int',
'priority_lower_than_or_equals': 'int',
'sorting': 'list[ExternalTaskQueryDtoSorting]'
}
attribute_map = {
'external_task_id': 'externalTaskId',
'external_task_id_in': 'externalTaskIdIn',
'topic_name': 'topicName',
'worker_id': 'workerId',
'locked': 'locked',
'not_locked': 'notLocked',
'with_retries_left': 'withRetriesLeft',
'no_retries_left': 'noRetriesLeft',
'lock_expiration_after': 'lockExpirationAfter',
'lock_expiration_before': 'lockExpirationBefore',
'activity_id': 'activityId',
'activity_id_in': 'activityIdIn',
'execution_id': 'executionId',
'process_instance_id': 'processInstanceId',
'process_instance_id_in': 'processInstanceIdIn',
'process_definition_id': 'processDefinitionId',
'tenant_id_in': 'tenantIdIn',
'active': 'active',
'suspended': 'suspended',
'priority_higher_than_or_equals': 'priorityHigherThanOrEquals',
'priority_lower_than_or_equals': 'priorityLowerThanOrEquals',
'sorting': 'sorting'
}
def __init__(self, external_task_id=None, external_task_id_in=None, topic_name=None, worker_id=None, locked=None, not_locked=None, with_retries_left=None, no_retries_left=None, lock_expiration_after=None, lock_expiration_before=None, activity_id=None, activity_id_in=None, execution_id=None, process_instance_id=None, process_instance_id_in=None, process_definition_id=None, tenant_id_in=None, active=None, suspended=None, priority_higher_than_or_equals=None, priority_lower_than_or_equals=None, sorting=None, local_vars_configuration=None): # noqa: E501
"""ExternalTaskQueryDto - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._external_task_id = None
self._external_task_id_in = None
self._topic_name = None
self._worker_id = None
self._locked = None
self._not_locked = None
self._with_retries_left = None
self._no_retries_left = None
self._lock_expiration_after = None
self._lock_expiration_before = None
self._activity_id = None
self._activity_id_in = None
self._execution_id = None
self._process_instance_id = None
self._process_instance_id_in = None
self._process_definition_id = None
self._tenant_id_in = None
self._active = None
self._suspended = None
self._priority_higher_than_or_equals = None
self._priority_lower_than_or_equals = None
self._sorting = None
self.discriminator = None
if external_task_id is not None:
self.external_task_id = external_task_id
if external_task_id_in is not None:
self.external_task_id_in = external_task_id_in
if topic_name is not None:
self.topic_name = topic_name
if worker_id is not None:
self.worker_id = worker_id
self.locked = locked
self.not_locked = not_locked
self.with_retries_left = with_retries_left
self.no_retries_left = no_retries_left
self.lock_expiration_after = lock_expiration_after
self.lock_expiration_before = lock_expiration_before
if activity_id is not None:
self.activity_id = activity_id
if activity_id_in is not None:
self.activity_id_in = activity_id_in
if execution_id is not None:
self.execution_id = execution_id
if process_instance_id is not None:
self.process_instance_id = process_instance_id
if process_instance_id_in is not None:
self.process_instance_id_in = process_instance_id_in
if process_definition_id is not None:
self.process_definition_id = process_definition_id
if tenant_id_in is not None:
self.tenant_id_in = tenant_id_in
self.active = active
self.suspended = suspended
self.priority_higher_than_or_equals = priority_higher_than_or_equals
self.priority_lower_than_or_equals = priority_lower_than_or_equals
if sorting is not None:
self.sorting = sorting
@property
def external_task_id(self):
"""Gets the external_task_id of this ExternalTaskQueryDto. # noqa: E501
Filter by an external task's id. # noqa: E501
:return: The external_task_id of this ExternalTaskQueryDto. # noqa: E501
:rtype: str
"""
return self._external_task_id
@external_task_id.setter
def external_task_id(self, external_task_id):
"""Sets the external_task_id of this ExternalTaskQueryDto.
Filter by an external task's id. # noqa: E501
:param external_task_id: The external_task_id of this ExternalTaskQueryDto. # noqa: E501
:type: str
"""
self._external_task_id = external_task_id
@property
def external_task_id_in(self):
"""Gets the external_task_id_in of this ExternalTaskQueryDto. # noqa: E501
Filter by the comma-separated list of external task ids. # noqa: E501
:return: The external_task_id_in of this ExternalTaskQueryDto. # noqa: E501
:rtype: list[str]
"""
return self._external_task_id_in
@external_task_id_in.setter
def external_task_id_in(self, external_task_id_in):
"""Sets the external_task_id_in of this ExternalTaskQueryDto.
Filter by the comma-separated list of external task ids. # noqa: E501
:param external_task_id_in: The external_task_id_in of this ExternalTaskQueryDto. # noqa: E501
:type: list[str]
"""
self._external_task_id_in = external_task_id_in
@property
def topic_name(self):
"""Gets the topic_name of this ExternalTaskQueryDto. # noqa: E501
Filter by an external task topic. # noqa: E501
:return: The topic_name of this ExternalTaskQueryDto. # noqa: E501
:rtype: str
"""
return self._topic_name
@topic_name.setter
def topic_name(self, topic_name):
"""Sets the topic_name of this ExternalTaskQueryDto.
Filter by an external task topic. # noqa: E501
:param topic_name: The topic_name of this ExternalTaskQueryDto. # noqa: E501
:type: str
"""
self._topic_name = topic_name
@property
def worker_id(self):
"""Gets the worker_id of this ExternalTaskQueryDto. # noqa: E501
Filter by the id of the worker that the task was most recently locked by. # noqa: E501
:return: The worker_id of this ExternalTaskQueryDto. # noqa: E501
:rtype: str
"""
return self._worker_id
@worker_id.setter
def worker_id(self, worker_id):
"""Sets the worker_id of this ExternalTaskQueryDto.
Filter by the id of the worker that the task was most recently locked by. # noqa: E501
:param worker_id: The worker_id of this ExternalTaskQueryDto. # noqa: E501
:type: str
"""
self._worker_id = worker_id
@property
def locked(self):
"""Gets the locked of this ExternalTaskQueryDto. # noqa: E501
Only include external tasks that are currently locked (i.e., they have a lock time and it has not expired). Value may only be `true`, as `false` matches any external task. # noqa: E501
:return: The locked of this ExternalTaskQueryDto. # noqa: E501
:rtype: bool
"""
return self._locked
@locked.setter
def locked(self, locked):
"""Sets the locked of this ExternalTaskQueryDto.
Only include external tasks that are currently locked (i.e., they have a lock time and it has not expired). Value may only be `true`, as `false` matches any external task. # noqa: E501
:param locked: The locked of this ExternalTaskQueryDto. # noqa: E501
:type: bool
"""
self._locked = locked
@property
def not_locked(self):
"""Gets the not_locked of this ExternalTaskQueryDto. # noqa: E501
Only include external tasks that are currently not locked (i.e., they have no lock or it has expired). Value may only be `true`, as `false` matches any external task. # noqa: E501
:return: The not_locked of this ExternalTaskQueryDto. # noqa: E501
:rtype: bool
"""
return self._not_locked
@not_locked.setter
def not_locked(self, not_locked):
"""Sets the not_locked of this ExternalTaskQueryDto.
Only include external tasks that are currently not locked (i.e., they have no lock or it has expired). Value may only be `true`, as `false` matches any external task. # noqa: E501
:param not_locked: The not_locked of this ExternalTaskQueryDto. # noqa: E501
:type: bool
"""
self._not_locked = not_locked
@property
def with_retries_left(self):
"""Gets the with_retries_left of this ExternalTaskQueryDto. # noqa: E501
Only include external tasks that have a positive (> 0) number of retries (or `null`). Value may only be `true`, as `false` matches any external task. # noqa: E501
:return: The with_retries_left of this ExternalTaskQueryDto. # noqa: E501
:rtype: bool
"""
return self._with_retries_left
@with_retries_left.setter
def with_retries_left(self, with_retries_left):
"""Sets the with_retries_left of this ExternalTaskQueryDto.
Only include external tasks that have a positive (> 0) number of retries (or `null`). Value may only be `true`, as `false` matches any external task. # noqa: E501
:param with_retries_left: The with_retries_left of this ExternalTaskQueryDto. # noqa: E501
:type: bool
"""
self._with_retries_left = with_retries_left
@property
def no_retries_left(self):
"""Gets the no_retries_left of this ExternalTaskQueryDto. # noqa: E501
Only include external tasks that have 0 retries. Value may only be `true`, as `false` matches any external task. # noqa: E501
:return: The no_retries_left of this ExternalTaskQueryDto. # noqa: E501
:rtype: bool
"""
return self._no_retries_left
@no_retries_left.setter
def no_retries_left(self, no_retries_left):
"""Sets the no_retries_left of this ExternalTaskQueryDto.
Only include external tasks that have 0 retries. Value may only be `true`, as `false` matches any external task. # noqa: E501
:param no_retries_left: The no_retries_left of this ExternalTaskQueryDto. # noqa: E501
:type: bool
"""
self._no_retries_left = no_retries_left
@property
def lock_expiration_after(self):
"""Gets the lock_expiration_after of this ExternalTaskQueryDto. # noqa: E501
Restrict to external tasks that have a lock that expires after a given date. By [default](https://docs.camunda.org/manual/7.13/reference/rest/overview/date-format/), the date must have the format `yyyy-MM-dd'T'HH:mm:ss.SSSZ`, e.g., `2013-01-23T14:42:45.000+0200`. # noqa: E501
:return: The lock_expiration_after of this ExternalTaskQueryDto. # noqa: E501
:rtype: datetime
"""
return self._lock_expiration_after
@lock_expiration_after.setter
def lock_expiration_after(self, lock_expiration_after):
"""Sets the lock_expiration_after of this ExternalTaskQueryDto.
Restrict to external tasks that have a lock that expires after a given date. By [default](https://docs.camunda.org/manual/7.13/reference/rest/overview/date-format/), the date must have the format `yyyy-MM-dd'T'HH:mm:ss.SSSZ`, e.g., `2013-01-23T14:42:45.000+0200`. # noqa: E501
:param lock_expiration_after: The lock_expiration_after of this ExternalTaskQueryDto. # noqa: E501
:type: datetime
"""
self._lock_expiration_after = lock_expiration_after
@property
def lock_expiration_before(self):
"""Gets the lock_expiration_before of this ExternalTaskQueryDto. # noqa: E501
Restrict to external tasks that have a lock that expires before a given date. By [default](https://docs.camunda.org/manual/7.13/reference/rest/overview/date-format/), the date must have the format `yyyy-MM-dd'T'HH:mm:ss.SSSZ`, e.g., `2013-01-23T14:42:45.000+0200`. # noqa: E501
:return: The lock_expiration_before of this ExternalTaskQueryDto. # noqa: E501
:rtype: datetime
"""
return self._lock_expiration_before
@lock_expiration_before.setter
def lock_expiration_before(self, lock_expiration_before):
"""Sets the lock_expiration_before of this ExternalTaskQueryDto.
Restrict to external tasks that have a lock that expires before a given date. By [default](https://docs.camunda.org/manual/7.13/reference/rest/overview/date-format/), the date must have the format `yyyy-MM-dd'T'HH:mm:ss.SSSZ`, e.g., `2013-01-23T14:42:45.000+0200`. # noqa: E501
:param lock_expiration_before: The lock_expiration_before of this ExternalTaskQueryDto. # noqa: E501
:type: datetime
"""
self._lock_expiration_before = lock_expiration_before
@property
def activity_id(self):
"""Gets the activity_id of this ExternalTaskQueryDto. # noqa: E501
Filter by the id of the activity that an external task is created for. # noqa: E501
:return: The activity_id of this ExternalTaskQueryDto. # noqa: E501
:rtype: str
"""
return self._activity_id
@activity_id.setter
def activity_id(self, activity_id):
"""Sets the activity_id of this ExternalTaskQueryDto.
Filter by the id of the activity that an external task is created for. # noqa: E501
:param activity_id: The activity_id of this ExternalTaskQueryDto. # noqa: E501
:type: str
"""
self._activity_id = activity_id
@property
def activity_id_in(self):
"""Gets the activity_id_in of this ExternalTaskQueryDto. # noqa: E501
Filter by the comma-separated list of ids of the activities that an external task is created for. # noqa: E501
:return: The activity_id_in of this ExternalTaskQueryDto. # noqa: E501
:rtype: list[str]
"""
return self._activity_id_in
@activity_id_in.setter
def activity_id_in(self, activity_id_in):
"""Sets the activity_id_in of this ExternalTaskQueryDto.
Filter by the comma-separated list of ids of the activities that an external task is created for. # noqa: E501
:param activity_id_in: The activity_id_in of this ExternalTaskQueryDto. # noqa: E501
:type: list[str]
"""
self._activity_id_in = activity_id_in
@property
def execution_id(self):
"""Gets the execution_id of this ExternalTaskQueryDto. # noqa: E501
Filter by the id of the execution that an external task belongs to. # noqa: E501
:return: The execution_id of this ExternalTaskQueryDto. # noqa: E501
:rtype: str
"""
return self._execution_id
@execution_id.setter
def execution_id(self, execution_id):
"""Sets the execution_id of this ExternalTaskQueryDto.
Filter by the id of the execution that an external task belongs to. # noqa: E501
:param execution_id: The execution_id of this ExternalTaskQueryDto. # noqa: E501
:type: str
"""
self._execution_id = execution_id
@property
def process_instance_id(self):
"""Gets the process_instance_id of this ExternalTaskQueryDto. # noqa: E501
Filter by the id of the process instance that an external task belongs to. # noqa: E501
:return: The process_instance_id of this ExternalTaskQueryDto. # noqa: E501
:rtype: str
"""
return self._process_instance_id
@process_instance_id.setter
def process_instance_id(self, process_instance_id):
"""Sets the process_instance_id of this ExternalTaskQueryDto.
Filter by the id of the process instance that an external task belongs to. # noqa: E501
:param process_instance_id: The process_instance_id of this ExternalTaskQueryDto. # noqa: E501
:type: str
"""
self._process_instance_id = process_instance_id
@property
def process_instance_id_in(self):
"""Gets the process_instance_id_in of this ExternalTaskQueryDto. # noqa: E501
Filter by a comma-separated list of process instance ids that an external task may belong to. # noqa: E501
:return: The process_instance_id_in of this ExternalTaskQueryDto. # noqa: E501
:rtype: list[str]
"""
return self._process_instance_id_in
@process_instance_id_in.setter
def process_instance_id_in(self, process_instance_id_in):
"""Sets the process_instance_id_in of this ExternalTaskQueryDto.
Filter by a comma-separated list of process instance ids that an external task may belong to. # noqa: E501
:param process_instance_id_in: The process_instance_id_in of this ExternalTaskQueryDto. # noqa: E501
:type: list[str]
"""
self._process_instance_id_in = process_instance_id_in
@property
def process_definition_id(self):
"""Gets the process_definition_id of this ExternalTaskQueryDto. # noqa: E501
Filter by the id of the process definition that an external task belongs to. # noqa: E501
:return: The process_definition_id of this ExternalTaskQueryDto. # noqa: E501
:rtype: str
"""
return self._process_definition_id
@process_definition_id.setter
def process_definition_id(self, process_definition_id):
"""Sets the process_definition_id of this ExternalTaskQueryDto.
Filter by the id of the process definition that an external task belongs to. # noqa: E501
:param process_definition_id: The process_definition_id of this ExternalTaskQueryDto. # noqa: E501
:type: str
"""
self._process_definition_id = process_definition_id
@property
def tenant_id_in(self):
"""Gets the tenant_id_in of this ExternalTaskQueryDto. # noqa: E501
Filter by a comma-separated list of tenant ids. An external task must have one of the given tenant ids. # noqa: E501
:return: The tenant_id_in of this ExternalTaskQueryDto. # noqa: E501
:rtype: list[str]
"""
return self._tenant_id_in
@tenant_id_in.setter
def tenant_id_in(self, tenant_id_in):
"""Sets the tenant_id_in of this ExternalTaskQueryDto.
Filter by a comma-separated list of tenant ids. An external task must have one of the given tenant ids. # noqa: E501
:param tenant_id_in: The tenant_id_in of this ExternalTaskQueryDto. # noqa: E501
:type: list[str]
"""
self._tenant_id_in = tenant_id_in
@property
def active(self):
"""Gets the active of this ExternalTaskQueryDto. # noqa: E501
Only include active tasks. Value may only be `true`, as `false` matches any external task. # noqa: E501
:return: The active of this ExternalTaskQueryDto. # noqa: E501
:rtype: bool
"""
return self._active
@active.setter
def active(self, active):
"""Sets the active of this ExternalTaskQueryDto.
Only include active tasks. Value may only be `true`, as `false` matches any external task. # noqa: E501
:param active: The active of this ExternalTaskQueryDto. # noqa: E501
:type: bool
"""
self._active = active
@property
def suspended(self):
"""Gets the suspended of this ExternalTaskQueryDto. # noqa: E501
Only include suspended tasks. Value may only be `true`, as `false` matches any external task. # noqa: E501
:return: The suspended of this ExternalTaskQueryDto. # noqa: E501
:rtype: bool
"""
return self._suspended
@suspended.setter
def suspended(self, suspended):
"""Sets the suspended of this ExternalTaskQueryDto.
Only include suspended tasks. Value may only be `true`, as `false` matches any external task. # noqa: E501
:param suspended: The suspended of this ExternalTaskQueryDto. # noqa: E501
:type: bool
"""
self._suspended = suspended
@property
def priority_higher_than_or_equals(self):
"""Gets the priority_higher_than_or_equals of this ExternalTaskQueryDto. # noqa: E501
Only include jobs with a priority higher than or equal to the given value. Value must be a valid `long` value. # noqa: E501
:return: The priority_higher_than_or_equals of this ExternalTaskQueryDto. # noqa: E501
:rtype: int
"""
return self._priority_higher_than_or_equals
@priority_higher_than_or_equals.setter
def priority_higher_than_or_equals(self, priority_higher_than_or_equals):
"""Sets the priority_higher_than_or_equals of this ExternalTaskQueryDto.
Only include jobs with a priority higher than or equal to the given value. Value must be a valid `long` value. # noqa: E501
:param priority_higher_than_or_equals: The priority_higher_than_or_equals of this ExternalTaskQueryDto. # noqa: E501
:type: int
"""
self._priority_higher_than_or_equals = priority_higher_than_or_equals
@property
def priority_lower_than_or_equals(self):
"""Gets the priority_lower_than_or_equals of this ExternalTaskQueryDto. # noqa: E501
Only include jobs with a priority lower than or equal to the given value. Value must be a valid `long` value. # noqa: E501
:return: The priority_lower_than_or_equals of this ExternalTaskQueryDto. # noqa: E501
:rtype: int
"""
return self._priority_lower_than_or_equals
@priority_lower_than_or_equals.setter
def priority_lower_than_or_equals(self, priority_lower_than_or_equals):
"""Sets the priority_lower_than_or_equals of this ExternalTaskQueryDto.
Only include jobs with a priority lower than or equal to the given value. Value must be a valid `long` value. # noqa: E501
:param priority_lower_than_or_equals: The priority_lower_than_or_equals of this ExternalTaskQueryDto. # noqa: E501
:type: int
"""
self._priority_lower_than_or_equals = priority_lower_than_or_equals
@property
def sorting(self):
"""Gets the sorting of this ExternalTaskQueryDto. # noqa: E501
A JSON array of criteria to sort the result by. Each element of the array is a JSON object that specifies one ordering. The position in the array identifies the rank of an ordering, i.e., whether it is primary, secondary, etc. The ordering objects have the following properties: **Note:** The `sorting` properties will not be applied to the External Task count query. # noqa: E501
:return: The sorting of this ExternalTaskQueryDto. # noqa: E501
:rtype: list[ExternalTaskQueryDtoSorting]
"""
return self._sorting
@sorting.setter
def sorting(self, sorting):
"""Sets the sorting of this ExternalTaskQueryDto.
A JSON array of criteria to sort the result by. Each element of the array is a JSON object that specifies one ordering. The position in the array identifies the rank of an ordering, i.e., whether it is primary, secondary, etc. The ordering objects have the following properties: **Note:** The `sorting` properties will not be applied to the External Task count query. # noqa: E501
:param sorting: The sorting of this ExternalTaskQueryDto. # noqa: E501
:type: list[ExternalTaskQueryDtoSorting]
"""
self._sorting = sorting
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExternalTaskQueryDto):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ExternalTaskQueryDto):
return True
return self.to_dict() != other.to_dict()
|
import gc
import numpy as np
import argparse
import sys
import itertools
import librosa
import scipy
from scipy.signal import stft, istft
# use CQT based on nonstationary gabor transform
from nsgt import NSGT_sliced, MelScale, LogScale, BarkScale, VQLogScale
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='check slice length support for nsgt config'
)
parser.add_argument(
'--control',
action='store_true',
help='evaluate control (stft)'
)
parser.add_argument(
'--bins',
type=str,
default='12,2000,10',
help='comma-separated range of bins to evaluate, step is last element'
)
parser.add_argument(
'--fmins',
type=str,
default='10,130,5',
help='comma-separated range of fmin to evaluate, step is last element'
)
parser.add_argument(
'--fmaxes',
type=str,
default='14000,22050,5',
help='comma-separated range of fmax to evaluate, step is last element'
)
parser.add_argument(
'--gammas',
type=str,
default='0,100',
help='comma-separated range of gamma to evaluate'
)
parser.add_argument(
'--fscale',
type=str,
default='vqlog',
help='nsgt frequency scale (choices: vqlog, cqlog, mel, bark)'
)
parser.add_argument(
'--sllen-test',
type=int,
default=16384*2,
help='sllen to test'
)
args = parser.parse_args()
fs = 44100
sldur = args.sllen_test/fs
bmin, bmax, bstep = [int(x) for x in args.bins.split(',')]
fmin, fmax, fstep = [float(x) for x in args.fmins.split(',')]
fmaxmin, fmaxmax, fmaxstep = [float(x) for x in args.fmaxes.split(',')]
bins = np.arange(bmin,bmax,bstep)
fmins = np.arange(fmin,fmax,fstep)
fmaxes = np.arange(fmaxmin,fmaxmax,fmaxstep)
for (fmin, fmax) in itertools.product(fmins, fmaxes):
for fbins in bins:
scl = None
if args.fscale == 'mel':
scl = MelScale(fmin, fs/2, fbins)
elif args.fscale == 'bark':
scl = BarkScale(fmin, fs/2, fbins)
elif args.fscale == 'vqlog':
scl = VQLogScale(fmin, fs/2, fbins, gamma=25)
elif args.fscale == 'cqlog':
scl = LogScale(fmin, fs/2, fbins)
# use slice length required to support desired frequency scale/q factors
sllen_suggested = scl.suggested_sllen(fs)
if sllen_suggested > args.sllen_test:
print(f"testing nsgt param combination:\n\t{args.fscale=} {fbins=} {fmin=} {fmax=}")
print(f'sllen too big to be supported by slice duration: {sldur:.2f} s')
break
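# Usage sketch (the script file name is hypothetical):
#     python check_sllen.py --fscale bark --bins 12,100,12 --sllen-test 32768
# prints every (fmin, fmax, bins) combination whose suggested slice length
# exceeds the slice length under test.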
|
import unittest
import pinion
from pinion import parse_hosts, create_packet
class BaseTestCase(unittest.TestCase):
def test_parse_hosts(self):
expected = [("127.0.0.1", 4730)]
self.assertEqual(parse_hosts([("127.0.0.1", "4730")]), expected)
self.assertEqual(parse_hosts(["127.0.0.1"]), expected)
self.assertEqual(parse_hosts(["127.0.0.1:4730"]), expected)
self.assertEqual(parse_hosts([{'host': "127.0.0.1"}]), expected)
self.assertEqual(parse_hosts([{'host': "127.0.0.1", 'port': "4730"}]), expected)
def test_create_packet(self):
self.assertEqual(create_packet(1, []), b'\x00REQ\x00\x00\x00\x01\x00\x00\x00\x00')
self.assertEqual(create_packet(1, [b'a']), b'\x00REQ\x00\x00\x00\x01\x00\x00\x00\x01a')
self.assertEqual(create_packet(1, [b'a', b'b']), b'\x00REQ\x00\x00\x00\x01\x00\x00\x00\x03a\x00b')
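        # The expected byte strings above follow the Gearman request layout:
        # a b"\x00REQ" magic, a 4-byte big-endian packet type, a 4-byte
        # big-endian payload length, then the arguments joined by NUL bytes.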
def test_create_packet_accepts_only_binary_packet_data(self):
with self.assertRaises(pinion.GearmanException):
            create_packet(1, [u"\u2603"])
class GearmanClientTestCase(unittest.TestCase):
def test_submit_job_job_data_does_not_contain_null(self):
client = pinion.GearmanClient(["127.0.0.1"])
with self.assertRaises(pinion.GearmanException):
client.submit_job('task', b'\x00containsNULL')
class GearmanWorkerTestCase(unittest.TestCase):
pass
|
from .nanopublication import Nanopublication
from .nanopublication_manager import NanopublicationManager
|
Import("env", "projenv")
import serial
import time
import io
def after_upload(source, target, env):
print("Start reset.")
ser = serial.Serial("COM3", 115200, serial.EIGHTBITS, serial.PARITY_NONE, serial.STOPBITS_ONE, xonxoff=0, rtscts=0)
#if(ser.isOpen()):
# print("It was previously open so close it.")
# ser.close()
if(not ser.isOpen()):
ser.open()
ser.setDTR(False)
time.sleep(1)
ser.flushInput()
ser.setDTR(True)
time.sleep(3)
if(not ser.isOpen()):
raise Exception("Could not open serial port.")
print("Sending RST Command.")
sio = io.TextIOWrapper(io.BufferedRWPair(ser, ser))
sio.write("RST")
sio.flush()
ser.write([0x0A])
ser.flush()
print("BTL Command Sent.")
ser.close
env.AddPostAction("upload", after_upload)
|
from rest_framework.exceptions import APIException
class UsernameUnavailableException(APIException):
status_code = 200
default_detail = 'This username is unavailable.'
default_code = 'username_unavailable'
class EmailInUseException(APIException):
status_code = 200
default_detail = 'This email is already linked to another account.'
default_code = 'email_in_use'
|
import logging
from django.db.models import Count, Sum
from django.conf import settings
from django.core.management.base import NoArgsCommand
from pycon.finaid.models import FinancialAidApplication
from pycon.finaid.utils import send_email_message
logger = logging.getLogger(__name__)
class Command(NoArgsCommand):
def handle_noargs(self, **options):
result = FinancialAidApplication.objects.aggregate(Count('id'), Sum('amount_requested'))
template_name = 'admin/weekly'
send_email_message(template_name,
from_=settings.FINANCIAL_AID_EMAIL,
to=settings.FINANCIAL_AID_WEEKLY_REPORT_EMAIL,
context=result)
|
#! /usr/bin/env python
import os.path
import sys
import sift_pyx12.error_handler
import sift_pyx12.map_if
from sift_pyx12.params import params
import sift_pyx12.segment
def donode(node):
print((node.get_path()))
for child in node.children:
if child.is_loop() or child.is_segment():
donode(child)
param = params()
param.set('map_path', os.path.expanduser('~/src/sift_pyx12/map/'))
map = sift_pyx12.map_if.load_map_file(sys.argv[1], param)
donode(map)
|
#!/usr/bin/python
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script analyzes link map file generated by Xcode. It calculates and
# prints out the sizes of each dependent library and the total sizes of the
# symbols.
# The script takes one parameter, which is the path to the link map file.
import sys
import re
table_tag = {}
state = "start"
table_stats_symbol = {}
table_stats_dead = {}
section_total_size = 0
symbol_total_size = 0
file_import = sys.argv[1]
lines = list(open(file_import))
for line in lines:
line_stripped = line[:-1]
if "# Object files:" == line_stripped:
state = "object"
continue
elif "# Sections:" == line_stripped:
state = "section"
continue
elif "# Symbols:" == line_stripped:
state = "symbol"
continue
elif "# Dead Stripped Symbols:" == line_stripped:
state = "dead"
continue
if state == "object":
        segs = re.search(r'(\[ *[0-9]*\]) (.*)', line_stripped)
table_tag[segs.group(1)] = segs.group(2)
if state == "section":
if len(line_stripped) == 0 or line_stripped[0] == '#':
continue
        segs = re.search(r'^(.+?)\s+(.+?)\s+.*', line_stripped)
section_total_size += int(segs.group(2), 16)
if state == "symbol":
if len(line_stripped) == 0 or line_stripped[0] == '#':
continue
segs = re.search(r'^.+?\s+(.+?)\s+(\[.+?\]).*', line_stripped)
target = table_tag[segs.group(2)]
target_stripped = re.search(r'^(.*?)(\(.+?\))?$', target).group(1)
size = int(segs.group(1), 16)
if target_stripped not in table_stats_symbol:
table_stats_symbol[target_stripped] = 0
table_stats_symbol[target_stripped] += size
print("Sections total size: %d" % section_total_size)
for target in table_stats_symbol:
print(target)
print(table_stats_symbol[target])
symbol_total_size += table_stats_symbol[target]
print("Symbols total size: %d" % symbol_total_size)
|
# -*- coding: utf-8 -*-
import enigma.core as core
import enigma.rotors as rotors
import enigma.reflectors as reflectors
if __name__ == '__main__':
main()
|
__version__ = '0.1.0'
API_PREFIX = '/v1'
|
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
import cProfile
import pstats
import time
from functools import wraps
import hfut
from .log import logger
def term_range(major_name, max_term_number):
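"""Map a major's enrollment year (the first four characters of its name)
to the range of term numbers it covers: up to eight consecutive terms,
capped at ``max_term_number``."""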
start_year = int(major_name[:4])
start = (start_year - 2001) * 2 - 1
end = start + 7
if max_term_number < end:
end = max_term_number
return range(start, end + 1)
def elapse_dec(func):
@wraps(func)
def wrap(*args, **kwargs):
start = time.time()
rv = func(*args, **kwargs)
end = time.time()
elapsed = end - start
logger.info('Execution cost of %s(args: %s, kwargs: %s): %f sec.', func.__name__, args, kwargs, elapsed)
return rv
return wrap
def profile(func):
@wraps(func)
def wrap(*args, **kwargs):
pr = cProfile.Profile()
pr.enable()
rv = func(*args, **kwargs)
pr.disable()
# bucket output files by timestamp in 5-minute intervals
with open('%s_%d.txt' % (func.__name__, time.time() // 300), 'w') as f:
sortby = 'cumulative'
pstats.Stats(pr, stream=f).strip_dirs().sort_stats(sortby).print_stats()
return rv
return wrap
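# Hedged usage sketch (illustrative only): stacking both decorators on a
# hypothetical helper; each call logs its elapsed time via `logger` and writes
# a cProfile report into a timestamp-bucketed text file.
@elapse_dec
@profile
def slow_square(n):
    time.sleep(0.1)  # simulate a slow operation
    return n * n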
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class AuthConfig(AppConfig):
name = "blapp.auth"
label = "blapp_auth"
verbose_name = _("Authentication and authorization")
|
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
df = pd.read_csv('train.csv')
dfi = df.set_index('PassengerId')
dfi.shape
dfi.columns
dfi.describe()
dfi.info()
#Write a program that calculates the number of surviving passengers and prints it to the screen.
surv = dfi[dfi['Survived'] == 1].count()
surv.head(1)
firstc = dfi[['Pclass', 'Survived']]
firstc_t = firstc[firstc['Pclass'] == 1]
t = firstc_t['Pclass'].value_counts()
f = firstc_t[firstc_t['Survived'] == 1]
perc = f['Survived'].value_counts() * 100 / t
perc
firstc_t.groupby('Pclass')['Survived'].value_counts().plot.pie()
plt.xlabel("63 % of first class passengers survived")
dfi['Age'].mean()
del dfi['Embarked']
del dfi['Cabin']
dfi1 = dfi.dropna()
dfi1.groupby('Survived')['Age'].mean()
dfi.reset_index(inplace=True)
dfi.set_index(['Survived'], inplace=True)
dfi['Age'].fillna({0:30.6, 1:28.3}, inplace=True)
dfi.reset_index(inplace=True)
dfi.groupby(['Survived', 'Sex'])['Pclass'].value_counts().unstack().plot.bar()
plt.ylabel("Person count")
plt.title("surviving/dead passengers separated by class and gender")
# Feature Engineering
#* normalizing -- de-mean, scale or otherwise transform features
#* scaling -- shift the mean and standard deviation
#* imputation -- fill missing values
#* one-hot encoding -- convert categories to binary columns
#* add features -- add extra polynomial or combined features
#* feature selection -- decide which features to use
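# Hedged sketch of the same steps with sklearn transformers (illustrative only;
# the notebook carries them out manually below): imputation, one-hot encoding
# and scaling combined in a single ColumnTransformer.
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import make_pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
preprocess = ColumnTransformer([
    ('num', make_pipeline(SimpleImputer(strategy='median'), StandardScaler()),
     ['Age', 'Fare']),                               # imputation + scaling
    ('cat', OneHotEncoder(handle_unknown='ignore'),
     ['Sex', 'Pclass']),                             # one-hot encoding
])
# preprocess.fit_transform(dfi[['Age', 'Fare', 'Sex', 'Pclass']]) would yield
# the engineered feature matrix in one step.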
dfi.set_index('PassengerId', inplace=True)
#Columns = 'Pclass', 'Name', 'Sex', 'Age', 'SibSp', 'Parch', 'Ticket', 'Fare'
#binary (one-hot encoding) - Sex, Pclass, SibSp, Parch
#normalizing - Fare, Age
dfi['Sex_bin'] = [1 if x == 'male' else 0 for x in dfi['Sex']]
del dfi['Sex']
del dfi['Ticket']
Pc = dfi['Pclass']
Pclass_bin = pd.get_dummies(Pc)
Pclass_bin.columns = ['Pclass1', 'Pclass2', 'Pclass3']
dfi = pd.concat([dfi, Pclass_bin], axis=1)
del dfi['Pclass']
dfi['Age'] = dfi['Age'].apply(lambda x: x / 26501.77)
dfi['Fare'] = dfi['Fare'].apply(lambda x: x / 28693.9493)
Parch = dfi['Parch'].values
SibSp = dfi['SibSp'].values
dfi['family'] = list(zip(Parch, SibSp))
dfi['family'] = Parch + SibSp  # assumed completion: total relatives aboard
#dfi['family'] = pd.DataFrame(Parch, SibSp)
#m = PolynomialFeatures(interaction_only=True)
#m.fit_transform(dfi['family'])
# Logistic Regression
#* sigmoid function -- S-shaped function mapping any real value into (0, 1)
#* coefficients -- model parameters in the linear part
#* predicted probability -- output of the sigmoid applied to the linear part
#* threshold value -- probability at which a positive prediction is made (default 0.5)
#* log loss -- error function to be optimized
#* one-vs-rest -- strategy for extending binary classifiers to multiple classes
#* softmax -- generalization of the sigmoid used for multinomial regression
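# Hedged sketch (not in the original notebook): a logistic-regression baseline
# on the features engineered above; the chosen feature list and the 80/20
# train/test split are illustrative assumptions.
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
features = ['Age', 'Fare', 'SibSp', 'Parch', 'Sex_bin', 'Pclass1', 'Pclass2', 'Pclass3']
X = dfi[features]
y = dfi['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
clf = LogisticRegression(max_iter=1000)   # sigmoid over a linear combination of the features
clf.fit(X_train, y_train)                 # coefficients estimated by minimizing log loss
proba = clf.predict_proba(X_test)         # predicted probabilities for each class
pred = proba[:, 1] >= 0.5                 # threshold value 0.5, equivalent to clf.predict(X_test)
print("accuracy:", clf.score(X_test, y_test))
print("log loss:", log_loss(y_test, proba))
print("positive predictions:", int(pred.sum()))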
|
"""
Package: service
Package for the application models and service routes
This module creates and configures the Flask app and sets up the logging
"""
import importlib
import logging
import os
from flask import Flask
# Create Flask application
app = Flask(__name__)
app.config.from_object("config")
# Import the routes after the Flask app is created
user_module_name = os.path.split(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))[1]
submodule_name = os.path.split(os.path.abspath(os.path.dirname(__file__)))[1]
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
module = file[:file.find('.py')]
importlib.import_module(user_module_name + '.' + submodule_name + '.' + module)
# Set up logging for production
print("Setting up logging for {}...".format(__name__))
app.logger.propagate = False
if __name__ != "__main__":
gunicorn_logger = logging.getLogger("gunicorn.error")
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
# Make all log formats consistent
formatter = logging.Formatter(
"[%(asctime)s] [%(levelname)s] [%(module)s] %(message)s", "%Y-%m-%d %H:%M:%S %z"
)
for handler in app.logger.handlers:
handler.setFormatter(formatter)
app.logger.info("Logging handler established")
app.logger.info(70 * "*")
app.logger.info(" A M R V E R B N E T S E R V I C E ".center(70, "*"))
app.logger.info(70 * "*")
app.logger.info("Service inititalized!")
|
# Copyright 2021, 2022 Cambridge Quantum Computing Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
QuantumTrainer
==============
A trainer that wraps the training loop of a :py:class:`QuantumModel`
"""
from __future__ import annotations
from typing import Any, Callable, Mapping, Optional, Type, Union
import os
import numpy as np
from lambeq.core.globals import VerbosityLevel
from lambeq.training.quantum_model import QuantumModel
from lambeq.training.trainer import Trainer
from lambeq.training.optimizer import Optimizer
class QuantumTrainer(Trainer):
"""A Trainer for the quantum pipeline."""
model: QuantumModel
def __init__(
self,
model: QuantumModel,
loss_function: Callable,
epochs: int,
optimizer: Type[Optimizer],
optim_hyperparams: dict[str, float],
evaluate_functions: Optional[Mapping[str, Callable]] = None,
evaluate_on_train: bool = True,
use_tensorboard: bool = False,
log_dir: Optional[Union[str, os.PathLike]] = None,
from_checkpoint: bool = False,
verbose: str = VerbosityLevel.TEXT.value,
seed: Optional[int] = None) -> None:
"""Initialise a :py:class:`.Trainer` instance using a quantum backend.
Parameters
----------
model : :py:class:`.QuantumModel`
A lambeq Model.
loss_function : callable
A loss function.
epochs : int
Number of training epochs.
optimizer : Optimizer
An optimizer of type :py:class:`lambeq.training.Optimizer`.
optim_hyperparams : dict of str to float
The hyperparameters to be passed to the optimizer.
evaluate_functions : mapping of str to callable, optional
Mapping of evaluation metric functions from their names.
Structure [{"metric": func}].
Each function takes the prediction "y_hat" and the label "y" as
input.
The validation step calls "func(y_hat, y)".
evaluate_on_train : bool, default: True
Evaluate the metrics on the train dataset.
use_tensorboard : bool, default: False
Use Tensorboard for visualisation of the training logs.
log_dir : str or PathLike, optional
Location of model checkpoints (and tensorboard log). Default is
`runs/**CURRENT_DATETIME_HOSTNAME**`.
from_checkpoint : bool, default: False
Starts training from the checkpoint, saved in the log_dir.
verbose : str, default: 'text'
See :py:class:`VerbosityLevel` for options.
seed : int, optional
Random seed.
"""
if seed is not None:
np.random.seed(seed)
super().__init__(model,
loss_function,
epochs,
evaluate_functions,
evaluate_on_train,
use_tensorboard,
log_dir,
from_checkpoint,
verbose,
seed)
self.optimizer = optimizer(self.model,
optim_hyperparams,
self.loss_function)
def _add_extra_chkpoint_info(self) -> Mapping[str, Any]:
"""Add any additional information to the training checkpoint. These
might include model-specific information like the random state of the
backend or the state of the optimizer.
Returns
-------
mapping of str to any
Mapping containing the extra information to save.
"""
return {'numpy_random_state': np.random.get_state(),
'optimizer_state_dict': self.optimizer.state_dict()}
def _load_extra_chkpoint_info(self,
checkpoint: Mapping[str, Any]) -> None:
"""Load the additional checkpoint information that was previously
added by calling the method `_add_extra_chkpoint_info()`.
Parameters
----------
checkpoint : mapping of str to any
Mapping containing the checkpoint information.
"""
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if self.seed is not None:
np.random.set_state(checkpoint['numpy_random_state'])
def training_step(
self,
batch: tuple[list[Any], np.ndarray]) -> tuple[np.ndarray, float]:
"""Perform a training step.
Parameters
----------
batch : tuple of list and np.ndarray
Current batch.
Returns
-------
Tuple of np.ndarray and float
The model predictions and the calculated loss.
"""
y_hat, loss = self.optimizer.backward(batch)
self.train_costs.append(loss)
self.optimizer.step()
self.optimizer.zero_grad()
return y_hat, loss
def validation_step(
self,
batch: tuple[list[Any], np.ndarray]) -> tuple[np.ndarray, float]:
"""Perform a validation step.
Parameters
----------
batch : tuple of list and np.ndarray
Current batch.
Returns
-------
tuple of np.ndarray and float
The model predictions and the calculated loss.
"""
x, y = batch
y_hat = self.model(x)
loss = self.loss_function(y_hat, y)
return y_hat, loss
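# Minimal usage sketch (illustrative, not part of this module). It assumes
# `train_circuits` and `train_labels` were prepared by an earlier stage of the
# lambeq pipeline, and uses lambeq's NumpyModel, SPSAOptimizer and Dataset:
#
#     from lambeq import Dataset, NumpyModel, SPSAOptimizer
#
#     model = NumpyModel.from_diagrams(train_circuits)
#     loss = lambda y_hat, y: -np.sum(y * np.log(y_hat)) / len(y)  # cross entropy
#     trainer = QuantumTrainer(
#         model,
#         loss_function=loss,
#         epochs=100,
#         optimizer=SPSAOptimizer,
#         optim_hyperparams={'a': 0.05, 'c': 0.06, 'A': 0.01 * 100},
#         seed=0,
#     )
#     trainer.fit(Dataset(train_circuits, train_labels))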
|