hexsha (stringlengths, 40 to 40) | size (int64, 3 to 1.03M) | ext (stringclasses, 10 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths, 3 to 972) | max_stars_repo_name (stringlengths, 6 to 130) | max_stars_repo_head_hexsha (stringlengths, 40 to 78) | max_stars_repo_licenses (listlengths, 1 to 10) | max_stars_count (int64, 1 to 191k, ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths, 24 to 24, ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths, 24 to 24, ⌀) | max_issues_repo_path (stringlengths, 3 to 972) | max_issues_repo_name (stringlengths, 6 to 130) | max_issues_repo_head_hexsha (stringlengths, 40 to 78) | max_issues_repo_licenses (listlengths, 1 to 10) | max_issues_count (int64, 1 to 116k, ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths, 24 to 24, ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths, 24 to 24, ⌀) | max_forks_repo_path (stringlengths, 3 to 972) | max_forks_repo_name (stringlengths, 6 to 130) | max_forks_repo_head_hexsha (stringlengths, 40 to 78) | max_forks_repo_licenses (listlengths, 1 to 10) | max_forks_count (int64, 1 to 105k, ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths, 24 to 24, ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths, 24 to 24, ⌀) | content (stringlengths, 3 to 1.03M) | avg_line_length (float64, 1.13 to 941k) | max_line_length (int64, 2 to 941k) | alphanum_fraction (float64, 0 to 1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8d4fdfe20025b0e5daa5a835dc6c99f6fc1fb9bb | 1,069 | py | Python | vlp/loader_utils.py | ChenYutongTHU/VLP | 0a52c7d5444c880bb56d89a409aca229bde8a96f | ["Apache-2.0"] | null | null | null | vlp/loader_utils.py | ChenYutongTHU/VLP | 0a52c7d5444c880bb56d89a409aca229bde8a96f | ["Apache-2.0"] | null | null | null | vlp/loader_utils.py | ChenYutongTHU/VLP | 0a52c7d5444c880bb56d89a409aca229bde8a96f | ["Apache-2.0"] | null | null | null |
from random import randint, shuffle
from random import random as rand
import pickle
import json
from collections import namedtuple
import torch
import torch.nn as nn
import unicodedata
from multiprocessing import Lock
def get_random_word(vocab_words):
i = randint(0, len(vocab_words)-1)
return vocab_words[i]
def batch_list_to_batch_tensors(batch): #[(info, (...)), (info, (...))]
batch_tensors = []
info_batch = [d[0] for d in batch]
data_batch = [d[1] for d in batch]
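# zip(*data_batch) regroups the per-sample data tuples into per-field tuples (one group per field across all samples)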
for x in zip(*data_batch):
if isinstance(x[0], torch.Tensor):
batch_tensors.append(torch.stack(x))
else:
batch_tensors.append(torch.tensor(x, dtype=torch.long))
return info_batch, batch_tensors
class Pipeline():
""" Pre-process Pipeline Class : callable """
def __init__(self):
super().__init__()
self.mask_same_word = None
self.skipgram_prb = None
self.skipgram_size = None
def __call__(self, instance):
raise NotImplementedError
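A minimal usage sketch for batch_list_to_batch_tensors above (not part of the original file; the import path simply mirrors the repo layout shown in this row and is assumed to be importable):

import torch
from vlp.loader_utils import batch_list_to_batch_tensors  # assumed package path, per the repo layout above

# Each sample is an (info, data) pair; corresponding data fields must line up across samples.
batch = [({"id": 0}, (torch.zeros(3), 7)), ({"id": 1}, (torch.ones(3), 9))]
info_batch, batch_tensors = batch_list_to_batch_tensors(batch)
# info_batch       -> [{"id": 0}, {"id": 1}]
# batch_tensors[0] -> stacked float tensor of shape (2, 3)
# batch_tensors[1] -> tensor([7, 9]) with dtype torch.long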
| 26.725 | 73 | 0.647334 |
d634afdc1f105bb4fcc8082ae0d0d27e255d718f | 1,957 | py | Python | tests/test_02_app/test_simple_app.py | sondrelg/uvicorn-gunicorn-docker | ddd38797d6a9ca820bd8b3134a0398ef3df8877a | ["MIT"] | null | null | null | tests/test_02_app/test_simple_app.py | sondrelg/uvicorn-gunicorn-docker | ddd38797d6a9ca820bd8b3134a0398ef3df8877a | ["MIT"] | null | null | null | tests/test_02_app/test_simple_app.py | sondrelg/uvicorn-gunicorn-docker | ddd38797d6a9ca820bd8b3134a0398ef3df8877a | ["MIT"] | null | null | null |
import os
import time
from pathlib import Path
import docker
import requests
from docker.client import DockerClient
from ..utils import (
CONTAINER_NAME,
IMAGE_NAME,
generate_dockerfile_content,
get_config,
get_logs,
get_response_text2,
remove_previous_container,
)
client = docker.from_env()
def verify_container(container: DockerClient, response_text: str) -> None:
config_data = get_config(container)
assert config_data['workers_per_core'] == 1
assert config_data['host'] == '0.0.0.0'
assert config_data['port'] == '80'
assert config_data['loglevel'] == 'info'
assert config_data['workers'] >= 2
assert config_data['bind'] == '0.0.0.0:80'
logs = get_logs(container)
assert 'Checking for script in /app/prestart.sh' in logs
assert 'Running script /app/prestart.sh' in logs
assert 'Running inside /app/prestart.sh, you could add migrations to this file' in logs
response = requests.get('http://127.0.0.1:8000')
assert response.text == response_text
def test_simple_app() -> None:
name = os.getenv('NAME', '')
dockerfile_content = generate_dockerfile_content(name)
dockerfile = 'Dockerfile'
response_text = get_response_text2()
sleep_time = int(os.getenv('SLEEP_TIME', 1))
remove_previous_container(client)
test_path = Path(__file__)
path = test_path.parent / 'simple_app'
dockerfile_path = path / dockerfile
dockerfile_path.write_text(dockerfile_content)
client.images.build(path=str(path), dockerfile=dockerfile, tag=IMAGE_NAME)
container = client.containers.run(IMAGE_NAME, name=CONTAINER_NAME, ports={'80': '8000'}, detach=True)
time.sleep(sleep_time)
verify_container(container, response_text)
container.stop()
# Test that everything works after restarting too
container.start()
time.sleep(sleep_time)
verify_container(container, response_text)
container.stop()
container.remove()
| 32.616667 | 105 | 0.718447 |
4b6648131ce2ac67964a548bf5a76cd5ee797ac9 | 1,905 | py | Python | caffe/zz_experimental/mnist-gpu/model/test_predict.py | PipelineAI/models | d8df07877aa8b10ce9b84983bb440af75e84dca7 | ["Apache-2.0"] | 44 | 2017-11-17T06:19:05.000Z | 2021-11-03T06:00:56.000Z | caffe/zz_experimental/mnist-cpu/model/test_predict.py | PipelineAI/models | d8df07877aa8b10ce9b84983bb440af75e84dca7 | ["Apache-2.0"] | 3 | 2018-08-09T14:28:17.000Z | 2018-09-10T03:32:42.000Z | caffe/zz_experimental/mnist-cpu/model/test_predict.py | PipelineAI/models | d8df07877aa8b10ce9b84983bb440af75e84dca7 | ["Apache-2.0"] | 21 | 2017-11-18T15:12:12.000Z | 2020-08-15T07:08:33.000Z |
import pipeline_invoke
json_bytes = b'{"image": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,10,150,238,254,255,237,150,150,225,161,221,203,18,0,0,0,0,0,0,0,0,0,0,0,0,0,0,66,146,253,253,253,253,253,253,253,253,253,253,253,173,0,0,0,0,0,0,0,0,0,0,0,0,0,140,251,253,253,178,114,114,114,114,114,114,167,253,253,154,0,0,0,0,0,0,0,0,0,0,0,12,84,240,253,248,170,28,0,0,0,0,0,0,90,253,250,90,0,0,0,0,0,0,0,0,0,0,10,129,226,253,235,128,0,0,0,0,0,0,0,8,188,253,190,0,0,0,0,0,0,0,0,0,0,0,56,250,253,246,98,0,0,0,0,0,0,0,0,76,243,234,100,0,0,0,0,0,0,0,0,0,0,0,185,253,248,44,0,0,0,0,0,0,0,0,34,245,253,95,0,0,0,0,0,0,0,0,0,0,0,0,69,187,87,0,0,0,0,0,0,0,0,22,164,253,223,63,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,55,247,253,85,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,47,230,253,184,10,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,145,253,241,43,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,57,250,253,206,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,135,253,253,40,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,105,251,248,108,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,68,226,253,180,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,40,233,253,205,13,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,189,253,240,72,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,110,253,253,109,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,47,242,253,159,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,43,198,228,24,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}'
print(pipeline_invoke.invoke(json_bytes))
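A short illustrative sketch (not from the original file), assuming the `image` field is a flattened 28x28 MNIST digit, i.e. 784 grayscale values in 0-255, which appears to be what the long literal above encodes:

import json

# Build the same kind of request body from a Python list instead of a hand-written literal.
pixels = [0] * 784  # flattened 28x28 grayscale digit, row-major
json_bytes = json.dumps({"image": pixels}).encode("utf-8")

# The model-specific pipeline_invoke module from the file above would then be called as:
# print(pipeline_invoke.invoke(json_bytes))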
| 317.5 | 1,837 | 0.576378 |
d010d3bc1854d58cc950439ce52ce40a45e7a39d | 6,229 | py | Python | demo/demo_app/admin.py | patpro28/semantic-admin | 1c56dc03f33837661065f5bf226bf2e6a500aff8 | ["MIT"] | null | null | null | demo/demo_app/admin.py | patpro28/semantic-admin | 1c56dc03f33837661065f5bf226bf2e6a500aff8 | ["MIT"] | null | null | null | demo/demo_app/admin.py | patpro28/semantic-admin | 1c56dc03f33837661065f5bf226bf2e6a500aff8 | ["MIT"] | null | null | null |
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import ModelAdmin as DefaultModelAdmin
from django.contrib.admin import StackedInline as DefaultStackedInline
from django.contrib.admin import TabularInline as DefaultTabularInline
from django.contrib.auth.models import Group, User
from django.db.models import Count
from django.urls import reverse
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from semantic_admin import (
SemanticModelAdmin,
SemanticStackedInline,
SemanticTabularInline,
)
from taggit.models import Tag
from .filters import PersonFilter
from .models import Favorite, Person, Picture
try:
from django.utils.translation import gettext_lazy as _ # Django >= 4
except ImportError:
from django.utils.translation import ugettext_lazy as _
admin.site.unregister(User)
admin.site.unregister(Group)
admin.site.unregister(Tag)
if "semantic_admin" in settings.INSTALLED_APPS:
ModelAdmin = SemanticModelAdmin
StackedInline = SemanticStackedInline
TabularInline = SemanticTabularInline
else:
ModelAdmin = DefaultModelAdmin
StackedInline = DefaultStackedInline
TabularInline = DefaultTabularInline
def html5_picture(obj, css=""):
name = str(obj)
img = obj.get_img(css=css)
html = f"{img}<em>{name}</em>"
return format_html(mark_safe(html))
class PictureStackedInline(StackedInline):
model = Picture
fields = (
("date_and_time", "tags"),
"inline_picture",
"is_color",
)
readonly_fields = ("inline_picture",)
show_change_link = True
extra = 0
def inline_picture(self, obj):
return html5_picture(obj, css="large rounded")
inline_picture.short_description = _("picture").capitalize() # type: ignore
def has_add_permission(self, request, obj=None):
return False
class PersonFavoriteTabularInline(TabularInline):
model = Favorite
autocomplete_fields = fields = ("picture",)
extra = 0
@admin.register(Person)
class PersonAdmin(ModelAdmin):
search_fields = ("name",)
filter_class = PersonFilter
list_display = ("name", "birthday", "list_friends", "list_favorites")
list_editable = ("birthday",)
fieldsets = (
(None, {"fields": (("name", "birthday"),)}),
(_("extra").capitalize(), {"fields": (("slug", "url", "email"),)}),
(None, {"fields": ("friends",)}),
)
prepopulated_fields = {"slug": ("name",)}
autocomplete_fields = ("friends",)
list_per_page = 10
actions = ("send_friend_request",)
inlines = (PictureStackedInline, PersonFavoriteTabularInline)
def list_friends(self, obj):
friends = []
for friend in obj.friends.all():
url = reverse("admin:demo_app_person_change", args=(friend.pk,))
a = f"<a href={url}>{friend.name}</a>"
friends.append(a)
html = ", ".join(friends)
return format_html(mark_safe(html))
list_friends.short_description = _("friends").capitalize() # type: ignore
def list_favorites(self, obj):
favorites = []
for favorite in obj.favorites.all():
picture = favorite.picture
name = str(picture)
url = reverse("admin:demo_app_picture_change", args=(picture.pk,))
img = picture.get_img(css="tiny rounded")
a = f"<a href={url}>{img}<em>{name}</em></a>"
favorites.append(a)
html = "".join(favorites)
return format_html(mark_safe(html))
list_favorites.short_description = _("favorites").capitalize() # type: ignore
def send_friend_request(self, request, queryset):
msg = _("You are now friends with {friends}.")
format_dict = {"friends": ", ".join((obj.name for obj in queryset))}
self.message_user(request, msg.format(**format_dict))
def get_queryset(self, request):
queryset = super().get_queryset(request)
return queryset.prefetch_related("friends", "favorites__picture")
class PictureFavoriteTabularInline(TabularInline):
model = Favorite
autocomplete_fields = fields = ("person",)
extra = 0
@admin.register(Picture)
class PictureAdmin(ModelAdmin):
search_fields = ("tags__name",)
list_filter = ("person",)
list_display = (
"list_picture",
"person",
"date_and_time",
"is_color",
"has_favorites",
)
list_editable = (
"person",
"date_and_time",
"is_color",
)
fields = (
("date_and_time", "tags", "is_color"),
"detail_picture",
)
readonly_fields = (
"list_picture",
"person_changelink",
"has_favorites",
"detail_picture",
)
date_hierarchy = "date_and_time"
list_per_page = 10
inlines = (PictureFavoriteTabularInline,)
def list_picture(self, obj):
return html5_picture(obj, css="medium rounded")
list_picture.short_description = _("picture").capitalize() # type: ignore
list_picture.admin_order_field = "date_and_time" # type: ignore
def detail_picture(self, obj):
return html5_picture(obj, css="large rounded")
detail_picture.short_description = _("picture").capitalize() # type: ignore
def person_changelink(self, obj):
url = reverse("admin:demo_app_person_change", args=(obj.pk,))
a = f"<a href={url}>{obj.person.name}</a>"
return format_html(mark_safe(a))
person_changelink.short_description = _("person").capitalize() # type: ignore
person_changelink.admin_order_field = "person" # type: ignore
def has_favorites(self, obj):
return obj.total_favorites > 1
has_favorites.short_description = _("has favorites").capitalize() # type: ignore
has_favorites.admin_order_field = "total_favorites"
has_favorites.boolean = True # type: ignore
def has_add_permission(self, request):
return False
def get_queryset(self, request):
queryset = super().get_queryset(request)
queryset = queryset.select_related("person")
queryset = queryset.prefetch_related("tags")
return queryset.annotate(total_favorites=Count("favorites"))
| 31.780612 | 85 | 0.668968 |
fc7f278969273dff1b5adcd4acbdfd9d88351e79 | 7,831 | py | Python | finrl/commands/list_commands.py | solazu/FinRL-Library | 6cfe00933c16fc8a74efc9fb3d9cfa1b3bf296ea | ["MIT"] | 1 | 2021-07-18T13:31:55.000Z | 2021-07-18T13:31:55.000Z | finrl/commands/list_commands.py | solazu/FinRL-Library | 6cfe00933c16fc8a74efc9fb3d9cfa1b3bf296ea | ["MIT"] | null | null | null | finrl/commands/list_commands.py | solazu/FinRL-Library | 6cfe00933c16fc8a74efc9fb3d9cfa1b3bf296ea | ["MIT"] | null | null | null |
import csv
import logging
import sys
from collections import OrderedDict
from pathlib import Path
from typing import Any, Dict, List
import rapidjson
from colorama import Fore, Style
from colorama import init as colorama_init
from tabulate import tabulate
from finrl.config import setup_utils_configuration
from finrl.constants import USERPATH_HYPEROPTS, USERPATH_STRATEGIES
from finrl.exceptions import OperationalException
from finrl.exchange import available_exchanges, ccxt_exchanges, market_is_active
from finrl.misc import plural
from finrl.resolvers import ExchangeResolver
from finrl.state import RunMode
logger = logging.getLogger(__name__)
"""
TODO MAKE LIST AGENTS, LIST MODELS, LIST ENVIRONMENTS
ARGS_LIST_EXCHANGES = ["print_one_column", "list_exchanges_all"]
ARGS_LIST_TIMEFRAMES = ["exchange", "print_one_column"]
ARGS_LIST_PAIRS = ["exchange", "print_list", "list_pairs_print_json", "print_one_column",
"print_csv", "base_currencies", "quote_currencies", "list_pairs_all"]
ARGS_TEST_PAIRLIST = ["config", "quote_currencies", "print_one_column", "list_pairs_print_json"]
"""
def start_list_exchanges(args: Dict[str, Any]) -> None:
"""
Print available exchanges
:param args: Cli args from Arguments()
:return: None
"""
exchanges = ccxt_exchanges() if args['list_exchanges_all'] else available_exchanges()
if args['print_one_column']:
print('\n'.join(exchanges))
else:
if args['list_exchanges_all']:
print(f"All exchanges supported by the ccxt library: {', '.join(exchanges)}")
else:
print(f"Exchanges available for Freqtrade: {', '.join(exchanges)}")
def _print_objs_tabular(objs: List, print_colorized: bool) -> None:
if print_colorized:
colorama_init(autoreset=True)
red = Fore.RED
yellow = Fore.YELLOW
reset = Style.RESET_ALL
else:
red = ''
yellow = ''
reset = ''
names = [s['name'] for s in objs]
objss_to_print = [{
'name': s['name'] if s['name'] else "--",
'location': s['location'].name,
'status': (red + "LOAD FAILED" + reset if s['class'] is None
else "OK" if names.count(s['name']) == 1
else yellow + "DUPLICATE NAME" + reset)
} for s in objs]
print(tabulate(objss_to_print, headers='keys', tablefmt='psql', stralign='right'))
def start_list_timeframes(args: Dict[str, Any]) -> None:
"""
Print ticker intervals (timeframes) available on Exchange
"""
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
# Do not use timeframe set in the config
config['timeframe'] = None
# Init exchange
exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
if args['print_one_column']:
print('\n'.join(exchange.timeframes))
else:
print(f"Timeframes available for the exchange `{exchange.name}`: "
f"{', '.join(exchange.timeframes)}")
def start_list_markets(args: Dict[str, Any], pairs_only: bool = False) -> None:
"""
Print pairs/markets on the exchange
:param args: Cli args from Arguments()
:param pairs_only: if True print only pairs, otherwise print all instruments (markets)
:return: None
"""
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
# Init exchange
exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
# By default only active pairs/markets are to be shown
active_only = not args.get('list_pairs_all', False)
base_currencies = args.get('base_currencies', [])
quote_currencies = args.get('quote_currencies', [])
try:
pairs = exchange.get_markets(base_currencies=base_currencies,
quote_currencies=quote_currencies,
pairs_only=pairs_only,
active_only=active_only)
# Sort the pairs/markets by symbol
pairs = OrderedDict(sorted(pairs.items()))
except Exception as e:
raise OperationalException(f"Cannot get markets. Reason: {e}") from e
else:
summary_str = ((f"Exchange {exchange.name} has {len(pairs)} ") +
("active " if active_only else "") +
(plural(len(pairs), "pair" if pairs_only else "market")) +
(f" with {', '.join(base_currencies)} as base "
f"{plural(len(base_currencies), 'currency', 'currencies')}"
if base_currencies else "") +
(" and" if base_currencies and quote_currencies else "") +
(f" with {', '.join(quote_currencies)} as quote "
f"{plural(len(quote_currencies), 'currency', 'currencies')}"
if quote_currencies else ""))
headers = ["Id", "Symbol", "Base", "Quote", "Active",
*(['Is pair'] if not pairs_only else [])]
tabular_data = []
for _, v in pairs.items():
tabular_data.append({'Id': v['id'], 'Symbol': v['symbol'],
'Base': v['base'], 'Quote': v['quote'],
'Active': market_is_active(v),
**({'Is pair': exchange.market_is_tradable(v)}
if not pairs_only else {})})
if (args.get('print_one_column', False) or
args.get('list_pairs_print_json', False) or
args.get('print_csv', False)):
# Print summary string in the log in case of machine-readable
# regular formats.
logger.info(f"{summary_str}.")
else:
# Print empty string separating leading logs and output in case of
# human-readable formats.
print()
if len(pairs):
if args.get('print_list', False):
# print data as a list, with human-readable summary
print(f"{summary_str}: {', '.join(pairs.keys())}.")
elif args.get('print_one_column', False):
print('\n'.join(pairs.keys()))
elif args.get('list_pairs_print_json', False):
print(rapidjson.dumps(list(pairs.keys()), default=str))
elif args.get('print_csv', False):
writer = csv.DictWriter(sys.stdout, fieldnames=headers)
writer.writeheader()
writer.writerows(tabular_data)
else:
# print data as a table, with the human-readable summary
print(f"{summary_str}:")
print(tabulate(tabular_data, headers='keys', tablefmt='psql', stralign='right'))
elif not (args.get('print_one_column', False) or
args.get('list_pairs_print_json', False) or
args.get('print_csv', False)):
print(f"{summary_str}.")
def start_show_trades(args: Dict[str, Any]) -> None:
"""
Show trades
"""
import json
from freqtrade.persistence import Trade, init_db
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
if 'db_url' not in config:
raise OperationalException("--db-url is required for this command.")
logger.info(f'Using DB: "{config["db_url"]}"')
init_db(config['db_url'], clean_open_orders=False)
tfilter = []
if config.get('trade_ids'):
tfilter.append(Trade.id.in_(config['trade_ids']))
trades = Trade.get_trades(tfilter).all()
logger.info(f"Printing {len(trades)} Trades: ")
if config.get('print_json', False):
print(json.dumps([trade.to_json() for trade in trades], indent=4))
else:
for trade in trades:
print(trade)
| 38.014563 | 97 | 0.607585 |
e4b2b587c9b3d29673d783aeb832e4b6ce1e1d55 | 6,191 | py | Python | pytorch/benchmark.py | mingfeima/convnet-benchmarks | e07c4814cc9ca1fdcbda1ff3ea4fcb386ed7691a | ["MIT"] | 4 | 2018-10-30T08:15:00.000Z | 2021-03-08T03:44:20.000Z | pytorch/benchmark.py | mingfeima/convnet-benchmarks | e07c4814cc9ca1fdcbda1ff3ea4fcb386ed7691a | ["MIT"] | null | null | null | pytorch/benchmark.py | mingfeima/convnet-benchmarks | e07c4814cc9ca1fdcbda1ff3ea4fcb386ed7691a | ["MIT"] | 5 | 2018-04-04T23:30:10.000Z | 2020-12-08T06:34:03.000Z |
import argparse
import torch
from torch.autograd import Variable
import torch.nn as nn
import torchvision.models as models
import torch.optim as optim
import time
import subprocess
from collections import OrderedDict
from mobilenet import MobileNetV2
models.__dict__['mobilenet_v2'] = MobileNetV2
from shufflenet import ShuffleNet
models.__dict__['shufflenet'] = ShuffleNet
from unet2d import UNet
models.__dict__['unet'] = UNet
from unet3d import UNet3D
models.__dict__['unet3d'] = UNet3D
archs = OrderedDict()
archs['alexnet'] = [128, 3, 224, 224]
archs['vgg11'] = [64, 3, 224, 224]
archs['inception_v3'] = [32, 3, 299, 299]
archs['resnet50'] = [128, 3, 224, 224]
archs['squeezenet1_0'] = [128, 3, 224, 224]
archs['densenet121'] = [32, 3, 224, 224]
archs['mobilenet_v2'] = [128, 3, 224, 224]
archs['shufflenet'] = [128, 3, 224, 224]
archs['unet'] = [32, 3, 128, 128]
archs['unet3d'] = [6, 4, 64, 64, 64]
archs_list = list(archs.keys())
steps = 10 # nb of steps in loop to average perf
nDryRuns = 5 # nb of warmup steps
def benchmark():
# benchmark settings
parser = argparse.ArgumentParser(description='PyTorch Convnet Benchmark')
parser.add_argument('--arch', action='store', default='all',
choices=archs_list + ['all'],
help='model name can be specified. all is default.' )
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disable CUDA')
parser.add_argument('--inference', action='store_true', default=False,
help='run inference only')
parser.add_argument('--single-batch-size', action='store_true', default=False,
help='single batch size')
parser.add_argument('--print-iteration-time', action='store_true', default=False,
help='print iteration time')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
arch_dict = {args.arch: archs[args.arch]} if args.arch in archs_list else archs # by huiming, support one or all models.
if args.cuda:
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
cudnn.deterministic = True
kernel = 'cudnn'
p = subprocess.check_output('nvidia-smi --query-gpu=name --format=csv',
shell=True)
device_name = str(p).split('\\n')[1]
else:
kernel = 'nn'
p = subprocess.check_output('cat /proc/cpuinfo | grep name | head -n 1',
shell = True)
device_name = str(p).split(':')[1][:-3]
print('Running on device: %s' % (device_name))
def _time():
if args.cuda:
torch.cuda.synchronize()
return time.time()
for arch, sizes in arch_dict.items():
if arch == 'unet3d':
batch_size, c, d, h, w = sizes[0], sizes[1], sizes[2], sizes[3], sizes[4]
batch_size = 1 if args.single_batch_size else batch_size
print('ModelType: %s, Kernels: %s Input shape: %dx%dx%dx%dx%d' %
(arch, kernel, batch_size, c, d, h, w))
data_ = torch.randn(batch_size, c, d, h, w)
else:
batch_size, c, h, w = sizes[0], sizes[1], sizes[2], sizes[3]
batch_size = 64 if arch == 'resnet50' and args.inference else batch_size
batch_size = 1 if args.single_batch_size else batch_size
print('ModelType: %s, Kernels: %s Input shape: %dx%dx%dx%d' %
(arch, kernel, batch_size, c, h, w))
data_ = torch.randn(batch_size, c, h, w)
target_ = torch.arange(1, batch_size + 1).long()
net = models.__dict__[arch]() # no need to load pre-trained weights for dummy data
optimizer = optim.SGD(net.parameters(), lr=0.01)
criterion = nn.CrossEntropyLoss()
if args.cuda:
data_, target_ = data_.cuda(), target_.cuda()
net.cuda()
criterion = criterion.cuda()
if args.inference:
net.eval()
else:
net.train()
net.aux_logits = False
data, target = Variable(data_), Variable(target_)
for i in range(nDryRuns):
optimizer.zero_grad() # zero the gradient buffers
output = net(data)
if not args.inference:
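# for the UNet models the loss is a scaled sum of the outputs rather than cross-entropy (no classification target applies)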
loss = output.sum() / 1e6 if 'unet' in arch else criterion(output, target)
loss.backward()
optimizer.step() # Does the update
time_fwd, time_bwd, time_upt = 0, 0, 0
for i in range(steps):
optimizer.zero_grad() # zero the gradient buffers
t1 = _time()
output = net(data)
t2 = _time()
if not args.inference:
loss = output.sum() / 1e6 if 'unet' in arch else criterion(output, target)
loss.backward()
t3 = _time()
optimizer.step() # Does the update
t4 = _time()
time_fwd = time_fwd + (t2 - t1)
if args.print_iteration_time:
print("%-30s %d: %10.2f ms" % ('forward iteration', i, (t2-t1)*1000))
if not args.inference:
time_bwd = time_bwd + (t3 - t2)
time_upt = time_upt + (t4 - t3)
time_fwd_avg = time_fwd / steps * 1000
time_bwd_avg = time_bwd / steps * 1000
time_upt_avg = time_upt / steps * 1000
# update not included!
time_total = time_fwd_avg + time_bwd_avg
print("%-30s %10s %10.2f (ms) %10.2f (imgs/s)" % (kernel, ':forward:',
time_fwd_avg, batch_size*1000/time_fwd_avg ))
print("%-30s %10s %10.2f (ms)" % (kernel, ':backward:', time_bwd_avg))
print("%-30s %10s %10.2f (ms)" % (kernel, ':update:', time_upt_avg))
print("%-30s %10s %10.2f (ms) %10.2f (imgs/s)" % (kernel, ':total:',
time_total, batch_size*1000/time_total ))
if __name__ == '__main__':
benchmark()
| 38.216049 | 125 | 0.563237 |
5e63443df00e14c24301191da60e0407fe86bdde | 10,928 | py | Python | detectron2/modeling/meta_arch/rcnn.py | katport/detectron2_fork | bcb21146ae4360543681d1fa3b60820f3a142703 | ["Apache-2.0"] | null | null | null | detectron2/modeling/meta_arch/rcnn.py | katport/detectron2_fork | bcb21146ae4360543681d1fa3b60820f3a142703 | ["Apache-2.0"] | 3 | 2021-06-08T22:00:44.000Z | 2022-01-13T03:01:49.000Z | detectron2/modeling/meta_arch/rcnn.py | katport/detectron2_fork | bcb21146ae4360543681d1fa3b60820f3a142703 | ["Apache-2.0"] | 1 | 2020-08-18T16:44:41.000Z | 2020-08-18T16:44:41.000Z |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import numpy as np
import torch
from torch import nn
from detectron2.structures import ImageList
# from detectron2.utils.events import get_event_storage
from detectron2.utils.logger import log_first_n
from ..backbone import build_backbone
from ..postprocessing import detector_postprocess
from ..proposal_generator import build_proposal_generator
from ..roi_heads import build_roi_heads
from .build import META_ARCH_REGISTRY
__all__ = ["GeneralizedRCNN", "ProposalNetwork"]
@META_ARCH_REGISTRY.register()
class GeneralizedRCNN(nn.Module):
"""
Generalized R-CNN. Any models that contains the following three components:
1. Per-image feature extraction (aka backbone)
2. Region proposal generation
3. Per-region feature extraction and prediction
"""
def __init__(self, cfg):
super().__init__()
self.backbone = build_backbone(cfg)
self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape())
self.roi_heads = build_roi_heads(cfg, self.backbone.output_shape())
self.vis_period = cfg.VIS_PERIOD
self.input_format = cfg.INPUT.FORMAT
assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD)
self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1))
self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1))
@property
def device(self):
return self.pixel_mean.device
def visualize_training(self, batched_inputs, proposals):
"""
A function used to visualize images and proposals. It shows ground truth
bounding boxes on the original image and up to 20 predicted object
proposals on the original image. Users can implement different
visualization functions for different models.
Args:
batched_inputs (list): a list that contains input to the model.
proposals (list): a list that contains predicted proposals. Both
batched_inputs and proposals should have the same length.
"""
from detectron2.utils.visualizer import Visualizer
# storage = get_event_storage()
max_vis_prop = 20
for input, prop in zip(batched_inputs, proposals):
img = input["image"].cpu().numpy()
assert img.shape[0] == 3, "Images should have 3 channels."
if self.input_format == "BGR":
img = img[::-1, :, :]
img = img.transpose(1, 2, 0)
v_gt = Visualizer(img, None)
v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes)
anno_img = v_gt.get_image()
box_size = min(len(prop.proposal_boxes), max_vis_prop)
v_pred = Visualizer(img, None)
v_pred = v_pred.overlay_instances(
boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
)
prop_img = v_pred.get_image()
vis_img = np.concatenate((anno_img, prop_img), axis=1)
vis_img = vis_img.transpose(2, 0, 1)
vis_name = "Left: GT bounding boxes; Right: Predicted proposals"
# storage.put_image(vis_name, vis_img)
break # only visualize one image in a batch
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances (optional): groundtruth :class:`Instances`
* proposals (optional): :class:`Instances`, precomputed proposals.
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "instances" whose value is a :class:`Instances`.
The :class:`Instances` object has the following keys:
"pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints"
"""
if not self.training:
return self.inference(batched_inputs)
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"] for x in batched_inputs] #.to(self.device)
elif "targets" in batched_inputs[0]:
log_first_n(
logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10
)
gt_instances = [x["targets"] for x in batched_inputs] # .to(self.device)
else:
gt_instances = None
print ('grcnn: ', images.tensor)
features = self.backbone(images.tensor)
if self.proposal_generator:
proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
else:
assert "proposals" in batched_inputs[0]
proposals = [x["proposals"] for x in batched_inputs] #.to(self.device)
proposal_losses = {}
_, detector_losses = self.roi_heads(images, features, proposals, gt_instances)
# if self.vis_period > 0:
# storage = get_event_storage()
# if storage.iter % self.vis_period == 0:
# self.visualize_training(batched_inputs, proposals)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses
def inference(self, batched_inputs, detected_instances=None, do_postprocess=True):
"""
Run inference on the given inputs.
Args:
batched_inputs (list[dict]): same as in :meth:`forward`
detected_instances (None or list[Instances]): if not None, it
contains an `Instances` object per image. The `Instances`
object contains "pred_boxes" and "pred_classes" which are
known boxes in the image.
The inference will then skip the detection of bounding boxes,
and only predict other per-ROI outputs.
do_postprocess (bool): whether to apply post-processing on the outputs.
Returns:
same as in :meth:`forward`.
"""
assert not self.training
images = self.preprocess_image(batched_inputs)
features = self.backbone(images.tensor)
if detected_instances is None:
if self.proposal_generator:
proposals, _ = self.proposal_generator(images, features, None)
else:
assert "proposals" in batched_inputs[0]
proposals = [x["proposals"] for x in batched_inputs] #.to(self.device)
results, _ = self.roi_heads(images, features, proposals, None)
else:
detected_instances = [x for x in detected_instances] #.to(self.device)
results = self.roi_heads.forward_with_given_boxes(features, detected_instances)
if do_postprocess:
return GeneralizedRCNN._postprocess(results, batched_inputs, images.image_sizes)
else:
return results
def preprocess_image(self, batched_inputs):
"""
Normalize, pad and batch the input images.
"""
images = [x["image"] for x in batched_inputs] #.to(self.device)
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
return images
@staticmethod
def _postprocess(instances, batched_inputs, image_sizes):
"""
Rescale the output instances to the target size.
"""
# note: private function; subject to changes
processed_results = []
for results_per_image, input_per_image, image_size in zip(
instances, batched_inputs, image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"instances": r})
return processed_results
@META_ARCH_REGISTRY.register()
class ProposalNetwork(nn.Module):
def __init__(self, cfg):
super().__init__()
self.backbone = build_backbone(cfg)
self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape())
self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1))
self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1))
@property
def device(self):
return self.pixel_mean.device
def forward(self, batched_inputs):
"""
Args:
Same as in :class:`GeneralizedRCNN.forward`
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "proposals" whose value is a
:class:`Instances` with keys "proposal_boxes" and "objectness_logits".
"""
images = [x["image"] for x in batched_inputs] #.to(self.device)
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
features = self.backbone(images.tensor)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"] for x in batched_inputs] #.to(self.device)
elif "targets" in batched_inputs[0]:
log_first_n(
logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10
)
gt_instances = [x["targets"] for x in batched_inputs] #.to(self.device)
else:
gt_instances = None
proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
# In training, the proposals are not useful at all but we generate them anyway.
# This makes RPN-only models about 5% slower.
if self.training:
return proposal_losses
processed_results = []
for results_per_image, input_per_image, image_size in zip(
proposals, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"proposals": r})
return processed_results
| 41.869732 | 98 | 0.632778 |
9826fef4c76b1083a2283bda6dd7ee5adf3518ec | 792 | py | Python | mathgenerator/funcs/algebra/compound_interest.py | Sankari-K/mathgenerator | 712c74fbe34fe594c4c0f7e3b3057b01d85112ba | ["MIT"] | 40 | 2020-10-14T17:29:51.000Z | 2020-11-01T04:41:03.000Z | mathgenerator/funcs/algebra/compound_interest.py | Sankari-K/mathgenerator | 712c74fbe34fe594c4c0f7e3b3057b01d85112ba | ["MIT"] | 209 | 2020-10-14T15:32:08.000Z | 2020-11-03T19:08:19.000Z | mathgenerator/funcs/algebra/compound_interest.py | Sankari-K/mathgenerator | 712c74fbe34fe594c4c0f7e3b3057b01d85112ba | ["MIT"] | 179 | 2020-10-14T15:36:55.000Z | 2020-10-29T19:26:16.000Z |
from .__init__ import *
def gen_func(maxPrinciple=10000,
maxRate=10,
maxTime=10,
format='string'):
p = random.randint(1000, maxPrinciple)
r = random.randint(1, maxRate)
n = random.randint(1, maxTime)
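# compound amount: A = P * (1 + r/100)**n, rounded to 2 decimal places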
a = round(p * (1 + r / 100)**n, 2)
if format == 'string':
problem = "Compound interest for a principle amount of " + \
str(p) + " dollars, " + str(r) + \
"% rate of interest and for a time period of " + \
str(n) + " year is = "
return problem, str(a)
elif format == 'latex':
return "Latex unavailable"
else:
return p, r, n, a
compound_interest = Generator(
"Compound Interest", 78, gen_func,
["maxPrinciple=10000", "maxRate=10", "maxTime=10"])
| 28.285714 | 68 | 0.549242 |
d1435ef66cce84ce01356b167a74a3108f29751f | 1,181 | py | Python | packages/pyright-internal/src/tests/samples/loops1.py | lipovsek/pytea | c536515a5e5947fac8871784323ba7eddc58956d | ["MIT"] | null | null | null | packages/pyright-internal/src/tests/samples/loops1.py | lipovsek/pytea | c536515a5e5947fac8871784323ba7eddc58956d | ["MIT"] | null | null | null | packages/pyright-internal/src/tests/samples/loops1.py | lipovsek/pytea | c536515a5e5947fac8871784323ba7eddc58956d | ["MIT"] | null | null | null |
# This sample tests the type checker's ability to handle type
# inferences within loop constructs.
def bar(a: list):
pass
def func1():
data = None
for x in [2, 3]:
if not data:
data = [1, 2]
else:
# This should not generate an error because the
# type checker should be able to determine that
# data must be a list at this point in the code.
bar(data)
else:
# This should generate an error because the
# type checker should be able to determine that
# data must contain None at this point.
bar(data)
x = 20 + 20
def func2():
data = None
while x:
if not data:
data = [1, 2]
else:
# This should not generate an error because the
# type checker should be able to determine that
# data must be a list at this point in the code.
bar(data)
else:
# This should generate an error because the
# type checker should be able to determine that
# data must contain None at this point.
bar(data)
| 25.673913 | 62 | 0.547841 |
54dca9c5e51da97d36c90831dec0705403c2655e | 2,907 | py | Python | row_opt_manual.py | VietHTran/math-checker | 872bf3c172c2aee81c875bf37c55bf618135c3cb | ["MIT"] | null | null | null | row_opt_manual.py | VietHTran/math-checker | 872bf3c172c2aee81c875bf37c55bf618135c3cb | ["MIT"] | null | null | null | row_opt_manual.py | VietHTran/math-checker | 872bf3c172c2aee81c875bf37c55bf618135c3cb | ["MIT"] | null | null | null |
from fractions import Fraction
def printMatrix(m):
for i in range(len(m)):
for j in range(len(m[0])):
print(m[i][j], end="\t")
print()
def getFraction(message):
while True:
rawInp = input(message)
try:
if "/" in rawInp: # Fraction
inp = rawInp.split("/")
num = int(inp[0])
den = int(inp[1])
return Fraction(num, den)
else:
return Fraction(int(rawInp), 1)
except:
print("Error: Invalid Input")
continue
def getRowNum(message, matrix):
length = len(matrix)
while True:
rawInp = input(message)
try:
inp = int(rawInp)
if inp >= 0 and inp <= length:
return inp - 1
else:
print("Error: Index out of range")
continue
except:
print("Error: Invalid Input")
continue
def cloneMatrix(matrix):
clone = []
for row in matrix:
holder = []
for num in row:
holder.append(Fraction(num))
clone.append(holder)
return clone
rows = int(input("Matrix row: "))
columns = int(input("Matrix column: "))
matrix = []
for i in range(rows):
holder = []
for j in range(columns):
holder.append(getFraction("matrix[%d][%d]: " %(i,j)))
matrix.append(holder)
print("Input matrix: ")
printMatrix(matrix)
savedMatrix = [cloneMatrix(matrix)]
ind = 1
opt = ""
while True:
opt = input("Swap[w], Sum[s], Times[t], Undo[u] or Exit[x]: ")
if opt == "w":
row1 = getRowNum("Row 1 (1-based): ", matrix)
row2 = getRowNum("Row 2 (1-based): ", matrix)
matrix[row1], matrix[row2] = matrix[row2], matrix[row1]
ind += 1
savedMatrix.append(cloneMatrix(matrix))
printMatrix(matrix)
elif opt == "s":
x = getFraction("Enter coeff: ")
row1 = getRowNum("Row multiplied (1-based): ", matrix)
row2 = getRowNum("Row changed (1-based): ", matrix)
for i in range(len(matrix[0])):
matrix[row2][i] = matrix[row2][i] + (x * matrix[row1][i])
ind += 1
savedMatrix.append(cloneMatrix(matrix))
printMatrix(matrix)
elif opt == "t":
x = getFraction("Enter coeff: ")
row1 = getRowNum("Row added (1-based): ", matrix)
for i in range(len(matrix[0])):
matrix[row1][i] = x * matrix[row1][i]
ind += 1
savedMatrix.append(cloneMatrix(matrix))
printMatrix(matrix)
elif opt == "u":
if ind == 1:
print("Already at oldest change")
else:
savedMatrix.pop()
ind -= 1
matrix = cloneMatrix(savedMatrix[-1])
printMatrix(matrix)
elif opt == "x":
break
else:
print("Error: Unrecognize command")
| 27.951923 | 69 | 0.519436 |
9864e11ded76f7ff8ed81b9e4adf9fcdd0d1a480 | 120,842 | py | Python | python_etl/CMS_SynPuf_ETL_CDM_v5.py | CPHI-TVHS/ETL-CMS | 51044ceea7ddd14d02275e2796f62f454016e238 | ["Apache-2.0"] | null | null | null | python_etl/CMS_SynPuf_ETL_CDM_v5.py | CPHI-TVHS/ETL-CMS | 51044ceea7ddd14d02275e2796f62f454016e238 | ["Apache-2.0"] | null | null | null | python_etl/CMS_SynPuf_ETL_CDM_v5.py | CPHI-TVHS/ETL-CMS | 51044ceea7ddd14d02275e2796f62f454016e238 | ["Apache-2.0"] | null | null | null |
import csv,os,os.path,sys
from time import strftime
from collections import OrderedDict
import argparse
import dotenv
import math
from constants import OMOP_CONSTANTS, OMOP_MAPPING_RECORD, BENEFICIARY_SUMMARY_RECORD, OMOP_CONCEPT_RECORD, OMOP_CONCEPT_RELATIONSHIP_RECORD
from utility_classes import Table_ID_Values
from beneficiary import Beneficiary
from FileControl import FileControl
from SynPufFiles import PrescriptionDrug, InpatientClaim, OutpatientClaim, CarrierClaim
from datetime import date
import calendar
# ------------------------
# TODO: polish for updating to OHDSI (doc strings, testing, comments, pylint, etc)
#
# ------------------------
# ------------------------
# This python script creates the OMOP CDM v5 tables from the CMS SynPuf (Synthetic Public Use Files).
# ------------------------
#
# Input Required:
# OMOP Vocabulary v5 Concept file. Remember to run: java -jar cpt4.jar (appends CPT4 concepts from concept_cpt4.csv to CONCEPT.csv)
# BASE_OMOP_INPUT_DIRECTORY / CONCEPT.csv
# / CONCEPT_RELATIONSHIP.csv
#
#
# SynPuf data files
# BASE_SYNPUF_INPUT_DIRECTORY
# / DE1_0_2008_Beneficiary_Summary_File_Sample_<sample_number>.csv
# / DE1_0_2009_Beneficiary_Summary_File_Sample_<sample_number>.csv
# / DE1_0_2010_Beneficiary_Summary_File_Sample_<sample_number>.csv
# / DE1_0_2008_to_2010_Carrier_Claims_Sample_<sample_number>_A.csv
# / DE1_0_2008_to_2010_Carrier_Claims_Sample_<sample_number>_B.csv
# / DE1_0_2008_to_2010_Inpatient_Claims_Sample_<sample_number>_B.csv
# / DE1_0_2008_to_2010_Outpatient_Claims_Sample_<sample_number>_B.csv
# / DE1_0_2008_to_2010_Prescription_Drug_Events_Sample_<sample_number>_B.csv
#
# Output Produced:
# Last-used concept_IDs for CDM v5 tables
# BASE_OUTPUT_DIRECTORY / etl_synpuf_last_table_ids.txt
# / npi_provider_id.txt
# / provider_id_care_site.txt
# / location_dictionary.csv
#
# SynPuf Beneficiary Files with year prefix
# BASE_SYNPUF_INPUT_DIRECTORY
# / DE1_0_comb_Beneficiary_Summary_File_Sample_<sample_number>.csv
# / DE1_0_comb_Beneficiary_Summary_File_Sample_<sample_number>.csv.srt
# / DE1_0_2008_to_2010_Carrier_Claims_Sample_<sample_number>.csv.srt
# / DE1_0_2008_to_2010_Inpatient_Claims_Sample_<sample_number>.csv.srt
# / DE1_0_2008_to_2010_Outpatient_Claims_Sample_<sample_number>.csv.srt
# / DE1_0_2008_to_2010_Prescription_Drug_Events_Sample_<sample_number>.csv.srt
#
#
# OMOP CDM v5 Tables
# BASE_OUTPUT_DIRECTORY / care_site_<sample_number>.csv
# / condition_occurrence_<sample_number>.csv
# / death_<sample_number>.csv
# / device_cost_<sample_number>.csv
# / device_exposure_<sample_number>.csv
# / drug_cost_<sample_number>.csv
# / drug_exposure_<sample_number>.csv
# / location_<sample_number>.csv
# / measurement_occurrence_<sample_number>.csv
# / observation_<sample_number>.csv
# / observation_period_<sample_number>.csv
# / payer_plan_period_<sample_number>.csv
# / person_<sample_number>.csv
# / procedure_cost_<sample_number>.csv
# / procedure_occurrence_<sample_number>.csv
# / provider_<sample_number>.csv
# / specimen_<sample_number>.csv
# / visit_cost_<sample_number>.csv
# / visit_occurrence_<sample_number>.csv
#
#
# ** Various debug and log files
#
# ------------------------
# ------------------------
# 2015-02-05 C. Dougherty Created
#
# 2016-06-17 Christophe Lambert, Praveen Kumar, Amritansh -- University of New Mexico -- Major overhaul
# ------------------------
dotenv.load_dotenv(".env")
# -----------------------------------
# - Configuration
# -----------------------------------
# ---------------------------------
# Edit your .env file to change which directories to use in the ETL process
# Path to the directory where control files should be saved (input/output
BASE_ETL_CONTROL_DIRECTORY = os.environ['BASE_ETL_CONTROL_DIRECTORY']
# Path to the directory containing the downloaded SynPUF files
BASE_SYNPUF_INPUT_DIRECTORY = os.environ['BASE_SYNPUF_INPUT_DIRECTORY']
# Path to the directory containing the OMOP Vocabulary v5 files (can be downloaded from http://www.ohdsi.org/web/athena/)
BASE_OMOP_INPUT_DIRECTORY = os.environ['BASE_OMOP_INPUT_DIRECTORY']
# Path to the directory where CDM-compatible CSV files should be saved
BASE_OUTPUT_DIRECTORY = os.environ['BASE_OUTPUT_DIRECTORY']
# SynPUF dir format. I've seen DE1_{0} and DE_{0} as different prefixes for the name of the directory containing a slice of SynPUF data
SYNPUF_DIR_FORMAT = os.environ['SYNPUF_DIR_FORMAT']
DESTINATION_FILE_DRUG = 'drug'
DESTINATION_FILE_CONDITION = 'condition'
DESTINATION_FILE_PROCEDURE = 'procedure'
DESTINATION_FILE_OBSERVATION = 'observation'
DESTINATION_FILE_MEASUREMENT = 'measurement'
DESTINATION_FILE_DEVICE = 'device'
DESTINATION_FILE_VISIT = 'visit'
class SourceCodeConcept(object):
def __init__(self, source_concept_code, source_concept_id, target_concept_id, destination_file):
self.source_concept_code = source_concept_code
self.source_concept_id = source_concept_id
self.target_concept_id = target_concept_id
self.destination_file = destination_file
# -----------------------------------
# Globals
# -----------------------------------
file_control = None
table_ids = None
source_code_concept_dict = {} # stores source and target concept ids + destination file
concept_relationship_dict = {} # stores the source concept id and its mapped target concept id
person_location_dict = {} # stores location_id for a given state + county
current_stats_filename = ''
#This was used to detect death via ICD9 codes, but since death information is
#listed in the beneficiary file, we will not use. Plus this isn't even a complete list
#icd9_codes_death = ['761.6', '798', '798.0', '798.1', '798.2','798.9', '799.9', 'E913.0','E913.1','E913.2','E913.3','E913.8','E913.9', 'E978']
provider_id_care_site_id = {} # stores care site id for a provider_num(institution)
visit_id_list = set() # stores unique visit ids written to visit occurrence file
visit_occurrence_ids = OrderedDict() # stores visit ids generated by determine_visits function
npi_provider_id = {} # stores provider id for an npi
#-------------------------------------------------------------------------------
# SSA codes for Puerto Rico('40') and Virgin Islands ('48') have not been added
# to the following dictionary. SSA code '54' is for others where others=
# PUERTO RICO, VIRGIN ISLANDS, AFRICA, ASIA OR CALIFORNIA; INSTITUTIONAL PROVIDER
# OF SERVICES (IPS) ONLY, CANADA & ISLANDS, CENTRAL AMERICA AND WEST INDIES,
# EUROPE, MEXICO, OCEANIA, PHILIPPINES, SOUTH AMERICA, U.S. POSSESSIONS, AMERICAN
# SAMOA, GUAM, SAIPAN OR NORTHERN MARIANAS, TEXAS; INSTITUTIONAL PROVIDER OF SERVICES
# (IPS) ONLY, NORTHERN MARIANAS, GUAM, UNKNOWN.
#-------------------------------------------------------------------------------
SSA_state_codes = {
'01':'AL',
'02':'AK',
'03':'AZ',
'04':'AR',
'05':'CA',
'06':'CO',
'07':'CT',
'08':'DE',
'09':'DC',
'10':'FL',
'11':'GA',
'12':'HI',
'13':'ID',
'14':'IL',
'15':'IN',
'16':'IA',
'17':'KS',
'18':'KY',
'19':'LA',
'20':'ME',
'21':'MD',
'22':'MA',
'23':'MI',
'24':'MN',
'25':'MS',
'26':'MO',
'27':'MT',
'28':'NE',
'29':'NV',
'30':'NH',
'31':'NJ',
'32':'NM',
'33':'NY',
'34':'NC',
'35':'ND',
'36':'OH',
'37':'OK',
'38':'OR',
'39':'PA',
'41':'RI',
'42':'SC',
'43':'SD',
'44':'TN',
'45':'TX',
'46':'UT',
'47':'VT',
'49':'VA',
'50':'WA',
'51':'WV',
'52':'WI',
'53':'WY',
'54':'54'}
domain_destination_file_list = {
'Condition' : DESTINATION_FILE_CONDITION,
'Condition/Meas' : DESTINATION_FILE_MEASUREMENT,
'Condition/Obs' : DESTINATION_FILE_OBSERVATION,
'Condition/Procedure' : DESTINATION_FILE_PROCEDURE,
'Device' : DESTINATION_FILE_DEVICE,
'Device/Obs' : DESTINATION_FILE_OBSERVATION,
'Device/Procedure' : DESTINATION_FILE_PROCEDURE,
'Drug' : DESTINATION_FILE_DRUG,
'Measurement' : DESTINATION_FILE_MEASUREMENT,
'Meas/Procedure' : DESTINATION_FILE_PROCEDURE,
'Obs/Procedure' : DESTINATION_FILE_PROCEDURE,
'Observation' : DESTINATION_FILE_OBSERVATION,
'Procedure' : DESTINATION_FILE_PROCEDURE,
'Visit' : DESTINATION_FILE_VISIT
}
# -----------------------------------
# get timestamp
# -----------------------------------
def get_timestamp():
return strftime("%Y-%m-%d %H:%M:%S")
# -----------------------------------
# TODO: use standard python logger...
# -----------------------------------
def log_stats(msg):
print msg
global current_stats_filename
with open(current_stats_filename,'a') as fout:
fout.write('[{0}]{1}\n'.format(get_timestamp(),msg))
# -----------------------------------
# format date in YYYYMMDD
# -----------------------------------
def get_date_YYYY_MM_DD(date_YYYYMMDD):
if len(date_YYYYMMDD) == 0:
return ''
return '{0}-{1}-{2}'.format(date_YYYYMMDD[0:4], date_YYYYMMDD[4:6], date_YYYYMMDD[6:8])
# -----------------------------------------------------------------------------------------------------
# Each provider_num (institution) has a unique care_site_id. It is generated by the following code by
# adding 1 to previous care_site_id.
# -------------------------------------------------------------------------------------------------------
def get_CareSite(provider_num):
global table_ids
if provider_num not in provider_id_care_site_id:
provider_id_care_site_id[provider_num] = [table_ids.last_care_site_id,0]
table_ids.last_care_site_id += 1
return provider_id_care_site_id[provider_num][0]
# -------------------------------------------------------------------------
# A unique provider_id for each npi is generated by adding 1 to the previous provider_id
# --------------------------------------------------------------------------
def get_Provider(npi):
global table_ids
if npi not in npi_provider_id:
npi_provider_id[npi] = [table_ids.last_provider_id,0]
table_ids.last_provider_id += 1
return npi_provider_id[npi][0]
# --------------------------------------------------------------------------------------------------
# A unique location id for each unique combination of state+county is generated by adding 1 to
# the previous location id
# ------------------------------------------------------------------------------------------------
def get_location_id(state_county):
global table_ids
if state_county not in person_location_dict:
person_location_dict[state_county] = [table_ids.last_location_id,0]
table_ids.last_location_id += 1
return person_location_dict[state_county][0]
# -----------------------------------
# This function produces dictionaries that give mappings between SynPUF codes and OMOP concept_ids
# -----------------------------------
def build_maps():
log_stats('-'*80)
log_stats('build_maps starting...')
#--------------------------------------------------------------------------------------
# load existing person_location_dict. v5
# It populates the dictionary with the existing data so that the subsequent run of this
# program doesn't generate the duplicate location_id.
#--------------------------------------------------------------------------------------
recs_in = 0
global table_ids
global person_location_dict
location_dict_file = os.path.join(BASE_ETL_CONTROL_DIRECTORY,"location_dictionary.txt")
if os.path.exists(location_dict_file):
log_stats('reading existing location_dict_file ->' + location_dict_file)
with open(location_dict_file,'r') as fin:
for rec in fin:
recs_in += 1
flds = (rec[:-1]).split('\t')
if len(flds) == 2:
state_county = flds[0]
location_id = flds[1]
location_id = location_id.lstrip('[').rstrip(']').split(',') #convert string to list as the file data is string
location_id = [int(location_id[0]), int(location_id[1])] # convert the data in the list to integer
person_location_dict[state_county] = location_id
log_stats('done, recs_in={0}, len person_location_dict={1}'.format(recs_in, len(person_location_dict)))
else:
log_stats('No existing location_dict_file found (looked for ->' + location_dict_file + ')')
#----------------
# load existing provider_id_care_site_id.
# It populates the dictionary with the existing data so that the subsequent run of this
# program doesn't generate the duplicate care_site_id.
#----------------
recs_in = 0
global table_ids
global provider_id_care_site_id
provider_id_care_site_file = os.path.join(BASE_ETL_CONTROL_DIRECTORY,'provider_id_care_site.txt')
if os.path.exists(provider_id_care_site_file):
log_stats('reading existing provider_id_care_site_file ->' + provider_id_care_site_file)
with open(provider_id_care_site_file,'r') as fin:
for rec in fin:
recs_in += 1
flds = (rec[:-1]).split('\t')
if len(flds) == 2:
provider_num = flds[0]
care_site_id = flds[1]
care_site_id = care_site_id.lstrip('[').rstrip(']').split(',') #convert string to list as the file data is string
care_site_id = [int(care_site_id[0]), int(care_site_id[1])] # convert the data in the list to integer
provider_id_care_site_id[provider_num] = care_site_id
log_stats('done, recs_in={0}, len provider_id_care_site_id={1}'.format(recs_in, len(provider_id_care_site_id)))
else:
log_stats('No existing provider_id_care_site_file found (looked for ->' + provider_id_care_site_file + ')')
#----------------
# load existing npi_provider_id
# It populates the dictionary with the existing data so that the subsequent run of this
# program doesn't generate the duplicate provider_id.
#----------------
recs_in = 0
global npi_provider_id
npi_provider_id_file = os.path.join(BASE_ETL_CONTROL_DIRECTORY,'npi_provider_id.txt')
if os.path.exists(npi_provider_id_file):
log_stats('reading existing npi_provider_id_file ->' + npi_provider_id_file)
with open(npi_provider_id_file,'r') as fin:
for rec in fin:
recs_in += 1
flds = (rec[:-1]).split('\t')
if len(flds) == 2:
npi = flds[0]
provider_id = flds[1]
provider_id = provider_id.lstrip('[').rstrip(']').split(',') #convert string to list as the file data is string
provider_id = [int(provider_id[0]), int(provider_id[1])] # convert the data in the list to integer
npi_provider_id[npi] = provider_id
log_stats('done, recs_in={0}, len npi_provider_id={1}'.format(recs_in, len(npi_provider_id_file)))
else:
log_stats('No existing npi_provider_id_file found (looked for ->' + npi_provider_id_file + ')')
#----------------
# Load the OMOP v5 Concept file to build the source code to conceptID xref.
# NOTE: This version of the flat file had embedded newlines. This code handles merging the split
# records. This may not be needed when the final OMOP v5 Concept file is produced.
#----------------
omop_concept_relationship_debug_file = os.path.join(BASE_OUTPUT_DIRECTORY,'concept_relationship_debug_log.txt')
omop_concept_relationship_file = os.path.join(BASE_OMOP_INPUT_DIRECTORY,'CONCEPT_RELATIONSHIP.csv')
omop_concept_debug_file = os.path.join(BASE_OUTPUT_DIRECTORY,'concept_debug_log.txt')
omop_concept_file = os.path.join(BASE_OMOP_INPUT_DIRECTORY,'CONCEPT.csv')
recs_in = 0
recs_skipped = 0
log_stats('Reading omop_concept_relationship_file -> ' + omop_concept_relationship_file)
log_stats('Writing to log file -> ' + omop_concept_relationship_debug_file)
with open(omop_concept_relationship_file,'r') as fin, \
open(omop_concept_relationship_debug_file, 'w') as fout_log:
fin.readline() #skip header
for rec in fin:
recs_in += 1
if recs_in % 100000 == 0: print 'omop concept relationship recs=',recs_in
flds = (rec[:-1]).split('\t')
if len(flds) == OMOP_CONCEPT_RELATIONSHIP_RECORD.fieldCount:
concept_id1 = flds[OMOP_CONCEPT_RELATIONSHIP_RECORD.CONCEPT_ID_1]
concept_id2 = flds[OMOP_CONCEPT_RELATIONSHIP_RECORD.CONCEPT_ID_2]
relationship_id = flds[OMOP_CONCEPT_RELATIONSHIP_RECORD.RELATIONSHIP_ID]
invalid_reason = flds[OMOP_CONCEPT_RELATIONSHIP_RECORD.INVALID_REASON]
if concept_id1 != '' and concept_id2 != '' and relationship_id == "Maps to" and invalid_reason == '':
if concept_relationship_dict.has_key(concept_id1): # one concept id might have several mapping, so values are stored as list
concept_relationship_dict[concept_id1].append(concept_id2)
else:
concept_relationship_dict[concept_id1] = [concept_id2]
else:
recs_skipped = recs_skipped + 1
log_stats('Done, omop concept relationship recs_in = ' + str(recs_in))
log_stats('recs_skipped = ' + str(recs_skipped))
log_stats('len concept_relationship_dict = ' + str(len(concept_relationship_dict)))
recs_in = 0
recs_skipped = 0
merged_recs=0
recs_checked=0
#TODO: there is an overlap of 41 2-character codes that are the same between CPT4 and HCPCS,
#but map to different OMOP concepts. Need to determine which should prevail. Whichever prevails should call one of the next 2 code blocks first.
log_stats('Reading omop_concept_file -> ' + omop_concept_file)
log_stats('Writing to log file -> ' + omop_concept_debug_file)
#First pass to obtain domain ids of concepts
domain_dict = {}
with open(omop_concept_file,'r') as fin:
fin.readline()
for rec in fin:
flds = (rec[:-1]).split('\t')
if len(flds) == OMOP_CONCEPT_RECORD.fieldCount:
concept_id = flds[OMOP_CONCEPT_RECORD.CONCEPT_ID]
domain_id = flds[OMOP_CONCEPT_RECORD.DOMAIN_ID]
domain_dict[concept_id] = domain_id
print "loaded domain dict with this many records: ", len(domain_dict)
with open(omop_concept_file,'r') as fin, \
open(omop_concept_debug_file, 'w') as fout_log:
# open(omop_concept_file_mini, 'w') as fout_mini:
fin.readline() #skip header
for rec in fin:
recs_in += 1
if recs_in % 100000 == 0: print 'omop concept recs=',recs_in
flds = (rec[:-1]).split('\t')
if len(flds) == OMOP_CONCEPT_RECORD.fieldCount:
concept_id = flds[OMOP_CONCEPT_RECORD.CONCEPT_ID]
concept_code = original_concept_code = flds[OMOP_CONCEPT_RECORD.CONCEPT_CODE].replace(".","")
vocabulary_id = flds[OMOP_CONCEPT_RECORD.VOCABULARY_ID]
if vocabulary_id == OMOP_CONSTANTS.CPT4_VOCABULARY_ID:
vocabulary_id = OMOP_CONSTANTS.HCPCS_VOCABULARY_ID
if(vocabulary_id in [OMOP_CONSTANTS.ICD_9_DIAGNOSIS_VOCAB_ID,OMOP_CONSTANTS.ICD_9_PROCEDURES_VOCAB_ID]):
vocabulary_id = OMOP_CONSTANTS.ICD_9_VOCAB_ID
domain_id = flds[OMOP_CONCEPT_RECORD.DOMAIN_ID]
invalid_reason = flds[OMOP_CONCEPT_RECORD.INVALID_REASON]
status = ''
if concept_id != '':
if vocabulary_id in [OMOP_CONSTANTS.ICD_9_VOCAB_ID,
OMOP_CONSTANTS.HCPCS_VOCABULARY_ID,
OMOP_CONSTANTS.NDC_VOCABULARY_ID]:
recs_checked += 1
if not concept_relationship_dict.has_key(concept_id):
destination_file = domain_destination_file_list[domain_id]
if( vocabulary_id == OMOP_CONSTANTS.ICD_9_VOCAB_ID):
status = "No map from ICD9 code, or code invalid for " + concept_id
recs_skipped += 1
if( vocabulary_id == OMOP_CONSTANTS.HCPCS_VOCABULARY_ID):
status = "No self map from OMOP (HCPCS/CPT4) to OMOP (HCPCS/CPT4) or code invalid for " + concept_id
recs_skipped += 1
if( vocabulary_id == OMOP_CONSTANTS.NDC_VOCABULARY_ID):
status = "No map from OMOP (NCD) to OMOP (RxNorm) or code invalid for " + concept_id
recs_skipped += 1
source_code_concept_dict[vocabulary_id,concept_code] = [SourceCodeConcept(concept_code, concept_id, "0", destination_file)]
else:
source_code_concept_dict[vocabulary_id,concept_code] = []
for concept in concept_relationship_dict[concept_id]:
destination_file = domain_destination_file_list[domain_dict[concept]]
source_code_concept_dict[vocabulary_id,concept_code].append(SourceCodeConcept(concept_code, concept_id, concept, destination_file))
if status != '':
fout_log.write(status + ': \t')
# for fld in line: fout_log.write(fld + '\t')
fout_log.write(rec + '\n')
log_stats('Done, omop concept recs_in = ' + str(recs_in))
log_stats('recs_checked = ' + str(recs_checked))
log_stats('recs_skipped = ' + str(recs_skipped))
log_stats('merged_recs = ' + str(merged_recs))
log_stats('len source_code_concept_dict = ' + str(len(source_code_concept_dict)))
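# Shape of source_code_concept_dict (illustrative example, concept ids hypothetical):
#   source_code_concept_dict[OMOP_CONSTANTS.ICD_9_VOCAB_ID, '25000'] ->
#       [SourceCodeConcept('25000', <source_concept_id>, <mapped_standard_concept_id>, DESTINATION_FILE_CONDITION)]
# Codes with no "Maps to" relationship get a single entry with a target concept id of "0";
# codes that map to several standard concepts get one entry per mapping.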
#---------------------------
# -----------------------------------
# write the provider_num (institution) + care_site_id to the provider_id_care_site.txt file.
# write the npi + provider_id to the npi_provider_id.txt file.
# The data from these two files is loaded into dictionaries before the input records are
# processed, so that duplicate records are not written to the care_site and provider files.
# -----------------------------------
def persist_lookup_tables():
recs_out = 0
location_dict_file = os.path.join(BASE_ETL_CONTROL_DIRECTORY,'location_dictionary.txt')
log_stats('writing location_dict_file ->' + location_dict_file)
with open(location_dict_file,'w') as fout:
for state_county, location_id in person_location_dict.items():
fout.write('{0}\t{1}\n'.format(state_county, location_id))
recs_out += 1
log_stats('done, recs_out={0}, len person_location_dict={1}'.format(recs_out, len(person_location_dict)))
recs_out = 0
provider_id_care_site_file = os.path.join(BASE_ETL_CONTROL_DIRECTORY,'provider_id_care_site.txt')
log_stats('writing provider_id_care_site_file ->' + provider_id_care_site_file)
with open(provider_id_care_site_file,'w') as fout:
for provider_num, care_site_id in provider_id_care_site_id.items():
fout.write('{0}\t{1}\n'.format(provider_num, care_site_id))
recs_out += 1
log_stats('done, recs_out={0}, len provider_id_care_site_id={1}'.format(recs_out, len(provider_id_care_site_id)))
recs_out = 0
npi_provider_id_file = os.path.join(BASE_ETL_CONTROL_DIRECTORY,'npi_provider_id.txt')
log_stats('writing npi_provider_id_file ->' + npi_provider_id_file)
with open(npi_provider_id_file,'w') as fout:
for npi, provider_id in npi_provider_id.items():
fout.write('{0}\t{1}\n'.format(npi, provider_id))
recs_out += 1
log_stats('done, recs_out={0}, len npi_provider_id={1}'.format(recs_out, len(npi_provider_id)))
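# Note: these files are rewritten in full on every run (mode 'w', iterating the whole
# dictionaries), and the list values are written via their default str() form, e.g.
# "1234567893\t[5678, 1]" (hypothetical ids) -- matching the bracket-stripping parse used
# when they are loaded back in.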
# ------------------------------------------------------------------------------------------------------------------------
# Logic to determine visits. visit_dates is used to determine the start and end dates of the observation period for a beneficiary.
# visit_occurrence_ids keeps track of unique visits.
# -------------------------------------------------------------------------------------------------------------------------
def determine_visits(bene):
# each unique date gets a visit id
visit_id = table_ids.last_visit_occurrence_id
#For death records just track dates for purpose of observation_period
yd = bene.LatestYearData()
if yd is not None and yd.BENE_DEATH_DT != '':
bene.visit_dates[yd.BENE_DEATH_DT] = visit_id
#For prescription records just track dates for purpose of observation_period
for raw_rec in bene.prescription_records:
rec = PrescriptionDrug(raw_rec)
if rec.SRVC_DT == '':
continue
bene.visit_dates[rec.SRVC_DT] = visit_id
#For inpatient records, if same patient, same date range, and same provider institution number, is same visit
for raw_rec in bene.inpatient_records:
rec = InpatientClaim(raw_rec)
if rec.CLM_FROM_DT == '':
continue
if not visit_occurrence_ids.has_key((rec.DESYNPUF_ID,rec.CLM_FROM_DT,rec.CLM_THRU_DT,rec.PRVDR_NUM)):
bene.visit_dates[rec.CLM_FROM_DT] = visit_id
bene.visit_dates[rec.CLM_THRU_DT] = visit_id
visit_occurrence_ids[rec.DESYNPUF_ID,rec.CLM_FROM_DT,rec.CLM_THRU_DT,rec.PRVDR_NUM] = visit_id
visit_id+=1
#For outpatient records, if same patient, same date range, and same provider institution number, is same visit
for raw_rec in bene.outpatient_records:
rec = OutpatientClaim(raw_rec)
if rec.CLM_FROM_DT == '':
continue
if not visit_occurrence_ids.has_key((rec.DESYNPUF_ID,rec.CLM_FROM_DT,rec.CLM_THRU_DT,rec.PRVDR_NUM)):
bene.visit_dates[rec.CLM_FROM_DT] = visit_id
bene.visit_dates[rec.CLM_THRU_DT] = visit_id
visit_occurrence_ids[rec.DESYNPUF_ID,rec.CLM_FROM_DT,rec.CLM_THRU_DT,rec.PRVDR_NUM] = visit_id
visit_id+=1
#For carrier claims, if same patient, same date range, and same institution tax number, is same visit
for raw_rec in bene.carrier_records:
rec = CarrierClaim(raw_rec)
if rec.CLM_FROM_DT == '':
continue
if not visit_occurrence_ids.has_key((rec.DESYNPUF_ID,rec.CLM_FROM_DT,rec.CLM_THRU_DT,rec.TAX_NUM)):
bene.visit_dates[rec.CLM_FROM_DT] = visit_id
bene.visit_dates[rec.CLM_THRU_DT] = visit_id
visit_occurrence_ids[rec.DESYNPUF_ID,rec.CLM_FROM_DT,rec.CLM_THRU_DT,rec.TAX_NUM] = visit_id
visit_id+=1
table_ids.last_visit_occurrence_id = visit_id #store the last_visit_occurrence_id
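# Illustrative example of the visit bookkeeping above (hypothetical values):
#   visit_occurrence_ids['00013D2EFD8E45D1', '20080701', '20080704', '390406'] = 12345
#   bene.visit_dates['20080701'] = 12345 ; bene.visit_dates['20080704'] = 12345
# Institutional claims are keyed on (person, from date, thru date, PRVDR_NUM) and carrier
# claims on (person, from date, thru date, TAX_NUM); claim, death and prescription dates are
# also recorded in visit_dates so the observation period can span their min and max.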
# -----------------------------------
# CDM v5 Person - Write person records
# -----------------------------------
def write_person_record(beneficiary):
person_fd = file_control.get_Descriptor('person')
yd = beneficiary.LatestYearData()
if yd is None: return
person_fd.write('{0},'.format(beneficiary.person_id)) # person_id
if int(yd.BENE_SEX_IDENT_CD) == 1: # gender_concept_id
person_fd.write('{0},'.format(OMOP_CONSTANTS.GENDER_MALE))
elif int(yd.BENE_SEX_IDENT_CD) == 2:
person_fd.write('{0},'.format(OMOP_CONSTANTS.GENDER_FEMALE))
else:
person_fd.write('0,')
person_fd.write('{0},'.format(yd.BENE_BIRTH_DT[0:4])) # year_of_birth
person_fd.write('{0},'.format(yd.BENE_BIRTH_DT[4:6])) # month_of_birth
person_fd.write('{0},'.format(yd.BENE_BIRTH_DT[6:8])) # day_of_birth
person_fd.write(',') # time_of_birth
#print ("yd.BENE_RACE_CD: " + str(yd.BENE_RACE_CD))
if int(yd.BENE_RACE_CD) == 1: #White # race_concept_id and ethnicity_concept_id
person_fd.write('{0},'.format(OMOP_CONSTANTS.RACE_WHITE))
person_fd.write('{0},'.format(OMOP_CONSTANTS.ETHNICITY_NON_HISPANIC))
elif int(yd.BENE_RACE_CD) == 2: #Black
person_fd.write('{0},'.format(OMOP_CONSTANTS.RACE_BLACK))
person_fd.write('{0},'.format(OMOP_CONSTANTS.ETHNICITY_NON_HISPANIC))
elif int(yd.BENE_RACE_CD) == 3: #Others
person_fd.write('{0},'.format(OMOP_CONSTANTS.RACE_OTHER))
person_fd.write('{0},'.format(OMOP_CONSTANTS.ETHNICITY_NON_HISPANIC))
elif int(yd.BENE_RACE_CD) == 5: #Hispanic
person_fd.write('{0},'.format(OMOP_CONSTANTS.RACE_NON_WHITE))
person_fd.write('{0},'.format(OMOP_CONSTANTS.ETHNICITY_HISPANIC))
else:
person_fd.write('0,')
person_fd.write('0,')
#write person records to the person file
state_county = str(beneficiary.SP_STATE_CODE) + '-' + str(beneficiary.BENE_COUNTY_CD)
current_location_id = get_location_id(state_county) # get the location id for the given pair of state & county
person_fd.write('{0},'.format(current_location_id)) # location_id
person_fd.write(',') # provider_id
person_fd.write(',') # care_site_id
person_fd.write('{0},'.format(beneficiary.DESYNPUF_ID)) # person_source_value
person_fd.write('{0},'.format(yd.BENE_SEX_IDENT_CD)) # gender_source_value
person_fd.write(',') # gender_source_concept_id
person_fd.write('{0},'.format(yd.BENE_RACE_CD)) # race_source_value
person_fd.write(',') # race_source_concept_id
person_fd.write('{0},'.format(yd.BENE_RACE_CD)) # ethnicity_source_value
#person_fd.write('') # ethnicity_source_concept_id
person_fd.write('\n')
person_fd.increment_recs_written(1)
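# Illustrative example of the birth-date slicing above: BENE_BIRTH_DT is stored as 'YYYYMMDD',
# so a (hypothetical) value of '19230501' yields year '1923', month '05' and day '01'.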
# ----------------------------------------------------
# Write payer plan period records for each beneficiary
# ----------------------------------------------------
def write_payer_plan_period_record(beneficiary):
payer_plan_period_fd = file_control.get_Descriptor('payer_plan_period')
plan_source_value_list = ["Medicare Part A", "Medicare Part B", "HMO", "Medicare Part D"]
ppyd = beneficiary.PayerPlanPerioYearDict() # for all 3 years, get the number of months for each plan
if not bool(ppyd):
return # dictionary is empty
else:
'''
for k,v in ppyd.iteritems():
if k[1] == 'BENE_HI_CVRAGE_TOT_MONS': #plan A
planA[k[0]] = v
if k[1] == 'BENE_SMI_CVRAGE_TOT_MONS': #plan B
planB[k[0]] = v
if k[1] == 'BENE_HMO_CVRAGE_TOT_MONS': #HMO
hmo[k[0]] = v
if k[1] == 'PLAN_CVRG_MOS_NUM': #plan D
planD[k[0]] = v
'''
for plan_source_value in plan_source_value_list:
if plan_source_value == "Medicare Part A":
nd = {k[0]:v for k,v in ppyd.iteritems() if k[1] == 'BENE_HI_CVRAGE_TOT_MONS'} # new dictionary keyed by year; value = months of coverage for that plan
payer_plan_period_dates = get_payer_plan_period_date_list(nd)
for i in range(len(payer_plan_period_dates)):
payer_plan_period_start_date = payer_plan_period_dates[i][0]
payer_plan_period_end_date = payer_plan_period_dates[i][1]
plan_source_value = "Medicare Part A"
write_to_payer_plan_period_file(payer_plan_period_fd, beneficiary.person_id, payer_plan_period_start_date, payer_plan_period_end_date, plan_source_value)
elif plan_source_value == "Medicare Part B":
nd = {k[0]:v for k,v in ppyd.iteritems() if k[1] == 'BENE_SMI_CVRAGE_TOT_MONS'} # new dictionary keyed by year; value = months of coverage for that plan
payer_plan_period_dates = get_payer_plan_period_date_list(nd)
for i in range(len(payer_plan_period_dates)):
payer_plan_period_start_date = payer_plan_period_dates[i][0]
payer_plan_period_end_date = payer_plan_period_dates[i][1]
plan_source_value = "Medicare Part B"
write_to_payer_plan_period_file(payer_plan_period_fd, beneficiary.person_id, payer_plan_period_start_date, payer_plan_period_end_date, plan_source_value)
elif plan_source_value == "Medicare Part D":
nd = {k[0]:v for k,v in ppyd.iteritems() if k[1] == 'PLAN_CVRG_MOS_NUM'} # new dictionary keyed by year; value = months of coverage for that plan
payer_plan_period_dates = get_payer_plan_period_date_list(nd)
for i in range(len(payer_plan_period_dates)):
payer_plan_period_start_date = payer_plan_period_dates[i][0]
payer_plan_period_end_date = payer_plan_period_dates[i][1]
plan_source_value = "Medicare Part D"
write_to_payer_plan_period_file(payer_plan_period_fd, beneficiary.person_id, payer_plan_period_start_date, payer_plan_period_end_date, plan_source_value)
elif plan_source_value == "HMO":
nd = {k[0]:v for k,v in ppyd.iteritems() if k[1] == 'BENE_HMO_CVRAGE_TOT_MONS'} # new dictionary keyed by year; value = months of coverage for that plan
payer_plan_period_dates = get_payer_plan_period_date_list(nd)
for i in range(len(payer_plan_period_dates)):
payer_plan_period_start_date = payer_plan_period_dates[i][0]
payer_plan_period_end_date = payer_plan_period_dates[i][1]
plan_source_value = "HMO"
write_to_payer_plan_period_file(payer_plan_period_fd, beneficiary.person_id, payer_plan_period_start_date, payer_plan_period_end_date, plan_source_value)
#------------------------------------------------------
# write payer plan period data to the file
#--------------------------------------------------------
def write_to_payer_plan_period_file(payer_plan_period_fd, person_id, payer_plan_period_start_date, payer_plan_period_end_date, plan_source_value):
payer_plan_period_fd.write('{0},'.format(table_ids.last_payer_plan_period_id)) # payer_plan_period_id
payer_plan_period_fd.write('{0},'.format(person_id)) # person_id
payer_plan_period_fd.write('{0},'.format(payer_plan_period_start_date)) # payer_plan_period_start_date
payer_plan_period_fd.write('{0},'.format(payer_plan_period_end_date)) # payer_plan_period_end_date
payer_plan_period_fd.write(',') # payer_source_value
payer_plan_period_fd.write('{0},'.format(plan_source_value)) # plan_source_value
payer_plan_period_fd.write('') # family_source_value
payer_plan_period_fd.write('\n')
payer_plan_period_fd.increment_recs_written(1)
table_ids.last_payer_plan_period_id += 1
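# Illustrative output row (hypothetical ids): a Part A period covering all of 2008-2010 becomes
#   1,42,2008-01-01,2010-12-31,,Medicare Part A,
# with payer_source_value and family_source_value left empty.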
#----------------------------------------------------------------
# generate the list of payer_plan_period start date and end date.
# date_list will be in this format date_list = [(d1,d2),(d1,d2)]
#-----------------------------------------------------------------
def get_payer_plan_period_date_list(plan):
date_list = []
# check if any year is missing. If yes, add that year with 0 months to prevent a KeyError at runtime.
for year in ['2008','2009','2010']:
if year not in plan:
plan[year] = 0
# determine the start and end date for payer plan period
if plan['2008'] == 12 and plan['2009'] == 12 and plan['2010'] == 12:
payer_plan_period_start_date = '2008-01-01'
payer_plan_period_end_date = '2010-12-31'
date_list.append((payer_plan_period_start_date, payer_plan_period_end_date))
elif plan['2008'] == 12 and plan['2009'] == 12 and plan['2010'] < 12:
payer_plan_period_start_date = '2008-01-01'
payer_plan_period_end_date = get_payer_plan_period_date(date(2009,12,31), plan['2010'])
date_list.append((payer_plan_period_start_date, payer_plan_period_end_date))
elif plan['2008'] == 12 and plan['2009'] < 12 and plan['2010'] == 12:
payer_plan_period_start_date = '2008-01-01'
payer_plan_period_end_date = '2008-12-31'
date_list.append((payer_plan_period_start_date, payer_plan_period_end_date))
if plan['2009'] > 0:
payer_plan_period_start_date = '2009-01-01'
payer_plan_period_end_date = get_payer_plan_period_date(date(2009,01,01), plan['2009'])
date_list.append((payer_plan_period_start_date, payer_plan_period_end_date))
payer_plan_period_start_date = '2010-01-01'
payer_plan_period_end_date = '2010-12-31'
date_list.append((payer_plan_period_start_date, payer_plan_period_end_date))
elif plan['2008'] == 12 and plan['2009'] < 12 and plan['2010'] < 12:
payer_plan_period_start_date = '2008-01-01'
payer_plan_period_end_date = '2008-12-31'
date_list.append((payer_plan_period_start_date, payer_plan_period_end_date))
if plan['2009'] > 0:
payer_plan_period_start_date = '2009-01-01'
payer_plan_period_end_date = get_payer_plan_period_date(date(2009,01,01), plan['2009'])
date_list.append((payer_plan_period_start_date, payer_plan_period_end_date))
if plan['2010'] > 0:
payer_plan_period_start_date = '2010-01-01'
payer_plan_period_end_date = get_payer_plan_period_date(date(2010,01,01), plan['2010'])
date_list.append((payer_plan_period_start_date, payer_plan_period_end_date))
elif plan['2008'] < 12 and plan['2009'] == 12 and plan['2010'] == 12:
if plan['2008'] == 0:
payer_plan_period_start_date = '2009-01-01'
else:
payer_plan_period_start_date = get_payer_plan_period_date(date(2008,12,31), -1*plan['2008'])
payer_plan_period_end_date = '2010-12-31'
date_list.append((payer_plan_period_start_date, payer_plan_period_end_date))
elif plan['2008'] < 12 and plan['2009'] == 12 and plan['2010'] < 12:
if plan['2008'] == 0:
payer_plan_period_start_date = '2009-01-01'
else:
payer_plan_period_start_date = get_payer_plan_period_date(date(2008,12,31), -1*plan['2008'])
payer_plan_period_end_date = get_payer_plan_period_date(date(2009,12,31), plan['2010'])
date_list.append((payer_plan_period_start_date, payer_plan_period_end_date))
elif plan['2008'] < 12 and plan['2009'] < 12 and plan['2010'] == 12:
if plan['2008'] > 0:
payer_plan_period_start_date = '2008-01-01'
payer_plan_period_end_date = get_payer_plan_period_date(date(2008,01,01), plan['2008'])
date_list.append((payer_plan_period_start_date, payer_plan_period_end_date))
if plan['2009'] > 0:
payer_plan_period_start_date = '2009-01-01'
payer_plan_period_end_date = get_payer_plan_period_date(date(2009,01,01), plan['2009'])
date_list.append((payer_plan_period_start_date, payer_plan_period_end_date))
payer_plan_period_start_date = '2010-01-01'
payer_plan_period_end_date = '2010-12-31'
date_list.append((payer_plan_period_start_date, payer_plan_period_end_date))
elif plan['2008'] < 12 and plan['2009'] < 12 and plan['2010'] < 12:
if plan['2008'] > 0:
payer_plan_period_start_date = '2008-01-01'
payer_plan_period_end_date = get_payer_plan_period_date(date(2008,01,01), plan['2008'])
date_list.append((payer_plan_period_start_date, payer_plan_period_end_date))
if plan['2009'] > 0:
payer_plan_period_start_date = '2009-01-01'
payer_plan_period_end_date = get_payer_plan_period_date(date(2009,01,01), plan['2009'])
date_list.append((payer_plan_period_start_date, payer_plan_period_end_date))
if plan['2010'] > 0:
payer_plan_period_start_date = '2010-01-01'
payer_plan_period_end_date = get_payer_plan_period_date(date(2010,01,01), plan['2010'])
date_list.append((payer_plan_period_start_date, payer_plan_period_end_date))
return date_list
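# Illustrative example (hypothetical coverage): for plan months {'2008': 12, '2009': 12, '2010': 6}
# the function returns [('2008-01-01', datetime.date(2010, 6, 30))]. Hard-coded boundaries are
# strings while computed ones are datetime.date objects; both format as YYYY-MM-DD through
# str.format() in write_to_payer_plan_period_file().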
#---------------------------------------------------------------------
# use a start/end date plus a number of months (delta) to calculate the
# corresponding end/start date; a negative delta moves backwards in time
#--------------------------------------------------------------------
def get_payer_plan_period_date(date, delta):
m, y = (date.month+delta) % 12, date.year + ((date.month)+delta-1) // 12 # calculate new month and year
if m == 0:
m = 12
d = min(date.day, calendar.monthrange(y, m)[1]) # get the last date of the month
return date.replace(day=d,month=m, year=y) # return the new date
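# Worked examples for the calculation above (illustrative):
#   get_payer_plan_period_date(date(2009, 1, 1), 7)    -> date(2009, 8, 1)
#   get_payer_plan_period_date(date(2008, 12, 31), -3) -> date(2008, 9, 30)
# The day is clamped to the last day of the resulting month via calendar.monthrange().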
# -----------------------------------
# Write Location records
# -----------------------------------
def write_location_record(beneficiary):
state_county = str(beneficiary.SP_STATE_CODE) + '-' + str(beneficiary.BENE_COUNTY_CD)
current_location_id = get_location_id(state_county) # get the location id for the given pair of state & county
idx = person_location_dict[state_county][1]
if idx == 0:
location_fd = file_control.get_Descriptor('location')
location_fd.write('{0},'.format(current_location_id)) # location_id
location_fd.write(',')
location_fd.write(',')
location_fd.write(',')
try:
location_fd.write('{0},'.format(SSA_state_codes[beneficiary.SP_STATE_CODE])) # state_code - if SSA code is present in the dictionary
except:
location_fd.write('{0},'.format(beneficiary.SP_STATE_CODE)) # if SSA code is not present in the dictionary
location_fd.write(',')
if len(beneficiary.SP_STATE_CODE) == 1: # convert to 2 bytes
beneficiary.SP_STATE_CODE = '0' + str(beneficiary.SP_STATE_CODE)
if len(beneficiary.BENE_COUNTY_CD) == 1: # convert to 3 bytes
beneficiary.BENE_COUNTY_CD = '00' + str(beneficiary.BENE_COUNTY_CD)
elif len(beneficiary.BENE_COUNTY_CD) == 2: # convert to 3 bytes
beneficiary.BENE_COUNTY_CD = '0' + str(beneficiary.BENE_COUNTY_CD)
local_county_code = str(beneficiary.SP_STATE_CODE) + str(beneficiary.BENE_COUNTY_CD)
location_fd.write('{0},'.format(local_county_code)) # county_code
location_fd.write('{0}'.format(beneficiary.LOCATION_ID)) # location_source_value
location_fd.write('\n')
location_fd.increment_recs_written(1)
person_location_dict[state_county] = [person_location_dict[state_county][0],1] # change the status to written
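# Illustrative example of the county code built above (hypothetical codes): SP_STATE_CODE '5'
# and BENE_COUNTY_CD '70' are zero-padded to '05' and '070', giving a county_code of '05070'.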
# -----------------------------------
# Observation Period
# -----------------------------------
def write_observation_period_records(beneficiary):
#There are beneficiaries who are listed but have no activity, so we generate no observation period
if len(beneficiary.visit_dates) == 0:
return
obs_period_fd = file_control.get_Descriptor('observation_period')
start_date = min(beneficiary.visit_dates.keys())
end_date = max(beneficiary.visit_dates.keys())
obs_period_fd.write('{0},'.format(table_ids.last_observation_period_id))
obs_period_fd.write('{0},'.format(beneficiary.person_id))
obs_period_fd.write('{0},'.format(start_date))
obs_period_fd.write('{0},'.format(end_date))
obs_period_fd.write('{0}'.format(OMOP_CONSTANTS.OBS_PERIOD_ENROLLED_INSURANCE))
obs_period_fd.write('\n')
obs_period_fd.increment_recs_written(1)
table_ids.last_observation_period_id += 1
# -----------------------------------
# Death Record
# -----------------------------------
def write_death_records(death_fd, beneficiary, death_type_concept_id, cause_source_concept_id):
yd = beneficiary.LatestYearData()
if yd is not None and yd.BENE_DEATH_DT != '': # if year data for BENE_DEATH_DT is not available, don't write to death file.
death_fd.write('{0},'.format(beneficiary.person_id))
death_fd.write('{0},'.format(get_date_YYYY_MM_DD(yd.BENE_DEATH_DT)))
death_fd.write('{0},'.format(death_type_concept_id))
death_fd.write(',') # cause_concept_id
death_fd.write(',') # cause_source_value
death_fd.write('{0}'.format(cause_source_concept_id))
death_fd.write('\n')
death_fd.increment_recs_written(1)
# -----------------------------------
# Drug Exposure
# -----------------------------------
def write_drug_exposure(drug_exp_fd, person_id, drug_concept_id, start_date, drug_type_concept_id,
quantity, days_supply, drug_source_concept_id, drug_source_value, provider_id, visit_occurrence_id):
drug_exp_fd.write('{0},'.format(table_ids.last_drug_exposure_id))
drug_exp_fd.write('{0},'.format(person_id))
drug_exp_fd.write('{0},'.format(drug_concept_id))
drug_exp_fd.write('{0},'.format(get_date_YYYY_MM_DD(start_date))) # drug_exposure_start_date
drug_exp_fd.write(',') # drug_exposure_end_date
drug_exp_fd.write('{0},'.format(drug_type_concept_id))
drug_exp_fd.write(',') # stop_reason
drug_exp_fd.write(',') # refills
if quantity is None:
drug_exp_fd.write(',')
else:
drug_exp_fd.write('{0},'.format(float(quantity)))
if days_supply is None:
drug_exp_fd.write(',')
else:
drug_exp_fd.write('{0},'.format(days_supply))
drug_exp_fd.write(',') # sig
drug_exp_fd.write(',') # route_concept_id
drug_exp_fd.write(',') # effective_drug_dose
drug_exp_fd.write(',') # dose_unit_concept_id
drug_exp_fd.write(',') # lot_number
drug_exp_fd.write('{0},'.format(provider_id)) # provider_id
drug_exp_fd.write('{0},'.format(visit_occurrence_id))
drug_exp_fd.write('{0},'.format(drug_source_value))
drug_exp_fd.write('{0},'.format(drug_source_concept_id))
drug_exp_fd.write(',') # route_source_value
#drug_exp_fd.write('') # dose_unit_source_value
drug_exp_fd.write('\n')
drug_exp_fd.increment_recs_written(1)
table_ids.last_drug_exposure_id += 1
# -----------------------------------
# Device Exposure
# -----------------------------------
def write_device_exposure(device_fd, person_id, device_concept_id, start_date, end_date, device_type_concept_id,
device_source_value, device_source_concept_id, provider_id, visit_occurrence_id):
device_fd.write('{0},'.format(table_ids.last_device_exposure_id))
device_fd.write('{0},'.format(person_id))
device_fd.write('{0},'.format(device_concept_id))
device_fd.write('{0},'.format(get_date_YYYY_MM_DD(start_date)))
device_fd.write('{0},'.format(get_date_YYYY_MM_DD(end_date)))
device_fd.write('{0},'.format(device_type_concept_id))
device_fd.write(',') # unique_device_id
device_fd.write(',') # quantity
device_fd.write('{0},'.format(provider_id)) # provider_id
device_fd.write('{0},'.format(visit_occurrence_id))
device_fd.write('{0},'.format(device_source_value))
device_fd.write('{0}'.format(device_source_concept_id))
device_fd.write('\n')
device_fd.increment_recs_written(1)
table_ids.last_device_exposure_id += 1
# -----------------------------------
# Prescription Drug File -> Drug Exposure; Drug Cost
# -----------------------------------
def write_drug_records(beneficiary):
drug_exp_fd = file_control.get_Descriptor('drug_exposure')
drug_cost_fd = file_control.get_Descriptor('drug_cost')
for raw_rec in beneficiary.prescription_records:
rec = PrescriptionDrug(raw_rec)
if rec.SRVC_DT == '':
continue
ndc_code = rec.PROD_SRVC_ID
if (OMOP_CONSTANTS.NDC_VOCABULARY_ID,ndc_code) in source_code_concept_dict:
#In practice we do not see multiple mappings of drugs, but in principle it could happen
for sccd in source_code_concept_dict[OMOP_CONSTANTS.NDC_VOCABULARY_ID,ndc_code]:
drug_source_concept_id = sccd.source_concept_id
drug_concept_id = sccd.target_concept_id
write_drug_exposure(drug_exp_fd, beneficiary.person_id,
drug_concept_id=drug_concept_id,
start_date=rec.SRVC_DT,
drug_type_concept_id=OMOP_CONSTANTS.DRUG_TYPE_PRESCRIPTION,
quantity=rec.QTY_DSPNSD_NUM,
days_supply=rec.DAYS_SUPLY_NUM,
drug_source_concept_id=drug_source_concept_id,
drug_source_value=ndc_code,
provider_id="",
visit_occurrence_id="")
else:
#These are for any NDC codes not in CONCEPT.csv
dline = 'DrugRecords--- ' + 'Unmapped NDC code: ' + str(ndc_code) + ' DESYNPUF_ID: ' + rec.DESYNPUF_ID + '\n'
unmapped_log.write(dline)
write_drug_exposure(drug_exp_fd, beneficiary.person_id,
drug_concept_id="0",
start_date=rec.SRVC_DT,
drug_type_concept_id=OMOP_CONSTANTS.DRUG_TYPE_PRESCRIPTION,
quantity=rec.QTY_DSPNSD_NUM,
days_supply=rec.DAYS_SUPLY_NUM,
drug_source_concept_id="0",
drug_source_value=ndc_code,
provider_id="",
visit_occurrence_id="")
#----------------------
# drug cost -- only written once, even if an NDC code were to map to multiple RxNorm drugs (which does not happen in practice)
#----------------------
current_drug_exposure_id = table_ids.last_drug_exposure_id - 1 #subtracted 1 as drug_exposure function added 1 to last_drug_exposure_id
drug_cost_fd.write('{0},'.format(table_ids.last_drug_cost_id))
drug_cost_fd.write('{0},'.format(current_drug_exposure_id))
drug_cost_fd.write('{0},'.format(OMOP_CONSTANTS.CURRENCY_US_DOLLAR))
drug_cost_fd.write(',') # paid_copay
drug_cost_fd.write('{0},'.format(rec.PTNT_PAY_AMT)) # paid_coinsurance
drug_cost_fd.write(',') # paid_toward_deductible
drug_cost_fd.write(',') # paid_by_payer
drug_cost_fd.write(',') # paid_by_coordination_of_benefits
drug_cost_fd.write('{0},'.format(rec.PTNT_PAY_AMT)) # total_out_of_pocket #
drug_cost_fd.write('{0},'.format(rec.TOT_RX_CST_AMT)) # total_paid #
drug_cost_fd.write(',') # ingredient_cost
drug_cost_fd.write(',') # dispensing_fee
drug_cost_fd.write(',') # average_wholesale_price
#drug_cost_fd.write('') # payer_plan_period_id -- left empty; the drug cost row is not linked to a payer plan period here.
drug_cost_fd.write('\n')
drug_cost_fd.increment_recs_written(1)
table_ids.last_drug_cost_id += 1
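# Note: this drug_cost block runs for every prescription record with a service date, including
# NDC codes that could not be mapped (written with drug_concept_id 0), and links the cost row
# to the exposure written immediately above via last_drug_exposure_id - 1.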
# -----------------------------------
# Provider file
# -----------------------------------
def write_provider_record(provider_fd, npi, provider_id, care_site_id, provider_source_value):
if not provider_id:
return
idx = npi_provider_id[npi][1]
if idx == 0:
provider_fd.write('{0},'.format(provider_id))
provider_fd.write(',') # provider_name
provider_fd.write('{0},'.format(npi))
provider_fd.write(',') # dea
provider_fd.write(',')
provider_fd.write('{0},'.format(care_site_id))
provider_fd.write(',') # year_of_birth
provider_fd.write(',') # gender_concept_id
provider_fd.write('{0},'.format(provider_source_value)) # provider_source_value
provider_fd.write(',') # specialty_source_value
provider_fd.write(',') # specialty_source_concept_id
provider_fd.write(',') # gender_source_value
#provider_fd.write('') # gender_source_concept_id
provider_fd.write('\n')
provider_fd.increment_recs_written(1)
npi_provider_id[npi] = [npi_provider_id[npi][0],1] #set index to 1 to mark provider_id written
# -----------------------------------
# Condition Occurrence file
# - Added provider_id
# -----------------------------------
def write_condition_occurrence(cond_occur_fd, person_id, condition_concept_id,
from_date, thru_date, condition_type_concept_id, provider_id,
condition_source_value, condition_source_concept_id, visit_occurrence_id):
cond_occur_fd.write('{0},'.format(table_ids.last_condition_occurrence_id))
cond_occur_fd.write('{0},'.format(person_id))
cond_occur_fd.write('{0},'.format(condition_concept_id))
cond_occur_fd.write('{0},'.format(get_date_YYYY_MM_DD(from_date)))
cond_occur_fd.write('{0},'.format(get_date_YYYY_MM_DD(thru_date)))
cond_occur_fd.write('{0},'.format(condition_type_concept_id))
cond_occur_fd.write(',') # stop_reason
cond_occur_fd.write('{0},'.format(provider_id)) # provider_id
cond_occur_fd.write('{0},'.format(visit_occurrence_id))
cond_occur_fd.write('{0},'.format(condition_source_value))
cond_occur_fd.write('{0}'.format(condition_source_concept_id))
cond_occur_fd.write('\n')
cond_occur_fd.increment_recs_written(1)
table_ids.last_condition_occurrence_id += 1
# -----------------------------------
# - Added this new function to
# create the Visit Occurrence file
# -----------------------------------
def write_visit_occurrence(visit_occur_fd, person_id, visit_concept_id, visit_occurrence_id, care_site_id, visit_source_concept_id,
from_date, thru_date, visit_type_concept_id, provider_id, visit_source_value):
visit_occur_fd.write('{0},'.format(visit_occurrence_id))
visit_occur_fd.write('{0},'.format(person_id))
visit_occur_fd.write('{0},'.format(visit_concept_id))
visit_occur_fd.write('{0},'.format(get_date_YYYY_MM_DD(from_date)))
visit_occur_fd.write(',') # visit_start_time
visit_occur_fd.write('{0},'.format(get_date_YYYY_MM_DD(thru_date)))
visit_occur_fd.write(',') # visit_end_time
visit_occur_fd.write('{0},'.format(visit_type_concept_id))
visit_occur_fd.write('{0},'.format(provider_id)) # provider_id
visit_occur_fd.write('{0},'.format(care_site_id)) # care_site_id
visit_occur_fd.write('{0},'.format(visit_source_value))
#visit_occur_fd.write('') # visit_source_concept_id
visit_occur_fd.write('\n')
visit_occur_fd.increment_recs_written(1)
# -----------------------------------
# Procedure Occurrence file
# -----------------------------------
def write_procedure_occurrence(proc_occur_fd, person_id, procedure_concept_id,
from_date, procedure_type_concept_id,provider_id,modifier_concept_id,
procedure_source_value, procedure_source_concept_id, visit_occurrence_id):
proc_occur_fd.write('{0},'.format(table_ids.last_procedure_occurrence_id))
proc_occur_fd.write('{0},'.format(person_id))
proc_occur_fd.write('{0},'.format(procedure_concept_id))
proc_occur_fd.write('{0},'.format(get_date_YYYY_MM_DD(from_date))) # procedure_date
proc_occur_fd.write('{0},'.format(procedure_type_concept_id))
proc_occur_fd.write(',') # modifier_concept_id
proc_occur_fd.write(',') # quantity
proc_occur_fd.write('{0},'.format(provider_id)) # provider_id
proc_occur_fd.write('{0},'.format(visit_occurrence_id))
proc_occur_fd.write('{0},'.format(procedure_source_value))
proc_occur_fd.write('{0},'.format(procedure_source_concept_id))
#proc_occur_fd.write('') # qualifier_source_value
proc_occur_fd.write('\n')
proc_occur_fd.increment_recs_written(1)
table_ids.last_procedure_occurrence_id += 1
# -----------------------------------
# Measurement file
# -----------------------------------
def write_measurement(measurement_fd, person_id, measurement_concept_id,
measurement_date, measurement_type_concept_id,
measurement_source_value, measurement_source_concept_id, provider_id, visit_occurrence_id):
measurement_fd.write('{0},'.format(table_ids.last_measurement_id))
measurement_fd.write('{0},'.format(person_id))
measurement_fd.write('{0},'.format(measurement_concept_id))
measurement_fd.write('{0},'.format(get_date_YYYY_MM_DD(measurement_date)))
measurement_fd.write(',') # measurement_time
measurement_fd.write('{0},'.format(measurement_type_concept_id))
measurement_fd.write(',') # operator_concept_id
measurement_fd.write(',') # value_as_number
measurement_fd.write('0,') # value_as_concept_id
measurement_fd.write(',') # unit_concept_id
measurement_fd.write(',') # range_low
measurement_fd.write(',') # range_high
measurement_fd.write('{0},'.format(provider_id)) # provider_id
measurement_fd.write('{0},'.format(visit_occurrence_id))
measurement_fd.write('{0},'.format(measurement_source_value))
measurement_fd.write('{0},'.format(measurement_source_concept_id))
measurement_fd.write(',') # unit_source_value
#measurement_fd.write('') # value_source_value
measurement_fd.write('\n')
measurement_fd.increment_recs_written(1)
table_ids.last_measurement_id += 1
# -----------------------------------
# Observation file
# -----------------------------------
def write_observation(observation_fd, person_id, observation_concept_id,provider_id,
observation_date, observation_type_concept_id,
observation_source_value, observation_source_concept_id, visit_occurrence_id):
observation_fd.write('{0},'.format(table_ids.last_observation_id))
observation_fd.write('{0},'.format(person_id))
observation_fd.write('{0},'.format(observation_concept_id))
observation_fd.write('{0},'.format(get_date_YYYY_MM_DD(observation_date)))
observation_fd.write(',') # observation_time
observation_fd.write('{0},'.format(observation_type_concept_id))
observation_fd.write(',') # value_as_number
observation_fd.write(',') # value_as_string
observation_fd.write('0,') # value_as_concept_id
observation_fd.write(',') # qualifier_concept_id
observation_fd.write(',') # unit_concept_id
observation_fd.write('{0},'.format(provider_id)) # provider_id
observation_fd.write('{0},'.format(visit_occurrence_id))
observation_fd.write('{0},'.format(observation_source_value))
observation_fd.write('{0},'.format(observation_source_concept_id))
observation_fd.write(',') # unit_source_value
#observation_fd.write('') # qualifier_source_value
observation_fd.write('\n')
observation_fd.increment_recs_written(1)
table_ids.last_observation_id += 1
# -----------------------------------
# Write to Care Site file
# -----------------------------------
def write_care_site(care_site_fd, care_site_id, place_of_service_concept_id, care_site_source_value, place_of_service_source_value):
if not care_site_id:
return
idx = provider_id_care_site_id[care_site_source_value][1]
if idx == 0:
care_site_fd.write('{0},'.format(care_site_id))
care_site_fd.write(',') # care_site_name
care_site_fd.write('{0},'.format(place_of_service_concept_id))
care_site_fd.write(',') # location_id
care_site_fd.write('{0},'.format(care_site_source_value))
care_site_fd.write('{0}'.format(place_of_service_source_value))
care_site_fd.write('\n')
care_site_fd.increment_recs_written(1)
provider_id_care_site_id[care_site_source_value] = [provider_id_care_site_id[care_site_source_value][0],1] # change index to 1 to mark it written
# -----------------------------------
# From Inpatient Records:
# --> Visit Occurrence
# --> Visit Cost
# --> Procedure Occurrence
# --> Drug Exposure
# --> Device Exposure
# --> Condition Occurrence
# --> Measurement Occurrence
# --> Observation
# --> Care Site
# --> Provider
# -----------------------------------
def process_inpatient_records(beneficiary):
drug_exp_fd = file_control.get_Descriptor('drug_exposure')
drug_cost_fd = file_control.get_Descriptor('drug_cost')
proc_occur_fd = file_control.get_Descriptor('procedure_occurrence')
proc_cost_fd = file_control.get_Descriptor('procedure_cost')
cond_occur_fd = file_control.get_Descriptor('condition_occurrence')
death_fd = file_control.get_Descriptor('death')
care_site_fd = file_control.get_Descriptor('care_site')
provider_fd = file_control.get_Descriptor('provider')
measurement_fd = file_control.get_Descriptor('measurement_occurrence')
observation_fd = file_control.get_Descriptor('observation')
device_fd = file_control.get_Descriptor('device_exposure')
visit_occur_fd = file_control.get_Descriptor('visit_occurrence')
visit_cost_fd = file_control.get_Descriptor('visit_cost')
# location_fd = file_control.get_Descriptor('location')
for raw_rec in beneficiary.inpatient_records:
rec = InpatientClaim(raw_rec)
if rec.CLM_FROM_DT == '':
continue
# initialize both care_site_id and provider_id to empty, since some institutions might not have a PRVDR_NUM and some NPIs might be blank.
care_site_id = ""
provider_id = ""
# --get care_site_id (a unique number generated by the program) for the given institution (PRVDR_NUM)
if rec.PRVDR_NUM != '':
provider_number = rec.PRVDR_NUM
care_site_id = get_CareSite(provider_number)
write_care_site(care_site_fd, care_site_id,
place_of_service_concept_id=OMOP_CONSTANTS.INPATIENT_PLACE_OF_SERVICE,
care_site_source_value=rec.PRVDR_NUM,
place_of_service_source_value=OMOP_CONSTANTS.INPATIENT_PLACE_OF_SERVICE_SOURCE)
#-- get provider_id (a unique number generated by the program) for the given NPI. Each NPI will have its own provider_id
for npi in (rec.AT_PHYSN_NPI, rec.OP_PHYSN_NPI, rec.OT_PHYSN_NPI):
if npi != '':
provider_id = get_Provider(npi)
write_provider_record(provider_fd, npi, provider_id, care_site_id, rec.AT_PHYSN_NPI)
#-- get visit id. Person id + CLM_FROM_DT + CLM_THRU_DT + institution number(PRVDR_NUM) make the key for a particular visit
current_visit_id = visit_occurrence_ids[rec.DESYNPUF_ID,rec.CLM_FROM_DT,rec.CLM_THRU_DT,rec.PRVDR_NUM]
for (vocab,code) in ([(OMOP_CONSTANTS.ICD_9_VOCAB_ID, x) for x in rec.ICD9_DGNS_CD_list] +
[(OMOP_CONSTANTS.ICD_9_VOCAB_ID,x) for x in rec.ICD9_PRCDR_CD_list] +
[(OMOP_CONSTANTS.HCPCS_VOCABULARY_ID, x) for x in rec.HCPCS_CD_list]):
if rec.CLM_FROM_DT != '':
if (vocab,code) in source_code_concept_dict:
for sccd in source_code_concept_dict[vocab,code]:
target_concept_id = sccd.target_concept_id
source_concept_id = sccd.source_concept_id
destination_file = sccd.destination_file
if destination_file == DESTINATION_FILE_PROCEDURE:
write_procedure_occurrence(proc_occur_fd, beneficiary.person_id,
procedure_concept_id=target_concept_id,
from_date=rec.CLM_FROM_DT,
procedure_type_concept_id=OMOP_CONSTANTS.INPAT_PROCEDURE_1ST_POSITION,
procedure_source_value=code,
procedure_source_concept_id=source_concept_id,
provider_id=provider_id,
modifier_concept_id=0,
visit_occurrence_id=current_visit_id)
elif destination_file == DESTINATION_FILE_CONDITION:
write_condition_occurrence(cond_occur_fd,beneficiary.person_id,
condition_concept_id=target_concept_id,
from_date=rec.CLM_FROM_DT, thru_date=rec.CLM_THRU_DT,
condition_type_concept_id=OMOP_CONSTANTS.INPAT_CONDITION_1ST_POSITION,
condition_source_value=code,
condition_source_concept_id=source_concept_id,
provider_id=provider_id,
visit_occurrence_id=current_visit_id)
elif destination_file == DESTINATION_FILE_DRUG:
write_drug_exposure(drug_exp_fd, beneficiary.person_id,
drug_concept_id=target_concept_id,
start_date=rec.CLM_FROM_DT,
drug_type_concept_id=OMOP_CONSTANTS.DRUG_TYPE_PRESCRIPTION,
quantity=None,
days_supply=None,
drug_source_value=code,
drug_source_concept_id=source_concept_id,
provider_id=provider_id,
visit_occurrence_id=current_visit_id)
elif destination_file == DESTINATION_FILE_MEASUREMENT:
write_measurement(measurement_fd, beneficiary.person_id,
measurement_concept_id=target_concept_id,
measurement_date=rec.CLM_FROM_DT,
measurement_type_concept_id=OMOP_CONSTANTS.MEASUREMENT_DERIVED_VALUE,
measurement_source_value=code,
measurement_source_concept_id=source_concept_id,
provider_id=provider_id,
visit_occurrence_id=current_visit_id)
elif destination_file == DESTINATION_FILE_OBSERVATION:
write_observation(observation_fd, beneficiary.person_id,
observation_concept_id=target_concept_id,
observation_date=rec.CLM_FROM_DT,
observation_type_concept_id=OMOP_CONSTANTS.OBSERVATION_CHIEF_COMPLAINT,
observation_source_value=code,
observation_source_concept_id=source_concept_id,
provider_id=provider_id,
visit_occurrence_id=current_visit_id)
elif destination_file == DESTINATION_FILE_DEVICE:
write_device_exposure(device_fd, beneficiary.person_id,
device_concept_id=target_concept_id,
start_date=rec.CLM_FROM_DT,
end_date=rec.CLM_THRU_DT,
device_type_concept_id=OMOP_CONSTANTS.DEVICE_INFERRED_PROCEDURE_CLAIM,
device_source_value=code,
device_source_concept_id=source_concept_id,
provider_id=provider_id,
visit_occurrence_id=current_visit_id)
#-- Write each unique visit to visit_occurrence file.
if current_visit_id not in visit_id_list:
write_visit_occurrence(visit_occur_fd,beneficiary.person_id,
visit_concept_id=OMOP_CONSTANTS.INPAT_VISIT_CONCEPT_ID,
from_date=rec.CLM_FROM_DT, thru_date=rec.CLM_THRU_DT,
visit_type_concept_id=OMOP_CONSTANTS.INPAT_VISIT_1ST_POSITION,
visit_source_value=rec.CLM_ID,
visit_source_concept_id=source_concept_id,
care_site_id=care_site_id,
provider_id=provider_id,
visit_occurrence_id=current_visit_id)
visit_id_list.add(current_visit_id)
else:
dfile = 'Inpatient--- unmapped ' + str(vocab) + ' code: ' + str(code) + ' DESYNPUF_ID: ' + rec.DESYNPUF_ID + '\n'
unmapped_log.write(dfile)
#-- care site / provider
# -----------------------------------
# From Outpatient Records:
# --> Visit Occurrence
# --> Visit Cost
# --> Procedure Occurrence
# --> Drug Exposure
# --> Device Exposure
# --> Device Exposure Cost
# --> Condition Occurrence
# --> Measurement Occurrence
# --> Observation
# --> Care Site
# --> Provider
# -----------------------------------
def process_outpatient_records(beneficiary):
drug_exp_fd = file_control.get_Descriptor('drug_exposure')
drug_cost_fd = file_control.get_Descriptor('drug_cost')
proc_occur_fd = file_control.get_Descriptor('procedure_occurrence')
proc_cost_fd = file_control.get_Descriptor('procedure_cost')
cond_occur_fd = file_control.get_Descriptor('condition_occurrence')
death_fd = file_control.get_Descriptor('death')
care_site_fd = file_control.get_Descriptor('care_site')
provider_fd = file_control.get_Descriptor('provider')
measurement_fd = file_control.get_Descriptor('measurement_occurrence')
observation_fd = file_control.get_Descriptor('observation')
device_fd = file_control.get_Descriptor('device_exposure')
visit_occur_fd = file_control.get_Descriptor('visit_occurrence')
visit_cost_fd = file_control.get_Descriptor('visit_cost')
for raw_rec in beneficiary.outpatient_records:
rec = OutpatientClaim(raw_rec)
if rec.CLM_FROM_DT == '':
continue
# initialize both care_site_id and provider_id to empty, since some institutions might not have a PRVDR_NUM and some NPIs might be blank.
care_site_id = ""
provider_id = ""
#-- get care_site_id (a unique number generated by the program) for the given institution (PRVDR_NUM)
if rec.PRVDR_NUM != '':
provider_number = rec.PRVDR_NUM
care_site_id = get_CareSite(provider_number)
write_care_site(care_site_fd, care_site_id,
place_of_service_concept_id=OMOP_CONSTANTS.OUTPATIENT_PLACE_OF_SERVICE,
care_site_source_value=rec.PRVDR_NUM,
place_of_service_source_value=OMOP_CONSTANTS.OUTPATIENT_PLACE_OF_SERVICE_SOURCE)
#-- get provider_id (a unique number generated by the program) for the given NPI. Each NPI will have its own provider_id
for npi in (rec.AT_PHYSN_NPI, rec.OP_PHYSN_NPI, rec.OT_PHYSN_NPI):
if npi != '':
provider_id = get_Provider(npi)
write_provider_record(provider_fd, npi, provider_id, care_site_id, rec.AT_PHYSN_NPI)
#-- get visit id. Person id + CLM_FROM_DT + CLM_THRU_DT + institution number(PRVDR_NUM) make the key for a particular visit
current_visit_id = visit_occurrence_ids[rec.DESYNPUF_ID,rec.CLM_FROM_DT,rec.CLM_THRU_DT,rec.PRVDR_NUM]
for (vocab,code) in ( ([] if rec.ADMTNG_ICD9_DGNS_CD == "" else [(OMOP_CONSTANTS.ICD_9_VOCAB_ID,rec.ADMTNG_ICD9_DGNS_CD)]) +
[(OMOP_CONSTANTS.ICD_9_VOCAB_ID,x) for x in rec.ICD9_DGNS_CD_list] +
[(OMOP_CONSTANTS.ICD_9_VOCAB_ID,x) for x in rec.ICD9_PRCDR_CD_list] +
[(OMOP_CONSTANTS.HCPCS_VOCABULARY_ID,x) for x in rec.HCPCS_CD_list]):
if rec.CLM_FROM_DT != '':
if (vocab,code) in source_code_concept_dict:
for sccd in source_code_concept_dict[vocab,code]:
target_concept_id = sccd.target_concept_id
source_concept_id = sccd.source_concept_id
destination_file = sccd.destination_file
if destination_file == DESTINATION_FILE_PROCEDURE:
write_procedure_occurrence(proc_occur_fd, beneficiary.person_id,
procedure_concept_id=target_concept_id,
from_date=rec.CLM_FROM_DT,
procedure_type_concept_id=OMOP_CONSTANTS.OUTPAT_PROCEDURE_1ST_POSITION,
procedure_source_value=code,
procedure_source_concept_id=source_concept_id,
provider_id=provider_id,
modifier_concept_id=0,
visit_occurrence_id=current_visit_id)
elif destination_file == DESTINATION_FILE_CONDITION:
write_condition_occurrence(cond_occur_fd,beneficiary.person_id,
condition_concept_id=target_concept_id,
from_date=rec.CLM_FROM_DT, thru_date=rec.CLM_THRU_DT,
condition_type_concept_id=OMOP_CONSTANTS.OUTPAT_CONDITION_1ST_POSITION,
condition_source_value=code,
condition_source_concept_id=source_concept_id,
provider_id=provider_id,
visit_occurrence_id=current_visit_id)
elif destination_file == DESTINATION_FILE_DRUG:
write_drug_exposure(drug_exp_fd, beneficiary.person_id,
drug_concept_id=target_concept_id,
start_date=rec.CLM_FROM_DT,
drug_type_concept_id=OMOP_CONSTANTS.DRUG_TYPE_PRESCRIPTION,
quantity=None,
days_supply=None,
drug_source_value=code,
drug_source_concept_id=source_concept_id,
provider_id=provider_id,
visit_occurrence_id=current_visit_id)
elif destination_file == DESTINATION_FILE_MEASUREMENT:
write_measurement(measurement_fd, beneficiary.person_id,
measurement_concept_id=target_concept_id,
measurement_date=rec.CLM_FROM_DT,
measurement_type_concept_id=OMOP_CONSTANTS.MEASUREMENT_DERIVED_VALUE,
measurement_source_value=code,
measurement_source_concept_id=source_concept_id,
provider_id=provider_id,
visit_occurrence_id=current_visit_id)
elif destination_file == DESTINATION_FILE_OBSERVATION:
write_observation(observation_fd, beneficiary.person_id,
observation_concept_id=target_concept_id,
observation_date=rec.CLM_FROM_DT,
observation_type_concept_id=OMOP_CONSTANTS.OBSERVATION_CHIEF_COMPLAINT,
observation_source_value=code,
observation_source_concept_id=source_concept_id,
provider_id=provider_id,
visit_occurrence_id=current_visit_id)
elif destination_file == DESTINATION_FILE_DEVICE:
write_device_exposure(device_fd, beneficiary.person_id,
device_concept_id=target_concept_id,
start_date=rec.CLM_FROM_DT,
end_date=rec.CLM_THRU_DT,
device_type_concept_id=OMOP_CONSTANTS.DEVICE_INFERRED_PROCEDURE_CLAIM,
device_source_value=code,
device_source_concept_id=source_concept_id,
provider_id=provider_id,
visit_occurrence_id=current_visit_id)
#-- Write each unique visit to visit_occurrence file.
if current_visit_id not in visit_id_list:
write_visit_occurrence(visit_occur_fd,beneficiary.person_id,
visit_concept_id=OMOP_CONSTANTS.OUTPAT_VISIT_CONCEPT_ID,
from_date=rec.CLM_FROM_DT, thru_date=rec.CLM_THRU_DT,
visit_type_concept_id=OMOP_CONSTANTS.OUTPAT_VISIT_1ST_POSITION,
visit_source_value=rec.CLM_ID,
visit_source_concept_id=source_concept_id,
care_site_id=care_site_id,
provider_id=provider_id,
visit_occurrence_id=current_visit_id)
visit_id_list.add(current_visit_id)
else:
dfile = 'Outpatient--- unmapped ' + str(vocab) + ' code: ' + str(code) + ' DESYNPUF_ID: ' + rec.DESYNPUF_ID + '\n'
unmapped_log.write(dfile)
# -----------------------------------
# From Carrier Claims Records:
# --> Visit Occurrence
# --> Visit Cost
# --> Procedure Occurrence
# --> Drug Exposure
# --> Device Exposure
# --> Device Exposure Cost
# --> Condition Occurrence
# --> Measurement Occurrence
# --> Observation
# --> Care Site
# --> Provider
# -----------------------------------
def process_carrier_records(beneficiary):
drug_exp_fd = file_control.get_Descriptor('drug_exposure')
drug_cost_fd = file_control.get_Descriptor('drug_cost')
proc_occur_fd = file_control.get_Descriptor('procedure_occurrence')
proc_cost_fd = file_control.get_Descriptor('procedure_cost')
cond_occur_fd = file_control.get_Descriptor('condition_occurrence')
death_fd = file_control.get_Descriptor('death')
care_site_fd = file_control.get_Descriptor('care_site')
provider_fd = file_control.get_Descriptor('provider')
measurement_fd = file_control.get_Descriptor('measurement_occurrence')
observation_fd = file_control.get_Descriptor('observation')
device_fd = file_control.get_Descriptor('device_exposure')
visit_occur_fd = file_control.get_Descriptor('visit_occurrence')
visit_cost_fd = file_control.get_Descriptor('visit_cost')
for raw_rec in beneficiary.carrier_records:
rec = CarrierClaim(raw_rec)
if rec.CLM_FROM_DT == '':
continue
# initialize both care_site_id and provider_id to empty, since some institutions might not have a PRVDR_NUM and some NPIs might be blank.
care_site_id = ""
provider_id = ""
#-- get care_site_id (a unique number generated by the program) for the given TAX_NUM
for cc_line in rec.CarrierClaimLine_list:
# initialize both care_site_id and provider_id to empty, since some institutions might not have a PRVDR_NUM and some NPIs might be blank.
care_site_id = ''
provider_id = ''
if cc_line.TAX_NUM != '':
save_TAX_NUM = cc_line.TAX_NUM
care_site_id = get_CareSite(cc_line.TAX_NUM)
write_care_site(care_site_fd, care_site_id,
place_of_service_concept_id=OMOP_CONSTANTS.CARRIER_CLAIMS_PLACE_OF_SERVICE,
care_site_source_value=cc_line.TAX_NUM,
place_of_service_source_value=OMOP_CONSTANTS.CARRIER_CLAIMS_PLACE_OF_SERVICE_SOURCE)
#-- get provider_id (a unique number generated by the program) for the given NPI. Each NPI will have its own provider_id
if cc_line.PRF_PHYSN_NPI != '':
npi = cc_line.PRF_PHYSN_NPI
provider_id = get_Provider(npi)
write_provider_record(provider_fd, npi, provider_id, care_site_id, cc_line.PRF_PHYSN_NPI)
#-- get visit id. Person id + CLM_FROM_DT + CLM_THRU_DT + TAX_NUM make the key for a particular visit
current_visit_id = visit_occurrence_ids[rec.DESYNPUF_ID,rec.CLM_FROM_DT,rec.CLM_THRU_DT,rec.TAX_NUM]
for (vocab,code) in ([(OMOP_CONSTANTS.ICD_9_VOCAB_ID,x) for x in rec.ICD9_DGNS_CD_list] +
[(OMOP_CONSTANTS.HCPCS_VOCABULARY_ID, x) for x in rec.HCPCS_CD_list] +
[(OMOP_CONSTANTS.ICD_9_VOCAB_ID, x) for x in rec.LINE_ICD9_DGNS_CD_list]):
if rec.CLM_FROM_DT != '':
if (vocab,code) in source_code_concept_dict:
for sccd in source_code_concept_dict[vocab,code]:
target_concept_id = sccd.target_concept_id
source_concept_id = sccd.source_concept_id
destination_file = sccd.destination_file
if destination_file == DESTINATION_FILE_PROCEDURE:
write_procedure_occurrence(proc_occur_fd, beneficiary.person_id,
procedure_concept_id=target_concept_id,
from_date=rec.CLM_FROM_DT,
procedure_type_concept_id=OMOP_CONSTANTS.OUTPAT_PROCEDURE_1ST_POSITION,
procedure_source_value=code,
procedure_source_concept_id=source_concept_id,
provider_id=provider_id,
modifier_concept_id=0,
visit_occurrence_id=current_visit_id)
#-- procedure cost. A procedure cost row is only written when a procedure occurrence entry was just written.
current_procedure_occurence_id = table_ids.last_procedure_occurrence_id - 1 # the id was incremented after writing the procedure occurrence, so subtract 1 to reference the row just written
for cc_line in rec.CarrierClaimLine_list:
if cc_line.has_nonzero_amount():
proc_cost_fd.write('{0},'.format(table_ids.last_procedure_cost_id))
proc_cost_fd.write('{0},'.format(current_procedure_occurence_id))
proc_cost_fd.write('{0},'.format(OMOP_CONSTANTS.CURRENCY_US_DOLLAR)) # currency_concept_id
proc_cost_fd.write(',') # paid_copay
proc_cost_fd.write('{0},'.format(cc_line.LINE_COINSRNC_AMT)) # paid_coinsurance
proc_cost_fd.write('{0},'.format(cc_line.LINE_BENE_PTB_DDCTBL_AMT)) # paid_toward_deductible
proc_cost_fd.write('{0},'.format(cc_line.LINE_NCH_PMT_AMT)) # paid_by_payer
proc_cost_fd.write('{0},'.format(cc_line.LINE_BENE_PRMRY_PYR_PD_AMT)) # paid_by_coordination_benefits
amt = 0
try:
amt = float(cc_line.LINE_BENE_PTB_DDCTBL_AMT) + float(cc_line.LINE_COINSRNC_AMT)
except:
pass
proc_cost_fd.write('{0:2},'.format(amt)) # total_out_of_pocket
proc_cost_fd.write('{0},'.format(cc_line.LINE_ALOWD_CHRG_AMT)) # total_paid
proc_cost_fd.write(',') # revenue_code_concept_id
##
## need to lookup
##
proc_cost_fd.write(',') # payer_plan_period_id -- left empty; the procedure cost row is not linked to a payer plan period here
#proc_cost_fd.write('') # revenue_code_source_value
proc_cost_fd.write('\n')
proc_cost_fd.increment_recs_written(1)
table_ids.last_procedure_cost_id += 1
elif destination_file == DESTINATION_FILE_CONDITION:
write_condition_occurrence(cond_occur_fd,beneficiary.person_id,
condition_concept_id=target_concept_id,
from_date=rec.CLM_FROM_DT, thru_date=rec.CLM_THRU_DT,
condition_type_concept_id=OMOP_CONSTANTS.OUTPAT_CONDITION_1ST_POSITION,
condition_source_value=code,
condition_source_concept_id=source_concept_id,
provider_id=provider_id,
visit_occurrence_id=current_visit_id)
elif destination_file == DESTINATION_FILE_DRUG:
write_drug_exposure(drug_exp_fd, beneficiary.person_id,
drug_concept_id=target_concept_id,
start_date=rec.CLM_FROM_DT,
drug_type_concept_id=OMOP_CONSTANTS.DRUG_TYPE_PRESCRIPTION,
quantity=None,
days_supply=None,
drug_source_value=code,
drug_source_concept_id=source_concept_id,
provider_id=provider_id,
visit_occurrence_id=current_visit_id)
elif destination_file == DESTINATION_FILE_MEASUREMENT:
write_measurement(measurement_fd, beneficiary.person_id,
measurement_concept_id=target_concept_id,
measurement_date=rec.CLM_FROM_DT,
measurement_type_concept_id=OMOP_CONSTANTS.MEASUREMENT_DERIVED_VALUE,
measurement_source_value=code,
measurement_source_concept_id=source_concept_id,
provider_id=provider_id,
visit_occurrence_id=current_visit_id)
elif destination_file == DESTINATION_FILE_OBSERVATION:
write_observation(observation_fd, beneficiary.person_id,
observation_concept_id=target_concept_id,
observation_date=rec.CLM_FROM_DT,
observation_type_concept_id=OMOP_CONSTANTS.OBSERVATION_CHIEF_COMPLAINT,
observation_source_value=code,
observation_source_concept_id=source_concept_id,
provider_id=provider_id, #
visit_occurrence_id=current_visit_id)
elif destination_file == DESTINATION_FILE_DEVICE:
write_device_exposure(device_fd, beneficiary.person_id,
device_concept_id=target_concept_id,
start_date=rec.CLM_FROM_DT,
end_date=rec.CLM_THRU_DT,
device_type_concept_id=OMOP_CONSTANTS.DEVICE_INFERRED_PROCEDURE_CLAIM,
device_source_value=code,
device_source_concept_id=source_concept_id,
provider_id=provider_id,
visit_occurrence_id=current_visit_id)
#-- Write each unique visit to visit_occurrence file.
if current_visit_id not in visit_id_list:
write_visit_occurrence(visit_occur_fd,beneficiary.person_id,
visit_concept_id=OMOP_CONSTANTS.CARRIER_CLAIMS_VISIT_CONCEPT_ID,
from_date=rec.CLM_FROM_DT, thru_date=rec.CLM_THRU_DT,
visit_type_concept_id=OMOP_CONSTANTS.CARRIER_CLAIMS_VISIT_1ST_POSITION,
visit_source_value=rec.CLM_ID,
visit_source_concept_id=source_concept_id,
care_site_id=care_site_id,
provider_id=provider_id,
visit_occurrence_id=current_visit_id)
visit_id_list.add(current_visit_id)
else:
dfile = 'CarrierClaim--- unmapped ' + str(vocab) + ' code: ' + str(code) + ' DESYNPUF_ID: ' + rec.DESYNPUF_ID + '\n'
unmapped_log.write(dfile)
#---------------------------------
def write_header_records():
headers = {
'person' :
'person_id,gender_concept_id,year_of_birth,month_of_birth,day_of_birth,time_of_birth,race_concept_id,ethnicity_concept_id,'
'location_id,provider_id,care_site_id,person_source_value,gender_source_value,gender_source_concept_id,race_source_value,'
'race_source_concept_id,ethnicity_source_value,ethnicity_source_concept_id',
'observation':
'observation_id,person_id,observation_concept_id,observation_date,observation_time,observation_type_concept_id,value_as_number,'
'value_as_string,value_as_concept_id,qualifier_concept_id,unit_concept_id,provider_id,visit_occurrence_id,observation_source_value,'
'observation_source_concept_id,unit_source_value,qualifier_source_value',
'observation_period':
'observation_period_id,person_id,observation_period_start_date,observation_period_end_date,period_type_concept_id',
'specimen':
'specimen_id,person_id,specimen_concept_id,specimen_type_concept_id,specimen_date,specimen_time,quantity,'
'unit_concept_id,anatomic_site_concept_id,disease_status_concept_id,specimen_source_id,specimen_source_value,unit_source_value,'
'anatomic_site_source_value,disease_status_source_value',
'death':
'person_id,death_date,death_type_concept_id,cause_concept_id,cause_source_value,cause_source_concept_id',
'visit_occurrence':
'visit_occurrence_id,person_id,visit_concept_id,visit_start_date,visit_start_time,visit_end_date,visit_end_time,'
'visit_type_concept_id,provider_id,care_site_id,visit_source_value,visit_source_concept_id',
'visit_cost':
'visit_cost_id,visit_occurrence_id,currency_concept_id,paid_copay,paid_coinsurance,paid_toward_deductible,'
'paid_by_payer,paid_by_coordination_benefits,total_out_of_pocket,total_paid,payer_plan_period_id',
'condition_occurrence':
'condition_occurrence_id,person_id,condition_concept_id,condition_start_date,condition_end_date,condition_type_concept_id,'
'stop_reason,provider_id,visit_occurrence_id,condition_source_value,condition_source_concept_id',
'procedure_occurrence':
'procedure_occurrence_id,person_id,procedure_concept_id,procedure_date,procedure_type_concept_id,modifier_concept_id,'
'quantity,provider_id,visit_occurrence_id,procedure_source_value,procedure_source_concept_id,qualifier_source_value',
'procedure_cost':
'procedure_cost_id,procedure_occurrence_id,currency_concept_id,paid_copay,paid_coinsurance,paid_toward_deductible,'
'paid_by_payer,paid_by_coordination_benefits,total_out_of_pocket,total_paid,revenue_code_concept_id,payer_plan_period_id,revenue_code_source_value',
'drug_exposure':
'drug_exposure_id,person_id,drug_concept_id,drug_exposure_start_date,drug_exposure_end_date,drug_type_concept_id,'
'stop_reason,refills,quantity,days_supply,sig,route_concept_id,effective_drug_dose,dose_unit_concept_id,'
'lot_number,provider_id,visit_occurrence_id,drug_source_value,drug_source_concept_id,route_source_value,dose_unit_source_value',
'drug_cost':
'drug_cost_id,drug_exposure_id,currency_concept_id,paid_copay,paid_coinsurance,paid_toward_deductible,paid_by_payer,paid_by_coordination_of_benefits,'
'total_out_of_pocket,total_paid,ingredient_cost,dispensing_fee,average_wholesale_price,payer_plan_period_id',
'device_exposure':
'device_exposure_id,person_id,device_concept_id,device_exposure_start_date,device_exposure_end_date,device_type_concept_id,'
'unique_device_id,quantity,provider_id,visit_occurrence_id,device_source_value,device_source_concept_id',
'device_cost':
'device_cost_id,device_exposure_id,currency_concept_id,paid_copay,paid_coinsurance,paid_toward_deductible,'
'paid_by_payer,paid_by_coordination_benefits,total_out_of_pocket,total_paid,payer_plan_period_id',
'measurement_occurrence':
'measurement_id,person_id,measurement_concept_id,measurement_date,measurement_time,measurement_type_concept_id,operator_concept_id,'
'value_as_number,value_as_concept_id,unit_concept_id,range_low,range_high,provider_id,visit_occurrence_id,measurement_source_value,'
'measurement_source_concept_id,unit_source_value,value_source_value',
'location':
'location_id,address_1,address_2,city,state,zip,county,location_source_value',
'care_site':
'care_site_id,care_site_name,place_of_service_concept_id,location_id,care_site_source_value,place_of_service_source_value',
'provider':
'provider_id,provider_name,NPI,DEA,specialty_concept_id,care_site_id,year_of_birth,gender_concept_id,provider_source_value,'
'specialty_source_value,specialty_source_concept_id,gender_source_value,gender_source_concept_id',
'payer_plan_period':
'payer_plan_period_id,person_id,payer_plan_period_start_date,payer_plan_period_end_date,payer_source_value,'
'plan_source_value,family_source_value',
}
for token in sorted(file_control.descriptor_list(which='output')):
fd = file_control.get_Descriptor(token)
fd.write(headers[token] + '\n')
fd.increment_recs_written(1)
#---------------------------------
#Dead code
#---------------------------------
'''
def dump_beneficiary_records(fout, rec):
fout.write('-'*80+'\n')
for rec in ben.carrier_records:
fout.write('[carrier] {0}\n'.format(rec))
cc = CarrierClaim(rec)
fout.write('[CarrierClaim]\n')
fout.write('\t CLM_ID ={0}\n'.format(cc.CLM_ID))
fout.write('\t CLM_FROM_DT ={0}\n'.format(cc.CLM_FROM_DT))
fout.write('\t CLM_THRU_DT ={0}\n'.format(cc.CLM_THRU_DT))
for cd in cc.ICD9_DGNS_CD_list:
fout.write('\t\t {0} \n'.format(cd))
for ix,line in enumerate(cc.CarrierClaimLine_list):
fout.write('\t\t' + str(ix) + ' ' + '-'*30+'\n')
fout.write('\t\t PRF_PHYSN_NPI ={0} \n'.format(line.PRF_PHYSN_NPI))
fout.write('\t\t TAX_NUM ={0} \n'.format(line.TAX_NUM))
fout.write('\t\t HCPCS_CD ={0} \n'.format(line.HCPCS_CD))
fout.write('\t\t LINE_NCH_PMT_AMT ={0} \n'.format(line.LINE_NCH_PMT_AMT))
fout.write('\t\t LINE_BENE_PTB_DDCTBL_AMT ={0} \n'.format(line.LINE_BENE_PTB_DDCTBL_AMT))
fout.write('\t\t LINE_BENE_PRMRY_PYR_PD_AMT ={0} \n'.format(line.LINE_BENE_PRMRY_PYR_PD_AMT))
fout.write('\t\t LINE_COINSRNC_AMT ={0} \n'.format(line.LINE_COINSRNC_AMT))
fout.write('\t\t LINE_ALOWD_CHRG_AMT ={0} \n'.format(line.LINE_ALOWD_CHRG_AMT))
fout.write('\t\t LINE_PRCSG_IND_CD ={0} \n'.format(line.LINE_PRCSG_IND_CD))
fout.write('\t\t LINE_ICD9_DGNS_CD ={0} \n'.format(line.LINE_ICD9_DGNS_CD))
for rec in ben.inpatient_records:
fout.write('[inpatient] {0}\n'.format(rec))
ip = InpatientClaim(rec)
fout.write('[InpatientClaim]\n')
fout.write('\t CLM_ID ={0}\n'.format(ip.CLM_ID))
fout.write('\t SEGMENT ={0}\n'.format(ip.SEGMENT))
fout.write('\t CLM_FROM_DT ={0}\n'.format(ip.CLM_FROM_DT))
fout.write('\t ICD9_DGNS_CD_list \n')
for cd in ip.ICD9_DGNS_CD_list:
fout.write('\t\t {0} \n'.format(cd))
for rec in ben.outpatient_records:
fout.write('[outpatient] {0}\n'.format(rec))
op = OutpatientClaim(rec)
fout.write('[OutpatientClaim]\n')
fout.write('\t CLM_ID ={0}\n'.format(op.CLM_ID))
fout.write('\t SEGMENT ={0}\n'.format(op.SEGMENT))
fout.write('\t CLM_FROM_DT ={0}\n'.format(op.CLM_FROM_DT))
fout.write('\t ICD9_DGNS_CD_list \n')
for cd in op.ICD9_DGNS_CD_list:
fout.write('\t\t {0} \n'.format(cd))
for rec in ben.prescription_records:
fout.write('[prescription] {0}\n'.format(rec))
rx = PrescriptionDrug(rec)
fout.write('[PrescriptionDrug]\n')
fout.write('\t PDE_ID ={0}\n'.format(rx.PDE_ID))
fout.write('\t SRVC_DT ={0}\n'.format(rx.SRVC_DT))
fout.write('\t PROD_SRVC_ID ={0}\n'.format(rx.PROD_SRVC_ID))
fout.write('\t QTY_DSPNSD_NUM ={0}\n'.format(rx.QTY_DSPNSD_NUM))
fout.write('\t DAYS_SUPLY_NUM ={0}\n'.format(rx.DAYS_SUPLY_NUM))
fout.write('\t PTNT_PAY_AMT ={0}\n'.format(rx.PTNT_PAY_AMT))
fout.write('\t TOT_RX_CST_AMT ={0}\n'.format(rx.TOT_RX_CST_AMT))
'''
def process_beneficiary(bene):
bene.LoadClaimData(file_control)
write_person_record(bene)
write_payer_plan_period_record(bene)
write_location_record(bene)
determine_visits(bene)
write_observation_period_records(bene)
write_death_records(file_control.get_Descriptor('death'), bene,
death_type_concept_id=OMOP_CONSTANTS.DEATH_TYPE_PAYER_ENR_STATUS,
cause_source_concept_id=0)
write_drug_records(bene)
process_inpatient_records(bene)
process_outpatient_records(bene)
process_carrier_records(bene)
file_control.flush_all()
#---------------------------------
#Dead code
#---------------------------------
'''
def dump_source_concept_codes():
rec_types = {'icd9':0, 'icd9proc':0, 'hcpcs':0, 'cpt':0, 'ndc':0}
recs_in = recs_out = 0
code_file_out = os.path.join(BASE_OUTPUT_DIRECTORY, 'codes_1.txt')
icd9_codes = {}
hcpcs_codes = {}
cpt_codes = {}
ndc_codes = {}
with open(code_file_out, 'w') as fout_codes:
def write_code_rec(DESYNPUF_ID, record_number, record_type, code_type, code_value):
fout_codes.write("{0},{1},{2},{3},{4}\n".format(DESYNPUF_ID, record_number, record_type, code_type, code_value))
rec_types[code_type] += 1
def check_carrier_claims():
global recs_in
global recs_out
with open('/Data/OHDSI/CMS_SynPuf/DE1_1/DE1_0_2008_to_2010_Carrier_Claims_Sample_1AB.csv.srt','rU') as fin:
for raw_rec in fin:
recs_in += 1
if recs_in % 50000 == 0:
print 'carrier-claims, recs_in=', recs_in
# print '[{0}] {1}'.format(recs_in, rec[:-1])
# fout_codes.write('[{0}] {1}\n'.format(recs_in, raw_rec[:-1]))
# if recs_in > 100: break
if "DESYNPUF_ID" in raw_rec: continue
rec = CarrierClaim((raw_rec[:-1]).split(','))
for src_code in rec.ICD9_DGNS_CD_list:
if src_code in icd9_codes:
icd9_codes[src_code] += 1
else:
icd9_codes[src_code] = 1
fout_codes.write("{0},{1},cc,icd9-1,{2}\n".format(rec.DESYNPUF_ID, recs_in, src_code))
recs_out += 1
rec_types['icd9'] += 1
for src_code in rec.HCPCS_CD_list:
if src_code in hcpcs_codes:
hcpcs_codes[src_code] += 1
else:
hcpcs_codes[src_code] = 1
fout_codes.write("{0},{1},cc,hcpcs,{2}\n".format(rec.DESYNPUF_ID, recs_in, src_code))
recs_out += 1
rec_types['hcpcs'] += 1
for src_code in rec.LINE_ICD9_DGNS_CD_list:
if src_code in icd9_codes:
icd9_codes[src_code] += 1
else:
icd9_codes[src_code] = 1
fout_codes.write("{0},{1},cc,icd9,{2}\n".format(rec.DESYNPUF_ID, recs_in, src_code))
recs_out += 1
rec_types['icd9'] += 1
fout_codes.flush()
def check_inpatient_claims():
global recs_in
global recs_out
with open('/Data/OHDSI/CMS_SynPuf/DE1_1/DE1_0_2008_to_2010_Inpatient_Claims_Sample_1.csv','rU') as fin:
record_type = 'ip'
for raw_rec in fin:
recs_in += 1
if recs_in % 10000 == 0:
print 'inpatient-claims, recs_in=', recs_in
# print '[{0}] {1}'.format(recs_in, rec[:-1])
# fout_codes.write('[{0}] {1}\n'.format(recs_in, raw_rec[:-1]))
# if recs_in > 100: break
if "DESYNPUF_ID" in raw_rec: continue
rec = InpatientClaim((raw_rec[:-1]).split(','))
for src_code in rec.ICD9_DGNS_CD_list:
if src_code in icd9_codes:
icd9_codes[src_code] += 1
else:
icd9_codes[src_code] = 1
write_code_rec(rec.DESYNPUF_ID, recs_in, record_type, code_type='icd9', code_value=src_code)
recs_out += 1
for src_code in rec.HCPCS_CD_list:
if src_code in hcpcs_codes:
hcpcs_codes[src_code] += 1
else:
hcpcs_codes[src_code] = 1
write_code_rec(rec.DESYNPUF_ID, recs_in, record_type, code_type='hcpcs', code_value=src_code)
recs_out += 1
for src_code in rec.ICD9_PRCDR_CD_list:
if src_code in icd9_codes:
icd9_codes[src_code] += 1
else:
icd9_codes[src_code] = 1
write_code_rec(rec.DESYNPUF_ID, recs_in, record_type, code_type='icd9proc', code_value=src_code)
recs_out += 1
def check_outpatient_claims():
global recs_in
global recs_out
with open('/Data/OHDSI/CMS_SynPuf/DE1_1/DE1_0_2008_to_2010_Outpatient_Claims_Sample_1.csv','rU') as fin:
record_type = 'op'
for raw_rec in fin:
recs_in += 1
if recs_in % 10000 == 0:
print 'outpatient-claims, recs_in=', recs_in
# print '[{0}] {1}'.format(recs_in, rec[:-1])
# fout_codes.write('[{0}] {1}\n'.format(recs_in, raw_rec[:-1]))
# if recs_in > 100: break
if "DESYNPUF_ID" in raw_rec: continue
rec = OutpatientClaim((raw_rec[:-1]).split(','))
for src_code in rec.ICD9_DGNS_CD_list:
if src_code in icd9_codes:
icd9_codes[src_code] += 1
else:
icd9_codes[src_code] = 1
write_code_rec(rec.DESYNPUF_ID, recs_in, record_type, code_type='icd9', code_value=src_code)
recs_out += 1
for src_code in rec.HCPCS_CD_list:
if src_code in hcpcs_codes:
hcpcs_codes[src_code] += 1
else:
hcpcs_codes[src_code] = 1
write_code_rec(rec.DESYNPUF_ID, recs_in, record_type, code_type='hcpcs', code_value=src_code)
recs_out += 1
for src_code in rec.ICD9_PRCDR_CD_list:
if src_code in icd9_codes:
icd9_codes[src_code] += 1
else:
icd9_codes[src_code] = 1
write_code_rec(rec.DESYNPUF_ID, recs_in, record_type, code_type='icd9proc', code_value=src_code)
recs_out += 1
if len(rec.ADMTNG_ICD9_DGNS_CD) > 0:
src_code = rec.ADMTNG_ICD9_DGNS_CD
if src_code in icd9_codes:
icd9_codes[src_code] += 1
else:
icd9_codes[src_code] = 1
write_code_rec(rec.DESYNPUF_ID, recs_in, record_type, code_type='icd9', code_value=src_code)
recs_out += 1
def check_prescription_drug():
global recs_in
global recs_out
with open('/Data/OHDSI/CMS_SynPuf/DE1_1/DE1_0_2008_to_2010_Prescription_Drug_Events_Sample_1.csv','rU') as fin:
record_type = 'rx'
for raw_rec in fin:
recs_in += 1
if recs_in % 10000 == 0:
print 'prescription-drugs, recs_in=', recs_in
# print '[{0}] {1}'.format(recs_in, rec[:-1])
# fout_codes.write('[{0}] {1}\n'.format(recs_in, raw_rec[:-1]))
# if recs_in > 100: break
if "DESYNPUF_ID" in raw_rec: continue
rec = PrescriptionDrug((raw_rec[:-1]).split(','))
if len(rec.PROD_SRVC_ID) > 0:
ndc = rec.PROD_SRVC_ID
if ndc in ndc_codes:
ndc_codes[ndc] += 1
else:
ndc_codes[ndc] = 1
write_code_rec(rec.DESYNPUF_ID, recs_in, record_type, code_type='ndc', code_value=ndc)
recs_out += 1
check_carrier_claims()
check_inpatient_claims()
check_outpatient_claims()
check_prescription_drug()
code_summary_file = os.path.join(BASE_OUTPUT_DIRECTORY, 'code_summary.txt')
with open(code_summary_file, 'w') as fout:
for label, dct in [ ('ndc', ndc_codes),
('hcpcs', hcpcs_codes),
('cpt', cpt_codes),
('icd9', icd9_codes)]:
for code, recs in dct.items():
fout.write("{0},{1},{2}\n".format(label, code, recs))
print '--done: recs-in=',recs_in,', out=', recs_out
for type, count in rec_types.items():
print type,count
'''
#---------------------------------
# start of the program
#---------------------------------
if __name__ == '__main__':
if not os.path.exists(BASE_OUTPUT_DIRECTORY): os.makedirs(BASE_OUTPUT_DIRECTORY)
if not os.path.exists(BASE_ETL_CONTROL_DIRECTORY): os.makedirs(BASE_ETL_CONTROL_DIRECTORY)
parser = argparse.ArgumentParser(description='Enter Sample Number')
parser.add_argument('sample_number', type=int, default=1)
args = parser.parse_args()
current_sample_number = args.sample_number
SAMPLE_RANGE = [current_sample_number]
current_stats_filename = os.path.join(BASE_OUTPUT_DIRECTORY,'etl_stats.txt_{0}'.format(current_sample_number))
if os.path.exists(current_stats_filename): os.unlink(current_stats_filename)
log_stats('CMS_ETL starting')
log_stats('BASE_SYNPUF_INPUT_DIRECTORY =' + BASE_SYNPUF_INPUT_DIRECTORY)
log_stats('BASE_OUTPUT_DIRECTORY =' + BASE_OUTPUT_DIRECTORY)
log_stats('BASE_ETL_CONTROL_DIRECTORY =' + BASE_ETL_CONTROL_DIRECTORY)
file_control = FileControl(BASE_SYNPUF_INPUT_DIRECTORY, BASE_OUTPUT_DIRECTORY, SYNPUF_DIR_FORMAT, current_sample_number)
file_control.delete_all_output()
print '-'*80
print '-- all files present....'
print '-'*80
#Set up initial identifier counters
table_ids = Table_ID_Values()
table_ids_filename = os.path.join(BASE_ETL_CONTROL_DIRECTORY, 'etl_synpuf_last_table_ids.txt')
if os.path.exists(table_ids_filename):
table_ids.Load(table_ids_filename, log_stats)
# Build mappings between SynPUF codes and OMOP Vocabulary concept_ids
build_maps()
bene_dump_filename = os.path.join(BASE_OUTPUT_DIRECTORY,'beneficiary_dump_{0}.txt'.format(current_sample_number))
omop_unmapped_code_file = os.path.join(BASE_ETL_CONTROL_DIRECTORY,'unmapped_code_log.txt')
unmapped_log = open(omop_unmapped_code_file, 'a+')
# Build the object to manage access to all the files
write_header_records()
with open(bene_dump_filename,'w') as fout:
beneficiary_fd = file_control.get_Descriptor('beneficiary')
log_stats('-'*80)
log_stats('reading beneficiary file -> '+ beneficiary_fd.complete_pathname)
log_stats('last_person_id starting value -> ' + str(table_ids.last_person_id))
recs_in = 0
rec = ''
save_DESYNPUF_ID = ''
unique_DESYNPUF_ID_count = 0
bene = None
try:
with beneficiary_fd.open() as fin:
# Skip header record
rec = fin.readline()
for rec in fin:
recs_in += 1
if recs_in % 10000 == 0: print 'beneficiary recs_in: ', recs_in
rec = rec.split(',')
DESYNPUF_ID = rec[BENEFICIARY_SUMMARY_RECORD.DESYNPUF_ID]
SP_STATE_CODE = rec[BENEFICIARY_SUMMARY_RECORD.SP_STATE_CODE]
BENE_COUNTY_CD = rec[BENEFICIARY_SUMMARY_RECORD.BENE_COUNTY_CD]
# defensive check: the quoted DESYNPUF_ID field name appears in every file's header row
if '"DESYNPUF_ID"' in rec:
continue
# check for bene break
if DESYNPUF_ID != save_DESYNPUF_ID:
if not bene is None:
process_beneficiary(bene)
unique_DESYNPUF_ID_count += 1
save_DESYNPUF_ID = DESYNPUF_ID
bene = Beneficiary(DESYNPUF_ID, table_ids.last_person_id, SP_STATE_CODE, BENE_COUNTY_CD)
table_ids.last_person_id += 1
#accumulate for the current bene
bene.AddYearData(rec)
if not bene is None:
process_beneficiary(bene)
except BaseException:
print '** ERROR reading beneficiary file, record number ', recs_in, '\n record-> ', rec
raise
beneficiary_fd.increment_recs_read(recs_in)
log_stats('last_person_id ending value -> ' + str(table_ids.last_person_id))
log_stats('Done: total records read ={0}, unique IDs={1}'.format(recs_in, unique_DESYNPUF_ID_count))
file_control.close_all()
#- save look up tables & last-used-ids
persist_lookup_tables()
table_ids.Save(table_ids_filename)
log_stats('CMS_ETL done')
log_stats('Input Records------')
for token in sorted(file_control.descriptor_list(which='input')):
fd = file_control.get_Descriptor(token)
log_stats('\tFile: {0:50}, records_read={1:10}'.format(fd.token, fd.records_read))
log_stats('Output Records------')
for token in sorted(file_control.descriptor_list(which='output')):
fd = file_control.get_Descriptor(token)
if fd.records_written > 1:
log_stats('\tFile: {0:50}, records_written={1:10}'.format(fd.token, fd.records_written))
print '** done **'
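# Hedged sketch of the "beneficiary break" grouping performed manually in the
# loop above (the save_DESYNPUF_ID comparison). itertools.groupby expresses the
# same idea and relies on the same assumption the loop does, namely that the
# input file is sorted by DESYNPUF_ID. The helper below reuses this module's
# BENEFICIARY_SUMMARY_RECORD index and is not called by the existing code.
from itertools import groupby

def iter_beneficiary_groups(split_rows):
    # split_rows: an iterable of already-split CSV rows (lists of fields)
    key = lambda fields: fields[BENEFICIARY_SUMMARY_RECORD.DESYNPUF_ID]
    for desynpuf_id, yearly_rows in groupby(split_rows, key=key):
        yield desynpuf_id, list(yearly_rows)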
| 57.298246
| 212
| 0.573882
|
733fc63b5b68e86f5afad72da038cb88dff6e91e
| 2,244
|
py
|
Python
|
aria_plugin/executor.py
|
cloudify-cosmo/-cloudify-aria-plugin
|
5b51019f0981ecec31f684983db71cf4dbb76b9a
|
[
"Apache-2.0"
] | 1
|
2018-02-21T22:40:01.000Z
|
2018-02-21T22:40:01.000Z
|
aria_plugin/executor.py
|
cloudify-cosmo/-cloudify-aria-plugin
|
5b51019f0981ecec31f684983db71cf4dbb76b9a
|
[
"Apache-2.0"
] | null | null | null |
aria_plugin/executor.py
|
cloudify-cosmo/-cloudify-aria-plugin
|
5b51019f0981ecec31f684983db71cf4dbb76b9a
|
[
"Apache-2.0"
] | null | null | null |
########
# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread
from aria.orchestrator import execution_preparer
from aria.orchestrator.workflows.core import engine
from aria.orchestrator.workflows.executor import process
from aria.cli import logger
from .exceptions import AriaWorkflowError
def execute(env, workflow_name):
ctx = execution_preparer.ExecutionPreparer(
env.model_storage,
env.resource_storage,
env.plugin_manager,
env.service,
workflow_name
).prepare()
eng = engine.Engine(
process.ProcessExecutor(env.plugin_manager, strict_loading=False)
)
# Since we want a live log feed, we need to execute the workflow while
# simultaneously printing the logs into the CFY logger. This thread executes the
# workflow, while the main thread writes the logs (a generic sketch of this
# pattern appears after this function).
thread = Thread(target=eng.execute, kwargs=dict(ctx=ctx))
thread.start()
log_iterator = logger.ModelLogIterator(env.model_storage, ctx.execution.id)
while thread.is_alive():
for log in log_iterator:
leveled_log = getattr(env.ctx_logger, log.level.lower())
leveled_log(log)
if log.traceback:
leveled_log(log.traceback)
thread.join(0.1)
aria_execution = ctx.execution
if aria_execution.status != aria_execution.SUCCEEDED:
raise AriaWorkflowError(
'ARIA workflow {aria_execution.workflow_name} was not successful\n'
'status: {aria_execution.status}\n'
'error message: {aria_execution.error}'
.format(aria_execution=aria_execution))
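# Hedged, generic restatement of the thread-and-poll pattern used by execute()
# above. `work` and `drain_logs` are placeholder callables for illustration; they
# are not aria or cloudify APIs.
def run_with_live_logs(work, drain_logs, poll_interval=0.1):
    worker = Thread(target=work)
    worker.start()
    while worker.is_alive():
        drain_logs()              # print whatever has been logged so far
        worker.join(poll_interval)
    drain_logs()                  # flush logs emitted after the last poll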
| 36.193548
| 79
| 0.707219
|
1b821c0a87f883a884e75b6bd5fdd4121c509db8
| 248
|
py
|
Python
|
bettercache/urls.py
|
ironfroggy/django-better-cache
|
5350e8c646cef1c1ca74eab176f856ddd9eaf5c3
|
[
"MIT"
] | 2
|
2015-07-03T09:11:12.000Z
|
2019-10-20T18:37:46.000Z
|
bettercache/urls.py
|
ironfroggy/django-better-cache
|
5350e8c646cef1c1ca74eab176f856ddd9eaf5c3
|
[
"MIT"
] | 4
|
2016-02-04T04:17:44.000Z
|
2021-06-10T20:22:46.000Z
|
bettercache/urls.py
|
ironfroggy/django-better-cache
|
5350e8c646cef1c1ca74eab176f856ddd9eaf5c3
|
[
"MIT"
] | null | null | null |
from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
url(r'', 'bettercache.views.cache_view'),
)
| 24.8
| 60
| 0.741935
|
f9e8e60714f7c20219239cfa03972299cfd3d9cd
| 4,094
|
py
|
Python
|
test/integration/test_types.py
|
motey/py2neo
|
2e46bbf4d622f53282e796ffc521fc4bc6d0b60d
|
[
"Apache-2.0"
] | 545
|
2017-01-06T07:27:01.000Z
|
2021-06-10T01:08:23.000Z
|
test/integration/test_types.py
|
motey/py2neo
|
2e46bbf4d622f53282e796ffc521fc4bc6d0b60d
|
[
"Apache-2.0"
] | 370
|
2016-12-25T15:47:37.000Z
|
2021-06-17T06:09:43.000Z
|
test/integration/test_types.py
|
motey/py2neo
|
2e46bbf4d622f53282e796ffc521fc4bc6d0b60d
|
[
"Apache-2.0"
] | 133
|
2016-12-21T19:39:28.000Z
|
2021-05-26T14:26:02.000Z
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2021, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging.version import Version
from interchange.geo import CartesianPoint, WGS84Point
from interchange.time import Date, Time, DateTime, Duration
from pytest import skip
from py2neo.data import Node
def test_null(graph):
i = None
o = graph.evaluate("RETURN $x", x=i)
assert o is i
def test_true(graph):
i = True
o = graph.evaluate("RETURN $x", x=i)
assert o is i
def test_false(graph):
i = False
o = graph.evaluate("RETURN $x", x=i)
assert o is i
def test_int(graph):
for i in range(-128, 128):
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_float(graph):
for i in range(-128, 128):
f = float(i) + 0.5
o = graph.evaluate("RETURN $x", x=f)
assert o == f
def test_string(graph):
i = u"hello, world"
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_bytes(graph):
i = bytearray([65, 66, 67])
o = graph.evaluate("RETURN $x", x=i)
# The values are coerced to bytearray before comparison
# because HTTP does not support byte parameters and instead
# returns such values as lists of integers.
assert bytearray(o) == bytearray(i)
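# Hedged, standalone illustration of the normalisation above (not part of the
# test suite): HTTP transports may hand byte parameters back as plain lists of
# integers, so both sides are wrapped in bytearray() before comparing.
def _bytes_normalisation_illustration():
    assert bytearray([65, 66, 67]) == bytearray(b'ABC')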
def test_list(graph):
i = [65, 66, 67]
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_dict(graph):
i = {"one": 1, "two": 2}
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_node(graph):
i = Node("Person", name="Alice")
o = graph.evaluate("CREATE (a:Person {name: 'Alice'}) RETURN a")
assert o.labels == i.labels
assert dict(o) == dict(i)
def test_relationship(graph):
o = graph.evaluate("CREATE ()-[r:KNOWS {since: 1999}]->() RETURN r")
assert type(o).__name__ == "KNOWS"
assert dict(o) == {"since": 1999}
def test_path(graph):
o = graph.evaluate("CREATE p=(:Person {name: 'Alice'})-[:KNOWS]->(:Person {name: 'Bob'}) RETURN p")
assert len(o) == 1
assert o.start_node.labels == {"Person"}
assert dict(o.start_node) == {"name": "Alice"}
assert type(o.relationships[0]).__name__ == "KNOWS"
assert o.end_node.labels == {"Person"}
assert dict(o.end_node) == {"name": "Bob"}
def skip_if_no_temporal_support(graph):
connector = graph.service.connector
if graph.service.kernel_version < Version("3.4"):
skip("Temporal type tests are only valid for Neo4j 3.4+")
if connector.profile.protocol != "bolt":
skip("Temporal type tests are only valid for Bolt connectors")
def test_date(graph):
skip_if_no_temporal_support(graph)
i = Date(2014, 8, 6)
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_time(graph):
skip_if_no_temporal_support(graph)
i = Time(12, 34, 56.789)
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_date_time(graph):
skip_if_no_temporal_support(graph)
i = DateTime(2014, 8, 6, 12, 34, 56.789)
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_duration(graph):
skip_if_no_temporal_support(graph)
i = Duration(months=1, days=2, seconds=3)
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_cartesian_point(graph):
skip_if_no_temporal_support(graph)
i = CartesianPoint((12.34, 56.78))
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_wgs84_point(graph):
skip_if_no_temporal_support(graph)
i = WGS84Point((12.34, 56.78))
o = graph.evaluate("RETURN $x", x=i)
assert o == i
| 25.748428
| 103
| 0.646556
|
5272ee425faaa0b0d3c7d69bd0a15cfdbf91612a
| 3,420
|
py
|
Python
|
b3j0f/conf/parser/resolver/lang/py.py
|
b3j0f/configuration
|
18dd6d5d6560f9b202793739e2330a2181163511
|
[
"MIT"
] | 3
|
2016-02-18T18:58:24.000Z
|
2017-03-14T08:40:01.000Z
|
b3j0f/conf/parser/resolver/lang/py.py
|
b3j0f/configuration
|
18dd6d5d6560f9b202793739e2330a2181163511
|
[
"MIT"
] | 1
|
2016-02-18T15:27:35.000Z
|
2016-04-02T10:36:43.000Z
|
b3j0f/conf/parser/resolver/lang/py.py
|
b3j0f/configuration
|
18dd6d5d6560f9b202793739e2330a2181163511
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2015 Jonathan Labéjof <jonathan.labejof@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------
"""python expression resolver."""
__all__ = ['resolvepy']
from b3j0f.utils.runtime import safe_eval
from b3j0f.utils.path import lookup
from re import compile as re_compile, sub
from ..registry import register
from ..core import (
DEFAULT_BESTEFFORT, DEFAULT_SAFE, DEFAULT_TOSTR, DEFAULT_SCOPE
)
MISSING_NAME = r'\'(?P<name>\w+)\''
MISSING_NAME_REGEX = re_compile(MISSING_NAME)
MISSING_VARIABLE = r'(?P<name>(\w+\.)*{0}(\.\w+)*)'
def genrepl(scope):
"""Replacement function with specific scope."""
def repl(match):
"""Internal replacement function."""
name = match.group('name')
value = lookup(name, scope=scope)
result = name.replace('.', '_')
scope[result] = value
return result
return repl
@register('py')
@register('python')
def resolvepy(
expr,
safe=DEFAULT_SAFE, tostr=DEFAULT_TOSTR, scope=DEFAULT_SCOPE,
besteffort=DEFAULT_BESTEFFORT
):
"""Resolve input expression.
:param str expr: configuration expression to resolve in this language.
:param bool safe: safe run execution context (True by default).
:param bool tostr: format the result.
:param dict scope: execution scope (contains references to expression
objects).
:param bool besteffort: try to resolve unknown variable name with execution
runtime."""
result = None
_eval = safe_eval if safe else eval
_expr = expr
scope = {} if scope is None else scope.copy()
while True:
try:
result = _eval(_expr, scope)
except (AttributeError, NameError) as nex:
if not besteffort:
raise
arg = nex.args[0]
missing = MISSING_NAME_REGEX.findall(arg)[-1]
try:
_expr = sub(
MISSING_VARIABLE.format(missing),
genrepl(scope=scope),
_expr
)
except ImportError:
raise nex
else:
break
if tostr:
result = str(result)
return result
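# Hedged usage sketch. It assumes b3j0f.utils is installed (the imports above
# require it) and that its safe_eval accepts simple arithmetic; `value` is an
# illustrative scope entry, not part of this module.
if __name__ == '__main__':
    print(resolvepy('1 + 1', tostr=True))                          # expected: '2'
    print(resolvepy('value * 3', scope={'value': 2}, tostr=True))  # expected: '6'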
| 28.264463
| 79
| 0.633918
|
d273bb8e5920b5c3eb8fbb5b56d51f816f31ba7a
| 120
|
py
|
Python
|
bids/grabbids/__init__.py
|
raamana/pybids
|
431cc5db1720cb911b8b64dab59db7f9ada23469
|
[
"MIT"
] | null | null | null |
bids/grabbids/__init__.py
|
raamana/pybids
|
431cc5db1720cb911b8b64dab59db7f9ada23469
|
[
"MIT"
] | 1
|
2019-07-10T11:51:56.000Z
|
2019-07-10T11:51:56.000Z
|
bids/grabbids/__init__.py
|
raamana/pybids
|
431cc5db1720cb911b8b64dab59db7f9ada23469
|
[
"MIT"
] | null | null | null |
from .bids_layout import BIDSLayout
from .bids_validator import BIDSValidator
__all__ = ["BIDSLayout", "BIDSValidator"]
| 30
| 41
| 0.816667
|
0f62d9c8dfd4c92ddd934e4047c5fecb9bd22793
| 2,977
|
py
|
Python
|
dataset/generate_synthetic.py
|
The-RunningSnail/FactorGCN-PyTorch
|
8e926689273cb1b2002c062f1787f4167eae61bd
|
[
"MIT"
] | 39
|
2020-10-05T13:31:01.000Z
|
2022-03-13T09:25:21.000Z
|
dataset/generate_synthetic.py
|
The-RunningSnail/FactorGCN-PyTorch
|
8e926689273cb1b2002c062f1787f4167eae61bd
|
[
"MIT"
] | 1
|
2021-07-23T15:04:09.000Z
|
2021-07-23T15:04:09.000Z
|
dataset/generate_synthetic.py
|
The-RunningSnail/FactorGCN-PyTorch
|
8e926689273cb1b2002c062f1787f4167eae61bd
|
[
"MIT"
] | 6
|
2020-11-23T03:32:47.000Z
|
2021-12-26T06:28:51.000Z
|
import os, collections
import networkx as nx
import numpy as np
import torch, dgl
import random, pickle
class synthetic_graph_cls:
# generate several graphs with different patterns and take their union;
# each label indicates whether the union graph contains a specific pattern
def __init__(self, args):
self.args = args
self.saved_file = f'./data/synthetic/synthetic_graph_cls_data_{args.num_factors}.pkl'
os.makedirs(os.path.dirname(self.saved_file), exist_ok=True)
def gen_union_graph(self, graph_size=15, num_graph=20000):
if os.path.isfile(self.saved_file):
print(f"load synthetic graph cls data from {self.saved_file}")
with open(self.saved_file, 'rb') as f:
return pickle.load(f)
graph_list = synthetic_graph_cls.get_graph_list(self.args.num_factors)
samples = []
for _ in range(num_graph):
union_adj = np.zeros((graph_size, graph_size))
factor_adjs = []
labels = np.zeros((1, len(graph_list)))
id_index = list(range(len(graph_list)))
random.shuffle(id_index)
for i in range((len(id_index)+1)//2): # get random half adj
id = id_index[i]
labels[0, id] = 1
single_adj = graph_list[id]
padded_adj = np.zeros((graph_size, graph_size))
padded_adj[:single_adj.shape[0], :single_adj.shape[0]] = single_adj
random_index = np.arange(padded_adj.shape[0])
np.random.shuffle(random_index)
padded_adj = padded_adj[random_index]
padded_adj = padded_adj[:, random_index]
union_adj += padded_adj
factor_adjs.append((padded_adj, id))
g = dgl.DGLGraph()
g.from_networkx(nx.DiGraph(union_adj))
g = dgl.transform.add_self_loop(g)
g.ndata['feat'] = torch.tensor(union_adj)
labels = torch.tensor(labels)
samples.append((g, labels, factor_adjs))
with open(self.saved_file, 'wb') as f:
pickle.dump(samples, f)
print(f"dataset saved to {self.saved_file}")
return samples
@staticmethod
def get_graph_list(num_factors):
graph_list = []
# 2, 3 bipartite graph
g = nx.turan_graph(n=5, r=2)
graph_list.append(nx.to_numpy_array(g))
g = nx.house_x_graph()
graph_list.append(nx.to_numpy_array(g))
g = nx.balanced_tree(r=3, h=2)
graph_list.append(nx.to_numpy_array(g))
g = nx.grid_2d_graph(m=3, n=3)
graph_list.append(nx.to_numpy_array(g))
g = nx.hypercube_graph(n=3)
graph_list.append(nx.to_numpy_array(g))
g = nx.octahedral_graph()
graph_list.append(nx.to_numpy_array(g))
return graph_list[:num_factors]
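# Hedged usage sketch. argparse.Namespace stands in for the real CLI args object
# (only the num_factors attribute is read above); running this needs dgl and
# networkx and will write a pickle under ./data/synthetic/.
if __name__ == '__main__':
    from argparse import Namespace
    dataset = synthetic_graph_cls(Namespace(num_factors=4))
    samples = dataset.gen_union_graph(graph_size=15, num_graph=10)
    g, labels, factor_adjs = samples[0]
    print(labels.shape)  # torch.Size([1, 4]): one indicator per candidate factor graph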
| 35.86747
| 93
| 0.586161
|
0e6a7e3681627f95f521a45e505d7f11c7d8fc90
| 1,995
|
py
|
Python
|
bigip-decode.py
|
yuboliu/f5-bigip-decode
|
deb945309c1f5e8c1705ed7de1cf1fee6de9b1c1
|
[
"MIT"
] | 2
|
2021-04-10T18:17:29.000Z
|
2022-01-31T16:32:47.000Z
|
bigip-decode.py
|
yuboliu/f5-bigip-decode
|
deb945309c1f5e8c1705ed7de1cf1fee6de9b1c1
|
[
"MIT"
] | null | null | null |
bigip-decode.py
|
yuboliu/f5-bigip-decode
|
deb945309c1f5e8c1705ed7de1cf1fee6de9b1c1
|
[
"MIT"
] | 1
|
2021-09-29T00:41:32.000Z
|
2021-09-29T00:41:32.000Z
|
#!/usr/bin/env python3
from optparse import OptionParser
from sys import exit
# Decodes F5 BigIP cookies
# Based on instructions at https://support.f5.com/csp/article/K6917
# Usage: bigip-decode.py -c 0000000000.00000.000
# Where -c is the F5 BigIP cookie
# October 2020, Ken Mininger, kmininger@us.ibm.com
def get_port(c_port) -> str:
# convert the second part of the cookie to hex
hh_port = (hex((int(c_port))))
# reverse the byte order
r_port = reverse_bytes(hh_port)
# turn it back into a hex number
r_port2 = "{0}".format((r_port.replace('0x', '')))
return str(int(r_port2, 16))
def get_host(c_host) -> str:
# convert the first part of the cookie to hex
hh_host = (hex((int(c_host)))[2:])
# reverse the byte order
r_host = reverse_bytes(hh_host)
# make a list of pairs of bytes from above
dh_host = [r_host[i:i + 2] for i in range(0, len(r_host), 2)]
# convert those reversed bytes to decimal
xhosts = [int(dh_host[pos], 16) for pos in range(len(dh_host))]
# print out the ip address
return '.'.join([str(octet) for octet in xhosts])
def reverse_bytes(payload) -> str:
return "".join(reversed([payload[i:i + 2] for i in range(0, len(payload), 2)]))
def main():
parser = OptionParser()
parser.add_option("-c", "--cookie", type="string")
(options, args) = parser.parse_args()
if not options.cookie:
parser.error("Cookie not provided. Please provide the F5 BigIP cookie.")
exit(1)  # defensive; parser.error() already exits with status 2
# initial value of cookie
cookie = options.cookie
# echo the cookie
print("F5 BigIP cookie: ", cookie)
# split the cookie into 2 parts, splitting at the '.' and ignoring the right-most value
[c_host, c_port] = cookie.split('.')[:2]
host = get_host(c_host)
port = get_port(c_port)
print("Decoded cookie (IP address:Port): {0}".format(str(":".join([host, port]))))
if __name__ == '__main__':
main()
| 33.25
| 92
| 0.637594
|
3e35c9440dedf6c4b781474469b5a60024d8da05
| 26,553
|
py
|
Python
|
wandb/wandb_run.py
|
guysmoilov/client
|
28b818c5302e935ba269b9d4480e97903c28b8b2
|
[
"MIT"
] | null | null | null |
wandb/wandb_run.py
|
guysmoilov/client
|
28b818c5302e935ba269b9d4480e97903c28b8b2
|
[
"MIT"
] | null | null | null |
wandb/wandb_run.py
|
guysmoilov/client
|
28b818c5302e935ba269b9d4480e97903c28b8b2
|
[
"MIT"
] | null | null | null |
import datetime
import logging
import os
import socket
import json
import yaml
import fnmatch
import tempfile
import shutil
import glob
from sentry_sdk import configure_scope
from . import env
import wandb
from wandb import history
from wandb import jsonlfile
from wandb import summary
from wandb import meta
from wandb import typedtable
from wandb import util
from wandb.core import termlog
from wandb import data_types
from wandb.file_pusher import FilePusher
from wandb.apis import InternalApi, CommError
from wandb.wandb_config import Config
import six
from six.moves import input
from six.moves import urllib
import atexit
import sys
from watchdog.utils.dirsnapshot import DirectorySnapshot
RESUME_FNAME = 'wandb-resume.json'
HISTORY_FNAME = 'wandb-history.jsonl'
EVENTS_FNAME = 'wandb-events.jsonl'
CONFIG_FNAME = 'config.yaml'
USER_CONFIG_FNAME = 'config.json'
SUMMARY_FNAME = 'wandb-summary.json'
METADATA_FNAME = 'wandb-metadata.json'
DESCRIPTION_FNAME = 'description.md'
class Run(object):
def __init__(self, run_id=None, mode=None, dir=None, group=None, job_type=None,
config=None, sweep_id=None, storage_id=None, description=None, resume=None,
program=None, args=None, wandb_dir=None, tags=None, name=None, notes=None,
api=None):
"""Create a Run.
Arguments:
description (str): This is the old, deprecated style of description: the run's
name followed by a newline, followed by multiline notes.
"""
# self.storage_id is "id" in GQL.
self.storage_id = storage_id
# self.id is "name" in GQL.
self.id = run_id if run_id else util.generate_id()
# self._name is "display_name" in GQL.
self._name = None
self.notes = None
self.resume = resume if resume else 'never'
self.mode = mode if mode else 'run'
self.group = group
self.job_type = job_type
self.pid = os.getpid()
self.resumed = False # we set resume when history is first accessed
if api:
if api.current_run_id and api.current_run_id != self.id:
raise RuntimeError('Api object passed to run {} is already being used by run {}'.format(
self.id, api.current_run_id))
else:
api.set_current_run_id(self.id)
self._api = api
if dir is None:
self._dir = run_dir_path(self.id, dry=self.mode == 'dryrun')
else:
self._dir = os.path.abspath(dir)
self._mkdir()
# self.name and self.notes used to be combined into a single field.
# Now if name and notes don't have their own values, we get them from
# self._name_and_description, but we don't update description.md
# if they're changed. This is to discourage relying on self.description
# and self._name_and_description so that we can drop them later.
#
# This needs to be set before name and notes because name and notes may
# influence it. They have higher precedence.
self._name_and_description = None
if description:
wandb.termwarn('Run.description is deprecated. Please use wandb.init(notes="long notes") instead.')
self._name_and_description = description
elif os.path.exists(self.description_path):
with open(self.description_path) as d_file:
self._name_and_description = d_file.read()
if name is not None:
self.name = name
if notes is not None:
self.notes = notes
self.program = program
if not self.program:
try:
import __main__
self.program = __main__.__file__
except (ImportError, AttributeError):
# probably `python -c`, an embedded interpreter or something
self.program = '<python with no main file>'
self.args = args
if self.args is None:
self.args = sys.argv[1:]
self.wandb_dir = wandb_dir
with configure_scope() as scope:
self.project = self.api.settings("project")
scope.set_tag("project", self.project)
scope.set_tag("entity", self.entity)
try:
scope.set_tag("url", self.get_url(self.api, network=False)) # TODO: Move this somewhere outside of init
except CommError:
pass
if self.resume == "auto":
util.mkdir_exists_ok(wandb.wandb_dir())
resume_path = os.path.join(wandb.wandb_dir(), RESUME_FNAME)
with open(resume_path, "w") as f:
f.write(json.dumps({"run_id": self.id}))
if config is None:
self.config = Config()
else:
self.config = config
# socket server, currently only available in headless mode
self.socket = None
self.tags = tags if tags else []
self.sweep_id = sweep_id
self._history = None
self._events = None
self._summary = None
self._meta = None
self._run_manager = None
self._jupyter_agent = None
@property
def api(self):
if self._api is None:
self._api = InternalApi()
self._api.set_current_run_id(self.id)
return self._api
@property
def entity(self):
return self.api.settings('entity')
@entity.setter
def entity(self, entity):
self.api.set_setting("entity", entity)
@property
def path(self):
# TODO: there's an edge case where self.entity is None
return "/".join([str(self.entity), self.project_name(), self.id])
def _init_jupyter_agent(self):
from wandb.jupyter import JupyterAgent
self._jupyter_agent = JupyterAgent()
def _stop_jupyter_agent(self):
self._jupyter_agent.stop()
def send_message(self, options):
""" Sends a message to the wandb process changing the policy
of saved files. This is primarily used internally by wandb.save
"""
if not options.get("save_policy") and not options.get("tensorboard"):
raise ValueError(
"Only configuring save_policy and tensorboard is supported")
if self.socket:
# In the user process
self.socket.send(options)
elif self._jupyter_agent:
# Running in jupyter
self._jupyter_agent.start()
if options.get("save_policy"):
self._jupyter_agent.rm.update_user_file_policy(
options["save_policy"])
elif options.get("tensorboard"):
self._jupyter_agent.rm.start_tensorboard_watcher(
options["tensorboard"]["logdir"], options["tensorboard"]["save"])
elif self._run_manager:
# Running in the wandb process, used for tfevents saving
if options.get("save_policy"):
self._run_manager.update_user_file_policy(
options["save_policy"])
else:
wandb.termerror(
"wandb.init hasn't been called, can't configure run")
@classmethod
def from_environment_or_defaults(cls, environment=None):
"""Create a Run object taking values from the local environment where possible.
The run ID comes from WANDB_RUN_ID or is randomly generated.
The run mode ("dryrun", or "run") comes from WANDB_MODE or defaults to "dryrun".
The run directory comes from WANDB_RUN_DIR or is generated from the run ID.
The Run will have a .config attribute but its run directory won't be set by
default.
"""
if environment is None:
environment = os.environ
run_id = environment.get(env.RUN_ID)
resume = environment.get(env.RESUME)
storage_id = environment.get(env.RUN_STORAGE_ID)
mode = environment.get(env.MODE)
api = InternalApi(environ=environment)
disabled = api.disabled()
if not mode and disabled:
mode = "dryrun"
elif disabled and mode != "dryrun":
wandb.termwarn(
"WANDB_MODE is set to run, but W&B was disabled. Run `wandb on` to remove this message")
elif disabled:
wandb.termlog(
'W&B is disabled in this directory. Run `wandb on` to enable cloud syncing.')
group = environment.get(env.RUN_GROUP)
job_type = environment.get(env.JOB_TYPE)
run_dir = environment.get(env.RUN_DIR)
sweep_id = environment.get(env.SWEEP_ID)
program = environment.get(env.PROGRAM)
description = environment.get(env.DESCRIPTION)
name = environment.get(env.NAME)
notes = environment.get(env.NOTES)
args = env.get_args(env=environment)
wandb_dir = env.get_dir(env=environment)
tags = env.get_tags(env=environment)
# TODO(adrian): should pass environment into here as well.
config = Config.from_environment_or_defaults()
run = cls(run_id, mode, run_dir,
group, job_type, config,
sweep_id, storage_id, program=program, description=description,
args=args, wandb_dir=wandb_dir, tags=tags,
name=name, notes=notes,
resume=resume, api=api)
return run
@classmethod
def from_directory(cls, directory, project=None, entity=None, run_id=None, api=None, ignore_globs=None):
api = api or InternalApi()
run_id = run_id or util.generate_id()
run = Run(run_id=run_id, dir=directory)
run_name = None
project_from_meta = None
snap = DirectorySnapshot(directory)
meta = next((p for p in snap.paths if METADATA_FNAME in p), None)
if meta:
meta = json.load(open(meta))
run_name = meta.get("name")
project_from_meta = meta.get("project")
project = project or project_from_meta or api.settings(
"project") or run.auto_project_name(api=api)
if project is None:
raise ValueError("You must specify project")
api.set_current_run_id(run_id)
api.set_setting("project", project)
if entity:
api.set_setting("entity", entity)
res = api.upsert_run(name=run_id, project=project, entity=entity, display_name=run_name)
entity = res["project"]["entity"]["name"]
wandb.termlog("Syncing {} to:".format(directory))
try:
wandb.termlog(res["displayName"] + " " + run.get_url(api))
except CommError as e:
wandb.termwarn(e.message)
file_api = api.get_file_stream_api()
file_api.start()
paths = [os.path.relpath(abs_path, directory)
for abs_path in snap.paths if os.path.isfile(abs_path)]
if ignore_globs:
paths = set(paths)
for g in ignore_globs:
paths = paths - set(fnmatch.filter(paths, g))
paths = list(paths)
run_update = {"id": res["id"]}
tfevents = sorted([p for p in snap.paths if ".tfevents." in p])
history = next((p for p in snap.paths if HISTORY_FNAME in p), None)
event = next((p for p in snap.paths if EVENTS_FNAME in p), None)
config = next((p for p in snap.paths if CONFIG_FNAME in p), None)
user_config = next(
(p for p in snap.paths if USER_CONFIG_FNAME in p), None)
summary = next((p for p in snap.paths if SUMMARY_FNAME in p), None)
if history:
wandb.termlog("Uploading history metrics")
file_api.stream_file(history)
snap.paths.remove(history)
elif len(tfevents) > 0:
from wandb import tensorflow as wbtf
wandb.termlog("Found tfevents file, converting...")
summary = {}
for path in tfevents:
filename = os.path.basename(path)
namespace = path.replace(filename, "").replace(directory, "").strip(os.sep)
summary.update(wbtf.stream_tfevents(path, file_api, run, namespace=namespace))
for path in glob.glob(os.path.join(directory, "media/**/*"), recursive=True):
if os.path.isfile(path):
paths.append(path)
else:
wandb.termerror(
"No history or tfevents files found, only syncing files")
if event:
file_api.stream_file(event)
snap.paths.remove(event)
if config:
run_update["config"] = util.load_yaml(
open(config))
elif user_config:
# TODO: half-baked support for config.json
run_update["config"] = {k: {"value": v}
for k, v in six.iteritems(user_config)}
if isinstance(summary, dict):
#TODO: summary should already have data_types converted here...
run_update["summary_metrics"] = util.json_dumps_safer(summary)
elif summary:
run_update["summary_metrics"] = open(summary).read()
if meta:
if meta.get("git"):
run_update["commit"] = meta["git"].get("commit")
run_update["repo"] = meta["git"].get("remote")
if meta.get("host"):
run_update["host"] = meta["host"]
run_update["program_path"] = meta["program"]
run_update["job_type"] = meta.get("jobType")
run_update["notes"] = meta.get("notes")
else:
run_update["host"] = run.host
wandb.termlog("Updating run and uploading files")
api.upsert_run(**run_update)
pusher = FilePusher(api)
for k in paths:
path = os.path.abspath(os.path.join(directory, k))
pusher.update_file(k, path)
pusher.file_changed(k, path)
pusher.finish()
pusher.print_status()
file_api.finish(0)
# Remove temporary media images generated from tfevents
if history is None and os.path.exists(os.path.join(directory, "media")):
shutil.rmtree(os.path.join(directory, "media"))
wandb.termlog("Finished!")
return run
def auto_project_name(self, api):
# if we're in git, set project name to git repo name + relative path within repo
root_dir = api.git.root_dir
if root_dir is None:
return None
repo_name = os.path.basename(root_dir)
program = self.program
if program is None:
return repo_name
if not os.path.isabs(program):
program = os.path.join(os.curdir, program)
prog_dir = os.path.dirname(os.path.abspath(program))
if not prog_dir.startswith(root_dir):
return repo_name
project = repo_name
sub_path = os.path.relpath(prog_dir, root_dir)
if sub_path != '.':
project += '-' + sub_path
return project.replace(os.sep, '_')
def save(self, id=None, program=None, summary_metrics=None, num_retries=None, api=None):
api = api or self.api
project = api.settings('project')
if project is None:
project = self.auto_project_name(api)
upsert_result = api.upsert_run(id=id or self.storage_id, name=self.id, commit=api.git.last_commit,
project=project, entity=self.entity,
group=self.group, tags=self.tags if len(
self.tags) > 0 else None,
config=self.config.as_dict(), description=self._name_and_description, host=self.host,
program_path=program or self.program, repo=api.git.remote_url, sweep_name=self.sweep_id,
display_name=self._name, notes=self.notes,
summary_metrics=summary_metrics, job_type=self.job_type, num_retries=num_retries)
self.storage_id = upsert_result['id']
self.name = upsert_result.get('displayName')
return upsert_result
def set_environment(self, environment=None):
"""Set environment variables needed to reconstruct this object inside
a user scripts (eg. in `wandb.init()`).
"""
if environment is None:
environment = os.environ
environment[env.RUN_ID] = self.id
environment[env.RESUME] = self.resume
if self.storage_id:
environment[env.RUN_STORAGE_ID] = self.storage_id
environment[env.MODE] = self.mode
environment[env.RUN_DIR] = self.dir
if self.group:
environment[env.RUN_GROUP] = self.group
if self.job_type:
environment[env.JOB_TYPE] = self.job_type
if self.wandb_dir:
environment[env.DIR] = self.wandb_dir
if self.sweep_id is not None:
environment[env.SWEEP_ID] = self.sweep_id
if self.program is not None:
environment[env.PROGRAM] = self.program
if self.args is not None:
environment[env.ARGS] = json.dumps(self.args)
if self._name_and_description is not None:
environment[env.DESCRIPTION] = self._name_and_description
if self._name is not None:
environment[env.NAME] = self._name
if self.notes is not None:
environment[env.NOTES] = self.notes
if len(self.tags) > 0:
environment[env.TAGS] = ",".join(self.tags)
return environment
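# Hedged illustration of the round trip between set_environment() and
# from_environment_or_defaults() above; a sketch, not a documented wandb API
# contract:
#   env_copy = {}
#   run.set_environment(env_copy)                    # writes WANDB_RUN_ID, WANDB_MODE, ...
#   clone = Run.from_environment_or_defaults(env_copy)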
def _mkdir(self):
util.mkdir_exists_ok(self._dir)
def project_name(self, api=None):
api = api or self.api
return api.settings('project') or self.auto_project_name(api) or "uncategorized"
def _generate_query_string(self, api, params=None):
"""URL encodes dictionary of params"""
params = params or {}
if str(api.settings().get('anonymous', 'false')) == 'true':
params['apiKey'] = api.api_key
if not params:
return ""
return '?' + urllib.parse.urlencode(params)
def _load_entity(self, api, network):
if not api.api_key:
raise CommError("Can't find API key, run wandb login or set WANDB_API_KEY")
entity = api.settings('entity')
if network:
if api.settings('entity') is None:
viewer = api.viewer()
if viewer.get('entity'):
api.set_setting('entity', viewer['entity'])
entity = api.settings('entity')
if not entity:
# This can happen on network failure
raise CommError("Can't connect to network to query entity from API key")
return entity
def get_project_url(self, api=None, network=None, params=None):
"""Generate a url for a project.
If network is false and entity isn't specified in the environment raises wandb.apis.CommError
"""
params = params or {}
api = api or self.api
self._load_entity(api, network)
return "{base}/{entity}/{project}{query_string}".format(
base=api.app_url,
entity=urllib.parse.quote_plus(api.settings('entity')),
project=urllib.parse.quote_plus(self.project_name(api)),
query_string=self._generate_query_string(api, params)
)
def get_sweep_url(self, api=None, network=None, params=None):
"""Generate a url for a sweep.
If network is false and entity isn't specified in the environment raises wandb.apis.CommError
Returns:
string - url if the run is part of a sweep
None - if the run is not part of the sweep
"""
params = params or {}
api = api or self.api
self._load_entity(api, network)
sweep_id = self.sweep_id
if sweep_id is None:
return
return "{base}/{entity}/{project}/sweeps/{sweepid}{query_string}".format(
base=api.app_url,
entity=urllib.parse.quote_plus(api.settings('entity')),
project=urllib.parse.quote_plus(self.project_name(api)),
sweepid=urllib.parse.quote_plus(sweep_id),
query_string=self._generate_query_string(api, params)
)
def get_url(self, api=None, network=True, params=None):
"""Generate a url for a run.
If network is false and entity isn't specified in the environment raises wandb.apis.CommError
"""
params = params or {}
api = api or self.api
self._load_entity(api, network)
return "{base}/{entity}/{project}/runs/{run}{query_string}".format(
base=api.app_url,
entity=urllib.parse.quote_plus(api.settings('entity')),
project=urllib.parse.quote_plus(self.project_name(api)),
run=urllib.parse.quote_plus(self.id),
query_string=self._generate_query_string(api, params)
)
def upload_debug(self):
"""Uploads the debug log to cloud storage"""
if os.path.exists(self.log_fname):
pusher = FilePusher(self.api)
pusher.update_file("wandb-debug.log", self.log_fname)
pusher.file_changed("wandb-debug.log", self.log_fname)
pusher.finish()
def __repr__(self):
try:
return "W&B Run: %s" % self.get_url()
except CommError as e:
return "W&B Error: %s" % e.message
@property
def name(self):
if self._name is not None:
return self._name
elif self._name_and_description is not None:
return self._name_and_description.split("\n")[0]
else:
return None
@name.setter
def name(self, name):
self._name = name
if self._name_and_description is not None:
parts = self._name_and_description.split("\n", 1)
parts[0] = name
self._name_and_description = "\n".join(parts)
@property
def description(self):
wandb.termwarn('Run.description is deprecated. Please use run.notes instead.')
if self._name_and_description is None:
self._name_and_description = ''
parts = self._name_and_description.split("\n", 1)
if len(parts) > 1:
return parts[1]
else:
return ""
@description.setter
def description(self, desc):
wandb.termwarn('Run.description is deprecated. Please use wandb.init(notes="long notes") instead.')
if self._name_and_description is None:
self._name_and_description = self._name or ""
parts = self._name_and_description.split("\n", 1)
if len(parts) == 1:
parts.append("")
parts[1] = desc
self._name_and_description = "\n".join(parts)
with open(self.description_path, 'w') as d_file:
d_file.write(self._name_and_description)
@property
def host(self):
return os.environ.get(env.HOST, socket.gethostname())
@property
def dir(self):
return self._dir
@property
def log_fname(self):
# TODO: we started work to log to a file in the run dir, but it had issues.
# For now all logs go to the same place.
return util.get_log_file_path()
def enable_logging(self):
"""Enable logging to the global debug log. This adds a run_id to the log,
in case of multiple processes on the same machine.
Currently no way to disable logging after it's enabled.
"""
handler = logging.FileHandler(self.log_fname)
handler.setLevel(logging.INFO)
run_id = self.id
class WBFilter(logging.Filter):
def filter(self, record):
record.run_id = run_id
return True
formatter = logging.Formatter(
'%(asctime)s %(levelname)-7s %(threadName)-10s:%(process)d [%(run_id)s:%(filename)s:%(funcName)s():%(lineno)s] %(message)s')
handler.setFormatter(formatter)
handler.addFilter(WBFilter())
root = logging.getLogger()
root.addHandler(handler)
@property
def summary(self):
if self._summary is None:
self._summary = summary.FileSummary(self)
return self._summary
@property
def has_summary(self):
return self._summary or os.path.exists(os.path.join(self._dir, summary.SUMMARY_FNAME))
def _history_added(self, row):
self.summary.update(row, overwrite=False)
@property
def history(self):
if self._history is None:
jupyter_callback = self._jupyter_agent.start if self._jupyter_agent else None
self._history = history.History(
self, add_callback=self._history_added, jupyter_callback=jupyter_callback)
if self._history._steps > 0:
self.resumed = True
return self._history
@property
def step(self):
return self.history._steps
@property
def has_history(self):
return self._history or os.path.exists(os.path.join(self._dir, HISTORY_FNAME))
@property
def events(self):
if self._events is None:
self._events = jsonlfile.JsonlEventsFile(EVENTS_FNAME, self._dir)
return self._events
@property
def has_events(self):
return self._events or os.path.exists(os.path.join(self._dir, EVENTS_FNAME))
@property
def description_path(self):
return os.path.join(self.dir, DESCRIPTION_FNAME)
def close_files(self):
"""Close open files to avoid Python warnings on termination:
Exception ignored in: <_io.FileIO name='wandb/dryrun-20180130_144602-9vmqjhgy/wandb-history.jsonl' mode='wb' closefd=True>
ResourceWarning: unclosed file <_io.TextIOWrapper name='wandb/dryrun-20180130_144602-9vmqjhgy/wandb-history.jsonl' mode='w' encoding='UTF-8'>
"""
if self._events is not None:
self._events.close()
self._events = None
if self._history is not None:
self._history.close()
self._history = None
def run_dir_path(run_id, dry=False):
if dry:
prefix = 'dryrun'
else:
prefix = 'run'
time_str = datetime.datetime.utcnow().strftime('%Y%m%d_%H%M%S')
return os.path.join(wandb.wandb_dir(), '{}-{}-{}'.format(prefix, time_str, run_id))
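# Hedged illustration of the naming produced by run_dir_path(); the timestamp is
# the placeholder value already used in the close_files() docstring above:
#   run_dir_path('9vmqjhgy')            -> <wandb_dir>/run-20180130_144602-9vmqjhgy
#   run_dir_path('9vmqjhgy', dry=True)  -> <wandb_dir>/dryrun-20180130_144602-9vmqjhgy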
| 38.371387
| 149
| 0.605732
|
081416cdd9ec6e602a5004e331d88def2e2e4b0d
| 4,467
|
py
|
Python
|
rsvp/__init__.py
|
sundeep-co-in/rsvp
|
28f7a31607609264cf76892d0902daabee88274f
|
[
"Apache-2.0"
] | 1
|
2018-10-13T14:51:10.000Z
|
2018-10-13T14:51:10.000Z
|
rsvp/__init__.py
|
sundeep-co-in/rsvp
|
28f7a31607609264cf76892d0902daabee88274f
|
[
"Apache-2.0"
] | 1
|
2018-08-24T10:27:29.000Z
|
2018-08-24T10:27:29.000Z
|
rsvp/__init__.py
|
sundeep-co-in/rsvp
|
28f7a31607609264cf76892d0902daabee88274f
|
[
"Apache-2.0"
] | null | null | null |
# RSVP Main File
from uuid import uuid4
from rsvp.helpers import RSVP_Helpers
from rsvp.constants import MEMBER_KEYS
from rsvp.exceptions import EXCEPTION_MESSAGES
MEMBERS_KEY = 'event_members'
helpers = RSVP_Helpers()
store_file_name = ''
def _locate_file(event_id):
all_files = helpers.get_all_store_files()
required_file = [file for file in all_files if event_id in file]
if isinstance(required_file, list) and len(required_file) > 0:
return required_file[0]
return ''
def create_rsvp_store(*source, **event_details):
"""
Creates a new RSVP store object
:param source: Source details
- rsvp_source string
:param event_details: Event detail kwargs
- event_slug slug
- event_name string
- event_description string
- event_start_date datetime
- event_end_date datetime
- event_members list
:return: event_id uuid as string
"""
    # require at least one positional source argument
    if not (isinstance(source, (list, tuple)) and len(source) > 0):
raise Exception(EXCEPTION_MESSAGES['SOURCE_NOT_FOUND'])
rsvp_source = source[0]
if not event_details.get('event_slug'):
raise Exception(EXCEPTION_MESSAGES['EVENT_SLUG_NOT_FOUND'])
if not event_details.get('event_start_date'):
raise Exception(EXCEPTION_MESSAGES['START_DATE_NOT_FOUND'])
event_id = uuid4()
event_master_dict = dict()
event_master_dict['event_id'] = str(event_id)
event_master_dict['event_source'] = rsvp_source
event_master_dict['event_slug'] = event_details['event_slug']
event_master_dict['event_name'] = event_details.get('event_name')
event_master_dict['event_description'] = event_details.get('event_description')
event_master_dict['event_start_date'] = str(event_details['event_start_date'])
event_master_dict['event_end_date'] = str(event_details.get('event_end_date'))
event_master_dict[MEMBERS_KEY] = event_details.get('event_members', [])
store_file_name = str(event_id) + '.' + event_details['event_slug']
if helpers.save_rsvp(store_file_name, event_master_dict):
return str(event_id)
return
def delete_rsvp_store(event_id):
"""
Deletes RSVP store file
:param event_id: uuid as string
:return: boolean
"""
file_to_delete = _locate_file(event_id)
if file_to_delete and helpers.delete_store_file(file_to_delete):
return True
return False
def add_members(event_id, members):
"""
Add members to event
:param event_id: uuid as string
:param members: list of names (string)
:return: boolean
"""
file_to_append = _locate_file(event_id)
event_data = helpers.load_file_data(file_to_append)
member_keys_dict = {key: False for key in MEMBER_KEYS}
    members_list = [{member: dict(member_keys_dict)} for member in members]  # independent copy per member
event_data[MEMBERS_KEY].extend(members_list)
if helpers.save_rsvp(file_to_append, event_data):
return True
return False
def adjust_rsvp_state(event_id, members, **states):
"""
Set rsvp for members for an event
:param event_id: uuid as string
:param members: list
:param states: dict example {'in_queue_waiting': true}
:return: boolean
"""
file_to_update = _locate_file(event_id)
event_data = helpers.load_file_data(file_to_update)
for member_rsvp in event_data.get(MEMBERS_KEY, []):
member = [m for m in members if member_rsvp.get(m)]
if member and len(member) == 1:
member = member[0]
new_state = {i: states.get(i, j) for i, j in member_rsvp[member].items()}
member_index = event_data[MEMBERS_KEY].index(member_rsvp)
event_data[MEMBERS_KEY].pop(member_index)
event_data[MEMBERS_KEY].insert(
member_index, {member: new_state}
)
if helpers.save_rsvp(file_to_update, event_data):
return True
return False
def get_rsvp_state(event_id, members):
"""
Get rsvp for members for an event
:param event_id: uuid as string
:param members: list
:return: list of dict
"""
members_rsvp = []
file_to_update = _locate_file(event_id)
event_data = helpers.load_file_data(file_to_update)
members_data = event_data.get(MEMBERS_KEY, [])
if members_data:
        members_rsvp.extend(member for member in members_data if list(member)[0] in members)
return members_rsvp
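# Hedged usage sketch (added for illustration; not part of the original module). The
# event details, member names and the 'in_queue_waiting' flag are example values
# modelled on the docstrings above, not a tested recipe.
if __name__ == '__main__':
    import datetime
    event_id = create_rsvp_store(
        'meetup',                                   # rsvp_source
        event_slug='pyconf-2018',
        event_start_date=datetime.datetime(2018, 10, 13, 10, 0),
        event_members=[],
    )
    add_members(event_id, ['alice', 'bob'])
    adjust_rsvp_state(event_id, ['alice'], in_queue_waiting=True)
    print(get_rsvp_state(event_id, ['alice', 'bob']))
    delete_rsvp_store(event_id)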
| 33.840909
| 94
| 0.687486
|
731db9861765d20f2d87a6d4d9497180e037c365
| 7,733
|
py
|
Python
|
vspk/v5_0/nupolicyobjectgroup.py
|
axxyhtrx/vspk-python
|
4495882c6bcbb1ef51b14b9f4dc7efe46476ff50
|
[
"BSD-3-Clause"
] | 19
|
2016-03-07T12:34:22.000Z
|
2020-06-11T11:09:02.000Z
|
vspk/v5_0/nupolicyobjectgroup.py
|
axxyhtrx/vspk-python
|
4495882c6bcbb1ef51b14b9f4dc7efe46476ff50
|
[
"BSD-3-Clause"
] | 40
|
2016-06-13T15:36:54.000Z
|
2020-11-10T18:14:43.000Z
|
vspk/v5_0/nupolicyobjectgroup.py
|
axxyhtrx/vspk-python
|
4495882c6bcbb1ef51b14b9f4dc7efe46476ff50
|
[
"BSD-3-Clause"
] | 15
|
2016-06-10T22:06:01.000Z
|
2020-12-15T18:37:42.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUNSGatewaysFetcher
from bambou import NURESTObject
class NUPolicyObjectGroup(NURESTObject):
""" Represents a PolicyObjectGroup in the VSD
Notes:
Policy Object Groups are a collection of existing Network Services Gateways. These groups can be used in routing policies for domain links.
"""
__rest_name__ = "policyobjectgroup"
__resource_name__ = "policyobjectgroups"
## Constants
CONST_TYPE_NSGATEWAY = "NSGateway"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a PolicyObjectGroup instance
Notes:
                You can specify all parameters while calling this method.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> policyobjectgroup = NUPolicyObjectGroup(id=u'xxxx-xxx-xxx-xxx', name=u'PolicyObjectGroup')
>>> policyobjectgroup = NUPolicyObjectGroup(data=my_dict)
"""
super(NUPolicyObjectGroup, self).__init__()
# Read/Write Attributes
self._name = None
self._last_updated_by = None
self._description = None
self._entity_scope = None
self._external_id = None
self._type = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
self.expose_attribute(local_name="type", remote_name="type", attribute_type=str, is_required=False, is_unique=False, choices=[u'NSGateway'])
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.ns_gateways = NUNSGatewaysFetcher.fetcher_with_object(parent_object=self, relationship="member")
self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
Name of the Policy Object Group
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Name of the Policy Object Group
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def description(self):
""" Get description value.
Notes:
Description of the Policy Object Group
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
Description of the Policy Object Group
"""
self._description = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
@property
def type(self):
""" Get type value.
Notes:
Type of the Policy Object Group
"""
return self._type
@type.setter
def type(self, value):
""" Set type value.
Notes:
Type of the Policy Object Group
"""
self._type = value
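# Hedged sketch (added for illustration; not part of the generated file): building an
# instance locally and reading back an exposed attribute. The name and description are
# invented; pushing the object to VSD would additionally require a bambou session.
#   group = NUPolicyObjectGroup(name=u'edge-nsgs', description=u'East coast NSGs')
#   group.type = 'NSGateway'          # must be one of the declared choices
#   group.entity_scope = 'ENTERPRISE'
#   group.name                        # -> u'edge-nsgs'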
| 29.515267
| 175
| 0.617742
|
01cb2e482ec4faaa186deff4faaad63504f2727e
| 1,804
|
py
|
Python
|
src/awkward/_v2/types/unknowntype.py
|
douglasdavis/awkward-1.0
|
f00775803a5568efb0a8e2dae3b1a4f23228fa40
|
[
"BSD-3-Clause"
] | 2
|
2019-09-12T03:07:23.000Z
|
2019-09-27T05:32:07.000Z
|
src/awkward/_v2/types/unknowntype.py
|
douglasdavis/awkward-1.0
|
f00775803a5568efb0a8e2dae3b1a4f23228fa40
|
[
"BSD-3-Clause"
] | 1
|
2019-09-26T17:57:45.000Z
|
2019-09-26T17:57:45.000Z
|
src/awkward/_v2/types/unknowntype.py
|
douglasdavis/awkward-1.0
|
f00775803a5568efb0a8e2dae3b1a4f23228fa40
|
[
"BSD-3-Clause"
] | null | null | null |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import awkward as ak
from awkward._v2.types.type import Type
from awkward._v2.forms.form import _parameters_equal
class UnknownType(Type):
def __init__(self, parameters=None, typestr=None):
if parameters is not None and not isinstance(parameters, dict):
raise ak._v2._util.error(
TypeError(
"{} 'parameters' must be of type dict or None, not {}".format(
type(self).__name__, repr(parameters)
)
)
)
if typestr is not None and not ak._util.isstr(typestr):
raise ak._v2._util.error(
TypeError(
"{} 'typestr' must be of type string or None, not {}".format(
type(self).__name__, repr(typestr)
)
)
)
self._parameters = parameters
self._typestr = typestr
def _str(self, indent, compact):
if self._typestr is not None:
out = [self._typestr]
else:
params = self._str_parameters()
if params is None:
out = ["unknown"]
else:
out = ["unknown[", params, "]"]
return [self._str_categorical_begin()] + out + [self._str_categorical_end()]
def __repr__(self):
args = self._repr_args()
return "{}({})".format(type(self).__name__, ", ".join(args))
def __eq__(self, other):
if isinstance(other, UnknownType):
return self._typestr == other._typestr and _parameters_equal(
self._parameters, other._parameters, only_array_record=True
)
else:
return False
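# Hedged sketch (added for illustration; not part of the file): equality follows __eq__
# above, assuming _parameters_equal treats two absent parameter sets as equal.
#   UnknownType() == UnknownType()                     # -> True
#   UnknownType() == UnknownType(typestr="anything")   # -> False (typestr differs)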
| 34.037736
| 87
| 0.545455
|
f5661a30fef62119aa23529ecd7be6f30cf45f55
| 9,230
|
py
|
Python
|
lib/streamlit/elements/write.py
|
ChangHoon-Sung/streamlit
|
83e0b80d2fa13e29e83d092a9fc4d946460bbf73
|
[
"Apache-2.0"
] | 1
|
2022-03-14T07:55:33.000Z
|
2022-03-14T07:55:33.000Z
|
lib/streamlit/elements/write.py
|
ChangHoon-Sung/streamlit
|
83e0b80d2fa13e29e83d092a9fc4d946460bbf73
|
[
"Apache-2.0"
] | 1
|
2022-03-15T04:05:17.000Z
|
2022-03-15T04:05:17.000Z
|
lib/streamlit/elements/write.py
|
ChangHoon-Sung/streamlit
|
83e0b80d2fa13e29e83d092a9fc4d946460bbf73
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2022 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json as json
import types
from typing import cast, Any, List, Tuple, Type
import numpy as np
import streamlit
from streamlit import type_util
from streamlit.errors import StreamlitAPIException
from streamlit.state import AutoSessionState
# Special methods:
HELP_TYPES = (
types.BuiltinFunctionType,
types.BuiltinMethodType,
types.FunctionType,
types.MethodType,
types.ModuleType,
) # type: Tuple[Type[Any], ...]
class WriteMixin:
def write(self, *args, **kwargs):
"""Write arguments to the app.
This is the Swiss Army knife of Streamlit commands: it does different
things depending on what you throw at it. Unlike other Streamlit commands,
write() has some unique properties:
1. You can pass in multiple arguments, all of which will be written.
2. Its behavior depends on the input types as follows.
3. It returns None, so its "slot" in the App cannot be reused.
Parameters
----------
*args : any
One or many objects to print to the App.
Arguments are handled as follows:
- write(string) : Prints the formatted Markdown string, with
support for LaTeX expression and emoji shortcodes.
See docs for st.markdown for more.
- write(data_frame) : Displays the DataFrame as a table.
- write(error) : Prints an exception specially.
- write(func) : Displays information about a function.
- write(module) : Displays information about the module.
- write(dict) : Displays dict in an interactive widget.
- write(mpl_fig) : Displays a Matplotlib figure.
- write(altair) : Displays an Altair chart.
- write(keras) : Displays a Keras model.
- write(graphviz) : Displays a Graphviz graph.
- write(plotly_fig) : Displays a Plotly figure.
- write(bokeh_fig) : Displays a Bokeh figure.
- write(sympy_expr) : Prints SymPy expression using LaTeX.
- write(htmlable) : Prints _repr_html_() for the object if available.
- write(obj) : Prints str(obj) if otherwise unknown.
unsafe_allow_html : bool
This is a keyword-only argument that defaults to False.
By default, any HTML tags found in strings will be escaped and
therefore treated as pure text. This behavior may be turned off by
setting this argument to True.
That said, *we strongly advise against it*. It is hard to write secure
HTML, so by using this argument you may be compromising your users'
security. For more information, see:
https://github.com/streamlit/streamlit/issues/152
**Also note that `unsafe_allow_html` is a temporary measure and may be
removed from Streamlit at any time.**
If you decide to turn on HTML anyway, we ask you to please tell us your
exact use case here:
https://discuss.streamlit.io/t/96 .
This will help us come up with safe APIs that allow you to do what you
want.
Example
-------
Its basic use case is to draw Markdown-formatted text, whenever the
input is a string:
        >>> st.write('Hello, *World!* :sunglasses:')
.. output::
https://share.streamlit.io/streamlit/docs/main/python/api-examples-source/text.write1.py
height: 150px
As mentioned earlier, `st.write()` also accepts other data formats, such as
numbers, data frames, styled data frames, and assorted objects:
>>> st.write(1234)
>>> st.write(pd.DataFrame({
... 'first column': [1, 2, 3, 4],
... 'second column': [10, 20, 30, 40],
... }))
.. output::
https://share.streamlit.io/streamlit/docs/main/python/api-examples-source/text.write2.py
height: 350px
Finally, you can pass in multiple arguments to do things like:
>>> st.write('1 + 1 = ', 2)
>>> st.write('Below is a DataFrame:', data_frame, 'Above is a dataframe.')
.. output::
https://share.streamlit.io/streamlit/docs/main/python/api-examples-source/text.write3.py
height: 410px
Oh, one more thing: `st.write` accepts chart objects too! For example:
>>> import pandas as pd
>>> import numpy as np
>>> import altair as alt
>>>
>>> df = pd.DataFrame(
... np.random.randn(200, 3),
... columns=['a', 'b', 'c'])
...
>>> c = alt.Chart(df).mark_circle().encode(
... x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
>>>
>>> st.write(c)
.. output::
https://share.streamlit.io/streamlit/docs/main/python/api-examples-source/charts.vega_lite_chart.py
height: 300px
"""
string_buffer = [] # type: List[str]
unsafe_allow_html = kwargs.get("unsafe_allow_html", False)
# This bans some valid cases like: e = st.empty(); e.write("a", "b").
# BUT: 1) such cases are rare, 2) this rule is easy to understand,
# and 3) this rule should be removed once we have st.container()
if not self.dg._is_top_level and len(args) > 1:
raise StreamlitAPIException(
"Cannot replace a single element with multiple elements.\n\n"
"The `write()` method only supports multiple elements when "
"inserting elements rather than replacing. That is, only "
"when called as `st.write()` or `st.sidebar.write()`."
)
def flush_buffer():
if string_buffer:
self.dg.markdown(
" ".join(string_buffer),
unsafe_allow_html=unsafe_allow_html,
)
string_buffer[:] = []
for arg in args:
# Order matters!
if isinstance(arg, str):
string_buffer.append(arg)
elif type_util.is_dataframe_like(arg):
flush_buffer()
if len(np.shape(arg)) > 2:
self.dg.text(arg)
else:
self.dg.dataframe(arg)
elif isinstance(arg, Exception):
flush_buffer()
self.dg.exception(arg)
elif isinstance(arg, HELP_TYPES):
flush_buffer()
self.dg.help(arg)
elif type_util.is_altair_chart(arg):
flush_buffer()
self.dg.altair_chart(arg)
elif type_util.is_type(arg, "matplotlib.figure.Figure"):
flush_buffer()
self.dg.pyplot(arg)
elif type_util.is_plotly_chart(arg):
flush_buffer()
self.dg.plotly_chart(arg)
elif type_util.is_type(arg, "bokeh.plotting.figure.Figure"):
flush_buffer()
self.dg.bokeh_chart(arg)
elif type_util.is_graphviz_chart(arg):
flush_buffer()
self.dg.graphviz_chart(arg)
elif type_util.is_sympy_expession(arg):
flush_buffer()
self.dg.latex(arg)
elif type_util.is_keras_model(arg):
from tensorflow.python.keras.utils import vis_utils
flush_buffer()
dot = vis_utils.model_to_dot(arg)
self.dg.graphviz_chart(dot.to_string())
elif isinstance(arg, (dict, list, AutoSessionState)):
flush_buffer()
self.dg.json(arg)
elif type_util.is_namedtuple(arg):
flush_buffer()
self.dg.json(json.dumps(arg._asdict()))
elif type_util.is_pydeck(arg):
flush_buffer()
self.dg.pydeck_chart(arg)
elif inspect.isclass(arg):
flush_buffer()
self.dg.text(arg)
elif hasattr(arg, "_repr_html_"):
self.dg.markdown(
arg._repr_html_(),
unsafe_allow_html=True,
)
else:
string_buffer.append("`%s`" % str(arg).replace("`", "\\`"))
flush_buffer()
@property
def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
"""Get our DeltaGenerator."""
return cast("streamlit.delta_generator.DeltaGenerator", self)
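# Hedged sketch (added for illustration; not part of the file): consecutive string
# arguments are buffered by flush_buffer() and emitted as a single markdown element,
# while a non-string argument flushes the buffer first. Assuming df is a pandas
# DataFrame, the call below renders two elements (one markdown, one table), not three:
#   st.write('Here is', 'the data:', df)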
| 38.458333
| 111
| 0.576598
|
6bb69f25b25676e66ff170792737f4acbc9e5093
| 7,356
|
py
|
Python
|
herders/api_filters.py
|
marknach/swarfarm
|
326dcf8290ea4ef4a1832d574db5fc3eeefe39dd
|
[
"Apache-2.0"
] | null | null | null |
herders/api_filters.py
|
marknach/swarfarm
|
326dcf8290ea4ef4a1832d574db5fc3eeefe39dd
|
[
"Apache-2.0"
] | 8
|
2021-06-04T23:58:22.000Z
|
2022-03-12T00:47:56.000Z
|
herders/api_filters.py
|
hixio-mh/swarfarm
|
f30f1526566d305e11c216d1730f2af99d53b91d
|
[
"Apache-2.0"
] | null | null | null |
import django_filters
from django.contrib.auth.models import User
from django.db.models import Q
from bestiary.models import Monster, Skill, SkillEffect, LeaderSkill, ScalingStat
from .models import MonsterInstance, MonsterTag, RuneInstance, Team
class SummonerFilter(django_filters.FilterSet):
class Meta:
model = User
fields = {
'username': ['exact'],
'summoner__server': ['exact']
}
class MonsterInstanceFilter(django_filters.FilterSet):
monster = django_filters.NumberFilter()
monster__name = django_filters.CharFilter(method='filter_monster__name')
tags__pk = django_filters.ModelMultipleChoiceFilter(queryset=MonsterTag.objects.all(), to_field_name='pk', conjoined=True)
monster__element = django_filters.MultipleChoiceFilter(choices=Monster.ELEMENT_CHOICES)
monster__archetype = django_filters.MultipleChoiceFilter(choices=Monster.ARCHETYPE_CHOICES)
monster__awaken_level = django_filters.MultipleChoiceFilter(choices=Monster.AWAKEN_CHOICES)
priority = django_filters.MultipleChoiceFilter(choices=MonsterInstance.PRIORITY_CHOICES)
monster__leader_skill__attribute = django_filters.MultipleChoiceFilter(choices=LeaderSkill.ATTRIBUTE_CHOICES)
monster__leader_skill__area = django_filters.MultipleChoiceFilter(choices=LeaderSkill.AREA_CHOICES)
monster__skills__scaling_stats__pk = django_filters.ModelMultipleChoiceFilter(queryset=ScalingStat.objects.all(), to_field_name='pk', conjoined=True)
monster__skills__skill_effect__pk = django_filters.ModelMultipleChoiceFilter(queryset=SkillEffect.objects.all(), method='filter_monster__skills__skill_effect__pk')
monster__skills__passive = django_filters.BooleanFilter(method='filter_monster_skills_passive')
effects_logic = django_filters.BooleanFilter(method='filter_effects_logic')
monster__fusion_food = django_filters.BooleanFilter(method='filter_monster__fusion_food')
class Meta:
model = MonsterInstance
fields = {
'monster': ['exact'],
'monster__name': ['exact'],
'tags__pk': ['exact'],
'stars': ['gte', 'lte'],
'level': ['gte', 'lte'],
'monster__element': ['exact'],
'monster__archetype': ['exact'],
'priority': ['exact'],
'monster__awaken_level': ['exact'],
'monster__leader_skill__attribute': ['exact'],
'monster__leader_skill__area': ['exact'],
'monster__skills__skill_effect__pk': ['exact'],
'monster__skills__scaling_stats__pk': ['exact'],
'monster__skills__passive': ['exact'],
'effects_logic': ['exact'],
'fodder': ['exact'],
'in_storage': ['exact'],
'monster__fusion_food': ['exact'],
}
def filter_monster__name(self, queryset, name, value):
if value:
return queryset.filter(monster__name__istartswith=value)
else:
return queryset
def filter_monster__fusion_food(self, queryset, name, value):
if value:
return queryset.filter(monster__fusion_food=True).exclude(ignore_for_fusion=True)
else:
return queryset.filter(Q(monster__fusion_food=False) | Q(ignore_for_fusion=True))
def filter_monster__skills__skill_effect__pk(self, queryset, name, value):
old_filtering = self.form.cleaned_data.get('effects_logic', False)
stat_scaling = self.form.cleaned_data.get('monster__skills__scaling_stats__pk', [])
passive = self.form.cleaned_data.get('monster__skills__passive', None)
if old_filtering:
# Filter if any skill on the monster has the designated fields
for effect in value:
queryset = queryset.filter(monster__skills__skill_effect=effect)
for pk in stat_scaling:
queryset = queryset.filter(monster__skills__scaling_stats=pk)
if passive is not None:
queryset = queryset.filter(
monster__skills__passive=passive,
)
return queryset.distinct()
else:
# Filter effects based on effects of each individual skill. This ensures a monster will not show up unless it has
# the desired effects on the same skill rather than across any skills.
skills = Skill.objects.all()
for effect in value:
skills = skills.filter(skill_effect=effect)
for pk in stat_scaling:
skills = skills.filter(scaling_stats=pk)
if passive is not None:
skills = skills.filter(
passive=passive,
)
return queryset.filter(monster__skills__in=skills).distinct()
def filter_effects_logic(self, queryset, name, value):
# This field is just used to alter the logic of skill effect filter and is used in filter_monster__skills__skill_effect__pk()
return queryset
class RuneInstanceFilter(django_filters.FilterSet):
type = django_filters.MultipleChoiceFilter(choices=RuneInstance.TYPE_CHOICES)
slot = django_filters.MultipleChoiceFilter(choices=((1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)))
quality = django_filters.MultipleChoiceFilter(choices=RuneInstance.QUALITY_CHOICES)
original_quality = django_filters.MultipleChoiceFilter(choices=RuneInstance.QUALITY_CHOICES)
main_stat = django_filters.MultipleChoiceFilter(choices=RuneInstance.STAT_CHOICES)
innate_stat = django_filters.MultipleChoiceFilter(choices=RuneInstance.STAT_CHOICES)
substats = django_filters.MultipleChoiceFilter(choices=RuneInstance.STAT_CHOICES, method='filter_substats')
substat_logic = django_filters.BooleanFilter(method='filter_substat_logic')
assigned_to = django_filters.BooleanFilter(method='filter_assigned_to')
class Meta:
model = RuneInstance
fields = {
'type': ['exact'],
'level': ['exact', 'lte', 'lt', 'gte', 'gt'],
'stars': ['exact', 'lte', 'lt', 'gte', 'gt'],
'slot': ['exact'],
'quality': ['exact'],
'original_quality': ['exact'],
'ancient': ['exact'],
'assigned_to': ['exact'],
'main_stat': ['exact'],
'innate_stat': ['exact'],
'marked_for_sale': ['exact'],
'has_grind': ['exact', 'lte', 'lt', 'gte', 'gt'],
'has_gem': ['exact'],
}
def filter_substats(self, queryset, name, value):
any_substat = self.form.cleaned_data.get('substat_logic', False)
if len(value):
if any_substat:
return queryset.filter(substats__overlap=value)
else:
return queryset.filter(substats__contains=value)
else:
return queryset
def filter_substat_logic(self, queryset, name, value):
# This field is just used to alter the logic of substat filter
return queryset
def filter_assigned_to(self, queryset, name, value):
return queryset.filter(assigned_to__isnull=not value)
class TeamFilter(django_filters.FilterSet):
class Meta:
model = Team
fields = {
'name': ['exact', 'istartswith', 'icontains'],
'description': ['icontains']
}
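# Hedged usage sketch (added for illustration; not part of the file): django-filter's
# standard FilterSet API; the field values below are invented and must match the
# declared choices in practice.
#   f = MonsterInstanceFilter(
#       data={'stars__gte': 5, 'effects_logic': False},
#       queryset=MonsterInstance.objects.all(),
#   )
#   f.qs  # with effects_logic False, effect/stat filters must match on a single skill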
| 43.785714
| 167
| 0.664628
|
75f2f30c452a8f8013f5d70b90ae1fe65e7a8db5
| 2,224
|
py
|
Python
|
addons14/account_financial_report/report/vat_report_xlsx.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | 1
|
2021-06-10T14:59:13.000Z
|
2021-06-10T14:59:13.000Z
|
addons14/account_financial_report/report/vat_report_xlsx.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | null | null | null |
addons14/account_financial_report/report/vat_report_xlsx.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | 1
|
2021-04-09T09:44:44.000Z
|
2021-04-09T09:44:44.000Z
|
# Copyright 2018 Forest and Biomass Romania
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import _, models
class VATReportXslx(models.AbstractModel):
_name = "report.a_f_r.report_vat_report_xlsx"
_description = "Vat Report XLSX Report"
_inherit = "report.account_financial_report.abstract_report_xlsx"
def _get_report_name(self, report, data):
company_id = data.get("company_id", False)
report_name = _("Vat Report")
if company_id:
company = self.env["res.company"].browse(company_id)
suffix = " - {} - {}".format(company.name, company.currency_id.name)
report_name = report_name + suffix
return report_name
def _get_report_columns(self, report):
return {
0: {"header": _("Code"), "field": "code", "width": 5},
1: {"header": _("Name"), "field": "name", "width": 100},
2: {"header": _("Net"), "field": "net", "type": "amount", "width": 14},
3: {"header": _("Tax"), "field": "tax", "type": "amount", "width": 14},
}
def _get_report_filters(self, report):
return [
[_("Date from"), report.date_from.strftime("%d/%m/%Y")],
[_("Date to"), report.date_to.strftime("%d/%m/%Y")],
[
_("Based on"),
_("Tax Tags") if report.based_on == "taxtags" else _("Tax Groups"),
],
]
def _get_col_count_filter_name(self):
return 0
def _get_col_count_filter_value(self):
return 2
def _generate_report_content(self, workbook, report, data):
res_data = self.env[
"report.account_financial_report.vat_report"
]._get_report_values(report, data)
vat_report = res_data["vat_report"]
tax_detail = res_data["tax_detail"]
# For each tax_tag tax_group
self.write_array_header()
for tag_or_group in vat_report:
# Write taxtag line
self.write_line_from_dict(tag_or_group)
# For each tax if detail taxes
if tax_detail:
for tax in tag_or_group["taxes"]:
self.write_line_from_dict(tax)
| 36.459016
| 83
| 0.584532
|
08db1eb58faeb587ebdd0ebd550110abb1010be3
| 2,825
|
py
|
Python
|
tdda/constraints/db/detect.py
|
Daniel-Mietchen/tdda
|
98718ec3b4b253bba3b575d4b10a14a6d70576b8
|
[
"MIT"
] | 232
|
2016-09-17T11:56:52.000Z
|
2022-03-18T23:13:41.000Z
|
tdda/constraints/db/detect.py
|
Daniel-Mietchen/tdda
|
98718ec3b4b253bba3b575d4b10a14a6d70576b8
|
[
"MIT"
] | 28
|
2016-11-14T04:04:22.000Z
|
2022-03-08T22:16:30.000Z
|
tdda/constraints/db/detect.py
|
Daniel-Mietchen/tdda
|
98718ec3b4b253bba3b575d4b10a14a6d70576b8
|
[
"MIT"
] | 30
|
2016-09-17T11:57:32.000Z
|
2022-03-29T10:57:16.000Z
|
# -*- coding: utf-8 -*-
"""
Support for database constraint detection from the command-line tool
"""
from __future__ import division
from __future__ import print_function
USAGE = '''
Parameters:
* table is one of:
- a database table name
- a schema-qualified table name of the form schema.table
- a database table name qualified by database type, of the
form dbtype:table or dbtype:schema.table
  * constraints.tdda is a JSON .tdda file containing constraints.
* detection output file is not implemented yet.
'''
import argparse
import os
import sys
from tdda import __version__
from tdda.constraints.flags import detect_parser, detect_flags
from tdda.constraints.db.constraints import detect_db_table
from tdda.constraints.db.drivers import (database_connection, parse_table_name,
database_arg_parser,
database_arg_flags)
def detect_database_table_from_file(table, constraints_path,
conn=None, dbtype=None, db=None,
host=None, port=None, user=None,
password=None, **kwargs):
"""
detect using the given database table, against constraints in the .tdda
file specified.
Not implemented
"""
(table, dbtype) = parse_table_name(table, dbtype)
db = database_connection(table=table, conn=conn, dbtype=dbtype, db=db,
host=host, port=port,
user=user, password=password)
print(detect_db_table(dbtype, db, table, constraints_path, **kwargs))
def get_detect_params(args):
parser = database_arg_parser(detect_parser, USAGE)
parser.add_argument('table', nargs=1, help='database table name')
parser.add_argument('constraints', nargs=1,
help='constraints file to verify against')
parser.add_argument('outpath', nargs='?',
help='file to write detection results to')
params = {}
flags = database_arg_flags(detect_flags, parser, args, params)
params['table'] = flags.table[0] if flags.table else None
params['constraints_path'] = (flags.constraints[0] if flags.constraints
else None)
params['outpath'] = flags.outpath
return params
class DatabaseDetector:
def __init__(self, argv, verbose=False):
self.argv = argv
self.verbose = verbose
def detect(self):
params = get_detect_params(self.argv[1:])
detect_database_table_from_file(**params)
def main(argv):
if len(argv) > 1 and argv[1] in ('-v', '--version'):
print(__version__)
sys.exit(0)
v = DatabaseDetector(argv)
v.detect()
if __name__ == '__main__':
main(sys.argv)
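# Hedged invocation sketch (added for illustration; not part of the file): positional
# arguments follow get_detect_params() - table, constraints file, optional output path.
# The table and file names are invented, and detection output is not implemented yet,
# as the USAGE text above notes.
#   python -m tdda.constraints.db.detect postgres:public.customers customers.tdda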
| 30.376344
| 79
| 0.633628
|
59639b603fcb1ed3fd5adad9404bbda4f99dc191
| 8,593
|
py
|
Python
|
src/power_forecast/functions.py
|
fserrey/eolo-project
|
f1c157b8c0675343534424ee8df82a2e1f2e6a2b
|
[
"MIT"
] | 1
|
2021-12-14T22:57:23.000Z
|
2021-12-14T22:57:23.000Z
|
src/power_forecast/functions.py
|
fserrey/eolo-project
|
f1c157b8c0675343534424ee8df82a2e1f2e6a2b
|
[
"MIT"
] | null | null | null |
src/power_forecast/functions.py
|
fserrey/eolo-project
|
f1c157b8c0675343534424ee8df82a2e1f2e6a2b
|
[
"MIT"
] | null | null | null |
import os
from os import listdir
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import json
import xgboost as xgb
import matplotlib.pyplot as plt
import folium
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error  # used by objetivo() below
from hyperopt import STATUS_OK  # objetivo() is written as a hyperopt objective
#from power_forecast.pickle_save_load import to_pickle
import webbrowser
def get_date(base_dir):
new_time = []
for file in listdir(base_dir):
file_path = f'{base_dir}/{file}'
match=file.split("_")[1]
date = pd.to_datetime(match, format = "%Y%m%d%H").strftime('%d/%m/%Y')
time = (datetime.strptime(match, "%Y%m%d%H") + timedelta(hours=6)).strftime('%H:%M')
new_time.append(date + " " + time)
return new_time
def get_variables(base_dir, var_list, diccionario, nz=26):
d3_var = ["HGTprs", "CLWMRprs", "RHprs","Velprs","UGRDprs","VGRDprs","TMPprs"]
d2_var = ["HGTsfc", "MSLETmsl", "PWATclm", "RH2m", "Vel100m", "UGRD100m", "VGRD100m",
"Vel80m", "UGRD80m", "VGRD80m", "Vel10m", "UGRD10m", "VGRD10m", "GUSTsfc",
"TMPsfc", "TMP2m", "no4LFTXsfc", "CAPEsfc", "SPFH2m", "SPFH80m"]
lst = []
for file in listdir(base_dir):
file_path = f'{base_dir}/{file}'
e_file = []
for key, value in diccionario.items():
if key in set(var_list).intersection(d3_var): #d3_var:
corte = value[0] + int(((value[1])/26)*nz)
e_file.append(np.fromfile(file_path, dtype=np.float32)[value[0]:corte])
elif key in set(var_list).intersection(d2_var):#d2_var:
e_file.append(np.fromfile(file_path, dtype=np.float32)[value[0]:value[1]])
lst.append(e_file)
return lst
def setup_x(dataframe):
"""Flat variables values for model training"""
dataframe.reset_index(level=0, inplace=True)
row_list =[]
for index, rows in dataframe.iterrows():
my_list = [rows.RHprs, rows.Velprs, rows.TMPprs, rows.Vel100m, rows.Vel80m,rows.TMPsfc, rows.SPFH80m]
row_list.append(my_list)
a = [np.concatenate(row_list[i]) for i in range(len(row_list))]
train_ = pd.DataFrame(a, index=dataframe["index"])
return train_
def get_var(main_dic, list_var, nz=26):
"""This function provides the selected variables in a nested dictionary with the given array
    and level (consider that each level is around 50 m height). Output is given as a dictionary.
:rtype: object
"""
dict_final = {}
size_3d = 13*9*nz
print("Now, we get the variables we want")
    for datetime_key in main_dic:  # iterate over the first-level (datetime) keys
        res = []
        for var in list_var:  # check that the variable to extract is in our list
            if var in main_dic.get(datetime_key).get("var_3d").keys():
                # check that this variable exists among the second-level keys
                array_3d = main_dic[datetime_key]["var_3d"][var]["data"]
                # append the fourth-level value array to the result, chunk by chunk
                for j in range(0, len(array_3d), size_3d):
                    res.extend(array_3d[j: j+size_3d])
        for var in list_var:
            if var in main_dic.get(datetime_key).get("var_2d").keys():
                array_2d = main_dic[datetime_key]["var_2d"][var]["data"]
                res.extend(array_2d)
dict_final.update({datetime_key:res})
return dict_final
def get_X(dataframe):
meteo = dataframe
meteo.reset_index(level=0, inplace=True)
meteo["date"] = pd.to_datetime(meteo['index'], format='%d/%m/%Y %H:%M')
meteo = meteo.sort_values(by='date',ascending=True)
meteo = meteo.set_index("date").sort_index().loc[:'31/12/2016 00:00']
meteo = meteo[[x for x in meteo.columns if x != 'index']]
return meteo
def setting_X(dictionary):
meteo_df = pd.DataFrame(dictionary).T
meteo_df.reset_index(level=0, inplace=True)
meteo_df["date"]=pd.to_datetime(meteo_df['index'], format='%d/%m/%Y %H:%M')
meteo_df=meteo_df.sort_values(by='date',ascending=True)
meteo_df=meteo_df.set_index("date").sort_index().loc[:'31/12/2016 00:00']
meteo_df=meteo_df[[x for x in meteo_df.columns if x != 'index']]
return meteo_df
def setting_y(csv_file):
power_df = pd.read_csv(csv_file)
power_df['date'] = pd.to_datetime(power_df['date'], format='%d/%m/%Y %H:%M')
power_df = power_df.sort_values(by='date',ascending=True)
power_df=power_df.set_index("date").sort_index().loc[:'31/12/2016 00:00']
return power_df
def objetivo(space):
    # hyperopt objective; expects X_train, X_test, y_train, y_test at module level
clf = xgb.XGBRegressor(n_estimators =int(space['n_estimators']),
learning_rate = space['learning_rate'],
max_depth = int(space['max_depth']),
min_child_weight = space['min_child_weight'],
subsample = space['subsample'],
gamma = space['gamma'],
reg_lambda = space['reg_lambda'],
objective='reg:squarederror')
eval_set=[(X_train, y_train), (X_test, y_test)]
clf.fit(X_train, y_train,
eval_set=eval_set, eval_metric="rmse", verbose=False)
y_pred = clf.predict(X_test)
rmse = mean_squared_error(y_test, y_pred)**(0.5)
return {'loss':rmse, 'status': STATUS_OK }
def get_vvel(base_dir):
"""This function gives you the values of all Velocity at 100m height as pandas data frame
"""
content = []
filenames = []
filenames.append(get_date(base_dir))
for file in os.listdir(base_dir):
file_path = f'{base_dir}/{file}'
filenames.append(file)
content.append(np.fromfile(file_path, dtype=np.float32)[21762:21879])
return pd.DataFrame(data=content)
def plotting_feature_importance(importance, model):
    """Plot the feature importances of the forest"""
    # NOTE: relies on a module-level X_train, like objetivo() above
    std = np.std([estimator.feature_importances_ for estimator in model.estimators_],
                 axis=0)
    index = np.argsort(importance)
    plt.figure(figsize=(15, 15))
    plt.title("Feature importances")
    plt.barh(range(X_train.values.shape[1]), importance[index],
             color="r", xerr=std[index], align="center")
    plt.yticks(range(X_train.values.shape[1]), index)
    plt.ylim([-1, X_train.values.shape[1]])
    return plt.show()
def estimate_coord_plot(feten):
lon_res = 13
lat_res = 9
nz = 26
lat_step = 0.5
lon_step = 0.5
lat_start = 44
    lat_end = lat_start + lat_step * (lat_res - 1)  # compute the final latitude
    lon_start = -123
    lon_end = lon_start + lon_step * (lon_res - 1)  # compute the final longitude - used to build the mesh
    lat = np.linspace(start=lat_start, stop=lat_end, endpoint=True, num=lat_res)
    lon = np.linspace(start=lon_start, stop=lon_end, endpoint=True, num=lon_res)
    lon, lat = np.meshgrid(lon, lat)
    Z = feten.reshape(lat_res, lon_res)
    ptos = np.hstack((lat.reshape((lat.size, 1)), lon.reshape((lon.size, 1))))
    fig = plt.figure(figsize=(12, 10))
    im = plt.pcolormesh(lat, lon, Z)  # map each value onto its position on the grid
    return plt.colorbar(mappable=im)
def get_location(feten):
lon_res = 13
lat_res = 9
nz = 26
lat_step = 0.5
lon_step = 0.5
lat_start = 44
    lat_end = lat_start + lat_step * (lat_res - 1)  # compute the final latitude
    lon_start = -123
    lon_end = lon_start + lon_step * (lon_res - 1)  # compute the final longitude - used to build the mesh
    lat = np.linspace(start=lat_start, stop=lat_end, endpoint=True, num=lat_res)
    lon = np.linspace(start=lon_start, stop=lon_end, endpoint=True, num=lon_res)
    lon, lat = np.meshgrid(lon, lat)
    Z = feten.reshape(lat_res, lon_res)
    point = Z.argmax()  # flat index of the strongest feature importance
    ptos = np.hstack((lat.reshape((lat.size, 1)), lon.reshape((lon.size, 1))))
    coordinates = list(ptos[point])
return coordinates
def drawing_map(result_point, radio=False, distance=False):
    # NOTE: recompute the grid bounds used in get_location(); they are not module-level
    lat_start, lat_end = 44, 44 + 0.5 * (9 - 1)
    lon_start, lon_end = -123, -123 + 0.5 * (13 - 1)
    m = folium.Map(
        location=[(lat_start + lat_end) / 2, (lon_start + lon_end) / 2],
        zoom_start=7,
        tiles='Stamen Terrain'
    )
    tooltip = 'I am here!'
    if radio or distance:
        folium.CircleMarker(location=[45.58163, -120.15285], radius=100, popup=' FRI ').add_to(m)
        folium.PolyLine(locations=[result_point, (45.18163, -120.15285)], line_opacity=0.5).add_to(m)
        folium.Marker([45.18163, -120.15285], popup='<b>Condon WindFarm</b>', tooltip=tooltip).add_to(m)
    folium.Marker(result_point, popup='<i>Result</i>', tooltip=tooltip).add_to(m)
    return m
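# Hedged worked example (added for illustration; not part of the file): how get_date()
# derives a timestamp from a file name such as 'gfs_2016010100_f006' (an invented name).
if __name__ == '__main__':
    match = 'gfs_2016010100_f006'.split("_")[1]                  # -> '2016010100'
    date = pd.to_datetime(match, format="%Y%m%d%H").strftime('%d/%m/%Y')
    time = (datetime.strptime(match, "%Y%m%d%H") + timedelta(hours=6)).strftime('%H:%M')
    print(date + " " + time)                                     # -> '01/01/2016 06:00'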
| 35.655602
| 107
| 0.641918
|
b30a258bd6425fa19eea2221956891d085c9fe1b
| 413
|
py
|
Python
|
2020/test/test_day9.py
|
terezaif/adventofcode
|
67601f79a3b01d71434ef0236387ffd5ab7dca0f
|
[
"MIT"
] | 4
|
2020-12-06T13:11:59.000Z
|
2021-12-15T11:34:34.000Z
|
2020/test/test_day9.py
|
terezaif/adventofcode
|
67601f79a3b01d71434ef0236387ffd5ab7dca0f
|
[
"MIT"
] | null | null | null |
2020/test/test_day9.py
|
terezaif/adventofcode
|
67601f79a3b01d71434ef0236387ffd5ab7dca0f
|
[
"MIT"
] | 1
|
2021-12-02T16:32:50.000Z
|
2021-12-02T16:32:50.000Z
|
from days.day9 import get_first_number
from days.day9 import get_list_ends
from utils.reading_data import get_int_input_array
input = get_int_input_array(path="2020/test/data/day9.txt")
def test_get_count():
expected = 127
actual = get_first_number(5, input)
assert expected == actual
def test_get_count_2():
expected = 62
actual = get_list_ends(127, input)
assert expected == actual
| 22.944444
| 59
| 0.750605
|
612b9155d03f3c7f61e8d863730ebe89f6a6a3a5
| 1,013
|
py
|
Python
|
video/models.py
|
JisunParkRea/djangotube_tutorial
|
c173f624da4aee7252c99f0852789f06b4bff4c7
|
[
"MIT"
] | 2
|
2020-12-07T04:49:32.000Z
|
2021-04-12T04:46:09.000Z
|
video/models.py
|
JisunParkRea/djangotube_tutorial
|
c173f624da4aee7252c99f0852789f06b4bff4c7
|
[
"MIT"
] | 4
|
2020-04-28T07:54:02.000Z
|
2021-09-22T18:52:46.000Z
|
video/models.py
|
JisunParkRea/djangotube_tutorial
|
c173f624da4aee7252c99f0852789f06b4bff4c7
|
[
"MIT"
] | 1
|
2020-04-28T07:42:27.000Z
|
2020-04-28T07:42:27.000Z
|
from django.conf import settings
from django.db import models
class Video(models.Model):
class Category(models.TextChoices):
Music = 'music'
Movie = 'movie'
Drama = 'drama'
Comedy = 'comedy'
Information = 'info'
Daily = 'daily'
Beauty = 'beauty'
Art = 'art'
Book = 'book'
Sport = 'sport'
Food = 'food'
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
title = models.CharField(max_length=100)
video_key = models.CharField(max_length=12)
likes_user = models.ManyToManyField(settings.AUTH_USER_MODEL, blank=True, related_name='likes_user')
upload_date = models.DateTimeField(auto_now_add=True, null=True) # first created date
category = models.TextField(choices=Category.choices, blank=True)
class Meta:
ordering = ['-upload_date']
def count_likes_user(self):
return self.likes_user.count()
def __str__(self):
return self.title
| 29.794118
| 104
| 0.655479
|
aa2da3d00fb9e037b4bb23b49c7b7ac2054d9b03
| 4,106
|
py
|
Python
|
test/functional/feature_minchainwork.py
|
VaderCoinProject/vadercoin
|
b513c794b014d40e5aad281dd1f54845c46d216c
|
[
"MIT"
] | null | null | null |
test/functional/feature_minchainwork.py
|
VaderCoinProject/vadercoin
|
b513c794b014d40e5aad281dd1f54845c46d216c
|
[
"MIT"
] | null | null | null |
test/functional/feature_minchainwork.py
|
VaderCoinProject/vadercoin
|
b513c794b014d40e5aad281dd1f54845c46d216c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Vadercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for setting nMinimumChainWork on command line.
Nodes don't consider themselves out of "initial block download" until
their active chain has more work than nMinimumChainWork.
Nodes don't download blocks from a peer unless the peer's best known block
has more work than nMinimumChainWork.
While in initial block download, nodes won't relay blocks to their peers, so
test that this parameter functions as intended by verifying that block relay
only succeeds past a given node once its nMinimumChainWork has been exceeded.
"""
import time
from test_framework.test_framework import VadercoinTestFramework
from test_framework.util import assert_equal
# 2 hashes required per regtest block (with no difficulty adjustment)
REGTEST_WORK_PER_BLOCK = 2
class MinimumChainWorkTest(VadercoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
self.node_min_work = [0, 101, 101]
def setup_network(self):
# This test relies on the chain setup being:
# node0 <- node1 <- node2
# Before leaving IBD, nodes prefer to download blocks from outbound
# peers, so ensure that we're mining on an outbound peer and testing
# block relay to inbound peers.
self.setup_nodes()
for i in range(self.num_nodes-1):
self.connect_nodes(i+1, i)
def run_test(self):
# Start building a chain on node0. node2 shouldn't be able to sync until node1's
# minchainwork is exceeded
starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
starting_blockcount = self.nodes[2].getblockcount()
num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
hashes = self.nodes[0].generatetoaddress(num_blocks_to_generate,
self.nodes[0].get_deterministic_priv_key().address)
self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
# Sleep a few seconds and verify that node2 didn't get any new blocks
# or headers. We sleep, rather than sync_blocks(node0, node1) because
# it's reasonable either way for node1 to get the blocks, or not get
# them (since they're below node1's minchainwork).
time.sleep(3)
self.log.info("Verifying node 2 has no more blocks than before")
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
# Node2 shouldn't have any new headers yet, because node1 should not
# have relayed anything.
assert_equal(len(self.nodes[2].getchaintips()), 1)
assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
self.log.info("Generating one more block")
self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)
self.log.info("Verifying nodes are all synced")
# Because nodes in regtest are all manual connections (eg using
# addnode), node1 should not have disconnected node0. If not for that,
# we'd expect node1 to have disconnected node0 for serving an
# insufficient work chain, in which case we'd need to reconnect them to
# continue the test.
self.sync_all()
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
if __name__ == '__main__':
MinimumChainWorkTest().main()
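# Hedged worked arithmetic (added for illustration; not part of the test): with
# REGTEST_WORK_PER_BLOCK = 2 and -minimumchainwork=0x65 (decimal 101), run_test()
# generates int((101 - 2) / 2) = 49 blocks, for a total chain work of
# 2 + 49 * 2 = 100 < 101, so relay past node1 only happens after the one extra block.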
| 45.120879
| 108
| 0.698003
|
5a27514d3ca08d3797946bc9e86882294446f79d
| 13,040
|
py
|
Python
|
views/web/dustWeb/WebPage.py
|
twatteynelinear/dustlink_sierra
|
9dac02d4fdfdee240a8a9da2e6abc2d7fda3443b
|
[
"BSD-3-Clause"
] | 4
|
2016-09-07T05:46:20.000Z
|
2020-05-31T21:34:27.000Z
|
views/web/dustWeb/WebPage.py
|
twatteynelinear/dustlink_sierra
|
9dac02d4fdfdee240a8a9da2e6abc2d7fda3443b
|
[
"BSD-3-Clause"
] | null | null | null |
views/web/dustWeb/WebPage.py
|
twatteynelinear/dustlink_sierra
|
9dac02d4fdfdee240a8a9da2e6abc2d7fda3443b
|
[
"BSD-3-Clause"
] | 6
|
2015-01-22T10:14:24.000Z
|
2020-05-31T21:34:30.000Z
|
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('WebPage')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import os
import urllib
import web
from viz import Viz, \
VizBanner
TEMPLATE_PATH = os.path.join('templates')
LOOK_AND_FEEL = 'dust'
class WebPage(object):
DESIGN_ONE_COLUMN = 'one_column'
DESIGN_TWO_COLUMNS = 'two_columns'
DESIGN_ALL = [DESIGN_ONE_COLUMN,DESIGN_TWO_COLUMNS]
LAYOUT_HORIZONTAL = 'horizontal'
LAYOUT_VERTICAL = 'vertical'
LAYOUT_ALL = [LAYOUT_HORIZONTAL,LAYOUT_VERTICAL]
def __init__(self,webServer,url,title,webHandler,hidden=False):
# store params
self.webServer = webServer
self.url = url
self.title = title
self.webHandler = webHandler
self.hidden = hidden
# local variables
self.children = []
#======================== public ==========================================
def createPage(self,username=None,
currentPath=[],
design=DESIGN_TWO_COLUMNS,
layout=LAYOUT_VERTICAL,
visualizations=[]):
'''
\brief Create a full HTML page, ready to be sent back to the client.
\param[in] username The username associated with this client's session.
This can be used to display the username in the page.
\param[in] currentPath Path of the resulting page.
\param[in] design The design of the page, i.e. "look-and-feel" to expect.
This can translate in different templates.
Must be an element of DESIGN_ALL.
\param[in] layout The layout of the page, i.e. how the visualizations
are arranged inside the page.
Must be an element of LAYOUT_ALL.
\param[in] visualizations List of visualizations this page must contain.
Each visualization must be of type Viz.
'''
# filter errors
assert (not username) or isinstance(username,str)
assert isinstance(currentPath,list)
for p in currentPath:
assert isinstance(p,str)
assert design in self.DESIGN_ALL
assert layout in self.LAYOUT_ALL
assert isinstance(visualizations,list)
for v in visualizations:
assert isinstance(v,Viz.Viz)
# add a banner
visualizations += [
VizBanner.VizBanner(
webServer = self.webServer,
username = username,
resourcePath = ['banner'],
),
]
# get the pageTitle from the current path
pageTitle = self.webServer.getPageTitle(currentPath)
# get the template corresponding to the design
webtemplate = web.template.frender(
os.path.join(
TEMPLATE_PATH,
LOOK_AND_FEEL,
'{0}.html'.format(design)
)
)
# create the logFrameCode from the username
logFrameCode = self._buildLoginFrame(username)
# get the libraries from the visualizations
libraries = []
for v in visualizations:
libraries += v.getLibraries()
libraries = list(set(libraries)) # remove duplicates
# re-arrange library order to deal with dependencies
for lib in [Viz.Viz.LIBRARY_JQUERY]:
if lib in libraries:
# remove
libraries.remove(lib)
# put at front
libraries.insert(0,lib)
for lib in [Viz.Viz.LIBRARY_RAPHAEL,Viz.Viz.LIBRARY_MORRIS]:
if lib in libraries:
# remove
libraries.remove(lib)
# put at end
libraries.append(lib)
# create unique ID for each visualization
uniqueId = {}
for v in visualizations:
uniqueId[v] = 'id'+str(self.webServer.getUniqueNumber())
# create the headerCode from the visualizations
headerElems = []
for l in libraries:
headerElems += ['<script type="text/javascript" src="{0}"></script>'.format(l)]
for v in visualizations:
headerElems += [v.getHeaderCode(uniqueId[v])]
headerCode = '\n'.join(headerElems)
# get page level documentation
pathCopy = list(currentPath)
pathCopyLast = len(pathCopy) - 1
if pathCopyLast >= 0 and pathCopy[pathCopyLast].startswith("_"):
pathCopy[pathCopyLast] = '*'
pathTuple = tuple(pathCopy)
documentation = self.webServer.getDocumentation().getDocHTML(pathTuple, "page")
# create the bodyCode from the visualizations
bodyElems = []
for v in visualizations:
bodyElems += [v.getBodyCode(uniqueId[v])]
bodyCode = self._layoutElems(bodyElems,layout)
renderedPage = webtemplate (
pageTitle = pageTitle,
hierarchy = self.webServer.getUrlHierarchy(),
currentPath = currentPath,
logFrameCode = logFrameCode,
headerCode = headerCode,
bodyCode = bodyCode,
documentation = documentation,
)
return renderedPage
def registerPage(self,newChild):
# filter error
assert isinstance(newChild,WebPage)
# add to children
self.children.append(newChild)
def getUrlHierarchy(self,parentPath=[]):
assert not self.url.count('/')
newParentPath = parentPath+[self.url]
classUrl = newParentPath
if len(classUrl) and not classUrl[0]:
classUrl = classUrl[1:]
returnVal = {}
returnVal['url'] = self.urlListToString(newParentPath)
returnVal['title'] = self.title
returnVal['class'] = self.webServer.getDocumentation().getClass(classUrl)
returnVal['children'] = [c.getUrlHierarchy(newParentPath) for c in self.children if not c.hidden]
return returnVal
def getPageTitle(self,path):
# filter errors
assert isinstance(path,list)
for p in path:
assert isinstance(p,(str,unicode))
if len(path)>0:
if path[0].startswith('_'):
return urllib.unquote(urllib.unquote(path[0][1:]))
else:
for c in self.children:
urlElems = self.urlStringTolist(c.url)
if path[0]==urlElems[0]:
return c.getPageTitle(path[1:])
return 'unknown 1'
elif len(path)==0:
return self.title
else:
return 'unknown 2'
def getHandlerNameToHandlerClass(self,parentUrl=''):
assert not parentUrl.count('/')
assert not self.url.count('/')
returnVal = {}
# add my webHandler
returnVal[self.webHandler.__name__] = self.webHandler
# add my children's mapping
for child in self.children:
returnVal = dict(returnVal.items() + child.getHandlerNameToHandlerClass().items())
return returnVal
def getMappingUrlToHandlerName(self,parentUrl=''):
'''
\brief Return the mapping between URL's and webHandler's
This method returns a tuple, where URL's are in the odd positions and
webHandler in the even positions, e.g.:
(
'', 'rootHandler',
'level1', 'level1Handler',
'level1/level2','level2Handler',
)
This structure can be used directly by a web.py server.
'''
assert not parentUrl.count('/')
assert not self.url.count('/')
returnVal = []
# add me
returnVal += [self.urlListToString([parentUrl,self.url], trailingSlashOption=True),
self.webHandler.__name__]
returnVal += [self.urlListToString([parentUrl,self.url,'json','(.*)'],trailingSlashOption=True),
self.webHandler.__name__]
# add my children's mapping
for child in self.children:
returnVal += child.getMappingUrlToHandlerName(parentUrl=self.url)
# return a tuple
return tuple(returnVal)
#======================== private =========================================
def _buildLoginFrame(self,username):
if username in [self.webServer.defaultUsername]:
output = []
output += ["<form action=\"/login\" method=\"POST\">"]
output += [" <table id=\"login\">"]
output += [" <tr>"]
output += [" <td>Username:</td>"]
output += [" <td><input type=\"text\" name=\"username\"/></td>"]
output += [" <td>Password:</td>"]
output += [" <td><input type=\"password\" name=\"password\"/></td>"]
output += [" <td><input type=\"hidden\" name=\"action\" value=\"login\"/></td>"]
output += [" <td><input type=\"submit\" value=\"LOGIN\"/></td>"]
output += [" </tr>"]
output += [" </table>"]
output += ["</form>"]
return '\n'.join(output)
else:
output = []
output += ["<form action=\"/login\" method=\"POST\">"]
output += [" <table>"]
output += [" <tr>"]
output += [" <td>You are logged in as <b>{0}</b>.</td>".format(username)]
output += [" <td><input type=\"hidden\" name=\"action\" value=\"logout\"></td>"]
output += [" <td><input type=\"submit\" value=\"LOGOUT\"></td>"]
output += [" </tr>"]
output += [" </table>"]
output += ["</form>"]
return '\n'.join(output)
def _layoutElems(self,elems,layout):
# filter errors
assert isinstance(elems,list)
for e in elems:
assert isinstance(e,str)
assert layout in self.LAYOUT_ALL
returnVal = []
# returnVal += ['<table>']
if layout in [self.LAYOUT_HORIZONTAL]:
# returnVal += ['<tr>']
for e in elems:
# returnVal += ['<td>']
returnVal += [e]
# returnVal += ['</td>']
# returnVal += ['</tr>']
elif layout in [self.LAYOUT_VERTICAL]:
for e in elems:
# returnVal += ['<tr>']
# returnVal += ['<td>']
returnVal += [e]
# returnVal += ['</td>']
# returnVal += ['</tr>']
else:
raise SystemError('unexpected layout {0}'.format(layout))
# returnVal += ['</table>']
return '\n'.join(returnVal)
@classmethod
def urlListToString(self,urlList,trailingSlashOption=False):
# remove empty elements from urlList
urlList = [u for u in urlList if u]
returnVal = []
if urlList:
returnVal += ['/']
returnVal += ['/'.join(urlList)]
if trailingSlashOption:
returnVal += ['/?']
return ''.join(returnVal)
@classmethod
def urlStringTolist(self,urlString):
# filter errors
assert isinstance(urlString,(str,unicode))
# split into elements
urlList = urlString.split('/')
# remove empty elements (can happen with e.g. trailing slash)
urlList = [u for u in urlList if u]
# convert elements to string (can be unicode)
urlList = [str(u) for u in urlList]
return urlList
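# Hedged sketch (added for illustration; not part of the file): the two classmethod URL
# helpers round-trip as follows; the path elements are invented.
#   WebPage.urlListToString(['', 'motes', 'temperature'])                 # -> '/motes/temperature'
#   WebPage.urlListToString(['motes', '(.*)'], trailingSlashOption=True)  # -> '/motes/(.*)/?'
#   WebPage.urlStringTolist('/motes/temperature/')                        # -> ['motes', 'temperature']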
| 38.017493
| 108
| 0.486887
|
3c6ee1de0fc4651c5a4a56fab3106ac42dd99391
| 7,467
|
py
|
Python
|
core/utils.py
|
uktrade/export-wins-data
|
46caa444812e89abe504bec8c15aa7f7ba1a247e
|
[
"MIT"
] | 5
|
2016-09-12T12:52:45.000Z
|
2020-03-24T14:43:13.000Z
|
core/utils.py
|
uktrade/export-wins-data
|
46caa444812e89abe504bec8c15aa7f7ba1a247e
|
[
"MIT"
] | 435
|
2016-10-18T12:51:39.000Z
|
2021-06-09T17:22:08.000Z
|
core/utils.py
|
uktrade/export-wins-data
|
46caa444812e89abe504bec8c15aa7f7ba1a247e
|
[
"MIT"
] | 2
|
2016-12-06T10:37:21.000Z
|
2017-02-22T17:27:43.000Z
|
from functools import lru_cache
import boto3
import itertools
from collections import defaultdict
from operator import itemgetter
from typing import List, MutableMapping
from django.conf import settings
from rest_framework.fields import (
BooleanField, CharField, ChoiceField, DateField, DecimalField, EmailField, IntegerField,
UUIDField,
)
from extended_choices import Choices
import yaml
from django.core.serializers import base
from django.db import DEFAULT_DB_ALIAS, transaction
def filter_key(dict_, key_to_remove):
return {k: v for k, v in dict_.items() if k != key_to_remove}
def group_by_key(l: List[MutableMapping], key: str, flatten: bool = False) -> MutableMapping:
"""
    :param l: list of dicts, e.g. [{'a': 1, 'b': 1}, {'b': 2, 'a': 2}]
    :param key: the dict key to group by
:return: a dict with keys and an object or list of objects in the format:
{1: [{'b': 1}], 2: [{'b': 2}]} or if flatten=True {1: {'b': 1}, 2: {'b': 2}}
"""
key_getter = itemgetter(key)
l.sort(key=key_getter)
groups = defaultdict(list)
for group, vals in itertools.groupby(l, key=key_getter):
groups[group] = [filter_key(data, key) for data in vals]
return {k: v[0] if flatten else v for k, v in groups.items()}
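# Editorial note: an illustrative usage sketch (not part of the original
# module) for group_by_key, reusing the example data from its docstring.
def _group_by_key_example():
    rows = [{'a': 1, 'b': 1}, {'b': 2, 'a': 2}]
    grouped = group_by_key(rows, 'a')                  # {1: [{'b': 1}], 2: [{'b': 2}]}
    flattened = group_by_key(rows, 'a', flatten=True)  # {1: {'b': 1}, 2: {'b': 2}}
    return grouped, flattened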
def getitem_or_default(l, idx, default=None):
"""
gets the item at position idx or returns the default value
    :param l: list of things
:param idx: position
:param default: optional default value
:return: thing at index idx or default
"""
try:
return l[idx]
except IndexError:
return default
class TrackedSupersetChoices(Choices):
"""
Same as a normal Choices object except subsets have access to
their superset.
"""
def add_subset(self, name, constants):
super(TrackedSupersetChoices, self).add_subset(name, constants)
subset = getattr(self, name)
subset.superset = self
def get_bucket_credentials(bucket_id):
"""Get S3 credentials for bucket id."""
if bucket_id not in settings.DOCUMENT_BUCKETS:
raise Exception(f'Bucket "{bucket_id}" not configured.')
return settings.DOCUMENT_BUCKETS[bucket_id]
def get_bucket_name(bucket_id):
"""Get bucket name for given bucket id."""
return get_bucket_credentials(bucket_id)['bucket']
@lru_cache()
def get_s3_client_for_bucket(bucket_id):
"""Get S3 client for bucket id."""
credentials = get_bucket_credentials(bucket_id)
return boto3.client(
's3',
aws_access_key_id=credentials['aws_access_key_id'],
aws_secret_access_key=credentials['aws_secret_access_key'],
region_name=credentials['aws_region'],
config=boto3.session.Config(signature_version='s3v4'),
)
def parse_bool(value):
"""Parses a boolean value from a string."""
return _parse_value(value, BooleanField())
def parse_date(value):
"""Parses a date from a string."""
return _parse_value(value, DateField())
def parse_decimal(value, max_digits=19, decimal_places=2):
"""Parses a decimal from a string."""
return _parse_value(value, DecimalField(max_digits, decimal_places))
def parse_email(value):
"""Parses an email address from a string."""
return _parse_value(value, EmailField(), blank_value='')
def parse_uuid(value):
"""Parses a UUID from a string."""
return _parse_value(value, UUIDField())
def parse_int(value):
"""Parses a integer from a string."""
return _parse_value(value, IntegerField())
def parse_uuid_list(value):
"""Parses a comma-separated list of UUIDs from a string."""
return _parse_list(value, UUIDField())
def parse_int_list(value):
"""Parses a comma-separated list of Integers from a string."""
return _parse_list(value, IntegerField())
def parse_choice(value, choices, blank_value=''):
"""Parses and validates a value from a list of choices."""
return _parse_value(value, ChoiceField(choices=choices), blank_value=blank_value)
def parse_limited_string(value, max_length=settings.CHAR_FIELD_MAX_LENGTH):
"""Parses/validates a string."""
return _parse_value(value, CharField(max_length=max_length), blank_value='')
def _parse_value(value, field, blank_value=None):
if not value or value.lower().strip() == 'null':
return blank_value
field.run_validation(value)
return field.to_internal_value(value)
def _parse_list(value, field):
"""Parses a comma-separated list of UUIDs from a string."""
if not value or value.lower().strip() == 'null':
return []
return [field.to_internal_value(item) for item in value.split(',')]
def _build_model_data(model, obj_pk, fields_data, using):
data = {}
# Handle each field
for (field_name, field_value) in fields_data.items():
field = model._meta.get_field(field_name)
# Handle many-to-many relations
if field.many_to_many:
raise NotImplementedError('Many-to-many fields not supported')
# Handle one-to-many relations
if field.one_to_many:
raise NotImplementedError('One-to-many fields not supported')
# Handle fk fields
if field.many_to_one:
try:
value = base.deserialize_fk_value(field, field_value, using, False)
except Exception as exc:
raise base.DeserializationError.WithData(
exc,
model._meta.model_name,
obj_pk,
field_value,
) from exc
data[field.attname] = value
# Handle all other fields
else:
try:
data[field.name] = field.to_python(field_value)
except Exception as exc:
raise base.DeserializationError.WithData(
exc,
model._meta.model_name,
obj_pk,
field_value,
) from exc
return data
def _load_data_in_migration(apps, object_list, using=DEFAULT_DB_ALIAS):
for list_item in object_list:
obj_pk = list_item.get('pk')
assert obj_pk, 'pk field required'
model_label = list_item['model']
model = apps.get_model(model_label)
fields_data = list_item['fields']
model_data = _build_model_data(model, obj_pk, fields_data, using)
model.objects.update_or_create(pk=obj_pk, defaults=model_data)
@transaction.atomic
def load_yaml_data_in_migration(apps, fixture_file_path):
"""
Loads the content of the yaml file `fixture_file_path` into the database.
This is similar to `loaddata` but:
- it's safe to be used in migrations
- it does not change the fields that are not present in the yaml
Motivation:
Calling `loaddata` from a data migration makes django use the latest version
of the models instead of the version at the time of that particular migration.
    This causes problems: adding a new field to a model which had a data migration
    in the past is okay, but migrating from zero fails because the model used by
    loaddata (the latest version) has a field that did not exist at that migration's time.
Limitations:
- Many-to-many fields are not supported yet
- all items in the yaml have to include a pk field
"""
with open(fixture_file_path, 'rb') as fixture:
object_list = yaml.safe_load(fixture)
_load_data_in_migration(apps, object_list)
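# Editorial note: an illustrative sketch (not part of the original module) of
# how load_yaml_data_in_migration is typically used as the forwards callable
# of migrations.RunPython; the fixture path is a hypothetical placeholder.
def _example_fixture_loader(apps, schema_editor):
    load_yaml_data_in_migration(apps, 'myapp/fixtures/initial_data.yaml')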
| 32.04721
| 93
| 0.673229
|
93c263901e353879fbc21b83c93ffe91df26ff55
| 1,071
|
py
|
Python
|
templates/database/redis/actions.py
|
Jumpscale/ays9
|
63bd414ff06372ba885c55eec528f427e63bcbe1
|
[
"Apache-2.0"
] | 4
|
2017-06-07T08:10:06.000Z
|
2017-11-10T02:20:38.000Z
|
templates/database/redis/actions.py
|
Jumpscale/ays9
|
63bd414ff06372ba885c55eec528f427e63bcbe1
|
[
"Apache-2.0"
] | 242
|
2017-05-18T10:51:48.000Z
|
2019-09-18T15:09:47.000Z
|
templates/database/redis/actions.py
|
Jumpscale/ays9
|
63bd414ff06372ba885c55eec528f427e63bcbe1
|
[
"Apache-2.0"
] | 5
|
2017-06-16T15:43:25.000Z
|
2017-09-29T12:48:06.000Z
|
def install(job):
service = job.service
prefab = service.executor.prefab
prefab.db.redis.install()
prefab.db.redis.start(
name=service.name,
ip=service.model.data.host if service.model.data.host != '' else None,
port=service.model.data.port,
unixsocket=service.model.data.unixsocket if service.model.data.unixsocket != '' else None,
maxram=service.model.data.maxram,
appendonly=service.model.data.appendonly)
def start(job):
service = job.service
prefab = service.executor.prefab
prefab.db.redis.install()
prefab.db.redis.start(
name=service.name,
ip=service.model.data.host if service.model.data.host != '' else None,
port=service.model.data.port,
unixsocket=service.model.data.unixsocket if service.model.data.unixsocket != '' else None,
maxram=service.model.data.maxram,
appendonly=service.model.data.appendonly)
def stop(job):
service = job.service
prefab = service.executor.prefab
prefab.db.redis.stop(job.service.name)
| 32.454545
| 98
| 0.678805
|
8849d08969a60fff74a267de881e4271829b479b
| 975
|
py
|
Python
|
magpie/polar/radial.py
|
knaidoo29/magpie
|
efab3c2666aab2c928ca12a631758bc1b43c149c
|
[
"MIT"
] | null | null | null |
magpie/polar/radial.py
|
knaidoo29/magpie
|
efab3c2666aab2c928ca12a631758bc1b43c149c
|
[
"MIT"
] | null | null | null |
magpie/polar/radial.py
|
knaidoo29/magpie
|
efab3c2666aab2c928ca12a631758bc1b43c149c
|
[
"MIT"
] | null | null | null |
import numpy as np
def cumulative_radial(redges, f, sigma=None):
"""Returns the cumulative radial profile and errors if errors are given.
Parameters
----------
redges : array
Edges of the radial bins.
f : array
Radial profile.
sigma : array, optional
Radial errors.
Returns
-------
cumulative_f : array
Cumulative radial profile.
cumulative_sigma : array
If sigma is given then the cumulative errors are computed.
"""
area = np.pi*(redges[1:]**2. - redges[:-1]**2.)
f_area = area*f
cumulative_f = np.zeros(len(redges))
cumulative_f[1:] = np.cumsum(f_area)
if sigma is not None:
cumulative_var = np.zeros(len(redges))
var_area = (area*sigma)**2.
cumulative_var[1:] = np.cumsum(var_area)
cumulative_sigma = np.sqrt(cumulative_var)
if sigma is None:
return cumulative_f
else:
return cumulative_f, cumulative_sigma
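# Editorial note: an illustrative usage sketch (not part of the original
# module); with a flat unit profile the cumulative value at the outer edge
# should come out close to pi*r_max**2.
def _cumulative_radial_example():
    redges = np.linspace(0., 1., 11)          # 10 radial bins
    f = np.ones(len(redges) - 1)              # constant profile of 1 per bin
    sigma = 0.1*np.ones(len(redges) - 1)      # arbitrary example errors
    cf, csigma = cumulative_radial(redges, f, sigma=sigma)
    return cf, csigma                         # cf[-1] is approximately pi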
| 27.083333
| 76
| 0.620513
|
022b58fcc72ada5befe8cf9f8514fa8c52ca86af
| 198
|
py
|
Python
|
__main__.py
|
macph/easement-curve
|
e1657682db3bc5b8d59a1fb06816732b784d8314
|
[
"MIT"
] | 1
|
2019-05-31T03:24:40.000Z
|
2019-05-31T03:24:40.000Z
|
__main__.py
|
macph/easement-curve
|
e1657682db3bc5b8d59a1fb06816732b784d8314
|
[
"MIT"
] | null | null | null |
__main__.py
|
macph/easement-curve
|
e1657682db3bc5b8d59a1fb06816732b784d8314
|
[
"MIT"
] | null | null | null |
# MIT License, copyright Ewan Macpherson, 2016; see LICENCE in root directory
# Main script for package.
import ec.tk
def main(argv=None):
ec.tk.main()
if __name__ == '__main__':
main()
| 16.5
| 77
| 0.691919
|
fe5286ba813dafe65c9a1327c40063bf91d92c1d
| 4,331
|
py
|
Python
|
homeassistant/components/lock/__init__.py
|
VirtualL/home-assistant
|
301829d02be8d865ab46c8901ac046d060849320
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/lock/__init__.py
|
VirtualL/home-assistant
|
301829d02be8d865ab46c8901ac046d060849320
|
[
"Apache-2.0"
] | 3
|
2021-09-08T03:34:57.000Z
|
2022-03-12T00:59:48.000Z
|
homeassistant/components/lock/__init__.py
|
VirtualL/home-assistant
|
301829d02be8d865ab46c8901ac046d060849320
|
[
"Apache-2.0"
] | null | null | null |
"""Component to interface with locks that can be controlled remotely."""
from datetime import timedelta
import functools as ft
import logging
import voluptuous as vol
from homeassistant.loader import bind_hass
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.config_validation import ( # noqa
PLATFORM_SCHEMA, PLATFORM_SCHEMA_BASE)
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
ATTR_CODE, ATTR_CODE_FORMAT, ATTR_ENTITY_ID, STATE_LOCKED, STATE_UNLOCKED,
SERVICE_LOCK, SERVICE_UNLOCK, SERVICE_OPEN)
from homeassistant.components import group
ATTR_CHANGED_BY = 'changed_by'
DOMAIN = 'lock'
DEPENDENCIES = ['group']
SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_ALL_LOCKS = group.ENTITY_ID_FORMAT.format('all_locks')
ENTITY_ID_FORMAT = DOMAIN + '.{}'
GROUP_NAME_ALL_LOCKS = 'all locks'
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
LOCK_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids,
vol.Optional(ATTR_CODE): cv.string,
})
# Bitfield of features supported by the lock entity
SUPPORT_OPEN = 1
_LOGGER = logging.getLogger(__name__)
PROP_TO_ATTR = {
'changed_by': ATTR_CHANGED_BY,
'code_format': ATTR_CODE_FORMAT,
}
@bind_hass
def is_locked(hass, entity_id=None):
"""Return if the lock is locked based on the statemachine."""
entity_id = entity_id or ENTITY_ID_ALL_LOCKS
return hass.states.is_state(entity_id, STATE_LOCKED)
async def async_setup(hass, config):
"""Track states and offer events for locks."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_LOCKS)
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_UNLOCK, LOCK_SERVICE_SCHEMA,
'async_unlock'
)
component.async_register_entity_service(
SERVICE_LOCK, LOCK_SERVICE_SCHEMA,
'async_lock'
)
component.async_register_entity_service(
SERVICE_OPEN, LOCK_SERVICE_SCHEMA,
'async_open'
)
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class LockDevice(Entity):
"""Representation of a lock."""
@property
def changed_by(self):
"""Last change triggered by."""
return None
@property
def code_format(self):
"""Regex for code format or None if no code is required."""
return None
@property
def is_locked(self):
"""Return true if the lock is locked."""
return None
def lock(self, **kwargs):
"""Lock the lock."""
raise NotImplementedError()
def async_lock(self, **kwargs):
"""Lock the lock.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(ft.partial(self.lock, **kwargs))
def unlock(self, **kwargs):
"""Unlock the lock."""
raise NotImplementedError()
def async_unlock(self, **kwargs):
"""Unlock the lock.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(ft.partial(self.unlock, **kwargs))
def open(self, **kwargs):
"""Open the door latch."""
raise NotImplementedError()
def async_open(self, **kwargs):
"""Open the door latch.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(ft.partial(self.open, **kwargs))
@property
def state_attributes(self):
"""Return the state attributes."""
state_attr = {}
for prop, attr in PROP_TO_ATTR.items():
value = getattr(self, prop)
if value is not None:
state_attr[attr] = value
return state_attr
@property
def state(self):
"""Return the state."""
locked = self.is_locked
if locked is None:
return None
return STATE_LOCKED if locked else STATE_UNLOCKED
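# Editorial note: an illustrative sketch (not part of the original module) of
# a minimal entity built on LockDevice; the in-memory state is hypothetical,
# and a real platform would talk to an actual device instead.
class _ExampleLock(LockDevice):
    """Minimal in-memory lock used only to illustrate the entity contract."""
    def __init__(self):
        self._locked = True
    @property
    def is_locked(self):
        """Return true if the lock is locked."""
        return self._locked
    def lock(self, **kwargs):
        """Lock the lock."""
        self._locked = True
    def unlock(self, **kwargs):
        """Unlock the lock."""
        self._locked = False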
| 27.762821
| 78
| 0.684369
|
1b76632e6550e2dcf9f235692734d3c37f41ec4b
| 820
|
py
|
Python
|
backend/blog/model/base.py
|
o8oo8o/blog
|
2a6f44f86469bfbb472dfd1bec4238587d8402bf
|
[
"MIT"
] | null | null | null |
backend/blog/model/base.py
|
o8oo8o/blog
|
2a6f44f86469bfbb472dfd1bec4238587d8402bf
|
[
"MIT"
] | null | null | null |
backend/blog/model/base.py
|
o8oo8o/blog
|
2a6f44f86469bfbb472dfd1bec4238587d8402bf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/evn python3
# coding=utf-8
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.query import Query
from util.config import Config
__CONF = Config()
def all_data(self):
"""
    # Monkey patch: add an all_data method to the Query object that returns results as dicts
"""
field = tuple([f["name"] for f in self.column_descriptions])
all_info = self.all()
result_data = []
for item in all_info:
result_data.append(dict(zip(field, item)))
return result_data
setattr(Query, "all_data", all_data)
# Create the database connection engine
engine = create_engine(
__CONF.get_db_url(),
echo=__CONF.get_conf("orm")["sql_echo"],
pool_pre_ping=True
)
# Models you define need to inherit from this class
Base = declarative_base(engine)
# Database connection session
session = sessionmaker(engine)()
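# Editorial note: an illustrative sketch (not part of the original module) of
# how the all_data patch is used; the User model and its columns are
# hypothetical placeholders.
#   class User(Base):
#       __tablename__ = 'user'
#       id = Column(Integer, primary_key=True)
#       name = Column(String(64))
#   rows = session.query(User.id, User.name).all_data()
#   # -> [{'id': 1, 'name': 'alice'}, ...]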
| 21.025641
| 64
| 0.721951
|
ba7807ed700a19a55fc0d6e6c1306790912363b1
| 2,324
|
py
|
Python
|
setup.py
|
vgiralt/djangosaml2
|
6571a03a139d6806da7d65201902499eeddffde9
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
vgiralt/djangosaml2
|
6571a03a139d6806da7d65201902499eeddffde9
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
vgiralt/djangosaml2
|
6571a03a139d6806da7d65201902499eeddffde9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2011-2012 Yaco Sistemas <lgs@yaco.es>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import os
from setuptools import setup, find_packages
def read(*rnames):
return codecs.open(os.path.join(os.path.dirname(__file__), *rnames), encoding='utf-8').read()
setup(
name='djangosaml2',
version='1.0.0',
description='pysaml2 integration for Django',
long_description=read('README.rst'),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: 3.0",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Security",
"Topic :: Software Development :: Libraries :: Application Frameworks",
],
keywords="django,pysaml2,sso,saml2,federated authentication,authentication",
author="Yaco Sistemas and independent contributors",
author_email="lgs@yaco.es",
maintainer="Jozef Knaperek",
url="https://github.com/knaperek/djangosaml2",
download_url="https://pypi.org/project/djangosaml2/",
license='Apache 2.0',
packages=find_packages(exclude=["tests", "tests.*"]),
include_package_data=True,
zip_safe=False,
install_requires=[
'defusedxml>=0.4.1',
'Django>=2.2,<4',
'pysaml2>=5.3.0',
],
tests_require=[
# Provides assert_called_once.
'mock',
]
)
| 34.686567
| 97
| 0.652754
|
c96ef2a72953f0a9467a023231b0382d4332c67b
| 263
|
py
|
Python
|
tests/base.py
|
satchkat/warm-transfer-flask
|
0fee723f11f17cd816417c4e51e4aec08d1263cb
|
[
"MIT"
] | 3
|
2016-04-28T21:54:22.000Z
|
2019-02-04T05:02:47.000Z
|
tests/base.py
|
satchkat/warm-transfer-flask
|
0fee723f11f17cd816417c4e51e4aec08d1263cb
|
[
"MIT"
] | 202
|
2016-05-03T18:20:07.000Z
|
2022-03-31T06:28:13.000Z
|
tests/base.py
|
satchkat/warm-transfer-flask
|
0fee723f11f17cd816417c4e51e4aec08d1263cb
|
[
"MIT"
] | 6
|
2016-04-28T21:54:27.000Z
|
2022-03-11T20:12:11.000Z
|
from unittest import TestCase
from warm_transfer_flask import app, db
class BaseTestCase(TestCase):
def setUp(self):
self.client = app.test_client()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
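# Editorial note: an illustrative sketch (not part of the original module) of
# a concrete test built on BaseTestCase; the route used is a hypothetical
# placeholder.
class ExampleSmokeTestCase(BaseTestCase):
    def test_client_is_available(self):
        response = self.client.get('/')
        self.assertIsNotNone(response)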
| 18.785714
| 39
| 0.657795
|
806e762a7f4feef020f2cbcda59168b42de7382d
| 997
|
py
|
Python
|
defs.py
|
zaabjuda/test_chat_server
|
96db615223d4a1548629d70b31b3eb7e89dd5ff6
|
[
"MIT"
] | null | null | null |
defs.py
|
zaabjuda/test_chat_server
|
96db615223d4a1548629d70b31b3eb7e89dd5ff6
|
[
"MIT"
] | null | null | null |
defs.py
|
zaabjuda/test_chat_server
|
96db615223d4a1548629d70b31b3eb7e89dd5ff6
|
[
"MIT"
] | null | null | null |
# coding=utf-8
__author__ = "Dmitry Zhiltsov"
__copyright__ = "Copyright 2015, Dmitry Zhiltsov"
from enum import Enum, unique
from strictdict import StrictDict
from strictdict import fields as f
from strictdict.api import optlist, opt
supported_commands = {'JOIN': '_join_room', 'LEFT': '_leave_room', 'LOGIN': '_login', 'QUIT': '_quit'}
@unique
class ChatErrorState(Enum):
user_exist = 1
room_not_found = 2
command_not_found = 3
command_syntax_failed = 4
room_name_invalid = 5
unknow_error = 100
serialize_error = 101
protocol_error = 102
class ChatMessage(StrictDict):
msg = f.String(required=True)
channel = f.String(required=True)
args = optlist(f.String)
class ChatDataResponse(ChatMessage):
author = f.String(required=False)
class ChatErrorResponse(StrictDict):
error = f.Int(required=True)
msg = f.String(required=False)
class ChatResponse(StrictDict):
data = opt(ChatDataResponse)
error = opt(ChatErrorResponse)
| 22.659091
| 102
| 0.72317
|
71bd85b2ec7b7b5f34af914c2668e942334dd3db
| 5,432
|
py
|
Python
|
test/functional/rpc_mn_basic.py
|
aentan/ain
|
1d6db33159de1c8c7930d29a0ab0902f42b728c1
|
[
"MIT"
] | null | null | null |
test/functional/rpc_mn_basic.py
|
aentan/ain
|
1d6db33159de1c8c7930d29a0ab0902f42b728c1
|
[
"MIT"
] | null | null | null |
test/functional/rpc_mn_basic.py
|
aentan/ain
|
1d6db33159de1c8c7930d29a0ab0902f42b728c1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Copyright (c) DeFi Foundation
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the masternodes RPC.
- verify basic MN creation and resign
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.authproxy import JSONRPCException
from test_framework.util import assert_equal, \
connect_nodes_bi
import pprint
import time
class MasternodesRpcBasicTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.setup_clean_chain = True
def run_test(self):
pprint.PrettyPrinter(indent=4)
assert_equal(len(self.nodes[0].mn_list()), 4)
self.nodes[0].generate(100)
time.sleep(2)
self.sync_blocks()
# Stop node #2 for future revert
self.stop_node(2)
# CREATION:
#========================
collateral0 = self.nodes[0].getnewaddress("", "legacy")
# Fail to create: Insufficient funds (not matured coins)
try:
idnode0 = self.nodes[0].mn_create([], {
# "operatorAuthAddress": operator0,
"collateralAddress": collateral0
})
except JSONRPCException as e:
errorString = e.error['message']
assert("Insufficient funds" in errorString)
# Create node0
self.nodes[0].generate(1)
idnode0 = self.nodes[0].mn_create([], {
# "operatorAuthAddress": operator0,
"collateralAddress": collateral0
})
# Create and sign (only) collateral spending tx
spendTx = self.nodes[0].createrawtransaction([{'txid':idnode0, 'vout':1}],[{collateral0:9.999}])
signedTx = self.nodes[0].signrawtransactionwithwallet(spendTx)
assert_equal(signedTx['complete'], True)
# Try to spend collateral of mempooled mn_create tx
try:
self.nodes[0].sendrawtransaction(signedTx['hex'])
except JSONRPCException as e:
errorString = e.error['message']
assert("mn-collateral-locked-in-mempool," in errorString)
self.nodes[0].generate(1)
# At this point, mn was created
assert_equal(self.nodes[0].mn_list([idnode0], False), { idnode0: "created"} )
self.sync_blocks(self.nodes[0:2])
# Stop node #1 for future revert
self.stop_node(1)
# Try to spend locked collateral again
try:
self.nodes[0].sendrawtransaction(signedTx['hex'])
except JSONRPCException as e:
errorString = e.error['message']
assert("mn-collateral-locked," in errorString)
# RESIGNING:
#========================
        # Fail to resign: owner params not placed in the config yet
try:
self.nodes[0].mn_resign([], idnode0)
except JSONRPCException as e:
errorString = e.error['message']
assert("You are not the owner" in errorString)
# Restart with new params, but have no money on ownerauth address
self.restart_node(0, extra_args=['-masternode_owner='+collateral0])
        self.nodes[0].generate(1) # to break out of "initial block downloading"
try:
self.nodes[0].mn_resign([], idnode0)
except JSONRPCException as e:
errorString = e.error['message']
assert("Can't find any UTXO's" in errorString)
# Funding auth address and successful resign
fundingTx = self.nodes[0].sendtoaddress(collateral0, 1)
self.nodes[0].generate(1)
resignTx = self.nodes[0].mn_resign([], idnode0)
self.nodes[0].generate(1)
assert_equal(self.nodes[0].mn_list()[idnode0]['status'], "created, resigned")
# Spend unlocked collateral
# This checks two cases at once:
# 1) Finally, we should not fail on accept to mempool
# 2) But we don't mine blocks after it, so, after chain reorg (on 'REVERTING'), we should not fail: tx should be removed from mempool!
self.nodes[0].generate(12)
sendedTxHash = self.nodes[0].sendrawtransaction(signedTx['hex'])
# Don't mine here, check mempool after reorg!
# self.nodes[0].generate(1)
# REVERTING:
#========================
# Revert resign!
self.start_node(1)
self.nodes[1].generate(20)
# Check that collateral spending tx is still in the mempool
assert_equal(sendedTxHash, self.nodes[0].getrawmempool()[0])
connect_nodes_bi(self.nodes, 0, 1)
self.sync_blocks(self.nodes[0:2])
# Check that collateral spending tx was deleted
# print ("CreateTx", idnode0)
# print ("ResignTx", resignTx)
# print ("FundingTx", fundingTx)
# print ("SpendTx", sendedTxHash)
assert_equal(self.nodes[0].getrawmempool(), [fundingTx, resignTx])
assert_equal(self.nodes[0].mn_list()[idnode0]['status'], "active")
# Revert creation!
self.start_node(2)
self.nodes[2].generate(25)
connect_nodes_bi(self.nodes, 0, 2)
self.sync_blocks(self.nodes[0:3])
assert_equal(len(self.nodes[0].mn_list()), 4)
assert_equal(self.nodes[0].getrawmempool(), [idnode0, fundingTx, resignTx])
if __name__ == '__main__':
MasternodesRpcBasicTest ().main ()
| 35.736842
| 142
| 0.621686
|
56fbd6858779ec7a95e2649a991ee5e03a8337e6
| 34,797
|
py
|
Python
|
broadlink/__init__.py
|
jfacevedo80/python-broadlink
|
2bed9fbdcbee19229ed5bec20851ac21713c1691
|
[
"MIT"
] | 1
|
2018-06-06T00:34:08.000Z
|
2018-06-06T00:34:08.000Z
|
broadlink/__init__.py
|
jfacevedo80/python-broadlink
|
2bed9fbdcbee19229ed5bec20851ac21713c1691
|
[
"MIT"
] | null | null | null |
broadlink/__init__.py
|
jfacevedo80/python-broadlink
|
2bed9fbdcbee19229ed5bec20851ac21713c1691
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from datetime import datetime
try:
from Crypto.Cipher import AES
except ImportError as e:
import pyaes
import time
import random
import socket
import sys
import threading
import codecs
def gendevice(devtype, host, mac):
devices = {
sp1: [0],
sp2: [
0x2711, # SP2
0x2719,
0x7919,
0x271a,
0x791a, # Honeywell SP2
0x2720, # SPMini
0x753e, # SP3
0x7D00, # OEM branded SP3
0x947a,
0x9479, # SP3S
0x2728, # SPMini2
0x2733,
0x273e, # OEM branded SPMini
0x7530,
0x7918, # OEM branded SPMini2
0x2736 # SPMiniPlus
],
rm: [
0x2712, # RM2
0x2737, # RM Mini
0x273d, # RM Pro Phicomm
0x2783, # RM2 Home Plus
0x277c, # RM2 Home Plus GDT
0x272a, # RM2 Pro Plus
0x2787, # RM2 Pro Plus2
0x279d, # RM2 Pro Plus3
0x27a9, # RM2 Pro Plus_300
0x278b, # RM2 Pro Plus BL
0x2797, # RM2 Pro Plus HYC
0x27a1, # RM2 Pro Plus R1
0x27a6, # RM2 Pro PP
0x278f # RM Mini Shate
],
a1: [0x2714], # A1
mp1: [
0x4EB5, # MP1
0x4EF7 # Honyar oem mp1
],
hysen: [0x4EAD], # Hysen controller
S1C: [0x2722], # S1 (SmartOne Alarm Kit)
dooya: [0x4E4D] # Dooya DT360E (DOOYA_CURTAIN_V2)
}
# Look for the class associated to devtype in devices
[deviceClass] = [dev for dev in devices
if devtype in devices[dev]] or [None]
if deviceClass is None:
return device(host=host, mac=mac, devtype=devtype)
return deviceClass(host=host, mac=mac, devtype=devtype)
def discover(timeout=None, local_ip_address=None):
if local_ip_address is None:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8',
53)) # connecting to a UDP address doesn't send packets
local_ip_address = s.getsockname()[0]
address = local_ip_address.split('.')
cs = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
cs.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
cs.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
cs.bind((local_ip_address, 0))
port = cs.getsockname()[1]
starttime = time.time()
devices = []
timezone = int(time.timezone / -3600)
packet = bytearray(0x30)
year = datetime.now().year
if timezone < 0:
packet[0x08] = 0xff + timezone - 1
packet[0x09] = 0xff
packet[0x0a] = 0xff
packet[0x0b] = 0xff
else:
packet[0x08] = timezone
packet[0x09] = 0
packet[0x0a] = 0
packet[0x0b] = 0
packet[0x0c] = year & 0xff
packet[0x0d] = year >> 8
packet[0x0e] = datetime.now().minute
packet[0x0f] = datetime.now().hour
subyear = str(year)[2:]
packet[0x10] = int(subyear)
packet[0x11] = datetime.now().isoweekday()
packet[0x12] = datetime.now().day
packet[0x13] = datetime.now().month
packet[0x18] = int(address[0])
packet[0x19] = int(address[1])
packet[0x1a] = int(address[2])
packet[0x1b] = int(address[3])
packet[0x1c] = port & 0xff
packet[0x1d] = port >> 8
packet[0x26] = 6
checksum = 0xbeaf
for i in range(len(packet)):
checksum += packet[i]
checksum = checksum & 0xffff
packet[0x20] = checksum & 0xff
packet[0x21] = checksum >> 8
cs.sendto(packet, ('255.255.255.255', 80))
if timeout is None:
response = cs.recvfrom(1024)
responsepacket = bytearray(response[0])
host = response[1]
mac = responsepacket[0x3a:0x40]
devtype = responsepacket[0x34] | responsepacket[0x35] << 8
return gendevice(devtype, host, mac)
else:
while (time.time() - starttime) < timeout:
cs.settimeout(timeout - (time.time() - starttime))
try:
response = cs.recvfrom(1024)
except socket.timeout:
return devices
responsepacket = bytearray(response[0])
host = response[1]
devtype = responsepacket[0x34] | responsepacket[0x35] << 8
mac = responsepacket[0x3a:0x40]
dev = gendevice(devtype, host, mac)
devices.append(dev)
return devices
class device:
def __init__(self, host, mac, devtype, timeout=10):
self.host = host
self.mac = mac
self.devtype = devtype
self.timeout = timeout
self.count = random.randrange(0xffff)
self.key = bytearray([
0x09, 0x76, 0x28, 0x34, 0x3f, 0xe9, 0x9e, 0x23, 0x76, 0x5c, 0x15,
0x13, 0xac, 0xcf, 0x8b, 0x02
])
self.iv = bytearray([
0x56, 0x2e, 0x17, 0x99, 0x6d, 0x09, 0x3d, 0x28, 0xdd, 0xb3, 0xba,
0x69, 0x5a, 0x2e, 0x6f, 0x58
])
self.id = bytearray([0, 0, 0, 0])
self.cs = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.cs.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.cs.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.cs.bind(('', 0))
self.type = "Unknown"
self.lock = threading.Lock()
if 'pyaes' in globals():
self.encrypt = self.encrypt_pyaes
self.decrypt = self.decrypt_pyaes
else:
self.encrypt = self.encrypt_pycrypto
self.decrypt = self.decrypt_pycrypto
def encrypt_pyaes(self, payload):
aes = pyaes.AESModeOfOperationCBC(self.key, iv=bytes(self.iv))
return b"".join([
aes.encrypt(bytes(payload[i:i + 16]))
for i in range(0, len(payload), 16)
])
def decrypt_pyaes(self, payload):
aes = pyaes.AESModeOfOperationCBC(self.key, iv=bytes(self.iv))
return b"".join([
aes.decrypt(bytes(payload[i:i + 16]))
for i in range(0, len(payload), 16)
])
def encrypt_pycrypto(self, payload):
aes = AES.new(bytes(self.key), AES.MODE_CBC, bytes(self.iv))
return aes.encrypt(bytes(payload))
def decrypt_pycrypto(self, payload):
aes = AES.new(bytes(self.key), AES.MODE_CBC, bytes(self.iv))
return aes.decrypt(bytes(payload))
def auth(self):
payload = bytearray(0x50)
payload[0x04] = 0x31
payload[0x05] = 0x31
payload[0x06] = 0x31
payload[0x07] = 0x31
payload[0x08] = 0x31
payload[0x09] = 0x31
payload[0x0a] = 0x31
payload[0x0b] = 0x31
payload[0x0c] = 0x31
payload[0x0d] = 0x31
payload[0x0e] = 0x31
payload[0x0f] = 0x31
payload[0x10] = 0x31
payload[0x11] = 0x31
payload[0x12] = 0x31
payload[0x1e] = 0x01
payload[0x2d] = 0x01
payload[0x30] = ord('T')
payload[0x31] = ord('e')
payload[0x32] = ord('s')
payload[0x33] = ord('t')
payload[0x34] = ord(' ')
payload[0x35] = ord(' ')
payload[0x36] = ord('1')
response = self.send_packet(0x65, payload)
payload = self.decrypt(response[0x38:])
if not payload:
return False
key = payload[0x04:0x14]
if len(key) % 16 != 0:
return False
self.id = payload[0x00:0x04]
self.key = key
return True
def get_type(self):
return self.type
def send_packet(self, command, payload):
self.count = (self.count + 1) & 0xffff
packet = bytearray(0x38)
packet[0x00] = 0x5a
packet[0x01] = 0xa5
packet[0x02] = 0xaa
packet[0x03] = 0x55
packet[0x04] = 0x5a
packet[0x05] = 0xa5
packet[0x06] = 0xaa
packet[0x07] = 0x55
packet[0x24] = 0x2a
packet[0x25] = 0x27
packet[0x26] = command
packet[0x28] = self.count & 0xff
packet[0x29] = self.count >> 8
packet[0x2a] = self.mac[0]
packet[0x2b] = self.mac[1]
packet[0x2c] = self.mac[2]
packet[0x2d] = self.mac[3]
packet[0x2e] = self.mac[4]
packet[0x2f] = self.mac[5]
packet[0x30] = self.id[0]
packet[0x31] = self.id[1]
packet[0x32] = self.id[2]
packet[0x33] = self.id[3]
# pad the payload for AES encryption
if len(payload) > 0:
numpad = (len(payload) // 16 + 1) * 16
payload = payload.ljust(numpad, b"\x00")
checksum = 0xbeaf
for i in range(len(payload)):
checksum += payload[i]
checksum = checksum & 0xffff
payload = self.encrypt(payload)
packet[0x34] = checksum & 0xff
packet[0x35] = checksum >> 8
for i in range(len(payload)):
packet.append(payload[i])
checksum = 0xbeaf
for i in range(len(packet)):
checksum += packet[i]
checksum = checksum & 0xffff
packet[0x20] = checksum & 0xff
packet[0x21] = checksum >> 8
starttime = time.time()
with self.lock:
while True:
try:
self.cs.sendto(packet, self.host)
self.cs.settimeout(1)
response = self.cs.recvfrom(2048)
break
except socket.timeout:
if (time.time() - starttime) > self.timeout:
raise
return bytearray(response[0])
class mp1(device):
def __init__(self, host, mac, devtype):
device.__init__(self, host, mac, devtype)
self.type = "MP1"
def set_power_mask(self, sid_mask, state):
"""Sets the power state of the smart power strip."""
packet = bytearray(16)
packet[0x00] = 0x0d
packet[0x02] = 0xa5
packet[0x03] = 0xa5
packet[0x04] = 0x5a
packet[0x05] = 0x5a
packet[0x06] = 0xb2 + ((sid_mask << 1) if state else sid_mask)
packet[0x07] = 0xc0
packet[0x08] = 0x02
packet[0x0a] = 0x03
packet[0x0d] = sid_mask
packet[0x0e] = sid_mask if state else 0
response = self.send_packet(0x6a, packet)
err = response[0x22] | (response[0x23] << 8)
def set_power(self, sid, state):
"""Sets the power state of the smart power strip."""
sid_mask = 0x01 << (sid - 1)
return self.set_power_mask(sid_mask, state)
def check_power_raw(self):
"""Returns the power state of the smart power strip in raw format."""
packet = bytearray(16)
packet[0x00] = 0x0a
packet[0x02] = 0xa5
packet[0x03] = 0xa5
packet[0x04] = 0x5a
packet[0x05] = 0x5a
packet[0x06] = 0xae
packet[0x07] = 0xc0
packet[0x08] = 0x01
response = self.send_packet(0x6a, packet)
err = response[0x22] | (response[0x23] << 8)
if err == 0:
payload = self.decrypt(bytes(response[0x38:]))
if type(payload[0x4]) == int:
state = payload[0x0e]
else:
state = ord(payload[0x0e])
return state
def check_power(self):
"""Returns the power state of the smart power strip."""
state = self.check_power_raw()
data = {}
data['s1'] = bool(state & 0x01)
data['s2'] = bool(state & 0x02)
data['s3'] = bool(state & 0x04)
data['s4'] = bool(state & 0x08)
return data
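    # Editorial note: illustrative usage (not part of the original class); the
    # host, MAC and device type below are hypothetical placeholders.
    #   strip = mp1(host=('192.0.2.10', 80), mac=bytearray(6), devtype=0x4EB5)
    #   strip.auth()
    #   strip.set_power(1, True)   # switch socket 1 on
    #   strip.check_power()        # e.g. {'s1': True, 's2': False, 's3': False, 's4': False}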
class sp1(device):
def __init__(self, host, mac, devtype):
device.__init__(self, host, mac, devtype)
self.type = "SP1"
def set_power(self, state):
packet = bytearray(4)
packet[0] = state
self.send_packet(0x66, packet)
class sp2(device):
def __init__(self, host, mac, devtype):
device.__init__(self, host, mac, devtype)
self.type = "SP2"
def set_power(self, state):
"""Sets the power state of the smart plug."""
packet = bytearray(16)
packet[0] = 2
if self.check_nightlight():
packet[4] = 3 if state else 2
else:
packet[4] = 1 if state else 0
self.send_packet(0x6a, packet)
def set_nightlight(self, state):
"""Sets the night light state of the smart plug"""
packet = bytearray(16)
packet[0] = 2
if self.check_power():
packet[4] = 3 if state else 1
else:
packet[4] = 2 if state else 0
self.send_packet(0x6a, packet)
def check_power(self):
"""Returns the power state of the smart plug."""
packet = bytearray(16)
packet[0] = 1
response = self.send_packet(0x6a, packet)
err = response[0x22] | (response[0x23] << 8)
if err == 0:
payload = self.decrypt(bytes(response[0x38:]))
if type(payload[0x4]) == int:
if payload[0x4] == 1 or payload[0x4] == 3:
state = True
else:
state = False
else:
if ord(payload[0x4]) == 1 or ord(payload[0x4]) == 3:
state = True
else:
state = False
return state
def check_nightlight(self):
"""Returns the power state of the smart plug."""
packet = bytearray(16)
packet[0] = 1
response = self.send_packet(0x6a, packet)
err = response[0x22] | (response[0x23] << 8)
if err == 0:
payload = self.decrypt(bytes(response[0x38:]))
if type(payload[0x4]) == int:
if payload[0x4] == 2 or payload[0x4] == 3:
state = True
else:
state = False
else:
if ord(payload[0x4]) == 2 or ord(payload[0x4]) == 3:
state = True
else:
state = False
return state
def get_energy(self):
packet = bytearray([8, 0, 254, 1, 5, 1, 0, 0, 0, 45])
response = self.send_packet(0x6a, packet)
err = response[0x22] | (response[0x23] << 8)
if err == 0:
payload = self.decrypt(bytes(response[0x38:]))
if type(payload[0x07]) == int:
energy = int(hex(payload[0x07] * 256 + payload[0x06])
[2:]) + int(hex(payload[0x05])[2:]) / 100.0
else:
energy = int(
hex(ord(payload[0x07]) * 256 + ord(payload[0x06]))
[2:]) + int(hex(ord(payload[0x05]))[2:]) / 100.0
return energy
class a1(device):
def __init__(self, host, mac, devtype):
device.__init__(self, host, mac, devtype)
self.type = "A1"
def check_sensors(self):
packet = bytearray(16)
packet[0] = 1
response = self.send_packet(0x6a, packet)
err = response[0x22] | (response[0x23] << 8)
if err == 0:
data = {}
payload = self.decrypt(bytes(response[0x38:]))
if type(payload[0x4]) == int:
data['temperature'] = (payload[0x4] * 10 + payload[0x5]) / 10.0
data['humidity'] = (payload[0x6] * 10 + payload[0x7]) / 10.0
light = payload[0x8]
air_quality = payload[0x0a]
noise = payload[0xc]
else:
data['temperature'] = (
ord(payload[0x4]) * 10 + ord(payload[0x5])) / 10.0
data['humidity'] = (
ord(payload[0x6]) * 10 + ord(payload[0x7])) / 10.0
light = ord(payload[0x8])
air_quality = ord(payload[0x0a])
noise = ord(payload[0xc])
if light == 0:
data['light'] = 'dark'
elif light == 1:
data['light'] = 'dim'
elif light == 2:
data['light'] = 'normal'
elif light == 3:
data['light'] = 'bright'
else:
data['light'] = 'unknown'
if air_quality == 0:
data['air_quality'] = 'excellent'
elif air_quality == 1:
data['air_quality'] = 'good'
elif air_quality == 2:
data['air_quality'] = 'normal'
elif air_quality == 3:
data['air_quality'] = 'bad'
else:
data['air_quality'] = 'unknown'
if noise == 0:
data['noise'] = 'quiet'
elif noise == 1:
data['noise'] = 'normal'
elif noise == 2:
data['noise'] = 'noisy'
else:
data['noise'] = 'unknown'
return data
def check_sensors_raw(self):
packet = bytearray(16)
packet[0] = 1
response = self.send_packet(0x6a, packet)
err = response[0x22] | (response[0x23] << 8)
if err == 0:
data = {}
payload = self.decrypt(bytes(response[0x38:]))
if type(payload[0x4]) == int:
data['temperature'] = (payload[0x4] * 10 + payload[0x5]) / 10.0
data['humidity'] = (payload[0x6] * 10 + payload[0x7]) / 10.0
data['light'] = payload[0x8]
data['air_quality'] = payload[0x0a]
data['noise'] = payload[0xc]
else:
data['temperature'] = (
ord(payload[0x4]) * 10 + ord(payload[0x5])) / 10.0
data['humidity'] = (
ord(payload[0x6]) * 10 + ord(payload[0x7])) / 10.0
data['light'] = ord(payload[0x8])
data['air_quality'] = ord(payload[0x0a])
data['noise'] = ord(payload[0xc])
return data
class rm(device):
def __init__(self, host, mac, devtype):
device.__init__(self, host, mac, devtype)
self.type = "RM2"
def check_data(self):
packet = bytearray(16)
packet[0] = 4
response = self.send_packet(0x6a, packet)
err = response[0x22] | (response[0x23] << 8)
if err == 0:
payload = self.decrypt(bytes(response[0x38:]))
return payload[0x04:]
def send_data(self, data):
packet = bytearray([0x02, 0x00, 0x00, 0x00])
packet += data
self.send_packet(0x6a, packet)
def enter_learning(self):
packet = bytearray(16)
packet[0] = 3
self.send_packet(0x6a, packet)
def check_temperature(self):
packet = bytearray(16)
packet[0] = 1
response = self.send_packet(0x6a, packet)
err = response[0x22] | (response[0x23] << 8)
if err == 0:
payload = self.decrypt(bytes(response[0x38:]))
if type(payload[0x4]) == int:
temp = (payload[0x4] * 10 + payload[0x5]) / 10.0
else:
temp = (ord(payload[0x4]) * 10 + ord(payload[0x5])) / 10.0
return temp
# For legacy compatibility - don't use this
class rm2(rm):
def __init__(self):
device.__init__(self, None, None, None)
def discover(self):
dev = discover()
self.host = dev.host
self.mac = dev.mac
class hysen(device):
def __init__(self, host, mac, devtype):
device.__init__(self, host, mac, devtype)
self.type = "Hysen heating controller"
# Send a request
# input_payload should be a bytearray, usually 6 bytes, e.g. bytearray([0x01,0x06,0x00,0x02,0x10,0x00])
# Returns decrypted payload
# New behaviour: raises a ValueError if the device response indicates an error or CRC check fails
# The function prepends length (2 bytes) and appends CRC
def send_request(self, input_payload):
from PyCRC.CRC16 import CRC16
crc = CRC16(modbus_flag=True).calculate(bytes(input_payload))
# first byte is length, +2 for CRC16
request_payload = bytearray([len(input_payload) + 2, 0x00])
request_payload.extend(input_payload)
# append CRC
request_payload.append(crc & 0xFF)
request_payload.append((crc >> 8) & 0xFF)
# send to device
response = self.send_packet(0x6a, request_payload)
# check for error
err = response[0x22] | (response[0x23] << 8)
if err:
raise ValueError('broadlink_response_error', err)
response_payload = bytearray(self.decrypt(bytes(response[0x38:])))
# experimental check on CRC in response (first 2 bytes are len, and trailing bytes are crc)
response_payload_len = response_payload[0]
if response_payload_len + 2 > len(response_payload):
raise ValueError('hysen_response_error',
'first byte of response is not length')
crc = CRC16(modbus_flag=True).calculate(
bytes(response_payload[2:response_payload_len]))
if (response_payload[response_payload_len] == crc & 0xFF) and (
response_payload[response_payload_len + 1] == (crc >> 8) &
0xFF):
return response_payload[2:response_payload_len]
else:
raise ValueError('hysen_response_error',
'CRC check on response failed')
# Get current room temperature in degrees celsius
def get_temp(self):
payload = self.send_request(
bytearray([0x01, 0x03, 0x00, 0x00, 0x00, 0x08]))
return payload[0x05] / 2.0
# Get current external temperature in degrees celsius
def get_external_temp(self):
payload = self.send_request(
bytearray([0x01, 0x03, 0x00, 0x00, 0x00, 0x08]))
return payload[18] / 2.0
# Get full status (including timer schedule)
def get_full_status(self):
payload = self.send_request(
bytearray([0x01, 0x03, 0x00, 0x00, 0x00, 0x16]))
data = {}
data['remote_lock'] = payload[3] & 1
data['power'] = payload[4] & 1
data['active'] = (payload[4] >> 4) & 1
data['temp_manual'] = (payload[4] >> 6) & 1
data['room_temp'] = (payload[5] & 255) / 2.0
data['thermostat_temp'] = (payload[6] & 255) / 2.0
data['auto_mode'] = payload[7] & 15
data['loop_mode'] = (payload[7] >> 4) & 15
data['sensor'] = payload[8]
data['osv'] = payload[9]
data['dif'] = payload[10]
data['svh'] = payload[11]
data['svl'] = payload[12]
data['room_temp_adj'] = ((payload[13] << 8) + payload[14]) / 2.0
if data['room_temp_adj'] > 32767:
data['room_temp_adj'] = 32767 - data['room_temp_adj']
data['fre'] = payload[15]
data['poweron'] = payload[16]
data['unknown'] = payload[17]
data['external_temp'] = (payload[18] & 255) / 2.0
data['hour'] = payload[19]
data['min'] = payload[20]
data['sec'] = payload[21]
data['dayofweek'] = payload[22]
weekday = []
for i in range(0, 6):
weekday.append({
'start_hour': payload[2 * i + 23],
'start_minute': payload[2 * i + 24],
'temp': payload[i + 39] / 2.0
})
data['weekday'] = weekday
weekend = []
for i in range(6, 8):
weekend.append({
'start_hour': payload[2 * i + 23],
'start_minute': payload[2 * i + 24],
'temp': payload[i + 39] / 2.0
})
data['weekend'] = weekend
return data
# Change controller mode
# auto_mode = 1 for auto (scheduled/timed) mode, 0 for manual mode.
# Manual mode will activate last used temperature. In typical usage call set_temp to activate manual control and set temp.
# loop_mode refers to index in [ "12345,67", "123456,7", "1234567" ]
# E.g. loop_mode = 0 ("12345,67") means Saturday and Sunday follow the "weekend" schedule
# loop_mode = 2 ("1234567") means every day (including Saturday and Sunday) follows the "weekday" schedule
# The sensor command is currently experimental
def set_mode(self, auto_mode, loop_mode, sensor=0):
mode_byte = ((loop_mode + 1) << 4) + auto_mode
# print 'Mode byte: 0x'+ format(mode_byte, '02x')
self.send_request(
bytearray([0x01, 0x06, 0x00, 0x02, mode_byte, sensor]))
# Advanced settings
# Sensor mode (SEN) sensor = 0 for internal sensor, 1 for external sensor, 2 for internal control temperature, external limit temperature. Factory default: 0.
# Set temperature range for external sensor (OSV) osv = 5..99. Factory default: 42C
    # Deadzone for floor temperature (dIF) dif = 1..9. Factory default: 2C
# Upper temperature limit for internal sensor (SVH) svh = 5..99. Factory default: 35C
# Lower temperature limit for internal sensor (SVL) svl = 5..99. Factory default: 5C
    # Actual temperature calibration (AdJ) adj = -0.5. Precision 0.1C
# Anti-freezing function (FrE) fre = 0 for anti-freezing function shut down, 1 for anti-freezing function open. Factory default: 0
# Power on memory (POn) poweron = 0 for power on memory off, 1 for power on memory on. Factory default: 0
def set_advanced(self, loop_mode, sensor, osv, dif, svh, svl, adj, fre,
poweron):
input_payload = bytearray([
0x01, 0x10, 0x00, 0x02, 0x00, 0x05, 0x0a, loop_mode, sensor, osv,
dif, svh, svl, (int(adj * 2) >> 8 & 0xff), (int(adj * 2) & 0xff),
fre, poweron
])
self.send_request(input_payload)
# For backwards compatibility only. Prefer calling set_mode directly. Note this function invokes loop_mode=0 and sensor=0.
def switch_to_auto(self):
self.set_mode(auto_mode=1, loop_mode=0)
def switch_to_manual(self):
self.set_mode(auto_mode=0, loop_mode=0)
# Set temperature for manual mode (also activates manual mode if currently in automatic)
def set_temp(self, temp):
self.send_request(
bytearray([0x01, 0x06, 0x00, 0x01, 0x00,
int(temp * 2)]))
# Set device on(1) or off(0), does not deactivate Wifi connectivity. Remote lock disables control by buttons on thermostat.
def set_power(self, power=1, remote_lock=0):
self.send_request(
bytearray([0x01, 0x06, 0x00, 0x00, remote_lock, power]))
# set time on device
# n.b. day=1 is Monday, ..., day=7 is Sunday
def set_time(self, hour, minute, second, day):
self.send_request(
bytearray([
0x01, 0x10, 0x00, 0x08, 0x00, 0x02, 0x04, hour, minute, second,
day
]))
# Set timer schedule
# Format is the same as you get from get_full_status.
# weekday is a list (ordered) of 6 dicts like:
# {'start_hour':17, 'start_minute':30, 'temp': 22 }
# Each one specifies the thermostat temp that will become effective at start_hour:start_minute
# weekend is similar but only has 2 (e.g. switch on in morning and off in afternoon)
def set_schedule(self, weekday, weekend):
# Begin with some magic values ...
input_payload = bytearray([0x01, 0x10, 0x00, 0x0a, 0x00, 0x0c, 0x18])
# Now simply append times/temps
# weekday times
for i in range(0, 6):
input_payload.append(weekday[i]['start_hour'])
input_payload.append(weekday[i]['start_minute'])
# weekend times
for i in range(0, 2):
input_payload.append(weekend[i]['start_hour'])
input_payload.append(weekend[i]['start_minute'])
# weekday temperatures
for i in range(0, 6):
input_payload.append(int(weekday[i]['temp'] * 2))
# weekend temperatures
for i in range(0, 2):
input_payload.append(int(weekend[i]['temp'] * 2))
self.send_request(input_payload)
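    # Editorial note: an illustrative sketch (not part of the original class)
    # of the schedule format described above; the times and temperatures are
    # arbitrary example values.
    def _example_set_schedule(self):
        weekday = [
            {'start_hour': 6, 'start_minute': 30, 'temp': 21.0},
            {'start_hour': 9, 'start_minute': 0, 'temp': 17.0},
            {'start_hour': 12, 'start_minute': 0, 'temp': 17.0},
            {'start_hour': 14, 'start_minute': 0, 'temp': 17.0},
            {'start_hour': 17, 'start_minute': 30, 'temp': 22.0},
            {'start_hour': 22, 'start_minute': 0, 'temp': 16.0},
        ]
        weekend = [
            {'start_hour': 8, 'start_minute': 0, 'temp': 21.0},
            {'start_hour': 23, 'start_minute': 0, 'temp': 16.0},
        ]
        self.set_schedule(weekday, weekend)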
S1C_SENSORS_TYPES = {
    0x31: 'Door Sensor',   # 49 in decimal
    0x91: 'Key Fob',       # 145 in decimal, as the serial printed on the fob body
    0x21: 'Motion Sensor'  # 33 in decimal
}
class S1C(device):
def __init__(self, *a, **kw):
device.__init__(self, *a, **kw)
self.type = 'S1C'
def get_sensors_status(self):
packet = bytearray(16)
packet[0] = 0x06 # 0x06 - get sensors info, 0x07 - probably add sensors
response = self.send_packet(0x6a, packet)
err = response[0x22] | (response[0x23] << 8)
if err == 0:
aes = AES.new(bytes(self.key), AES.MODE_CBC, bytes(self.iv))
payload = aes.decrypt(bytes(response[0x38:]))
if payload:
count = payload[0x4]
sensors = payload[0x6:]
sensors_a = [
bytearray(sensors[i * 83:(i + 1) * 83])
for i in range(len(sensors) // 83)
]
sens_res = {}
for sens in sensors_a:
status = ord(chr(sens[0]))
_name = str(bytes(sens[4:26]).decode())
_order = ord(chr(sens[1]))
_type = ord(chr(sens[3]))
_serial = bytes(codecs.encode(sens[26:30], "hex")).decode()
type_str = S1C_SENSORS_TYPES.get(_type, 'Unknown')
r = {
'status': status,
'name': _name.strip('\x00'),
'type': type_str,
'order': _order,
'serial': _serial,
}
if r['serial'] != '00000000':
sens_res[r['serial']]=r
result = {'count': count, 'sensors': sens_res}
return result
else:
raise ValueError('broadlink_response_error', err)
def get_alarm_status(self):
packet = bytearray(16)
packet[0] = 0x12
response = self.send_packet(0x6a, packet)
err = response[0x22] | (response[0x23] << 8)
if err == 0:
aes = AES.new(bytes(self.key), AES.MODE_CBC, bytes(self.iv))
payload = aes.decrypt(bytes(response[0x38:]))
status = payload[4]
return status
else:
raise ValueError('broadlink_response_error', err)
def get_trigger_status(self):
packet = bytearray(16)
packet[0] = 0x10
response = self.send_packet(0x6a, packet)
err = response[0x22] | (response[0x23] << 8)
if err == 0:
aes = AES.new(bytes(self.key), AES.MODE_CBC, bytes(self.iv))
payload = aes.decrypt(bytes(response[0x38:]))
triggered = False
for i in range(payload[4]):
if payload[i * 2 + 4] == 1:
triggered = True
return triggered
else:
raise ValueError('broadlink_response_error', err)
# Set alarm status
# state = 0 disarm.
# state = 1 part_arm.
# state = 2 full_arm.
# notification_sound = True Message notification sound.
# alarm_sound = True AlarmSound
def set_alarm_status(self, state, notification_sound=None, alarm_sound=None):
packet = bytearray(16)
packet[0] = 0x11
if state==2: # full_arm
packet[4]=0x02
elif state==1: # part_arm:
packet[4]=0x01
elif state==0: # disarm:
packet[4]=0x00
if notification_sound is not None:
packet[13] = 0x02
if alarm_sound is not None:
packet[10] = 0x01
response = self.send_packet(0x6a, packet)
# check for error
err = response[0x22] | (response[0x23] << 8)
if err:
raise ValueError('broadlink_response_error', err)
class dooya(device):
def __init__(self, host, mac, devtype):
device.__init__(self, host, mac, devtype)
self.type = "Dooya DT360E"
def _send(self, magic1, magic2):
packet = bytearray(16)
packet[0] = 0x09
packet[2] = 0xbb
packet[3] = magic1
packet[4] = magic2
packet[9] = 0xfa
packet[10] = 0x44
response = self.send_packet(0x6a, packet)
err = response[0x22] | (response[0x23] << 8)
if err == 0:
payload = self.decrypt(bytes(response[0x38:]))
return ord(payload[4])
def open(self):
return self._send(0x01, 0x00)
def close(self):
return self._send(0x02, 0x00)
def stop(self):
return self._send(0x03, 0x00)
def get_percentage(self):
return self._send(0x06, 0x5d)
def set_percentage_and_wait(self, new_percentage):
current = self.get_percentage()
if current > new_percentage:
self.close()
while current is not None and current > new_percentage:
time.sleep(0.2)
current = self.get_percentage()
elif current < new_percentage:
self.open()
while current is not None and current < new_percentage:
time.sleep(0.2)
current = self.get_percentage()
self.stop()
# Setup a new Broadlink device via AP Mode. Review the README to see how to enter AP Mode.
# Only tested with Broadlink RM3 Mini (Blackbean)
def setup(ssid, password, security_mode):
# Security mode options are (0 - none, 1 = WEP, 2 = WPA1, 3 = WPA2, 4 = WPA1/2)
payload = bytearray(0x88)
payload[0x26] = 0x14 # This seems to always be set to 14
# Add the SSID to the payload
ssid_start = 68
ssid_length = 0
for letter in ssid:
payload[(ssid_start + ssid_length)] = ord(letter)
ssid_length += 1
# Add the WiFi password to the payload
pass_start = 100
pass_length = 0
for letter in password:
payload[(pass_start + pass_length)] = ord(letter)
pass_length += 1
payload[0x84] = ssid_length # Character length of SSID
payload[0x85] = pass_length # Character length of password
payload[0x86] = security_mode # Type of encryption (00 - none, 01 = WEP, 02 = WPA1, 03 = WPA2, 04 = WPA1/2)
checksum = 0xbeaf
for i in range(len(payload)):
checksum += payload[i]
checksum = checksum & 0xffff
payload[0x20] = checksum & 0xff # Checksum 1 position
payload[0x21] = checksum >> 8 # Checksum 2 position
sock = socket.socket(
socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.sendto(payload, ('255.255.255.255', 80))
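# Editorial note: an illustrative usage sketch (not part of the original
# module) for discovery, authentication and RM learning mode using the
# functions above; it assumes the first discovered device is an RM, and the
# 5 second wait is an arbitrary example value.
def _example_learn_and_replay():
    found = discover(timeout=5)     # returns a list when a timeout is given
    if not found:
        return None
    dev = found[0]
    dev.auth()                      # negotiate the device-specific key
    dev.enter_learning()            # press the remote button to capture
    time.sleep(5)
    packet = dev.check_data()       # captured IR/RF packet if learning succeeded
    if packet:
        dev.send_data(packet)       # replay the captured packet
    return packet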
| 35.113017
| 162
| 0.548179
|
ad51165eb42d56b612bf7bfb91631d9e3f410837
| 70,275
|
py
|
Python
|
clarifai_grpc/grpc/api/status/status_code_pb2.py
|
olga-clarifai/clarifai-python-grpc
|
c1d45ea965f781de5ccf682b142049c7628d0480
|
[
"Apache-2.0"
] | null | null | null |
clarifai_grpc/grpc/api/status/status_code_pb2.py
|
olga-clarifai/clarifai-python-grpc
|
c1d45ea965f781de5ccf682b142049c7628d0480
|
[
"Apache-2.0"
] | null | null | null |
clarifai_grpc/grpc/api/status/status_code_pb2.py
|
olga-clarifai/clarifai-python-grpc
|
c1d45ea965f781de5ccf682b142049c7628d0480
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/clarifai/api/status/status_code.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='proto/clarifai/api/status/status_code.proto',
package='clarifai.api.status',
syntax='proto3',
serialized_options=b'\n\034com.clarifai.grpc.api.statusP\001Zvgithub.com/Clarifai/clarifai-go-grpc/proto/clarifai/api/github.com/Clarifai/clarifai-go-grpc/proto/clarifai/api/status\242\002\004CAIP',
serialized_pb=b'\n+proto/clarifai/api/status/status_code.proto\x12\x13\x63larifai.api.status*\xe4\x46\n\nStatusCode\x12\x08\n\x04ZERO\x10\x00\x12\x0c\n\x07SUCCESS\x10\x90N\x12\x11\n\x0cMIXED_STATUS\x10\x9aN\x12\x0c\n\x07\x46\x41ILURE\x10\xa4N\x12\x0e\n\tTRY_AGAIN\x10\xaeN\x12\x14\n\x0fNOT_IMPLEMENTED\x10\xb8N\x12\x18\n\x13\x43ONN_ACCOUNT_ISSUES\x10\xf8U\x12\x1b\n\x12\x43ONN_TOKEN_INVALID\x10\xf9U\x1a\x02\x08\x01\x12\x1d\n\x18\x43ONN_CREDENTIALS_INVALID\x10\xfaU\x12\x1d\n\x18\x43ONN_EXCEED_HOURLY_LIMIT\x10\xfbU\x12\x1e\n\x19\x43ONN_EXCEED_MONTHLY_LIMIT\x10\xfcU\x12\x13\n\x0e\x43ONN_THROTTLED\x10\xfdU\x12\x18\n\x13\x43ONN_EXCEEDS_LIMITS\x10\xfeU\x12\x1d\n\x18\x43ONN_INSUFFICIENT_SCOPES\x10\xffU\x12\x15\n\x10\x43ONN_KEY_INVALID\x10\x80V\x12\x17\n\x12\x43ONN_KEY_NOT_FOUND\x10\x81V\x12\x1c\n\x17\x43ONN_BAD_REQUEST_FORMAT\x10\xdcV\x12\x18\n\x13\x43ONN_DOES_NOT_EXIST\x10\xddV\x12\x19\n\x14\x43ONN_INVALID_REQUEST\x10\xdeV\x12\x1c\n\x17\x43ONN_METHOD_NOT_ALLOWED\x10\xdfV\x12\x19\n\x14\x43ONN_NO_GDPR_CONSENT\x10\xe0V\x12\x1e\n\x19\x43ONN_AUTH_METHOD_DISABLED\x10\xc0W\x12\x13\n\rMODEL_TRAINED\x10\xec\xa4\x01\x12\x14\n\x0eMODEL_TRAINING\x10\xed\xa4\x01\x12\x15\n\x0fMODEL_UNTRAINED\x10\xee\xa4\x01\x12\x1f\n\x19MODEL_QUEUED_FOR_TRAINING\x10\xef\xa4\x01\x12\x15\n\x0fMODEL_UPLOADING\x10\xf0\xa4\x01\x12\x1c\n\x16MODEL_UPLOADING_FAILED\x10\xf1\xa4\x01\x12\x1c\n\x16MODEL_TRAINING_NO_DATA\x10\xf6\xa4\x01\x12!\n\x1bMODEL_TRAINING_NO_POSITIVES\x10\xf7\xa4\x01\x12*\n$MODEL_TRAINING_ONE_VS_N_SINGLE_CLASS\x10\xf8\xa4\x01\x12\x1e\n\x18MODEL_TRAINING_TIMED_OUT\x10\xf9\xa4\x01\x12\"\n\x1cMODEL_TRAINING_WAITING_ERROR\x10\xfa\xa4\x01\x12\"\n\x1cMODEL_TRAINING_UNKNOWN_ERROR\x10\xfb\xa4\x01\x12&\n\x1cMODEL_TRAINING_MSG_REDELIVER\x10\xfc\xa4\x01\x1a\x02\x08\x01\x12&\n MODEL_TRAINING_INSUFFICIENT_DATA\x10\xfd\xa4\x01\x12#\n\x1dMODEL_TRAINING_INVALID_PARAMS\x10\xfe\xa4\x01\x12\x34\n.MODEL_TRAINING_INVALID_DATA_TOLERANCE_EXCEEDED\x10\xff\xa4\x01\x12\x1a\n\x14MODEL_MODIFY_SUCCESS\x10\x9e\xa5\x01\x12\x1a\n\x14MODEL_MODIFY_PENDING\x10\x9f\xa5\x01\x12\x19\n\x13MODEL_MODIFY_FAILED\x10\xa0\xa5\x01\x12\x1a\n\x14MODEL_DOES_NOT_EXIST\x10\xd0\xa5\x01\x12\x1d\n\x17MODEL_PERMISSION_DENIED\x10\xd1\xa5\x01\x12\x1c\n\x16MODEL_INVALID_ARGUMENT\x10\xd2\xa5\x01\x12\x1b\n\x15MODEL_INVALID_REQUEST\x10\xd3\xa5\x01\x12\x15\n\x0fMODEL_EVALUATED\x10\xb4\xa6\x01\x12\x16\n\x10MODEL_EVALUATING\x10\xb5\xa6\x01\x12\x19\n\x13MODEL_NOT_EVALUATED\x10\xb6\xa6\x01\x12!\n\x1bMODEL_QUEUED_FOR_EVALUATION\x10\xb7\xa6\x01\x12 \n\x1aMODEL_EVALUATION_TIMED_OUT\x10\xbe\xa6\x01\x12$\n\x1eMODEL_EVALUATION_WAITING_ERROR\x10\xbf\xa6\x01\x12$\n\x1eMODEL_EVALUATION_UNKNOWN_ERROR\x10\xc0\xa6\x01\x12\x1d\n\x17MODEL_PREDICTION_FAILED\x10\xc1\xa6\x01\x12(\n\x1eMODEL_EVALUATION_MSG_REDELIVER\x10\xc2\xa6\x01\x1a\x02\x08\x01\x12\"\n\x1cMODEL_EVALUATION_NEED_LABELS\x10\xc3\xa6\x01\x12\"\n\x1cMODEL_EVALUATION_NEED_INPUTS\x10\xc4\xa6\x01\x12\x1d\n\x17MODEL_DEPLOYMENT_FAILED\x10\xe6\xa6\x01\x12\x15\n\x0fMODEL_DEPLOYING\x10\xe7\xa6\x01\x12!\n\x1bMODEL_QUEUED_FOR_DEPLOYMENT\x10\xe8\xa6\x01\x12\x18\n\x12MODEL_NOT_DEPLOYED\x10\xe9\xa6\x01\x12&\n MODEL_REFERENCE_INVALID_ARGUMENT\x10\x98\xa7\x01\x12*\n$MODEL_EXAMPLE_INPUT_INVALID_ARGUMENT\x10\xac\xa7\x01\x12 \n\x1aWORKFLOW_NO_MATCHING_INPUT\x10\xf1\xab\x01\x12$\n\x1eWORKFLOW_REQUIRE_TRAINED_MODEL\x10\xf2\xab\x01\x12\x18\n\x12WORKFLOW_DUPLICATE\x10\xd4\xac\x01\x12!\n\x1bWORKFLOW_UNSUPPORTED_FORMAT\x10\xd5\xac\x01\x12\x1d\n\x17WORKFLOW_DOES_NOT_EXIST\x10\xd6\xac\x01\x12 
\n\x1aWORKFLOW_PERMISSION_DENIED\x10\xd7\xac\x01\x12\x1f\n\x19WORKFLOW_INVALID_ARGUMENT\x10\xd8\xac\x01\x12\x1d\n\x17WORKFLOW_INVALID_RECIPE\x10\xd9\xac\x01\x12\x1f\n\x19WORKFLOW_INVALID_TEMPLATE\x10\xda\xac\x01\x12\x1c\n\x16WORKFLOW_INVALID_GRAPH\x10\xdb\xac\x01\x12\x1f\n\x19WORKFLOW_INTERNAL_FAILURE\x10\xdc\xac\x01\x12\x1e\n\x18WORKFLOW_INVALID_REQUEST\x10\xd7\xb3\x01\x12\x1d\n\x17WORKFLOW_MODIFY_SUCCESS\x10\x86\xad\x01\x12\x1d\n\x17WORKFLOW_MODIFY_PENDING\x10\x87\xad\x01\x12\x1c\n\x16WORKFLOW_MODIFY_FAILED\x10\x88\xad\x01\x12\x1d\n\x17WORKFLOW_REINDEX_FAILED\x10\x89\xad\x01\x12\x1c\n\x16\x43ONCEPT_MODIFY_SUCCESS\x10\xee\xb4\x01\x12\x1c\n\x16\x43ONCEPT_MODIFY_PENDING\x10\xef\xb4\x01\x12\x1b\n\x15\x43ONCEPT_MODIFY_FAILED\x10\xf0\xb4\x01\x12\x18\n\x12\x41NNOTATION_SUCCESS\x10\xd6\xbc\x01\x12\x18\n\x12\x41NNOTATION_PENDING\x10\xd7\xbc\x01\x12\x17\n\x11\x41NNOTATION_FAILED\x10\xd8\xbc\x01\x12\x1f\n\x19\x41NNOTATION_UNKNOWN_STATUS\x10\xda\xbc\x01\x12!\n\x1b\x41NNOTATION_INVALID_ARGUMENT\x10\xdb\xbc\x01\x12\"\n\x1c\x41NNOTATION_PERMISSION_DENIED\x10\xdc\xbc\x01\x12 \n\x1a\x41NNOTATION_AWAITING_REVIEW\x10\xdd\xbc\x01\x12*\n$ANNOTATION_AWAITING_CONSENSUS_REVIEW\x10\xdf\xbc\x01\x12\x1e\n\x18\x41NNOTATION_REVIEW_DENIED\x10\xde\xbc\x01\x12\x1f\n\x19\x41NNOTATION_MODIFY_SUCCESS\x10\xba\xbd\x01\x12\x1f\n\x19\x41NNOTATION_MODIFY_PENDING\x10\xbb\xbd\x01\x12\x1e\n\x18\x41NNOTATION_MODIFY_FAILED\x10\xbc\xbd\x01\x12&\n METADATA_INVALID_PATCH_ARGUMENTS\x10\xc4\xc2\x01\x12\x1c\n\x16METADATA_PARSING_ISSUE\x10\xc5\xc2\x01\x12!\n\x1bMETADATA_MANIPULATION_ISSUE\x10\xc6\xc2\x01\x12\x1c\n\x16TRAINER_JOB_STATE_NONE\x10\xa8\xc3\x01\x12\x1e\n\x18TRAINER_JOB_STATE_QUEUED\x10\xa9\xc3\x01\x12\x1f\n\x19TRAINER_JOB_STATE_RUNNING\x10\xaa\xc3\x01\x12 \n\x1aTRAINER_JOB_STATE_COMPLETE\x10\xab\xc3\x01\x12\x1d\n\x17TRAINER_JOB_STATE_ERROR\x10\xac\xc3\x01\x12\x17\n\x11\x44\x41TA_DUMP_SUCCESS\x10\xbe\xc4\x01\x12\x17\n\x11\x44\x41TA_DUMP_PENDING\x10\xbf\xc4\x01\x12\x16\n\x10\x44\x41TA_DUMP_FAILED\x10\xc0\xc4\x01\x12\x1b\n\x15\x44\x41TA_DUMP_IN_PROGRESS\x10\xc1\xc4\x01\x12\x17\n\x11\x44\x41TA_DUMP_NO_DATA\x10\xc2\xc4\x01\x12\x1d\n\x17\x41PP_DUPLICATION_SUCCESS\x10\xf0\xc4\x01\x12\x1c\n\x16\x41PP_DUPLICATION_FAILED\x10\xf1\xc4\x01\x12\x1d\n\x17\x41PP_DUPLICATION_PENDING\x10\xf2\xc4\x01\x12!\n\x1b\x41PP_DUPLICATION_IN_PROGRESS\x10\xf3\xc4\x01\x12%\n\x1f\x41PP_DUPLICATION_INVALID_REQUEST\x10\xf4\xc4\x01\x12\x1c\n\x16INPUT_DOWNLOAD_SUCCESS\x10\xb0\xea\x01\x12\x1c\n\x16INPUT_DOWNLOAD_PENDING\x10\xb1\xea\x01\x12\x1b\n\x15INPUT_DOWNLOAD_FAILED\x10\xb2\xea\x01\x12 \n\x1aINPUT_DOWNLOAD_IN_PROGRESS\x10\xb3\xea\x01\x12 
\n\x1aINPUT_STATUS_UPDATE_FAILED\x10\xb4\xea\x01\x12\x19\n\x13INPUT_DELETE_FAILED\x10\xb5\xea\x01\x12\x15\n\x0fINPUT_DUPLICATE\x10\x94\xeb\x01\x12\x1e\n\x18INPUT_UNSUPPORTED_FORMAT\x10\x95\xeb\x01\x12\x1a\n\x14INPUT_DOES_NOT_EXIST\x10\x96\xeb\x01\x12\x1d\n\x17INPUT_PERMISSION_DENIED\x10\x97\xeb\x01\x12\x1c\n\x16INPUT_INVALID_ARGUMENT\x10\x98\xeb\x01\x12\x16\n\x10INPUT_OVER_LIMIT\x10\x99\xeb\x01\x12\x17\n\x11INPUT_INVALID_URL\x10\x9a\xeb\x01\x12\x1a\n\x14INPUT_MODIFY_SUCCESS\x10\xf8\xeb\x01\x12\x1a\n\x14INPUT_MODIFY_PENDING\x10\xf9\xeb\x01\x12\x19\n\x13INPUT_MODIFY_FAILED\x10\xfb\xeb\x01\x12\x1f\n\x19INPUT_STORAGE_HOST_FAILED\x10\x82\xec\x01\x12\x1d\n\x17\x41LL_INPUT_INVALID_BYTES\x10\xdc\xec\x01\x12\x1b\n\x15INPUT_CLUSTER_SUCCESS\x10\xc0\xed\x01\x12\x1b\n\x15INPUT_CLUSTER_PENDING\x10\xc1\xed\x01\x12\x1a\n\x14INPUT_CLUSTER_FAILED\x10\xc2\xed\x01\x12\x1f\n\x19INPUT_CLUSTER_IN_PROGRESS\x10\xc3\xed\x01\x12\x1b\n\x15INPUT_REINDEX_SUCCESS\x10\xa4\xee\x01\x12\x1b\n\x15INPUT_REINDEX_PENDING\x10\xa5\xee\x01\x12\x1a\n\x14INPUT_REINDEX_FAILED\x10\xa6\xee\x01\x12\x1f\n\x19INPUT_REINDEX_IN_PROGRESS\x10\xa7\xee\x01\x12\"\n\x1cINPUT_VIDEO_DOWNLOAD_SUCCESS\x10\x98\xf2\x01\x12\"\n\x1cINPUT_VIDEO_DOWNLOAD_PENDING\x10\x99\xf2\x01\x12!\n\x1bINPUT_VIDEO_DOWNLOAD_FAILED\x10\x9a\xf2\x01\x12\x1b\n\x15INPUT_VIDEO_DUPLICATE\x10\xfc\xf2\x01\x12$\n\x1eINPUT_VIDEO_UNSUPPORTED_FORMAT\x10\xfd\xf2\x01\x12 \n\x1aINPUT_VIDEO_DOES_NOT_EXIST\x10\xfe\xf2\x01\x12#\n\x1dINPUT_VIDEO_PERMISSION_DENIED\x10\xff\xf2\x01\x12\"\n\x1cINPUT_VIDEO_INVALID_ARGUMENT\x10\x80\xf3\x01\x12\x1c\n\x16INPUT_VIDEO_OVER_LIMIT\x10\x81\xf3\x01\x12\x1d\n\x17INPUT_VIDEO_INVALID_URL\x10\x82\xf3\x01\x12 \n\x1aINPUT_VIDEO_MODIFY_SUCCESS\x10\xe0\xf3\x01\x12 \n\x1aINPUT_VIDEO_MODIFY_PENDING\x10\xe1\xf3\x01\x12\x1f\n\x19INPUT_VIDEO_MODIFY_FAILED\x10\xe3\xf3\x01\x12%\n\x1fINPUT_VIDEO_STORAGE_HOST_FAILED\x10\xea\xf3\x01\x12$\n\x1e\x41LL_INPUT_VIDEOS_INVALID_BYTES\x10\xc4\xf4\x01\x12\x1d\n\x17INPUT_CONNECTION_FAILED\x10\xbc\xb8\x02\x12&\n REQUEST_DISABLED_FOR_MAINTENANCE\x10\xbd\xb8\x02\x12+\n%INPUT_WRITES_DISABLED_FOR_MAINTENANCE\x10\xbe\xb8\x02\x12\x1b\n\x15INPUT_INVALID_REQUEST\x10\xbf\xb8\x02\x12\x1d\n\x17PREDICT_INVALID_REQUEST\x10\xc1\xb8\x02\x12\x1c\n\x16SEARCH_INVALID_REQUEST\x10\xc2\xb8\x02\x12\x1e\n\x18\x43ONCEPTS_INVALID_REQUEST\x10\xc3\xb8\x02\x12\x1b\n\x15STATS_INVALID_REQUEST\x10\xc4\xb8\x02\x12\x1c\n\x16\x44\x41TABASE_DUPLICATE_KEY\x10\xca\xb8\x02\x12 \n\x1a\x44\x41TABASE_STATEMENT_TIMEOUT\x10\xcb\xb8\x02\x12$\n\x1e\x44\x41TABASE_INVALID_ROWS_AFFECTED\x10\xcc\xb8\x02\x12 \n\x1a\x44\x41TABASE_DEADLOCK_DETECTED\x10\xcd\xb8\x02\x12\x18\n\x12\x44\x41TABASE_FAIL_TASK\x10\xce\xb8\x02\x12&\n 
DATABASE_FAIL_TO_GET_CONNECTIONS\x10\xcf\xb8\x02\x12\x1f\n\x19\x44\x41TABASE_TOO_MANY_CLIENTS\x10\xd0\xb8\x02\x12\"\n\x1c\x44\x41TABASE_CONSTRAINT_VIOLATED\x10\xd1\xb8\x02\x12\x1f\n\x19\x41SYNC_WORKER_MULTI_ERRORS\x10\xd4\xb8\x02\x12\x1c\n\x16RPC_REQUEST_QUEUE_FULL\x10\xde\xb8\x02\x12\x1c\n\x16RPC_SERVER_UNAVAILABLE\x10\xdf\xb8\x02\x12\x19\n\x13RPC_REQUEST_TIMEOUT\x10\xe0\xb8\x02\x12#\n\x1dRPC_MAX_MESSAGE_SIZE_EXCEEDED\x10\xe1\xb8\x02\x12\x12\n\x0cRPC_CANCELED\x10\xe3\xb8\x02\x12\x18\n\x12RPC_UNKNOWN_METHOD\x10\xe4\xb8\x02\x12\x1e\n\x18REQUEST_CANCELED_BY_USER\x10\xe5\xb8\x02\x12\x1e\n\x18\x43LUSTER_INTERNAL_FAILURE\x10\xa0\xd0\x02\x12\x1f\n\x19\x45XTERNAL_CONNECTION_ERROR\x10\xe2\xb8\x02\x12\x16\n\x10QUEUE_CONN_ERROR\x10\xa8\xc0\x02\x12!\n\x1bQUEUE_CLOSE_REQUEST_TIMEOUT\x10\xaa\xc0\x02\x12\x17\n\x11QUEUE_CONN_CLOSED\x10\xab\xc0\x02\x12\x1f\n\x19QUEUE_PUBLISH_ACK_TIMEOUT\x10\xac\xc0\x02\x12\x19\n\x13QUEUE_PUBLISH_ERROR\x10\xad\xc0\x02\x12 \n\x1aQUEUE_SUBSCRIPTION_TIMEOUT\x10\xae\xc0\x02\x12\x1e\n\x18QUEUE_SUBSCRIPTION_ERROR\x10\xaf\xc0\x02\x12\x1e\n\x18QUEUE_MARSHALLING_FAILED\x10\xb0\xc0\x02\x12 \n\x1aQUEUE_UNMARSHALLING_FAILED\x10\xb1\xc0\x02\x12\'\n!QUEUE_MAX_MSG_REDELIVERY_EXCEEDED\x10\xb2\xc0\x02\x12\x17\n\x11QUEUE_ACK_FAILURE\x10\xb3\xc0\x02\x12\x13\n\rSQS_OVERLIMIT\x10\x8c\xc1\x02\x12 \n\x1aSQS_INVALID_RECEIPT_HANDLE\x10\x8d\xc1\x02\x12\x11\n\x0bSQS_UNKNOWN\x10\x8e\xc1\x02\x12\x1d\n\x17SEARCH_INTERNAL_FAILURE\x10\xf9\xcf\x02\x12\x1f\n\x19SEARCH_PROJECTION_FAILURE\x10\xfa\xcf\x02\x12\x1f\n\x19SEARCH_PREDICTION_FAILURE\x10\xfb\xcf\x02\x12\'\n!SEARCH_BY_NOT_FULLY_INDEXED_INPUT\x10\xfc\xcf\x02\x12 \n\x1aSAVED_SEARCH_MODIFY_FAILED\x10\xfd\xcf\x02\x12\x17\n\x11\x45VALUATION_QUEUED\x10\xdc\xd0\x02\x12\x1c\n\x16\x45VALUATION_IN_PROGRESS\x10\xdd\xd0\x02\x12\x18\n\x12\x45VALUATION_SUCCESS\x10\xde\xd0\x02\x12(\n\"EVALUATION_FAILED_TO_RETRIEVE_DATA\x10\xdf\xd0\x02\x12!\n\x1b\x45VALUATION_INVALID_ARGUMENT\x10\xe0\xd0\x02\x12\x17\n\x11\x45VALUATION_FAILED\x10\xe1\xd0\x02\x12\x18\n\x12\x45VALUATION_PENDING\x10\xe2\xd0\x02\x12\x1a\n\x14\x45VALUATION_TIMED_OUT\x10\xe3\xd0\x02\x12!\n\x1b\x45VALUATION_UNEXPECTED_ERROR\x10\xe4\xd0\x02\x12\x16\n\x10\x45VALUATION_MIXED\x10\xe5\xd0\x02\x12\x18\n\x12STRIPE_EVENT_ERROR\x10\xe1\xd7\x02\x12\x10\n\nCACHE_MISS\x10\xc9\xdf\x02\x12&\n REDIS_SCRIPT_EXITED_WITH_FAILURE\x10\xca\xdf\x02\x12\x16\n\x10REDIS_STREAM_ERR\x10\xcb\xdf\x02\x12\x18\n\x12REDIS_NO_CONSUMERS\x10\xcc\xdf\x02\x12\x1a\n\x14REDIS_STREAM_BACKOFF\x10\xcd\xdf\x02\x12\x18\n\x12SIGNUP_EVENT_ERROR\x10\xb1\xe7\x02\x12\x14\n\x0eSIGNUP_FLAGGED\x10\xb2\xe7\x02\x12\x1a\n\x14\x46ILETYPE_UNSUPPORTED\x10\xb3\xe7\x02\x12\x1f\n\x19\x41PP_COUNT_INVALID_MESSAGE\x10\x99\xef\x02\x12\'\n!APP_COUNT_UPDATE_INCREMENT_FAILED\x10\x9a\xef\x02\x12\x1e\n\x18\x41PP_COUNT_REBUILD_FAILED\x10\x9b\xef\x02\x12 
\n\x1a\x41PP_COUNT_INTERNAL_FAILURE\x10\x9c\xef\x02\x12\x17\n\x11MP_DOWNLOAD_ERROR\x10\xfd\xef\x02\x12\x1a\n\x14MP_RESOLVE_DNS_ERROR\x10\xfe\xef\x02\x12)\n#MP_DOWNLOAD_MAX_SIZE_EXCEEDED_ERROR\x10\xff\xef\x02\x12\x1b\n\x15MP_IMAGE_DECODE_ERROR\x10\x80\xf0\x02\x12\x19\n\x13MP_INVALID_ARGUMENT\x10\x81\xf0\x02\x12\x1f\n\x19MP_IMAGE_PROCESSING_ERROR\x10\x82\xf0\x02\x12\x19\n\x13\x44\x41TATIER_CONN_ERROR\x10\xe1\xf0\x02\x12\x17\n\x11USER_CONSENT_FACE\x10\xd1\x86\x03\x12\x14\n\x0eWORKER_MISSING\x10\xb8\x8e\x03\x12\x13\n\rWORKER_ACTIVE\x10\xb9\x8e\x03\x12\x15\n\x0fWORKER_INACTIVE\x10\xba\x8e\x03\x12\x17\n\x11\x43OLLECTOR_MISSING\x10\xa0\x96\x03\x12\x16\n\x10\x43OLLECTOR_ACTIVE\x10\xa1\x96\x03\x12\x18\n\x12\x43OLLECTOR_INACTIVE\x10\xa2\x96\x03\x12!\n\x1b\x43OLLECTOR_POST_INPUT_FAILED\x10\xa3\x96\x03\x12*\n$SSO_IDENTITY_PROVIDER_DOES_NOT_EXIST\x10\x89\x9e\x03\x12\x16\n\x10TASK_IN_PROGRESS\x10\xf1\xa5\x03\x12\x0f\n\tTASK_DONE\x10\xf2\xa5\x03\x12\x12\n\x0cTASK_WONT_DO\x10\xf3\xa5\x03\x12\"\n\x1cTASK_ADD_ANNOTATIONS_FAILURE\x10\xf5\xa5\x03\x12\x13\n\rTASK_CONFLICT\x10\xd4\xa6\x03\x12\x1a\n\x14TASK_NOT_IMPLEMENTED\x10\xd5\xa6\x03\x12\x12\n\x0cTASK_MISSING\x10\xd6\xa6\x03\x12\x19\n\x13LABEL_ORDER_PENDING\x10\xd9\xad\x03\x12\x1d\n\x17LABEL_ORDER_IN_PROGRESS\x10\xda\xad\x03\x12\x19\n\x13LABEL_ORDER_SUCCESS\x10\xdb\xad\x03\x12\x1a\n\x14LABEL_ORDER_CANCELED\x10\xdc\xad\x03\x12\x14\n\x0eLICENSE_ACTIVE\x10\xe0\xd4\x03\x12\x1c\n\x16LICENSE_DOES_NOT_EXIST\x10\xe1\xd4\x03\x12\x19\n\x13LICENSE_NEED_UPDATE\x10\xe2\xd4\x03\x12\x15\n\x0fLICENSE_EXPIRED\x10\xe3\xd4\x03\x12\x15\n\x0fLICENSE_REVOKED\x10\xe4\xd4\x03\x12\x15\n\x0fLICENSE_DELETED\x10\xe5\xd4\x03\x12\x1d\n\x17LICENSE_VOLUME_EXCEEDED\x10\xe6\xd4\x03\x12!\n\x1bPASSWORD_VALIDATION_SUCCESS\x10\xc8\xdc\x03\x12 \n\x1aPASSWORD_VALIDATION_FAILED\x10\xc9\xdc\x03\x12%\n\x1fPASSWORDPOLICY_INVALID_ARGUMENT\x10\xca\xdc\x03\x12\"\n\x1c\x46\x45\x41TUREFLAG_CONFIG_NOT_FOUND\x10\xb0\xe4\x03\x12\"\n\x1c\x46\x45\x41TUREFLAG_INVALID_ARGUMENT\x10\xb1\xe4\x03\x12\x19\n\x13\x46\x45\x41TUREFLAG_BLOCKED\x10\xb2\xe4\x03\x12\x19\n\x13MAINTENANCE_SUCCESS\x10\x98\xec\x03\x12\x18\n\x12MAINTENANCE_FAILED\x10\x99\xec\x03\x12\x1d\n\x17\x44\x41TASET_VERSION_PENDING\x10\x85\xf4\x03\x12!\n\x1b\x44\x41TASET_VERSION_IN_PROGRESS\x10\x8a\xf4\x03\x12\x1b\n\x15\x44\x41TASET_VERSION_READY\x10\x8f\xf4\x03\x12\x1d\n\x17\x44\x41TASET_VERSION_FAILURE\x10\x94\xf4\x03\x12&\n DATASET_VERSION_UNEXPECTED_ERROR\x10\x99\xf4\x03\x12\x10\n\nJOB_QUEUED\x10\x80\xf4\x03\x12\x11\n\x0bJOB_RUNNING\x10\x81\xf4\x03\x12\x13\n\rJOB_COMPLETED\x10\x82\xf4\x03\x12\x10\n\nJOB_FAILED\x10\x83\xf4\x03\x12\x1c\n\x16\x41UTH_MISSING_IDP_ASSOC\x10\xe8\xfb\x03\x12\x1b\n\x15INTERNAL_SERVER_ISSUE\x10\xd4\xfd\x05\x12\x1d\n\x17INTERNAL_FETCHING_ISSUE\x10\xd5\xfd\x05\x12\x1d\n\x17INTERNAL_DATABASE_ISSUE\x10\xd6\xfd\x05\x12!\n\x1bINTERNAL_UNEXPECTED_TIMEOUT\x10\xd9\xfd\x05\x12\x1c\n\x16INTERNAL_UNEXPECTED_V1\x10\xda\xfd\x05\x12\x1f\n\x19INTERNAL_UNEXPECTED_PANIC\x10\xdb\xfd\x05\x12\x1f\n\x19INTERNAL_UNEXPECTED_SPIRE\x10\xdc\xfd\x05\x12 \n\x1aINTERNAL_REDIS_UNAVAILABLE\x10\xdd\xfd\x05\x12!\n\x1bINTERNAL_RESOURCE_EXHAUSTED\x10\xde\xfd\x05\x12\"\n\x1cINTERNAL_REDIS_UNCATEGORIZED\x10\xdf\xfd\x05\x12 
\n\x1aINTERNAL_AWS_UNCATEGORIZED\x10\xe0\xfd\x05\x12\"\n\x1cINTERNAL_AZURE_UNCATEGORIZED\x10\xe1\xfd\x05\x12\x18\n\x12\x43ONN_UNCATEGORIZED\x10\xb9\x85\x06\x12\x19\n\x13MODEL_UNCATEGORIZED\x10\xba\x85\x06\x12\x19\n\x13INPUT_UNCATEGORIZED\x10\xbb\x85\x06\x12\x1e\n\x18\x41NNOTATION_UNCATEGORIZED\x10\xbc\x85\x06\x12\x1b\n\x15\x42ILLING_UNCATEGORIZED\x10\xbd\x85\x06\x12\x1c\n\x16INTERNAL_UNCATEGORIZED\x10\xc1\x85\x06\x12\x11\n\x0b\x42\x41\x44_REQUEST\x10\xa0\xc2\x05\x12\x12\n\x0cSERVER_ERROR\x10\x84\xc3\x05\"\x08\x08\xe8\x81\x02\x10\xe8\x81\x02\"\x08\x08\xe9\x81\x02\x10\xe9\x81\x02\"\x08\x08\xea\x81\x02\x10\xea\x81\x02\"\x08\x08\xcc\x82\x02\x10\xcc\x82\x02\"\x08\x08\xcd\x82\x02\x10\xcd\x82\x02\"\x08\x08\xce\x82\x02\x10\xce\x82\x02\"\x08\x08\xcf\x82\x02\x10\xcf\x82\x02\"\x08\x08\xd0\x82\x02\x10\xd0\x82\x02\"\x08\x08\xd1\x82\x02\x10\xd1\x82\x02\"\x08\x08\xd2\x82\x02\x10\xd2\x82\x02\"\x08\x08\xb0\x83\x02\x10\xb0\x83\x02\"\x08\x08\xb1\x83\x02\x10\xb1\x83\x02\"\x08\x08\xb3\x83\x02\x10\xb3\x83\x02\"\x08\x08\xba\x83\x02\x10\xba\x83\x02\"\x08\x08\xbb\xb8\x02\x10\xbb\xb8\x02\"\x08\x08\xd2\xb8\x02\x10\xd2\xb8\x02\"\x08\x08\xd3\xb8\x02\x10\xd3\xb8\x02\"\x08\x08\xf0\xc1\x02\x10\xf0\xc1\x02\"\x08\x08\xf1\xc1\x02\x10\xf1\xc1\x02\"\x08\x08\xf2\xc1\x02\x10\xf2\xc1\x02\"\x08\x08\xf3\xc1\x02\x10\xf3\xc1\x02\"\x08\x08\xf4\xc1\x02\x10\xf4\xc1\x02\x42\x9f\x01\n\x1c\x63om.clarifai.grpc.api.statusP\x01Zvgithub.com/Clarifai/clarifai-go-grpc/proto/clarifai/api/github.com/Clarifai/clarifai-go-grpc/proto/clarifai/api/status\xa2\x02\x04\x43\x41IPb\x06proto3'
)
_STATUSCODE = _descriptor.EnumDescriptor(
name='StatusCode',
full_name='clarifai.api.status.StatusCode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ZERO', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUCCESS', index=1, number=10000,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MIXED_STATUS', index=2, number=10010,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FAILURE', index=3, number=10020,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TRY_AGAIN', index=4, number=10030,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_IMPLEMENTED', index=5, number=10040,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONN_ACCOUNT_ISSUES', index=6, number=11000,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONN_TOKEN_INVALID', index=7, number=11001,
serialized_options=b'\010\001',
type=None),
_descriptor.EnumValueDescriptor(
name='CONN_CREDENTIALS_INVALID', index=8, number=11002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONN_EXCEED_HOURLY_LIMIT', index=9, number=11003,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONN_EXCEED_MONTHLY_LIMIT', index=10, number=11004,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONN_THROTTLED', index=11, number=11005,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONN_EXCEEDS_LIMITS', index=12, number=11006,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONN_INSUFFICIENT_SCOPES', index=13, number=11007,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONN_KEY_INVALID', index=14, number=11008,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONN_KEY_NOT_FOUND', index=15, number=11009,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONN_BAD_REQUEST_FORMAT', index=16, number=11100,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONN_DOES_NOT_EXIST', index=17, number=11101,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONN_INVALID_REQUEST', index=18, number=11102,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONN_METHOD_NOT_ALLOWED', index=19, number=11103,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONN_NO_GDPR_CONSENT', index=20, number=11104,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONN_AUTH_METHOD_DISABLED', index=21, number=11200,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_TRAINED', index=22, number=21100,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_TRAINING', index=23, number=21101,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_UNTRAINED', index=24, number=21102,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_QUEUED_FOR_TRAINING', index=25, number=21103,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_UPLOADING', index=26, number=21104,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_UPLOADING_FAILED', index=27, number=21105,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_TRAINING_NO_DATA', index=28, number=21110,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_TRAINING_NO_POSITIVES', index=29, number=21111,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_TRAINING_ONE_VS_N_SINGLE_CLASS', index=30, number=21112,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_TRAINING_TIMED_OUT', index=31, number=21113,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_TRAINING_WAITING_ERROR', index=32, number=21114,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_TRAINING_UNKNOWN_ERROR', index=33, number=21115,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_TRAINING_MSG_REDELIVER', index=34, number=21116,
serialized_options=b'\010\001',
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_TRAINING_INSUFFICIENT_DATA', index=35, number=21117,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_TRAINING_INVALID_PARAMS', index=36, number=21118,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_TRAINING_INVALID_DATA_TOLERANCE_EXCEEDED', index=37, number=21119,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_MODIFY_SUCCESS', index=38, number=21150,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_MODIFY_PENDING', index=39, number=21151,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_MODIFY_FAILED', index=40, number=21152,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_DOES_NOT_EXIST', index=41, number=21200,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_PERMISSION_DENIED', index=42, number=21201,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_INVALID_ARGUMENT', index=43, number=21202,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_INVALID_REQUEST', index=44, number=21203,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_EVALUATED', index=45, number=21300,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_EVALUATING', index=46, number=21301,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_NOT_EVALUATED', index=47, number=21302,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_QUEUED_FOR_EVALUATION', index=48, number=21303,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_EVALUATION_TIMED_OUT', index=49, number=21310,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_EVALUATION_WAITING_ERROR', index=50, number=21311,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_EVALUATION_UNKNOWN_ERROR', index=51, number=21312,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_PREDICTION_FAILED', index=52, number=21313,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_EVALUATION_MSG_REDELIVER', index=53, number=21314,
serialized_options=b'\010\001',
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_EVALUATION_NEED_LABELS', index=54, number=21315,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_EVALUATION_NEED_INPUTS', index=55, number=21316,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_DEPLOYMENT_FAILED', index=56, number=21350,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_DEPLOYING', index=57, number=21351,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_QUEUED_FOR_DEPLOYMENT', index=58, number=21352,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_NOT_DEPLOYED', index=59, number=21353,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_REFERENCE_INVALID_ARGUMENT', index=60, number=21400,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_EXAMPLE_INPUT_INVALID_ARGUMENT', index=61, number=21420,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORKFLOW_NO_MATCHING_INPUT', index=62, number=22001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORKFLOW_REQUIRE_TRAINED_MODEL', index=63, number=22002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORKFLOW_DUPLICATE', index=64, number=22100,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORKFLOW_UNSUPPORTED_FORMAT', index=65, number=22101,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORKFLOW_DOES_NOT_EXIST', index=66, number=22102,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORKFLOW_PERMISSION_DENIED', index=67, number=22103,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORKFLOW_INVALID_ARGUMENT', index=68, number=22104,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORKFLOW_INVALID_RECIPE', index=69, number=22105,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORKFLOW_INVALID_TEMPLATE', index=70, number=22106,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORKFLOW_INVALID_GRAPH', index=71, number=22107,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORKFLOW_INTERNAL_FAILURE', index=72, number=22108,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORKFLOW_INVALID_REQUEST', index=73, number=22999,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORKFLOW_MODIFY_SUCCESS', index=74, number=22150,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORKFLOW_MODIFY_PENDING', index=75, number=22151,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORKFLOW_MODIFY_FAILED', index=76, number=22152,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORKFLOW_REINDEX_FAILED', index=77, number=22153,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONCEPT_MODIFY_SUCCESS', index=78, number=23150,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONCEPT_MODIFY_PENDING', index=79, number=23151,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONCEPT_MODIFY_FAILED', index=80, number=23152,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ANNOTATION_SUCCESS', index=81, number=24150,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ANNOTATION_PENDING', index=82, number=24151,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ANNOTATION_FAILED', index=83, number=24152,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ANNOTATION_UNKNOWN_STATUS', index=84, number=24154,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ANNOTATION_INVALID_ARGUMENT', index=85, number=24155,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ANNOTATION_PERMISSION_DENIED', index=86, number=24156,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ANNOTATION_AWAITING_REVIEW', index=87, number=24157,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ANNOTATION_AWAITING_CONSENSUS_REVIEW', index=88, number=24159,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ANNOTATION_REVIEW_DENIED', index=89, number=24158,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ANNOTATION_MODIFY_SUCCESS', index=90, number=24250,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ANNOTATION_MODIFY_PENDING', index=91, number=24251,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ANNOTATION_MODIFY_FAILED', index=92, number=24252,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='METADATA_INVALID_PATCH_ARGUMENTS', index=93, number=24900,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='METADATA_PARSING_ISSUE', index=94, number=24901,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='METADATA_MANIPULATION_ISSUE', index=95, number=24902,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TRAINER_JOB_STATE_NONE', index=96, number=25000,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TRAINER_JOB_STATE_QUEUED', index=97, number=25001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TRAINER_JOB_STATE_RUNNING', index=98, number=25002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TRAINER_JOB_STATE_COMPLETE', index=99, number=25003,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TRAINER_JOB_STATE_ERROR', index=100, number=25004,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATA_DUMP_SUCCESS', index=101, number=25150,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATA_DUMP_PENDING', index=102, number=25151,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATA_DUMP_FAILED', index=103, number=25152,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATA_DUMP_IN_PROGRESS', index=104, number=25153,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATA_DUMP_NO_DATA', index=105, number=25154,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='APP_DUPLICATION_SUCCESS', index=106, number=25200,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='APP_DUPLICATION_FAILED', index=107, number=25201,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='APP_DUPLICATION_PENDING', index=108, number=25202,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='APP_DUPLICATION_IN_PROGRESS', index=109, number=25203,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='APP_DUPLICATION_INVALID_REQUEST', index=110, number=25204,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_DOWNLOAD_SUCCESS', index=111, number=30000,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_DOWNLOAD_PENDING', index=112, number=30001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_DOWNLOAD_FAILED', index=113, number=30002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_DOWNLOAD_IN_PROGRESS', index=114, number=30003,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_STATUS_UPDATE_FAILED', index=115, number=30004,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_DELETE_FAILED', index=116, number=30005,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_DUPLICATE', index=117, number=30100,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_UNSUPPORTED_FORMAT', index=118, number=30101,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_DOES_NOT_EXIST', index=119, number=30102,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_PERMISSION_DENIED', index=120, number=30103,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_INVALID_ARGUMENT', index=121, number=30104,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_OVER_LIMIT', index=122, number=30105,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_INVALID_URL', index=123, number=30106,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_MODIFY_SUCCESS', index=124, number=30200,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_MODIFY_PENDING', index=125, number=30201,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_MODIFY_FAILED', index=126, number=30203,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_STORAGE_HOST_FAILED', index=127, number=30210,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ALL_INPUT_INVALID_BYTES', index=128, number=30300,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_CLUSTER_SUCCESS', index=129, number=30400,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_CLUSTER_PENDING', index=130, number=30401,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_CLUSTER_FAILED', index=131, number=30402,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_CLUSTER_IN_PROGRESS', index=132, number=30403,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_REINDEX_SUCCESS', index=133, number=30500,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_REINDEX_PENDING', index=134, number=30501,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_REINDEX_FAILED', index=135, number=30502,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_REINDEX_IN_PROGRESS', index=136, number=30503,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_VIDEO_DOWNLOAD_SUCCESS', index=137, number=31000,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_VIDEO_DOWNLOAD_PENDING', index=138, number=31001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_VIDEO_DOWNLOAD_FAILED', index=139, number=31002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_VIDEO_DUPLICATE', index=140, number=31100,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_VIDEO_UNSUPPORTED_FORMAT', index=141, number=31101,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_VIDEO_DOES_NOT_EXIST', index=142, number=31102,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_VIDEO_PERMISSION_DENIED', index=143, number=31103,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_VIDEO_INVALID_ARGUMENT', index=144, number=31104,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_VIDEO_OVER_LIMIT', index=145, number=31105,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_VIDEO_INVALID_URL', index=146, number=31106,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_VIDEO_MODIFY_SUCCESS', index=147, number=31200,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_VIDEO_MODIFY_PENDING', index=148, number=31201,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_VIDEO_MODIFY_FAILED', index=149, number=31203,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_VIDEO_STORAGE_HOST_FAILED', index=150, number=31210,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ALL_INPUT_VIDEOS_INVALID_BYTES', index=151, number=31300,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_CONNECTION_FAILED', index=152, number=39996,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REQUEST_DISABLED_FOR_MAINTENANCE', index=153, number=39997,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_WRITES_DISABLED_FOR_MAINTENANCE', index=154, number=39998,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_INVALID_REQUEST', index=155, number=39999,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PREDICT_INVALID_REQUEST', index=156, number=40001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SEARCH_INVALID_REQUEST', index=157, number=40002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONCEPTS_INVALID_REQUEST', index=158, number=40003,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STATS_INVALID_REQUEST', index=159, number=40004,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATABASE_DUPLICATE_KEY', index=160, number=40010,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATABASE_STATEMENT_TIMEOUT', index=161, number=40011,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATABASE_INVALID_ROWS_AFFECTED', index=162, number=40012,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATABASE_DEADLOCK_DETECTED', index=163, number=40013,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATABASE_FAIL_TASK', index=164, number=40014,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATABASE_FAIL_TO_GET_CONNECTIONS', index=165, number=40015,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATABASE_TOO_MANY_CLIENTS', index=166, number=40016,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATABASE_CONSTRAINT_VIOLATED', index=167, number=40017,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ASYNC_WORKER_MULTI_ERRORS', index=168, number=40020,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RPC_REQUEST_QUEUE_FULL', index=169, number=40030,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RPC_SERVER_UNAVAILABLE', index=170, number=40031,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RPC_REQUEST_TIMEOUT', index=171, number=40032,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RPC_MAX_MESSAGE_SIZE_EXCEEDED', index=172, number=40033,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RPC_CANCELED', index=173, number=40035,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RPC_UNKNOWN_METHOD', index=174, number=40036,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REQUEST_CANCELED_BY_USER', index=175, number=40037,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CLUSTER_INTERNAL_FAILURE', index=176, number=43040,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EXTERNAL_CONNECTION_ERROR', index=177, number=40034,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QUEUE_CONN_ERROR', index=178, number=41000,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QUEUE_CLOSE_REQUEST_TIMEOUT', index=179, number=41002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QUEUE_CONN_CLOSED', index=180, number=41003,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QUEUE_PUBLISH_ACK_TIMEOUT', index=181, number=41004,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QUEUE_PUBLISH_ERROR', index=182, number=41005,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QUEUE_SUBSCRIPTION_TIMEOUT', index=183, number=41006,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QUEUE_SUBSCRIPTION_ERROR', index=184, number=41007,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QUEUE_MARSHALLING_FAILED', index=185, number=41008,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QUEUE_UNMARSHALLING_FAILED', index=186, number=41009,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QUEUE_MAX_MSG_REDELIVERY_EXCEEDED', index=187, number=41010,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QUEUE_ACK_FAILURE', index=188, number=41011,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SQS_OVERLIMIT', index=189, number=41100,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SQS_INVALID_RECEIPT_HANDLE', index=190, number=41101,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SQS_UNKNOWN', index=191, number=41102,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SEARCH_INTERNAL_FAILURE', index=192, number=43001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SEARCH_PROJECTION_FAILURE', index=193, number=43002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SEARCH_PREDICTION_FAILURE', index=194, number=43003,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SEARCH_BY_NOT_FULLY_INDEXED_INPUT', index=195, number=43004,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SAVED_SEARCH_MODIFY_FAILED', index=196, number=43005,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EVALUATION_QUEUED', index=197, number=43100,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EVALUATION_IN_PROGRESS', index=198, number=43101,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EVALUATION_SUCCESS', index=199, number=43102,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EVALUATION_FAILED_TO_RETRIEVE_DATA', index=200, number=43103,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EVALUATION_INVALID_ARGUMENT', index=201, number=43104,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EVALUATION_FAILED', index=202, number=43105,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EVALUATION_PENDING', index=203, number=43106,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EVALUATION_TIMED_OUT', index=204, number=43107,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EVALUATION_UNEXPECTED_ERROR', index=205, number=43108,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EVALUATION_MIXED', index=206, number=43109,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STRIPE_EVENT_ERROR', index=207, number=44001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CACHE_MISS', index=208, number=45001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REDIS_SCRIPT_EXITED_WITH_FAILURE', index=209, number=45002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REDIS_STREAM_ERR', index=210, number=45003,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REDIS_NO_CONSUMERS', index=211, number=45004,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REDIS_STREAM_BACKOFF', index=212, number=45005,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGNUP_EVENT_ERROR', index=213, number=46001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGNUP_FLAGGED', index=214, number=46002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FILETYPE_UNSUPPORTED', index=215, number=46003,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='APP_COUNT_INVALID_MESSAGE', index=216, number=47001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='APP_COUNT_UPDATE_INCREMENT_FAILED', index=217, number=47002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='APP_COUNT_REBUILD_FAILED', index=218, number=47003,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='APP_COUNT_INTERNAL_FAILURE', index=219, number=47004,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MP_DOWNLOAD_ERROR', index=220, number=47101,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MP_RESOLVE_DNS_ERROR', index=221, number=47102,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MP_DOWNLOAD_MAX_SIZE_EXCEEDED_ERROR', index=222, number=47103,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MP_IMAGE_DECODE_ERROR', index=223, number=47104,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MP_INVALID_ARGUMENT', index=224, number=47105,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MP_IMAGE_PROCESSING_ERROR', index=225, number=47106,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATATIER_CONN_ERROR', index=226, number=47201,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='USER_CONSENT_FACE', index=227, number=50001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORKER_MISSING', index=228, number=51000,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORKER_ACTIVE', index=229, number=51001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WORKER_INACTIVE', index=230, number=51002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COLLECTOR_MISSING', index=231, number=52000,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COLLECTOR_ACTIVE', index=232, number=52001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COLLECTOR_INACTIVE', index=233, number=52002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COLLECTOR_POST_INPUT_FAILED', index=234, number=52003,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SSO_IDENTITY_PROVIDER_DOES_NOT_EXIST', index=235, number=53001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TASK_IN_PROGRESS', index=236, number=54001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TASK_DONE', index=237, number=54002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TASK_WONT_DO', index=238, number=54003,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TASK_ADD_ANNOTATIONS_FAILURE', index=239, number=54005,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TASK_CONFLICT', index=240, number=54100,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TASK_NOT_IMPLEMENTED', index=241, number=54101,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TASK_MISSING', index=242, number=54102,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LABEL_ORDER_PENDING', index=243, number=55001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LABEL_ORDER_IN_PROGRESS', index=244, number=55002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LABEL_ORDER_SUCCESS', index=245, number=55003,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LABEL_ORDER_CANCELED', index=246, number=55004,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LICENSE_ACTIVE', index=247, number=60000,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LICENSE_DOES_NOT_EXIST', index=248, number=60001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LICENSE_NEED_UPDATE', index=249, number=60002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LICENSE_EXPIRED', index=250, number=60003,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LICENSE_REVOKED', index=251, number=60004,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LICENSE_DELETED', index=252, number=60005,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LICENSE_VOLUME_EXCEEDED', index=253, number=60006,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PASSWORD_VALIDATION_SUCCESS', index=254, number=61000,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PASSWORD_VALIDATION_FAILED', index=255, number=61001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PASSWORDPOLICY_INVALID_ARGUMENT', index=256, number=61002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FEATUREFLAG_CONFIG_NOT_FOUND', index=257, number=62000,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FEATUREFLAG_INVALID_ARGUMENT', index=258, number=62001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FEATUREFLAG_BLOCKED', index=259, number=62002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MAINTENANCE_SUCCESS', index=260, number=63000,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MAINTENANCE_FAILED', index=261, number=63001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATASET_VERSION_PENDING', index=262, number=64005,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATASET_VERSION_IN_PROGRESS', index=263, number=64010,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATASET_VERSION_READY', index=264, number=64015,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATASET_VERSION_FAILURE', index=265, number=64020,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATASET_VERSION_UNEXPECTED_ERROR', index=266, number=64025,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JOB_QUEUED', index=267, number=64000,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JOB_RUNNING', index=268, number=64001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JOB_COMPLETED', index=269, number=64002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JOB_FAILED', index=270, number=64003,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AUTH_MISSING_IDP_ASSOC', index=271, number=65000,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_SERVER_ISSUE', index=272, number=98004,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_FETCHING_ISSUE', index=273, number=98005,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_DATABASE_ISSUE', index=274, number=98006,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_UNEXPECTED_TIMEOUT', index=275, number=98009,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_UNEXPECTED_V1', index=276, number=98010,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_UNEXPECTED_PANIC', index=277, number=98011,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_UNEXPECTED_SPIRE', index=278, number=98012,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_REDIS_UNAVAILABLE', index=279, number=98013,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_RESOURCE_EXHAUSTED', index=280, number=98014,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_REDIS_UNCATEGORIZED', index=281, number=98015,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_AWS_UNCATEGORIZED', index=282, number=98016,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_AZURE_UNCATEGORIZED', index=283, number=98017,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONN_UNCATEGORIZED', index=284, number=99001,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL_UNCATEGORIZED', index=285, number=99002,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INPUT_UNCATEGORIZED', index=286, number=99003,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ANNOTATION_UNCATEGORIZED', index=287, number=99004,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BILLING_UNCATEGORIZED', index=288, number=99005,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_UNCATEGORIZED', index=289, number=99009,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BAD_REQUEST', index=290, number=90400,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SERVER_ERROR', index=291, number=90500,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=69,
serialized_end=9129,
)
_sym_db.RegisterEnumDescriptor(_STATUSCODE)
StatusCode = enum_type_wrapper.EnumTypeWrapper(_STATUSCODE)
ZERO = 0
SUCCESS = 10000
MIXED_STATUS = 10010
FAILURE = 10020
TRY_AGAIN = 10030
NOT_IMPLEMENTED = 10040
CONN_ACCOUNT_ISSUES = 11000
CONN_TOKEN_INVALID = 11001
CONN_CREDENTIALS_INVALID = 11002
CONN_EXCEED_HOURLY_LIMIT = 11003
CONN_EXCEED_MONTHLY_LIMIT = 11004
CONN_THROTTLED = 11005
CONN_EXCEEDS_LIMITS = 11006
CONN_INSUFFICIENT_SCOPES = 11007
CONN_KEY_INVALID = 11008
CONN_KEY_NOT_FOUND = 11009
CONN_BAD_REQUEST_FORMAT = 11100
CONN_DOES_NOT_EXIST = 11101
CONN_INVALID_REQUEST = 11102
CONN_METHOD_NOT_ALLOWED = 11103
CONN_NO_GDPR_CONSENT = 11104
CONN_AUTH_METHOD_DISABLED = 11200
MODEL_TRAINED = 21100
MODEL_TRAINING = 21101
MODEL_UNTRAINED = 21102
MODEL_QUEUED_FOR_TRAINING = 21103
MODEL_UPLOADING = 21104
MODEL_UPLOADING_FAILED = 21105
MODEL_TRAINING_NO_DATA = 21110
MODEL_TRAINING_NO_POSITIVES = 21111
MODEL_TRAINING_ONE_VS_N_SINGLE_CLASS = 21112
MODEL_TRAINING_TIMED_OUT = 21113
MODEL_TRAINING_WAITING_ERROR = 21114
MODEL_TRAINING_UNKNOWN_ERROR = 21115
MODEL_TRAINING_MSG_REDELIVER = 21116
MODEL_TRAINING_INSUFFICIENT_DATA = 21117
MODEL_TRAINING_INVALID_PARAMS = 21118
MODEL_TRAINING_INVALID_DATA_TOLERANCE_EXCEEDED = 21119
MODEL_MODIFY_SUCCESS = 21150
MODEL_MODIFY_PENDING = 21151
MODEL_MODIFY_FAILED = 21152
MODEL_DOES_NOT_EXIST = 21200
MODEL_PERMISSION_DENIED = 21201
MODEL_INVALID_ARGUMENT = 21202
MODEL_INVALID_REQUEST = 21203
MODEL_EVALUATED = 21300
MODEL_EVALUATING = 21301
MODEL_NOT_EVALUATED = 21302
MODEL_QUEUED_FOR_EVALUATION = 21303
MODEL_EVALUATION_TIMED_OUT = 21310
MODEL_EVALUATION_WAITING_ERROR = 21311
MODEL_EVALUATION_UNKNOWN_ERROR = 21312
MODEL_PREDICTION_FAILED = 21313
MODEL_EVALUATION_MSG_REDELIVER = 21314
MODEL_EVALUATION_NEED_LABELS = 21315
MODEL_EVALUATION_NEED_INPUTS = 21316
MODEL_DEPLOYMENT_FAILED = 21350
MODEL_DEPLOYING = 21351
MODEL_QUEUED_FOR_DEPLOYMENT = 21352
MODEL_NOT_DEPLOYED = 21353
MODEL_REFERENCE_INVALID_ARGUMENT = 21400
MODEL_EXAMPLE_INPUT_INVALID_ARGUMENT = 21420
WORKFLOW_NO_MATCHING_INPUT = 22001
WORKFLOW_REQUIRE_TRAINED_MODEL = 22002
WORKFLOW_DUPLICATE = 22100
WORKFLOW_UNSUPPORTED_FORMAT = 22101
WORKFLOW_DOES_NOT_EXIST = 22102
WORKFLOW_PERMISSION_DENIED = 22103
WORKFLOW_INVALID_ARGUMENT = 22104
WORKFLOW_INVALID_RECIPE = 22105
WORKFLOW_INVALID_TEMPLATE = 22106
WORKFLOW_INVALID_GRAPH = 22107
WORKFLOW_INTERNAL_FAILURE = 22108
WORKFLOW_INVALID_REQUEST = 22999
WORKFLOW_MODIFY_SUCCESS = 22150
WORKFLOW_MODIFY_PENDING = 22151
WORKFLOW_MODIFY_FAILED = 22152
WORKFLOW_REINDEX_FAILED = 22153
CONCEPT_MODIFY_SUCCESS = 23150
CONCEPT_MODIFY_PENDING = 23151
CONCEPT_MODIFY_FAILED = 23152
ANNOTATION_SUCCESS = 24150
ANNOTATION_PENDING = 24151
ANNOTATION_FAILED = 24152
ANNOTATION_UNKNOWN_STATUS = 24154
ANNOTATION_INVALID_ARGUMENT = 24155
ANNOTATION_PERMISSION_DENIED = 24156
ANNOTATION_AWAITING_REVIEW = 24157
ANNOTATION_AWAITING_CONSENSUS_REVIEW = 24159
ANNOTATION_REVIEW_DENIED = 24158
ANNOTATION_MODIFY_SUCCESS = 24250
ANNOTATION_MODIFY_PENDING = 24251
ANNOTATION_MODIFY_FAILED = 24252
METADATA_INVALID_PATCH_ARGUMENTS = 24900
METADATA_PARSING_ISSUE = 24901
METADATA_MANIPULATION_ISSUE = 24902
TRAINER_JOB_STATE_NONE = 25000
TRAINER_JOB_STATE_QUEUED = 25001
TRAINER_JOB_STATE_RUNNING = 25002
TRAINER_JOB_STATE_COMPLETE = 25003
TRAINER_JOB_STATE_ERROR = 25004
DATA_DUMP_SUCCESS = 25150
DATA_DUMP_PENDING = 25151
DATA_DUMP_FAILED = 25152
DATA_DUMP_IN_PROGRESS = 25153
DATA_DUMP_NO_DATA = 25154
APP_DUPLICATION_SUCCESS = 25200
APP_DUPLICATION_FAILED = 25201
APP_DUPLICATION_PENDING = 25202
APP_DUPLICATION_IN_PROGRESS = 25203
APP_DUPLICATION_INVALID_REQUEST = 25204
INPUT_DOWNLOAD_SUCCESS = 30000
INPUT_DOWNLOAD_PENDING = 30001
INPUT_DOWNLOAD_FAILED = 30002
INPUT_DOWNLOAD_IN_PROGRESS = 30003
INPUT_STATUS_UPDATE_FAILED = 30004
INPUT_DELETE_FAILED = 30005
INPUT_DUPLICATE = 30100
INPUT_UNSUPPORTED_FORMAT = 30101
INPUT_DOES_NOT_EXIST = 30102
INPUT_PERMISSION_DENIED = 30103
INPUT_INVALID_ARGUMENT = 30104
INPUT_OVER_LIMIT = 30105
INPUT_INVALID_URL = 30106
INPUT_MODIFY_SUCCESS = 30200
INPUT_MODIFY_PENDING = 30201
INPUT_MODIFY_FAILED = 30203
INPUT_STORAGE_HOST_FAILED = 30210
ALL_INPUT_INVALID_BYTES = 30300
INPUT_CLUSTER_SUCCESS = 30400
INPUT_CLUSTER_PENDING = 30401
INPUT_CLUSTER_FAILED = 30402
INPUT_CLUSTER_IN_PROGRESS = 30403
INPUT_REINDEX_SUCCESS = 30500
INPUT_REINDEX_PENDING = 30501
INPUT_REINDEX_FAILED = 30502
INPUT_REINDEX_IN_PROGRESS = 30503
INPUT_VIDEO_DOWNLOAD_SUCCESS = 31000
INPUT_VIDEO_DOWNLOAD_PENDING = 31001
INPUT_VIDEO_DOWNLOAD_FAILED = 31002
INPUT_VIDEO_DUPLICATE = 31100
INPUT_VIDEO_UNSUPPORTED_FORMAT = 31101
INPUT_VIDEO_DOES_NOT_EXIST = 31102
INPUT_VIDEO_PERMISSION_DENIED = 31103
INPUT_VIDEO_INVALID_ARGUMENT = 31104
INPUT_VIDEO_OVER_LIMIT = 31105
INPUT_VIDEO_INVALID_URL = 31106
INPUT_VIDEO_MODIFY_SUCCESS = 31200
INPUT_VIDEO_MODIFY_PENDING = 31201
INPUT_VIDEO_MODIFY_FAILED = 31203
INPUT_VIDEO_STORAGE_HOST_FAILED = 31210
ALL_INPUT_VIDEOS_INVALID_BYTES = 31300
INPUT_CONNECTION_FAILED = 39996
REQUEST_DISABLED_FOR_MAINTENANCE = 39997
INPUT_WRITES_DISABLED_FOR_MAINTENANCE = 39998
INPUT_INVALID_REQUEST = 39999
PREDICT_INVALID_REQUEST = 40001
SEARCH_INVALID_REQUEST = 40002
CONCEPTS_INVALID_REQUEST = 40003
STATS_INVALID_REQUEST = 40004
DATABASE_DUPLICATE_KEY = 40010
DATABASE_STATEMENT_TIMEOUT = 40011
DATABASE_INVALID_ROWS_AFFECTED = 40012
DATABASE_DEADLOCK_DETECTED = 40013
DATABASE_FAIL_TASK = 40014
DATABASE_FAIL_TO_GET_CONNECTIONS = 40015
DATABASE_TOO_MANY_CLIENTS = 40016
DATABASE_CONSTRAINT_VIOLATED = 40017
ASYNC_WORKER_MULTI_ERRORS = 40020
RPC_REQUEST_QUEUE_FULL = 40030
RPC_SERVER_UNAVAILABLE = 40031
RPC_REQUEST_TIMEOUT = 40032
RPC_MAX_MESSAGE_SIZE_EXCEEDED = 40033
RPC_CANCELED = 40035
RPC_UNKNOWN_METHOD = 40036
REQUEST_CANCELED_BY_USER = 40037
CLUSTER_INTERNAL_FAILURE = 43040
EXTERNAL_CONNECTION_ERROR = 40034
QUEUE_CONN_ERROR = 41000
QUEUE_CLOSE_REQUEST_TIMEOUT = 41002
QUEUE_CONN_CLOSED = 41003
QUEUE_PUBLISH_ACK_TIMEOUT = 41004
QUEUE_PUBLISH_ERROR = 41005
QUEUE_SUBSCRIPTION_TIMEOUT = 41006
QUEUE_SUBSCRIPTION_ERROR = 41007
QUEUE_MARSHALLING_FAILED = 41008
QUEUE_UNMARSHALLING_FAILED = 41009
QUEUE_MAX_MSG_REDELIVERY_EXCEEDED = 41010
QUEUE_ACK_FAILURE = 41011
SQS_OVERLIMIT = 41100
SQS_INVALID_RECEIPT_HANDLE = 41101
SQS_UNKNOWN = 41102
SEARCH_INTERNAL_FAILURE = 43001
SEARCH_PROJECTION_FAILURE = 43002
SEARCH_PREDICTION_FAILURE = 43003
SEARCH_BY_NOT_FULLY_INDEXED_INPUT = 43004
SAVED_SEARCH_MODIFY_FAILED = 43005
EVALUATION_QUEUED = 43100
EVALUATION_IN_PROGRESS = 43101
EVALUATION_SUCCESS = 43102
EVALUATION_FAILED_TO_RETRIEVE_DATA = 43103
EVALUATION_INVALID_ARGUMENT = 43104
EVALUATION_FAILED = 43105
EVALUATION_PENDING = 43106
EVALUATION_TIMED_OUT = 43107
EVALUATION_UNEXPECTED_ERROR = 43108
EVALUATION_MIXED = 43109
STRIPE_EVENT_ERROR = 44001
CACHE_MISS = 45001
REDIS_SCRIPT_EXITED_WITH_FAILURE = 45002
REDIS_STREAM_ERR = 45003
REDIS_NO_CONSUMERS = 45004
REDIS_STREAM_BACKOFF = 45005
SIGNUP_EVENT_ERROR = 46001
SIGNUP_FLAGGED = 46002
FILETYPE_UNSUPPORTED = 46003
APP_COUNT_INVALID_MESSAGE = 47001
APP_COUNT_UPDATE_INCREMENT_FAILED = 47002
APP_COUNT_REBUILD_FAILED = 47003
APP_COUNT_INTERNAL_FAILURE = 47004
MP_DOWNLOAD_ERROR = 47101
MP_RESOLVE_DNS_ERROR = 47102
MP_DOWNLOAD_MAX_SIZE_EXCEEDED_ERROR = 47103
MP_IMAGE_DECODE_ERROR = 47104
MP_INVALID_ARGUMENT = 47105
MP_IMAGE_PROCESSING_ERROR = 47106
DATATIER_CONN_ERROR = 47201
USER_CONSENT_FACE = 50001
WORKER_MISSING = 51000
WORKER_ACTIVE = 51001
WORKER_INACTIVE = 51002
COLLECTOR_MISSING = 52000
COLLECTOR_ACTIVE = 52001
COLLECTOR_INACTIVE = 52002
COLLECTOR_POST_INPUT_FAILED = 52003
SSO_IDENTITY_PROVIDER_DOES_NOT_EXIST = 53001
TASK_IN_PROGRESS = 54001
TASK_DONE = 54002
TASK_WONT_DO = 54003
TASK_ADD_ANNOTATIONS_FAILURE = 54005
TASK_CONFLICT = 54100
TASK_NOT_IMPLEMENTED = 54101
TASK_MISSING = 54102
LABEL_ORDER_PENDING = 55001
LABEL_ORDER_IN_PROGRESS = 55002
LABEL_ORDER_SUCCESS = 55003
LABEL_ORDER_CANCELED = 55004
LICENSE_ACTIVE = 60000
LICENSE_DOES_NOT_EXIST = 60001
LICENSE_NEED_UPDATE = 60002
LICENSE_EXPIRED = 60003
LICENSE_REVOKED = 60004
LICENSE_DELETED = 60005
LICENSE_VOLUME_EXCEEDED = 60006
PASSWORD_VALIDATION_SUCCESS = 61000
PASSWORD_VALIDATION_FAILED = 61001
PASSWORDPOLICY_INVALID_ARGUMENT = 61002
FEATUREFLAG_CONFIG_NOT_FOUND = 62000
FEATUREFLAG_INVALID_ARGUMENT = 62001
FEATUREFLAG_BLOCKED = 62002
MAINTENANCE_SUCCESS = 63000
MAINTENANCE_FAILED = 63001
DATASET_VERSION_PENDING = 64005
DATASET_VERSION_IN_PROGRESS = 64010
DATASET_VERSION_READY = 64015
DATASET_VERSION_FAILURE = 64020
DATASET_VERSION_UNEXPECTED_ERROR = 64025
JOB_QUEUED = 64000
JOB_RUNNING = 64001
JOB_COMPLETED = 64002
JOB_FAILED = 64003
AUTH_MISSING_IDP_ASSOC = 65000
INTERNAL_SERVER_ISSUE = 98004
INTERNAL_FETCHING_ISSUE = 98005
INTERNAL_DATABASE_ISSUE = 98006
INTERNAL_UNEXPECTED_TIMEOUT = 98009
INTERNAL_UNEXPECTED_V1 = 98010
INTERNAL_UNEXPECTED_PANIC = 98011
INTERNAL_UNEXPECTED_SPIRE = 98012
INTERNAL_REDIS_UNAVAILABLE = 98013
INTERNAL_RESOURCE_EXHAUSTED = 98014
INTERNAL_REDIS_UNCATEGORIZED = 98015
INTERNAL_AWS_UNCATEGORIZED = 98016
INTERNAL_AZURE_UNCATEGORIZED = 98017
CONN_UNCATEGORIZED = 99001
MODEL_UNCATEGORIZED = 99002
INPUT_UNCATEGORIZED = 99003
ANNOTATION_UNCATEGORIZED = 99004
BILLING_UNCATEGORIZED = 99005
INTERNAL_UNCATEGORIZED = 99009
BAD_REQUEST = 90400
SERVER_ERROR = 90500
DESCRIPTOR.enum_types_by_name['StatusCode'] = _STATUSCODE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR._options = None
_STATUSCODE.values_by_name["CONN_TOKEN_INVALID"]._options = None
_STATUSCODE.values_by_name["MODEL_TRAINING_MSG_REDELIVER"]._options = None
_STATUSCODE.values_by_name["MODEL_EVALUATION_MSG_REDELIVER"]._options = None
# @@protoc_insertion_point(module_scope)
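# --- Editorial sketch (not part of the protoc output) ------------------------
# A minimal illustration of how the generated wrapper above is typically used:
# StatusCode.Name()/Value() translate between the symbolic names and their
# integer codes, and the module-level constants mirror the enum values.
# `_example_is_retryable` is a hypothetical helper added only for this sketch.
def _example_is_retryable(code: int) -> bool:
    # Treat explicit retry/throttling codes as retryable; adjust as needed.
    return code in (TRY_AGAIN, CONN_THROTTLED, RPC_REQUEST_TIMEOUT)

if __name__ == '__main__':
    print(StatusCode.Name(SUCCESS))           # -> 'SUCCESS'
    print(StatusCode.Value('MIXED_STATUS'))   # -> 10010
    print(_example_is_retryable(TRY_AGAIN))   # -> True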
| 46.508934
| 16,348
| 0.761395
|
56aba217a3bafda6c785589e697410e21f1d8096
| 35,395
|
py
|
Python
|
electrum/util.py
|
exofoundation/EXOS-Electrum
|
89e00bc4a1c5f5cb48f9aa5ef77dd1a9bcad9da5
|
[
"MIT"
] | 5
|
2019-05-15T16:11:21.000Z
|
2021-02-20T14:12:20.000Z
|
electrum/util.py
|
exofoundation/EXOS-Electrum
|
89e00bc4a1c5f5cb48f9aa5ef77dd1a9bcad9da5
|
[
"MIT"
] | 38
|
2019-04-29T21:15:22.000Z
|
2021-12-04T18:36:28.000Z
|
electrum/util.py
|
exofoundation/EXOS-Electrum
|
89e00bc4a1c5f5cb48f9aa5ef77dd1a9bcad9da5
|
[
"MIT"
] | 5
|
2019-04-25T17:35:49.000Z
|
2021-08-12T19:50:41.000Z
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict, OrderedDict
from typing import NamedTuple, Union, TYPE_CHECKING, Tuple, Optional, Callable, Any
from datetime import datetime
import decimal
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
import stat
from locale import localeconv
import asyncio
import urllib.request, urllib.parse, urllib.error
import builtins
import json
import time
from typing import NamedTuple, Optional
import ssl
import platform
import aiohttp
from aiohttp_socks import SocksConnector, SocksVer
from aiorpcx import TaskGroup
import certifi
from .i18n import _
from .logging import get_logger, Logger
if TYPE_CHECKING:
from .network import Network
from .interface import Interface
from .simple_config import SimpleConfig
_logger = get_logger(__name__)
def inv_dict(d):
return {v: k for k, v in d.items()}
ca_path = certifi.where()
base_units = {'EXOS':8, 'mEXOS':5, 'uEXOS':2, 'exo':0}
base_units_inverse = inv_dict(base_units)
base_units_list = ['EXOS', 'mEXOS', 'uEXOS', 'exo'] # list(dict) does not guarantee order
DECIMAL_POINT_DEFAULT = 8 # EXOS
class UnknownBaseUnit(Exception): pass
def decimal_point_to_base_unit_name(dp: int) -> str:
# e.g. 8 -> "EXOS"
try:
return base_units_inverse[dp]
except KeyError:
raise UnknownBaseUnit(dp) from None
def base_unit_name_to_decimal_point(unit_name: str) -> int:
# e.g. "EXOS" -> 8
try:
return base_units[unit_name]
except KeyError:
raise UnknownBaseUnit(unit_name) from None
class NotEnoughFunds(Exception):
def __str__(self):
return _("Insufficient funds")
class NoDynamicFeeEstimates(Exception):
def __str__(self):
return _('Dynamic fee estimates not available')
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
class FileImportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to import from file.") + "\n" + self.message
class FileExportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to export to file.") + "\n" + self.message
class WalletFileException(Exception): pass
class BitcoinException(Exception): pass
class UserFacingException(Exception):
"""Exception that contains information intended to be shown to the user."""
# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
# note: this is not a NamedTuple as then its json encoding cannot be customized
class Satoshis(object):
__slots__ = ('value',)
def __new__(cls, value):
self = super(Satoshis, cls).__new__(cls)
self.value = value
return self
def __repr__(self):
return 'Satoshis(%d)'%self.value
def __str__(self):
return format_satoshis(self.value) + " EXOS"
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return not (self == other)
# note: this is not a NamedTuple as then its json encoding cannot be customized
class Fiat(object):
__slots__ = ('value', 'ccy')
def __new__(cls, value: Optional[Decimal], ccy: str):
self = super(Fiat, cls).__new__(cls)
self.ccy = ccy
if not isinstance(value, (Decimal, type(None))):
raise TypeError(f"value should be Decimal or None, not {type(value)}")
self.value = value
return self
def __repr__(self):
return 'Fiat(%s)'% self.__str__()
def __str__(self):
if self.value is None or self.value.is_nan():
return _('No Data')
else:
return "{:.2f}".format(self.value)
def to_ui_string(self):
if self.value is None or self.value.is_nan():
return _('No Data')
else:
return "{:.2f}".format(self.value) + ' ' + self.ccy
def __eq__(self, other):
if self.ccy != other.ccy:
return False
if isinstance(self.value, Decimal) and isinstance(other.value, Decimal) \
and self.value.is_nan() and other.value.is_nan():
return True
return self.value == other.value
def __ne__(self, other):
return not (self == other)
class MyEncoder(json.JSONEncoder):
def default(self, obj):
# note: this does not get called for namedtuples :( https://bugs.python.org/issue30343
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
if isinstance(obj, Satoshis):
return str(obj)
if isinstance(obj, Fiat):
return str(obj)
if isinstance(obj, Decimal):
return str(obj)
if isinstance(obj, datetime):
return obj.isoformat(' ')[:-3]
if isinstance(obj, set):
return list(obj)
return super().default(obj)
class ThreadJob(Logger):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def __init__(self):
Logger.__init__(self)
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
ThreadJob.__init__(self)
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.logger.info("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.logger.info(f"{class_.__name__}: {len(objs)}")
self.logger.info("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, Logger):
""" daemon thread that terminates cleanly """
LOGGING_SHORTCUT = 'd'
def __init__(self):
threading.Thread.__init__(self)
Logger.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
self.logger.exception('')
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.logger.info("jnius detach")
self.logger.info("stopped")
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
_profiler_logger = _logger.getChild('profiler')
def profiler(func):
def do_profile(args, kw_args):
name = func.__qualname__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
_profiler_logger.debug(f"{name} {t:,.4f}")
return o
return lambda *args, **kw_args: do_profile(args, kw_args)
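# Illustrative note (added for clarity; not part of the original module): applying
# @profiler to an ordinary callable logs its wall-clock runtime at debug level, e.g.
#
#   @profiler
#   def load_transactions(wallet):     # hypothetical function name
#       ...
#
# would emit a debug line such as "load_transactions 0.1234" via the 'profiler' child logger.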
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def ensure_sparse_file(filename):
# On modern Linux, no need to do anything.
# On Windows, need to explicitly mark file.
if os.name == "nt":
try:
os.system('fsutil sparse setflag "{}" 1'.format(filename))
except Exception as e:
_logger.info(f'error marking file {filename} as sparse: {e}')
def get_headers_dir(config):
return config.path
def assert_datadir_available(config_path):
path = config_path
if os.path.exists(path):
return
else:
raise FileNotFoundError(
'EXOS-Electrum datadir does not exist. Was it deleted while running?' + '\n' +
'Should be at {}'.format(path))
def assert_file_in_datadir_available(path, config_path):
if os.path.exists(path):
return
else:
assert_datadir_available(config_path)
raise FileNotFoundError(
'Cannot find file but datadir is there.' + '\n' +
'Should be at {}'.format(path))
def standardize_path(path):
return os.path.normcase(os.path.realpath(os.path.abspath(path)))
def get_new_wallet_name(wallet_folder: str) -> str:
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
return filename
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc) -> str:
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8') -> bytes:
"""
cast string to bytes() like object, but for python2 support it's bytearray copy
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
def bh2u(x: bytes) -> str:
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020A'
"""
return x.hex()
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".exos-electrum")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "EXOS-Electrum")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "EXOS-Electrum")
else:
#raise Exception("No home directory found in environment variables.")
return
def resource_path(*parts):
return os.path.join(pkg_dir, *parts)
# absolute path to python package folder of electrum ("lib")
pkg_dir = os.path.split(os.path.realpath(__file__))[0]
def is_valid_email(s):
regexp = r"[^@]+@[^@]+\.[^@]+"
return re.match(regexp, s) is not None
def is_hash256_str(text: Any) -> bool:
if not isinstance(text, str): return False
if len(text) != 64: return False
return is_hex_str(text)
def is_hex_str(text: Any) -> bool:
if not isinstance(text, str): return False
try:
bytes.fromhex(text)
except:
return False
return True
def is_non_negative_integer(val) -> bool:
try:
val = int(val)
if val >= 0:
return True
except:
pass
return False
def chunks(items, size: int):
"""Break up items, an iterable, into chunks of length size."""
if size < 1:
raise ValueError(f"size must be positive, not {repr(size)}")
for i in range(0, len(items), size):
yield items[i: i + size]
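# Illustrative usage sketch (added for clarity; not part of the original module and
# never called by it):
def _chunks_usage_sketch():
    assert list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
    # note: `items` must support len() and slicing, so sequences rather than
    # arbitrary iterables are expected despite the docstring's wording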
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
DECIMAL_POINT = localeconv()['decimal_point']
def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None, is_diff=False, whitespaces=False):
if x is None:
return 'unknown'
if precision is None:
precision = decimal_point
# format string
decimal_format = "." + str(precision) if precision > 0 else ""
if is_diff:
decimal_format = '+' + decimal_format
# initial result
scale_factor = pow(10, decimal_point)
if not isinstance(x, Decimal):
x = Decimal(x).quantize(Decimal('1E-8'))
result = ("{:" + decimal_format + "f}").format(x / scale_factor)
if "." not in result: result += "."
result = result.rstrip('0')
# extra decimal places
integer_part, fract_part = result.split(".")
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + DECIMAL_POINT + fract_part
# leading/trailing whitespaces
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
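# Illustrative usage sketch (added for clarity; not part of the original module and
# never called by it). The full output depends on the locale's decimal point: with a
# '.' locale, format_satoshis(1234500) == '0.012345' and
# format_satoshis(1234500, is_diff=True) == '+0.012345'.
def _format_satoshis_sketch():
    assert format_satoshis(None) == 'unknown'   # locale-independent branch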
FEERATE_PRECISION = 1 # num fractional decimal places for exo/byte fee rates
_feerate_quanta = Decimal(10) ** (-FEERATE_PRECISION)
def format_fee_satoshis(fee, *, num_zeros=0, precision=None):
if precision is None:
precision = FEERATE_PRECISION
num_zeros = min(num_zeros, FEERATE_PRECISION) # no more zeroes than available prec
return format_satoshis(fee, num_zeros=num_zeros, decimal_point=0, precision=precision)
def quantize_feerate(fee):
"""Strip exo/byte fee rate of excess precision."""
if fee is None:
return None
return Decimal(fee).quantize(_feerate_quanta, rounding=decimal.ROUND_HALF_DOWN)
def timestamp_to_datetime(timestamp):
if timestamp is None:
return None
return datetime.fromtimestamp(timestamp)
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'BlockEXOS': ('https://blockexplorer.exos.to/#exos/',
{'tx': 'transactions/', 'addr': 'addresses/'}),
'cryptoID.info': ('https://chainz.cryptoid.info/exos/',
{'tx': 'tx.dws?', 'addr': 'address.dws?'}),
'system default': ('blockchain:/',
{'tx': 'tx/', 'addr': 'address/'}),
}
testnet_block_explorers = {
'BlockEXOS': ('https://blockexplorer.exos.to/#/texos/',
{'tx': 'transactions/', 'addr': 'addresses/'}),
'system default': ('blockchain://0000059bb2c2048493efcb0f1a034972b3ce4089d54c93b69aaab212fb369887/',
{'tx': 'tx/', 'addr': 'address/'}),
}
def block_explorer_info():
from . import constants
return mainnet_block_explorers if not constants.net.TESTNET else testnet_block_explorers
def block_explorer(config: 'SimpleConfig') -> str:
from . import constants
default_ = 'BlockEXOS'
be_key = config.get('block_explorer', default_)
be = block_explorer_info().get(be_key)
return be_key if be is not None else default_
def block_explorer_tuple(config: 'SimpleConfig') -> Optional[Tuple[str, dict]]:
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config: 'SimpleConfig', kind: str, item: str) -> Optional[str]:
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
explorer_url, explorer_dict = be_tuple
kind_str = explorer_dict.get(kind)
if kind_str is None:
return
url_parts = [explorer_url, kind_str, item]
return ''.join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
class InvalidBitcoinURI(Exception): pass
def parse_URI(uri: str, on_pr: Callable = None, *, loop=None) -> dict:
"""Raises InvalidBitcoinURI on malformed URI."""
from . import bitcoin
from .bitcoin import COIN
if not isinstance(uri, str):
raise InvalidBitcoinURI(f"expected string, not {repr(uri)}")
if ':' not in uri:
if not bitcoin.is_address(uri):
raise InvalidBitcoinURI("Not an EXOS address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'exos':
raise InvalidBitcoinURI("Not an EXOS URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v) != 1:
raise InvalidBitcoinURI(f'Duplicate Key: {repr(k)}')
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise InvalidBitcoinURI(f"Invalid EXOS address: {address}")
out['address'] = address
if 'amount' in out:
am = out['amount']
try:
m = re.match(r'([0-9.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
except Exception as e:
raise InvalidBitcoinURI(f"failed to parse 'amount' field: {repr(e)}") from e
if 'message' in out:
out['message'] = out['message']
out['memo'] = out['message']
if 'time' in out:
try:
out['time'] = int(out['time'])
except Exception as e:
raise InvalidBitcoinURI(f"failed to parse 'time' field: {repr(e)}") from e
if 'exp' in out:
try:
out['exp'] = int(out['exp'])
except Exception as e:
raise InvalidBitcoinURI(f"failed to parse 'exp' field: {repr(e)}") from e
if 'sig' in out:
try:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
except Exception as e:
raise InvalidBitcoinURI(f"failed to parse 'sig' field: {repr(e)}") from e
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
@log_exceptions
async def get_payment_request():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = await pr.get_payment_request(r)
if on_pr:
on_pr(request)
loop = loop or asyncio.get_event_loop()
asyncio.run_coroutine_threadsafe(get_payment_request(), loop)
return out
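# Illustrative sketch (added for clarity; '<address>' is a placeholder, a real EXOS
# address is required for bitcoin.is_address() to accept it):
#
#   parse_URI('exos:<address>?amount=1.5&message=donation')
#   # -> {'amount': 150000000, 'message': 'donation',
#   #     'address': '<address>', 'memo': 'donation'}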
def create_bip21_uri(addr, amount_sat: Optional[int], message: Optional[str],
*, extra_query_params: Optional[dict] = None) -> str:
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
if extra_query_params is None:
extra_query_params = {}
query = []
if amount_sat:
query.append('amount=%s'%format_satoshis_plain(amount_sat))
if message:
query.append('message=%s'%urllib.parse.quote(message))
for k, v in extra_query_params.items():
if not isinstance(k, str) or k != urllib.parse.quote(k):
raise Exception(f"illegal key for URI: {repr(k)}")
v = urllib.parse.quote(v)
query.append(f"{k}={v}")
p = urllib.parse.ParseResult(scheme='exos', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return str(urllib.parse.urlunparse(p))
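# Illustrative sketch (added for clarity; '<address>' is a placeholder that must pass
# bitcoin.is_address, otherwise an empty string is returned):
#
#   create_bip21_uri('<address>', 150000000, 'donation')
#   # -> 'exos:<address>?amount=1.5&message=donation'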
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
def setup_thread_excepthook():
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
init_original = threading.Thread.__init__
def init(self, *args, **kwargs):
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
def send_exception_to_crash_reporter(e: BaseException):
sys.excepthook(type(e), e, e.__traceback__)
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def import_meta(path, validater, load_meta):
try:
with open(path, 'r', encoding='utf-8') as f:
d = validater(json.loads(f.read()))
load_meta(d)
#backwards compatibility for JSONDecodeError
except ValueError:
_logger.exception('')
raise FileImportFailed(_("Invalid JSON code."))
except BaseException as e:
_logger.exception('')
raise FileImportFailed(e)
def export_meta(meta, fileName):
try:
with open(fileName, 'w+', encoding='utf-8') as f:
json.dump(meta, f, indent=4, sort_keys=True)
except (IOError, os.error) as e:
_logger.exception('')
raise FileExportFailed(e)
def make_dir(path, allow_symlink=True):
"""Make directory if it does not yet exist."""
if not os.path.exists(path):
if not allow_symlink and os.path.islink(path):
raise Exception('Dangling link: ' + path)
os.mkdir(path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
def log_exceptions(func):
"""Decorator to log AND re-raise exceptions."""
assert asyncio.iscoroutinefunction(func), 'func needs to be a coroutine'
async def wrapper(*args, **kwargs):
self = args[0] if len(args) > 0 else None
try:
return await func(*args, **kwargs)
except asyncio.CancelledError as e:
raise
except BaseException as e:
mylogger = self.logger if hasattr(self, 'logger') else _logger
try:
mylogger.exception(f"Exception in {func.__name__}: {repr(e)}")
except BaseException as e2:
print(f"logging exception raised: {repr(e2)}... orig exc: {repr(e)} in {func.__name__}")
raise
return wrapper
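# Illustrative sketch (added for clarity): the decorator only accepts coroutine
# functions (see the assert above), so a minimal, hypothetical use looks like
#
#   @log_exceptions
#   async def sync_headers(self):    # hypothetical coroutine
#       ...                          # exceptions are logged via self.logger (or _logger) and re-raised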
def ignore_exceptions(func):
"""Decorator to silently swallow all exceptions."""
assert asyncio.iscoroutinefunction(func), 'func needs to be a coroutine'
async def wrapper(*args, **kwargs):
try:
return await func(*args, **kwargs)
except BaseException as e:
pass
return wrapper
class TxMinedInfo(NamedTuple):
height: int # height of block that mined tx
conf: Optional[int] = None # number of confirmations (None means unknown)
timestamp: Optional[int] = None # timestamp of block that mined tx
txpos: Optional[int] = None # position of tx in serialized block
header_hash: Optional[str] = None # hash of block that mined tx
def make_aiohttp_session(proxy: Optional[dict], headers=None, timeout=None):
if headers is None:
headers = {'User-Agent': 'Electrum'}
if timeout is None:
timeout = aiohttp.ClientTimeout(total=30)
elif isinstance(timeout, (int, float)):
timeout = aiohttp.ClientTimeout(total=timeout)
alt_context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=ca_path)
ssl_context = SSLContextSafe.get_context(alt_context=alt_context)
if proxy:
connector = SocksConnector(
socks_ver=SocksVer.SOCKS5 if proxy['mode'] == 'socks5' else SocksVer.SOCKS4,
host=proxy['host'],
port=int(proxy['port']),
username=proxy.get('user', None),
password=proxy.get('password', None),
rdns=True,
ssl=ssl_context,
)
else:
connector = aiohttp.TCPConnector(ssl=ssl_context)
return aiohttp.ClientSession(headers=headers, timeout=timeout, connector=connector)
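# Illustrative sketch (added for clarity; the URL is a placeholder). The returned object
# is a plain aiohttp.ClientSession and is normally used as an async context manager:
#
#   async with make_aiohttp_session(proxy=None) as session:
#       async with session.get('https://example.com/') as resp:
#           body = await resp.text()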
class SilentTaskGroup(TaskGroup):
def spawn(self, *args, **kwargs):
# don't complain if group is already closed.
if self._closed:
raise asyncio.CancelledError()
return super().spawn(*args, **kwargs)
class NetworkJobOnDefaultServer(Logger):
"""An abstract base class for a job that runs on the main network
interface. Every time the main interface changes, the job is
restarted, and some of its internals are reset.
"""
def __init__(self, network: 'Network'):
Logger.__init__(self)
asyncio.set_event_loop(network.asyncio_loop)
self.network = network
self.interface = None # type: Interface
self._restart_lock = asyncio.Lock()
self._reset()
asyncio.run_coroutine_threadsafe(self._restart(), network.asyncio_loop)
network.register_callback(self._restart, ['default_server_changed'])
def _reset(self):
"""Initialise fields. Called every time the underlying
server connection changes.
"""
self.group = SilentTaskGroup()
async def _start(self, interface: 'Interface'):
self.interface = interface
await interface.group.spawn(self._start_tasks)
async def _start_tasks(self):
"""Start tasks in self.group. Called every time the underlying
server connection changes.
"""
raise NotImplementedError() # implemented by subclasses
async def stop(self):
self.network.unregister_callback(self._restart)
await self._stop()
async def _stop(self):
await self.group.cancel_remaining()
@log_exceptions
async def _restart(self, *args):
interface = self.network.interface
if interface is None:
return # we should get called again soon
async with self._restart_lock:
await self._stop()
self._reset()
await self._start(interface)
@property
def session(self):
s = self.interface.session
assert s is not None
return s
def create_and_start_event_loop() -> Tuple[asyncio.AbstractEventLoop,
asyncio.Future,
threading.Thread]:
def on_exception(loop, context):
"""Suppress spurious messages it appears we cannot control."""
SUPPRESS_MESSAGE_REGEX = re.compile('SSL handshake|Fatal read error on|'
'SSL error in data received')
message = context.get('message')
if message and SUPPRESS_MESSAGE_REGEX.match(message):
return
loop.default_exception_handler(context)
loop = asyncio.get_event_loop()
loop.set_exception_handler(on_exception)
# loop.set_debug(1)
stopping_fut = asyncio.Future()
loop_thread = threading.Thread(target=loop.run_until_complete,
args=(stopping_fut,),
name='EventLoop')
loop_thread.start()
return loop, stopping_fut, loop_thread
class OrderedDictWithIndex(OrderedDict):
"""An OrderedDict that keeps track of the positions of keys.
Note: very inefficient to modify contents, except to add new items.
"""
def __init__(self):
super().__init__()
self._key_to_pos = {}
self._pos_to_key = {}
def _recalc_index(self):
self._key_to_pos = {key: pos for (pos, key) in enumerate(self.keys())}
self._pos_to_key = {pos: key for (pos, key) in enumerate(self.keys())}
def pos_from_key(self, key):
return self._key_to_pos[key]
def value_from_pos(self, pos):
key = self._pos_to_key[pos]
return self[key]
def popitem(self, *args, **kwargs):
ret = super().popitem(*args, **kwargs)
self._recalc_index()
return ret
def move_to_end(self, *args, **kwargs):
ret = super().move_to_end(*args, **kwargs)
self._recalc_index()
return ret
def clear(self):
ret = super().clear()
self._recalc_index()
return ret
def pop(self, *args, **kwargs):
ret = super().pop(*args, **kwargs)
self._recalc_index()
return ret
def update(self, *args, **kwargs):
ret = super().update(*args, **kwargs)
self._recalc_index()
return ret
def __delitem__(self, *args, **kwargs):
ret = super().__delitem__(*args, **kwargs)
self._recalc_index()
return ret
def __setitem__(self, key, *args, **kwargs):
is_new_key = key not in self
ret = super().__setitem__(key, *args, **kwargs)
if is_new_key:
pos = len(self) - 1
self._key_to_pos[key] = pos
self._pos_to_key[pos] = key
return ret
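# Illustrative usage sketch (added for clarity; not part of the original module and
# never called by it):
def _ordered_dict_with_index_sketch():
    d = OrderedDictWithIndex()
    d['a'] = 1
    d['b'] = 2
    assert d.pos_from_key('b') == 1
    assert d.value_from_pos(0) == 1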
def multisig_type(wallet_type):
'''If wallet_type is mofn multi-sig, return [m, n],
otherwise return None.'''
if not wallet_type:
return None
match = re.match(r'(\d+)of(\d+)', wallet_type)
if match:
match = [int(x) for x in match.group(1, 2)]
return match
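# Illustrative usage sketch (added for clarity; not part of the original module and
# never called by it):
def _multisig_type_sketch():
    assert multisig_type('2of3') == [2, 3]
    assert multisig_type('standard') is None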
class SSLContextSafe:
@classmethod
def get_context(self, alt_context: ssl.SSLContext=None) -> ssl.SSLContext:
""" Returns a known path for cert trust store on platforms with
known issues validating certificate chains, or other
"""
context = alt_context
if sys.platform == 'darwin':
context = ssl.SSLContext()
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
v, _, _ = platform.mac_ver()
v = float('.'.join(v.split('.')[:2]))
if v >= 10.12:
if os.path.exists('/private/etc/ssl/cert.pem'):
context.load_verify_locations(cafile='/private/etc/ssl/cert.pem')
else:
context.load_verify_locations(cafile=certifi.where())
return context
| 31.130167
| 116
| 0.627942
|
2ebbd40b43347112ed0f71e67a530f2e3667aaaa
| 5,375
|
py
|
Python
|
meet_connect.py
|
kanishk-mahor/meetConnect
|
952d6cc1ab4478a7afa8b4c6e4383b2608d6f918
|
[
"MIT"
] | null | null | null |
meet_connect.py
|
kanishk-mahor/meetConnect
|
952d6cc1ab4478a7afa8b4c6e4383b2608d6f918
|
[
"MIT"
] | null | null | null |
meet_connect.py
|
kanishk-mahor/meetConnect
|
952d6cc1ab4478a7afa8b4c6e4383b2608d6f918
|
[
"MIT"
] | null | null | null |
# ======================================================================================================================
# Copyright (C) 2021 Kanishk Mahor - All rights reserved
# ========================================================================================
# Notice: All Rights Reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# ======================================================================================================================
import datetime
import warnings
import time
import re
import sched
import win32com.client
# --------------------
# pip install selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
# ---------------------
# pip install pywinauto
from pywinauto.keyboard import send_keys
from pywinauto.timings import wait_until_passes
from pywinauto.application import Application
# ------------------------------------------------------------------------------------------------
# Chrome web browser driver path: you can download it from https://chromedriver.chromium.org/downloads
PATH = "C:/Program Files/chromedriver.exe"
# --------------------------
# creating schedule instance
schedule = sched.scheduler(time.time, time.sleep)
# ----------------------------------
# Outlook calendar API to get events
def get_calender():
outlook = win32com.client.Dispatch(
'Outlook.Application').GetNamespace('MAPI')
calender = outlook.getDefaultFolder(9).Items
# Including recurring events
calender.Sort('[Start]')
# ----------today date-----------
today = datetime.datetime.today()
begin = today.date().strftime("%d/%m/%Y")
# -------tomorrow date from today----------
tomorrow = datetime.timedelta(days=1)+today
end = tomorrow.date().strftime("%d/%m/%Y")
    # -------------restrict calendar events to today only ---------------
restriction = "[Start] >= '" + begin + "' AND [END] <= '" + end + "'"
calender = calender.Restrict(restriction)
events = {'Start': [], 'Subject': [], 'Body': []}
for a in calender:
events['Start'].append((a.start).strftime("%H:%M"))
events['Subject'].append(a.Subject)
events['Body'].append(a.body)
return events
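# Illustrative note (added for clarity; the subjects and times below are invented
# examples): get_calender() returns today's appointments as three parallel lists, e.g.
#
#   {'Start': ['10:30', '14:00'],
#    'Subject': ['Daily standup', 'Design review'],
#    'Body': ['...invitation text containing the meeting link...', '...']}
#
# 'Start' is formatted as "%H:%M" and 'Body' is what join() later scans for the URL.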
# ----------------------------
# join meeting at meeting time
def join(calender_return, current_time):
    # ----List of all of today's meetings-----
meet = list(calender_return['Start'])
# ----index of current meeting----
to_join = meet.index(current_time)
# -extracting body content of current meeting-
link1 = list(calender_return['Body'])[to_join]
# -------------------------Parsing url from body-----------------------
link_to_go = re.search("(?P<url>https?://[^\\s]+)", link1).group("url")
link_to_go = link_to_go[:-1]
    # wait for one minute before joining the meeting
time.sleep(60)
    # -Handling the handshake errors-
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
options.add_argument('--ignore-ssl-errors')
options.add_experimental_option('excludeSwitches', ['enable-logging'])
# ---------creating webdriver instance----------
driver = webdriver.Chrome(PATH, options=options)
# opening link in webbrowser
driver.get(link_to_go)
app_chrome = Application().connect(
title_re='.*Start Your Meeting.*')
app_chrome_window = app_chrome.window(title_re='.*Start Your Meeting.*')
if app_chrome_window.exists():
app_chrome_window.set_focus()
        # wait until the link has loaded
WebDriverWait(driver, 60)
        # Open the meeting in the Windows app
send_keys("{LEFT}")
send_keys("{ENTER}")
# -----------------------------------------------------------------------------------------------------------
    # A workaround is needed to open the meeting in the browser if the app is not installed or you don't want to use the Windows app
# -----------------------------------------------------------------------------------------------------------
    # -----------handling warnings, if any-------------
warnings.simplefilter('ignore', category=UserWarning)
    # --------Connect to the Cisco Webex Meetings app----------
try:
app = wait_until_passes(10, 1, lambda: Application().connect(
title_re=".*Meetings"))
app_window = app.window(title_re=".*Meetings")
# Close chrome tab and connect to meeting once app is connected
if app_window.exists():
app_window.set_focus()
time.sleep(7)
send_keys("{ENTER}")
send_keys("{ENTER}")
driver.close()
except Exception as e:
print(
f"{type(e).__name__} at line {e.__traceback__.tb_lineno} of {__file__}: {e}")
# Schedule the Outlook calendar fetch to run again after 15 minutes (900 seconds)
schedule.enter(900, 1, get_calender, ())
while(1):
schedule.run()
cal = get_calender()
meet = list(cal['Start'])
nowTime = datetime.datetime.now().strftime("%H:%M")
if nowTime in meet:
try:
join(cal, nowTime)
cal = {}
except Exception as e:
print(
f"{type(e).__name__} at line {e.__traceback__.tb_lineno} of {__file__}: {e}")
| 35.361842
| 120
| 0.546233
|
dee5d07e33ad1532159bba22f133ba65e292ade6
| 46,052
|
py
|
Python
|
3DLSCPTR/db/apollosim.py
|
liuruijin17/3DLSCPTR
|
271ec46187f1674049c88457dbdd38fc299c723a
|
[
"BSD-3-Clause"
] | 13
|
2021-04-12T11:13:18.000Z
|
2022-03-30T01:53:08.000Z
|
3DLSCPTR/db/apollosim.py
|
liuruijin17/3DLSCP
|
271ec46187f1674049c88457dbdd38fc299c723a
|
[
"BSD-3-Clause"
] | 1
|
2021-12-10T08:49:49.000Z
|
2021-12-11T09:04:37.000Z
|
3DLSCPTR/db/apollosim.py
|
liuruijin17/3DLSCP
|
271ec46187f1674049c88457dbdd38fc299c723a
|
[
"BSD-3-Clause"
] | 4
|
2021-04-14T14:31:47.000Z
|
2022-02-16T05:30:09.000Z
|
import sys
import json
import os
import numpy as np
import pickle
import cv2
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('TkAgg')
from mpl_toolkits.mplot3d import Axes3D
from tabulate import tabulate
from torchvision.transforms import ToTensor
import torchvision.transforms.functional as F
from copy import deepcopy
from scipy.interpolate import interp1d
import imgaug.augmenters as iaa
from imgaug.augmenters import Resize
from imgaug.augmentables.lines import LineString, LineStringsOnImage
from db.detection import DETECTION
from config import system_configs
from db.tools import eval_3D_lane
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
DARK_GREEN = (115, 181, 34)
YELLOW = (0, 255, 255)
ORANGE = (0, 165, 255)
PURPLE = (255, 0, 255)
PINK = (180, 105, 255)
CYAN = (255, 128, 0)
CHOCOLATE = (30, 105, 210)
PEACHPUFF = (185, 218, 255)
STATEGRAY = (255, 226, 198)
GT_COLOR = [PINK, CYAN, ORANGE, YELLOW, BLUE]
PRED_COLOR = [RED, GREEN, DARK_GREEN, PURPLE, CHOCOLATE, PEACHPUFF, STATEGRAY]
PRED_HIT_COLOR = GREEN
PRED_MISS_COLOR = RED
IMAGENET_MEAN = np.array([0.485, 0.456, 0.406])
IMAGENET_STD = np.array([0.229, 0.224, 0.225])
class APOLLOSIM(DETECTION):
def __init__(self, db_config, split, is_eval=False, is_resample=True, is_predcam=False):
super(APOLLOSIM, self).__init__(db_config)
data_dir = system_configs.data_dir
# result_dir = system_configs.result_dir
cache_dir = system_configs.cache_dir
max_lanes = system_configs.max_lanes
self.metric = 'default'
self.is_resample = is_resample
print('is_resample: {}'.format(is_resample))
inp_h, inp_w = db_config['input_size']
# define image pre-processor
# self.totensor = transforms.ToTensor()
# self.normalize = transforms.Normalize(args.vgg_mean, args.vgg_std)
# self.data_aug = data_aug # False
# dataset parameters
# dataset_name = 'standard' # illus_chg/rare_subset/standard
self.dataset_name = system_configs.dataset_name # illus_chg
self.no_3d = False
self.no_centerline = True
self.h_org = 1080
self.w_org = 1920
self.org_h = 1080
self.org_w = 1920
self.h_crop = 0
self.crop_y = 0
# parameters related to service network
self.h_net = inp_h
self.w_net = inp_w
self.resize_h = inp_h
self.resize_w = inp_w
self.ipm_h = 208
self.ipm_w = 128
self.top_view_region = np.array([[-10, 103], [10, 103], [-10, 3], [10, 3]])
self.K = np.array([[2015., 0., 960.], [0., 2015., 540.], [0., 0., 1.]])
self.H_crop_ipm = self.homography_crop_resize([self.h_org, self.w_org], self.h_crop, [self.h_net, self.w_net])
self.H_crop_im = self.homography_crop_resize([self.h_org, self.w_org], self.h_crop, [self.h_org, self.w_org])
# org2resized+cropped
self.H_ipm2g = cv2.getPerspectiveTransform(
np.float32([[0, 0], [self.ipm_w - 1, 0], [0, self.ipm_h - 1], [self.ipm_w - 1, self.ipm_h - 1]]),
np.float32(self.top_view_region))
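        # Clarifying note (added; restates the two statements above, no behavioural change):
        # H_ipm2g maps the IPM image corners, in pixels,
        #     (0, 0), (ipm_w-1, 0), (0, ipm_h-1), (ipm_w-1, ipm_h-1)
        # to the corners of top_view_region in ground coordinates (meters),
        #     (-10, 103), (10, 103), (-10, 3), (10, 3)
        # i.e. the top row of the IPM image lies 103 m ahead of the camera, the bottom
        # row 3 m ahead, and the columns span x = -10 m ... 10 m.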
self.fix_cam = False
x_min = self.top_view_region[0, 0] # -10
x_max = self.top_view_region[1, 0] # 10
self.x_min = x_min # -10
self.x_max = x_max # 10
self.anchor_y_steps = [ 5, 10, 15, 20, 30, 40, 50, 60, 80, 100]
self.y_min = self.top_view_region[2, 1]
self.y_max = self.top_view_region[0, 1]
if self.is_resample:
self.gflatYnorm = self.anchor_y_steps[-1]
self.gflatZnorm = 10
self.gflatXnorm = 30
else:
self.gflatYnorm = 200
self.gflatZnorm = 1
self.gflatXnorm = 20
self.pitch = 3 # pitch angle of camera to ground in centi degree
self.cam_height = 1.55 # height of camera in meters
self.batch_size = system_configs.batch_size
if self.no_centerline: # False
self.num_types = 1
else:
self.num_types = 3
if self.is_resample:
self.sample_hz = 1
else:
self.sample_hz = 4
self._split = split
self._dataset = {
"train": ['train'],
"test": ['test'],
"sub_train": ['sub_train'],
"validation": ['validation']
}[self._split]
self.root = os.path.join(data_dir, 'Apollo_Sim_3D_Lane_Release')
data_dir = os.path.join(self.root, 'data_splits', self.dataset_name)
if self.root is None:
raise Exception('Please specify the root directory')
        self.img_w, self.img_h = self.w_org, self.h_org  # apollo sim original image resolution (1920 x 1080)
self.max_2dlanes = 0
self.max_gflatlanes = 0
self.max_3dlanes = 0
self.max_2dpoints = 0
self.max_gflatpoints = 0
self.max_3dpoints = 0
self.X3d, self.Y3d, self.Z3d = [0, 0], [0, 0], [0, 0]
self.Xgflat, self.Ygflat = [0, 0], [0, 0]
self.normalize = True
self.to_tensor = ToTensor()
self.aug_chance = 0.9090909090909091
self._image_file = []
self.augmentations = [{'name': 'Affine', 'parameters': {'rotate': (-10, 10)}},
{'name': 'HorizontalFlip', 'parameters': {'p': 0.5}},
{'name': 'CropToFixedSize', 'parameters': {'height': 972, 'width': 1728}}]
# Force max_lanes, used when evaluating testing with models trained on other datasets
# if max_lanes is not None:
# self.max_lanes = max_lanes
self.anno_files = [os.path.join(data_dir, path + '.json') for path in self._dataset]
self._data = "apollosim"
self._mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32)
self._std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571], dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
self._cat_ids = [
0
] # 0 car
self._classes = {
ind + 1: cat_id for ind, cat_id in enumerate(self._cat_ids)
}
self._coco_to_class_map = {
value: key for key, value in self._classes.items()
}
self._cache_file = os.path.join(cache_dir, "apollosim_{}.pkl".format(self._dataset))
if self.augmentations is not None:
augmentations = [getattr(iaa, aug['name'])(**aug['parameters'])
for aug in self.augmentations] # add augmentation
transformations = iaa.Sequential([Resize({'height': inp_h, 'width': inp_w})])
self.transform = iaa.Sequential([iaa.Sometimes(then_list=augmentations, p=self.aug_chance), transformations])
if is_eval:
self._load_eval_data()
else:
self._load_data()
self._db_inds = np.arange(len(self._image_ids))
def _load_data(self, debug_lane=False):
print("loading from cache file: {}".format(self._cache_file))
if not os.path.exists(self._cache_file):
print("No cache file found...")
self._extract_data()
self._transform_annotations()
if debug_lane:
pass
else:
with open(self._cache_file, "wb") as f:
pickle.dump([self._annotations,
self._image_ids,
self._image_file,
self.max_2dlanes, self.max_3dlanes, self.max_gflatlanes,
self.max_2dpoints, self.max_3dpoints, self.max_gflatpoints,
self.X3d, self.Y3d, self.Z3d,
self.Xgflat, self.Ygflat], f)
else:
with open(self._cache_file, "rb") as f:
(self._annotations,
self._image_ids,
self._image_file,
self.max_2dlanes, self.max_3dlanes, self.max_gflatlanes,
self.max_2dpoints, self.max_3dpoints, self.max_gflatpoints,
self.X3d, self.Y3d, self.Z3d,
self.Xgflat, self.Ygflat) = pickle.load(f)
assert self.max_2dlanes == self.max_3dlanes
assert self.max_3dlanes == self.max_gflatlanes
assert self.max_2dpoints == self.max_3dpoints
assert self.max_3dpoints == self.max_gflatpoints
print('{}.max_2dlanes: {}\n'
'{}.max_3dlanes: {}\n'
'{}.max_gflatlanes: {}\n'
'{}.max_2dpoints: {}\n'
'{}.max_3dpoints: {}\n'
'{}.max_gflatpoints: {}\n'
'{}.X3d: {}\n'
'{}.Y3d: {}\n'
'{}.Z3d: {}\n'
'{}.Xgflat: {}\n'
'{}.Ygflat: {}'.format(self.dataset_name, self.max_2dlanes,
self.dataset_name, self.max_3dlanes,
self.dataset_name, self.max_gflatlanes,
self.dataset_name, self.max_2dpoints,
self.dataset_name, self.max_3dpoints,
self.dataset_name, self.max_gflatpoints,
self.dataset_name, self.X3d,
self.dataset_name, self.Y3d,
self.dataset_name, self.Z3d,
self.dataset_name, self.Xgflat,
self.dataset_name, self.Ygflat))
def _extract_data(self):
image_id = 0
max_2dlanes, max_3dlanes, max_gflatlanes = 0, 0, 0
self._old_annotations = {}
for anno_file in self.anno_files:
with open(anno_file, 'r') as anno_obj:
for line in anno_obj:
info_dict = json.loads(line)
# dict_keys(['raw_file', 'cam_height', 'cam_pitch',
# 'centerLines', 'laneLines', 'centerLines_visibility', 'laneLines_visibility'])
gt_lane_pts = info_dict['laneLines']
if len(gt_lane_pts) < 1:
continue
gt_lane_visibility = info_dict['laneLines_visibility']
image_path = os.path.join(self.root, info_dict['raw_file'])
assert os.path.exists(image_path), '{:s} not exist'.format(image_path)
# if not self.fix_cam:
gt_cam_height = info_dict['cam_height']
gt_cam_pitch = info_dict['cam_pitch']
P_g2im = self.projection_g2im(gt_cam_pitch, gt_cam_height, self.K) # used for x=PX (3D to 2D)
H_g2im = self.homograpthy_g2im(gt_cam_pitch, gt_cam_height, self.K)
H_im2g = np.linalg.inv(H_g2im)
P_g2gflat = np.matmul(H_im2g, P_g2im)
aug_mat = np.identity(3, dtype=np.float)
gt_lanes = []
# org_gt_lanes = []
for i, lane in enumerate(gt_lane_pts):
# A GT lane can be either 2D or 3D
# if a GT lane is 3D, the height is intact from 3D GT, so keep it intact here too
closest_point = lane[0]
remotest_point = lane[-1]
sampled_points = lane[1:-1:self.sample_hz]
sampled_points.insert(0, closest_point)
sampled_points.append(remotest_point)
lane = np.array(sampled_points)
# lane = np.array(lane[::self.sample_hz])
closest_viz = gt_lane_visibility[i][0]
remotest_viz = gt_lane_visibility[i][-1]
sampled_viz = gt_lane_visibility[i][1:-1:self.sample_hz]
sampled_viz.insert(0, closest_viz)
sampled_viz.append(remotest_viz)
lane_visibility = np.array(sampled_viz)
# lane_visibility = np.array(gt_lane_visibility[i][::self.sample_hz])
# prune gt lanes by visibility labels
pruned_lane = self.prune_3d_lane_by_visibility(lane, lane_visibility)
                        # pruning out-of-range points is necessary before the transformation (x in [-30, 30] m)
pruned_lane = self.prune_3d_lane_by_range(pruned_lane, 3*self.x_min, 3*self.x_max)
# Resample
if self.is_resample:
if pruned_lane.shape[0] < 2:
continue
# Above code resample 3D points
# print(pruned_lane.shape)
pruned_lane = self.make_lane_y_mono_inc(pruned_lane)
# print(pruned_lane.shape)
if pruned_lane.shape[0] < 2:
continue
x_values, z_values, visibility_vec = self.resample_laneline_in_y(pruned_lane,
self.anchor_y_steps,
out_vis=True)
x_values = x_values[visibility_vec]
z_values = z_values[visibility_vec]
y_values = np.array(self.anchor_y_steps)[visibility_vec]
pruned_lane = np.stack([x_values, y_values, z_values], axis=-1)
# print(pruned_lane.shape);exit()
if pruned_lane.shape[0] > 1:
gt_lanes.append(pruned_lane)
# save the gt 3d lanes
gt_3dlanes = deepcopy(gt_lanes)
                    # convert 3D lanes to flat ground space (x_bar, y_bar, Z); units appear to be meters
self.convert_lanes_3d_to_gflat(gt_lanes, P_g2gflat)
gflatlanes = []
real_gt_3dlanes = []
for i in range(len(gt_lanes)):
gflatlane = gt_lanes[i]
gt_3dlane = gt_3dlanes[i]
valid_indices = np.logical_and(np.logical_and(gflatlane[:, 1] > 0, gflatlane[:, 1] < 200),
np.logical_and(gflatlane[:, 0] > 3 * self.x_min, gflatlane[:, 0] < 3 * self.x_max))
gflatlane = gflatlane[valid_indices, ...]
gt_3dlane = gt_3dlane[valid_indices, ...]
if gflatlane.shape[0] < 2 or np.sum(np.logical_and(gflatlane[:, 0] > self.x_min, gflatlane[:, 0] < self.x_max)) < 2:
continue
gflatlanes.append(gflatlane)
real_gt_3dlanes.append(gt_3dlane)
P_gt = np.matmul(self.H_crop_im, H_g2im)
P_gt = np.matmul(aug_mat, P_gt)
lanes = []
for i in range(len(gflatlanes)):
gflatlane = gflatlanes[i]
x_2d, y_2d = self.homographic_transformation(P_gt, gflatlane[:, 0], gflatlane[:, 1])
assert gflatlane.shape[0] == x_2d.shape[0]
assert x_2d.shape[0] == y_2d.shape[0]
# lanes.append([(x, y) for (x, y) in zip(x_2d, y_2d) if x >= 0])
lanes.append([(x, y) for (x, y) in zip(x_2d, y_2d)])
lanes = [lane for lane in lanes if len(lane) > 0]
if not len(lanes):
continue
self._image_file.append(image_path)
self._image_ids.append(image_id)
max_2dlanes = max(max_2dlanes, len(lanes))
self.max_2dlanes = max_2dlanes
max_gflatlanes = max(max_gflatlanes, len(gflatlanes))
self.max_gflatlanes = max_gflatlanes
max_3dlanes = max(max_3dlanes, len(real_gt_3dlanes))
self.max_3dlanes = max_3dlanes
self.max_2dpoints = max(self.max_2dpoints, max([len(l) for l in lanes]))
self.max_gflatpoints = max(self.max_gflatpoints, max([len(l) for l in gflatlanes]))
self.max_3dpoints = max(self.max_3dpoints, max([len(l) for l in real_gt_3dlanes]))
self.X3d[1] = max(self.X3d[1], max([np.max(l[:, 0]) for l in real_gt_3dlanes]))
self.X3d[0] = min(self.X3d[0], min([np.min(l[:, 0]) for l in real_gt_3dlanes]))
self.Y3d[1] = max(self.Y3d[1], max([np.max(l[:, 1]) for l in real_gt_3dlanes]))
self.Y3d[0] = min(self.Y3d[0], min([np.min(l[:, 1]) for l in real_gt_3dlanes]))
self.Z3d[1] = max(self.Z3d[1], max([np.max(l[:, 2]) for l in real_gt_3dlanes]))
self.Z3d[0] = min(self.Z3d[0], min([np.min(l[:, 2]) for l in real_gt_3dlanes]))
self.Xgflat[1] = max(self.Xgflat[1], max([np.max(l[:, 0]) for l in gflatlanes]))
self.Xgflat[0] = min(self.Xgflat[0], min([np.min(l[:, 0]) for l in gflatlanes]))
self.Ygflat[1] = max(self.Ygflat[1], max([np.max(l[:, 1]) for l in gflatlanes]))
self.Ygflat[0] = min(self.Ygflat[0], min([np.min(l[:, 1]) for l in gflatlanes]))
self._old_annotations[image_id] = {
'path': image_path,
'gt_2dlanes': lanes,
'gt_3dlanes': real_gt_3dlanes,
'gt_gflatlanes': gflatlanes,
'aug': False,
'relative_path': info_dict['raw_file'],
'gt_camera_pitch': gt_cam_pitch,
'gt_camera_height': gt_cam_height,
'json_line': info_dict,
}
image_id += 1
def _transform_annotation(self, anno, img_wh=None):
if img_wh is None:
img_h = self._get_img_heigth(anno['path'])
img_w = self._get_img_width(anno['path'])
else:
img_w, img_h = img_wh
gt_2dlanes = anno['gt_2dlanes']
gt_gflatlanes = anno['gt_gflatlanes']
gt_3dlanes = anno['gt_3dlanes']
assert len(gt_2dlanes) == len(gt_gflatlanes)
assert len(gt_3dlanes) == len(gt_gflatlanes)
categories = anno['categories'] if 'categories' in anno else [1] * len(gt_2dlanes)
gt_2dlanes = zip(gt_2dlanes, categories)
# 1+2+(2*self.max_2dpoints)+2+(2*self.max_2dpoints)+(3*self.max_2dpoints)
# c|2d_1|2d_2|u_2d|v_2d|3d_1|3d_2|3d_X|3d_Y|3d_Z|gflat_X|gflat_Y|gflat_Z
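        # Added note spelling out the width arithmetic used below (N = self.max_2dpoints;
        # the asserts in _load_data guarantee N == max_3dpoints == max_gflatpoints):
        #   2D block    : 1 (category) + 2 (lower/upper) + 2*N (xs, ys) -> 3 + 2*N
        #   3D block    : 2 (lower/upper) + 3*N (Xs, Ys, Zs)            -> 2 + 3*N
        #   gflat block : 3*N (Xs, Ys, Zs)                              -> 3*N
        # so each concatenated row is 5 + 8*N long and N can be recovered as (len(row) - 5) // 8.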
lanes = np.ones((self.max_2dlanes, 1+2+self.max_2dpoints*2), dtype=np.float32) * -1e5
lanes3d = np.ones((self.max_2dlanes, 2+self.max_2dpoints*3), dtype=np.float32) * -1e5
lanesgflat = np.ones((self.max_2dlanes, self.max_2dpoints*3), dtype=np.float32) * -1e5
lanes[:, 0] = 0
laneflags = np.ones((self.max_2dlanes, self.max_2dpoints), dtype=np.float32) * -1e-5
# old_lanes = sorted(old_lanes, key=lambda x: x[0][0][0])
for lane_pos, (lane, category) in enumerate(gt_2dlanes):
lower, upper = lane[0][1], lane[-1][1]
xs = np.array([p[0] for p in lane]) / img_w
ys = np.array([p[1] for p in lane]) / img_h
lanes[lane_pos, 0] = category
lanes[lane_pos, 1] = lower / img_h
lanes[lane_pos, 2] = upper / img_h
lanes[lane_pos, 1+2:1+2+len(xs)] = xs
lanes[lane_pos, (1+2+self.max_2dpoints):(1+2+self.max_2dpoints+len(ys))] = ys
laneflags[lane_pos, :len(xs)] = 1.
gt_3dlane = gt_3dlanes[lane_pos]
assert len(lane) == len(gt_3dlane)
lower, upper = gt_3dlane[0][1], gt_3dlane[-1][1]
Xs = np.array([p[0] for p in gt_3dlane]) / self.gflatXnorm
Ys = np.array([p[1] for p in gt_3dlane]) / self.gflatYnorm
Zs = np.array([p[2] for p in gt_3dlane]) / self.gflatZnorm
lanes3d[lane_pos, 0] = lower / self.gflatYnorm
lanes3d[lane_pos, 1] = upper / self.gflatYnorm
lanes3d[lane_pos, 2:(2+len(Xs))] = Xs
lanes3d[lane_pos, (2+self.max_3dpoints):(2+self.max_3dpoints+len(Ys))] = Ys
lanes3d[lane_pos, (2+self.max_3dpoints*2):(2+self.max_3dpoints*2+len(Zs))] = Zs
gflatlane = gt_gflatlanes[lane_pos]
assert len(lane) == len(gflatlane)
gflat_Xs = np.array([p[0] for p in gflatlane]) / self.gflatXnorm
gflat_Ys = np.array([p[1] for p in gflatlane]) / self.gflatYnorm
gflat_Zs = np.array([p[2] for p in gflatlane]) / self.gflatZnorm
lanesgflat[lane_pos, :len(gflat_Xs)] = gflat_Xs
lanesgflat[lane_pos, self.max_gflatpoints:(self.max_gflatpoints+len(gflat_Ys))] = gflat_Ys
lanesgflat[lane_pos, self.max_gflatpoints*2:(self.max_gflatpoints*2+len(gflat_Ys))] = gflat_Zs
lanes = np.concatenate([lanes, lanes3d, lanesgflat], axis=-1)
new_anno = {
'path': anno['path'],
'gt_2dgflatlabels': lanes,
'gt_2dgflatflags': laneflags,
'old_anno': anno,
'categories': [cat for _, cat in gt_2dlanes],
'gt_camera_pitch': anno['gt_camera_pitch'],
'gt_camera_height': anno['gt_camera_height'],
}
return new_anno
def _transform_annotations(self):
print('Now transforming annotations...')
self._annotations = {}
for image_id, old_anno in self._old_annotations.items():
self._annotations[image_id] = self._transform_annotation(old_anno)
def _load_eval_data(self):
self._extact_eval_data()
self._transform_eval_annotations()
def _extact_eval_data(self):
image_id = 0
self._old_annotations = {}
for anno_file in self.anno_files:
with open(anno_file, 'r') as anno_obj:
for line in anno_obj:
info_dict = json.loads(line)
# dict_keys(['raw_file', 'cam_height', 'cam_pitch',
# 'centerLines', 'laneLines', 'centerLines_visibility', 'laneLines_visibility'])
image_path = os.path.join(self.root, info_dict['raw_file'])
gt_cam_height = info_dict['cam_height']
gt_cam_pitch = info_dict['cam_pitch']
assert os.path.exists(image_path), '{:s} not exist'.format(image_path)
self._image_file.append(image_path)
self._image_ids.append(image_id)
self._old_annotations[image_id] = {
'path': image_path,
'aug': False,
'relative_path': info_dict['raw_file'],
'json_line': info_dict,
'gt_camera_pitch': gt_cam_pitch,
'gt_camera_height': gt_cam_height,
}
image_id += 1
def _transform_eval_annotation(self, anno):
new_anno = {
'path': anno['path'],
'old_anno': anno,
'gt_camera_pitch': anno['gt_camera_pitch'],
'gt_camera_height': anno['gt_camera_height'],
}
return new_anno
def _transform_eval_annotations(self):
print('Now transforming EVALEVALEVAL annotations...')
self._annotations = {}
for image_id, old_anno in self._old_annotations.items():
self._annotations[image_id] = self._transform_eval_annotation(old_anno)
def __getitem__(self, idx, transform=False):
# I think this part is only used when testing
item = self._annotations[idx]
img = cv2.imread(item['path'])
gt_2dflatlabels = item['gt_2dgflatlabels']
gt_2dgflatflags = item['gt_2dgflatflags']
gt_camera_pitch = item['gt_camera_pitch']
gt_camera_height = item['gt_camera_height']
if transform:
raise NotImplementedError
return (img, gt_2dflatlabels, gt_2dgflatflags, gt_camera_pitch, gt_camera_height, idx)
def pred2lanes(self, path, pred, y_samples, camera_height):
ys = np.array(y_samples) / self.gflatYnorm
lanes = []
probs = []
for lane in pred:
if lane[1] == 0:
continue
# pred_height = lane[-2]
# pred_height = lane[-2]
# pred_pitch = lane[-1]
lane_xsys = lane[6:6+4]
lane_zsys = lane[10:10+4]
X_pred = np.polyval(lane_xsys, ys) * self.gflatXnorm
Z_pred = np.polyval(lane_zsys, ys) * self.gflatZnorm
valid_indices = (ys > lane[4]) & (ys < lane[5])
if np.sum(valid_indices) < 2:
continue
X_pred = X_pred[valid_indices]
Y_pred = ys[valid_indices] * self.gflatYnorm
Z_pred = Z_pred[valid_indices]
# X_pred, Y_pred = self.transform_lane_gflat2g(camera_height, X_pred, Y_pred, Z_pred)
lanes.append(np.stack([X_pred, Y_pred, Z_pred], axis=-1).tolist())
probs.append(float(lane[0]))
return lanes, probs
def pred2apollosimformat(self, idx, pred, runtime):
runtime *= 1000. # s to ms
old_anno = self._annotations[idx]['old_anno']
# path = old_anno['path']
relative_path = old_anno['relative_path']
json_line = old_anno['json_line']
gt_camera_height = old_anno['gt_camera_height']
gt_camera_pitch = old_anno['gt_camera_pitch']
pred_cam_height = pred[0, -2]
pred_cam_pitch = pred[0, -1]
self.mae_height += np.abs(pred_cam_height - gt_camera_height)
self.mae_pitch += np.abs(pred_cam_pitch - gt_camera_pitch)
# print(gt_camera_height, gt_camera_pitch)
# print(pred[:, -2:])
# y_samples = self.anchor_y_steps
# y_samples = list((np.linspace(0, 1., num=100) * 200.))
y_samples = list((np.linspace(self.top_view_region[2, 1]/self.gflatYnorm, self.top_view_region[0, 1]/self.gflatYnorm, num=100) * self.gflatYnorm))
pred_lanes, prob_lanes = self.pred2lanes(relative_path, pred, y_samples, gt_camera_height)
json_line["laneLines"] = pred_lanes
json_line["laneLines_prob"] = prob_lanes
json_line["pred_cam_height"] = pred_cam_height
json_line["pred_cam_pitch"] = pred_cam_pitch
return json_line
def save_apollosim_predictions(self, predictions, runtimes, filename):
self.mae_height = 0
self.mae_pitch = 0
with open(filename, 'w') as jsonFile:
for idx in range(len(predictions)):
json_line = self.pred2apollosimformat(idx, predictions[idx], runtimes[idx])
json.dump(json_line, jsonFile)
jsonFile.write('\n')
print('Height(m):\t{}'.format(self.mae_height / len(predictions)))
print('Pitch(o):\t{}'.format(self.mae_pitch / len(predictions) * 180 / np.pi))
def eval(self, exp_dir, predictions, runtimes, label=None, only_metrics=False):
# raise NotImplementedError
pred_filename = 'apollosim_{}_{}_predictions_{}.json'.format(self.dataset_name, self.split, label)
pred_filename = os.path.join(exp_dir, pred_filename)
self.save_apollosim_predictions(predictions, runtimes, pred_filename)
if self.metric == 'default':
evaluator = eval_3D_lane.LaneEval(self)
eval_stats_pr = evaluator.bench_one_submit_varying_probs(pred_filename, self.anno_files[0])
max_f_prob = eval_stats_pr['max_F_prob_th']
eval_stats = evaluator.bench_one_submit(pred_filename, self.anno_files[0], prob_th=max_f_prob)
print("Metrics: AP, F-score, x error (close), x error (far), z error (close), z error (far)")
print("Laneline:{:.3}, {:.3}, {:.3}, {:.3}, {:.3}, {:.3}".format(
eval_stats_pr['laneline_AP'], eval_stats[0], eval_stats[3], eval_stats[4], eval_stats[5], eval_stats[6]))
result = {
'AP': eval_stats_pr['laneline_AP'],
'F-score': eval_stats[0],
'x error (close)': eval_stats[3],
'x error (far)': eval_stats[4],
'z error (close)': eval_stats[5],
'z error (far)': eval_stats[6]
}
# print("Centerline:{:.3}, {:.3}, {:.3}, {:.3}, {:.3}, {:.3}".format(
# eval_stats_pr['centerline_AP'], eval_stats[7], eval_stats[10], eval_stats[11], eval_stats[12], eval_stats[13]))
elif self.metric == 'ours':
raise NotImplementedError
if not only_metrics:
filename = 'apollosim_{}_{}_eval_result_{}.json'.format(self.dataset_name, self.split, label)
with open(os.path.join(exp_dir, filename), 'w') as out_file:
json.dump(result, out_file)
return eval_stats
def detections(self, ind):
image_id = self._image_ids[ind]
item = self._annotations[image_id]
return item
def __len__(self):
return len(self._annotations)
def _to_float(self, x):
return float("{:.2f}".format(x))
def class_name(self, cid):
cat_id = self._classes[cid]
return cat_id
def _get_img_heigth(self, path):
return 1080
def _get_img_width(self, path):
return 1920
def draw_annotation(self, idx, pred=None, img=None, cls_pred=None):
if img is None:
# raise NotImplementedError
img, gt_2dflatlabels, gt_2dgflatflags, gt_camera_pitch, gt_camera_height, _ = \
self.__getitem__(idx, transform=False)
# Tensor to opencv image
img = img.permute(1, 2, 0).numpy()
# Unnormalize
if self.normalize:
img = img * np.array(IMAGENET_STD) + np.array(IMAGENET_MEAN)
img = (img * 255).astype(np.uint8)
else:
img = (img - np.min(img)) / (np.max(img) - np.min(img))
_, gt_2dflatlabels, gt_2dgflatflags, gt_camera_pitch, gt_camera_height, _ = \
self.__getitem__(idx, transform=False)
img = (img * 255).astype(np.uint8)
img_h, img_w, _ = img.shape
img_canvas = deepcopy(img)
K = self.K
aug_mat = np.identity(3, dtype=np.float)
H_g2im = self.homograpthy_g2im(gt_camera_pitch, gt_camera_height, K)
H_im2ipm = np.linalg.inv(np.matmul(self.H_crop_ipm, np.matmul(H_g2im, self.H_ipm2g)))
H_im2ipm = np.matmul(H_im2ipm, np.linalg.inv(aug_mat))
P_g2im = self.projection_g2im(gt_camera_pitch, gt_camera_height, self.K) # used for x=PX (3D to 2D)
# H_g2im = self.homograpthy_g2im(gt_cam_pitch, gt_cam_height, self.K)
H_im2g = np.linalg.inv(H_g2im)
P_g2gflat = np.matmul(H_im2g, P_g2im)
ipm_canvas = deepcopy(img)
im_ipm = cv2.warpPerspective(ipm_canvas / 255., H_im2ipm, (self.ipm_w, self.ipm_h))
im_ipm = np.clip(im_ipm, 0, 1)
ipm_laneline = im_ipm.copy()
H_g2ipm = np.linalg.inv(self.H_ipm2g)
for i, lane in enumerate(gt_2dflatlabels):
# lane = lane[3:] # remove conf, upper and lower positions
seq_len = (len(lane) - 5) // 8
xs = lane[3:3+seq_len][gt_2dgflatflags[i] > 0]
ys = lane[3+seq_len:3+seq_len*2][gt_2dgflatflags[i] > 0]
ys = ys[xs >= 0].astype(np.int)
xs = xs[xs >= 0].astype(np.int)
# for p in zip(xs, ys):
# p = (int(p[0] * img_w), int(p[1] * img_h))
# img_canvas = cv2.circle(img_canvas, p, 5, color=(0, 0, 255), thickness=-1)
for p in range(1, ys.shape[0]):
img_canvas = cv2.line(img_canvas, (xs[p - 1], ys[p - 1]), (xs[p], ys[p]), [0, 0, 1], 2)
gflatlane = lane[5+seq_len*5:]
gflatXs = gflatlane[:seq_len][gt_2dgflatflags[i] > 0] * self.gflatXnorm
gflatYs = gflatlane[seq_len:seq_len*2][gt_2dgflatflags[i] > 0] * self.gflatYnorm
x_ipm, y_ipm = self.homographic_transformation(H_g2ipm, gflatXs, gflatYs)
x_ipm = x_ipm.astype(np.int)
y_ipm = y_ipm.astype(np.int)
for k in range(1, x_ipm.shape[0]):
ipm_laneline = cv2.line(ipm_laneline, (x_ipm[k - 1], y_ipm[k - 1]), (x_ipm[k], y_ipm[k]), [0, 0, 1], 2)
# ipm_laneline = cv2.circle(ipm_laneline, (x_ipm[k], y_ipm[k]), 5, color=(255, 0, 0), thickness=-1)
ipm_laneline = (ipm_laneline * 255).astype(np.uint8)
# cv2.imshow('fff', ipm_laneline)
# cv2.waitKey(0)
# exit()
if pred is None:
print('Why')
return img_canvas, ipm_laneline
P_gt = np.matmul(self.H_crop_im, H_g2im)
P_gt = np.matmul(aug_mat, P_gt)
pred = pred[pred[:, 1].astype(int) == 1]
matches, accs, _ = self.get_metrics(pred, idx)
for i, lane in enumerate(pred):
lower, upper = lane[4], lane[5]
zlane = lane[10:14]
lane = lane[6:10] # remove upper, lower positions
ys = np.linspace(lower, upper, num=100)
xs = np.polyval(lane, ys)
zs = np.polyval(zlane, ys)
pred_ys = ys * self.gflatYnorm
pred_xs = xs * self.gflatXnorm
pred_zs = zs * self.gflatZnorm
pred_xs, pred_ys = self.projective_transformation(P_g2gflat, pred_xs, pred_ys,
pred_zs)
valid_indices = np.logical_and(np.logical_and(pred_ys > 0, pred_ys < 200),
np.logical_and(pred_xs > 3 * self.x_min, pred_xs < 3 * self.x_max))
pred_xs = pred_xs[valid_indices]
pred_ys = pred_ys[valid_indices]
if pred_xs.shape[0] < 2 or np.sum(np.logical_and(pred_xs > self.x_min, pred_xs < self.x_max)) < 2:
continue
pred_ipm_xs, pred_ipm_ys = self.homographic_transformation(H_g2ipm, pred_xs, pred_ys)
pred_ipm_xs = pred_ipm_xs.astype(np.int)
pred_ipm_ys = pred_ipm_ys.astype(np.int)
for k in range(1, pred_ipm_xs.shape[0]):
ipm_laneline = cv2.line(ipm_laneline, (pred_ipm_xs[k - 1], pred_ipm_ys[k - 1]),
(pred_ipm_xs[k], pred_ipm_ys[k]),
[255, 0, 0], 2)
pred_x2d, pred_y2d = self.homographic_transformation(P_gt, pred_xs, pred_ys)
pred_x2d = (pred_x2d * self.w_net / self.w_org).astype(np.int)
pred_y2d = (pred_y2d * self.h_net / self.h_org).astype(np.int)
for k in range(1, pred_x2d.shape[0]):
img_canvas = cv2.line(img_canvas, (pred_x2d[k - 1], pred_y2d[k - 1]), (pred_x2d[k], pred_y2d[k]),
[255, 0, 0], 2)
return img_canvas, ipm_laneline
def draw_3dannotation(self, idx, pred=None, img=None, cls_pred=None):
# _, _, draw_gt_xsys, draw_gt_zsys, draw_gt_flags, \
# draw_gt_camera_pitch, draw_gt_camera_height, draw_gtground_3dlanes, _ = self.__getitem__(idx)
_, gt_2dflatlabels, gt_2dgflatflags, gt_camera_pitch, gt_camera_height, _ = \
self.__getitem__(idx, transform=False)
img, ipm_img = img
fig = plt.figure()
ax1 = fig.add_subplot(231)
ax1.imshow(img)
ax2 = fig.add_subplot(232)
ax2.imshow(ipm_img)
ax = fig.add_subplot(233, projection='3d')
for i in range(gt_2dflatlabels.shape[0]):
lane = gt_2dflatlabels[i]
seq_len = (len(lane) - 5) // 8
lane3D = lane[5+2*seq_len:5+5*seq_len]
Xs = lane3D[:seq_len][gt_2dgflatflags[i] > 0] * self.gflatXnorm
Ys = lane3D[seq_len:seq_len * 2][gt_2dgflatflags[i] > 0] * self.gflatYnorm
Zs = lane3D[seq_len * 2:seq_len * 3][gt_2dgflatflags[i] > 0] * self.gflatYnorm
ax.plot(Xs, Ys, Zs, color=[0, 0, 1])
if pred is None:
ax.set_xlabel('x axis')
ax.set_ylabel('y axis')
ax.set_zlabel('z axis')
bottom, top = ax.get_zlim()
ax.set_xlim(-20, 20)
ax.set_ylim(0, 100)
ax.set_zlim(min(bottom, -1), max(top, 1))
plt.show()
print('why')
return plt
pred = pred[pred[:, 1].astype(int) == 1]
matches, accs, _ = self.get_metrics(pred, idx)
for i, lane in enumerate(pred):
# lane = lane[1:] # remove conf
lower, upper = lane[4], lane[5]
zlane = lane[10:14]
lane = lane[6:10] # remove upper, lower positions
ys = np.linspace(lower, upper, num=100)
xs = np.polyval(lane, ys)
zs = np.polyval(zlane, ys)
pred_ys = ys * self.gflatYnorm
pred_xs = xs * self.gflatXnorm
pred_zs = zs * self.gflatZnorm
ax.plot(pred_xs, pred_ys, pred_zs, color=[1, 0, 0])
ax.set_xlabel('x axis')
ax.set_ylabel('y axis')
ax.set_zlabel('z axis')
bottom, top = ax.get_zlim()
ax.set_xlim(-20, 20)
ax.set_ylim(0, 100)
ax.set_zlim(min(bottom, -1), max(top, 1))
# ax.set_zlim(-0.1, 0.1)
# ax.set_zlim(bottom, top)
return plt
def get_metrics(self, lanes, idx):
# Placeholders
return [1] * len(lanes), [1] * len(lanes), None
def lane_to_linestrings(self, lanes):
lines = []
for lane in lanes:
lines.append(LineString(lane))
return lines
def linestrings_to_lanes(self, lines):
lanes = []
for line in lines:
lanes.append(line.coords)
return lanes
def homography_crop_resize(self, org_img_size, crop_y, resize_img_size):
"""
compute the homography matrix that transforms the original image to the cropped and resized image
:param org_img_size: [org_h, org_w]
:param crop_y:
:param resize_img_size: [resize_h, resize_w]
:return:
"""
# transform original image region to network input region
ratio_x = resize_img_size[1] / org_img_size[1]
ratio_y = resize_img_size[0] / (org_img_size[0] - crop_y)
H_c = np.array([[ratio_x, 0, 0],
[0, ratio_y, -ratio_y * crop_y],
[0, 0, 1]])
return H_c
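# A small worked example of the matrix above (illustrative values, not taken from any dataset config):
# for org_img_size=[1080, 1920], crop_y=0 and resize_img_size=[360, 480],
# ratio_x = 480/1920 = 0.25 and ratio_y = 360/1080 = 1/3, so
# H_c = [[0.25, 0, 0], [0, 1/3, 0], [0, 0, 1]]; with a non-zero crop_y the
# translation term -ratio_y * crop_y maps the first row below the crop to y = 0.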
def projection_g2im(self, cam_pitch, cam_height, K):
P_g2c = np.array([[1, 0, 0, 0],
[0, np.cos(np.pi / 2 + cam_pitch), -np.sin(np.pi / 2 + cam_pitch), cam_height],
[0, np.sin(np.pi / 2 + cam_pitch), np.cos(np.pi / 2 + cam_pitch), 0]])
P_g2im = np.matmul(K, P_g2c)
return P_g2im
def homograpthy_g2im(self, cam_pitch, cam_height, K):
# transform top-view region to original image region
R_g2c = np.array([[1, 0, 0],
[0, np.cos(np.pi / 2 + cam_pitch), -np.sin(np.pi / 2 + cam_pitch)],
[0, np.sin(np.pi / 2 + cam_pitch), np.cos(np.pi / 2 + cam_pitch)]])
H_g2im = np.matmul(K, np.concatenate([R_g2c[:, 0:2], [[0], [cam_height], [0]]], 1))
return H_g2im
def prune_3d_lane_by_visibility(self, lane_3d, visibility):
lane_3d = lane_3d[visibility > 0, ...]
return lane_3d
def prune_3d_lane_by_range(self, lane_3d, x_min, x_max):
# TODO: solve hard coded range later
# remove points with y out of range
# 3D label may miss super long straight-line with only two points: Not have to be 200, gt need a min-step
# 2D dataset requires this to rule out those points projected to ground, but out of meaningful range
lane_3d = lane_3d[np.logical_and(lane_3d[:, 1] > 0, lane_3d[:, 1] < 200), ...]
# remove lane points out of x range
lane_3d = lane_3d[np.logical_and(lane_3d[:, 0] > x_min,
lane_3d[:, 0] < x_max), ...]
return lane_3d
def convert_lanes_3d_to_gflat(self, lanes, P_g2gflat):
"""
Convert a set of lanes from 3D ground coordinates [X, Y, Z], to IPM-based
flat ground coordinates [x_gflat, y_gflat, Z]
:param lanes: a list of N x 3 numpy arrays recording a set of 3d lanes
:param P_g2gflat: projection matrix from 3D ground coordinates to flat ground coordinates
:return:
"""
# TODO: this function can be simplified with the derived formula
for lane in lanes:
# convert gt label to anchor label
lane_gflat_x, lane_gflat_y = self.projective_transformation(P_g2gflat, lane[:, 0], lane[:, 1], lane[:, 2])
lane[:, 0] = lane_gflat_x
lane[:, 1] = lane_gflat_y
def projective_transformation(self, Matrix, x, y, z):
"""
Helper function to transform coordinates defined by transformation matrix
Args:
Matrix (multi dim - array): 3x4 projection matrix
x (array): original x coordinates
y (array): original y coordinates
z (array): original z coordinates
"""
ones = np.ones((1, len(z)))
coordinates = np.vstack((x, y, z, ones))
trans = np.matmul(Matrix, coordinates)
x_vals = trans[0, :] / trans[2, :]
y_vals = trans[1, :] / trans[2, :]
return x_vals, y_vals
def homographic_transformation(self, Matrix, x, y):
"""
Helper function to transform coordinates defined by transformation matrix
Args:
Matrix (multi dim - array): 3x3 homography matrix
x (array): original x coordinates
y (array): original y coordinates
"""
ones = np.ones((1, len(y)))
coordinates = np.vstack((x, y, ones))
trans = np.matmul(Matrix, coordinates)
x_vals = trans[0, :] / trans[2, :]
y_vals = trans[1, :] / trans[2, :]
return x_vals, y_vals
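# A minimal sketch of how this helper behaves (hypothetical inputs):
#   M = np.diag([2.0, 2.0, 1.0])   # a pure scaling homography
#   x_vals, y_vals = self.homographic_transformation(M, np.array([1.0, 2.0]), np.array([3.0, 4.0]))
#   # -> x_vals = [2.0, 4.0], y_vals = [6.0, 8.0]
# projective_transformation above works the same way with a 3x4 matrix and an extra z input.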
def transform_lane_gflat2g(self, h_cam, X_gflat, Y_gflat, Z_g):
"""
Given X, Y coordinates in flat ground space and Z coordinates in real 3D ground space,
together with the camera height, compute the real 3D coordinates X, Y in ground space.
:param h_cam: camera height above the flat ground plane
:param X_gflat: X coordinates in flat ground space
:param Y_gflat: Y coordinates in flat ground space
:param Z_g: Z coordinates in real 3D ground space
:return:
"""
X_g = X_gflat - X_gflat * Z_g / h_cam
Y_g = Y_gflat - Y_gflat * Z_g / h_cam
return X_g, Y_g
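# Worked example of the formula above (illustrative numbers): with h_cam = 1.5,
# X_gflat = 2.0 and Z_g = 0.3, X_g = 2.0 - 2.0 * 0.3 / 1.5 = 2.0 * (1 - 0.2) = 1.6;
# Y_g is scaled by the same factor (1 - Z_g / h_cam).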
def make_lane_y_mono_inc(self, lane):
"""
Due to the loss of the height dimension, lanes projected to the flat ground plane may not have monotonically increasing y.
This function keeps only the points whose y increases monotonically (with a minimum step) and outputs the pruned lane
:param lane:
:return:
"""
idx2del = []
max_y = lane[0, 1]
for i in range(1, lane.shape[0]):
# hard-coded a smallest step, so the far-away near horizontal tail can be pruned
if lane[i, 1] <= max_y + 3:
idx2del.append(i)
else:
max_y = lane[i, 1]
lane = np.delete(lane, idx2del, 0)
return lane
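# Example of the pruning rule above (hypothetical y values, minimum step of 3):
#   input y:  [10, 11, 20, 18, 30]
#   kept y:   [10, 20, 30]   (11 and 18 are dropped because they do not exceed max_y + 3)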
def resample_laneline_in_y(self, input_lane, y_steps, out_vis=False):
"""
Interpolate x, z values at each anchor grid point, including those beyond the y range of the input lane
:param input_lane: N x 2 or N x 3 ndarray, one row for a point (x, y, z-optional).
It requires y values of input lane in ascending order
:param y_steps: a vector of steps in y
:param out_vis: whether to output visibility indicator which only depends on input y range
:return:
"""
# at least two points are included
assert (input_lane.shape[0] >= 2)
y_min = np.min(input_lane[:, 1]) - 5
y_max = np.max(input_lane[:, 1]) + 5
if input_lane.shape[1] < 3:
input_lane = np.concatenate([input_lane, np.zeros([input_lane.shape[0], 1], dtype=np.float32)], axis=1)
f_x = interp1d(input_lane[:, 1], input_lane[:, 0], fill_value="extrapolate")
f_z = interp1d(input_lane[:, 1], input_lane[:, 2], fill_value="extrapolate")
x_values = f_x(y_steps)
z_values = f_z(y_steps)
if out_vis:
output_visibility = np.logical_and(y_steps >= y_min, y_steps <= y_max)
return x_values, z_values, output_visibility
return x_values, z_values
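# A small sketch of the resampling (hypothetical lane): for an N x 2 input with
# y = [5, 15, 25], x = [1, 2, 3] and y_steps = [0, 10, 20, 30], linear
# interpolation/extrapolation gives x_values = [0.5, 1.5, 2.5, 3.5] and z_values = 0
# (the missing z column is zero-padded); with out_vis=True every step is marked
# visible because it falls inside [y_min, y_max] = [0, 30].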
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
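# Usage sketch for the encoder above:
#   json.dumps({"xs": np.arange(3)}, cls=NumpyEncoder)  # -> '{"xs": [0, 1, 2]}'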
| 44.797665
| 154
| 0.554742
|
a563a1bf3254f5d17e2f7617707b1760e76b6a5c
| 2,426
|
py
|
Python
|
tests/h/models/token_test.py
|
discodavey/h
|
7bff8478b3a5b936de82ac9fcd89b355f4afd3aa
|
[
"MIT"
] | 1
|
2018-03-09T02:15:16.000Z
|
2018-03-09T02:15:16.000Z
|
tests/h/models/token_test.py
|
discodavey/h
|
7bff8478b3a5b936de82ac9fcd89b355f4afd3aa
|
[
"MIT"
] | 16
|
2018-03-14T21:23:46.000Z
|
2019-04-29T18:55:28.000Z
|
tests/h/models/token_test.py
|
discodavey/h
|
7bff8478b3a5b936de82ac9fcd89b355f4afd3aa
|
[
"MIT"
] | 1
|
2021-03-12T09:45:04.000Z
|
2021-03-12T09:45:04.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import pytest
from h.models import Token
@pytest.mark.usefixtures('security')
class TestToken(object):
def test_ttl_is_none_if_token_has_no_expires(self):
assert Token().ttl is None
def test_ttl_when_token_does_expire(self):
expires = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
token = Token(expires=expires)
assert 0 < token.ttl < 3601
def test_expired_is_false_if_expires_is_in_the_future(self):
expires = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
token = Token(expires=expires)
assert token.expired is False
def test_expired_is_false_if_expires_is_none(self):
token = Token(expires=None)
assert token.expired is False
def test_expired_is_true_if_expires_is_in_the_past(self):
expires = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
token = Token(expires=expires)
assert token.expired is True
def test_refresh_token_expired_is_false_if_in_future(self):
refresh_token_expires = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
token = Token(refresh_token_expires=refresh_token_expires)
assert token.refresh_token_expired is False
def test_refresh_token_expired_is_false_if_none(self):
token = Token(refresh_token_expires=None)
assert token.refresh_token_expired is False
def test_refresh_token_expired_is_true_if_in_past(self):
refresh_token_expires = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
token = Token(refresh_token_expires=refresh_token_expires)
assert token.refresh_token_expired is True
@pytest.fixture
def security(self, patch):
security = patch('h.models.token.security')
class TestTokenGenerator(object):
"""Return "TOKEN_1", then "TOKEN_2" and so on."""
def __init__(self):
self.i = 1
self.generated_tokens = []
def __call__(self):
self.generated_tokens.append("TOKEN_" + str(self.i))
self.i += 1
return self.generated_tokens[-1]
security.token_urlsafe.side_effect = TestTokenGenerator()
return security
def one_hour_from_now():
return datetime.datetime.now() + datetime.timedelta(hours=1)
| 30.708861
| 88
| 0.691261
|
d207b34e5fd1def9ef38630375e1d3d31a433c2e
| 3,809
|
py
|
Python
|
aperturedb/VideoLoader.py
|
aperture-data/aperturedb-python
|
186ae09a474df8e2d90ecdc7ba81e81879cef3ea
|
[
"Apache-2.0"
] | 1
|
2022-01-12T17:46:20.000Z
|
2022-01-12T17:46:20.000Z
|
aperturedb/VideoLoader.py
|
aperture-data/aperturedb-python
|
186ae09a474df8e2d90ecdc7ba81e81879cef3ea
|
[
"Apache-2.0"
] | 11
|
2021-07-14T16:54:05.000Z
|
2022-03-30T14:34:34.000Z
|
aperturedb/VideoLoader.py
|
aperture-data/aperturedb-python
|
186ae09a474df8e2d90ecdc7ba81e81879cef3ea
|
[
"Apache-2.0"
] | null | null | null |
import math
import time
from threading import Thread
import numpy as np
import cv2
from aperturedb import Status
from aperturedb import ParallelLoader
from aperturedb import CSVParser
HEADER_PATH = "filename"
PROPERTIES = "properties"
CONSTRAINTS = "constraints"
class VideoGeneratorCSV(CSVParser.CSVParser):
'''
ApertureDB Video Data loader.
Expects a csv file with the following columns:
filename,PROP_NAME_1, ... PROP_NAME_N,constraint_PROP1
Example csv file:
filename,id,label,constraint_id
/home/user/file1.jpg,321423532,dog,321423532
/home/user/file2.jpg,42342522,cat,4234252
...
'''
def __init__(self, filename, check_video=True):
super().__init__(filename)
self.check_video = check_video
self.props_keys = [x for x in self.header[1:] if not x.startswith(CSVParser.CONTRAINTS_PREFIX)]
self.constraints_keys = [x for x in self.header[1:] if x.startswith(CSVParser.CONTRAINTS_PREFIX) ]
def __getitem__(self, idx):
filename = self.df.loc[idx, HEADER_PATH]
data = {}
video_ok, video = self.load_video(filename)
if not video_ok:
print("Error loading video: " + filename )
raise Exception("Error loading video: " + filename )
data["video_blob"] = video
properties = self.parse_properties(self.df, idx)
constraints = self.parse_constraints(self.df, idx)
if properties:
data[PROPERTIES] = properties
if constraints:
data[CONSTRAINTS] = constraints
return data
def load_video(self, filename):
if self.check_video:
try:
a = cv2.VideoCapture(filename)
if a.isOpened() == False:
print("Video reading Error:", filename)
except:
print("Video Error:", filename)
try:
fd = open(filename, "rb")
buff = fd.read()
fd.close()
return True, buff
except:
print("Video Error:", filename)
return False, None
def validate(self):
self.header = list(self.df.columns.values)
if self.header[0] != HEADER_PATH:
raise Exception("Error with CSV file field: filename. Must be first field")
class VideoLoader(ParallelLoader.ParallelLoader):
'''
ApertureDB Video Loader.
This class is to be used in combination with a "generator".
The generator must be an iterable object that generates "video_data"
elements:
video_data = {
"properties": properties,
"constraints": constraints,
"operations": operations,
"video_blob": (bytes),
}
'''
def __init__(self, db, dry_run=False):
super().__init__(db, dry_run=dry_run)
self.type = "video"
def generate_batch(self, video_data):
q = []
blobs = []
for data in video_data:
ai = {
"AddVideo": {
}
}
if "properties" in data:
ai["AddVideo"]["properties"] = data["properties"]
if "constraints" in data:
ai["AddVideo"]["if_not_found"] = data["constraints"]
if "operations" in data:
ai["AddVideo"]["operations"] = data["operations"]
if "format" in data:
ai["AddVideo"]["format"] = data["format"]
if "video_blob" not in data or len(data["video_blob"]) == 0:
print("WARNING: Skipping empty video.")
continue
blobs.append(data["video_blob"])
q.append(ai)
return q, blobs
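# A hedged usage sketch (the ApertureDB connection object and the parallel-ingest
# entry point come from aperturedb/ParallelLoader and are not shown in this file):
#   generator = VideoGeneratorCSV("videos.csv", check_video=True)   # hypothetical CSV path
#   loader = VideoLoader(db, dry_run=True)                          # db: an ApertureDB connection
#   q, blobs = loader.generate_batch([generator[0]])                # AddVideo queries + video blobs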
| 27.014184
| 109
| 0.568916
|
3241cd1971c92649e5ef2bd7a1b441ce7ca81947
| 81
|
py
|
Python
|
day01/t01/apps.py
|
lin8979/newrep01
|
d0d7e157d522c2e83d1976a35d6a815c9e7e4257
|
[
"Apache-2.0"
] | null | null | null |
day01/t01/apps.py
|
lin8979/newrep01
|
d0d7e157d522c2e83d1976a35d6a815c9e7e4257
|
[
"Apache-2.0"
] | null | null | null |
day01/t01/apps.py
|
lin8979/newrep01
|
d0d7e157d522c2e83d1976a35d6a815c9e7e4257
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class T01Config(AppConfig):
name = 't01'
| 13.5
| 33
| 0.728395
|
88ab855574de3a169c332b1b42f6309b0e31bacd
| 16,680
|
py
|
Python
|
ZConfig/tests/test_loader.py
|
derFreitag/ZConfig
|
276cae67e983f7c92ccfaf337327b950061b223e
|
[
"ZPL-2.1"
] | 7
|
2016-06-20T20:23:14.000Z
|
2021-04-09T03:28:48.000Z
|
ZConfig/tests/test_loader.py
|
derFreitag/ZConfig
|
276cae67e983f7c92ccfaf337327b950061b223e
|
[
"ZPL-2.1"
] | 64
|
2015-07-15T23:03:18.000Z
|
2021-09-09T07:54:16.000Z
|
ZConfig/tests/test_loader.py
|
derFreitag/ZConfig
|
276cae67e983f7c92ccfaf337327b950061b223e
|
[
"ZPL-2.1"
] | 8
|
2015-04-03T06:42:24.000Z
|
2021-09-15T04:40:25.000Z
|
##############################################################################
#
# Copyright (c) 2002, 2003, 2018 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Tests of ZConfig.loader classes and helper functions."""
import os.path
import sys
import tempfile
import unittest
import ZConfig
import ZConfig.loader
import ZConfig.url
from ZConfig._compat import NStringIO as StringIO
from ZConfig._compat import urllib2
from ZConfig.tests.support import CONFIG_BASE, TestHelper
myfile = os.path.abspath(__file__)
LIBRARY_DIR = os.path.join(os.path.dirname(myfile), "library")
class LoaderTestCase(TestHelper, unittest.TestCase):
def test_open_resource_non_ascii(self):
# Files are decoded using utf-8 on open
loader = ZConfig.loader.SchemaLoader()
url = ZConfig.url.urljoin(CONFIG_BASE, "non-ascii.txt")
with loader.openResource(url) as stream:
val = stream.read()
self.assertEqual(
val,
u'# -*-coding: utf-8; mode: conf-*-\n'
u'This file contains a snowman, U+2603: \u2603\n'
)
def test_schema_caching(self):
loader = ZConfig.loader.SchemaLoader()
url = ZConfig.url.urljoin(CONFIG_BASE, "simple.xml")
schema1 = loader.loadURL(url)
schema2 = loader.loadURL(url)
self.assertIs(schema1, schema2)
def test_simple_import_with_cache(self):
loader = ZConfig.loader.SchemaLoader()
url1 = ZConfig.url.urljoin(CONFIG_BASE, "library.xml")
schema1 = loader.loadURL(url1)
sio = StringIO("<schema>"
" <import src='library.xml'/>"
" <section type='type-a' name='section'/>"
"</schema>")
url2 = ZConfig.url.urljoin(CONFIG_BASE, "stringio")
schema2 = loader.loadFile(sio, url2)
self.assertTrue(schema1.gettype("type-a") is schema2.gettype("type-a"))
def test_schema_loader_source_errors(self):
loader = ZConfig.loader.SchemaLoader()
self.assertRaisesRegex(ZConfig.SchemaError,
"illegal schema component name",
loader.schemaComponentSource,
'', None)
self.assertRaisesRegex(ZConfig.SchemaError,
"illegal schema component name",
loader.schemaComponentSource,
'foo..bar', None)
def test_config_loader_abstract_schema(self):
class MockSchema(object):
_abstract = True
def isabstract(self):
return self._abstract
def gettype(self, _t):
return self
self.assertRaisesRegex(ZConfig.SchemaError,
"abstract type",
ZConfig.loader.ConfigLoader,
MockSchema())
s = MockSchema()
s._abstract = False
loader = ZConfig.loader.ConfigLoader(s)
s._abstract = True
self.assertRaisesRegex(ZConfig.ConfigurationError,
"cannot match abstract section",
loader.startSection,
None, None, None)
def test_simple_import_using_prefix(self):
self.load_schema_text("""\
<schema prefix='ZConfig.tests.library'>
<import package='.thing'/>
</schema>
""")
def test_import_errors(self):
# must specify exactly one of package or src
self.assertRaises(ZConfig.SchemaError, ZConfig.loadSchemaFile,
StringIO("<schema><import/></schema>"))
self.assertRaises(ZConfig.SchemaError, ZConfig.loadSchemaFile,
StringIO("<schema>"
" <import src='library.xml'"
" package='ZConfig'/>"
"</schema>"))
# cannot specify src and file
self.assertRaises(ZConfig.SchemaError, ZConfig.loadSchemaFile,
StringIO("<schema>"
" <import src='library.xml'"
" file='other.xml'/>"
"</schema>"))
# cannot specify module as package
sio = StringIO("<schema>"
" <import package='ZConfig.tests.test_loader'/>"
"</schema>")
with self.assertRaises(ZConfig.SchemaResourceError) as ctx:
ZConfig.loadSchemaFile(sio)
e = ctx.exception
self.assertEqual(e.filename, "component.xml")
self.assertEqual(e.package, "ZConfig.tests.test_loader")
self.assertTrue(e.path is None)
# make sure the str() doesn't raise an unexpected exception
str(e)
def test_import_from_package(self):
loader = ZConfig.loader.SchemaLoader()
sio = StringIO("<schema>"
" <import package='ZConfig.tests.library.widget'/>"
"</schema>")
schema = loader.loadFile(sio)
self.assertTrue(schema.gettype("widget-a") is not None)
def test_import_from_package_with_file(self):
loader = ZConfig.loader.SchemaLoader()
sio = StringIO("<schema>"
" <import package='ZConfig.tests.library.widget'"
" file='extra.xml' />"
"</schema>")
schema = loader.loadFile(sio)
self.assertTrue(schema.gettype("extra-type") is not None)
def test_import_from_package_extra_directory(self):
loader = ZConfig.loader.SchemaLoader()
sio = StringIO("<schema>"
" <import package='ZConfig.tests.library.thing'"
" file='extras.xml' />"
"</schema>")
schema = loader.loadFile(sio)
self.assertTrue(schema.gettype("extra-thing") is not None)
def test_import_from_package_with_missing_file(self):
loader = ZConfig.loader.SchemaLoader()
sio = StringIO("<schema>"
" <import package='ZConfig.tests.library.widget'"
" file='notthere.xml' />"
"</schema>")
with self.assertRaises(ZConfig.SchemaResourceError) as ctx:
loader.loadFile(sio)
e = ctx.exception
self.assertEqual(e.filename, "notthere.xml")
self.assertEqual(e.package, "ZConfig.tests.library.widget")
self.assertTrue(e.path)
# make sure the str() doesn't raise an unexpected exception
str(e)
def test_import_from_package_with_directory_file(self):
loader = ZConfig.loader.SchemaLoader()
sio = StringIO("<schema>"
" <import package='ZConfig.tests.library.widget'"
" file='really/notthere.xml' />"
"</schema>")
self.assertRaises(ZConfig.SchemaError, loader.loadFile, sio)
def test_import_two_components_one_package(self):
loader = ZConfig.loader.SchemaLoader()
sio = StringIO("<schema>"
" <import package='ZConfig.tests.library.widget' />"
" <import package='ZConfig.tests.library.widget'"
" file='extra.xml' />"
"</schema>")
schema = loader.loadFile(sio)
schema.gettype("widget-a")
schema.gettype("extra-type")
def test_import_component_twice_1(self):
# Make sure we can import a component twice from a schema.
# This is most likely to occur when the component is imported
# from each of two other components, or from the top-level
# schema and a component.
loader = ZConfig.loader.SchemaLoader()
sio = StringIO("<schema>"
" <import package='ZConfig.tests.library.widget' />"
" <import package='ZConfig.tests.library.widget' />"
"</schema>")
schema = loader.loadFile(sio)
schema.gettype("widget-a")
def test_import_component_twice_2(self):
# Make sure we can import a component from a config file even
# if it has already been imported from the schema.
loader = ZConfig.loader.SchemaLoader()
sio = StringIO("<schema>"
" <import package='ZConfig.tests.library.widget' />"
"</schema>")
schema = loader.loadFile(sio)
loader = ZConfig.loader.ConfigLoader(schema)
sio = StringIO("%import ZConfig.tests.library.widget")
loader.loadFile(sio)
def test_urlsplit_urlunsplit(self):
# Extracted from Python's test.test_urlparse module:
samples = [
('http://www.python.org',
('http', 'www.python.org', '', '', '', ''),
('http', 'www.python.org', '', '', '')),
('http://www.python.org#abc',
('http', 'www.python.org', '', '', '', 'abc'),
('http', 'www.python.org', '', '', 'abc')),
('http://www.python.org/#abc',
('http', 'www.python.org', '/', '', '', 'abc'),
('http', 'www.python.org', '/', '', 'abc')),
("http://a/b/c/d;p?q#f",
('http', 'a', '/b/c/d', 'p', 'q', 'f'),
('http', 'a', '/b/c/d;p', 'q', 'f')),
('file:///tmp/junk.txt',
('file', '', '/tmp/junk.txt', '', '', ''),
('file', '', '/tmp/junk.txt', '', '')),
]
for url, parsed, split in samples:
result = ZConfig.url.urlsplit(url)
self.assertEqual(result, split)
result2 = ZConfig.url.urlunsplit(result)
self.assertEqual(result2, url)
def test_file_url_normalization(self):
self.assertEqual(
ZConfig.url.urlnormalize("file:/abc/def"),
"file:///abc/def")
self.assertEqual(
ZConfig.url.urlunsplit(("file", "", "/abc/def", "", "")),
"file:///abc/def")
self.assertEqual(
ZConfig.url.urljoin("file:/abc/", "def"),
"file:///abc/def")
self.assertEqual(
ZConfig.url.urldefrag("file:/abc/def#frag"),
("file:///abc/def", "frag"))
def test_url_from_file(self):
class MockFile(object):
name = 'path'
self.assertEqual('file://',
ZConfig.loader._url_from_file(MockFile)[:7])
def test_isPath(self):
assertTrue = self.assertTrue
isPath = ZConfig.loader.SchemaLoader().isPath
assertTrue(isPath("abc"))
assertTrue(isPath("abc/def"))
assertTrue(isPath("/abc"))
assertTrue(isPath("/abc/def"))
assertTrue(isPath(r"\abc"))
assertTrue(isPath(r"\abc\def"))
assertTrue(isPath(r"c:\abc\def"))
assertTrue(isPath("/ab:cd"))
assertTrue(isPath(r"\ab:cd"))
assertTrue(isPath("long name with spaces"))
assertTrue(isPath("long name:with spaces"))
assertTrue(not isPath("ab:cd"))
assertTrue(not isPath("http://www.example.com/"))
assertTrue(not isPath("http://www.example.com/sample.conf"))
assertTrue(not isPath("file:///etc/zope/zope.conf"))
assertTrue(not isPath("file:///c|/foo/bar.conf"))
class TestNonExistentResources(unittest.TestCase):
# XXX Not sure if this is the best approach for these. These
# tests make sure that the error reported by ZConfig for missing
# resources is handled in a consistent way. Since ZConfig uses
# urllib2.urlopen() for opening all resources, what we do is
# replace that function with one that always raises an exception.
# Since urllib2.urlopen() can raise either IOError or OSError
# (depending on the version of Python), we run test for each
# exception. urllib2.urlopen() is restored after running the
# test.
def setUp(self):
self.urllib2_urlopen = urllib2.urlopen
urllib2.urlopen = self.fake_urlopen
def tearDown(self):
urllib2.urlopen = self.urllib2_urlopen
def fake_urlopen(self, url):
raise self.error()
def test_nonexistent_file_ioerror(self):
self.error = IOError
self.check_nonexistent_file()
def test_nonexistent_file_oserror(self):
self.error = OSError
self.check_nonexistent_file()
def check_nonexistent_file(self):
fn = tempfile.mktemp()
schema = ZConfig.loadSchemaFile(StringIO("<schema/>"))
self.assertRaises(ZConfig.ConfigurationError,
ZConfig.loadSchema, fn)
self.assertRaises(ZConfig.ConfigurationError,
ZConfig.loadConfig, schema, fn)
self.assertRaises(ZConfig.ConfigurationError,
ZConfig.loadConfigFile, schema,
StringIO("%include " + fn))
self.assertRaises(ZConfig.ConfigurationError,
ZConfig.loadSchema,
"http://www.zope.org/no-such-document/")
self.assertRaises(ZConfig.ConfigurationError,
ZConfig.loadConfig, schema,
"http://www.zope.org/no-such-document/")
class TestResourcesInZip(unittest.TestCase):
def setUp(self):
self.old_path = sys.path[:]
# now add our sample EGG to sys.path:
zipfile = os.path.join(os.path.dirname(myfile), "foosample.zip")
sys.path.append(zipfile)
def tearDown(self):
sys.path[:] = self.old_path
def test_zip_import_component_from_schema(self):
sio = StringIO('''
<schema>
<abstracttype name="something"/>
<import package="foo.sample"/>
<section name="*"
attribute="something"
type="something"
/>
</schema>
''')
schema = ZConfig.loadSchemaFile(sio)
t = schema.gettype("sample")
self.assertFalse(t.isabstract())
def test_zip_import_component_from_config(self):
sio = StringIO('''
<schema>
<abstracttype name="something"/>
<section name="*"
attribute="something"
type="something"
/>
</schema>
''')
schema = ZConfig.loadSchemaFile(sio)
value = '''
%import foo.sample
<sample>
data value
</sample>
'''
sio = StringIO(value)
config, _ = ZConfig.loadConfigFile(schema, sio)
self.assertEqual(config.something.data, "| value |")
sio = StringIO(value)
with self.assertRaises(ZConfig.ConfigurationSyntaxError):
ZConfig.loadConfigFile(schema, sio,
overrides=["sample/data=othervalue"])
class TestOpenPackageResource(TestHelper, unittest.TestCase):
magic_name = 'not a valid import name'
def setUp(self):
sys.modules[self.magic_name] = self
def tearDown(self):
del sys.modules[self.magic_name]
def test_package_loader_resource_error(self):
class MockLoader(object):
pass
self.__loader__ = MockLoader()
self.__path__ = ['dir']
self.assertRaisesRegex(ZConfig.SchemaResourceError,
"error opening schema component",
ZConfig.loader.openPackageResource,
self.magic_name, 'a path')
# Now with an empty path
self.__path__ = []
self.assertRaisesRegex(ZConfig.SchemaResourceError,
"schema component not found",
ZConfig.loader.openPackageResource,
self.magic_name, 'a path')
def test_resource(self):
r = ZConfig.loader.Resource(self, None)
self.assertEqual(self.magic_name, r.magic_name)
| 39.15493
| 79
| 0.553477
|
2ebe9d67c0136daffe640a54236cbb8804a1cace
| 123
|
py
|
Python
|
pdb_use.py
|
edgells/python-commons
|
38c0aa0ec10304a4147ea231c92c9e34da462052
|
[
"MIT"
] | null | null | null |
pdb_use.py
|
edgells/python-commons
|
38c0aa0ec10304a4147ea231c92c9e34da462052
|
[
"MIT"
] | null | null | null |
pdb_use.py
|
edgells/python-commons
|
38c0aa0ec10304a4147ea231c92c9e34da462052
|
[
"MIT"
] | null | null | null |
import pdb
def make_pdb():
pdb.set_trace()
print("I Don`t have time")
if __name__ == '__main__':
make_pdb()
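# At the pdb prompt opened by set_trace(), the usual commands apply:
#   n (next line), s (step into), c (continue), p <expr> (print), q (quit)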
| 12.3
| 30
| 0.617886
|
1f8073ceccf44ae06d2db0db92e48a6175987bd5
| 3,247
|
py
|
Python
|
siuba/tests/test_sql_verbs.py
|
tmastny/siuba
|
7a234bc6d03b7ad3ba6054c8899fd27ccb7f05aa
|
[
"MIT"
] | 831
|
2019-07-25T12:41:18.000Z
|
2022-03-31T14:47:27.000Z
|
siuba/tests/test_sql_verbs.py
|
tmastny/siuba
|
7a234bc6d03b7ad3ba6054c8899fd27ccb7f05aa
|
[
"MIT"
] | 295
|
2019-04-23T17:32:16.000Z
|
2022-03-29T23:19:44.000Z
|
siuba/tests/test_sql_verbs.py
|
tmastny/siuba
|
7a234bc6d03b7ad3ba6054c8899fd27ccb7f05aa
|
[
"MIT"
] | 42
|
2019-04-23T17:17:42.000Z
|
2022-03-31T14:36:07.000Z
|
from siuba.sql import group_by, mutate, LazyTbl, collect
from siuba.siu import _
from siuba.sql.translate import funcs
from sqlalchemy import sql
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
from sqlalchemy import create_engine
from pandas.testing import assert_frame_equal
import pytest
metadata = MetaData()
users = Table('users', metadata,
Column('id', Integer, primary_key=True),
Column('name', String),
Column('fullname', String),
)
addresses = Table('addresses', metadata,
Column('id', Integer, primary_key=True),
Column('user_id', None, ForeignKey('users.id')),
Column('email_address', String, nullable=False)
)
@pytest.fixture(scope = "module")
def db():
engine = create_engine('sqlite:///:memory:', echo=False)
metadata.create_all(engine)
conn = engine.connect()
ins = users.insert().values(name='jack', fullname='Jack Jones')
result = conn.execute(ins)
ins = users.insert()
conn.execute(ins, id=2, name='wendy', fullname='Wendy Williams')
yield conn
# LazyTbl ---------------------------------------------------------------------
def test_lazy_tbl_table_string(db):
tbl = LazyTbl(db, 'addresses')
tbl.tbl.columns.user_id
def test_lazy_tbl_manual_columns(db):
tbl = LazyTbl(db, 'addresses', columns = ('user_id', 'wrong_name'))
tbl.tbl.columns.wrong_name
tbl.tbl.columns.user_id
with pytest.raises(AttributeError):
tbl.tbl.columns.email_address
# SqlFunctionLookupError ------------------------------------------------------
from siuba import _
from siuba.sql import arrange, filter, mutate, summarize, SqlFunctionLookupError
from siuba.siu import strip_symbolic
def test_lazy_tbl_shape_call_error(db):
tbl = LazyTbl(db, 'addresses')
call = strip_symbolic(_.id.asdkfjsdf())
with pytest.raises(SqlFunctionLookupError) as err:
tbl.shape_call(call)
# suppresses context for shorter stack trace
assert err.__suppress_context__ == True
# TODO: remove these old tests? should be redundant ===========================
# mutate ----------------------------------------------------------------------
def test_sql_mutate(db):
tbl = LazyTbl(db, addresses, funcs = funcs)
f = mutate(user_id2 = _.user_id + 1)
out1 = tbl >> f >> collect()
out2 = tbl >> collect() >> f
assert_frame_equal(out1, out2)
# group_by --------------------------------------------------------------------
@pytest.mark.parametrize("group_vars", [
["id",], # string syntax
["id", "user_id"], # string syntax multiple
[_.id], # _ syntax
[_.id, _.user_id], # _ syntax multiple
])
def test_sql_group_by(db, group_vars):
tbl = LazyTbl(db, addresses, funcs = funcs)
group_by(tbl, *group_vars)
@pytest.mark.parametrize("group_var, error", [
(_.id + 1, NotImplementedError), # complex expressions
(_.notacol, KeyError) # missing columns
])
def test_sql_group_by_fail(db, group_var, error):
tbl = LazyTbl(db, addresses, funcs = funcs)
with pytest.raises(error):
group_by(tbl, group_var)
| 28.991071
| 80
| 0.603326
|
b85c501f39565191047830c073cff51e4f12c68c
| 2,405
|
py
|
Python
|
cohesity_management_sdk/models/one_drive_info.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 18
|
2019-09-24T17:35:53.000Z
|
2022-03-25T08:08:47.000Z
|
cohesity_management_sdk/models/one_drive_info.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 18
|
2019-03-29T19:32:29.000Z
|
2022-01-03T23:16:45.000Z
|
cohesity_management_sdk/models/one_drive_info.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 16
|
2019-02-27T06:54:12.000Z
|
2021-11-16T18:10:24.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
import cohesity_management_sdk.models.one_drive_item
class OneDriveInfo(object):
"""Implementation of the 'OneDriveInfo' model.
Specifies OneDrive details with the items which need to be restored.
Attributes:
drive_id (string): Specifies the Id of the Drive.
drive_item_list (list of OneDriveItem): Specifies the Drive items such
as files/folders.
restore_entire_drive (bool): Specifies whether entire drive is to be
restored. This should be set to false if specific drive items are
to be restored within 'DriveItemList'.
"""
# Create a mapping from Model property names to API property names
_names = {
"drive_id":'driveId',
"drive_item_list":'driveItemList',
"restore_entire_drive":'restoreEntireDrive'
}
def __init__(self,
drive_id=None,
drive_item_list=None,
restore_entire_drive=None):
"""Constructor for the OneDriveInfo class"""
# Initialize members of the class
self.drive_id = drive_id
self.drive_item_list = drive_item_list
self.restore_entire_drive = restore_entire_drive
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
drive_id = dictionary.get('driveId')
drive_item_list = None
if dictionary.get('driveItemList') != None:
drive_item_list = list()
for structure in dictionary.get('driveItemList'):
drive_item_list.append(cohesity_management_sdk.models.one_drive_item.OneDriveItem.from_dictionary(structure))
restore_entire_drive = dictionary.get('restoreEntireDrive')
# Return an object of this model
return cls(drive_id,
drive_item_list,
restore_entire_drive)
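# A small usage sketch with made-up field values:
#   info = OneDriveInfo.from_dictionary({'driveId': 'drive-1', 'restoreEntireDrive': True})
#   info.drive_id              # -> 'drive-1'
#   info.restore_entire_drive  # -> True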
| 32.945205
| 125
| 0.642827
|
c29c12c49ea95e3601cdf3c72fe91ceba0bf13a1
| 10,896
|
py
|
Python
|
loss/CurricularNCE_loss.py
|
Feezhen/Contrastive-learning
|
9f7fc760c24ede2ffc485009ed787652551d0266
|
[
"MIT"
] | 1
|
2021-12-27T08:39:05.000Z
|
2021-12-27T08:39:05.000Z
|
loss/CurricularNCE_loss.py
|
Feezhen/Contrastive-learning
|
9f7fc760c24ede2ffc485009ed787652551d0266
|
[
"MIT"
] | null | null | null |
loss/CurricularNCE_loss.py
|
Feezhen/Contrastive-learning
|
9f7fc760c24ede2ffc485009ed787652551d0266
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import numpy as np
import os
import math
import sys
sys.path.append('../')
import params
class CurricularNCE_loss(nn.Module):
'''
NCE loss with curricular (hard-example aware) weighting
'''
def __init__(self, gamma, keep_weight, T=0.07, m = 0.5, mlp=False):
"""
gamma: focal exponent used to weight hard positive/negative pairs
keep_weight: base weight added to the focal weights of labelled pairs
T: softmax temperature (default: 0.07)
m: angular margin applied to positive-pair cosines (default: 0.5)
"""
super(CurricularNCE_loss, self).__init__()
self.gamma = gamma
self.keep_weight = keep_weight
self.T = T
self.m = m
self.cos_m = math.cos(m)
self.sin_m = math.sin(m)
self.threshold = math.cos(math.pi - m)
self.mm = math.sin(math.pi - m) * m
self.register_buffer('t', torch.zeros(1))
self.alpha = 0.1
self.weight_valid = False
self.weight_valid_threshold = 0.8
self.weight_scale = 5.0
self.loss = 0
@torch.no_grad()
def del_tensor_ele(self, tensor, dim, index):
"""
Delete an element from tensor
tensor: source tensor
dim: The dimension in which the element resides
index: the index of the element
"""
return tensor[torch.arange(tensor.size(dim))!=index]
@torch.no_grad()
def get_negatives(self, feature1, feature2, labels, index):
"""
get negative samples from batch
"""
neg_sample1 = feature1
neg_sample2 = feature2
for j in range(0, labels.shape[0]):
if labels[j] == labels[index]:
neg_sample1 = self.del_tensor_ele(tensor=neg_sample1, dim=0, index=j)
neg_sample2 = self.del_tensor_ele(tensor=neg_sample2, dim=0, index=j)
neg_samples = torch.cat([neg_sample1, neg_sample2], dim=0)
add_tensor = torch.zeros(1,neg_samples.shape[1]).cuda()
while neg_samples.shape[0] < (labels.shape[0]-1)*2:
add_tensor[0] = neg_samples[np.random.randint(0, neg_samples.shape[0])]
neg_samples = torch.cat([neg_samples, add_tensor], dim=0)
return neg_samples
def forward(self, feature1, feature2, labels):
feature1 = nn.functional.normalize(feature1, dim=1)
feature2 = nn.functional.normalize(feature2, dim=1)
batch_size = feature1.shape[0]
# feature similarity
similarity = torch.einsum('nc,kc->nk', [feature1, feature2])#nan
similarity = similarity.clamp(-1, 1)
with torch.no_grad():
origin_similarity = similarity.clone()
# positive/negative pair labels for contrastive learning
labels_temp = labels.unsqueeze(1)
pos_labels = torch.eq(labels_temp, labels_temp.T).long().cuda()  # positive-pair label mask
neg_labels = torch.eq(pos_labels, 0).long().cuda()
pos_labels_mask = (pos_labels == 1)
target_similarity = similarity[pos_labels_mask]  # pick out the positive-pair cosines
with torch.no_grad():
self.t = target_similarity.mean() * self.alpha + (1-self.alpha) * self.t
if self.t > self.weight_valid_threshold and self.weight_valid == False:
self.weight_valid = True
print('CurricularNCE_loss weight valid'.center(30, '-'))
print(f't is now {self.t}'.center(30, '-'))
if self.weight_valid:  # start using focal-style weights
weight_pos = pos_labels.clone().float().cuda()  # positive-sample mask
weight_neg = neg_labels.clone().float().cuda()
weight_pos *= self.keep_weight  # scale the label masks by keep_weight
weight_neg *= self.keep_weight
# product of similarity and the label masks
sim_pos = torch.mul(similarity, pos_labels)
sim_neg = torch.mul(similarity, neg_labels)
# difficulty of positive/negative samples (used as weights)
diff_pos = pos_labels - sim_pos
diff_neg = sim_neg
diff_neg = diff_neg.clamp(0, 1)
# positive/negative sample weights
diff_pos = torch.pow(diff_pos, self.gamma)  # a fractional exponent is fine here
diff_neg = torch.pow(diff_neg, self.gamma)
# diff_pos = torch.mul(diff_pos, pos_labels)  # keep diff aligned with positive/negative samples
diff_pos += weight_pos
diff_neg += weight_neg
# diff_pos *= self.weight_scale
# diff_neg *= self.weight_scale
sin_theta = torch.sqrt(1.0 - torch.pow(target_similarity, 2))
cos_theta_m = target_similarity*self.cos_m - sin_theta*self.sin_m
final_target_similarity = torch.where(target_similarity > self.threshold, cos_theta_m, target_similarity-self.mm)  # keep the margin function monotonically decreasing
similarity[pos_labels_mask] = final_target_similarity  # replace positive positions with cos_theta_m
target_similarity_per_row = torch.mul(similarity, pos_labels).sum(dim=1)
pos_labels_per_row = pos_labels.sum(dim=1)
target_similarity_per_row = (target_similarity_per_row / pos_labels_per_row).view(-1, 1)  # per-row mean of the positive cos_theta_m
# sin_theta_per_row = torch.sqrt(1.0 - torch.pow(target_similarity_per_row, 2))
# cos_theta_m_per_row = target_similarity_per_row*self.cos_m - sin_theta_per_row*self.sin_m
mask = similarity > target_similarity_per_row  # hard examples
# final_target_similarity = torch.where(target_similarity > self.threshold, cos_theta_m, target_similarity-self.mm)
hard_example = similarity[mask]  # hard examples
similarity[mask] = hard_example * (self.t + hard_example)  # negative-pair cosines
similarity[pos_labels_mask] = final_target_similarity  # positive-pair cosines
logits = similarity / self.T
logits = torch.exp(logits)
if self.weight_valid:
logit_pos = torch.mul(logits, diff_pos).sum(dim=1)
logit_neg = torch.mul(logits, diff_neg).sum(dim=1)
else:
logit_pos = torch.mul(logits, pos_labels).sum(dim=1)
logit_neg = torch.mul(logits, neg_labels).sum(dim=1)
self.loss = -torch.log((logit_pos) / (logit_neg + logit_pos)).sum() / batch_size# - torch.log((d_pos.sum() / torch.sum(pos_labels == 1)).pow(2))
return self.loss#, self.t
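# A minimal usage sketch for CurricularNCE_loss (shapes are illustrative; a CUDA
# device is assumed because the label masks are moved to .cuda() inside forward):
#   criterion = CurricularNCE_loss(gamma=1.0, keep_weight=0.0, T=0.1).cuda()
#   f1 = torch.randn(8, 128).cuda()            # query features
#   f2 = torch.randn(8, 128).cuda()            # key features
#   labels = torch.randint(0, 4, (8,)).cuda()  # class ids defining positive pairs
#   loss = criterion(f1, f2, labels)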
class CurricularNCE_loss2(nn.Module):
"""
NCE loss.
By gqc
"""
def __init__(self, gamma, keep_weight, T=0.07, mlp=False):
"""
gamma: focal exponent used to weight hard positive/negative pairs
keep_weight: base weight added to the focal weights (updated in forward from the running positive-similarity estimate)
T: softmax temperature (default: 0.07)
"""
super(CurricularNCE_loss2, self).__init__()
self.gamma = gamma
self.T = T
self.register_buffer('t', torch.zeros(1))
self.alpha = 0.01
self.loss = 0
self.sigmoid = nn.Sigmoid()
self.keep_weight = 0
# self.relu = nn.ReLU(inplace=False)
@torch.no_grad()
def del_tensor_ele(self, tensor, dim, index):
"""
Delete an element from tensor
tensor: source tensor
dim: The dimension in which the element resides
index: the index of the element
"""
return tensor[torch.arange(tensor.size(dim))!=index]
@torch.no_grad()
def get_negatives(self, feature1, feature2, labels, index):
"""
get negative samples from batch
"""
neg_sample1 = feature1
neg_sample2 = feature2
for j in range(0, labels.shape[0]):
if labels[j] == labels[index]:
neg_sample1 = self.del_tensor_ele(tensor=neg_sample1, dim=0, index=j)
neg_sample2 = self.del_tensor_ele(tensor=neg_sample2, dim=0, index=j)
neg_samples = torch.cat([neg_sample1, neg_sample2], dim=0)
add_tensor = torch.zeros(1,neg_samples.shape[1]).cuda()
while neg_samples.shape[0] < (labels.shape[0]-1)*2:
add_tensor[0] = neg_samples[np.random.randint(0, neg_samples.shape[0])]
neg_samples = torch.cat([neg_samples, add_tensor], dim=0)
return neg_samples
# '''
def forward(self, feature1, feature2, labels):
"""
Input:
feature1: a batch of query image features
feature2: a batch of key image features
labels: a batch of class labels defining positive pairs
Output:
the scalar loss and the current keep_weight
"""
feature1 = nn.functional.normalize(feature1, dim=1)
feature2 = nn.functional.normalize(feature2, dim=1)
batch_size = feature1.shape[0]
# feature similarity
similarity = torch.einsum('nc,kc->nk', [feature1, feature2])#nan
similarity = similarity.clamp(-1, 1)
logits = similarity / self.T
logits = torch.exp(logits)  # this can also produce NaN
labels_temp = labels.unsqueeze(1)
pos_labels = torch.eq(labels_temp, labels_temp.T).long().cuda()  # positive-pair label mask
neg_labels = torch.eq(pos_labels, 0).long().cuda()
pos_labels_mask = (pos_labels == 1)
target_similarity = similarity[pos_labels_mask]  # pick out the positive-pair cosines
with torch.no_grad():  # estimate the positive cosine score
self.t = target_similarity.mean() * self.alpha + (1-self.alpha) * self.t
self.keep_weight = torch.ones(1).cuda() - self.t
mask_pos = pos_labels.clone().float().cuda()  # positive-sample mask
mask_neg = neg_labels.clone().float().cuda()
mask_pos *= self.keep_weight  # scale the label masks by keep_weight
mask_neg *= self.keep_weight
# self.loss = criteria(logits, new_labels)
# product of similarity and the label masks
sim_pos = torch.mul(similarity, pos_labels)
sim_neg = torch.mul(similarity, neg_labels)
# difficulty of positive/negative samples (used as weights)
diff_pos = pos_labels - sim_pos
diff_neg = sim_neg
diff_neg = diff_neg.clamp(0, 1)
# positive/negative sample weights
diff_pos = torch.pow(diff_pos, self.gamma)  # a fractional exponent is fine here
diff_neg = torch.pow(diff_neg, self.gamma)
# diff_pos = torch.mul(diff_pos, pos_labels)  # keep diff aligned with positive/negative samples
# diff_neg = torch.mul(diff_neg, neg_labels)
diff_pos += mask_pos
diff_neg += mask_neg
l_pos = torch.mul(logits, diff_pos).sum(dim=1) #+ float(1e-8)
l_neg = torch.mul(logits, diff_neg).sum(dim=1) #+ float(1e-8)
self.loss = -torch.log((l_pos) / (l_neg + l_pos)).sum() / batch_size# - torch.log((d_pos.sum() / torch.sum(pos_labels == 1)).pow(2))
return self.loss, self.keep_weight
if __name__ == '__main__':
args = params.get_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
# loss = NCE_Loss().cuda()
loss = CurricularNCE_loss2(gamma=1, keep_weight=0, T=0.1).cuda()
feature1 = torch.tensor([[1.,2.,3.], [1.,-9.,3.], [3.,2.,-3.]]).cuda()
feature2 = torch.tensor([[1.,2.,4.], [1.,-9.,6.], [4.,2.,-3.]]).cuda()
label = torch.tensor([1, 2, 3]).cuda()
outloss, keep_wei = loss(feature1, feature2, label)
print(outloss)
| 41.272727
| 153
| 0.596366
|
95dfdbd40ffee46635f691e95e8d83c02de23b8e
| 14,225
|
py
|
Python
|
ansible-container/openshift-deploy/roles/ansible.kubernetes-modules/library/openshift_v1_user.py
|
LeHack/Docker-network-research
|
62a57a6d723d8701a6d045a07a5abd2bd844a409
|
[
"Beerware"
] | 4
|
2017-06-03T20:46:07.000Z
|
2017-12-19T02:15:00.000Z
|
ansible-container/k8s-deploy/roles/ansible.kubernetes-modules/library/openshift_v1_user.py
|
LeHack/Docker-network-research
|
62a57a6d723d8701a6d045a07a5abd2bd844a409
|
[
"Beerware"
] | 1
|
2017-06-03T20:32:37.000Z
|
2017-06-03T20:32:37.000Z
|
ansible-container/openshift-deploy/roles/ansible.kubernetes-modules/library/openshift_v1_user.py
|
LeHack/Docker-network-research
|
62a57a6d723d8701a6d045a07a5abd2bd844a409
|
[
"Beerware"
] | null | null | null |
#!/usr/bin/env python
from ansible.module_utils.openshift_common import OpenShiftAnsibleModule, OpenShiftAnsibleException
DOCUMENTATION = '''
module: openshift_v1_user
short_description: OpenShift User
description:
- Manage the lifecycle of a user object. Supports check mode, and attempts to be
idempotent.
version_added: 2.3.0
author: OpenShift (@openshift)
options:
annotations:
description:
- Annotations is an unstructured key value map stored with a resource that may
be set by external tools to store and retrieve arbitrary metadata. They are
not queryable and should be preserved when modifying objects.
type: dict
api_key:
description:
- Token used to connect to the API.
cert_file:
description:
- Path to a certificate used to authenticate with the API.
type: path
context:
description:
- The name of a context found in the Kubernetes config file.
debug:
description:
- Enable debug output from the OpenShift helper. Logging info is written to KubeObjHelper.log
default: false
type: bool
force:
description:
- If set to C(True), and I(state) is C(present), an existing object will updated,
and lists will be replaced, rather than merged.
default: false
type: bool
full_name:
description:
- FullName is the full name of user
groups:
description:
- Groups specifies group names this user is a member of. This field is deprecated
and will be removed in a future release. Instead, create a Group object containing
the name of this User.
type: list
host:
description:
- Provide a URL for accessing the Kubernetes API.
identities:
description:
- Identities are the identities associated with this user
type: list
key_file:
description:
- Path to a key file used to authenticate with the API.
type: path
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the openshift client will attempt to load the default
configuration file from I(~/.kube/config.json).
type: path
labels:
description:
- Map of string keys and values that can be used to organize and categorize (scope
and select) objects. May match selectors of replication controllers and services.
type: dict
name:
description:
- Name must be unique within a namespace. Is required when creating resources,
although some resources may allow a client to request the generation of an appropriate
name automatically. Name is primarily intended for creation idempotence and
configuration definition. Cannot be updated.
namespace:
description:
- Namespace defines the space within which each name must be unique. An empty namespace
is equivalent to the "default" namespace, but "default" is the canonical representation.
Not all objects are required to be scoped to a namespace - the value of this
field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated.
password:
description:
- Provide a password for connecting to the API. Use in conjunction with I(username).
resource_definition:
description:
- Provide the YAML definition for the object, bypassing any modules parameters
intended to define object attributes.
type: dict
src:
description:
- Provide a path to a file containing the YAML definition of the object. Mutually
exclusive with I(resource_definition).
type: path
ssl_ca_cert:
description:
- Path to a CA certificate used to authenticate with the API.
type: path
state:
description:
- Determines if an object should be created, patched, or deleted. When set to
C(present), the object will be created, if it does not exist, or patched, if
parameter values differ from the existing object's attributes, and deleted,
if set to C(absent). A patch operation results in merging lists and updating
dictionaries, with lists being merged into a unique set of values. If a list
contains a dictionary with a I(name) or I(type) attribute, a strategic merge
is performed, where individual elements with a matching I(name_) or I(type)
are merged. To force the replacement of lists, set the I(force) option to C(True).
default: present
choices:
- present
- absent
username:
description:
- Provide a username for connecting to the API.
verify_ssl:
description:
- Whether or not to verify the API server's SSL certificates.
type: bool
requirements:
- openshift == 1.0.0-snapshot
'''
EXAMPLES = '''
'''
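# The generated EXAMPLES block above is empty; a hedged playbook sketch using only
# options listed in DOCUMENTATION (all values are placeholders):
#
# - name: Ensure an OpenShift user exists
#   openshift_v1_user:
#     name: jdoe
#     full_name: Jane Doe
#     state: present
#     host: https://openshift.example.com:8443
#     api_key: "{{ openshift_token }}"
#     verify_ssl: no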
RETURN = '''
api_version:
type: string
description: Requested API version
user:
type: complex
returned: when I(state) = C(present)
contains:
api_version:
description:
- APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
type: str
full_name:
description:
- FullName is the full name of user
type: str
groups:
description:
- Groups specifies group names this user is a member of. This field is deprecated
and will be removed in a future release. Instead, create a Group object containing
the name of this User.
type: list
contains: str
identities:
description:
- Identities are the identities associated with this user
type: list
contains: str
kind:
description:
- Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to. Cannot
be updated. In CamelCase.
type: str
metadata:
description:
- Standard object's metadata.
type: complex
contains:
annotations:
description:
- Annotations is an unstructured key value map stored with a resource that
may be set by external tools to store and retrieve arbitrary metadata.
They are not queryable and should be preserved when modifying objects.
type: complex
contains: str, str
cluster_name:
description:
- The name of the cluster which the object belongs to. This is used to distinguish
resources with same name and namespace in different clusters. This field
is not set anywhere right now and apiserver is going to ignore it if set
in create or update request.
type: str
creation_timestamp:
description:
- CreationTimestamp is a timestamp representing the server time when this
object was created. It is not guaranteed to be set in happens-before order
across separate operations. Clients may not set this value. It is represented
in RFC3339 form and is in UTC. Populated by the system. Read-only. Null
for lists.
type: complex
contains: {}
deletion_grace_period_seconds:
description:
- Number of seconds allowed for this object to gracefully terminate before
it will be removed from the system. Only set when deletionTimestamp is
also set. May only be shortened. Read-only.
type: int
deletion_timestamp:
description:
- DeletionTimestamp is RFC 3339 date and time at which this resource will
be deleted. This field is set by the server when a graceful deletion is
requested by the user, and is not directly settable by a client. The resource
is expected to be deleted (no longer visible from resource lists, and
not reachable by name) after the time in this field. Once set, this value
may not be unset or be set further into the future, although it may be
shortened or the resource may be deleted prior to this time. For example,
a user may request that a pod is deleted in 30 seconds. The Kubelet will
react by sending a graceful termination signal to the containers in the
pod. After that 30 seconds, the Kubelet will send a hard termination signal
(SIGKILL) to the container and after cleanup, remove the pod from the
API. In the presence of network partitions, this object may still exist
after this timestamp, until an administrator or automated process can
determine the resource is fully terminated. If not set, graceful deletion
of the object has not been requested. Populated by the system when a graceful
deletion is requested. Read-only.
type: complex
contains: {}
finalizers:
description:
- Must be empty before the object is deleted from the registry. Each entry
is an identifier for the responsible component that will remove the entry
from the list. If the deletionTimestamp of the object is non-nil, entries
in this list can only be removed.
type: list
contains: str
generate_name:
description:
- GenerateName is an optional prefix, used by the server, to generate a
unique name ONLY IF the Name field has not been provided. If this field
is used, the name returned to the client will be different than the name
passed. This value will also be combined with a unique suffix. The provided
value has the same validation rules as the Name field, and may be truncated
by the length of the suffix required to make the value unique on the server.
If this field is specified and the generated name exists, the server will
NOT return a 409 - instead, it will either return 201 Created or 500 with
Reason ServerTimeout indicating a unique name could not be found in the
time allotted, and the client should retry (optionally after the time
indicated in the Retry-After header). Applied only if Name is not specified.
type: str
generation:
description:
- A sequence number representing a specific generation of the desired state.
Populated by the system. Read-only.
type: int
labels:
description:
- Map of string keys and values that can be used to organize and categorize
(scope and select) objects. May match selectors of replication controllers
and services.
type: complex
contains: str, str
name:
description:
- Name must be unique within a namespace. Is required when creating resources,
although some resources may allow a client to request the generation of
an appropriate name automatically. Name is primarily intended for creation
idempotence and configuration definition. Cannot be updated.
type: str
namespace:
description:
          - Namespace defines the space within which each name must be unique. An empty
namespace is equivalent to the "default" namespace, but "default" is the
canonical representation. Not all objects are required to be scoped to
a namespace - the value of this field for those objects will be empty.
Must be a DNS_LABEL. Cannot be updated.
type: str
owner_references:
description:
- List of objects depended by this object. If ALL objects in the list have
been deleted, this object will be garbage collected. If this object is
managed by a controller, then an entry in this list will point to this
controller, with the controller field set to true. There cannot be more
than one managing controller.
type: list
contains:
api_version:
description:
- API version of the referent.
type: str
controller:
description:
- If true, this reference points to the managing controller.
type: bool
kind:
description:
- Kind of the referent.
type: str
name:
description:
- Name of the referent.
type: str
uid:
description:
- UID of the referent.
type: str
resource_version:
description:
- An opaque value that represents the internal version of this object that
can be used by clients to determine when objects have changed. May be
used for optimistic concurrency, change detection, and the watch operation
on a resource or set of resources. Clients must treat these values as
opaque and passed unmodified back to the server. They may only be valid
for a particular resource or set of resources. Populated by the system.
            Read-only. Value must be treated as opaque by clients and passed unmodified back to the server.
type: str
self_link:
description:
- SelfLink is a URL representing this object. Populated by the system. Read-only.
type: str
uid:
description:
- UID is the unique in time and space value for this object. It is typically
generated by the server on successful creation of a resource and is not
allowed to change on PUT operations. Populated by the system. Read-only.
type: str
'''
def main():
try:
module = OpenShiftAnsibleModule('user', 'V1')
except OpenShiftAnsibleException as exc:
# The helper failed to init, so there is no module object. All we can do is raise the error.
raise Exception(exc.message)
try:
module.execute_module()
except OpenShiftAnsibleException as exc:
module.fail_json(msg="Module failed!", error=str(exc))
if __name__ == '__main__':
main()
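# A minimal sketch (not this module's actual implementation, which lives in the
# openshift helper library) of the strategic list merge described in the I(state)
# documentation above: list items that are dictionaries are matched on their
# 'name' (or 'type') key and merged, everything else is combined into a unique
# set of values. The helper name and sample data are illustrative only.
def strategic_merge_list(current, desired, key='name'):
    merged = {item[key]: dict(item) for item in current if isinstance(item, dict) and key in item}
    extras = [item for item in current if not (isinstance(item, dict) and key in item)]
    for item in desired:
        if isinstance(item, dict) and key in item:
            merged.setdefault(item[key], {}).update(item)
        elif item not in extras:
            extras.append(item)
    return list(merged.values()) + extras
# Example: merging container-like lists by name keeps one entry per name.
# strategic_merge_list([{'name': 'web', 'image': 'v1'}],
#                      [{'name': 'web', 'image': 'v2'}, {'name': 'db', 'image': 'pg'}])
# -> [{'name': 'web', 'image': 'v2'}, {'name': 'db', 'image': 'pg'}]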
| 42.462687
| 100
| 0.665308
|
542a8ccfc0ccbf8bcb884f32af2a58046a62f54e
| 2,510
|
py
|
Python
|
ThunkLibs/Generators/libXfixes.py
|
phire/FEX
|
a721257cdd787bd641875ca8e138809aaad17e0c
|
[
"MIT"
] | null | null | null |
ThunkLibs/Generators/libXfixes.py
|
phire/FEX
|
a721257cdd787bd641875ca8e138809aaad17e0c
|
[
"MIT"
] | null | null | null |
ThunkLibs/Generators/libXfixes.py
|
phire/FEX
|
a721257cdd787bd641875ca8e138809aaad17e0c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
from ThunkHelpers import *
lib("libXfixes")
fn("const char* XFixesGetCursorName(Display*, Cursor, Atom*)")
fn("int XFixesQueryExtension(Display*, int*, int*)")
fn("int XFixesQueryVersion(Display*, int*, int*)")
fn("int XFixesVersion()")
fn("PointerBarrier XFixesCreatePointerBarrier(Display*, Window, int, int, int, int, int, int, int*)")
fn("void XFixesChangeCursorByName(Display*, Cursor, const char*)")
fn("void XFixesChangeCursor(Display*, Cursor, Cursor)")
fn("void XFixesChangeSaveSet(Display*, Window, int, int, int)")
fn("void XFixesCopyRegion(Display*, XserverRegion, XserverRegion)")
fn("void XFixesDestroyPointerBarrier(Display*, PointerBarrier)")
fn("void XFixesDestroyRegion(Display*, XserverRegion)")
fn("void XFixesExpandRegion(Display*, XserverRegion, XserverRegion, unsigned int, unsigned int, unsigned int, unsigned int)")
fn("void XFixesHideCursor(Display*, Window)")
fn("void XFixesIntersectRegion(Display*, XserverRegion, XserverRegion, XserverRegion)")
fn("void XFixesInvertRegion(Display*, XserverRegion, XRectangle*, XserverRegion)")
fn("void XFixesRegionExtents(Display*, XserverRegion, XserverRegion)")
fn("void XFixesSelectCursorInput(Display*, Window, long unsigned int)")
fn("void XFixesSelectSelectionInput(Display*, Window, Atom, long unsigned int)")
fn("void XFixesSetCursorName(Display*, Cursor, const char*)")
fn("void XFixesSetGCClipRegion(Display*, GC, int, int, XserverRegion)")
fn("void XFixesSetPictureClipRegion(Display*, XID, int, int, XserverRegion)")
fn("void XFixesSetRegion(Display*, XserverRegion, XRectangle*, int)")
fn("void XFixesSetWindowShapeRegion(Display*, Window, int, int, int, XserverRegion)")
fn("void XFixesShowCursor(Display*, Window)")
fn("void XFixesSubtractRegion(Display*, XserverRegion, XserverRegion, XserverRegion)")
fn("void XFixesTranslateRegion(Display*, XserverRegion, int, int)")
fn("void XFixesUnionRegion(Display*, XserverRegion, XserverRegion, XserverRegion)")
fn("XFixesCursorImage* XFixesGetCursorImage(Display*)")
fn("XRectangle* XFixesFetchRegionAndBounds(Display*, XserverRegion, int*, XRectangle*)")
fn("XRectangle* XFixesFetchRegion(Display*, XserverRegion, int*)")
fn("XserverRegion XFixesCreateRegion(Display*, XRectangle*, int)")
fn("XserverRegion XFixesCreateRegionFromBitmap(Display*, Pixmap)")
fn("XserverRegion XFixesCreateRegionFromGC(Display*, GC)")
fn("XserverRegion XFixesCreateRegionFromPicture(Display*, XID)")
fn("XserverRegion XFixesCreateRegionFromWindow(Display*, Window, int)")
Generate()
| 58.372093
| 125
| 0.786056
|
a82781a981feb2ab404af24debbc2711b15a0355
| 7,713
|
py
|
Python
|
gui/stdio.py
|
stashpayio/electrum-stash
|
a04e1fde408196e547cf80f8ce9d9391133bd865
|
[
"MIT"
] | null | null | null |
gui/stdio.py
|
stashpayio/electrum-stash
|
a04e1fde408196e547cf80f8ce9d9391133bd865
|
[
"MIT"
] | null | null | null |
gui/stdio.py
|
stashpayio/electrum-stash
|
a04e1fde408196e547cf80f8ce9d9391133bd865
|
[
"MIT"
] | null | null | null |
from decimal import Decimal
_ = lambda x:x
#from i18n import _
from electrum_dash import WalletStorage, Wallet
from electrum_dash.util import format_satoshis, set_verbosity
from electrum_dash.bitcoin import is_valid, COIN, TYPE_ADDRESS
from electrum_dash.network import filter_protocol
import sys, getpass, datetime
# minimal fdisk-like gui for console usage
# written by rofl0r, with some bits stolen from the text gui (ncurses)
class ElectrumGui:
def __init__(self, config, daemon, plugins):
self.config = config
self.network = daemon.network
storage = WalletStorage(config.get_wallet_path())
if not storage.file_exists:
print "Wallet not found. try 'electrum-dash create'"
exit()
if storage.is_encrypted():
password = getpass.getpass('Password:', stream=None)
storage.decrypt(password)
self.done = 0
self.last_balance = ""
set_verbosity(False)
self.str_recipient = ""
self.str_description = ""
self.str_amount = ""
self.str_fee = ""
self.wallet = Wallet(storage)
self.wallet.start_threads(self.network)
self.contacts = self.wallet.contacts
self.network.register_callback(self.on_network, ['updated', 'banner'])
self.commands = [_("[h] - displays this help text"), \
_("[i] - display transaction history"), \
_("[o] - enter payment order"), \
_("[p] - print stored payment order"), \
_("[s] - send stored payment order"), \
_("[r] - show own receipt addresses"), \
_("[c] - display contacts"), \
_("[b] - print server banner"), \
_("[q] - quit") ]
self.num_commands = len(self.commands)
def on_network(self, event, *args):
if event == 'updated':
self.updated()
elif event == 'banner':
self.print_banner()
def main_command(self):
self.print_balance()
c = raw_input("enter command: ")
if c == "h" : self.print_commands()
elif c == "i" : self.print_history()
elif c == "o" : self.enter_order()
elif c == "p" : self.print_order()
elif c == "s" : self.send_order()
elif c == "r" : self.print_addresses()
elif c == "c" : self.print_contacts()
elif c == "b" : self.print_banner()
elif c == "n" : self.network_dialog()
elif c == "e" : self.settings_dialog()
elif c == "q" : self.done = 1
else: self.print_commands()
def updated(self):
s = self.get_balance()
if s != self.last_balance:
print(s)
self.last_balance = s
return True
def print_commands(self):
self.print_list(self.commands, "Available commands")
def print_history(self):
width = [20, 40, 14, 14]
delta = (80 - sum(width) - 4)/3
format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%" \
+ "%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
b = 0
messages = []
for item in self.wallet.get_history():
tx_hash, confirmations, value, timestamp, balance = item
if confirmations:
try:
time_str = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
except Exception:
time_str = "unknown"
else:
time_str = 'unconfirmed'
label = self.wallet.get_label(tx_hash)
messages.append( format_str%( time_str, label, format_satoshis(value, whitespaces=True), format_satoshis(balance, whitespaces=True) ) )
self.print_list(messages[::-1], format_str%( _("Date"), _("Description"), _("Amount"), _("Balance")))
def print_balance(self):
print(self.get_balance())
def get_balance(self):
if self.wallet.network.is_connected():
if not self.wallet.up_to_date:
msg = _( "Synchronizing..." )
else:
c, u, x = self.wallet.get_balance()
msg = _("Balance")+": %f "%(Decimal(c) / COIN)
if u:
msg += " [%f unconfirmed]"%(Decimal(u) / COIN)
if x:
msg += " [%f unmatured]"%(Decimal(x) / COIN)
else:
msg = _( "Not connected" )
return(msg)
def print_contacts(self):
messages = map(lambda x: "%20s %45s "%(x[0], x[1][1]), self.contacts.items())
self.print_list(messages, "%19s %25s "%("Key", "Value"))
def print_addresses(self):
messages = map(lambda addr: "%30s %30s "%(addr, self.wallet.labels.get(addr,"")), self.wallet.get_addresses())
self.print_list(messages, "%19s %25s "%("Address", "Label"))
def print_order(self):
print("send order to " + self.str_recipient + ", amount: " + self.str_amount \
+ "\nfee: " + self.str_fee + ", desc: " + self.str_description)
def enter_order(self):
self.str_recipient = raw_input("Pay to: ")
self.str_description = raw_input("Description : ")
self.str_amount = raw_input("Amount: ")
self.str_fee = raw_input("Fee: ")
def send_order(self):
self.do_send()
def print_banner(self):
for i, x in enumerate( self.wallet.network.banner.split('\n') ):
print( x )
def print_list(self, list, firstline):
self.maxpos = len(list)
if not self.maxpos: return
print(firstline)
for i in range(self.maxpos):
msg = list[i] if i < len(list) else ""
print(msg)
def main(self):
while self.done == 0: self.main_command()
def do_send(self):
if not is_valid(self.str_recipient):
print(_('Invalid Dash address'))
return
try:
amount = int(Decimal(self.str_amount) * COIN)
except Exception:
print(_('Invalid Amount'))
return
try:
fee = int(Decimal(self.str_fee) * COIN)
except Exception:
print(_('Invalid Fee'))
return
if self.wallet.use_encryption:
password = self.password_dialog()
if not password:
return
else:
password = None
c = ""
while c != "y":
c = raw_input("ok to send (y/n)?")
if c == "n": return
try:
tx = self.wallet.mktx([(TYPE_ADDRESS, self.str_recipient, amount)], password, self.config, fee)
except Exception as e:
print(str(e))
return
if self.str_description:
self.wallet.labels[tx.hash()] = self.str_description
print(_("Please wait..."))
status, msg = self.network.broadcast(tx)
if status:
print(_('Payment sent.'))
#self.do_clear()
#self.update_contacts_tab()
else:
print(_('Error'))
def network_dialog(self):
print("use 'electrum-dash setconfig server/proxy' to change your network settings")
return True
def settings_dialog(self):
print("use 'electrum-dash setconfig' to change your settings")
return True
def password_dialog(self):
return getpass.getpass()
# XXX unused
def run_receive_tab(self, c):
#if c == 10:
# out = self.run_popup('Address', ["Edit label", "Freeze", "Prioritize"])
return
def run_contacts_tab(self, c):
pass
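# Illustrative arithmetic for the amount/fee handling in do_send() and the balance
# formatting in get_balance() above: user input is a decimal coin amount scaled to
# integer base units with COIN (10**8 in Electrum); balances are scaled back down.
# The sample values below are made up.
_example_amount = int(Decimal("0.5") * COIN)                        # 50000000 base units
_example_fee = int(Decimal("0.0001") * COIN)                        # 10000 base units
_example_msg = "Balance: %f" % (Decimal(_example_amount) / COIN)    # 'Balance: 0.500000'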
| 33.103004
| 147
| 0.543757
|
d41dc2e596e6c6de44b2706bf4178151afc1d314
| 3,356
|
py
|
Python
|
stevedore/tests/test_named.py
|
jaraco/stevedore
|
8846a3f24a65df82f48d724b3b49b8ac8f135dcd
|
[
"Apache-2.0"
] | 133
|
2015-01-29T20:10:51.000Z
|
2022-03-11T18:29:01.000Z
|
stevedore/tests/test_named.py
|
jaraco/stevedore
|
8846a3f24a65df82f48d724b3b49b8ac8f135dcd
|
[
"Apache-2.0"
] | 4
|
2016-01-05T20:56:25.000Z
|
2021-08-30T06:16:31.000Z
|
virtual/lib/python3.6/site-packages/stevedore/tests/test_named.py
|
Mercy-Njoroge/blog
|
404336fb0fc8d172ddde8b744042cb3f37d89c65
|
[
"MIT"
] | 39
|
2015-04-29T11:05:00.000Z
|
2021-12-02T16:55:51.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from stevedore import named
from stevedore.tests import utils
class TestNamed(utils.TestCase):
def test_named(self):
em = named.NamedExtensionManager(
'stevedore.test.extension',
names=['t1'],
invoke_on_load=True,
invoke_args=('a',),
invoke_kwds={'b': 'B'},
)
actual = em.names()
self.assertEqual(actual, ['t1'])
def test_enabled_before_load(self):
# Set up the constructor for the FauxExtension to cause an
# AssertionError so the test fails if the class is instantiated,
# which should only happen if it is loaded before the name of the
# extension is compared against the names that should be loaded by
# the manager.
init_name = 'stevedore.tests.test_extension.FauxExtension.__init__'
with mock.patch(init_name) as m:
m.side_effect = AssertionError
em = named.NamedExtensionManager(
'stevedore.test.extension',
# Look for an extension that does not exist so the
# __init__ we mocked should never be invoked.
names=['no-such-extension'],
invoke_on_load=True,
invoke_args=('a',),
invoke_kwds={'b': 'B'},
)
actual = em.names()
self.assertEqual(actual, [])
def test_extensions_listed_in_name_order(self):
# Since we don't know the "natural" order of the extensions, run
# the test both ways: if the sorting is broken, one of them will
# fail
em = named.NamedExtensionManager(
'stevedore.test.extension',
names=['t1', 't2'],
name_order=True
)
actual = em.names()
self.assertEqual(actual, ['t1', 't2'])
em = named.NamedExtensionManager(
'stevedore.test.extension',
names=['t2', 't1'],
name_order=True
)
actual = em.names()
self.assertEqual(actual, ['t2', 't1'])
def test_load_fail_ignored_when_sorted(self):
em = named.NamedExtensionManager(
'stevedore.test.extension',
names=['e1', 't2', 'e2', 't1'],
name_order=True,
invoke_on_load=True,
invoke_args=('a',),
invoke_kwds={'b': 'B'},
)
actual = em.names()
self.assertEqual(['t2', 't1'], actual)
em = named.NamedExtensionManager(
'stevedore.test.extension',
names=['e1', 't1'],
name_order=False,
invoke_on_load=True,
invoke_args=('a',),
invoke_kwds={'b': 'B'},
)
actual = em.names()
self.assertEqual(['t1'], actual)
| 35.702128
| 76
| 0.585221
|
63b789dc7a8daa83faf04efd18070a3d7726e98a
| 2,209
|
py
|
Python
|
unitypack/environment.py
|
garysheffield19/123
|
f7aff28ec5cc75383a9ffc20390a6f3afa6197f8
|
[
"MIT"
] | null | null | null |
unitypack/environment.py
|
garysheffield19/123
|
f7aff28ec5cc75383a9ffc20390a6f3afa6197f8
|
[
"MIT"
] | null | null | null |
unitypack/environment.py
|
garysheffield19/123
|
f7aff28ec5cc75383a9ffc20390a6f3afa6197f8
|
[
"MIT"
] | 1
|
2019-09-04T06:32:02.000Z
|
2019-09-04T06:32:02.000Z
|
import os
from urllib.parse import urlparse
from .asset import Asset
from .assetbundle import AssetBundle
class UnityEnvironment:
def __init__(self, base_path=""):
self.bundles = {}
self.assets = {}
self.base_path = base_path
def __repr__(self):
return "%s(base_path=%r)" % (self.__class__.__name__, self.base_path)
def load(self, file):
for bundle in self.bundles.values():
if os.path.abspath(file.name) == os.path.abspath(bundle.path):
return bundle
ret = AssetBundle(self)
ret.load(file)
self.bundles[ret.name.lower()] = ret
for asset in ret.assets:
self.assets[asset.name.lower()] = asset
return ret
def discover(self, name):
for bundle in list(self.bundles.values()):
dirname = os.path.dirname(os.path.abspath(bundle.path))
for filename in os.listdir(dirname):
basename = os.path.splitext(os.path.basename(filename))[0]
if name.lower() == "cab-" + basename.lower():
f = open(os.path.join(dirname, filename), "rb")
self.load(f)
def get_asset_by_filename(self, name):
if name not in self.assets:
path = os.path.join(self.base_path, name)
if os.path.exists(path):
f = open(path, "rb")
self.assets[name] = Asset.from_file(f)
else:
self.discover(name)
self.populate_assets()
if name not in self.assets:
raise KeyError("No such asset: %r" % (name))
return self.assets[name]
def populate_assets(self):
for bundle in self.bundles.values():
for asset in bundle.assets:
asset_name = asset.name.lower()
if asset_name not in self.assets:
self.assets[asset_name] = asset
def get_asset(self, url):
if not url:
return None
u = urlparse(url)
if u.scheme == "archive":
archive, name = os.path.split(u.path.lstrip("/").lower())
else:
raise NotImplementedError("Unsupported scheme: %r" % (u.scheme))
if archive not in self.bundles:
self.discover(archive)
# Still didn't find it? Give up...
if archive not in self.bundles:
raise NotImplementedError("Cannot find %r in %r" % (archive, self.bundles))
bundle = self.bundles[archive]
for asset in bundle.assets:
if asset.name.lower() == name:
return asset
raise KeyError("No such asset: %r" % (name))
| 27.962025
| 79
| 0.67904
|
b351a96e183c27d48268200ac0b0edf131a3d8ff
| 3,094
|
py
|
Python
|
Finder/gitfinder.py
|
cyal1/GitTools
|
13b6f917cb9dec73019a04d6f866507018760de3
|
[
"MIT"
] | null | null | null |
Finder/gitfinder.py
|
cyal1/GitTools
|
13b6f917cb9dec73019a04d6f866507018760de3
|
[
"MIT"
] | null | null | null |
Finder/gitfinder.py
|
cyal1/GitTools
|
13b6f917cb9dec73019a04d6f866507018760de3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
'''
Finder is part of https://github.com/internetwache/GitTools
Developed and maintained by @gehaxelt from @internetwache
Use at your own risk. Usage might be illegal in certain circumstances.
Only for educational purposes!
'''
import argparse
from functools import partial
from multiprocessing import Pool
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
import sys
import requests
import ssl
import encodings.idna
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def findgitrepo(output_file, domains):
domain = ".".join(encodings.idna.ToASCII(label).decode("ascii") for label in domains.strip().split("."))
# try:
# # Try to download http://target.tld/.git/HEAD
# with urlopen(''.join(['http://', domain, '/.git/HEAD']), context=ssl._create_unverified_context(), timeout=5) as response:
# answer = response.read(200).decode('utf-8', 'ignore')
# except HTTPError:
# return
# except URLError:
# return
# except OSError:
# return
# except ConnectionResetError:
# return
# except ValueError:
# return
# except (KeyboardInterrupt, SystemExit):
# raise
try:
resp = requests.get(domain+"/.git/HEAD",verify=False,timeout=5,headers={"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36"})
except:
return
# Check if refs/heads is in the file
# print(domain,resp.text)
if 'refs/heads' not in resp.text:
return
# Write match to output_file
# print(domain)
with open(output_file, 'a') as file_handle:
file_handle.write(''.join([domain, '\n']))
print(''.join(['[*] Found: ', domain]))
def read_file(filename):
with open(filename) as file:
return file.readlines()
def main():
print("""
###########
# Finder is part of https://github.com/internetwache/GitTools
#
# Developed and maintained by @gehaxelt from @internetwache
#
# Use at your own risk. Usage might be illegal in certain circumstances.
# Only for educational purposes!
###########
""")
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--inputfile', default='input.txt', help='input file')
parser.add_argument('-o', '--outputfile', default='output.txt', help='output file')
parser.add_argument('-t', '--threads', default=200, help='threads')
args = parser.parse_args()
domain_file = args.inputfile
output_file = args.outputfile
try:
max_processes = int(args.threads)
except ValueError as err:
sys.exit(err)
try:
domains = read_file(domain_file)
except FileNotFoundError as err:
sys.exit(err)
fun = partial(findgitrepo, output_file)
print("Scanning...")
with Pool(processes=max_processes) as pool:
pool.map(fun, domains)
print("Finished")
if __name__ == '__main__':
main()
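# Usage sketch, with hypothetical file and host names. Note that, unlike the
# commented-out urlopen version above, the requests-based findgitrepo() expects
# every input line to already carry a scheme, because it requests
# domain + "/.git/HEAD" verbatim:
#
#   $ cat input.txt
#   https://example.com
#   https://sub.example.org
#   $ python3 gitfinder.py -i input.txt -o output.txt -t 50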
| 29.466667
| 217
| 0.670976
|
88efc69abdd4fe2ac5a529ebb1706d46ff6aab7d
| 803
|
py
|
Python
|
zdemo/manage.py
|
chenkaifeng-Li/test2
|
9c246ac746f65c7aad0c365e7e3f157ee566cc4b
|
[
"MIT"
] | null | null | null |
zdemo/manage.py
|
chenkaifeng-Li/test2
|
9c246ac746f65c7aad0c365e7e3f157ee566cc4b
|
[
"MIT"
] | null | null | null |
zdemo/manage.py
|
chenkaifeng-Li/test2
|
9c246ac746f65c7aad0c365e7e3f157ee566cc4b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zdemo.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 34.913043
| 77
| 0.641345
|
d1cba41f2744297c73cafc3f78fd7cc2ebb34aeb
| 1,280
|
py
|
Python
|
ssha512/__init__.py
|
michzimny/django-ssha512-hasher
|
7229f1643175cca60895f97780ecda472cb4c278
|
[
"MIT"
] | null | null | null |
ssha512/__init__.py
|
michzimny/django-ssha512-hasher
|
7229f1643175cca60895f97780ecda472cb4c278
|
[
"MIT"
] | null | null | null |
ssha512/__init__.py
|
michzimny/django-ssha512-hasher
|
7229f1643175cca60895f97780ecda472cb4c278
|
[
"MIT"
] | null | null | null |
from django.utils.crypto import get_random_string
from django.utils.translation import ugettext_noop as _
from django.contrib.auth.hashers import BasePasswordHasher
import base64
import hashlib
from collections import OrderedDict  # required by safe_summary() below
class SSHA512PasswordHasher(BasePasswordHasher):
algorithm = "ssha512"
def salt(self):
return get_random_string(8)
def encode(self, password, salt):
salt = str(salt)
base64_encoded = base64.encodestring(hashlib.sha512(password + salt).digest() + salt).replace('\n', '')
return 'ssha512${SSHA512}' + base64_encoded
def verify(self, password, encoded):
password = str(password)
encoded = str(encoded)
algorithm, data = encoded.split('$', 2)
assert algorithm == self.algorithm
assert data.startswith('{SSHA512}')
base64_decoded = base64.decodestring(data[9:])
assert len(base64_decoded) == 72
hashed_password_plus_salt = base64_decoded[:64]
salt = base64_decoded[64:]
return hashlib.sha512(password + salt).digest() == hashed_password_plus_salt
def safe_summary(self, encoded):
algorithm, data = encoded.split('$', 2)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
])
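# A standalone sketch of the SSHA512 layout produced by encode() above: the stored
# value is base64(sha512(password + salt).digest() + salt), i.e. a 64-byte digest
# followed by the salt. Sample password and salt bytes are made up, and explicit
# Python 3 bytes are used for clarity.
def _example_round_trip():
    password, salt = b"hunter2", b"12345678"
    digest = hashlib.sha512(password + salt).digest()                     # 64 bytes
    stored = "ssha512${SSHA512}" + base64.b64encode(digest + salt).decode()
    raw = base64.b64decode(stored.split("$", 1)[1][len("{SSHA512}"):])
    assert raw[:64] == hashlib.sha512(password + raw[64:]).digest()       # verify
    return stored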
| 33.684211
| 111
| 0.667969
|
41864424141f04b4657ea19325de2ebe78dcb330
| 2,401
|
py
|
Python
|
conf.py
|
NishithP2004/pslab-documentation
|
a1fff773b0f78fe59c3b5be6a6391d87c3a3ccda
|
[
"Apache-2.0"
] | null | null | null |
conf.py
|
NishithP2004/pslab-documentation
|
a1fff773b0f78fe59c3b5be6a6391d87c3a3ccda
|
[
"Apache-2.0"
] | null | null | null |
conf.py
|
NishithP2004/pslab-documentation
|
a1fff773b0f78fe59c3b5be6a6391d87c3a3ccda
|
[
"Apache-2.0"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sphinx_material
# -- Project information -----------------------------------------------------
project = 'PSLab'
html_title = 'Home'
copyright = '2019, FOSSASIA'
author = 'FOSSASIA'
master_doc = 'index'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['m2r', 'sphinx_material']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_material'
# Get the theme path
html_theme_path = sphinx_material.html_theme_path()
# Register the required helpers for the html context
html_context = sphinx_material.get_html_context()
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_sidebars = {
"**": ["globaltoc.html", "localtoc.html", "searchbox.html"]
}
html_theme_options = {
# Set the color and the accent color
'color_primary': 'red',
'color_accent': 'red',
'nav_title': 'Pocket Science Labs Documentation',
}
html_css_files = [
'css/styles.css'
]
| 32.445946
| 79
| 0.67222
|
cbff01f34c97b2895d5f23d11763955da14e813e
| 828
|
py
|
Python
|
Python/ques3.py
|
achintya219/Autumn-of-Automation
|
b88abc1946e9d5e7476637f97fba2591f5a1fd77
|
[
"MIT"
] | null | null | null |
Python/ques3.py
|
achintya219/Autumn-of-Automation
|
b88abc1946e9d5e7476637f97fba2591f5a1fd77
|
[
"MIT"
] | null | null | null |
Python/ques3.py
|
achintya219/Autumn-of-Automation
|
b88abc1946e9d5e7476637f97fba2591f5a1fd77
|
[
"MIT"
] | null | null | null |
import math
class Complex(object):
def __init__(self, real, imag=0.0):
self.real = real
self.imag = imag
def add(self, other):
return Complex(self.real + other.real, self.imag + other.imag)
def sub(self, other):
return Complex(self.real - other.real, self.imag - other.imag)
def mul(self, other):
return Complex(self.real*other.real - self.imag*other.imag, self.imag*other.real + self.real*other.imag)
def magnitude(self):
return math.sqrt(self.real*self.real + self.imag*self.imag)
def conjugate(self):
return Complex(self.real, -self.imag)
def display(self):
if self.imag >= 0:
print(self.real,"+",self.imag,"i\n")
else:
print(self.real,self.imag,"i\n")
a = Complex(1,2)
a.display()
a.conjugate().display()
b = Complex(2, -3)
b.display()
c = b.add(a)
c.display()
d = b.mul(a)
d.display()
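# Worked check of the identities behind magnitude() and conjugate() above:
# for z = 3 + 4i, |z| = sqrt(3*3 + 4*4) = 5 and z * conjugate(z) = |z|^2 = 25 + 0i.
z = Complex(3, 4)
print(z.magnitude())            # 5.0
z.mul(z.conjugate()).display()  # 25 + 0 i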
| 22.378378
| 106
| 0.676329
|
30f67b613988a53e4c7ffbb01d4d1658489a621c
| 3,040
|
py
|
Python
|
pgAdmin/pgadmin4/web/pgadmin/browser/server_groups/servers/databases/schemas/functions/tests/test_get_supported_functions.py
|
WeilerWebServices/PostgreSQL
|
ae594ed077bebbad1be3c1d95c38b7c2c2683e8c
|
[
"PostgreSQL"
] | null | null | null |
pgAdmin/pgadmin4/web/pgadmin/browser/server_groups/servers/databases/schemas/functions/tests/test_get_supported_functions.py
|
WeilerWebServices/PostgreSQL
|
ae594ed077bebbad1be3c1d95c38b7c2c2683e8c
|
[
"PostgreSQL"
] | null | null | null |
pgAdmin/pgadmin4/web/pgadmin/browser/server_groups/servers/databases/schemas/functions/tests/test_get_supported_functions.py
|
WeilerWebServices/PostgreSQL
|
ae594ed077bebbad1be3c1d95c38b7c2c2683e8c
|
[
"PostgreSQL"
] | null | null | null |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import uuid
from unittest.mock import patch
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression.python_test_utils import test_utils as utils
from . import utils as funcs_utils
from .. import FunctionView
class FunctionGetSupportedFunctionsTestCase(BaseTestGenerator):
""" This class get supported functions. """
scenarios = [
(
'Fetch Function supported functions',
dict(
url='/browser/function/get_support_functions/',
is_positive_test=True,
mocking_required=False,
mock_data={},
expected_data={
"status_code": 200
},
),
),
(
'Fetch Function support functions fail',
dict(
url='/browser/function/get_support_functions/',
is_positive_test=False,
mocking_required=True,
mock_data={
"function_name": 'pgadmin.utils.driver.psycopg2.'
'connection.Connection.execute_2darray',
"return_value": "(False, 'Mocked Internal Server Error "
"while get supported function')"
},
expected_data={
"status_code": 500
}
),
)
]
def get_supported_functions(self):
response = self.tester.get(
self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' + str(self.db_id) +
'/' + str(self.schema_id) + '/',
content_type='html/json'
)
return response
def runTest(self):
""" This function will get function nodes under schema. """
if self.server_information['server_version'] < 120000:
message = "Supported functions are not supported by PG/EPAS " \
"< 120000."
self.skipTest(message)
super(FunctionGetSupportedFunctionsTestCase, self).runTest()
self = funcs_utils.set_up(self)
if self.is_positive_test:
response = self.get_supported_functions()
else:
with patch(self.mock_data["function_name"],
return_value=eval(self.mock_data["return_value"])):
response = self.get_supported_functions()
self.assertEqual(response.status_code,
self.expected_data['status_code'])
# Disconnect the database
database_utils.disconnect_database(self, self.server_id, self.db_id)
| 35.348837
| 77
| 0.543421
|
431687560731b473c575d44918197b4549723f83
| 16,012
|
py
|
Python
|
log_casp_act/model_796.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_casp_act/model_796.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_casp_act/model_796.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('ParpC')
Monomer('Xiap', ['Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('C6A', ['C8pro'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 199000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C6A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C6A_obs', C6A())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
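# A minimal sketch of how an exported PySB model like this one is typically run
# (assumes pysb, numpy and scipy are installed; the time span and the observable
# printed below are illustrative, and every rate parameter above is still 1.0).
if __name__ == '__main__':
    import numpy as np
    from pysb.simulator import ScipyOdeSimulator
    tspan = np.linspace(0, 20000, 100)
    result = ScipyOdeSimulator(model, tspan=tspan).run()
    print(result.observables['ParpC_obs'][-1])   # cleaved PARP at the final time point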
| 84.273684
| 614
| 0.812266
|
2f1de9089e99ecc9383d05e98c8b1809316655ce
| 155
|
py
|
Python
|
cefiro_customizations/cefiro_customizations/doctype/bundle_transfer/test_bundle_transfer.py
|
saeedkola/cefiro_customizations
|
e7fcf92afaae37fb7d8abd49cdbd6328d18b9abb
|
[
"MIT"
] | null | null | null |
cefiro_customizations/cefiro_customizations/doctype/bundle_transfer/test_bundle_transfer.py
|
saeedkola/cefiro_customizations
|
e7fcf92afaae37fb7d8abd49cdbd6328d18b9abb
|
[
"MIT"
] | null | null | null |
cefiro_customizations/cefiro_customizations/doctype/bundle_transfer/test_bundle_transfer.py
|
saeedkola/cefiro_customizations
|
e7fcf92afaae37fb7d8abd49cdbd6328d18b9abb
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021, Element Labs and Contributors
# See license.txt
# import frappe
import unittest
class TestBundleTransfer(unittest.TestCase):
pass
| 17.222222
| 51
| 0.787097
|
7ff8000e77b5f0de86830657f810d8b4c71e15f6
| 581
|
py
|
Python
|
pyleecan/Methods/Mesh/MeshMat/get_node.py
|
tobsen2code/pyleecan
|
5b1ded9e389e0c79ed7b7c878b6e939f2d9962e9
|
[
"Apache-2.0"
] | 95
|
2019-01-23T04:19:45.000Z
|
2022-03-17T18:22:10.000Z
|
pyleecan/Methods/Mesh/MeshMat/get_node.py
|
ecs-kev/pyleecan
|
1faedde4b24acc6361fa1fdd4e980eaec4ca3a62
|
[
"Apache-2.0"
] | 366
|
2019-02-20T07:15:08.000Z
|
2022-03-31T13:37:23.000Z
|
pyleecan/Methods/Mesh/MeshMat/get_node.py
|
ecs-kev/pyleecan
|
1faedde4b24acc6361fa1fdd4e980eaec4ca3a62
|
[
"Apache-2.0"
] | 74
|
2019-01-24T01:47:31.000Z
|
2022-02-25T05:44:42.000Z
|
# -*- coding: utf-8 -*-
def get_node(self, indices=None):
"""Return a matrix of nodes coordinates.
Parameters
----------
self : Mesh
an Mesh object
indices : list
Indices of the targeted nodes. If None, return all.
is_indice: bool
Option to return the nodes indices (useful for unsorted
Returns
-------
coordinates: ndarray
nodes coordinates
indices : ndarray
nodes indices
"""
if indices is None:
return self.node.coordinate
else:
return self.node.get_coord(indices)
| 20.75
| 63
| 0.597246
|
f1c6b2d97621ee5bb2c1041801e6c73bb58d6289
| 17,002
|
py
|
Python
|
Lib/test/test_zipapp.py
|
dignissimus/cpython
|
17357108732c731d6ed4f2bd123ee6ba1ff6891b
|
[
"0BSD"
] | null | null | null |
Lib/test/test_zipapp.py
|
dignissimus/cpython
|
17357108732c731d6ed4f2bd123ee6ba1ff6891b
|
[
"0BSD"
] | 2
|
2021-12-01T15:01:15.000Z
|
2022-02-24T06:16:48.000Z
|
Lib/test/test_zipapp.py
|
sthagen/python-cpython
|
dfd438dfb2a0e299cd6ab166f203dfe9740868ae
|
[
"0BSD"
] | null | null | null |
"""Test harness for the zipapp module."""
import io
import pathlib
import stat
import sys
import tempfile
import unittest
import zipapp
import zipfile
from test.support import requires_zlib
from test.support import os_helper
from unittest.mock import patch
class ZipAppTest(unittest.TestCase):
"""Test zipapp module functionality."""
def setUp(self):
tmpdir = tempfile.TemporaryDirectory()
self.addCleanup(tmpdir.cleanup)
self.tmpdir = pathlib.Path(tmpdir.name)
def test_create_archive(self):
# Test packing a directory.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target))
self.assertTrue(target.is_file())
def test_create_archive_with_pathlib(self):
# Test packing a directory using Path objects for source and target.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(source, target)
self.assertTrue(target.is_file())
def test_create_archive_with_subdirs(self):
# Test packing a directory includes entries for subdirectories.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
(source / 'foo').mkdir()
(source / 'bar').mkdir()
(source / 'foo' / '__init__.py').touch()
target = io.BytesIO()
zipapp.create_archive(str(source), target)
target.seek(0)
with zipfile.ZipFile(target, 'r') as z:
self.assertIn('foo/', z.namelist())
self.assertIn('bar/', z.namelist())
def test_create_sorted_archive(self):
# Test that zipapps order their files by name
source = self.tmpdir / 'source'
source.mkdir()
(source / 'zed.py').touch()
(source / 'bin').mkdir()
(source / 'bin' / 'qux').touch()
(source / 'bin' / 'baz').touch()
(source / '__main__.py').touch()
target = io.BytesIO()
zipapp.create_archive(str(source), target)
target.seek(0)
with zipfile.ZipFile(target, 'r') as zf:
self.assertEqual(zf.namelist(),
["__main__.py", "bin/", "bin/baz", "bin/qux", "zed.py"])
def test_create_archive_with_filter(self):
# Test packing a directory and using filter to specify
# which files to include.
def skip_pyc_files(path):
return path.suffix != '.pyc'
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
(source / 'test.py').touch()
(source / 'test.pyc').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(source, target, filter=skip_pyc_files)
with zipfile.ZipFile(target, 'r') as z:
self.assertIn('__main__.py', z.namelist())
self.assertIn('test.py', z.namelist())
self.assertNotIn('test.pyc', z.namelist())
def test_create_archive_filter_exclude_dir(self):
# Test packing a directory and using a filter to exclude a
# subdirectory (ensures that the path supplied to include
# is relative to the source location, as expected).
def skip_dummy_dir(path):
return path.parts[0] != 'dummy'
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
(source / 'test.py').touch()
(source / 'dummy').mkdir()
(source / 'dummy' / 'test2.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(source, target, filter=skip_dummy_dir)
with zipfile.ZipFile(target, 'r') as z:
self.assertEqual(len(z.namelist()), 2)
self.assertIn('__main__.py', z.namelist())
self.assertIn('test.py', z.namelist())
def test_create_archive_default_target(self):
# Test packing a directory to the default name.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
zipapp.create_archive(str(source))
expected_target = self.tmpdir / 'source.pyz'
self.assertTrue(expected_target.is_file())
@requires_zlib()
def test_create_archive_with_compression(self):
# Test packing a directory into a compressed archive.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
(source / 'test.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(source, target, compressed=True)
with zipfile.ZipFile(target, 'r') as z:
for name in ('__main__.py', 'test.py'):
self.assertEqual(z.getinfo(name).compress_type,
zipfile.ZIP_DEFLATED)
def test_no_main(self):
# Test that packing a directory with no __main__.py fails.
source = self.tmpdir / 'source'
source.mkdir()
(source / 'foo.py').touch()
target = self.tmpdir / 'source.pyz'
with self.assertRaises(zipapp.ZipAppError):
zipapp.create_archive(str(source), str(target))
def test_main_and_main_py(self):
# Test that supplying a main argument with __main__.py fails.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
with self.assertRaises(zipapp.ZipAppError):
zipapp.create_archive(str(source), str(target), main='pkg.mod:fn')
def test_main_written(self):
# Test that the __main__.py is written correctly.
source = self.tmpdir / 'source'
source.mkdir()
(source / 'foo.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), main='pkg.mod:fn')
with zipfile.ZipFile(str(target), 'r') as z:
self.assertIn('__main__.py', z.namelist())
self.assertIn(b'pkg.mod.fn()', z.read('__main__.py'))
def test_main_only_written_once(self):
# Test that we don't write multiple __main__.py files.
# The initial implementation had this bug; zip files allow
# multiple entries with the same name
source = self.tmpdir / 'source'
source.mkdir()
# Write 2 files, as the original bug wrote __main__.py
# once for each file written :-(
        # See http://bugs.python.org/review/23491/diff/13982/Lib/zipapp.py#newcode67
# (line 67)
(source / 'foo.py').touch()
(source / 'bar.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), main='pkg.mod:fn')
with zipfile.ZipFile(str(target), 'r') as z:
self.assertEqual(1, z.namelist().count('__main__.py'))
def test_main_validation(self):
# Test that invalid values for main are rejected.
source = self.tmpdir / 'source'
source.mkdir()
target = self.tmpdir / 'source.pyz'
problems = [
'', 'foo', 'foo:', ':bar', '12:bar', 'a.b.c.:d',
'.a:b', 'a:b.', 'a:.b', 'a:silly name'
]
for main in problems:
with self.subTest(main=main):
with self.assertRaises(zipapp.ZipAppError):
zipapp.create_archive(str(source), str(target), main=main)
def test_default_no_shebang(self):
# Test that no shebang line is written to the target by default.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target))
with target.open('rb') as f:
self.assertNotEqual(f.read(2), b'#!')
def test_custom_interpreter(self):
# Test that a shebang line with a custom interpreter is written
# correctly.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter='python')
with target.open('rb') as f:
self.assertEqual(f.read(2), b'#!')
self.assertEqual(b'python\n', f.readline())
def test_pack_to_fileobj(self):
# Test that we can pack to a file object.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = io.BytesIO()
zipapp.create_archive(str(source), target, interpreter='python')
self.assertTrue(target.getvalue().startswith(b'#!python\n'))
def test_read_shebang(self):
# Test that we can read the shebang line correctly.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter='python')
self.assertEqual(zipapp.get_interpreter(str(target)), 'python')
def test_read_missing_shebang(self):
# Test that reading the shebang line of a file without one returns None.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target))
self.assertEqual(zipapp.get_interpreter(str(target)), None)
def test_modify_shebang(self):
# Test that we can change the shebang of a file.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter='python')
new_target = self.tmpdir / 'changed.pyz'
zipapp.create_archive(str(target), str(new_target), interpreter='python2.7')
self.assertEqual(zipapp.get_interpreter(str(new_target)), 'python2.7')
def test_write_shebang_to_fileobj(self):
# Test that we can change the shebang of a file, writing the result to a
# file object.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter='python')
new_target = io.BytesIO()
zipapp.create_archive(str(target), new_target, interpreter='python2.7')
self.assertTrue(new_target.getvalue().startswith(b'#!python2.7\n'))
def test_read_from_pathobj(self):
# Test that we can copy an archive using a pathlib.Path object
# for the source.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target1 = self.tmpdir / 'target1.pyz'
target2 = self.tmpdir / 'target2.pyz'
zipapp.create_archive(source, target1, interpreter='python')
zipapp.create_archive(target1, target2, interpreter='python2.7')
self.assertEqual(zipapp.get_interpreter(target2), 'python2.7')
def test_read_from_fileobj(self):
# Test that we can copy an archive using an open file object.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
temp_archive = io.BytesIO()
zipapp.create_archive(str(source), temp_archive, interpreter='python')
new_target = io.BytesIO()
temp_archive.seek(0)
zipapp.create_archive(temp_archive, new_target, interpreter='python2.7')
self.assertTrue(new_target.getvalue().startswith(b'#!python2.7\n'))
def test_remove_shebang(self):
# Test that we can remove the shebang from a file.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter='python')
new_target = self.tmpdir / 'changed.pyz'
zipapp.create_archive(str(target), str(new_target), interpreter=None)
self.assertEqual(zipapp.get_interpreter(str(new_target)), None)
def test_content_of_copied_archive(self):
# Test that copying an archive doesn't corrupt it.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = io.BytesIO()
zipapp.create_archive(str(source), target, interpreter='python')
new_target = io.BytesIO()
target.seek(0)
zipapp.create_archive(target, new_target, interpreter=None)
new_target.seek(0)
with zipfile.ZipFile(new_target, 'r') as z:
self.assertEqual(set(z.namelist()), {'__main__.py'})
# (Unix only) tests that archives with shebang lines are made executable
@unittest.skipIf(sys.platform == 'win32',
'Windows does not support an executable bit')
@os_helper.skip_unless_working_chmod
def test_shebang_is_executable(self):
# Test that an archive with a shebang line is made executable.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter='python')
self.assertTrue(target.stat().st_mode & stat.S_IEXEC)
@unittest.skipIf(sys.platform == 'win32',
'Windows does not support an executable bit')
def test_no_shebang_is_not_executable(self):
# Test that an archive with no shebang line is not made executable.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(str(source), str(target), interpreter=None)
self.assertFalse(target.stat().st_mode & stat.S_IEXEC)
class ZipAppCmdlineTest(unittest.TestCase):
"""Test zipapp module command line API."""
def setUp(self):
tmpdir = tempfile.TemporaryDirectory()
self.addCleanup(tmpdir.cleanup)
self.tmpdir = pathlib.Path(tmpdir.name)
def make_archive(self):
        # Helper: build a simple archive used by the command-line tests below.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
target = self.tmpdir / 'source.pyz'
zipapp.create_archive(source, target)
return target
def test_cmdline_create(self):
# Test the basic command line API.
source = self.tmpdir / 'source'
source.mkdir()
(source / '__main__.py').touch()
args = [str(source)]
zipapp.main(args)
target = source.with_suffix('.pyz')
self.assertTrue(target.is_file())
def test_cmdline_copy(self):
# Test copying an archive.
original = self.make_archive()
target = self.tmpdir / 'target.pyz'
args = [str(original), '-o', str(target)]
zipapp.main(args)
self.assertTrue(target.is_file())
def test_cmdline_copy_inplace(self):
# Test copying an archive in place fails.
original = self.make_archive()
target = self.tmpdir / 'target.pyz'
args = [str(original), '-o', str(original)]
with self.assertRaises(SystemExit) as cm:
zipapp.main(args)
# Program should exit with a non-zero return code.
self.assertTrue(cm.exception.code)
def test_cmdline_copy_change_main(self):
# Test copying an archive doesn't allow changing __main__.py.
original = self.make_archive()
target = self.tmpdir / 'target.pyz'
args = [str(original), '-o', str(target), '-m', 'foo:bar']
with self.assertRaises(SystemExit) as cm:
zipapp.main(args)
# Program should exit with a non-zero return code.
self.assertTrue(cm.exception.code)
@patch('sys.stdout', new_callable=io.StringIO)
def test_info_command(self, mock_stdout):
# Test the output of the info command.
target = self.make_archive()
args = [str(target), '--info']
with self.assertRaises(SystemExit) as cm:
zipapp.main(args)
# Program should exit with a zero return code.
self.assertEqual(cm.exception.code, 0)
self.assertEqual(mock_stdout.getvalue(), "Interpreter: <none>\n")
def test_info_error(self):
# Test the info command fails when the archive does not exist.
target = self.tmpdir / 'dummy.pyz'
args = [str(target), '--info']
with self.assertRaises(SystemExit) as cm:
zipapp.main(args)
# Program should exit with a non-zero return code.
self.assertTrue(cm.exception.code)
if __name__ == "__main__":
unittest.main()
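# --- Illustrative sketch (not part of the test suite): the zipapp API these
# tests exercise, using hypothetical package and path names. ---
# import zipapp
# zipapp.create_archive('myapp_src', 'myapp.pyz',
#                       interpreter='/usr/bin/env python3',
#                       main='myapp.cli:main')      # source dir has no __main__.py
# print(zipapp.get_interpreter('myapp.pyz'))        # -> '/usr/bin/env python3'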
| 40.2891
| 100
| 0.617574
|
bd25f6f4b4b916dc3222f74041417db0b4ef15ea
| 754
|
py
|
Python
|
test_tokens.py
|
yeoedward/Robost-Fill
|
f8bbf7546732bc7e8412b53f0267e7c8b82e135e
|
[
"MIT"
] | 16
|
2018-12-18T05:01:23.000Z
|
2022-02-23T17:14:55.000Z
|
test_tokens.py
|
yeoedward/Robost-Fill
|
f8bbf7546732bc7e8412b53f0267e7c8b82e135e
|
[
"MIT"
] | null | null | null |
test_tokens.py
|
yeoedward/Robost-Fill
|
f8bbf7546732bc7e8412b53f0267e7c8b82e135e
|
[
"MIT"
] | 6
|
2019-09-19T19:49:44.000Z
|
2021-07-06T13:01:04.000Z
|
from unittest import TestCase
from sample import sample_program, sample_string
from tokens import build_token_tables
class TestTokens(TestCase):
def test_total_num_tokens(self):
token_tables = build_token_tables()
expected_num_tokens = 1118
self.assertEqual(expected_num_tokens, len(token_tables.token_op_table))
self.assertEqual(expected_num_tokens, len(token_tables.op_token_table))
def test_token_table_coverage_smoke_test(self):
token_tables = build_token_tables()
num_samples = 1000
for _ in range(num_samples):
sample_program(10).to_tokens(token_tables.op_token_table)
for char in sample_string(30):
token_tables.string_token_table[char]
| 32.782609
| 79
| 0.732095
|
d37113b9dd37eaeca35d3cb367ea9c3f91a78a17
| 1,384
|
py
|
Python
|
extraPackages/matplotlib-3.0.3/examples/userdemo/colormap_normalizations_bounds.py
|
dolboBobo/python3_ios
|
877f8c2c5890f26292ddd14909bea62a04fe2889
|
[
"BSD-3-Clause"
] | 130
|
2018-02-03T10:25:54.000Z
|
2022-03-25T22:27:22.000Z
|
extraPackages/matplotlib-3.0.2/examples/userdemo/colormap_normalizations_bounds.py
|
spacetime314/python3_ios
|
e149f1bc2e50046c8810f83dae7739a8dea939ee
|
[
"BSD-3-Clause"
] | 9
|
2018-12-14T07:31:42.000Z
|
2020-12-09T20:29:28.000Z
|
extraPackages/matplotlib-3.0.2/examples/userdemo/colormap_normalizations_bounds.py
|
spacetime314/python3_ios
|
e149f1bc2e50046c8810f83dae7739a8dea939ee
|
[
"BSD-3-Clause"
] | 64
|
2018-04-25T08:51:57.000Z
|
2022-01-29T14:13:57.000Z
|
"""
==============================
Colormap Normalizations Bounds
==============================
Demonstration of using norm to map colormaps onto data in non-linear ways.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
N = 100
X, Y = np.mgrid[-3:3:complex(0, N), -2:2:complex(0, N)]
Z1 = np.exp(-X**2 - Y**2)
Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2)
Z = (Z1 - Z2) * 2
'''
BoundaryNorm: For this one you provide the boundaries for your colors,
and the Norm puts the first color in between the first pair, the
second color between the second pair, etc.
'''
fig, ax = plt.subplots(3, 1, figsize=(8, 8))
ax = ax.flatten()
# even bounds gives a contour-like effect
bounds = np.linspace(-1, 1, 10)
norm = colors.BoundaryNorm(boundaries=bounds, ncolors=256)
pcm = ax[0].pcolormesh(X, Y, Z,
norm=norm,
cmap='RdBu_r')
fig.colorbar(pcm, ax=ax[0], extend='both', orientation='vertical')
# uneven bounds changes the colormapping:
bounds = np.array([-0.25, -0.125, 0, 0.5, 1])
norm = colors.BoundaryNorm(boundaries=bounds, ncolors=256)
pcm = ax[1].pcolormesh(X, Y, Z, norm=norm, cmap='RdBu_r')
fig.colorbar(pcm, ax=ax[1], extend='both', orientation='vertical')
pcm = ax[2].pcolormesh(X, Y, Z, cmap='RdBu_r', vmin=-np.max(Z))
fig.colorbar(pcm, ax=ax[2], extend='both', orientation='vertical')
plt.show()
| 30.755556
| 74
| 0.634393
|
60320c83a4bbf7da4064d8d1005ba43c1020228f
| 2,324
|
py
|
Python
|
25_vendor_terms.py
|
dionisiotorres/import_scripts
|
14e12c6874e1277b4ad4cdbe46f6b454b43c2aec
|
[
"Unlicense"
] | null | null | null |
25_vendor_terms.py
|
dionisiotorres/import_scripts
|
14e12c6874e1277b4ad4cdbe46f6b454b43c2aec
|
[
"Unlicense"
] | null | null | null |
25_vendor_terms.py
|
dionisiotorres/import_scripts
|
14e12c6874e1277b4ad4cdbe46f6b454b43c2aec
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
from xmlrpc import client as xmlrpclib
import multiprocessing as mp
from scriptconfig import URL, DB, UID, PSW, WORKERS
def update_vendor_terms(pid, data_pool, write_ids, error_ids):
sock = xmlrpclib.ServerProxy(URL, allow_none=True)
while data_pool:
try:
data = data_pool.pop()
code = data.get('TERM-CODE')
vals = {'name': data.get('TERM-DESC').strip(),
'note': data.get('TERM-DESC').strip(),
'active': True,
'order_type': 'purchase',
'code': code,
'discount_per': data.get('TERM-DISC-PCT', 0),
'due_days': data.get('TERM-DISC-DAYS', 0),
}
res = write_ids.get(code, [])
if res:
sock.execute(DB, UID, PSW, 'account.payment.term', 'write', res, vals)
print(pid, 'UPDATE - VENDOR TERM', res)
else:
vals['line_ids'] = [(0, 0, {'type': 'balance', 'days': int(data.get('TERM-DAYS-DUE', 0) or 0)})]
res = sock.execute(DB, UID, PSW, 'account.payment.term', 'create', vals)
print(pid, 'CREATE - VENDOR TERM', res)
if not data_pool:
break
except:
break
def sync_terms():
manager = mp.Manager()
data_pool = manager.list()
error_ids = manager.list()
write_ids = manager.dict()
process_Q = []
fp = open('files/aplterm1.csv', 'r')
csv_reader = csv.DictReader(fp)
for vals in csv_reader:
data_pool.append(vals)
fp.close()
domain = [('order_type', '=', 'purchase')]
sock = xmlrpclib.ServerProxy(URL, allow_none=True)
res = sock.execute(DB, UID, PSW, 'account.payment.term', 'search_read', domain, ['id', 'code'])
write_ids = {term['code']: term['id'] for term in res}
res = None
term_codes = None
for i in range(WORKERS):
pid = "Worker-%d" % (i + 1)
worker = mp.Process(name=pid, target=update_vendor_terms, args=(pid, data_pool, write_ids, error_ids))
process_Q.append(worker)
worker.start()
for worker in process_Q:
worker.join()
if __name__ == "__main__":
# PARTNER
sync_terms()
| 29.417722
| 112
| 0.547762
|
ed387b57bc6b4918dab295fa4c8f4c2d1a021253
| 5,197
|
py
|
Python
|
src/config/ocio/Python/rv_ocio_setup.py
|
umediayvr/rvtools
|
7fbb986746e70cbec34d83b5a3b08659c3a462bb
|
[
"MIT"
] | 2
|
2018-05-28T20:59:29.000Z
|
2020-06-26T19:16:45.000Z
|
src/config/ocio/Python/rv_ocio_setup.py
|
umediayvr/rvtools
|
7fbb986746e70cbec34d83b5a3b08659c3a462bb
|
[
"MIT"
] | null | null | null |
src/config/ocio/Python/rv_ocio_setup.py
|
umediayvr/rvtools
|
7fbb986746e70cbec34d83b5a3b08659c3a462bb
|
[
"MIT"
] | 2
|
2018-07-10T15:03:26.000Z
|
2020-12-09T08:59:04.000Z
|
import rv
import os
import PyOpenColorIO as OCIO
from ingestor.ExpressionEvaluator import ExpressionEvaluator
# Convenience functions to set the values in the specific nodes
def setInt(node, prop, value):
"""Set an int property."""
propertyName = '{node}.{prop}'.format(node=node, prop=prop)
rv.commands.setIntProperty(propertyName, [value], True)
def getInt(node, prop):
"""Get an int property."""
propertyName = '{node}.{prop}'.format(node=node, prop=prop)
return rv.commands.getIntProperty(propertyName, 0, 1)[0]
def setFloat(node, prop, value):
"""Set a float property."""
propertyName = '{node}.{prop}'.format(node=node, prop=prop)
rv.commands.setFloatProperty(propertyName, [float(value)], True)
def getFloat(node, prop):
"""Get a float property."""
propertyName = '{node}.{prop}'.format(node=node, prop=prop)
return rv.commands.getFloatProperty(propertyName, 0, 1)[0]
def getString(node, prop):
"""Get a string property."""
propertyName = '{node}.{prop}'.format(node=node, prop=prop)
return rv.commands.getStringProperty(propertyName, 0, 1)[0]
def setString(node, prop, value):
"""Set a string property."""
propertyName = '{node}.{prop}'.format(node=node, prop=prop)
rv.commands.setStringProperty(propertyName, [value], True)
def setComponent(node, prop, value):
"""Set a component property."""
for k, v in value.items():
component = '{node}.{prop}.{key}'.format(node=node, prop=prop, key=k)
if not rv.commands.propertyExists(component):
rv.commands.newProperty(component, rv.commands.StringType, 1)
keyProperty = '{prop}.{key}'.format(prop=prop, key=k)
setString(node, keyProperty, v)
def groupMemberOfType(node, memberType):
"""Get a group member of specific type."""
for n in rv.commands.nodesInGroup(node):
if rv.commands.nodeType(n) == memberType:
return n
return None
def ocio_config_from_media(*args, **kwargs):
"""
Override the original 'ocio_config_from_media' from the 'ocio_source_setup' plugin.
This functions sets the open color io config file. It resolves the path base on the loaded file.
"""
media = rv.commands.sources()[0][0]
config_file = ExpressionEvaluator.run('rfindpath', 'config/OCIO/config.ocio', media)
return OCIO.Config.CreateFromFile(config_file)
def ocio_node_from_media(config, node, default, media=None, _=None):
"""
Override the original 'ocio_node_from_media' from the 'ocio_source_setup' plugin.
This function sets the usage of the views and color spaces in RV.
RVDisplayPipelineGroup - Viewer setup
RVLinearizePipelineGroup - Color space setup
RVLookPipelineGroup - Look setup
"""
result = [{"nodeType": d, "context": {}, "properties": {}} for d in default]
nodeType = rv.commands.nodeType(node)
if (nodeType == "RVDisplayPipelineGroup"):
        # The display is always the color viewer; the color space set in the OCIO config determines the final color space
media = rv.commands.sources()[0][0]
sg, _, projectName = getDataFromMedia(media)
sgProject = sg.find_one(
'Project',
[['name', 'is', projectName]],
['sg_rvcolorspaceviewer']
)
viewer = str(sgProject['sg_rvcolorspaceviewer'])
display = config.getDefaultDisplay()
result = [
{
"nodeType": "OCIODisplay",
"context": {},
"properties": {
"ocio.function": "display",
"ocio_display.view": viewer,
"ocio_display.display": display
}
}
]
elif (nodeType == "RVLinearizePipelineGroup"):
if not media:
return result
sg, _, projectName = getDataFromMedia(media)
# This is not the right way to do it, but in the future we will have the ids instead of the names
sgProject = sg.find_one(
'Project',
[['name', 'is', projectName]],
['sg_showcolorspacestudio', 'sg_showcolorspaceclient']
)
# Color space in
colorSpaceStudio = str(sgProject['sg_showcolorspacestudio'])
# Color space out
colorSpaceClient = str(sgProject['sg_showcolorspaceclient'])
result = [
{
"nodeType": "OCIOFile",
"context": {},
"properties": {
"ocio.function": "color",
"ocio.inColorSpace": colorSpaceStudio,
"ocio_color.outColorSpace": colorSpaceClient
}
}
]
elif (nodeType == "RVLookPipelineGroup"):
        # We don't need to set the looks here, so we can bypass this method (left in to show it's a possibility)
pass
return result
def getDataFromMedia(media):
"""
Get the shotgun session, shotname and project name base on the loaded file.
"""
from ushotgun import Session
sg = Session.get()
shotName = os.path.basename(media).split('_')[0]
projectName = shotName.split('-')[0]
return (sg, shotName, projectName)
| 33.529032
| 110
| 0.622859
|
d230585d56e6bbb18e8b4c6a3a46a356a5fe720e
| 802
|
py
|
Python
|
polymorphism_and_abstraction/exercise/04_wild_farm/project/animals/birds.py
|
Galchov/python-oop
|
1bf7c51ac2c605bae11b08df7edd4341e20a1b39
|
[
"MIT"
] | null | null | null |
polymorphism_and_abstraction/exercise/04_wild_farm/project/animals/birds.py
|
Galchov/python-oop
|
1bf7c51ac2c605bae11b08df7edd4341e20a1b39
|
[
"MIT"
] | null | null | null |
polymorphism_and_abstraction/exercise/04_wild_farm/project/animals/birds.py
|
Galchov/python-oop
|
1bf7c51ac2c605bae11b08df7edd4341e20a1b39
|
[
"MIT"
] | null | null | null |
from project.animals.animal import Bird
class Owl(Bird):
def __init__(self, name, weight, wing_size):
super().__init__(name, weight, wing_size)
def make_sound(self):
return "Hoot Hoot"
def feed(self, food):
food_type = type(food).__name__
animal_type = type(self).__name__
if food_type == "Meat":
self.food_eaten += food.quantity
self.weight += food.quantity * 0.25
else:
return f"{animal_type} does not eat {food_type}!"
class Hen(Bird):
def __init__(self, name, weight, wing_size):
super().__init__(name, weight, wing_size)
def make_sound(self):
return "Cluck"
def feed(self, food):
self.food_eaten += food.quantity
self.weight += food.quantity * 0.35
| 25.870968
| 61
| 0.609726
|
fcf5815868eb3669d4753c83febb5073ad144e43
| 3,640
|
py
|
Python
|
bitbox/script.py
|
lightswarm124/bitbox-py
|
67ee0d216e2630fd44dba83b5233f33c315dd30b
|
[
"MIT"
] | null | null | null |
bitbox/script.py
|
lightswarm124/bitbox-py
|
67ee0d216e2630fd44dba83b5233f33c315dd30b
|
[
"MIT"
] | null | null | null |
bitbox/script.py
|
lightswarm124/bitbox-py
|
67ee0d216e2630fd44dba83b5233f33c315dd30b
|
[
"MIT"
] | null | null | null |
class Script:
def opcodes():
codes = {
"OP_FALSE": 0,
"OP_0": 0,
"OP_PUSHDATA1": 76,
"OP_PUSHDATA2": 77,
"OP_PUSHDATA4": 78,
"OP_1NEGATE": 79,
"OP_RESERVED": 80,
"OP_TRUE": 81,
"OP_1": 81,
"OP_2": 82,
"OP_3": 83,
"OP_4": 84,
"OP_5": 85,
"OP_6": 86,
"OP_7": 87,
"OP_8": 88,
"OP_9": 89,
"OP_10": 90,
"OP_11": 91,
"OP_12": 92,
"OP_13": 93,
"OP_14": 94,
"OP_15": 95,
"OP_16": 96,
"OP_NOP": 97,
"OP_VER": 98,
"OP_IF": 99,
"OP_NOTIF": 100,
"OP_VERIF": 101,
"OP_VERNOTIF": 102,
"OP_ELSE": 103,
"OP_ENDIF": 104,
"OP_VERIFY": 105,
"OP_RETURN": 106,
"OP_TOALTSTACK": 107,
"OP_FROMALTSTACK": 108,
"OP_2DROP": 109,
"OP_2DUP": 110,
"OP_3DUP": 111,
"OP_2OVER": 112,
"OP_2ROT": 113,
"OP_2SWAP": 114,
"OP_IFDUP": 115,
"OP_DEPTH": 116,
"OP_DROP": 117,
"OP_DUP": 118,
"OP_NIP": 119,
"OP_OVER": 120,
"OP_PICK": 121,
"OP_ROLL": 122,
"OP_ROT": 123,
"OP_SWAP": 124,
"OP_TUCK": 125,
"OP_CAT": 126,
"OP_SPLIT": 127,
"OP_NUM2BIN": 128,
"OP_BIN2NUM": 129,
"OP_SIZE": 130,
"OP_INVERT": 131,
"OP_AND": 132,
"OP_OR": 133,
"OP_XOR": 134,
"OP_EQUAL": 135,
"OP_EQUALVERIFY": 136,
"OP_RESERVED1": 137,
"OP_RESERVED2": 138,
"OP_1ADD": 139,
"OP_1SUB": 140,
"OP_2MUL": 141,
"OP_2DIV": 142,
"OP_NEGATE": 143,
"OP_ABS": 144,
"OP_NOT": 145,
"OP_0NOTEQUAL": 146,
"OP_ADD": 147,
"OP_SUB": 148,
"OP_MUL": 149,
"OP_DIV": 150,
"OP_MOD": 151,
"OP_LSHIFT": 152,
"OP_RSHIFT": 153,
"OP_BOOLAND": 154,
"OP_BOOLOR": 155,
"OP_NUMEQUAL": 156,
"OP_NUMEQUALVERIFY": 157,
"OP_NUMNOTEQUAL": 158,
"OP_LESSTHAN": 159,
"OP_GREATERTHAN": 160,
"OP_LESSTHANOREQUAL": 161,
"OP_GREATERTHANOREQUAL": 162,
"OP_MIN": 163,
"OP_MAX": 164,
"OP_WITHIN": 165,
"OP_RIPEMD160": 166,
"OP_SHA1": 167,
"OP_SHA256": 168,
"OP_HASH160": 169,
"OP_HASH256": 170,
"OP_CODESEPARATOR": 171,
"OP_CHECKSIG": 172,
"OP_CHECKSIGVERIFY": 173,
"OP_CHECKMULTISIG": 174,
"OP_CHECKMULTISIGVERIFY": 175,
"OP_NOP1": 176,
"OP_NOP2": 177,
"OP_CHECKLOCKTIMEVERIFY": 177,
"OP_NOP3": 178,
"OP_CHECKSEQUENCEVERIFY": 178,
"OP_NOP4": 179,
"OP_NOP5": 180,
"OP_NOP6": 181,
"OP_NOP7": 182,
"OP_NOP8": 183,
"OP_NOP9": 184,
"OP_NOP10": 185,
"OP_CHECKDATASIG": 186,
"OP_CHECKDATASIGVERIFY": 187,
"OP_PUBKEYHASH": 253,
"OP_PUBKEY": 254,
"OP_INVALIDOPCODE": 255
}
return codes
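# Illustrative sketch (not part of the original class): looking up opcode byte
# values for a standard P2PKH locking script. The helper name is hypothetical;
# the 20-byte pubkey-hash push that sits between OP_HASH160 and OP_EQUALVERIFY
# is omitted here.
def _example_p2pkh_opcodes():
    ops = Script.opcodes()
    return [ops["OP_DUP"], ops["OP_HASH160"],
            ops["OP_EQUALVERIFY"], ops["OP_CHECKSIG"]]  # [118, 169, 136, 172]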
| 28.888889
| 42
| 0.392308
|
9ee97c332de00ad5c63db196075481889e5b4702
| 9,291
|
py
|
Python
|
python/surf/protocols/clink/_ClinkTop.py
|
lsst-camera-daq/surf
|
e43b926507c1670fd511bc23f6c61d261100fcb4
|
[
"BSD-3-Clause-LBNL"
] | 134
|
2017-02-22T18:07:00.000Z
|
2022-03-21T16:12:23.000Z
|
python/surf/protocols/clink/_ClinkTop.py
|
lsst-camera-daq/surf
|
e43b926507c1670fd511bc23f6c61d261100fcb4
|
[
"BSD-3-Clause-LBNL"
] | 251
|
2017-04-26T23:42:42.000Z
|
2022-03-03T18:48:43.000Z
|
python/surf/protocols/clink/_ClinkTop.py
|
lsst-camera-daq/surf
|
e43b926507c1670fd511bc23f6c61d261100fcb4
|
[
"BSD-3-Clause-LBNL"
] | 38
|
2017-02-21T21:15:03.000Z
|
2022-02-06T00:22:37.000Z
|
#-----------------------------------------------------------------------------
# Title : PyRogue CameraLink module
#-----------------------------------------------------------------------------
# Description:
# PyRogue CameraLink module
#-----------------------------------------------------------------------------
# This file is part of the 'SLAC Firmware Standard Library'. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the 'SLAC Firmware Standard Library', including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import pyrogue as pr
import surf.protocols.clink
class ClinkTop(pr.Device):
def __init__(
self,
serial = [None,None],
camType = [None,None],
**kwargs):
super().__init__(**kwargs)
##############################
# Variables
##############################
self.add(pr.RemoteVariable(
name = "ChanCount",
description = "Supported channels",
offset = 0x00,
bitSize = 4,
bitOffset = 0x00,
mode = "RO",
))
self.add(pr.RemoteVariable(
name = "RstPll",
description = "Camera link channel PLL reset",
offset = 0x04,
bitSize = 1,
bitOffset = 0,
mode = "RW",
hidden = True,
))
@self.command(description="toggles Camera link channel PLL reset",)
def ResetPll():
self.RstPll.set(0x1)
self.RstPll.set(0x0)
self.add(pr.RemoteCommand(
name = "ResetFsm",
description = "Camera link channel FSM reset",
offset = 0x04,
bitSize = 1,
bitOffset = 1,
function = pr.BaseCommand.toggle,
))
self.add(pr.RemoteCommand(
name = "CntRst",
description = "",
offset = 0x04,
bitSize = 1,
bitOffset = 2,
function = pr.BaseCommand.toggle,
))
self.add(pr.RemoteVariable(
name = "LinkLockedA",
description = "Camera link channel locked status",
offset = 0x10,
bitSize = 1,
bitOffset = 0,
base = pr.Bool,
pollInterval = 1,
mode = "RO",
))
self.add(pr.RemoteVariable(
name = "LinkLockedB",
description = "Camera link channel locked status",
offset = 0x10,
bitSize = 1,
bitOffset = 1,
base = pr.Bool,
pollInterval = 1,
mode = "RO",
))
self.add(pr.RemoteVariable(
name = "LinkLockedC",
description = "Camera link channel locked status",
offset = 0x10,
bitSize = 1,
bitOffset = 2,
base = pr.Bool,
pollInterval = 1,
mode = "RO",
))
self.add(pr.RemoteVariable(
name = "LinkLockedCntA",
description = "Camera link channel locked status counter",
offset = 0x10,
bitSize = 8,
bitOffset = 8,
disp = '{}',
mode = "RO",
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = "LinkLockedCntB",
description = "Camera link channel locked status counter",
offset = 0x10,
bitSize = 8,
bitOffset = 16,
disp = '{}',
mode = "RO",
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = "LinkLockedCntC",
description = "Camera link channel locked status counter",
offset = 0x10,
bitSize = 8,
bitOffset = 24,
disp = '{}',
mode = "RO",
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = "ShiftCountA",
description = "Shift count for channel",
offset = 0x14,
bitSize = 3,
bitOffset = 0,
mode = "RO",
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = "ShiftCountB",
description = "Shift count for channel",
offset = 0x14,
bitSize = 3,
bitOffset = 8,
mode = "RO",
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = "ShiftCountC",
description = "Shift count for channel",
offset = 0x14,
bitSize = 3,
bitOffset = 16,
mode = "RO",
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = "DelayA",
description = "Precision delay for channel A",
offset = 0x18,
bitSize = 5,
bitOffset = 0,
mode = "RO",
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = "DelayB",
description = "Precision delay for channel B",
offset = 0x18,
bitSize = 5,
bitOffset = 8,
mode = "RO",
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = "DelayC",
description = "Precision delay for channel C",
offset = 0x18,
bitSize = 5,
bitOffset = 16,
mode = "RO",
pollInterval = 1,
))
self.addRemoteVariables(
name = "ClkInFreq",
description = "Clock Input Freq",
offset = 0x01C,
bitSize = 32,
bitOffset = 0,
units = 'Hz',
disp = '{:d}',
mode = "RO",
pollInterval = 1,
number = 3,
stride = 4,
)
self.addRemoteVariables(
name = "ClinkClkFreq",
description = "CameraLink Clock Freq",
offset = 0x028,
bitSize = 32,
bitOffset = 0,
units = 'Hz',
disp = '{:d}',
mode = "RO",
pollInterval = 1,
number = 3,
stride = 4,
)
for i in range(2):
if camType[i] is not None:
self.add(surf.protocols.clink.ClinkChannel(
name = f'Ch[{i}]',
offset = 0x100+(i*0x100),
serial = serial[i],
camType = camType[i],
# expand = False,
))
for i in range(3):
self.add(surf.protocols.clink.ClockManager(
name = f'Pll[{i}]',
offset = 0x1000+(i*0x1000),
type = 'MMCME2',
expand = False,
))
for i in range(3):
self.add(pr.LocalVariable(
name = f'PllConfig[{i}]',
description = 'Sets the PLL to a known set of configurations',
mode = 'RW',
value = '',
))
def hardReset(self):
super().hardReset()
self.ResetPll()
self.CntRst()
def initialize(self):
super().initialize()
# Hold the PLL in reset before configuration
self.RstPll.set(0x1)
# Loop through the PLL modules
for i in range(3):
# Check for 85 MHz configuration
if (self.PllConfig[i].get() == '85MHz'):
self.Pll[i].Config85MHz()
# Check for 80 MHz configuration
if (self.PllConfig[i].get() == '80MHz'):
# Same config as 85 MHz
self.Pll[i].Config85MHz()
# Check for 40 MHz configuration
if (self.PllConfig[i].get() == '40MHz'):
self.Pll[i].Config40MHz()
# Check for 25 MHz configuration
if (self.PllConfig[i].get() == '25MHz'):
self.Pll[i].Config25MHz()
# Release the reset after configuration
self.RstPll.set(0x0)
# Reset all the counters
self.CntRst()
def countReset(self):
super().countReset()
self.CntRst()
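# Illustrative usage sketch (hypothetical device handle, not part of this
# module): pick a PLL configuration, then re-run the initialize() sequence
# above, which holds the PLLs in reset while the configuration is applied
# and releases them afterwards.
#
#   clink.PllConfig[0].set('85MHz')   # clink: an instantiated ClinkTop device
#   clink.initialize()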
| 31.818493
| 79
| 0.409644
|
e64724bb1a31695df56c164b73319ac234e1323a
| 2,121
|
py
|
Python
|
tigrillo/analysis/optim_analysis.py
|
gabs48/tigrillo
|
663f7407808bb8101edebec02fb0cd81c59ad2f1
|
[
"MIT"
] | 5
|
2018-10-22T21:28:44.000Z
|
2020-09-03T07:01:36.000Z
|
tigrillo/analysis/optim_analysis.py
|
gabs48/tigrillo
|
663f7407808bb8101edebec02fb0cd81c59ad2f1
|
[
"MIT"
] | null | null | null |
tigrillo/analysis/optim_analysis.py
|
gabs48/tigrillo
|
663f7407808bb8101edebec02fb0cd81c59ad2f1
|
[
"MIT"
] | 1
|
2020-02-01T15:12:38.000Z
|
2020-02-01T15:12:38.000Z
|
#!/usr/bin/python3
"""
This script opens a window to analyze various properties of the optimization results.
"""
import ast
import configparser
import time
from tigrillo.core.control import *
from tigrillo.core.optim import *
__author__ = "Gabriel Urbain"
__copyright__ = "Copyright 2017, Human Brain Projet, SP10"
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Gabriel Urbain"
__email__ = "gabriel.urbain@ugent.be"
__status__ = "Research"
__date__ = "July 3rd, 2017"
class OptimAnalysis:
def __init__(self):
self.result_folder = None
self.config = None
self.phys = None
self.sim_time = None
self.cont = None
self.score = None
def load(self, folder):
self.result_folder = folder
# Retrieve config file
self.config = configparser.ConfigParser()
self.config.read(self.result_folder + "config.conf")
self.config.set('Physics', 'rendering', "True")
# TODO: stop taking the first pipe result by default
param = ast.literal_eval(self.config.get("Experiment", "pipe"))[0]
self.phys = eval(param["phys"])(self.config)
self.sim_time = param["time"]
self.cont = eval(param["ctrl"])(self.config)
def simulate(self):
# Init
self.cont.load(self.result_folder + "/best_cont.pkl")
self.score = Score(self.phys, self.cont, self.config)
self.score.start()
t_init = time.time()
# Run
self.phys.start_sim()
self.cont.run(self.sim_time, self.phys)
# Stop
self.score.stop()
st = self.score.final_time
t_fin = time.time()
self.phys.stop_sim()
rt = t_fin - t_init
# Get score
score = self.score.get_score()
print("Simulation finished with score = {0:.3f}".format(score) +
" (rt = {0:.2f}s; ".format(rt) +
"st = {0:.2f}s; ".format(st) +
"acc: {0:.2f}X)".format(st/rt))
if __name__ == '__main__':
an = OptimAnalysis()
an.load("/home/gabs48/tigrillo/data/results/20170703-165722/")
an.simulate()
| 24.662791
| 85
| 0.60396
|
fbca7fd6418f9dac16902cffed18d2a7d336f351
| 211
|
py
|
Python
|
JPS_SHIP_CRAWLER/ship/spiders/config.py
|
mdhillmancmcl/TheWorldAvatar-CMCL-Fork
|
011aee78c016b76762eaf511c78fabe3f98189f4
|
[
"MIT"
] | 21
|
2021-03-08T01:58:25.000Z
|
2022-03-09T15:46:16.000Z
|
JPS_SHIP_CRAWLER/ship/spiders/config.py
|
mdhillmancmcl/TheWorldAvatar-CMCL-Fork
|
011aee78c016b76762eaf511c78fabe3f98189f4
|
[
"MIT"
] | 63
|
2021-05-04T15:05:30.000Z
|
2022-03-23T14:32:29.000Z
|
JPS_SHIP_CRAWLER/ship/spiders/config.py
|
mdhillmancmcl/TheWorldAvatar-CMCL-Fork
|
011aee78c016b76762eaf511c78fabe3f98189f4
|
[
"MIT"
] | 15
|
2021-03-08T07:52:03.000Z
|
2022-03-29T04:46:20.000Z
|
station_list_url = 'http://www.aishub.net/stations?Station%5BSID%5D=&Station%5Bstatus%5D=0&Station%5Buptime%5D=&Station%5BCOUNTRY%5D=singapore&Station%5BLOCATION%5D=&Station%5BCOUNT%5D=&Station%5BDISTINCT%5D=';
| 105.5
| 210
| 0.805687
|
ab3039af86afe5670ccf9d95fbe57c39314c6616
| 10,698
|
py
|
Python
|
sdks/python/apache_beam/runners/dataflow/native_io/iobase.py
|
davidtime/beam
|
f2d19fdf7118a08d222f0028753a58347e6352fd
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/apache_beam/runners/dataflow/native_io/iobase.py
|
davidtime/beam
|
f2d19fdf7118a08d222f0028753a58347e6352fd
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/apache_beam/runners/dataflow/native_io/iobase.py
|
davidtime/beam
|
f2d19fdf7118a08d222f0028753a58347e6352fd
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Dataflow native sources and sinks.
For internal use only; no backwards-compatibility guarantees.
"""
# pytype: skip-file
from __future__ import absolute_import
import logging
from builtins import object
from apache_beam import pvalue
from apache_beam.io import iobase
from apache_beam.transforms import ptransform
from apache_beam.transforms.display import HasDisplayData
_LOGGER = logging.getLogger(__name__)
def _dict_printable_fields(dict_object, skip_fields):
"""Returns a list of strings for the interesting fields of a dict."""
return ['%s=%r' % (name, value)
for name, value in dict_object.items()
# want to output value 0 but not None nor []
if (value or value == 0)
and name not in skip_fields]
_minor_fields = ['coder', 'key_coder', 'value_coder',
'config_bytes', 'elements',
'append_trailing_newlines', 'strip_trailing_newlines',
'compression_type']
class NativeSource(iobase.SourceBase):
"""A source implemented by Dataflow service.
This class is to be only inherited by sources natively implemented by Cloud
Dataflow service, hence should not be sub-classed by users.
This class is deprecated and should not be used to define new sources.
"""
def reader(self):
"""Returns a NativeSourceReader instance associated with this source."""
raise NotImplementedError
def is_bounded(self):
return True
def __repr__(self):
return '<{name} {vals}>'.format(
name=self.__class__.__name__,
vals=', '.join(_dict_printable_fields(self.__dict__,
_minor_fields)))
class NativeSourceReader(object):
"""A reader for a source implemented by Dataflow service."""
def __enter__(self):
"""Opens everything necessary for a reader to function properly."""
raise NotImplementedError
def __exit__(self, exception_type, exception_value, traceback):
"""Cleans up after a reader executed."""
raise NotImplementedError
def __iter__(self):
"""Returns an iterator over all the records of the source."""
raise NotImplementedError
@property
def returns_windowed_values(self):
"""Returns whether this reader returns windowed values."""
return False
def get_progress(self):
"""Returns a representation of how far the reader has read.
Returns:
A SourceReaderProgress object that gives the current progress of the
reader.
"""
def request_dynamic_split(self, dynamic_split_request):
"""Attempts to split the input in two parts.
The two parts are named the "primary" part and the "residual" part. The
current 'NativeSourceReader' keeps processing the primary part, while the
residual part will be processed elsewhere (e.g. perhaps on a different
worker).
The primary and residual parts, if concatenated, must represent the
same input as the current input of this 'NativeSourceReader' before this
call.
The boundary between the primary part and the residual part is
specified in a framework-specific way using 'DynamicSplitRequest' e.g.,
if the framework supports the notion of positions, it might be a
position at which the input is asked to split itself (which is not
necessarily the same position at which it *will* split itself); it
might be an approximate fraction of input, or something else.
This function returns a 'DynamicSplitResult', which encodes, in a
framework-specific way, the information sufficient to construct a
description of the resulting primary and residual inputs. For example, it
might, again, be a position demarcating these parts, or it might be a pair
of fully-specified input descriptions, or something else.
After a successful call to 'request_dynamic_split()', subsequent calls
should be interpreted relative to the new primary.
Args:
dynamic_split_request: A 'DynamicSplitRequest' describing the split
request.
Returns:
'None' if the 'DynamicSplitRequest' cannot be honored (in that
case the input represented by this 'NativeSourceReader' stays the same),
or a 'DynamicSplitResult' describing how the input was split into a
primary and residual part.
"""
_LOGGER.debug(
'SourceReader %r does not support dynamic splitting. Ignoring dynamic '
'split request: %r',
self, dynamic_split_request)
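# Illustrative sketch (not part of the Beam API surface): how a runner might
# ask a concrete NativeSourceReader to split off the second half of its
# remaining work. The helper name is hypothetical; a None return means the
# reader declined the request.
def _example_request_split_at_half(reader):
  progress = ReaderProgress(percent_complete=0.5)
  return reader.request_dynamic_split(DynamicSplitRequest(progress))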
class ReaderProgress(object):
"""A representation of how far a NativeSourceReader has read."""
def __init__(self, position=None, percent_complete=None, remaining_time=None,
consumed_split_points=None, remaining_split_points=None):
self._position = position
if percent_complete is not None:
percent_complete = float(percent_complete)
if percent_complete < 0 or percent_complete > 1:
raise ValueError(
'The percent_complete argument was %f. Must be in range [0, 1].'
% percent_complete)
self._percent_complete = percent_complete
self._remaining_time = remaining_time
self._consumed_split_points = consumed_split_points
self._remaining_split_points = remaining_split_points
@property
def position(self):
"""Returns progress, represented as a ReaderPosition object."""
return self._position
@property
def percent_complete(self):
"""Returns progress, represented as a percentage of total work.
Progress range from 0.0 (beginning, nothing complete) to 1.0 (end of the
work range, entire WorkItem complete).
Returns:
Progress represented as a percentage of total work.
"""
return self._percent_complete
@property
def remaining_time(self):
"""Returns progress, represented as an estimated time remaining."""
return self._remaining_time
@property
def consumed_split_points(self):
return self._consumed_split_points
@property
def remaining_split_points(self):
return self._remaining_split_points
class ReaderPosition(object):
"""A representation of position in an iteration of a 'NativeSourceReader'."""
def __init__(self, end=None, key=None, byte_offset=None, record_index=None,
shuffle_position=None, concat_position=None):
"""Initializes ReaderPosition.
A ReaderPosition may get instantiated for one of these position types. Only
one of these should be specified.
Args:
end: position is past all other positions. For example, this may be used
to represent the end position of an unbounded range.
key: position is a string key.
byte_offset: position is a byte offset.
record_index: position is a record index
shuffle_position: position is a base64 encoded shuffle position.
concat_position: position is a 'ConcatPosition'.
"""
self.end = end
self.key = key
self.byte_offset = byte_offset
self.record_index = record_index
self.shuffle_position = shuffle_position
if concat_position is not None:
assert isinstance(concat_position, ConcatPosition)
self.concat_position = concat_position
class ConcatPosition(object):
"""A position that encapsulate an inner position and an index.
This is used to represent the position of a source that encapsulate several
other sources.
"""
def __init__(self, index, position):
"""Initializes ConcatPosition.
Args:
index: index of the source currently being read.
position: inner position within the source currently being read.
"""
if position is not None:
assert isinstance(position, ReaderPosition)
self.index = index
self.position = position
class DynamicSplitRequest(object):
"""Specifies how 'NativeSourceReader.request_dynamic_split' should split.
"""
def __init__(self, progress):
assert isinstance(progress, ReaderProgress)
self.progress = progress
class DynamicSplitResult(object):
pass
class DynamicSplitResultWithPosition(DynamicSplitResult):
def __init__(self, stop_position):
assert isinstance(stop_position, ReaderPosition)
self.stop_position = stop_position
class NativeSink(HasDisplayData):
"""A sink implemented by Dataflow service.
This class is to be only inherited by sinks natively implemented by Cloud
Dataflow service, hence should not be sub-classed by users.
"""
def writer(self):
"""Returns a SinkWriter for this source."""
raise NotImplementedError
def __repr__(self):
return '<{name} {vals}>'.format(
name=self.__class__.__name__,
vals=_dict_printable_fields(self.__dict__, _minor_fields))
class NativeSinkWriter(object):
"""A writer for a sink implemented by Dataflow service."""
def __enter__(self):
"""Opens everything necessary for a writer to function properly."""
raise NotImplementedError
def __exit__(self, exception_type, exception_value, traceback):
"""Cleans up after a writer executed."""
raise NotImplementedError
@property
def takes_windowed_values(self):
"""Returns whether this writer takes windowed values."""
return False
def Write(self, o): # pylint: disable=invalid-name
"""Writes a record to the sink associated with this writer."""
raise NotImplementedError
class _NativeWrite(ptransform.PTransform):
"""A PTransform for writing to a Dataflow native sink.
These are sinks that are implemented natively by the Dataflow service
and hence should not be updated by users. These sinks are processed
using a Dataflow native write transform.
Applying this transform results in a ``pvalue.PDone``.
"""
def __init__(self, sink):
"""Initializes a Write transform.
Args:
sink: Sink to use for the write
"""
super(_NativeWrite, self).__init__()
self.sink = sink
def expand(self, pcoll):
self._check_pcollection(pcoll)
return pvalue.PDone(pcoll.pipeline)
| 32.320242
| 79
| 0.724434
|
a5bfbd61d94ed6526fbbe50a1f7b0dc9943c0c4a
| 2,452
|
py
|
Python
|
.history/Missions_to_Mars/scrape_mars_20200809054709.py
|
ermiasgelaye/web-scraping-challenge
|
f99c3436dfb0169595c46dae7733d90e21385cc6
|
[
"ADSL"
] | null | null | null |
.history/Missions_to_Mars/scrape_mars_20200809054709.py
|
ermiasgelaye/web-scraping-challenge
|
f99c3436dfb0169595c46dae7733d90e21385cc6
|
[
"ADSL"
] | null | null | null |
.history/Missions_to_Mars/scrape_mars_20200809054709.py
|
ermiasgelaye/web-scraping-challenge
|
f99c3436dfb0169595c46dae7733d90e21385cc6
|
[
"ADSL"
] | 2
|
2020-11-02T08:12:16.000Z
|
2021-05-17T21:45:42.000Z
|
# Dependencies
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup as bs
import requests
from splinter import Browser
import re
# Initialize browser
def init_browser():
executable_path = {"executable_path": "/usr/local/bin/chromedriver"}
#executable_path = {'executable_path': 'chromedriver.exe'}
return Browser("chrome", **executable_path, headless=False)
def scrape():
browser = init_browser()
url = 'https://mars.nasa.gov/news/'
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
news_title = soup.find('div', class_='content_title').text
news_p = soup.find('div', class_='article_teaser_body').text
url = 'https://www.jpl.nasa.gov/spaceimages/'
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
base_url = 'https://www.jpl.nasa.gov'
image_url = soup.find("a", class_="button fancybox")["data-fancybox-href"]
featured_image_url = base_url + image_url
    # Mars weather: the Twitter source is not scraped here; default to None so
    # the output dict below still builds
    url = 'https://twitter.com/marswxreport?lang=en'
    mars_weather = None
url = 'https://space-facts.com/mars/'
browser.visit(url)
tables = pd.read_html(url)
facts_df = tables[0]
facts_df.columns = ['Fact', 'Value']
facts_df['Fact'] = facts_df['Fact'].str.replace(':', '')
facts_df.reset_index(drop=True, inplace=True)
facts_html = facts_df.to_html()
url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
results = soup.find_all('div', class_="description")
base_url = 'https://astrogeology.usgs.gov/'
sites = []
for result in results:
link = result.find('a', class_="itemLink product-item")
link_text = link['href']
hemispheres_url = base_url + link_text
sites.append(hemispheres_url)
hemispheres = []
for site in sites:
browser.visit(site)
html = browser.html
soup = bs(html, 'html.parser')
title = soup.find('h2', class_="title").text.strip()
url = soup.find_all('a', target="_blank", href=True)[0]['href']
hemispheres.append({"title": title, "img_url": url})
output = {
"news_title": news_title,
"news_p": news_p,
"featured_image_url": featured_image_url,
"mars_weather": mars_weather,
"facts_html": facts_html,
"hemispheres": hemispheres
}
return output
| 31.844156
| 96
| 0.64845
|
6ea1e9bc1dae59265c3bb727c6a5d8fbd4626270
| 1,075
|
py
|
Python
|
packages/python/plotly/plotly/validators/pointcloud/marker/border/__init__.py
|
sgn/plotly.py
|
587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6
|
[
"MIT"
] | 3
|
2020-02-04T21:39:20.000Z
|
2020-11-17T19:07:07.000Z
|
packages/python/plotly/plotly/validators/pointcloud/marker/border/__init__.py
|
sgn/plotly.py
|
587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6
|
[
"MIT"
] | 12
|
2020-06-06T01:22:26.000Z
|
2022-03-12T00:13:42.000Z
|
packages/python/plotly/plotly/validators/pointcloud/marker/border/__init__.py
|
sgn/plotly.py
|
587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6
|
[
"MIT"
] | 17
|
2019-11-21T14:11:29.000Z
|
2019-11-21T15:26:23.000Z
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="pointcloud.marker.border", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", False),
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class ArearatioValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="arearatio", parent_name="pointcloud.marker.border", **kwargs
):
super(ArearatioValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 1),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
| 31.617647
| 87
| 0.622326
|
1cb83d8492d49f024338a77abed5c29547304b8d
| 5,264
|
py
|
Python
|
playbooks/files/rax-maas/plugins/swift-dispersion.py
|
JCallicoat/rpc-maas
|
879bab6623339c99c288acf9191b445fe1ea1fa2
|
[
"Apache-2.0"
] | 31
|
2015-01-03T10:30:56.000Z
|
2019-06-23T22:21:24.000Z
|
playbooks/files/rax-maas/plugins/swift-dispersion.py
|
JCallicoat/rpc-maas
|
879bab6623339c99c288acf9191b445fe1ea1fa2
|
[
"Apache-2.0"
] | 457
|
2015-01-01T15:58:47.000Z
|
2021-06-10T12:04:11.000Z
|
playbooks/files/rax-maas/plugins/swift-dispersion.py
|
JCallicoat/rpc-maas
|
879bab6623339c99c288acf9191b445fe1ea1fa2
|
[
"Apache-2.0"
] | 65
|
2015-03-02T02:39:59.000Z
|
2021-12-22T21:57:01.000Z
|
#!/usr/bin/env python3
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import re
import subprocess
import maas_common
# Example output::
# $ swift-dispersion-report --container-only
# > Queried 3 containers for dispersion reporting, 0s, 0 retries
# > 100.00% of container copies found (6 of 6)
# > Sample represents 1.17% of the container partition space
# $ swift-dispersion-report --object-only
# > Queried 2 objects for dispersion reporting, 0s, 0 retries
# > There were 2 partitions missing 0 copy.
# > 100.00% of object copies found (10 of 10)
# > Sample represents 0.78% of the object partition space
PARSE_RE = re.compile(
# First line of both types of output
r"Queried (?P<num_objects>\d+) \w+ for dispersion reporting, "
r"(?P<seconds>\d+)s, (?P<retries>\d+) retries\s+"
# Second line if working with object output only
r"(?:There were (?P<num_partitions>\d+) partitions? missing "
r"(?P<partition_copies>\d+) cop(y|ies)\.?\s+)?"
# Second line for containers, third for objects
r"(?P<percent>\d+\.\d+)% of \w+ copies found \((?P<copies_found>\d+) of "
r"(?P<total_copies>\d+)\)\s+"
# Last line for both types
r"Sample represents (?P<partition_percent>\d+.\d+)% of the \w+ "
r"partition space"
)
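# Illustrative self-check (not part of the original plugin): applying PARSE_RE
# to the object-only sample shown in the comments above. The helper name is
# hypothetical.
def _example_parse_object_sample():
    sample = ('Queried 2 objects for dispersion reporting, 0s, 0 retries\n'
              'There were 2 partitions missing 0 copy.\n'
              '100.00% of object copies found (10 of 10)\n'
              'Sample represents 0.78% of the object partition space')
    # groupdict() yields e.g. copies_found='10', partition_percent='0.78'
    return PARSE_RE.search(sample).groupdict()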
def generate_report(on):
"""Report on either object or container dispersion.
:param str on: Either "object" or "container"
    :returns: string of output
"""
if on not in {'object', 'container'}:
return ''
call = ['swift-dispersion-report', '--%s-only' % on]
return subprocess.check_output(call)
def print_metrics(report_for, match):
group = match.groupdict()
for (k, v) in group.items():
if v is None:
# This happens for container output. The named "num_partitions"
# and "partition_copies" groups end up in the dictionary with
# value None so we need to ignore them when they are found.
continue
if k.endswith('percent'):
metric_type = 'double'
else:
metric_type = 'uint64'
# Add units when we can
unit = 's' if k == 'seconds' else None
maas_common.metric('{0}_{1}'.format(report_for, k), metric_type, v,
unit)
def main():
# It's easier to parse the output if we make them independent reports
# If we simply use swift-dispersion-report then we'll have both outputs
# one after the other and we'll likely have a bad time.
try:
object_out = generate_report('object')
object_match = PARSE_RE.search(object_out)
except OSError:
# If the subprocess call returns anything other than exit code 0.
# we should probably error out too.
maas_common.status_err('Could not access object dispersion report',
m_name='maas_swift')
try:
container_out = generate_report('container')
container_match = PARSE_RE.search(container_out)
except OSError:
# If the subprocess call returns anything other than exit code 0.
# we should probably error out too.
maas_common.status_err('Could not access container dispersion report',
m_name='maas_swift')
if not (object_match and container_match):
maas_common.status_err('Could not parse dispersion report output',
m_name='maas_swift')
maas_common.status_ok(m_name='maas_swift')
print_metrics('object', object_match)
print_metrics('container', container_match)
# Example output::
# $ python swift-dispersion.py
# > status okay
# > metric object_retries uint64 0
# > metric object_seconds uint64 0 s
# > metric object_num_partitions uint64 2
# > metric object_num_objects uint64 2
# > metric object_percent double 100.00
# > metric object_copies_found uint64 10
# > metric object_partition_copies uint64 0
# > metric object_partition_percent double 0.78
# > metric object_total_copies uint64 10
# > metric container_retries uint64 0
# > metric container_seconds uint64 0 s
# > metric container_num_objects uint64 3
# > metric container_percent double 100.00
# > metric container_copies_found uint64 6
# > metric container_partition_percent double 1.17
# > metric container_total_copies uint64 6
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Swift dispersion check')
parser.add_argument('--telegraf-output',
action='store_true',
default=False,
help='Set the output format to telegraf')
args = parser.parse_args()
with maas_common.print_output(print_telegraf=args.telegraf_output):
main()
| 37.333333
| 78
| 0.673822
|
e6929e7fdc2939d38fdb11c94e9a2998565e9a63
| 6,838
|
py
|
Python
|
mneext/resample.py
|
maosenGao/openmiir-rl-2016
|
d2e5744b1fa503a896994d8a70b3ca45d521db14
|
[
"BSD-3-Clause"
] | null | null | null |
mneext/resample.py
|
maosenGao/openmiir-rl-2016
|
d2e5744b1fa503a896994d8a70b3ca45d521db14
|
[
"BSD-3-Clause"
] | null | null | null |
mneext/resample.py
|
maosenGao/openmiir-rl-2016
|
d2e5744b1fa503a896994d8a70b3ca45d521db14
|
[
"BSD-3-Clause"
] | null | null | null |
__author__ = 'sstober'
import logging
log = logging.getLogger(__name__)
from mne import pick_types
import numpy as np
import librosa
import mne
## old interface from mne/filter.py:
# def resample(self, sfreq, npad=100, window='boxcar',
# stim_picks=None, n_jobs=1, verbose=None):
def fast_resample_mne(raw, sfreq, stim_picks=None, preserve_events=True, res_type='sinc_best', verbose=None):
"""Resample data channels.
Resamples all channels. The data of the Raw object is modified inplace.
The Raw object has to be constructed using preload=True (or string).
WARNING:
The intended purpose of this function is primarily to speed up computations (e.g., projection calculation) when precise timing of events is not required, as downsampling raw data effectively jitters trigger timings.
It is generally recommended not to epoch downsampled data, but instead epoch and then downsample, as epoching downsampled data jitters triggers.
Parameters
----------
    raw : mne Raw object
        Raw data to resample.
    sfreq : float
        New sample rate to use.
    stim_picks : array of int | None
        Stim channels. These channels are simply subsampled or supersampled
        (without applying any filtering). This reduces resampling artifacts in
        stim channels, but may lead to missing triggers. If None, stim channels
        are automatically chosen using
        mne.pick_types(raw.info, meg=False, stim=True, exclude=[]).
    res_type : str
        If `scikits.samplerate` is installed, :func:`librosa.core.resample`
        will use ``res_type``. (Choose between 'sinc_fastest', 'sinc_medium'
        and 'sinc_best' for the desired speed-vs-quality trade-off.) Otherwise,
        it will fall back on `scipy.signal.resample`.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
        Defaults to self.verbose.
Notes
-----
For some data, it may be more accurate to use npad=0 to reduce artifacts.
This is dataset dependent -- check your data!
"""
self = raw # this keeps the mne code intact
if not self.preload:
raise RuntimeError('Can only resample preloaded data')
sfreq = float(sfreq)
o_sfreq = float(self.info['sfreq'])
offsets = np.concatenate(([0], np.cumsum(self._raw_lengths)))
new_data = list()
# set up stim channel processing
if stim_picks is None:
stim_picks = pick_types(self.info, meg=False, ref_meg=False,
stim=True, exclude=[])
stim_picks = np.asanyarray(stim_picks)
### begin new code: save events in each stim channel ###
if preserve_events:
stim_events = dict()
for sp in stim_picks:
stim_channel_name = raw.ch_names[sp]
if verbose:
log.info('Saving events for stim channel "{}" (#{})'.format(stim_channel_name, sp))
stim_events[sp] = mne.find_events(raw, stim_channel=stim_channel_name,
shortest_event=0, verbose=verbose)
### end new code: save events in each stim channel ###
ratio = sfreq / o_sfreq
for ri in range(len(self._raw_lengths)):
data_chunk = self._data[:, offsets[ri]:offsets[ri + 1]]
### begin changed code ###
# new_data.append(resample(data_chunk, sfreq, o_sfreq, npad,
# n_jobs=n_jobs))
# if verbose:
log.info('Resampling {} channels from {} Hz to {} Hz ...'
.format(len(data_chunk), o_sfreq, sfreq))
new_data_chunk = list()
for i, channel in enumerate(data_chunk):
if verbose:
log.info('Processing channel #{}'.format(i))
# TODO: this could easily be parallelized
new_data_chunk.append(librosa.resample(channel, o_sfreq, sfreq, res_type=res_type))
new_data_chunk = np.vstack(new_data_chunk)
if verbose:
log.debug('data shape after resampling: {}'.format(new_data_chunk.shape))
new_data.append(new_data_chunk)
### end changed code ###
new_ntimes = new_data[ri].shape[1]
# Now deal with the stim channels. In empirical testing, it was
# faster to resample all channels (above) and then replace the
# stim channels than it was to only resample the proper subset
# of channels and then use np.insert() to restore the stims
# figure out which points in old data to subsample
# protect against out-of-bounds, which can happen (having
# one sample more than expected) due to padding
stim_inds = np.minimum(np.floor(np.arange(new_ntimes)
/ ratio).astype(int),
data_chunk.shape[1] - 1)
for sp in stim_picks:
new_data[ri][sp] = data_chunk[[sp]][:, stim_inds]
self._first_samps[ri] = int(self._first_samps[ri] * ratio)
self._last_samps[ri] = self._first_samps[ri] + new_ntimes - 1
self._raw_lengths[ri] = new_ntimes
# adjust affected variables
self._data = np.concatenate(new_data, axis=1)
self.info['sfreq'] = sfreq
self._update_times()
### begin new code: restore save events in each stim channel ###
if preserve_events:
for sp in stim_picks:
raw._data[sp,:].fill(0) # delete data in stim channel
if verbose:
stim_channel_name = raw.ch_names[sp]
log.info('Restoring events for stim channel "{}" (#{})'.format(stim_channel_name, sp))
# scale onset times
for event in stim_events[sp]:
onset = int(np.floor(event[0] * ratio))
event_id = event[2]
if raw._data[sp,onset] > 0:
log.warn('! event collision at {}: old={} new={}. Using onset+1'.format(
onset, raw._data[sp,onset], event_id))
raw._data[sp,onset+1] = event_id
else:
raw._data[sp,onset] = event_id
### end new code: save events in each stim channel ###
def resample_mne_events(events, o_sfreq, sfreq, fix_collisions=True):
ratio = sfreq / o_sfreq
resampled_events = list()
for event in events:
onset = int(np.floor(event[0] * ratio))
event_id = event[2]
if fix_collisions and \
len(resampled_events) > 0 and \
resampled_events[-1][0] == onset:
log.warn('! event collision at {}: old={} new={}. Using onset+1'.format(
onset, resampled_events[-1][0], event_id))
onset += 1
resampled_events.append([onset, 0, event_id])
return np.asarray(resampled_events)
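# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# A minimal, self-contained example of how fast_resample_mne might be called.
# It builds a synthetic Raw object in memory, so no data files or recording
# parameters from the original project are assumed.
def _example_fast_resample_usage():
    import numpy as np
    import mne
    info = mne.create_info(ch_names=['eeg1', 'eeg2', 'stim'],
                           sfreq=512.0, ch_types=['eeg', 'eeg', 'stim'])
    data = np.zeros((3, 5120))
    data[2, 1000] = 1  # a single trigger on the stim channel
    raw = mne.io.RawArray(data, info)  # preloaded by construction
    fast_resample_mne(raw, sfreq=128.0, preserve_events=True,
                      res_type='sinc_best', verbose=True)
    return raw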
| 39.988304
| 220
| 0.615531
|
2c88d567b72761a63c9f89ef87dca0c527f736f4
| 6,708
|
py
|
Python
|
tests/python/contrib/test_ethosn/infrastructure.py
|
mwillsey/incubator-tvm
|
e02dc69fef294eb73dd65d18949ed9e108f60cda
|
[
"Apache-2.0"
] | 2
|
2020-04-17T02:25:16.000Z
|
2020-11-25T11:39:43.000Z
|
tests/python/contrib/test_ethosn/infrastructure.py
|
mwillsey/incubator-tvm
|
e02dc69fef294eb73dd65d18949ed9e108f60cda
|
[
"Apache-2.0"
] | 3
|
2020-04-20T15:37:55.000Z
|
2020-05-13T05:34:28.000Z
|
tests/python/contrib/test_ethosn/infrastructure.py
|
mwillsey/incubator-tvm
|
e02dc69fef294eb73dd65d18949ed9e108f60cda
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Expose Ethos test functions to the Python front end"""
from __future__ import absolute_import, print_function
import tvm
from tvm import relay
from tvm.contrib import util, graph_runtime, download
from tvm.relay.testing import run_opt_pass
from enum import Enum
from hashlib import md5
from itertools import zip_longest, combinations
import numpy as np
from PIL import Image
import os
from . import _infrastructure
from tvm.relay.op.contrib import get_pattern_table
def make_module(func, params):
func = relay.Function(relay.analysis.free_vars(func), func)
if params:
relay.build_module.bind_params_by_name(func, params)
return tvm.IRModule.from_expr(func)
def make_ethosn_composite(ethosn_expr, name):
vars = relay.analysis.free_vars(ethosn_expr)
func = relay.Function([relay.Var("a")], ethosn_expr)
func = func.with_attr("Composite", name)
call = relay.Call(func, vars)
return call
def make_ethosn_partition(ethosn_expr):
# Create an Ethos-N global function
mod = tvm.IRModule({})
vars = relay.analysis.free_vars(ethosn_expr)
func = relay.Function(vars, ethosn_expr)
func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
func = func.with_attr("Compiler", "ethos-n")
func = func.with_attr("global_symbol", "ethos-n_0")
g1 = relay.GlobalVar("ethos-n_0")
mod[g1] = func
# These are the vars to call the Ethos-N partition with
more_vars = relay.analysis.free_vars(ethosn_expr)
# Call the Ethos-N partition in main
call_fn1 = g1(*more_vars)
mod["main"] = relay.Function(more_vars, call_fn1)
return mod
def get_host_op_count(mod):
class Counter(tvm.relay.ExprVisitor):
def __init__(self):
super().__init__()
self.count = 0
def visit_call(self, call):
if isinstance(call.op, tvm.ir.Op):
self.count += 1
super().visit_call(call)
c = Counter()
c.visit(mod["main"])
return c.count
def build(mod, params, npu=True, expected_host_ops=0, npu_partitions=1):
relay.backend.compile_engine.get().clear()
with tvm.transform.PassContext(
opt_level=3, config={"relay.ext.ethos-n.options": {"variant": 0}}
):
with tvm.target.Target("llvm"):
if npu:
f = relay.build_module.bind_params_by_name(mod["main"], params)
mod = tvm.IRModule()
mod["main"] = f
pattern = get_pattern_table("ethos-n")
mod = relay.transform.MergeComposite(pattern)(mod)
mod = relay.transform.AnnotateTarget("ethos-n")(mod)
mod = relay.transform.MergeCompilerRegions()(mod)
mod = relay.transform.PartitionGraph()(mod)
host_op_count = get_host_op_count(mod)
assert (
host_op_count == expected_host_ops
), "Got {} host operators, expected {}".format(host_op_count, expected_host_ops)
partition_count = 0
for global_var in mod.get_global_vars():
if "ethos-n" in global_var.name_hint:
partition_count += 1
assert (
npu_partitions == partition_count
), "Got {} ethos-n partitions, expected {}".format(partition_count, npu_partitions)
return relay.build(mod, params=params)
def run(graph, lib, params, inputs, outputs, npu=True):
# Export and load lib to confirm this works
lib_name = "mod.so"
temp = util.tempdir()
lib_path = temp.relpath(lib_name)
lib.export_library(lib_path)
lib = tvm.runtime.load_module(lib_path)
module = graph_runtime.create(graph, lib, tvm.cpu())
module.set_input(**inputs)
module.set_input(**params)
module.run()
out = [module.get_output(i) for i in range(outputs)]
if not npu:
inference_result(0, out)
return out
def build_and_run(
mod, inputs, outputs, params, ctx=tvm.cpu(), npu=True, expected_host_ops=0, npu_partitions=1
):
graph, lib, params = build(mod, params, npu, expected_host_ops, npu_partitions)
return run(graph, lib, params, inputs, outputs, npu)
def verify(answers, atol, rtol=1e-07, verify_saturation=True):
"""Compare the array of answers. Each entry is a list of outputs"""
if len(answers) < 2:
print("No results to compare: expected at least two, found ", len(answers))
for answer in zip_longest(*answers):
for outs in combinations(answer, 2):
if verify_saturation:
assert (
np.count_nonzero(outs[0].asnumpy() == 255) < 0.25 * outs[0].asnumpy().size
), "Output is saturated: {}".format(outs[0])
assert (
np.count_nonzero(outs[0].asnumpy() == 0) < 0.25 * outs[0].asnumpy().size
), "Output is saturated: {}".format(outs[0])
tvm.testing.assert_allclose(outs[0].asnumpy(), outs[1].asnumpy(), rtol=rtol, atol=atol)
def inference_result(checksum, outputs):
"""Set the expected results of an Ethos inference, if the testing
infrastructure is available. This assumes that the entire graph
was offloaded to the neural processor."""
if tvm.get_global_func("relay.ethos-n.test.infra.inference_result", True):
return _infrastructure.inference_result(checksum, *outputs)
return False
def test_error(mod, params, err_msg):
caught = None
with tvm.transform.PassContext(opt_level=3):
with tvm.target.Target("llvm"):
try:
relay.build(mod, params)
except tvm.error.TVMError as e:
caught = e.args[0]
finally:
relay.backend.compile_engine.get().clear()
assert caught is not None
assert err_msg in caught, caught
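# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Roughly how a couple of these helpers fit together, using a trivial Relay
# expression. The op and shapes are assumptions, not taken from a real test,
# and nothing here requires the Ethos-N support library to be present.
def _example_module_helpers():
    a = relay.var("a", shape=(1, 4), dtype="float32")
    mod = make_module(relay.nn.relu(a), {})
    print(get_host_op_count(mod))  # one host operator: the relu call
    # Wrap the same expression as an "ethos-n" partition, for inspection only.
    part = make_ethosn_partition(relay.nn.relu(relay.var("a", shape=(1, 4), dtype="float32")))
    print(part)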
| 37.266667
| 99
| 0.654293
|
8c2d7028355b301571cf41d7edf76f95d5f01c48
| 3,728
|
py
|
Python
|
examples/scripts/create-model.py
|
danieljtait/PyBaMM
|
f9d6143770e4a01099f06e3574142424730f731a
|
[
"BSD-3-Clause"
] | null | null | null |
examples/scripts/create-model.py
|
danieljtait/PyBaMM
|
f9d6143770e4a01099f06e3574142424730f731a
|
[
"BSD-3-Clause"
] | null | null | null |
examples/scripts/create-model.py
|
danieljtait/PyBaMM
|
f9d6143770e4a01099f06e3574142424730f731a
|
[
"BSD-3-Clause"
] | null | null | null |
# This script is intended to be a stripped back version of the
# 'examples/notebooks/create-model.ipnb' so for more details please see
# that notebook
import pybamm
import numpy as np
import matplotlib.pyplot as plt
# 1. Initialise model ------------------------------------------------------------------
model = pybamm.BaseModel()
# 2. Define parameters and variables ---------------------------------------------------
# dimensional parameters
k_dim = pybamm.Parameter("Reaction rate constant")
L_0_dim = pybamm.Parameter("Initial thickness")
V_hat_dim = pybamm.Parameter("Partial molar volume")
c_inf_dim = pybamm.Parameter("Bulk electrolyte solvent concentration")
def D_dim(cc):
return pybamm.FunctionParameter("Diffusivity", cc)
# dimensionless parameters
k = k_dim * L_0_dim / D_dim(c_inf_dim)
V_hat = V_hat_dim * c_inf_dim
def D(cc):
c_dim = c_inf_dim * cc
return D_dim(c_dim) / D_dim(c_inf_dim)
# variables
x = pybamm.SpatialVariable("x", domain="SEI layer", coord_sys="cartesian")
c = pybamm.Variable("Solvent concentration", domain="SEI layer")
L = pybamm.Variable("SEI thickness")
# 3. State governing equations ---------------------------------------------------------
R = k * pybamm.BoundaryValue(c, "left") # SEI reaction flux
N = -(1 / L) * D(c) * pybamm.grad(c) # solvent flux
dcdt = (V_hat * R) * pybamm.inner(x / L, pybamm.grad(c)) - (1 / L) * pybamm.div(
N
) # solvent concentration governing equation
dLdt = V_hat * R # SEI thickness governing equation
model.rhs = {c: dcdt, L: dLdt} # add to model
# 4. State boundary conditions ---------------------------------------------------------
D_left = pybamm.BoundaryValue(
D(c), "left"
) # pybamm requires BoundaryValue(D(c)) and not D(BoundaryValue(c))
grad_c_left = L * R / D_left # left bc
c_right = pybamm.Scalar(1) # right bc
# add to model
model.boundary_conditions = {
c: {"left": (grad_c_left, "Neumann"), "right": (c_right, "Dirichlet")}
}
# 5. State initial conditions ----------------------------------------------------------
model.initial_conditions = {c: pybamm.Scalar(1), L: pybamm.Scalar(1)}
# 6. State output variables ------------------------------------------------------------
model.variables = {
"SEI thickness": L,
"SEI growth rate": dLdt,
"Solvent concentration": c,
"SEI thickness [m]": L_0_dim * L,
"SEI growth rate [m/s]": (D_dim(c_inf_dim) / L_0_dim) * dLdt,
"Solvent concentration [mols/m^3]": c_inf_dim * c,
}
"--------------------------------------------------------------------------------------"
"Using the model"
# define geometry
geometry = {
"SEI layer": {"primary": {x: {"min": pybamm.Scalar(0), "max": pybamm.Scalar(1)}}}
}
# diffusivity function
def Diffusivity(cc):
return cc * 10 ** (-5)
# parameter values (not physically based, for example only!)
param = pybamm.ParameterValues(
{
"Reaction rate constant": 20,
"Initial thickness": 1e-6,
"Partial molar volume": 10,
"Bulk electrolyte solvent concentration": 1,
"Diffusivity": Diffusivity,
}
)
# process model and geometry
param.process_model(model)
param.process_geometry(geometry)
# mesh and discretise
submesh_types = {"SEI layer": pybamm.Uniform1DSubMesh}
var_pts = {x: 50}
mesh = pybamm.Mesh(geometry, submesh_types, var_pts)
spatial_methods = {"SEI layer": pybamm.FiniteVolume()}
disc = pybamm.Discretisation(mesh, spatial_methods)
disc.process_model(model)
# solve
solver = pybamm.ScipySolver()
t = np.linspace(0, 1, 100)
solution = solver.solve(model, t)
# Extract output variables
L_out = solution["SEI thickness"]
# plot
plt.plot(solution.t, L_out(solution.t))
plt.xlabel("Time")
plt.ylabel("SEI thickness")
plt.show()
| 29.824
| 88
| 0.615075
|
086aecb9c81314f3f17fd7c0d05638929f70b88e
| 2,421
|
py
|
Python
|
hierarchy_pos.py
|
fvalle1/tree_plotter
|
b3a2f7ce89d5ab928d8a8e6627c5eb158559cf85
|
[
"WTFPL"
] | null | null | null |
hierarchy_pos.py
|
fvalle1/tree_plotter
|
b3a2f7ce89d5ab928d8a8e6627c5eb158559cf85
|
[
"WTFPL"
] | null | null | null |
hierarchy_pos.py
|
fvalle1/tree_plotter
|
b3a2f7ce89d5ab928d8a8e6627c5eb158559cf85
|
[
"WTFPL"
] | null | null | null |
import networkx as nx
import random
def hierarchy_pos(G, root=None, width=1., vert_gap = 0.2, vert_loc = 0, xcenter = 0.5):
'''
From Joel's answer at https://stackoverflow.com/a/29597209/2966723.
Licensed under Creative Commons Attribution-Share Alike
If the graph is a tree this will return the positions to plot this in a
hierarchical layout.
G: the graph (must be a tree)
root: the root node of current branch
- if the tree is directed and this is not given,
the root will be found and used
- if the tree is directed and this is given, then
the positions will be just for the descendants of this node.
- if the tree is undirected and not given,
then a random choice will be used.
width: horizontal space allocated for this branch - avoids overlap with other branches
vert_gap: gap between levels of hierarchy
vert_loc: vertical location of root
xcenter: horizontal location of root
'''
if not nx.is_tree(G):
raise TypeError('cannot use hierarchy_pos on a graph that is not a tree')
if root is None:
if isinstance(G, nx.DiGraph):
root = next(iter(nx.topological_sort(G))) #allows back compatibility with nx version 1.11
else:
root = random.choice(list(G.nodes))
def _hierarchy_pos(G, root, width=1., vert_gap = 0.2, vert_loc = 0, xcenter = 0.5, pos = None, parent = None):
'''
see hierarchy_pos docstring for most arguments
pos: a dict saying where all nodes go if they have been assigned
parent: parent of this branch. - only affects it if non-directed
'''
if pos is None:
pos = {root:(xcenter,vert_loc)}
else:
pos[root] = (xcenter, vert_loc)
children = list(G.neighbors(root))
if not isinstance(G, nx.DiGraph) and parent is not None:
children.remove(parent)
if len(children)!=0:
dx = width/len(children)
nextx = xcenter - width/2 - dx/2
for child in children:
nextx += dx
pos = _hierarchy_pos(G,child, width = dx, vert_gap = vert_gap,
vert_loc = vert_loc-vert_gap, xcenter=nextx,
pos=pos, parent = root)
return pos
return _hierarchy_pos(G, root, width, vert_gap, vert_loc, xcenter)
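# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Lay out a small directed tree and draw it with the computed positions.
# matplotlib is assumed to be installed; it is not imported by this module.
def _example_hierarchy_pos():
    import matplotlib.pyplot as plt
    G = nx.DiGraph()
    G.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5)])
    pos = hierarchy_pos(G, root=0)
    nx.draw(G, pos=pos, with_labels=True)
    plt.show()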
| 35.602941
| 114
| 0.618753
|
0bc7dd4a3e6e6dc772dc5e9a3a8d2a78848bc70a
| 4,295
|
py
|
Python
|
quarty/src/quarty/server/__main__.py
|
quartictech/platform
|
d9f535f21d38fa836ec691d86ea2b2c610320757
|
[
"BSD-3-Clause"
] | 3
|
2017-11-07T21:49:39.000Z
|
2019-08-08T20:59:02.000Z
|
quarty/src/quarty/server/__main__.py
|
quartictech/platform
|
d9f535f21d38fa836ec691d86ea2b2c610320757
|
[
"BSD-3-Clause"
] | 1
|
2021-06-05T08:00:37.000Z
|
2021-06-05T08:00:37.000Z
|
quarty/src/quarty/server/__main__.py
|
quartictech/platform
|
d9f535f21d38fa836ec691d86ea2b2c610320757
|
[
"BSD-3-Clause"
] | 2
|
2018-01-09T10:49:48.000Z
|
2019-11-27T09:18:17.000Z
|
import tempfile
import logging
import json
from concurrent.futures import CancelledError
import aiohttp
import aiohttp.web
from quarty.common import initialise_repo, install_requirements, evaluate_pipeline, execute_pipeline
from quarty.utils import QuartyException, PipelineException
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
class States:
START = 0
INITIALISE = 1
EVALUATE = 2
EXECUTE = 3
def send_message(ws, msg_type, **kwargs):
j = {"type": msg_type}
j.update(kwargs)
ws.send_str(json.dumps(j))
def progress_message(ws, message):
send_message(ws, "progress", message=message)
def log_message(ws, stream, line):
send_message(ws, "log", stream=stream, line=str(line))
def result_message(ws, result):
send_message(ws, "result", result=result)
def error_message(ws, error):
send_message(ws, "error", detail=error)
def assert_state(state, *expected):
if state not in set(expected):
raise QuartyException("Expected state {} but is {}".format(
" | ".join(expected), state))
async def initialise(build_path, repo_url, repo_commit, ws):
progress_message(ws, "Initialising repository")
config = await initialise_repo(repo_url, repo_commit, build_path)
progress_message(ws, "Installing requirements")
await install_requirements(build_path)
result_message(ws, None)
return config
async def evaluate(config, build_path, ws):
result = await evaluate_pipeline(config["pipeline_directory"],
build_path,
lambda l: log_message(ws, "stdout", l),
lambda l: log_message(ws, "stderr", l))
result_message(ws, result)
async def execute(config, build_path, step, namespace, api_token, ws): # pylint: disable=too-many-arguments
await execute_pipeline(config["pipeline_directory"],
build_path,
step,
namespace,
api_token,
lambda l: log_message(ws, "stdout", l),
lambda l: log_message(ws, "stderr", l))
result_message(ws, None)
async def decode_message(raw_msg):
if raw_msg.type == aiohttp.WSMsgType.TEXT:
return json.loads(raw_msg.data)
else:
raise QuartyException("Error")
async def websocket_handler(request):
logging.info("Registering websocket connection")
ws = aiohttp.web.WebSocketResponse()
await ws.prepare(request)
state = States.START
config = None
build_path = tempfile.mkdtemp()
try:
async for raw_msg in ws:
log.info("Received message: %s", raw_msg)
msg = await decode_message(raw_msg)
if msg["type"] == "initialise":
assert_state(state, States.START)
repo_url = msg["repo_url"]
repo_commit = msg["repo_commit"]
config = await initialise(build_path, repo_url, repo_commit, ws)
log.info(config)
state = States.INITIALISE
log.info("done")
elif msg["type"] == "evaluate":
assert_state(state, States.INITIALISE)
await evaluate(config, build_path, ws)
state = States.EVALUATE
elif msg["type"] == "execute":
assert_state(state, States.EVALUATE, States.EXECUTE)
step = msg["step"]
namespace = msg["namespace"]
api_token = msg["api_token"]
await execute(config, build_path, step, namespace, api_token, ws)
state = States.EXECUTE
except PipelineException as e:
log.exception("Exception while running pipeline")
error_message(ws, e.args[0])
except CancelledError:
pass
except (QuartyException, Exception) as e: # pylint: disable=broad-except
log.exception("Something strange happened")
error_message(ws, "Quarty exception: {}".format(e))
finally:
log.info("Closing WebSocket connection")
await ws.close()
return ws
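# --- Hedged client sketch (added for illustration; not part of the original file) ---
# Roughly how a client could drive the message protocol handled above: connect,
# send an "initialise" request, then read frames until a result or error arrives.
# The repository URL/commit are hypothetical placeholders, and a recent aiohttp
# client API (awaitable send_str) is assumed.
async def _example_client(url="http://localhost:8080/"):
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect(url) as client_ws:
            await client_ws.send_str(json.dumps({
                "type": "initialise",
                "repo_url": "https://example.com/pipeline.git",  # hypothetical
                "repo_commit": "HEAD",                           # hypothetical
            }))
            async for frame in client_ws:
                if frame.type != aiohttp.WSMsgType.TEXT:
                    break
                msg = json.loads(frame.data)
                if msg["type"] in ("result", "error"):
                    return msg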
app = aiohttp.web.Application()
app.router.add_get("/", websocket_handler)
aiohttp.web.run_app(app, port=8080)
| 35.495868
| 108
| 0.623283
|
c645eabc6c887771d4c4e7c2ecfb05cf89f7da6a
| 6,113
|
py
|
Python
|
ui_extensions/api_extension/views.py
|
gamethis/cloudbolt-forge
|
f2e240d4a52483c0c1a738c539969ebd10663c68
|
[
"Apache-2.0"
] | null | null | null |
ui_extensions/api_extension/views.py
|
gamethis/cloudbolt-forge
|
f2e240d4a52483c0c1a738c539969ebd10663c68
|
[
"Apache-2.0"
] | null | null | null |
ui_extensions/api_extension/views.py
|
gamethis/cloudbolt-forge
|
f2e240d4a52483c0c1a738c539969ebd10663c68
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from datetime import datetime
from django.db.models import Q
from django.utils.translation import ugettext as _
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from api.decorators import link
from api.exceptions import APIException
from rest_framework.decorators import api_view
from api.viewsets import CloudBoltViewSet, ImportExportViewsetMixin, action_return, dict_return
from api.v2.viewsets import ResourceHandlerViewSet, SetEnabledMixin
from resourcehandlers.models import ResourceHandler
from resources.models import Resource
from utilities.logger import ThreadLogger
from resourcehandlers.serializers import ResourceHandlerSerializer
from api.v2.pagination import ResourceHandlerPagination
from extensions.views import admin_extension
from django.shortcuts import render
from utilities.permissions import cbadmin_required
logger = ThreadLogger(__name__)
@admin_extension(title="API Extension")
def apiextensions(request, *args, **kwargs):
return render(request, "api_extension/templates/api.html")
class ResourceHandlerViewSetExtend(ResourceHandlerViewSet, SetEnabledMixin, ImportExportViewsetMixin):
def __init__(self, id, request):
self.id = id
self.request = request
model = ResourceHandler
serializer_class = ResourceHandlerSerializer
pagination_class = ResourceHandlerPagination
#@cbadmin_required
@link(methods=['post'])
def set_template_creds(self, *args, **kwargs):
"""
Endpoint for setting credentials on a template, with an optional SSH key, on
the resource handler (RH).
Single POST:
{
"template": "{{template_name}}",
"user-name": "{{user_name}}",
"password": "{{password}}",
"ssh-key":"{{ssh_key}}"
}
Array POST:
[{
"template": "{{template_name}}",
"user-name": "{{user_name}}",
"password": "{{password}}",
"ssh-key":"{{ssh_key}}"
},
{
"template": "{{template_name}}",
}]
"""
resp = {}
rh = ResourceHandler.objects.get(id=self.id)
handler = rh.cast()
if not handler.can_import_templates_api:
raise APIException(_('Bad Request: Invalid Resource Handler'),
code=400,
details=_('API endpoint is not currently supported for this resource handler.'))
profile = self.request.get_user_profile()
if not profile.is_cbadmin:
raise PermissionDenied(
_("This action requires 'CB Admin' or 'Super Admin' privileges."))
combined_requests = self._verify_api_template_creds_json(self.request.data)
all_reqs = []
for req_template, req_username, req_password, req_sshkey in combined_requests:
logger.info(f"Attempting to set credentials on {req_template}")
template = handler.os_build_attributes.filter(
template_name=req_template).first().cast()
logger.info(template)
logger.info(dir(template))
if template:
template.password = req_password
template.username = req_username
template.save()
handler.save()
message = f"Template Credentials updated for {template}."
resp['message'] = message
resp['status_code'] = 200
all_reqs.append(resp)
else:
message = f"Template {req_template} Not Found"
resp['message'] = message
resp['status_code'] = 400
all_reqs.append(resp)
overall = {}
overall['message'] = f"Template Credentials updated for {template}."
overall['status_code'] = 200
for resp in all_reqs:
if resp['status_code'] != 200:
overall['status_code'] = resp['status_code']
overall['message'] = resp['message']
return Response(overall, status=overall['status_code'])
def _verify_api_template_creds_json(self, request_data):
"""
Validate incoming POST request data and create pairs of templates and os_builds
to import to a specific resource handler.
"""
logger.info("Confirming Payload for set-template-creds")
if not isinstance(request_data, list):
request_data = [
request_data]
requested_templates, requested_usernames, requested_passwords, requested_sshkeys = [], [], [], []
for template in request_data:
requested_template = template.get('template', None)
requested_username = template.get('user-name', None)
requested_password = template.get('password', None)
requested_sshkey = template.get('ssh-key', None)
if requested_sshkey == '':
requested_sshkey = None
logger.info(requested_sshkey)
if requested_template in requested_templates:
raise APIException(_('Bad Request: Duplicate Names'),
code=400,
details=_("'template' and 'os_build' must be assigned unique values for each entry in POST request"))
else:
requested_templates.append(requested_template)
requested_usernames.append(requested_username)
requested_passwords.append(requested_password)
requested_sshkeys.append(requested_sshkey)
return zip(requested_templates, requested_usernames, requested_passwords, requested_sshkeys)
@api_view(['POST'])
def set_template_creds(request, id, *args, **kwargs):
rh = ResourceHandlerViewSetExtend(id=id, request=request)
resp = rh.set_template_creds()
return resp
#Sample Payloads
#{
# "template": "templatename",
# "user-name": "myuser",
# "password": "mytemplatepassword"
#}
#
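# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Roughly how the payload above could be POSTed to this handler. The endpoint
# URL and the auth header are assumptions -- the URL routing is not defined in
# this file -- so treat this purely as a shape example.
#
# import requests
# payload = [{
#     "template": "templatename",
#     "user-name": "myuser",
#     "password": "mytemplatepassword",
# }]
# resp = requests.post(
#     "https://cloudbolt.example.com/api/v2/resource-handlers/7/set-template-creds/",  # hypothetical URL
#     json=payload,
#     headers={"Authorization": "Bearer <api-token>"},  # placeholder credentials
# )
# print(resp.status_code, resp.json())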
| 39.954248
| 121
| 0.640602
|
e14521d271f1a06b4f2f89e2656b097180745512
| 19,736
|
py
|
Python
|
frappe/translate.py
|
vCentre/vB062506-frappe
|
fa095e260b7993ad924ca771d23ba707a782c25b
|
[
"MIT"
] | null | null | null |
frappe/translate.py
|
vCentre/vB062506-frappe
|
fa095e260b7993ad924ca771d23ba707a782c25b
|
[
"MIT"
] | null | null | null |
frappe/translate.py
|
vCentre/vB062506-frappe
|
fa095e260b7993ad924ca771d23ba707a782c25b
|
[
"MIT"
] | 1
|
2018-03-22T02:28:59.000Z
|
2018-03-22T02:28:59.000Z
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
frappe.translate
~~~~~~~~~~~~~~~~
Translation tools for frappe
"""
import frappe, os, re, codecs, json
from frappe.utils.jinja import render_include
from frappe.utils import strip
from jinja2 import TemplateError
import itertools, operator
def guess_language(lang_list=None):
"""Set `frappe.local.lang` from HTTP headers at beginning of request"""
lang_codes = frappe.request.accept_languages.values()
if not lang_codes:
return frappe.local.lang
guess = None
if not lang_list:
lang_list = get_all_languages() or []
for l in lang_codes:
code = l.strip()
if code in lang_list or code == "en":
guess = code
break
# check if parent language (pt) is setup, if variant (pt-BR)
if "-" in code:
code = code.split("-")[0]
if code in lang_list:
guess = code
break
return guess or frappe.local.lang
def get_user_lang(user=None):
"""Set frappe.local.lang from user preferences on session beginning or resumption"""
if not user:
user = frappe.session.user
# via cache
lang = frappe.cache().hget("lang", user)
if not lang:
# if defined in user profile
user_lang = frappe.db.get_value("User", user, "language")
if user_lang and user_lang!="Loading...":
lang = get_lang_dict().get(user_lang, user_lang) or frappe.local.lang
else:
default_lang = frappe.db.get_default("lang")
lang = default_lang or frappe.local.lang
frappe.cache().hset("lang", user, lang or "en")
return lang
def set_default_language(language):
"""Set Global default language"""
lang = get_lang_dict().get(language, language)
frappe.db.set_default("lang", lang)
frappe.local.lang = lang
def get_all_languages():
"""Returns all language codes ar, ch etc"""
return [a.split()[0] for a in get_lang_info()]
def get_lang_dict():
"""Returns all languages in dict format, full name is the key e.g. `{"english":"en"}`"""
return dict([[a[1], a[0]] for a in [a.split(None, 1) for a in get_lang_info()]])
def get_language_from_code(lang):
return dict(a.split(None, 1) for a in get_lang_info()).get(lang)
def get_lang_info():
"""Returns a listified version of `apps/languages.txt`"""
return frappe.cache().get_value("langinfo",
lambda:frappe.get_file_items(os.path.join(frappe.local.sites_path, "languages.txt")))
def get_dict(fortype, name=None):
"""Returns translation dict for a type of object.
:param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot`
:param name: name of the document for which assets are to be returned.
"""
fortype = fortype.lower()
cache = frappe.cache()
asset_key = fortype + ":" + (name or "-")
translation_assets = cache.hget("translation_assets", frappe.local.lang) or {}
if not asset_key in translation_assets:
if fortype=="doctype":
messages = get_messages_from_doctype(name)
elif fortype=="page":
messages = get_messages_from_page(name)
elif fortype=="report":
messages = get_messages_from_report(name)
elif fortype=="include":
messages = get_messages_from_include_files()
elif fortype=="jsfile":
messages = get_messages_from_file(name)
elif fortype=="boot":
messages = get_messages_from_include_files()
messages += frappe.db.sql("select 'DocType:', name from tabDocType")
messages += frappe.db.sql("select 'Role:', name from tabRole")
messages += frappe.db.sql("select 'Module:', name from `tabModule Def`")
translation_assets[asset_key] = make_dict_from_messages(messages)
translation_assets[asset_key].update(get_dict_from_hooks(fortype, name))
cache.hset("translation_assets", frappe.local.lang, translation_assets)
return translation_assets[asset_key]
def get_dict_from_hooks(fortype, name):
translated_dict = {}
hooks = frappe.get_hooks("get_translated_dict")
for (hook_fortype, fortype_name) in hooks:
if hook_fortype == fortype and fortype_name == name:
for method in hooks[(hook_fortype, fortype_name)]:
translated_dict.update(frappe.get_attr(method)())
return translated_dict
def add_lang_dict(code):
"""Extracts messages and returns Javascript code snippet to be appened at the end
of the given script
:param code: Javascript code snippet to which translations needs to be appended."""
messages = extract_messages_from_code(code)
messages = [message for pos, message in messages]
code += "\n\n$.extend(frappe._messages, %s)" % json.dumps(make_dict_from_messages(messages))
return code
def make_dict_from_messages(messages, full_dict=None):
"""Returns translated messages as a dict in Language specified in `frappe.local.lang`
:param messages: List of untranslated messages
"""
out = {}
if full_dict==None:
full_dict = get_full_dict(frappe.local.lang)
for m in messages:
if m[1] in full_dict:
out[m[1]] = full_dict[m[1]]
return out
def get_lang_js(fortype, name):
"""Returns code snippet to be appended at the end of a JS script.
:param fortype: Type of object, e.g. `DocType`
:param name: Document name
"""
return "\n\n$.extend(frappe._messages, %s)" % json.dumps(get_dict(fortype, name))
def get_full_dict(lang):
"""Load and return the entire translations dictionary for a language from :meth:`frape.cache`
:param lang: Language Code, e.g. `hi`
"""
if not lang or lang == "en":
return {}
if not frappe.local.lang_full_dict:
frappe.local.lang_full_dict = frappe.cache().hget("lang_full_dict", lang)
if not frappe.local.lang_full_dict:
frappe.local.lang_full_dict = load_lang(lang)
# cache lang
frappe.cache().hset("lang_full_dict", lang, frappe.local.lang_full_dict)
# get user specific translation data
user_translations = get_user_translations(lang)
if user_translations:
frappe.local.lang_full_dict.update(user_translations)
return frappe.local.lang_full_dict
def load_lang(lang, apps=None):
"""Combine all translations from `.csv` files in all `apps`"""
out = {}
for app in (apps or frappe.get_all_apps(True)):
path = os.path.join(frappe.get_pymodule_path(app), "translations", lang + ".csv")
out.update(get_translation_dict_from_file(path, lang, app))
return out
def get_translation_dict_from_file(path, lang, app):
"""load translation dict from given path"""
cleaned = {}
if os.path.exists(path):
csv_content = read_csv_file(path)
for item in csv_content:
if len(item)==3:
# with file and line numbers
cleaned[item[1]] = strip(item[2])
elif len(item)==2:
cleaned[item[0]] = strip(item[1])
else:
raise Exception("Bad translation in '{app}' for language '{lang}': {values}".format(
app=app, lang=lang, values=repr(item).encode("utf-8")
))
return cleaned
def get_user_translations(lang):
out = frappe.cache().hget('lang_user_translations', lang)
if not out:
out = {}
for fields in frappe.get_all('Translation',
fields= ["source_name", "target_name"],filters={'language_code': lang}):
out.update({fields.source_name: fields.target_name})
frappe.cache().hset('lang_user_translations', lang, out)
return out
# def get_user_translation_key():
# return 'lang_user_translations:{0}'.format(frappe.local.site)
def clear_cache():
"""Clear all translation assets from :meth:`frappe.cache`"""
cache = frappe.cache()
cache.delete_key("langinfo")
cache.delete_key("lang_full_dict")
cache.delete_key("translation_assets")
def get_messages_for_app(app):
"""Returns all messages (list) for a specified `app`"""
messages = []
modules = ", ".join(['"{}"'.format(m.title().replace("_", " ")) \
for m in frappe.local.app_modules[app]])
# doctypes
if modules:
for name in frappe.db.sql_list("""select name from tabDocType
where module in ({})""".format(modules)):
messages.extend(get_messages_from_doctype(name))
# pages
for name, title in frappe.db.sql("""select name, title from tabPage
where module in ({})""".format(modules)):
messages.append((None, title or name))
messages.extend(get_messages_from_page(name))
# reports
for name in frappe.db.sql_list("""select tabReport.name from tabDocType, tabReport
where tabReport.ref_doctype = tabDocType.name
and tabDocType.module in ({})""".format(modules)):
messages.append((None, name))
messages.extend(get_messages_from_report(name))
for i in messages:
if not isinstance(i, tuple):
raise Exception
# app_include_files
messages.extend(get_all_messages_from_js_files(app))
# server_messages
messages.extend(get_server_messages(app))
return deduplicate_messages(messages)
def get_messages_from_doctype(name):
"""Extract all translatable messages for a doctype. Includes labels, Python code,
Javascript code, html templates"""
messages = []
meta = frappe.get_meta(name)
messages = [meta.name, meta.module]
if meta.description:
messages.append(meta.description)
# translations of field labels, description and options
for d in meta.get("fields"):
messages.extend([d.label, d.description])
if d.fieldtype=='Select' and d.options:
options = d.options.split('\n')
if not "icon" in options[0]:
messages.extend(options)
# translations of roles
for d in meta.get("permissions"):
if d.role:
messages.append(d.role)
messages = [message for message in messages if message]
messages = [('DocType: ' + name, message) for message in messages if is_translatable(message)]
# extract from js, py files
doctype_file_path = frappe.get_module_path(meta.module, "doctype", meta.name, meta.name)
messages.extend(get_messages_from_file(doctype_file_path + ".js"))
messages.extend(get_messages_from_file(doctype_file_path + "_list.js"))
messages.extend(get_messages_from_file(doctype_file_path + "_list.html"))
messages.extend(get_messages_from_file(doctype_file_path + "_calendar.js"))
return messages
def get_messages_from_page(name):
"""Returns all translatable strings from a :class:`frappe.core.doctype.Page`"""
return _get_messages_from_page_or_report("Page", name)
def get_messages_from_report(name):
"""Returns all translatable strings from a :class:`frappe.core.doctype.Report`"""
report = frappe.get_doc("Report", name)
messages = _get_messages_from_page_or_report("Report", name,
frappe.db.get_value("DocType", report.ref_doctype, "module"))
# TODO position here!
if report.query:
messages.extend([(None, message) for message in re.findall('"([^:,^"]*):', report.query) if is_translatable(message)])
messages.append((None,report.report_name))
return messages
def _get_messages_from_page_or_report(doctype, name, module=None):
if not module:
module = frappe.db.get_value(doctype, name, "module")
doc_path = frappe.get_module_path(module, doctype, name)
messages = get_messages_from_file(os.path.join(doc_path, frappe.scrub(name) +".py"))
if os.path.exists(doc_path):
for filename in os.listdir(doc_path):
if filename.endswith(".js") or filename.endswith(".html"):
messages += get_messages_from_file(os.path.join(doc_path, filename))
return messages
def get_server_messages(app):
"""Extracts all translatable strings (tagged with :func:`frappe._`) from Python modules inside an app"""
messages = []
for basepath, folders, files in os.walk(frappe.get_pymodule_path(app)):
for dontwalk in (".git", "public", "locale"):
if dontwalk in folders: folders.remove(dontwalk)
for f in files:
if f.endswith(".py") or f.endswith(".html") or f.endswith(".js"):
messages.extend(get_messages_from_file(os.path.join(basepath, f)))
return messages
def get_messages_from_include_files(app_name=None):
"""Returns messages from js files included at time of boot like desk.min.js for desk and web"""
messages = []
for file in (frappe.get_hooks("app_include_js", app_name=app_name) or []) + (frappe.get_hooks("web_include_js", app_name=app_name) or []):
messages.extend(get_messages_from_file(os.path.join(frappe.local.sites_path, file)))
return messages
def get_all_messages_from_js_files(app_name=None):
"""Extracts all translatable strings from app `.js` files"""
messages = []
for app in ([app_name] if app_name else frappe.get_installed_apps()):
if os.path.exists(frappe.get_app_path(app, "public")):
for basepath, folders, files in os.walk(frappe.get_app_path(app, "public")):
if "frappe/public/js/lib" in basepath:
continue
for fname in files:
if fname.endswith(".js") or fname.endswith(".html"):
messages.extend(get_messages_from_file(os.path.join(basepath, fname)))
return messages
def get_messages_from_file(path):
"""Returns a list of transatable strings from a code file
:param path: path of the code file
"""
apps_path = get_bench_dir()
if os.path.exists(path):
with open(path, 'r') as sourcefile:
return [(os.path.relpath(" +".join([path, str(pos)]), apps_path),
message) for pos, message in extract_messages_from_code(sourcefile.read(), path.endswith(".py"))]
else:
# print "Translate: {0} missing".format(os.path.abspath(path))
return []
def extract_messages_from_code(code, is_py=False):
"""Extracts translatable srings from a code file
:param code: code from which translatable files are to be extracted
:param is_py: include messages in triple quotes e.g. `_('''message''')`"""
try:
code = render_include(code)
except TemplateError:
# Exception will occur when it encounters John Resig's microtemplating code
pass
messages = []
messages += [(m.start(), m.groups()[0]) for m in re.compile('_\("([^"]*)"').finditer(code)]
messages += [(m.start(), m.groups()[0]) for m in re.compile("_\('([^']*)'").finditer(code)]
if is_py:
messages += [(m.start(), m.groups()[0]) for m in re.compile('_\("{3}([^"]*)"{3}.*\)').finditer(code)]
messages = [(pos, message) for pos, message in messages if is_translatable(message)]
return pos_to_line_no(messages, code)
def is_translatable(m):
if re.search("[a-z]", m) and not m.startswith("icon-") and not m.endswith("px") and not m.startswith("eval:"):
return True
return False
def pos_to_line_no(messages, code):
ret = []
messages = sorted(messages, key=lambda x: x[0])
newlines = [m.start() for m in re.compile('\\n').finditer(code)]
line = 1
newline_i = 0
for pos, message in messages:
while newline_i < len(newlines) and pos > newlines[newline_i]:
line+=1
newline_i+= 1
ret.append((line, message))
return ret
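# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# What the extractor above is expected to return for a tiny snippet. The output
# shown is inferred from the regexes and line counting, not a captured run.
#
# code = 'msg = _("Hello World")\nother = _(\'Second line\')'
# extract_messages_from_code(code)
# # -> [(1, 'Hello World'), (2, 'Second line')]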
def read_csv_file(path):
"""Read CSV file and return as list of list
:param path: File path"""
from csv import reader
with codecs.open(path, 'r', 'utf-8') as msgfile:
data = msgfile.read()
# for japanese! #wtf
data = data.replace(chr(28), "").replace(chr(29), "")
data = reader([r.encode('utf-8') for r in data.splitlines()])
newdata = [[unicode(val, 'utf-8') for val in row] for row in data]
return newdata
def write_csv_file(path, app_messages, lang_dict):
"""Write translation CSV file.
:param path: File path, usually `[app]/translations`.
:param app_messages: Translatable strings for this app.
:param lang_dict: Full translated dict.
"""
app_messages.sort(lambda x,y: cmp(x[1], y[1]))
from csv import writer
with open(path, 'wb') as msgfile:
w = writer(msgfile, lineterminator='\n')
for p, m in app_messages:
t = lang_dict.get(m, '')
# strip whitespaces
t = re.sub('{\s?([0-9]+)\s?}', "{\g<1>}", t)
w.writerow([p.encode('utf-8') if p else '', m.encode('utf-8'), t.encode('utf-8')])
def get_untranslated(lang, untranslated_file, get_all=False):
"""Returns all untranslated strings for a language and writes in a file
:param lang: Language code.
:param untranslated_file: Output file path.
:param get_all: Return all strings, translated or not."""
clear_cache()
apps = frappe.get_all_apps(True)
messages = []
untranslated = []
for app in apps:
messages.extend(get_messages_for_app(app))
messages = deduplicate_messages(messages)
def escape_newlines(s):
return (s.replace("\\\n", "|||||")
.replace("\\n", "||||")
.replace("\n", "|||"))
if get_all:
print str(len(messages)) + " messages"
with open(untranslated_file, "w") as f:
for m in messages:
# replace \n with ||| so that internal linebreaks don't get split
f.write((escape_newlines(m[1]) + os.linesep).encode("utf-8"))
else:
full_dict = get_full_dict(lang)
for m in messages:
if not full_dict.get(m[1]):
untranslated.append(m[1])
if untranslated:
print str(len(untranslated)) + " missing translations of " + str(len(messages))
with open(untranslated_file, "w") as f:
for m in untranslated:
# replace \n with ||| so that internal linebreaks don't get split
f.write((escape_newlines(m) + os.linesep).encode("utf-8"))
else:
print "all translated!"
def update_translations(lang, untranslated_file, translated_file):
"""Update translations from a source and target file for a given language.
:param lang: Language code (e.g. `en`).
:param untranslated_file: File path with the messages in English.
:param translated_file: File path with messages in language to be updated."""
clear_cache()
full_dict = get_full_dict(lang)
def restore_newlines(s):
return (s.replace("|||||", "\\\n")
.replace("| | | | |", "\\\n")
.replace("||||", "\\n")
.replace("| | | |", "\\n")
.replace("|||", "\n")
.replace("| | |", "\n"))
translation_dict = {}
for key, value in zip(frappe.get_file_items(untranslated_file, ignore_empty_lines=False),
frappe.get_file_items(translated_file, ignore_empty_lines=False)):
# undo hack in get_untranslated
translation_dict[restore_newlines(key)] = restore_newlines(value)
full_dict.update(translation_dict)
for app in frappe.get_all_apps(True):
write_translations_file(app, lang, full_dict)
def import_translations(lang, path):
"""Import translations from file in standard format"""
clear_cache()
full_dict = get_full_dict(lang)
full_dict.update(get_translation_dict_from_file(path, lang, 'import'))
for app in frappe.get_all_apps(True):
write_translations_file(app, lang, full_dict)
def rebuild_all_translation_files():
"""Rebuild all translation files: `[app]/translations/[lang].csv`."""
for lang in get_all_languages():
for app in frappe.get_all_apps():
write_translations_file(app, lang)
def write_translations_file(app, lang, full_dict=None, app_messages=None):
"""Write a translation file for a given language.
:param app: `app` for which translations are to be written.
:param lang: Language code.
:param full_dict: Full translated language dict (optional).
:param app_messages: Source strings (optional).
"""
if not app_messages:
app_messages = get_messages_for_app(app)
if not app_messages:
return
tpath = frappe.get_pymodule_path(app, "translations")
frappe.create_folder(tpath)
write_csv_file(os.path.join(tpath, lang + ".csv"),
app_messages, full_dict or get_full_dict(lang))
def send_translations(translation_dict):
"""Append translated dict in `frappe.local.response`"""
if "__messages" not in frappe.local.response:
frappe.local.response["__messages"] = {}
frappe.local.response["__messages"].update(translation_dict)
def deduplicate_messages(messages):
ret = []
op = operator.itemgetter(1)
messages = sorted(messages, key=op)
for k, g in itertools.groupby(messages, op):
ret.append(g.next())
return ret
def get_bench_dir():
return os.path.join(frappe.__file__, '..', '..', '..', '..')
def rename_language(old_name, new_name):
language_in_system_settings = frappe.db.get_single_value("System Settings", "language")
if language_in_system_settings == old_name:
frappe.db.set_value("System Settings", "System Settings", "language", new_name)
frappe.db.sql("""update `tabUser` set language=%(new_name)s where language=%(old_name)s""",
{ "old_name": old_name, "new_name": new_name })
| 33.337838
| 139
| 0.717825
|
d0ca2f215e9a7b41c18af1455ca67199fcb8bedf
| 1,777
|
py
|
Python
|
seleniumwire/thirdparty/mitmproxy/net/http/user_agents.py
|
KozminMoci/selenium-wire
|
063c44ab42ac5e53e28c8a8c49c9ae7036bd878b
|
[
"MIT"
] | 24,939
|
2015-01-01T17:13:21.000Z
|
2022-03-31T17:50:04.000Z
|
seleniumwire/thirdparty/mitmproxy/net/http/user_agents.py
|
KozminMoci/selenium-wire
|
063c44ab42ac5e53e28c8a8c49c9ae7036bd878b
|
[
"MIT"
] | 3,655
|
2015-01-02T12:31:43.000Z
|
2022-03-31T20:24:57.000Z
|
seleniumwire/thirdparty/mitmproxy/net/http/user_agents.py
|
KozminMoci/selenium-wire
|
063c44ab42ac5e53e28c8a8c49c9ae7036bd878b
|
[
"MIT"
] | 3,712
|
2015-01-06T06:47:06.000Z
|
2022-03-31T10:33:27.000Z
|
"""
A small collection of useful user-agent header strings. These should be
kept reasonably current to reflect common usage.
"""
# pylint: line-too-long
# A collection of (name, shortcut, string) tuples.
UASTRINGS = [
("android",
"a",
"Mozilla/5.0 (Linux; U; Android 4.1.1; en-gb; Nexus 7 Build/JRO03D) AFL/01.04.02"), # noqa
("blackberry",
"l",
"Mozilla/5.0 (BlackBerry; U; BlackBerry 9900; en) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.1.0.346 Mobile Safari/534.11+"), # noqa
("bingbot",
"b",
"Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"), # noqa
("chrome",
"c",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"), # noqa
("firefox",
"f",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:14.0) Gecko/20120405 Firefox/14.0a1"), # noqa
("googlebot",
"g",
"Googlebot/2.1 (+http://www.googlebot.com/bot.html)"), # noqa
("ie9",
"i",
"Mozilla/5.0 (Windows; U; MSIE 9.0; WIndows NT 9.0; en-US)"), # noqa
("ipad",
"p",
"Mozilla/5.0 (iPad; CPU OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B176 Safari/7534.48.3"), # noqa
("iphone",
"h",
"Mozilla/5.0 (iPhone; CPU iPhone OS 4_2_1 like Mac OS X) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148a Safari/6533.18.5"), # noqa
("safari",
"s",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10"), # noqa
]
def get_by_shortcut(s):
"""
Retrieve a user agent entry by shortcut.
"""
for i in UASTRINGS:
if s == i[1]:
return i
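# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Look up an entry by its single-letter shortcut; the full user-agent string is
# the third element of the returned tuple.
#
# name, shortcut, ua = get_by_shortcut("c")
# # name == "chrome", ua starts with "Mozilla/5.0 (Windows NT 6.1; WOW64) ..."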
| 34.843137
| 158
| 0.597074
|
32c77b8124e1fdd117e3a8f678cc84f4a39cddf2
| 4,251
|
py
|
Python
|
dace/libraries/mpi/nodes/wait.py
|
jnice-81/dace
|
5211794a2d17b7189037ac485ab0b292fb02aa0d
|
[
"BSD-3-Clause"
] | 227
|
2019-03-15T23:39:06.000Z
|
2022-03-30T07:49:08.000Z
|
dace/libraries/mpi/nodes/wait.py
|
jnice-81/dace
|
5211794a2d17b7189037ac485ab0b292fb02aa0d
|
[
"BSD-3-Clause"
] | 834
|
2019-07-31T22:49:31.000Z
|
2022-03-28T14:01:32.000Z
|
dace/libraries/mpi/nodes/wait.py
|
jnice-81/dace
|
5211794a2d17b7189037ac485ab0b292fb02aa0d
|
[
"BSD-3-Clause"
] | 64
|
2019-03-19T05:40:37.000Z
|
2022-03-11T15:02:42.000Z
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace.library
import dace.properties
import dace.sdfg.nodes
from dace.transformation.transformation import ExpandTransformation
from .. import environments
from dace import dtypes
@dace.library.expansion
class ExpandWaitMPI(ExpandTransformation):
environments = [environments.mpi.MPI]
@staticmethod
def expansion(node, parent_state, parent_sdfg, n=None, **kwargs):
req, status = node.validate(parent_sdfg, parent_state)
code = f"""
MPI_Status _s;
MPI_Wait(_request, &_s);
_stat_tag = _s.MPI_TAG;
_stat_source = _s.MPI_SOURCE;
"""
tasklet = dace.sdfg.nodes.Tasklet(node.name,
node.in_connectors,
node.out_connectors,
code,
language=dace.dtypes.Language.CPP)
conn = tasklet.in_connectors
conn = {
c: (dtypes.pointer(dtypes.opaque("MPI_Request"))
if c == '_request' else t)
for c, t in conn.items()
}
tasklet.in_connectors = conn
return tasklet
@dace.library.node
class Wait(dace.sdfg.nodes.LibraryNode):
# Global properties
implementations = {
"MPI": ExpandWaitMPI,
}
default_implementation = "MPI"
# Object fields
n = dace.properties.SymbolicProperty(allow_none=True, default=None)
def __init__(self, name, *args, **kwargs):
super().__init__(name,
*args,
inputs={"_request"},
outputs={"_stat_tag", "_stat_source"},
**kwargs)
def validate(self, sdfg, state):
"""
:return: req, status
"""
req, status = None, None
for e in state.in_edges(self):
if e.dst_conn == "_request":
req = sdfg.arrays[e.data.data]
for e in state.out_edges(self):
if e.src_conn == "_status":
status = sdfg.arrays[e.data.data]
return req, status
@dace.library.expansion
class ExpandWaitallPure(ExpandTransformation):
"""
Naive backend-agnostic expansion of Waitall.
"""
environments = []
@staticmethod
def expansion(node, parent_state, parent_sdfg, n=None, **kwargs):
raise (NotImplementedError)
@dace.library.expansion
class ExpandWaitallMPI(ExpandTransformation):
environments = [environments.mpi.MPI]
@staticmethod
def expansion(node, parent_state, parent_sdfg, n=None, **kwargs):
count = node.validate(parent_sdfg, parent_state)
code = f"""
MPI_Status _s[{count}];
MPI_Waitall({count}, _request, _s);
"""
tasklet = dace.sdfg.nodes.Tasklet(node.name,
node.in_connectors,
node.out_connectors,
code,
language=dace.dtypes.Language.CPP)
conn = tasklet.in_connectors
conn = {
c: (dtypes.pointer(dtypes.opaque("MPI_Request"))
if c == '_request' else t)
for c, t in conn.items()
}
tasklet.in_connectors = conn
return tasklet
@dace.library.node
class Waitall(dace.sdfg.nodes.LibraryNode):
# Global properties
implementations = {
"MPI": ExpandWaitallMPI,
}
default_implementation = "MPI"
# Object fields
n = dace.properties.SymbolicProperty(allow_none=True, default=None)
def __init__(self, name, *args, **kwargs):
super().__init__(name, *args, inputs={"_request"}, outputs={}, **kwargs)
def validate(self, sdfg, state):
"""
:return: count
"""
count = None
for e in state.in_edges(self):
if e.dst_conn == "_request":
count = e.data.subset.num_elements()
if not count:
raise ValueError(
"At least 1 request object must be passed to Waitall")
return count
| 29.317241
| 80
| 0.554928
|
316c8109fa8ed4190b84c8d097dc33afbbe6f22f
| 9,230
|
py
|
Python
|
python/model/vae.py
|
VAlex22/ND_VAE
|
38fecb46e51bbbe7a365e9a70eaa8dad629c7ef5
|
[
"BSD-3-Clause"
] | 7
|
2018-07-16T03:52:38.000Z
|
2021-09-06T09:32:14.000Z
|
python/model/vae.py
|
VAlex22/ND_VAE
|
38fecb46e51bbbe7a365e9a70eaa8dad629c7ef5
|
[
"BSD-3-Clause"
] | null | null | null |
python/model/vae.py
|
VAlex22/ND_VAE
|
38fecb46e51bbbe7a365e9a70eaa8dad629c7ef5
|
[
"BSD-3-Clause"
] | 2
|
2020-06-03T12:56:15.000Z
|
2021-03-09T18:17:21.000Z
|
# ############### Variational Autoencoder ####################
# This is an adapted implementation of vae for novelty detection from
# https://github.com/Lasagne/Recipes/blob/master/examples/variational_autoencoder/variational_autoencoder.py
import time
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from util import get_model_params, get_training_params, model_path
from python.model import TrainData
# ##################### Custom layer for middle of VAE ######################
# This layer takes the mu and sigma (both DenseLayers) and combines them with
# a random vector epsilon to sample values for a multivariate Gaussian
class GaussianSampleLayer(nn.layers.MergeLayer):
def __init__(self, mu, logsigma, rng=None, **kwargs):
self.rng = rng if rng else RandomStreams(nn.random.get_rng().randint(1, 2147462579))
super(GaussianSampleLayer, self).__init__([mu, logsigma], **kwargs)
def get_output_shape_for(self, input_shapes):
return input_shapes[0]
def get_output_for(self, inputs, deterministic=False, **kwargs):
mu, logsigma = inputs
shape = (self.input_shapes[0][0] or inputs[0].shape[0],
self.input_shapes[0][1] or inputs[0].shape[1])
if deterministic:
return mu
return mu + T.exp(logsigma) * self.rng.normal(shape)
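# --- Hedged illustration (added; not part of the original file) ---
# The layer above is the reparameterisation trick: a sample from N(mu, sigma^2)
# is drawn as mu + exp(logsigma) * eps with eps ~ N(0, 1). A minimal NumPy
# sketch of the same idea, independent of Theano/Lasagne:
def _reparameterize_numpy(mu, logsigma, rng=np.random):
    eps = rng.normal(size=np.shape(mu))
    return mu + np.exp(logsigma) * eps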
# ############################## Build Model #################################
# encoder has #depth hidden layer, where we get mu and sigma for Z given an inp X
# continuous decoder has #depth hidden layer, where we get reconstruction for X given Z
def build_vae(inputvar, n_channels, depth, z_dim, n_hid_first, L=5):
"""
:param inputvar:
:param n_channels: number of channels in the input vector
:param depth: depth of the encoder and decoder of the VAE
:param z_dim: dimensionality of the latent space
:param n_hid_first: number of neurons in the first hidden layer of the encoder.
Each subsequent encoder layer has half as many units as the previous one.
The decoder is symmetric to the encoder.
:param L: number of samples from latent space to compute output values
:return:
"""
# encoder
l = nn.layers.InputLayer(shape=(None, n_channels),
input_var=inputvar, name='input')
# encoder hidden layers
for i in range(depth):
num_units = int(n_hid_first / (2 ** i))
l = nn.layers.DenseLayer(l, num_units=num_units,
nonlinearity=nn.nonlinearities.rectify, name='enc_hid' + str(i))
l_enc_mean = nn.layers.DenseLayer(l, num_units=z_dim,
nonlinearity=None, name='enc_mu')
l_enc_logsigma = nn.layers.DenseLayer(l, num_units=z_dim,
nonlinearity=None, name='enc_logsigma')
# decoder
l_dec = {}
l_dec_mean_list = []
l_dec_logsigma_list = []
l_x_list = []
# tie the weights of all L versions so they are the "same" layer
W_dec_hid = [None] * depth
b_dec_hid = [None] * depth
W_dec_mean = None
b_dec_mean = None
W_dec_ls = None
b_dec_ls = None
for i in range(L):
l_dec[0] = GaussianSampleLayer(l_enc_mean, l_enc_logsigma, name='Z')
for j in range(depth):
num_units = int(n_hid_first / (2 ** (depth - i - 1)))
l_dec[j+1] = nn.layers.DenseLayer(l_dec[j], num_units=num_units,
nonlinearity=nn.nonlinearities.rectify,
W=nn.init.GlorotUniform() if W_dec_hid[j] is None else W_dec_hid[j],
b=nn.init.Constant(0.) if b_dec_hid[j] is None else b_dec_hid[j],
name='dec_hid' + str(j))
l_dec_mu = nn.layers.DenseLayer(l_dec[depth], num_units=n_channels,
nonlinearity=None,
W=nn.init.GlorotUniform() if W_dec_mean is None else W_dec_mean,
b=nn.init.Constant(0) if b_dec_mean is None else b_dec_mean,
name='dec_mu')
# relu_shift is for numerical stability - if training data has any
# dimensions where stdev=0, allowing logsigma to approach -inf
# will cause the loss function to become NAN. So we set the limit
# stdev >= exp(-1 * relu_shift)
relu_shift = 10
l_dec_logsigma = nn.layers.DenseLayer(l_dec[depth], num_units=n_channels,
nonlinearity=lambda a: T.nnet.relu(a+relu_shift)-relu_shift,
W=nn.init.GlorotUniform() if W_dec_ls is None else W_dec_ls,
b=nn.init.Constant(0) if b_dec_ls is None else b_dec_ls,
name='dec_logsigma')
l_x = GaussianSampleLayer(l_dec_mu, l_dec_logsigma,
name='dec_output')
l_dec_mean_list.append(l_dec_mu)
l_dec_logsigma_list.append(l_dec_logsigma)
l_x_list.append(l_x)
if W_dec_mean is None:
for j in range(depth):
W_dec_hid[j] = l_dec[j+1].W
b_dec_hid[j] = l_dec[j+1].b
W_dec_mean = l_dec_mu.W
b_dec_mean = l_dec_mu.b
W_dec_ls = l_dec_logsigma.W
b_dec_ls = l_dec_logsigma.b
l_x = nn.layers.ElemwiseSumLayer(l_x_list, coeffs=1. / L, name='x')
return l_enc_mean, l_enc_logsigma, l_dec_mean_list, l_dec_logsigma_list, l_x_list, l_x
def log_likelihood(tgt, mu, ls):
return T.sum(-(np.float32(0.5 * np.log(2 * np.pi)) + ls)
- 0.5 * T.sqr(tgt - mu) / T.exp(2 * ls))
def train_network(model):
n_channels, depth, z_dim, n_hid_first, lam, L = get_model_params(model)
batch_size, num_epochs, learning_rate = get_training_params(model)
data = TrainData(batch_size)
input_var = T.matrix('inputs')
# Create VAE model
l_z_mean, l_z_logsigma, l_x_mean_list, l_x_logsigma_list, l_x_list, l_x = \
build_vae(input_var, n_channels=n_channels, depth=depth, z_dim=z_dim,
n_hid_first=n_hid_first, L=L)
def build_loss(deterministic):
layer_outputs = nn.layers.get_output([l_z_mean, l_z_logsigma] + l_x_mean_list
+ l_x_logsigma_list,
deterministic=deterministic)
z_mean = layer_outputs[0]
z_ls = layer_outputs[1]
x_mean = layer_outputs[2: 2 + L]
x_logsigma = layer_outputs[2 + L : 2 + 2 * L]
# Loss function: - log p(x|z) + KL_div
kl_div = lam * 0.5 * T.sum(T.exp(2 * z_ls) + T.sqr(z_mean) - 1 - 2 * z_ls)
logpxz = sum(log_likelihood(input_var.flatten(2), mu, ls)
for mu, ls in zip(x_mean, x_logsigma)) / L
prediction = x_mean[0] if deterministic else T.sum(x_mean, axis=0) / L
loss = -logpxz + kl_div
return loss, prediction
loss, _ = build_loss(deterministic=False)
test_loss, test_prediction = build_loss(deterministic=True)
# ADAM updates
params = nn.layers.get_all_params(l_x, trainable=True)
updates = nn.updates.adam(loss, params, learning_rate=learning_rate)
train_fn = theano.function([input_var], loss, updates=updates)
val_fn = theano.function([input_var], test_loss)
previous_val_err_1 = float('inf')
previous_val_err_2 = float('inf')
for epoch in range(num_epochs):
train_err = 0.0
epoch_size = 0
start_time = time.time()
for i in range(data.train_size):
batch = data.next_batch()
this_err = train_fn(batch)
train_err += this_err
epoch_size += batch.shape[0]
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print("training loss: {:.6f}".format(train_err / epoch_size))
val_err = 0.0
val_size = 0
test_data = data.validation_data()
for i in range(data.validation_size):
err = val_fn(test_data[i])
val_err += err
val_size += test_data[i].shape[0]
print("validation loss: {:.6f}".format(val_err / val_size))
# early stopping
if val_err > previous_val_err_1 and val_err > previous_val_err_2:
break
else:
previous_val_err_2 = previous_val_err_1
previous_val_err_1 = val_err
# save the parameters so they can be loaded for next time
np.savez(model_path(model) + str(epoch), *nn.layers.get_all_param_values(l_x))
# output samples
samples = data.validation_samples()
pred_fn = theano.function([input_var], test_prediction)
X_pred = pred_fn(samples)
for i in range(len(samples)):
print(samples[i] - X_pred[i])
if __name__ == '__main__':
train_network(1)
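The loss combines a diagonal-Gaussian log-likelihood with a KL penalty against a standard normal prior. Below is a minimal NumPy sketch of those two terms (made-up shapes, NumPy only), which can be handy for sanity-checking the Theano expressions above:

import numpy as np

def gaussian_loglik(tgt, mu, logsigma):
    # sum of -0.5*log(2*pi) - logsigma - 0.5*(tgt - mu)^2 / sigma^2 over all elements
    return np.sum(-(0.5 * np.log(2 * np.pi) + logsigma)
                  - 0.5 * np.square(tgt - mu) / np.exp(2 * logsigma))

def kl_to_standard_normal(z_mu, z_logsigma):
    # KL( N(mu, sigma^2) || N(0, 1) ) summed over latent dimensions
    return 0.5 * np.sum(np.exp(2 * z_logsigma) + np.square(z_mu) - 1 - 2 * z_logsigma)

x = np.random.randn(4, 3).astype(np.float32)        # hypothetical batch of 4 samples, 3 channels
print(gaussian_loglik(x, np.zeros_like(x), np.zeros_like(x)))
print(kl_to_standard_normal(np.zeros((4, 2)), np.zeros((4, 2))))   # 0.0 when q equals the prior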
| 42.534562
| 113
| 0.59805
|
1682dde2d515a11c601f955324225a74e69edced
| 317
|
py
|
Python
|
individual_1.py
|
Alexander-fix/lab7-python3
|
ea448d7d04d4bc63c94bf89e684933663a3d6e3a
|
[
"MIT"
] | null | null | null |
individual_1.py
|
Alexander-fix/lab7-python3
|
ea448d7d04d4bc63c94bf89e684933663a3d6e3a
|
[
"MIT"
] | null | null | null |
individual_1.py
|
Alexander-fix/lab7-python3
|
ea448d7d04d4bc63c94bf89e684933663a3d6e3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
if __name__ == '__main__':
with open('text.txt', 'r') as f:
text = f.read()
    # Split the text into words.
words = text.split(" ")
for i in range(1, len(words), 2):
words[i], words[i - 1] = words[i - 1], words[i]
print(' '.join(words))
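The same pairwise swap on an in-memory sample sentence (no text.txt needed):

# Swap each pair of adjacent words: "a b c d e" -> "b a d c e"
words = "one two three four five".split(" ")
for i in range(1, len(words), 2):
    words[i], words[i - 1] = words[i - 1], words[i]
print(' '.join(words))   # -> "two one four three five"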
| 22.642857
| 55
| 0.523659
|
149001c7af5e0b37eeff1cd4f70143b9dffd9397
| 5,437
|
py
|
Python
|
meiduo_mall/meiduo_mall/apps/orders/serializers.py
|
dienoe/django_demo
|
bd8201fcc663533123efba9b1b4eee823a288bab
|
[
"MIT"
] | null | null | null |
meiduo_mall/meiduo_mall/apps/orders/serializers.py
|
dienoe/django_demo
|
bd8201fcc663533123efba9b1b4eee823a288bab
|
[
"MIT"
] | null | null | null |
meiduo_mall/meiduo_mall/apps/orders/serializers.py
|
dienoe/django_demo
|
bd8201fcc663533123efba9b1b4eee823a288bab
|
[
"MIT"
] | null | null | null |
from django.db import transaction
from django_redis import get_redis_connection
from rest_framework import serializers
from goods.models import SKU
from orders.models import OrderInfo, OrderGoods
from django.utils import timezone
from decimal import Decimal
import logging
logger=logging.getLogger('django')
class CartSKUSerializer(serializers.ModelSerializer):
"""
    Serializer for SKU items in the shopping cart
"""
count = serializers.IntegerField(label='数量')
class Meta:
model = SKU
fields = ('id', 'name', 'default_image_url', 'price', 'count')
class OrderSettlementSerializer(serializers.Serializer):
"""
    Serializer for order settlement data
"""
freight = serializers.DecimalField(label='运费', max_digits=10, decimal_places=2)
skus = CartSKUSerializer(many=True)
class SaveOrderSerializer(serializers.ModelSerializer):
"""
    Serializer for saving an order
"""
class Meta:
model = OrderInfo
fields = ('order_id', 'address', 'pay_method')
read_only_fields = ('order_id',)
extra_kwargs = {
'address': {
'write_only': True
},
'pay_method': {
'required': True
}
}
    # Product stock is checked while the order is created
def create(self, validated_data):
"""
        Save the order
:param validated_data:
:return:
"""
        # Get the user object
user=self.context['request'].user
        # Generate the order number order_id:
        # timestamp (e.g. 20180702150101) followed by the 9-digit zero-padded user id
order_id=timezone.now().strftime('%Y%m%d%H%M%S')+('%09d'%user.id)
address=validated_data['address']
pay_method=validated_data['pay_method']
        # Read the cart from redis: sku_id, count and the selected flags
redis_conn = get_redis_connection('cart')
        # hash: product quantities
redis_cart_dict = redis_conn.hgetall('cart_%s' % user.id)
        # set of selected products
redis_cart_selected = redis_conn.smembers('cart_selected_%s' % user.id)
cart = {}
        # cart = {
        #     sku_id: count    # selected items only
        # }
for sku_id in redis_cart_selected:
cart[int(sku_id)] = int(redis_cart_dict[sku_id])
if not cart:
raise serializers.ValidationError('没有需要结算的商品')
        # Open a transaction
with transaction.atomic():
            # Create a savepoint
save_id=transaction.savepoint()
try:
                # Save the order
                # datetime --> str: strftime
                # str --> datetime: strptime
                # Create the order header record (OrderInfo)
order=OrderInfo.objects.create(
order_id=order_id,
user=user,
address=address,
total_count=0,
total_amount=Decimal('0'),
freight=Decimal('10.00'),
pay_method=pay_method,
status=OrderInfo.ORDER_STATUS_ENUM['UNSEND'] if pay_method == OrderInfo.PAY_METHODS_ENUM['CASH'] else
OrderInfo.ORDER_STATUS_ENUM['UNPAID']
)
                # Query the product database for the product data (stock)
sku_id_list = cart.keys()
# sku_obj_list = SKU.objects.filter(id__in=sku_id_list)
                # Iterate over the items to be settled
for sku_id in sku_id_list:
while True:
                        # Fetch the latest stock information for this SKU
sku=SKU.objects.get(id=sku_id)
                        # Quantity the user wants to buy
sku_count=cart[sku.id]
origin_stock=sku.stock
origin_sales=sku.sales
                        # Check the stock
if sku.stock<sku_count:
                            # Roll back to the savepoint
transaction.savepoint_rollback(save_id)
raise serializers.ValidationError('商品%s库存不足'%sku.name)
                        # Decrease stock, increase sales
# sku.stock-=sku_count
# sku.sales+=sku_count
# sku.save()
new_stock=origin_stock-sku_count
                        new_sales=origin_sales+sku_count
                        # update() returns the number of affected rows
result=SKU.objects.filter(id=sku.id,stock=origin_stock).update(stock=new_stock,sales=new_sales)
if result==0:
                            # Update failed: another request bought the product first,
                            # so retry with the next iteration of the while loop
continue
order.total_count+=sku_count
order.total_amount+=(sku.price*sku_count)
                        # Create the order line record (OrderGoods)
OrderGoods.objects.create(
order=order,
sku=sku,
count=sku_count,
price=sku.price,
)
                        # Leave the while loop and continue with the next item of the for loop
break
order.save()
except serializers.ValidationError:
raise
except Exception as e:
                logger.error(e)
transaction.savepoint_rollback(save_id)
else:
transaction.savepoint_commit(save_id)
        # Remove the settled items from the cart
pl=redis_conn.pipeline()
# hash
pl.hdel('cart_%s'%user.id,*redis_cart_selected)
# set
pl.srem('cart_selected_%s'%user.id,*redis_cart_selected)
pl.execute()
        # Return the OrderInfo object
return order
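The while loop above implements optimistic locking: the stock is re-read, the decrement is issued as a conditional UPDATE keyed on the stock value that was read, and the operation is retried when no row was affected. A framework-free sketch of the same idea, with an in-memory dict standing in for the SKU table and made-up quantities:

inventory = {"sku-1": {"stock": 5, "sales": 0}}   # stand-in for the SKU table

def conditional_update(sku_id, expected_stock, new_stock, new_sales):
    """Mimics SKU.objects.filter(id=..., stock=expected).update(...): returns rows changed."""
    row = inventory[sku_id]
    if row["stock"] != expected_stock:
        return 0                                  # someone else changed it first -> caller retries
    row["stock"], row["sales"] = new_stock, new_sales
    return 1

def buy(sku_id, count):
    while True:
        row = dict(inventory[sku_id])             # snapshot of the current state
        if row["stock"] < count:
            raise ValueError("insufficient stock")
        if conditional_update(sku_id, row["stock"],
                              row["stock"] - count, row["sales"] + count):
            return                                # success, leave the retry loop

buy("sku-1", 2)
print(inventory)   # {'sku-1': {'stock': 3, 'sales': 2}}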
| 33.561728
| 121
| 0.506529
|
742f9020fa3fe1c925479cbc30e19635e5098b17
| 2,689
|
py
|
Python
|
bindings/python/ensmallen/datasets/kgobo/mco.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5
|
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/kgobo/mco.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18
|
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/kgobo/mco.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3
|
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph MCO.
The graph is automatically retrieved from the KGOBO repository.
References
---------------------
Please cite the following if you use the data:
```bib
@misc{kgobo,
title = "KG-OBO",
year = "2021",
author = "{Reese, Justin and Caufield, Harry}",
howpublished = {\\url{https://github.com/Knowledge-Graph-Hub/kg-obo}},
note = {Online; accessed 14 September 2021}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def MCO(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/kgobo",
version: str = "2019-05-15",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the MCO graph.
The graph is automatically retrieved from the KGOBO repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "2019-05-15"
The version of the graph to retrieve.
The available versions are:
- 2019-05-15
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of MCO graph.
References
---------------------
Please cite the following if you use the data:
```bib
@misc{kgobo,
title = "KG-OBO",
year = "2021",
author = "{Reese, Justin and Caufield, Harry}",
howpublished = {\\url{https://github.com/Knowledge-Graph-Hub/kg-obo}},
note = {Online; accessed 14 September 2021}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="MCO",
repository="kgobo",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
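A usage sketch, assuming the ensmallen package is installed; the first call downloads and preprocesses the graph, later calls hit the cache directory configured above:

from ensmallen.datasets.kgobo.mco import MCO

graph = MCO(directed=False, version="2019-05-15")   # retrieval happens here
print(graph)                                         # summary of the loaded Graph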
| 27.721649
| 73
| 0.628115
|
afb2ef78e71d424e0e021ea27b52afe6bbdec690
| 7,707
|
py
|
Python
|
venv/lib/python3.7/site-packages/datalad/plugin/export_archive.py
|
emmetaobrien/dats-validator
|
fb25f97a32119c2bce4eb50dc080a93d5ee77141
|
[
"MIT"
] | null | null | null |
venv/lib/python3.7/site-packages/datalad/plugin/export_archive.py
|
emmetaobrien/dats-validator
|
fb25f97a32119c2bce4eb50dc080a93d5ee77141
|
[
"MIT"
] | null | null | null |
venv/lib/python3.7/site-packages/datalad/plugin/export_archive.py
|
emmetaobrien/dats-validator
|
fb25f97a32119c2bce4eb50dc080a93d5ee77141
|
[
"MIT"
] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""export a dataset as a compressed TAR/ZIP archive"""
__docformat__ = 'restructuredtext'
from datalad.interface.base import Interface
from datalad.interface.base import build_doc
from datalad.support import path
@build_doc
class ExportArchive(Interface):
"""Export the content of a dataset as a TAR/ZIP archive.
"""
from datalad.support.param import Parameter
from datalad.distribution.dataset import datasetmethod
from datalad.interface.utils import eval_results
from datalad.distribution.dataset import EnsureDataset
from datalad.support.constraints import EnsureNone, EnsureStr
_params_ = dict(
dataset=Parameter(
args=("-d", "--dataset"),
doc=""""specify the dataset to export. If no dataset is given, an
attempt is made to identify the dataset based on the current
working directory.""",
constraints=EnsureDataset() | EnsureNone()),
filename=Parameter(
args=("filename",),
metavar="PATH",
nargs='?',
doc="""File name of the generated TAR archive. If no file name is
given the archive will be generated in the current directory and
will be named: datalad_<dataset_uuid>.(tar.*|zip). To generate that
file in a different directory, provide an existing directory as the
file name.""",
constraints=EnsureStr() | EnsureNone()),
archivetype=Parameter(
args=("-t", "--archivetype"),
metavar="tar|zip",
doc="""Type of archive to generate.""",
constraints=EnsureStr()),
compression=Parameter(
args=("-c", "--compression"),
metavar="gz|bz2|",
doc="""Compression method to use. 'bz2' is not supported for ZIP
archives. No compression is used when an empty string is
given.""",
constraints=EnsureStr()),
missing_content=Parameter(
args=("--missing-content",),
metavar="error|continue|ignore",
doc="""By default, any discovered file with missing content will
result in an error and the export is aborted. Setting this to
'continue' will issue warnings instead of failing on error. The
            value 'ignore' will only inform about problems at the 'debug' log
level. The latter two can be helpful when generating a TAR archive
from a dataset where some file content is not available
locally.""",
constraints=EnsureStr()),
)
@staticmethod
@datasetmethod(name='export_archive')
@eval_results
def __call__(dataset, filename=None, archivetype='tar', compression='gz',
missing_content='error'):
import os
import tarfile
import zipfile
from mock import patch
from os.path import join as opj, dirname, normpath, isabs
import os.path as op
from datalad.distribution.dataset import require_dataset
from datalad.utils import file_basename
from datalad.support.annexrepo import AnnexRepo
from datalad.dochelpers import exc_str
import logging
lgr = logging.getLogger('datalad.plugin.export_archive')
dataset = require_dataset(dataset, check_installed=True,
purpose='export archive')
repo = dataset.repo
committed_date = repo.get_commit_date()
# could be used later on to filter files by some criterion
def _filter_tarinfo(ti):
# Reset the date to match the one of the last commit, not from the
# filesystem since git doesn't track those at all
# TODO: use the date of the last commit when any particular
# file was changed -- would be the most kosher yoh thinks to the
# degree of our abilities
ti.mtime = committed_date
return ti
tar_args = dict(recursive=False, filter=_filter_tarinfo)
file_extension = '.{}{}'.format(
archivetype,
'{}{}'.format(
'.' if compression else '',
compression) if archivetype == 'tar' else '')
default_filename = "datalad_{.id}".format(dataset)
if filename is None:
filename = default_filename # in current directory
elif path.exists(filename) and path.isdir(filename):
filename = path.join(filename, default_filename) # under given directory
if not filename.endswith(file_extension):
filename += file_extension
root = dataset.path
# use dir inside matching the output filename
# TODO: could be an option to the export plugin allowing empty value
# for no leading dir
leading_dir = file_basename(filename)
# workaround for inability to pass down the time stamp
with patch('time.time', return_value=committed_date), \
tarfile.open(filename, "w:{}".format(compression)) \
if archivetype == 'tar' \
else zipfile.ZipFile(
filename, 'w',
zipfile.ZIP_STORED if not compression else zipfile.ZIP_DEFLATED) \
as archive:
add_method = archive.add if archivetype == 'tar' else archive.write
repo_files = sorted(repo.get_indexed_files())
if isinstance(repo, AnnexRepo):
annexed = repo.is_under_annex(
repo_files, allow_quick=True, batch=True)
# remember: returns False for files in Git!
has_content = repo.file_has_content(
repo_files, allow_quick=True, batch=True)
else:
annexed = [False] * len(repo_files)
has_content = [True] * len(repo_files)
for i, rpath in enumerate(repo_files):
fpath = opj(root, rpath)
if annexed[i]:
if not has_content[i]:
if missing_content in ('ignore', 'continue'):
(lgr.warning if missing_content == 'continue' else lgr.debug)(
'File %s has no content available, skipped', fpath)
continue
else:
raise IOError('File %s has no content available' % fpath)
# resolve to possible link target
if op.islink(fpath):
link_target = os.readlink(fpath)
if not isabs(link_target):
link_target = normpath(opj(dirname(fpath), link_target))
fpath = link_target
# name in the archive
aname = normpath(opj(leading_dir, rpath))
add_method(
fpath,
arcname=aname,
**(tar_args if archivetype == 'tar' else {}))
if not isabs(filename):
filename = opj(os.getcwd(), filename)
yield dict(
status='ok',
path=filename,
type='file',
action='export_archive',
logger=lgr)
__datalad_plugin__ = ExportArchive
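The notable detail above is that every archive member gets a deterministic mtime taken from the last commit, via the filter hook of tarfile's add(). A standalone stdlib sketch of that trick, assuming a placeholder file payload.txt exists next to the script and using a made-up commit timestamp:

import tarfile
import time

COMMIT_DATE = 1500000000          # hypothetical commit timestamp (seconds since epoch)

def _reset_mtime(ti):
    # Same idea as _filter_tarinfo: pin every member's mtime to the commit date
    ti.mtime = COMMIT_DATE
    return ti

with tarfile.open("snapshot.tar.gz", "w:gz") as archive:
    archive.add("payload.txt", arcname="snapshot/payload.txt",
                recursive=False, filter=_reset_mtime)

with tarfile.open("snapshot.tar.gz") as archive:
    print([(m.name, time.ctime(m.mtime)) for m in archive.getmembers()])

The plugin additionally patches time.time so that the gzip header itself carries the same timestamp.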
| 42.816667
| 90
| 0.571429
|
12461a20c49e49a72b37ce8985e3c44e15760a49
| 1,104
|
py
|
Python
|
data.py
|
Qlanowski/rangle
|
53299209e5e1fb9ce1c9eed4cf44ac34684dba02
|
[
"MIT"
] | null | null | null |
data.py
|
Qlanowski/rangle
|
53299209e5e1fb9ce1c9eed4cf44ac34684dba02
|
[
"MIT"
] | null | null | null |
data.py
|
Qlanowski/rangle
|
53299209e5e1fb9ce1c9eed4cf44ac34684dba02
|
[
"MIT"
] | null | null | null |
# %%
# Drawing points on images
import json
import os
import cv2
# ann_path = "ann/val_image.json"
# img_dir ="val_img"
ann_path = "ann/train_image.json"
img_dir ="train_img"
#%%
with open(ann_path) as json_val_ann:
images = json.load(json_val_ann)
def id_to_image(id):
return str(id).zfill(12) + ".jpg"
for filename in os.listdir(img_dir):
img_ann = [i for i in images if id_to_image(i["image_id"])==filename][0]
img = cv2.imread(f"{img_dir}/{filename}")
for person in img_ann["people"]:
p = person["keypoints"]
for i in range(int(len(p)/3)):
s = 2
cv2.rectangle(img, (p[i*3]-s, p[i*3+1]-s), (p[i*3]+s, p[i*3+1]+s), (255,0,0), 2)
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (p[i*3]+s,p[i*3+1]+s)
fontScale = 1
fontColor = (255,255,255)
lineType = 2
cv2.putText(img,str(i+1), bottomLeftCornerOfText,
font, fontScale, fontColor, lineType)
cv2.imshow('image',img)
cv2.waitKey(0)
# %%
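The inner loop works because COCO-style keypoints come as a flat list of (x, y, visibility) triplets per person. A tiny sketch of that unpacking with made-up coordinates:

# keypoints: x1, y1, v1, x2, y2, v2, ...  (v: 0 = not labelled, 1 = labelled but hidden, 2 = visible)
keypoints = [120, 45, 2, 131, 40, 2, 0, 0, 0]
for i in range(len(keypoints) // 3):
    x, y, v = keypoints[i * 3], keypoints[i * 3 + 1], keypoints[i * 3 + 2]
    if v > 0:
        print(f"keypoint {i + 1}: ({x}, {y}), visibility {v}")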
| 30.666667
| 92
| 0.556159
|
93e98194a420a0d86df51defec5d637580d7d983
| 9,940
|
py
|
Python
|
django/conf/locale/__init__.py
|
kix/django
|
5262a288df07daa050a0e17669c3f103f47a8640
|
[
"BSD-3-Clause"
] | 3
|
2015-10-14T09:13:48.000Z
|
2021-01-01T06:31:25.000Z
|
django/conf/locale/__init__.py
|
kix/django
|
5262a288df07daa050a0e17669c3f103f47a8640
|
[
"BSD-3-Clause"
] | 1
|
2016-02-19T00:22:18.000Z
|
2016-02-19T00:22:18.000Z
|
django/conf/locale/__init__.py
|
kix/django
|
5262a288df07daa050a0e17669c3f103f47a8640
|
[
"BSD-3-Clause"
] | 1
|
2015-10-14T09:13:48.000Z
|
2015-10-14T09:13:48.000Z
|
from __future__ import unicode_literals
LANG_INFO = {
'ar': {
'bidi': True,
'code': 'ar',
'name': 'Arabic',
'name_local': '\u0627\u0644\u0639\u0631\u0628\u064a\u0651\u0629',
},
'az': {
'bidi': True,
'code': 'az',
'name': 'Azerbaijani',
'name_local': 'az\u0259rbaycan dili',
},
'bg': {
'bidi': False,
'code': 'bg',
'name': 'Bulgarian',
'name_local': '\u0431\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438',
},
'bn': {
'bidi': False,
'code': 'bn',
'name': 'Bengali',
'name_local': '\u09ac\u09be\u0982\u09b2\u09be',
},
'bs': {
'bidi': False,
'code': 'bs',
'name': 'Bosnian',
'name_local': 'bosanski',
},
'ca': {
'bidi': False,
'code': 'ca',
'name': 'Catalan',
'name_local': 'catal\xe0',
},
'cs': {
'bidi': False,
'code': 'cs',
'name': 'Czech',
'name_local': '\u010desky',
},
'cy': {
'bidi': False,
'code': 'cy',
'name': 'Welsh',
'name_local': 'Cymraeg',
},
'da': {
'bidi': False,
'code': 'da',
'name': 'Danish',
'name_local': 'Dansk',
},
'de': {
'bidi': False,
'code': 'de',
'name': 'German',
'name_local': 'Deutsch',
},
'el': {
'bidi': False,
'code': 'el',
'name': 'Greek',
'name_local': '\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac',
},
'en': {
'bidi': False,
'code': 'en',
'name': 'English',
'name_local': 'English',
},
'en-gb': {
'bidi': False,
'code': 'en-gb',
'name': 'British English',
'name_local': 'British English',
},
'eo': {
'bidi': False,
'code': 'eo',
'name': 'Esperanto',
'name_local': 'Esperanto',
},
'es': {
'bidi': False,
'code': 'es',
'name': 'Spanish',
'name_local': 'espa\xf1ol',
},
'es-ar': {
'bidi': False,
'code': 'es-ar',
'name': 'Argentinian Spanish',
'name_local': 'espa\xf1ol de Argentina',
},
'es-mx': {
'bidi': False,
'code': 'es-mx',
'name': 'Mexican Spanish',
'name_local': 'espa\xf1ol de Mexico',
},
'es-ni': {
'bidi': False,
'code': 'es-ni',
'name': 'Nicaraguan Spanish',
'name_local': 'espa\xf1ol de Nicaragua',
},
'et': {
'bidi': False,
'code': 'et',
'name': 'Estonian',
'name_local': 'eesti',
},
'eu': {
'bidi': False,
'code': 'eu',
'name': 'Basque',
'name_local': 'Basque',
},
'fa': {
'bidi': True,
'code': 'fa',
'name': 'Persian',
'name_local': '\u0641\u0627\u0631\u0633\u06cc',
},
'fi': {
'bidi': False,
'code': 'fi',
'name': 'Finnish',
'name_local': 'suomi',
},
'fr': {
'bidi': False,
'code': 'fr',
'name': 'French',
'name_local': 'Fran\xe7ais',
},
'fy-nl': {
'bidi': False,
'code': 'fy-nl',
'name': 'Frisian',
'name_local': 'Frisian',
},
'ga': {
'bidi': False,
'code': 'ga',
'name': 'Irish',
'name_local': 'Gaeilge',
},
'gl': {
'bidi': False,
'code': 'gl',
'name': 'Galician',
'name_local': 'galego',
},
'he': {
'bidi': True,
'code': 'he',
'name': 'Hebrew',
'name_local': '\u05e2\u05d1\u05e8\u05d9\u05ea',
},
'hi': {
'bidi': False,
'code': 'hi',
'name': 'Hindi',
'name_local': 'Hindi',
},
'hr': {
'bidi': False,
'code': 'hr',
'name': 'Croatian',
'name_local': 'Hrvatski',
},
'hu': {
'bidi': False,
'code': 'hu',
'name': 'Hungarian',
'name_local': 'Magyar',
},
'id': {
'bidi': False,
'code': 'id',
'name': 'Indonesian',
'name_local': 'Bahasa Indonesia',
},
'is': {
'bidi': False,
'code': 'is',
'name': 'Icelandic',
'name_local': '\xcdslenska',
},
'it': {
'bidi': False,
'code': 'it',
'name': 'Italian',
'name_local': 'italiano',
},
'ja': {
'bidi': False,
'code': 'ja',
'name': 'Japanese',
'name_local': '\u65e5\u672c\u8a9e',
},
'ka': {
'bidi': False,
'code': 'ka',
'name': 'Georgian',
'name_local': '\u10e5\u10d0\u10e0\u10d7\u10e3\u10da\u10d8',
},
'kk': {
'bidi': False,
'code': 'kk',
'name': 'Kazakh',
'name_local': '\u049a\u0430\u0437\u0430\u049b',
},
'km': {
'bidi': False,
'code': 'km',
'name': 'Khmer',
'name_local': 'Khmer',
},
'kn': {
'bidi': False,
'code': 'kn',
'name': 'Kannada',
'name_local': 'Kannada',
},
'ko': {
'bidi': False,
'code': 'ko',
'name': 'Korean',
'name_local': '\ud55c\uad6d\uc5b4',
},
'lt': {
'bidi': False,
'code': 'lt',
'name': 'Lithuanian',
'name_local': 'Lithuanian',
},
'lv': {
'bidi': False,
'code': 'lv',
'name': 'Latvian',
'name_local': 'latvie\u0161u',
},
'mk': {
'bidi': False,
'code': 'mk',
'name': 'Macedonian',
'name_local': '\u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438',
},
'ml': {
'bidi': False,
'code': 'ml',
'name': 'Malayalam',
'name_local': 'Malayalam',
},
'mn': {
'bidi': False,
'code': 'mn',
'name': 'Mongolian',
'name_local': 'Mongolian',
},
'nb': {
'bidi': False,
'code': 'nb',
'name': 'Norwegian Bokmal',
'name_local': 'Norsk (bokm\xe5l)',
},
'ne': {
'bidi': False,
'code': 'ne',
'name': 'Nepali',
'name_local': '\u0928\u0947\u092a\u093e\u0932\u0940',
},
'nl': {
'bidi': False,
'code': 'nl',
'name': 'Dutch',
'name_local': 'Nederlands',
},
'nn': {
'bidi': False,
'code': 'nn',
'name': 'Norwegian Nynorsk',
'name_local': 'Norsk (nynorsk)',
},
'no': {
'bidi': False,
'code': 'no',
'name': 'Norwegian',
'name_local': 'Norsk',
},
'pa': {
'bidi': False,
'code': 'pa',
'name': 'Punjabi',
'name_local': 'Punjabi',
},
'pl': {
'bidi': False,
'code': 'pl',
'name': 'Polish',
'name_local': 'polski',
},
'pt': {
'bidi': False,
'code': 'pt',
'name': 'Portuguese',
'name_local': 'Portugu\xeas',
},
'pt-br': {
'bidi': False,
'code': 'pt-br',
'name': 'Brazilian Portuguese',
'name_local': 'Portugu\xeas Brasileiro',
},
'ro': {
'bidi': False,
'code': 'ro',
'name': 'Romanian',
'name_local': 'Rom\xe2n\u0103',
},
'ru': {
'bidi': False,
'code': 'ru',
'name': 'Russian',
'name_local': '\u0420\u0443\u0441\u0441\u043a\u0438\u0439',
},
'sk': {
'bidi': False,
'code': 'sk',
'name': 'Slovak',
'name_local': 'slovensk\xfd',
},
'sl': {
'bidi': False,
'code': 'sl',
'name': 'Slovenian',
'name_local': 'Sloven\u0161\u010dina',
},
'sq': {
'bidi': False,
'code': 'sq',
'name': 'Albanian',
'name_local': 'Albanian',
},
'sr': {
'bidi': False,
'code': 'sr',
'name': 'Serbian',
'name_local': '\u0441\u0440\u043f\u0441\u043a\u0438',
},
'sr-latn': {
'bidi': False,
'code': 'sr-latn',
'name': 'Serbian Latin',
'name_local': 'srpski (latinica)',
},
'sv': {
'bidi': False,
'code': 'sv',
'name': 'Swedish',
'name_local': 'Svenska',
},
'sw': {
'bidi': False,
'code': 'sw',
'name': 'Swahili',
'name_local': 'Kiswahili',
},
'ta': {
'bidi': False,
'code': 'ta',
'name': 'Tamil',
'name_local': '\u0ba4\u0bae\u0bbf\u0bb4\u0bcd',
},
'te': {
'bidi': False,
'code': 'te',
'name': 'Telugu',
'name_local': '\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41',
},
'th': {
'bidi': False,
'code': 'th',
'name': 'Thai',
'name_local': 'Thai',
},
'tr': {
'bidi': False,
'code': 'tr',
'name': 'Turkish',
'name_local': 'T\xfcrk\xe7e',
},
'tt': {
'bidi': False,
'code': 'tt',
'name': 'Tatar',
'name_local': '\u0422\u0430\u0442\u0430\u0440\u0447\u0430',
},
'uk': {
'bidi': False,
'code': 'uk',
'name': 'Ukrainian',
'name_local': '\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430',
},
'ur': {
'bidi': False,
'code': 'ur',
'name': 'Urdu',
'name_local': '\u0627\u0631\u062f\u0648',
},
'vi': {
'bidi': False,
'code': 'vi',
'name': 'Vietnamese',
'name_local': 'Vietnamese',
},
'zh-cn': {
'bidi': False,
'code': 'zh-cn',
'name': 'Simplified Chinese',
'name_local': '\u7b80\u4f53\u4e2d\u6587',
},
'zh-tw': {
'bidi': False,
'code': 'zh-tw',
'name': 'Traditional Chinese',
'name_local': '\u7e41\u9ad4\u4e2d\u6587',
}
}
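A minimal lookup sketch over LANG_INFO (plain dict access with a base-language fallback; the fallback rule here is illustrative, not the full resolution logic Django applies):

def language_info(code):
    # Try the exact code first, then fall back to the base language ('pt-br' -> 'pt')
    if code in LANG_INFO:
        return LANG_INFO[code]
    return LANG_INFO[code.split('-')[0]]

print(language_info('pt-br')['name'])   # Brazilian Portuguese
print(language_info('de-at')['name'])   # falls back to German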
| 22.745995
| 85
| 0.407143
|
028d10a7e14884573ed681964defc25a2dcdf1d3
| 8,028
|
py
|
Python
|
maskrcnn_benchmark/config/paths_catalog.py
|
lzrobots/dgmn
|
515476b5c6a07dcc3b7a4d2243c541377624bb33
|
[
"MIT"
] | 54
|
2020-06-14T15:45:01.000Z
|
2022-03-26T07:25:46.000Z
|
maskrcnn_benchmark/config/paths_catalog.py
|
lzrobots/dgmn
|
515476b5c6a07dcc3b7a4d2243c541377624bb33
|
[
"MIT"
] | 3
|
2020-06-16T09:13:13.000Z
|
2021-05-10T03:26:30.000Z
|
maskrcnn_benchmark/config/paths_catalog.py
|
lzrobots/dgmn
|
515476b5c6a07dcc3b7a4d2243c541377624bb33
|
[
"MIT"
] | 10
|
2020-07-02T14:22:23.000Z
|
2022-03-23T02:13:41.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Centralized catalog of paths."""
import os
class DatasetCatalog(object):
DATA_DIR = "datasets"
DATASETS = {
"coco_2017_train": {
"img_dir": "coco/train2017",
"ann_file": "coco/annotations/instances_train2017.json"
},
"coco_2017_val": {
"img_dir": "coco/val2017",
"ann_file": "coco/annotations/instances_val2017.json"
},
"coco_2017_test": {
"img_dir": "coco/test2017",
"ann_file": "coco/annotations/image_info_test-dev2017.json",
},
"coco_2014_train": {
"img_dir": "coco/train2014",
"ann_file": "coco/annotations/instances_train2014.json"
},
"coco_2014_val": {
"img_dir": "coco/val2014",
"ann_file": "coco/annotations/instances_val2014.json"
},
"coco_2014_minival": {
"img_dir": "coco/val2014",
"ann_file": "coco/annotations/instances_minival2014.json"
},
"coco_2014_valminusminival": {
"img_dir": "coco/val2014",
"ann_file": "coco/annotations/instances_valminusminival2014.json"
},
"keypoints_coco_2014_train": {
"img_dir": "coco/train2014",
"ann_file": "coco/annotations/person_keypoints_train2014.json",
},
"keypoints_coco_2014_val": {
"img_dir": "coco/val2014",
"ann_file": "coco/annotations/person_keypoints_val2014.json"
},
"keypoints_coco_2014_minival": {
"img_dir": "coco/val2014",
"ann_file": "coco/annotations/person_keypoints_minival2014.json",
},
"keypoints_coco_2014_valminusminival": {
"img_dir": "coco/val2014",
"ann_file": "coco/annotations/person_keypoints_valminusminival2014.json",
},
"voc_2007_train": {
"data_dir": "voc/VOC2007",
"split": "train"
},
"voc_2007_train_cocostyle": {
"img_dir": "voc/VOC2007/JPEGImages",
"ann_file": "voc/VOC2007/Annotations/pascal_train2007.json"
},
"voc_2007_val": {
"data_dir": "voc/VOC2007",
"split": "val"
},
"voc_2007_val_cocostyle": {
"img_dir": "voc/VOC2007/JPEGImages",
"ann_file": "voc/VOC2007/Annotations/pascal_val2007.json"
},
"voc_2007_test": {
"data_dir": "voc/VOC2007",
"split": "test"
},
"voc_2007_test_cocostyle": {
"img_dir": "voc/VOC2007/JPEGImages",
"ann_file": "voc/VOC2007/Annotations/pascal_test2007.json"
},
"voc_2012_train": {
"data_dir": "voc/VOC2012",
"split": "train"
},
"voc_2012_train_cocostyle": {
"img_dir": "voc/VOC2012/JPEGImages",
"ann_file": "voc/VOC2012/Annotations/pascal_train2012.json"
},
"voc_2012_val": {
"data_dir": "voc/VOC2012",
"split": "val"
},
"voc_2012_val_cocostyle": {
"img_dir": "voc/VOC2012/JPEGImages",
"ann_file": "voc/VOC2012/Annotations/pascal_val2012.json"
},
"voc_2012_test": {
"data_dir": "voc/VOC2012",
"split": "test"
            # PASCAL VOC2012 didn't make the test annotations available, so there's no json annotation
},
"cityscapes_fine_instanceonly_seg_train_cocostyle": {
"img_dir": "cityscapes/images",
"ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_train.json"
},
"cityscapes_fine_instanceonly_seg_val_cocostyle": {
"img_dir": "cityscapes/images",
"ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_val.json"
},
"cityscapes_fine_instanceonly_seg_test_cocostyle": {
"img_dir": "cityscapes/images",
"ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_test.json"
}
}
@staticmethod
def get(name):
if "coco" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
args = dict(
root=os.path.join(data_dir, attrs["img_dir"]),
ann_file=os.path.join(data_dir, attrs["ann_file"]),
)
return dict(
factory="COCODataset",
args=args,
)
elif "voc" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
args = dict(
data_dir=os.path.join(data_dir, attrs["data_dir"]),
split=attrs["split"],
)
return dict(
factory="PascalVOCDataset",
args=args,
)
raise RuntimeError("Dataset not available: {}".format(name))
class ModelCatalog(object):
S3_C2_DETECTRON_URL = "https://dl.fbaipublicfiles.com/detectron"
C2_IMAGENET_MODELS = {
"MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
"MSRA/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
"MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
"MSRA/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
"FAIR/20171220/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
}
C2_DETECTRON_SUFFIX = "output/train/{}coco_2014_train%3A{}coco_2014_valminusminival/generalized_rcnn/model_final.pkl"
C2_DETECTRON_MODELS = {
"35857197/e2e_faster_rcnn_R-50-C4_1x": "01_33_49.iAX0mXvW",
"35857345/e2e_faster_rcnn_R-50-FPN_1x": "01_36_30.cUF7QR7I",
"35857890/e2e_faster_rcnn_R-101-FPN_1x": "01_38_50.sNxI7sX7",
"36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "06_31_39.5MIHi1fZ",
"35858791/e2e_mask_rcnn_R-50-C4_1x": "01_45_57.ZgkA7hPB",
"35858933/e2e_mask_rcnn_R-50-FPN_1x": "01_48_14.DzEQe4wC",
"35861795/e2e_mask_rcnn_R-101-FPN_1x": "02_31_37.KqyEK4tT",
"36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "06_35_59.RZotkLKI",
"37129812/e2e_mask_rcnn_X-152-32x8d-FPN-IN5k_1.44x": "09_35_36.8pzTQKYK",
# keypoints
"37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "08_42_54.kdzV35ao"
}
@staticmethod
def get(name):
if name.startswith("Caffe2Detectron/COCO"):
return ModelCatalog.get_c2_detectron_12_2017_baselines(name)
if name.startswith("ImageNetPretrained"):
return ModelCatalog.get_c2_imagenet_pretrained(name)
raise RuntimeError("model not present in the catalog {}".format(name))
@staticmethod
def get_c2_imagenet_pretrained(name):
prefix = ModelCatalog.S3_C2_DETECTRON_URL
name = name[len("ImageNetPretrained/"):]
name = ModelCatalog.C2_IMAGENET_MODELS[name]
url = "/".join([prefix, name])
return url
@staticmethod
def get_c2_detectron_12_2017_baselines(name):
# Detectron C2 models are stored following the structure
# prefix/<model_id>/2012_2017_baselines/<model_name>.yaml.<signature>/suffix
# we use as identifiers in the catalog Caffe2Detectron/COCO/<model_id>/<model_name>
prefix = ModelCatalog.S3_C2_DETECTRON_URL
dataset_tag = "keypoints_" if "keypoint" in name else ""
suffix = ModelCatalog.C2_DETECTRON_SUFFIX.format(dataset_tag, dataset_tag)
# remove identification prefix
name = name[len("Caffe2Detectron/COCO/"):]
# split in <model_id> and <model_name>
model_id, model_name = name.split("/")
# parsing to make it match the url address from the Caffe2 models
model_name = "{}.yaml".format(model_name)
signature = ModelCatalog.C2_DETECTRON_MODELS[name]
unique_name = ".".join([model_name, signature])
url = "/".join([prefix, model_id, "12_2017_baselines", unique_name, suffix])
return url
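A short usage sketch, assuming the package is importable as maskrcnn_benchmark; both catalogs are pure string/dict logic, so no dataset files or model weights are needed:

from maskrcnn_benchmark.config import paths_catalog

spec = paths_catalog.DatasetCatalog.get("coco_2017_train")
print(spec["factory"])          # COCODataset
print(spec["args"]["root"])     # datasets/coco/train2017 (on POSIX)

print(paths_catalog.ModelCatalog.get("ImageNetPretrained/MSRA/R-50"))
# https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/MSRA/R-50.pkl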
| 40.341709
| 121
| 0.603886
|
2e8dca3fecc706ec031acbddef451f0e7ff24c87
| 505
|
py
|
Python
|
RegEx/re-search.py
|
tverma332/python3
|
544c4ec9c726c37293c8da5799f50575cc50852d
|
[
"MIT"
] | 3
|
2022-03-28T09:10:08.000Z
|
2022-03-29T10:47:56.000Z
|
RegEx/re-search.py
|
tverma332/python3
|
544c4ec9c726c37293c8da5799f50575cc50852d
|
[
"MIT"
] | 1
|
2022-03-27T11:52:58.000Z
|
2022-03-27T11:52:58.000Z
|
RegEx/re-search.py
|
tverma332/python3
|
544c4ec9c726c37293c8da5799f50575cc50852d
|
[
"MIT"
] | null | null | null |
# re.search() scans the entire string and returns only the first match
import re
text = "This is for python3 and there are two mahor vers python2 and python3 in future python4"
my_pat = r'\bpython\d?\b'
match_ob = re.search(my_pat , text)
if match_ob :
print(f"Match from your pattern : {match_ob.group()}")
print(f"Starting Index : {match_ob.start()}")
print(f"Ending Index : {match_ob.end() - 1}")
print(f"Length : {match_ob.end() - match_ob.start()}")
else :
print("No match found")
| 31.5625
| 95
| 0.70297
|