Dataset schema (each row below lists its cells in this column order, pipe-separated, with multi-line `content` cells inline):

| column | dtype | observed range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
e99385b476437e2b2258af182121e6b707636676
| 4,781
|
py
|
Python
|
lisa/base_tools/wget.py
|
anirudhrb/lisa
|
fe009802577c81e45ca2ff5a34d353878caa725d
|
[
"MIT"
] | 48
|
2018-05-19T17:46:34.000Z
|
2020-09-28T21:09:06.000Z
|
lisa/base_tools/wget.py
|
anirudhrb/lisa
|
fe009802577c81e45ca2ff5a34d353878caa725d
|
[
"MIT"
] | 1,261
|
2018-05-17T04:32:22.000Z
|
2020-11-23T17:29:13.000Z
|
lisa/base_tools/wget.py
|
anirudhrb/lisa
|
fe009802577c81e45ca2ff5a34d353878caa725d
|
[
"MIT"
] | 133
|
2018-05-15T23:12:14.000Z
|
2020-11-13T10:37:49.000Z
|
import re
from pathlib import PurePosixPath
from typing import TYPE_CHECKING, Optional, Type
from lisa.executable import Tool
from lisa.tools.ls import Ls
from lisa.tools.mkdir import Mkdir
from lisa.tools.powershell import PowerShell
from lisa.tools.rm import Rm
from lisa.util import LisaException, is_valid_url
if TYPE_CHECKING:
from lisa.operating_system import Posix
class Wget(Tool):
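    # __pattern_path extracts the saved file path from wget's stdout,
    # matching lines like "... - ‘/tmp/file’ saved [4781/4781]" or
    # "File ‘/tmp/file’ already there; not retrieving."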
__pattern_path = re.compile(
r"([\w\W]*?)(-|File) (‘|')(?P<path>.+?)(’|') (saved|already there)"
)
@property
def command(self) -> str:
return "wget"
@property
def can_install(self) -> bool:
return True
def install(self) -> bool:
posix_os: Posix = self.node.os # type: ignore
posix_os.install_packages([self])
return self._check_exists()
def get(
self,
url: str,
file_path: str = "",
filename: str = "",
overwrite: bool = True,
executable: bool = False,
sudo: bool = False,
force_run: bool = False,
timeout: int = 600,
) -> str:
is_valid_url(url)
# combine download file path
# TODO: support current lisa folder in pathlib.
# So that here can use the corresponding path format.
if file_path:
# create folder when it doesn't exist
self.node.shell.mkdir(PurePosixPath(file_path), exist_ok=True)
download_path = f"{file_path}/{filename}"
else:
download_path = f"{self.node.working_path}/{filename}"
# remove existing file and dir to download again.
download_pure_path = self.node.get_pure_path(download_path)
if overwrite and self.node.shell.exists(download_pure_path):
self.node.shell.remove(download_pure_path, recursive=True)
command = f"'{url}' --no-check-certificate"
if filename:
command = f"{command} -O {download_path}"
else:
command = f"{command} -P {download_path}"
command_result = self.run(
command,
no_error_log=True,
shell=True,
sudo=sudo,
force_run=force_run,
timeout=timeout,
)
matched_result = self.__pattern_path.match(command_result.stdout)
if matched_result:
download_file_path = matched_result.group("path")
else:
            raise LisaException(
                f"cannot find file path in stdout of '{command}'; this may be"
                " caused by a failed download or a pattern mismatch."
                f" stdout: {command_result.stdout}"
            )
actual_file_path = self.node.execute(
f"ls {download_file_path}", shell=True, sudo=sudo
)
        if actual_file_path.exit_code != 0:
            raise LisaException(f"File {download_file_path} doesn't exist.")
if executable:
self.node.execute(f"chmod +x {actual_file_path}", sudo=sudo)
return actual_file_path.stdout
def verify_internet_access(self) -> bool:
try:
result = self.get("https://www.azure.com", force_run=True)
if result:
return True
except Exception as e:
self._log.debug(
f"Internet is not accessible, exception occurred with wget {e}"
)
return False
@classmethod
def _windows_tool(cls) -> Optional[Type[Tool]]:
return WindowsWget
class WindowsWget(Wget):
@property
def command(self) -> str:
return ""
def _check_exists(self) -> bool:
return True
def get(
self,
url: str,
file_path: str = "",
filename: str = "",
overwrite: bool = True,
executable: bool = False,
sudo: bool = False,
force_run: bool = False,
timeout: int = 600,
) -> str:
ls = self.node.tools[Ls]
fullpath = f"{file_path}\\{filename}"
        # return early if the file exists and overwrite is not requested
        if ls.path_exists(fullpath, sudo=sudo) and not overwrite:
            self._log.debug(
                f"File {fullpath} already exists and overwrite is set to False"
            )
            return fullpath
# create directory if it doesn't exist
self.node.tools[Mkdir].create_directory(file_path, sudo=sudo)
# TODO: add support for executables
# remove existing file if present and download
self.node.tools[Rm].remove_file(fullpath, sudo=sudo)
self.node.tools[PowerShell].run_cmdlet(
f"$ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri '{url}'"
f" -OutFile '{fullpath}'",
sudo=sudo,
force_run=force_run,
timeout=timeout,
)
return fullpath
| 31.453947
| 87
| 0.590253
| 4,401
| 0.919749
| 0
| 0
| 273
| 0.057053
| 0
| 0
| 1,158
| 0.242006
|
e995e4148b59ca5a7b4ba1e5e2c168dedb8fd4e8
| 1,787
|
py
|
Python
|
Datacamp Assignments/Data Engineer Track/2. Streamlined Data Ingestion with pandas/35_handle_deeply_nested_data.py
|
Ali-Parandeh/Data_Science_Playground
|
c529e9b3692381572de259e7c93938d6611d83da
|
[
"MIT"
] | null | null | null |
Datacamp Assignments/Data Engineer Track/2. Streamlined Data Ingestion with pandas/35_handle_deeply_nested_data.py
|
Ali-Parandeh/Data_Science_Playground
|
c529e9b3692381572de259e7c93938d6611d83da
|
[
"MIT"
] | null | null | null |
Datacamp Assignments/Data Engineer Track/2. Streamlined Data Ingestion with pandas/35_handle_deeply_nested_data.py
|
Ali-Parandeh/Data_Science_Playground
|
c529e9b3692381572de259e7c93938d6611d83da
|
[
"MIT"
] | 1
|
2021-03-10T09:40:05.000Z
|
2021-03-10T09:40:05.000Z
|
# Load other business attributes and set meta prefix
from pandas.io.json import json_normalize
flat_cafes = json_normalize(data["businesses"],
sep="_",
record_path="categories",
meta=['name',
'alias',
'rating',
['coordinates', 'latitude'],
['coordinates', 'longitude']],
meta_prefix='biz_')
# View the data
print(flat_cafes.head())
'''
<script.py> output:
alias title biz_name biz_alias biz_rating biz_coordinates_latitude biz_coordinates_longitude
0 coffee Coffee & Tea White Noise white-noise-brooklyn-2 4.5 40.689358 -73.988415
1 coffee Coffee & Tea Devocion devocion-brooklyn-3 4.0 40.688570 -73.983340
2 coffeeroasteries Coffee Roasteries Devocion devocion-brooklyn-3 4.0 40.688570 -73.983340
3 cafes Cafes Devocion devocion-brooklyn-3 4.0 40.688570 -73.983340
4 coffee Coffee & Tea Coffee Project NY coffee-project-ny-new-york 4.5 40.726990 -73.989220
Naming meta columns can get tedious for datasets with many attributes,
and code is susceptible to breaking if column names or nesting levels change.
In such cases, you may have to write a custom function and
employ techniques like recursion to handle the data.
'''
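# A minimal sketch of the custom-function approach mentioned above; this
# helper is hypothetical (not part of the exercise) and assumes records are
# plain dicts whose values are scalars or nested dicts.
def flatten_record(record, parent_key="", sep="_"):
    """Recursively flatten a nested dict into a single-level dict."""
    flat = {}
    for key, value in record.items():
        new_key = f"{parent_key}{sep}{key}" if parent_key else key
        if isinstance(value, dict):
            flat.update(flatten_record(value, new_key, sep=sep))
        else:
            flat[new_key] = value
    return flat
# Usage: flatten_record({"coordinates": {"latitude": 40.69}})
# returns {"coordinates_latitude": 40.69}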
| 52.558824
| 154
| 0.493005
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,391
| 0.7784
|
e9960edde95bcaeefa3f37767c2580e46bec455b
| 2,310
|
py
|
Python
|
deprecated/obsolete/src/coverinst.py
|
Anirban166/tstl
|
73dac02f084b10e1bf2f172a5d1306bb5fbd7f7e
|
[
"Apache-2.0"
] | 90
|
2015-04-07T10:26:53.000Z
|
2022-03-07T15:14:57.000Z
|
deprecated/obsolete/src/coverinst.py
|
Anirban166/tstl
|
73dac02f084b10e1bf2f172a5d1306bb5fbd7f7e
|
[
"Apache-2.0"
] | 14
|
2015-10-13T16:25:59.000Z
|
2021-01-21T18:31:03.000Z
|
deprecated/obsolete/src/coverinst.py
|
Anirban166/tstl
|
73dac02f084b10e1bf2f172a5d1306bb5fbd7f7e
|
[
"Apache-2.0"
] | 32
|
2015-04-07T10:41:29.000Z
|
2022-02-26T05:17:28.000Z
|
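# Coverage instrumenter: reads the Python source named by sys.argv[1] and
# writes "<name>_INST.py", a copy that starts with "import covertool" and has
# covertool.cover("<file>:<line>") calls inserted at statement boundaries
# (one-line "if cond: stmt" bodies are split so the taken branch is recorded).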
import sys
infn = sys.argv[1]
outfn = infn.split(".py")[0]+"_INST.py"
code = []
for l in open(infn):
code.append(l)
outf = open(outfn, 'w')
outf.write("import covertool\n")
ln = 0
inComment = False
justEnded = False
currentIndent = 0
lineIndent = 0
okChangeIndent = False
skipNext = False
doNotInstrument = ["class","def","import", "elif", "else:", "except", "}", "]", ")"]
indentChangers = ["class", "def", "if", "elif", "else:", "for", "try:", "except", "while"]
skipNextChars = [",","\\"]
conditionals = ["if","elif", "else"]
for l in code:
ln += 1
ls = l.split()
if l.find('"""') != -1:
inComment = not inComment
justEnded = True
if inComment:
outf.write(l)
continue
if justEnded:
outf.write(l)
justEnded = False
continue
lineIndent = 0
for c in l:
if c != " ":
break
else:
lineIndent += 1
instrument = False
if (lineIndent > currentIndent):
if okChangeIndent and not skipNext:
currentIndent = lineIndent
instrument = True
else:
instrument = ls != []
currentIndent = lineIndent
if (ls != []) and ((ls[0] in doNotInstrument) or (ls[0][0] == "#")):
instrument = False
if (ls != []) and (ls[0] in conditionals) and (":" in l) and (ls[-1][-1] != ":"):
if ls[0] == "if":
ld = infn + ":" + str(ln)
outf.write((" " * lineIndent) + 'covertool.cover("' + ld + '")\n')
ld = infn + ":" + str(ln)+":True"
sc = l.split(":")
sct = ""
started = False
for c in sc[1]:
if started or (c != " "):
started = True
sct += c
outf.write(sc[0] + ":" + "\n")
outf.write((" " * lineIndent) + ' covertool.cover("' + ld + '")\n')
outf.write((" " * lineIndent) + " " + sct + "\n")
okChangeIndent = False
skipNext = False
continue
if instrument:
ld = infn + ":" + str(ln)
outf.write((" " * lineIndent) + 'covertool.cover("' + ld + '")\n')
okChangeIndent = skipNext or ((ls != []) and (ls[0] in indentChangers))
skipNext = (len(l) > 2) and (l[-2] in skipNextChars)
outf.write(l)
outf.close()
| 25.666667
| 90
| 0.490909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 327
| 0.141558
|
e997ebbde4fce0c730819b363c5adbce38d2664d
| 8,729
|
py
|
Python
|
actionkit_templates/settings.py
|
MoveOnOrg/actionkit-templates
|
2d06ad7634fac59e352d5cd8625f3092624d30e4
|
[
"Unlicense",
"MIT"
] | 8
|
2016-11-29T07:34:04.000Z
|
2021-06-09T18:09:25.000Z
|
actionkit_templates/settings.py
|
MoveOnOrg/actionkit-templates
|
2d06ad7634fac59e352d5cd8625f3092624d30e4
|
[
"Unlicense",
"MIT"
] | 12
|
2016-12-06T17:24:58.000Z
|
2022-02-21T20:11:47.000Z
|
actionkit_templates/settings.py
|
MoveOnOrg/actionkit-templates
|
2d06ad7634fac59e352d5cd8625f3092624d30e4
|
[
"Unlicense",
"MIT"
] | 4
|
2016-12-25T11:16:34.000Z
|
2020-02-11T18:48:26.000Z
|
import json
import os
import sys
import time
try:
from urlparse import urlparse
except ImportError:
# python3
from urllib.parse import urlparse
from django.conf.urls import url
from django.conf.urls.static import static
from django.http import HttpResponse, Http404
from django.shortcuts import render_to_response, redirect
from django.template.loader import render_to_string
from django.template.base import add_to_builtins
from django.views.static import serve
from .moveon_fakeapi import mo_event_data
"""
try running with
aktemplates runserver 0.0.0.0:1234
"""
DEBUG = True
SECRET_KEY = 'who cares!'
INSTALLED_APPS = ['actionkit_templates', ]
try:
import template_debug #django-template-debug
INSTALLED_APPS.append('template_debug')
import django_extensions #django-extensions
INSTALLED_APPS.append('django_extensions')
except ImportError:
    pass
#one directory down
APP_PATH = os.path.dirname(__file__)
PROJECT_ROOT_PATH = os.path.abspath(os.getcwd())
#############
# STATIC DIRECTORY
#############
#note this only works if DEBUG=True
STATIC_ROOT = os.environ.get('STATIC_ROOT', os.path.join(PROJECT_ROOT_PATH, './static'))
STATIC_URL = os.environ.get('STATIC_URL', '/static/')
STATIC_FALLBACK = os.environ.get('STATIC_FALLBACK', False)
STATIC_LOCAL = os.environ.get('STATIC_URL', None) # an explicit local or not
#############
# TEMPLATES
#############
DEFAULT_TEMPLATES = os.path.join(APP_PATH, 'templates')
DIR_TEMPLATES = []
if os.environ.get('TEMPLATE_DIR'):
DIR_TEMPLATES.append(os.environ.get('TEMPLATE_DIR'))
else:
for d in ('./', './template_set', './_layouts', './_includes'):
dd = os.path.join(PROJECT_ROOT_PATH, d)
if os.path.exists(dd):
DIR_TEMPLATES.append(dd)
DIR_TEMPLATES.append(DEFAULT_TEMPLATES)
TEMPLATES = [
{ 'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': DIR_TEMPLATES,
},
]
MIDDLEWARE_CLASSES = []
add_to_builtins('actionkit_templates.templatetags.actionkit_tags')
def _get_context_data(request, name=None, page=None, use_referer=False):
from actionkit_templates.contexts.page_contexts import contexts
port = '4000'
hostport = request.get_host().split(':')
if len(hostport) > 1:
port = hostport[1]
if use_referer:
paths = None
if request.META.get('HTTP_REFERER'):
paths = urlparse(request.META['HTTP_REFERER']).path.split('/')
elif request.GET.get('path'):
# e.g. &path=/events/event_search.html
paths = request.GET['path'].split('/')
if paths and len(paths) > 1:
name = paths[1]
if len(paths) > 2:
page = paths[2]
custom_contexts_file = os.path.join(PROJECT_ROOT_PATH,
os.environ.get('CUSTOM_CONTEXTS', 'contexts.json'))
if os.path.exists(custom_contexts_file):
try:
contexts.update({'Custom': json.loads(open(custom_contexts_file).read())})
        except ValueError as e:
            raise Exception("JSON Parsing Error for context file %s %s" % (
                custom_contexts_file, e))
    #first use ?template= if present, otherwise the name's template, otherwise the homepage
cxt = dict(
devenv={
'enabled': True,
'port': port,
'STATIC_URL': STATIC_URL,
'STATIC_LOCAL': STATIC_LOCAL,
'MO_EVENTS_API': '/fake/api/events'
}
)
context_data = contexts.get(name, {})
if page:
context_data = contexts.get(name, {}).get(page, {})
cxt.update(context_data)
if not context_data:
sections = []
for section, pages in sorted(contexts.items()):
sections.append([section, sorted(pages.items())])
cxt.update({
'page': {'title':'Homepage'},
'pagelinks': sections})
if request.GET.get('user_id'):
        #for debugging tests that branch on user.id % 2
context_data.setdefault('user', {}).update({'id': int(request.GET.get('user_id'))})
args = cxt.get('args', {}).copy()
args.update(request.GET.dict())
cxt['args'] = args
if 'akid' not in cxt:
cxt['akid'] = cxt['args'].get('akid')
cxt['request'] = request
cxt['js_context'] = '""' # FUTURE: what should go in here?
return cxt
#############
# HOME PAGE TEST
#############
def index(request, name, page=None):
cxt = _get_context_data(request, name, page)
template = request.GET.get('template',
cxt.get('filename', "homepagetest.html"))
return render_to_response(template, cxt)
def login_context(request):
cxt = _get_context_data(request, use_referer=True)
from actionkit_templates.contexts.event_context_json import event_json
event_json_copy = event_json.copy()
coming_from = request.GET.get('url','')
if 'event' in coming_from \
or 'logged_in' in coming_from \
or 'survey_logged_in' in coming_from:
if not request.GET.get('login') and 'survey_logged_in' not in coming_from:
del event_json_copy['name']
return HttpResponse(
'actionkit.forms.onContextLoaded(%s)' % json.dumps(event_json_copy))
elif cxt.get('context'):
return HttpResponse('actionkit.forms.onContextLoaded(%s)' % json.dumps(cxt['context']))
else:
return HttpResponse(
#text key has all the generic error messages
'actionkit.forms.onContextLoaded({"text": %s})' % json.dumps(event_json['text']))
def user_password_forgot(request):
return HttpResponse('unimplemented')
def logout(request):
if request.GET.get('next'):
return redirect(request.GET.get('next'))
return redirect('/logout.html')
def event_search_results(request, page):
cxt = _get_context_data(request, 'events', 'WILL_USE_REFERER_HEADER', use_referer=True)
# special query results context:
all = cxt['args'].get('all') == '1'
cxt.update({'all': all})
if cxt.get('SLOW_SEARCH'):
# This allows us to test for race conditions
time.sleep(2)
search_results = render_to_string('event_search_results.html', cxt)
return HttpResponse('actionkit.forms.onEventSearchResults({})'
.format(json.dumps(search_results)))
def event_api_moveon_fake(request):
"""Fake representation of MoveOn events api"""
cxt = _get_context_data(request, 'events', 'WILL_USE_REFERER_HEADER', use_referer=True)
events = cxt.get('events', [])
if cxt.get('SLOW_API'):
# This allows us to test for race conditions
time.sleep(2)
if cxt.get('500_API'):
raise Exception('Cause failure to allow graceful degradation')
search_results = [mo_event_data(evt) for evt in events]
return HttpResponse(json.dumps({'events': search_results}), content_type='application/json')
def proxy_serve(request, path, document_root=None, show_indexes=False):
try_proxy = True
try:
import requests
except ImportError:
try_proxy = False
try:
return serve(request, path, document_root, show_indexes)
except Http404:
if try_proxy:
prefix = request.path.split('/')[1]
content = requests.get('https://roboticdogs.actionkit.com/{}/{}'.format(prefix, path), verify=False)
if content.status_code == 200:
return HttpResponse(content.content, content_type=content.headers['Content-Type'])
raise Http404
#############
# URLS
#############
ROOT_URLCONF = 'actionkit_templates.settings'
urlpatterns = [
url(r'^context', login_context),
url(r'^progress', login_context, name='progress'),
url(r'^logout', logout, name="logout"),
url(r'^(?P<name>[-.\w]+)?(/(?P<page>[-.\w]+))?$', index),
url(r'^forgot/$', user_password_forgot, name='user_password_forgot'),
url(r'^cms/event/(?P<page>[-.\w]+)/search_results/', event_search_results, name='event_search_results'),
url(r'^fake/api/events', event_api_moveon_fake, name="event_api_moveon_fake"),
# ActionKit urls or {% url %} template tag:
url(r'^fake/stub/reverse', event_api_moveon_fake, name="reverse_donation"),
]
if STATIC_ROOT:
urlpatterns = (urlpatterns
+ static(STATIC_URL, document_root=STATIC_ROOT)
+ static('/resources/',
view=proxy_serve,
document_root=os.path.join(STATIC_ROOT, './resources'))
+ static('/media/',
view=proxy_serve,
document_root=os.path.join(STATIC_ROOT, './media'))
)
if os.path.exists(os.path.join(PROJECT_ROOT_PATH, 'local_settings.py')):
from local_settings import *
| 35.77459
| 112
| 0.643487
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,459
| 0.281705
|
e9a055a93eab839ab9a14c3a44071ae1537f4ac6
| 1,528
|
py
|
Python
|
fpga/test/fifo/fifo_tb.py
|
edge-analytics/fpga-sleep-tracker
|
50efd114500e134297be5229775a9ec6809abb53
|
[
"MIT"
] | 2
|
2021-11-05T13:27:35.000Z
|
2022-03-12T04:44:03.000Z
|
fpga/test/fifo/fifo_tb.py
|
edge-analytics/fpga-sleep-tracker
|
50efd114500e134297be5229775a9ec6809abb53
|
[
"MIT"
] | null | null | null |
fpga/test/fifo/fifo_tb.py
|
edge-analytics/fpga-sleep-tracker
|
50efd114500e134297be5229775a9ec6809abb53
|
[
"MIT"
] | null | null | null |
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import ClockCycles, RisingEdge, FallingEdge, NextTimeStep, ReadWrite
N = 16
test_input = list(range(N))
async def writer(dut):
for i in test_input:
busy_check = lambda : not dut.ready_for_input.value
while busy_check():
await ClockCycles(dut.clk, 1)
dut.input_valid <= 1
dut.data_in <= i
await ClockCycles(dut.clk, 1)
dut.input_valid <= 0
await ClockCycles(dut.clk, 1)
# FIXME add more unit tests here
async def reader(dut):
    dut.ready_for_output <= 1
data_out = []
while (len(data_out) < N):
await RisingEdge(dut.clk)
await ReadWrite()
if dut.output_valid.value:
data_out.append(int(dut.data_out.value))
print(int(dut.data_out.value))
# Introduce random read delay to show that the fifo will respect
# ready for output signals
if (len(data_out) % (N//6)) == 0:
dut.ready_for_output <= 0
await ClockCycles(dut.clk, 100)
dut.ready_for_output <= 1
return data_out
@cocotb.test()
async def test_fifo(dut):
clk = dut.clk
cocotb.fork(Clock(clk, 10, units="ns").start())
# Reset Started
await NextTimeStep()
dut.reset <= 1
await ClockCycles(clk, 1)
dut.reset <= 0
await ClockCycles(clk, 1)
# Reset Done
writer_process = cocotb.fork(writer(dut))
fifo_readback = await reader(dut)
assert(test_input == fifo_readback)
| 28.296296
| 89
| 0.630236
| 0
| 0
| 0
| 0
| 396
| 0.259162
| 1,298
| 0.849476
| 153
| 0.100131
|
e9a05f45a351e31a1eadb205f7bd181f6ae63473
| 2,314
|
py
|
Python
|
Mock-exams/02-Mock-exam/notes/notes/app/views.py
|
M0673N/Python-Web-Basics
|
cecc27f7a12f990756edcc8885290eb3b2e487b7
|
[
"MIT"
] | null | null | null |
Mock-exams/02-Mock-exam/notes/notes/app/views.py
|
M0673N/Python-Web-Basics
|
cecc27f7a12f990756edcc8885290eb3b2e487b7
|
[
"MIT"
] | null | null | null |
Mock-exams/02-Mock-exam/notes/notes/app/views.py
|
M0673N/Python-Web-Basics
|
cecc27f7a12f990756edcc8885290eb3b2e487b7
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect
from notes.app.forms import ProfileForm, NoteForm, NoteDeleteForm
from notes.app.models import Profile, Note
def home(request):
if request.method == 'GET':
profile = Profile.objects.first()
if not profile:
form = ProfileForm()
return render(request, 'home-no-profile.html', {'form': form})
else:
notes = Note.objects.all()
return render(request, 'home-with-profile.html', {'notes': notes})
else:
form = ProfileForm(request.POST)
if form.is_valid():
form.save()
return redirect('home')
else:
return render(request, 'home-no-profile.html', {'form': form})
def add_note(request):
if request.method == 'GET':
form = NoteForm()
return render(request, 'note-create.html', {'form': form})
else:
form = NoteForm(request.POST)
if form.is_valid():
form.save()
return redirect('home')
else:
return render(request, 'note-create.html', {'form': form})
def edit_note(request, pk):
note = Note.objects.get(pk=pk)
if request.method == 'GET':
form = NoteForm(instance=note)
return render(request, 'note-edit.html', {'form': form})
else:
form = NoteForm(request.POST, instance=note)
if form.is_valid():
form.save()
return redirect('home')
else:
return render(request, 'note-edit.html', {'form': form})
def delete_note(request, pk):
note = Note.objects.get(pk=pk)
if request.method == 'GET':
form = NoteDeleteForm(instance=note)
return render(request, 'note-delete.html', {'form': form})
else:
note.delete()
return redirect('home')
def note_details(request, pk):
note = Note.objects.get(pk=pk)
return render(request, 'note-details.html', {'note': note})
def profile_details(request):
profile = Profile.objects.first()
notes = Note.objects.all()
return render(request, 'profile.html', {'profile': profile, 'notes': notes.count()})
def delete_profile(request):
profile = Profile.objects.first()
notes = Note.objects.all()
profile.delete()
notes.delete()
return redirect('home')
| 29.291139
| 88
| 0.600259
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 308
| 0.133103
|
e9a09dff959ae1110da793fb71caa1d3736f73bf
| 3,066
|
py
|
Python
|
trainwiki.py
|
tomsonsgs/TRAN-MMA-master
|
91bf927c64a8d813ba60ae12e61e8f44830a82cc
|
[
"Apache-2.0"
] | null | null | null |
trainwiki.py
|
tomsonsgs/TRAN-MMA-master
|
91bf927c64a8d813ba60ae12e61e8f44830a82cc
|
[
"Apache-2.0"
] | null | null | null |
trainwiki.py
|
tomsonsgs/TRAN-MMA-master
|
91bf927c64a8d813ba60ae12e61e8f44830a82cc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 2 00:56:18 2019
@author: tang
"""
seed=102
vocab="vocab.bin"
train_file="train.bin"
dropout=0.3
hidden_size=256
embed_size=100
action_embed_size=100
field_embed_size=32
type_embed_size=32
lr_decay=0.5
beam_size=5
patience=2
lstm='lstm'
col_att='affine'
model_name='wiki'
def updatetest(opt):
model_name1='wikitest.decode1'
#
opt.cuda =True
opt.mode ='test'
opt.load_model='saved_models/wikisql_bk/'+model_name+'.bin'
opt.beam_size=5
opt.parser='wikisql_parser'
opt.evaluator='wikisql_evaluator'
opt.sql_db_file='data/wikisql1/test.db'
opt.test_file='data/wikisql1/test.bin'
opt.save_decode_to='decodes/wikisql/'+model_name1
opt.decode_max_time_step=50
def update(opt):
opt.cuda=True
opt.seed=seed
opt.mode='train'
opt.batch_size=16
opt.parser='wikisql_parser'
opt.asdl_file='asdl/lang/sql/sql_asdl.txt'
opt.transition_system='sql'
opt.evaluator='wikisql_evaluator'
opt.train_file='data/wikisql1/'+train_file
opt.dev_file='data/wikisql1/test.bin'
opt.sql_db_file='data/wikisql1/test.db'
opt.vocab='data/wikisql1/'+vocab
opt.glove_embed_path='data/contrib/glove.6B.100d.txt'
opt.lstm =lstm
opt.column_att =col_att
opt.no_parent_state =True
opt.no_parent_field_embed =True
opt.no_parent_field_type_embed =True
opt.no_parent_production_embed =True
opt.hidden_size =hidden_size
opt.embed_size =embed_size
opt.action_embed_size =action_embed_size
opt.field_embed_size =field_embed_size
opt.type_embed_size =type_embed_size
opt.dropout =dropout
opt.patience =patience
opt.max_num_trial =5
opt.lr_decay =lr_decay
opt.glorot_init=True
opt.beam_size =beam_size
opt.eval_top_pred_only =True
opt.decode_max_time_step=50
opt.log_every=500
opt.save_to='saved_models/wikisql_bk/'+model_name
#python -u exp.py \
# --cuda \
# --seed ${seed} \
# --mode train \
# --batch_size 64 \
# --parser wikisql_parser \
# --asdl_file asdl/lang/sql/sql_asdl.txt \
# --transition_system sql \
# --evaluator wikisql_evaluator \
# --train_file data/wikisql/${train_file} \
# --dev_file data/wikisql/dev.bin \
# --sql_db_file data/wikisql/dev.db \
# --vocab data/wikisql/${vocab} \
# --glove_embed_path data/contrib/glove.6B.100d.txt \
# --lstm ${lstm} \
# --column_att ${col_att} \
# --no_parent_state \
# --no_parent_field_embed \
# --no_parent_field_type_embed \
# --no_parent_production_embed \
# --hidden_size ${hidden_size} \
# --embed_size ${embed_size} \
# --action_embed_size ${action_embed_size} \
# --field_embed_size ${field_embed_size} \
# --type_embed_size ${type_embed_size} \
# --dropout ${dropout} \
# --patience ${patience} \
# --max_num_trial 5 \
# --lr_decay ${lr_decay} \
# --glorot_init \
# --beam_size ${beam_size} \
# --eval_top_pred_only \
# --decode_max_time_step 50 \
# --log_every 10 \
# --save_to saved_models/wikisql/${model_name}
| 28.924528
| 63
| 0.689498
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,610
| 0.525114
|
e9a18b845016664a0d3350f6afe5c55f943340ff
| 3,476
|
py
|
Python
|
heritago/heritages/tests/tests_annotationdatamodel.py
|
SWE574-Groupago/heritago
|
ec7d279df667a4f2c3560dfac4b5b17046163a95
|
[
"MIT"
] | 6
|
2017-02-13T10:22:18.000Z
|
2017-03-11T20:38:30.000Z
|
heritago/heritages/tests/tests_annotationdatamodel.py
|
SWE574-Groupago/heritago
|
ec7d279df667a4f2c3560dfac4b5b17046163a95
|
[
"MIT"
] | 172
|
2017-02-12T21:07:27.000Z
|
2017-06-08T10:46:58.000Z
|
heritago/heritages/tests/tests_annotationdatamodel.py
|
SWE574-RenameMe/heritago
|
ec7d279df667a4f2c3560dfac4b5b17046163a95
|
[
"MIT"
] | 17
|
2017-02-13T08:29:37.000Z
|
2017-06-29T14:43:53.000Z
|
import unittest
from django.test import Client
class AnnotationDataModelTests(unittest.TestCase):
api_url_template = "/api/v1/heritages/#/annotations"
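    # the "#" placeholder is substituted with the created heritage id in setUpClass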
xpath_annotation_response = ""
heritage_path = ""
api_url_set = ""
@classmethod
def setUpClass(cls):
cls.heritage_path = "/api/v1/heritages/"
h_id = cls.create_heritage_item()
cls.api_url_set = cls.api_url_template.replace("#", str(h_id))
cls.ann_response = cls.create_XPATH_annotation()
cls.ann_id = cls.ann_response["id"].rsplit("/", 2)[-2]
cls.ann_get_response = Client().get(cls.api_url_set + "/" + str(cls.ann_id)).json()
@classmethod
def create_heritage_item(cls):
client = Client()
r = client.post(cls.heritage_path, {
"title": "Santa Clause",
"description": "Santa Claus, also known as Saint Nicholas, Saint Nick, Kris Kringle, Father Christmas, "
"or simply Santa (Santy in Hiberno-English), is a legendary figure of Western Christian "
"culture who is said to bring gifts to the homes of well-behaved (\"good\" or \"nice\") "
"children on Christmas Eve (24 December) and the early morning hours of Christmas Day "
"(25 December).",
"startDate": "1087",
"endDate": "continuing",
"exactDate": "1700",
"origin": [{"name": "Dutch"}, {"name": "British"}],
"basicInformation": [{"name": "AKA", "value": "Sinterklaas"}],
"tags": [{"name": "religion"}, {"name": "christmas"}, {"name": "figure"}]
})
return r.json()['id']
@classmethod
def create_XPATH_annotation(cls):
return Client().post(cls.api_url_set, {
"@context": "http://www.w3.org/ns/anno.jsonld",
"type": "Annotation",
"creator": "me",
"body": [
{
"type": "video",
"format": "text/plain",
"value": "loved it"
}
],
"target": [
{
"type": "text",
"format": "text/plain",
"selector": [
{
"type": "FragmentSelector",
"conformsTo": "http://tools.ietf.org/rfc/rfc5147",
"value": "char=2,4"
}
]
}
]
}).json()
def test_create_XPATH_annotation(self):
ann_id = self.create_XPATH_annotation()
self.assertTrue(len(ann_id) > 0)
def test_annotation_must_have_1_or_more_context_property(self):
self.assertTrue("@context" in self.ann_get_response.keys())
def test_an_annotation_must_have_exactly_1_IRI_that_defines_it(self):
self.assertTrue("id" in self.ann_get_response.keys())
def test_an_annotation_must_have_1_or_more_types_and_the_annotation_class_must_be_one_of_them(self):
self.assertTrue(self.ann_get_response["type"], "Annotation")
def test_an_annotation_must_have_body_relationships_associated_with_it(self):
self.assertTrue("body" in self.ann_get_response.keys())
def test_there_must_be_1_or_more_target_relationships_associated_with_an_annotation(self):
self.assertTrue("target" in self.ann_get_response.keys())
| 39.954023
| 116
| 0.561277
| 3,424
| 0.98504
| 0
| 0
| 2,324
| 0.668585
| 0
| 0
| 988
| 0.284235
|
e9a26fd47a49716298a92bfa1c231de0e135e9dd
| 824
|
py
|
Python
|
tests/test_main.py
|
cesarbruschetta/julio-cesar-decrypter
|
1f8b94b6370fb0a8bbfc1fa6b44adc9d69bf088c
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_main.py
|
cesarbruschetta/julio-cesar-decrypter
|
1f8b94b6370fb0a8bbfc1fa6b44adc9d69bf088c
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_main.py
|
cesarbruschetta/julio-cesar-decrypter
|
1f8b94b6370fb0a8bbfc1fa6b44adc9d69bf088c
|
[
"BSD-2-Clause"
] | null | null | null |
import unittest
from unittest.mock import patch
from jc_decrypter.main import process, main
class TestMainProcess(unittest.TestCase):
@patch("jc_decrypter.main.decrypter")
def test_arg_decrypter(self, mk_decrypter):
process(["--token", "1234567890"])
mk_decrypter.assert_called_once_with("1234567890")
def test_not_arg(self):
with self.assertRaises(SystemExit) as cm:
process([])
self.assertEqual(
"the following arguments are required: --token/-t", str(cm.exception)
)
class TestMainMain(unittest.TestCase):
@patch("jc_decrypter.main.process")
def test_main_process(self, mk_process):
mk_process.return_value = 0
self.assertRaises(SystemExit, main)
mk_process.assert_called_once_with(["test"])
| 27.466667
| 85
| 0.679612
| 725
| 0.879854
| 0
| 0
| 402
| 0.487864
| 0
| 0
| 145
| 0.175971
|
e9a341910fc41cf0116d2acf9b1914cdde30cec5
| 615
|
py
|
Python
|
library/tests/test_setup.py
|
pimoroni/mics6814-python
|
73c4f23d36c1f97dcdcb2d4ee08a52f6fedcda79
|
[
"MIT"
] | 6
|
2021-05-16T05:02:57.000Z
|
2022-01-05T16:02:46.000Z
|
library/tests/test_setup.py
|
pimoroni/mics6814-python
|
73c4f23d36c1f97dcdcb2d4ee08a52f6fedcda79
|
[
"MIT"
] | 3
|
2021-09-15T10:24:56.000Z
|
2022-01-24T21:16:05.000Z
|
library/tests/test_setup.py
|
pimoroni/mics6814-python
|
73c4f23d36c1f97dcdcb2d4ee08a52f6fedcda79
|
[
"MIT"
] | null | null | null |
import mock
def test_setup(ioexpander):
from mics6814 import MICS6814
mics6814 = MICS6814()
mics6814._ioe.set_pwm_period.assert_called_once_with(5100)
mics6814._ioe.set_mode.assert_has_calls((
mock.call(3, ioexpander.PWM),
mock.call(7, ioexpander.PWM),
mock.call(2, ioexpander.PWM),
mock.call(14, ioexpander.ADC),
mock.call(13, ioexpander.ADC),
mock.call(11, ioexpander.ADC),
mock.call(12, ioexpander.ADC),
mock.call(1, ioexpander.OUT)
))
mics6814._ioe.output.assert_called_once_with(1, ioexpander.LOW)
del mics6814
| 24.6
| 67
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e9a3a2aba365270bf90b9a6d7673d3d58bca51fe
| 3,290
|
py
|
Python
|
template_maker/data/documents.py
|
codeforamerica/template-maker
|
66d4744c123d5b868cf259e947dc924bb5a25c9a
|
[
"BSD-3-Clause"
] | 9
|
2015-02-23T22:03:30.000Z
|
2020-01-31T19:06:50.000Z
|
template_maker/data/documents.py
|
codeforamerica/template-maker
|
66d4744c123d5b868cf259e947dc924bb5a25c9a
|
[
"BSD-3-Clause"
] | 37
|
2015-03-01T01:10:22.000Z
|
2015-12-31T17:24:42.000Z
|
template_maker/data/documents.py
|
codeforamerica/template-maker
|
66d4744c123d5b868cf259e947dc924bb5a25c9a
|
[
"BSD-3-Clause"
] | 2
|
2016-01-21T09:59:17.000Z
|
2021-04-16T10:51:04.000Z
|
import datetime
from template_maker.database import db
from template_maker.generator.models import DocumentBase, DocumentPlaceholder
from template_maker.builder.models import TemplateBase, TemplatePlaceholders
from template_maker.data.placeholders import get_template_placeholders
def get_all_documents():
'''
Returns all documents currently being edited
'''
return DocumentBase.query.all()
def get_documents_and_parent_templates():
return db.session.query(
DocumentBase.id, DocumentBase.name, TemplateBase.title
).filter(DocumentBase.template_id==TemplateBase.id).all()
def get_document_placeholders(document_id):
'''
Gets all the placeholders associated with a document
'''
return db.session.query(
DocumentPlaceholder.id, TemplatePlaceholders.full_name, TemplatePlaceholders.type,
TemplatePlaceholders.display_name, DocumentPlaceholder.value
).filter(DocumentPlaceholder.document_id==document_id).filter(
DocumentPlaceholder.placeholder_id==TemplatePlaceholders.id
).all()
def get_single_document(document_id):
    '''
    Returns a single document from a document_id
    '''
return DocumentBase.query.get(document_id)
def get_single_document_and_parent_template(document_id):
return db.session.query(
DocumentBase.id, DocumentBase.name, TemplateBase.title
).filter(DocumentBase.template_id==TemplateBase.id).filter(
DocumentBase.id==document_id
).first()
def set_document_placeholders(template_id, document_base):
# create the placeholders for the document
placeholders = get_template_placeholders(template_id)
for placeholder in placeholders:
_placeholder = DocumentPlaceholder.query.filter(
DocumentPlaceholder.placeholder_id==placeholder.id
).filter(
DocumentPlaceholder.document_id==document_base.id
).first()
# if we already have this placeholder, pass
if _placeholder:
continue
new_placeholder = DocumentPlaceholder(
document_id=document_base.id,
placeholder_id=placeholder.id,
)
db.session.add(new_placeholder)
db.session.commit()
def update_documents(template_id):
# get all non-published documents based on the template
documents = DocumentBase.query.filter(
DocumentBase.template_id==template_id
).all()
for document in documents:
set_document_placeholders(template_id, document)
return len(documents)
def create_new_document(template_id, data):
now = datetime.datetime.utcnow()
# create the document
document_base = DocumentBase(
created_at=now,
updated_at=now,
name=data.get('name'),
template_id=template_id
)
db.session.add(document_base)
db.session.commit()
set_document_placeholders(template_id, document_base)
return document_base.id
def save_document_section(placeholders, data):
for placeholder in placeholders:
_placeholder = DocumentPlaceholder.query.get(placeholder.id)
_placeholder.value = data.get(placeholder.display_name, '')
db.session.commit()
return True
def delete_document(document):
db.session.delete(document)
db.session.commit()
return True
| 31.333333
| 90
| 0.730091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 357
| 0.108511
|
e9a3a67be8807d04ec27501d70d8ad63e1c4fad0
| 1,194
|
py
|
Python
|
app/db.py
|
JuanDM93/fcc-fastapi-demo
|
7d20f91fa96989d22426632c1ab2550f62898789
|
[
"MIT"
] | null | null | null |
app/db.py
|
JuanDM93/fcc-fastapi-demo
|
7d20f91fa96989d22426632c1ab2550f62898789
|
[
"MIT"
] | null | null | null |
app/db.py
|
JuanDM93/fcc-fastapi-demo
|
7d20f91fa96989d22426632c1ab2550f62898789
|
[
"MIT"
] | null | null | null |
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from .config import settings
SQLALCHEMY_DATABASE_URL = 'postgresql://{user}:{password}@{host}:{port}/{db}'.format(
user=settings.DB_USER,
password=settings.DB_PASSWORD,
host=settings.DB_HOST,
port=settings.DB_PORT,
db=settings.DB_NAME
)
engine = create_engine(SQLALCHEMY_DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
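# Session dependency in the FastAPI style (the repo is a FastAPI demo):
# yields one session per request, and the finally block guarantees close()
# even if the request handler raises.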
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
"""
while True:
try:
conn = psycopg2.connect(
host=settings.DB_HOST,
port=settings.DB_PORT,
database=settings.DB_NAME,
user=settings.DB_USER,
password=settings.DB_PASSWORD,
cursor_factory=RealDictCursor
)
cur = conn.cursor()
print("Connected to the database")
break
except (Exception, psycopg2.Error) as error:
print(error)
print(f"Retrying in {settings.SLEEP_TIME} secs")
sleep(settings.SLEEP_TIME)
"""
| 23.88
| 85
| 0.664154
| 0
| 0
| 95
| 0.079564
| 0
| 0
| 0
| 0
| 598
| 0.500838
|
e9a3b150e872655275d100c3ba1868368c2d52e0
| 716
|
py
|
Python
|
katph/spiders/stackoverflow_spider.py
|
trujunzhang/katph
|
b71b5a7171b133fcf087f77cd612c13a966ecd61
|
[
"MIT"
] | null | null | null |
katph/spiders/stackoverflow_spider.py
|
trujunzhang/katph
|
b71b5a7171b133fcf087f77cd612c13a966ecd61
|
[
"MIT"
] | null | null | null |
katph/spiders/stackoverflow_spider.py
|
trujunzhang/katph
|
b71b5a7171b133fcf087f77cd612c13a966ecd61
|
[
"MIT"
] | null | null | null |
import scrapy
from scrapy.selector import Selector
from katph.items import StackItem
class katphSpider(scrapy.Spider):
name = "stackoverflow"
allowed_domains = ["stackoverflow.com"]
start_urls = [
"%s/questions?pagesize=50&sort=newest" % "http://stackoverflow.com",
]
def parse(self, response):
questions = Selector(response).xpath('//div[@class="summary"]/h3')
for question in questions:
item = StackItem()
item['title'] = question.xpath(
'a[@class="question-hyperlink"]/text()').extract()[0]
item['url'] = question.xpath(
'a[@class="question-hyperlink"]/@href').extract()[0]
yield item
| 31.130435
| 76
| 0.603352
| 628
| 0.877095
| 416
| 0.581006
| 0
| 0
| 0
| 0
| 215
| 0.300279
|
e9a3c9a700552b660476506eef95bc2604a7a3bc
| 1,156
|
py
|
Python
|
migrations/0002_user_biography_user_gender_user_phone_number_and_more.py
|
sepydev/django-user
|
1a67caa197f9bb72ec41491cac1ae0a94385da87
|
[
"MIT"
] | 1
|
2022-02-05T18:26:02.000Z
|
2022-02-05T18:26:02.000Z
|
migrations/0002_user_biography_user_gender_user_phone_number_and_more.py
|
mrprocs/django-user
|
1a67caa197f9bb72ec41491cac1ae0a94385da87
|
[
"MIT"
] | null | null | null |
migrations/0002_user_biography_user_gender_user_phone_number_and_more.py
|
mrprocs/django-user
|
1a67caa197f9bb72ec41491cac1ae0a94385da87
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.1 on 2022-02-07 17:53
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='biography',
field=models.TextField(blank=True, verbose_name='Biography'),
),
migrations.AddField(
model_name='user',
name='gender',
field=models.CharField(blank=True, max_length=20, verbose_name='Gender'),
),
migrations.AddField(
model_name='user',
name='phone_number',
field=models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')], verbose_name='Phone number'),
),
migrations.AddField(
model_name='user',
name='website',
field=models.URLField(blank=True, verbose_name='Website address'),
),
]
| 33.028571
| 264
| 0.601211
| 1,033
| 0.893599
| 0
| 0
| 0
| 0
| 0
| 0
| 287
| 0.24827
|
e9a5f013db2d4eef22aa1809148db7e678473ae5
| 501
|
py
|
Python
|
utils/constants.py
|
tholiao/learning-morph-and-ctrl
|
6093cc7cede3b7ab2f3304d7060815712d535a2d
|
[
"MIT"
] | 1
|
2022-03-10T08:17:18.000Z
|
2022-03-10T08:17:18.000Z
|
utils/constants.py
|
tholiao/learning-morph-and-ctrl
|
6093cc7cede3b7ab2f3304d7060815712d535a2d
|
[
"MIT"
] | null | null | null |
utils/constants.py
|
tholiao/learning-morph-and-ctrl
|
6093cc7cede3b7ab2f3304d7060815712d535a2d
|
[
"MIT"
] | null | null | null |
import numpy as np
from walkers import ScalableWalker
DEFAULT_SCENE = "scenes/walker.ttt"
DEFAULT_WALKER = ScalableWalker
N_MRPH_PARAMS = [3, 3, 6]
N_CTRL_PARAMS = [4, 8, 8]
MORPHOLOGY_BOUNDS = [
[[0.7] * 3, [1.4] * 3],
[[0.7] * 3, [1.4] * 3],
[[0.7] * 6, [1.4] * 6]
]
CONTROLLER_BOUNDS = [
[[1, -np.pi, 0, 0], [45, np.pi, 1, 1]],
[[1, -np.pi, 0, 0, 0, 0, .5, .5], [45, np.pi, .4, .4, .4, .4, 1, 1]],
[[1, -np.pi, 0, 0, 0, 0, .5, .5], [45, np.pi, .4, .4, .4, .4, 1, 1]]
]
| 22.772727
| 73
| 0.493014
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 19
| 0.037924
|
e9a6214120a911400cce37d1a1a474426ab60fe5
| 1,284
|
py
|
Python
|
hardware/joystick.py
|
davidji/roundbot
|
2ca34a83c9feb3331f1b818106f06b3182c4970e
|
[
"Apache-2.0"
] | null | null | null |
hardware/joystick.py
|
davidji/roundbot
|
2ca34a83c9feb3331f1b818106f06b3182c4970e
|
[
"Apache-2.0"
] | null | null | null |
hardware/joystick.py
|
davidji/roundbot
|
2ca34a83c9feb3331f1b818106f06b3182c4970e
|
[
"Apache-2.0"
] | null | null | null |
from solid import *
from solid.utils import *
import util
from util import inch_to_mm, tube, ABIT, corners, pipe
from fixings import M3
from math import tan, radians
"""
Sub-miniature analog joysticks.
There's not much useful documentation of their measurements.
I'm going to treat one as a sphere with a 14mm radius, with a 12mm diameter
cylinder sticking out of the top that can tilt 40 degrees in any direction.
The knob on the top is 20mm wide, so the hole in the panel must be at least
that wide.
"""
fixing = M3
width=35.0
depth=35.0
pivot_height=9.6
panel_height=11.0
height=pivot_height+panel_height
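# In block() below, the panel cut-out is a hole() cone whose radius widens
# from r1=6.0 to r2=6.0 + tan(30 degrees) * panel_height through the panel
# thickness, so the tilted stick shaft clears the opening.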
def block():
return down(pivot_height+panel_height)(forward(1.8)(linear_extrude(height)(square([35,35], center=True))) -
up(pivot_height)(sphere(r=14.0)) -
down(ABIT)(cylinder(h=pivot_height+ABIT, r=14.0)) +
up(pivot_height)(hole()(cylinder(r1=6.0, r2=6.0 + tan(radians(30.0))*panel_height, h=panel_height))) -
forward(1.8)(linear_extrude(pivot_height)(square([14.0, depth], center=True))) -
forward(1.8)(linear_extrude(1.6)(square([25.5, 32.0], center=True))))
def fixings():
return corners(20.4, 26.6)
def export_scad():
util.save('joystick-block', block())
if __name__ == '__main__':
export_scad()
| 28.533333
| 114
| 0.696262
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 353
| 0.274922
|
e9a7d2f66b4f8dbaa2eb22e345ef51c2d6c7fe14
| 2,360
|
py
|
Python
|
src/Line.py
|
npanuhin/BIOCAD-BWA
|
50f56fd7d08b8ad1247934c902fb137f3c28cdf8
|
[
"MIT"
] | null | null | null |
src/Line.py
|
npanuhin/BIOCAD-BWA
|
50f56fd7d08b8ad1247934c902fb137f3c28cdf8
|
[
"MIT"
] | null | null | null |
src/Line.py
|
npanuhin/BIOCAD-BWA
|
50f56fd7d08b8ad1247934c902fb137f3c28cdf8
|
[
"MIT"
] | null | null | null |
from typing import List
from collections import deque
class Line:
"""
Properties:
start_x {0}
start_y {1}
end_x {2}
end_y {3}
dots = [dot1, ..., dotN] {4}
coords = (start_x, start_y, end_x, end_y)
"""
    def __init__(self, start_x=None, start_y=None, end_x=None, end_y=None, dots=None):
        self.start_x = start_x
        self.start_y = start_y
        self.end_x = end_x
        self.end_y = end_y
        # avoid a shared mutable default argument
        self.dots = dots if dots is not None else []
def __repr__(self):
return "Line(start_x={}, start_y={}, end_x={}, end_y={}, dots=[{}])".format(
self.start_x, self.start_y, self.end_x, self.end_y, len(self.dots)
)
@property
def coords(self):
return self.start_x, self.start_y, self.end_x, self.end_y
# @property
# def x1(self):
# return self.start_x
# @property
# def y1(self):
# return self.start_y
# @property
# def x2(self):
# return self.end_x
# @property
# def y2(self):
# return self.end_y
@property
def center_x(self):
return (self.start_x + self.end_x) // 2
@property
def center_y(self):
return (self.start_y + self.end_y) // 2
def isTiltedCorrectly(self):
return self.start_y <= self.end_y
@property
def k(self):
return (self.end_y - self.start_y) / (self.end_x - self.start_x)
@property
def b(self):
return self.end_y - self.end_x * self.k
def copyCoords(self):
return Line(self.start_x, self.start_y, self.end_x, self.end_y, dots=[])
def shift(self, dx=0, dy=0):
self.start_x += dx
self.start_y += dy
self.end_x += dx
self.end_y += dy
for i in range(len(self.dots)):
self.dots[i][0] += dx
self.dots[i][1] += dy
def rotateY(self, rotation_center, line=True, dots=False):
if line:
self.start_y -= (self.start_y - rotation_center) * 2
self.end_y -= (self.end_y - rotation_center) * 2
if dots:
for i in range(len(self.dots)):
self.dots[i][1] -= (self.dots[i][1] - rotation_center) * 2
def shiftLines(lines, count) -> List[Line]:
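    # rotate the list left by `count` positions via a deque (popleft + append)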
result = deque(lines)
for _ in range(count):
result.append(result.popleft())
return list(result)
| 25.106383
| 84
| 0.555932
| 2,140
| 0.90678
| 0
| 0
| 432
| 0.183051
| 0
| 0
| 459
| 0.194492
|
e9a8550e13deee649e253f45b07fa459658b1f18
| 205
|
py
|
Python
|
hw_asr/model/__init__.py
|
ArturGoldman/ASR-HW
|
96494a7ce3f6661fbafb8077f15ece8c6e4b1a11
|
[
"MIT"
] | null | null | null |
hw_asr/model/__init__.py
|
ArturGoldman/ASR-HW
|
96494a7ce3f6661fbafb8077f15ece8c6e4b1a11
|
[
"MIT"
] | null | null | null |
hw_asr/model/__init__.py
|
ArturGoldman/ASR-HW
|
96494a7ce3f6661fbafb8077f15ece8c6e4b1a11
|
[
"MIT"
] | 1
|
2021-10-29T18:46:14.000Z
|
2021-10-29T18:46:14.000Z
|
from hw_asr.model.baseline_model import BaselineModel, BasicLSTM, BasicGRU
from hw_asr.model.QuartzNet import QuartzNet
__all__ = [
"BaselineModel",
"BasicLSTM",
"BasicGRU",
"QuartzNet"
]
| 20.5
| 74
| 0.731707
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 47
| 0.229268
|
e9ab3dbd3f61574c06a9441f006ee914a6d3064c
| 4,458
|
py
|
Python
|
Fishers LDA/fishersLDA.py
|
Exorust/Machine-Learning-Algorithms
|
c634fd0a1a49ea2574f0867b591ee8a2cd401fd2
|
[
"MIT"
] | null | null | null |
Fishers LDA/fishersLDA.py
|
Exorust/Machine-Learning-Algorithms
|
c634fd0a1a49ea2574f0867b591ee8a2cd401fd2
|
[
"MIT"
] | null | null | null |
Fishers LDA/fishersLDA.py
|
Exorust/Machine-Learning-Algorithms
|
c634fd0a1a49ea2574f0867b591ee8a2cd401fd2
|
[
"MIT"
] | null | null | null |
'''**********************************************
CODE TO IMPLEMENT FISHER'S LDA -
Given a two-dimensional dataset with two classes 0 and 1:
perform Fisher's LDA on the dataset,
perform dimensionality reduction and find a suitable vector to project onto,
and find the threshold value separating the two classes.
***********************************************'''
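# Background for the steps below: Fisher's criterion
#   J(w) = (w^T (m1 - m0))^2 / (w^T Sw w)
# is maximized by w proportional to Sw^{-1} (m1 - m0), where m0 and m1 are
# the class means and Sw is the within-class variability; this script treats
# Sw as a scalar, so its inverse is simply 1 / Sw.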
import numpy as np
import matplotlib.pyplot as plt
import time
# to calculate the execution time of the algorithm
start_time = time.time()
# reading data csv file
my_data = np.genfromtxt('datasets/dataset_3.csv', delimiter=',')
# deleting the serial number column
data=np.delete(my_data,0,1)
# separating the two classes and deleting the target variable column
class0 = data[np.nonzero(data[:,2] == 0)]
class1=data[np.nonzero(data[:,2]==1)]
class0=np.delete(class0,2,1)
class1=np.delete(class1,2,1)
# finding the means of the two classes
mean0=np.mean(class0,0)
mean1=np.mean(class1,0)
''' calculating the within-class variability of the two classes using the formula:
    variability = sum over points x_i in the class of (x_i - mean)(x_i - mean)^T
'''
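# Note: temp is a 1-D array, so np.dot(temp, temp.T) below is the inner
# product (a scalar); var0 and var1 therefore accumulate the scalar sum of
# squared deviations rather than a full 2x2 scatter matrix.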
var0=np.zeros(1)
temp=np.array(mean0)
for i in range (class0.shape[0]) :
temp=(class0[i,:]-mean0)
var0+=np.dot(temp, temp.T)
var1=np.zeros(1)
temp=np.array(mean1)
for i in range (class1.shape[0]) :
temp=(class1[i,:]-mean1)
var1+=np.dot(temp, temp.T)
sw=var1+var0
# calculating the inverse of Sw matrix
invsw=np.array([(1/sw[0])])
# calculating the w vector using below formula
w=invsw*(mean1-mean0)
# declaring arrays for storing points' distance from the vector
dist0=np.zeros((class0.shape[0],1))
dist1=np.zeros((class1.shape[0],1))
# finding the vector to project the points on,
# such that the means are farthest from each other
wperp=np.array([-w[1],w[0]])
# finding the norm of the w vector
norm_w=np.linalg.norm(wperp)
''' calculating the distance of the original data points from the vector using the formula:
    r = (w^T x) / ||w||
'''
for i in range(dist0.shape[0]):
dist0[i]=np.dot(wperp.T,class0[i,:])/norm_w
for i in range(dist1.shape[0]):
dist1[i]=np.dot(wperp.T,class1[i,:])/norm_w
''' declaring the arrays to store the projected points data using formula:
x_projected = x_actual-r*w/norm(w)
'''
class0proj=np.zeros((class0.shape[0],2))
class1proj=np.zeros((class1.shape[0],2))
for i in range(class0.shape[0]):
class0proj[i,:]=np.subtract((class0[i,:]),(dist0[i]*wperp.T/norm_w))
for i in range(class1.shape[0]):
class1proj[i,:]=np.subtract((class1[i,:]),(dist1[i]*wperp.T/norm_w))
# displaying the plot with the original data , projected points and line
plt.scatter(class0[:,0],class0[:,1])
plt.scatter(class1[:,0],class1[:,1])
plt.scatter(class0proj[:,0],class0proj[:,1],color='blue')
plt.scatter(class1proj[:,0],class1proj[:,1],color='red')
#concatenating the two classes into a single array
pointsproj=np.concatenate((class0proj,class1proj),axis=0)
plt.plot(pointsproj[:,0],pointsproj[:,1],'m')
# storing dimensionally reduced projected points in array using formula:
# y(x) = w.T*x
newproj0=np.zeros((class0.shape[0],1))
newproj1=np.zeros((class1.shape[0],1))
for i in range(class0.shape[0]):
newproj0[i,:]=np.dot(wperp.T,class0[i,:])
for i in range(class1.shape[0]):
newproj1[i,:]=np.dot(wperp.T,class1[i,:])
# storing the means and standard deviations of the projected points
proj0mean=np.mean(newproj0)
proj1mean=np.mean(newproj1)
proj0std=np.std(newproj0)
proj1std=np.std(newproj1)
'''
The function "solve" below finds the threshold value separating the two
classes after dimensionality reduction -
input : m1, m2 - means of the two classes whose point of intersection needs to be found
        std1, std2 - the standard deviations of the two classes
'''
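# Coefficient derivation: equating the class-conditional Gaussian densities
# N(x | m1, std1) = N(x | m2, std2) and taking logs gives
#   (x - m1)^2 / (2*std1^2) + ln(std1) = (x - m2)^2 / (2*std2^2) + ln(std2)
# which rearranges into the quadratic a*x^2 + b*x + c = 0 solved below.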
def solve(m1,m2,std1,std2):
a = 1/(2*std1**2) - 1/(2*std2**2)
b = m2/(std2**2) - m1/(std1**2)
c = m1**2 /(2*std1**2) - m2**2 / (2*std2**2) - np.log(std2/std1)
roots= np.roots([a,b,c])
    # two points of intersection are possible, so select the one that lies between the two means
    if roots.shape[0] > 1:
        for i in range(2):
            if roots[i] != max(m1, m2, roots[i]) and roots[i] != min(m1, m2, roots[i]):
                return roots[i]
    else:
        return roots
threshold=solve(proj0mean,proj1mean,proj0std,proj1std)
print("Threshold value =", threshold)
print("Time taken = ",(time.time()-start_time))
plt.savefig('Results/Result3.png')
| 32.540146
| 104
| 0.685509
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,006
| 0.449574
|
e9ad668ebc54401a790054fd2f8bfe6c1d6a7c9b
| 3,071
|
py
|
Python
|
study/pytorch_study/14_dropout.py
|
strawsyz/straw
|
db313c78c2e3c0355cd10c70ac25a15bb5632d41
|
[
"MIT"
] | 2
|
2020-04-06T09:09:19.000Z
|
2020-07-24T03:59:55.000Z
|
study/pytorch_study/14_dropout.py
|
strawsyz/straw
|
db313c78c2e3c0355cd10c70ac25a15bb5632d41
|
[
"MIT"
] | null | null | null |
study/pytorch_study/14_dropout.py
|
strawsyz/straw
|
db313c78c2e3c0355cd10c70ac25a15bb5632d41
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import torch
n_input = 1
# n_hidden should be very large to make the effect of dropout clearer
n_hidden = 100
n_output = 1
EPOCH = 1000
LR = 0.01
torch.manual_seed(1) # reproducible
N_SAMPLES = 20
# training data
x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), 1)
y = x + 0.3 * torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))
# test data
test_x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), 1)
test_y = test_x + 0.3 * torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))
# show data
plt.scatter(x.data.numpy(), y.data.numpy(), c='magenta', s=50, alpha=0.5, label='train')
plt.scatter(test_x.data.numpy(), test_y.data.numpy(), c='cyan', s=50, alpha=0.5, label='test')
plt.legend(loc='upper left')
plt.ylim((-2.5, 2.5))
plt.show()
net_overfitting = torch.nn.Sequential(
torch.nn.Linear(n_input, n_hidden),
torch.nn.ReLU(),
torch.nn.Linear(n_hidden, n_hidden),
torch.nn.ReLU(),
torch.nn.Linear(n_hidden, n_output)
)
net_dropout = torch.nn.Sequential(
torch.nn.Linear(n_input, n_hidden),
torch.nn.Dropout(0.5),
torch.nn.ReLU(),
torch.nn.Linear(n_hidden, n_hidden),
torch.nn.Dropout(0.5),
torch.nn.ReLU(),
torch.nn.Linear(n_hidden, n_output)
)
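# Dropout(0.5) randomly zeroes half of the hidden activations during
# training; in eval() mode it is disabled automatically, which the
# plotting block below relies on.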
optimizer_overfit = torch.optim.Adam(net_overfitting.parameters(), lr=LR)
optimizer_drop = torch.optim.Adam(net_dropout.parameters(), lr=LR)
loss_func = torch.nn.MSELoss()
plt.ion()
for i in range(EPOCH):
pred_overfit = net_overfitting(x)
pred_drop = net_dropout(x)
loss_overfit = loss_func(pred_overfit, y)
loss_drop = loss_func(pred_drop, y)
optimizer_overfit.zero_grad()
optimizer_drop.zero_grad()
loss_overfit.backward()
loss_drop.backward()
optimizer_overfit.step()
optimizer_drop.step()
    # continuing from above
    if i % 10 == 0:  # plot once every 10 steps
        # switch to eval mode so the dropout behaviour is fixed during evaluation
        net_overfitting.eval()
        # dropout behaves differently in eval mode than in train mode
        net_dropout.eval()
# plotting
plt.cla()
test_pred_ofit = net_overfitting(test_x)
test_pred_drop = net_dropout(test_x)
plt.scatter(x.data.numpy(), y.data.numpy(), c='magenta', s=5, alpha=0.3, label='train')
plt.scatter(test_x.data.numpy(), test_y.data.numpy(), c='cyan', s=5, alpha=0.3, label='test')
plt.plot(test_x.data.numpy(), test_pred_ofit.data.numpy(), 'r-', lw=3, label='overfitting')
plt.plot(test_x.data.numpy(), test_pred_drop.data.numpy(), 'b--', lw=3, label='dropout(50%)')
plt.text(0, -1.2, 'overfitting loss=%.4f' % loss_func(test_pred_ofit, test_y).data.numpy(),
fontdict={'size': 12, 'color': 'red'})
plt.text(0, -1.5, 'dropout loss=%.4f' % loss_func(test_pred_drop, test_y).data.numpy(),
fontdict={'size': 12, 'color': 'orange'})
        plt.legend(loc='upper left')
        plt.ylim((-2.5, 2.5))
plt.pause(0.1)
        # switch both networks back to training mode
net_overfitting.train()
net_dropout.train()
plt.ioff()
plt.show()
| 32.326316
| 101
| 0.652231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 500
| 0.160514
|
e9b1301b28dc40f613c5048548a9e3fd67d1e1a8
| 72,649
|
py
|
Python
|
harmonica/twiss.py
|
i-a-morozov/harmonica
|
546e664e59457ad9cc354d108402137e90e0d8c2
|
[
"MIT"
] | null | null | null |
harmonica/twiss.py
|
i-a-morozov/harmonica
|
546e664e59457ad9cc354d108402137e90e0d8c2
|
[
"MIT"
] | null | null | null |
harmonica/twiss.py
|
i-a-morozov/harmonica
|
546e664e59457ad9cc354d108402137e90e0d8c2
|
[
"MIT"
] | null | null | null |
"""
Twiss module.
Compute twiss parameters from amplitude & phase data.
Twiss filtering & processing.
"""
import numpy
import torch
import pandas
from scipy import odr
from .util import mod, generate_pairs, generate_other
from .statistics import weighted_mean, weighted_variance
from .statistics import median, biweight_midvariance, standardize
from .anomaly import threshold, dbscan, local_outlier_factor, isolation_forest
from .decomposition import Decomposition
from .model import Model
from .table import Table
class Twiss():
"""
Returns
----------
Twiss class instance.
Parameters
----------
model: 'Model'
Model instance
table: 'Table'
Table instance
flag: torch.Tensor
external flags for each model location
limit: int | tuple
        range limit to use, (min, max), 1 <= min <= max, min is excluded, for full range min==max
use_model: bool
flag to use precomputed model data
Attributes
----------
model: 'Model'
Model instance
table: 'Table'
Table instance
limit: int | tuple
        range limit to use, (min, max), 1 <= min <= max, min is excluded, for full range min==max
use_model: bool
flag to use precomputed model data
dtype: torch.dtype
data type (from model)
device: torch.device
data device (from model)
flag: torch.Tensor
location flags
count: torch.Tensor
(uncoupled) range limit endpoints [1, 6, 15, 28, 45, 66, 91, 120, ...]
combo: torch.Tensor
(uncoupled) index combinations [..., [..., [[i, j], [i, k]], ...], ...]
shape: torch.Size
initial shape of combo
distance: torch.Tensor
(uncoupled) distance
fx: torch.Tensor
x phase for each location
fy: torch.Tensor
y phase for each location
sigma_fx: torch.Tensor
x phase error for each location
sigma_fy: torch.Tensor
y phase error for each location
fx_correct: torch.Tensor
corrected x phase for each location
fy_correct: torch.Tensor
corrected y phase for each location
sigma_fx_correct: torch.Tensor
corrected x phase error for each location
sigma_fy_correct: torch.Tensor
corrected y phase error for each location
virtual_x: dict
x plane virtual phase data
virtual_y: dict
y plane virtual phase data
correct_x: dict
x plane corrected phase data
correct_y: dict
y plane corrected phase data
action: dict
action data
dict_keys(['jx', 'sigma_jx', 'center_jx', 'spread_jx', 'jy', 'sigma_jy', 'center_jy', 'spread_jy', 'mask'])
data_amplitude: dict
twiss from amplitude data
dict_keys(['bx', 'sigma_bx', 'by', 'sigma_by'])
data_phase: dict
twiss from phase data
dict_keys(['fx_ij', 'sigma_fx_ij', 'fx_m_ij', 'sigma_fx_m_ij', 'fx_ik', 'sigma_fx_ik', 'fx_m_ik', 'sigma_fx_m_ik', 'fy_ij', 'sigma_fy_ij', 'fy_m_ij', 'sigma_fy_m_ij', 'fy_ik', 'sigma_fy_ik', 'fy_m_ik', 'sigma_fy_m_ik', 'ax', 'sigma_ax', 'bx', 'sigma_bx', 'ay', 'sigma_ay', 'by', 'sigma_by'])
ax: torch.Tensor
alfa x
sigma_ax: torch.Tensor
sigma alfa x
bx: torch.Tensor
beta x
sigma_bx: torch.Tensor
sigma beta x
ay: torch.Tensor
alfa y
sigma_ay: torch.Tensor
sigma alfa y
by: torch.Tensor
beta y
sigma_by: torch.Tensor
sigma beta y
Methods
----------
__init__(self, model:'Model', table:'Table', flag:torch.Tensor=None, limit:int=8, use_model:bool=False) -> None
Twiss instance initialization.
get_action(self, *, data_threshold:dict={'use': True, 'factor': 5.0}, data_dbscan:dict={'use': False, 'factor': 2.5}, data_local_outlier_factor:dict={'use': False, 'contamination': 0.01}, data_isolation_forest:dict={'use': False, 'contamination': 0.01}, bx:torch.Tensor=None, by:torch.Tensor=None, sigma_bx:torch.Tensor=None, sigma_by:torch.Tensor=None)
Estimate actions at each monitor location with optional data cleaning and estimate action center and spread.
get_twiss_from_amplitude(self) -> None
Estimate twiss from amplitude.
phase_virtual(self, limit:int=None, exclude:list=None, **kwargs) -> None
Estimate x & y phase for virtual locations.
phase_correct(self, *, limit:int=None, **kwargs) -> None
Correct x & y phase for monitor locations.
phase_alfa(a_m:torch.Tensor, f_ij:torch.Tensor, f_m_ij:torch.Tensor, f_ik:torch.Tensor, f_m_ik:torch.Tensor, *, error:bool=True, model:bool=True, sigma_a_m:torch.Tensor=0.0, sigma_f_ij:torch.Tensor=0.0, sigma_f_m_ij:torch.Tensor=0.0, sigma_f_ik:torch.Tensor=0.0, sigma_f_m_ik:torch.Tensor=0.0) -> tuple
Estimate twiss alfa at index (i) from given triplet (i, j, k) phase data.
phase_beta(b_m:torch.Tensor, f_ij:torch.Tensor, f_m_ij:torch.Tensor, f_ik:torch.Tensor, f_m_ik:torch.Tensor, *, error:bool=True, model:bool=True, sigma_b_m:torch.Tensor=0.0, sigma_f_ij:torch.Tensor=0.0, sigma_f_m_ij:torch.Tensor=0.0, sigma_f_ik:torch.Tensor=0.0, sigma_f_m_ik:torch.Tensor=0.0) -> tuple
Estimate twiss beta at index (i) from given triplet (i, j, k) phase data.
get_twiss_from_phase(self, *, virtual:bool=True, error:bool=True, model:bool=False, use_correct:bool=False, use_correct_sigma:bool=False, use_model:bool=False) -> None
Estimate twiss from phase data.
filter_twiss(self, plane:str = 'x', *, phase:dict={'use': True, 'threshold': 10.00}, model:dict={'use': True, 'threshold': 00.50}, value:dict={'use': True, 'threshold': 00.50}, sigma:dict={'use': True, 'threshold': 00.25}, limit:dict={'use': True, 'threshold': 05.00}) -> dict
Filter twiss for given data plane and cleaning options.
mask_range(self, limit:tuple) -> torch.Tensor
Generate weight mask based on given range limit.
mask_location(self, table:list) -> torch.Tensor
Generate weight mask based on given list of locations to remove.
mask_distance(self, function) -> torch.Tensor
Generate weight mask based on given distance function.
process_twiss(self, plane:str='x', *, weight:bool=True, mask:torch.Tensor=None) -> dict
Process twiss data.
get_twiss_from_data(self, n:int, x:torch.Tensor, y:torch.Tensor, *, refit:bool=False, factor:float=5.0, level:float=1.0E-6, sigma_x:torch.Tensor=None, sigma_y:torch.Tensor=None, ax:torch.Tensor=None, bx:torch.Tensor=None, ay:torch.Tensor=None, by:torch.Tensor=None, transport:torch.Tensor=None, **kwargs) -> dict
Estimate twiss from tbt data using ODR fit.
get_ax(self, index:int) -> torch.Tensor
Get ax value and error at given index.
get_bx(self, index:int) -> torch.Tensor
Get bx value and error at given index.
get_fx(self, index:int) -> torch.Tensor
Get fx value and error at given index.
get_ay(self, index:int) -> torch.Tensor
Get ay value and error at given index.
get_by(self, index:int) -> torch.Tensor
Get by value and error at given index.
get_fy(self, index:int) -> torch.Tensor
Get fy value and error at given index.
get_twiss(self, index:int) -> dict
Return twiss data at given index.
get_table(self) -> pandas.DataFrame
Return twiss data at all locations as dataframe.
__repr__(self) -> str
String representation.
__len__(self) -> int:
Number of locations.
__call__(self, limit:int=None) -> pandas.DataFrame
Perform twiss loop with default parameters.
matrix(self, probe:torch.Tensor, other:torch.Tensor) -> tuple
Generate uncoupled transport matrix (or matrices) for given locations.
make_transport(self) -> None
Set transport matrices between adjacent locations.
matrix_transport(self, probe:int, other:int) -> torch.Tensor
Generate transport matrix from probe to other using self.transport.
normal(self, probe:torch.Tensor) -> tuple
Generate uncoupled normal matrix (or matrices) for given locations.
"""
def __init__(self, model:'Model', table:'Table', flag:torch.Tensor=None, limit:int=8, use_model:bool=False) -> None:
"""
Twiss instance initialization.
Parameters
----------
model: 'Model'
Model instance
table: 'Table'
Table instance
flag: torch.Tensor
external flags for each model location
limit: int | tuple
range limit to use, (min, max), 1 <= min <= max, min is excluded, for full range min==max
use_model: bool
flag to use precomputed model data
Returns
-------
None
"""
self.model, self.table, self.limit, self.use_model = model, table, limit, use_model
self.limit = self.limit if isinstance(self.limit, tuple) else (self.limit, self.limit)
if self.use_model:
if self.model.limit is None:
raise Exception('TWISS: model limit is None')
if self.model.limit < max(self.limit):
raise Exception(f'TWISS: requested limit={self.limit} should not exceed model limit={self.model.limit}')
self.size, self.dtype, self.device = self.model.size, self.model.dtype, self.model.device
if self.model.monitor_count != self.table.size:
raise Exception(f'TWISS: expected {self.model.monitor_count} monitors in Model, got {self.table.size} in Table')
if flag is None:
self.flag = [flag if kind == self.model._monitor else 0 for flag, kind in zip(self.model.flag, self.model.kind)]
self.flag = torch.tensor(self.flag, dtype=torch.int64, device=self.device)
else:
if len(flag) != self.size:
raise Exception(f'TWISS: external flag length {len(flag)}, expected length {self.size}')
self.flag = flag.to(torch.int64).to(self.device)
if self.use_model:
self.count = self.model.count
self.combo = self.model.combo
self.index = self.model.index
else:
self.count = torch.tensor([limit*(2*limit - 1) for limit in range(1, max(self.limit) + 1)], dtype=torch.int64, device=self.device)
self.combo = [generate_other(probe, max(self.limit), self.flag) for probe in range(self.size)]
self.combo = torch.stack([generate_pairs(max(self.limit), 1 + 1, probe=probe, table=table, dtype=torch.int64, device=self.device) for probe, table in enumerate(self.combo)])
self.index = mod(self.combo, self.size).to(torch.int64)
self.shape = self.combo.shape
self.distance = torch.ones(max(self.limit)*(2*max(self.limit) - 1), dtype=self.dtype, device=self.device)
for index in self.count:
self.distance[index:] += 1.0
limit_min, limit_max = self.limit
if limit_min == limit_max:
self.count = self.count[:limit_max]
*_, count_max = self.count
self.combo = self.combo[:, :count_max]
self.index = self.index[:, :count_max]
self.distance = self.distance[:count_max]
if limit_min < limit_max:
self.count = self.count[limit_min - 1:limit_max]
count_min, *_, count_max = self.count
self.combo = self.combo[:, count_min:count_max]
self.index = self.index[:, count_min:count_max]
self.distance = self.distance[count_min:count_max]
if limit_min > limit_max:
raise Exception(f'TWISS: invalid limit={self.limit}')
self.fx = torch.zeros_like(self.model.fx)
self.fy = torch.zeros_like(self.model.fy)
self.fx[self.model.monitor_index] = self.table.fx
self.fy[self.model.monitor_index] = self.table.fy
self.sigma_fx = torch.zeros_like(self.model.sigma_fx)
self.sigma_fy = torch.zeros_like(self.model.sigma_fy)
self.sigma_fx[self.model.monitor_index] = self.table.sigma_fx
self.sigma_fy[self.model.monitor_index] = self.table.sigma_fy
self.fx_correct, self.sigma_fx_correct = torch.clone(self.fx), torch.clone(self.sigma_fx)
self.fy_correct, self.sigma_fy_correct = torch.clone(self.fy), torch.clone(self.sigma_fy)
self.virtual_x, self.correct_x = {}, {}
self.virtual_y, self.correct_y = {}, {}
self.action, self.data_amplitude, self.data_phase = {}, {}, {}
self.ax, self.sigma_ax = torch.zeros_like(self.model.ax), torch.zeros_like(self.model.sigma_ax)
self.bx, self.sigma_bx = torch.zeros_like(self.model.bx), torch.zeros_like(self.model.sigma_bx)
self.ay, self.sigma_ay = torch.zeros_like(self.model.ay), torch.zeros_like(self.model.sigma_ay)
self.by, self.sigma_by = torch.zeros_like(self.model.by), torch.zeros_like(self.model.sigma_by)
if self.use_model:
self.fx_ij, self.sigma_fx_ij = self.model.fx_ij.to(self.dtype).to(self.device), self.model.sigma_fx_ij.to(self.dtype).to(self.device)
self.fx_ik, self.sigma_fx_ik = self.model.fx_ik.to(self.dtype).to(self.device), self.model.sigma_fx_ik.to(self.dtype).to(self.device)
self.fy_ij, self.sigma_fy_ij = self.model.fy_ij.to(self.dtype).to(self.device), self.model.sigma_fy_ij.to(self.dtype).to(self.device)
self.fy_ik, self.sigma_fy_ik = self.model.fy_ik.to(self.dtype).to(self.device), self.model.sigma_fy_ik.to(self.dtype).to(self.device)
if self.use_model and flag is not None:
size, length, *_ = self.index.shape
self.mask = torch.ones((size, length)).to(torch.bool).to(self.device)
for location, flag in enumerate(self.flag):
if not flag and self.model.flag[location] != 0:
_, other = self.index.swapaxes(0, -1)
other = torch.mul(*(other != location).swapaxes(0, 1)).T
self.mask = torch.logical_and(self.mask, other)
def get_action(self, *,
data_threshold:dict={'use': True, 'factor': 5.0},
data_dbscan:dict={'use': False, 'factor': 2.5},
data_local_outlier_factor:dict={'use': False, 'contamination': 0.01},
data_isolation_forest:dict={'use': False, 'contamination': 0.01},
bx:torch.Tensor=None, by:torch.Tensor=None,
sigma_bx:torch.Tensor=None, sigma_by:torch.Tensor=None) -> None:
"""
Estimate actions at each monitor location with optional data cleaning and estimate action center and spread.
Parameters
----------
data_threshold: dict
parameters for threshold detector
data_dbscan: dict
parameters for dbscan detector
data_local_outlier_factor: dict
parameters for local outlier factor detector
data_isolation_forest: dict
parameters for isolation forest detector
bx: torch.Tensor
bx values at monitor locations
by: torch.Tensor
by values at monitor locations
sigma_bx: torch.Tensor
bx errors at monitor locations
sigma_by: torch.Tensor
by errors at monitor locations
Returns
-------
None, update self.action dictionary
"""
self.action = {}
index = self.model.monitor_index
bx = bx if bx is not None else self.model.bx[index]
by = by if by is not None else self.model.by[index]
sigma_bx = sigma_bx if sigma_bx is not None else self.model.sigma_bx[index]
sigma_by = sigma_by if sigma_by is not None else self.model.sigma_by[index]
jx = self.table.ax**2/(2.0*bx)
jy = self.table.ay**2/(2.0*by)
sigma_jx = self.table.ax**2/bx**2*self.table.sigma_ax**2
sigma_jx += self.table.ax**4/bx**4/4*sigma_bx**2
sigma_jx.sqrt_()
sigma_jy = self.table.ay**2/by**2*self.table.sigma_ay**2
sigma_jy += self.table.ay**4/by**4/4*sigma_by**2
sigma_jy.sqrt_()
mask = torch.clone(self.flag[index])
mask = torch.stack([mask, mask]).to(torch.bool)
data = standardize(torch.stack([jx, jy]), center_estimator=median, spread_estimator=biweight_midvariance)
if data_threshold['use']:
factor = data_threshold['factor']
center = median(data)
spread = biweight_midvariance(data).sqrt()
min_value, max_value = center - factor*spread, center + factor*spread
mask *= threshold(data, min_value, max_value)
if data_dbscan['use']:
factor = data_dbscan['factor']
# apply each detector to both planes (jx, jy)
for case in range(2):
mask[case] *= dbscan(data[case].reshape(-1, 1), epsilon=factor)
if data_local_outlier_factor['use']:
for case in range(2):
mask[case] *= local_outlier_factor(data[case].reshape(-1, 1), contamination=data_local_outlier_factor['contamination'])
if data_isolation_forest['use']:
for case in range(2):
mask[case] *= isolation_forest(data[case].reshape(-1, 1), contamination=data_isolation_forest['contamination'])
mask_jx, mask_jy = mask
mask_jx, mask_jy = mask_jx/sigma_jx**2, mask_jy/sigma_jy**2
center_jx = weighted_mean(jx, weight=mask_jx)
spread_jx = weighted_variance(jx, weight=mask_jx, center=center_jx).sqrt()
center_jy = weighted_mean(jy, weight=mask_jy)
spread_jy = weighted_variance(jy, weight=mask_jy, center=center_jy).sqrt()
self.action['jx'], self.action['sigma_jx'] = jx, sigma_jx
self.action['center_jx'], self.action['spread_jx'] = center_jx, spread_jx
self.action['jy'], self.action['sigma_jy'] = jy, sigma_jy
self.action['center_jy'], self.action['spread_jy'] = center_jy, spread_jy
self.action['mask'] = mask
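# The estimate above is the linear action J = a**2/(2*beta); as a hedged
# numeric illustration, an amplitude a = 1.0E-3 at beta = 10.0 gives
# J = 5.0E-8 (units follow the input data).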
def get_twiss_from_amplitude(self) -> None:
"""
Estimate twiss from amplitude.
Note, action dictionary should be precomputed
Parameters
----------
None
Returns
-------
None, update self.data_amplitude dictionary
"""
if self.action == {}:
raise Exception('TWISS: action dictionary is empty')
self.data_amplitude = {}
ax, sigma_ax = self.table.ax, self.table.sigma_ax
ay, sigma_ay = self.table.ay, self.table.sigma_ay
jx, sigma_jx = self.action['center_jx'], self.action['spread_jx']
jy, sigma_jy = self.action['center_jy'], self.action['spread_jy']
bx, by = ax**2/(2.0*jx), ay**2/(2.0*jy)
sigma_bx = torch.sqrt(ax**2/jx**2*sigma_ax**2 + 0.25*ax**4/jx**4*sigma_jx**2)
sigma_by = torch.sqrt(ay**2/jy**2*sigma_ay**2 + 0.25*ay**4/jy**4*sigma_jy**2)
index = self.model.monitor_index
bx_model, by_model = self.model.bx[index], self.model.by[index]
self.data_amplitude['bx'], self.data_amplitude['sigma_bx'] = bx, sigma_bx
self.data_amplitude['by'], self.data_amplitude['sigma_by'] = by, sigma_by
def phase_virtual(self, limit:int=None, exclude:list=None, **kwargs) -> None:
"""
Estimate x & y phase for virtual locations.
Parameters
----------
limit: int
range limit to use
exclude: list
list of virtual locations to exclude
**kwargs:
passed to Decomposition.phase_virtual
Returns
-------
None, update self.virtual_x and self.virtual_y dictionaries
"""
self.virtual_x, self.virtual_y = {}, {}
limit = max(self.limit) if limit is None else limit
exclude = [] if exclude is None else exclude
index = [index for index in self.model.virtual_index if index not in exclude]
nux, sigma_nux = self.table.nux, self.table.sigma_nux
NUX, sigma_NUX = self.model.nux, self.model.sigma_nux
nuy, sigma_nuy = self.table.nuy, self.table.sigma_nuy
NUY, sigma_NUY = self.model.nuy, self.model.sigma_nuy
fx, sigma_fx = self.fx, self.sigma_fx
FX, sigma_FX = self.model.fx, self.model.sigma_fx
fy, sigma_fy = self.fy, self.sigma_fy
FY, sigma_FY = self.model.fy, self.model.sigma_fy
def auxiliary_x(probe):
return Decomposition.phase_virtual(probe, limit, self.flag, nux, NUX, fx, FX,
sigma_frequency=sigma_nux, sigma_frequency_model=sigma_NUX,
sigma_phase=sigma_fx, sigma_phase_model=sigma_FX,
**kwargs)
def auxiliary_y(probe):
return Decomposition.phase_virtual(probe, limit, self.flag, nuy, NUY, fy, FY,
sigma_frequency=sigma_nuy, sigma_frequency_model=sigma_NUY,
sigma_phase=sigma_fy, sigma_phase_model=sigma_FY,
**kwargs)
data_x = [auxiliary_x(probe) for probe in index]
data_y = [auxiliary_y(probe) for probe in index]
for count, probe in enumerate(index):
self.virtual_x[probe], self.virtual_y[probe] = data_x[count], data_y[count]
self.fx[probe], self.sigma_fx[probe] = self.virtual_x[probe].get('model')
self.fy[probe], self.sigma_fy[probe] = self.virtual_y[probe].get('model')
def phase_correct(self, *, limit:int=None, **kwargs) -> None:
"""
Correct x & y phase for monitor locations.
Note, this introduces a strong bias towards the model, do not use a large range limit
Note, phase at the location is not used
Parameters
----------
limit: int
range limit
**kwargs:
passed to phase_virtual Decomposition method
Returns
-------
None, update self.correct_x and self.correct_y dictionaries
"""
self.correct_x, self.correct_y = {}, {}
limit = max(self.limit) if limit is None else limit
index = self.model.monitor_index
self.fx_correct, self.sigma_fx_correct = torch.clone(self.fx), torch.clone(self.sigma_fx)
self.fy_correct, self.sigma_fy_correct = torch.clone(self.fy), torch.clone(self.sigma_fy)
nux, sigma_nux = self.table.nux, self.table.sigma_nux
NUX, sigma_NUX = self.model.nux, self.model.sigma_nux
nuy, sigma_nuy = self.table.nuy, self.table.sigma_nuy
NUY, sigma_NUY = self.model.nuy, self.model.sigma_nuy
fx, sigma_fx = self.fx, self.sigma_fx
FX, sigma_FX = self.model.fx, self.model.sigma_fx
fy, sigma_fy = self.fy, self.sigma_fy
FY, sigma_FY = self.model.fy, self.model.sigma_fy
def auxiliary_x(probe):
return Decomposition.phase_virtual(probe, limit, self.flag, nux, NUX, fx, FX,
sigma_frequency=sigma_nux, sigma_frequency_model=sigma_NUX,
sigma_phase=sigma_fx, sigma_phase_model=sigma_FX,
**kwargs)
def auxiliary_y(probe):
return Decomposition.phase_virtual(probe, limit, self.flag, nuy, NUY, fy, FY,
sigma_frequency=sigma_nuy, sigma_frequency_model=sigma_NUY,
sigma_phase=sigma_fy, sigma_phase_model=sigma_FY,
**kwargs)
data_x = [auxiliary_x(probe) for probe in index]
data_y = [auxiliary_y(probe) for probe in index]
for count, probe in enumerate(index):
self.correct_x[probe], self.correct_y[probe] = data_x[count], data_y[count]
self.fx_correct[probe], self.sigma_fx_correct[probe] = self.correct_x[probe].get('model')
self.fy_correct[probe], self.sigma_fy_correct[probe] = self.correct_y[probe].get('model')
@staticmethod
def phase_alfa(a_m:torch.Tensor,
f_ij:torch.Tensor, f_m_ij:torch.Tensor,
f_ik:torch.Tensor, f_m_ik:torch.Tensor,
*,
error:bool=True, model:bool=True,
sigma_a_m:torch.Tensor=0.0,
sigma_f_ij:torch.Tensor=0.0, sigma_f_m_ij:torch.Tensor=0.0,
sigma_f_ik:torch.Tensor=0.0, sigma_f_m_ik:torch.Tensor=0.0) -> tuple:
"""
Estimate twiss alfa at index (i) from given triplet (i, j, k) phase data.
Note, probed index (i), other indices (j) and (k), pairs (i, j) and (i, k)
Phase advance is assumed to be from (i) to other indices, should be negative if (i) is ahead of the other index (timewise)
Parameters
----------
a_m: torch.Tensor
model value
f_ij: torch.Tensor
phase advance between probed and the 1st index (j)
f_m_ij: torch.Tensor
model phase advance between probed and the 1st index (j)
f_ik: torch.Tensor
phase advance between probed and the 2nd index (k)
f_m_ik: torch.Tensor
model phase advance between probed and 2nd index (k)
error: bool
flag to compute error
model: bool
flag to include model error
sigma_a_m: torch.Tensor
model value error
sigma_f_ij: torch.Tensor
phase advance error between probed and the 1st index (j)
sigma_f_m_ij: torch.Tensor
model phase advance error between probed and the 1st index (j)
sigma_f_ik: torch.Tensor
phase advance error between probed and the 2nd index (k)
sigma_f_m_ik: torch.Tensor
model phase advance error between probed and the 2nd index (k)
Returns
-------
(a, 0) or (a, sigma_a)
"""
a = a_m*(1.0/torch.tan(f_ij)-1.0/torch.tan(f_ik))/(1.0/torch.tan(f_m_ij)-1.0/torch.tan(f_m_ik))-1.0/torch.tan(f_ij)*1.0/torch.sin(f_m_ij - f_m_ik)*torch.cos(f_m_ik)*torch.sin(f_m_ij) + 1.0/torch.tan(f_ik)*1.0/torch.sin(f_m_ij - f_m_ik)*torch.cos(f_m_ij)*torch.sin(f_m_ik)
if not error:
return (a, torch.zeros_like(a))
sigma_a = sigma_f_ij**2*(1.0/torch.sin(f_ij))**4*(1.0/torch.tan(f_m_ik) + a_m)**2/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**2
sigma_a += sigma_f_ik**2*(1.0/torch.sin(f_ik))**4*(1.0/torch.tan(f_m_ij) + a_m)**2/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**2
if model:
sigma_a += sigma_a_m**2*((1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))**2/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**2)
sigma_a += sigma_f_m_ik**2*(1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))**2*(1.0/torch.sin(f_m_ij - f_m_ik))**4*torch.sin(f_m_ij)**2*(torch.cos(f_m_ij) + a_m*torch.sin(f_m_ij))**2
sigma_a += sigma_f_m_ij**2*(1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))**2*(1.0/torch.sin(f_m_ij - f_m_ik))**4*torch.sin(f_m_ik)**2*(torch.cos(f_m_ik) + a_m*torch.sin(f_m_ik))**2
sigma_a.sqrt_()
return (a, sigma_a)
@staticmethod
def phase_beta(b_m:torch.Tensor,
f_ij:torch.Tensor, f_m_ij:torch.Tensor,
f_ik:torch.Tensor, f_m_ik:torch.Tensor,
*,
error:bool=True, model:bool=True,
sigma_b_m:torch.Tensor=0.0,
sigma_f_ij:torch.Tensor=0.0, sigma_f_m_ij:torch.Tensor=0.0,
sigma_f_ik:torch.Tensor=0.0, sigma_f_m_ik:torch.Tensor=0.0) -> tuple:
"""
Estimate twiss beta at index (i) from given triplet (i, j, k) phase data.
Note, probed index (i), other indices (j) and (k), pairs (i, j) and (i, k)
Phase advance is assumed to be from (i) to other indices, should be negative if (i) is ahead of the other index (timewise)
Parameters
----------
b_m: torch.Tensor
model value
f_ij: torch.Tensor
phase advance between probed and the 1st index (j)
f_m_ij: torch.Tensor
model phase advance between probed and the 1st index (j)
f_ik: torch.Tensor
phase advance between probed and the 2nd index (k)
f_m_ik: torch.Tensor
model phase advance between probed and 2nd index (k)
error: bool
flag to compute error
model: bool
flag to include model error
sigma_b_m: torch.Tensor
model value error
sigma_f_ij: torch.Tensor
phase advance error between probed and the 1st index (j)
sigma_f_m_ij: torch.Tensor
model phase advance error between probed and the 1st index (j)
sigma_f_ik: torch.Tensor
phase advance error between probed and the 2nd index (k)
sigma_f_m_ik: torch.Tensor
model phase advance error between probed and the 2nd index (k)
Returns
-------
(b, 0) or (b, sigma_b)
"""
b = b_m*(1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))
if not error:
return (b, torch.zeros_like(b))
sigma_b = sigma_f_ij**2*b_m**2*(1.0/torch.sin(f_ij))**4/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**2
sigma_b += sigma_f_ik**2*b_m**2*(1.0/torch.sin(f_ik))**4/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**2
if model:
sigma_b += sigma_b_m**2*(1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))**2/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**2
sigma_b += sigma_f_m_ij**2*b_m**2*(1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))**2*(1.0/torch.sin(f_m_ij))**4/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**4
sigma_b += sigma_f_m_ik**2*b_m**2*(1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))**2*(1.0/torch.sin(f_m_ik))**4/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**4
sigma_b.sqrt_()
return (b, sigma_b)
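# A hedged consistency check for phase_alfa/phase_beta (values are
# illustrative): when measured phase advances equal the model ones,
# beta reduces to the model value with zero error:
#
#     f_ij, f_ik = torch.tensor(0.3), torch.tensor(-0.7)
#     b, sigma_b = Twiss.phase_beta(torch.tensor(2.5), f_ij, f_ij, f_ik, f_ik, error=False)
#     assert b == 2.5 and sigma_b == 0.0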
def get_twiss_from_phase(self, *, virtual:bool=True, error:bool=True, model:bool=False,
use_correct:bool=False, use_correct_sigma:bool=False, use_model:bool=False) -> None:
"""
Estimate twiss from phase data.
Note, raw data is saved, no cleaning is performed
Values (and errors) are computed for each triplet
Parameters
----------
error: bool
flag to compute twiss errors
model: bool
flag to include model error
use_correct: bool
flag to use corrected phases
use_correct_sigma: bool
flag to use corrected phase errors
use_model: bool
flag to use precomputed model data
Returns
-------
None, update self.data_phase dictionary
"""
self.data_phase = {}
fx = self.fx_correct if use_correct else self.fx
fy = self.fy_correct if use_correct else self.fy
sigma_fx = self.sigma_fx_correct if use_correct_sigma else self.sigma_fx
sigma_fy = self.sigma_fy_correct if use_correct_sigma else self.sigma_fy
ax_m, bx_m = self.model.ax, self.model.bx
ay_m, by_m = self.model.ay, self.model.by
index = self.combo.swapaxes(0, -1)
value, sigma = Decomposition.phase_advance(*index, self.table.nux, fx, error=error, model=False, sigma_frequency=self.table.sigma_nux, sigma_phase=sigma_fx)
fx_ij, fx_ik = value.swapaxes(0, 1)
sx_ij, sx_ik = sigma.swapaxes(0, 1)
value, sigma = Decomposition.phase_advance(*index, self.table.nuy, fy, error=error, model=False, sigma_frequency=self.table.sigma_nuy, sigma_phase=sigma_fy)
fy_ij, fy_ik = value.swapaxes(0, 1)
sy_ij, sy_ik = sigma.swapaxes(0, 1)
if use_model:
fx_m_ij, fx_m_ik = self.fx_ij, self.fx_ik
sx_m_ij, sx_m_ik = self.sigma_fx_ij, self.sigma_fx_ik
fy_m_ij, fy_m_ik = self.fy_ij, self.fy_ik
sy_m_ij, sy_m_ik = self.sigma_fy_ij, self.sigma_fy_ik
else:
value, sigma = Decomposition.phase_advance(*index, self.model.nux, self.model.fx, error=error*model, model=True, sigma_frequency=self.model.sigma_nux, sigma_phase=self.model.sigma_fx)
fx_m_ij, fx_m_ik = value.swapaxes(0, 1)
sx_m_ij, sx_m_ik = sigma.swapaxes(0, 1)
value, sigma = Decomposition.phase_advance(*index, self.model.nuy, self.model.fy, error=error*model, model=True, sigma_frequency=self.model.sigma_nuy, sigma_phase=self.model.sigma_fy)
fy_m_ij, fy_m_ik = value.swapaxes(0, 1)
sy_m_ij, sy_m_ik = sigma.swapaxes(0, 1)
ax, sigma_ax = self.phase_alfa(ax_m, fx_ij, fx_m_ij, fx_ik, fx_m_ik, error=error, model=model, sigma_a_m=self.model.sigma_ax, sigma_f_ij=sx_ij, sigma_f_ik=sx_ik, sigma_f_m_ij=sx_m_ij, sigma_f_m_ik=sx_m_ik)
bx, sigma_bx = self.phase_beta(bx_m, fx_ij, fx_m_ij, fx_ik, fx_m_ik, error=error, model=model, sigma_b_m=self.model.sigma_bx, sigma_f_ij=sx_ij, sigma_f_ik=sx_ik, sigma_f_m_ij=sx_m_ij, sigma_f_m_ik=sx_m_ik)
ay, sigma_ay = self.phase_alfa(ay_m, fy_ij, fy_m_ij, fy_ik, fy_m_ik, error=error, model=model, sigma_a_m=self.model.sigma_ay, sigma_f_ij=sy_ij, sigma_f_ik=sy_ik, sigma_f_m_ij=sy_m_ij, sigma_f_m_ik=sy_m_ik)
by, sigma_by = self.phase_beta(by_m, fy_ij, fy_m_ij, fy_ik, fy_m_ik, error=error, model=model, sigma_b_m=self.model.sigma_by, sigma_f_ij=sy_ij, sigma_f_ik=sy_ik, sigma_f_m_ij=sy_m_ij, sigma_f_m_ik=sy_m_ik)
self.data_phase['fx_ij'], self.data_phase['sigma_fx_ij'], self.data_phase['fx_m_ij'], self.data_phase['sigma_fx_m_ij'] = fx_ij.T, sx_ij.T, fx_m_ij.T, sx_m_ij.T
self.data_phase['fx_ik'], self.data_phase['sigma_fx_ik'], self.data_phase['fx_m_ik'], self.data_phase['sigma_fx_m_ik'] = fx_ik.T, sx_ik.T, fx_m_ik.T, sx_m_ik.T
self.data_phase['fy_ij'], self.data_phase['sigma_fy_ij'], self.data_phase['fy_m_ij'], self.data_phase['sigma_fy_m_ij'] = fy_ij.T, sy_ij.T, fy_m_ij.T, sy_m_ij.T
self.data_phase['fy_ik'], self.data_phase['sigma_fy_ik'], self.data_phase['fy_m_ik'], self.data_phase['sigma_fy_m_ik'] = fy_ik.T, sy_ik.T, fy_m_ik.T, sy_m_ik.T
self.data_phase['ax'], self.data_phase['sigma_ax'], self.data_phase['bx'], self.data_phase['sigma_bx'] = ax.T, sigma_ax.T, bx.T, sigma_bx.T
self.data_phase['ay'], self.data_phase['sigma_ay'], self.data_phase['by'], self.data_phase['sigma_by'] = ay.T, sigma_ay.T, by.T, sigma_by.T
def filter_twiss(self, plane:str = 'x', *,
phase:dict={'use': True, 'threshold': 10.00},
model:dict={'use': True, 'threshold': 00.50},
value:dict={'use': True, 'threshold': 00.50},
sigma:dict={'use': True, 'threshold': 00.25},
limit:dict={'use': True, 'threshold': 05.00}) -> dict:
"""
Filter twiss for given data plane and cleaning options.
Parameters
----------
plane: str
data plane ('x' or 'y')
phase: dict
clean based on advance phase data
used if 'use' is True, remove combinations with absolute value of phase advance cotangents above threshold value
model: dict
clean based on phase advance proximity to model
used if 'use' is True, remove combinations with (x - x_model)/x_model > threshold value
value: dict
clean based on estimated twiss beta error value
used if 'use' is True, remove combinations with x/sigma_x < 1/threshold value
sigma: dict
clean based on estimated phase advance error value
used if 'use' is True, remove combinations with x/sigma_x < 1/threshold value
limit: dict
clean outliers outside scaled interval
used if 'use' is True
Returns
-------
mask (torch.Tensor)
"""
size, length, *_ = self.index.shape
mask = torch.ones((size, length), device=self.device).to(torch.bool)
if plane == 'x':
a_m, b_m = self.model.ax.reshape(-1, 1), self.model.bx.reshape(-1, 1)
a, b, sigma_a, sigma_b = self.data_phase['ax'], self.data_phase['bx'], self.data_phase['sigma_ax'], self.data_phase['sigma_bx']
f_ij, sigma_f_ij, f_m_ij, sigma_f_m_ij = self.data_phase['fx_ij'], self.data_phase['sigma_fx_ij'], self.data_phase['fx_m_ij'], self.data_phase['sigma_fx_m_ij']
f_ik, sigma_f_ik, f_m_ik, sigma_f_m_ik = self.data_phase['fx_ik'], self.data_phase['sigma_fx_ik'], self.data_phase['fx_m_ik'], self.data_phase['sigma_fx_m_ik']
if plane == 'y':
a_m, b_m = self.model.ay.reshape(-1, 1), self.model.by.reshape(-1, 1)
a, b, sigma_a, sigma_b = self.data_phase['ay'], self.data_phase['by'], self.data_phase['sigma_ay'], self.data_phase['sigma_by']
f_ij, sigma_f_ij, f_m_ij, sigma_f_m_ij = self.data_phase['fy_ij'], self.data_phase['sigma_fy_ij'], self.data_phase['fy_m_ij'], self.data_phase['sigma_fy_m_ij']
f_ik, sigma_f_ik, f_m_ik, sigma_f_m_ik = self.data_phase['fy_ik'], self.data_phase['sigma_fy_ik'], self.data_phase['fy_m_ik'], self.data_phase['sigma_fy_m_ik']
if phase['use']:
cot_ij, cot_m_ij = torch.abs(1.0/torch.tan(f_ij)), torch.abs(1.0/torch.tan(f_m_ij))
cot_ik, cot_m_ik = torch.abs(1.0/torch.tan(f_ik)), torch.abs(1.0/torch.tan(f_m_ik))
mask *= phase['threshold'] > cot_ij
mask *= phase['threshold'] > cot_m_ij
mask *= phase['threshold'] > cot_ik
mask *= phase['threshold'] > cot_m_ik
if model['use']:
mask *= model['threshold'] > torch.abs((f_ij - f_m_ij)/f_m_ij)
mask *= model['threshold'] > torch.abs((f_ik - f_m_ik)/f_m_ik)
if value['use']:
mask *= value['threshold'] > torch.abs((b - b_m)/b_m)
if sigma['use']:
mask *= 1/sigma['threshold'] < torch.abs(f_ij/sigma_f_ij)
mask *= 1/sigma['threshold'] < torch.abs(f_ik/sigma_f_ik)
if limit['use']:
factor = torch.tensor(limit['threshold'], dtype=self.dtype, device=self.device)
mask *= threshold(standardize(a, center_estimator=median, spread_estimator=biweight_midvariance), -factor, +factor)
mask *= threshold(standardize(b, center_estimator=median, spread_estimator=biweight_midvariance), -factor, +factor)
return mask
def mask_range(self, limit:tuple) -> torch.Tensor:
"""
Generate weight mask based on given range limit.
Parameters
----------
limit: tuple
range limit to use, (min, max), 1 <= min <= max, min is excluded, for full range min==max
Returns
-------
weight mask (torch.Tensor)
"""
size, length, *_ = self.shape
mask = torch.zeros((size, length), dtype=torch.int64, device=self.device)
count = torch.tensor([limit*(2*limit - 1) for limit in range(1, max(self.limit) + 1)], dtype=torch.int64, device=self.device)
limit_min, limit_max = limit
if limit_min == limit_max:
count = count[:limit_max]
*_, count_max = count
mask[:, :count_max] = 1
if limit_min < limit_max:
count = count[limit_min - 1:limit_max]
count_min, *_, count_max = count
mask[:, count_min:count_max] = 1
count = torch.tensor([limit*(2*limit - 1) for limit in range(1, max(self.limit) + 1)], dtype=torch.int64, device=self.device)
limit_min, limit_max = self.limit
if limit_min == limit_max:
count = count[:limit_max]
*_, count_max = count
mask = mask[:, :count_max]
if limit_min < limit_max:
count = count[limit_min - 1:limit_max]
count_min, *_, count_max = count
mask = mask[:, count_min:count_max]
return mask
def mask_location(self, table:list) -> torch.Tensor:
"""
Generate weight mask based on given list of locations to remove.
Parameters
----------
table: list
list of locations to remove
Returns
-------
weight mask (torch.Tensor)
"""
size, length, *_ = self.combo.shape
mask = torch.ones((size, length), dtype=torch.bool, device=self.device)
for location in table:
_, other = self.index.swapaxes(0, -1)
other = torch.mul(*(other != location).swapaxes(0, 1)).T
mask = torch.logical_and(mask, other)
return mask
def mask_distance(self, function) -> torch.Tensor:
"""
Generate weight mask based on given distance function.
Parameters
----------
function: Callable
function to apply to distance data
Returns
-------
weight mask (torch.Tensor)
"""
mask = torch.stack([function(distance) for distance in self.distance])
mask = torch.stack([mask for _ in range(self.size)])
return mask
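# Hedged usage sketch for the mask helpers above (names are illustrative,
# assuming `twiss` is a Twiss instance):
#
#     mask = twiss.mask_range((1, 4))
#     mask = mask*twiss.mask_location([10, 11])
#     mask = mask*twiss.mask_distance(lambda distance: distance < 3.0)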
def process_twiss(self, plane:str='x', *,
weight:bool=True, mask:torch.Tensor=None) -> dict:
"""
Process twiss data.
Parameters
----------
plane: str
data plane ('x' or 'y')
weight: bool
flag to use weights
mask: torch.Tensor
mask
Returns
-------
twiss data (dict)
dict_keys(['value_a', 'sigma_a', 'error_a', 'value_b', 'sigma_b', 'error_b'])
"""
result = {}
if mask is None:
size, length, *_ = self.index.shape
mask = torch.ones((size, length), device=self.device).to(torch.bool)
if plane == 'x':
a, sigma_a, a_m = self.data_phase['ax'], self.data_phase['sigma_ax'], self.model.ax
b, sigma_b, b_m = self.data_phase['bx'], self.data_phase['sigma_bx'], self.model.bx
if plane == 'y':
a, sigma_a, a_m = self.data_phase['ay'], self.data_phase['sigma_ay'], self.model.ay
b, sigma_b, b_m = self.data_phase['by'], self.data_phase['sigma_by'], self.model.by
if not weight:
center = weighted_mean(a, weight=mask)
spread = weighted_variance(a, weight=mask, center=center).sqrt()
result['value_a'] = center
result['sigma_a'] = spread
result['error_a'] = (center - a_m)/a_m
center = weighted_mean(b, weight=mask)
spread = weighted_variance(b, weight=mask, center=center).sqrt()
result['value_b'] = center
result['sigma_b'] = spread
result['error_b'] = (center - b_m)/b_m
return result
weight = (mask.to(self.dtype)/sigma_a**2).nan_to_num(posinf=0.0, neginf=0.0)
center = weighted_mean(a, weight=weight)
spread = weighted_variance(a, weight=weight, center=center).sqrt()
result['value_a'] = center
result['sigma_a'] = spread
result['error_a'] = (center - a_m)/a_m
weight = (mask.to(self.dtype)/sigma_b**2).nan_to_num(posinf=0.0, neginf=0.0)
center = weighted_mean(b, weight=weight)
spread = weighted_variance(b, weight=weight, center=center).sqrt()
result['value_b'] = center
result['sigma_b'] = spread
result['error_b'] = (center - b_m)/b_m
if plane == 'x':
self.ax, self.sigma_ax = result['value_a'], result['sigma_a']
self.bx, self.sigma_bx = result['value_b'], result['sigma_b']
if plane == 'y':
self.ay, self.sigma_ay = result['value_a'], result['sigma_a']
self.by, self.sigma_by = result['value_b'], result['sigma_b']
return result
def get_twiss_from_data(self, n:int, x:torch.Tensor, y:torch.Tensor, *,
refit:bool=False, factor:float=5.0,
level:float=1.0E-6, sigma_x:torch.Tensor=None, sigma_y:torch.Tensor=None,
ax:torch.Tensor=None, bx:torch.Tensor=None, ay:torch.Tensor=None, by:torch.Tensor=None,
transport:torch.Tensor=None, **kwargs) -> dict:
"""
Estimate twiss from tbt data using ODR fit.
Note, if no initial guesses for twiss and/or transport are given, model values will be used
This method is sensitive to noise and calibration errors
Parameters
----------
n: int
number of turns to use
x: torch.Tensor
x data
y: torch.Tensor
y data
refit: bool
flag to refit twiss using estimated invariants
factor: float
threshold factor for invariants spread
level: float
default noise level
sigma_x: torch.Tensor
x noise sigma for each signal
sigma_y: torch.Tensor
y noise sigma for each signal
ax, bx, ay, by: torch.Tensor
initial guess for twiss parameters at monitor locations
transport: torch.Tensor
transport matrices between monitor locations
Returns
-------
fit result (dict)
dict_keys(['jx', 'ax', 'bx', 'sigma_jx', 'sigma_ax', 'sigma_bx', 'jy', 'ay', 'by', 'sigma_jy', 'sigma_ay', 'sigma_by', 'mux', 'muy'])
"""
if ax is None:
ax = self.model.ax[self.model.monitor_index].cpu().numpy()
else:
ax = ax.cpu().numpy()
if bx is None:
bx = self.model.bx[self.model.monitor_index].cpu().numpy()
else:
bx = bx.cpu().numpy()
if ay is None:
ay = self.model.ay[self.model.monitor_index].cpu().numpy()
else:
ay = ay.cpu().numpy()
if by is None:
by = self.model.by[self.model.monitor_index].cpu().numpy()
else:
by = by.cpu().numpy()
if transport is None:
probe = torch.tensor(self.model.monitor_index, dtype=torch.int64, device=self.device)
other = torch.roll(probe, -1)
other[-1] += self.model.size
transport = self.model.matrix(probe, other)
copy = torch.clone(transport)
def ellipse(w, x):
alpha, beta, action = w
q1, q2, m11, m12 = x
return 1/beta*(q1**2 + (alpha*q1 + beta*(q2 - q1*m11)/m12)**2) - action
value_jx, error_jx = [], []
value_jy, error_jy = [], []
value_ax, error_ax = [], []
value_ay, error_ay = [], []
value_bx, error_bx = [], []
value_by, error_by = [], []
for i in range(self.model.monitor_count):
q1 = x[i, :n].cpu().numpy()
q2 = x[int(mod(i + 1, self.model.monitor_count)), :n].cpu().numpy()
if i + 1 == self.model.monitor_count:
q2 = x[int(mod(i + 1, self.model.monitor_count)), 1:n+1].cpu().numpy()
if sigma_x is not None:
s1, s2 = sigma_x[i].cpu().numpy(), sigma_x[int(mod(i + 1, self.model.monitor_count))].cpu().numpy()
else:
s1, s2 = level, level
m11 = transport[i, 0, 0].cpu().numpy()
m12 = transport[i, 0, 1].cpu().numpy()
alpha, beta = ax[i], bx[i]
action = numpy.median(1/beta*(q1**2 + (alpha*q1 + beta*(q2 - q1*m11)/m12)**2))
m11 = m11*numpy.ones(n)
m12 = m12*numpy.ones(n)
X = numpy.array([q1, q2, m11, m12])
data = odr.RealData(X, y=1, sx=[s1, s2, level, level], sy=1.0E-16)
model = odr.Model(ellipse, implicit=True)
fit = odr.ODR(data, model, beta0=[alpha, beta, action], **kwargs).run()
alpha, beta, action = fit.beta
sigma_alpha, sigma_beta, sigma_action = fit.sd_beta
value_jx.append(action)
value_ax.append(alpha)
value_bx.append(beta)
error_jx.append(sigma_action)
error_ax.append(sigma_alpha)
error_bx.append(sigma_beta)
q1 = y[i, :n].cpu().numpy()
q2 = y[int(mod(i + 1, self.model.monitor_count)), :n].cpu().numpy()
if i + 1 == self.model.monitor_count:
q2 = y[int(mod(i + 1, self.model.monitor_count)), 1:n+1].cpu().numpy()
if sigma_y is not None:
s1, s2 = sigma_y[i].cpu().numpy(), sigma_y[int(mod(i + 1, self.model.monitor_count))].cpu().numpy()
else:
s1, s2 = level, level
m11 = transport[i, 2, 2].cpu().numpy()
m12 = transport[i, 2, 3].cpu().numpy()
alpha, beta = ay[i], by[i]
action = numpy.median(1/beta*(q1**2 + (alpha*q1 + beta*(q2 - q1*m11)/m12)**2))
m11 = m11*numpy.ones(n)
m12 = m12*numpy.ones(n)
X = numpy.array([q1, q2, m11, m12])
data = odr.RealData(X, y=1, sx=[s1, s2, level, level], sy=1.0E-16)
model = odr.Model(ellipse, implicit=True)
fit = odr.ODR(data, model, beta0=[alpha, beta, action], **kwargs).run()
alpha, beta, action = fit.beta
sigma_alpha, sigma_beta, sigma_action = fit.sd_beta
value_jy.append(action)
value_ay.append(alpha)
value_by.append(beta)
error_jy.append(sigma_action)
error_ay.append(sigma_alpha)
error_by.append(sigma_beta)
result = {}
result['center_jx'] = None
result['spread_jx'] = None
result['center_jy'] = None
result['spread_jy'] = None
result['jx'] = 0.5*torch.tensor(value_jx, dtype=self.dtype, device=self.device)
result['ax'] = torch.tensor(value_ax, dtype=self.dtype, device=self.device)
result['bx'] = torch.tensor(value_bx, dtype=self.dtype, device=self.device)
result['sigma_jx'] = 0.5*torch.tensor(error_jx, dtype=self.dtype, device=self.device)
result['sigma_ax'] = torch.tensor(error_ax, dtype=self.dtype, device=self.device)
result['sigma_bx'] = torch.tensor(error_bx, dtype=self.dtype, device=self.device)
result['jy'] = 0.5*torch.tensor(value_jy, dtype=self.dtype, device=self.device)
result['ay'] = torch.tensor(value_ay, dtype=self.dtype, device=self.device)
result['by'] = torch.tensor(value_by, dtype=self.dtype, device=self.device)
result['sigma_jy'] = 0.5*torch.tensor(error_jy, dtype=self.dtype, device=self.device)
result['sigma_ay'] = torch.tensor(error_ay, dtype=self.dtype, device=self.device)
result['sigma_by'] = torch.tensor(error_by, dtype=self.dtype, device=self.device)
factor = torch.tensor(factor, dtype=self.dtype, device=self.device)
mask_jx = threshold(standardize(result['jx'], center_estimator=median, spread_estimator=biweight_midvariance), -factor, +factor)
mask_jx = mask_jx.squeeze()/(result['sigma_jx']/result['sigma_jx'].sum())**2
center_jx = weighted_mean(result['jx'], weight=mask_jx)
spread_jx = weighted_variance(result['jx'], weight=mask_jx, center=center_jx).sqrt()
mask_jy = threshold(standardize(result['jy'], center_estimator=median, spread_estimator=biweight_midvariance), -factor, +factor)
mask_jy = mask_jy.squeeze()/(result['sigma_jy']/result['sigma_jy'].sum())**2
center_jy = weighted_mean(result['jy'], weight=mask_jy)
spread_jy = weighted_variance(result['jy'], weight=mask_jy, center=center_jy).sqrt()
result['center_jx'] = center_jx
result['spread_jx'] = spread_jx
result['center_jy'] = center_jy
result['spread_jy'] = spread_jy
advance = []
for i in range(self.model.monitor_count):
normal = self.model.cs_normal(result['ax'][i], result['bx'][i], result['ay'][i], result['by'][i])
values, _ = self.model.advance_twiss(normal, transport[i])
advance.append(values)
advance = torch.stack(advance).T
result['mux'], result['muy'] = advance
if not refit:
return result
def ellipse(w, x):
alpha, beta = w
q1, q2, m11, m12 = x
return 1/beta*(q1**2 + (alpha*q1 + beta*(q2 - q1*m11)/m12)**2) - action
value_ax, error_ax = [], []
value_ay, error_ay = [], []
value_bx, error_bx = [], []
value_by, error_by = [], []
for i in range(self.model.monitor_count):
action = 2.0*center_jx.cpu().numpy()
q1 = x[i, :n].cpu().numpy()
q2 = x[int(mod(i + 1, self.model.monitor_count)), :n].cpu().numpy()
if i + 1 == self.model.monitor_count:
q2 = x[int(mod(i + 1, self.model.monitor_count)), 1:n+1].cpu().numpy()
if sigma_x is not None:
s1, s2 = sigma_x[i].cpu().numpy(), sigma_x[int(mod(i + 1, self.model.monitor_count))].cpu().numpy()
else:
s1, s2 = level, level
m11 = transport[i, 0, 0].cpu().numpy()
m12 = transport[i, 0, 1].cpu().numpy()
alpha, beta = result['ax'][i].cpu().numpy(), result['bx'][i].cpu().numpy()
m11 = m11*numpy.ones(n)
m12 = m12*numpy.ones(n)
X = numpy.array([q1, q2, m11, m12])
data = odr.RealData(X, y=1, sx=[s1, s2, level, level], sy=1.0E-16)
model = odr.Model(ellipse, implicit=True)
fit = odr.ODR(data, model, beta0=[alpha, beta], **kwargs).run()
alpha, beta = fit.beta
sigma_alpha, sigma_beta = fit.sd_beta
value_ax.append(alpha)
value_bx.append(beta)
error_ax.append(sigma_alpha)
error_bx.append(sigma_beta)
action = 2.0*center_jy.cpu().numpy()
q1 = y[i, :n].cpu().numpy()
q2 = y[int(mod(i + 1, self.model.monitor_count)), :n].cpu().numpy()
if i + 1 == self.model.monitor_count:
q2 = y[int(mod(i + 1, self.model.monitor_count)), 1:n+1].cpu().numpy()
if sigma_y is not None:
s1, s2 = sigma_y[i].cpu().numpy(), sigma_y[int(mod(i + 1, self.model.monitor_count))].cpu().numpy()
else:
s1, s2 = level, level
m11 = transport[i, 2, 2].cpu().numpy()
m12 = transport[i, 2, 3].cpu().numpy()
alpha, beta = result['ay'][i].cpu().numpy(), result['by'][i].cpu().numpy()
m11 = m11*numpy.ones(n)
m12 = m12*numpy.ones(n)
X = numpy.array([q1, q2, m11, m12])
data = odr.RealData(X, y=1, sx=[s1, s2, level, level], sy=1.0E-16)
model = odr.Model(ellipse, implicit=True)
fit = odr.ODR(data, model, beta0=[alpha, beta], **kwargs).run()
alpha, beta = fit.beta
sigma_alpha, sigma_beta = fit.sd_beta
value_ay.append(alpha)
value_by.append(beta)
error_ay.append(sigma_alpha)
error_by.append(sigma_beta)
result['ax'] = torch.tensor(value_ax, dtype=self.dtype, device=self.device)
result['bx'] = torch.tensor(value_bx, dtype=self.dtype, device=self.device)
result['sigma_ax'] = torch.tensor(error_ax, dtype=self.dtype, device=self.device)
result['sigma_bx'] = torch.tensor(error_bx, dtype=self.dtype, device=self.device)
result['ay'] = torch.tensor(value_ay, dtype=self.dtype, device=self.device)
result['by'] = torch.tensor(value_by, dtype=self.dtype, device=self.device)
result['sigma_ay'] = torch.tensor(error_ay, dtype=self.dtype, device=self.device)
result['sigma_by'] = torch.tensor(error_by, dtype=self.dtype, device=self.device)
advance = []
for i in range(self.model.monitor_count):
normal = self.model.cs_normal(result['ax'][i], result['bx'][i], result['ay'][i], result['by'][i])
values, _ = self.model.advance_twiss(normal, transport[i])
advance.append(values)
advance = torch.stack(advance).T
result['mux'], result['muy'] = advance
return result
def get_ax(self, index:int) -> torch.Tensor:
"""
Get ax value and error at given index.
Parameters
----------
index: int
index or location name
Returns
-------
[ax, sigma_ax] (torch.Tensor)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_ax(self.model.get_index(index))
index = int(mod(index, self.size))
return torch.stack([self.ax[index], self.sigma_ax[index]])
def get_bx(self, index:int) -> torch.Tensor:
"""
Get bx value and error at given index.
Parameters
----------
index: int
index or location name
Returns
-------
[bx, sigma_bx] (torch.Tensor)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_bx(self.model.get_index(index))
index = int(mod(index, self.size))
return torch.stack([self.bx[index], self.sigma_bx[index]])
def get_fx(self, index:int) -> torch.Tensor:
"""
Get fx value and error at given index.
Parameters
----------
index: int
index or location name
Returns
-------
[fx, sigma_fx] (torch.Tensor)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_fx(self.model.get_index(index))
index = int(mod(index, self.size))
return torch.stack([self.fx[index], self.sigma_fx[index]])
def get_ay(self, index:int) -> torch.Tensor:
"""
Get ay value and error at given index.
Parameters
----------
index: int
index or location name
Returns
-------
[ay, sigma_ay] (torch.Tensor)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_ay(self.model.get_index(index))
index = int(mod(index, self.size))
return torch.stack([self.ay[index], self.sigma_ay[index]])
def get_by(self, index:int) -> torch.Tensor:
"""
Get by value and error at given index.
Parameters
----------
index: int
index or location name
Returns
-------
[by, sigma_by] (torch.Tensor)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_by(self.model.get_index(index))
index = int(mod(index, self.size))
return torch.stack([self.by[index], self.sigma_by[index]])
def get_fy(self, index:int) -> torch.Tensor:
"""
Get fy value and error at given index.
Parameters
----------
index: int
index or location name
Returns
-------
[fy, sigma_fy] (torch.Tensor)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_fy(self.model.get_index(index))
index = int(mod(index, self.size))
return torch.stack([self.fy[index], self.sigma_fy[index]])
def get_twiss(self, index:int) -> dict:
"""
Return twiss data at given index.
Parameters
----------
index: int
index or location name
Returns
-------
twiss data (dict)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_twiss(self.model.get_index(index))
table = {}
table['ax'], table['sigma_ax'] = self.get_ax(index)
table['bx'], table['sigma_bx'] = self.get_bx(index)
table['fx'], table['sigma_fx'] = self.get_fx(index)
table['ay'], table['sigma_ay'] = self.get_ay(index)
table['by'], table['sigma_by'] = self.get_by(index)
table['fy'], table['sigma_fy'] = self.get_fy(index)
return table
def get_table(self) -> pandas.DataFrame:
"""
Return twiss data at all locations as dataframe.
Parameters
----------
None
Returns
-------
twiss data (pandas.DataFrame)
"""
df = pandas.DataFrame()
df['name'] = self.model.name
df['kind'] = self.model.kind
df['flag'] = self.flag.cpu().numpy()
df['time'] = self.model.time.cpu().numpy()
df['ax'], df['sigma_ax'] = self.ax.cpu().numpy(), self.sigma_ax.cpu().numpy()
df['bx'], df['sigma_bx'] = self.bx.cpu().numpy(), self.sigma_bx.cpu().numpy()
df['fx'], df['sigma_fx'] = self.fx.cpu().numpy(), self.sigma_fx.cpu().numpy()
df['ay'], df['sigma_ay'] = self.ay.cpu().numpy(), self.sigma_ay.cpu().numpy()
df['by'], df['sigma_by'] = self.by.cpu().numpy(), self.sigma_by.cpu().numpy()
df['fy'], df['sigma_fy'] = self.fy.cpu().numpy(), self.sigma_fy.cpu().numpy()
return df
def __repr__(self) -> str:
"""
String representation.
"""
return f'{self.__class__.__name__}({self.model}, {self.table}, {self.limit})'
def __len__(self) -> int:
"""
Number of locations.
"""
return self.size
def __call__(self, limit:int=None) -> pandas.DataFrame:
"""
Perform twiss loop with default parameters.
Parameters
----------
limit: int
range limit for virtual phase computation
Returns
-------
twiss table (pandas.DataFrame)
"""
limit = max(self.limit) if limit is None else limit
self.get_action()
self.get_twiss_from_amplitude()
self.phase_virtual(limit=limit)
self.get_twiss_from_phase()
select = {
'phase': {'use': True, 'threshold': 10.00},
'model': {'use': False, 'threshold': 00.50},
'value': {'use': False, 'threshold': 00.50},
'sigma': {'use': False, 'threshold': 00.25},
'limit': {'use': True, 'threshold': 05.00}
}
mask_x = self.filter_twiss(plane='x', **select)
mask_y = self.filter_twiss(plane='y', **select)
_ = self.process_twiss(plane='x', mask=mask_x, weight=True)
_ = self.process_twiss(plane='y', mask=mask_y, weight=True)
return self.get_table()
def matrix(self, probe:torch.Tensor, other:torch.Tensor) -> tuple:
"""
Generate uncoupled transport matrix (or matrices) for given locations.
Matrices are generated from probe to other
One-turn matrices are generated where probe == other
Input parameters should be 1D tensors with matching length
Additionally, probe and/or other input parameters can be an int or str in self.model.name (not checked)
Note, twiss parameters are treated as independent variables in error propagation
Parameters
----------
probe: torch.Tensor
probe locations
other: torch.Tensor
other locations
Returns
-------
uncoupled transport matrices and error matrices (tuple)
"""
if isinstance(probe, int):
probe = torch.tensor([probe], dtype=torch.int64, device=self.device)
if isinstance(probe, str):
probe = torch.tensor([self.model.name.index(probe)], dtype=torch.int64, device=self.device)
if isinstance(other, int):
other = torch.tensor([other], dtype=torch.int64, device=self.device)
if isinstance(other, str):
other = torch.tensor([self.model.name.index(other)], dtype=torch.int64, device=self.device)
other[probe == other] += self.size
fx, sigma_fx = Decomposition.phase_advance(probe, other, self.table.nux, self.fx, error=True, sigma_frequency=self.table.sigma_nux, sigma_phase=self.sigma_fx)
fy, sigma_fy = Decomposition.phase_advance(probe, other, self.table.nuy, self.fy, error=True, sigma_frequency=self.table.sigma_nuy, sigma_phase=self.sigma_fy)
probe = mod(probe, self.size).to(torch.int64)
other = mod(other, self.size).to(torch.int64)
transport = self.model.matrix_uncoupled(self.ax[probe], self.bx[probe], self.ax[other], self.bx[other], fx, self.ay[probe], self.by[probe], self.ay[other], self.by[other], fy)
sigma_transport = torch.zeros_like(transport)
sigma_transport[:, 0, 0] += self.sigma_ax[probe]**2*self.bx[other]*torch.sin(fx)**2/self.bx[probe]
sigma_transport[:, 0, 0] += self.sigma_bx[probe]**2*self.bx[other]*(torch.cos(fx) + self.ax[probe]*torch.sin(fx))**2/(4.0*self.bx[probe]**3)
sigma_transport[:, 0, 0] += self.sigma_bx[other]**2*(torch.cos(fx) + self.ax[probe]*torch.sin(fx))**2/(4.0*self.bx[probe]*self.bx[other])
sigma_transport[:, 0, 0] += sigma_fx**2*self.bx[other]*(-self.ax[probe]*torch.cos(fx) + torch.sin(fx))**2/self.bx[probe]
sigma_transport[:, 0, 1] += self.sigma_bx[probe]**2*self.bx[other]*torch.sin(fx)**2/(4.0*self.bx[probe])
sigma_transport[:, 0, 1] += self.sigma_bx[other]**2*self.bx[probe]*torch.sin(fx)**2/(4.0*self.bx[other])
sigma_transport[:, 0, 1] += sigma_fx**2*self.bx[probe]*self.bx[other]*torch.cos(fx)**2
sigma_transport[:, 1, 0] += self.sigma_ax[probe]**2*(torch.cos(fx) - self.ax[other]*torch.sin(fx))**2/(self.bx[probe]*self.bx[other])
sigma_transport[:, 1, 0] += self.sigma_ax[other]**2*(torch.cos(fx) + self.ax[probe]*torch.sin(fx))**2/(self.bx[probe]*self.bx[other])
sigma_transport[:, 1, 0] += self.sigma_bx[probe]**2*((-self.ax[probe] + self.ax[other])*torch.cos(fx) + (1.0 + self.ax[probe]*self.ax[other])*torch.sin(fx))**2/(4.0*self.bx[probe]**3*self.bx[other])
sigma_transport[:, 1, 0] += self.sigma_bx[other]**2*((-self.ax[probe] + self.ax[other])*torch.cos(fx) + (1.0 + self.ax[probe]*self.ax[other])*torch.sin(fx))**2/(4.0*self.bx[probe]*self.bx[other]**3)
sigma_transport[:, 1, 0] += sigma_fx**2*((1.0 + self.ax[probe]*self.ax[other])*torch.cos(fx) + (self.ax[probe] - self.ax[other])*torch.sin(fx))**2/(self.bx[probe]*self.bx[other])
sigma_transport[:, 1, 1] += self.sigma_bx[probe]**2*(torch.cos(fx) - self.ax[other]*torch.sin(fx))**2/(4.0*self.bx[probe]*self.bx[other])
sigma_transport[:, 1, 1] += self.sigma_ax[other]**2*self.bx[probe]*torch.sin(fx)**2/self.bx[other]
sigma_transport[:, 1, 1] += self.sigma_bx[other]**2*self.bx[probe]*(torch.cos(fx) - self.ax[other]*torch.sin(fx))**2/(4.0*self.bx[other]**3)
sigma_transport[:, 1, 1] += sigma_fx**2*self.bx[probe]*(self.ax[other]*torch.cos(fx) + torch.sin(fx))**2/self.bx[other]
sigma_transport[:, 2, 2] += self.sigma_ay[probe]**2*self.by[other]*torch.sin(fy)**2/self.by[probe]
sigma_transport[:, 2, 2] += self.sigma_by[probe]**2*self.by[other]*(torch.cos(fy) + self.ay[probe]*torch.sin(fy))**2/(4.0*self.by[probe]**3)
sigma_transport[:, 2, 2] += self.sigma_by[other]**2*(torch.cos(fy) + self.ay[probe]*torch.sin(fy))**2/(4.0*self.by[probe]*self.by[other])
sigma_transport[:, 2, 2] += sigma_fy**2*self.by[other]*(-self.ay[probe]*torch.cos(fy) + torch.sin(fy))**2/self.by[probe]
sigma_transport[:, 2, 3] += self.sigma_by[probe]**2*self.by[other]*torch.sin(fy)**2/(4.0*self.by[probe])
sigma_transport[:, 2, 3] += self.sigma_by[other]**2*self.by[probe]*torch.sin(fy)**2/(4.0*self.by[other])
sigma_transport[:, 2, 3] += sigma_fy**2*self.by[probe]*self.by[other]*torch.cos(fy)**2
sigma_transport[:, 3, 2] += self.sigma_ay[probe]**2*(torch.cos(fy) - self.ay[other]*torch.sin(fy))**2/(self.by[probe]*self.by[other])
sigma_transport[:, 3, 2] += self.sigma_ay[other]**2*(torch.cos(fy) + self.ay[probe]*torch.sin(fy))**2/(self.by[probe]*self.by[other])
sigma_transport[:, 3, 2] += self.sigma_by[probe]**2*((-self.ay[probe] + self.ay[other])*torch.cos(fy) + (1.0 + self.ay[probe]*self.ay[other])*torch.sin(fy))**2/(4.0*self.by[probe]**3*self.by[other])
sigma_transport[:, 3, 2] += self.sigma_by[other]**2*((-self.ay[probe] + self.ay[other])*torch.cos(fy) + (1.0 + self.ay[probe]*self.ay[other])*torch.sin(fy))**2/(4.0*self.by[probe]*self.by[other]**3)
sigma_transport[:, 3, 2] += sigma_fy**2*((1.0 + self.ay[probe]*self.ay[other])*torch.cos(fy) + (self.ay[probe] - self.ay[other])*torch.sin(fy))**2/(self.by[probe]*self.by[other])
sigma_transport[:, 3, 3] += self.sigma_by[probe]**2*(torch.cos(fy) - self.ay[other]*torch.sin(fy))**2/(4.0*self.by[probe]*self.by[other])
sigma_transport[:, 3, 3] += self.sigma_ay[other]**2*self.by[probe]*torch.sin(fy)**2/self.by[other]
sigma_transport[:, 3, 3] += self.sigma_by[other]**2*self.by[probe]*(torch.cos(fy) - self.ay[other]*torch.sin(fy))**2/(4.0*self.by[other]**3)
sigma_transport[:, 3, 3] += sigma_fy**2*self.by[probe]*(self.ay[other]*torch.cos(fy) + torch.sin(fy))**2/self.by[other]
sigma_transport.sqrt_()
return (transport.squeeze(), sigma_transport.squeeze())
def make_transport(self) -> None:
"""
Set transport matrices between adjacent locations.
self.transport[i] is a transport matrix from i to i + 1
Parameters
----------
None
Returns
-------
None
"""
probe = torch.arange(self.size, dtype=torch.int64, device=self.device)
other = 1 + probe
self.transport, _ = self.matrix(probe, other)
def matrix_transport(self, probe:int, other:int) -> torch.Tensor:
"""
Generate transport matrix from probe to other using self.transport.
Parameters
----------
probe: int
probe location
other: int
other location
Returns
-------
transport matrix (torch.Tensor)
"""
if isinstance(probe, str):
probe = self.model.name.index(probe)
if isinstance(other, str):
other = self.model.name.index(other)
if probe < other:
matrix = self.transport[probe]
for i in range(probe + 1, other):
matrix = self.transport[int(mod(i, self.size))] @ matrix
return matrix
if probe > other:
matrix = self.transport[other]
for i in range(other + 1, probe):
matrix = self.transport[int(mod(i, self.size))] @ matrix
return torch.inverse(matrix)
def normal(self, probe:torch.Tensor) -> tuple:
"""
Generate uncoupled normal matrix (or matrices) for given locations.
Note, twiss parameters are treated as independent variables in error propagation
Parameters
----------
probe: torch.Tensor
probe locations
Returns
-------
uncoupled normal matrices and error matrices (tuple)
"""
if isinstance(probe, int):
probe = torch.tensor([probe], dtype=torch.int64, device=self.device)
if isinstance(probe, str):
probe = torch.tensor([self.model.name.index(probe)], dtype=torch.int64, device=self.device)
probe = mod(probe, self.size).to(torch.int64)
matrix = torch.zeros((len(probe), 4, 4), dtype=self.dtype, device=self.device)
sigma_matrix = torch.zeros_like(matrix)
matrix[:, 0, 0] = self.bx[probe].sqrt()
matrix[:, 1, 0] = -self.ax[probe]/self.bx[probe].sqrt()
matrix[:, 1, 1] = 1.0/self.bx[probe].sqrt()
matrix[:, 2, 2] = self.by[probe].sqrt()
matrix[:, 3, 2] = -self.ay[probe]/self.by[probe].sqrt()
matrix[:, 3, 3] = 1.0/self.by[probe].sqrt()
sigma_matrix[:, 0, 0] += self.sigma_bx[probe]**2/(4.0*self.bx[probe])
sigma_matrix[:, 1, 0] += self.sigma_ax[probe]**2/self.bx[probe] + self.sigma_bx[probe]**2*self.ax[probe]/(4.0*self.bx[probe]**3)
sigma_matrix[:, 1, 1] += self.sigma_bx[probe]**2/(4.0*self.bx[probe]**3)
sigma_matrix[:, 2, 2] += self.sigma_by[probe]**2/(4.0*self.by[probe])
sigma_matrix[:, 3, 2] += self.sigma_ay[probe]**2/self.by[probe] + self.sigma_by[probe]**2*self.ay[probe]/(4.0*self.by[probe]**3)
sigma_matrix[:, 3, 3] += self.sigma_by[probe]**2/(4.0*self.by[probe]**3)
return (matrix.squeeze(), sigma_matrix.sqrt().squeeze())
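    # Note: each 2x2 block assembled above is the standard Courant-Snyder
    # normalization matrix
    #     N = [[sqrt(beta), 0], [-alpha/sqrt(beta), 1/sqrt(beta)]]
    # so that N @ N.T reproduces the uncoupled Twiss matrix; the error matrix
    # propagates the alpha/beta uncertainties assuming they are independent.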
def main():
pass
if __name__ == '__main__':
main()
| 42.360933
| 357
| 0.595218
| 72,066
| 0.991975
| 0
| 0
| 5,986
| 0.082396
| 0
| 0
| 23,083
| 0.317733
|
e9b137261c2449a945a4fb05ca0369045630920a
| 879
|
py
|
Python
|
casbin/util/util.py
|
goodrain/pycasbin
|
1a481ba1af7619e1cc7e83896581d14976927d80
|
[
"Apache-2.0"
] | null | null | null |
casbin/util/util.py
|
goodrain/pycasbin
|
1a481ba1af7619e1cc7e83896581d14976927d80
|
[
"Apache-2.0"
] | null | null | null |
casbin/util/util.py
|
goodrain/pycasbin
|
1a481ba1af7619e1cc7e83896581d14976927d80
|
[
"Apache-2.0"
] | null | null | null |
def escape_assertion(s):
"""escapes the dots in the assertion, because the expression evaluation doesn't support such variable names."""
s = s.replace("r.", "r_")
s = s.replace("p.", "p_")
return s
def remove_comments(s):
"""removes the comments starting with # in the text."""
pos = s.find("#")
if pos == -1:
return s
return s[0:pos].strip()
def array_remove_duplicates(s):
"""removes any duplicated elements in a string array."""
found = dict()
j = 0
for x in s:
        if x not in found:
found[x] = True
s[j] = x
j = j + 1
return s[:j]
def array_to_string(s):
"""gets a printable string for a string array."""
return ", ".join(s)
def params_to_string(*s):
"""gets a printable string for variable number of parameters."""
return ", ".join(s)
| 20.44186
| 115
| 0.576792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 362
| 0.411832
|
e9b3ab19d9a17d4244b5f5d4ce4ee01f10101503
| 399
|
py
|
Python
|
multiobj_rationale/fuseprop/__init__.py
|
binghong-ml/multiobj-rationale
|
735916854fba1886730ecac306dd509e930d67bd
|
[
"MIT"
] | 1
|
2021-08-17T00:43:11.000Z
|
2021-08-17T00:43:11.000Z
|
multiobj_rationale/fuseprop/__init__.py
|
binghong-ml/multiobj-rationale
|
735916854fba1886730ecac306dd509e930d67bd
|
[
"MIT"
] | null | null | null |
multiobj_rationale/fuseprop/__init__.py
|
binghong-ml/multiobj-rationale
|
735916854fba1886730ecac306dd509e930d67bd
|
[
"MIT"
] | 1
|
2021-08-17T00:43:12.000Z
|
2021-08-17T00:43:12.000Z
|
from multiobj_rationale.fuseprop.mol_graph import MolGraph
from multiobj_rationale.fuseprop.vocab import common_atom_vocab
from multiobj_rationale.fuseprop.gnn import AtomVGNN
from multiobj_rationale.fuseprop.dataset import *
from multiobj_rationale.fuseprop.chemutils import find_clusters, random_subgraph, extract_subgraph, enum_subgraph, dual_random_subgraph, unique_rationales, merge_rationales
| 66.5
| 172
| 0.892231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e9b44aa1e89f954d4739decd6c84438a72e8d03d
| 5,445
|
py
|
Python
|
thespian/test/test_troupe.py
|
dendron2000/Thespian
|
0acbc5a0803f6d2be3421ea6eb08c6beecbf3802
|
[
"MIT"
] | null | null | null |
thespian/test/test_troupe.py
|
dendron2000/Thespian
|
0acbc5a0803f6d2be3421ea6eb08c6beecbf3802
|
[
"MIT"
] | null | null | null |
thespian/test/test_troupe.py
|
dendron2000/Thespian
|
0acbc5a0803f6d2be3421ea6eb08c6beecbf3802
|
[
"MIT"
] | null | null | null |
import time
import datetime
from thespian.test import *
from thespian.actors import *
from thespian.troupe import troupe
max_listen_wait = datetime.timedelta(seconds=4)
max_ask_wait = datetime.timedelta(seconds=2.5)
class Bee(Actor):
def receiveMessage(self, msg, sender):
if isinstance(msg, tuple):
time.sleep(msg[0])
self.send(sender, msg[1] + ' buzz')
@troupe()
class Hive(Bee):
pass
@troupe()
class Colony(ActorTypeDispatcher):
def receiveMsg_tuple(self, msg, sender):
if not hasattr(self, 'hive'):
self.hive = self.createActor(Hive)
self.asker = []
self.asker.append(sender)
self.send(self.hive, msg)
self.troupe_work_in_progress = True
def receiveMsg_str(self, msg, sender):
self.send(self.asker.pop(), msg)
self.troupe_work_in_progress = bool(getattr(self, 'asker', False))
# Ensure there are more test data elements than workers so that
# some workers get multiple messages
testdata = [(0.5, 'Fizz'), (1, 'Honey'),
(0.25, 'Flower'), (0.75, 'Pollen'),
] + ([(0.005, 'Orchid'), (0.005, 'Rose'),
(0.005, 'Carnation'), (0.005, 'Lily'),
(0.005, 'Daffodil'), (0.005, 'Begonia'),
(0.005, 'Violet'), (0.005, 'Aster'),
] * 3)
def useActorForTest(asys, bee):
# Run multiple passes to allow workers to be reaped between passes
for X in range(2):
print(X)
for each in testdata:
asys.tell(bee, each)
remaining = testdata[:]
for readnum in range(len(testdata)):
rsp = asys.listen(max_listen_wait)
assert rsp
print(str(rsp))
remaining = [R for R in remaining
if not rsp.startswith(R[1])]
assert not remaining
asys.tell(bee, ActorExitRequest())
def testSingleBee(asys):
useActorForTest(asys, asys.createActor(Bee))
def testHive(asys):
useActorForTest(asys, asys.createActor(Hive))
def testColony(asys):
useActorForTest(asys, asys.createActor(Colony))
# ------------------------------------------------------------
class SimpleSourceAuthority(ActorTypeDispatcher):
def receiveMsg_str(self, msg, sender):
self.registerSourceAuthority()
self.send(sender, 'ok')
def receiveMsg_ValidateSource(self, msg, sender):
self.send(sender, ValidatedSource(msg.sourceHash, msg.sourceData))
class LoadWatcher(ActorTypeDispatcher):
def receiveMsg_str(self, msg, sender):
if msg == 'go':
self.notifyOnSourceAvailability(True)
self._tell = sender
self.send(sender, 'ok')
elif msg == 'stop':
self.notifyOnSourceAvailability(False)
self._tell = None
def receiveMsg_LoadedSource(self, loadmsg, sender):
if getattr(self, '_tell', None):
self.send(self._tell, loadmsg.sourceHash)
def receiveMsg_UnloadedSource(self, unloadmsg, sender):
if getattr(self, '_tell', None):
self.send(self._tell, ('unloaded', unloadmsg.sourceHash))
import tempfile, zipfile, os, shutil
import pytest  # the fixture decorator below requires pytest in scope
@pytest.fixture()
def source_zip(request):
tmpdir = tempfile.mkdtemp()
zipfname = os.path.join(tmpdir, 'hivesrc.zip')
hivezip = zipfile.ZipFile(zipfname, 'w')
hivezip.writestr('__init__.py', '')
hivezip.writestr('forest/__init__.py', '')
hivezip.writestr('forest/clearing/__init__.py', '')
hivezip.writestr('forest/clearing/beehive.py', '''
import time
from thespian.actors import *
from thespian.troupe import troupe
class Bee(Actor):
def receiveMessage(self, msg, sender):
if isinstance(msg, tuple):
time.sleep(msg[0])
self.send(sender, msg[1] + ' buzz')
@troupe()
class Hive(Bee): pass
@troupe()
class Colony(Bee):
def receiveMessage(self, msg, sender):
if isinstance(msg, tuple):
if not hasattr(self, 'hive'):
self.hive = self.createActor(Hive)
self.asker = []
self.asker.append(sender)
self.send(self.hive, msg)
self.troupe_work_in_progress = True
elif isinstance(msg, str):
self.send(self.asker.pop(), msg)
self.troupe_work_in_progress = bool(self.asker)
''')
hivezip.close()
request.addfinalizer(lambda d=tmpdir:
os.path.exists(d) and shutil.rmtree(d))
return zipfname
def testLoadableHive(asys, source_zip):
r = asys.ask(asys.createActor(SimpleSourceAuthority), 'go', max_ask_wait)
assert r == 'ok'
r = asys.ask(asys.createActor(LoadWatcher), 'go', max_ask_wait)
assert r == 'ok'
srchash = asys.loadActorSource(source_zip)
r = asys.listen(max_listen_wait)
assert r == srchash
bee = asys.createActor('forest.clearing.beehive.Hive',
sourceHash=srchash)
useActorForTest(asys, bee)
def testLoadableColony(asys, source_zip):
r = asys.ask(asys.createActor(SimpleSourceAuthority), 'go', max_ask_wait)
assert r == 'ok'
r = asys.ask(asys.createActor(LoadWatcher), 'go', max_ask_wait)
assert r == 'ok'
srchash = asys.loadActorSource(source_zip)
r = asys.listen(max_listen_wait)
assert r == srchash
bee = asys.createActor('forest.clearing.beehive.Colony',
sourceHash=srchash)
useActorForTest(asys, bee)
| 30.082873
| 77
| 0.617998
| 1,613
| 0.296235
| 0
| 0
| 1,816
| 0.333517
| 0
| 0
| 1,383
| 0.253994
|
e9b739710ac88a977ee95593a167d4e063e1ba18
| 1,197
|
py
|
Python
|
tools/upd.py
|
vladimirivanoviliev/amps-blog-web-grid-bake-off
|
25c24e1fbfc57df4e669487957dd440b338c7847
|
[
"MIT"
] | 3
|
2017-10-21T01:37:03.000Z
|
2021-07-22T16:08:02.000Z
|
tools/upd.py
|
vladimirivanoviliev/amps-blog-web-grid-bake-off
|
25c24e1fbfc57df4e669487957dd440b338c7847
|
[
"MIT"
] | 2
|
2020-01-15T22:50:18.000Z
|
2020-07-19T14:55:28.000Z
|
tools/upd.py
|
vladimirivanoviliev/amps-blog-web-grid-bake-off
|
25c24e1fbfc57df4e669487957dd440b338c7847
|
[
"MIT"
] | 5
|
2020-01-27T13:52:04.000Z
|
2020-10-28T07:38:46.000Z
|
from AMPS import Client
import random
import time
import json
import sys
def main(*args):
    publish_rate = None  # publish as fast as possible by default
    start, end = 1, 1000  # fallback order id range (assumed default) so the loop works without arguments
    try:
        publish_rate = int(args[0])
        start = int(args[1])
        end = int(args[2])
    except Exception:
        pass
# set up the client
client = Client('the-publisher')
client.connect('tcp://localhost:9007/amps/json')
client.logon()
while True:
# generate and publish data
current_id = random.randint(start, end)
price_usd = random.randint(20000, 30000)
quantity = random.randint(1, 100)
total = price_usd * quantity
client.publish(
'orders',
json.dumps({
'order_id': current_id,
'name': '>>> TESLA UPDATE <<<',
'price_usd': price_usd,
'quantity': quantity,
'total': total
})
)
if publish_rate is not None and publish_rate > 0:
time.sleep(1.0 / publish_rate)
if __name__ == '__main__':
# detect command line arguments
if len(sys.argv) > 1:
main(*sys.argv[1:])
else:
main()
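# Usage (illustrative): `python upd.py 10 1 1000` publishes roughly 10 orders
# per second with order ids drawn from [1, 1000]; with no arguments it
# publishes as fast as possible using the fallback id range above.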
| 23.94
| 65
| 0.548872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 248
| 0.207185
|
e9b97dc8d0695130796496766449b39ed72da44e
| 8,299
|
py
|
Python
|
sdk/python/pulumi_aws/sns/platform_application.py
|
pulumi-bot/pulumi-aws
|
756c60135851e015232043c8206567101b8ebd85
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/sns/platform_application.py
|
pulumi-bot/pulumi-aws
|
756c60135851e015232043c8206567101b8ebd85
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/sns/platform_application.py
|
pulumi-bot/pulumi-aws
|
756c60135851e015232043c8206567101b8ebd85
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
class PlatformApplication(pulumi.CustomResource):
"""
Provides an SNS platform application resource
"""
def __init__(__self__, __name__, __opts__=None, event_delivery_failure_topic_arn=None, event_endpoint_created_topic_arn=None, event_endpoint_deleted_topic_arn=None, event_endpoint_updated_topic_arn=None, failure_feedback_role_arn=None, name=None, platform=None, platform_credential=None, platform_principal=None, success_feedback_role_arn=None, success_feedback_sample_rate=None):
"""Create a PlatformApplication resource with the given unique name, props, and options."""
if not __name__:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(__name__, basestring):
raise TypeError('Expected resource name to be a string')
if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if event_delivery_failure_topic_arn and not isinstance(event_delivery_failure_topic_arn, basestring):
raise TypeError('Expected property event_delivery_failure_topic_arn to be a basestring')
__self__.event_delivery_failure_topic_arn = event_delivery_failure_topic_arn
"""
SNS Topic triggered when a delivery to any of the platform endpoints associated with your platform application encounters a permanent failure.
"""
__props__['eventDeliveryFailureTopicArn'] = event_delivery_failure_topic_arn
if event_endpoint_created_topic_arn and not isinstance(event_endpoint_created_topic_arn, basestring):
raise TypeError('Expected property event_endpoint_created_topic_arn to be a basestring')
__self__.event_endpoint_created_topic_arn = event_endpoint_created_topic_arn
"""
SNS Topic triggered when a new platform endpoint is added to your platform application.
"""
__props__['eventEndpointCreatedTopicArn'] = event_endpoint_created_topic_arn
if event_endpoint_deleted_topic_arn and not isinstance(event_endpoint_deleted_topic_arn, basestring):
raise TypeError('Expected property event_endpoint_deleted_topic_arn to be a basestring')
__self__.event_endpoint_deleted_topic_arn = event_endpoint_deleted_topic_arn
"""
SNS Topic triggered when an existing platform endpoint is deleted from your platform application.
"""
__props__['eventEndpointDeletedTopicArn'] = event_endpoint_deleted_topic_arn
if event_endpoint_updated_topic_arn and not isinstance(event_endpoint_updated_topic_arn, basestring):
raise TypeError('Expected property event_endpoint_updated_topic_arn to be a basestring')
__self__.event_endpoint_updated_topic_arn = event_endpoint_updated_topic_arn
"""
SNS Topic triggered when an existing platform endpoint is changed from your platform application.
"""
__props__['eventEndpointUpdatedTopicArn'] = event_endpoint_updated_topic_arn
if failure_feedback_role_arn and not isinstance(failure_feedback_role_arn, basestring):
raise TypeError('Expected property failure_feedback_role_arn to be a basestring')
__self__.failure_feedback_role_arn = failure_feedback_role_arn
"""
The IAM role permitted to receive failure feedback for this application.
"""
__props__['failureFeedbackRoleArn'] = failure_feedback_role_arn
if name and not isinstance(name, basestring):
raise TypeError('Expected property name to be a basestring')
__self__.name = name
"""
The friendly name for the SNS platform application
"""
__props__['name'] = name
if not platform:
raise TypeError('Missing required property platform')
elif not isinstance(platform, basestring):
raise TypeError('Expected property platform to be a basestring')
__self__.platform = platform
"""
The platform that the app is registered with. See [Platform][1] for supported platforms.
"""
__props__['platform'] = platform
if not platform_credential:
raise TypeError('Missing required property platform_credential')
elif not isinstance(platform_credential, basestring):
raise TypeError('Expected property platform_credential to be a basestring')
__self__.platform_credential = platform_credential
"""
Application Platform credential. See [Credential][1] for type of credential required for platform. The value of this attribute when stored into the Terraform state is only a hash of the real value, so therefore it is not practical to use this as an attribute for other resources.
"""
__props__['platformCredential'] = platform_credential
if platform_principal and not isinstance(platform_principal, basestring):
raise TypeError('Expected property platform_principal to be a basestring')
__self__.platform_principal = platform_principal
"""
Application Platform principal. See [Principal][2] for type of principal required for platform. The value of this attribute when stored into the Terraform state is only a hash of the real value, so therefore it is not practical to use this as an attribute for other resources.
"""
__props__['platformPrincipal'] = platform_principal
if success_feedback_role_arn and not isinstance(success_feedback_role_arn, basestring):
raise TypeError('Expected property success_feedback_role_arn to be a basestring')
__self__.success_feedback_role_arn = success_feedback_role_arn
"""
The IAM role permitted to receive success feedback for this application.
"""
__props__['successFeedbackRoleArn'] = success_feedback_role_arn
if success_feedback_sample_rate and not isinstance(success_feedback_sample_rate, basestring):
raise TypeError('Expected property success_feedback_sample_rate to be a basestring')
__self__.success_feedback_sample_rate = success_feedback_sample_rate
"""
The percentage of success to sample (0-100)
"""
__props__['successFeedbackSampleRate'] = success_feedback_sample_rate
__self__.arn = pulumi.runtime.UNKNOWN
"""
The ARN of the SNS platform application
"""
super(PlatformApplication, __self__).__init__(
'aws:sns/platformApplication:PlatformApplication',
__name__,
__props__,
__opts__)
def set_outputs(self, outs):
if 'arn' in outs:
self.arn = outs['arn']
if 'eventDeliveryFailureTopicArn' in outs:
self.event_delivery_failure_topic_arn = outs['eventDeliveryFailureTopicArn']
if 'eventEndpointCreatedTopicArn' in outs:
self.event_endpoint_created_topic_arn = outs['eventEndpointCreatedTopicArn']
if 'eventEndpointDeletedTopicArn' in outs:
self.event_endpoint_deleted_topic_arn = outs['eventEndpointDeletedTopicArn']
if 'eventEndpointUpdatedTopicArn' in outs:
self.event_endpoint_updated_topic_arn = outs['eventEndpointUpdatedTopicArn']
if 'failureFeedbackRoleArn' in outs:
self.failure_feedback_role_arn = outs['failureFeedbackRoleArn']
if 'name' in outs:
self.name = outs['name']
if 'platform' in outs:
self.platform = outs['platform']
if 'platformCredential' in outs:
self.platform_credential = outs['platformCredential']
if 'platformPrincipal' in outs:
self.platform_principal = outs['platformPrincipal']
if 'successFeedbackRoleArn' in outs:
self.success_feedback_role_arn = outs['successFeedbackRoleArn']
if 'successFeedbackSampleRate' in outs:
self.success_feedback_sample_rate = outs['successFeedbackSampleRate']
| 54.960265
| 384
| 0.719605
| 8,076
| 0.973129
| 0
| 0
| 0
| 0
| 0
| 0
| 3,689
| 0.444511
|
e9bbd39d6d8b86209c5aaf7a41e2a233bc9104f2
| 4,815
|
py
|
Python
|
functions/height.py
|
Hilvcha/PINGAN
|
0eb1435750c2ce3dc5de3a50d390aae044360fd5
|
[
"MIT"
] | 7
|
2018-04-01T17:24:56.000Z
|
2021-06-07T09:39:52.000Z
|
functions/height.py
|
Hilvcha/PINGAN
|
0eb1435750c2ce3dc5de3a50d390aae044360fd5
|
[
"MIT"
] | 5
|
2018-03-31T18:24:52.000Z
|
2019-10-09T16:27:49.000Z
|
functions/height.py
|
Hilvcha/PINGAN
|
0eb1435750c2ce3dc5de3a50d390aae044360fd5
|
[
"MIT"
] | 2
|
2020-03-04T08:48:54.000Z
|
2021-06-07T09:39:51.000Z
|
# coding : utf-8
# created by wyj
import numpy as np
import pandas as pd
import math
from utils.feature_utils import df_empty
# TERMINALNO,TIME,TRIP_ID,LONGITUDE,LATITUDE,DIRECTION,HEIGHT,SPEED,CALLSTATE,Y
# Group the input table by trip_id, take the maximum contiguous altitude subarray
# per trip, then take the max, mean and variance over all of a person's trips.
# def max_sub(arr):
# sum = 0
# height = -999
# tempheight = arr.iloc[0]
# for h in arr:
# sum += h - tempheight
# if sum > height:
# height = sum
# if sum < 0:
# sum = 0
# tempheight = h
# arr['secc_inc']=sum
# return arr
def speed_risk(arr):
    # maximum contiguous uphill subarray
# sum = 0
# height = -999
tempheight = arr['HEIGHT'].iloc[0]
tempdirection = arr['DIRECTION'].iloc[0]
tempspeed = arr['SPEED'].iloc[0]
    # altitude-change risk factor
    height_risk = 0
    # direction-change risk factor
    dir_risk = 0
    # phone-call risk factor
    call_risk = 0
for index, row in arr.iterrows():
# sum += row['HEIGHT'] - tempheight
# if sum > height:
# height = sum
# if sum < 0:
# sum = 0
if tempspeed > 0 and row["CALLSTATE"] != 4:
if row["CALLSTATE"] == 0:
call_risk += math.exp(tempspeed / 10) * 0.02
else:
call_risk += math.exp(tempspeed / 10)
D_height = abs(row['HEIGHT'] - tempheight)
D_speed = abs(row['SPEED'] - tempspeed)
height_risk += math.pow(row["SPEED"], D_height / 100)
tempspeed = row['SPEED']
tempheight = row['HEIGHT']
D_direction = min(abs(row["DIRECTION"] - tempdirection), abs(360 + tempdirection - row["DIRECTION"])) / 90.0
dir_risk += math.pow((row["SPEED"] / 10), D_direction / 10)
tempdirection = row['DIRECTION']
# arr['SUCC_INC'] = height
arr["CALLSTATE"] = call_risk
arr['HEIGHT'] = height_risk
arr['DIRECTION'] = dir_risk
return arr
def height_feet(data):
    # risk factors included
data_speed_risk = data[["TERMINALNO", 'TRIP_ID', 'HEIGHT', 'SPEED', 'DIRECTION', "CALLSTATE"]].groupby(
["TERMINALNO", 'TRIP_ID'],
as_index=False).apply(
speed_risk)
    # aggregate per trip_id
data_speed_risk = data_speed_risk[
["TERMINALNO", 'TRIP_ID', 'HEIGHT', 'DIRECTION', "CALLSTATE"]].groupby(
["TERMINALNO", 'TRIP_ID'],
as_index=False).first()
# max_data = data_speed_risk[["TERMINALNO", 'SUCC_INC']].groupby(["TERMINALNO"], as_index=True).max()
# mean_data = data_speed_risk[["TERMINALNO", 'SUCC_INC']].groupby(["TERMINALNO"], as_index=True).mean()
# var_data = data_speed_risk[["TERMINALNO", 'SUCC_INC']].groupby(["TERMINALNO"], as_index=True).var()
# train_data=pd.concat([max_data, mean_data, var_data], axis=1)
# train_data.columns = ['MAX_SUCC_INC', 'MEAN_SUCC_INC', 'VAR_SUCC_INC']
train_data = data_speed_risk[["TERMINALNO", 'HEIGHT', 'DIRECTION', "CALLSTATE"]].groupby(
["TERMINALNO"],
as_index=True).sum()
    # height statistics (mean, variance)
    height_sta = data[['TERMINALNO', "HEIGHT"]].groupby(['TERMINALNO']).agg([np.mean, np.var])
    # maximum trip duration (sample count of the longest trip)
    max_time = data[['TERMINALNO', "TRIP_ID", "TIME"]].groupby(["TERMINALNO", 'TRIP_ID'], as_index=False).count()
    max_time = max_time[['TERMINALNO', 'TIME']].groupby(["TERMINALNO"]).max()
    # speed statistics (mean, max)
    speed_sta = data[['TERMINALNO', "SPEED"]].groupby(['TERMINALNO']).agg([np.mean, np.max])
    # # average descent
# height_down = data[['TERMINALNO', "TRIP_ID", "HEIGHT"]].groupby(["TERMINALNO", 'TRIP_ID'], as_index=False).agg(
# maxSubArray)
# height_down = height_down[['TERMINALNO', "HEIGHT"]].groupby(['TERMINALNO']).agg([np.mean, np.min])
    # # average climb
# height_up = data[['TERMINALNO', "TRIP_ID", "HEIGHT"]].groupby(["TERMINALNO", 'TRIP_ID'], as_index=False).agg(
# minSubArray)
# height_up = height_up[['TERMINALNO', "HEIGHT"]].groupby(['TERMINALNO']).agg([np.mean, np.max])
train_data = pd.concat([train_data, height_sta, max_time, speed_sta,], axis=1)
train_data.columns = ['height_risk', 'direction_risk', "callstate_risk", "height_mean", "height_var", "max_time",
"speed_mean", "speed_max",]
return train_data
# 'TERMINALNO', 'maxTime', 'phonerisk', 'dir_risk', 'height_risk', 'speed_max',
# 'speed_mean', 'height_mean', 'Zao', 'Wan', 'Sheye'
def maxSubArray(arr):
height = 99999
sum = 0
tempheight = arr.iloc[0]
for h in arr:
sum += h - tempheight
if sum < height:
height = sum
if sum > 0:
sum = 0
tempheight = h
return height
def minSubArray(arr):
height = -99999
sum = 0
tempheight = arr.iloc[0]
for h in arr:
sum += h - tempheight
if sum > height:
height = sum
if sum < 0:
sum = 0
tempheight = h
return height
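# Note: despite its name, maxSubArray returns the most negative contiguous
# altitude change (largest descent), while minSubArray returns the largest
# contiguous climb; both accumulate sample-to-sample height differences and
# reset the running sum when it crosses zero.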
| 32.979452
| 117
| 0.586708
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,582
| 0.512607
|
e9bc10c08079eec1973b577a0d5e59f56835d97e
| 2,850
|
py
|
Python
|
live_cd_scripts/os_scanner.py
|
ForbiddenApplePy/applepy
|
4eb0965f7f634b0f340beee54dce09c12e3e4f54
|
[
"WTFPL"
] | null | null | null |
live_cd_scripts/os_scanner.py
|
ForbiddenApplePy/applepy
|
4eb0965f7f634b0f340beee54dce09c12e3e4f54
|
[
"WTFPL"
] | null | null | null |
live_cd_scripts/os_scanner.py
|
ForbiddenApplePy/applepy
|
4eb0965f7f634b0f340beee54dce09c12e3e4f54
|
[
"WTFPL"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import json
import windows_utilman
import pyAesCrypt
import requests
from secureCrypt import cryptResult
os.system('loadkeys fr')
os.system('lsblk > result.txt')
if not os.path.exists('/mnt/targetDrive'):
    os.system('mkdir /mnt/targetDrive')
def parse(file_name):
# Listing all drives and removing special char from the command return and saving them to a file
result = []
with open(file_name) as input_file:
for line in input_file:
temp_arr = line.split(' ')
for item in temp_arr:
if '└─' in item or '├─' in item:
result.append(item.replace('└─', '').replace('├─', ''))
os.remove(file_name)
return result
def check_for_os(drives_list):
    # Checking for OS installed on each drive
    os_list = {'Os': 'location'}
    hosts = {'Host': 'address'}
    servers = {"DNS": "address"}
    for drive in drives_list:
os.system('mount /dev/%s /mnt/targetDrive' % (drive))
print('Looking for OS on '+drive+'...\n')
if os.path.isdir('/mnt/targetDrive/Windows'):
# Checking for Windows installation
os_list['Windows'] = drive
windows_utilman.utilman()
elif os.path.isdir('/mnt/targetDrive/etc'):
# Looking for Linux and grabbing files
f = open('/mnt/targetDrive/etc/issue')
for x in f:
# Listing distros
x = x.split()
x = x[:len(x)-2]
x = ' '.join(x)
if x != '':
os_list[x] = drive
f = open('/etc/hosts')
for x in f:
# Checking hosts
x = x.split()
hosts[x[1]] = x[0]
f = open('/etc/resolv.conf')
for x in f:
# Checking DNS
x = x.split()
if x:
if x[0] != "#":
if x[0] == "options":
pass
else:
servers[x[0]] = x[1]
results = []
results.append(os_list)
results.append(hosts)
results.append(servers)
return results
# Program starts here
drives_list = parse("result.txt")
results = check_for_os(drives_list)
# Saving results as json file
payload = json.dumps(results)  # avoid shadowing the json module
if os.path.exists('results.json'):
    f = open('results.json', 'w')
else:
    f = open('results.json', 'x')
f.write(payload)
f.close()
# Crypting file before sending it to our server and removing the base file just in case
cryptResult("results.json")
os.remove("results.json")
# Sending file to the server
os.system('curl -i -X POST -H "Content-Type: multipart/form-data" -F "host=test" -F "file=@results.json.aes" https://exft.avapxia.tk/')
| 29.6875
| 135
| 0.545614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,021
| 0.356246
|
e9bd49e911285196dc03e66b536b44da7fb8a285
| 2,336
|
py
|
Python
|
app/views.py
|
taeram/idiocy
|
01acf569785f0294540a1b0214b8eccd81818b9c
|
[
"MIT"
] | null | null | null |
app/views.py
|
taeram/idiocy
|
01acf569785f0294540a1b0214b8eccd81818b9c
|
[
"MIT"
] | 1
|
2019-12-06T21:20:10.000Z
|
2019-12-06T21:20:11.000Z
|
app/views.py
|
taeram/idiocy
|
01acf569785f0294540a1b0214b8eccd81818b9c
|
[
"MIT"
] | null | null | null |
from app import app
import os
from flask import abort, \
redirect, \
render_template, \
request, \
send_from_directory, \
url_for
from .helpers import generate_code, \
is_valid_url, \
is_authenticated, \
strip_file_extension
from .database import db, \
Urls
from .filters import strip_www
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'), 'favicon.png', mimetype='image/png')
@app.route('/', methods=['GET', 'POST', 'HEAD'])
def shorten():
if request.method == 'GET':
return render_template('hello.html')
elif request.method == 'POST':
if not is_authenticated():
return app.response_class(response='{"error": "Invalid API key"}', mimetype='application/json', status=403)
url = request.form['url'].strip()
if not is_valid_url(url):
return app.response_class(response='{"error": "Invalid URL"}', mimetype='application/json', status=403)
# Has this URL been previously stored?
row = db.session.query(Urls).\
filter(Urls.url == url).\
first()
if not row:
row = Urls(url=url, code=generate_code())
db.session.add(row)
db.session.commit()
return strip_www(url_for('bounce', code=row.code, _external=True))
@app.route('/<code>', methods=['GET', 'DELETE'])
def bounce(code):
code = strip_file_extension(code)
row = db.session.query(Urls).\
filter(Urls.code == code).\
first()
if not row:
abort(404)
if request.method == 'GET':
row.clicks += 1
db.session.add(row)
db.session.commit()
return redirect(row.url)
elif request.method == 'DELETE':
        db.session.delete(row)
db.session.commit()
return strip_www(url_for('bounce', code=row.code, _external=True))
@app.route('/list', methods=['GET'])
def list():
urls = db.session.query(Urls).\
order_by(Urls.created).\
limit(25).\
all()
return render_template('list.html', urls=urls)
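# Example (illustrative; host/port are hypothetical and requests are subject
# to the API-key check in is_authenticated):
#   curl -X POST -d "url=https://example.com" http://localhost:5000/
# returns the short link; GET /<code> redirects to the stored URL and
# increments its click counter.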
| 31.146667
| 119
| 0.550514
| 0
| 0
| 0
| 0
| 1,867
| 0.799229
| 0
| 0
| 298
| 0.127568
|
e9bd8c135bbdae40ce2ed51669ba9beb880235de
| 561
|
py
|
Python
|
typeidea/comment/adminx.py
|
LastDanceG/typeblog
|
fdd043546813866669c004bc8d8aedbfcfa326f2
|
[
"MIT"
] | 1
|
2020-02-20T12:01:43.000Z
|
2020-02-20T12:01:43.000Z
|
typeidea/comment/adminx.py
|
LastDanceG/typeblog
|
fdd043546813866669c004bc8d8aedbfcfa326f2
|
[
"MIT"
] | 2
|
2020-06-06T00:45:15.000Z
|
2021-06-10T22:35:31.000Z
|
typeidea/comment/adminx.py
|
LastDanceG/typeblog
|
fdd043546813866669c004bc8d8aedbfcfa326f2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import xadmin
from django.contrib import admin
# from typeidea.custom_site import custom_site
from typeidea.custom_admin import BaseOwnerAdmin
from .models import Comment
# Register your models here.
class CommentAdmin(object):
list_display = ['target', 'nickname', 'status', 'website', 'email', 'create_time']
search_fields = ['nickname', 'status']
actions_on_top = True
actions_on_bottom = True
date_hierarchy = 'create_time'
xadmin.site.register(Comment, CommentAdmin)
| 26.714286
| 86
| 0.750446
| 247
| 0.440285
| 0
| 0
| 0
| 0
| 0
| 0
| 183
| 0.326203
|
e9be790eafe58cf7aaa0017314b055dd2bdf4ea4
| 79
|
py
|
Python
|
core/devices/__init__.py
|
kisonho/torchmanager
|
ac01c61a132238bc0d39bf2173dfd37f44dbbf30
|
[
"BSD-2-Clause"
] | null | null | null |
core/devices/__init__.py
|
kisonho/torchmanager
|
ac01c61a132238bc0d39bf2173dfd37f44dbbf30
|
[
"BSD-2-Clause"
] | null | null | null |
core/devices/__init__.py
|
kisonho/torchmanager
|
ac01c61a132238bc0d39bf2173dfd37f44dbbf30
|
[
"BSD-2-Clause"
] | null | null | null |
from .devices import data_parallel, empty_cache, find, move_to_device, CPU, GPU
| 79
| 79
| 0.822785
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e9bfd30a608b29439adc950385f985d929b086eb
| 1,443
|
py
|
Python
|
prepare_dataset/filter_ratio_and_warnings.py
|
Florian-Barthel/stylegan2
|
4ef87038bf9370596cf2b729e1d1a1bc3ebcddd8
|
[
"BSD-Source-Code"
] | null | null | null |
prepare_dataset/filter_ratio_and_warnings.py
|
Florian-Barthel/stylegan2
|
4ef87038bf9370596cf2b729e1d1a1bc3ebcddd8
|
[
"BSD-Source-Code"
] | null | null | null |
prepare_dataset/filter_ratio_and_warnings.py
|
Florian-Barthel/stylegan2
|
4ef87038bf9370596cf2b729e1d1a1bc3ebcddd8
|
[
"BSD-Source-Code"
] | null | null | null |
from tqdm import tqdm
import shutil
import os
from PIL import Image
import warnings
src_folder = '../../modified_datasets/cars_flat'
dest_folder = '../../modified_datasets/cars_flat_ratio_warnings'
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
num_kept = 0
num_removed = 0
num_corrupt_EXIF = 0
for file in tqdm(os.listdir(src_folder)):
if file.lower().endswith(".jpg") or file.lower().endswith(".jpeg") or file.lower().endswith(".png"):
src_img = src_folder + '/' + file
dest_img = dest_folder + '/' + file
src_label = src_folder + '/' + file + '.json'
dest_label = dest_folder + '/' + file + '.json'
        with warnings.catch_warnings():
            # escalate UserWarning (e.g. PIL's corrupt-EXIF warning) to an
            # error so affected images are caught and skipped below
            warnings.simplefilter('error', UserWarning)
try:
img = Image.open(src_img)
w, h = img.size
if w < h:
print('removed invalid ratio')
num_removed += 1
continue
shutil.copyfile(src_img, dest_img)
if os.path.exists(src_label):
shutil.copyfile(src_label, dest_label)
num_kept += 1
            except Exception:
print('removed invalid format')
num_corrupt_EXIF += 1
print('Summary:')
print('removed corrupt_exif: ' + str(num_corrupt_EXIF))
print('removed: ' + str(num_removed))
print('kept: ' + str(num_kept))
| 31.369565
| 104
| 0.582121
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 237
| 0.164241
|
e9c1766ed44cd38de086fdbbfdce35e66d2ab6f5
| 2,548
|
py
|
Python
|
src/backend/apps/posts/utils.py
|
Vixx-X/ati-project
|
0ef80772a6fc3807e401cf58b9e15f3628373383
|
[
"MIT"
] | null | null | null |
src/backend/apps/posts/utils.py
|
Vixx-X/ati-project
|
0ef80772a6fc3807e401cf58b9e15f3628373383
|
[
"MIT"
] | 61
|
2021-06-10T03:27:06.000Z
|
2022-03-12T01:01:34.000Z
|
src/backend/apps/posts/utils.py
|
Vixx-X/ati-project
|
0ef80772a6fc3807e401cf58b9e15f3628373383
|
[
"MIT"
] | null | null | null |
from mongoengine.queryset.visitor import Q
from backend.apps.posts.models import Post
from backend.apps.user.signals import check_comment_signal
from backend.apps.user.utils import are_friends
def _get_two_last_obj_with_path(path_list):
size = len(path_list)
if not size:
raise Exception("path_list cannot be empty")
pk = path_list[0]
if size == 1:
post = Post.objects.get(id=pk)
return post, post
parent, son = _get_two_last_obj_with_path(path_list[1:])
parent = son
son = son.comments.get(id=pk)
return parent, son
def get_two_last_obj_with_path(path):
return _get_two_last_obj_with_path(path.split("/")[::-1])
def get_object_by_path(path):
_, son = _get_two_last_obj_with_path(path)
return son
def save_comment_by_path(path, comment):
"""
Saving comment inserting it in root comment or post, given that we
only have 2-depth comments
"""
parent, son = get_two_last_obj_with_path(path)
if isinstance(parent, Post) and not isinstance(son, Post):
son.comments.append(comment)
else:
parent.comments.append(comment)
post = get_object_by_path(path.split("/")[0])
post.save()
# notify son author
check_comment_signal.send(comment.author, son.author)
def get_comments(obj, page=1, size=10, path=None):
start = (page - 1) * size
end = start + size
raiz = []
for com in obj.comments[start:end]:
curr = com.as_dict()
childs = []
        for child in com.comments[: size // 2]:  # integer division to keep a valid slice index
c = child.as_dict()
c["reply"] = path.split("/") + [curr["id"], c["id"]]
childs.append(c)
        curr["comments"] = childs  # curr is a plain dict, so assign the key directly
curr["reply"] = path.split("/") + [curr["id"]]
junior = {"comments": curr, "more": path.split("/") + [curr["id"]]}
raiz.append(junior)
ret = {"comments": raiz}
if len(raiz) == size:
ret["more"] = path.split("/")
return raiz
def get_comments_by_path(path, page, size):
comment = get_object_by_path(path)
return get_comments(comment, page, size, path)
def get_main_posts(requester):
    # use bitwise OR to combine Q objects; Python's `or` would simply
    # short-circuit to the first operand
    return Post.objects.filter(
        Q(public=True) | Q(author__in=requester.friends)
    ).order_by("-time_created")
def get_posts_by_user(user, requester):
friends = are_friends(user, requester)
priv_filter = Q() if friends else (Q(public=True) | Q(author=requester))
filter_param = Q(author=user) & priv_filter
return Post.objects.filter(filter_param).order_by("-time_created")
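# Illustration: for two users who are not friends, priv_filter reduces to
# Q(public=True) | Q(author=requester), so get_posts_by_user never exposes a
# stranger's private posts; for friends the empty Q() filter is a no-op and
# all of the user's posts are returned.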
| 29.627907
| 76
| 0.654631
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 279
| 0.109498
|
e9c28c0a97751dccd5d3dc1b21e7750a3497a062
| 272
|
py
|
Python
|
src/globus_cli/commands/timer/__init__.py
|
globusonline/globus-cli
|
696857baafac198141edc3c1c29c72215f217df1
|
[
"Apache-2.0"
] | null | null | null |
src/globus_cli/commands/timer/__init__.py
|
globusonline/globus-cli
|
696857baafac198141edc3c1c29c72215f217df1
|
[
"Apache-2.0"
] | 1
|
2016-04-09T17:26:05.000Z
|
2016-04-11T16:13:50.000Z
|
src/globus_cli/commands/timer/__init__.py
|
globusonline/globus-cli
|
696857baafac198141edc3c1c29c72215f217df1
|
[
"Apache-2.0"
] | null | null | null |
from globus_cli.parsing import group
from .list import list_command
from .show import show_command
@group("timer")
def timer_command():
"""Schedule and manage jobs in Globus Timer"""
timer_command.add_command(list_command)
timer_command.add_command(show_command)
| 19.428571
| 50
| 0.794118
| 0
| 0
| 0
| 0
| 87
| 0.319853
| 0
| 0
| 53
| 0.194853
|
e9c3402cd5440828a3062f0ffd949c6878c6a821
| 5,340
|
py
|
Python
|
openslides_backend/action/actions/projector/toggle.py
|
ostcar/openslides-backend
|
e6ceac497c37a1e3e7f408c6cfb29cf21d985b4c
|
[
"MIT"
] | 5
|
2020-01-20T13:57:15.000Z
|
2021-03-27T14:14:44.000Z
|
openslides_backend/action/actions/projector/toggle.py
|
ostcar/openslides-backend
|
e6ceac497c37a1e3e7f408c6cfb29cf21d985b4c
|
[
"MIT"
] | 859
|
2020-01-11T22:58:37.000Z
|
2022-03-30T14:54:06.000Z
|
openslides_backend/action/actions/projector/toggle.py
|
ostcar/openslides-backend
|
e6ceac497c37a1e3e7f408c6cfb29cf21d985b4c
|
[
"MIT"
] | 16
|
2020-01-04T20:28:57.000Z
|
2022-02-10T12:06:54.000Z
|
from typing import Any, Dict, List
from ....models.models import Projection, Projector
from ....permissions.permissions import Permissions
from ....shared.filters import And, FilterOperator
from ....shared.patterns import Collection, FullQualifiedId, string_to_fqid
from ....shared.schema import required_id_schema
from ...generics.update import UpdateAction
from ...util.assert_belongs_to_meeting import assert_belongs_to_meeting
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from ...util.typing import ActionData
from ..projection.create import ProjectionCreate
from ..projection.delete import ProjectionDelete
from ..projection.update import ProjectionUpdate
@register_action("projector.toggle")
class ProjectorToggle(UpdateAction):
"""
Action to toggle projections.
"""
model = Projector()
schema = DefaultSchema(Projection()).get_default_schema(
title="Projector toggle stable schema",
required_properties=["content_object_id", "meeting_id"],
optional_properties=["options", "type", "stable"],
additional_required_fields={
"ids": {
"type": "array",
"items": required_id_schema,
"uniqueItems": True,
"minItems": 1,
},
},
)
permission = Permissions.Projector.CAN_MANAGE
def get_updated_instances(self, action_data: ActionData) -> ActionData:
for instance in action_data:
# check meeting ids from projector ids and content_object
meeting_id = instance["meeting_id"]
fqid_content_object = string_to_fqid(instance["content_object_id"])
assert_belongs_to_meeting(
self.datastore,
[fqid_content_object]
+ [
FullQualifiedId(Collection("projector"), id)
for id in instance["ids"]
],
meeting_id,
)
for projector_id in instance["ids"]:
stable = instance.get("stable", False)
filter_ = And(
FilterOperator("current_projector_id", "=", projector_id),
FilterOperator(
"content_object_id", "=", instance["content_object_id"]
),
FilterOperator("stable", "=", stable),
)
if instance.get("type"):
filter_ = And(
filter_, FilterOperator("type", "=", instance["type"])
)
result = self.datastore.filter(
Collection("projection"), filter_, ["id"]
)
if result:
projection_ids = [id_ for id_ in result]
if stable:
self.execute_other_action(
ProjectionDelete, [{"id": id_} for id_ in projection_ids]
)
else:
self.move_projections_to_history(projector_id, projection_ids)
else:
data: Dict[str, Any] = {
"current_projector_id": projector_id,
"stable": stable,
"type": instance.get("type"),
"content_object_id": instance["content_object_id"],
"options": instance.get("options"),
"meeting_id": meeting_id,
}
if not stable:
self.move_all_unstable_projections_to_history(
projector_id, meeting_id
)
yield {"id": projector_id, "scroll": 0}
self.execute_other_action(ProjectionCreate, [data])
def move_projections_to_history(
self, projector_id: int, projection_ids: List[int]
) -> None:
max_weight = self.get_max_projection_weight(projector_id)
for projection_id in projection_ids:
self.execute_other_action(
ProjectionUpdate,
[
{
"id": int(projection_id),
"current_projector_id": None,
"history_projector_id": projector_id,
"weight": max_weight + 1,
}
],
)
max_weight += 1
def get_max_projection_weight(self, projector_id: int) -> int:
filter_ = FilterOperator("history_projector_id", "=", projector_id)
maximum = self.datastore.max(Collection("projection"), filter_, "weight", "int")
if maximum is None:
maximum = 0
return maximum
def move_all_unstable_projections_to_history(
self, projector_id: int, meeting_id: int
) -> None:
filter_ = And(
FilterOperator("meeting_id", "=", meeting_id),
FilterOperator("current_projector_id", "=", projector_id),
FilterOperator("stable", "=", False),
)
result = self.datastore.filter(Collection("projection"), filter_, ["id"])
if result:
self.move_projections_to_history(projector_id, [int(id_) for id_ in result])
| 40.763359
| 88
| 0.548876
| 4,589
| 0.859363
| 2,492
| 0.466667
| 4,626
| 0.866292
| 0
| 0
| 727
| 0.136142
|
e9c3d07f73748d2980ffa18343832a605023692e
| 3,235
|
py
|
Python
|
sfftk_migrate/__init__.py
|
emdb-empiar/sfftk-migrate
|
fc8941082256456edb61fe22ecbf932f6258352a
|
[
"Apache-2.0"
] | null | null | null |
sfftk_migrate/__init__.py
|
emdb-empiar/sfftk-migrate
|
fc8941082256456edb61fe22ecbf932f6258352a
|
[
"Apache-2.0"
] | 2
|
2020-04-02T15:25:10.000Z
|
2020-04-03T14:32:12.000Z
|
sfftk_migrate/__init__.py
|
emdb-empiar/sfftk-migrate
|
fc8941082256456edb61fe22ecbf932f6258352a
|
[
"Apache-2.0"
] | null | null | null |
"""
sfftk-migrate
==============
This is a simple tool that lets users easily migrate older versions of EMDB-SFF files to the latest supported version.
It has only one dependency: `lxml` which effects part of the migrations.
Presently it only works with XML (.sff) EMDB-SFF files.
How does it work?
-----------------
Each migration consists of two components:
1. a Python module which implements a `migrate` function, and
2. an XSL stylesheet which defines how the `source` is transformed into the `target`
The `migrate` function in (1) has the following signature:
.. code-block:: python
def migrate(infile, outfile, stylesheet, args, encoding='utf-8', **params):
...
where `infile` and `outfile` are the names of the source and target files, `stylesheet` is the
XSL file, `args` is the argument namespace, `encoding` defines what encoding the outfile will
be writing in, and `**params` is a dictionary of any params specified in the XSL file.
Please reference https://www.w3schools.com/xml/xsl_intro.asp on how XSL works.
Migrations are effected using the `migrate.do_migration` function which has the following signature:
.. code-block:: python
def do_migration(args, value_list=None, version_list=VERSION_LIST):
...
Lessons learned in using `lxml`
---------------------------------
* etree.parse() takes XML files/file objects and returns an ElementTree
* etree.XML() takes a string and returns an Element regardless of the content
* etree.ElementTree(root_element) converts an element into an ElementTree
* etree.XSLT() takes an ElementTree or Element object and returns a transformer object;
a transformer object should take an ElementTree (but seems to also take Element objects)
* the result of a transformation is an _XSLTResultTree which behaves like an ElementTree but submits to str()
from: https://lxml.de/xpathxslt.html#xslt-result-objects
It is possible to pass parameters, in the form of XPath expressions, to the XSLT template:
>>> xslt_tree = etree.XML('''\
... <xsl:stylesheet version="1.0"
... xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
... <xsl:param name="a" />
... <xsl:template match="/">
... <foo><xsl:value-of select="$a" /></foo>
... </xsl:template>
... </xsl:stylesheet>''')
>>> transform = etree.XSLT(xslt_tree)
>>> doc_root = etree.XML('<a><b>Text</b></a>')
The parameters are passed as keyword parameters to the transform call. First, let's try passing in a simple integer expression:
>>> result = transform(doc_root, a="5")
>>> str(result)
'<?xml version="1.0"?>\n<foo>5</foo>\n'
"""
import os
SFFTK_MIGRATIONS_VERSION = '0.1.0b7'
VERSION_LIST = [
'0.7.0.dev0',
'0.8.0.dev1'
]
TEST_DATA_PATH = os.path.dirname(__file__)
XSL = os.path.join(TEST_DATA_PATH, 'data', 'xsl')
XML = os.path.join(TEST_DATA_PATH, 'data', 'xml')
MIGRATIONS_PACKAGE = 'sfftk_migrate.migrations'
STYLESHEETS_DIR = os.path.join(os.path.dirname(__file__), 'stylesheets')
ENDIANNESS = {
"little": "<",
"big": ">",
}
MODE = {
"int8": "b",
"uint8": "B",
"int16": "h",
"uint16": "H",
"int32": "i",
"uint32": "I",
"int64": "q",
"uint64": "Q",
"float32": "f",
"float64": "d"
}
| 29.678899
| 127
| 0.678825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,819
| 0.871406
|
e9c9d816a148fcaa90837526278207a3fb99ed20
| 802
|
py
|
Python
|
RTplzrunBlog/ThisandThat/1168.py
|
lkc263/Algorithm_Study_Python
|
5b9a74ecf7e864c861df2280a1bf4b393b0fcbca
|
[
"MIT"
] | null | null | null |
RTplzrunBlog/ThisandThat/1168.py
|
lkc263/Algorithm_Study_Python
|
5b9a74ecf7e864c861df2280a1bf4b393b0fcbca
|
[
"MIT"
] | null | null | null |
RTplzrunBlog/ThisandThat/1168.py
|
lkc263/Algorithm_Study_Python
|
5b9a74ecf7e864c861df2280a1bf4b393b0fcbca
|
[
"MIT"
] | null | null | null |
from sys import stdin as s
n, k = map(int, s.readline().split())
tree = [0] * 400005
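# BOJ 1168 (Josephus permutation): tree[node] counts how many people in the
# node's range are still in the circle; query() descends to the k-th
# remaining person and removes them on the way down (tree[node] -= 1).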
def init(node, s, e):
if s == e:
tree[node] = 1
return tree[node]
mid = (s + e) >> 1
tree[node] = init(2 * node, s, mid) + init(2 * node + 1, mid + 1, e)
return tree[node]
def query(node, s, e, k):
tree[node] -= 1
if s == e:
return s
mid = (s + e) >> 1
if tree[2 * node] >= k:
return query(2 * node, s, mid, k)
else:
return query(2 * node + 1, mid + 1, e, k - tree[2 * node])
init(1, 1, n)
x = k
print("<", end="")
for idx in range(0, n - 1):
print("%d, " % query(1, 1, n, x), end="")
x += k - 1
if x % tree[1] == 0:
x = tree[1]
else:
x %= tree[1]
print("%d" % query(1, 1, n, x), end="")
print(">")
| 19.095238
| 72
| 0.451372
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.027431
|
e9ca0883ad077b45b1ec76d740903d31001f2d85
| 609
|
py
|
Python
|
Python Advanced/File Handling/Exercise/directory_traversal.py
|
DonikaChervenkova/SoftUni
|
bff579c037ec48f39ed193b34bc3502a32e90732
|
[
"MIT"
] | 1
|
2022-03-16T10:23:04.000Z
|
2022-03-16T10:23:04.000Z
|
Python Advanced/Advanced/File Handling/Exercise/directory_traversal.py
|
IvanTodorovBG/SoftUni
|
7b667f6905d9f695ab1484efbb02b6715f6d569e
|
[
"MIT"
] | null | null | null |
Python Advanced/Advanced/File Handling/Exercise/directory_traversal.py
|
IvanTodorovBG/SoftUni
|
7b667f6905d9f695ab1484efbb02b6715f6d569e
|
[
"MIT"
] | 1
|
2021-12-04T12:30:57.000Z
|
2021-12-04T12:30:57.000Z
|
from os import listdir, path
def traverse_dir(current_path, files_by_ext):
for el in listdir(current_path):
if path.isdir(path.join(current_path, el)):
traverse_dir(path.join(current_path, el), files_by_ext)
else:
extension = el.split(".")[-1]
if extension not in files_by_ext:
files_by_ext[extension] = []
files_by_ext[extension].append(el)
files_by_ext = {}
traverse_dir(".", files_by_ext)
for ext, files in sorted(files_by_ext.items()):
print(f".{ext}")
for file in sorted(files):
print(f'---{file}')
| 27.681818
| 67
| 0.619048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 0.044335
|
e9cf0657e1bdcd66fa582464fcd1514ef88a229a
| 263
|
py
|
Python
|
server/api/serializers/letter.py
|
MePsyDuck/OAPS
|
ddbc69fbd11d1a02d37514d48f5dabe04cd23405
|
[
"MIT"
] | null | null | null |
server/api/serializers/letter.py
|
MePsyDuck/OAPS
|
ddbc69fbd11d1a02d37514d48f5dabe04cd23405
|
[
"MIT"
] | null | null | null |
server/api/serializers/letter.py
|
MePsyDuck/OAPS
|
ddbc69fbd11d1a02d37514d48f5dabe04cd23405
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from api.models import Letter
class LetterSerializer(serializers.ModelSerializer):
class Meta:
model = Letter
fields = ('id', 'subject', 'body', 'created', 'modified', 'sender', 'receiver', 'remarks')
| 26.3
| 98
| 0.692015
| 190
| 0.722433
| 0
| 0
| 0
| 0
| 0
| 0
| 65
| 0.247148
|
e9d02a79d3683b498fff07cac5f4adf3cfd6b53d
| 356
|
py
|
Python
|
applications/admin.py
|
Riphiphip/website
|
dc5bf64f24d5cf78661686af0281705f4d1d2576
|
[
"MIT"
] | null | null | null |
applications/admin.py
|
Riphiphip/website
|
dc5bf64f24d5cf78661686af0281705f4d1d2576
|
[
"MIT"
] | null | null | null |
applications/admin.py
|
Riphiphip/website
|
dc5bf64f24d5cf78661686af0281705f4d1d2576
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Application, ApplicationGroup, ApplicationPeriod
class ApplicationAdmin(admin.ModelAdmin):
list_display = [
'__str__',
]
admin.site.register(Application, ApplicationAdmin)
admin.site.register(ApplicationGroup, ApplicationAdmin)
admin.site.register(ApplicationPeriod, ApplicationAdmin)
| 29.666667
| 68
| 0.803371
| 87
| 0.244382
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0.025281
|
e9d6bbb09e450702b15b4ceb0a5be3a4e585501e
| 7,237
|
py
|
Python
|
flink_rest_client/v1/jars.py
|
frego-dev/flink-rest-client
|
e63e3bc4e6ec73a1a86adb3bfbc011087a5248bd
|
[
"MIT"
] | null | null | null |
flink_rest_client/v1/jars.py
|
frego-dev/flink-rest-client
|
e63e3bc4e6ec73a1a86adb3bfbc011087a5248bd
|
[
"MIT"
] | null | null | null |
flink_rest_client/v1/jars.py
|
frego-dev/flink-rest-client
|
e63e3bc4e6ec73a1a86adb3bfbc011087a5248bd
|
[
"MIT"
] | null | null | null |
import ntpath
import os
from flink_rest_client.common import _execute_rest_request, RestException
class JarsClient:
def __init__(self, prefix):
"""
Constructor.
Parameters
----------
prefix: str
REST API url prefix. It must contain the host, port pair.
"""
self.prefix = f"{prefix}/jars"
def all(self):
"""
Returns a list of all jars previously uploaded via '/jars/upload'.
Endpoint: [GET] /jars
Returns
-------
dict
List all the jars were previously uploaded.
"""
return _execute_rest_request(url=self.prefix)
def upload(self, path_to_jar):
"""
Uploads a jar to the cluster from the input path. The jar's name will be the original filename from the input
path.
Endpoint: [POST] /jars/upload
Parameters
----------
path_to_jar: str
Path to the jar file.
Returns
-------
dict
Result of jar upload.
"""
        filename = os.path.basename(path_to_jar)
        # use a context manager so the file handle is closed after the request
        with open(path_to_jar, "rb") as jar_file:
            files = {
                "file": (filename, jar_file, "application/x-java-archive")
            }
            return _execute_rest_request(
                url=f"{self.prefix}/upload", http_method="POST", files=files
            )
def get_plan(self, jar_id):
"""
Returns the dataflow plan of a job contained in a jar previously uploaded via '/jars/upload'.
Endpoint: [POST] /jars/:jarid/plan
Parameters
----------
jar_id: str
String value that identifies a jar. When uploading the jar a path is returned, where the filename is the ID.
            This value is equivalent to the `id` field in the list of uploaded jars.
Returns
-------
dict
Details of the jar_id's plan.
Raises
------
RestException
If the jar_id does not exist.
"""
return _execute_rest_request(
url=f"{self.prefix}/{jar_id}/plan", http_method="POST"
)["plan"]
def run(
self,
jar_id,
arguments=None,
entry_class=None,
parallelism=None,
savepoint_path=None,
allow_non_restored_state=None,
):
"""
Submits a job by running a jar previously uploaded via '/jars/upload'.
Endpoint: [POST] /jars/:jarid/run
Parameters
----------
jar_id: str
String value that identifies a jar. When uploading the jar a path is returned, where the filename is the ID.
This value is equivalent to the `id` field in the list of uploaded jars.
arguments: dict
(Optional) Dict of program arguments.
entry_class: str
(Optional) String value that specifies the fully qualified name of the entry point class. Overrides the
class defined in the jar file manifest.
parallelism: int
(Optional) Positive integer value that specifies the desired parallelism for the job.
savepoint_path: str
(Optional) String value that specifies the path of the savepoint to restore the job from.
allow_non_restored_state: bool
(Optional) Boolean value that specifies whether the job submission should be rejected if the savepoint
contains state that cannot be mapped back to the job.
Returns
-------
str
32-character hexadecimal string value that identifies a job.
Raises
------
RestException
If the jar_id does not exist.
"""
data = {}
if arguments is not None:
data["programArgs"] = " ".join([f"--{k} {v}" for k, v in arguments.items()])
if entry_class is not None:
data["entry-class"] = entry_class
if parallelism is not None:
if parallelism < 0:
raise RestException(
"get_plan method's parallelism parameter must be a positive integer."
)
data["parallelism"] = parallelism
if savepoint_path is not None:
data["savepointPath"] = savepoint_path
if allow_non_restored_state is not None:
data["allowNonRestoredState"] = allow_non_restored_state
return _execute_rest_request(
url=f"{self.prefix}/{jar_id}/run", http_method="POST", json=data
)["jobid"]
def upload_and_run(
self,
path_to_jar,
arguments=None,
entry_class=None,
parallelism=None,
savepoint_path=None,
allow_non_restored_state=None,
):
"""
Helper method to upload and start a jar in one method call.
Parameters
----------
path_to_jar: str
Path to the jar file.
arguments: dict
(Optional) Comma-separated list of program arguments.
entry_class: str
(Optional) String value that specifies the fully qualified name of the entry point class. Overrides the
class defined in the jar file manifest.
parallelism: int
(Optional) Positive integer value that specifies the desired parallelism for the job.
savepoint_path: str
(Optional) String value that specifies the path of the savepoint to restore the job from.
allow_non_restored_state: bool
(Optional) Boolean value that specifies whether the job submission should be rejected if the savepoint
contains state that cannot be mapped back to the job.
Returns
-------
str
32-character hexadecimal string value that identifies a job.
Raises
------
RestException
If an error occurred during the upload of jar file.
"""
result = self.upload(path_to_jar=path_to_jar)
if not result["status"] == "success":
raise RestException("Could not upload the input jar file.", result)
return self.run(
ntpath.basename(result["filename"]),
arguments=arguments,
entry_class=entry_class,
parallelism=parallelism,
savepoint_path=savepoint_path,
allow_non_restored_state=allow_non_restored_state,
)
def delete(self, jar_id):
"""
Deletes a jar previously uploaded via '/jars/upload'.
Endpoint: [DELETE] /jars/:jarid
Parameters
----------
jar_id: str
String value that identifies a jar. When uploading the jar a path is returned, where the filename is the ID.
This value is equivalent to the `id` field in the list of uploaded jars.
Returns
-------
bool
True, if jar_id has been successfully deleted, otherwise False.
Raises
------
RestException
If the jar_id does not exist.
"""
        res = _execute_rest_request(url=f"{self.prefix}/{jar_id}", http_method="DELETE")
        # an empty response body indicates successful deletion
        return len(res) == 0
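# Example usage (a minimal sketch; the host/port prefix is hypothetical):
#   client = JarsClient("http://localhost:8081/v1")
#   upload = client.upload("/path/to/job.jar")  # returns e.g. {"filename": ..., "status": "success"}
#   job_id = client.run(ntpath.basename(upload["filename"]),
#                       arguments={"input": "s3://bucket/in"})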
| 30.92735
| 120
| 0.579107
| 7,135
| 0.985906
| 0
| 0
| 0
| 0
| 0
| 0
| 4,934
| 0.681774
|
e9d8b5a198128b03e1b20f916a54a13a506755fc
| 112,962
|
py
|
Python
|
earthy/wordlist.py
|
alvations/earthy
|
29a4e01050a5fd46a0ad49cd0aedfd25cd8ba787
|
[
"Apache-2.0"
] | 6
|
2017-05-09T18:12:00.000Z
|
2017-12-22T16:26:35.000Z
|
earthy/wordlist.py
|
alvations/earthy
|
29a4e01050a5fd46a0ad49cd0aedfd25cd8ba787
|
[
"Apache-2.0"
] | 1
|
2017-05-09T05:40:32.000Z
|
2017-05-09T05:40:32.000Z
|
earthy/wordlist.py
|
alvations/earthy
|
29a4e01050a5fd46a0ad49cd0aedfd25cd8ba787
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import string
from charguana import get_charset
punctuations = set(list(string.punctuation) +
list(get_charset('Currency_Symbol')) +
list(get_charset('Close_Punctuation')) +
list(get_charset('Open_Punctuation')) +
list(get_charset('punctuation')))
# From https://raw.githubusercontent.com/6/stopwords-json/master/stopwords-all.json
stopwords = {"af":[u"'n",u"aan",u"af",u"al",u"as",u"baie",u"by",u"daar",u"dag",u"dat",u"die",u"dit",u"een",u"ek",u"en",u"gaan",u"gesê",u"haar",u"het",u"hom",u"hulle",u"hy",u"in",u"is",u"jou",u"jy",u"kan",u"kom",u"ma",u"maar",u"met",u"my",u"na",u"nie",u"om",u"ons",u"op",u"saam",u"sal",u"se",u"sien",u"so",u"sy",u"te",u"toe",u"uit",u"van",u"vir",u"was",u"wat",u"ʼn"],u"ha":[u"a",u"amma",u"ba",u"ban",u"ce",u"cikin",u"da",u"don",u"ga",u"in",u"ina",u"ita",u"ji",u"ka",u"ko",u"kuma",u"lokacin",u"ma",u"mai",u"na",u"ne",u"ni",u"sai",u"shi",u"su",u"suka",u"sun",u"ta",u"tafi",u"take",u"tana",u"wani",u"wannan",u"wata",u"ya",u"yake",u"yana",u"yi",u"za"],u"so":[u"aad",u"albaabkii",u"atabo",u"ay",u"ayaa",u"ayee",u"ayuu",u"dhan",u"hadana",u"in",u"inuu",u"isku",u"jiray",u"jirtay",u"ka",u"kale",u"kasoo",u"ku",u"kuu",u"lakin",u"markii",u"oo",u"si",u"soo",u"uga",u"ugu",u"uu",u"waa",u"waxa",u"waxuu"],u"st":[u"a",u"ba",u"bane",u"bona",u"e",u"ea",u"eaba",u"empa",u"ena",u"ha",u"hae",u"hape",u"ho",u"hore",u"ka",u"ke",u"la",u"le",u"li",u"me",u"mo",u"moo",u"ne",u"o",u"oa",u"re",u"sa",u"se",u"tloha",u"tsa",u"tse"],u"sw":[u"akasema",u"alikuwa",u"alisema",u"baada",u"basi",u"bila",u"cha",u"chini",u"hadi",u"hapo",u"hata",u"hivyo",u"hiyo",u"huku",u"huo",u"ili",u"ilikuwa",u"juu",u"kama",u"karibu",u"katika",u"kila",u"kima",u"kisha",u"kubwa",u"kutoka",u"kuwa",u"kwa",u"kwamba",u"kwenda",u"kwenye",u"la",u"lakini",u"mara",u"mdogo",u"mimi",u"mkubwa",u"mmoja",u"moja",u"muda",u"mwenye",u"na",u"naye",u"ndani",u"ng",u"ni",u"nini",u"nonkungu",u"pamoja",u"pia",u"sana",u"sasa",u"sauti",u"tafadhali",u"tena",u"tu",u"vile",u"wa",u"wakati",u"wake",u"walikuwa",u"wao",u"watu",u"wengine",u"wote",u"ya",u"yake",u"yangu",u"yao",u"yeye",u"yule",u"za",u"zaidi",u"zake"],u"yo":[u"a",u"an",u"bá",u"bí",u"bẹ̀rẹ̀",u"fún",u"fẹ́",u"gbogbo",u"inú",u"jù",u"jẹ",u"jẹ́",u"kan",u"kì",u"kí",u"kò",u"láti",u"lè",u"lọ",u"mi",u"mo",u"máa",u"mọ̀",u"ni",u"náà",u"ní",u"nígbà",u"nítorí",u"nǹkan",u"o",u"padà",u"pé",u"púpọ̀",u"pẹ̀lú",u"rẹ̀",u"sì",u"sí",u"sínú",u"ṣ",u"ti",u"tí",u"wà",u"wá",u"wọn",u"wọ́n",u"yìí",u"àti",u"àwọn",u"é",u"í",u"òun",u"ó",u"ń",u"ńlá",u"ṣe",u"ṣé",u"ṣùgbọ́n",u"ẹmọ́",u"ọjọ́",u"ọ̀pọ̀lọpọ̀"],u"zu":[u"futhi",u"kahle",u"kakhulu",u"kanye",u"khona",u"kodwa",u"kungani",u"kusho",u"la",u"lakhe",u"lapho",u"mina",u"ngesikhathi",u"nje",u"phansi",u"phezulu",u"u",u"ukuba",u"ukuthi",u"ukuze",u"uma",u"wahamba",u"wakhe",u"wami",u"wase",u"wathi",u"yakhe",u"zakhe",u"zonke"],u"da":[u"af",u"alle",u"andet",u"andre",u"at",u"begge",u"da",u"de",u"den",u"denne",u"der",u"deres",u"det",u"dette",u"dig",u"din",u"dog",u"du",u"ej",u"eller",u"en",u"end",u"ene",u"eneste",u"enhver",u"et",u"fem",u"fire",u"flere",u"fleste",u"for",u"fordi",u"forrige",u"fra",u"få",u"før",u"god",u"han",u"hans",u"har",u"hendes",u"her",u"hun",u"hvad",u"hvem",u"hver",u"hvilken",u"hvis",u"hvor",u"hvordan",u"hvorfor",u"hvornår",u"i",u"ikke",u"ind",u"ingen",u"intet",u"jeg",u"jeres",u"kan",u"kom",u"kommer",u"lav",u"lidt",u"lille",u"man",u"mand",u"mange",u"med",u"meget",u"men",u"mens",u"mere",u"mig",u"ned",u"ni",u"nogen",u"noget",u"ny",u"nyt",u"nær",u"næste",u"næsten",u"og",u"op",u"otte",u"over",u"på",u"se",u"seks",u"ses",u"som",u"stor",u"store",u"syv",u"ti",u"til",u"to",u"tre",u"ud",u"var"],u"de":[u"Ernst",u"Ordnung",u"Schluss",u"a",u"ab",u"aber",u"ach",u"acht",u"achte",u"achten",u"achter",u"achtes",u"ag",u"alle",u"allein",u"allem",u"allen",u"aller",u"allerdings",u"alles",u"allgemeinen",u"als",u"also",u"am",u"an",u"andere",u"anderen",u"andern",u"anders",u"au",u"auch",u"auf",u"aus",u"ausser",u"ausserdem",u"außer",u"a
ußerdem",u"b",u"bald",u"bei",u"beide",u"beiden",u"beim",u"beispiel",u"bekannt",u"bereits",u"besonders",u"besser",u"besten",u"bin",u"bis",u"bisher",u"bist",u"c",u"d",u"d.h",u"da",u"dabei",u"dadurch",u"dafür",u"dagegen",u"daher",u"dahin",u"dahinter",u"damals",u"damit",u"danach",u"daneben",u"dank",u"dann",u"daran",u"darauf",u"daraus",u"darf",u"darfst",u"darin",u"darum",u"darunter",u"darüber",u"das",u"dasein",u"daselbst",u"dass",u"dasselbe",u"davon",u"davor",u"dazu",u"dazwischen",u"daß",u"dein",u"deine",u"deinem",u"deiner",u"dem",u"dementsprechend",u"demgegenüber",u"demgemäss",u"demgemäß",u"demselben",u"demzufolge",u"den",u"denen",u"denn",u"denselben",u"der",u"deren",u"derjenige",u"derjenigen",u"dermassen",u"dermaßen",u"derselbe",u"derselben",u"des",u"deshalb",u"desselben",u"dessen",u"deswegen",u"dich",u"die",u"diejenige",u"diejenigen",u"dies",u"diese",u"dieselbe",u"dieselben",u"diesem",u"diesen",u"dieser",u"dieses",u"dir",u"doch",u"dort",u"drei",u"drin",u"dritte",u"dritten",u"dritter",u"drittes",u"du",u"durch",u"durchaus",u"durfte",u"durften",u"dürfen",u"dürft",u"e",u"eben",u"ebenso",u"ehrlich",u"ei",u"ei,u",u"eigen",u"eigene",u"eigenen",u"eigener",u"eigenes",u"ein",u"einander",u"eine",u"einem",u"einen",u"einer",u"eines",u"einige",u"einigen",u"einiger",u"einiges",u"einmal",u"eins",u"elf",u"en",u"ende",u"endlich",u"entweder",u"er",u"erst",u"erste",u"ersten",u"erster",u"erstes",u"es",u"etwa",u"etwas",u"euch",u"euer",u"eure",u"f",u"folgende",u"früher",u"fünf",u"fünfte",u"fünften",u"fünfter",u"fünftes",u"für",u"g",u"gab",u"ganz",u"ganze",u"ganzen",u"ganzer",u"ganzes",u"gar",u"gedurft",u"gegen",u"gegenüber",u"gehabt",u"gehen",u"geht",u"gekannt",u"gekonnt",u"gemacht",u"gemocht",u"gemusst",u"genug",u"gerade",u"gern",u"gesagt",u"geschweige",u"gewesen",u"gewollt",u"geworden",u"gibt",u"ging",u"gleich",u"gott",u"gross",u"grosse",u"grossen",u"grosser",u"grosses",u"groß",u"große",u"großen",u"großer",u"großes",u"gut",u"gute",u"guter",u"gutes",u"h",u"habe",u"haben",u"habt",u"hast",u"hat",u"hatte",u"hatten",u"hattest",u"hattet",u"heisst",u"her",u"heute",u"hier",u"hin",u"hinter",u"hoch",u"hätte",u"hätten",u"i",u"ich",u"ihm",u"ihn",u"ihnen",u"ihr",u"ihre",u"ihrem",u"ihren",u"ihrer",u"ihres",u"im",u"immer",u"in",u"indem",u"infolgedessen",u"ins",u"irgend",u"ist",u"j",u"ja",u"jahr",u"jahre",u"jahren",u"je",u"jede",u"jedem",u"jeden",u"jeder",u"jedermann",u"jedermanns",u"jedes",u"jedoch",u"jemand",u"jemandem",u"jemanden",u"jene",u"jenem",u"jenen",u"jener",u"jenes",u"jetzt",u"k",u"kam",u"kann",u"kannst",u"kaum",u"kein",u"keine",u"keinem",u"keinen",u"keiner",u"kleine",u"kleinen",u"kleiner",u"kleines",u"kommen",u"kommt",u"konnte",u"konnten",u"kurz",u"können",u"könnt",u"könnte",u"l",u"lang",u"lange",u"leicht",u"leide",u"lieber",u"los",u"m",u"machen",u"macht",u"machte",u"mag",u"magst",u"mahn",u"mal",u"man",u"manche",u"manchem",u"manchen",u"mancher",u"manches",u"mann",u"mehr",u"mein",u"meine",u"meinem",u"meinen",u"meiner",u"meines",u"mensch",u"menschen",u"mich",u"mir",u"mit",u"mittel",u"mochte",u"mochten",u"morgen",u"muss",u"musst",u"musste",u"mussten",u"muß",u"mußt",u"möchte",u"mögen",u"möglich",u"mögt",u"müssen",u"müsst",u"müßt",u"n",u"na",u"nach",u"nachdem",u"nahm",u"natürlich",u"neben",u"nein",u"neue",u"neuen",u"neun",u"neunte",u"neunten",u"neunter",u"neuntes",u"nicht",u"nichts",u"nie",u"niemand",u"niemandem",u"niemanden",u"noch",u"nun",u"nur",u"o",u"ob",u"oben",u"oder",u"offen",u"oft",u"ohne",u"p",u"q",u"r",u"recht",u"rechte",u"rechten",u"rechter",u"rechtes",u"richtig",u"rund",u"s",u"sa",u"sache",u"sagt",u"sagte",u"s
ah",u"satt",u"schlecht",u"schon",u"sechs",u"sechste",u"sechsten",u"sechster",u"sechstes",u"sehr",u"sei",u"seid",u"seien",u"sein",u"seine",u"seinem",u"seinen",u"seiner",u"seines",u"seit",u"seitdem",u"selbst",u"sich",u"sie",u"sieben",u"siebente",u"siebenten",u"siebenter",u"siebentes",u"sind",u"so",u"solang",u"solche",u"solchem",u"solchen",u"solcher",u"solches",u"soll",u"sollen",u"sollst",u"sollt",u"sollte",u"sollten",u"sondern",u"sonst",u"soweit",u"sowie",u"später",u"startseite",u"statt",u"steht",u"suche",u"t",u"tag",u"tage",u"tagen",u"tat",u"teil",u"tel",u"tritt",u"trotzdem",u"tun",u"u",u"uhr",u"um",u"und",u"und?",u"uns",u"unser",u"unsere",u"unserer",u"unter",u"v",u"vergangenen",u"viel",u"viele",u"vielem",u"vielen",u"vielleicht",u"vier",u"vierte",u"vierten",u"vierter",u"viertes",u"vom",u"von",u"vor",u"w",u"wahr?",u"wann",u"war",u"waren",u"wart",u"warum",u"was",u"wegen",u"weil",u"weit",u"weiter",u"weitere",u"weiteren",u"weiteres",u"welche",u"welchem",u"welchen",u"welcher",u"welches",u"wem",u"wen",u"wenig",u"wenige",u"weniger",u"weniges",u"wenigstens",u"wenn",u"wer",u"werde",u"werden",u"werdet",u"weshalb",u"wessen",u"wie",u"wieder",u"wieso",u"will",u"willst",u"wir",u"wird",u"wirklich",u"wirst",u"wissen",u"wo",u"wohl",u"wollen",u"wollt",u"wollte",u"wollten",u"worden",u"wurde",u"wurden",u"während",u"währenddem",u"währenddessen",u"wäre",u"würde",u"würden",u"x",u"y",u"z",u"z.b",u"zehn",u"zehnte",u"zehnten",u"zehnter",u"zehntes",u"zeit",u"zu",u"zuerst",u"zugleich",u"zum",u"zunächst",u"zur",u"zurück",u"zusammen",u"zwanzig",u"zwar",u"zwei",u"zweite",u"zweiten",u"zweiter",u"zweites",u"zwischen",u"zwölf",u"über",u"überhaupt",u"übrigens"],u"es":[u"a",u"actualmente",u"acuerdo",u"adelante",u"ademas",u"además",u"adrede",u"afirmó",u"agregó",u"ahi",u"ahora",u"ahí",u"al",u"algo",u"alguna",u"algunas",u"alguno",u"algunos",u"algún",u"alli",u"allí",u"alrededor",u"ambos",u"ampleamos",u"antano",u"antaño",u"ante",u"anterior",u"antes",u"apenas",u"aproximadamente",u"aquel",u"aquella",u"aquellas",u"aquello",u"aquellos",u"aqui",u"aquél",u"aquélla",u"aquéllas",u"aquéllos",u"aquí",u"arriba",u"arribaabajo",u"aseguró",u"asi",u"así",u"atras",u"aun",u"aunque",u"ayer",u"añadió",u"aún",u"b",u"bajo",u"bastante",u"bien",u"breve",u"buen",u"buena",u"buenas",u"bueno",u"buenos",u"c",u"cada",u"casi",u"cerca",u"cierta",u"ciertas",u"cierto",u"ciertos",u"cinco",u"claro",u"comentó",u"como",u"con",u"conmigo",u"conocer",u"conseguimos",u"conseguir",u"considera",u"consideró",u"consigo",u"consigue",u"consiguen",u"consigues",u"contigo",u"contra",u"cosas",u"creo",u"cual",u"cuales",u"cualquier",u"cuando",u"cuanta",u"cuantas",u"cuanto",u"cuantos",u"cuatro",u"cuenta",u"cuál",u"cuáles",u"cuándo",u"cuánta",u"cuántas",u"cuánto",u"cuántos",u"cómo",u"d",u"da",u"dado",u"dan",u"dar",u"de",u"debajo",u"debe",u"deben",u"debido",u"decir",u"dejó",u"del",u"delante",u"demasiado",u"demás",u"dentro",u"deprisa",u"desde",u"despacio",u"despues",u"después",u"detras",u"detrás",u"dia",u"dias",u"dice",u"dicen",u"dicho",u"dieron",u"diferente",u"diferentes",u"dijeron",u"dijo",u"dio",u"donde",u"dos",u"durante",u"día",u"días",u"dónde",u"e",u"ejemplo",u"el",u"ella",u"ellas",u"ello",u"ellos",u"embargo",u"empleais",u"emplean",u"emplear",u"empleas",u"empleo",u"en",u"encima",u"encuentra",u"enfrente",u"enseguida",u"entonces",u"entre",u"era",u"eramos",u"eran",u"eras",u"eres",u"es",u"esa",u"esas",u"ese",u"eso",u"esos",u"esta",u"estaba",u"estaban",u"estado",u"estados",u"estais",u"estamos",u"estan",u"estar",u"estará",u"estas",u"este",u"esto",u"estos",u"estoy",u"estuvo",u"está",u"están
",u"ex",u"excepto",u"existe",u"existen",u"explicó",u"expresó",u"f",u"fin",u"final",u"fue",u"fuera",u"fueron",u"fui",u"fuimos",u"g",u"general",u"gran",u"grandes",u"gueno",u"h",u"ha",u"haber",u"habia",u"habla",u"hablan",u"habrá",u"había",u"habían",u"hace",u"haceis",u"hacemos",u"hacen",u"hacer",u"hacerlo",u"haces",u"hacia",u"haciendo",u"hago",u"han",u"hasta",u"hay",u"haya",u"he",u"hecho",u"hemos",u"hicieron",u"hizo",u"horas",u"hoy",u"hubo",u"i",u"igual",u"incluso",u"indicó",u"informo",u"informó",u"intenta",u"intentais",u"intentamos",u"intentan",u"intentar",u"intentas",u"intento",u"ir",u"j",u"junto",u"k",u"l",u"la",u"lado",u"largo",u"las",u"le",u"lejos",u"les",u"llegó",u"lleva",u"llevar",u"lo",u"los",u"luego",u"lugar",u"m",u"mal",u"manera",u"manifestó",u"mas",u"mayor",u"me",u"mediante",u"medio",u"mejor",u"mencionó",u"menos",u"menudo",u"mi",u"mia",u"mias",u"mientras",u"mio",u"mios",u"mis",u"misma",u"mismas",u"mismo",u"mismos",u"modo",u"momento",u"mucha",u"muchas",u"mucho",u"muchos",u"muy",u"más",u"mí",u"mía",u"mías",u"mío",u"míos",u"n",u"nada",u"nadie",u"ni",u"ninguna",u"ningunas",u"ninguno",u"ningunos",u"ningún",u"no",u"nos",u"nosotras",u"nosotros",u"nuestra",u"nuestras",u"nuestro",u"nuestros",u"nueva",u"nuevas",u"nuevo",u"nuevos",u"nunca",u"o",u"ocho",u"os",u"otra",u"otras",u"otro",u"otros",u"p",u"pais",u"para",u"parece",u"parte",u"partir",u"pasada",u"pasado",u"paìs",u"peor",u"pero",u"pesar",u"poca",u"pocas",u"poco",u"pocos",u"podeis",u"podemos",u"poder",u"podria",u"podriais",u"podriamos",u"podrian",u"podrias",u"podrá",u"podrán",u"podría",u"podrían",u"poner",u"por",u"porque",u"posible",u"primer",u"primera",u"primero",u"primeros",u"principalmente",u"pronto",u"propia",u"propias",u"propio",u"propios",u"proximo",u"próximo",u"próximos",u"pudo",u"pueda",u"puede",u"pueden",u"puedo",u"pues",u"q",u"qeu",u"que",u"quedó",u"queremos",u"quien",u"quienes",u"quiere",u"quiza",u"quizas",u"quizá",u"quizás",u"quién",u"quiénes",u"qué",u"r",u"raras",u"realizado",u"realizar",u"realizó",u"repente",u"respecto",u"s",u"sabe",u"sabeis",u"sabemos",u"saben",u"saber",u"sabes",u"salvo",u"se",u"sea",u"sean",u"segun",u"segunda",u"segundo",u"según",u"seis",u"ser",u"sera",u"será",u"serán",u"sería",u"señaló",u"si",u"sido",u"siempre",u"siendo",u"siete",u"sigue",u"siguiente",u"sin",u"sino",u"sobre",u"sois",u"sola",u"solamente",u"solas",u"solo",u"solos",u"somos",u"son",u"soy",u"soyos",u"su",u"supuesto",u"sus",u"suya",u"suyas",u"suyo",u"sé",u"sí",u"sólo",u"t",u"tal",u"tambien",u"también",u"tampoco",u"tan",u"tanto",u"tarde",u"te",u"temprano",u"tendrá",u"tendrán",u"teneis",u"tenemos",u"tener",u"tenga",u"tengo",u"tenido",u"tenía",u"tercera",u"ti",u"tiempo",u"tiene",u"tienen",u"toda",u"todas",u"todavia",u"todavía",u"todo",u"todos",u"total",u"trabaja",u"trabajais",u"trabajamos",u"trabajan",u"trabajar",u"trabajas",u"trabajo",u"tras",u"trata",u"través",u"tres",u"tu",u"tus",u"tuvo",u"tuya",u"tuyas",u"tuyo",u"tuyos",u"tú",u"u",u"ultimo",u"un",u"una",u"unas",u"uno",u"unos",u"usa",u"usais",u"usamos",u"usan",u"usar",u"usas",u"uso",u"usted",u"ustedes",u"v",u"va",u"vais",u"valor",u"vamos",u"van",u"varias",u"varios",u"vaya",u"veces",u"ver",u"verdad",u"verdadera",u"verdadero",u"vez",u"vosotras",u"vosotros",u"voy",u"vuestra",u"vuestras",u"vuestro",u"vuestros",u"w",u"x",u"y",u"ya",u"yo",u"z",u"él",u"ésa",u"ésas",u"ése",u"ésos",u"ésta",u"éstas",u"éste",u"éstos",u"última",u"últimas",u"último",u"últimos"],u"et":[u"aga",u"ei",u"et",u"ja",u"jah",u"kas",u"kui",u"kõik",u"ma",u"me",u"mida",u"midagi",u"mind",u"minu",u"mis",u"mu",u"mul",u"mulle",u"nad",u"nii",
u"oled",u"olen",u"oli",u"oma",u"on",u"pole",u"sa",u"seda",u"see",u"selle",u"siin",u"siis",u"ta",u"te",u"ära"],u"fi":[u"aiemmin",u"aika",u"aikaa",u"aikaan",u"aikaisemmin",u"aikaisin",u"aikajen",u"aikana",u"aikoina",u"aikoo",u"aikovat",u"aina",u"ainakaan",u"ainakin",u"ainoa",u"ainoat",u"aiomme",u"aion",u"aiotte",u"aist",u"aivan",u"ajan",u"alas",u"alemmas",u"alkuisin",u"alkuun",u"alla",u"alle",u"aloitamme",u"aloitan",u"aloitat",u"aloitatte",u"aloitattivat",u"aloitettava",u"aloitettevaksi",u"aloitettu",u"aloitimme",u"aloitin",u"aloitit",u"aloititte",u"aloittaa",u"aloittamatta",u"aloitti",u"aloittivat",u"alta",u"aluksi",u"alussa",u"alusta",u"annettavaksi",u"annetteva",u"annettu",u"ansiosta",u"antaa",u"antamatta",u"antoi",u"aoua",u"apu",u"asia",u"asiaa",u"asian",u"asiasta",u"asiat",u"asioiden",u"asioihin",u"asioita",u"asti",u"avuksi",u"avulla",u"avun",u"avutta",u"edelle",u"edelleen",u"edellä",u"edeltä",u"edemmäs",u"edes",u"edessä",u"edestä",u"ehkä",u"ei",u"eikä",u"eilen",u"eivät",u"eli",u"ellei",u"elleivät",u"ellemme",u"ellen",u"ellet",u"ellette",u"emme",u"en",u"enemmän",u"eniten",u"ennen",u"ensi",u"ensimmäinen",u"ensimmäiseksi",u"ensimmäisen",u"ensimmäisenä",u"ensimmäiset",u"ensimmäisiksi",u"ensimmäisinä",u"ensimmäisiä",u"ensimmäistä",u"ensin",u"entinen",u"entisen",u"entisiä",u"entisten",u"entistä",u"enää",u"eri",u"erittäin",u"erityisesti",u"eräiden",u"eräs",u"eräät",u"esi",u"esiin",u"esillä",u"esimerkiksi",u"et",u"eteen",u"etenkin",u"etessa",u"ette",u"ettei",u"että",u"haikki",u"halua",u"haluaa",u"haluamatta",u"haluamme",u"haluan",u"haluat",u"haluatte",u"haluavat",u"halunnut",u"halusi",u"halusimme",u"halusin",u"halusit",u"halusitte",u"halusivat",u"halutessa",u"haluton",u"he",u"hei",u"heidän",u"heihin",u"heille",u"heiltä",u"heissä",u"heistä",u"heitä",u"helposti",u"heti",u"hetkellä",u"hieman",u"hitaasti",u"hoikein",u"huolimatta",u"huomenna",u"hyvien",u"hyviin",u"hyviksi",u"hyville",u"hyviltä",u"hyvin",u"hyvinä",u"hyvissä",u"hyvistä",u"hyviä",u"hyvä",u"hyvät",u"hyvää",u"hän",u"häneen",u"hänelle",u"hänellä",u"häneltä",u"hänen",u"hänessä",u"hänestä",u"hänet",u"ihan",u"ilman",u"ilmeisesti",u"itse",u"itsensä",u"itseään",u"ja",u"jo",u"johon",u"joiden",u"joihin",u"joiksi",u"joilla",u"joille",u"joilta",u"joissa",u"joista",u"joita",u"joka",u"jokainen",u"jokin",u"joko",u"joku",u"jolla",u"jolle",u"jolloin",u"jolta",u"jompikumpi",u"jonka",u"jonkin",u"jonne",u"joo",u"jopa",u"jos",u"joskus",u"jossa",u"josta",u"jota",u"jotain",u"joten",u"jotenkin",u"jotenkuten",u"jotka",u"jotta",u"jouduimme",u"jouduin",u"jouduit",u"jouduitte",u"joudumme",u"joudun",u"joudutte",u"joukkoon",u"joukossa",u"joukosta",u"joutua",u"joutui",u"joutuivat",u"joutumaan",u"joutuu",u"joutuvat",u"juuri",u"jälkeen",u"jälleen",u"jää",u"kahdeksan",u"kahdeksannen",u"kahdella",u"kahdelle",u"kahdelta",u"kahden",u"kahdessa",u"kahdesta",u"kahta",u"kahteen",u"kai",u"kaiken",u"kaikille",u"kaikilta",u"kaikkea",u"kaikki",u"kaikkia",u"kaikkiaan",u"kaikkialla",u"kaikkialle",u"kaikkialta",u"kaikkien",u"kaikkin",u"kaksi",u"kannalta",u"kannattaa",u"kanssa",u"kanssaan",u"kanssamme",u"kanssani",u"kanssanne",u"kanssasi",u"kauan",u"kauemmas",u"kaukana",u"kautta",u"kehen",u"keiden",u"keihin",u"keiksi",u"keille",u"keillä",u"keiltä",u"keinä",u"keissä",u"keistä",u"keitten",u"keittä",u"keitä",u"keneen",u"keneksi",u"kenelle",u"kenellä",u"keneltä",u"kenen",u"kenenä",u"kenessä",u"kenestä",u"kenet",u"kenettä",u"kennessästä",u"kenties",u"kerran",u"kerta",u"kertaa",u"keskellä",u"kesken",u"keskimäärin",u"ketkä",u"ketä",u"kiitos",u"kohti",u"koko",u"kokonaan",u"kolmas",u"kolme",u
"kolmen",u"kolmesti",u"koska",u"koskaan",u"kovin",u"kuin",u"kuinka",u"kuinkan",u"kuitenkaan",u"kuitenkin",u"kuka",u"kukaan",u"kukin",u"kukka",u"kumpainen",u"kumpainenkaan",u"kumpi",u"kumpikaan",u"kumpikin",u"kun",u"kuten",u"kuuden",u"kuusi",u"kuutta",u"kylliksi",u"kyllä",u"kymmenen",u"kyse",u"liian",u"liki",u"lisäksi",u"lisää",u"lla",u"luo",u"luona",u"lähekkäin",u"lähelle",u"lähellä",u"läheltä",u"lähemmäs",u"lähes",u"lähinnä",u"lähtien",u"läpi",u"mahdollisimman",u"mahdollista",u"me",u"meidän",u"meille",u"meillä",u"melkein",u"melko",u"menee",u"meneet",u"menemme",u"menen",u"menet",u"menette",u"menevät",u"meni",u"menimme",u"menin",u"menit",u"menivät",u"mennessä",u"mennyt",u"menossa",u"mihin",u"mikin",u"miksi",u"mikä",u"mikäli",u"mikään",u"milloin",u"milloinkan",u"minne",u"minun",u"minut",u"minä",u"missä",u"mistä",u"miten",u"mitä",u"mitään",u"moi",u"molemmat",u"mones",u"monesti",u"monet",u"moni",u"moniaalla",u"moniaalle",u"moniaalta",u"monta",u"muassa",u"muiden",u"muita",u"muka",u"mukaan",u"mukaansa",u"mukana",u"mutta",u"muu",u"muualla",u"muualle",u"muualta",u"muuanne",u"muulloin",u"muun",u"muut",u"muuta",u"muutama",u"muutaman",u"muuten",u"myöhemmin",u"myös",u"myöskin",u"myöskään",u"myötä",u"ne",u"neljä",u"neljän",u"neljää",u"niiden",u"niin",u"niistä",u"niitä",u"noin",u"nopeammin",u"nopeasti",u"nopeiten",u"nro",u"nuo",u"nyt",u"näiden",u"näin",u"näissä",u"näissähin",u"näissälle",u"näissältä",u"näissästä",u"näitä",u"nämä",u"ohi",u"oikea",u"oikealla",u"oikein",u"ole",u"olemme",u"olen",u"olet",u"olette",u"oleva",u"olevan",u"olevat",u"oli",u"olimme",u"olin",u"olisi",u"olisimme",u"olisin",u"olisit",u"olisitte",u"olisivat",u"olit",u"olitte",u"olivat",u"olla",u"olleet",u"olli",u"ollut",u"oma",u"omaa",u"omaan",u"omaksi",u"omalle",u"omalta",u"oman",u"omassa",u"omat",u"omia",u"omien",u"omiin",u"omiksi",u"omille",u"omilta",u"omissa",u"omista",u"on",u"onkin",u"onko",u"ovat",u"paikoittain",u"paitsi",u"pakosti",u"paljon",u"paremmin",u"parempi",u"parhaillaan",u"parhaiten",u"perusteella",u"peräti",u"pian",u"pieneen",u"pieneksi",u"pienelle",u"pienellä",u"pieneltä",u"pienempi",u"pienestä",u"pieni",u"pienin",u"puolesta",u"puolestaan",u"päälle",u"runsaasti",u"saakka",u"sadam",u"sama",u"samaa",u"samaan",u"samalla",u"samallalta",u"samallassa",u"samallasta",u"saman",u"samat",u"samoin",u"sata",u"sataa",u"satojen",u"se",u"seitsemän",u"sekä",u"sen",u"seuraavat",u"siellä",u"sieltä",u"siihen",u"siinä",u"siis",u"siitä",u"sijaan",u"siksi",u"silloin",u"sillä",u"silti",u"sinne",u"sinua",u"sinulle",u"sinulta",u"sinun",u"sinussa",u"sinusta",u"sinut",u"sinä",u"sisäkkäin",u"sisällä",u"siten",u"sitten",u"sitä",u"ssa",u"sta",u"suoraan",u"suuntaan",u"suuren",u"suuret",u"suuri",u"suuria",u"suurin",u"suurten",u"taa",u"taas",u"taemmas",u"tahansa",u"tai",u"takaa",u"takaisin",u"takana",u"takia",u"tapauksessa",u"tarpeeksi",u"tavalla",u"tavoitteena",u"te",u"tietysti",u"todella",u"toinen",u"toisaalla",u"toisaalle",u"toisaalta",u"toiseen",u"toiseksi",u"toisella",u"toiselle",u"toiselta",u"toisemme",u"toisen",u"toisensa",u"toisessa",u"toisesta",u"toista",u"toistaiseksi",u"toki",u"tosin",u"tuhannen",u"tuhat",u"tule",u"tulee",u"tulemme",u"tulen",u"tulet",u"tulette",u"tulevat",u"tulimme",u"tulin",u"tulisi",u"tulisimme",u"tulisin",u"tulisit",u"tulisitte",u"tulisivat",u"tulit",u"tulitte",u"tulivat",u"tulla",u"tulleet",u"tullut",u"tuntuu",u"tuo",u"tuolla",u"tuolloin",u"tuolta",u"tuonne",u"tuskin",u"tykö",u"tähän",u"tällä",u"tällöin",u"tämä",u"tämän",u"tänne",u"tänä",u"tänään",u"tässä",u"tästä",u"täten",u"tätä",u"täysin",u"täytyvät",u"täytyy",u"täällä
",u"täältä",u"ulkopuolella",u"usea",u"useasti",u"useimmiten",u"usein",u"useita",u"uudeksi",u"uudelleen",u"uuden",u"uudet",u"uusi",u"uusia",u"uusien",u"uusinta",u"uuteen",u"uutta",u"vaan",u"vahemmän",u"vai",u"vaiheessa",u"vaikea",u"vaikean",u"vaikeat",u"vaikeilla",u"vaikeille",u"vaikeilta",u"vaikeissa",u"vaikeista",u"vaikka",u"vain",u"varmasti",u"varsin",u"varsinkin",u"varten",u"vasen",u"vasenmalla",u"vasta",u"vastaan",u"vastakkain",u"vastan",u"verran",u"vielä",u"vierekkäin",u"vieressä",u"vieri",u"viiden",u"viime",u"viimeinen",u"viimeisen",u"viimeksi",u"viisi",u"voi",u"voidaan",u"voimme",u"voin",u"voisi",u"voit",u"voitte",u"voivat",u"vuoden",u"vuoksi",u"vuosi",u"vuosien",u"vuosina",u"vuotta",u"vähemmän",u"vähintään",u"vähiten",u"vähän",u"välillä",u"yhdeksän",u"yhden",u"yhdessä",u"yhteen",u"yhteensä",u"yhteydessä",u"yhteyteen",u"yhtä",u"yhtäälle",u"yhtäällä",u"yhtäältä",u"yhtään",u"yhä",u"yksi",u"yksin",u"yksittäin",u"yleensä",u"ylemmäs",u"yli",u"ylös",u"ympäri",u"älköön",u"älä"],u"fr":[u"a",u"abord",u"absolument",u"afin",u"ah",u"ai",u"aie",u"ailleurs",u"ainsi",u"ait",u"allaient",u"allo",u"allons",u"allô",u"alors",u"anterieur",u"anterieure",u"anterieures",u"apres",u"après",u"as",u"assez",u"attendu",u"au",u"aucun",u"aucune",u"aujourd",u"aujourd'hui",u"aupres",u"auquel",u"aura",u"auraient",u"aurait",u"auront",u"aussi",u"autre",u"autrefois",u"autrement",u"autres",u"autrui",u"aux",u"auxquelles",u"auxquels",u"avaient",u"avais",u"avait",u"avant",u"avec",u"avoir",u"avons",u"ayant",u"b",u"bah",u"bas",u"basee",u"bat",u"beau",u"beaucoup",u"bien",u"bigre",u"boum",u"bravo",u"brrr",u"c",u"car",u"ce",u"ceci",u"cela",u"celle",u"celle-ci",u"celle-là",u"celles",u"celles-ci",u"celles-là",u"celui",u"celui-ci",u"celui-là",u"cent",u"cependant",u"certain",u"certaine",u"certaines",u"certains",u"certes",u"ces",u"cet",u"cette",u"ceux",u"ceux-ci",u"ceux-là",u"chacun",u"chacune",u"chaque",u"cher",u"chers",u"chez",u"chiche",u"chut",u"chère",u"chères",u"ci",u"cinq",u"cinquantaine",u"cinquante",u"cinquantième",u"cinquième",u"clac",u"clic",u"combien",u"comme",u"comment",u"comparable",u"comparables",u"compris",u"concernant",u"contre",u"couic",u"crac",u"d",u"da",u"dans",u"de",u"debout",u"dedans",u"dehors",u"deja",u"delà",u"depuis",u"dernier",u"derniere",u"derriere",u"derrière",u"des",u"desormais",u"desquelles",u"desquels",u"dessous",u"dessus",u"deux",u"deuxième",u"deuxièmement",u"devant",u"devers",u"devra",u"different",u"differentes",u"differents",u"différent",u"différente",u"différentes",u"différents",u"dire",u"directe",u"directement",u"dit",u"dite",u"dits",u"divers",u"diverse",u"diverses",u"dix",u"dix-huit",u"dix-neuf",u"dix-sept",u"dixième",u"doit",u"doivent",u"donc",u"dont",u"douze",u"douzième",u"dring",u"du",u"duquel",u"durant",u"dès",u"désormais",u"e",u"effet",u"egale",u"egalement",u"egales",u"eh",u"elle",u"elle-même",u"elles",u"elles-mêmes",u"en",u"encore",u"enfin",u"entre",u"envers",u"environ",u"es",u"est",u"et",u"etant",u"etc",u"etre",u"eu",u"euh",u"eux",u"eux-mêmes",u"exactement",u"excepté",u"extenso",u"exterieur",u"f",u"fais",u"faisaient",u"faisant",u"fait",u"façon",u"feront",u"fi",u"flac",u"floc",u"font",u"g",u"gens",u"h",u"ha",u"hein",u"hem",u"hep",u"hi",u"ho",u"holà",u"hop",u"hormis",u"hors",u"hou",u"houp",u"hue",u"hui",u"huit",u"huitième",u"hum",u"hurrah",u"hé",u"hélas",u"i",u"il",u"ils",u"importe",u"j",u"je",u"jusqu",u"jusque",u"juste",u"k",u"l",u"la",u"laisser",u"laquelle",u"las",u"le",u"lequel",u"les",u"lesquelles",u"lesquels",u"leur",u"leurs",u"longtemps",u"lors",u"lorsque",u"lui",u"lui-meme",u"lui-même",u
"là",u"lès",u"m",u"ma",u"maint",u"maintenant",u"mais",u"malgre",u"malgré",u"maximale",u"me",u"meme",u"memes",u"merci",u"mes",u"mien",u"mienne",u"miennes",u"miens",u"mille",u"mince",u"minimale",u"moi",u"moi-meme",u"moi-même",u"moindres",u"moins",u"mon",u"moyennant",u"multiple",u"multiples",u"même",u"mêmes",u"n",u"na",u"naturel",u"naturelle",u"naturelles",u"ne",u"neanmoins",u"necessaire",u"necessairement",u"neuf",u"neuvième",u"ni",u"nombreuses",u"nombreux",u"non",u"nos",u"notamment",u"notre",u"nous",u"nous-mêmes",u"nouveau",u"nul",u"néanmoins",u"nôtre",u"nôtres",u"o",u"oh",u"ohé",u"ollé",u"olé",u"on",u"ont",u"onze",u"onzième",u"ore",u"ou",u"ouf",u"ouias",u"oust",u"ouste",u"outre",u"ouvert",u"ouverte",u"ouverts",u"o|",u"où",u"p",u"paf",u"pan",u"par",u"parce",u"parfois",u"parle",u"parlent",u"parler",u"parmi",u"parseme",u"partant",u"particulier",u"particulière",u"particulièrement",u"pas",u"passé",u"pendant",u"pense",u"permet",u"personne",u"peu",u"peut",u"peuvent",u"peux",u"pff",u"pfft",u"pfut",u"pif",u"pire",u"plein",u"plouf",u"plus",u"plusieurs",u"plutôt",u"possessif",u"possessifs",u"possible",u"possibles",u"pouah",u"pour",u"pourquoi",u"pourrais",u"pourrait",u"pouvait",u"prealable",u"precisement",u"premier",u"première",u"premièrement",u"pres",u"probable",u"probante",u"procedant",u"proche",u"près",u"psitt",u"pu",u"puis",u"puisque",u"pur",u"pure",u"q",u"qu",u"quand",u"quant",u"quant-à-soi",u"quanta",u"quarante",u"quatorze",u"quatre",u"quatre-vingt",u"quatrième",u"quatrièmement",u"que",u"quel",u"quelconque",u"quelle",u"quelles",u"quelqu'un",u"quelque",u"quelques",u"quels",u"qui",u"quiconque",u"quinze",u"quoi",u"quoique",u"r",u"rare",u"rarement",u"rares",u"relative",u"relativement",u"remarquable",u"rend",u"rendre",u"restant",u"reste",u"restent",u"restrictif",u"retour",u"revoici",u"revoilà",u"rien",u"s",u"sa",u"sacrebleu",u"sait",u"sans",u"sapristi",u"sauf",u"se",u"sein",u"seize",u"selon",u"semblable",u"semblaient",u"semble",u"semblent",u"sent",u"sept",u"septième",u"sera",u"seraient",u"serait",u"seront",u"ses",u"seul",u"seule",u"seulement",u"si",u"sien",u"sienne",u"siennes",u"siens",u"sinon",u"six",u"sixième",u"soi",u"soi-même",u"soit",u"soixante",u"son",u"sont",u"sous",u"souvent",u"specifique",u"specifiques",u"speculatif",u"stop",u"strictement",u"subtiles",u"suffisant",u"suffisante",u"suffit",u"suis",u"suit",u"suivant",u"suivante",u"suivantes",u"suivants",u"suivre",u"superpose",u"sur",u"surtout",u"t",u"ta",u"tac",u"tant",u"tardive",u"te",u"tel",u"telle",u"tellement",u"telles",u"tels",u"tenant",u"tend",u"tenir",u"tente",u"tes",u"tic",u"tien",u"tienne",u"tiennes",u"tiens",u"toc",u"toi",u"toi-même",u"ton",u"touchant",u"toujours",u"tous",u"tout",u"toute",u"toutefois",u"toutes",u"treize",u"trente",u"tres",u"trois",u"troisième",u"troisièmement",u"trop",u"très",u"tsoin",u"tsouin",u"tu",u"té",u"u",u"un",u"une",u"unes",u"uniformement",u"unique",u"uniques",u"uns",u"v",u"va",u"vais",u"vas",u"vers",u"via",u"vif",u"vifs",u"vingt",u"vivat",u"vive",u"vives",u"vlan",u"voici",u"voilà",u"vont",u"vos",u"votre",u"vous",u"vous-mêmes",u"vu",u"vé",u"vôtre",u"vôtres",u"w",u"x",u"y",u"z",u"zut",u"à",u"â",u"ça",u"ès",u"étaient",u"étais",u"était",u"étant",u"été",u"être",u"ô"],u"hr":[u"a",u"ako",u"ali",u"bi",u"bih",u"bila",u"bili",u"bilo",u"bio",u"bismo",u"biste",u"biti",u"bumo",u"da",u"do",u"duž",u"ga",u"hoće",u"hoćemo",u"hoćete",u"hoćeš",u"hoću",u"i",u"iako",u"ih",u"ili",u"iz",u"ja",u"je",u"jedna",u"jedne",u"jedno",u"jer",u"jesam",u"jesi",u"jesmo",u"jest",u"jeste",u"jesu",u"jim",u"joj",u"još",u"ju",u"kada",u"kako",u"kao",u"
koja",u"koje",u"koji",u"kojima",u"koju",u"kroz",u"li",u"me",u"mene",u"meni",u"mi",u"mimo",u"moj",u"moja",u"moje",u"mu",u"na",u"nad",u"nakon",u"nam",u"nama",u"nas",u"naš",u"naša",u"naše",u"našeg",u"ne",u"nego",u"neka",u"neki",u"nekog",u"neku",u"nema",u"netko",u"neće",u"nećemo",u"nećete",u"nećeš",u"neću",u"nešto",u"ni",u"nije",u"nikoga",u"nikoje",u"nikoju",u"nisam",u"nisi",u"nismo",u"niste",u"nisu",u"njega",u"njegov",u"njegova",u"njegovo",u"njemu",u"njezin",u"njezina",u"njezino",u"njih",u"njihov",u"njihova",u"njihovo",u"njim",u"njima",u"njoj",u"nju",u"no",u"o",u"od",u"odmah",u"on",u"ona",u"oni",u"ono",u"ova",u"pa",u"pak",u"po",u"pod",u"pored",u"prije",u"s",u"sa",u"sam",u"samo",u"se",u"sebe",u"sebi",u"si",u"smo",u"ste",u"su",u"sve",u"svi",u"svog",u"svoj",u"svoja",u"svoje",u"svom",u"ta",u"tada",u"taj",u"tako",u"te",u"tebe",u"tebi",u"ti",u"to",u"toj",u"tome",u"tu",u"tvoj",u"tvoja",u"tvoje",u"u",u"uz",u"vam",u"vama",u"vas",u"vaš",u"vaša",u"vaše",u"već",u"vi",u"vrlo",u"za",u"zar",u"će",u"ćemo",u"ćete",u"ćeš",u"ću",u"što"],u"hu":[u"a",u"abba",u"abban",u"abból",u"addig",u"ahhoz",u"ahogy",u"ahol",u"aki",u"akik",u"akkor",u"akár",u"alapján",u"alatt",u"alatta",u"alattad",u"alattam",u"alattatok",u"alattuk",u"alattunk",u"alá",u"alád",u"alájuk",u"alám",u"alánk",u"alátok",u"alól",u"alóla",u"alólad",u"alólam",u"alólatok",u"alóluk",u"alólunk",u"amely",u"amelybol",u"amelyek",u"amelyekben",u"amelyeket",u"amelyet",u"amelyik",u"amelynek",u"ami",u"amikor",u"amit",u"amolyan",u"amott",u"amíg",u"annak",u"annál",u"arra",u"arról",u"attól",u"az",u"aznap",u"azok",u"azokat",u"azokba",u"azokban",u"azokból",u"azokhoz",u"azokig",u"azokkal",u"azokká",u"azoknak",u"azoknál",u"azokon",u"azokra",u"azokról",u"azoktól",u"azokért",u"azon",u"azonban",u"azonnal",u"azt",u"aztán",u"azután",u"azzal",u"azzá",u"azért",u"bal",u"balra",u"ban",u"be",u"belé",u"beléd",u"beléjük",u"belém",u"belénk",u"belétek",u"belül",u"belőle",u"belőled",u"belőlem",u"belőletek",u"belőlük",u"belőlünk",u"ben",u"benne",u"benned",u"bennem",u"bennetek",u"bennük",u"bennünk",u"bár",u"bárcsak",u"bármilyen",u"búcsú",u"cikk",u"cikkek",u"cikkeket",u"csak",u"csakhogy",u"csupán",u"de",u"dehogy",u"e",u"ebbe",u"ebben",u"ebből",u"eddig",u"egy",u"egyebek",u"egyebet",u"egyedül",u"egyelőre",u"egyes",u"egyet",u"egyetlen",u"egyik",u"egymás",u"egyre",u"egyszerre",u"egyéb",u"együtt",u"egész",u"egészen",u"ehhez",u"ekkor",u"el",u"eleinte",u"ellen",u"ellenes",u"elleni",u"ellenére",u"elmondta",u"első",u"elsők",u"elsősorban",u"elsőt",u"elé",u"eléd",u"elég",u"eléjük",u"elém",u"elénk",u"elétek",u"elő",u"előbb",u"elől",u"előle",u"előled",u"előlem",u"előletek",u"előlük",u"előlünk",u"először",u"előtt",u"előtte",u"előtted",u"előttem",u"előttetek",u"előttük",u"előttünk",u"előző",u"emilyen",u"engem",u"ennek",u"ennyi",u"ennél",u"enyém",u"erre",u"erről",u"esetben",u"ettől",u"ez",u"ezek",u"ezekbe",u"ezekben",u"ezekből",u"ezeken",u"ezeket",u"ezekhez",u"ezekig",u"ezekkel",u"ezekké",u"ezeknek",u"ezeknél",u"ezekre",u"ezekről",u"ezektől",u"ezekért",u"ezen",u"ezentúl",u"ezer",u"ezret",u"ezt",u"ezután",u"ezzel",u"ezzé",u"ezért",u"fel",u"fele",u"felek",u"felet",u"felett",u"felé",u"fent",u"fenti",u"fél",u"fölé",u"gyakran",u"ha",u"halló",u"hamar",u"hanem",u"harmadik",u"harmadikat",u"harminc",u"hat",u"hatodik",u"hatodikat",u"hatot",u"hatvan",u"helyett",u"hetedik",u"hetediket",u"hetet",u"hetven",u"hirtelen",u"hiszen",u"hiába",u"hogy",u"hogyan",u"hol",u"holnap",u"holnapot",u"honnan",u"hova",u"hozzá",u"hozzád",u"hozzájuk",u"hozzám",u"hozzánk",u"hozzátok",u"hurrá",u"huszadik",u"hány",u"hányszor",u"hármat",u"három"
,u"hát",u"hátha",u"hátulsó",u"hét",u"húsz",u"ide",u"ide-оda",u"idén",u"igazán",u"igen",u"ill",u"illetve",u"ilyen",u"ilyenkor",u"immár",u"inkább",u"is",u"ismét",u"ison",u"itt",u"jelenleg",u"jobban",u"jobbra",u"jó",u"jól",u"jólesik",u"jóval",u"jövőre",u"kell",u"kellene",u"kellett",u"kelljen",u"keressünk",u"keresztül",u"ketten",u"kettő",u"kettőt",u"kevés",u"ki",u"kiben",u"kiből",u"kicsit",u"kicsoda",u"kihez",u"kik",u"kikbe",u"kikben",u"kikből",u"kiken",u"kiket",u"kikhez",u"kikkel",u"kikké",u"kiknek",u"kiknél",u"kikre",u"kikről",u"kiktől",u"kikért",u"kilenc",u"kilencedik",u"kilencediket",u"kilencet",u"kilencven",u"kin",u"kinek",u"kinél",u"kire",u"kiről",u"kit",u"kitől",u"kivel",u"kivé",u"kié",u"kiért",u"korábban",u"képest",u"kérem",u"kérlek",u"kész",u"késő",u"később",u"későn",u"két",u"kétszer",u"kívül",u"körül",u"köszönhetően",u"köszönöm",u"közben",u"közel",u"közepesen",u"közepén",u"közé",u"között",u"közül",u"külön",u"különben",u"különböző",u"különbözőbb",u"különbözőek",u"lassan",u"le",u"legalább",u"legyen",u"lehet",u"lehetetlen",u"lehetett",u"lehetőleg",u"lehetőség",u"lenne",u"lenni",u"lennék",u"lennének",u"lesz",u"leszek",u"lesznek",u"leszünk",u"lett",u"lettek",u"lettem",u"lettünk",u"lévő",u"ma",u"maga",u"magad",u"magam",u"magatokat",u"magukat",u"magunkat",u"magát",u"mai",u"majd",u"majdnem",u"manapság",u"meg",u"megcsinál",u"megcsinálnak",u"megint",u"megvan",u"mellett",u"mellette",u"melletted",u"mellettem",u"mellettetek",u"mellettük",u"mellettünk",u"mellé",u"melléd",u"melléjük",u"mellém",u"mellénk",u"mellétek",u"mellől",u"mellőle",u"mellőled",u"mellőlem",u"mellőletek",u"mellőlük",u"mellőlünk",u"mely",u"melyek",u"melyik",u"mennyi",u"mert",u"mi",u"miatt",u"miatta",u"miattad",u"miattam",u"miattatok",u"miattuk",u"miattunk",u"mibe",u"miben",u"miből",u"mihez",u"mik",u"mikbe",u"mikben",u"mikből",u"miken",u"miket",u"mikhez",u"mikkel",u"mikké",u"miknek",u"miknél",u"mikor",u"mikre",u"mikről",u"miktől",u"mikért",u"milyen",u"min",u"mind",u"mindegyik",u"mindegyiket",u"minden",u"mindenesetre",u"mindenki",u"mindent",u"mindenütt",u"mindig",u"mindketten",u"minek",u"minket",u"mint",u"mintha",u"minél",u"mire",u"miről",u"mit",u"mitől",u"mivel",u"mivé",u"miért",u"mondta",u"most",u"mostanáig",u"már",u"más",u"másik",u"másikat",u"másnap",u"második",u"másodszor",u"mások",u"másokat",u"mást",u"még",u"mégis",u"míg",u"mögé",u"mögéd",u"mögéjük",u"mögém",u"mögénk",u"mögétek",u"mögött",u"mögötte",u"mögötted",u"mögöttem",u"mögöttetek",u"mögöttük",u"mögöttünk",u"mögül",u"mögüle",u"mögüled",u"mögülem",u"mögületek",u"mögülük",u"mögülünk",u"múltkor",u"múlva",u"na",u"nagy",u"nagyobb",u"nagyon",u"naponta",u"napot",u"ne",u"negyedik",u"negyediket",u"negyven",u"neked",u"nekem",u"neki",u"nekik",u"nektek",u"nekünk",u"nem",u"nemcsak",u"nemrég",u"nincs",u"nyolc",u"nyolcadik",u"nyolcadikat",u"nyolcat",u"nyolcvan",u"nála",u"nálad",u"nálam",u"nálatok",u"náluk",u"nálunk",u"négy",u"négyet",u"néha",u"néhány",u"nélkül",u"o",u"oda",u"ok",u"olyan",u"onnan",u"ott",u"pedig",u"persze",u"pár",u"például",u"rajta",u"rajtad",u"rajtam",u"rajtatok",u"rajtuk",u"rajtunk",u"rendben",u"rosszul",u"rá",u"rád",u"rájuk",u"rám",u"ránk",u"rátok",u"régen",u"régóta",u"részére",u"róla",u"rólad",u"rólam",u"rólatok",u"róluk",u"rólunk",u"rögtön",u"s",u"saját",u"se",u"sem",u"semmi",u"semmilyen",u"semmiség",u"senki",u"soha",u"sok",u"sokan",u"sokat",u"sokkal",u"sokszor",u"sokáig",u"során",u"stb.",u"szemben",u"szerbusz",u"szerint",u"szerinte",u"szerinted",u"szerintem",u"szerintetek",u"szerintük",u"szerintünk",u"szervusz",u"szinte",u"számára",u"száz",u"századik",u"százat",u"sz
épen",u"szét",u"szíves",u"szívesen",u"szíveskedjék",u"sőt",u"talán",u"tavaly",u"te",u"tegnap",u"tegnapelőtt",u"tehát",u"tele",u"teljes",u"tessék",u"ti",u"tied",u"titeket",u"tizedik",u"tizediket",u"tizenegy",u"tizenegyedik",u"tizenhat",u"tizenhárom",u"tizenhét",u"tizenkettedik",u"tizenkettő",u"tizenkilenc",u"tizenkét",u"tizennyolc",u"tizennégy",u"tizenöt",u"tizet",u"tovább",u"további",u"továbbá",u"távol",u"téged",u"tényleg",u"tíz",u"több",u"többi",u"többször",u"túl",u"tőle",u"tőled",u"tőlem",u"tőletek",u"tőlük",u"tőlünk",u"ugyanakkor",u"ugyanez",u"ugyanis",u"ugye",u"urak",u"uram",u"urat",u"utoljára",u"utolsó",u"után",u"utána",u"vagy",u"vagyis",u"vagyok",u"vagytok",u"vagyunk",u"vajon",u"valahol",u"valaki",u"valakit",u"valamelyik",u"valami",u"valamint",u"való",u"van",u"vannak",u"vele",u"veled",u"velem",u"veletek",u"velük",u"velünk",u"vissza",u"viszlát",u"viszont",u"viszontlátásra",u"volna",u"volnának",u"volnék",u"volt",u"voltak",u"voltam",u"voltunk",u"végre",u"végén",u"végül",u"által",u"általában",u"ám",u"át",u"éljen",u"én",u"éppen",u"érte",u"érted",u"értem",u"értetek",u"értük",u"értünk",u"és",u"év",u"évben",u"éve",u"évek",u"éves",u"évi",u"évvel",u"így",u"óta",u"ön",u"önbe",u"önben",u"önből",u"önhöz",u"önnek",u"önnel",u"önnél",u"önre",u"önről",u"önt",u"öntől",u"önért",u"önök",u"önökbe",u"önökben",u"önökből",u"önöket",u"önökhöz",u"önökkel",u"önöknek",u"önöknél",u"önökre",u"önökről",u"önöktől",u"önökért",u"önökön",u"önön",u"össze",u"öt",u"ötven",u"ötödik",u"ötödiket",u"ötöt",u"úgy",u"úgyis",u"úgynevezett",u"új",u"újabb",u"újra",u"úr",u"ő",u"ők",u"őket",u"őt"],u"it":[u"IE",u"a",u"abbastanza",u"abbia",u"abbiamo",u"abbiano",u"abbiate",u"accidenti",u"ad",u"adesso",u"affinche",u"agl",u"agli",u"ahime",u"ahimè",u"ai",u"al",u"alcuna",u"alcuni",u"alcuno",u"all",u"alla",u"alle",u"allo",u"allora",u"altri",u"altrimenti",u"altro",u"altrove",u"altrui",u"anche",u"ancora",u"anni",u"anno",u"ansa",u"anticipo",u"assai",u"attesa",u"attraverso",u"avanti",u"avemmo",u"avendo",u"avente",u"aver",u"avere",u"averlo",u"avesse",u"avessero",u"avessi",u"avessimo",u"aveste",u"avesti",u"avete",u"aveva",u"avevamo",u"avevano",u"avevate",u"avevi",u"avevo",u"avrai",u"avranno",u"avrebbe",u"avrebbero",u"avrei",u"avremmo",u"avremo",u"avreste",u"avresti",u"avrete",u"avrà",u"avrò",u"avuta",u"avute",u"avuti",u"avuto",u"basta",u"bene",u"benissimo",u"berlusconi",u"brava",u"bravo",u"c",u"casa",u"caso",u"cento",u"certa",u"certe",u"certi",u"certo",u"che",u"chi",u"chicchessia",u"chiunque",u"ci",u"ciascuna",u"ciascuno",u"cima",u"cio",u"cioe",u"cioè",u"circa",u"citta",u"città",u"ciò",u"co",u"codesta",u"codesti",u"codesto",u"cogli",u"coi",u"col",u"colei",u"coll",u"coloro",u"colui",u"come",u"cominci",u"comunque",u"con",u"concernente",u"conciliarsi",u"conclusione",u"consiglio",u"contro",u"cortesia",u"cos",u"cosa",u"cosi",u"così",u"cui",u"d",u"da",u"dagl",u"dagli",u"dai",u"dal",u"dall",u"dalla",u"dalle",u"dallo",u"dappertutto",u"davanti",u"degl",u"degli",u"dei",u"del",u"dell",u"della",u"delle",u"dello",u"dentro",u"detto",u"deve",u"di",u"dice",u"dietro",u"dire",u"dirimpetto",u"diventa",u"diventare",u"diventato",u"dopo",u"dov",u"dove",u"dovra",u"dovrà",u"dovunque",u"due",u"dunque",u"durante",u"e",u"ebbe",u"ebbero",u"ebbi",u"ecc",u"ecco",u"ed",u"effettivamente",u"egli",u"ella",u"entrambi",u"eppure",u"era",u"erano",u"eravamo",u"eravate",u"eri",u"ero",u"esempio",u"esse",u"essendo",u"esser",u"essere",u"essi",u"ex",u"fa",u"faccia",u"facciamo",u"facciano",u"facciate",u"faccio",u"facemmo",u"facendo",u"facesse",u"facessero",u"facessi",u"facessimo",u"faceste",
u"facesti",u"faceva",u"facevamo",u"facevano",u"facevate",u"facevi",u"facevo",u"fai",u"fanno",u"farai",u"faranno",u"fare",u"farebbe",u"farebbero",u"farei",u"faremmo",u"faremo",u"fareste",u"faresti",u"farete",u"farà",u"farò",u"fatto",u"favore",u"fece",u"fecero",u"feci",u"fin",u"finalmente",u"finche",u"fine",u"fino",u"forse",u"forza",u"fosse",u"fossero",u"fossi",u"fossimo",u"foste",u"fosti",u"fra",u"frattempo",u"fu",u"fui",u"fummo",u"fuori",u"furono",u"futuro",u"generale",u"gia",u"giacche",u"giorni",u"giorno",u"già",u"gli",u"gliela",u"gliele",u"glieli",u"glielo",u"gliene",u"governo",u"grande",u"grazie",u"gruppo",u"ha",u"haha",u"hai",u"hanno",u"ho",u"i",u"ieri",u"il",u"improvviso",u"in",u"inc",u"infatti",u"inoltre",u"insieme",u"intanto",u"intorno",u"invece",u"io",u"l",u"la",u"lasciato",u"lato",u"lavoro",u"le",u"lei",u"li",u"lo",u"lontano",u"loro",u"lui",u"lungo",u"luogo",u"là",u"ma",u"macche",u"magari",u"maggior",u"mai",u"male",u"malgrado",u"malissimo",u"mancanza",u"marche",u"me",u"medesimo",u"mediante",u"meglio",u"meno",u"mentre",u"mesi",u"mezzo",u"mi",u"mia",u"mie",u"miei",u"mila",u"miliardi",u"milioni",u"minimi",u"ministro",u"mio",u"modo",u"molti",u"moltissimo",u"molto",u"momento",u"mondo",u"mosto",u"nazionale",u"ne",u"negl",u"negli",u"nei",u"nel",u"nell",u"nella",u"nelle",u"nello",u"nemmeno",u"neppure",u"nessun",u"nessuna",u"nessuno",u"niente",u"no",u"noi",u"non",u"nondimeno",u"nonostante",u"nonsia",u"nostra",u"nostre",u"nostri",u"nostro",u"novanta",u"nove",u"nulla",u"nuovo",u"o",u"od",u"oggi",u"ogni",u"ognuna",u"ognuno",u"oltre",u"oppure",u"ora",u"ore",u"osi",u"ossia",u"ottanta",u"otto",u"paese",u"parecchi",u"parecchie",u"parecchio",u"parte",u"partendo",u"peccato",u"peggio",u"per",u"perche",u"perchè",u"perché",u"percio",u"perciò",u"perfino",u"pero",u"persino",u"persone",u"però",u"piedi",u"pieno",u"piglia",u"piu",u"piuttosto",u"più",u"po",u"pochissimo",u"poco",u"poi",u"poiche",u"possa",u"possedere",u"posteriore",u"posto",u"potrebbe",u"preferibilmente",u"presa",u"press",u"prima",u"primo",u"principalmente",u"probabilmente",u"proprio",u"puo",u"pure",u"purtroppo",u"può",u"qualche",u"qualcosa",u"qualcuna",u"qualcuno",u"quale",u"quali",u"qualunque",u"quando",u"quanta",u"quante",u"quanti",u"quanto",u"quantunque",u"quasi",u"quattro",u"quel",u"quella",u"quelle",u"quelli",u"quello",u"quest",u"questa",u"queste",u"questi",u"questo",u"qui",u"quindi",u"realmente",u"recente",u"recentemente",u"registrazione",u"relativo",u"riecco",u"salvo",u"sara",u"sarai",u"saranno",u"sarebbe",u"sarebbero",u"sarei",u"saremmo",u"saremo",u"sareste",u"saresti",u"sarete",u"sarà",u"sarò",u"scola",u"scopo",u"scorso",u"se",u"secondo",u"seguente",u"seguito",u"sei",u"sembra",u"sembrare",u"sembrato",u"sembri",u"sempre",u"senza",u"sette",u"si",u"sia",u"siamo",u"siano",u"siate",u"siete",u"sig",u"solito",u"solo",u"soltanto",u"sono",u"sopra",u"sotto",u"spesso",u"srl",u"sta",u"stai",u"stando",u"stanno",u"starai",u"staranno",u"starebbe",u"starebbero",u"starei",u"staremmo",u"staremo",u"stareste",u"staresti",u"starete",u"starà",u"starò",u"stata",u"state",u"stati",u"stato",u"stava",u"stavamo",u"stavano",u"stavate",u"stavi",u"stavo",u"stemmo",u"stessa",u"stesse",u"stessero",u"stessi",u"stessimo",u"stesso",u"steste",u"stesti",u"stette",u"stettero",u"stetti",u"stia",u"stiamo",u"stiano",u"stiate",u"sto",u"su",u"sua",u"subito",u"successivamente",u"successivo",u"sue",u"sugl",u"sugli",u"sui",u"sul",u"sull",u"sulla",u"sulle",u"sullo",u"suo",u"suoi",u"tale",u"tali",u"talvolta",u"tanto",u"te",u"tempo",u"ti",u"titolo",u"torino",u"tra",u"tranne",u"tre",
u"trenta",u"troppo",u"trovato",u"tu",u"tua",u"tue",u"tuo",u"tuoi",u"tutta",u"tuttavia",u"tutte",u"tutti",u"tutto",u"uguali",u"ulteriore",u"ultimo",u"un",u"una",u"uno",u"uomo",u"va",u"vale",u"vari",u"varia",u"varie",u"vario",u"verso",u"vi",u"via",u"vicino",u"visto",u"vita",u"voi",u"volta",u"volte",u"vostra",u"vostre",u"vostri",u"vostro",u"è"],u"ko":[u"!",u"\"",u"$",u"%",u"&",u"'",u"(",u")",u"*",u"+",u",u",u"-",u".",u"...",u"0",u"1",u"2",u"3",u"4",u"5",u"6",u"7",u"8",u"9",u";",u"<",u"=",u">",u"?",u"@",u"\\",u"^",u"_",u"`",u"|",u"~",u"·",u"—",u"——",u"‘",u"’",u"“",u"”",u"…",u"、",u"。",u"〈",u"〉",u"《",u"》",u"가",u"가까스로",u"가령",u"각",u"각각",u"각자",u"각종",u"갖고말하자면",u"같다",u"같이",u"개의치않고",u"거니와",u"거바",u"거의",u"것",u"것과 같이",u"것들",u"게다가",u"게우다",u"겨우",u"견지에서",u"결과에 이르다",u"결국",u"결론을 낼 수 있다",u"겸사겸사",u"고려하면",u"고로",u"곧",u"공동으로",u"과",u"과연",u"관계가 있다",u"관계없이",u"관련이 있다",u"관하여",u"관한",u"관해서는",u"구",u"구체적으로",u"구토하다",u"그",u"그들",u"그때",u"그래",u"그래도",u"그래서",u"그러나",u"그러니",u"그러니까",u"그러면",u"그러므로",u"그러한즉",u"그런 까닭에",u"그런데",u"그런즉",u"그럼",u"그럼에도 불구하고",u"그렇게 함으로써",u"그렇지",u"그렇지 않다면",u"그렇지 않으면",u"그렇지만",u"그렇지않으면",u"그리고",u"그리하여",u"그만이다",u"그에 따르는",u"그위에",u"그저",u"그중에서",u"그치지 않다",u"근거로",u"근거하여",u"기대여",u"기점으로",u"기준으로",u"기타",u"까닭으로",u"까악",u"까지",u"까지 미치다",u"까지도",u"꽈당",u"끙끙",u"끼익",u"나",u"나머지는",u"남들",u"남짓",u"너",u"너희",u"너희들",u"네",u"넷",u"년",u"논하지 않다",u"놀라다",u"누가 알겠는가",u"누구",u"다른",u"다른 방면으로",u"다만",u"다섯",u"다소",u"다수",u"다시 말하자면",u"다시말하면",u"다음",u"다음에",u"다음으로",u"단지",u"답다",u"당신",u"당장",u"대로 하다",u"대하면",u"대하여",u"대해 말하자면",u"대해서",u"댕그",u"더구나",u"더군다나",u"더라도",u"더불어",u"더욱더",u"더욱이는",u"도달하다",u"도착하다",u"동시에",u"동안",u"된바에야",u"된이상",u"두번째로",u"둘",u"둥둥",u"뒤따라",u"뒤이어",u"든간에",u"들",u"등",u"등등",u"딩동",u"따라",u"따라서",u"따위",u"따지지 않다",u"딱",u"때",u"때가 되어",u"때문에",u"또",u"또한",u"뚝뚝",u"라 해도",u"령",u"로",u"로 인하여",u"로부터",u"로써",u"륙",u"를",u"마음대로",u"마저",u"마저도",u"마치",u"막론하고",u"만 못하다",u"만약",u"만약에",u"만은 아니다",u"만이 아니다",u"만일",u"만큼",u"말하자면",u"말할것도 없고",u"매",u"매번",u"메쓰겁다",u"몇",u"모",u"모두",u"무렵",u"무릎쓰고",u"무슨",u"무엇",u"무엇때문에",u"물론",u"및",u"바꾸어말하면",u"바꾸어말하자면",u"바꾸어서 말하면",u"바꾸어서 한다면",u"바꿔 말하면",u"바로",u"바와같이",u"밖에 안된다",u"반대로",u"반대로 말하자면",u"반드시",u"버금",u"보는데서",u"보다더",u"보드득",u"본대로",u"봐",u"봐라",u"부류의 사람들",u"부터",u"불구하고",u"불문하고",u"붕붕",u"비걱거리다",u"비교적",u"비길수 없다",u"비로소",u"비록",u"비슷하다",u"비추어 보아",u"비하면",u"뿐만 아니라",u"뿐만아니라",u"뿐이다",u"삐걱",u"삐걱거리다",u"사",u"삼",u"상대적으로 말하자면",u"생각한대로",u"설령",u"설마",u"설사",u"셋",u"소생",u"소인",u"솨",u"쉿",u"습니까",u"습니다",u"시각",u"시간",u"시작하여",u"시초에",u"시키다",u"실로",u"심지어",u"아",u"아니",u"아니나다를가",u"아니라면",u"아니면",u"아니었다면",u"아래윗",u"아무거나",u"아무도",u"아야",u"아울러",u"아이",u"아이고",u"아이구",u"아이야",u"아이쿠",u"아하",u"아홉",u"안 그러면",u"않기 위하여",u"않기 위해서",u"알 수 있다",u"알았어",u"앗",u"앞에서",u"앞의것",u"야",u"약간",u"양자",u"어",u"어기여차",u"어느",u"어느 년도",u"어느것",u"어느곳",u"어느때",u"어느쪽",u"어느해",u"어디",u"어때",u"어떠한",u"어떤",u"어떤것",u"어떤것들",u"어떻게",u"어떻해",u"어이",u"어째서",u"어쨋든",u"어쩔수 없다",u"어찌",u"어찌됏든",u"어찌됏어",u"어찌하든지",u"어찌하여",u"언제",u"언젠가",u"얼마",u"얼마 안 되는 것",u"얼마간",u"얼마나",u"얼마든지",u"얼마만큼",u"얼마큼",u"엉엉",u"에",u"에 가서",u"에 달려 있다",u"에 대해",u"에 있다",u"에 한하다",u"에게",u"에서",u"여",u"여기",u"여덟",u"여러분",u"여보시오",u"여부",u"여섯",u"여전히",u"여차",u"연관되다",u"연이서",u"영",u"영차",u"옆사람",u"예",u"예를 들면",u"예를 들자면",u"예컨대",u"예하면",u"오",u"오로지",u"오르다",u"오자마자",u"오직",u"오호",u"오히려",u"와",u"와 같은 사람들",u"와르르",u"와아",u"왜",u"왜냐하면",u"외에도",u"요만큼",u"요만한 것",u"요만한걸",u"요컨대",u"우르르",u"우리",u"우리들",u"우선",u"우에 종합한것과같이",u"운운",u"월",u"위에서 서술한바와같이",u"위하여",u"위해서",u"윙윙",u"육",u"으로",u"으로 인하여",u"으로서",u"으로써",u"을",u"응",u"응당",u"의",u"의거하여",u"의지하여",u"의해",u"의해되다",u"의해서",u"이",u"이 되다",u"이 때문에",u"이 밖에",u"이 외에",u"이 정도의",u"이것",u"이곳",u"이때",u"이라면",u"이래",u"이러이러하다",u"이러한",u"이런",u"이럴정도로",u"이렇게 많은 것",u"이렇게되면",u"이렇게말하자면",u"이렇구나",u"이로 
인하여",u"이르기까지",u"이리하여",u"이만큼",u"이번",u"이봐",u"이상",u"이어서",u"이었다",u"이와 같다",u"이와 같은",u"이와 반대로",u"이와같다면",u"이외에도",u"이용하여",u"이유만으로",u"이젠",u"이지만",u"이쪽",u"이천구",u"이천육",u"이천칠",u"이천팔",u"인 듯하다",u"인젠",u"일",u"일것이다",u"일곱",u"일단",u"일때",u"일반적으로",u"일지라도",u"임에 틀림없다",u"입각하여",u"입장에서",u"잇따라",u"있다",u"자",u"자기",u"자기집",u"자마자",u"자신",u"잠깐",u"잠시",u"저",u"저것",u"저것만큼",u"저기",u"저쪽",u"저희",u"전부",u"전자",u"전후",u"점에서 보아",u"정도에 이르다",u"제",u"제각기",u"제외하고",u"조금",u"조차",u"조차도",u"졸졸",u"좀",u"좋아",u"좍좍",u"주룩주룩",u"주저하지 않고",u"줄은 몰랏다",u"줄은모른다",u"중에서",u"중의하나",u"즈음하여",u"즉",u"즉시",u"지든지",u"지만",u"지말고",u"진짜로",u"쪽으로",u"차라리",u"참",u"참나",u"첫번째로",u"쳇",u"총적으로",u"총적으로 말하면",u"총적으로 보면",u"칠",u"콸콸",u"쾅쾅",u"쿵",u"타다",u"타인",u"탕탕",u"토하다",u"통하여",u"툭",u"퉤",u"틈타",u"팍",u"팔",u"퍽",u"펄렁",u"하",u"하게될것이다",u"하게하다",u"하겠는가",u"하고 있다",u"하고있었다",u"하곤하였다",u"하구나",u"하기 때문에",u"하기 위하여",u"하기는한데",u"하기만 하면",u"하기보다는",u"하기에",u"하나",u"하느니",u"하는 김에",u"하는 편이 낫다",u"하는것도",u"하는것만 못하다",u"하는것이 낫다",u"하는바",u"하더라도",u"하도다",u"하도록시키다",u"하도록하다",u"하든지",u"하려고하다",u"하마터면",u"하면 할수록",u"하면된다",u"하면서",u"하물며",u"하여금",u"하여야",u"하자마자",u"하지 않는다면",u"하지 않도록",u"하지마",u"하지마라",u"하지만",u"하하",u"한 까닭에",u"한 이유는",u"한 후",u"한다면",u"한다면 몰라도",u"한데",u"한마디",u"한적이있다",u"한켠으로는",u"한항목",u"할 따름이다",u"할 생각이다",u"할 줄 안다",u"할 지경이다",u"할 힘이 있다",u"할때",u"할만하다",u"할망정",u"할뿐",u"할수있다",u"할수있어",u"할줄알다",u"할지라도",u"할지언정",u"함께",u"해도된다",u"해도좋다",u"해봐요",u"해서는 안된다",u"해야한다",u"해요",u"했어요",u"향하다",u"향하여",u"향해서",u"허",u"허걱",u"허허",u"헉",u"헉헉",u"헐떡헐떡",u"형식으로 쓰여",u"혹시",u"혹은",u"혼자",u"훨씬",u"휘익",u"휴",u"흐흐",u"흥",u"힘입어",u"︿",u"!",u"#",u"$",u"%",u"&",u"(",u")",u"*",u"+",u",",u"0",u"1",u"2",u"3",u"4",u"5",u"6",u"7",u"8",u"9",u":",u";",u"<",u">",u"?",u"@",u"[",u"]",u"{",u"|",u"}",u"~",u"¥"],u"nl":[u"aan",u"achte",u"achter",u"af",u"al",u"alle",u"alleen",u"alles",u"als",u"ander",u"anders",u"beetje",u"behalve",u"beide",u"beiden",u"ben",u"beneden",u"bent",u"bij",u"bijna",u"bijv",u"blijkbaar",u"blijken",u"boven",u"bv",u"daar",u"daardoor",u"daarin",u"daarna",u"daarom",u"daaruit",u"dan",u"dat",u"de",u"deden",u"deed",u"derde",u"derhalve",u"dertig",u"deze",u"dhr",u"die",u"dit",u"doe",u"doen",u"doet",u"door",u"drie",u"duizend",u"echter",u"een",u"eens",u"eerst",u"eerste",u"eigen",u"eigenlijk",u"elk",u"elke",u"en",u"enige",u"er",u"erg",u"ergens",u"etc",u"etcetera",u"even",u"geen",u"genoeg",u"geweest",u"haar",u"haarzelf",u"had",u"hadden",u"heb",u"hebben",u"hebt",u"hedden",u"heeft",u"heel",u"hem",u"hemzelf",u"hen",u"het",u"hetzelfde",u"hier",u"hierin",u"hierna",u"hierom",u"hij",u"hijzelf",u"hoe",u"honderd",u"hun",u"ieder",u"iedere",u"iedereen",u"iemand",u"iets",u"ik",u"in",u"inderdaad",u"intussen",u"is",u"ja",u"je",u"jij",u"jijzelf",u"jou",u"jouw",u"jullie",u"kan",u"kon",u"konden",u"kun",u"kunnen",u"kunt",u"laatst",u"later",u"lijken",u"lijkt",u"maak",u"maakt",u"maakte",u"maakten",u"maar",u"mag",u"maken",u"me",u"meer",u"meest",u"meestal",u"men",u"met",u"mevr",u"mij",u"mijn",u"minder",u"miss",u"misschien",u"missen",u"mits",u"mocht",u"mochten",u"moest",u"moesten",u"moet",u"moeten",u"mogen",u"mr",u"mrs",u"mw",u"na",u"naar",u"nam",u"namelijk",u"nee",u"neem",u"negen",u"nemen",u"nergens",u"niemand",u"niet",u"niets",u"niks",u"noch",u"nochtans",u"nog",u"nooit",u"nu",u"nv",u"of",u"om",u"omdat",u"ondanks",u"onder",u"ondertussen",u"ons",u"onze",u"onzeker",u"ooit",u"ook",u"op",u"over",u"overal",u"overige",u"paar",u"per",u"recent",u"redelijk",u"samen",u"sinds",u"steeds",u"te",u"tegen",u"tegenover",u"thans",u"tien",u"tiende",u"tijdens",u"tja",u"toch",u"toe",u"tot",u"totdat",u"tussen",u"twee",u"tweede",u"u",u"uit",u"uw",u"vaak",u"van",u"vanaf",u"veel",u"veertig",u"verder",u"verscheidene",u"verschillende",u"via",u"vier",
u"vierde",u"vijf",u"vijfde",u"vijftig",u"volgend",u"volgens",u"voor",u"voordat",u"voorts",u"waar",u"waarom",u"waarschijnlijk",u"wanneer",u"waren",u"was",u"wat",u"we",u"wederom",u"weer",u"weinig",u"wel",u"welk",u"welke",u"werd",u"werden",u"werder",u"whatever",u"wie",u"wij",u"wijzelf",u"wil",u"wilden",u"willen",u"word",u"worden",u"wordt",u"zal",u"ze",u"zei",u"zeker",u"zelf",u"zelfde",u"zes",u"zeven",u"zich",u"zij",u"zijn",u"zijzelf",u"zo",u"zoals",u"zodat",u"zou",u"zouden",u"zulk",u"zullen"],u"no":[u"alle",u"at",u"av",u"bare",u"begge",u"ble",u"blei",u"bli",u"blir",u"blitt",u"både",u"båe",u"da",u"de",u"deg",u"dei",u"deim",u"deira",u"deires",u"dem",u"den",u"denne",u"der",u"dere",u"deres",u"det",u"dette",u"di",u"din",u"disse",u"ditt",u"du",u"dykk",u"dykkar",u"då",u"eg",u"ein",u"eit",u"eitt",u"eller",u"elles",u"en",u"enn",u"er",u"et",u"ett",u"etter",u"for",u"fordi",u"fra",u"før",u"ha",u"hadde",u"han",u"hans",u"har",u"hennar",u"henne",u"hennes",u"her",u"hjå",u"ho",u"hoe",u"honom",u"hoss",u"hossen",u"hun",u"hva",u"hvem",u"hver",u"hvilke",u"hvilken",u"hvis",u"hvor",u"hvordan",u"hvorfor",u"i",u"ikke",u"ikkje",u"ingen",u"ingi",u"inkje",u"inn",u"inni",u"ja",u"jeg",u"kan",u"kom",u"korleis",u"korso",u"kun",u"kunne",u"kva",u"kvar",u"kvarhelst",u"kven",u"kvi",u"kvifor",u"man",u"mange",u"me",u"med",u"medan",u"meg",u"meget",u"mellom",u"men",u"mi",u"min",u"mine",u"mitt",u"mot",u"mykje",u"ned",u"no",u"noe",u"noen",u"noka",u"noko",u"nokon",u"nokor",u"nokre",u"nå",u"når",u"og",u"også",u"om",u"opp",u"oss",u"over",u"på",u"samme",u"seg",u"selv",u"si",u"sia",u"sidan",u"siden",u"sin",u"sine",u"sitt",u"sjøl",u"skal",u"skulle",u"slik",u"so",u"som",u"somme",u"somt",u"så",u"sånn",u"til",u"um",u"upp",u"ut",u"uten",u"var",u"vart",u"varte",u"ved",u"vere",u"verte",u"vi",u"vil",u"ville",u"vore",u"vors",u"vort",u"vår",u"være",u"vært",u"å"],u"pl":[u"aby",u"ach",u"aj",u"albo",u"ale",u"ani",u"aż",u"bardzo",u"bez",u"bo",u"bowiem",u"by",u"byli",u"bym",u"być",u"był",u"była",u"było",u"były",u"będzie",u"będą",u"chce",u"choć",u"ci",u"ciebie",u"cię",u"co",u"coraz",u"coś",u"czy",u"czyli",u"często",u"daleko",u"dla",u"dlaczego",u"dlatego",u"do",u"dobrze",u"dokąd",u"dość",u"dr",u"dużo",u"dwa",u"dwaj",u"dwie",u"dwoje",u"dzisiaj",u"dziś",u"gdy",u"gdyby",u"gdyż",u"gdzie",u"go",u"godz",u"hab",u"i",u"ich",u"ii",u"iii",u"ile",u"im",u"inne",u"inny",u"inż",u"iv",u"ix",u"iż",u"ja",u"jak",u"jakby",u"jaki",u"jakie",u"jako",u"je",u"jeden",u"jedna",u"jednak",u"jedno",u"jednym",u"jedynie",u"jego",u"jej",u"jemu",u"jest",u"jestem",u"jeszcze",u"jeśli",u"jeżeli",u"już",u"ją",u"każdy",u"kiedy",u"kierunku",u"kilku",u"kto",u"która",u"które",u"którego",u"której",u"który",u"których",u"którym",u"którzy",u"ku",u"lat",u"lecz",u"lub",u"ma",u"mają",u"mam",u"mamy",u"mgr",u"mi",u"miał",u"mimo",u"mnie",u"mną",u"mogą",u"moi",u"moja",u"moje",u"może",u"można",u"mu",u"musi",u"my",u"mój",u"na",u"nad",u"nam",u"nami",u"nas",u"nasi",u"nasz",u"nasza",u"nasze",u"natychmiast",u"nawet",u"nic",u"nich",u"nie",u"niego",u"niej",u"niemu",u"nigdy",u"nim",u"nimi",u"nią",u"niż",u"no",u"nowe",u"np",u"nr",u"o",u"o.o.",u"obok",u"od",u"ok",u"około",u"on",u"ona",u"one",u"oni",u"ono",u"oraz",u"owszem",u"pan",u"pl",u"po",u"pod",u"ponad",u"ponieważ",u"poza",u"prof",u"przed",u"przede",u"przedtem",u"przez",u"przy",u"raz",u"razie",u"roku",u"również",u"sam",u"sama",u"się",u"skąd",u"sobie",u"sposób",u"swoje",u"są",u"ta",u"tak",u"taki",u"takich",u"takie",u"także",u"tam",u"te",u"tego",u"tej",u"tel",u"temu",u"ten",u"teraz",u"też",u"to",u"tobie",u"tobą",u"trzeba",u"tu",u"tutaj",u"twoi",u"twoja",u"twoje",u"t
wój",u"ty",u"tych",u"tylko",u"tym",u"tys",u"tzw",u"tę",u"u",u"ul",u"vi",u"vii",u"viii",u"vol",u"w",u"wam",u"wami",u"was",u"wasi",u"wasz",u"wasza",u"wasze",u"we",u"wie",u"więc",u"wszystko",u"wtedy",u"www",u"wy",u"właśnie",u"wśród",u"xi",u"xii",u"xiii",u"xiv",u"xv",u"z",u"za",u"zawsze",u"zaś",u"ze",u"zł",u"żaden",u"że",u"żeby"],u"pt":[u"a",u"acerca",u"adeus",u"agora",u"ainda",u"algmas",u"algo",u"algumas",u"alguns",u"ali",u"além",u"ambos",u"ano",u"anos",u"antes",u"ao",u"aos",u"apenas",u"apoio",u"apontar",u"após",u"aquela",u"aquelas",u"aquele",u"aqueles",u"aqui",u"aquilo",u"as",u"assim",u"através",u"atrás",u"até",u"aí",u"baixo",u"bastante",u"bem",u"bom",u"breve",u"cada",u"caminho",u"catorze",u"cedo",u"cento",u"certamente",u"certeza",u"cima",u"cinco",u"coisa",u"com",u"como",u"comprido",u"conhecido",u"conselho",u"contra",u"corrente",u"custa",u"cá",u"da",u"daquela",u"daquele",u"dar",u"das",u"de",u"debaixo",u"demais",u"dentro",u"depois",u"desde",u"desligado",u"dessa",u"desse",u"desta",u"deste",u"deve",u"devem",u"deverá",u"dez",u"dezanove",u"dezasseis",u"dezassete",u"dezoito",u"dia",u"diante",u"direita",u"diz",u"dizem",u"dizer",u"do",u"dois",u"dos",u"doze",u"duas",u"dá",u"dão",u"dúvida",u"e",u"ela",u"elas",u"ele",u"eles",u"em",u"embora",u"enquanto",u"entre",u"então",u"era",u"essa",u"essas",u"esse",u"esses",u"esta",u"estado",u"estar",u"estará",u"estas",u"estava",u"este",u"estes",u"esteve",u"estive",u"estivemos",u"estiveram",u"estiveste",u"estivestes",u"estou",u"está",u"estás",u"estão",u"eu",u"exemplo",u"falta",u"fará",u"favor",u"faz",u"fazeis",u"fazem",u"fazemos",u"fazer",u"fazes",u"fazia",u"faço",u"fez",u"fim",u"final",u"foi",u"fomos",u"for",u"fora",u"foram",u"forma",u"foste",u"fostes",u"fui",u"geral",u"grande",u"grandes",u"grupo",u"hoje",u"horas",u"há",u"iniciar",u"inicio",u"ir",u"irá",u"isso",u"ista",u"iste",u"isto",u"já",u"lado",u"ligado",u"local",u"logo",u"longe",u"lugar",u"lá",u"maior",u"maioria",u"maiorias",u"mais",u"mal",u"mas",u"me",u"meio",u"menor",u"menos",u"meses",u"mesmo",u"meu",u"meus",u"mil",u"minha",u"minhas",u"momento",u"muito",u"muitos",u"máximo",u"mês",u"na",u"nada",u"naquela",u"naquele",u"nas",u"nem",u"nenhuma",u"nessa",u"nesse",u"nesta",u"neste",u"no",u"noite",u"nome",u"nos",u"nossa",u"nossas",u"nosso",u"nossos",u"nova",u"nove",u"novo",u"novos",u"num",u"numa",u"nunca",u"não",u"nível",u"nós",u"número",u"o",u"obra",u"obrigada",u"obrigado",u"oitava",u"oitavo",u"oito",u"onde",u"ontem",u"onze",u"os",u"ou",u"outra",u"outras",u"outro",u"outros",u"para",u"parece",u"parte",u"partir",u"pegar",u"pela",u"pelas",u"pelo",u"pelos",u"perto",u"pessoas",u"pode",u"podem",u"poder",u"poderá",u"podia",u"ponto",u"pontos",u"por",u"porque",u"porquê",u"posição",u"possivelmente",u"posso",u"possível",u"pouca",u"pouco",u"povo",u"primeira",u"primeiro",u"promeiro",u"próprio",u"próximo",u"puderam",u"pôde",u"põe",u"põem",u"qual",u"qualquer",u"quando",u"quanto",u"quarta",u"quarto",u"quatro",u"que",u"quem",u"quer",u"quero",u"questão",u"quieto",u"quinta",u"quinto",u"quinze",u"quê",u"relação",u"sabe",u"saber",u"se",u"segunda",u"segundo",u"sei",u"seis",u"sem",u"sempre",u"ser",u"seria",u"sete",u"seu",u"seus",u"sexta",u"sexto",u"sim",u"sistema",u"sob",u"sobre",u"sois",u"somente",u"somos",u"sou",u"sua",u"suas",u"são",u"sétima",u"sétimo",u"tal",u"talvez",u"também",u"tanto",u"tarde",u"te",u"tem",u"temos",u"tempo",u"tendes",u"tenho",u"tens",u"tentar",u"tentaram",u"tente",u"tentei",u"ter",u"terceira",u"terceiro",u"teu",u"teus",u"teve",u"tipo",u"tive",u"tivemos",u"tiveram",u"tiveste",u"tivestes",u"toda",u"todas",u"todo",u"to
dos",u"trabalhar",u"trabalho",u"treze",u"três",u"tu",u"tua",u"tuas",u"tudo",u"tão",u"têm",u"um",u"uma",u"umas",u"uns",u"usa",u"usar",u"vai",u"vais",u"valor",u"veja",u"vem",u"vens",u"ver",u"verdade",u"verdadeiro",u"vez",u"vezes",u"viagem",u"vindo",u"vinte",u"você",u"vocês",u"vos",u"vossa",u"vossas",u"vosso",u"vossos",u"vários",u"vão",u"vêm",u"vós",u"zero",u"à",u"às",u"área",u"é",u"és",u"último"],u"ru":[u"а",u"алло",u"без",u"белый",u"близко",u"более",u"больше",u"большой",u"будем",u"будет",u"будете",u"будешь",u"будто",u"буду",u"будут",u"будь",u"бы",u"бывает",u"бывь",u"был",u"была",u"были",u"было",u"быть",u"в",u"важная",u"важное",u"важные",u"важный",u"вам",u"вами",u"вас",u"ваш",u"ваша",u"ваше",u"ваши",u"вверх",u"вдали",u"вдруг",u"ведь",u"везде",u"вернуться",u"весь",u"вечер",u"взгляд",u"взять",u"вид",u"видеть",u"вместе",u"вниз",u"внизу",u"во",u"вода",u"война",u"вокруг",u"вон",u"вообще",u"вопрос",u"восемнадцатый",u"восемнадцать",u"восемь",u"восьмой",u"вот",u"впрочем",u"времени",u"время",u"все",u"всегда",u"всего",u"всем",u"всеми",u"всему",u"всех",u"всею",u"всю",u"всюду",u"вся",u"всё",u"второй",u"вы",u"выйти",u"г",u"где",u"главный",u"глаз",u"говорил",u"говорит",u"говорить",u"год",u"года",u"году",u"голова",u"голос",u"город",u"да",u"давать",u"давно",u"даже",u"далекий",u"далеко",u"дальше",u"даром",u"дать",u"два",u"двадцатый",u"двадцать",u"две",u"двенадцатый",u"двенадцать",u"дверь",u"двух",u"девятнадцатый",u"девятнадцать",u"девятый",u"девять",u"действительно",u"дел",u"делать",u"дело",u"день",u"деньги",u"десятый",u"десять",u"для",u"до",u"довольно",u"долго",u"должно",u"должный",u"дом",u"дорога",u"друг",u"другая",u"другие",u"других",u"друго",u"другое",u"другой",u"думать",u"душа",u"е",u"его",u"ее",u"ей",u"ему",u"если",u"есть",u"еще",u"ещё",u"ею",u"её",u"ж",u"ждать",u"же",u"жена",u"женщина",u"жизнь",u"жить",u"за",u"занят",u"занята",u"занято",u"заняты",u"затем",u"зато",u"зачем",u"здесь",u"земля",u"знать",u"значит",u"значить",u"и",u"идти",u"из",u"или",u"им",u"именно",u"иметь",u"ими",u"имя",u"иногда",u"их",u"к",u"каждая",u"каждое",u"каждые",u"каждый",u"кажется",u"казаться",u"как",u"какая",u"какой",u"кем",u"книга",u"когда",u"кого",u"ком",u"комната",u"кому",u"конец",u"конечно",u"которая",u"которого",u"которой",u"которые",u"который",u"которых",u"кроме",u"кругом",u"кто",u"куда",u"лежать",u"лет",u"ли",u"лицо",u"лишь",u"лучше",u"любить",u"люди",u"м",u"маленький",u"мало",u"мать",u"машина",u"между",u"меля",u"менее",u"меньше",u"меня",u"место",u"миллионов",u"мимо",u"минута",u"мир",u"мира",u"мне",u"много",u"многочисленная",u"многочисленное",u"многочисленные",u"многочисленный",u"мной",u"мною",u"мог",u"могут",u"мож",u"может",u"можно",u"можхо",u"мои",u"мой",u"мор",u"москва",u"мочь",u"моя",u"моё",u"мы",u"на",u"наверху",u"над",u"надо",u"назад",u"наиболее",u"найти",u"наконец",u"нам",u"нами",u"народ",u"нас",u"начала",u"начать",u"наш",u"наша",u"наше",u"наши",u"не",u"него",u"недавно",u"недалеко",u"нее",u"ней",u"некоторый",u"нельзя",u"нем",u"немного",u"нему",u"непрерывно",u"нередко",u"несколько",u"нет",u"нею",u"неё",u"ни",u"нибудь",u"ниже",u"низко",u"никакой",u"никогда",u"никто",u"никуда",u"ними",u"них",u"ничего",u"ничто",u"но",u"новый",u"нога",u"ночь",u"ну",u"нужно",u"нужный",u"нх",u"о",u"об",u"оба",u"обычно",u"один",u"одиннадцатый",u"одиннадцать",u"однажды",u"однако",u"одного",u"одной",u"оказаться",u"окно",u"около",u"он",u"она",u"они",u"оно",u"опять",u"особенно",u"остаться",u"от",u"ответить",u"отец",u"отовсюду",u"отсюда",u"очень",u"первый",u"перед",u"писать",u"плечо",u"по",u"под",u"подумать",u"пожалуйста",u"позже",u"пойти",u"пок
а",u"пол",u"получить",u"помнить",u"понимать",u"понять",u"пор",u"пора",u"после",u"последний",u"посмотреть",u"посреди",u"потом",u"потому",u"почему",u"почти",u"правда",u"прекрасно",u"при",u"про",u"просто",u"против",u"процентов",u"пятнадцатый",u"пятнадцать",u"пятый",u"пять",u"работа",u"работать",u"раз",u"разве",u"рано",u"раньше",u"ребенок",u"решить",u"россия",u"рука",u"русский",u"ряд",u"рядом",u"с",u"сам",u"сама",u"сами",u"самим",u"самими",u"самих",u"само",u"самого",u"самой",u"самом",u"самому",u"саму",u"самый",u"свет",u"свое",u"своего",u"своей",u"свои",u"своих",u"свой",u"свою",u"сделать",u"сеаой",u"себе",u"себя",u"сегодня",u"седьмой",u"сейчас",u"семнадцатый",u"семнадцать",u"семь",u"сидеть",u"сила",u"сих",u"сказал",u"сказала",u"сказать",u"сколько",u"слишком",u"слово",u"случай",u"смотреть",u"сначала",u"снова",u"со",u"собой",u"собою",u"советский",u"совсем",u"спасибо",u"спросить",u"сразу",u"стал",u"старый",u"стать",u"стол",u"сторона",u"стоять",u"страна",u"суть",u"считать",u"т",u"та",u"так",u"такая",u"также",u"таки",u"такие",u"такое",u"такой",u"там",u"твой",u"твоя",u"твоё",u"те",u"тебе",u"тебя",u"тем",u"теми",u"теперь",u"тех",u"то",u"тобой",u"тобою",u"товарищ",u"тогда",u"того",u"тоже",u"только",u"том",u"тому",u"тот",u"тою",u"третий",u"три",u"тринадцатый",u"тринадцать",u"ту",u"туда",u"тут",u"ты",u"тысяч",u"у",u"увидеть",u"уж",u"уже",u"улица",u"уметь",u"утро",u"хороший",u"хорошо",u"хотеть",u"хоть",u"хотя",u"хочешь",u"час",u"часто",u"часть",u"чаще",u"чего",u"человек",u"чем",u"чему",u"через",u"четвертый",u"четыре",u"четырнадцатый",u"четырнадцать",u"что",u"чтоб",u"чтобы",u"чуть",u"шестнадцатый",u"шестнадцать",u"шестой",u"шесть",u"эта",u"эти",u"этим",u"этими",u"этих",u"это",u"этого",u"этой",u"этом",u"этому",u"этот",u"эту",u"я"],u"sv":[u"aderton",u"adertonde",u"adjö",u"aldrig",u"alla",u"allas",u"allt",u"alltid",u"alltså",u"andra",u"andras",u"annan",u"annat",u"artonde",u"artonn",u"att",u"av",u"bakom",u"bara",u"behöva",u"behövas",u"behövde",u"behövt",u"beslut",u"beslutat",u"beslutit",u"bland",u"blev",u"bli",u"blir",u"blivit",u"bort",u"borta",u"bra",u"bäst",u"bättre",u"båda",u"bådas",u"dag",u"dagar",u"dagarna",u"dagen",u"de",u"del",u"delen",u"dem",u"den",u"denna",u"deras",u"dess",u"dessa",u"det",u"detta",u"dig",u"din",u"dina",u"dit",u"ditt",u"dock",u"du",u"där",u"därför",u"då",u"efter",u"eftersom",u"ej",u"elfte",u"eller",u"elva",u"en",u"enkel",u"enkelt",u"enkla",u"enligt",u"er",u"era",u"ert",u"ett",u"ettusen",u"fanns",u"fem",u"femte",u"femtio",u"femtionde",u"femton",u"femtonde",u"fick",u"fin",u"finnas",u"finns",u"fjorton",u"fjortonde",u"fjärde",u"fler",u"flera",u"flesta",u"fram",u"framför",u"från",u"fyra",u"fyrtio",u"fyrtionde",u"få",u"får",u"fått",u"följande",u"för",u"före",u"förlåt",u"förra",u"första",u"genast",u"genom",u"gick",u"gjorde",u"gjort",u"god",u"goda",u"godare",u"godast",u"gott",u"gälla",u"gäller",u"gällt",u"gärna",u"gå",u"går",u"gått",u"gör",u"göra",u"ha",u"hade",u"haft",u"han",u"hans",u"har",u"heller",u"hellre",u"helst",u"helt",u"henne",u"hennes",u"hit",u"hon",u"honom",u"hundra",u"hundraen",u"hundraett",u"hur",u"här",u"hög",u"höger",u"högre",u"högst",u"i",u"ibland",u"icke",u"idag",u"igen",u"igår",u"imorgon",u"in",u"inför",u"inga",u"ingen",u"ingenting",u"inget",u"innan",u"inne",u"inom",u"inte",u"inuti",u"ja",u"jag",u"ju",u"jämfört",u"kan",u"kanske",u"knappast",u"kom",u"komma",u"kommer",u"kommit",u"kr",u"kunde",u"kunna",u"kunnat",u"kvar",u"legat",u"ligga",u"ligger",u"lika",u"likställd",u"likställda",u"lilla",u"lite",u"liten",u"litet",u"länge",u"längre",u"längst",u"lätt",u"lättare",u"lättast",u"lån
gsam",u"långsammare",u"långsammast",u"långsamt",u"långt",u"man",u"med",u"mellan",u"men",u"mer",u"mera",u"mest",u"mig",u"min",u"mina",u"mindre",u"minst",u"mitt",u"mittemot",u"mot",u"mycket",u"många",u"måste",u"möjlig",u"möjligen",u"möjligt",u"möjligtvis",u"ned",u"nederst",u"nedersta",u"nedre",u"nej",u"ner",u"ni",u"nio",u"nionde",u"nittio",u"nittionde",u"nitton",u"nittonde",u"nog",u"noll",u"nr",u"nu",u"nummer",u"när",u"nästa",u"någon",u"någonting",u"något",u"några",u"nödvändig",u"nödvändiga",u"nödvändigt",u"nödvändigtvis",u"och",u"också",u"ofta",u"oftast",u"olika",u"olikt",u"om",u"oss",u"på",u"rakt",u"redan",u"rätt",u"sade",u"sagt",u"samma",u"sedan",u"senare",u"senast",u"sent",u"sex",u"sextio",u"sextionde",u"sexton",u"sextonde",u"sig",u"sin",u"sina",u"sist",u"sista",u"siste",u"sitt",u"sitta",u"sju",u"sjunde",u"sjuttio",u"sjuttionde",u"sjutton",u"sjuttonde",u"själv",u"sjätte",u"ska",u"skall",u"skulle",u"slutligen",u"små",u"smått",u"snart",u"som",u"stor",u"stora",u"stort",u"större",u"störst",u"säga",u"säger",u"sämre",u"sämst",u"så",u"sådan",u"sådana",u"sådant",u"tack",u"tidig",u"tidigare",u"tidigast",u"tidigt",u"till",u"tills",u"tillsammans",u"tio",u"tionde",u"tjugo",u"tjugoen",u"tjugoett",u"tjugonde",u"tjugotre",u"tjugotvå",u"tjungo",u"tolfte",u"tolv",u"tre",u"tredje",u"trettio",u"trettionde",u"tretton",u"trettonde",u"två",u"tvåhundra",u"under",u"upp",u"ur",u"ursäkt",u"ut",u"utan",u"utanför",u"ute",u"vad",u"var",u"vara",u"varför",u"varifrån",u"varit",u"varje",u"varken",u"vars",u"varsågod",u"vart",u"vem",u"vems",u"verkligen",u"vi",u"vid",u"vidare",u"viktig",u"viktigare",u"viktigast",u"viktigt",u"vilka",u"vilkas",u"vilken",u"vilket",u"vill",u"vänster",u"vänstra",u"värre",u"vår",u"våra",u"vårt",u"än",u"ännu",u"är",u"även",u"åt",u"åtminstone",u"åtta",u"åttio",u"åttionde",u"åttonde",u"över",u"övermorgon",u"överst",u"övre"],u"tr":[u"acaba",u"acep",u"adeta",u"altmýþ",u"altmış",u"altý",u"altı",u"ama",u"ancak",u"arada",u"artýk",u"aslında",u"aynen",u"ayrıca",u"az",u"bana",u"bari",u"bazen",u"bazý",u"bazı",u"baţka",u"belki",u"ben",u"benden",u"beni",u"benim",u"beri",u"beþ",u"beş",u"beţ",u"bile",u"bin",u"bir",u"biraz",u"biri",u"birkaç",u"birkez",u"birçok",u"birþey",u"birþeyi",u"birşey",u"birşeyi",u"birţey",u"biz",u"bizden",u"bize",u"bizi",u"bizim",u"bu",u"buna",u"bunda",u"bundan",u"bunlar",u"bunları",u"bunların",u"bunu",u"bunun",u"burada",u"böyle",u"böylece",u"bütün",u"da",u"daha",u"dahi",u"dahil",u"daima",u"dair",u"dayanarak",u"de",u"defa",u"deđil",u"değil",u"diye",u"diđer",u"diğer",u"doksan",u"dokuz",u"dolayı",u"dolayısıyla",u"dört",u"edecek",u"eden",u"ederek",u"edilecek",u"ediliyor",u"edilmesi",u"ediyor",u"elli",u"en",u"etmesi",u"etti",u"ettiği",u"ettiğini",u"eđer",u"eğer",u"fakat",u"gibi",u"göre",u"halbuki",u"halen",u"hangi",u"hani",u"hariç",u"hatta",u"hele",u"hem",u"henüz",u"hep",u"hepsi",u"her",u"herhangi",u"herkes",u"herkesin",u"hiç",u"hiçbir",u"iken",u"iki",u"ila",u"ile",u"ilgili",u"ilk",u"illa",u"ise",u"itibaren",u"itibariyle",u"iyi",u"iyice",u"için",u"işte",u"iţte",u"kadar",u"kanýmca",u"karşın",u"katrilyon",u"kendi",u"kendilerine",u"kendini",u"kendisi",u"kendisine",u"kendisini",u"kere",u"kez",u"keţke",u"ki",u"kim",u"kimden",u"kime",u"kimi",u"kimse",u"kýrk",u"kýsaca",u"kırk",u"lakin",u"madem",u"međer",u"milyar",u"milyon",u"mu",u"mü",u"mý",u"mı",u"nasýl",u"nasıl",u"ne",u"neden",u"nedenle",u"nerde",u"nere",u"nerede",u"nereye",u"nitekim",u"niye",u"niçin",u"o",u"olan",u"olarak",u"oldu",u"olduklarını",u"olduğu",u"olduğunu",u"olmadı",u"olmadığı",u"olmak",u"olması",u"olmayan",u"olmaz",u"olsa",u"olsun",u"o
lup",u"olur",u"olursa",u"oluyor",u"on",u"ona",u"ondan",u"onlar",u"onlardan",u"onlari",u"onlarýn",u"onları",u"onların",u"onu",u"onun",u"otuz",u"oysa",u"pek",u"rağmen",u"sadece",u"sanki",u"sekiz",u"seksen",u"sen",u"senden",u"seni",u"senin",u"siz",u"sizden",u"sizi",u"sizin",u"sonra",u"tarafından",u"trilyon",u"tüm",u"var",u"vardı",u"ve",u"veya",u"veyahut",u"ya",u"yahut",u"yani",u"yapacak",u"yapmak",u"yaptı",u"yaptıkları",u"yaptığı",u"yaptığını",u"yapılan",u"yapılması",u"yapıyor",u"yedi",u"yerine",u"yetmiþ",u"yetmiş",u"yetmiţ",u"yine",u"yirmi",u"yoksa",u"yüz",u"zaten",u"çok",u"çünkü",u"öyle",u"üzere",u"üç",u"þey",u"þeyden",u"þeyi",u"þeyler",u"þu",u"þuna",u"þunda",u"þundan",u"þunu",u"şey",u"şeyden",u"şeyi",u"şeyler",u"şu",u"şuna",u"şunda",u"şundan",u"şunları",u"şunu",u"şöyle",u"ţayet",u"ţimdi",u"ţu",u"ţöyle"],u"zh":[u"、",u"。",u"〈",u"〉",u"《",u"》",u"一",u"一切",u"一则",u"一方面",u"一旦",u"一来",u"一样",u"一般",u"七",u"万一",u"三",u"上下",u"不仅",u"不但",u"不光",u"不单",u"不只",u"不如",u"不怕",u"不惟",u"不成",u"不拘",u"不比",u"不然",u"不特",u"不独",u"不管",u"不论",u"不过",u"不问",u"与",u"与其",u"与否",u"与此同时",u"且",u"两者",u"个",u"临",u"为",u"为了",u"为什么",u"为何",u"为着",u"乃",u"乃至",u"么",u"之",u"之一",u"之所以",u"之类",u"乌乎",u"乎",u"乘",u"九",u"也",u"也好",u"也罢",u"了",u"二",u"于",u"于是",u"于是乎",u"云云",u"五",u"人家",u"什么",u"什么样",u"从",u"从而",u"他",u"他人",u"他们",u"以",u"以便",u"以免",u"以及",u"以至",u"以至于",u"以致",u"们",u"任",u"任何",u"任凭",u"似的",u"但",u"但是",u"何",u"何况",u"何处",u"何时",u"作为",u"你",u"你们",u"使得",u"例如",u"依",u"依照",u"俺",u"俺们",u"倘",u"倘使",u"倘或",u"倘然",u"倘若",u"借",u"假使",u"假如",u"假若",u"像",u"八",u"六",u"兮",u"关于",u"其",u"其一",u"其中",u"其二",u"其他",u"其余",u"其它",u"其次",u"具体地说",u"具体说来",u"再者",u"再说",u"冒",u"冲",u"况且",u"几",u"几时",u"凭",u"凭借",u"则",u"别",u"别的",u"别说",u"到",u"前后",u"前者",u"加之",u"即",u"即令",u"即使",u"即便",u"即或",u"即若",u"又",u"及",u"及其",u"及至",u"反之",u"反过来",u"反过来说",u"另",u"另一方面",u"另外",u"只是",u"只有",u"只要",u"只限",u"叫",u"叮咚",u"可",u"可以",u"可是",u"可见",u"各",u"各个",u"各位",u"各种",u"各自",u"同",u"同时",u"向",u"向着",u"吓",u"吗",u"否则",u"吧",u"吧哒",u"吱",u"呀",u"呃",u"呕",u"呗",u"呜",u"呜呼",u"呢",u"呵",u"呸",u"呼哧",u"咋",u"和",u"咚",u"咦",u"咱",u"咱们",u"咳",u"哇",u"哈",u"哈哈",u"哉",u"哎",u"哎呀",u"哎哟",u"哗",u"哟",u"哦",u"哩",u"哪",u"哪个",u"哪些",u"哪儿",u"哪天",u"哪年",u"哪怕",u"哪样",u"哪边",u"哪里",u"哼",u"哼唷",u"唉",u"啊",u"啐",u"啥",u"啦",u"啪达",u"喂",u"喏",u"喔唷",u"嗡嗡",u"嗬",u"嗯",u"嗳",u"嘎",u"嘎登",u"嘘",u"嘛",u"嘻",u"嘿",u"四",u"因",u"因为",u"因此",u"因而",u"固然",u"在",u"在下",u"地",u"多",u"多少",u"她",u"她们",u"如",u"如上所述",u"如何",u"如其",u"如果",u"如此",u"如若",u"宁",u"宁可",u"宁愿",u"宁肯",u"它",u"它们",u"对",u"对于",u"将",u"尔后",u"尚且",u"就",u"就是",u"就是说",u"尽",u"尽管",u"岂但",u"己",u"并",u"并且",u"开外",u"开始",u"归",u"当",u"当着",u"彼",u"彼此",u"往",u"待",u"得",u"怎",u"怎么",u"怎么办",u"怎么样",u"怎样",u"总之",u"总的来看",u"总的来说",u"总的说来",u"总而言之",u"恰恰相反",u"您",u"慢说",u"我",u"我们",u"或",u"或是",u"或者",u"所",u"所以",u"打",u"把",u"抑或",u"拿",u"按",u"按照",u"换句话说",u"换言之",u"据",u"接着",u"故",u"故此",u"旁人",u"无宁",u"无论",u"既",u"既是",u"既然",u"时候",u"是",u"是的",u"替",u"有",u"有些",u"有关",u"有的",u"望",u"朝",u"朝着",u"本",u"本着",u"来",u"来着",u"极了",u"果然",u"果真",u"某",u"某个",u"某些",u"根据",u"正如",u"此",u"此外",u"此间",u"毋宁",u"每",u"每当",u"比",u"比如",u"比方",u"沿",u"沿着",u"漫说",u"焉",u"然则",u"然后",u"然而",u"照",u"照着",u"甚么",u"甚而",u"甚至",u"用",u"由",u"由于",u"由此可见",u"的",u"的话",u"相对而言",u"省得",u"着",u"着呢",u"矣",u"离",u"第",u"等",u"等等",u"管",u"紧接着",u"纵",u"纵令",u"纵使",u"纵然",u"经",u"经过",u"结果",u"给",u"继而",u"综上所述",u"罢了",u"者",u"而",u"而且",u"而况",u"而外",u"而已",u"而是",u"而言",u"能",u"腾",u"自",u"自个儿",u"自从",u"自各儿",u"自家",u"自己",u"自身",u"至",u"至于",u"若",u"若是",u"若非",u"莫若",u"虽",u"虽则",u"虽然",u"虽说",u"被",u"要",u"要不",u"要不是",u"要不然",u"要么",u"要是",u"让",u"论",u"设使",u"设若",u"该",u"诸位",u"谁",u"谁知",u"赶",u"起",u"起见",u"趁",u"趁着",u"越是",u"跟",u"较",u"较之",u"边",u"过",u"还是",u"还有",u"这",u"这个",u"这么",u"这么些",u"这么样",u"这么点儿",u"这些",u"这会儿",u"这儿",u"这就是说",u"这时",u"这样",u"这边",u"这里",u"进而",u"连",u
"连同",u"通过",u"遵照",u"那",u"那个",u"那么",u"那么些",u"那么样",u"那些",u"那会儿",u"那儿",u"那时",u"那样",u"那边",u"那里",u"鄙人",u"鉴于",u"阿",u"除",u"除了",u"除此之外",u"除非",u"随",u"随着",u"零",u"非但",u"非徒",u"靠",u"顺",u"顺着",u"首先",u"︿",u"!",u"#",u"$",u"%",u"&",u"(",u")",u"*",u"+",u",",u"0",u"1",u"2",u"3",u"4",u"5",u"6",u"7",u"8",u"9",u":",u";",u"<",u">",u"?",u"@",u"[",u"]",u"{",u"|",u"}",u"~",u"¥"],u"eo":[u"adiaŭ",u"ajn",u"al",u"ankoraŭ",u"antaŭ",u"aŭ",u"bonan",u"bonvole",u"bonvolu",u"bv",u"ci",u"cia",u"cian",u"cin",u"d-ro",u"da",u"de",u"dek",u"deka",u"do",u"doktor'",u"doktoro",u"du",u"dua",u"dum",u"eble",u"ekz",u"ekzemple",u"en",u"estas",u"estis",u"estos",u"estu",u"estus",u"eĉ",u"f-no",u"feliĉan",u"for",u"fraŭlino",u"ha",u"havas",u"havis",u"havos",u"havu",u"havus",u"he",u"ho",u"hu",u"ili",u"ilia",u"ilian",u"ilin",u"inter",u"io",u"ion",u"iu",u"iujn",u"iun",u"ja",u"jam",u"je",u"jes",u"k",u"kaj",u"ke",u"kio",u"kion",u"kiu",u"kiujn",u"kiun",u"kvankam",u"kvar",u"kvara",u"kvazaŭ",u"kvin",u"kvina",u"la",u"li",u"lia",u"lian",u"lin",u"malantaŭ",u"male",u"malgraŭ",u"mem",u"mi",u"mia",u"mian",u"min",u"minus",u"naŭ",u"naŭa",u"ne",u"nek",u"nenio",u"nenion",u"neniu",u"neniun",u"nepre",u"ni",u"nia",u"nian",u"nin",u"nu",u"nun",u"nur",u"ok",u"oka",u"oni",u"onia",u"onian",u"onin",u"plej",u"pli",u"plu",u"plus",u"por",u"post",u"preter",u"s-no",u"s-ro",u"se",u"sed",u"sep",u"sepa",u"ses",u"sesa",u"si",u"sia",u"sian",u"sin",u"sinjor'",u"sinjorino",u"sinjoro",u"sub",u"super",u"supren",u"sur",u"tamen",u"tio",u"tion",u"tiu",u"tiujn",u"tiun",u"tra",u"tri",u"tria",u"tuj",u"tute",u"unu",u"unua",u"ve",u"verŝajne",u"vi",u"via",u"vian",u"vin",u"ĉi",u"ĉio",u"ĉion",u"ĉiu",u"ĉiujn",u"ĉiun",u"ĉu",u"ĝi",u"ĝia",u"ĝian",u"ĝin",u"ĝis",u"ĵus",u"ŝi",u"ŝia",u"ŝin"],u"he":[u"אבל",u"או",u"אולי",u"אותה",u"אותו",u"אותי",u"אותך",u"אותם",u"אותן",u"אותנו",u"אז",u"אחר",u"אחרות",u"אחרי",u"אחריכן",u"אחרים",u"אחרת",u"אי",u"איזה",u"איך",u"אין",u"איפה",u"איתה",u"איתו",u"איתי",u"איתך",u"איתכם",u"איתכן",u"איתם",u"איתן",u"איתנו",u"אך",u"אל",u"אלה",u"אלו",u"אם",u"אנחנו",u"אני",u"אס",u"אף",u"אצל",u"אשר",u"את",u"אתה",u"אתכם",u"אתכן",u"אתם",u"אתן",u"באיזומידה",u"באמצע",u"באמצעות",u"בגלל",u"בין",u"בלי",u"במידה",u"במקוםשבו",u"ברם",u"בשביל",u"בשעהש",u"בתוך",u"גם",u"דרך",u"הוא",u"היא",u"היה",u"היכן",u"היתה",u"היתי",u"הם",u"הן",u"הנה",u"הסיבהשבגללה",u"הרי",u"ואילו",u"ואת",u"זאת",u"זה",u"זות",u"יהיה",u"יוכל",u"יוכלו",u"יותרמדי",u"יכול",u"יכולה",u"יכולות",u"יכולים",u"יכל",u"יכלה",u"יכלו",u"יש",u"כאן",u"כאשר",u"כולם",u"כולן",u"כזה",u"כי",u"כיצד",u"כך",u"ככה",u"כל",u"כלל",u"כמו",u"כן",u"כפי",u"כש",u"לא",u"לאו",u"לאיזותכלית",u"לאן",u"לבין",u"לה",u"להיות",u"להם",u"להן",u"לו",u"לי",u"לכם",u"לכן",u"למה",u"למטה",u"למעלה",u"למקוםשבו",u"למרות",u"לנו",u"לעבר",u"לעיכן",u"לפיכך",u"לפני",u"מאד",u"מאחורי",u"מאיזוסיבה",u"מאין",u"מאיפה",u"מבלי",u"מבעד",u"מדוע",u"מה",u"מהיכן",u"מול",u"מחוץ",u"מי",u"מכאן",u"מכיוון",u"מלבד",u"מן",u"מנין",u"מסוגל",u"מעט",u"מעטים",u"מעל",u"מצד",u"מקוםבו",u"מתחת",u"מתי",u"נגד",u"נגר",u"נו",u"עד",u"עז",u"על",u"עלי",u"עליה",u"עליהם",u"עליהן",u"עליו",u"עליך",u"עליכם",u"עלינו",u"עם",u"עצמה",u"עצמהם",u"עצמהן",u"עצמו",u"עצמי",u"עצמם",u"עצמן",u"עצמנו",u"פה",u"רק",u"שוב",u"של",u"שלה",u"שלהם",u"שלהן",u"שלו",u"שלי",u"שלך",u"שלכה",u"שלכם",u"שלכן",u"שלנו",u"שם",u"תהיה",u"תחת"],u"la":[u"a",u"ab",u"ac",u"ad",u"at",u"atque",u"aut",u"autem",u"cum",u"de",u"dum",u"e",u"erant",u"erat",u"est",u"et",u"etiam",u"ex",u"haec",u"hic",u"hoc",u"in",u"ita",u"me",u"nec",u"neque",u"non",u"per",u"qua",u"quae",u"quam",u"qui",u"quibus",u"quidem",u"quo",u"quod",u"re",u"rebus",u"rem",u"res",u"sed",u"si",u"sic",u"sunt",u"tamen",u
"tandem",u"te",u"ut",u"vel"],u"sk":[u"a",u"aby",u"aj",u"ako",u"aký",u"ale",u"alebo",u"ani",u"avšak",u"ba",u"bez",u"buï",u"cez",u"do",u"ho",u"hoci",u"i",u"ich",u"im",u"ja",u"jeho",u"jej",u"jemu",u"ju",u"k",u"kam",u"kde",u"kedže",u"keï",u"kto",u"ktorý",u"ku",u"lebo",u"ma",u"mi",u"mne",u"mnou",u"mu",u"my",u"mòa",u"môj",u"na",u"nad",u"nami",u"neho",u"nej",u"nemu",u"nich",u"nielen",u"nim",u"no",u"nám",u"nás",u"náš",u"ním",u"o",u"od",u"on",u"ona",u"oni",u"ono",u"ony",u"po",u"pod",u"pre",u"pred",u"pri",u"s",u"sa",u"seba",u"sem",u"so",u"svoj",u"taký",u"tam",u"teba",u"tebe",u"tebou",u"tej",u"ten",u"ti",u"tie",u"to",u"toho",u"tomu",u"tou",u"tvoj",u"ty",u"tá",u"tým",u"v",u"vami",u"veï",u"vo",u"vy",u"vám",u"vás",u"váš",u"však",u"z",u"za",u"zo",u"a",u"èi",u"èo",u"èí",u"òom",u"òou",u"òu",u"že"],u"sl":[u"a",u"ali",u"april",u"avgust",u"b",u"bi",u"bil",u"bila",u"bile",u"bili",u"bilo",u"biti",u"blizu",u"bo",u"bodo",u"bojo",u"bolj",u"bom",u"bomo",u"boste",u"bova",u"boš",u"brez",u"c",u"cel",u"cela",u"celi",u"celo",u"d",u"da",u"daleč",u"dan",u"danes",u"datum",u"december",u"deset",u"deseta",u"deseti",u"deseto",u"devet",u"deveta",u"deveti",u"deveto",u"do",u"dober",u"dobra",u"dobri",u"dobro",u"dokler",u"dol",u"dolg",u"dolga",u"dolgi",u"dovolj",u"drug",u"druga",u"drugi",u"drugo",u"dva",u"dve",u"e",u"eden",u"en",u"ena",u"ene",u"eni",u"enkrat",u"eno",u"etc.",u"f",u"februar",u"g",u"g.",u"ga",u"ga.",u"gor",u"gospa",u"gospod",u"h",u"halo",u"i",u"idr.",u"ii",u"iii",u"in",u"iv",u"ix",u"iz",u"j",u"januar",u"jaz",u"je",u"ji",u"jih",u"jim",u"jo",u"julij",u"junij",u"jutri",u"k",u"kadarkoli",u"kaj",u"kajti",u"kako",u"kakor",u"kamor",u"kamorkoli",u"kar",u"karkoli",u"katerikoli",u"kdaj",u"kdo",u"kdorkoli",u"ker",u"ki",u"kje",u"kjer",u"kjerkoli",u"ko",u"koder",u"koderkoli",u"koga",u"komu",u"kot",u"kratek",u"kratka",u"kratke",u"kratki",u"l",u"lahka",u"lahke",u"lahki",u"lahko",u"le",u"lep",u"lepa",u"lepe",u"lepi",u"lepo",u"leto",u"m",u"maj",u"majhen",u"majhna",u"majhni",u"malce",u"malo",u"manj",u"marec",u"me",u"med",u"medtem",u"mene",u"mesec",u"mi",u"midva",u"midve",u"mnogo",u"moj",u"moja",u"moje",u"mora",u"morajo",u"moram",u"moramo",u"morate",u"moraš",u"morem",u"mu",u"n",u"na",u"nad",u"naj",u"najina",u"najino",u"najmanj",u"naju",u"največ",u"nam",u"narobe",u"nas",u"nato",u"nazaj",u"naš",u"naša",u"naše",u"ne",u"nedavno",u"nedelja",u"nek",u"neka",u"nekaj",u"nekatere",u"nekateri",u"nekatero",u"nekdo",u"neke",u"nekega",u"neki",u"nekje",u"neko",u"nekoga",u"nekoč",u"ni",u"nikamor",u"nikdar",u"nikjer",u"nikoli",u"nič",u"nje",u"njega",u"njegov",u"njegova",u"njegovo",u"njej",u"njemu",u"njen",u"njena",u"njeno",u"nji",u"njih",u"njihov",u"njihova",u"njihovo",u"njiju",u"njim",u"njo",u"njun",u"njuna",u"njuno",u"no",u"nocoj",u"november",u"npr.",u"o",u"ob",u"oba",u"obe",u"oboje",u"od",u"odprt",u"odprta",u"odprti",u"okoli",u"oktober",u"on",u"onadva",u"one",u"oni",u"onidve",u"osem",u"osma",u"osmi",u"osmo",u"oz.",u"p",u"pa",u"pet",u"peta",u"petek",u"peti",u"peto",u"po",u"pod",u"pogosto",u"poleg",u"poln",u"polna",u"polni",u"polno",u"ponavadi",u"ponedeljek",u"ponovno",u"potem",u"povsod",u"pozdravljen",u"pozdravljeni",u"prav",u"prava",u"prave",u"pravi",u"pravo",u"prazen",u"prazna",u"prazno",u"prbl.",u"precej",u"pred",u"prej",u"preko",u"pri",u"pribl.",u"približno",u"primer",u"pripravljen",u"pripravljena",u"pripravljeni",u"proti",u"prva",u"prvi",u"prvo",u"r",u"ravno",u"redko",u"res",u"reč",u"s",u"saj",u"sam",u"sama",u"same",u"sami",u"samo",u"se",u"sebe",u"sebi",u"sedaj",u"sedem",u"sedma",u"sedmi",u"sedmo",u"sem",u"september",u"seveda",u"si",u"sicer",u"sko
raj",u"skozi",u"slab",u"smo",u"so",u"sobota",u"spet",u"sreda",u"srednja",u"srednji",u"sta",u"ste",u"stran",u"stvar",u"sva",u"t",u"ta",u"tak",u"taka",u"take",u"taki",u"tako",u"takoj",u"tam",u"te",u"tebe",u"tebi",u"tega",u"težak",u"težka",u"težki",u"težko",u"ti",u"tista",u"tiste",u"tisti",u"tisto",u"tj.",u"tja",u"to",u"toda",u"torek",u"tretja",u"tretje",u"tretji",u"tri",u"tu",u"tudi",u"tukaj",u"tvoj",u"tvoja",u"tvoje",u"u",u"v",u"vaju",u"vam",u"vas",u"vaš",u"vaša",u"vaše",u"ve",u"vedno",u"velik",u"velika",u"veliki",u"veliko",u"vendar",u"ves",u"več",u"vi",u"vidva",u"vii",u"viii",u"visok",u"visoka",u"visoke",u"visoki",u"vsa",u"vsaj",u"vsak",u"vsaka",u"vsakdo",u"vsake",u"vsaki",u"vsakomur",u"vse",u"vsega",u"vsi",u"vso",u"včasih",u"včeraj",u"x",u"z",u"za",u"zadaj",u"zadnji",u"zakaj",u"zaprta",u"zaprti",u"zaprto",u"zdaj",u"zelo",u"zunaj",u"č",u"če",u"često",u"četrta",u"četrtek",u"četrti",u"četrto",u"čez",u"čigav",u"š",u"šest",u"šesta",u"šesti",u"šesto",u"štiri",u"ž",u"že"],u"br":[u"a",u"ainda",u"alem",u"ambas",u"ambos",u"antes",u"ao",u"aonde",u"aos",u"apos",u"aquele",u"aqueles",u"as",u"assim",u"com",u"como",u"contra",u"contudo",u"cuja",u"cujas",u"cujo",u"cujos",u"da",u"das",u"de",u"dela",u"dele",u"deles",u"demais",u"depois",u"desde",u"desta",u"deste",u"dispoe",u"dispoem",u"diversa",u"diversas",u"diversos",u"do",u"dos",u"durante",u"e",u"ela",u"elas",u"ele",u"eles",u"em",u"entao",u"entre",u"essa",u"essas",u"esse",u"esses",u"esta",u"estas",u"este",u"estes",u"ha",u"isso",u"isto",u"logo",u"mais",u"mas",u"mediante",u"menos",u"mesma",u"mesmas",u"mesmo",u"mesmos",u"na",u"nao",u"nas",u"nem",u"nesse",u"neste",u"nos",u"o",u"os",u"ou",u"outra",u"outras",u"outro",u"outros",u"pelas",u"pelo",u"pelos",u"perante",u"pois",u"por",u"porque",u"portanto",u"propios",u"proprio",u"quais",u"qual",u"qualquer",u"quando",u"quanto",u"que",u"quem",u"quer",u"se",u"seja",u"sem",u"sendo",u"seu",u"seus",u"sob",u"sobre",u"sua",u"suas",u"tal",u"tambem",u"teu",u"teus",u"toda",u"todas",u"todo",u"todos",u"tua",u"tuas",u"tudo",u"um",u"uma",u"umas",u"uns"],u"ca":[u"a",u"abans",u"ací",u"ah",u"així",u"això",u"al",u"aleshores",u"algun",u"alguna",u"algunes",u"alguns",u"alhora",u"allà",u"allí",u"allò",u"als",u"altra",u"altre",u"altres",u"amb",u"ambdues",u"ambdós",u"apa",u"aquell",u"aquella",u"aquelles",u"aquells",u"aquest",u"aquesta",u"aquestes",u"aquests",u"aquí",u"baix",u"cada",u"cadascuna",u"cadascunes",u"cadascuns",u"cadascú",u"com",u"contra",u"d'un",u"d'una",u"d'unes",u"d'uns",u"dalt",u"de",u"del",u"dels",u"des",u"després",u"dins",u"dintre",u"donat",u"doncs",u"durant",u"e",u"eh",u"el",u"els",u"em",u"en",u"encara",u"ens",u"entre",u"eren",u"es",u"esta",u"estaven",u"esteu",u"està",u"estàvem",u"estàveu",u"et",u"etc",u"ets",u"fins",u"fora",u"gairebé",u"ha",u"han",u"has",u"havia",u"he",u"hem",u"heu",u"hi",u"ho",u"i",u"igual",u"iguals",u"ja",u"l'hi",u"la",u"les",u"li",u"li'n",u"llavors",u"m'he",u"ma",u"mal",u"malgrat",u"mateix",u"mateixa",u"mateixes",u"mateixos",u"me",u"mentre",u"meu",u"meus",u"meva",u"meves",u"molt",u"molta",u"moltes",u"molts",u"mon",u"mons",u"més",u"n'he",u"n'hi",u"ne",u"ni",u"no",u"nogensmenys",u"només",u"nosaltres",u"nostra",u"nostre",u"nostres",u"o",u"oh",u"oi",u"on",u"pas",u"pel",u"pels",u"per",u"perquè",u"però",u"poc",u"poca",u"pocs",u"poques",u"potser",u"propi",u"qual",u"quals",u"quan",u"quant",u"que",u"quelcom",u"qui",u"quin",u"quina",u"quines",u"quins",u"què",u"s'ha",u"s'han",u"sa",u"semblant",u"semblants",u"ses",u"seu",u"seus",u"seva",u"seves",u"si",u"sobre",u"sobretot",u"solament",u"sols",u"son",u"sons",u"sota",u"sou
",u"sóc",u"són",u"t'ha",u"t'han",u"t'he",u"ta",u"tal",u"també",u"tampoc",u"tan",u"tant",u"tanta",u"tantes",u"teu",u"teus",u"teva",u"teves",u"ton",u"tons",u"tot",u"tota",u"totes",u"tots",u"un",u"una",u"unes",u"uns",u"us",u"va",u"vaig",u"vam",u"van",u"vas",u"veu",u"vosaltres",u"vostra",u"vostre",u"vostres",u"érem",u"éreu",u"és"],u"cs":[u"a",u"aby",u"ahoj",u"aj",u"ale",u"anebo",u"ani",u"ano",u"asi",u"aspoň",u"atd",u"atp",u"ačkoli",u"až",u"bez",u"beze",u"blízko",u"bohužel",u"brzo",u"bude",u"budem",u"budeme",u"budete",u"budeš",u"budou",u"budu",u"by",u"byl",u"byla",u"byli",u"bylo",u"byly",u"bys",u"být",u"během",u"chce",u"chceme",u"chcete",u"chceš",u"chci",u"chtít",u"chtějí",u"chut'",u"chuti",u"co",u"což",u"cz",u"daleko",u"další",u"den",u"deset",u"devatenáct",u"devět",u"dnes",u"do",u"dobrý",u"docela",u"dva",u"dvacet",u"dvanáct",u"dvě",u"dál",u"dále",u"děkovat",u"děkujeme",u"děkuji",u"ho",u"hodně",u"i",u"jak",u"jakmile",u"jako",u"jakož",u"jde",u"je",u"jeden",u"jedenáct",u"jedna",u"jedno",u"jednou",u"jedou",u"jeho",u"jehož",u"jej",u"jejich",u"její",u"jelikož",u"jemu",u"jen",u"jenom",u"jestli",u"jestliže",u"ještě",u"jež",u"ji",u"jich",u"jimi",u"jinak",u"jiné",u"již",u"jsem",u"jseš",u"jsi",u"jsme",u"jsou",u"jste",u"já",u"jí",u"jím",u"jíž",u"k",u"kam",u"kde",u"kdo",u"kdy",u"když",u"ke",u"kolik",u"kromě",u"kterou",u"která",u"které",u"který",u"kteří",u"kvůli",u"mají",u"mezi",u"mi",u"mne",u"mnou",u"mně",u"moc",u"mohl",u"mohou",u"moje",u"moji",u"možná",u"musí",u"my",u"má",u"málo",u"mám",u"máme",u"máte",u"máš",u"mé",u"mí",u"mít",u"mě",u"můj",u"může",u"na",u"nad",u"nade",u"napište",u"naproti",u"načež",u"naše",u"naši",u"ne",u"nebo",u"nebyl",u"nebyla",u"nebyli",u"nebyly",u"nedělají",u"nedělá",u"nedělám",u"neděláme",u"neděláte",u"neděláš",u"neg",u"nejsi",u"nejsou",u"nemají",u"nemáme",u"nemáte",u"neměl",u"není",u"nestačí",u"nevadí",u"než",u"nic",u"nich",u"nimi",u"nové",u"nový",u"nula",u"nám",u"námi",u"nás",u"náš",u"ním",u"ně",u"něco",u"nějak",u"někde",u"někdo",u"němu",u"němuž",u"o",u"od",u"ode",u"on",u"ona",u"oni",u"ono",u"ony",u"osm",u"osmnáct",u"pak",u"patnáct",u"po",u"pod",u"podle",u"pokud",u"potom",u"pouze",u"pozdě",u"pořád",u"pravé",u"pro",u"prostě",u"prosím",u"proti",u"proto",u"protože",u"proč",u"první",u"pta",u"pět",u"před",u"přes",u"přese",u"při",u"přičemž",u"re",u"rovně",u"s",u"se",u"sedm",u"sedmnáct",u"si",u"skoro",u"smí",u"smějí",u"snad",u"spolu",u"sta",u"sto",u"strana",u"sté",u"své",u"svých",u"svým",u"svými",u"ta",u"tady",u"tak",u"takhle",u"taky",u"také",u"takže",u"tam",u"tamhle",u"tamhleto",u"tamto",u"tato",u"tebe",u"tebou",u"ted'",u"tedy",u"ten",u"tento",u"teto",u"ti",u"tipy",u"tisíc",u"tisíce",u"to",u"tobě",u"tohle",u"toho",u"tohoto",u"tom",u"tomto",u"tomu",u"tomuto",u"toto",u"trošku",u"tu",u"tuto",u"tvoje",u"tvá",u"tvé",u"tvůj",u"ty",u"tyto",u"téma",u"tím",u"tímto",u"tě",u"těm",u"těmu",u"třeba",u"tři",u"třináct",u"u",u"určitě",u"už",u"v",u"vaše",u"vaši",u"ve",u"vedle",u"večer",u"vlastně",u"vy",u"vám",u"vámi",u"vás",u"váš",u"více",u"však",u"všechno",u"všichni",u"vůbec",u"vždy",u"z",u"za",u"zatímco",u"zač",u"zda",u"zde",u"ze",u"zprávy",u"zpět",u"čau",u"či",u"článku",u"články",u"čtrnáct",u"čtyři",u"šest",u"šestnáct",u"že"],u"el":[u"αλλα",u"αν",u"αντι",u"απο",u"αυτα",u"αυτεσ",u"αυτη",u"αυτο",u"αυτοι",u"αυτοσ",u"αυτουσ",u"αυτων",u"για",u"δε",u"δεν",u"εαν",u"ειμαι",u"ειμαστε",u"ειναι",u"εισαι",u"ειστε",u"εκεινα",u"εκεινεσ",u"εκεινη",u"εκεινο",u"εκεινοι",u"εκεινοσ",u"εκεινουσ",u"εκεινων",u"ενω",u"επι",u"η",u"θα",u"ισωσ",u"κ",u"και",u"κατα",u"κι",u"μα",u"με",u"μετα",u"μη",u"μην",u"να",u"ο",u"οι",u"ομωσ",
u"οπωσ",u"οσο",u"οτι",u"παρα",u"ποια",u"ποιεσ",u"ποιο",u"ποιοι",u"ποιοσ",u"ποιουσ",u"ποιων",u"που",u"προσ",u"πωσ",u"σε",u"στη",u"στην",u"στο",u"στον",u"τα",u"την",u"τησ",u"το",u"τον",u"τοτε",u"του",u"των",u"ωσ"],u"eu":[u"al",u"anitz",u"arabera",u"asko",u"baina",u"bat",u"batean",u"batek",u"bati",u"batzuei",u"batzuek",u"batzuetan",u"batzuk",u"bera",u"beraiek",u"berau",u"berauek",u"bere",u"berori",u"beroriek",u"beste",u"bezala",u"da",u"dago",u"dira",u"ditu",u"du",u"dute",u"edo",u"egin",u"ere",u"eta",u"eurak",u"ez",u"gainera",u"gu",u"gutxi",u"guzti",u"haiei",u"haiek",u"haietan",u"hainbeste",u"hala",u"han",u"handik",u"hango",u"hara",u"hari",u"hark",u"hartan",u"hau",u"hauei",u"hauek",u"hauetan",u"hemen",u"hemendik",u"hemengo",u"hi",u"hona",u"honek",u"honela",u"honetan",u"honi",u"hor",u"hori",u"horiei",u"horiek",u"horietan",u"horko",u"horra",u"horrek",u"horrela",u"horretan",u"horri",u"hortik",u"hura",u"izan",u"ni",u"noiz",u"nola",u"non",u"nondik",u"nongo",u"nor",u"nora",u"ze",u"zein",u"zen",u"zenbait",u"zenbat",u"zer",u"zergatik",u"ziren",u"zituen",u"zu",u"zuek",u"zuen",u"zuten"],u"ga":[u"a",u"ach",u"ag",u"agus",u"an",u"aon",u"ar",u"arna",u"as",u"b'",u"ba",u"beirt",u"bhúr",u"caoga",u"ceathair",u"ceathrar",u"chomh",u"chtó",u"chuig",u"chun",u"cois",u"céad",u"cúig",u"cúigear",u"d'",u"daichead",u"dar",u"de",u"deich",u"deichniúr",u"den",u"dhá",u"do",u"don",u"dtí",u"dá",u"dár",u"dó",u"faoi",u"faoin",u"faoina",u"faoinár",u"fara",u"fiche",u"gach",u"gan",u"go",u"gur",u"haon",u"hocht",u"i",u"iad",u"idir",u"in",u"ina",u"ins",u"inár",u"is",u"le",u"leis",u"lena",u"lenár",u"m'",u"mar",u"mo",u"mé",u"na",u"nach",u"naoi",u"naonúr",u"ná",u"ní",u"níor",u"nó",u"nócha",u"ocht",u"ochtar",u"os",u"roimh",u"sa",u"seacht",u"seachtar",u"seachtó",u"seasca",u"seisear",u"siad",u"sibh",u"sinn",u"sna",u"sé",u"sí",u"tar",u"thar",u"thú",u"triúr",u"trí",u"trína",u"trínár",u"tríocha",u"tú",u"um",u"ár",u"é",u"éis",u"í",u"ó",u"ón",u"óna",u"ónár"],u"gl":[u"a",u"alí",u"ao",u"aos",u"aquel",u"aquela",u"aquelas",u"aqueles",u"aquilo",u"aquí",u"as",u"así",u"aínda",u"ben",u"cando",u"che",u"co",u"coa",u"coas",u"comigo",u"con",u"connosco",u"contigo",u"convosco",u"cos",u"cun",u"cunha",u"cunhas",u"cuns",u"da",u"dalgunha",u"dalgunhas",u"dalgún",u"dalgúns",u"das",u"de",u"del",u"dela",u"delas",u"deles",u"desde",u"deste",u"do",u"dos",u"dun",u"dunha",u"dunhas",u"duns",u"e",u"el",u"ela",u"elas",u"eles",u"en",u"era",u"eran",u"esa",u"esas",u"ese",u"eses",u"esta",u"estaba",u"estar",u"este",u"estes",u"estiven",u"estou",u"está",u"están",u"eu",u"facer",u"foi",u"foron",u"fun",u"había",u"hai",u"iso",u"isto",u"la",u"las",u"lle",u"lles",u"lo",u"los",u"mais",u"me",u"meu",u"meus",u"min",u"miña",u"miñas",u"moi",u"na",u"nas",u"neste",u"nin",u"no",u"non",u"nos",u"nosa",u"nosas",u"noso",u"nosos",u"nun",u"nunha",u"nunhas",u"nuns",u"nós",u"o",u"os",u"ou",u"para",u"pero",u"pode",u"pois",u"pola",u"polas",u"polo",u"polos",u"por",u"que",u"se",u"senón",u"ser",u"seu",u"seus",u"sexa",u"sido",u"sobre",u"súa",u"súas",u"tamén",u"tan",u"te",u"ten",u"ter",u"teu",u"teus",u"teñen",u"teño",u"ti",u"tido",u"tiven",u"tiña",u"túa",u"túas",u"un",u"unha",u"unhas",u"uns",u"vos",u"vosa",u"vosas",u"voso",u"vosos",u"vós",u"á",u"é",u"ó",u"ós"],u"hy":[u"այդ",u"այլ",u"այն",u"այս",u"դու",u"դուք",u"եմ",u"են",u"ենք",u"ես",u"եք",u"է",u"էի",u"էին",u"էինք",u"էիր",u"էիք",u"էր",u"ըստ",u"թ",u"ի",u"ին",u"իսկ",u"իր",u"կամ",u"համար",u"հետ",u"հետո",u"մենք",u"մեջ",u"մի",u"ն",u"նա",u"նաև",u"նրա",u"նրանք",u"որ",u"որը",u"որոնք",u"որպես",u"ու",u"ում",u"պիտի",u"վրա",u"և"],u"id":[u"ada",u"adalah",u"adanya",u"adapun",
u"agak",u"agaknya",u"agar",u"akan",u"akankah",u"akhirnya",u"aku",u"akulah",u"amat",u"amatlah",u"anda",u"andalah",u"antar",u"antara",u"antaranya",u"apa",u"apaan",u"apabila",u"apakah",u"apalagi",u"apatah",u"atau",u"ataukah",u"ataupun",u"bagai",u"bagaikan",u"bagaimana",u"bagaimanakah",u"bagaimanapun",u"bagi",u"bahkan",u"bahwa",u"bahwasanya",u"banyak",u"beberapa",u"begini",u"beginian",u"beginikah",u"beginilah",u"begitu",u"begitukah",u"begitulah",u"begitupun",u"belum",u"belumlah",u"berapa",u"berapakah",u"berapalah",u"berapapun",u"bermacam",u"bersama",u"betulkah",u"biasa",u"biasanya",u"bila",u"bilakah",u"bisa",u"bisakah",u"boleh",u"bolehkah",u"bolehlah",u"buat",u"bukan",u"bukankah",u"bukanlah",u"bukannya",u"cuma",u"dahulu",u"dalam",u"dan",u"dapat",u"dari",u"daripada",u"dekat",u"demi",u"demikian",u"demikianlah",u"dengan",u"depan",u"di",u"dia",u"dialah",u"diantara",u"diantaranya",u"dikarenakan",u"dini",u"diri",u"dirinya",u"disini",u"disinilah",u"dong",u"dulu",u"enggak",u"enggaknya",u"entah",u"entahlah",u"hal",u"hampir",u"hanya",u"hanyalah",u"harus",u"haruslah",u"harusnya",u"hendak",u"hendaklah",u"hendaknya",u"hingga",u"ia",u"ialah",u"ibarat",u"ingin",u"inginkah",u"inginkan",u"ini",u"inikah",u"inilah",u"itu",u"itukah",u"itulah",u"jangan",u"jangankan",u"janganlah",u"jika",u"jikalau",u"juga",u"justru",u"kala",u"kalau",u"kalaulah",u"kalaupun",u"kalian",u"kami",u"kamilah",u"kamu",u"kamulah",u"kan",u"kapan",u"kapankah",u"kapanpun",u"karena",u"karenanya",u"ke",u"kecil",u"kemudian",u"kenapa",u"kepada",u"kepadanya",u"ketika",u"khususnya",u"kini",u"kinilah",u"kiranya",u"kita",u"kitalah",u"kok",u"lagi",u"lagian",u"lah",u"lain",u"lainnya",u"lalu",u"lama",u"lamanya",u"lebih",u"macam",u"maka",u"makanya",u"makin",u"malah",u"malahan",u"mampu",u"mampukah",u"mana",u"manakala",u"manalagi",u"masih",u"masihkah",u"masing",u"mau",u"maupun",u"melainkan",u"melalui",u"memang",u"mengapa",u"mereka",u"merekalah",u"merupakan",u"meski",u"meskipun",u"mungkin",u"mungkinkah",u"nah",u"namun",u"nanti",u"nantinya",u"nyaris",u"oleh",u"olehnya",u"pada",u"padahal",u"padanya",u"paling",u"pantas",u"para",u"pasti",u"pastilah",u"per",u"percuma",u"pernah",u"pula",u"pun",u"rupanya",u"saat",u"saatnya",u"saja",u"sajalah",u"saling",u"sama",u"sambil",u"sampai",u"sana",u"sangat",u"sangatlah",u"saya",u"sayalah",u"se",u"sebab",u"sebabnya",u"sebagai",u"sebagaimana",u"sebagainya",u"sebaliknya",u"sebanyak",u"sebegini",u"sebegitu",u"sebelum",u"sebelumnya",u"sebenarnya",u"seberapa",u"sebetulnya",u"sebisanya",u"sebuah",u"sedang",u"sedangkan",u"sedemikian",u"sedikit",u"sedikitnya",u"segala",u"segalanya",u"segera",u"seharusnya",u"sehingga",u"sejak",u"sejenak",u"sekali",u"sekalian",u"sekaligus",u"sekalipun",u"sekarang",u"seketika",u"sekiranya",u"sekitar",u"sekitarnya",u"sela",u"selagi",u"selain",u"selaku",u"selalu",u"selama",u"selamanya",u"seluruh",u"seluruhnya",u"semacam",u"semakin",u"semasih",u"semaunya",u"sementara",u"sempat",u"semua",u"semuanya",u"semula",u"sendiri",u"sendirinya",u"seolah",u"seorang",u"sepanjang",u"sepantasnya",u"sepantasnyalah",u"seperti",u"sepertinya",u"sering",u"seringnya",u"serta",u"serupa",u"sesaat",u"sesama",u"sesegera",u"sesekali",u"seseorang",u"sesuatu",u"sesuatunya",u"sesudah",u"sesudahnya",u"setelah",u"seterusnya",u"setiap",u"setidaknya",u"sewaktu",u"siapa",u"siapakah",u"siapapun",u"sini",u"sinilah",u"suatu",u"sudah",u"sudahkah",u"sudahlah",u"supaya",u"tadi",u"tadinya",u"tak",u"tanpa",u"tapi",u"telah",u"tentang",u"tentu",u"tentulah",u"tentunya",u"terdiri",u"terhadap",u"terhadapnya",u"terlalu",u"terlebih",u"tersebut",u"tersebutla
h",u"tertentu",u"tetapi",u"tiap",u"tidak",u"tidakkah",u"tidaklah",u"toh",u"waduh",u"wah",u"wahai",u"walau",u"walaupun",u"wong",u"yaitu",u"yakni",u"yang"],u"ja":[u"あっ",u"あり",u"ある",u"い",u"いう",u"いる",u"う",u"うち",u"お",u"および",u"おり",u"か",u"かつて",u"から",u"が",u"き",u"ここ",u"こと",u"この",u"これ",u"これら",u"さ",u"さらに",u"し",u"しかし",u"する",u"ず",u"せ",u"せる",u"そして",u"その",u"その他",u"その後",u"それ",u"それぞれ",u"た",u"ただし",u"たち",u"ため",u"たり",u"だ",u"だっ",u"つ",u"て",u"で",u"でき",u"できる",u"です",u"では",u"でも",u"と",u"という",u"といった",u"とき",u"ところ",u"として",u"とともに",u"とも",u"と共に",u"な",u"ない",u"なお",u"なかっ",u"ながら",u"なく",u"なっ",u"など",u"なら",u"なり",u"なる",u"に",u"において",u"における",u"について",u"にて",u"によって",u"により",u"による",u"に対して",u"に対する",u"に関する",u"の",u"ので",u"のみ",u"は",u"ば",u"へ",u"ほか",u"ほとんど",u"ほど",u"ます",u"また",u"または",u"まで",u"も",u"もの",u"ものの",u"や",u"よう",u"より",u"ら",u"られ",u"られる",u"れ",u"れる",u"を",u"ん",u"及び",u"特に"],u"lv":[u"aiz",u"ap",u"apakš",u"apakšpus",u"ar",u"arī",u"augšpus",u"bet",u"bez",u"bija",u"biji",u"biju",u"bijām",u"bijāt",u"būs",u"būsi",u"būsiet",u"būsim",u"būt",u"būšu",u"caur",u"diemžēl",u"diezin",u"droši",u"dēļ",u"esam",u"esat",u"esi",u"esmu",u"gan",u"gar",u"iekam",u"iekams",u"iekām",u"iekāms",u"iekš",u"iekšpus",u"ik",u"ir",u"it",u"itin",u"iz",u"ja",u"jau",u"jeb",u"jebšu",u"jel",u"jo",u"jā",u"ka",u"kamēr",u"kaut",u"kolīdz",u"kopš",u"kā",u"kļuva",u"kļuvi",u"kļuvu",u"kļuvām",u"kļuvāt",u"kļūs",u"kļūsi",u"kļūsiet",u"kļūsim",u"kļūst",u"kļūstam",u"kļūstat",u"kļūsti",u"kļūstu",u"kļūt",u"kļūšu",u"labad",u"lai",u"lejpus",u"līdz",u"līdzko",u"ne",u"nebūt",u"nedz",u"nekā",u"nevis",u"nezin",u"no",u"nu",u"nē",u"otrpus",u"pa",u"par",u"pat",u"pie",u"pirms",u"pret",u"priekš",u"pār",u"pēc",u"starp",u"tad",u"tak",u"tapi",u"taps",u"tapsi",u"tapsiet",u"tapsim",u"tapt",u"tapāt",u"tapšu",u"taču",u"te",u"tiec",u"tiek",u"tiekam",u"tiekat",u"tieku",u"tik",u"tika",u"tikai",u"tiki",u"tikko",u"tiklab",u"tiklīdz",u"tiks",u"tiksiet",u"tiksim",u"tikt",u"tiku",u"tikvien",u"tikām",u"tikāt",u"tikšu",u"tomēr",u"topat",u"turpretim",u"turpretī",u"tā",u"tādēļ",u"tālab",u"tāpēc",u"un",u"uz",u"vai",u"var",u"varat",u"varēja",u"varēji",u"varēju",u"varējām",u"varējāt",u"varēs",u"varēsi",u"varēsiet",u"varēsim",u"varēt",u"varēšu",u"vien",u"virs",u"virspus",u"vis",u"viņpus",u"zem",u"ārpus",u"šaipus"],u"th":[u"กล่าว",u"กว่า",u"กัน",u"กับ",u"การ",u"ก็",u"ก่อน",u"ขณะ",u"ขอ",u"ของ",u"ขึ้น",u"คง",u"ครั้ง",u"ความ",u"คือ",u"จะ",u"จัด",u"จาก",u"จึง",u"ช่วง",u"ซึ่ง",u"ดัง",u"ด้วย",u"ด้าน",u"ตั้ง",u"ตั้งแต่",u"ตาม",u"ต่อ",u"ต่าง",u"ต่างๆ",u"ต้อง",u"ถึง",u"ถูก",u"ถ้า",u"ทั้ง",u"ทั้งนี้",u"ทาง",u"ที่",u"ที่สุด",u"ทุก",u"ทํา",u"ทําให้",u"นอกจาก",u"นัก",u"นั้น",u"นี้",u"น่า",u"นํา",u"บาง",u"ผล",u"ผ่าน",u"พบ",u"พร้อม",u"มา",u"มาก",u"มี",u"ยัง",u"รวม",u"ระหว่าง",u"รับ",u"ราย",u"ร่วม",u"ลง",u"วัน",u"ว่า",u"สุด",u"ส่ง",u"ส่วน",u"สําหรับ",u"หนึ่ง",u"หรือ",u"หลัง",u"หลังจาก",u"หลาย",u"หาก",u"อยาก",u"อยู่",u"อย่าง",u"ออก",u"อะไร",u"อาจ",u"อีก",u"เขา",u"เข้า",u"เคย",u"เฉพาะ",u"เช่น",u"เดียว",u"เดียวกัน",u"เนื่องจาก",u"เปิด",u"เปิดเผย",u"เป็น",u"เป็นการ",u"เพราะ",u"เพื่อ",u"เมื่อ",u"เรา",u"เริ่ม",u"เลย",u"เห็น",u"เอง",u"แต่",u"แบบ",u"แรก",u"และ",u"แล้ว",u"แห่ง",u"โดย",u"ใน",u"ให้",u"ได้",u"ไป",u"ไม่",u"ไว้"],u"ar":[u"،",u"أ",u"ا",u"اثر",u"اجل",u"احد",u"اخرى",u"اذا",u"اربعة",u"اطار",u"اعادة",u"اعلنت",u"اف",u"اكثر",u"اكد",u"الا",u"الاخيرة",u"الان",u"الاول",u"الاولى",u"التى",u"التي",u"الثاني",u"الثانية",u"الذاتي",u"الذى",u"الذي",u"الذين",u"السابق",u"الف",u"الماضي",u"المقبل",u"الوقت",u"الى",u"اليوم",u"اما",u"امام",u"امس",u"ان",u"انه",u"انها",u"او",u"اول",u"اي",u"ايار",u"ايام",u"ايضا",u"ب",u"باسم",u"بان",u"برس",u"بسبب",u"بشكل",u"بعد",u"بعض",u"بن"
,u"به",u"بها",u"بين",u"تم",u"ثلاثة",u"ثم",u"جميع",u"حاليا",u"حتى",u"حوالى",u"حول",u"حيث",u"حين",u"خلال",u"دون",u"ذلك",u"زيارة",u"سنة",u"سنوات",u"شخصا",u"صباح",u"صفر",u"ضد",u"ضمن",u"عام",u"عاما",u"عدة",u"عدد",u"عدم",u"عشر",u"عشرة",u"على",u"عليه",u"عليها",u"عن",u"عند",u"عندما",u"غدا",u"غير",u"ـ",u"ف",u"فان",u"فى",u"في",u"فيه",u"فيها",u"قال",u"قبل",u"قد",u"قوة",u"كان",u"كانت",u"كل",u"كلم",u"كما",u"لا",u"لدى",u"لقاء",u"لكن",u"للامم",u"لم",u"لن",u"له",u"لها",u"لوكالة",u"ما",u"مايو",u"مساء",u"مع",u"مقابل",u"مليار",u"مليون",u"من",u"منذ",u"منها",u"نحو",u"نفسه",u"نهاية",u"هذا",u"هذه",u"هناك",u"هو",u"هي",u"و",u"و6",u"واحد",u"واضاف",u"واضافت",u"واكد",u"وان",u"واوضح",u"وفي",u"وقال",u"وقالت",u"وقد",u"وقف",u"وكان",u"وكانت",u"ولا",u"ولم",u"ومن",u"وهو",u"وهي",u"يكون",u"يمكن",u"يوم"],u"bg":[u"а",u"автентичен",u"аз",u"ако",u"ала",u"бе",u"без",u"беше",u"би",u"бивш",u"бивша",u"бившо",u"бил",u"била",u"били",u"било",u"благодаря",u"близо",u"бъдат",u"бъде",u"бяха",u"в",u"вас",u"ваш",u"ваша",u"вероятно",u"вече",u"взема",u"ви",u"вие",u"винаги",u"внимава",u"време",u"все",u"всеки",u"всички",u"всичко",u"всяка",u"във",u"въпреки",u"върху",u"г",u"ги",u"главен",u"главна",u"главно",u"глас",u"го",u"година",u"години",u"годишен",u"д",u"да",u"дали",u"два",u"двама",u"двамата",u"две",u"двете",u"ден",u"днес",u"дни",u"до",u"добра",u"добре",u"добро",u"добър",u"докато",u"докога",u"дори",u"досега",u"доста",u"друг",u"друга",u"други",u"е",u"евтин",u"едва",u"един",u"една",u"еднаква",u"еднакви",u"еднакъв",u"едно",u"екип",u"ето",u"живот",u"за",u"забавям",u"зад",u"заедно",u"заради",u"засега",u"заспал",u"затова",u"защо",u"защото",u"и",u"из",u"или",u"им",u"има",u"имат",u"иска",u"й",u"каза",u"как",u"каква",u"какво",u"както",u"какъв",u"като",u"кога",u"когато",u"което",u"които",u"кой",u"който",u"колко",u"която",u"къде",u"където",u"към",u"лесен",u"лесно",u"ли",u"лош",u"м",u"май",u"малко",u"ме",u"между",u"мек",u"мен",u"месец",u"ми",u"много",u"мнозина",u"мога",u"могат",u"може",u"мокър",u"моля",u"момента",u"му",u"н",u"на",u"над",u"назад",u"най",u"направи",u"напред",u"например",u"нас",u"не",u"него",u"нещо",u"нея",u"ни",u"ние",u"никой",u"нито",u"нищо",u"но",u"нов",u"нова",u"нови",u"новина",u"някои",u"някой",u"няколко",u"няма",u"обаче",u"около",u"освен",u"особено",u"от",u"отгоре",u"отново",u"още",u"пак",u"по",u"повече",u"повечето",u"под",u"поне",u"поради",u"после",u"почти",u"прави",u"пред",u"преди",u"през",u"при",u"пък",u"първата",u"първи",u"първо",u"пъти",u"равен",u"равна",u"с",u"са",u"сам",u"само",u"се",u"сега",u"си",u"син",u"скоро",u"след",u"следващ",u"сме",u"смях",u"според",u"сред",u"срещу",u"сте",u"съм",u"със",u"също",u"т",u"т.н.",u"тази",u"така",u"такива",u"такъв",u"там",u"твой",u"те",u"тези",u"ти",u"то",u"това",u"тогава",u"този",u"той",u"толкова",u"точно",u"три",u"трябва",u"тук",u"тъй",u"тя",u"тях",u"у",u"утре",u"харесва",u"хиляди",u"ч",u"часа",u"че",u"често",u"чрез",u"ще",u"щом",u"юмрук",u"я",u"як"],u"bn":[u"অনেক",u"অন্য",u"অবশ্য",u"আগে",u"আছে",u"আজ",u"আবার",u"আমরা",u"আমাদের",u"আর",u"ই",u"উত্তর",u"উপর",u"উপরে",u"এ",u"এই",u"এক্",u"এখন",u"এত",u"এব",u"এমন",u"এমনি",u"এর",u"এস",u"এসে",u"ও",u"ওই",u"কমনে",u"করা",u"করে",u"কাছে",u"কাজ",u"কাজে",u"কারণ",u"কি",u"কিছু",u"কে",u"কেউ",u"কেখা",u"কেন",u"কোটি",u"কোনো",u"কয়েক",u"খুব",u"গিয়ে",u"গেল",u"চার",u"চালু",u"চেষ্টা",u"ছিল",u"জানা",u"জ্নজন",u"টি",u"তখন",u"তবে",u"তা",u"তাই",u"তো",u"থাকা",u"থেকে",u"দিন",u"দু",u"দুই",u"দেওয়া",u"ধামার",u"নতুন",u"না",u"নাগাদ",u"নিয়ে",u"নেওয়া",u"নয়",u"পর",u"পরে",u"পাচ",u"পি",u"পেয়্র্",u"প্রতি",u"প্রথম",u"প্রযন্ত",u"প্রাথমিক",u"প্রায়",u"বক্তব্য",u"বন",u"বলা",u"বলে",u"বলেন",
u"বহু",u"বা",u"বি",u"বিভিন্ন",u"বেশ",u"বেশি",u"মতো",u"মধ্যে",u"মনে",u"যখন",u"যদি",u"যা",u"যাওয়া",u"যে",u"র",u"রকম",u"লক্ষ",u"শুধু",u"শুরু",u"সঙ্গে",u"সব",u"সহ",u"সাধারণ",u"সামনে",u"সি",u"সে",u"সেই",u"হতে",u"হাজার",u"হয়"],u"fa":[u"آباد",u"آره",u"آری",u"آمد",u"آمده",u"آن",u"آنان",u"آنجا",u"آنكه",u"آنها",u"آنچه",u"آورد",u"آورده",u"آيد",u"آیا",u"اثرِ",u"از",u"است",u"استفاده",u"اش",u"اكنون",u"البته",u"البتّه",u"ام",u"اما",u"امروز",u"امسال",u"اند",u"انکه",u"او",u"اول",u"اي",u"ايشان",u"ايم",u"اين",u"اينكه",u"اگر",u"با",u"بار",u"بارة",u"باره",u"باشد",u"باشند",u"باشيم",u"بالا",u"بالایِ",u"بايد",u"بدون",u"بر",u"برابرِ",u"براساس",u"براي",u"برایِ",u"برخوردار",u"برخي",u"برداري",u"بروز",u"بسيار",u"بسياري",u"بعد",u"بعری",u"بعضي",u"بلكه",u"بله",u"بلکه",u"بلی",u"بنابراين",u"بندي",u"به",u"بهترين",u"بود",u"بودن",u"بودند",u"بوده",u"بي",u"بيست",u"بيش",u"بيشتر",u"بيشتري",u"بين",u"بی",u"بیرونِ",u"تا",u"تازه",u"تاكنون",u"تان",u"تحت",u"تر",u"ترين",u"تمام",u"تمامي",u"تنها",u"تواند",u"توانند",u"توسط",u"تولِ",u"تویِ",u"جا",u"جاي",u"جايي",u"جدا",u"جديد",u"جريان",u"جز",u"جلوگيري",u"جلویِ",u"حتي",u"حدودِ",u"حق",u"خارجِ",u"خدمات",u"خواست",u"خواهد",u"خواهند",u"خواهيم",u"خود",u"خويش",u"خیاه",u"داد",u"دادن",u"دادند",u"داده",u"دارد",u"دارند",u"داريم",u"داشت",u"داشتن",u"داشتند",u"داشته",u"دانست",u"دانند",u"در",u"درباره",u"دنبالِ",u"ده",u"دهد",u"دهند",u"دو",u"دوم",u"ديده",u"ديروز",u"ديگر",u"ديگران",u"ديگري",u"دیگر",u"را",u"راه",u"رفت",u"رفته",u"روب",u"روزهاي",u"روي",u"رویِ",u"ريزي",u"زياد",u"زير",u"زيرا",u"زیرِ",u"سابق",u"ساخته",u"سازي",u"سراسر",u"سریِ",u"سعي",u"سمتِ",u"سوم",u"سوي",u"سویِ",u"سپس",u"شان",u"شايد",u"شد",u"شدن",u"شدند",u"شده",u"شش",u"شما",u"شناسي",u"شود",u"شوند",u"صورت",u"ضدِّ",u"ضمن",u"طبقِ",u"طريق",u"طور",u"طي",u"عقبِ",u"علّتِ",u"عنوانِ",u"غير",u"فقط",u"فكر",u"فوق",u"قابل",u"قبل",u"قصدِ",u"كرد",u"كردم",u"كردن",u"كردند",u"كرده",u"كسي",u"كل",u"كمتر",u"كند",u"كنم",u"كنند",u"كنيد",u"كنيم",u"كه",u"لطفاً",u"ما",u"مان",u"مانند",u"مانندِ",u"مثل",u"مثلِ",u"مختلف",u"مدّتی",u"مردم",u"مرسی",u"مقابل",u"من",u"مورد",u"مي",u"ميليارد",u"ميليون",u"مگر",u"ناشي",u"نام",u"نبايد",u"نبود",u"نخست",u"نخستين",u"نخواهد",u"ندارد",u"ندارند",u"نداشته",u"نزديك",u"نزدِ",u"نزدیکِ",u"نشان",u"نشده",u"نظير",u"نكرده",u"نمايد",u"نمي",u"نه",u"نوعي",u"نيز",u"نيست",u"ها",u"هاي",u"هايي",u"هر",u"هرگز",u"هزار",u"هست",u"هستند",u"هستيم",u"هفت",u"هم",u"همان",u"همه",u"همواره",u"همين",u"همچنان",u"همچنين",u"همچون",u"همین",u"هنوز",u"هنگام",u"هنگامِ",u"هنگامی",u"هيچ",u"هیچ",u"و",u"وسطِ",u"وقتي",u"وقتیکه",u"ولی",u"وي",u"وگو",u"يا",u"يابد",u"يك",u"يكديگر",u"يكي",u"ّه",u"پاعینِ",u"پس",u"پنج",u"پيش",u"پیش",u"پیشِ",u"چرا",u"چطور",u"چند",u"چندین",u"چنين",u"چه",u"چهار",u"چون",u"چيزي",u"چگونه",u"چیز",u"چیزی",u"چیست",u"کجا",u"کجاست",u"کدام",u"کس",u"کسی",u"کنارِ",u"که",u"کَی",u"کی",u"گذاري",u"گذاشته",u"گردد",u"گرفت",u"گرفته",u"گروهي",u"گفت",u"گفته",u"گويد",u"گويند",u"گيرد",u"گيري",u"یا",u"یک"],u"hi":[u"अंदर",u"अत",u"अदि",u"अप",u"अपना",u"अपनि",u"अपनी",u"अपने",u"अभि",u"अभी",u"आदि",u"आप",u"इंहिं",u"इंहें",u"इंहों",u"इतयादि",u"इत्यादि",u"इन",u"इनका",u"इन्हीं",u"इन्हें",u"इन्हों",u"इस",u"इसका",u"इसकि",u"इसकी",u"इसके",u"इसमें",u"इसि",u"इसी",u"इसे",u"उंहिं",u"उंहें",u"उंहों",u"उन",u"उनका",u"उनकि",u"उनकी",u"उनके",u"उनको",u"उन्हीं",u"उन्हें",u"उन्हों",u"उस",u"उसके",u"उसि",u"उसी",u"उसे",u"एक",u"एवं",u"एस",u"एसे",u"ऐसे",u"ओर",u"और",u"कइ",u"कई",u"कर",u"करता",u"करते",u"करना",u"करने",u"करें",u"कहते",u"कहा",u"का",u"काफि",u"काफ़ी",u"कि",u"किंहें",u"किंहों",u"कितना",u"किन्हें",u"किन्हों",u"किया",u"किर",u"किस",u"किसि",u"किसी",u"किसे",u"की",u"कुछ",u"कुल",u"के",u"को",u"कोइ",u"कोई",u"कोन",u"क
ोनसा",u"कौन",u"कौनसा",u"गया",u"घर",u"जब",u"जहाँ",u"जहां",u"जा",u"जिंहें",u"जिंहों",u"जितना",u"जिधर",u"जिन",u"जिन्हें",u"जिन्हों",u"जिस",u"जिसे",u"जीधर",u"जेसा",u"जेसे",u"जैसा",u"जैसे",u"जो",u"तक",u"तब",u"तरह",u"तिंहें",u"तिंहों",u"तिन",u"तिन्हें",u"तिन्हों",u"तिस",u"तिसे",u"तो",u"था",u"थि",u"थी",u"थे",u"दबारा",u"दवारा",u"दिया",u"दुसरा",u"दुसरे",u"दूसरे",u"दो",u"द्वारा",u"न",u"नहिं",u"नहीं",u"ना",u"निचे",u"निहायत",u"नीचे",u"ने",u"पर",u"पहले",u"पुरा",u"पूरा",u"पे",u"फिर",u"बनि",u"बनी",u"बहि",u"बही",u"बहुत",u"बाद",u"बाला",u"बिलकुल",u"भि",u"भितर",u"भी",u"भीतर",u"मगर",u"मानो",u"मे",u"में",u"यदि",u"यह",u"यहाँ",u"यहां",u"यहि",u"यही",u"या",u"यिह",u"ये",u"रखें",u"रवासा",u"रहा",u"रहे",u"ऱ्वासा",u"लिए",u"लिये",u"लेकिन",u"व",u"वगेरह",u"वरग",u"वर्ग",u"वह",u"वहाँ",u"वहां",u"वहिं",u"वहीं",u"वाले",u"वुह",u"वे",u"वग़ैरह",u"संग",u"सकता",u"सकते",u"सबसे",u"सभि",u"सभी",u"साथ",u"साबुत",u"साभ",u"सारा",u"से",u"सो",u"हि",u"ही",u"हुअ",u"हुआ",u"हुइ",u"हुई",u"हुए",u"हे",u"हें",u"है",u"हैं",u"हो",u"होता",u"होति",u"होती",u"होते",u"होना",u"होने"],u"mr":[u"अधिक",u"अनेक",u"अशी",u"असलयाचे",u"असलेल्या",u"असा",u"असून",u"असे",u"आज",u"आणि",u"आता",u"आपल्या",u"आला",u"आली",u"आले",u"आहे",u"आहेत",u"एक",u"एका",u"कमी",u"करणयात",u"करून",u"का",u"काम",u"काय",u"काही",u"किवा",u"की",u"केला",u"केली",u"केले",u"कोटी",u"गेल्या",u"घेऊन",u"जात",u"झाला",u"झाली",u"झाले",u"झालेल्या",u"टा",u"डॉ",u"तर",u"तरी",u"तसेच",u"ता",u"ती",u"तीन",u"ते",u"तो",u"त्या",u"त्याचा",u"त्याची",u"त्याच्या",u"त्याना",u"त्यानी",u"त्यामुळे",u"त्री",u"दिली",u"दोन",u"न",u"नाही",u"निर्ण्य",u"पण",u"पम",u"परयतन",u"पाटील",u"म",u"मात्र",u"माहिती",u"मी",u"मुबी",u"म्हणजे",u"म्हणाले",u"म्हणून",u"या",u"याचा",u"याची",u"याच्या",u"याना",u"यानी",u"येणार",u"येत",u"येथील",u"येथे",u"लाख",u"व",u"व्यकत",u"सर्व",u"सागित्ले",u"सुरू",u"हजार",u"हा",u"ही",u"हे",u"होणार",u"होत",u"होता",u"होती",u"होते"],u"ro":[u"acea",u"aceasta",u"această",u"aceea",u"acei",u"aceia",u"acel",u"acela",u"acele",u"acelea",u"acest",u"acesta",u"aceste",u"acestea",u"aceşti",u"aceştia",u"acolo",u"acord",u"acum",u"ai",u"aia",u"aibă",u"aici",u"al",u"ale",u"alea",u"altceva",u"altcineva",u"am",u"ar",u"are",u"asemenea",u"asta",u"astea",u"astăzi",u"asupra",u"au",u"avea",u"avem",u"aveţi",u"azi",u"aş",u"aşadar",u"aţi",u"bine",u"bucur",u"bună",u"ca",u"care",u"caut",u"ce",u"cel",u"ceva",u"chiar",u"cinci",u"cine",u"cineva",u"contra",u"cu",u"cum",u"cumva",u"curând",u"curînd",u"când",u"cât",u"câte",u"câtva",u"câţi",u"cînd",u"cît",u"cîte",u"cîtva",u"cîţi",u"că",u"căci",u"cărei",u"căror",u"cărui",u"către",u"da",u"dacă",u"dar",u"datorită",u"dată",u"dau",u"de",u"deci",u"deja",u"deoarece",u"departe",u"deşi",u"din",u"dinaintea",u"dintr-",u"dintre",u"doi",u"doilea",u"două",u"drept",u"după",u"dă",u"ea",u"ei",u"el",u"ele",u"eram",u"este",u"eu",u"eşti",u"face",u"fata",u"fi",u"fie",u"fiecare",u"fii",u"fim",u"fiu",u"fiţi",u"frumos",u"fără",u"graţie",u"halbă",u"iar",u"ieri",u"la",u"le",u"li",u"lor",u"lui",u"lângă",u"lîngă",u"mai",u"mea",u"mei",u"mele",u"mereu",u"meu",u"mi",u"mie",u"mine",u"mult",u"multă",u"mulţi",u"mulţumesc",u"mâine",u"mîine",u"mă",u"ne",u"nevoie",u"nici",u"nicăieri",u"nimeni",u"nimeri",u"nimic",u"nişte",u"noastre",u"noastră",u"noi",u"noroc",u"nostru",u"nouă",u"noştri",u"nu",u"opt",u"ori",u"oricare",u"orice",u"oricine",u"oricum",u"oricând",u"oricât",u"oricînd",u"oricît",u"oriunde",u"patra",u"patru",u"patrulea",u"pe",u"pentru",u"peste",u"pic",u"poate",u"pot",u"prea",u"prima",u"primul",u"prin",u"printr-",u"puţin",u"puţina",u"puţină",u"până",u"pînă",u"rog",u"sa",u"sale",u"sau",u"se",u"spate",u"spre",u"sub",u"sunt",u"suntem",u"sunteţi",u
"sută",u"sînt",u"sîntem",u"sînteţi",u"să",u"săi",u"său",u"ta",u"tale",u"te",u"timp",u"tine",u"toate",u"toată",u"tot",u"totuşi",u"toţi",u"trei",u"treia",u"treilea",u"tu",u"tăi",u"tău",u"un",u"una",u"unde",u"undeva",u"unei",u"uneia",u"unele",u"uneori",u"unii",u"unor",u"unora",u"unu",u"unui",u"unuia",u"unul",u"vi",u"voastre",u"voastră",u"voi",u"vostru",u"vouă",u"voştri",u"vreme",u"vreo",u"vreun",u"vă",u"zece",u"zero",u"zi",u"zice",u"îi",u"îl",u"îmi",u"împotriva",u"în",u"înainte",u"înaintea",u"încotro",u"încât",u"încît",u"între",u"întrucât",u"întrucît",u"îţi",u"ăla",u"ălea",u"ăsta",u"ăstea",u"ăştia",u"şapte",u"şase",u"şi",u"ştiu",u"ţi",u"ţie"],u"en":[u"a",u"a's",u"able",u"about",u"above",u"according",u"accordingly",u"across",u"actually",u"after",u"afterwards",u"again",u"against",u"ain't",u"all",u"allow",u"allows",u"almost",u"alone",u"along",u"already",u"also",u"although",u"always",u"am",u"among",u"amongst",u"an",u"and",u"another",u"any",u"anybody",u"anyhow",u"anyone",u"anything",u"anyway",u"anyways",u"anywhere",u"apart",u"appear",u"appreciate",u"appropriate",u"are",u"aren't",u"around",u"as",u"aside",u"ask",u"asking",u"associated",u"at",u"available",u"away",u"awfully",u"b",u"be",u"became",u"because",u"become",u"becomes",u"becoming",u"been",u"before",u"beforehand",u"behind",u"being",u"believe",u"below",u"beside",u"besides",u"best",u"better",u"between",u"beyond",u"both",u"brief",u"but",u"by",u"c",u"c'mon",u"c's",u"came",u"can",u"can't",u"cannot",u"cant",u"cause",u"causes",u"certain",u"certainly",u"changes",u"clearly",u"co",u"com",u"come",u"comes",u"concerning",u"consequently",u"consider",u"considering",u"contain",u"containing",u"contains",u"corresponding",u"could",u"couldn't",u"course",u"currently",u"d",u"definitely",u"described",u"despite",u"did",u"didn't",u"different",u"do",u"does",u"doesn't",u"doing",u"don't",u"done",u"down",u"downwards",u"during",u"e",u"each",u"edu",u"eg",u"eight",u"either",u"else",u"elsewhere",u"enough",u"entirely",u"especially",u"et",u"etc",u"even",u"ever",u"every",u"everybody",u"everyone",u"everything",u"everywhere",u"ex",u"exactly",u"example",u"except",u"f",u"far",u"few",u"fifth",u"first",u"five",u"followed",u"following",u"follows",u"for",u"former",u"formerly",u"forth",u"four",u"from",u"further",u"furthermore",u"g",u"get",u"gets",u"getting",u"given",u"gives",u"go",u"goes",u"going",u"gone",u"got",u"gotten",u"greetings",u"h",u"had",u"hadn't",u"happens",u"hardly",u"has",u"hasn't",u"have",u"haven't",u"having",u"he",u"he's",u"hello",u"help",u"hence",u"her",u"here",u"here's",u"hereafter",u"hereby",u"herein",u"hereupon",u"hers",u"herself",u"hi",u"him",u"himself",u"his",u"hither",u"hopefully",u"how",u"howbeit",u"however",u"i",u"i'd",u"i'll",u"i'm",u"i've",u"ie",u"if",u"ignored",u"immediate",u"in",u"inasmuch",u"inc",u"indeed",u"indicate",u"indicated",u"indicates",u"inner",u"insofar",u"instead",u"into",u"inward",u"is",u"isn't",u"it",u"it'd",u"it'll",u"it's",u"its",u"itself",u"j",u"just",u"k",u"keep",u"keeps",u"kept",u"know",u"known",u"knows",u"l",u"last",u"lately",u"later",u"latter",u"latterly",u"least",u"less",u"lest",u"let",u"let's",u"like",u"liked",u"likely",u"little",u"look",u"looking",u"looks",u"ltd",u"m",u"mainly",u"many",u"may",u"maybe",u"me",u"mean",u"meanwhile",u"merely",u"might",u"more",u"moreover",u"most",u"mostly",u"much",u"must",u"my",u"myself",u"n",u"name",u"namely",u"nd",u"near",u"nearly",u"necessary",u"need",u"needs",u"neither",u"never",u"nevertheless",u"new",u"next",u"nine",u"no",u"nobody",u"non",u"none",u"noone",u"nor",u"normally",u"not",u"nothing",u"novel",u"now",
u"nowhere",u"o",u"obviously",u"of",u"off",u"often",u"oh",u"ok",u"okay",u"old",u"on",u"once",u"one",u"ones",u"only",u"onto",u"or",u"other",u"others",u"otherwise",u"ought",u"our",u"ours",u"ourselves",u"out",u"outside",u"over",u"overall",u"own",u"p",u"particular",u"particularly",u"per",u"perhaps",u"placed",u"please",u"plus",u"possible",u"presumably",u"probably",u"provides",u"q",u"que",u"quite",u"qv",u"r",u"rather",u"rd",u"re",u"really",u"reasonably",u"regarding",u"regardless",u"regards",u"relatively",u"respectively",u"right",u"s",u"said",u"same",u"saw",u"say",u"saying",u"says",u"second",u"secondly",u"see",u"seeing",u"seem",u"seemed",u"seeming",u"seems",u"seen",u"self",u"selves",u"sensible",u"sent",u"serious",u"seriously",u"seven",u"several",u"shall",u"she",u"should",u"shouldn't",u"since",u"six",u"so",u"some",u"somebody",u"somehow",u"someone",u"something",u"sometime",u"sometimes",u"somewhat",u"somewhere",u"soon",u"sorry",u"specified",u"specify",u"specifying",u"still",u"sub",u"such",u"sup",u"sure",u"t",u"t's",u"take",u"taken",u"tell",u"tends",u"th",u"than",u"thank",u"thanks",u"thanx",u"that",u"that's",u"thats",u"the",u"their",u"theirs",u"them",u"themselves",u"then",u"thence",u"there",u"there's",u"thereafter",u"thereby",u"therefore",u"therein",u"theres",u"thereupon",u"these",u"they",u"they'd",u"they'll",u"they're",u"they've",u"think",u"third",u"this",u"thorough",u"thoroughly",u"those",u"though",u"three",u"through",u"throughout",u"thru",u"thus",u"to",u"together",u"too",u"took",u"toward",u"towards",u"tried",u"tries",u"truly",u"try",u"trying",u"twice",u"two",u"u",u"un",u"under",u"unfortunately",u"unless",u"unlikely",u"until",u"unto",u"up",u"upon",u"us",u"use",u"used",u"useful",u"uses",u"using",u"usually",u"uucp",u"v",u"value",u"various",u"very",u"via",u"viz",u"vs",u"w",u"want",u"wants",u"was",u"wasn't",u"way",u"we",u"we'd",u"we'll",u"we're",u"we've",u"welcome",u"well",u"went",u"were",u"weren't",u"what",u"what's",u"whatever",u"when",u"whence",u"whenever",u"where",u"where's",u"whereafter",u"whereas",u"whereby",u"wherein",u"whereupon",u"wherever",u"whether",u"which",u"while",u"whither",u"who",u"who's",u"whoever",u"whole",u"whom",u"whose",u"why",u"will",u"willing",u"wish",u"with",u"within",u"without",u"won't",u"wonder",u"would",u"wouldn't",u"x",u"y",u"yes",u"yet",u"you",u"you'd",u"you'll",u"you're",u"you've",u"your",u"yours",u"yourself",u"yourselves",u"z",u"zero"]}
| 8,068.714286
| 112,534
| 0.63964
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 118,657
| 0.897937
|
e9da4c23e10982ade2cffc9ff31b496b0afdcefd
| 2,320
|
py
|
Python
|
kovalenko1.py
|
Maxim-Kovalenko/turtle-graphics-programms
|
768866f9b6658dc0933b0391387a6bdec64ad6ec
|
[
"Apache-2.0"
] | 1
|
2020-04-14T08:31:24.000Z
|
2020-04-14T08:31:24.000Z
|
kovalenko1.py
|
Maxim-Kovalenko/turtle-graphics-programms
|
768866f9b6658dc0933b0391387a6bdec64ad6ec
|
[
"Apache-2.0"
] | null | null | null |
kovalenko1.py
|
Maxim-Kovalenko/turtle-graphics-programms
|
768866f9b6658dc0933b0391387a6bdec64ad6ec
|
[
"Apache-2.0"
] | 1
|
2021-01-05T15:47:59.000Z
|
2021-01-05T15:47:59.000Z
|
from turtle import *
from random import *
def move(x, y):
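    # Jump to (x, y) without drawing: lift the pen, go, and lower it again.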
penup()
goto(x, y)
pendown()
def fillpolygon(side, count, color1, color2):
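    # Filled regular polygon: 'count' sides of length 'side', turning 360/count degrees at each vertex.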
pencolor(color2)
fillcolor(color1)
begin_fill()
for i in range(count):
forward(side)
left(360 / count)
end_fill()
def christmas_tree(size, xStart, yStart):
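    # A brown square trunk topped by three stacked green triangles.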
move(xStart, yStart)
fillpolygon(size, 4, "brown", "black")
left(90)
forward(size)
right(90)
backward(size)
for g in range(2):
fillpolygon(size * 3, 3, "lightgreen", "green")
left(60)
forward(size * 2)
right(60)
backward(size)
fillpolygon(size * 3, 3, "lightgreen", "green")
'''left(60)
forward(size*3)
right(60)
backward(size/4)
fillpolygon(size/2, 5, "orange", "darkorange")'''
def treesLine(side, minX, y, count, distBetw):
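    # Draw a row of 'count' trees at height y, starting at minX and spaced distBetw apart.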
for counter in range(count):
x = minX + distBetw * counter
christmas_tree(side, x, y)
def star(side, mainColor, fillColor, x, y):
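    # Tilt 107 degrees, then five forward/left(144) strokes trace a five-pointed star; the tilt is undone afterwards.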
move(x, y)
pencolor(mainColor)
fillcolor(fillColor)
begin_fill()
left(107)
for count in range(5):
forward(side)
left(144)
penup()
right(107)
end_fill()
def starLine(side, minX, y, count, distBetw):
for counter in range(count):
x = minX + distBetw * counter
star(side, "yellow", "yellow", x, y)
def moon(radius, color, minX, minY, maxX, maxY):
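    # Draw the moon as a filled dot at a random point inside the given bounding box.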
x = randint(minX, maxX)
y = randint(minY, maxY)
move(x, y)
dot(radius, color)
def frame(x1, y1, x2, y2, color):
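    # Thick rectangular border drawn through the four corners (x1, y1)..(x2, y2).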
pensize(10)
pencolor(color)
move(x1, y1)
goto(x1, y2)
goto(x2, y2)
goto(x2, y1)
goto(x1, y1)
def writeline(line, color):
    pencolor(color)
    left(90)
    penup()
    forward(55)
    left(90)
    forward(30)
    write(line)
bgcolor("gray")
speed(0)
moon(200, "white", -925, 300, 900, 400)
frame(-950, -490, 950, 500, "darkorange")
pensize(3)
starLine(40, -900, 450, 15, 120)
starLine(40, -900, 380, 13, 150)
starLine(40, -900, 300, 15, 120)
treesLine(20, -900, 100, 23, 80)
treesLine(20, -700, -100, 18, 80)
treesLine(20, -900, -300, 23, 80)
writeline("Merry Christmas!", "darkblue")
| 21.886792
| 56
| 0.563793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 419
| 0.180603
|
e9da66e1d85822aa027c49a94455e58540701de1
| 7,156
|
py
|
Python
|
iec/iec_lookup/tests/test_integration_services.py
|
divinedeveloper/iec-web-service
|
0d5e7a05356cc8d88372a559bd5df787d8dc1f75
|
[
"MIT"
] | null | null | null |
iec/iec_lookup/tests/test_integration_services.py
|
divinedeveloper/iec-web-service
|
0d5e7a05356cc8d88372a559bd5df787d8dc1f75
|
[
"MIT"
] | null | null | null |
iec/iec_lookup/tests/test_integration_services.py
|
divinedeveloper/iec-web-service
|
0d5e7a05356cc8d88372a559bd5df787d8dc1f75
|
[
"MIT"
] | null | null | null |
from iec_lookup.services.iec_lookup_service import IECLookupService
from iec_lookup.tests.fixtures import mongo_test_db_setup, importer_exporter_code_details_as_json, importer_exporter_code_details_as_object, dgft_succes_response_html_string, dgft_error_response_html_string, iec_table_section_list, basic_iec_details_as_object, dgft_error_message
# from bs4 import BeautifulSoup, NavigableString, Tag
import bs4
from iec_lookup.models import ImporterExporterCodeDetails, Director, Branch, RegistrationDetails, RegistrationCumMembershipCertificateDetails, ImporterExporterCodeToBeRetrieved
from iec_lookup.custom_exceptions import CustomApiException
from django.conf import settings
from rest_framework import status
from collections import OrderedDict
import mock
from mock import MagicMock
import mongoengine
import pytest
from iec_lookup import utils
from pprint import pprint
from pytest_mock import mocker
import mongomock
import requests
import json
import logging
# Create your tests here.
# Feature: To be able to do something
# In order to do something
# As someone
# I want the system to do this thing
# Scenario: A sample one
# Given this situation
# When I do something
# Then what I get is what I was expecting for
@pytest.mark.integrationtest
@pytest.mark.usefixtures('mongo_test_db_setup')
class TestIntegrationIecLookupService:
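    # End-to-end tests for IECLookupService against the shared mongo test database fixture;
    # several tests depend on state left behind by earlier ones (see document_id below).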
def setup_method(self):
"""
Initial data setup
"""
        self.request_json = {'code': "1198002743", 'name': "CAP"}
        self.non_existing_iec_json = {'code': "1298001743", 'name': "PAC"}
self.iec_lookup_service = IECLookupService()
    @pytest.mark.xfail(raises=requests.exceptions.ConnectionError, reason="DGFT site down")
def test_integration_check_dgft_site_down(self):
"""
        This method tests the poll_dgft_site_with_iec_and_name service method.
        It polls the DGFT site and asserts the response is not "ERROR";
        an "ERROR" response means the site is down (hence the xfail marker).
"""
dgft_site_response = self.iec_lookup_service.poll_dgft_site_with_iec_and_name(self.request_json)
assert dgft_site_response != "ERROR"
def test_integration_save_complete_iec_details(self, basic_iec_details_as_object, importer_exporter_code_details_as_object):
"""
        This method tests the save_complete_iec_details service method.
        It saves all the collected data into the IEC details document
        and asserts the data is persisted.
"""
saved_iec_details = self.iec_lookup_service.save_complete_iec_details(basic_iec_details_as_object,
importer_exporter_code_details_as_object.directors,importer_exporter_code_details_as_object.branches,
importer_exporter_code_details_as_object.registration_details,importer_exporter_code_details_as_object.rcmc_details)
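        # Stash the saved document id on the fixture so later tests can check it.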
mongo_test_db_setup.document_id = saved_iec_details.id
assert saved_iec_details.importer_exporter_code == basic_iec_details_as_object.importer_exporter_code
assert basic_iec_details_as_object.party_name in saved_iec_details.party_name
        assert saved_iec_details.exporter_type not in ("", None)
        assert saved_iec_details.importer_exporter_code_status not in ("", None)
        assert saved_iec_details.nature_of_concern not in ("", None)
def test_integration_get_iec_with_code_and_name(self):
"""
        This method tests the get_iec_with_code_and_name service method
        and asserts the data returned matches the request.
"""
importer_exporter_code_details = self.iec_lookup_service.get_iec_with_code_and_name(self.request_json)
assert importer_exporter_code_details.id == mongo_test_db_setup.document_id
assert importer_exporter_code_details.importer_exporter_code == self.request_json['code']
assert self.request_json['name'] in importer_exporter_code_details.party_name
        assert importer_exporter_code_details.exporter_type not in ("", None)
        assert importer_exporter_code_details.importer_exporter_code_status not in ("", None)
        assert importer_exporter_code_details.nature_of_concern not in ("", None)
def test_integration_iec_with_code_and_name_not_in_db(self):
"""
        This method tests the get_iec_with_code_and_name service method
        with an IEC that is not in the database
        and asserts that None is returned.
"""
importer_exporter_code_details = self.iec_lookup_service.get_iec_with_code_and_name(self.non_existing_iec_json)
        assert importer_exporter_code_details is None
def test_integration_save_iec_to_retrieve_data(self):
"""
        This method tests the get_or_save_iec_to_retrieve_data service method:
        when the DGFT site is down, the IEC code and name are saved so the
        data can be fetched later. Asserts the pending IEC is persisted.
"""
importer_exporter_code_to_retrieve = self.iec_lookup_service.get_or_save_iec_to_retrieve_data(self.non_existing_iec_json)
mongo_test_db_setup.document_id = importer_exporter_code_to_retrieve.id
assert importer_exporter_code_to_retrieve.importer_exporter_code == self.non_existing_iec_json['code']
assert self.non_existing_iec_json['name'] == importer_exporter_code_to_retrieve.name
        assert importer_exporter_code_to_retrieve.is_iec_data_retrieved is False
def test_integration_get_iec_to_retrieve_data(self):
"""
        This method tests the get_or_save_iec_to_retrieve_data service method:
        when the DGFT site is down, a previously saved IEC code and name can be
        fetched back from the pending collection. Asserts the pending IEC exists.
"""
importer_exporter_code_to_retrieve = self.iec_lookup_service.get_or_save_iec_to_retrieve_data(self.non_existing_iec_json)
assert importer_exporter_code_to_retrieve.id == mongo_test_db_setup.document_id
assert importer_exporter_code_to_retrieve != None
def test_integration_no_iec_with_code_and_name_in_db(self):
"""
This method will tests get_iec_with_code_and_name method in service
to check if dgft site is up and get data
assert data is returned as per request
"""
importer_exporter_code_details = self.iec_lookup_service.get_iec_with_code_and_name(self.non_existing_iec_json)
assert importer_exporter_code_details == None
def test_integration_retrieve_iec_data_with_code(self):
"""
This method will tests retrieve_iec_data_with_code method in service
to check if iec data is present in db
assert data is returned as per iec code
"""
importer_exporter_code_details = self.iec_lookup_service.retrieve_iec_data_with_code(self.request_json['code'])
assert importer_exporter_code_details.importer_exporter_code == self.request_json['code']
assert self.request_json['name'] in importer_exporter_code_details.party_name
assert importer_exporter_code_details.exporter_type != "" or None
assert importer_exporter_code_details.importer_exporter_code_status != "" or None
assert importer_exporter_code_details.nature_of_concern != "" or None
def test_integration_not_found_retrieve_iec_data_with_code(self):
"""
This method will tests retrieve_iec_data_with_code method in service
assert iec not found exception is raised
"""
with pytest.raises(CustomApiException) as exc_info:
self.iec_lookup_service.retrieve_iec_data_with_code(self.non_existing_iec_json['code'])
def teardown_method(self):
"""
Set values to none
"""
self.request_json = None
self.non_existing_iec_json = None
self.iec_lookup_service = None
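
# Hedged sketch (not part of the original suite): `assert x != "" or None`
# parses as `assert (x != "") or None`, which passes whenever x is None, so
# the populated-field checks above spell out both cases. A tiny helper makes
# the intent explicit:
def _is_populated(value):
    """True only when value is neither None nor the empty string."""
    return value not in (None, "")


assert _is_populated("exporter")
assert not _is_populated("")
assert not _is_populated(None)  # the `!= "" or None` idiom silently passed here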
| 42.595238
| 279
| 0.827138
| 5,832
| 0.81498
| 0
| 0
| 5,909
| 0.825741
| 0
| 0
| 2,046
| 0.285914
|
e9db9333dcabf339b75e8e3dafb52fedc14104d7
| 9,713
|
py
|
Python
|
tests.py
|
klen/http-router
|
b571aed91200e9d57da4d2136d7e1a5312ef6c4e
|
[
"MIT"
] | 11
|
2020-11-10T15:12:58.000Z
|
2022-01-24T13:14:53.000Z
|
tests.py
|
klen/http-router
|
b571aed91200e9d57da4d2136d7e1a5312ef6c4e
|
[
"MIT"
] | 2
|
2021-05-01T13:59:14.000Z
|
2022-03-09T20:45:02.000Z
|
tests.py
|
klen/http-router
|
b571aed91200e9d57da4d2136d7e1a5312ef6c4e
|
[
"MIT"
] | null | null | null |
"""HTTP Router tests."""
import inspect
import typing as t
from re import compile as re
import pytest
@pytest.fixture
def router():
from http_router import Router, NotFound, MethodNotAllowed, RouterError # noqa
return Router()
def test_router_basic(router):
assert router
assert not router.trim_last_slash
assert router.validator
assert router.NotFound
assert router.RouterError
assert router.MethodNotAllowed
router.trim_last_slash = True
assert router.trim_last_slash
def test_router_route_re(router):
router.route(re('test.jpg'))('test1 passed')
assert router('test.jpg').target == 'test1 passed'
assert router('testAjpg').target == 'test1 passed'
assert router('testAjpg/regex/can/be/dangerous').target == 'test1 passed'
router.route(re(r'params/(\w+)'))('test2 passed')
match = router('params/mike')
assert match
assert not match.params
router.route(re(r'params2/(?P<User>\w+)'))('test3 passed')
match = router('params2/mike')
assert match
assert match.params == {'User': 'mike'}
def test_router_route_str(router):
router.route('test.jpg')(True)
match = router('test.jpg')
assert match
with pytest.raises(router.NotFound):
router('test.jpeg')
router.route('/any/{item}')(True)
match = router('/any/test')
assert match
assert match.params == {'item': 'test'}
router.route('/str/{item:str}')(True)
match = router('/str/42')
assert match
assert match.params == {'item': '42'}
router.route('/int/{item:int}')(True)
match = router('/int/42')
assert match
assert match.params == {'item': 42}
router.route(r'/regex/{item:\d{3}}')(True)
match = router('/regex/422')
assert match
assert match.params == {'item': '422'}
def test_parse_path():
from http_router.utils import parse_path
assert parse_path('/') == ('/', None, {})
assert parse_path('/test.jpg') == ('/test.jpg', None, {})
assert parse_path('/{foo') == ('/{foo', None, {})
path, regex, params = parse_path(r'/{foo}/')
assert isinstance(regex, t.Pattern)
assert regex.pattern == r'^/(?P<foo>[^/]+)/$'
assert path == '/{foo}/'
assert params == {'foo': str}
path, regex, params = parse_path(r'/{foo:int}/')
assert isinstance(regex, t.Pattern)
assert regex.pattern == r'^/(?P<foo>\d+)/$'
assert path == '/{foo}/'
assert params == {'foo': int}
path, regex, params = parse_path(re(r'/(?P<foo>\d{1,3})/'))
assert isinstance(regex, t.Pattern)
assert params == {}
assert path
path, regex, params = parse_path(r'/api/v1/items/{item:str}/subitems/{ subitem:\d{3} }/find')
assert path == '/api/v1/items/{item}/subitems/{subitem}/find'
assert regex.match('/api/v1/items/foo/subitems/300/find')
assert params['item']
assert params['subitem']
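
# Hedged sketch (simplified; not http_router's actual implementation): the
# assertions in test_parse_path imply a template-to-regex conversion where
# `{name}` maps to `[^/]+` and `{name:int}` maps to `\d+`, collecting one
# converter per parameter. Custom inline regexes are omitted here.
import re as _re

_TYPE_PATTERNS = {'str': (r'[^/]+', str), 'int': (r'\d+', int)}


def sketch_parse_path(path):
    params = {}

    def repl(match):
        name, type_name = match.group(1), match.group(2) or 'str'
        pattern, converter = _TYPE_PATTERNS[type_name]
        params[name] = converter
        return '(?P<%s>%s)' % (name, pattern)

    regex = _re.compile('^%s$' % _re.sub(r'{\s*(\w+)(?::(\w+))?\s*}', repl, path))
    return regex, params


_sketch_regex, _sketch_params = sketch_parse_path('/{foo:int}/')
assert _sketch_regex.pattern == r'^/(?P<foo>\d+)/$'
assert _sketch_params == {'foo': int}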
def test_route():
from http_router.routes import Route
route = Route('/only-post', {'POST'}, None)
assert route.methods
assert route.match('/only-post', 'POST')
assert not route.match('/only-post', '')
route = Route('/only-post', set(), None)
assert not route.methods
def test_dynamic_route():
from http_router.routes import DynamicRoute
route = DynamicRoute(r'/order/{id:int}', set(), None)
match = route.match('/order/100', '')
assert match
assert match.params == {'id': 100}
match = route.match('/order/unknown', '')
assert not match
assert not match.params
route = DynamicRoute(re('/regex(/opt)?'), set(), None)
match = route.match('/regex', '')
assert match
match = route.match('/regex/opt', '')
assert match
def test_router():
"""Base tests."""
from http_router import Router
router = Router(trim_last_slash=True)
with pytest.raises(router.RouterError):
router.route(lambda: 12)
with pytest.raises(router.NotFound):
assert router('/unknown')
router.route('/', '/simple')('simple')
match = router('/', 'POST')
assert match.target == 'simple'
assert not match.params
match = router('/simple', 'DELETE')
assert match.target == 'simple'
assert not match.params
router.route('/only-post', methods='post')('only-post')
assert router.plain['/only-post'][0].methods == {'POST'}
with pytest.raises(router.MethodNotAllowed):
assert router('/only-post')
match = router('/only-post', 'POST')
assert match.target == 'only-post'
assert not match.params
router.route('/dynamic1/{id}')('dyn1')
router.route('/dynamic2/{ id }')('dyn2')
match = router('/dynamic1/11/')
assert match.target == 'dyn1'
assert match.params == {'id': '11'}
match = router('/dynamic2/22/')
assert match.target == 'dyn2'
assert match.params == {'id': '22'}
@router.route(r'/hello/{name:str}', methods='post')
def hello():
return 'hello'
match = router('/hello/john/', 'POST')
assert match.target() == 'hello'
assert match.params == {'name': 'john'}
@router.route('/params', var='value')
def params(**opts):
return opts
match = router('/params', 'POST')
assert match.target() == {'var': 'value'}
assert router.routes()
assert router.routes()[0].path == ''
def test_mounts():
from http_router import Router
from http_router.routes import Mount
router = Router()
route = Mount('/api/', set(), router)
assert route.path == '/api'
match = route.match('/api/e1', '')
assert not match
router.route('/e1')('e1')
match = route.match('/api/e1', 'UNKNOWN')
assert match
assert match.target == 'e1'
root = Router()
subrouter = Router()
root.route('/api')(1)
root.route(re('/api/test'))(2)
root.route('/api')(subrouter)
subrouter.route('/test')(3)
assert root('/api').target == 1
assert root('/api/test').target == 3
def test_trim_last_slash():
from http_router import Router
router = Router()
router.route('/route1')('route1')
router.route('/route2/')('route2')
assert router('/route1').target == 'route1'
assert router('/route2/').target == 'route2'
with pytest.raises(router.NotFound):
assert not router('/route1/')
with pytest.raises(router.NotFound):
assert not router('/route2')
router = Router(trim_last_slash=True)
router.route('/route1')('route1')
router.route('/route2/')('route2')
assert router('/route1').target == 'route1'
assert router('/route2/').target == 'route2'
assert router('/route1/').target == 'route1'
assert router('/route2').target == 'route2'
def test_validator():
from http_router import Router
# The router only accepts async functions
router = Router(validator=inspect.iscoroutinefunction)
with pytest.raises(router.RouterError):
router.route('/', '/simple')(lambda: 'simple')
def test_converter():
from http_router import Router
    # The converter wraps every target registered with the router
router = Router(converter=lambda v: lambda r: (r, v))
router.route('/')('simple')
match = router('/')
assert match.target('test') == ('test', 'simple')
def test_custom_route():
from http_router import Router
class View:
methods = 'get', 'post'
def __new__(cls, *args, **kwargs):
"""Init the class and call it."""
self = super().__new__(cls)
return self(*args, **kwargs)
@classmethod
def __route__(cls, router, *paths, **params):
return router.bind(cls, *paths, methods=cls.methods)
    # A class can customize its own registration via __route__
router = Router()
router.route('/')(View)
assert router.plain['/'][0].methods == {'GET', 'POST'}
match = router('/')
assert match.target is View
def test_nested_routers():
from http_router import Router
child = Router()
child.route('/url', methods='PATCH')('child_url')
match = child('/url', 'PATCH')
assert match.target == 'child_url'
root = Router()
root.route('/child')(child)
with pytest.raises(root.NotFound):
root('/child')
with pytest.raises(root.NotFound):
root('/child/unknown')
with pytest.raises(root.MethodNotAllowed):
root('/child/url')
match = root('/child/url', 'PATCH')
assert match.target == 'child_url'
def test_readme():
from http_router import Router
router = Router(trim_last_slash=True)
@router.route('/simple')
def simple():
return 'simple'
match = router('/simple')
assert match.target() == 'simple'
assert match.params is None
def test_method_shortcuts(router):
router.delete('/delete')('DELETE')
router.get('/get')('GET')
router.post('/post')('POST')
for route in router.routes():
method = route.target
assert route.methods == {method}
def test_benchmark(router, benchmark):
import random
import string
CHARS = string.ascii_letters + string.digits
RANDOM = lambda: ''.join(random.choices(CHARS, k=10)) # noqa
METHODS = 'GET', 'POST'
routes = [f"/{ RANDOM() }/{ RANDOM() }" for _ in range(100)]
routes += [f"/{ RANDOM() }/{{item}}/{ RANDOM() }" for _ in range(100)]
random.shuffle(routes)
    paths = []
    for route in routes:
        router.route(route, methods=random.choice(METHODS))('OK')
        paths.append(route.format(item=RANDOM()))
def do_work():
for path in paths:
try:
assert router(path)
except router.MethodNotAllowed:
pass
benchmark(do_work)
| 25.901333
| 97
| 0.616596
| 356
| 0.036652
| 0
| 0
| 503
| 0.051786
| 0
| 0
| 2,124
| 0.218676
|
e9ded115d696296991152dc24cb65d091ffcff68
| 4,072
|
py
|
Python
|
venv/Lib/site-packages/tests/unit/driver/test_async_generic_driver.py
|
melihteke/ebook_study
|
4848ea42e37ee1d6ec777bfc33f49984653ace34
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/tests/unit/driver/test_async_generic_driver.py
|
melihteke/ebook_study
|
4848ea42e37ee1d6ec777bfc33f49984653ace34
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/tests/unit/driver/test_async_generic_driver.py
|
melihteke/ebook_study
|
4848ea42e37ee1d6ec777bfc33f49984653ace34
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import pytest
import scrapli
from scrapli.channel import Channel
from scrapli.transport import Transport
from ...test_data.unit_test_cases import TEST_CASES
TEST_DATA_DIR = f"{Path(scrapli.__file__).parents[1]}/tests/test_data"
def test_init(sync_generic_driver_conn):
"""Test that all arguments get properly passed from driver/transport to channel on init"""
assert isinstance(sync_generic_driver_conn.channel.transport, Transport)
assert isinstance(sync_generic_driver_conn.channel, Channel)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"strip_prompt", [True, False], ids=["strip_prompt", "no_strip_prompt"],
)
async def test_send_command(async_generic_driver_conn, strip_prompt):
expected_raw = TEST_CASES["cisco_iosxe"]["test_send_input"]["raw_result"]
expected_processed = (
TEST_CASES["cisco_iosxe"]["test_send_input"]["processed_result"]["strip"]
if strip_prompt
else TEST_CASES["cisco_iosxe"]["test_send_input"]["processed_result"]["no_strip"]
)
await async_generic_driver_conn.open()
await async_generic_driver_conn.send_command(command="terminal length 0")
response = await async_generic_driver_conn.send_command(
command="show version", strip_prompt=strip_prompt
)
assert response.raw_result == expected_raw.encode()
assert response.result == expected_processed
@pytest.mark.asyncio
@pytest.mark.parametrize(
"strip_prompt", [True, False], ids=["strip_prompt", "no_strip_prompt"],
)
async def test_send_commands(async_generic_driver_conn, strip_prompt):
expected_raw = TEST_CASES["cisco_iosxe"]["test_send_input"]["raw_result"]
expected_processed = (
TEST_CASES["cisco_iosxe"]["test_send_input"]["processed_result"]["strip"]
if strip_prompt
else TEST_CASES["cisco_iosxe"]["test_send_input"]["processed_result"]["no_strip"]
)
await async_generic_driver_conn.open()
multi_response = await async_generic_driver_conn.send_commands(
commands=["terminal length 0", "show version"], strip_prompt=strip_prompt
)
assert multi_response[1].raw_result == expected_raw.encode()
assert multi_response[1].result == expected_processed
@pytest.mark.asyncio
@pytest.mark.parametrize(
"strip_prompt", [True, False], ids=["strip_prompt", "no_strip_prompt"],
)
async def test_send_commands_from_file(async_generic_driver_conn, strip_prompt):
expected_raw = TEST_CASES["cisco_iosxe"]["test_send_input"]["raw_result"]
expected_processed = (
TEST_CASES["cisco_iosxe"]["test_send_input"]["processed_result"]["strip"]
if strip_prompt
else TEST_CASES["cisco_iosxe"]["test_send_input"]["processed_result"]["no_strip"]
)
await async_generic_driver_conn.open()
await async_generic_driver_conn.send_command(command="terminal length 0")
multi_response = await async_generic_driver_conn.send_commands_from_file(
file=f"{TEST_DATA_DIR}/files/cisco_iosxe_commands", strip_prompt=strip_prompt
)
assert multi_response[0].raw_result == expected_raw.encode()
assert multi_response[0].result == expected_processed
@pytest.mark.asyncio
async def test_send_interactive(async_generic_driver_conn):
expected_raw = TEST_CASES["cisco_iosxe"]["test_send_inputs_interact"]["raw_result"]
expected_processed = TEST_CASES["cisco_iosxe"]["test_send_inputs_interact"]["processed_result"]
interact_events = TEST_CASES["cisco_iosxe"]["test_send_inputs_interact"]["interact_events"]
await async_generic_driver_conn.open()
response = await async_generic_driver_conn.send_interactive(interact_events=interact_events)
assert expected_raw.encode() in response.raw_result
assert expected_processed in response.result
@pytest.mark.asyncio
async def test_get_prompt(async_generic_driver_conn):
expected_prompt = TEST_CASES["cisco_iosxe"]["test_get_prompt"]["privilege_exec"]
await async_generic_driver_conn.open()
found_prompt = await async_generic_driver_conn.get_prompt()
assert found_prompt == expected_prompt
| 42.416667
| 99
| 0.766208
| 0
| 0
| 0
| 0
| 3,520
| 0.86444
| 3,103
| 0.762033
| 1,087
| 0.266945
|
e9dfba4367dce3d55494d724918c8e7f5c9acb43
| 2,301
|
py
|
Python
|
tests/timesheet/test_regroup.py
|
simonbru/taxi
|
3940f520b6d61b5ac7c851c38dfd05da2f65b647
|
[
"WTFPL"
] | 17
|
2016-02-02T14:10:49.000Z
|
2021-11-30T00:04:29.000Z
|
tests/timesheet/test_regroup.py
|
simonbru/taxi
|
3940f520b6d61b5ac7c851c38dfd05da2f65b647
|
[
"WTFPL"
] | 70
|
2015-01-08T17:02:42.000Z
|
2021-09-21T20:08:07.000Z
|
tests/timesheet/test_regroup.py
|
simonbru/taxi
|
3940f520b6d61b5ac7c851c38dfd05da2f65b647
|
[
"WTFPL"
] | 8
|
2015-08-23T12:50:36.000Z
|
2021-11-26T10:33:45.000Z
|
import datetime
from . import create_timesheet
def test_regroup_doesnt_regroup_entries_with_different_alias():
contents = """01.04.2013
foo 2 bar
bar 2 bar"""
t = create_timesheet(contents)
entries = list(t.entries.filter(regroup=True).values())[0]
assert len(entries) == 2
def test_regroup_doesnt_regroup_entries_with_different_description():
contents = """01.04.2013
foo 2 bar
foo 2 baz"""
t = create_timesheet(contents)
entries = list(t.entries.filter(regroup=True).values())[0]
assert len(entries) == 2
def test_regroup_regroups_entries_with_same_alias_and_description():
contents = """01.04.2013
foo 2 bar
foo 3 bar
bar 1 barz"""
t = create_timesheet(contents)
entries = list(t.entries.filter(regroup=True).values())[0]
assert len(entries) == 2
def test_regroup_adds_time():
contents = """01.04.2013
foo 2 bar
foo 3 bar"""
t = create_timesheet(contents)
entries = list(t.entries.filter(regroup=True).values())[0]
assert entries[0].hours == 5
def test_regroup_adds_time_with_start_and_end_time():
contents = """01.04.2013
foo 2 bar
foo 0900-1000 bar"""
t = create_timesheet(contents)
entries = list(t.entries.filter(regroup=True).values())[0]
assert entries[0].hours == 3
def test_regroup_doesnt_regroup_ignored_entries_with_non_ignored_entries():
contents = """01.04.2013
foo 2 bar
? foo 3 test"""
t = create_timesheet(contents)
entries = list(t.entries.filter(regroup=True).values())[0]
assert len(entries) == 2
def test_regroup_regroups_entries_with_partial_time():
contents = """01.04.2013
foo 2 bar
foo 0800-0900 bar
bar -1000 bar
foo -1100 bar"""
t = create_timesheet(contents)
entries = t.entries.filter(regroup=True)[datetime.date(2013, 4, 1)]
assert len(entries) == 2
assert entries[0].hours == 4
def test_set_pushed_flag_on_regrouped_entry_sets_flag_on_associated_entries():
contents = """01.04.2013
foo 2 bar
bar 0900-1000 bar
foo 1 bar"""
t = create_timesheet(contents)
entries = t.entries.filter(regroup=True)[datetime.date(2013, 4, 1)]
for entry in entries:
entry.pushed = True
lines = t.entries.to_lines()
assert lines == ["01.04.2013", "= foo 2 bar", "= bar 0900-1000 bar",
"= foo 1 bar"]
| 25.285714
| 78
| 0.694046
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 423
| 0.183833
|
e9e0012334031a979e4c8078a3fc972d1c90c1a0
| 5,192
|
py
|
Python
|
speaker/adam.py
|
shannon-jia/speaker
|
31c642f018725dd4878ef6a4e7a19b12b05774c8
|
[
"MIT"
] | null | null | null |
speaker/adam.py
|
shannon-jia/speaker
|
31c642f018725dd4878ef6a4e7a19b12b05774c8
|
[
"MIT"
] | null | null | null |
speaker/adam.py
|
shannon-jia/speaker
|
31c642f018725dd4878ef6a4e7a19b12b05774c8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# _*_ coding: utf-8 _*_
import asyncio
import logging
import struct
log = logging.getLogger(__name__)
class TcpClientProtocol(asyncio.Protocol):
def __init__(self, master):
self.master = master
def connection_made(self, transport):
self.transport = transport
self.master.connected = True
def data_received(self, data):
log.info('Data received: {!r}'.format(data))
def connection_lost(self, exc):
log.error('The server closed the connection')
self.master.connected = None
class Adam(object):
HEAD = b'\x00\x00\x00\x00\x00\x06'
def __init__(self, loop, host, port=502):
self.station_address = 1
self.function_code = 5
self.coil_address = 0x10
self.send_str = b''
self.loop = loop or asyncio.get_event_loop()
self.host = host
self.port = port
self.connected = None
self.loop.create_task(self._do_connect())
self.transport = None
self.coils_state = 0
self.transaction_id = 0
self.protocol_id = 0
# self.loop.call_later(6, self.keepAlive)
async def _do_connect(self):
while True:
await asyncio.sleep(5)
if self.connected:
continue
try:
xt, _ = await self.loop.create_connection(
lambda: TcpClientProtocol(self),
self.host,
self.port)
log.info('Connection create on {}'.format(xt))
self.transport = xt
self.connected = True
self.read_coils_status()
# self.login()
except OSError:
log.error('Server not up retrying in 5 seconds...')
except Exception as e:
log.error('Error when connect to server: {}'.format(e))
def _command_head(self, length):
self.transaction_id += 1
s = struct.Struct('>HHH')
values = (self.transaction_id,
self.protocol_id,
length)
return s.pack(*values)
# function code is 1
def read_coils_status(self):
self.send_str = self._command_head(6)
s = struct.Struct('>BBHH')
values = (self.station_address,
1,
self.coil_address,
8)
self.send_str += s.pack(*values)
log.info('Adam-6017 read_coil_status...')
return self.call(self.send_str)
# function code is 5
def force_single_coil(self, address, action):
if action.upper() == 'OFF':
act = 0x0000
elif action.upper() == 'ON':
act = 0xFF00
else:
act = 0xFFFF
self.send_str = self._command_head(6)
s = struct.Struct('>BBHH')
values = (self.station_address,
5,
address,
act)
self.send_str += s.pack(*values)
        log.info('Adam-6017 Function[0x05] address=0x{:02X} action={}'.format(address, action))
return self.call(self.send_str)
# function code is f
def force_multi_coils(self, data):
self.send_str = self._command_head(8)
s = struct.Struct('>BBHHBB')
values = (self.station_address,
0x0f,
self.coil_address,
0x08,
0x01,
data)
self.send_str += s.pack(*values)
log.info('Adam-6017 Function[0x0F]({})'.format(data))
return self.call(self.send_str)
def call(self, cmd):
log.info('Try to send: {}'.format(cmd))
if self.transport:
self.transport.write(cmd)
log.debug('send cmd to server: {}'.format(cmd))
else:
log.error('Invalid server transport.')
# zone = 0: do-0
# zone = 1: do-1
def alarm_task(self, action, task, zone=0):
if action.upper() == 'OFF':
self.coils_state &= ~(1 << zone)
elif action.upper() == 'ON':
self.coils_state |= (1 << zone)
else:
self.coils_state = 0
self.force_single_coil(self.coil_address + zone,
action)
# self.read_coils_status()
# self.force_multi_coils(self.coils_state)
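
# Hedged sketch (illustrative; nothing in the class calls it): _command_head()
# packs the Modbus/TCP MBAP header as three big-endian uint16s -- transaction
# id, protocol id, and the byte count of everything that follows the header.
# Decoding one back looks like this:
def _sketch_decode_mbap(frame):
    tid, pid, length = struct.unpack('>HHH', frame[:6])
    return {'transaction_id': tid, 'protocol_id': pid, 'length': length}


assert _sketch_decode_mbap(b'\x00\x01\x00\x00\x00\x06') == {
    'transaction_id': 1, 'protocol_id': 0, 'length': 6}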
if __name__ == '__main__':
log = logging.getLogger("")
formatter = logging.Formatter("%(asctime)s %(levelname)s " +
"[%(module)s:%(lineno)d] %(message)s")
# log the things
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
log.addHandler(ch)
loop = asyncio.get_event_loop()
port = 8502
host = '127.0.0.1'
adam = Adam(loop, host, port)
    # a bare asyncio.sleep() only creates a coroutine; the loop must drive it
    loop.run_until_complete(asyncio.sleep(10))
adam.alarm_task('ON', 1)
adam.alarm_task('OFF', 1)
adam.alarm_task('release', 1)
adam.alarm_task('ON', 1, 1)
adam.alarm_task('OFF', 1, 1)
adam.alarm_task('release', 1, 1)
# Serve requests until Ctrl+C is pressed
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
loop.close()
| 28.685083
| 72
| 0.548151
| 4,192
| 0.807396
| 0
| 0
| 0
| 0
| 742
| 0.142912
| 848
| 0.163328
|
e9e0f660874b7198857a18d3f6b0c75b556083fb
| 721
|
py
|
Python
|
forMySQL/countupgetpoints.py
|
ryosuke0503/DockerMySQL
|
c1f3a8e92623cdf0297cd6f721fb9d92046f4091
|
[
"MIT"
] | null | null | null |
forMySQL/countupgetpoints.py
|
ryosuke0503/DockerMySQL
|
c1f3a8e92623cdf0297cd6f721fb9d92046f4091
|
[
"MIT"
] | null | null | null |
forMySQL/countupgetpoints.py
|
ryosuke0503/DockerMySQL
|
c1f3a8e92623cdf0297cd6f721fb9d92046f4091
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import mysql.connector
import pandas as pd
import sys
#tablename = str(sys.argv[1])
csvname = "result1232.csv"
# team name whose total score we want
target = str(sys.argv[1])
# connect to the database
conn = mysql.connector.connect(
host="localhost",
database="toto",
user="root",
password="root"
)
print("connection: "+str(conn.is_connected()))
# get a cursor
cur = conn.cursor(buffered=True, dictionary=True)
mysql = "SELECT SUM(IF( home='"+target+"' , homescore , IF( away='"+target+"' , awayscore , 0))) FROM matches;"
cur.execute(mysql)
ret=cur.fetchone()
mysql = "SUM(IF( home='"+target+"' , homescore , IF( away='"+target+"' , awayscore , 0)))"
#print(ret)
print(ret[mysql])
conn.commit()
cur.close()
conn.close()
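
# Hedged alternative (illustrative; the script above builds SQL by string
# concatenation, which is injection-prone). mysql.connector supports
# parameterized queries, so the same total could be fetched like this,
# assuming a dictionary cursor as above:
def fetch_total_points(cursor, team):
    """Return a team's total goals via a parameterized query."""
    sql = ("SELECT SUM(IF(home = %s, homescore, IF(away = %s, awayscore, 0)))"
           " AS total FROM matches")
    cursor.execute(sql, (team, team))
    return cursor.fetchone()['total']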
| 22.53125
| 111
| 0.660194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 356
| 0.461738
|
e9e13d802219234f0d6ee29b93ebb1c9e6c8a66a
| 629
|
py
|
Python
|
tests/core/test_annotations.py
|
penguinmenac3/leanai
|
6d26575b248ff03c4a24009cd82f26ea99d96d15
|
[
"MIT"
] | 1
|
2021-03-28T21:32:59.000Z
|
2021-03-28T21:32:59.000Z
|
tests/core/test_annotations.py
|
penguinmenac3/leanai
|
6d26575b248ff03c4a24009cd82f26ea99d96d15
|
[
"MIT"
] | null | null | null |
tests/core/test_annotations.py
|
penguinmenac3/leanai
|
6d26575b248ff03c4a24009cd82f26ea99d96d15
|
[
"MIT"
] | null | null | null |
import unittest
from leanai.core.annotations import RunOnlyOnce
class TestAnnotations(unittest.TestCase):
def setUp(self) -> None:
self.var = 0
def runner(self):
return self.var
@RunOnlyOnce
def run_once(self):
return self.var
def test_output(self):
        self.var = 1
        self.assertEqual(self.runner(), self.var)
        self.var = 2
        self.assertEqual(self.runner(), self.var)
        self.var = 42
        self.assertEqual(self.run_once(), 42)
        self.var = 3
        # run_once is decorated with @RunOnlyOnce, so it keeps returning 42
        self.assertEqual(self.run_once(), 42)
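
# Hedged sketch (an assumption about the semantics, not leanai's actual code):
# the test above implies RunOnlyOnce evaluates the wrapped callable once and
# replays the cached return value on every later call. A minimal version:
import functools


def run_only_once(func):
    _unset = object()
    cache = [_unset]

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if cache[0] is _unset:
            cache[0] = func(*args, **kwargs)
        return cache[0]

    return wrapper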
if __name__ == "__main__":
unittest.main()
| 21.689655
| 50
| 0.616852
| 514
| 0.81717
| 0
| 0
| 60
| 0.09539
| 0
| 0
| 10
| 0.015898
|
756552474094f48324d4b90b89114e9f83580b79
| 1,971
|
py
|
Python
|
pyext/professor2/__init__.py
|
iamholger/professor
|
25753a19de0dbbd2db0eb80cedc87adc017459a1
|
[
"MIT"
] | 2
|
2016-12-01T13:08:45.000Z
|
2017-01-09T05:36:44.000Z
|
pyext/professor2/__init__.py
|
iamholger/professor
|
25753a19de0dbbd2db0eb80cedc87adc017459a1
|
[
"MIT"
] | null | null | null |
pyext/professor2/__init__.py
|
iamholger/professor
|
25753a19de0dbbd2db0eb80cedc87adc017459a1
|
[
"MIT"
] | null | null | null |
import sys
pyversion = sys.version_info
if pyversion[0] < 2 or (pyversion[0] == 2 and pyversion[1] < 7):
    raise Exception("Professor2 requires Python 2.7 or greater")
## Import Cython wrappings on the C++ core library
from professor2.core import *
__version__ = version()
from professor2.errors import *
from professor2.dataio import *
from professor2.histos import *
from professor2.ipolio import *
from professor2.ipol import *
from professor2.minimize import *
from professor2.paramsio import *
from professor2.params import *
from professor2.sampling import * #< the only module which requires NumPy
from professor2.weights import *
from professor2.chi2 import *
from professor2.eigentunes import *
def mk_timestamp():
"""
Time stamp, taken from http://stackoverflow.com/questions/13890935/timestamp-python
"""
import time
ts = time.time()
import datetime
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
return st
# def mk_versionstring():
# """
# Get version from setup.py, courtesy of
# http://stackoverflow.com/questions/2058802/how-can-i-get-the-version-defined-in-setup-py-setuptools-in-my-package
# """
# import pkg_resources # part of setuptools
# return pkg_resources.require("professor2")[0].version
# __version__ = mk_versionstring()
logo = \
"""
Visit us on http://professor.hepforge.org/
Please cite arXiv:0907.2973 [hep-ph]
%s
______ __ _____ _____
| ___ \ / _| |_ _|_ _|
| |_/ / __ ___ | |_ ___ ___ ___ ___ _ __ | | | |
| __/ '__/ _ \| _/ _ \/ __/ __|/ _ \| '__| | | | |
| | | | | (_) | || __/\__ \__ \ (_) | | _| |_ _| |_
|_| |_| \___/|_| \___||___/___/\___/|_| |_____|_____|
Andy Buckley, Holger Schulz v%s
Copyright 2015-2017
""" % (mk_timestamp(), version())
| 33.40678
| 119
| 0.620497
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,170
| 0.593607
|
7567342e6e2ce849445abb8610ff24fc2aab8a0f
| 558
|
py
|
Python
|
stack/lambdas/rekopoc-check-status/lambda_function.py
|
anapt/rekognition-video-people-blurring-cdk
|
ce1a57178bcd81a17d7f287ff4ccf2be6aae93b2
|
[
"MIT-0"
] | 9
|
2021-10-01T08:21:03.000Z
|
2022-03-02T14:34:16.000Z
|
stack/lambdas/rekopoc-check-status/lambda_function.py
|
anapt/rekognition-video-people-blurring-cdk
|
ce1a57178bcd81a17d7f287ff4ccf2be6aae93b2
|
[
"MIT-0"
] | null | null | null |
stack/lambdas/rekopoc-check-status/lambda_function.py
|
anapt/rekognition-video-people-blurring-cdk
|
ce1a57178bcd81a17d7f287ff4ccf2be6aae93b2
|
[
"MIT-0"
] | 3
|
2021-10-01T08:33:32.000Z
|
2022-02-02T22:40:48.000Z
|
import boto3

# create the client once per container so warm invocations reuse it
reko = boto3.client('rekognition')


def lambda_handler(event, context):
    job_id = event['job_id']
    response = reko.get_face_detection(JobId=job_id, MaxResults=100)
return {
"statusCode": 200,
"body":
{
"job_id": job_id,
"job_status": response['JobStatus'],
"s3_object_bucket": event['s3_object_bucket'],
"s3_object_key": event['s3_object_key']
}
}
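
# Hedged helper (illustrative; the deployed pipeline presumably loops via a
# Step Functions choice state instead): get_face_detection reports
# IN_PROGRESS / SUCCEEDED / FAILED, so manual polling could look like this:
def wait_for_job(event, poll_seconds=5):
    import time
    while True:
        result = lambda_handler(event, None)
        if result['body']['job_status'] != 'IN_PROGRESS':
            return result
        time.sleep(poll_seconds)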
| 26.571429
| 75
| 0.577061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 153
| 0.274194
|
756844e5d69bfb4f7a881cf7182f56ff34c5aee1
| 201
|
py
|
Python
|
tests/manualThreadTests.py
|
velexio/pyLegos
|
64d3622f2b6d78a02b171e0438a0224a951d2644
|
[
"MIT"
] | null | null | null |
tests/manualThreadTests.py
|
velexio/pyLegos
|
64d3622f2b6d78a02b171e0438a0224a951d2644
|
[
"MIT"
] | 2
|
2016-11-23T00:36:34.000Z
|
2016-11-23T00:39:08.000Z
|
tests/manualThreadTests.py
|
velexio/pyLegos
|
64d3622f2b6d78a02b171e0438a0224a951d2644
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import time
from pylegos.core import Thread
def worker(sleepSec):
time.sleep(sleepSec)
t = Thread()
# pass a callable -- writing runFunc=worker(10) would execute worker
# immediately and hand its None return value to the thread runner
t.runAndWait(threadName='test', runFunc=lambda: worker(10))
print('finished')
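
# Hedged stdlib equivalent (illustrative): the same pitfall applies to
# threading.Thread -- pass the callable and its arguments separately instead
# of calling it at construction time.
import threading

t2 = threading.Thread(target=worker, args=(10,), name='test-stdlib')
t2.start()
t2.join()
print('stdlib thread finished')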
| 14.357143
| 51
| 0.726368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 37
| 0.18408
|
756a6183aee4660b960c432b4510670a699bf9cb
| 1,314
|
py
|
Python
|
hazelcast/protocol/codec/count_down_latch_await_codec.py
|
tonytheonlypony/hazelcast-python-client
|
3aafeaf2ebc05aee4f2386c62c079db496a7c81f
|
[
"Apache-2.0"
] | 98
|
2015-12-08T14:26:27.000Z
|
2022-03-23T17:44:11.000Z
|
hazelcast/protocol/codec/count_down_latch_await_codec.py
|
tonytheonlypony/hazelcast-python-client
|
3aafeaf2ebc05aee4f2386c62c079db496a7c81f
|
[
"Apache-2.0"
] | 396
|
2016-02-23T11:07:55.000Z
|
2022-03-31T14:26:34.000Z
|
hazelcast/protocol/codec/count_down_latch_await_codec.py
|
tonytheonlypony/hazelcast-python-client
|
3aafeaf2ebc05aee4f2386c62c079db496a7c81f
|
[
"Apache-2.0"
] | 62
|
2015-12-09T11:20:53.000Z
|
2022-01-28T01:30:54.000Z
|
from hazelcast.serialization.bits import *
from hazelcast.protocol.builtin import FixSizedTypesCodec
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer, RESPONSE_HEADER_SIZE
from hazelcast.protocol.codec.custom.raft_group_id_codec import RaftGroupIdCodec
from hazelcast.protocol.builtin import StringCodec
# hex: 0x0B0200
_REQUEST_MESSAGE_TYPE = 721408
# hex: 0x0B0201
_RESPONSE_MESSAGE_TYPE = 721409
_REQUEST_INVOCATION_UID_OFFSET = REQUEST_HEADER_SIZE
_REQUEST_TIMEOUT_MS_OFFSET = _REQUEST_INVOCATION_UID_OFFSET + UUID_SIZE_IN_BYTES
_REQUEST_INITIAL_FRAME_SIZE = _REQUEST_TIMEOUT_MS_OFFSET + LONG_SIZE_IN_BYTES
_RESPONSE_RESPONSE_OFFSET = RESPONSE_HEADER_SIZE
def encode_request(group_id, name, invocation_uid, timeout_ms):
buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
FixSizedTypesCodec.encode_uuid(buf, _REQUEST_INVOCATION_UID_OFFSET, invocation_uid)
FixSizedTypesCodec.encode_long(buf, _REQUEST_TIMEOUT_MS_OFFSET, timeout_ms)
RaftGroupIdCodec.encode(buf, group_id)
StringCodec.encode(buf, name, True)
return OutboundMessage(buf, True)
def decode_response(msg):
initial_frame = msg.next_frame()
return FixSizedTypesCodec.decode_boolean(initial_frame.buf, _RESPONSE_RESPONSE_OFFSET)
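
# Hedged usage sketch (kept as a comment because constructing a RaftGroupId
# requires internals not shown here; real clients send the message through
# the invocation service):
#
#   import uuid
#   request = encode_request(group_id, "my-latch", uuid.uuid4(), 30000)
#   # ... invoke, then:
#   count_reached = decode_response(response_msg)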
| 43.8
| 127
| 0.853881
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 30
| 0.022831
|
756b0adc0964779163796787d2e6398c5eb4706e
| 980
|
py
|
Python
|
LeetCode/088 Merge Sorted Array.py
|
gesuwen/Algorithms
|
0c9cf4412d76f8b69ef68cc80636323f5a0e5786
|
[
"MIT"
] | null | null | null |
LeetCode/088 Merge Sorted Array.py
|
gesuwen/Algorithms
|
0c9cf4412d76f8b69ef68cc80636323f5a0e5786
|
[
"MIT"
] | null | null | null |
LeetCode/088 Merge Sorted Array.py
|
gesuwen/Algorithms
|
0c9cf4412d76f8b69ef68cc80636323f5a0e5786
|
[
"MIT"
] | null | null | null |
# Array; Two Pointers
# Given two sorted integer arrays nums1 and nums2, merge nums2 into nums1 as one sorted array.
#
# Note:
#
# The number of elements initialized in nums1 and nums2 are m and n respectively.
# You may assume that nums1 has enough space (size that is greater or equal to m + n) to hold additional elements from nums2.
# Example:
#
# Input:
# nums1 = [1,2,3,0,0,0], m = 3
# nums2 = [2,5,6], n = 3
#
# Output: [1,2,2,3,5,6]
class Solution:
def merge(self, nums1, m, nums2, n):
"""
:type nums1: List[int]
:type m: int
:type nums2: List[int]
:type n: int
:rtype: void Do not return anything, modify nums1 in-place instead.
"""
p, q = m-1, n-1
while p >= 0 and q >= 0:
if nums1[p] > nums2[q]:
nums1[p+q+1] = nums1[p]
p -= 1
else:
nums1[p+q+1] = nums2[q]
q -= 1
nums1[:q+1] = nums2[:q+1]
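
# Hedged quick check (not part of the original file), using the example
# from the problem statement above:
if __name__ == "__main__":
    nums1 = [1, 2, 3, 0, 0, 0]
    Solution().merge(nums1, 3, [2, 5, 6], 3)
    assert nums1 == [1, 2, 2, 3, 5, 6]
    print(nums1)  # [1, 2, 2, 3, 5, 6]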
| 28.823529
| 125
| 0.533673
| 531
| 0.541837
| 0
| 0
| 0
| 0
| 0
| 0
| 628
| 0.640816
|
756bf0598578d01db0afb38f8bafb682754f2e0c
| 1,007
|
py
|
Python
|
caf_verilog/test/test_capture_buffer.py
|
chiranthsiddappa/caf_verilog
|
cd3cfd00459dc03518fcce53d5d6ac5194fb2adc
|
[
"MIT"
] | 1
|
2019-06-04T22:05:12.000Z
|
2019-06-04T22:05:12.000Z
|
caf_verilog/test/test_capture_buffer.py
|
chiranthsiddappa/caf_verilog
|
cd3cfd00459dc03518fcce53d5d6ac5194fb2adc
|
[
"MIT"
] | 6
|
2019-04-17T17:21:42.000Z
|
2019-09-11T16:15:28.000Z
|
caf_verilog/test/test_capture_buffer.py
|
chiranthsiddappa/caf_verilog
|
cd3cfd00459dc03518fcce53d5d6ac5194fb2adc
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from .. import capture_buffer as capt_buff
from tempfile import mkdtemp
import os
class TestCaptureBuffer(TestCase):
def test_capture_buffer(self):
"""
Test that the files are written out for instantiation and testbench.
:return:
"""
tmpdir = mkdtemp()
cb = capt_buff.CaptureBuffer(100, output_dir=tmpdir)
cb.gen_tb()
files = os.listdir(tmpdir)
test_files = ['capture_buffer.v', 'capture_buffer_tb.v', 'capture_buffer_values.txt']
for file in test_files:
self.assertIn(file, files)
def test_capture_buffer_values_file(self):
"""
Test the file length of capture buffer values file.
:return:
"""
tmpdir = mkdtemp()
cb = capt_buff.CaptureBuffer(100, output_dir=tmpdir)
with open(os.path.join(tmpdir, 'capture_buffer_values.txt')) as cbv:
lines = len(cbv.readlines())
self.assertEqual(100, lines)
| 31.46875
| 93
| 0.637537
| 892
| 0.885799
| 0
| 0
| 0
| 0
| 0
| 0
| 294
| 0.291956
|
756c7eea74e1f5249521b52dff9a4f1dfed719d3
| 933
|
py
|
Python
|
db_to_excel.py
|
jfernandez04/fromdb_to_excel
|
f06bfbd83825f887afc814706dc6c34e6ba44f17
|
[
"Apache-2.0"
] | null | null | null |
db_to_excel.py
|
jfernandez04/fromdb_to_excel
|
f06bfbd83825f887afc814706dc6c34e6ba44f17
|
[
"Apache-2.0"
] | 3
|
2018-02-21T20:25:32.000Z
|
2018-02-23T18:25:44.000Z
|
db_to_excel.py
|
jfernandez04/fromdb_to_excel
|
f06bfbd83825f887afc814706dc6c34e6ba44f17
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
import mysql.connector
import xlsxwriter
from query import q, table, columns
from letters import letters
import json
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(dir_path + '/config.json', "r") as json_data_file:
conf = json.load(json_data_file)
conn = mysql.connector.connect(**conf)
cur = conn.cursor()
cur.execute("set innodb_lock_wait_timeout=100;")
q_describe = "describe " + table + ";"
cur.execute(q_describe)
bdescribe = cur.fetchall()
wb = xlsxwriter.Workbook('test.xlsx')
ws = wb.add_worksheet()
col = 0
for bdes_row in bdescribe:
    # header row: column letter + row 1, e.g. 'A1'
    ws.write((letters[col] + '1').upper(), bdes_row[0])
    col += 1
num1 = 2
cur.execute(q)
data = cur.fetchall()
for row in data:
    col = 0
    for line in range(len(row)):
        cell = (letters[col] + str(num1)).upper()
        ws.write(cell, row[line])
        col += 1
    num1 += 1
wb.close()
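
# Hedged alternative (illustrative): xlsxwriter also accepts zero-based
# (row, col) indices, which avoids the letter arithmetic above entirely:
def write_rows(worksheet, rows, start_row=1):
    """Write each row of `rows` starting below the header row."""
    for row_idx, row in enumerate(rows, start=start_row):
        for col_idx, value in enumerate(row):
            worksheet.write(row_idx, col_idx, value)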
| 21.697674
| 62
| 0.681672
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 91
| 0.097535
|
756f24ba8abf0f406f6c9f0a863f8c02bdb32b06
| 1,317
|
py
|
Python
|
setup.py
|
tyler-a-cox/radio_sim
|
e54891905597578e2be6a9e6a9a201ba1cbd603c
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
tyler-a-cox/radio_sim
|
e54891905597578e2be6a9e6a9a201ba1cbd603c
|
[
"BSD-2-Clause"
] | 2
|
2021-06-22T19:31:52.000Z
|
2021-07-14T21:33:01.000Z
|
setup.py
|
tyler-a-cox/radio_sim
|
e54891905597578e2be6a9e6a9a201ba1cbd603c
|
[
"BSD-2-Clause"
] | null | null | null |
from setuptools import setup
import os
import sys
sys.path.append("radio_sim")
def package_files(package_dir, subdirectory):
    # walk package_dir/subdirectory and return a package_data list of
    # paths relative to the package directory
    paths = []
    directory = os.path.join(package_dir, subdirectory)
    for (path, directories, filenames) in os.walk(directory):
        rel_path = path.replace(package_dir + "/", "")
        for filename in filenames:
            paths.append(os.path.join(rel_path, filename))
    return paths
data_files = package_files("hera_cal", "data") + package_files(
"hera_cal", "calibrations"
)
setup_args = {
"name": "radio_sim",
"version": "0.0.2",
"author": "Tyler Cox",
"url": "https://github.com/tyler-a-cox/radio_sim",
"license": "BSD",
"description": "Simple radio interferometer simulator for testing nucal",
"package_dir": {"radio_sim": "radio_sim"},
"packages": ["radio_sim"],
"include_package_data": True,
"scripts": [],
"package_data": {"radio_sim": data_files},
"install_requires": [
"numpy>=1.10",
"scipy",
"astropy",
"pyuvdata",
],
"extras_require": {
"all": [
"aipy>=3.0",
]
},
"zip_safe": False,
}
if __name__ == "__main__":
setup(*(), **setup_args)
| 23.945455
| 77
| 0.603645
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 529
| 0.40167
|
756fb9d469af8300eef5fa58dfbcbd277e34d405
| 1,959
|
py
|
Python
|
oremda/pipeline/engine/__init__.py
|
OpenChemistry/oremda
|
3fb4cb8318713b87ecd7999ee2b725da745dd023
|
[
"BSD-3-Clause"
] | 11
|
2021-09-01T23:10:51.000Z
|
2022-03-20T07:39:37.000Z
|
oremda/pipeline/engine/__init__.py
|
OpenChemistry/oremda
|
3fb4cb8318713b87ecd7999ee2b725da745dd023
|
[
"BSD-3-Clause"
] | 22
|
2021-05-18T14:10:27.000Z
|
2021-10-04T15:06:27.000Z
|
oremda/pipeline/engine/__init__.py
|
OpenChemistry/oremda
|
3fb4cb8318713b87ecd7999ee2b725da745dd023
|
[
"BSD-3-Clause"
] | 2
|
2021-09-01T22:11:13.000Z
|
2021-10-30T09:12:36.000Z
|
import asyncio
import logging
import sys
import coloredlogs
import signal
from oremda.typing import ContainerType
from oremda.clients.singularity import SingularityClient
from oremda.pipeline.engine.rpc.client import RpcClient
from oremda.pipeline.engine.context import pipeline_context
from oremda.pipeline.engine.config import settings
# Setup logger
logger = logging.getLogger("engine")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = coloredlogs.ColoredFormatter(
"%(asctime)s,%(msecs)03d - %(name)s - %(levelname)s - %(message)s"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
async def run():
# Set the Singularity image path if we are using Singularity
if settings.OREMDA_CONTAINER_TYPE == ContainerType.Singularity:
SingularityClient.images_dir = settings.OREMDA_SINGULARITY_IMAGE_DIR
with pipeline_context() as context:
async with RpcClient(settings.SERVER_URL, context) as client:
logger.info("Connected to server.")
await client.wait_on_reader()
async def shutdown(signal, loop, run_task):
logger.info(f"Received exit signal {signal.name}...")
logger.info("Canceling engine task.")
if run_task is not None:
run_task.cancel()
tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
if len(tasks) > 0:
logger.info(f"Waiting for {len(tasks)} to complete.")
await asyncio.wait(tasks)
logger.info("Stopping event loop.")
loop = asyncio.get_event_loop()
loop.stop()
def start():
logger.info("Starting pipeline engine.")
loop = asyncio.get_event_loop()
run_task = loop.create_task(run())
signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
for s in signals:
loop.add_signal_handler(
s, lambda s=s: asyncio.create_task(shutdown(s, loop, run_task))
)
loop.run_forever()
| 29.681818
| 79
| 0.720265
| 0
| 0
| 0
| 0
| 0
| 0
| 920
| 0.469627
| 323
| 0.16488
|
7570df54465fd5d936a3ab3554540e61e267bf96
| 2,369
|
py
|
Python
|
main.py
|
RareDrops/discord-emote-script
|
bc1f4892fd4294449b2340a51b276e4ebb3b37e6
|
[
"MIT"
] | null | null | null |
main.py
|
RareDrops/discord-emote-script
|
bc1f4892fd4294449b2340a51b276e4ebb3b37e6
|
[
"MIT"
] | null | null | null |
main.py
|
RareDrops/discord-emote-script
|
bc1f4892fd4294449b2340a51b276e4ebb3b37e6
|
[
"MIT"
] | null | null | null |
from pynput import keyboard
from pynput.keyboard import Key, Controller
from os.path import exists
import win32clipboard
import os
from PIL import Image
from pystray import Menu, MenuItem as item
import pystray
RECORDING = False
WORD = ""
keyboard_press = Controller()
def send_to_clipboard(filepath):
win32clipboard.OpenClipboard()
win32clipboard.EmptyClipboard()
    # the two lines below only work with some programs (Discord is one of
    # them); pasting the file itself preserves transparency
wide_path = os.path.abspath(filepath).encode('utf-16-le') + b'\0'
win32clipboard.SetClipboardData(win32clipboard.RegisterClipboardFormat('FileNameW'), wide_path)
win32clipboard.CloseClipboard()
    # then simulate pressing ctrl+v via the pynput keyboard controller:
keyboard_press.release(Key.shift_r)
with keyboard_press.pressed(Key.ctrl):
keyboard_press.press('v')
keyboard_press.release('v')
keyboard_press.press(Key.backspace)
keyboard_press.release(Key.backspace)
def find_image(word):
filepath = f"Emotes/{word.lower()}.png"
file_exist = exists(filepath)
if file_exist == False:
return
image = Image.open(filepath)
if image.size != (48, 48):
image = image.resize((48, 48))
image.save(filepath)
send_to_clipboard(filepath)
def on_press(key):
global RECORDING, WORD
try:
if key.char == ':':
if RECORDING == False:
RECORDING = True
else:
RECORDING = False
find_image(WORD)
WORD = ""
elif RECORDING == True:
WORD += key.char
if len(WORD) > 30:
RECORDING = False
WORD = ""
except AttributeError:
if RECORDING == True:
if key == Key.backspace:
WORD = WORD[:-1]
elif key == Key.enter:
RECORDING = False
WORD = ""
# Collect events until released
listener = keyboard.Listener(on_press=on_press)
listener.start()
temp_iterable = []
image = Image.open('keyboard.ico')
icon = pystray.Icon('discord-emotes',image,'discord-emotes',temp_iterable)
menu = Menu(item('quit', lambda: icon.stop()))
icon.menu = menu
icon.run()
| 29.987342
| 123
| 0.61545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 334
| 0.140988
|
7571df3479e0827912764d9107db9cc7c8bfd97c
| 27,986
|
py
|
Python
|
moloch_connector.py
|
splunk-soar-connectors/moloch
|
d1956ee500b2c3f3882f3512366ae480270e89f8
|
[
"Apache-2.0"
] | 1
|
2022-02-13T19:18:41.000Z
|
2022-02-13T19:18:41.000Z
|
moloch_connector.py
|
splunk-soar-connectors/moloch
|
d1956ee500b2c3f3882f3512366ae480270e89f8
|
[
"Apache-2.0"
] | 2
|
2021-12-09T01:35:35.000Z
|
2022-02-24T20:04:27.000Z
|
moloch_connector.py
|
splunk-soar-connectors/moloch
|
d1956ee500b2c3f3882f3512366ae480270e89f8
|
[
"Apache-2.0"
] | null | null | null |
# File: moloch_connector.py
#
# Copyright (c) 2019-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
#
import ipaddress
import json
import os
import magic
import phantom.app as phantom
import phantom.rules as ph_rules
import requests
from bs4 import BeautifulSoup, UnicodeDammit
from phantom.action_result import ActionResult
from phantom.base_connector import BaseConnector
from requests.auth import HTTPDigestAuth
from moloch_consts import *
class RetVal(tuple):
def __new__(cls, val1, val2):
return tuple.__new__(RetVal, (val1, val2))
class MolochConnector(BaseConnector):
def __init__(self):
# Call the BaseConnectors init first
super(MolochConnector, self).__init__()
self._state = None
self._server_url = None
self._port = None
self._username = None
self._password = None
self._verify_server_cert = False
def initialize(self):
""" This is an optional function that can be implemented by the AppConnector derived class. Since the
configuration dictionary is already validated by the time this function is called, it's a good place to do any
extra initialization of any internal modules. This function MUST return a value of either phantom.APP_SUCCESS or
phantom.APP_ERROR. If this function returns phantom.APP_ERROR, then AppConnector::handle_action will not get
called.
"""
self._state = self.load_state()
# get the asset config
config = self.get_config()
# Access values in asset config by the name
self._server_url = config[MOLOCH_CONFIG_SERVER_URL].strip('/')
self._port = config.get(MOLOCH_CONFIG_PORT, 8005)
self._username = config[MOLOCH_CONFIG_USERNAME]
self._password = config[MOLOCH_CONFIG_PASSWORD]
self._verify_server_cert = config.get(MOLOCH_VERIFY_SERVER_CERT, False)
# Custom validation for IP address
self.set_validator(MOLOCH_PARAM_IP, self._is_ip)
return phantom.APP_SUCCESS
def _is_ip(self, ip_address):
""" Function that checks given address and return True if address is valid IP address.
:param ip_address: IP address
:return: status (success/failure)
"""
# Throws exception if IP is not valid IPv4 or IPv6
try:
ipaddress.ip_address(UnicodeDammit(ip_address).unicode_markup)
except Exception as e:
self.debug_print(MOLOCH_INVALID_IP, e)
return False
return True
    def _process_empty_response(self, response, action_result):
""" This function is used to process empty response.
:param response: response data
:param action_result: object of Action Result
:return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message)
"""
if response.status_code == 200:
return RetVal(phantom.APP_SUCCESS, {})
return RetVal(action_result.set_status(phantom.APP_ERROR, "Empty response and no information in the header"),
None)
def _process_html_response(self, response, action_result):
""" This function is used to process html response.
:param response: response data
:param action_result: object of Action Result
:return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message)
"""
# An html response, treat it like an error
status_code = response.status_code
try:
soup = BeautifulSoup(response.text, "html.parser")
# Remove the script, style, footer and navigation part from the HTML message
for element in soup(["script", "style", "footer", "nav"]):
element.extract()
error_text = soup.text
split_lines = error_text.split('\n')
split_lines = [x.strip() for x in split_lines if x.strip()]
error_text = '\n'.join(split_lines)
        except Exception:
error_text = "Cannot parse error details"
message = "Status Code: {0}. Data from server:\n{1}\n".format(status_code, error_text)
message = message.replace('{', '{{').replace('}', '}}')
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _process_json_response(self, response, action_result):
""" This function is used to process json response.
:param response: response data
:param action_result: object of Action Result
:return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message)
"""
# Try a json parse
try:
resp_json = response.json()
except Exception as e:
return RetVal(action_result.set_status(phantom.APP_ERROR, "Unable to parse JSON response. Error: {0}".
format(str(e))), None)
# Please specify the status codes here
if 200 <= response.status_code < 399:
return RetVal(phantom.APP_SUCCESS, resp_json)
# You should process the error returned in the json
message = "Error from server. Status Code: {0} Data from server: {1}".format(response.status_code,
response.text.replace('{', '{{').
replace('}', '}}'))
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _process_pcap_response(self, response, action_result):
""" This function is used to process pcap response.
:param response: response data
:param action_result: object of Action Result
:return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message)
"""
if 200 <= response.status_code < 399:
return RetVal(phantom.APP_SUCCESS, {})
message = "Error from server. Status Code: {0} Data from server: {1}".format(response.status_code,
response.text.replace('{', '{{').
replace('}', '}}'))
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _process_response(self, response, action_result):
""" This function is used to process html response.
:param response: response data
:param action_result: object of Action Result
:return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message)
"""
# store the r_text in debug data, it will get dumped in the logs if the action fails
if hasattr(action_result, 'add_debug_data') and (self.get_action_identifier() != "get_pcap" or not
(200 <= response.status_code < 399)):
action_result.add_debug_data({'r_status_code': response.status_code})
action_result.add_debug_data({'r_text': response.text})
action_result.add_debug_data({'r_headers': response.headers})
# Process each 'Content-Type' of response separately
# Process a json response
if 'json' in response.headers.get('Content-Type', ''):
return self._process_json_response(response, action_result)
if 'pcap' in response.headers.get('Content-Type', ''):
return self._process_pcap_response(response, action_result)
        # Process an HTML response regardless of what the API claims. There is
        # often a proxy between Phantom and the rest of the world; on errors,
        # proxies return HTML, which this function parses into the
        # action_result.
if 'html' in response.headers.get('Content-Type', ''):
return self._process_html_response(response, action_result)
        # no parseable content-type found, so handle an empty response
if not response.text:
            return self._process_empty_response(response, action_result)
# everything else is actually an error at this point
message = "Can't process response from server. Status Code: {0} Data from server: {1}".\
format(response.status_code, response.text.replace('{', '{{').replace('}', '}}'))
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _make_rest_call(self, endpoint, action_result, headers=None, params=None, data=None, method="get",
timeout=None):
""" Function that makes the REST call to the device. It's a generic function that can be called from various
action handlers.
:param endpoint: REST endpoint that needs to appended to the service address
:param action_result: object of ActionResult class
:param headers: request headers
:param params: request parameters
:param data: request body
:param method: GET/POST/PUT/DELETE (Default will be GET)
:param timeout: Timeout for API call
:return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message),
response obtained by making an API call
"""
resp_json = None
try:
request_func = getattr(requests, method)
except AttributeError:
return RetVal(action_result.set_status(phantom.APP_ERROR, "Invalid method: {0}".format(method)), resp_json)
# Create a URL to connect to
try:
url = '{url}{endpoint}'.format(url=self._server_url, endpoint=endpoint)
except Exception:
return RetVal(action_result.set_status(phantom.APP_ERROR, "Invalid URL. Please provide a valid URL"), resp_json)
try:
# In case of get_pcap action stream the response and store it into temp file
if self.get_action_identifier() == 'get_pcap':
r = request_func(url, auth=HTTPDigestAuth(self._username, self._password), json=data, headers=headers,
verify=self._verify_server_cert, timeout=timeout, params=params, stream=True)
# Create temp_file_path using asset_id
temp_file_path = '{dir}{asset}_temp_pcap_file'.format(dir=self.get_state_dir(),
asset=self.get_asset_id())
# If API call is success
if 200 <= r.status_code < 399:
# Store response into file
with open(temp_file_path, 'wb') as pcap_file:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
pcap_file.write(chunk)
else:
r = request_func(url, auth=HTTPDigestAuth(self._username, self._password), json=data, headers=headers,
verify=self._verify_server_cert, timeout=timeout, params=params)
except Exception as e:
return RetVal(action_result.set_status(phantom.APP_ERROR, "Error Connecting to server. Details: {0}".
format(str(e))), resp_json)
return self._process_response(r, action_result)
def _handle_test_connectivity(self, param):
""" This function is used to test the connectivity of an asset with given credentials.
:param param: (not used in this method)
:return: status success/failure
"""
action_result = self.add_action_result(ActionResult(dict(param)))
self.save_progress(MOLOCH_TEST_CONNECTION)
# Validate port
        if not str(self._port).isdigit() or not 0 <= int(self._port) <= 65535:
self.save_progress(MOLOCH_TEST_CONNECTIVITY_FAILED)
return action_result.set_status(phantom.APP_ERROR, status_message='{}. {}'.format(
MOLOCH_CONNECTING_ERROR_MSG, MOLOCH_INVALID_CONFIG_PORT))
params = {'length': 1}
endpoint = ':{port}{endpoint}'.format(port=self._port, endpoint=MOLOCH_TEST_CONNECTIVITY_ENDPOINT)
# make REST call
ret_val, response = self._make_rest_call(endpoint=endpoint, params=params, action_result=action_result,
timeout=MOLOCH_TEST_CONNECTIVITY_TIMEOUT)
if phantom.is_fail(ret_val):
self.save_progress(MOLOCH_TEST_CONNECTIVITY_FAILED)
return action_result.get_status()
self.save_progress(MOLOCH_TEST_CONNECTIVITY_PASSED)
return action_result.set_status(phantom.APP_SUCCESS)
def _handle_get_pcap(self, param):
""" This function is used to get pcap file and store it into vault.
:param param: Dictionary of input parameters
:return: status success/failure
"""
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
summary = action_result.update_summary({})
# Validate port
        if not str(self._port).isdigit() or not 0 <= int(self._port) <= 65535:
self.debug_print(MOLOCH_INVALID_CONFIG_PORT)
return action_result.set_status(phantom.APP_ERROR, status_message=MOLOCH_INVALID_CONFIG_PORT)
# Get parameters
start_time = param[MOLOCH_JSON_START_TIME]
end_time = param[MOLOCH_JSON_END_TIME]
source_ip = param.get(MOLOCH_JSON_SOURCE_IP)
dest_ip = param.get(MOLOCH_JSON_DESTINATION_IP)
hostname = param.get(MOLOCH_JSON_HOSTNAME)
custom_query = param.get(MOLOCH_JSON_CUSTOM_QUERY)
limit = param.get(MOLOCH_JSON_LIMIT, 50)
# Validate start_time parameter
try:
start_time = int(float(start_time))
        except (TypeError, ValueError):
self.debug_print(MOLOCH_INVALID_START_TIME)
return action_result.set_status(phantom.APP_ERROR, status_message=MOLOCH_INVALID_START_TIME)
# Validate end_time parameter
try:
end_time = int(float(end_time))
        except (TypeError, ValueError):
self.debug_print(MOLOCH_INVALID_END_TIME)
return action_result.set_status(phantom.APP_ERROR, status_message=MOLOCH_INVALID_END_TIME)
# Compare value of start_time and end_time
if start_time >= end_time:
self.debug_print(MOLOCH_INVALID_TIME_RANGE)
return action_result.set_status(phantom.APP_ERROR, status_message=MOLOCH_INVALID_TIME_RANGE)
# Validate parameter limit
try:
limit = int(float(limit))
        except (TypeError, ValueError):
self.debug_print(MOLOCH_INVALID_LIMIT_MSG)
return action_result.set_status(phantom.APP_ERROR, status_message=MOLOCH_INVALID_LIMIT_MSG)
# Validate parameter limit
        if not 0 <= limit <= 2000000:
self.debug_print(MOLOCH_INVALID_LIMIT_MSG)
return action_result.set_status(phantom.APP_ERROR, status_message=MOLOCH_INVALID_LIMIT_MSG)
params = dict()
params['length'] = limit
params['startTime'] = start_time
params['stopTime'] = end_time
expression = ''
expressions = []
# Add source_ip to expression, if available
if source_ip:
expression = 'ip.src == {source_ip}'.format(source_ip=source_ip)
expressions.append(expression)
# Add dest_ip to expression, if available
if dest_ip:
expression = 'ip.dst == {dst_ip}'.format(dst_ip=dest_ip)
expressions.append(expression)
# Add hostname to expression, if available
if hostname:
expression = 'host.http == {hostname}'.format(hostname=hostname)
expressions.append(expression)
# Add custom_query to expression, if available
if custom_query:
expression = custom_query
expressions.append(expression)
expression = " && ".join(expressions)
if expression:
params['expression'] = expression
endpoint = ':{port}{endpoint}'.format(port=self._port, endpoint=MOLOCH_GET_PCAP_ENDPOINT)
# make REST call
ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result, params=params)
if phantom.is_fail(ret_val):
return action_result.get_status()
# Create filename using input parameters
filename = 'moloch_{start_time}_{end_time}'.format(start_time=start_time, end_time=end_time)
inputs = [('src_ip', source_ip), ('dst_ip', dest_ip), ('hostname', hostname)]
for input_key, input_val in inputs:
if input_val:
filename = '{filename}_{input_key}_{input_val}'.format(filename=filename, input_key=input_key, input_val=input_val)
filename = '{filename}_limit_{limit}'.format(filename=filename, limit=limit)
filename = '{filename}.pcap'.format(filename=filename)
temp_file_path = '{dir}{asset}_temp_pcap_file'.format(dir=self.get_state_dir(), asset=self.get_asset_id())
# If file size is zero
if not os.path.getsize(temp_file_path):
# Delete file
os.unlink(temp_file_path)
self.debug_print(MOLOCH_NO_DATA_FOUND_MSG)
return action_result.set_status(phantom.APP_ERROR, status_message=MOLOCH_NO_DATA_FOUND_MSG)
# Check if file is text file
# mime=True only returns mimetypes instead of textual description
magic_obj = magic.Magic(mime=True)
file_type = magic_obj.from_file(temp_file_path)
if file_type == 'text/plain':
with open(temp_file_path) as temp_file:
temp_file_data = temp_file.read()
message = 'Error while getting data from server. {api_message}'.\
format(api_message=temp_file_data)
self.debug_print(message)
return action_result.set_status(phantom.APP_ERROR, status_message=message)
invalid_chars = r'[]<>/\():;"\'|*()`~!@#$%^&+={}?,'
        # Remove the special characters defined in invalid_chars from the filename
try:
filename = filename.translate(None, invalid_chars)
        except TypeError:
            # In Python 3, translate() expects a mapping table of character
            # ordinals to replacements
translate_table = {}
for invalid_char in invalid_chars:
translate_table[ord(invalid_char)] = None
filename = filename.translate(translate_table)
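        # e.g. under Python 3, 'a:b*c'.translate({ord(':'): None, ord('*'): None})
        # returns 'abc'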
_, _, vault_file_list = ph_rules.vault_info(file_name=filename)
vault_file_list = list(vault_file_list)
# Iterate through files of Vault
for file in vault_file_list:
            # If the file name and file size match an existing vault file, it is a duplicate
if file.get('name') == filename and file.get('size') == os.path.getsize(temp_file_path):
self.debug_print(MOLOCH_FILE_ALREADY_AVAILABLE)
vault_file_details = {
phantom.APP_JSON_SIZE: file.get('size'),
phantom.APP_JSON_VAULT_ID: file.get('vault_id'),
'file_name': filename
}
summary['vault_id'] = file.get('vault_id')
# Delete temp file
os.unlink(temp_file_path)
action_result.add_data(vault_file_details)
return action_result.set_status(phantom.APP_SUCCESS)
vault_file_details = {phantom.APP_JSON_SIZE: os.path.getsize(temp_file_path)}
# Adding file to vault
success, _, vault_id = ph_rules.vault_add(file_location=temp_file_path, container=self.get_container_id(), file_name=filename,
metadata=vault_file_details)
# Updating report data with vault details
if not success:
self.debug_print('Error while adding the file to vault')
return action_result.set_status(phantom.APP_ERROR, status_message='Error while adding the file to vault')
vault_file_details[phantom.APP_JSON_VAULT_ID] = vault_id
vault_file_details['file_name'] = filename
action_result.add_data(vault_file_details)
        summary['vault_id'] = vault_file_details[phantom.APP_JSON_VAULT_ID]
return action_result.set_status(phantom.APP_SUCCESS)
def _handle_list_fields(self, param):
""" This function is used to list all fields.
:param param: dictionary of input parameters
:return: status success/failure
"""
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
port = param.get(MOLOCH_PARAM_PORT, 9200)
# Validate port
        if not str(port).isdigit() or not 0 <= int(port) <= 65535:
self.debug_print(MOLOCH_INVALID_PARAM_PORT)
return action_result.set_status(phantom.APP_ERROR, status_message=MOLOCH_INVALID_PARAM_PORT)
endpoint = ':{port}{endpoint}'.format(port=port, endpoint=MOLOCH_LIST_FIELDS_ENDPOINT)
# make REST call
ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result)
# Something went wrong
if phantom.is_fail(ret_val):
message = action_result.get_message()
self.debug_print(message)
if "Status Code: 200" in message and "angular.module" in message:
action_result.set_status(phantom.APP_ERROR, "Unable to connect to server. "
"Please make sure that entered port is correct")
return action_result.get_status()
# Add data to action_result
for content in response.get("hits", {}).get("hits", []):
action_result.add_data(content)
summary = action_result.update_summary({})
summary['total_fields'] = action_result.get_data_size()
return action_result.set_status(phantom.APP_SUCCESS)
def _handle_list_files(self, param):
""" This function is used to list all files.
        :param param: dictionary of input parameters (none are required for this action)
:return: status success/failure
"""
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
# Validate port
        if not str(self._port).isdigit() or not 0 <= int(self._port) <= 65535:
self.debug_print(MOLOCH_INVALID_CONFIG_PORT)
return action_result.set_status(phantom.APP_ERROR, status_message=MOLOCH_INVALID_CONFIG_PORT)
endpoint = ':{port}{endpoint}'.format(port=self._port, endpoint=MOLOCH_LIST_FILES_ENDPOINT)
# make REST call
ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result)
# Something went wrong
if phantom.is_fail(ret_val):
message = action_result.get_message()
self.debug_print(message)
return action_result.get_status()
# Add data to action_result
        for content in response.get("data", []):
action_result.add_data(content)
summary = action_result.update_summary({})
summary['total_files'] = action_result.get_data_size()
return action_result.set_status(phantom.APP_SUCCESS)
def handle_action(self, param):
""" This function gets current action identifier and calls member function of its own to handle the action.
:param param: dictionary which contains information about the actions to be executed
:return: status success/failure
"""
self.debug_print("action_id", self.get_action_identifier())
        # Dictionary mapping each action identifier to its handler function
action_mapping = {
'test_connectivity': self._handle_test_connectivity,
'get_pcap': self._handle_get_pcap,
'list_files': self._handle_list_files,
'list_fields': self._handle_list_fields
}
action = self.get_action_identifier()
action_execution_status = phantom.APP_SUCCESS
        if action in action_mapping:
action_function = action_mapping[action]
action_execution_status = action_function(param)
return action_execution_status
def finalize(self):
""" This function gets called once all the param dictionary elements are looped over and no more handle_action
calls are left to be made. It gives the AppConnector a chance to loop through all the results that were
accumulated by multiple handle_action function calls and create any summary if required. Another usage is
cleanup, disconnect from remote devices etc.
:return: status (success/failure)
"""
self.save_state(self._state)
return phantom.APP_SUCCESS
if __name__ == '__main__':
import argparse
import sys
import pudb
pudb.set_trace()
argparser = argparse.ArgumentParser()
argparser.add_argument('input_test_json', help='Input Test JSON file')
argparser.add_argument('-u', '--username', help='username', required=False)
argparser.add_argument('-p', '--password', help='password', required=False)
argparser.add_argument('-v', '--verify', action='store_true', help='verify', required=False, default=False)
args = argparser.parse_args()
session_id = None
username = args.username
password = args.password
verify = args.verify
if username is not None and password is None:
# User specified a username but not a password, so ask
import getpass
password = getpass.getpass("Password: ")
if username and password:
login_url = BaseConnector._get_phantom_base_url() + "login"
try:
print("Accessing the Login page")
r = requests.get(login_url, verify=verify, timeout=MOLOCH_DEFAULT_TIMEOUT)
csrftoken = r.cookies['csrftoken']
data = dict()
data['username'] = username
data['password'] = password
data['csrfmiddlewaretoken'] = csrftoken
headers = dict()
headers['Cookie'] = 'csrftoken={}'.format(csrftoken)
headers['Referer'] = login_url
print("Logging into Platform to get the session id")
r2 = requests.post(login_url, verify=verify, data=data, headers=headers, timeout=MOLOCH_DEFAULT_TIMEOUT)
session_id = r2.cookies['sessionid']
except Exception as e:
print("Unable to get session id from the platform. Error: {}".format(str(e)))
sys.exit(1)
if len(sys.argv) < 2:
print("No test json specified as input")
sys.exit(0)
with open(sys.argv[1]) as f:
in_json = f.read()
in_json = json.loads(in_json)
print(json.dumps(in_json, indent=4))
connector = MolochConnector()
connector.print_progress_message = True
if session_id is not None:
in_json['user_session_token'] = session_id
ret_val = connector._handle_action(json.dumps(in_json), None)
print(json.dumps(json.loads(ret_val), indent=4))
sys.exit(0)
| 41.460741
| 134
| 0.643393
| 24,669
| 0.881476
| 0
| 0
| 0
| 0
| 0
| 0
| 8,966
| 0.320374
|
7574fa5420556c5e1887475cd923bc9a0ffab1f4
| 2,600
|
py
|
Python
|
testing/python-image-upload/upload.py
|
pkalauner-tuwien/polyglot-and-ambiguous-files
|
109eb7d5533de4a053841313e7c14918f9cd9df0
|
[
"MIT"
] | null | null | null |
testing/python-image-upload/upload.py
|
pkalauner-tuwien/polyglot-and-ambiguous-files
|
109eb7d5533de4a053841313e7c14918f9cd9df0
|
[
"MIT"
] | 1
|
2021-03-23T20:13:21.000Z
|
2021-03-23T20:13:21.000Z
|
testing/python-image-upload/upload.py
|
pkalauner-tuwien/polyglot-and-ambiguous-files
|
109eb7d5533de4a053841313e7c14918f9cd9df0
|
[
"MIT"
] | null | null | null |
from flask import Flask, request, render_template, render_template_string, send_from_directory
from flask_csp.csp import csp_header, csp_default
import imghdr
import os
import hashlib
import subprocess
app = Flask(__name__)
app.config["UPLOAD_DIRECTORY"] = 'uploads'
app.config["ALLOWED_EXTENSIONS"] = ["jpg", "jpeg", "png", "gif"]
# Remove report-uri from default CSP header
h = csp_default()
h.update({'report-uri':""})
@app.route('/')
@app.route('/upload')
@csp_header()
def index():
return render_template("upload.html")
@app.route('/upload', methods = ['POST'])
@csp_header()
def upload():
f = request.files['file']
# Check extension
if not "." in f.filename:
return render_template("upload.html", msg="The selected file has an invalid extension.")
name, ext = f.filename.rsplit(".", 1)
ext = ext.lower()
if ext not in app.config["ALLOWED_EXTENSIONS"]:
return render_template("upload.html", msg="The selected file has an invalid extension.")
hashed_name = hashlib.md5(name.encode("utf-8")).hexdigest()
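    # e.g. the name "cat" should hash to "d077f244def8a70e5ea758bd8352fcd8"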
path = os.path.join(app.config["UPLOAD_DIRECTORY"], "{}.{}".format(hashed_name, ext))
    # Append a number if the file already exists
    file_id = 1
    while os.path.isfile(path):
        path = os.path.join(app.config["UPLOAD_DIRECTORY"], "{}_{}.{}".format(hashed_name, file_id, ext))
        file_id += 1
f.save(path)
    # Check the file content so that merely changing the extension cannot
    # bypass the check; imghdr.what() may return None for non-images
    if (imghdr.what(path) or "").lower() not in app.config["ALLOWED_EXTENSIONS"]:
os.remove(path)
return render_template("upload.html", msg="The selected file is not an image.")
return render_template("upload.html", msg="Upload successful!", imagepath = path)
@app.route('/view')
@csp_header()
def view():
imagepath = request.args.get('image')
if not os.path.isfile(imagepath):
# Vulnerable, see method below
template = "{% extends 'index.html' %}{% block content %}<h4>Image " + imagepath + " does not exist.</h4>{% endblock %}"
return render_template_string(template)
return render_template("view.html", imagepath=imagepath)
# PoC method to show why attackers should not be able to upload arbitrary code.
# This method should obviously not exist in a real application, but code execution could also be achieved through other, more sophisticated ways.
def exec_script(file):
return subprocess.check_output(['python3', file])
app.jinja_env.globals['exec_script'] = exec_script # Allow usage in templates
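# Illustrative attack chain (hypothetical filename, for demonstration only):
# an uploaded polyglot that is both a valid image and valid Python, saved as
# e.g. uploads/<md5-of-name>.gif, could be executed through the SSTI in view()
# with a request like
#   /view?image={{ exec_script('uploads/<md5-of-name>.gif') }}
# because exec_script is exposed as a Jinja2 global above.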
@app.route('/uploads/<filename>')
@csp_header()
def send_file(filename):
return send_from_directory(app.config["UPLOAD_DIRECTORY"], filename)
| 34.666667
| 145
| 0.687308
| 0
| 0
| 0
| 0
| 1,863
| 0.716538
| 0
| 0
| 1,061
| 0.408077
|
7577e2f7df5f804c676013417ab035ff063a393c
| 8,767
|
py
|
Python
|
test.py
|
AllenChen1998/RAD
|
9778e2576e427a26b2181561648f82162237a7dd
|
[
"MIT"
] | 1
|
2021-08-05T04:08:15.000Z
|
2021-08-05T04:08:15.000Z
|
test.py
|
AllenChen1998/RAD
|
9778e2576e427a26b2181561648f82162237a7dd
|
[
"MIT"
] | null | null | null |
test.py
|
AllenChen1998/RAD
|
9778e2576e427a26b2181561648f82162237a7dd
|
[
"MIT"
] | null | null | null |
import os
import cv2
import json
import time
import shutil
import argparse
import numpy as np
import PIL.Image
from copy import deepcopy
import mmcv
from mmdet.apis import init_detector, inference_detector, show_result
# install mmdetection v1 from https://github.com/open-mmlab/mmdetection
# download the corresponding pretrained models from https://mmdetection.readthedocs.io/en/latest/model_zoo.html
config_dir = 'configs'
config_files = {
'ssd': config_dir + '/ssd512_coco.py',
'faster_rcnn': config_dir + '/faster_rcnn_r101_fpn_1x.py',
'mask_rcnn': config_dir + '/mask_rcnn_x101_64x4d_fpn_1x.py',
'retinanet': config_dir + '/retinanet_r101_fpn_1x.py',
'cascade_rcnn': config_dir + '/cascade_rcnn_r101_fpn_1x.py',
'cascade_mask_rcnn': config_dir + '/cascade_mask_rcnn_x101_64x4d_fpn_1x.py',
'htc': config_dir + '/htc/htc_x101_64x4d_fpn_20e_16gpu.py',
}
config_files_ori = deepcopy(config_files)
checkpoint_dir = 'models'
checkpoint_files = {
'ssd': checkpoint_dir + '/ssd512_coco_vgg16_caffe_120e_20181221-d48b0be8.pth',
'faster_rcnn': checkpoint_dir + '/faster_rcnn_r101_fpn_2x_20181129-73e7ade7.pth',
'mask_rcnn': checkpoint_dir + '/mask_rcnn_x101_64x4d_fpn_1x_20181218-cb159987.pth',
'retinanet': checkpoint_dir + '/retinanet_r101_fpn_2x_20181129-72c14526.pth',
'cascade_rcnn': checkpoint_dir + '/cascade_rcnn_r101_fpn_20e_20181129-b46dcede.pth',
'cascade_mask_rcnn': checkpoint_dir + '/cascade_mask_rcnn_x101_64x4d_fpn_20e_20181218-630773a7.pth',
'htc': checkpoint_dir + '/htc_x101_64x4d_fpn_20e_20190408-497f2561.pth',
}
model_order = list(config_files.keys())
assert model_order == list(checkpoint_files.keys())
paths = {'Annot': 'COCO/annotations', 'mmdet': 'mmdetection/tools/test.py'}
for key in paths: assert os.path.exists(paths[key]), paths[key] + ' does not exist'
for key in config_files: assert os.path.exists(config_files[key]), config_files[key] + ' does not exist'
for key in checkpoint_files: assert os.path.exists(checkpoint_files[key]), checkpoint_files[key] + ' does not exist'
dirs = ['adv', 'cache', 'index', 'detection']
mask = ['mask_rcnn', 'cascade_mask_rcnn', 'htc']
def calculate_rmse(dir_name):
# calculate the RMSE for all samples
rmses = []
for root, _, files in os.walk(dir_name):
        if 'sample_adv.png' not in files or 'sample_ori.png' not in files: continue
adv = np.array(PIL.Image.open(root + '/sample_adv.png')).astype(np.float32)
ori = np.array(PIL.Image.open(root + '/sample_ori.png').resize((adv.shape[1], adv.shape[0]))).astype(np.float32)
rmse = np.sqrt(np.mean(np.square(adv-ori)))
if rmse < 20: rmses.append(rmse)
print('RMSE is %.3f in %d samples' % (sum(rmses)/(len(rmses)+0.001), len(rmses)))
def re_annotation(dir_name):
data = json.load(open(paths['Annot'] + '/instances_val2017.json', 'r', encoding='utf-8'))
scales = {}
size = 416 if ('MaskRCNN' not in dir_name) else 448
existing = [] # record the existing samples
for file in os.listdir(dir_name + '/' + dirs[0]): existing.append(file)
# record the resized scale for each sample
abandoned = []
for i in range(len(data['images'])):
new_name = data['images'][i]['file_name'][:-4] + '.png'
if new_name not in existing: abandoned.append(i)
data['images'][i]['file_name'] = new_name
ih, iw = data['images'][i]['height'], data['images'][i]['width']
scale = min(size/ih, size/iw)
data['images'][i]['height'], data['images'][i]['width'] = int(ih*scale), int(iw*scale)
scales[data['images'][i]['id']] = scale
for i, index in enumerate(abandoned): data['images'].remove(data['images'][index-i])
# resize the annotations for detection and segmentation
abandoned = []
for i in range(len(data['annotations'])):
image_id = data['annotations'][i]['image_id']
scale = scales[image_id]
new_name = str(image_id).zfill(12) + '.png'
if new_name not in existing: abandoned.append(i)
for j in range(len(data['annotations'][i]['segmentation'])):
try:
data['annotations'][i]['segmentation'][j] = list(np.array(data['annotations'][i]['segmentation'][j])*scale)
except KeyError: continue
data['annotations'][i]['area'] = data['annotations'][i]['area'] * (scale ** 2)
data['annotations'][i]['bbox'] = list(np.array(data['annotations'][i]['bbox']) * scale)
for i, index in enumerate(abandoned): data['annotations'].remove(data['annotations'][index-i])
result_dir = dir_name + '/' + dirs[1]
os.makedirs(result_dir, exist_ok=True)
json.dump(data, open(result_dir + '/instances_val2017_resized.json', 'w', encoding='utf-8'))
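# Worked example of the rescaling above: a 640x480 image with size=416 gets
# scale = min(416/480, 416/640) = 0.65, so its recorded dimensions become
# 416x312, a bbox [10, 20, 100, 50] becomes [6.5, 13.0, 65.0, 32.5], and the
# area is multiplied by 0.65**2.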
def change_config(model_name, dir_name):
global config_files, config_files_ori
# change the config files to test the generated adversarial samples
ori_config = config_files_ori[model_name]
py_file = open(ori_config, 'r').read()
py_file = py_file.replace("data_root + 'val2017/'", "'" + dir_name + "/" + dirs[0] + "'")
py_file = py_file.replace("data_root + 'annotations/instances_val2017.json'", "'" + dir_name + "/" + dirs[1] + "/instances_val2017_resized.json'")
new_config = dir_name + '/' + dirs[1] + '/' + os.path.basename(config_files_ori[model_name])
with open(new_config, 'w') as f: f.write(py_file)
config_files[model_name] = new_config
def test_index(model_name, dir_name, metric='bbox', unique_metric=False):
# test the performance of mmdetection models on adversarial samples
if 'MaskRCNN' in dir_name and model_name == 'mask_rcnn': return
result_dir = dir_name + '/' + dirs[2]
os.makedirs(result_dir, exist_ok=True)
file_name = 'test.py' if not unique_metric else 'test_ours.py'
command = 'python mmdetection/tools/%s %s %s --out %s --eval %s' % \
(file_name, config_files[model_name], checkpoint_files[model_name], result_dir + '/' + model_name + '.pickle', metric if (model_name not in mask or unique_metric) else (metric + ' segm'))
print(command)
os.system(command)
def test_bbox(model_name, dir_name, sample_num):
# generate visual results for sample_num samples
source_dir = dir_name + '/' + dirs[0]
result_dir = dir_name + '/' + dirs[3] + '/' + model_name
os.makedirs(result_dir, exist_ok=True)
config_file = config_files[model_name]
checkpoint_file = checkpoint_files[model_name]
model = init_detector(config_file, checkpoint_file, device='cuda:0')
model_id, model_num = model_order.index(model_name) + 1, len(model_order)
for i, file in enumerate(sorted(os.listdir(source_dir), key=lambda x: int(os.path.splitext(os.path.splitext(x)[0])[0]))):
if i >= sample_num: break
img = source_dir + '/' + file
try:
result = inference_detector(model, img)
final = show_result(img, result, model.CLASSES, show=False)
        except Exception: continue
PIL.Image.fromarray(final[:, :, ::-1]).save(result_dir + '/' + os.path.splitext(file)[0] + '.png')
print('[ Model %d/%d %s ] [ No %d/%d ] [ File %s ]' % (model_id, model_num, model_name, i+1, sample_num, file), end='\r')
def test_pipeline():
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
parser.add_argument('dataset', type=str, help='dir name of the tested experiment')
parser.add_argument('gpu_id', help='GPU(s) used')
args, _ = parser.parse_known_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
assert os.path.exists(args.dataset)
print('Calculating RMSE for', args.dataset, 'with', len(os.listdir(args.dataset + '/' + dirs[0])), 'samples...')
calculate_rmse(args.dataset)
re_annotation(dir_name=args.dataset) # resize annotations for existing adversarial samples to dir_name/dirs[1]/instances_val2017_resized.json
# change paths in config file and saved in dir_name/dirs[1]/.py
for model_name in config_files: change_config(model_name=model_name, dir_name=args.dataset)
# run mAP, mAR for samples to dir_name/dirs[2]
for model_name in config_files: test_index(model_name=model_name, dir_name=args.dataset)
# get bbox detection result images in dir_name/dirs[3]/model_name
for model_name in config_files: test_bbox(model_name=model_name, dir_name=args.dataset, sample_num=500)
# run accuracy, IoU for samples to dir_name/dirs[2]
for model_name in config_files: test_index(model_name=model_name, dir_name=args.dataset, unique_metric=True)
if __name__ == "__main__":
test_pipeline()
| 50.97093
| 196
| 0.677883
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,858
| 0.325995
|
7578054f5e5fa0cd2bf6c67b5dfd6c6a49acba24
| 563
|
py
|
Python
|
posts/migrations/0002_auto_20181129_2311.py
|
ddeveloper72/Dhjango-Blog
|
8f9771a149a944e32aa192de97ab69092a1492d2
|
[
"CC-BY-3.0"
] | null | null | null |
posts/migrations/0002_auto_20181129_2311.py
|
ddeveloper72/Dhjango-Blog
|
8f9771a149a944e32aa192de97ab69092a1492d2
|
[
"CC-BY-3.0"
] | null | null | null |
posts/migrations/0002_auto_20181129_2311.py
|
ddeveloper72/Dhjango-Blog
|
8f9771a149a944e32aa192de97ab69092a1492d2
|
[
"CC-BY-3.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-29 23:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('posts', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='piblished_date',
new_name='published_date',
),
migrations.RenameField(
model_name='post',
old_name='view',
new_name='views',
),
]
| 21.653846
| 49
| 0.57016
| 412
| 0.731794
| 0
| 0
| 0
| 0
| 0
| 0
| 150
| 0.26643
|
757912d9e4012e01b625eaf478b57827dc9d6ad6
| 415
|
py
|
Python
|
la/oblas/data/dgeev01.py
|
wtsia/gosl
|
8302f76dfe76d24ea5026b225bdad234383dacf9
|
[
"BSD-3-Clause"
] | 1,811
|
2015-05-21T12:47:27.000Z
|
2022-03-24T04:48:00.000Z
|
la/oblas/data/dgeev01.py
|
wtsia/gosl
|
8302f76dfe76d24ea5026b225bdad234383dacf9
|
[
"BSD-3-Clause"
] | 42
|
2016-09-29T05:23:28.000Z
|
2021-10-30T03:12:00.000Z
|
la/oblas/data/dgeev01.py
|
wtsia/gosl
|
8302f76dfe76d24ea5026b225bdad234383dacf9
|
[
"BSD-3-Clause"
] | 171
|
2015-07-14T07:50:35.000Z
|
2022-03-09T10:04:15.000Z
|
import numpy as np
import scipy.linalg as la
from auxiliary import *
a = np.matrix([
[+0.35, +0.45, -0.14, -0.17],
[+0.09, +0.07, -0.54, +0.35],
[-0.44, -0.33, -0.03, +0.17],
[+0.25, -0.32, -0.13, +0.11],
], dtype=float)
w, vl, vr = la.eig(a, left=True, right=True)
vprintC('w', w)
print
for i in range(4):
vprintC('vl%d'%i, vl[:,i])
print
for i in range(4):
vprintC('vr%d'%i, vr[:,i])
| 18.043478
| 44
| 0.53253
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 15
| 0.036145
|
757aed5f2d7b170e9c0c6e816158ab521912f796
| 9,969
|
py
|
Python
|
source/menus/menus.py
|
HugoPFe/Project-Asteroids
|
7a58ba00283216e83f02b2f58cf1944e9e217433
|
[
"MIT"
] | null | null | null |
source/menus/menus.py
|
HugoPFe/Project-Asteroids
|
7a58ba00283216e83f02b2f58cf1944e9e217433
|
[
"MIT"
] | 4
|
2021-06-20T21:32:53.000Z
|
2021-08-12T11:12:17.000Z
|
source/menus/menus.py
|
HugoPFe/Project-Asteroids
|
7a58ba00283216e83f02b2f58cf1944e9e217433
|
[
"MIT"
] | null | null | null |
import pygame
from pygame.locals import *
from util import *
from constants import FPS, VERSION, SCREEN_WIDTH, SCREEN_HEIGHT
from ui.button import *
from ui.font import *
from media.paths import bg, logo, body_font, title_font
class Main:
def __init__(self):
"""
        Abstract base class for all screens (each subclass runs its own main loop)
"""
# Constants
self.BACKGROUND = pygame.image.load(bg)
# Variables
self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
self.screen_rect = self.screen.get_rect()
self.clock = pygame.time.Clock()
self.running = True
self._buttons = []
def main_loop(self):
while self.running:
self._base_loop()
def _base_loop(self):
self.clock.tick(FPS)
for event in pygame.event.get():
            if event.type == QUIT:  # make sure that every screen stops running
for sub in Main.__subclasses__():
sub.running = False
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
for sub in Main.__subclasses__():
sub.running = False
self.check_events(event)
self.screen.blit(self.BACKGROUND, (0, 0))
self.loop()
pygame.display.flip()
def loop(self):
pass
def render_buttons(self):
""" Draw all buttons on screen """
for button in self._buttons:
button.render()
def add_buttons(self, *args):
for arg in args:
self._buttons.append(arg)
def check_events(self, event):
pass
@staticmethod
def change_screen(next_screen, previous_screen=None, kill_prev=False):
if kill_prev:
previous_screen.running = False
if previous_screen is not None:
next_screen(previous_screen)
else:
next_screen()
def back_screen(self):
self.running = False
@property
def running(self):
return self._running
@running.setter
def running(self, arg):
self._running = arg
print(f'[{self.__class__.__name__}]', f'running: {arg}')
def back_mainmenu(self, screen):
""" Returns directly to MainMenu """
self.back_screen()
screen.back_screen()
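# A minimal sketch of a concrete screen built on Main (illustrative only, not
# part of the game):
#
#   class ExampleScreen(Main):
#       def __init__(self):
#           Main.__init__(self)
#           self.main_loop()
#
#       def loop(self):
#           self.render_buttons()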
class MainMenu(Main):
def __init__(self, game_cls):
""" Class for Main menu """
Main.__init__(self)
self.logo = pygame.image.load(logo).convert_alpha()
self.logo_rect = self.logo.get_rect(center=(SCREEN_WIDTH / 2, 150))
# Buttons
self.play_button = Button(screen=self.screen,
x=120, y=SCREEN_HEIGHT - 220,
width=90, height=40,
text='Jogar',
padding=5,
command=lambda: self.change_screen(game_cls))
self.controls_button = Button(screen=self.screen,
x=120, y=SCREEN_HEIGHT - 160,
width=90, height=40,
text='Controles',
padding=5,
command=lambda: self.change_screen(ControlsMenu))
self.exit_button = Button(screen=self.screen,
x=120, y=SCREEN_HEIGHT - 100,
width=90, height=40,
text='Sair',
padding=5,
command=self.exit)
self.add_buttons(
self.play_button,
self.controls_button,
self.exit_button
)
# Version
self.version_txt = Font(f'version: {VERSION}', (SCREEN_WIDTH - 10, SCREEN_HEIGHT - 30), 'right')
self.version_txt.configure(font_name=body_font, size=15, color='white',
bg_color='black', screen=self.screen)
self.main_loop()
def loop(self):
self.screen.blit(self.logo, self.logo_rect)
self.render_buttons()
self.version_txt.render()
def exit(self):
self.running = False
class ControlsMenu(Main):
def __init__(self):
""" Class for Controls menu """
Main.__init__(self)
self.screen_x = self.screen.get_width()
self.screen_y = self.screen.get_height()
self.screen_rect = self.screen.get_rect()
self.keys_fonts_text = {
'up_font': {'command_text': 'Mover para cima', 'command_key': 'Seta para cima'},
'down_font': {'command_text': 'Mover para baixo', 'command_key': 'Seta para baixo'},
'left_font': {'command_text': 'Mover para esquerda', 'command_key': 'Seta para esquerda'},
'right_font': {'command_text': 'Mover para direita', 'command_key': 'Seta para direita'},
'clockwise_font': {'command_text': 'Girar em sentido horário', 'command_key': 'E'},
'anticlockwise_font': {'command_text': 'Girar em sentido anti-horário', 'command_key': 'Q'},
'shoot_font': {'command_text': 'Atirar', 'command_key': 'Espaço'},
'pause_font': {'command_text': 'Pausar', 'command_key': 'P'}
}
self.control_font = None
self.keys_fontgroup = None
self.keys_frame()
self.back_button = Button(screen=self.screen,
x=SCREEN_WIDTH / 2,
y=SCREEN_HEIGHT - 100,
                                  width=80, height=40,
text='Voltar', padding=3,
command=lambda: self.back_screen())
self.add_buttons(self.back_button)
self.main_loop()
def loop(self):
self.screen.blit(self.frame, self.frame_rect)
self.render_buttons()
self.control_txt.render()
self.keys_fontgroup.render_fonts()
def keys_frame(self):
frame_color = '#353535'
self.frame = pygame.Surface((int(self.screen_x * 0.9), int(self.screen_y * 0.5)))
self.frame.fill(frame_color)
self.frame_rect = self.frame.get_rect(center=self.screen_rect.center)
self.frame_content(frame_color)
def frame_content(self, frame_color):
# Title command_list
self.control_txt = Font('Controles', pos=(self.frame_rect.centerx, 90))
self.control_txt.configure(screen=self.screen,
font_name=title_font,
size=50,
bold=True,
antialias=True,
color=(255, 255, 255),
bg_color=(0, 0, 0),
align='center')
# Keys fonts
font_space = 30
self.keys_fontgroup = FontsGroup(screen=self.screen,
font_name=body_font,
size=18,
bold=True,
antialias=True,
color=(255, 255, 255),
bg_color=frame_color)
keys_fonts_objects = []
for commands, value in self.keys_fonts_text.items(): # Adding fonts to list
keys_fonts_objects.append([Font(text=value['command_text'],
pos=(self.frame_rect.x + 30, self.frame_rect.y)),
Font(text=value['command_key'],
pos=(self.frame_rect.right - 30, self.frame_rect.y),
align='right')
])
        for c, command_font_list in enumerate(keys_fonts_objects, start=1):  # position and register the fonts
            command_font_list[0].y += c * font_space
            command_font_list[1].y += c * font_space
            for font in command_font_list:
                self.keys_fontgroup.add_fonts(font)
class PauseScreen(Main):
def __init__(self, game):
""" Class for Pause screen """
Main.__init__(self)
self.paused_font = Font('Pausado', (self.screen_rect.centerx, 100), 'center')
self.paused_font.configure(screen=self.screen, font_name=title_font, size=50, bold=True,
antialias=True, color='white', bg_color='black')
# Buttons
self.continue_button = Button(screen=self.screen, x=self.screen_rect.centerx, y=400,
width=110, height=40, text='Continuar',
padding=10, command=self.back_screen)
self.controls_button = Button(screen=self.screen, x=self.screen_rect.centerx, y=460,
width=110, height=40, text='Controles',
padding=8, command=lambda: self.change_screen(ControlsMenu))
self.mainmenu_button = Button(screen=self.screen, x=self.screen_rect.centerx, y=520,
width=110, height=40, text='Menu',
padding=7, command=lambda: self.back_mainmenu(game))
self.add_buttons(
self.continue_button,
self.controls_button,
self.mainmenu_button
)
self.main_loop()
    def loop(self):
        self.paused_font.render()
        self.render_buttons()
def check_events(self, event):
if event.type == KEYDOWN:
if event.key == K_p:
self.back_screen()
__all__ = ['Main', 'MainMenu', 'PauseScreen', 'ControlsMenu']
| 34.856643
| 104
| 0.51921
| 9,668
| 0.969515
| 0
| 0
| 482
| 0.048335
| 0
| 0
| 1,264
| 0.126755
|
757b44c079d1af1e49497f1e9f96873e80ae2cd3
| 15,155
|
py
|
Python
|
Cursos/treina_web.py
|
FranciscoAlveJr/Bot_Telegram
|
9960485a4a25648719ef6fafcb3b02c82db79253
|
[
"MIT"
] | null | null | null |
Cursos/treina_web.py
|
FranciscoAlveJr/Bot_Telegram
|
9960485a4a25648719ef6fafcb3b02c82db79253
|
[
"MIT"
] | null | null | null |
Cursos/treina_web.py
|
FranciscoAlveJr/Bot_Telegram
|
9960485a4a25648719ef6fafcb3b02c82db79253
|
[
"MIT"
] | null | null | null |
import requests
import json
import os
import shutil
from bs4 import BeautifulSoup as bs
import random
import time
import base64
import m3u8
treinaweb_sessions = requests.Session()
class Downloader:
def index(self):
escolha = input('Qual plataforma voce deseja baixar?\n1 - TreinaWeb\n2 - AvMakers\n3 - Freelae\nResposta: ')
n = [1, 2, 3]
if escolha.isdigit():
escolha = int(escolha)
if escolha in n:
if escolha == 1:
self.main = 'treinaweb'
elif escolha == 2:
self.main = 'avmakers'
elif escolha == 3:
self.main = 'freelae'
else:
print('Erro. Saindo.')
exit(0)
self.headers = {
'authority': f'www.{self.main}.com.br',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'upgrade-insecure-requests': '1',
'origin': f'https://www.{self.main}.com.br',
'content-type': 'application/x-www-form-urlencoded',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'navigate',
'sec-fetch-user': '?1',
'sec-fetch-dest': 'document',
'referer': f'https://www.{self.main}.com.br/login',
'accept-language': 'pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7',
}
cookie_jar = treinaweb_sessions.get(f'https://www.{self.main}.com.br/login', headers=self.headers).cookies.get_dict()[f'{self.main}-site']
#self.headers['cookie'] = f"treinaweb-site={cookie_jar}; path=/; secure; httponly; samesite=lax"
#self.cookies = {"treinaweb-site": cookie_jar}
user = 'ocoisa081@gmail.com'
pswd = '18020301.pP'
data = {
'username': user,
'password': pswd
}
treinaweb_sessions.post(f'https://www.{self.main}.com.br/login', headers=self.headers, data=data)
infos = treinaweb_sessions.get(f'https://www.{self.main}.com.br/api/painel/v1/aluno', headers=self.headers)
self.headers['cookie'] = f"{self.main}-site={infos.cookies.get_dict()[f'{self.main}-site']}; path=/; secure; httponly; samesite=lax"
#print(teste.headers)
#
self.quest()
def quest(self):
escolha = input(f'Escolha uma das funções abaixo\n1 - Baixar Cursos\n2 - Baixar Formações\n3 - Informações\n4 - Sair\nResposta: ')
        n = [1, 2, 3, 4]
if escolha.isdigit():
escolha = int(escolha)
if escolha in n:
if escolha == 1:
self.get_cursos()
elif escolha == 2:
self.get_formacao()
                elif escolha == 3:
                    self.infos()
                elif escolha == 4:
                    exit(0)
else:
print('Erro. Saindo.')
exit(0)
def get_cursos(self):
#downloaded_read = json.loads(open('downloaded.json', 'r', encoding='utf-8').read())
#downloaded_write = open('downloaded.json', 'w', encoding='utf-8')
infos = treinaweb_sessions.get(f'https://www.{self.main}.com.br/api/painel/v1/cursos', headers=self.headers).json()
categorias = {}
try:
cats = infos['meta']['categorias']['data']
for cat in cats:
categorias[cat['id']] = cat['nome']
        except (KeyError, TypeError):
categorias = {
1: 'Freelae',
2: 'Bonus',
3: 'Bonus',
4: 'Bonus',
}
cursos = infos['data']
for index, curso in enumerate(cursos, start=1):
categoria = curso['categorias']
if len(categoria) > 1:
random_num = random.choice(categoria)
self.categoria = categorias[random_num]
else:
self.categoria = categorias[categoria[0]]
self.curso_nome = self.replacer(curso['nome'])
print(f'{index} - {self.curso_nome}')
print(f'{index+1} - Baixar todos')
escolha = input('Qual curso vc quer baixar?\nR: ')
if escolha.isdigit():
escolha = int(escolha)
if escolha < index + 1 :
curso = cursos[escolha-1]
self.get_course_here(curso)
elif escolha == index + 1 :
for index, curso in enumerate(cursos, start=1):
categoria = curso['categorias']
if len(categoria) > 1:
random_num = random.choice(categoria)
self.categoria = categorias[random_num]
else:
self.categoria = categorias[categoria[0]]
self.curso_nome = self.replacer(curso['nome'])
self.get_course_here(curso)
else:
print('Erro. Saindo.')
exit(0)
#downloaded_read.append(self.curso_nome)
#if self.curso_nome in downloaded_read:
#continue
#tipos
#1: Cursos
#2: Direto ao ponto
    def get_course_here(self, curso):
        # Map the numeric course type to its display name; unknown types are
        # logged and skipped
        tipo_names = {1: 'Cursos', 2: 'Direto ao Ponto', 3: 'Projeto Prático'}
        if curso['tipo'] not in tipo_names:
            print(curso)
            return
        self.tipo = tipo_names[curso['tipo']]
        aulas = self.return_self(curso['links'])['data']['aulas']['data']
for aula in aulas:
modulo = self.replacer(aula['titulo'])
modulo_count = aula['ordem']
self.final_modulo = f'{modulo_count} - {modulo}'
sub_aulas = aula['subaulas']['data']
for sub_aula in sub_aulas:
aula_t = self.replacer(sub_aula['titulo'])
aula_count = sub_aula['ordem']
self.final_aula = f'{aula_count} - {aula_t}'
tipo = sub_aula['tipo']
print(f'{self.categoria} | {self.curso_nome} | {self.final_modulo} | {self.final_aula} | ', end='')
path = self.create_path(f'{self.main.capitalize()}/{self.tipo}/{self.categoria}/{self.curso_nome}/{self.final_modulo}')
if tipo == 3:
print("Questionario")
continue
elif tipo == 1:
print("Apostila")
self.aula_path = f'{path}/{self.final_aula}.html'
apostilas = self.get_apostilas(sub_aula['links'][0]['uri'])
                    css = 'body {margin: 50px 150px 50px 150px; text-align: justify} .HtmlContentRenderer_text-content-style__2TWCB {background-color: #fff; font-size: 16px; font-weight: 400; color: #707070; word-break: break-word}'
html = f"<html lang='pt-br' data-product='treinaweb'><head><meta charset='utf-8'><style>{css}</style></head><body><h1>{self.final_aula}</h1><br><div class='HtmlContentRenderer_text'>{apostilas}</div></body></html>"
with open(self.aula_path, 'w', encoding='utf-8') as out:
out.write(html)
continue
elif tipo == 2:
self.aula_path = f'{path}/{self.final_aula}.mp4'
if os.path.exists(self.aula_path):
continue
print('Video')
videos = self.get_video(sub_aula['links'][0]['uri'])
                    if videos['url_anexo'] is not None:
                        ext = videos['url_anexo'].split('?')[0].split('.')[-1]
                        os.system(f'aria2c -o "{path}/{self.final_aula}.{ext}" "{videos["url_anexo"]}" --quiet --continue=true')
url = videos['url']
encoded = str(bs(treinaweb_sessions.get(url, headers=self.headers).content, 'html.parser').find('head').find('script', {'type': 'text/javascript'}))
encoded = encoded.split("';")[0]
encoded = encoded.split("= '")[1]
data = json.loads(base64.b64decode(encoded))
signatures = data["signatures"]
m3u8_signatures = signatures['m']
key_signatures = signatures['k']
ts_signatures = signatures['t']
#all_signatures = [m3u8_signatures, key_signatures, ts_signatures]
s3_user_hash = data["s3_user_hash"]
s3_video_hash = data["s3_video_hash"]
sessionID = data["sessionID"]
master_m3u8_name = 'index.m3u8'
self.get_m3u8(master_m3u8_name, m3u8_signatures, s3_user_hash, s3_video_hash, sessionID)
master_content = open(f"tmp/{master_m3u8_name}", 'r').read()
master_m3u8 = m3u8.loads(master_content)
self.set_master(master_m3u8)
master_content = open(f"tmp/{master_m3u8_name}", 'w')
master_dumps = master_m3u8.dumps()
with master_content as master_output:
master_output.write(master_dumps)
                    max_resolution = master_m3u8.playlists.uri
self.get_m3u8(max_resolution, m3u8_signatures, s3_user_hash, s3_video_hash, sessionID)
video_1080_content = open(f'tmp/{max_resolution}', 'r').read()
video_1080_m3u8 = m3u8.loads(video_1080_content)
video_1080_content = open(f'tmp/{max_resolution}', 'w')
video_dumps = video_1080_m3u8.dumps()
with video_1080_content as video_output:
video_output.write(video_dumps)
video_segments = video_1080_m3u8.data['segments']
key_type = max_resolution.replace('m3u8', 'key')
self.get_key(key_type, key_signatures, s3_user_hash, s3_video_hash, sessionID)
self.get_ts(video_segments, ts_signatures, s3_user_hash, s3_video_hash, sessionID)
                    if not os.path.exists(self.aula_path):
os.system(f'ffmpeg -allowed_extensions ALL -i "tmp/index.m3u8" "{self.aula_path}" -preset ultrafast -nostats -loglevel 0')
                    # Remove the temporary HLS files
                    shutil.rmtree('tmp', ignore_errors=True)
continue
elif tipo == 4:
print(sub_aula)
exit(0)
#tipos
#1 = apostila
#2 = video
#3 = questionario
#4 = ??
time.sleep(1)
#with downloaded_write as output:
#output.write(json.dumps(downloaded_read))
def get_key(self, tipo, signatures, s3_user_hash, s3_video_hash, sessionID):
        path = 'tmp'
cfp = signatures['CloudFront-Policy']
cfs = signatures['CloudFront-Signature']
kpid = signatures['CloudFront-Key-Pair-Id']
url = f'https://hls2.videos.sproutvideo.com/{s3_user_hash}/{s3_video_hash}/video/{tipo}?Policy={cfp}&Signature={cfs}&Key-Pair-Id={kpid}&sessionID={sessionID}'
os.system(f'aria2c -o "{path}/{tipo}" "{url}" --quiet --continue=true')
    def set_master(self, master):
        # Prefer the highest available rendition: 1080p wins outright, 720p is
        # kept as a fallback, otherwise the first playlist is used
        chosen = master.playlists[0]
        for x in master.playlists:
            if '1080.m3u8' in x.uri:
                chosen = x
                break
            elif '720.m3u8' in x.uri:
                chosen = x
        master.playlists = chosen
def get_m3u8(self, tipo, signatures, s3_user_hash, s3_video_hash, sessionID):
path = 'tmp'
        os.makedirs(path, exist_ok=True)
cfp = signatures['CloudFront-Policy']
cfs = signatures['CloudFront-Signature']
kpid = signatures['CloudFront-Key-Pair-Id']
m3u8_file = f'https://hls2.videos.sproutvideo.com/{s3_user_hash}/{s3_video_hash}/video/{tipo}?Policy={cfp}&Signature={cfs}&Key-Pair-Id={kpid}&sessionID={sessionID}'
os.system(f'aria2c -o "{path}/{tipo}" "{m3u8_file}" --quiet --continue=true')
def get_ts(self, segments, signatures, s3_user_hash, s3_video_hash, sessionID):
cfp = signatures['CloudFront-Policy']
cfs = signatures['CloudFront-Signature']
kpid = signatures['CloudFront-Key-Pair-Id']
path = 'tmp'
for segment in segments:
url = segment['uri']
segment_link = f'https://hls2.videos.sproutvideo.com/{s3_user_hash}/{s3_video_hash}/video/{url}?Policy={cfp}&Signature={cfs}&Key-Pair-Id={kpid}&sessionID={sessionID}'
filename = url
ts_path = f'{path}/{filename}'
            if not os.path.exists(ts_path):
os.system(f'aria2c -o "{ts_path}" "{segment_link}" --quiet --continue=true')
time.sleep(0.01)
time.sleep(0.5)
def get_video(self, api):
video = treinaweb_sessions.get(api, headers=self.headers).json()['data']['video']['data']
return video
def get_apostilas(self, api):
apostilas = treinaweb_sessions.get(api, headers=self.headers).json()['data']['apostila']['data']['html']
return apostilas
def replacer(self, text):
        # '//' must be checked before '/', otherwise the single-slash rule
        # consumes it first
        invalid = {'//': ' - ', '/': '-', r'"': r"'", '\\': " - ", '|': " - ", '<': "«", '>': "»", '*': "x", ':': ' -', '?': "¿", '\n': ' - '}
for char in invalid:
if char in text:
text = text.replace(char, invalid[char])
return text
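    # e.g. replacer('a/b:c?') returns 'a-b -c¿'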
def return_self(self, api):
for link in api:
if link['type'] == 'GET' and link['rel'] == 'self':
uri = link['uri'] + '?include=aulas'
aulas = treinaweb_sessions.get(uri, headers=self.headers).json()
return aulas
def create_path(self, path):
        os.makedirs(path, exist_ok=True)
return path
#Downloader().index()
| 37.512376
| 234
| 0.512966
| 14,968
| 0.98701
| 0
| 0
| 0
| 0
| 0
| 0
| 4,764
| 0.314144
|
757c4a2be3e6e27c73b14c6ddc8062d7cb6e67ce
| 10,724
|
py
|
Python
|
A037274/simple.py
|
sethtroisi/OEIS
|
2c10b86d8a8be69aa8020623d4802e3d68772ede
|
[
"Apache-2.0"
] | 3
|
2019-05-25T23:08:48.000Z
|
2021-12-11T03:59:42.000Z
|
A037274/simple.py
|
sethtroisi/OEIS
|
2c10b86d8a8be69aa8020623d4802e3d68772ede
|
[
"Apache-2.0"
] | 1
|
2019-03-07T21:22:52.000Z
|
2019-03-07T21:22:52.000Z
|
A037274/simple.py
|
sethtroisi/OEIS
|
2c10b86d8a8be69aa8020623d4802e3d68772ede
|
[
"Apache-2.0"
] | 1
|
2021-04-29T06:35:07.000Z
|
2021-04-29T06:35:07.000Z
|
import gmpy2
import itertools
import subprocess
import math
import time
from collections import defaultdict
from factordb.factordb import FactorDB
START = 2
STOP = 5000
# Also see A056938
def product(factors):
temp = 1
for factor in factors:
temp *= factor
return temp
def factordb_format(number):
if number < 1e10:
return str(number)
strN = str(number)
length = len(strN)
if number < 1e24:
return "{}<{}>".format(strN, length)
return "{}...{}<{}>".format(strN[:10], strN[-2:], length)
def split_to_lines(number, max_size):
size = max_size - 2
# split this number evenly over multiple lines
needed_lines = (len(number) - 1) // size + 1
assert size * needed_lines >= len(number)
# split evenly onto this many lines
per_line = len(number) // needed_lines
# this many lines get 1 extra
extra = len(number) % needed_lines
assert per_line + (extra > 0) <= size
lines = []
for l in range(1, needed_lines+1):
# take per_line, plus potentially one extra
this_line = number[:per_line + (extra > 0)]
number = number[len(this_line):]
this_line += " /" if l != needed_lines else ""
lines.append(this_line)
return lines
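# e.g. split_to_lines("123456789", 6) returns ['123 /', '456 /', '789']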
def row_format(string, max_size=60):
if len(string) <= max_size:
return string
mult = " * "
if mult in string:
parts = string.split(mult)
lines = []
line = ""
for part in parts:
merged = line + part + mult
if len(merged) <= max_size + 1: # trailing space
line = merged
continue
elif line:
lines.append(line.strip())
line = ""
assert line == ""
if len(part) <= max_size - 2:
lines.append(part + " *")
continue
lines += split_to_lines(part + " *", max_size)
temp = "<br>".join(lines)
assert temp.endswith(" *"), temp[-20:]
return temp[:-2]
return "<br>".join(split_to_lines(string, max_size))
def factor_large(n, b1=10**6):
args = ["ecm", "-q", "-c", "10", str(b1)]
print ("\t\t", " ".join(args))
result = subprocess.run(
args,
input=str(n).encode(),
stdout=subprocess.PIPE)
if result.returncode == 8:
# Need to rerun with smaller b1
print("\t\tfound self ({} with b1={})".format(n, b1))
return factor_large(n, b1= max(100, b1 // 90))
return list(map(int, result.stdout.strip().split()))
def attempt_factorization(s, known_factors):
t = s
factors = []
for factor in known_factors:
# Last factor maybe non-prime
if gmpy2.is_prime(factor):
t //= factor
factors.append(factor)
# Toggle to if True: to recheck factordb.
if t >= 1e10 and t not in known_factors:
# Check factorDB (probably already been done)
time.sleep(0.2)
factordb = FactorDB(t)
factordb.connect()
factordb_factors = factordb.get_factor_list()
if factordb_factors:
print ("\t\tfactordb:", factordb.get_status(), factordb_factors)
for factor in factordb_factors:
if gmpy2.is_prime(factor):
t //= factor
factors.append(factor)
# small trial division
p = 2
while t > 1 and t < 1e10:
while t % p == 0:
t //= p
factors.append(p)
if t == 1:
break
p += 1 + (p&1)
return t, factors
def load_from_file():
home_primes = defaultdict(list)
n = None
s = None
with open("home_primes.txt") as f:
# each line is "<base> <start> <step> <status>: <factor> <factor> ..."
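        # e.g. "10 8 1 FF: 2 2 2" records that HP(8) step 1 factored 8 as
        # 2*2*2, so the next term is the concatenation 222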
for line in f.readlines():
pre, post = line.strip().split(":")
*pre, status = pre.split()
base, start, step, = map(int, pre)
if start != n:
n = start
s = n
factors = list(map(int, post.split()))
assert status in ("FF", "P", "CF"), line
home_primes[(base, start, step)] = factors
assert product(factors) == s, (start, step, s, factors)
s = int("".join(map(str, factors)))
min_step = {}
duplicates = {}
all_primes = set()
composites = defaultdict(set)
for key, factors in home_primes.items():
for p in factors:
if gmpy2.is_prime(p):
all_primes.add(p)
else:
composites[key].add(p)
is_terminal = len(factors) == 1 and factors[0] in all_primes
s = int("".join(map(str, factors)))
if s in min_step and not is_terminal:
# Make sure min step isn't previous step or that's stupid
if min_step[s] == (key[0], key[1], key[2]-1):
continue
duplicates[key] = min_step[s]
else:
min_step[s] = key
print ("Found {} primes, {} composites".format(
len(all_primes), len(composites)))
return home_primes, min_step, duplicates, composites
def process(home_primes, composites):
added = False
try:
for n in range(START, STOP+1):
print (n)
t = n
for step in itertools.count(1):
if gmpy2.is_prime(t):
break
s = t
key = (10, n, step)
original = home_primes[key]
t, factors = attempt_factorization(s, original)
factors.sort()
if t > 1:
# t is composite
factors.append(t)
composites[key].add(t)
assert product(factors) == s, (s, t, factors)
if factors != original:
home_primes[key] = factors
added = True
print ("\t\tnew factor", factors)
if t > 1:
print ("Breaking, failed to factor C{}: {}".format(len(str(t)), factordb_format(t)))
break
new = int("".join(map(str, factors)))
t = new
if False:
if gmpy2.is_prime(s):
if new < 1e40:
print ("\t", step, new, "from", s, factors)
else:
print ("\t", step, new)
print ("\t\tfrom", factors)
if gmpy2.is_prime(t):
home_primes[(10, n, step)] = [t]
else:
print ("\t {} Gave up on step {}".format(n, step))
except KeyboardInterrupt:
print("Stopping from ^C")
return added
# For use with kernprof -v --line-by-line simple.py
#@profile
def run():
home_primes, min_step, duplicates, composites = load_from_file()
added = False
added = process(home_primes, composites)
if added:
with open("home_primes.txt", "w") as f:
for base, start, step in sorted(home_primes.keys()):
factors = home_primes[(base, start, step)]
if not factors:
continue
if all(gmpy2.is_prime(f) for f in factors):
if len(factors) == 1:
status = "P"
else:
status = "FF"
else:
status = "CF"
f.write("{} {} {} {}: {}\n".format(
base, start, step, status, " ".join(map(str, factors))))
# Sections copied into README.md
if True:
ranges = [(2,100), (2,499)] + [(a*500, a*500 + 499) for a in range(1, STOP//500)]
for low, high in ranges:
filename = "RESULTS_{}_{}.md".format(low, high)
print ("Genarating", filename)
template = """
## [Back](../README.md)
## Results for A037274 a(n) n={}..{}
---
|start|step|number|factors|
|-----|----|------|-------|
{}
"""
rows = []
for (_,start,step),factors in sorted(home_primes.items()):
if start not in range(low, high+1):
continue
num = row_format(str(product(factors)), max_size=40)
if len(factors) == 1:
factors = "Home Prime!" if gmpy2.is_prime(min(factors)) else "Unfactored composite"
else:
mult = " * ".join(map(str, sorted(factors)))
factors = row_format(mult, max_size=50)
columns = [start, step, num, factors]
rows.append("|" + "|".join(map(str, columns)) + "|")
with open("results/" + filename, "w") as f:
f.write(template.format(
low, high,
"\n".join(rows)))
if True:
count = 0
print ()
print ()
print ("### Unterminated")
print ("---")
print ()
# Move the "These <X> a(n) that have not..." line here
print ()
print ("|start|step|composite|same as|")
print ("|-----|----|---------|-------|")
same = defaultdict(list)
for key, cfs in composites.items():
same[tuple(sorted(cfs))].append("HP({}).{}".format(key[1], key[2]))
merged_count = 0
for (base, start, step), cfs in composites.items():
assert (base, start, step+1) not in home_primes
assert len(cfs) and not gmpy2.is_prime(max(cfs))
formatted_factors = tuple(factordb_format(c) for c in sorted(cfs))
key = tuple(sorted(cfs))
if (base, start, step) not in duplicates:
same_c = same[key]
assert same_c[0].startswith("HP({})".format(start)), (key, same_c)
print ("|HP({})|{}|{}|{}|".format(
start, step, ", ".join(formatted_factors), " ".join(same_c[1:])))
merged_count += len(same_c) - 1
count += 1
print ("{} numbers ({} merged) <= {} have not yet reached a prime".format(
count, count - merged_count, STOP))
print ()
print ()
if True:
print ("### Work")
print ("---")
print ()
# TODO use datetime here
print ("This is a short list of the smallest (and largest) unfactored numbers as of 2020-03.")
print ()
print ("|size|start|step|composite|other factor|")
print ("|----|-----|----|---------|------------|")
by_size = sorted((c, key) for key, cfs in composites.items() for c in cfs)
for c, key in by_size[:30] + by_size[-20:]:
if key in duplicates:
continue
others = home_primes[key][:]
others.remove(c)
print ("|c{}|HP({})|step {}|{}|{}|".format(
len(str(c)), key[1], key[2],
c,
" * ".join(map(str, others))))
print()
print()
if True:
deltas = []
last = ""
for (base,start,step),factors in sorted(home_primes.items()):
assert factors == sorted(factors)
new = "".join(map(str, factors))
if step > 1 and (base, start, step) not in duplicates:
delta = len(new) - len(last)
deltas.append((delta, int(last), int(new), start, step-1))
last = new
# For smallest jump | find biggest number
# For biggest jumps | find smallest number
deltas.sort(key=lambda d: (d[0], d[1] if d[0] > 3 else -d[1]))
print ()
print ("Home Primes with smallest and largest increase in number of digits")
print ()
print ("|+digits|HP|current|next|link|")
print ("|-------|--|-------|----|----|")
for delta, s1, s2, start, step in deltas[:15] + deltas[-15:]:
print("|{}|{}|{}|{}|{}|".format(
delta,
f"HP({start}).{step}",
factordb_format(abs(s1)),
factordb_format(abs(s2)),
"[FactorDB](http://factordb.com/aliquot.php?type=10&aq={}&big=1)".format(start)))
run()
| 26.743142
| 98
| 0.563129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,007
| 0.18715
|
757deb3edec28d6288b4d6073331b4f58c9cf7f2
| 14,541
|
py
|
Python
|
system/scripts/coordinator/strategies.py
|
maxvonhippel/snake
|
0805773dc34e1480dffaae40174aa1f82d1c6ce8
|
[
"BSD-3-Clause"
] | 11
|
2015-11-24T11:07:28.000Z
|
2021-12-23T04:10:29.000Z
|
system/scripts/coordinator/strategies.py
|
maxvonhippel/snake
|
0805773dc34e1480dffaae40174aa1f82d1c6ce8
|
[
"BSD-3-Clause"
] | null | null | null |
system/scripts/coordinator/strategies.py
|
maxvonhippel/snake
|
0805773dc34e1480dffaae40174aa1f82d1c6ce8
|
[
"BSD-3-Clause"
] | 6
|
2016-03-01T06:32:21.000Z
|
2022-03-24T19:31:41.000Z
|
#Samuel Jero <sjero@purdue.edu>
#vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import os
import sys
import time
from datetime import datetime
import re
import pprint
from types import NoneType
import ast
import manipulations
import fields
system_home = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
import config
class StrategyGenerator:
#Constructor
def __init__(self, lg, res_lg):
self.lg = lg
self.results = res_lg
self.ck_file = None
self.do_ckpt = False
self.pkt_actions = dict()
self.strat_list = []
self.tested_list = []
self.unique_tested = 0
self.bw_threshold = None
self.connection_threshold = None
self.bw_collection = []
self.connection_collection = []
def next_strategy(self):
if len(self.strat_list) == 0:
self.checkpoint()
return None
if len(self.strat_list) % 10 == 0:
print "Strategies Left: %d" % (len(self.strat_list))
self.lg.write("[%s] Strategies Left: %d" % (str(datetime.today()),len(self.strat_list)))
if len(self.strat_list) % 100 == 0:
self.checkpoint()
return self.strat_list.pop(0)
def return_strategy(self, strat):
self.strat_list.insert(0,strat)
def report_results(self, strat, info, perf):
bw,conn = perf
if strat.find("NONE") >= 0:
#Set Performance Thresholds
self.bw_collection.append(bw)
self.connection_collection.append(conn)
self.bw_threshold = sum(self.bw_collection)/len(self.bw_collection)
self.connection_threshold = sum(self.connection_collection)/len(self.connection_collection)
#Build info
d = dict()
d['strat'] = strat
d['bw'] = bw
d['conn'] = conn
d['info'] = info
d['bw_thres'] = self.bw_threshold
d['conn_thres'] = self.connection_threshold
d['time'] = str(datetime.today())
#Determine if attack
if self._is_attack(bw,conn):
d['result'] = "FAILED"
#check if this is a retry
retried = False
for t in self.tested_list:
if t['strat'] == strat:
retried = True
break
#Retry one time
if not retried:
self.strat_list.insert(0,strat)
self.lg.write("[%s] Strategy \"%s\" Failed! RETRYING\n" % (str(datetime.today()),strat))
self.lg.flush()
else:
self.results.write("%s\n"%(repr(d)))
self.results.flush()
self.lg.write("[%s] Strategy \"%s\" Failed! RECORDING\n" % (str(datetime.today()),strat))
self.lg.flush()
print "[%s] Strategy \"%s\" Failed! RECORDING\n" % (str(datetime.today()),strat)
self.unique_tested += 1
else:
d['result'] = "PASSED"
self.unique_tested += 1
#Add to strategy list
self.tested_list.append(d)
self._process_feedback(strat,info)
return
def _is_attack(self,bw,conn):
        if self.bw_threshold is None or self.connection_threshold is None:
print "Warning: Performance Thresholds not established yet!!"
return False
if bw < 15:
return True
if bw > 1.5*self.bw_threshold:
return True
if bw < 0.5*self.bw_threshold:
return True
if conn > self.connection_threshold:
return True
return False
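    # e.g. with bw_threshold=100 and connection_threshold=1, runs measuring
    # bw=160 (> 1.5x), bw=40 (< 0.5x), bw=10 (< 15), or conn=2 are all flagged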
def _process_feedback(self,strat,fb):
adding = False
fb = self._build_fb_dict(fb)
#Proxy only considers server state
if 'server' not in fb:
return
for state in fb['server']:
for metric in fb['server'][state]:
#Packets received by server
if metric.find("r_pkt_cnt_") >= 0:
pkt = metric.replace("r_pkt_cnt_","")
if pkt in self.pkt_actions:
if state not in self.pkt_actions[pkt]['fw_manip_testing']:
self.pkt_actions[pkt]['fw_manip_testing'].append(state)
for s in self.pkt_actions[pkt]['manip_list']:
strategy = "{st}?{d}?{act}".format(st=state,d="1",act=s)
self.strat_list.append(strategy)
adding = True
#Packets sent by server
if metric.find("s_pkt_cnt_") >= 0:
pkt = metric.replace("s_pkt_cnt_","")
if pkt in self.pkt_actions:
if state not in self.pkt_actions[pkt]['rv_manip_testing']:
self.pkt_actions[pkt]['rv_manip_testing'].append(state)
for s in self.pkt_actions[pkt]['manip_list']:
strategy = "{st}?{d}?{act}".format(st=state,d="0",act=s)
self.strat_list.append(strategy)
adding = True
if adding:
print "Strategies: %d" % (len(self.strat_list) + self.unique_tested)
self.lg.write("[%s] Strategies: %d" % (str(datetime.today()),len(self.strat_list)+self.unique_tested))
self.checkpoint()
return
def _build_fb_dict(self, fb):
d = dict()
for l in fb:
parts = l.split(",")
if len(parts) != 4:
continue
metric = parts[0]
host = parts[1]
state = parts[2]
num = parts[3]
if host not in d:
d[host] = dict()
if state not in d[host]:
d[host][state] = dict()
d[host][state][metric] = num
return d
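    # e.g. ["r_pkt_cnt_Request,server,CLOSED,3"] is parsed into
    # {'server': {'CLOSED': {'r_pkt_cnt_Request': '3'}}} (counts stay strings)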
def build_strategies(self):
#Generate Packet Actions
for k in fields.packet_format:
p = fields.packet_format[k]
#Init
if p['name'] not in self.pkt_actions:
self.pkt_actions[p['name']] = dict()
self.pkt_actions[p['name']]['fw_manip_testing'] = []
self.pkt_actions[p['name']]['rv_manip_testing'] = []
self.pkt_actions[p['name']]['manip_list'] = []
self.pkt_actions[p['name']]['inject_list'] = []
#Inject Strategies
self._build_inject_strats(p)
#Delivery Strategies
for a in manipulations.delivery_manipulations:
self.pkt_actions[p['name']]['manip_list'].append("{msg} {act}".format(msg=p['name'],act=a))
#Modification Strategies
i = 0
for f in p['fields']:
if 'bitfield' in f:
for s in manipulations.bit_field_manipulations[f['bitfield']]:
strat = "{msg} LIE {act} {field}".format(msg=p['name'],act=s,field=i)
self.pkt_actions[p['name']]['manip_list'].append(strat)
else:
for s in manipulations.field_manipulations[f['length']]:
strat = "{msg} LIE {act} {field}".format(msg=p['name'],act=s,field=i)
self.pkt_actions[p['name']]['manip_list'].append(strat)
            i += 1
#Prime Strategy List
self.strat_list.append("*?*?BaseMessage NONE 0")
self.strat_list.append("*?*?BaseMessage NONE 1")
self.strat_list.append("*?*?BaseMessage NONE 2")
for k in self.pkt_actions:
for s in self.pkt_actions[k]['inject_list']:
self.strat_list.append("*?*?" + s)
print "Initial Strategies: %d" % (len(self.strat_list))
self.lg.write("[%s] Initial Strategies: %d" % (str(datetime.today()),len(self.strat_list)))
def _build_inject_strats(self,p):
if config.protocol == "TCP":
strat = "{msg} INJECT t=10 0 {cip} {sip} 0={cp} 1={sp} 2=111 5=5 10={win}".format(msg=p['name'],cip=config.client_ip,sip=config.server_ip,cp=config.client_port,sp=config.server_port,win=config.default_window)
self.pkt_actions[p['name']]['inject_list'].append(strat)
strat = "{msg} INJECT t=10 0 {sip} {cip} 0={sp} 1={cp} 2=111 5=5 10={win}".format(msg=p['name'],cip=config.client_ip,sip=config.server_ip,cp=config.client_port,sp=config.server_port,win=config.default_window)
self.pkt_actions[p['name']]['inject_list'].append(strat)
strat = "{msg} WINDOW w={win} t=10 {cip} {sip} 0={cp} 1={sp} 5=5".format(msg=p['name'],cip=config.client_ip,sip=config.server_ip,cp=config.client_port,sp=config.server_port,win=config.default_window)
self.pkt_actions[p['name']]['inject_list'].append(strat)
strat = "{msg} WINDOW w={win} t=10 {sip} {cip} 0={sp} 1={cp} 5=5".format(msg=p['name'],cip=config.client_ip,sip=config.server_ip,cp=config.client_port,sp=config.server_port,win=config.default_window)
self.pkt_actions[p['name']]['inject_list'].append(strat)
elif config.protocol == "DCCP":
size = 0
if p['name'] == "BaseMessage":
size = 3
elif p['name'] == "Data":
size = 4
elif p['name'] == "Request":
size = 5
elif p['name'] == "Reset":
size = 7
elif p['name'] == "Response":
size = 7
else:
size = 6
strat = "{msg} INJECT t=10 0 {cip} {sip} 0={cp} 1={sp} 2={sz} 6=1 11=111".format(msg=p['name'],cip=config.client_ip,sip=config.server_ip,cp=config.client_port,sp=config.server_port,win=config.default_window,sz=size)
self.pkt_actions[p['name']]['inject_list'].append(strat)
strat = "{msg} INJECT t=10 0 {sip} {cip} 0={sp} 1={cp} 2={sz} 6=1 11=111".format(msg=p['name'],cip=config.client_ip,sip=config.server_ip,cp=config.client_port,sp=config.server_port,win=config.default_window,sz=size)
self.pkt_actions[p['name']]['inject_list'].append(strat)
strat = "{msg} WINDOW w={win} t=10 {cip} {sip} 0={cp} 1={sp} 2={sz} 6=1".format(msg=p['name'],cip=config.client_ip,sip=config.server_ip,cp=config.client_port,sp=config.server_port,win=config.default_window,sz=size)
self.pkt_actions[p['name']]['inject_list'].append(strat)
strat = "{msg} WINDOW w={win} t=10 {sip} {cip} 0={sp} 1={cp} 2={sz} 6=1".format(msg=p['name'],cip=config.client_ip,sip=config.server_ip,cp=config.client_port,sp=config.server_port,win=config.default_window,sz=size)
self.pkt_actions[p['name']]['inject_list'].append(strat)
else:
print "Warning: Unknown protocol, no Injection strategies generated!"
def enable_checkpointing(self, f):
self.ck_file = f
self.do_ckpt = True
self.checkpoint()
def checkpoint(self):
if self.do_ckpt and self.ck_file is not None:
self.lg.write("[%s] Making Checkpoint\n" % (str(datetime.today())))
print "[%s] Making Checkpoint" % (str(datetime.today()))
#Create backup
bkup = {}
bkup['version'] = 0
bkup['pkt_actions'] = self.pkt_actions
bkup['strat_list'] = self.strat_list
bkup['tested_list'] = self.tested_list
bkup['unique_tested'] = self.unique_tested
bkup['bw_threshold'] = self.bw_threshold
bkup['connection_threshold'] = self.connection_threshold
bkup['bw_collection'] = self.bw_collection
bkup['connection_collection'] = self.connection_collection
#Format
pp = pprint.PrettyPrinter()
fmtbkup = pp.pformat(bkup)
#Write backup
try:
self.ck_file.seek(0)
self.ck_file.truncate()
self.ck_file.write(fmtbkup)
self.ck_file.flush()
except Exception as e:
print "[%s] Checkpoint Failed: %s" % (str(datetime.today()),str(e))
return
self.lg.write("[%s] Checkpoint Finished\n" % (str(datetime.today())))
print "[%s] Checkpoint Finished" % (str(datetime.today()))
def restore(self, f):
#Read backup
try:
            inp = f.read()
bkup = ast.literal_eval(inp)
except Exception as e:
print "[%s] Failed to read checkpoint: %s" % (str(datetime.today()),str(e))
f.close()
return False
#Restore Backup
if bkup['version'] != 0:
print "Warning: Checkpoint is incompatable!!!"
f.close()
return False
self.pkt_actions = bkup['pkt_actions']
self.strat_list = bkup['strat_list']
self.tested_list = bkup['tested_list']
self.unique_tested = bkup['unique_tested']
self.bw_threshold = bkup['bw_threshold']
self.connection_threshold = bkup['connection_threshold']
self.bw_collection = bkup['bw_collection']
self.connection_collection = bkup['connection_collection']
f.close()
self.lg.write("[%s] Restore Finished\n" % (str(datetime.today())))
print "[%s] Restore Finished" % (str(datetime.today()))
return True
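# A minimal, standalone sketch of the checkpoint round trip used above (the
# demo data is hypothetical): checkpoint() serializes state with
# pprint.pformat, which emits a plain Python literal, and restore() parses it
# back with ast.literal_eval.
import pprint
import ast
_demo_state = {'version': 0, 'strat_list': ['*?*?BaseMessage NONE 0'], 'unique_tested': 0}
assert ast.literal_eval(pprint.PrettyPrinter().pformat(_demo_state)) == _demo_state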
| 45.726415
| 231
| 0.510213
| 14,202
| 0.976687
| 0
| 0
| 0
| 0
| 0
| 0
| 2,846
| 0.195722
|
757f3810745dc98b37ec435828ecf0e2aaa534d5
| 1,212
|
py
|
Python
|
app/file2mysql.py
|
ToHanwei/CORD
|
09f75b136431222ec945b2ddd6798ae805ec332e
|
[
"MIT"
] | null | null | null |
app/file2mysql.py
|
ToHanwei/CORD
|
09f75b136431222ec945b2ddd6798ae805ec332e
|
[
"MIT"
] | null | null | null |
app/file2mysql.py
|
ToHanwei/CORD
|
09f75b136431222ec945b2ddd6798ae805ec332e
|
[
"MIT"
] | null | null | null |
#!coding:utf-8
import os
import sys
import pymysql
def connect2mysql():
try:
conn = pymysql.connect(
host = '10.15.50.100',
port = 3306,
user= 'root',
password = 'Zhaolab@C809!!',
db = 'CORdbPro',
charset = 'utf8',
use_unicode=True)
except Exception as e:
print(e)
else:
print("connect seccess")
return(conn)
def create_table(cur, conn, tablename):
cur.execute("CREATE table "
+ tablename
+ "(id INT PRIMARY KEY AUTO_INCREMENT,"
+ "filename VARCHAR(100),"
+ "data MEDIUMBLOB);")
conn.commit()
def insert_data(cur, conn, infile, tablename):
with open(infile, 'rb') as fopen:
fread = fopen.read()
content = pymysql.Binary(fread)
filename = os.path.basename(infile)
#filename = filename.split('-')[0]
insert_sql="INSERT INTO "+tablename+" (filename, data) VALUES (%s, %s)"
if cur.execute(insert_sql , (filename, content)):
conn.commit()
else:
        print('write failed')
def main():
indir = sys.argv[1]
    conn = connect2mysql()
cur = conn.cursor()
create_table(cur, conn, indir)
for infile in os.listdir(indir):
infile = os.path.join(indir, infile)
insert_data(cur, conn, infile, indir)
if __name__ == "__main__":
main()
| 19.548387
| 73
| 0.660066
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 291
| 0.240099
|
757fe53371e91dc422879bc5ad40243b0d086700
| 2,478
|
py
|
Python
|
start_simple_test.py
|
rartino/python-optimade-server
|
84457091c7ec0db52a7e034bb6a7cd4bcbdd4e57
|
[
"MIT"
] | null | null | null |
start_simple_test.py
|
rartino/python-optimade-server
|
84457091c7ec0db52a7e034bb6a7cd4bcbdd4e57
|
[
"MIT"
] | null | null | null |
start_simple_test.py
|
rartino/python-optimade-server
|
84457091c7ec0db52a7e034bb6a7cd4bcbdd4e57
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2019 Rickard Armiento
#
# This file is part of a Python candidate reference implementation of
# the optimade API [https://www.optimade.org/]
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
This is part of a Python candidate reference implementation of the
optimade API [https://www.optimade.org/].
This program runs a simple test query against the example_sqlite3 backend.
'''
from __future__ import print_function
import os, sys
from pprint import pprint
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),'src'))
from parse import parse_optimade_filter
if __name__ == "__main__":
import backends.example_sqlite3 as backend
backend.initialize()
# This represents the query being received (later to be received via a web URL query)
tables = ["structures"]
response_fields = ["id", "chemical_formula", "elements"]
if len(sys.argv) >= 2:
input_string = 'filter='+sys.argv[1]
else:
input_string = 'filter=elements="Ga,Ti" AND (nelements=3 OR nelements=2)'
response_limit = 50
filter_ast = parse_optimade_filter(input_string)
print("==== FILTER STRING PARSE RESULT:")
pprint(filter_ast)
print("====")
result = backend.execute_query(tables, response_fields, response_limit, filter_ast, debug=True)
print("==== END RESULT")
pprint(list(result))
print("===============")
backend.close()
| 34.901408
| 99
| 0.726796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,691
| 0.682405
|
7581ce931238117bdcd49cbe392056bdbbeb384d
| 2,609
|
py
|
Python
|
examples/deep_dream.py
|
vacancy/LibNeuralArt
|
fb7696877ac2bf08e1e4e46caec9ccd14ce4797c
|
[
"MIT"
] | 1
|
2022-03-09T14:38:01.000Z
|
2022-03-09T14:38:01.000Z
|
examples/deep_dream.py
|
vacancy/LibNeuralArt
|
fb7696877ac2bf08e1e4e46caec9ccd14ce4797c
|
[
"MIT"
] | null | null | null |
examples/deep_dream.py
|
vacancy/LibNeuralArt
|
fb7696877ac2bf08e1e4e46caec9ccd14ce4797c
|
[
"MIT"
] | null | null | null |
import os
import argparse
import cv2
import numpy as np
import tensorflow as tf
from nart import opr, aopr
from nart.model import VGG16
from nart.logconf import logger
LEARNING_RATE = 1.5
JITTER = 32
as_netin = lambda x: x[np.newaxis, :]
def make_step(sess, net, end):
    ''' run a single gradient-ascent step for the given end layer '''
# random draw ox, oy
ox, oy = np.random.randint(-JITTER, JITTER+1, 2)
img = sess.run(net['input'])[0]
img = np.roll(np.roll(img, ox, 1), oy, 0) # apply jitter shift
# compute the gradient
    # one should note that we actually use an L2 loss on an activation map
    # to compute the gradient for the input
sess.run(net['input'].assign(as_netin(img)))
target = net[end]
loss = 0.5 * tf.reduce_mean(tf.pow(target, 2))
grad = tf.gradients(loss, [net['input']])[0]
grad = sess.run(grad)[0]
# apply gradient ascent, with normalized gradient
img += LEARNING_RATE / np.abs(grad).mean() * grad
img = np.clip(img, 0, 255)
img = np.roll(np.roll(img, -ox, 1), -oy, 0) # unshift image
sess.run(net['input'].assign(as_netin(img)))
def main(args):
# read the image, and load the network
img = cv2.imread(args.image_path)
net = VGG16(args.weight_path, img.shape[0], img.shape[1])
os.makedirs(args.output_path, exist_ok=True)
# initialize the session
sess = tf.Session()
sess.run(tf.initialize_all_variables())
sess.run(net['input'].assign(as_netin(img)))
for i in range(0, args.nr_iters+1):
if i != 0:
make_step(sess, net, end=args.end)
# save the result image every ``args.save_step'' iterations
if i % args.save_step == 0:
current_img = sess.run(net['input'])[0]
output_path = os.path.join(args.output_path, 'epoch_{:04d}.png'.format(i))
cv2.imwrite(output_path, current_img)
logger.info('epoch {}: image written to {}'.format(i, output_path))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-w', dest='weight_path', required=True, help='weight path')
parser.add_argument('-i', dest='image_path', required=True, help='input image path')
parser.add_argument('-o', dest='output_path', required=True, help='output directory')
parser.add_argument('-e', '--end', dest='end', default='conv5_3', help='end')
parser.add_argument('--iter', dest='nr_iters', type=int, default=100, help='number of iterations')
parser.add_argument('--save-step', dest='save_step', type=int, default=5, help='save step (in iteration)')
main(parser.parse_args())
| 33.448718
| 110
| 0.651974
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 723
| 0.277118
|
75822d824753f70d530800d691025e523bb8dcb9
| 1,079
|
py
|
Python
|
5.py
|
niharikasingh/aoc2018
|
21d430d393321e6066eca22d7c6b49e5eb42d756
|
[
"MIT"
] | null | null | null |
5.py
|
niharikasingh/aoc2018
|
21d430d393321e6066eca22d7c6b49e5eb42d756
|
[
"MIT"
] | null | null | null |
5.py
|
niharikasingh/aoc2018
|
21d430d393321e6066eca22d7c6b49e5eb42d756
|
[
"MIT"
] | null | null | null |
import re
text = ''
with open('5input1.txt', 'r') as ifile:
text = ifile.read().strip()
def find_length(text):
text = list(text)
t0 = ''
t1 = ''
restart = True
while (restart):
restart = False
loop = len(text) - 1
i = 0
# print(text)
while (i < loop):
# print(i)
t0 = text[i]
t1 = text[i+1]
if (t0 != t1) and (t0.upper() == t1.upper()):
restart = True
# print("removing", t0, t1)
del text[i]
del text[i]
loop -= 2
i -= 1
else:
i += 1
# print(''.join(text))
return len(text)
current_min = len(text)
for a in list('abcdefghijklmnopqrstuvwxyz'):
to_remove = a + a.upper()
new_text = re.sub('[' + to_remove + ']', '', text)
# print("removing:", to_remove, "result:", new_text)
new_min_to_test = find_length(new_text)
# print(a, new_min_to_test)
current_min = min(current_min, new_min_to_test)
print(current_min)
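# Equivalent stack-based reduction, a common linear-time alternative to the
# repeated-scan find_length above; included only as a sketch of the same idea.
def find_length_stack(polymer):
    stack = []
    for c in polymer:
        # a unit reacts with the previous one when it is the same letter in
        # the opposite case
        if stack and c != stack[-1] and c.upper() == stack[-1].upper():
            stack.pop()
        else:
            stack.append(c)
    return len(stack)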
| 25.690476
| 57
| 0.489342
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 209
| 0.193698
|
758282c4a758ece35ff671a6c5cb62d706708b74
| 2,903
|
py
|
Python
|
game/tests/node_test.py
|
madvid/42_Gomoku
|
e57ac625d977d874dc5791e5455d7c145669c6d8
|
[
"MIT"
] | null | null | null |
game/tests/node_test.py
|
madvid/42_Gomoku
|
e57ac625d977d874dc5791e5455d7c145669c6d8
|
[
"MIT"
] | null | null | null |
game/tests/node_test.py
|
madvid/42_Gomoku
|
e57ac625d977d874dc5791e5455d7c145669c6d8
|
[
"MIT"
] | null | null | null |
from board import Node
from metrics import *
parent = Node(None, np.zeros((5,5)), 1)
parent.nb_free_three = 0
def test_node1():
g = np.array([
[1, -1, -1],
[1, -1, -1],
[1, 1, 0]
])
n = Node(parent, g, BLACK)
next_mv = n.generate_next_moves()
assert (next_mv[0].grid == np.array([
[1, -1, -1],
[1, -1, -1],
[1, 1, -1]
])).all()
assert next_mv[0].color == WHITE
def test_node2():
g = np.array([
[0, -1, -1],
[1, -1, -1],
[1, 1, 1]
])
n = Node(parent, g, BLACK)
next_mv = n.generate_next_moves()
assert (next_mv[0].grid == np.array([
[-1, -1, -1],
[1, -1, -1],
[1, 1, 1]
])).all()
assert next_mv[0].color == WHITE
def test_row1():
g = np.array([
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, -1, -1, 0],
[1, 1, 1, 1],
[1, 1, 1, 1]
])
n = Node(parent, g, WHITE)
next_mv = n.generate_next_moves()
assert (next_mv[0].grid == np.array([
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 0, 0, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]
])).all()
def test_col1():
g = np.array([
[1, 0, 1, 1],
[1, -1, 1, 1],
[1, -1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]
])
n = Node(parent, g, WHITE)
next_mv = n.generate_next_moves()
assert (next_mv[0].grid == np.array([
[1, 1, 1, 1],
[1, 0, 1, 1],
[1, 0, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]
])).all()
def test_diag1():
g = np.array([
[1, 1, 1, 1],
[1, -1, 1, 1],
[1, 1, -1, 1],
[1, 1, 1, 0],
[1, 1, 1, 1]
])
n = Node(parent, g, WHITE)
next_mv = n.generate_next_moves()
assert (next_mv[0].grid == np.array([
[1, 1, 1, 1],
[1, 0, 1, 1],
[1, 1, 0, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]
])).all()
def test_diag2():
g = np.array([
[0, 1, 1, 1],
[1, -1, 1, 1],
[1, 1, -1, 1],
[1, 1, 1, 1]
])
n = Node(parent, g, WHITE)
next_mv = n.generate_next_moves()
assert (next_mv[0].grid == np.array([
[1, 1, 1, 1],
[1, 0, 1, 1],
[1, 1, 0, 1],
[1, 1, 1, 1]
])).all()
def test_rdiag1():
g = np.array([
[1, 1, 1, 1, 1],
[1, 1, 1, 0, 1],
[1, 1, -1, 1, 1],
[1, -1, 1, 1, 1],
[1, 1, 1, 1, 1]
])
n = Node(parent, g, WHITE)
next_mv = n.generate_next_moves()
assert (next_mv[0].grid == np.array([
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[1, 0, 1, 1, 1],
[1, 1, 1, 1, 1]
])).all()
| 23.224
| 41
| 0.349983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
7584210fe482f4212d8e7879d8d01a58011b39a4
| 1,122
|
py
|
Python
|
venv/Lib/site-packages/pyo/examples/22-events/08-function-calls.py
|
mintzer/pupillometry-rf-back
|
cfa86fa984a49dce0123798f8de5b838c02e10d5
|
[
"CC-BY-4.0"
] | null | null | null |
venv/Lib/site-packages/pyo/examples/22-events/08-function-calls.py
|
mintzer/pupillometry-rf-back
|
cfa86fa984a49dce0123798f8de5b838c02e10d5
|
[
"CC-BY-4.0"
] | null | null | null |
venv/Lib/site-packages/pyo/examples/22-events/08-function-calls.py
|
mintzer/pupillometry-rf-back
|
cfa86fa984a49dce0123798f8de5b838c02e10d5
|
[
"CC-BY-4.0"
] | null | null | null |
"""
08-function-calls.py - Using custom algorithms with python function calls.
**EventCall** ::
EventCall(function, *args, occurrences=inf, stopEventsWhenDone=True)
EventCall calls a function, with any number of arguments (\*args) and uses
its return value for the given parameter. The example below use a function
from the random module, *randrange*, with arguments and a user-defined
function, without argument, to create a rising, then falling, amplitude curve.
"""
import random
from pyo import *
s = Server().boot()
db = -30
dir = 1
def riseFallAmp():
"Rises and falls amplitude between -30 and -3 dB, 1 db at the time."
global db, dir
db += dir
if db >= -3:
dir = -1
elif db < -30:
dir = 1
return db
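# Successive riseFallAmp calls therefore trace a triangle wave in dB:
# -29, -28, ..., -3, then back down -4, -5, ... before rising again.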
# Midi notes are chosen randomly with a function from the random module,
# while the amplitude change according to the riseFallAmp function's output.
e = Events(
midinote=EventCall(random.randrange, 48, 72, 3),
beat=1 / 4.0,
db=EventCall(riseFallAmp),
attack=0.001,
decay=0.05,
sustain=0.5,
release=0.005,
).play()
s.gui(locals())
| 22.897959
| 78
| 0.680036
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 692
| 0.616756
|
7584b9125e63b40a450fd83b83b2635f253ee500
| 1,824
|
py
|
Python
|
modules/cisne_api.py
|
ppcamp/ReportCovid19Itabira
|
a48cb20072a1e9cecdaab589876f5b0e2e3440b6
|
[
"MIT"
] | 1
|
2020-06-23T14:54:19.000Z
|
2020-06-23T14:54:19.000Z
|
modules/cisne_api.py
|
ppcamp/ReportCovid19Itabira
|
a48cb20072a1e9cecdaab589876f5b0e2e3440b6
|
[
"MIT"
] | 8
|
2020-06-20T16:33:23.000Z
|
2020-07-31T17:33:05.000Z
|
modules/cisne_api.py
|
ppcamp/ReportCovid19Itabira
|
a48cb20072a1e9cecdaab589876f5b0e2e3440b6
|
[
"MIT"
] | null | null | null |
import requests
import json
import pandas as pd
from datetime import datetime
def GetEpidemicWeek(curr_date):
'''
Parameters
----------
curr_date: (str) Date in yyyy-mm-dd
Return
------
(int) Week number of year
'''
_aux = datetime.strptime(curr_date, '%Y-%m-%d')
return _aux.isocalendar()[1]
def CisneGetData(curr_date, save_json=False):
'''
Parameters
----------
curr_date: (str) Date in yyyy-mm-dd
Return
------
(DataFrame) obj
(True) if save successfully the file when save_json is enabled.
Example
-------
>> CisneGetData('2020-06-07')
'''
LOGIN_URL = "http://intranet.transportescisne.com.br/swan/login"
# Fill in your details here to be posted to the login form.
payload = json.load( open('credentials/cisne_credentials.json') )
# Use 'with' to ensure the session context is closed after use.
with requests.Session() as S:
S.post(LOGIN_URL, data=payload)
# print("Logged successfully!")
r = S.get("http://intranet.transportescisne.com.br/swan/api/passageiros/"+curr_date)
if save_json:
with open('json-'+curr_date, 'w') as file:
file.write(r.text)
return True
# Transform str to json object like
json_response = json.loads(r.text)
_linha = []
_sentido = []
_faixahr = []
_passageiros = []
for i in json_response:
_linha.append(i['linha'])
_sentido.append(i['sentido'])
_faixahr.append(i['faixahr'])
_passageiros.append(i['passageiros'])
return pd.DataFrame({
'linha':_linha,
'sentido':_sentido,
'faixahr':_faixahr,
'passageiros':_passageiros
})
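# Usage sketch for GetEpidemicWeek (the date is an arbitrary example):
# 2020-06-07 falls in ISO week 23 of 2020.
if __name__ == "__main__":
    assert GetEpidemicWeek('2020-06-07') == 23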
| 26.057143
| 92
| 0.576754
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 820
| 0.449561
|
75858d15ba85e9ff5541366ae7ab4ccf2759852d
| 2,048
|
py
|
Python
|
main.py
|
Andrey22/Python_Lesson2_Neural_University
|
014f8da8e3002e081aba3fb1ce9dcf56e5af1d57
|
[
"MIT"
] | null | null | null |
main.py
|
Andrey22/Python_Lesson2_Neural_University
|
014f8da8e3002e081aba3fb1ce9dcf56e5af1d57
|
[
"MIT"
] | null | null | null |
main.py
|
Andrey22/Python_Lesson2_Neural_University
|
014f8da8e3002e081aba3fb1ce9dcf56e5af1d57
|
[
"MIT"
] | null | null | null |
'''
Task 1
Using a loop, print five lines of zeros, numbering each line.
'''
print ('Task1')
for i in range(5):
i+=1
print(i,'00000')
'''
Task 2
The user enters 10 digits in a loop. Count how many of the entered digits are 5.
'''
print ('Task2')
count=0
for i in range(10):
    number = int(input('Enter 1 of 10 digits'))
if number==5:
count+=1
print ('The count of digit 5 is', count)
'''
Task 3
Find the sum of the numbers from 1 to 100 and print the result.
'''
print ('Task3')
countnum=0
for i in range(101):
countnum+=i
print (countnum)
'''
Task 4
Find the product of the numbers from 1 to 10 and print the result.
'''
print ('Task4')
countnum = 1
for i in range(1,11,1):
countnum*=i
print (countnum)
'''
Task 5
Print each digit of a number on its own line.
'''
print ('Task5')
number1 = int(input('Enter a number'))
while number1>0:
x = number1
x%=10
print (x)
number1//=10
'''
Task 6
Find the sum of the digits of a number.
'''
print ('Task6')
number1 = int(input('Enter a number'))
sum=0
while number1>0:
x = number1
x%=10
sum+=x
number1//=10
print (sum)
'''
Task 7
Find the product of the digits of a number.
'''
print ('Task7')
number1 = int(input('Enter a number'))
multi=1
while number1>0:
x = number1
x%=10
multi*=x
number1//=10
print (multi)
'''
Task 8
Answer the question: is the digit 5 among the digits of the number?
'''
print ('Task8')
number = int(input('Enter a number'))
while number>0:
x = number
x%=10
number //= 10
if x == 5:
print ('Yes')
break
else:
print ('No')
'''
Task 9
Find the largest digit of a number.
'''
print ('Task9')
number = int(input('Enter a number'))
max=0
while number>0:
x = number
x%=10
number //= 10
if x > max:
max=x
print (max)
'''
Task 10
Count how many times the digit 5 occurs in a number.
'''
print ('Task10')
count=0
number = int(input('Enter a number'))
while number>0:
x = number
x%=10
number //= 10
if x == 5:
count+=1
print (count)
| 17.210084
| 92
| 0.631348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,523
| 0.576239
|
7585d58771f71f59f83ac3fcba41c6ed4b3d1ae1
| 1,836
|
py
|
Python
|
aboutdialog.py
|
ShawnDriscoll/PyQt5-Dice-App
|
90d64db322b5330800be43247d46e196258f6a6b
|
[
"MIT"
] | null | null | null |
aboutdialog.py
|
ShawnDriscoll/PyQt5-Dice-App
|
90d64db322b5330800be43247d46e196258f6a6b
|
[
"MIT"
] | null | null | null |
aboutdialog.py
|
ShawnDriscoll/PyQt5-Dice-App
|
90d64db322b5330800be43247d46e196258f6a6b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'aboutdialog.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_aboutDialog(object):
def setupUi(self, aboutDialog):
aboutDialog.setObjectName("aboutDialog")
aboutDialog.resize(285, 239)
aboutDialog.setMinimumSize(QtCore.QSize(285, 239))
aboutDialog.setMaximumSize(QtCore.QSize(285, 239))
self.aboutOKButton = QtWidgets.QPushButton(aboutDialog)
self.aboutOKButton.setGeometry(QtCore.QRect(110, 190, 75, 23))
self.aboutOKButton.setObjectName("aboutOKButton")
self.textLabel = QtWidgets.QLabel(aboutDialog)
self.textLabel.setGeometry(QtCore.QRect(50, 50, 191, 111))
self.textLabel.setTextFormat(QtCore.Qt.RichText)
self.textLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.textLabel.setWordWrap(True)
self.textLabel.setObjectName("textLabel")
self.retranslateUi(aboutDialog)
QtCore.QMetaObject.connectSlotsByName(aboutDialog)
def retranslateUi(self, aboutDialog):
_translate = QtCore.QCoreApplication.translate
aboutDialog.setWindowTitle(_translate("aboutDialog", "About Dice Roll"))
self.aboutOKButton.setText(_translate("aboutDialog", "OK"))
self.textLabel.setText(_translate("aboutDialog", "<html><head/><body><p align=\"center\"><br/><span style=\" font-size:10pt;\">Dice Roll</span></p><p align=\"center\"><span style=\" font-size:10pt;\">by </span></p><p align=\"center\"><span style=\" font-size:10pt;\">Shawn Driscoll</span></p></body></html>"))
| 48.315789
| 317
| 0.706972
| 1,496
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0
| 641
| 0.349129
|
7586aaf36cfc9aa4004d62afa11753f68be84c72
| 5,351
|
py
|
Python
|
PHASE_2/Application_SourceCode/backend/covid_utils.py
|
vicinx3/disease-outbreak
|
035e78875c374e2cdbd4720a4f2ed1370f63a88c
|
[
"MIT"
] | null | null | null |
PHASE_2/Application_SourceCode/backend/covid_utils.py
|
vicinx3/disease-outbreak
|
035e78875c374e2cdbd4720a4f2ed1370f63a88c
|
[
"MIT"
] | null | null | null |
PHASE_2/Application_SourceCode/backend/covid_utils.py
|
vicinx3/disease-outbreak
|
035e78875c374e2cdbd4720a4f2ed1370f63a88c
|
[
"MIT"
] | null | null | null |
import requests
import datetime
from db import convert_code
from pycountry_convert import country_name_to_country_alpha2
from pprint import pprint
import json
url = r'https://pomber.github.io/covid19/timeseries.json'
response = requests.get(url)
if response.status_code != 200:
print("Failed to connect to pomber")
def convert_country(country):
preset = {
'Congo (Brazzaville)': 'CG',
'Congo (Kinshasa)': 'CD',
'Cote d\'Ivoire': 'CI',
'Holy See': 'VA',
'Korea, South': 'KR',
'Taiwan*': 'TW',
'US': 'US',
'West Bank and Gaza': 'PS',
'Kosovo': 'XK',
'Burma': 'MM',
}
if country in preset:
return preset[country]
try:
return country_name_to_country_alpha2(country)
except Exception:
return False
result = response.json()
content = {}
for country in result:
code = convert_country(country)
if code:
content[code] = result[country]
def get_date(index):
date_str = content['AU'][index]['date']
return datetime.datetime.strptime(date_str, r'%Y-%m-%d')
first_date = get_date(0)
last_date = get_date(-1)
def get_last_day():
delta = last_date - first_date
return delta.days
total = []
for i in range(0, get_last_day() + 1):
total.append({
'confirmed': 0,
'recovered': 0,
'deaths': 0
})
for country in content:
for category in ['confirmed', 'recovered', 'deaths']:
total[i][category] += content[country][i][category]
######################
# Functions
######################
def get_codes():
return list(content.keys())
def get_countries():
result = {}
for code in content:
result[code] = convert_code(code)
return result
def get_slider_marks():
marks = []
template = r'%d %b'
marks.append({'value': 0, 'label': first_date.strftime(template)})
marks.append({'value': get_last_day(), 'label': last_date.strftime(template)})
for i in range(0, get_last_day() - 5, 14):
current_date = first_date + datetime.timedelta(days=i)
marks.append({'value': i, 'label': current_date.strftime(template)})
return marks
def get_cases_by_country_and_category(date, category, daily):
result = {}
for country in content:
if daily:
delta = content[country][date][category]
if date > 0:
delta -= content[country][date - 1][category]
result[country] = delta
else:
result[country] = content[country][date][category]
return result
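# Daily-delta sketch: with daily=True the value reported for day d is
# cumulative[d] - cumulative[d-1] (just cumulative[0] on day 0), so a
# cumulative series [5, 7, 12] yields daily values [5, 2, 5].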
def get_cases_by_country(date, prettify=False):
def calc_mortality(deaths, recovered):
total = deaths + recovered
return round(deaths * 100 / total, 2) if total > 0 else 0
result = []
for country in content:
current = content[country][date]
confirmed = current['confirmed']
recovered = current['recovered']
deaths = current['deaths']
mortality = calc_mortality(deaths, recovered)
result.append({
'country': convert_code(country),
'confirmed': confirmed,
'recovered': recovered,
'deaths': deaths,
'mortality': mortality
})
result.insert(0, {
'country': 'All countries',
'confirmed': total[date]['confirmed'],
'recovered': total[date]['recovered'],
'deaths': total[date]['deaths'],
'mortality': calc_mortality(total[date]['deaths'], total[date]['recovered'])
})
return result
def get_cases_by_day(daily):
result = {}
for category in ['confirmed', 'recovered', 'deaths']:
temp = []
for i in range(0, get_last_day() + 1):
current_date = first_date + datetime.timedelta(days=i)
if daily:
value = total[i][category]
if i > 0:
value -= total[i-1][category]
else:
value = total[i][category]
temp.append({
'date': current_date.strftime(r'%Y-%m-%d'),
'value': value
})
result[category] = temp
return result
def get_comparator_graph_data(country):
standard = {}
for category in ['confirmed', 'recovered', 'deaths']:
standard[category] = []
for i in range(0, get_last_day() + 1):
value = total[i][category] if country == '' else content[country][i][category]
standard[category].append({
'date': i,
'value': value
})
trajectory = []
for i in range(0, get_last_day() + 1):
if country == '':
get = lambda x: total[x]['confirmed']
else:
get = lambda x: content[country][x]['confirmed']
total_cases = get(i)
def daily_increase(j):
return get(j) - get(j-1) if j > 0 else get(j)
j = i
new_cases = 0
while (j >= 0 and i - j < 7):
new_cases += daily_increase(j)
j -= 1
new_cases = round(new_cases / (i - j))
if new_cases > 0:
trajectory.append({
'total': total_cases,
'new': new_cases
})
return {'standard': standard, 'trajectory': trajectory}
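# Trajectory sketch: each point pairs cumulative confirmed cases with the
# average daily increase over (up to) the trailing 7 days, the pairing used
# in log-log "growth trajectory" plots.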
| 27.869792
| 90
| 0.555971
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 785
| 0.146702
|
758800528ccfe0918aa562d413d55854aa70f801
| 2,398
|
py
|
Python
|
cdc_kafka/parsed_row.py
|
woodlee/sqlserver-cdc-to-kafka
|
602c17432a87c1aaee94dc6c971cde8496314fda
|
[
"MIT"
] | 10
|
2020-04-09T09:32:54.000Z
|
2021-10-04T09:20:59.000Z
|
cdc_kafka/parsed_row.py
|
woodlee/sqlserver-cdc-to-kafka
|
602c17432a87c1aaee94dc6c971cde8496314fda
|
[
"MIT"
] | 4
|
2019-10-04T14:15:32.000Z
|
2020-05-13T18:48:58.000Z
|
cdc_kafka/parsed_row.py
|
woodlee/sqlserver-cdc-to-kafka
|
602c17432a87c1aaee94dc6c971cde8496314fda
|
[
"MIT"
] | 6
|
2019-11-11T18:01:00.000Z
|
2021-06-09T09:49:57.000Z
|
import datetime
from functools import total_ordering
from typing import Tuple, Any, Dict, Optional
from . import change_index
@total_ordering
class ParsedRow(object):
def __init__(self, table_fq_name: str, row_kind: str, operation_name: str, event_db_time: datetime.datetime,
change_idx: Optional[change_index.ChangeIndex], ordered_key_field_values: Tuple[Any],
destination_topic: str, avro_key_schema_id: int, avro_value_schema_id: int,
key_dict: Dict[str, Any], value_dict: Dict[str, Any]) -> None:
self.table_fq_name: str = table_fq_name
self.row_kind: str = row_kind
self.operation_name: str = operation_name
self.event_db_time: datetime.datetime = event_db_time
self.change_idx: Optional[change_index.ChangeIndex] = change_idx
self.ordered_key_field_values: Tuple = ordered_key_field_values
self.destination_topic: str = destination_topic
self.avro_key_schema_id: int = avro_key_schema_id
self.avro_value_schema_id: int = avro_value_schema_id
self.key_dict: Dict[str, Any] = key_dict
self.value_dict: Dict[str, Any] = value_dict
def __eq__(self, other) -> bool:
if isinstance(other, ParsedRow):
return (self.table_fq_name, self.value_dict) == (other.table_fq_name, other.value_dict)
return False
def __lt__(self, other: 'ParsedRow') -> bool:
if other is None:
return False
if isinstance(other, ParsedRow):
self_tuple = (
self.change_idx or change_index.LOWEST_CHANGE_INDEX,
self.event_db_time,
self.table_fq_name
)
other_tuple = (
other.change_idx or change_index.LOWEST_CHANGE_INDEX,
other.event_db_time,
other.table_fq_name
)
if self_tuple != other_tuple:
return self_tuple < other_tuple
# I know it seems backwards, but it's because we read snapshot rows backwards by their PKs:
return self.ordered_key_field_values > other.ordered_key_field_values
raise Exception(f'Cannot compare ParsedRow to object of type "{type(other)}"')
def __repr__(self) -> str:
return f'ParsedRow from {self.table_fq_name} of kind {self.row_kind}, change index {self.change_idx}'
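# Ordering sketch: @total_ordering derives <=, > and >= from the __eq__ and
# __lt__ above. Rows order primarily by (change index, event db time, table
# name); only when those tuples tie does the inverted comparison of
# ordered_key_field_values apply, because snapshot rows are read in
# descending primary-key order.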
| 42.070175
| 112
| 0.662219
| 2,252
| 0.939116
| 0
| 0
| 2,268
| 0.945788
| 0
| 0
| 257
| 0.107173
|
75893c568f3d251f68a9d4ffb2aa6e88611b92ae
| 446
|
py
|
Python
|
mikeio/xyz.py
|
rhaDHI/mikeio
|
eb24503d935df969eac32569a41d223d6f0e2edf
|
[
"BSD-3-Clause"
] | 65
|
2019-11-27T13:42:52.000Z
|
2022-03-31T11:41:56.000Z
|
mikeio/xyz.py
|
rhaDHI/mikeio
|
eb24503d935df969eac32569a41d223d6f0e2edf
|
[
"BSD-3-Clause"
] | 178
|
2019-12-17T19:43:04.000Z
|
2022-03-31T06:54:06.000Z
|
mikeio/xyz.py
|
rhaDHI/mikeio
|
eb24503d935df969eac32569a41d223d6f0e2edf
|
[
"BSD-3-Clause"
] | 41
|
2019-12-17T18:21:04.000Z
|
2022-03-16T12:15:40.000Z
|
import pandas as pd
def read_xyz(filename):
# try:
df = pd.read_csv(filename, sep="\t", header=None)
if df.shape[1] == 1:
df = pd.read_csv(filename, sep=" ", header=None)
ncol = df.shape[1]
names = ["x", "y", "z", "name"]
df.columns = names[0:ncol]
return df
def dataframe_to_xyz(self, filename):
self.to_csv(filename, sep="\t", header=False, index=False)
pd.DataFrame.to_xyz = dataframe_to_xyz
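# Usage sketch (file names are placeholders): read_xyz returns a DataFrame
# with columns x, y, z[, name], and every DataFrame gains .to_xyz through the
# assignment above.
#   df = read_xyz("points.xyz")
#   df.to_xyz("copy.xyz")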
| 17.84
| 62
| 0.61435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 32
| 0.071749
|
7589bb1ca93e2908a7e4afbfc16f38fe65552b2e
| 646
|
py
|
Python
|
tests/conftest.py
|
josiahls/fast-reinforcement-learning
|
66136009dd7052d4a9c07631d5c170c9aeba67f3
|
[
"Apache-2.0"
] | 42
|
2019-08-06T14:09:43.000Z
|
2022-03-03T19:03:26.000Z
|
tests/conftest.py
|
josiahls/fast-reinforcement-learning
|
66136009dd7052d4a9c07631d5c170c9aeba67f3
|
[
"Apache-2.0"
] | 7
|
2019-08-12T23:04:25.000Z
|
2021-01-15T16:39:57.000Z
|
tests/conftest.py
|
josiahls/fast-reinforcement-learning
|
66136009dd7052d4a9c07631d5c170c9aeba67f3
|
[
"Apache-2.0"
] | 6
|
2019-09-20T20:08:59.000Z
|
2021-04-03T15:34:11.000Z
|
import pytest
def pytest_addoption(parser):
parser.addoption("--include_performance_tests", action="store_true",
help="Will run the performance tests which do full model testing. This could take a few"
"days to fully accomplish.")
@pytest.fixture()
def include_performance_tests(pytestconfig):
return pytestconfig.getoption("include_performance_tests")
@pytest.fixture()
def skip_performance_check(include_performance_tests):
if not include_performance_tests:
pytest.skip('Skipping due to performance argument not specified. Add --include_performance_tests to not skip')
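# Usage sketch (test name is hypothetical): a test opts into the gate by
# requesting the fixture.
#   def test_full_training(skip_performance_check):
#       ...  # body runs only when --include_performance_tests is passed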
| 35.888889
| 118
| 0.732198
| 0
| 0
| 0
| 0
| 354
| 0.547988
| 0
| 0
| 275
| 0.425697
|
758a746fea53069cc01b12087b264b7e85fe4798
| 534
|
py
|
Python
|
chassis/rechteck.py
|
ThePBone/RobomasterCheatsheet
|
14089f4a20d72700e653e291137a4cbc9d13b694
|
[
"MIT"
] | 4
|
2022-02-08T21:53:57.000Z
|
2022-03-27T21:28:20.000Z
|
chassis/rechteck.py
|
ThePBone/RobomasterCheatsheet
|
14089f4a20d72700e653e291137a4cbc9d13b694
|
[
"MIT"
] | null | null | null |
chassis/rechteck.py
|
ThePBone/RobomasterCheatsheet
|
14089f4a20d72700e653e291137a4cbc9d13b694
|
[
"MIT"
] | null | null | null |
from robomaster import robot
import time
ep_robot = robot.Robot()
xy_speed = 1/2 # m/s
z_speed = 90/2 # °/s
if __name__ == '__main__':
#ep_robot.initialize(conn_type="sta", sn="3JKDH6U0011J02")
ep_robot.initialize(conn_type="ap")
ep_chassis = ep_robot.chassis
for i in range(4):
        # 1 meter forward
ep_chassis.move(1, 0, 0, xy_speed).wait_for_completed()
time.sleep(50)
        # 90° turn
ep_chassis.move(0, 0, 90, 0, z_speed).wait_for_completed()
ep_robot.close()
| 24.272727
| 66
| 0.640449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 116
| 0.216822
|
758afd7cbb115376a34da86e1eeaae56905b6dcf
| 447
|
pyde
|
Python
|
processing/Mod. 6/sketch_6_1_l37/sketch_6_1_l37.pyde
|
nanam0rgana/2019-fall-polytech-cs
|
1a31acb3cf22edc930318dec17324b05dd7788d5
|
[
"MIT"
] | null | null | null |
processing/Mod. 6/sketch_6_1_l37/sketch_6_1_l37.pyde
|
nanam0rgana/2019-fall-polytech-cs
|
1a31acb3cf22edc930318dec17324b05dd7788d5
|
[
"MIT"
] | null | null | null |
processing/Mod. 6/sketch_6_1_l37/sketch_6_1_l37.pyde
|
nanam0rgana/2019-fall-polytech-cs
|
1a31acb3cf22edc930318dec17324b05dd7788d5
|
[
"MIT"
] | null | null | null |
def setup ():
size (500, 500)
smooth ()
background (255)
noStroke ()
colorMode(HSB)
flug = True
i=0
j=0
def draw ():
global i, j, flug
if(flug):
for i in range (10):
for j in range (5):
fill (10, random (0, 255) , random (10, 250))
rect(j*40+50 , i*40+50 , 35, 35)
rect ((10-j)*40+10 , i*40+50 , 35, 35)
def mouseClicked ():
    global flug
    flug = not flug
| 22.35
| 61
| 0.478747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
758b01fbbf221b2664b2728c99e75c2df92abd02
| 280
|
py
|
Python
|
src/main.py
|
lauhuiyik/same-page
|
decd8b5f45eeca750edfecd4b21a37103553ad9d
|
[
"MIT"
] | 1
|
2020-05-25T21:32:48.000Z
|
2020-05-25T21:32:48.000Z
|
src/main.py
|
lauhuiyik/same-page
|
decd8b5f45eeca750edfecd4b21a37103553ad9d
|
[
"MIT"
] | null | null | null |
src/main.py
|
lauhuiyik/same-page
|
decd8b5f45eeca750edfecd4b21a37103553ad9d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
##########
import web
from handlers.front import FrontPage
from handlers.home import HomePage
##########
urls = ('/home', 'HomePage',
'/', 'FrontPage')
app = web.application(urls, globals())
##########
if __name__ == "__main__":
app.run()
| 14
| 38
| 0.582143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 92
| 0.328571
|
758b72cef82f8f852b093d91ef15a93d7537c56c
| 3,758
|
py
|
Python
|
ssfunc/fansub.py
|
End-of-Eternity/ssfunc
|
5adbd5602ebc1db1a3cc3483c759c936d24ad705
|
[
"MIT"
] | 3
|
2021-07-20T22:25:36.000Z
|
2021-12-07T10:05:41.000Z
|
ssfunc/fansub.py
|
End-of-Eternity/ssfunc
|
5adbd5602ebc1db1a3cc3483c759c936d24ad705
|
[
"MIT"
] | null | null | null |
ssfunc/fansub.py
|
End-of-Eternity/ssfunc
|
5adbd5602ebc1db1a3cc3483c759c936d24ad705
|
[
"MIT"
] | 1
|
2021-09-20T19:09:55.000Z
|
2021-09-20T19:09:55.000Z
|
import ass
import subdigest
import subprocess
import os
def dump_subs(subsfile: str, subsdata: subdigest.Subtitles):
"""
    Exports subsdata to subsfile manually, instead of using dump_file(), to avoid the utf-8 encode warning.
"""
with open(subsfile, "w", encoding="utf_8_sig") as f:
for section in subsdata.sections.values():
f.write("\n".join(section.dump()))
f.write("\n\n")
def load_subs(subsfile: str):
"""
Loads up and parses subtitles from subsfile and returns subsdigest object.
"""
with open(subsfile, encoding="utf_8_sig") as f:
subsdata = subdigest.Subtitles(ass.parse(f), subsfile)
return subsdata
def crunchy_unroll(infile: str = None, styles: str = None):
"""
Restyles Crunchyroll subtitles using an external `styles` file.
"""
from util import get_episode_number
if infile.endswith(".ass"):
print("Processing subtitles.")
elif infile.endswith(".mkv"):
print("Demuxing subtitles")
subprocess.run(["mkvextract", "-q", "tracks", infile, f"2:{infile}.ass"])
infile += ".ass"
print("Processing subtitles.")
subs = load_subs(infile)
# Crunchyroll bad
subs.selection_set("style", "Top$")
subs.modify_field("text", "^", r"{\\an8}")
subs.modify_field("text", "}{", "")
subs.selection_set("style", "^Italics")
subs.modify_field("text", "^", r"{\\i1}")
subs.modify_field("text", "}{", "")
subs.selection_set("style", "^Main")
subs.modify_field("style", "^.*", "Dialogue")
subs.selection_set("style", "^Flashback")
subs.modify_field("style", "^.*", "Flashback")
subs.selection_set("style", "Top$")
subs.modify_field("style", "^.*", "Alt")
subs.selection_set("style", "^Italics")
subs.modify_field("style", "^.*", "Dialogue")
# nuke \N tags
subs.modify_field("text", r"\s*{\\i0}\s*\\N\s*{\\i1}\s*", " ")
subs.modify_field("text", r"\s*\\[Nn]\s*", " ")
subs.modify_field("text", r"\s*\\[Nn]", " ")
subs.modify_field("text", r"\\[Nn]\s*", " ")
subs.modify_field("text", r"\\[Nn]", " ")
# misc
subs.modify_field("text", "--", "—")
subs.use_styles()
subs.set_script_info("YCbCr Matrix", "TV.709")
subs.set_script_info("Script Updated By", "SeaSmoke")
# dump subs to temp file
ep = get_episode_number(infile)
temp = f"{ep}_temp.ass"
dump_subs(temp, subs)
# Loading video for resampling
video = infile.replace(".ass", "")
# Resampling subs using aegisub-cli
subprocess.run(["aegisub-cli", "--video", video, temp, temp, "tool/resampleres"])
# Copying styles from `styles` using prass
subprocess.run(
[
"python",
"-m",
"prass",
"copy-styles",
"--from",
styles,
"--to",
temp,
"-o",
temp,
]
)
# export subs file
subs = load_subs(temp)
dump_subs(infile.replace(".ass", "_fixed.ass"), subs)
# mux subs back into video
subprocess.run(
[
"mkvmerge",
"-o",
infile.replace(".ass", "").replace(".mkv", "_fixed.mkv"),
"-S",
"-A",
"--language",
"0:und",
video,
"-D",
"-S",
"--language",
"1:jpn",
video,
"-D",
"-A",
"--language",
"0:en",
"--track-name",
"0:[Smoke]",
infile.replace(".ass", "_fixed.ass"),
]
)
# Removing temporary files
os.remove(temp)
os.remove(infile)
os.remove(infile.replace(".ass", "_fixed.ass"))
print("Done!")
| 26.842857
| 99
| 0.537254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,425
| 0.378989
|
758bfb17d11799615d242c0ec597dafd07b4d3fa
| 1,955
|
py
|
Python
|
tbot/twitch_bot/functions/faceit.py
|
thomaserlang/tbot
|
99cfa204d86ef35cf2cc9482ae5a44abb35b443a
|
[
"MIT"
] | null | null | null |
tbot/twitch_bot/functions/faceit.py
|
thomaserlang/tbot
|
99cfa204d86ef35cf2cc9482ae5a44abb35b443a
|
[
"MIT"
] | 10
|
2022-02-14T11:40:20.000Z
|
2022-03-09T22:44:03.000Z
|
tbot/twitch_bot/functions/faceit.py
|
thomaserlang/tbot
|
99cfa204d86ef35cf2cc9482ae5a44abb35b443a
|
[
"MIT"
] | 1
|
2020-09-19T16:38:24.000Z
|
2020-09-19T16:38:24.000Z
|
import logging
from tbot.twitch_bot.var_filler import fills_vars, Send_error
from tbot import config
@fills_vars('faceit.username', 'faceit.elo', 'faceit.level',
'faceit.next_level_points', 'faceit.next_level')
async def faceit_elo(bot, channel, args, var_args, **kwargs):
if not var_args or \
not 'faceit.username' in var_args or \
not var_args['faceit.username']:
raise Send_error('{faceit.username <username>} is missing')
params = {
'nickname': var_args['faceit.username'][0]
}
headers = {
'Authorization': f'Bearer {config["faceit_apikey"]}',
}
elos = (
(1, '1'),
(801, '2'),
(951, '3'),
(1101, '4'),
(1251, '5'),
(1401, '6'),
(1551, '7'),
(1701, '8'),
(1851, '9'),
(2001, '10'),
)
async with bot.ahttp.get('https://open.faceit.com/data/v4/players', params=params, headers=headers) as r:
if r.status == 404:
            raise Send_error('Unknown user on Faceit (usernames are case sensitive)')
elif r.status >= 400:
error = await r.text()
raise Send_error(f'Faceit error: {error}')
d = await r.json()
if 'csgo' not in d['games']:
raise Send_error('The user does not have CSGO in their Faceit profile')
next_level_points = 0
next_level = 'unknown'
for i, e in enumerate(elos):
if e[0] < d['games']['csgo']['faceit_elo']:
if i+1 < len(elos):
next_level = elos[i+1][1]
next_level_points = elos[i+1][0] - d['games']['csgo']['faceit_elo']
return {
'faceit.username': '',
'faceit.elo': d['games']['csgo']['faceit_elo'],
'faceit.level': d['games']['csgo']['skill_level_label'],
'faceit.next_level_points': next_level_points,
'faceit.next_level': next_level,
}
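# Tier sketch using the elos table above (the 1500 elo is hypothetical):
# a player at 1500 elo falls past the 1401 threshold, so next_level is '7'
# and next_level_points is 1551 - 1500 = 51.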
| 34.298246
| 109
| 0.544246
| 0
| 0
| 0
| 0
| 1,853
| 0.947826
| 1,738
| 0.889003
| 662
| 0.338619
|
758d3ae4874f3aae353700d2388d8c12f38f9087
| 740
|
py
|
Python
|
setup.py
|
nickzhuang0613/BaiduSpider
|
f7c2dfc917c8617a8f5f3691bac642c376faed0f
|
[
"MIT"
] | 1
|
2021-03-13T04:35:34.000Z
|
2021-03-13T04:35:34.000Z
|
setup.py
|
nickzhuang0613/BaiduSpider
|
f7c2dfc917c8617a8f5f3691bac642c376faed0f
|
[
"MIT"
] | null | null | null |
setup.py
|
nickzhuang0613/BaiduSpider
|
f7c2dfc917c8617a8f5f3691bac642c376faed0f
|
[
"MIT"
] | null | null | null |
import setuptools
with open('README.md', 'r', encoding='utf-8') as fh:
long_description = fh.read()
setuptools.setup(
name='BaiduSpider',
version='0.0.6',
author='Sam Zhang',
author_email='samzhang951@outlook.com',
    description='BaiduSpider, a powerful tool for crawling Baidu',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/BaiduSpider/BaiduSpider',
packages=setuptools.find_packages(),
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Development Status :: 3 - Alpha'
],
python_requires='>=3.6',
install_requires=[
'requests',
'bs4',
'htmlmin'
]
)
| 26.428571
| 53
| 0.636486
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 320
| 0.421053
|
758f66b1bfeec1c3413eae058bce0eb697970ad8
| 411
|
py
|
Python
|
utils/reward.py
|
1980744819/playing-mario-with-DQN
|
f263e3615bf4439ad17d95a9f449c6145792402b
|
[
"MIT"
] | 3
|
2020-03-12T19:20:27.000Z
|
2020-12-16T06:21:05.000Z
|
utils/reward.py
|
1980744819/playing-mario-with-DQN
|
f263e3615bf4439ad17d95a9f449c6145792402b
|
[
"MIT"
] | null | null | null |
utils/reward.py
|
1980744819/playing-mario-with-DQN
|
f263e3615bf4439ad17d95a9f449c6145792402b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : reward.py
# @Author: zixiao
# @Date : 2019-03-28
# @Desc :
def get_reward(info, last_info):
    re = (info['coins'] - last_info['coins']
          + info['time'] - last_info['time']
          + (info['lives'] - last_info['lives']) * 10
          + info['score'] - last_info['score']
          + info['xscrollLo'] - last_info['xscrollLo']
          - 0.1)
return re / 1000.0
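# Worked example (the info dicts below are hypothetical): gaining 2 coins and
# 100 score with everything else unchanged gives
# (2 + 0 + 0 + 100 + 0 - 0.1) / 1000 = 0.1019.
if __name__ == "__main__":
    base = {'coins': 0, 'time': 400, 'lives': 2, 'score': 0, 'xscrollLo': 0}
    after = dict(base, coins=2, score=100)
    assert abs(get_reward(after, base) - 0.1019) < 1e-9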
| 29.357143
| 113
| 0.562044
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 188
| 0.457421
|
758f716bb197ed14c12012351f797136c91cbd8a
| 1,270
|
py
|
Python
|
Buddystring.py
|
pgupta119/LeetCode
|
3e7418402d736cba19362fe7525fdc9067cfcaef
|
[
"MIT"
] | null | null | null |
Buddystring.py
|
pgupta119/LeetCode
|
3e7418402d736cba19362fe7525fdc9067cfcaef
|
[
"MIT"
] | null | null | null |
Buddystring.py
|
pgupta119/LeetCode
|
3e7418402d736cba19362fe7525fdc9067cfcaef
|
[
"MIT"
] | null | null | null |
# Given two strings a and b, return true if you can swap two letters in a so the result is equal to b, otherwise, return false.
# Swapping letters is defined as taking two indices i and j (0-indexed) such that i != j and swapping the characters at a[i] and a[j]. For example, swapping at indices 0 and 2 in "abcd" results in "cbad".
#Example
# Input: a = "ab", b = "ba"
# Output: True
# Explanation: You can swap a[0] = 'a' and a[1] = 'b' to get "ba", which is equal to b.
class Solution:
def buddyStrings(a, b):
#Length of a and b is not equal
if len(a)!= len(b): return False
        #if a equals b, a swap can keep them equal only when some letter repeats
if a==b and len(set(a))!=len(a):
return True
indices,indices1=[],[]
        #collect the characters at each position where a and b differ
        #a single swap works only if there are exactly 2 mismatches and they mirror each other
for i in range(len(a)):
if a[i] != b[i]:
indices.append(a[i])
indices1.append(b[i])
        #True only when the two mismatched pairs are mirror images
return len(indices)==2 and indices==indices1[::-1]
print(Solution.buddyStrings("ab", "ba"))
# Output: True
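# Additional checks: "ab" vs "ab" has no repeated letter, so no swap keeps
# them equal; "aa" vs "aa" can swap its two identical letters.
print(Solution.buddyStrings("ab", "ab"))
# Output: False
print(Solution.buddyStrings("aa", "aa"))
# Output: True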
| 42.333333
| 204
| 0.616535
| 731
| 0.575591
| 0
| 0
| 0
| 0
| 0
| 0
| 788
| 0.620472
|
759014d40b4767cde83fe06211c68c1440c3f6c4
| 14
|
py
|
Python
|
noop.py
|
OpenIotNetwork/echo-firmware
|
007ead22704231f320abdc1ce32430a6d69a57f7
|
[
"Apache-2.0"
] | 1
|
2021-03-08T18:34:06.000Z
|
2021-03-08T18:34:06.000Z
|
noop.py
|
OpenIotNetwork/echo-firmware
|
007ead22704231f320abdc1ce32430a6d69a57f7
|
[
"Apache-2.0"
] | null | null | null |
noop.py
|
OpenIotNetwork/echo-firmware
|
007ead22704231f320abdc1ce32430a6d69a57f7
|
[
"Apache-2.0"
] | null | null | null |
print("NOOP")
| 7
| 13
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.428571
|
75903cb74255d9a040c92dad9af5ac01cf28b1cb
| 54
|
py
|
Python
|
srichand_cloudmesh_ex2.py
|
futuresystems/465-srijchand
|
f52127dfd2cefa7836e25b3ca8a4d81f8ec041ab
|
[
"Apache-2.0"
] | null | null | null |
srichand_cloudmesh_ex2.py
|
futuresystems/465-srijchand
|
f52127dfd2cefa7836e25b3ca8a4d81f8ec041ab
|
[
"Apache-2.0"
] | null | null | null |
srichand_cloudmesh_ex2.py
|
futuresystems/465-srijchand
|
f52127dfd2cefa7836e25b3ca8a4d81f8ec041ab
|
[
"Apache-2.0"
] | null | null | null |
import cloudmesh
print(cloudmesh.shell("cloud list"))
| 13.5
| 35
| 0.796296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0.222222
|
7590bc68d426088e6dfdde0c77099c6866ef8478
| 3,583
|
py
|
Python
|
datasets/robothor_data.py
|
SgtVincent/Robothor-2020---VIPL-ICT
|
5eee00c077c07e69120fb8108f574c2339688f34
|
[
"Apache-2.0"
] | null | null | null |
datasets/robothor_data.py
|
SgtVincent/Robothor-2020---VIPL-ICT
|
5eee00c077c07e69120fb8108f574c2339688f34
|
[
"Apache-2.0"
] | 1
|
2022-03-14T03:34:49.000Z
|
2022-03-14T03:34:49.000Z
|
datasets/robothor_data.py
|
SgtVincent/Robothor-2020---VIPL-ICT
|
5eee00c077c07e69120fb8108f574c2339688f34
|
[
"Apache-2.0"
] | null | null | null |
from .constants import (
ROBOTHOR_ORIGINAL_CLASS_LIST
)
import re
import os
import json
import networkx
import h5py
import numpy as np
scene_types = ['FloorPlan_Train1', 'FloorPlan_Train2', 'FloorPlan_Train3',
'FloorPlan_Train4', 'FloorPlan_Train5', 'FloorPlan_Train6',
'FloorPlan_Train7', 'FloorPlan_Train8', 'FloorPlan_Train9',
'FloorPlan_Train10', 'FloorPlan_Train11', 'FloorPlan_Train12']
DIFFICULTY = ['easy', 'medium', 'hard']
def get_scenes(scene_type):
# scene_type: "FloorPlan_TrainX" or "FloorPlan_ValY"
return [scene_type + "_{}".format(i) for i in range(1,6)]
# TODO: modify code relative to these two functions in test_val_episode_ithor.py and nonadaptivea3c_val.py
def name_to_num(name):
    # 1-based, matching num_to_name below
    return scene_types.index(name) + 1
def num_to_name(num):
return scene_types[num-1]
def get_data(scene_types):
idx = []
for j in range(len(scene_types)):
idx.append(scene_types.index(scene_types[j]))
scenes = [
get_scenes(scene_type) for scene_type in scene_types
]
possible_targets = ROBOTHOR_ORIGINAL_CLASS_LIST
    # dummy code, since the object class list is the same for all scene types
# TODO: modify this code when using customized targets ...
targets = [ROBOTHOR_ORIGINAL_CLASS_LIST] * 12
return scenes, possible_targets, [targets[i] for i in idx]
def preload_metadata(args, scene_types,
train_scenes="[1-5]",
grid_file_name="grid.json",
graph_file_name="graph.json",
metadata_file_name="visible_object_map.json",
):
metadata = {}
i,j = re.findall(r"\d+", train_scenes)
# load all metadata to dictionary
for scene_type in scene_types:
for scene_name in [scene_type + "_{}".format(k) for k in range(int(i), int(j)+1)]:
metadata[scene_name] = {}
with open(os.path.join(args.offline_data_dir, scene_name, grid_file_name),"r",) as f:
metadata[scene_name]['grid'] = json.load(f)
with open(os.path.join(args.offline_data_dir, scene_name, graph_file_name),"r") as f:
graph_json = json.load(f)
metadata[scene_name]['graph_json'] = graph_json
metadata[scene_name]['graph'] = networkx.readwrite.node_link_graph(graph_json).to_directed()
with open(os.path.join(args.offline_data_dir, scene_name, metadata_file_name),"r") as f:
metadata[scene_name]['metadata'] = json.load(f)
with h5py.File(os.path.join(args.offline_data_dir, scene_name, args.images_file_name), "r") as images:
metadata[scene_name]['all_states'] = list(images.keys())
return metadata
def get_curriculum_meta(args, scenes):
scenes = np.array(scenes).reshape(-1)
curriculum_meta = {}
for scene in scenes:
meta_file_path = os.path.join(args.curriculum_meta_dir, scene, scene+'_'+args.meta_pattern)
with open(meta_file_path) as f:
scene_meta = json.loads(f.read())
curriculum_meta[scene] = scene_meta['episodes'][scene]
return curriculum_meta
def load_offline_shortest_path_data(args, scenes):
scenes = np.array(scenes).reshape(-1)
offline_shortest_path_data = {}
for scene in scenes:
path = os.path.join(args.curriculum_meta_dir, scene, "shortest_path_len.json")
with open(path, 'r') as f:
scene_path_len = json.load(f)
offline_shortest_path_data[scene] = scene_path_len
return offline_shortest_path_data
| 36.191919
| 115
| 0.662294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 720
| 0.200949
|
7593f4e87b009e30bfc06b0f207cd76f6db5a110
| 288
|
py
|
Python
|
QRcodegenerator.py
|
arpitarunkumaar/Hacktoberfest2021
|
0af40f90a6c0716caadbbfff44ece947b6146f60
|
[
"MIT"
] | 125
|
2021-10-01T19:05:26.000Z
|
2021-10-03T13:32:42.000Z
|
QRcodegenerator.py
|
arpitarunkumaar/Hacktoberfest2021
|
0af40f90a6c0716caadbbfff44ece947b6146f60
|
[
"MIT"
] | 201
|
2021-10-30T20:40:01.000Z
|
2022-03-22T17:26:28.000Z
|
QRcodegenerator.py
|
arpitarunkumaar/Hacktoberfest2021
|
0af40f90a6c0716caadbbfff44ece947b6146f60
|
[
"MIT"
] | 294
|
2021-10-01T18:46:05.000Z
|
2021-10-03T14:25:07.000Z
|
import pyqrcode
from pyqrcode import QRCode
# String which represent the QR code
s = "https://www.youtube.com/channel/UCeO9hPCfRzqb2yTuAn713Mg"
# Generate QR code
url = pyqrcode.create(s)
# Create and save an SVG file named "myyoutube.svg"
url.svg("myyoutube.svg", scale = 8)
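# A PNG export also works if the optional pypng dependency is installed
# (the output name below is a placeholder):
# url.png("myyoutube.png", scale=8)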
| 24
| 62
| 0.725694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 178
| 0.618056
|
759471eca6eb7bbbb400247ad8d624471bce9b4f
| 979
|
py
|
Python
|
tests/packerlicious/test_post_processor_docker.py
|
gnewson/packerlicious
|
9a5373bc3a63f949e7912dad0214340d5fddbd85
|
[
"Apache-2.0"
] | 109
|
2017-07-17T03:32:09.000Z
|
2022-02-27T18:24:18.000Z
|
tests/packerlicious/test_post_processor_docker.py
|
gnewson/packerlicious
|
9a5373bc3a63f949e7912dad0214340d5fddbd85
|
[
"Apache-2.0"
] | 175
|
2017-07-16T21:41:40.000Z
|
2021-03-19T22:28:19.000Z
|
tests/packerlicious/test_post_processor_docker.py
|
gnewson/packerlicious
|
9a5373bc3a63f949e7912dad0214340d5fddbd85
|
[
"Apache-2.0"
] | 68
|
2017-07-16T20:52:38.000Z
|
2022-01-08T18:24:17.000Z
|
import pytest
import packerlicious.post_processor as post_processor
class TestDockerImportPostProcessor(object):
def test_required_fields_missing(self):
b = post_processor.DockerImport()
with pytest.raises(ValueError) as excinfo:
b.to_dict()
assert 'required' in str(excinfo.value)
class TestDockerPushPostProcessor(object):
def test_no_required_fields(self):
b = post_processor.DockerPush()
b.to_dict()
class TestDockerSavePostProcessor(object):
def test_required_fields_missing(self):
b = post_processor.DockerSave()
with pytest.raises(ValueError) as excinfo:
b.to_dict()
assert 'required' in str(excinfo.value)
class TestDockerTagPostProcessor(object):
def test_required_fields_missing(self):
b = post_processor.DockerTag()
with pytest.raises(ValueError) as excinfo:
b.to_dict()
assert 'required' in str(excinfo.value)
| 23.309524
| 53
| 0.694586
| 898
| 0.917263
| 0
| 0
| 0
| 0
| 0
| 0
| 30
| 0.030644
|
7598e6392d65a78f154a1a2db4cb51bdef6f7043
| 3,017
|
py
|
Python
|
app/app.py
|
jemarulanda/microservicioMapeo
|
fbf3cef57a0a8aec611171460f4a3434339aa0fe
|
[
"MIT"
] | null | null | null |
app/app.py
|
jemarulanda/microservicioMapeo
|
fbf3cef57a0a8aec611171460f4a3434339aa0fe
|
[
"MIT"
] | null | null | null |
app/app.py
|
jemarulanda/microservicioMapeo
|
fbf3cef57a0a8aec611171460f4a3434339aa0fe
|
[
"MIT"
] | null | null | null |
'''Module main'''
import json
import os
from rabbitmq import RabbitMQ
from pika import exceptions
from parameter import Parameter
from send_grid import SendGrid
from traceability import Traceability
from transform import Transform
import uuid
class App:
'''class Application'''
@classmethod
def __init__(cls):
'''Method init'''
cls.accountName = os.getenv('ACCOUNT_NAME')
print('cls.accountName ',cls.accountName)
        cls.accountKey = os.getenv('ACCOUNT_KEY')
        print('cls.accountKey ', cls.accountKey)
cls.config = Parameter(cls.accountName, cls.accountKey).get_parameters()
@classmethod
def callback(cls, channel, method, properties, body):
'''Receive message '''
try:
del properties
transaction_id = str(uuid.uuid4())
businessKey = cls.config['traceability']['businessKey']
data = json.loads(body.decode('utf-8'))
#print(data)
#ibmmq(**cls.config['traceability']).send_json('message')
#Traceability(**cls.config['traceability']).save(
# businessKey,transaction_id,"Desencolar topico",
# "Subscriber-Callback", "IN", str(data),
# "OK", "Mensaje recibido")
print('Transform.transformacion(data)', Transform.transformacion(data))
except Exception as error:
print(error)
SendGrid().create_message(
cls.config['sendGrid']['apiKey'],
cls.config['sendGrid']['fromEmail'],
cls.config['sendGrid']['toEmail'],
str(error))
#Traceability(**cls.config['traceability']).save(
# businessKey,transaction_id,"Error en la calidad del mensaje enviado",
# "Subscriber", "IN", str(body),
# "ERROR", "Lectura Fallida, "+str(error))
finally:
channel.basic_ack(delivery_tag=method.delivery_tag)
@classmethod
def main(cls):
while True:
try:
objqueue = RabbitMQ(**cls.config['source'])
objqueue.connect()
objqueue.channel.basic_consume(
queue=cls.config['source']['queue'],
on_message_callback=cls.callback,
auto_ack=False
)
#cls.traceability = Traceability(**cls.config['traceability'])
try:
objqueue.channel.start_consuming()
except KeyboardInterrupt:
objqueue.disconnect()
objqueue.channel.stop_consuming()
break
except (exceptions.ConnectionClosedByBroker,exceptions.AMQPChannelError,exceptions.AMQPConnectionError) as error_connection:
                print('Connection to RabbitMQ closed', error_connection)
continue
if __name__ == '__main__':
App().main()
| 38.679487
| 136
| 0.569108
| 2,709
| 0.897912
| 0
| 0
| 2,654
| 0.879682
| 0
| 0
| 879
| 0.291349
|
759923fc156d69b7e7b7231814ffe05abf19e1c1
| 26,488
|
py
|
Python
|
modules/organizations_tab.py
|
scrummastermind/sumologictoolbox
|
02d9acb970943521685091d36b8d5135e817c22c
|
[
"Apache-2.0"
] | null | null | null |
modules/organizations_tab.py
|
scrummastermind/sumologictoolbox
|
02d9acb970943521685091d36b8d5135e817c22c
|
[
"Apache-2.0"
] | null | null | null |
modules/organizations_tab.py
|
scrummastermind/sumologictoolbox
|
02d9acb970943521685091d36b8d5135e817c22c
|
[
"Apache-2.0"
] | null | null | null |
class_name = 'organizations_tab'
from qtpy import QtCore, QtGui, QtWidgets, uic
import os
from logzero import logger
import pathlib
import json
from modules.sumologic_orgs import SumoLogic_Orgs
class CreateOrUpdateOrgDialog(QtWidgets.QDialog):
def __init__(self, deployments, org_details=None, trials_enabled=False):
super(CreateOrUpdateOrgDialog, self).__init__()
self.deployments = deployments
self.available_org_licenses = ["Paid"]
if trials_enabled and not org_details:
self.available_org_licenses.append("Trial")
self.org_details = org_details
self.setupUi(self)
def setupUi(self, Dialog):
Dialog.setObjectName("CreateOrg")
self.intValidator = QtGui.QIntValidator()
self.setWindowTitle('Enter Org Details')
QBtn = QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel
self.buttonBox = QtWidgets.QDialogButtonBox(QBtn)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
self.labelOrgName = QtWidgets.QLabel(Dialog)
self.labelOrgName.setObjectName("OrgName")
self.labelOrgName.setText('Organization Name:')
self.lineEditOrgName = QtWidgets.QLineEdit(Dialog)
self.layoutOrgName = QtWidgets.QHBoxLayout()
self.layoutOrgName.addWidget(self.labelOrgName)
self.layoutOrgName.addWidget(self.lineEditOrgName)
self.labelEmail = QtWidgets.QLabel(Dialog)
self.labelEmail.setObjectName("Email")
self.labelEmail.setText('Registration Email:')
self.lineEditEmail = QtWidgets.QLineEdit(Dialog)
self.layoutEmail = QtWidgets.QHBoxLayout()
self.layoutEmail.addWidget(self.labelEmail)
self.layoutEmail.addWidget(self.lineEditEmail)
self.labelFirstName = QtWidgets.QLabel(Dialog)
self.labelFirstName.setObjectName("FirstName")
self.labelFirstName.setText('First Name:')
self.lineEditFirstName = QtWidgets.QLineEdit(Dialog)
self.layoutFirstName = QtWidgets.QHBoxLayout()
self.layoutFirstName.addWidget(self.labelFirstName)
self.layoutFirstName.addWidget(self.lineEditFirstName)
self.labelLastName = QtWidgets.QLabel(Dialog)
self.labelLastName.setObjectName("LastName")
self.labelLastName.setText('Last Name:')
self.lineEditLastName = QtWidgets.QLineEdit(Dialog)
self.layoutLastName = QtWidgets.QHBoxLayout()
self.layoutLastName.addWidget(self.labelLastName)
self.layoutLastName.addWidget(self.lineEditLastName)
self.labelDeployment = QtWidgets.QLabel(Dialog)
self.labelDeployment.setObjectName("Deployment")
self.labelDeployment.setText('Deployment:')
self.comboBoxDeployment = QtWidgets.QComboBox(Dialog)
for deployment in self.deployments:
self.comboBoxDeployment.addItem(deployment['deploymentId'].strip())
self.layoutDeployment = QtWidgets.QHBoxLayout()
self.layoutDeployment.addWidget(self.labelDeployment)
self.layoutDeployment.addWidget(self.comboBoxDeployment)
self.labelLicenseType = QtWidgets.QLabel(Dialog)
self.labelLicenseType.setObjectName("LicenseType")
self.labelLicenseType.setText('License Type:')
self.comboBoxLicenseType = QtWidgets.QComboBox(Dialog)
        for org_license in self.available_org_licenses:
            self.comboBoxLicenseType.addItem(org_license.strip())
self.layoutLicenseType = QtWidgets.QHBoxLayout()
self.layoutLicenseType.addWidget(self.labelLicenseType)
self.layoutLicenseType.addWidget(self.comboBoxLicenseType)
self.labelTrialLength = QtWidgets.QLabel(Dialog)
self.labelTrialLength.setObjectName('TrialLength')
        self.labelTrialLength.setText('Trial Length:')
self.lineEditTrialLength = QtWidgets.QLineEdit(Dialog)
# Temporarily Disabled for V1 of Orgs. Trial length is fixed at 45 days
self.lineEditTrialLength.setText('45')
self.lineEditTrialLength.setReadOnly(True)
self.layoutTrialLength = QtWidgets.QHBoxLayout()
self.layoutTrialLength.addWidget(self.labelTrialLength)
self.layoutTrialLength.addWidget(self.lineEditTrialLength)
if self.org_details:
self.lineEditOrgName.setText(self.org_details['organizationName'])
self.lineEditOrgName.setReadOnly(True)
self.lineEditEmail.setText(self.org_details['email'])
self.lineEditEmail.setReadOnly(True)
self.lineEditFirstName.setText(self.org_details['firstName'])
self.lineEditFirstName.setReadOnly(True)
self.lineEditLastName.setText(self.org_details['lastName'])
self.lineEditLastName.setReadOnly(True)
index = self.comboBoxLicenseType.findText(self.org_details['subscription']['plan']['planName'],
QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBoxLicenseType.setCurrentIndex(index)
self.comboBoxLicenseType.setEditable(False)
self.layout = QtWidgets.QVBoxLayout()
self.layout.addLayout(self.layoutOrgName)
self.layout.addLayout(self.layoutEmail)
self.layout.addLayout(self.layoutFirstName)
self.layout.addLayout(self.layoutLastName)
self.layout.addLayout(self.layoutDeployment)
self.layout.addLayout(self.layoutLicenseType)
self.layout.addLayout(self.layoutTrialLength)
# Continuous
self.labelContinuousTierIngest = QtWidgets.QLabel(Dialog)
self.labelContinuousTierIngest.setObjectName("ContinuousTierIngest")
self.labelContinuousTierIngest.setText('Continuous Tier Ingest (0 - 1,000,000 GB/day):')
self.lineEditContinuousTierIngest = QtWidgets.QLineEdit(Dialog)
self.lineEditContinuousTierIngest.setValidator(self.intValidator)
self.layoutContinuousTierIngest = QtWidgets.QHBoxLayout()
self.layoutContinuousTierIngest.addWidget(self.labelContinuousTierIngest)
self.layoutContinuousTierIngest.addWidget(self.lineEditContinuousTierIngest)
self.labelContinuousTierStorage = QtWidgets.QLabel(Dialog)
self.labelContinuousTierStorage.setObjectName("ContinuousTierStorage")
self.labelContinuousTierStorage.setText('Continuous Tier Storage (0 - 1,000,000 GB):')
self.lineEditContinuousTierStorage = QtWidgets.QLineEdit(Dialog)
self.lineEditContinuousTierStorage.setValidator(self.intValidator)
self.layoutContinuousTierStorage = QtWidgets.QHBoxLayout()
self.layoutContinuousTierStorage.addWidget(self.labelContinuousTierStorage)
self.layoutContinuousTierStorage.addWidget(self.lineEditContinuousTierStorage)
# Frequent
self.labelFrequentTierIngest = QtWidgets.QLabel(Dialog)
self.labelFrequentTierIngest.setObjectName("FrequentTierIngest")
self.labelFrequentTierIngest.setText('Frequent Tier Ingest (0 - 1,000,000 GB/day):')
self.lineEditFrequentTierIngest = QtWidgets.QLineEdit(Dialog)
self.lineEditFrequentTierIngest.setValidator(self.intValidator)
self.layoutFrequentTierIngest = QtWidgets.QHBoxLayout()
self.layoutFrequentTierIngest.addWidget(self.labelFrequentTierIngest)
self.layoutFrequentTierIngest.addWidget(self.lineEditFrequentTierIngest)
self.labelFrequentTierStorage = QtWidgets.QLabel(Dialog)
self.labelFrequentTierStorage.setObjectName("FrequentTierStorage")
self.labelFrequentTierStorage.setText('Frequent Tier Storage (0 - 1,000,000 GB):')
self.lineEditFrequentTierStorage = QtWidgets.QLineEdit(Dialog)
self.lineEditFrequentTierStorage.setValidator(self.intValidator)
self.layoutFrequentTierStorage = QtWidgets.QHBoxLayout()
self.layoutFrequentTierStorage.addWidget(self.labelFrequentTierStorage)
self.layoutFrequentTierStorage.addWidget(self.lineEditFrequentTierStorage)
# Infrequent
self.labelInFrequentTierIngest = QtWidgets.QLabel(Dialog)
self.labelInFrequentTierIngest.setObjectName("InFrequentTierIngest")
self.labelInFrequentTierIngest.setText('InFrequent Tier Ingest (0 - 1,000,000 GB/day):')
self.lineEditInFrequentTierIngest = QtWidgets.QLineEdit(Dialog)
self.lineEditInFrequentTierIngest.setValidator(self.intValidator)
self.layoutInFrequentTierIngest = QtWidgets.QHBoxLayout()
self.layoutInFrequentTierIngest.addWidget(self.labelInFrequentTierIngest)
self.layoutInFrequentTierIngest.addWidget(self.lineEditInFrequentTierIngest)
self.labelInFrequentTierStorage = QtWidgets.QLabel(Dialog)
self.labelInFrequentTierStorage.setObjectName("InFrequentTierStorage")
self.labelInFrequentTierStorage.setText('InFrequent Tier Storage (0 - 1,000,000 GB):')
self.lineEditInFrequentTierStorage = QtWidgets.QLineEdit(Dialog)
self.lineEditInFrequentTierStorage.setValidator(self.intValidator)
self.layoutInFrequentTierStorage = QtWidgets.QHBoxLayout()
self.layoutInFrequentTierStorage.addWidget(self.labelInFrequentTierStorage)
self.layoutInFrequentTierStorage.addWidget(self.lineEditInFrequentTierStorage)
# Metrics
self.labelMetrics = QtWidgets.QLabel(Dialog)
self.labelMetrics.setObjectName("Metrics")
self.labelMetrics.setText('Metrics Ingest (0 - 100,000 DPM):')
self.lineEditMetrics = QtWidgets.QLineEdit(Dialog)
self.lineEditMetrics.setValidator(self.intValidator)
self.layoutMetrics = QtWidgets.QHBoxLayout()
self.layoutMetrics.addWidget(self.labelMetrics)
self.layoutMetrics.addWidget(self.lineEditMetrics)
# CSE
self.labelCSEIngest = QtWidgets.QLabel(Dialog)
self.labelCSEIngest.setObjectName("CSEIngest")
self.labelCSEIngest.setText('CSE Ingest (0 - 1,000,000 GB/day):')
self.lineEditCSEIngest = QtWidgets.QLineEdit(Dialog)
self.lineEditCSEIngest.setValidator(self.intValidator)
self.layoutCSEIngest = QtWidgets.QHBoxLayout()
self.layoutCSEIngest.addWidget(self.labelCSEIngest)
self.layoutCSEIngest.addWidget(self.lineEditCSEIngest)
self.labelCSEStorage = QtWidgets.QLabel(Dialog)
self.labelCSEStorage.setObjectName("CSEStorage")
self.labelCSEStorage.setText('CSE Storage (0 - 1,000,000 GB):')
self.lineEditCSEStorage = QtWidgets.QLineEdit(Dialog)
self.lineEditCSEStorage.setValidator(self.intValidator)
self.layoutCSEStorage = QtWidgets.QHBoxLayout()
self.layoutCSEStorage.addWidget(self.labelCSEStorage)
self.layoutCSEStorage.addWidget(self.lineEditCSEStorage)
if self.org_details:
self.lineEditContinuousTierIngest.setText(str(self.org_details['subscription']['baselines']['continuousIngest']))
self.lineEditContinuousTierStorage.setText(str(self.org_details['subscription']['baselines']['continuousStorage']))
self.lineEditFrequentTierIngest.setText(str(self.org_details['subscription']['baselines']['frequentIngest']))
self.lineEditFrequentTierStorage.setText(str(self.org_details['subscription']['baselines']['frequentStorage']))
self.lineEditInFrequentTierIngest.setText(str(self.org_details['subscription']['baselines']['infrequentIngest']))
self.lineEditInFrequentTierStorage.setText(str(self.org_details['subscription']['baselines']['infrequentStorage']))
self.lineEditCSEIngest.setText(str(self.org_details['subscription']['baselines']['cseIngest']))
self.lineEditCSEStorage.setText(str(self.org_details['subscription']['baselines']['cseStorage']))
self.lineEditMetrics.setText(str(self.org_details['subscription']['baselines']['metrics']))
else:
self.lineEditContinuousTierIngest.setText('0')
self.lineEditContinuousTierStorage.setText('0')
self.lineEditFrequentTierIngest.setText('0')
self.lineEditFrequentTierStorage.setText('0')
self.lineEditInFrequentTierIngest.setText('0')
self.lineEditInFrequentTierStorage.setText('0')
self.lineEditMetrics.setText('0')
self.lineEditCSEIngest.setText('0')
self.lineEditCSEStorage.setText('0')
self.layout.addLayout(self.layoutContinuousTierIngest)
self.layout.addLayout(self.layoutContinuousTierStorage)
self.layout.addLayout(self.layoutFrequentTierIngest)
self.layout.addLayout(self.layoutFrequentTierStorage)
self.layout.addLayout(self.layoutInFrequentTierIngest)
self.layout.addLayout(self.layoutInFrequentTierStorage)
self.layout.addLayout(self.layoutMetrics)
self.layout.addLayout(self.layoutCSEIngest)
self.layout.addLayout(self.layoutCSEStorage)
self.createPresetCheckbox = QtWidgets.QCheckBox("Create Credential Preset")
self.createPresetCheckbox.setChecked(True)
self.writeCredsToFileCheckbox = QtWidgets.QCheckBox("Write Credentials to File")
self.writeCredsToFileCheckbox.setChecked(False)
if not self.org_details:
self.layoutCheckboxes = QtWidgets.QHBoxLayout()
self.layoutCheckboxes.addWidget(self.createPresetCheckbox)
self.layoutCheckboxes.addWidget(self.writeCredsToFileCheckbox)
self.layout.addLayout(self.layoutCheckboxes)
self.layout.addWidget(self.buttonBox)
self.setLayout(self.layout)
return
def getresults(self):
results = {'organizationName': str(self.lineEditOrgName.text()),
'firstName': str(self.lineEditFirstName.text()),
'lastName': str(self.lineEditLastName.text()),
'email': str(self.lineEditEmail.text()),
'deploymentId': str(self.comboBoxDeployment.currentText()),
'baselines': {}
}
results['baselines']['continuousIngest'] = str(self.lineEditContinuousTierIngest.text())
results['baselines']['continuousStorage'] = str(self.lineEditContinuousTierStorage.text())
results['baselines']['frequentIngest'] = str(self.lineEditFrequentTierIngest.text())
results['baselines']['frequentStorage'] = str(self.lineEditFrequentTierStorage.text())
results['baselines']['infrequentIngest'] = str(self.lineEditInFrequentTierIngest.text())
results['baselines']['infrequentStorage'] = str(self.lineEditInFrequentTierStorage.text())
        results['baselines']['metrics'] = str(self.lineEditMetrics.text())
results['baselines']['cseIngest'] = str(self.lineEditCSEIngest.text())
results['baselines']['cseStorage'] = str(self.lineEditCSEStorage.text())
if self.comboBoxLicenseType.currentText() == 'Trial':
results['trialPlanPeriod'] = str(self.lineEditTrialLength.text())
if not self.org_details:
results['create_preset'] = self.createPresetCheckbox.isChecked()
results['write_creds_to_file'] = self.writeCredsToFileCheckbox.isChecked()
return results
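    # getresults() flattens the dialog form into a plain dict: top-level account
    # fields plus a 'baselines' dict of per-tier ingest/storage limits, which
    # organizations_tab below feeds to create_org() and update_org().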
class organizations_tab(QtWidgets.QWidget):
def __init__(self, mainwindow):
super(organizations_tab, self).__init__()
self.mainwindow = mainwindow
self.tab_name = 'Organizations'
self.cred_usage = 'left'
        organizations_ui = os.path.join(self.mainwindow.basedir, 'data/organizations.ui')
        uic.loadUi(organizations_ui, self)
#self.font = "Waree"
#self.font_size = 12
# UI Buttons for Organizations API tab
self.pushButtonGetOrgs.clicked.connect(lambda: self.update_org_list(
str(self.mainwindow.comboBoxRegionLeft.currentText().lower()),
str(self.mainwindow.lineEditUserNameLeft.text()),
str(self.mainwindow.lineEditPasswordLeft.text()),
))
self.pushButtonCreateOrg.clicked.connect(lambda: self.create_org(
str(self.mainwindow.comboBoxRegionLeft.currentText().lower()),
str(self.mainwindow.lineEditUserNameLeft.text()),
str(self.mainwindow.lineEditPasswordLeft.text()),
))
self.pushButtonCancelSubscription.clicked.connect(lambda: self.cancel_subscription(
self.tableWidgetOrgs.selectedItems(),
str(self.mainwindow.comboBoxRegionLeft.currentText().lower()),
str(self.mainwindow.lineEditUserNameLeft.text()),
str(self.mainwindow.lineEditPasswordLeft.text())
))
self.pushButtonUpdateSubscription.clicked.connect(lambda: self.update_subscription(
self.tableWidgetOrgs.selectedItems(),
str(self.mainwindow.comboBoxRegionLeft.currentText().lower()),
str(self.mainwindow.lineEditUserNameLeft.text()),
str(self.mainwindow.lineEditPasswordLeft.text())
))
self.tableWidgetOrgs.itemDoubleClicked.connect(self.row_doubleclicked)
    def row_doubleclicked(self, qtablewidgetitem):
        # Placeholder slot: gathers the double-clicked row's details but does
        # not act on them yet.
        selected = self.tableWidgetOrgs.selectedItems()
        row_dict = self.create_dict_from_qtable_row(selected)
        return row_dict
def create_dict_from_qtable_row(self, list_of_qtableitems):
row_dict = {}
for qtableitem in list_of_qtableitems:
column_number = qtableitem.column()
key = self.tableWidgetOrgs.horizontalHeaderItem(column_number).text()
row_dict[key] = qtableitem.text()
return row_dict
def reset_stateful_objects(self, side='both'):
self.tableWidgetOrgs.clearContents()
        self.tableWidgetOrgs.raw_orgs = []
self.tableWidgetOrgs.horizontalHeader().hide()
self.tableWidgetOrgs.setRowCount(0)
parent_deployment = str(self.mainwindow.comboBoxRegionLeft.currentText().lower())
id = str(self.mainwindow.lineEditUserNameLeft.text())
key = str(self.mainwindow.lineEditPasswordLeft.text())
self.pushButtonGetOrgs.setEnabled(True)
self.checkBoxShowActive.setEnabled(True)
self.pushButtonCreateOrg.setEnabled(True)
self.pushButtonUpdateSubscription.setEnabled(True)
self.pushButtonCancelSubscription.setEnabled(True)
        try:
            sumo_mam = SumoLogic_Orgs(id, key, parent_deployment, log_level=self.mainwindow.log_level)
            sumo_mam.get_deployments()  # probe call to verify the credentials work
        except Exception:
            self.pushButtonGetOrgs.setEnabled(False)
            self.checkBoxShowActive.setEnabled(False)
            self.pushButtonCreateOrg.setEnabled(False)
            self.pushButtonUpdateSubscription.setEnabled(False)
            self.pushButtonCancelSubscription.setEnabled(False)
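    # Design note: rather than raising an error, reset_stateful_objects() quietly
    # disables every org-management button whenever the current credentials
    # cannot reach the Orgs API.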
def update_org_list(self, parent_deployment, id, key):
logger.info("[Organizations] Getting Updated Org List")
        if self.checkBoxShowActive.isChecked():
            status_filter = "Active"
        else:
            status_filter = "All"
try:
sumo_mam = SumoLogic_Orgs(id, key, parent_deployment, log_level=self.mainwindow.log_level)
self.tableWidgetOrgs.raw_orgs = sumo_mam.get_orgs_sync(status_filter=status_filter)
self.update_org_table_widget()
except Exception as e:
logger.exception(e)
self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
self.reset_stateful_objects('left')
return
def update_org_table_widget(self):
logger.info("[Organizations] Updating Org Table Widget")
self.tableWidgetOrgs.clear()
orgs = []
for raw_org in self.tableWidgetOrgs.raw_orgs:
org = { 'Org Name': raw_org['organizationName'],
'Org ID': raw_org['orgId'],
'Owner Email': raw_org['email'],
'Credits': raw_org['subscription']['credits'],
'License': raw_org['subscription']['plan']['planName'],
'Status': raw_org['subscription']['status'],
'Continuous Ingest': raw_org['subscription']['baselines']['continuousIngest'],
'Continuous Storage': raw_org['subscription']['baselines']['continuousStorage'],
'Frequent Ingest': raw_org['subscription']['baselines']['frequentIngest'],
'Frequent Storage': raw_org['subscription']['baselines']['frequentStorage'],
'Infrequent Ingest': raw_org['subscription']['baselines']['infrequentIngest'],
'Infrequent Storage': raw_org['subscription']['baselines']['infrequentStorage'],
'CSE Ingest': raw_org['subscription']['baselines']['cseIngest'],
'CSE Storage': raw_org['subscription']['baselines']['cseStorage'],
'Metrics': raw_org['subscription']['baselines']['metrics']
}
orgs.append(org)
if len(orgs) > 0:
numrows = len(orgs)
self.tableWidgetOrgs.setRowCount(numrows)
numcolumns = len(orgs[0])
self.tableWidgetOrgs.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
self.tableWidgetOrgs.setColumnCount(numcolumns)
self.tableWidgetOrgs.horizontalHeader().show()
self.tableWidgetOrgs.setHorizontalHeaderLabels((list(orgs[0].keys())))
for row in range(numrows):
for column in range(numcolumns):
entry = (list(orgs[row].values())[column])
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, entry)
self.tableWidgetOrgs.setItem(row, column, item)
else:
self.mainwindow.errorbox('No orgs to display.')
def create_org(self, parent_deployment, id, key):
logger.info("[Organizations]Creating Org")
try:
sumo_orgs = SumoLogic_Orgs(id, key, parent_deployment, log_level=self.mainwindow.log_level)
deployments = sumo_orgs.get_deployments()
org_info = sumo_orgs.get_parent_org_info()
trials_enabled = org_info['isEligibleForTrialOrgs']
except Exception as e:
self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
logger.exception(e)
return
dialog = CreateOrUpdateOrgDialog(deployments, trials_enabled=trials_enabled)
        dialog.exec()
if str(dialog.result()) == '1':
org_details = dialog.getresults()
try:
response = sumo_orgs.create_org(org_details)
dialog.close()
except Exception as e:
self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
logger.exception(e)
dialog.close()
return
# if org_details['create_preset']:
# self.mainwindow.create_preset_non_interactive(response_dict['organizationName'],
# response_dict['deploymentId'],
# response_dict['accessKey']['id'],
# response_dict['accessKey']['key']
# )
# if org_details['write_creds_to_file']:
# savepath = QtWidgets.QFileDialog.getExistingDirectory(self, 'Save Credentials Location')
# file = pathlib.Path(savepath + r'/' + str(response_dict['organizationName'] + r'.user.json'))
# try:
# with open(str(file), 'w') as filepointer:
# json.dump(response_dict, filepointer)
#
# except Exception as e:
# self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
# logger.exception(e)
# # secure the credentials file
            # os.chmod(file, 0o600)
self.update_org_list(parent_deployment, id, key)
else:
return
def cancel_subscription(self, selected_row, parent_deployment, id, key):
if len(selected_row) > 0:
logger.info("[Organizations] Canceling Subscription")
row_dict = self.create_dict_from_qtable_row(selected_row)
try:
sumo_orgs = SumoLogic_Orgs(id, key, parent_deployment=parent_deployment)
sumo_orgs.deactivate_org(row_dict['Org ID'])
except Exception as e:
self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
logger.exception(e)
return
self.update_org_list(parent_deployment, id, key)
return
else:
self.mainwindow.errorbox('Nothing Selected')
def update_subscription(self, selected_row, parent_deployment, id, key):
if len(selected_row) > 0:
logger.info("[Organizations] Updating Subscription")
row_dict = self.create_dict_from_qtable_row(selected_row)
try:
sumo_orgs = SumoLogic_Orgs(id, key, parent_deployment)
org_details = sumo_orgs.get_org_details(row_dict['Org ID'])
deployments = sumo_orgs.get_deployments()
org_info = sumo_orgs.get_parent_org_info()
trials_enabled = org_info['isEligibleForTrialOrgs']
except Exception as e:
self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
logger.exception(e)
return
dialog = CreateOrUpdateOrgDialog(deployments, org_details=org_details, trials_enabled=trials_enabled)
            dialog.exec()
if str(dialog.result()) == '1':
org_update_details = dialog.getresults()
try:
                    sumo_orgs.update_org(org_details['orgId'], org_update_details['baselines'])
                except Exception as e:
                    self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
                    logger.exception(e)
                    dialog.close()
                    return
                dialog.close()
                self.update_org_list(parent_deployment, id, key)
| 48.247723
| 127
| 0.669435
| 26,287
| 0.992412
| 0
| 0
| 0
| 0
| 0
| 0
| 4,078
| 0.153957
|
759a0430a9251f3f49f413680d321c1b741036a9
| 562
|
py
|
Python
|
hello.py
|
Sid672/Music
|
ee3c35ae1dfa71372dc6ce5a101503beaac91fd5
|
[
"MIT"
] | null | null | null |
hello.py
|
Sid672/Music
|
ee3c35ae1dfa71372dc6ce5a101503beaac91fd5
|
[
"MIT"
] | null | null | null |
hello.py
|
Sid672/Music
|
ee3c35ae1dfa71372dc6ce5a101503beaac91fd5
|
[
"MIT"
] | null | null | null |
# Code
# Python code
# script_name: hello
#
# author: Siddharth
# description: composition
#
# set up
from earsketch import *
# Initialize
init()
setTempo(120)
# variables
chord = RD_UK_HOUSE__5THCHORD_2
secondarybeat = HIPHOP_BASSSUB_001
mainbeat = HOUSE_MAIN_BEAT_003
# Music
fitMedia(chord, 1, 1, 16)
setEffect(1, VOLUME, GAIN, -60, 1, 5, 12)
setEffect(1, VOLUME, GAIN, 5, 12, -60, 16)
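# With seven arguments, setEffect ramps a parameter between two points, so the
# two calls above fade the chord in from -60 dB at measure 1 up to 5 dB at
# measure 12, then back down to -60 dB by measure 16.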
fitMedia(secondarybeat, 2, 1, 12)
setEffect(2, DELAY, DELAY_TIME, 500)
fitMedia(mainbeat, 3, 1, 8)
setEffect(2, REVERB, REVERB_TIME, 200)
# Finish
finish()
| 17.030303
| 42
| 0.709964
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 143
| 0.254448
|
759a621d0c21d47983881f0990e0d95c9d89af8b
| 575
|
py
|
Python
|
utf8_to_sjis.py
|
yo16/utf8_to_sjis
|
a0ea7205a2acb96743ca8cb24c38cf1db2cb0ffb
|
[
"MIT"
] | null | null | null |
utf8_to_sjis.py
|
yo16/utf8_to_sjis
|
a0ea7205a2acb96743ca8cb24c38cf1db2cb0ffb
|
[
"MIT"
] | null | null | null |
utf8_to_sjis.py
|
yo16/utf8_to_sjis
|
a0ea7205a2acb96743ca8cb24c38cf1db2cb0ffb
|
[
"MIT"
] | null | null | null |
import codecs
import os
codecs.register_error('none', lambda e: ('?', e.end))
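# The custom 'none' handler substitutes '?' for any character with no Shift-JIS
# mapping; illustratively, '①'.encode('sjis', errors='none') yields b'?'.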
def utf8_to_sjis(files, in_dir, out_dir):
os.makedirs(out_dir, exist_ok=True)
for f in files:
utf8_to_sjis_one(f, in_dir, out_dir)
def utf8_to_sjis_one(file, in_dir, out_dir):
with open(f'{in_dir}/{file}', mode='r', encoding='utf-8') as fi:
with open(f'{out_dir}/{file}', mode='w', encoding='sjis', errors='none') as fo:
fo.write(fi.read())
if __name__ == '__main__':
files = [
'test_file.csv'
]
in_dir = 'in_utf8'
out_dir = 'sjis'
utf8_to_sjis(files, in_dir, out_dir)
| 19.166667
| 81
| 0.673043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 111
| 0.193043
|
759b22ff49969cf5635a933561ca5a0c9d611cf7
| 363
|
py
|
Python
|
mavweb/server/apps.py
|
vixadd/MAVWed
|
c86327a18c1f5260aa77a5975d2977df5e9267cc
|
[
"MIT"
] | null | null | null |
mavweb/server/apps.py
|
vixadd/MAVWed
|
c86327a18c1f5260aa77a5975d2977df5e9267cc
|
[
"MIT"
] | null | null | null |
mavweb/server/apps.py
|
vixadd/MAVWed
|
c86327a18c1f5260aa77a5975d2977df5e9267cc
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
from mavweb.mavlink_arbiter.main import Main
class ServerConfig(AppConfig):
"""
Django Server configurations.
"""
name = 'server'
class MavlinkArbiter(AppConfig):
"""
Mavlink arbiter used for application pairing with mavlink functions.
"""
name = 'mavlink_arbiter'
arbiter_module = Main()
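    # Note: arbiter_module is evaluated once at import time, so a single Main()
    # instance is shared by everything that references this AppConfig.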
| 20.166667
| 72
| 0.699725
| 278
| 0.76584
| 0
| 0
| 0
| 0
| 0
| 0
| 154
| 0.424242
|